new cmd/chatd on :3220 routes /v1/chat to the right provider based
on model-name prefix or :cloud suffix. closes the architectural gap
left open by lakehouse.toml [models]: tiers map to model IDs, but until
phase 4 there was no service that could actually CALL those models
from Go.
routing rules (registry.Resolve; a dispatch sketch follows the list):
ollama/<m> → local Ollama (prefix stripped)
ollama_cloud/<m> → Ollama Cloud
<m>:cloud → Ollama Cloud (suffix variant — kimi-k2.6:cloud)
openrouter/<v>/<m> → OpenRouter (prefix stripped, OpenAI-compat)
opencode/<m> → OpenCode unified Zen+Go
kimi/<m> → Kimi For Coding (api.kimi.com/coding/v1)
bare names → local Ollama (default)
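a minimal sketch of that dispatch, assuming plain string routing (illustrative only; the real registry.Resolve also returns the Provider instance, and whether :cloud is stripped before the upstream call is its detail, not this sketch's):

package chat

import "strings"

// resolveModel is an illustrative stand-in for registry.Resolve:
// explicit prefixes win, then the :cloud suffix, then the local
// Ollama default. The real Resolve also returns the Provider and
// rejects :cloud when ollama_cloud isn't registered (smoke check 4).
func resolveModel(model string) (provider, upstream string) {
	switch {
	case strings.HasPrefix(model, "ollama/"):
		return "ollama", strings.TrimPrefix(model, "ollama/")
	case strings.HasPrefix(model, "ollama_cloud/"):
		return "ollama_cloud", strings.TrimPrefix(model, "ollama_cloud/")
	case strings.HasPrefix(model, "openrouter/"):
		return "openrouter", strings.TrimPrefix(model, "openrouter/")
	case strings.HasPrefix(model, "opencode/"):
		return "opencode", strings.TrimPrefix(model, "opencode/")
	case strings.HasPrefix(model, "kimi/"):
		return "kimi", strings.TrimPrefix(model, "kimi/")
	case strings.HasSuffix(model, ":cloud"):
		// suffix variant; kept as-is here, stripping is an upstream detail
		return "ollama_cloud", model
	default:
		// bare names and unknown/<m> fall through with no rewrite
		return "ollama", model
	}
}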
provider implementations (the types.go shapes are sketched after this list):
- internal/chat/types.go Provider interface, Request/Response, errors
- internal/chat/registry.go prefix + :cloud suffix dispatch
- internal/chat/ollama.go local Ollama via /api/chat (think=false default)
- internal/chat/ollama_cloud.go Ollama Cloud via /api/generate (Bearer auth)
- internal/chat/openai_compat.go shared OpenAI Chat Completions for the
OpenRouter/OpenCode/Kimi family
- internal/chat/builder.go BuildRegistry from BuilderInput;
ResolveKey reads env then .env file fallback
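the types.go surface as ollama.go uses it, reconstructed as a sketch (field names follow the code below; the Message shape and JSON tags are assumptions):

package chat

import (
	"context"
	"errors"
)

// Message/Request/Response sketched from usage in ollama.go; the real
// types.go may carry more fields.
type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

type Request struct {
	Model       string    `json:"model"`
	Messages    []Message `json:"messages"`
	Temperature float64   `json:"temperature,omitempty"`
	MaxTokens   int       `json:"max_tokens,omitempty"`
	Format      string    `json:"format,omitempty"` // "json" requests constrained output
}

type Response struct {
	Model        string `json:"model"`
	Content      string `json:"content"`
	InputTokens  int    `json:"input_tokens"`
	OutputTokens int    `json:"output_tokens"`
	FinishReason string `json:"finish_reason"` // "stop" | "length"
}

// Provider is what the registry dispatches a resolved request to.
type Provider interface {
	Name() string
	Available() bool
	Chat(ctx context.Context, req Request) (*Response, error)
}

// Sentinel errors providers wrap; upstream failures surface as 502
// at the HTTP layer (see smoke check 5).
var (
	ErrTimeout  = errors.New("chat: upstream timeout")
	ErrUpstream = errors.New("chat: upstream error")
)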
config (a rough Go shape of ChatdConfig follows this list):
- ChatdConfig in internal/shared/config.go with bind, ollama_url,
per-provider key env names + .env fallback paths, timeout
- Gateway gains chatd_url + /v1/chat + /v1/chat/* routes
- lakehouse.toml [chatd] block with /etc/lakehouse/<provider>.env defaults
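a rough Go shape for ChatdConfig, inferred from the description above rather than copied from config.go (field and tag names are assumptions):

package shared

import "time"

// ChatdConfig sketch: bind address, local Ollama URL, per-provider key
// env names with .env fallback paths, and an upstream timeout. The
// real struct in internal/shared/config.go may differ.
type ChatdConfig struct {
	Bind      string        `toml:"bind"`       // e.g. ":3220"
	OllamaURL string        `toml:"ollama_url"` // default http://localhost:11434
	Timeout   time.Duration `toml:"timeout"`

	// per provider: which env var to read first and which .env file to
	// fall back to (defaults under /etc/lakehouse/<provider>.env)
	Providers map[string]ProviderAuth `toml:"providers"`
}

type ProviderAuth struct {
	KeyEnv  string `toml:"key_env"`
	EnvFile string `toml:"env_file"`
}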
tests (19 in internal/chat; the httptest pattern is sketched after this list):
- registry: prefix + :cloud + errors + telemetry + provider listing
- ollama: happy path + prefix strip + format=json + 500 mapping +
flatten_messages
- openai_compat: happy path + format=json + 429 mapping + zero-choices
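the ollama happy-path case, sketched as the httptest pattern it implies (not the actual test file; the Message literal assumes the types sketch above):

package chat

import (
	"context"
	"net/http"
	"net/http/httptest"
	"testing"
)

// Fake /api/chat, then assert content, token mapping and the done →
// finish_reason translation from ollama.go.
func TestOllamaChatHappyPath(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/api/chat" {
			t.Errorf("unexpected path %s", r.URL.Path)
		}
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"model":"qwen3.5:latest","message":{"content":"hi"},"done":true,"prompt_eval_count":7,"eval_count":3}`))
	}))
	defer srv.Close()

	p := NewOllama(srv.URL, 0)
	resp, err := p.Chat(context.Background(), Request{
		Model:    "ollama/qwen3.5:latest", // prefix should be stripped before the upstream call
		Messages: []Message{{Role: "user", Content: "hello"}},
	})
	if err != nil {
		t.Fatal(err)
	}
	if resp.Content != "hi" || resp.InputTokens != 7 || resp.OutputTokens != 3 || resp.FinishReason != "stop" {
		t.Fatalf("unexpected response: %+v", resp)
	}
}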
think=false default in ollama + ollama_cloud — the local hot path skips
reasoning, so low-budget callers (the playbook_lift judge at max_tokens=10)
get direct answers instead of empty content + done_reason=length.
proven via chatd_smoke acceptance.
acceptance gate: scripts/chatd_smoke.sh — 6/6 PASS (check 2 is sketched in Go after the list):
1. /v1/chat/providers lists exactly registered providers (1 in dev mode)
2. bare model → ollama default with content + token counts + latency
3. explicit ollama/<m> → prefix stripped at upstream
4. <m>:cloud without ollama_cloud registered → 404 (no silent fall-through)
5. unknown/<m> → falls through to default → upstream 502 (no prefix rewrite)
6. missing model field → 400
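check 2 rephrased as a tiny Go client against chatd on :3220 (request/response field names are assumptions; chatd_smoke.sh remains the source of truth):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Bare model name → chatd should route to local Ollama by default.
	body, _ := json.Marshal(map[string]any{
		"model":    "qwen3.5:latest",
		"messages": []map[string]string{{"role": "user", "content": "ping"}},
	})
	resp, err := http.Post("http://localhost:3220/v1/chat", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out map[string]any
	_ = json.NewDecoder(resp.Body).Decode(&out)
	// Expect 200 with content, token counts and latency per the smoke;
	// omitting the "model" field should come back as 400 (check 6).
	fmt.Println(resp.Status, out)
}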
just verify: PASS (vet + 30 packages × short tests + 9 smokes).
chatd_smoke is a domain smoke (not part of just verify; it mirrors the
matrix / observer / pathway pattern).
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
internal/chat/ollama.go (143 lines, 3.9 KiB, Go):
package chat

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"
)

// Ollama (local) provider — calls /api/chat on the local Ollama
// server. No auth needed; default URL http://localhost:11434.
//
// Bare model names route here by default (registry.defaultName=ollama),
// so "qwen3.5:latest" → ollama. Explicit "ollama/qwen3.5:latest" also
// works (prefix stripped).
type Ollama struct {
	baseURL    string
	httpClient *http.Client
}

// NewOllama returns a local Ollama provider. baseURL defaults to
// http://localhost:11434 when empty. timeout 0 → 180s.
func NewOllama(baseURL string, timeout time.Duration) *Ollama {
	if baseURL == "" {
		baseURL = "http://localhost:11434"
	}
	if timeout == 0 {
		timeout = 180 * time.Second
	}
	return &Ollama{
		baseURL:    strings.TrimRight(baseURL, "/"),
		httpClient: &http.Client{Timeout: timeout},
	}
}

func (o *Ollama) Name() string { return "ollama" }

// Available pings /api/tags. Cached negative result would be a
// premature optimization for G0 — Ollama is typically up. If down,
// next call gets ErrUpstream which is the right signal anyway.
func (o *Ollama) Available() bool {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	req, _ := http.NewRequestWithContext(ctx, "GET", o.baseURL+"/api/tags", nil)
	resp, err := o.httpClient.Do(req)
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	return resp.StatusCode/100 == 2
}

// Chat translates Request to Ollama's /api/chat shape and back.
// Strips the optional "ollama/" prefix from req.Model.
func (o *Ollama) Chat(ctx context.Context, req Request) (*Response, error) {
	model := StripPrefix(req.Model, "ollama")

	body := map[string]any{
		"model":    model,
		"messages": req.Messages,
		"stream":   false,
		// Local hot path: skip reasoning by default. qwen3 / qwen3.5 are
		// thinking-capable but the inner-loop use case wants direct
		// answers, not reasoning traces. Without this, low max_tokens
		// budgets get consumed by thinking before any content is
		// produced. Cloud tier (Ollama Cloud) inherits the same default
		// — see ollama_cloud.go.
		"think": false,
		"options": map[string]any{
			"temperature": req.Temperature,
		},
	}
	if req.MaxTokens > 0 {
		body["options"].(map[string]any)["num_predict"] = req.MaxTokens
	}
	if req.Format == "json" {
		body["format"] = "json"
	}

	bs, _ := json.Marshal(body)
	httpReq, err := http.NewRequestWithContext(ctx, "POST", o.baseURL+"/api/chat", bytes.NewReader(bs))
	if err != nil {
		return nil, err
	}
	httpReq.Header.Set("Content-Type", "application/json")

	resp, err := o.httpClient.Do(httpReq)
	if err != nil {
		if errors.Is(ctx.Err(), context.DeadlineExceeded) {
			return nil, fmt.Errorf("%w: %s", ErrTimeout, "ollama")
		}
		return nil, fmt.Errorf("ollama: %w", err)
	}
	defer resp.Body.Close()

	rb, _ := io.ReadAll(resp.Body)
	if resp.StatusCode/100 != 2 {
		return nil, fmt.Errorf("%w: ollama %d: %s", ErrUpstream, resp.StatusCode, abbrev(string(rb), 200))
	}

	var ollamaResp struct {
		Model   string `json:"model"`
		Message struct {
			Content string `json:"content"`
		} `json:"message"`
		Done            bool `json:"done"`
		PromptEvalCount int  `json:"prompt_eval_count"`
		EvalCount       int  `json:"eval_count"`
	}
	if err := json.Unmarshal(rb, &ollamaResp); err != nil {
		return nil, fmt.Errorf("ollama decode: %w (body=%s)", err, abbrev(string(rb), 200))
	}

	return &Response{
		Model:        model,
		Content:      ollamaResp.Message.Content,
		InputTokens:  ollamaResp.PromptEvalCount,
		OutputTokens: ollamaResp.EvalCount,
		FinishReason: finishReasonFromDone(ollamaResp.Done),
	}, nil
}

func finishReasonFromDone(done bool) string {
	if done {
		return "stop"
	}
	return "length"
}

// abbrev shortens long error bodies for log/error messages without
// pulling fmt's truncation flags everywhere.
func abbrev(s string, n int) string {
	if len(s) <= n {
		return s
	}
	return s[:n] + "…"
}
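for completeness, a direct-use sketch of the provider above (module path and the Message shape are illustrative; in normal operation the gateway and chatd sit in front):

package main

import (
	"context"
	"fmt"
	"time"

	"your.module/internal/chat" // module path is illustrative
)

func main() {
	// Direct use of the local Ollama provider, handy for quick checks.
	p := chat.NewOllama("", 30*time.Second) // "" → http://localhost:11434

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	resp, err := p.Chat(ctx, chat.Request{
		Model:       "qwen3.5:latest", // bare name; "ollama/" prefix also accepted
		Messages:    []chat.Message{{Role: "user", Content: "say hi in one word"}},
		Temperature: 0.2,
		MaxTokens:   10, // with think=false this still yields content, not an empty length-capped reply
	})
	if err != nil {
		fmt.Println("chat failed:", err)
		return
	}
	fmt.Printf("%s (%d in / %d out, finish=%s)\n",
		resp.Content, resp.InputTokens, resp.OutputTokens, resp.FinishReason)
}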