Scaffold project

2026-02-28 20:03:51 +00:00
commit f818b82a43
13 changed files with 1530 additions and 0 deletions

internal/api/types.go Normal file

@@ -0,0 +1,86 @@
package api

import (
    "errors"
    "fmt"
)

// ResponseRequest models the Open Responses create request payload.
type ResponseRequest struct {
    Model           string            `json:"model"`
    Provider        string            `json:"provider,omitempty"`
    MaxOutputTokens int               `json:"max_output_tokens,omitempty"`
    Metadata        map[string]string `json:"metadata,omitempty"`
    Input           []Message         `json:"input"`
    Stream          bool              `json:"stream,omitempty"`
}

// Message captures user, assistant, or system roles.
type Message struct {
    Role    string         `json:"role"`
    Content []ContentBlock `json:"content"`
}

// ContentBlock represents a typed content element (text, data, tool call, etc.).
type ContentBlock struct {
    Type string `json:"type"`
    Text string `json:"text,omitempty"`
}

// Response is a simplified Open Responses response payload.
type Response struct {
    ID       string    `json:"id"`
    Object   string    `json:"object"`
    Created  int64     `json:"created"`
    Model    string    `json:"model"`
    Provider string    `json:"provider"`
    Output   []Message `json:"output"`
    Usage    Usage     `json:"usage"`
}

// Usage captures token accounting.
type Usage struct {
    InputTokens  int `json:"input_tokens"`
    OutputTokens int `json:"output_tokens"`
    TotalTokens  int `json:"total_tokens"`
}

// StreamChunk represents a single Server-Sent Event in a streaming response.
type StreamChunk struct {
    ID       string       `json:"id,omitempty"`
    Object   string       `json:"object"`
    Created  int64        `json:"created,omitempty"`
    Model    string       `json:"model,omitempty"`
    Provider string       `json:"provider,omitempty"`
    Delta    *StreamDelta `json:"delta,omitempty"`
    Usage    *Usage       `json:"usage,omitempty"`
    Done     bool         `json:"done,omitempty"`
}

// StreamDelta represents incremental content in a stream chunk.
type StreamDelta struct {
    Role    string         `json:"role,omitempty"`
    Content []ContentBlock `json:"content,omitempty"`
}

// Validate performs basic structural validation.
func (r *ResponseRequest) Validate() error {
    if r == nil {
        return errors.New("request is nil")
    }
    if r.Model == "" {
        return errors.New("model is required")
    }
    if len(r.Input) == 0 {
        return errors.New("input messages are required")
    }
    for i, msg := range r.Input {
        if msg.Role == "" {
            return fmt.Errorf("input[%d] role is required", i)
        }
        if len(msg.Content) == 0 {
            return fmt.Errorf("input[%d] content is required", i)
        }
    }
    return nil
}
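
As a reference point, the following hypothetical snippet (not part of this commit) builds a minimal request that passes Validate and prints the JSON it serializes to; the module path is taken from the imports used elsewhere in this commit:

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "github.com/yourusername/go-llm-gateway/internal/api"
)

func main() {
    req := &api.ResponseRequest{
        Model: "gpt-4o-mini",
        Input: []api.Message{{
            Role:    "user",
            Content: []api.ContentBlock{{Type: "input_text", Text: "Hello!"}},
        }},
    }
    if err := req.Validate(); err != nil {
        log.Fatal(err) // not reached for this payload
    }
    body, _ := json.Marshal(req)
    fmt.Println(string(body))
    // {"model":"gpt-4o-mini","input":[{"role":"user","content":[{"type":"input_text","text":"Hello!"}]}]}
}

Note the "input_text" content type: the provider adapters below only read blocks of this type.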

internal/config/config.go Normal file

@@ -0,0 +1,64 @@
package config

import (
    "fmt"
    "os"

    "gopkg.in/yaml.v3"
)

// Config describes the full gateway configuration file.
type Config struct {
    Server    ServerConfig    `yaml:"server"`
    Providers ProvidersConfig `yaml:"providers"`
}

// ServerConfig controls HTTP server values.
type ServerConfig struct {
    Address string `yaml:"address"`
}

// ProvidersConfig wraps supported provider settings.
type ProvidersConfig struct {
    Google    ProviderConfig `yaml:"google"`
    Anthropic ProviderConfig `yaml:"anthropic"`
    OpenAI    ProviderConfig `yaml:"openai"`
}

// ProviderConfig contains shared provider configuration fields.
type ProviderConfig struct {
    APIKey   string `yaml:"api_key"`
    Model    string `yaml:"model"`
    Endpoint string `yaml:"endpoint"`
}

// Load reads and parses a YAML configuration file and applies env overrides.
func Load(path string) (*Config, error) {
    data, err := os.ReadFile(path)
    if err != nil {
        return nil, fmt.Errorf("read config: %w", err)
    }
    var cfg Config
    if err := yaml.Unmarshal(data, &cfg); err != nil {
        return nil, fmt.Errorf("parse config: %w", err)
    }
    cfg.applyEnvOverrides()
    return &cfg, nil
}

func (cfg *Config) applyEnvOverrides() {
    overrideAPIKey(&cfg.Providers.Google, "GOOGLE_API_KEY")
    overrideAPIKey(&cfg.Providers.Anthropic, "ANTHROPIC_API_KEY")
    overrideAPIKey(&cfg.Providers.OpenAI, "OPENAI_API_KEY")
}

func overrideAPIKey(cfg *ProviderConfig, envKey string) {
    if cfg == nil {
        return
    }
    if v := os.Getenv(envKey); v != "" {
        cfg.APIKey = v
    }
}
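
For illustration, a config file this loader accepts could look like the sketch below (placeholder values; keys mirror the yaml tags above). An empty api_key can instead be supplied through the matching environment variable, and only providers that end up with a key are registered by the registry further down:

server:
  address: ":8080"

providers:
  openai:
    api_key: ""          # or export OPENAI_API_KEY
    model: gpt-4o-mini
  anthropic:
    api_key: ""          # or export ANTHROPIC_API_KEY
    model: claude-3-5-sonnet
  google:
    api_key: ""          # or export GOOGLE_API_KEY
    model: gemini-2.0-flash-exp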


@@ -0,0 +1,241 @@
package anthropic

import (
    "context"
    "fmt"
    "time"

    "github.com/anthropics/anthropic-sdk-go"
    "github.com/anthropics/anthropic-sdk-go/option"

    "github.com/yourusername/go-llm-gateway/internal/api"
    "github.com/yourusername/go-llm-gateway/internal/config"
)

const Name = "anthropic"

// Provider implements the Anthropic SDK integration.
type Provider struct {
    cfg    config.ProviderConfig
    client *anthropic.Client
}

// New constructs a Provider from configuration.
func New(cfg config.ProviderConfig) *Provider {
    var client *anthropic.Client
    if cfg.APIKey != "" {
        c := anthropic.NewClient(option.WithAPIKey(cfg.APIKey))
        client = &c
    }
    return &Provider{
        cfg:    cfg,
        client: client,
    }
}

// Name returns the provider identifier.
func (p *Provider) Name() string { return Name }

// Generate routes the Open Responses request to Anthropic's API.
func (p *Provider) Generate(ctx context.Context, req *api.ResponseRequest) (*api.Response, error) {
    if p.cfg.APIKey == "" {
        return nil, fmt.Errorf("anthropic api key missing")
    }
    if p.client == nil {
        return nil, fmt.Errorf("anthropic client not initialized")
    }
    model := chooseModel(req.Model, p.cfg.Model)
    // Convert Open Responses messages to Anthropic format
    messages := make([]anthropic.MessageParam, 0, len(req.Input))
    var system string
    for _, msg := range req.Input {
        var content string
        for _, block := range msg.Content {
            if block.Type == "input_text" {
                content += block.Text
            }
        }
        switch msg.Role {
        case "user":
            messages = append(messages, anthropic.NewUserMessage(anthropic.NewTextBlock(content)))
        case "assistant":
            messages = append(messages, anthropic.NewAssistantMessage(anthropic.NewTextBlock(content)))
        case "system":
            system = content
        }
    }
    // Build request params
    params := anthropic.MessageNewParams{
        Model:     anthropic.Model(model),
        Messages:  messages,
        MaxTokens: maxTokens(req),
    }
    if system != "" {
        params.System = []anthropic.TextBlockParam{
            {Text: system, Type: "text"},
        }
    }
    // Call Anthropic API
    resp, err := p.client.Messages.New(ctx, params)
    if err != nil {
        return nil, fmt.Errorf("anthropic api error: %w", err)
    }
    // Convert Anthropic response to Open Responses format
    output := make([]api.Message, 0, 1)
    var text string
    for _, block := range resp.Content {
        if block.Type == "text" {
            text += block.Text
        }
    }
    output = append(output, api.Message{
        Role: "assistant",
        Content: []api.ContentBlock{
            {Type: "output_text", Text: text},
        },
    })
    return &api.Response{
        ID:       resp.ID,
        Object:   "response",
        Created:  time.Now().Unix(),
        Model:    string(resp.Model),
        Provider: Name,
        Output:   output,
        Usage: api.Usage{
            InputTokens:  int(resp.Usage.InputTokens),
            OutputTokens: int(resp.Usage.OutputTokens),
            TotalTokens:  int(resp.Usage.InputTokens + resp.Usage.OutputTokens),
        },
    }, nil
}

// GenerateStream handles streaming requests to Anthropic.
func (p *Provider) GenerateStream(ctx context.Context, req *api.ResponseRequest) (<-chan *api.StreamChunk, <-chan error) {
    chunkChan := make(chan *api.StreamChunk)
    errChan := make(chan error, 1)
    go func() {
        defer close(chunkChan)
        defer close(errChan)
        if p.cfg.APIKey == "" {
            errChan <- fmt.Errorf("anthropic api key missing")
            return
        }
        if p.client == nil {
            errChan <- fmt.Errorf("anthropic client not initialized")
            return
        }
        model := chooseModel(req.Model, p.cfg.Model)
        // Convert messages
        messages := make([]anthropic.MessageParam, 0, len(req.Input))
        var system string
        for _, msg := range req.Input {
            var content string
            for _, block := range msg.Content {
                if block.Type == "input_text" {
                    content += block.Text
                }
            }
            switch msg.Role {
            case "user":
                messages = append(messages, anthropic.NewUserMessage(anthropic.NewTextBlock(content)))
            case "assistant":
                messages = append(messages, anthropic.NewAssistantMessage(anthropic.NewTextBlock(content)))
            case "system":
                system = content
            }
        }
        // Build params
        params := anthropic.MessageNewParams{
            Model:     anthropic.Model(model),
            Messages:  messages,
            MaxTokens: maxTokens(req),
        }
        if system != "" {
            params.System = []anthropic.TextBlockParam{
                {Text: system, Type: "text"},
            }
        }
        // Create stream
        stream := p.client.Messages.NewStreaming(ctx, params)
        // Process stream
        for stream.Next() {
            event := stream.Current()
            delta := &api.StreamDelta{}
            // Handle different event types; other events pass through as empty deltas.
            if event.Type == "content_block_delta" && event.Delta.Type == "text_delta" {
                delta.Content = []api.ContentBlock{
                    {Type: "output_text", Text: event.Delta.Text},
                }
            }
            if event.Type == "message_start" {
                delta.Role = "assistant"
            }
            streamChunk := &api.StreamChunk{
                Object:   "response.chunk",
                Created:  time.Now().Unix(),
                Model:    model,
                Provider: Name,
                Delta:    delta,
            }
            select {
            case chunkChan <- streamChunk:
            case <-ctx.Done():
                errChan <- ctx.Err()
                return
            }
        }
        if err := stream.Err(); err != nil {
            errChan <- fmt.Errorf("anthropic stream error: %w", err)
            return
        }
        // Send final chunk
        select {
        case chunkChan <- &api.StreamChunk{Object: "response.chunk", Done: true}:
        case <-ctx.Done():
            errChan <- ctx.Err()
        }
    }()
    return chunkChan, errChan
}

func chooseModel(requested, defaultModel string) string {
    if requested != "" {
        return requested
    }
    if defaultModel != "" {
        return defaultModel
    }
    return "claude-3-5-sonnet"
}

// maxTokens honors the request's max_output_tokens when set; Anthropic
// requires an explicit limit, so fall back to 4096 otherwise.
func maxTokens(req *api.ResponseRequest) int64 {
    if req.MaxOutputTokens > 0 {
        return int64(req.MaxOutputTokens)
    }
    return 4096
}
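
Every provider implements the same streaming contract: chunks arrive on an unbuffered channel, at most one error lands on the buffered error channel, and both channels are closed when the worker goroutine exits. A hypothetical consumer (printStream is illustrative, not part of the commit) can therefore drain them safely like this:

package client

import (
    "context"
    "fmt"

    "github.com/yourusername/go-llm-gateway/internal/api"
    "github.com/yourusername/go-llm-gateway/internal/providers"
)

// printStream drains a provider stream, printing text deltas as they arrive.
func printStream(ctx context.Context, p providers.Provider, req *api.ResponseRequest) error {
    chunks, errs := p.GenerateStream(ctx, req)
    for chunk := range chunks {
        if chunk.Delta == nil {
            continue // the final Done chunk carries no delta
        }
        for _, block := range chunk.Delta.Content {
            fmt.Print(block.Text)
        }
    }
    // Both channels are closed by the provider; receiving from the closed,
    // empty error channel yields nil, so this read doubles as the success check.
    return <-errs
}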


@@ -0,0 +1,227 @@
package google

import (
    "context"
    "fmt"
    "time"

    "github.com/google/uuid"
    "google.golang.org/genai"

    "github.com/yourusername/go-llm-gateway/internal/api"
    "github.com/yourusername/go-llm-gateway/internal/config"
)

const Name = "google"

// Provider implements the Google Generative AI integration.
type Provider struct {
    cfg    config.ProviderConfig
    client *genai.Client
}

// New constructs a Provider using the provided configuration.
func New(cfg config.ProviderConfig) *Provider {
    var client *genai.Client
    if cfg.APIKey != "" {
        var err error
        client, err = genai.NewClient(context.Background(), &genai.ClientConfig{
            APIKey: cfg.APIKey,
        })
        if err != nil {
            // Log the error but don't fail construction; Generate will report it.
            fmt.Printf("warning: failed to create google client: %v\n", err)
        }
    }
    return &Provider{
        cfg:    cfg,
        client: client,
    }
}

// Name returns the provider identifier.
func (p *Provider) Name() string { return Name }

// Generate routes the Open Responses request to Gemini.
func (p *Provider) Generate(ctx context.Context, req *api.ResponseRequest) (*api.Response, error) {
    if p.cfg.APIKey == "" {
        return nil, fmt.Errorf("google api key missing")
    }
    if p.client == nil {
        return nil, fmt.Errorf("google client not initialized")
    }
    model := chooseModel(req.Model, p.cfg.Model)
    // Convert Open Responses messages to Gemini format
    var contents []*genai.Content
    for _, msg := range req.Input {
        var parts []*genai.Part
        for _, block := range msg.Content {
            if block.Type == "input_text" {
                parts = append(parts, genai.NewPartFromText(block.Text))
            }
        }
        // Gemini only distinguishes "user" and "model" turns here, so system
        // messages fall through and are sent with the user role.
        role := "user"
        if msg.Role == "assistant" || msg.Role == "model" {
            role = "model"
        }
        contents = append(contents, &genai.Content{
            Role:  role,
            Parts: parts,
        })
    }
    // Generate content
    resp, err := p.client.Models.GenerateContent(ctx, model, contents, nil)
    if err != nil {
        return nil, fmt.Errorf("google api error: %w", err)
    }
    // Convert Gemini response to Open Responses format
    output := make([]api.Message, 0, 1)
    var text string
    if len(resp.Candidates) > 0 && resp.Candidates[0].Content != nil {
        for _, part := range resp.Candidates[0].Content.Parts {
            if part != nil {
                text += part.Text
            }
        }
    }
    output = append(output, api.Message{
        Role: "assistant",
        Content: []api.ContentBlock{
            {Type: "output_text", Text: text},
        },
    })
    // Extract usage info if available
    var inputTokens, outputTokens int
    if resp.UsageMetadata != nil {
        inputTokens = int(resp.UsageMetadata.PromptTokenCount)
        outputTokens = int(resp.UsageMetadata.CandidatesTokenCount)
    }
    return &api.Response{
        ID:       uuid.NewString(),
        Object:   "response",
        Created:  time.Now().Unix(),
        Model:    model,
        Provider: Name,
        Output:   output,
        Usage: api.Usage{
            InputTokens:  inputTokens,
            OutputTokens: outputTokens,
            TotalTokens:  inputTokens + outputTokens,
        },
    }, nil
}

// GenerateStream handles streaming requests to Google.
func (p *Provider) GenerateStream(ctx context.Context, req *api.ResponseRequest) (<-chan *api.StreamChunk, <-chan error) {
    chunkChan := make(chan *api.StreamChunk)
    errChan := make(chan error, 1)
    go func() {
        defer close(chunkChan)
        defer close(errChan)
        if p.cfg.APIKey == "" {
            errChan <- fmt.Errorf("google api key missing")
            return
        }
        if p.client == nil {
            errChan <- fmt.Errorf("google client not initialized")
            return
        }
        model := chooseModel(req.Model, p.cfg.Model)
        // Convert messages
        var contents []*genai.Content
        for _, msg := range req.Input {
            var parts []*genai.Part
            for _, block := range msg.Content {
                if block.Type == "input_text" {
                    parts = append(parts, genai.NewPartFromText(block.Text))
                }
            }
            role := "user"
            if msg.Role == "assistant" || msg.Role == "model" {
                role = "model"
            }
            contents = append(contents, &genai.Content{
                Role:  role,
                Parts: parts,
            })
        }
        // GenerateContentStream returns a range-over-func iterator (Go 1.23+)
        // yielding a response or an error on each step.
        stream := p.client.Models.GenerateContentStream(ctx, model, contents, nil)
        for resp, err := range stream {
            if err != nil {
                errChan <- fmt.Errorf("google stream error: %w", err)
                return
            }
            var text string
            if len(resp.Candidates) > 0 && resp.Candidates[0].Content != nil {
                for _, part := range resp.Candidates[0].Content.Parts {
                    if part != nil {
                        text += part.Text
                    }
                }
            }
            delta := &api.StreamDelta{}
            if text != "" {
                delta.Content = []api.ContentBlock{
                    {Type: "output_text", Text: text},
                }
            }
            streamChunk := &api.StreamChunk{
                Object:   "response.chunk",
                Created:  time.Now().Unix(),
                Model:    model,
                Provider: Name,
                Delta:    delta,
            }
            select {
            case chunkChan <- streamChunk:
            case <-ctx.Done():
                errChan <- ctx.Err()
                return
            }
        }
        // Send final chunk
        select {
        case chunkChan <- &api.StreamChunk{Object: "response.chunk", Done: true}:
        case <-ctx.Done():
            errChan <- ctx.Err()
        }
    }()
    return chunkChan, errChan
}

func chooseModel(requested, defaultModel string) string {
    if requested != "" {
        return requested
    }
    if defaultModel != "" {
        return defaultModel
    }
    return "gemini-2.0-flash-exp"
}


@@ -0,0 +1,210 @@
package openai

import (
    "context"
    "fmt"
    "time"

    "github.com/openai/openai-go"
    "github.com/openai/openai-go/option"

    "github.com/yourusername/go-llm-gateway/internal/api"
    "github.com/yourusername/go-llm-gateway/internal/config"
)

const Name = "openai"

// Provider implements the OpenAI SDK integration.
type Provider struct {
    cfg    config.ProviderConfig
    client *openai.Client
}

// New constructs a Provider from configuration.
func New(cfg config.ProviderConfig) *Provider {
    var client *openai.Client
    if cfg.APIKey != "" {
        c := openai.NewClient(option.WithAPIKey(cfg.APIKey))
        client = &c
    }
    return &Provider{
        cfg:    cfg,
        client: client,
    }
}

// Name returns the provider identifier.
func (p *Provider) Name() string { return Name }

// Generate routes the Open Responses request to OpenAI.
func (p *Provider) Generate(ctx context.Context, req *api.ResponseRequest) (*api.Response, error) {
    if p.cfg.APIKey == "" {
        return nil, fmt.Errorf("openai api key missing")
    }
    if p.client == nil {
        return nil, fmt.Errorf("openai client not initialized")
    }
    model := chooseModel(req.Model, p.cfg.Model)
    // Convert Open Responses messages to OpenAI format
    messages := make([]openai.ChatCompletionMessageParamUnion, 0, len(req.Input))
    for _, msg := range req.Input {
        var content string
        for _, block := range msg.Content {
            if block.Type == "input_text" {
                content += block.Text
            }
        }
        switch msg.Role {
        case "user":
            messages = append(messages, openai.UserMessage(content))
        case "assistant":
            messages = append(messages, openai.AssistantMessage(content))
        case "system":
            messages = append(messages, openai.SystemMessage(content))
        }
    }
    // Call OpenAI API
    resp, err := p.client.Chat.Completions.New(ctx, openai.ChatCompletionNewParams{
        Model:    openai.ChatModel(model),
        Messages: messages,
    })
    if err != nil {
        return nil, fmt.Errorf("openai api error: %w", err)
    }
    // Convert OpenAI response to Open Responses format
    output := make([]api.Message, 0, len(resp.Choices))
    for _, choice := range resp.Choices {
        output = append(output, api.Message{
            Role: "assistant",
            Content: []api.ContentBlock{
                {Type: "output_text", Text: choice.Message.Content},
            },
        })
    }
    return &api.Response{
        ID:       resp.ID,
        Object:   "response",
        Created:  time.Now().Unix(),
        Model:    resp.Model,
        Provider: Name,
        Output:   output,
        Usage: api.Usage{
            InputTokens:  int(resp.Usage.PromptTokens),
            OutputTokens: int(resp.Usage.CompletionTokens),
            TotalTokens:  int(resp.Usage.TotalTokens),
        },
    }, nil
}

// GenerateStream handles streaming requests to OpenAI.
func (p *Provider) GenerateStream(ctx context.Context, req *api.ResponseRequest) (<-chan *api.StreamChunk, <-chan error) {
    chunkChan := make(chan *api.StreamChunk)
    errChan := make(chan error, 1)
    go func() {
        defer close(chunkChan)
        defer close(errChan)
        if p.cfg.APIKey == "" {
            errChan <- fmt.Errorf("openai api key missing")
            return
        }
        if p.client == nil {
            errChan <- fmt.Errorf("openai client not initialized")
            return
        }
        model := chooseModel(req.Model, p.cfg.Model)
        // Convert messages
        messages := make([]openai.ChatCompletionMessageParamUnion, 0, len(req.Input))
        for _, msg := range req.Input {
            var content string
            for _, block := range msg.Content {
                if block.Type == "input_text" {
                    content += block.Text
                }
            }
            switch msg.Role {
            case "user":
                messages = append(messages, openai.UserMessage(content))
            case "assistant":
                messages = append(messages, openai.AssistantMessage(content))
            case "system":
                messages = append(messages, openai.SystemMessage(content))
            }
        }
        // Create streaming request
        stream := p.client.Chat.Completions.NewStreaming(ctx, openai.ChatCompletionNewParams{
            Model:    openai.ChatModel(model),
            Messages: messages,
        })
        // Process stream
        for stream.Next() {
            chunk := stream.Current()
            for _, choice := range chunk.Choices {
                delta := &api.StreamDelta{}
                if choice.Delta.Role != "" {
                    delta.Role = string(choice.Delta.Role)
                }
                if choice.Delta.Content != "" {
                    delta.Content = []api.ContentBlock{
                        {Type: "output_text", Text: choice.Delta.Content},
                    }
                }
                streamChunk := &api.StreamChunk{
                    ID:       chunk.ID,
                    Object:   "response.chunk",
                    Created:  time.Now().Unix(),
                    Model:    chunk.Model,
                    Provider: Name,
                    Delta:    delta,
                }
                select {
                case chunkChan <- streamChunk:
                case <-ctx.Done():
                    errChan <- ctx.Err()
                    return
                }
            }
        }
        if err := stream.Err(); err != nil {
            errChan <- fmt.Errorf("openai stream error: %w", err)
            return
        }
        // Send final chunk
        select {
        case chunkChan <- &api.StreamChunk{Object: "response.chunk", Done: true}:
        case <-ctx.Done():
            errChan <- ctx.Err()
        }
    }()
    return chunkChan, errChan
}

func chooseModel(requested, defaultModel string) string {
    if requested != "" {
        return requested
    }
    if defaultModel != "" {
        return defaultModel
    }
    return "gpt-4o-mini"
}
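
All three adapters normalize into the same envelope, so a non-streaming call returns one shape regardless of backend. An illustrative payload with fabricated ID, timestamp, and token counts:

{
  "id": "chatcmpl-abc123",
  "object": "response",
  "created": 1730000000,
  "model": "gpt-4o-mini",
  "provider": "openai",
  "output": [
    {
      "role": "assistant",
      "content": [{"type": "output_text", "text": "Hello! How can I help?"}]
    }
  ],
  "usage": {"input_tokens": 9, "output_tokens": 7, "total_tokens": 16}
}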


@@ -0,0 +1,78 @@
package providers

import (
    "context"
    "fmt"
    "strings"

    "github.com/yourusername/go-llm-gateway/internal/api"
    "github.com/yourusername/go-llm-gateway/internal/config"
    anthropicprovider "github.com/yourusername/go-llm-gateway/internal/providers/anthropic"
    googleprovider "github.com/yourusername/go-llm-gateway/internal/providers/google"
    openaiprovider "github.com/yourusername/go-llm-gateway/internal/providers/openai"
)

// Provider represents a unified interface that each LLM provider must implement.
type Provider interface {
    Name() string
    Generate(ctx context.Context, req *api.ResponseRequest) (*api.Response, error)
    GenerateStream(ctx context.Context, req *api.ResponseRequest) (<-chan *api.StreamChunk, <-chan error)
}

// Registry keeps track of registered providers by key (e.g. "openai").
type Registry struct {
    providers map[string]Provider
}

// NewRegistry constructs provider implementations from configuration.
// Only providers with a non-empty API key are registered.
func NewRegistry(cfg config.ProvidersConfig) (*Registry, error) {
    reg := &Registry{providers: make(map[string]Provider)}
    if cfg.Google.APIKey != "" {
        reg.providers[googleprovider.Name] = googleprovider.New(cfg.Google)
    }
    if cfg.Anthropic.APIKey != "" {
        reg.providers[anthropicprovider.Name] = anthropicprovider.New(cfg.Anthropic)
    }
    if cfg.OpenAI.APIKey != "" {
        reg.providers[openaiprovider.Name] = openaiprovider.New(cfg.OpenAI)
    }
    if len(reg.providers) == 0 {
        return nil, fmt.Errorf("no providers configured")
    }
    return reg, nil
}

// Get returns a provider by key.
func (r *Registry) Get(name string) (Provider, bool) {
    p, ok := r.providers[name]
    return p, ok
}

// Default infers a provider from the model name prefix, falling back to
// any configured provider when no prefix matches.
func (r *Registry) Default(model string) (Provider, error) {
    if model != "" {
        switch {
        case strings.HasPrefix(model, "gpt") || strings.HasPrefix(model, "o1"):
            if p, ok := r.providers[openaiprovider.Name]; ok {
                return p, nil
            }
        case strings.HasPrefix(model, "claude"):
            if p, ok := r.providers[anthropicprovider.Name]; ok {
                return p, nil
            }
        case strings.HasPrefix(model, "gemini"):
            if p, ok := r.providers[googleprovider.Name]; ok {
                return p, nil
            }
        }
    }
    // Fall back to an arbitrary configured provider (map iteration order
    // is not deterministic).
    for _, p := range r.providers {
        return p, nil
    }
    return nil, fmt.Errorf("no providers available")
}
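
A hypothetical caller of the registry (pickProvider is illustrative, not part of the commit), showing the prefix routing (gpt/o1 to openai, claude to anthropic, gemini to google) before the arbitrary fallback:

package routing

import (
    "fmt"
    "log"

    "github.com/yourusername/go-llm-gateway/internal/config"
    "github.com/yourusername/go-llm-gateway/internal/providers"
)

func pickProvider(cfg *config.Config, model string) providers.Provider {
    reg, err := providers.NewRegistry(cfg.Providers)
    if err != nil {
        log.Fatal(err) // "no providers configured" when every api_key is empty
    }
    p, err := reg.Default(model) // e.g. "claude-3-5-sonnet" routes to anthropic
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("routed to", p.Name())
    return p
}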

internal/server/server.go Normal file

@@ -0,0 +1,132 @@
package server

import (
    "encoding/json"
    "fmt"
    "log"
    "net/http"

    "github.com/yourusername/go-llm-gateway/internal/api"
    "github.com/yourusername/go-llm-gateway/internal/providers"
)

// GatewayServer hosts the Open Responses API for the gateway.
type GatewayServer struct {
    registry *providers.Registry
    logger   *log.Logger
}

// New creates a GatewayServer bound to the provider registry.
func New(registry *providers.Registry, logger *log.Logger) *GatewayServer {
    return &GatewayServer{registry: registry, logger: logger}
}

// RegisterRoutes wires the HTTP handlers onto the provided mux.
func (s *GatewayServer) RegisterRoutes(mux *http.ServeMux) {
    mux.HandleFunc("/v1/responses", s.handleResponses)
}

func (s *GatewayServer) handleResponses(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodPost {
        http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
        return
    }
    var req api.ResponseRequest
    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        http.Error(w, "invalid JSON payload", http.StatusBadRequest)
        return
    }
    if err := req.Validate(); err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }
    provider, err := s.resolveProvider(&req)
    if err != nil {
        http.Error(w, err.Error(), http.StatusBadGateway)
        return
    }
    // Handle streaming vs non-streaming
    if req.Stream {
        s.handleStreamingResponse(w, r, provider, &req)
    } else {
        s.handleSyncResponse(w, r, provider, &req)
    }
}

func (s *GatewayServer) handleSyncResponse(w http.ResponseWriter, r *http.Request, provider providers.Provider, req *api.ResponseRequest) {
    resp, err := provider.Generate(r.Context(), req)
    if err != nil {
        s.logger.Printf("provider %s error: %v", provider.Name(), err)
        http.Error(w, "provider error", http.StatusBadGateway)
        return
    }
    w.Header().Set("Content-Type", "application/json")
    w.WriteHeader(http.StatusOK)
    _ = json.NewEncoder(w).Encode(resp)
}
func (s *GatewayServer) handleStreamingResponse(w http.ResponseWriter, r *http.Request, provider providers.Provider, req *api.ResponseRequest) {
    // Check for flusher support before writing anything: http.Error cannot
    // send a status line once WriteHeader has been called.
    flusher, ok := w.(http.Flusher)
    if !ok {
        http.Error(w, "streaming not supported", http.StatusInternalServerError)
        return
    }
    // Set headers for SSE
    w.Header().Set("Content-Type", "text/event-stream")
    w.Header().Set("Cache-Control", "no-cache")
    w.Header().Set("Connection", "keep-alive")
    w.WriteHeader(http.StatusOK)
    chunkChan, errChan := provider.GenerateStream(r.Context(), req)
    for {
        select {
        case chunk, ok := <-chunkChan:
            if !ok {
                // The provider closed both channels (error channel first);
                // surface any buffered error before ending the stream.
                if err := <-errChan; err != nil {
                    s.writeStreamError(w, flusher, err)
                }
                return
            }
            data, err := json.Marshal(chunk)
            if err != nil {
                s.logger.Printf("failed to marshal chunk: %v", err)
                continue
            }
            fmt.Fprintf(w, "data: %s\n\n", data)
            flusher.Flush()
            if chunk.Done {
                return
            }
        case err := <-errChan:
            if err != nil {
                s.writeStreamError(w, flusher, err)
            }
            return
        case <-r.Context().Done():
            s.logger.Printf("client disconnected")
            return
        }
    }
}

func (s *GatewayServer) writeStreamError(w http.ResponseWriter, flusher http.Flusher, err error) {
    s.logger.Printf("stream error: %v", err)
    errData, _ := json.Marshal(map[string]string{"error": err.Error()})
    fmt.Fprintf(w, "data: %s\n\n", errData)
    flusher.Flush()
}
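
A client of this handler sees one data: line per chunk and a final done marker. An illustrative transcript with fabricated text and timestamps:

data: {"object":"response.chunk","created":1730000000,"model":"gpt-4o-mini","provider":"openai","delta":{"role":"assistant"}}

data: {"object":"response.chunk","created":1730000000,"model":"gpt-4o-mini","provider":"openai","delta":{"content":[{"type":"output_text","text":"Hello!"}]}}

data: {"object":"response.chunk","done":true}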
func (s *GatewayServer) resolveProvider(req *api.ResponseRequest) (providers.Provider, error) {
    if req.Provider != "" {
        if provider, ok := s.registry.Get(req.Provider); ok {
            return provider, nil
        }
        return nil, fmt.Errorf("provider %s not configured", req.Provider)
    }
    return s.registry.Default(req.Model)
}
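
The diff above shows 7 of the commit's 13 files; the entrypoint is among those not shown. A plausible main.go wiring these packages together might look like the sketch below (the actual entrypoint and config path are not confirmed by the hunks above):

package main

import (
    "log"
    "net/http"
    "os"

    "github.com/yourusername/go-llm-gateway/internal/config"
    "github.com/yourusername/go-llm-gateway/internal/providers"
    "github.com/yourusername/go-llm-gateway/internal/server"
)

func main() {
    logger := log.New(os.Stdout, "gateway: ", log.LstdFlags)
    cfg, err := config.Load("config.yaml")
    if err != nil {
        logger.Fatalf("load config: %v", err)
    }
    registry, err := providers.NewRegistry(cfg.Providers)
    if err != nil {
        logger.Fatalf("init providers: %v", err)
    }
    mux := http.NewServeMux()
    server.New(registry, logger).RegisterRoutes(mux)
    logger.Printf("listening on %s", cfg.Server.Address)
    logger.Fatal(http.ListenAndServe(cfg.Server.Address, mux))
}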