Expose version/build metadata and improve provider error messaging

This commit is contained in:
2025-10-01 22:21:44 +02:00
parent a3e6b105d0
commit 7d4d56671f
5 changed files with 278 additions and 27 deletions

View File

@@ -37,6 +37,7 @@ type Service struct {
model string
temperature float64
stream bool
streamNotice string
history []openai.ChatMessage
sessionID int64
summarySet bool
@@ -92,6 +93,8 @@ func (s *Service) Send(ctx context.Context, input string, streamHandler openai.C
messages := append([]openai.ChatMessage(nil), s.history...)
temperature := s.temperature
s.streamNotice = ""
req := openai.ChatCompletionRequest{
Model: s.model,
Messages: messages,
@@ -105,11 +108,17 @@ func (s *Service) Send(ctx context.Context, input string, streamHandler openai.C
var err error
if s.stream {
resp, err = s.client.StreamChatCompletion(ctx, req, streamHandler)
if err != nil {
resp, err = s.handleStreamingFailure(ctx, req, err)
if err != nil {
return "", s.translateProviderError(err)
}
}
} else {
resp, err = s.client.CreateChatCompletion(ctx, req)
}
if err != nil {
return "", err
if err != nil {
return "", s.translateProviderError(err)
}
}
if len(resp.Choices) == 0 {
return "", errors.New("no choices returned from completion")
@@ -135,6 +144,16 @@ func (s *Service) History() []openai.ChatMessage {
return historyCopy
}
// ConsumeStreamingNotice returns any pending streaming notice and clears it,
// so each notice is surfaced to the caller at most once. Safe on a nil receiver.
func (s *Service) ConsumeStreamingNotice() string {
	if s == nil {
		return ""
	}
	pending := s.streamNotice
	s.streamNotice = ""
	return pending
}
// StreamingEnabled reports whether streaming completions are configured for this service.
func (s *Service) StreamingEnabled() bool {
if s == nil {
@@ -143,6 +162,74 @@ func (s *Service) StreamingEnabled() bool {
return s.stream
}
// translateProviderError rewrites known provider request errors into
// user-facing guidance; any error that is not a *openai.RequestError, or
// whose status has no guidance text, is returned unchanged.
func (s *Service) translateProviderError(err error) error {
	var reqErr *openai.RequestError
	if !errors.As(err, &reqErr) {
		return err
	}
	guidance, ok := providerStatusGuidance(reqErr.StatusCode())
	if !ok {
		return err
	}
	return errors.New(guidance)
}
// handleStreamingFailure reacts to a failed streaming completion. For 4xx
// provider errors it permanently disables streaming on the service, records a
// user-facing notice for the UI to consume, and retries the same request in
// buffered (non-streaming) mode. Any other failure is returned untouched.
func (s *Service) handleStreamingFailure(ctx context.Context, req openai.ChatCompletionRequest, streamErr error) (*openai.ChatCompletionResponse, error) {
	if s == nil {
		return nil, streamErr
	}
	var reqErr *openai.RequestError
	if !errors.As(streamErr, &reqErr) {
		return nil, streamErr
	}
	status := reqErr.StatusCode()
	// Only client-side (4xx) failures trigger the buffered fallback; server
	// errors and everything else propagate to the caller unchanged.
	if !(status >= 400 && status < 500) {
		return nil, streamErr
	}
	notice, ok := providerStatusGuidance(status)
	if !ok {
		// No canned guidance: fall back to the provider's message, then the
		// raw error text, then a generic notice.
		notice = strings.TrimSpace(reqErr.Message())
		if notice == "" {
			notice = strings.TrimSpace(streamErr.Error())
		}
		if notice == "" {
			notice = "Streaming is unavailable"
		}
	}
	notice = fmt.Sprintf("%s\nStreaming has been disabled; responses will be fully buffered.", notice)
	s.logger.WarnContext(ctx, "streaming disabled", "status", status, "error", strings.TrimSpace(reqErr.Message()))
	s.stream = false
	s.streamNotice = notice
	req.Stream = false
	resp, err := s.client.CreateChatCompletion(ctx, req)
	if err != nil {
		return nil, s.translateProviderError(err)
	}
	return resp, nil
}
// providerStatusGuidance returns canned, user-facing guidance text for
// well-known provider HTTP status codes. The boolean reports whether
// guidance exists for the given status.
func providerStatusGuidance(status int) (string, bool) {
	var msg string
	switch status {
	case 401:
		msg = "Incorrect API key provided.\nVerify API key, clear browser cache, or generate a new key."
	case 429:
		msg = "Rate limit reached.\nPace requests and implement exponential backoff."
	case 500:
		msg = "Server error.\nRetry after a brief wait; contact support if persistent."
	case 503:
		msg = "Engine overloaded.\nRetry request after a brief wait; contact support if persistent."
	default:
		return "", false
	}
	return msg, true
}
// Reset clears the in-memory conversation history.
func (s *Service) Reset() {
if s == nil {