package ollama

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// GenerateChatMessageRequest is the request body for the Ollama /chat endpoint.
type GenerateChatMessageRequest struct {
	Model       string                              `json:"model"`
	Messages    []GenerateChatMessageRequestMessage `json:"messages"`
	Tools       []GenerateChatMessageRequestTool    `json:"tools,omitempty"`
	Format      string                              `json:"format,omitempty"`
	Options     *GenerateChatMessageRequestOptions  `json:"options,omitempty"`
	Stream      *bool                               `json:"stream,omitempty"`
	Think       *bool                               `json:"think,omitempty"`
	KeepAlive   string                              `json:"keep_alive,omitempty"`
	Logprobs    *bool                               `json:"logprobs,omitempty"`
	TopLogprobs *int                                `json:"top_logprobs,omitempty"`
}

// GenerateChatMessageRequestMessage is a single conversation turn sent to the model.
type GenerateChatMessageRequestMessage struct {
	Role      string                                      `json:"role"`
	Content   string                                      `json:"content"`
	Images    []string                                    `json:"images,omitempty"`
	ToolCalls []GenerateChatMessageRequestMessageToolCall `json:"tool_calls,omitempty"`
}

// GenerateChatMessageRequestMessageToolCall echoes a tool invocation back to the model.
type GenerateChatMessageRequestMessageToolCall struct {
	Function *GenerateChatMessageRequestMessageToolCallFunction `json:"function,omitempty"`
}

// GenerateChatMessageRequestMessageToolCallFunction names a called tool and its arguments.
type GenerateChatMessageRequestMessageToolCallFunction struct {
	Name        string         `json:"name"`
	Description string         `json:"description,omitempty"`
	Arguments   map[string]any `json:"arguments,omitempty"`
}

// GenerateChatMessageRequestTool declares a tool the model may call.
type GenerateChatMessageRequestTool struct {
	Type     string                                 `json:"type"`
	Function GenerateChatMessageRequestToolFunction `json:"function"`
}

// GenerateChatMessageRequestToolFunction describes a tool's schema.
type GenerateChatMessageRequestToolFunction struct {
	Name        string         `json:"name"`
	Description string         `json:"description"`
	Parameters  map[string]any `json:"parameters,omitempty"`
}

// GenerateChatMessageRequestOptions holds optional model sampling parameters.
// Pointer fields distinguish "unset" from a zero value.
type GenerateChatMessageRequestOptions struct {
	Seed        *int     `json:"seed,omitempty"`
	Temperature *float32 `json:"temperature,omitempty"`
	TopK        *int     `json:"top_k,omitempty"`
	TopP        *float32 `json:"top_p,omitempty"`
	MinP        *float32 `json:"min_p,omitempty"`
	Stop        []string `json:"stop,omitempty"`
	NumCtx      *int     `json:"num_ctx,omitempty"`
	NumPredict  *int     `json:"num_predict,omitempty"`
}

// GenerateChatMessageResponse is the single-object (non-streaming) /chat response.
// Duration fields are reported by the server in nanoseconds.
type GenerateChatMessageResponse struct {
	Model     string `json:"model"`
	CreatedAt string `json:"created_at"`
	Message   struct {
		Role      string `json:"role"`
		Content   string `json:"content"`
		Thinking  string `json:"thinking"`
		ToolCalls []struct {
			Function struct {
				Name        string         `json:"name"`
				Description string         `json:"description"`
				Arguments   map[string]any `json:"arguments"`
			} `json:"function"`
		} `json:"tool_calls"`
		Images []string `json:"images"`
	} `json:"message"`
	Done               bool   `json:"done"`
	DoneReason         string `json:"done_reason"`
	TotalDuration      int    `json:"total_duration"`
	LoadDuration       int    `json:"load_duration"`
	PromptEvalCount    int    `json:"prompt_eval_count"`
	PromptEvalDuration int    `json:"prompt_eval_duration"`
	EvalCount          int    `json:"eval_count"`
	EvalDuration       int    `json:"eval_duration"`
	Logprobs           []struct {
		Token string `json:"token"`
		// Log-probabilities are fractional (and typically negative); an
		// int here would make json.Decode fail on any logprobs payload.
		Logprob     float64 `json:"logprob"`
		Bytes       []int   `json:"bytes"`
		TopLogprobs []struct {
			Token   string  `json:"token"`
			Logprob float64 `json:"logprob"`
			Bytes   []int   `json:"bytes"`
		} `json:"top_logprobs"`
	} `json:"logprobs"`
}

// GenerateChatMessageResponseStream is one NDJSON chunk of a streaming /chat response.
type GenerateChatMessageResponseStream struct {
	Model     string `json:"model"`
	CreatedAt string `json:"created_at"`
	Message   struct {
		Role      string `json:"role"`
		Content   string `json:"content"`
		Thinking  string `json:"thinking"`
		ToolCalls []struct {
			Function struct {
				Name        string         `json:"name"`
				Description string         `json:"description"`
				Arguments   map[string]any `json:"arguments"`
			} `json:"function"`
		} `json:"tool_calls"`
		Images []string `json:"images"`
	} `json:"message"`
	Done bool `json:"done"`
}

// GenerateChatMessage sends a non-streaming chat request and returns the
// complete response. It returns an error on request failure, a non-200
// status, or an undecodable body.
func (o Ollama) GenerateChatMessage(reqBody GenerateChatMessageRequest) (GenerateChatMessageResponse, error) {
	// Force non-streaming so the server always returns a single JSON object,
	// mirroring how GenerateChatMessageStream forces streaming on.
	reqBody.Stream = PtrOf(false)

	reqBodyBytes, err := json.Marshal(reqBody)
	if err != nil {
		return GenerateChatMessageResponse{}, err
	}

	req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/chat", o.baseUrl), bytes.NewReader(reqBodyBytes))
	if err != nil {
		return GenerateChatMessageResponse{}, err
	}
	for key, val := range o.customHeaders {
		req.Header.Set(key, val)
	}
	// Set after custom headers so a custom Content-Type cannot override it.
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return GenerateChatMessageResponse{}, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return GenerateChatMessageResponse{}, fmt.Errorf("unexpected status code %d", resp.StatusCode)
	}

	var respBody GenerateChatMessageResponse
	if err := json.NewDecoder(resp.Body).Decode(&respBody); err != nil {
		return GenerateChatMessageResponse{}, err
	}
	return respBody, nil
}

// GenerateChatMessageStream sends a streaming chat request and invokes
// onChunk for every NDJSON chunk received, returning nil once a chunk with
// Done=true arrives or the stream ends cleanly.
func (o Ollama) GenerateChatMessageStream(reqBody GenerateChatMessageRequest, onChunk func(chunk GenerateChatMessageResponseStream)) error {
	reqBody.Stream = PtrOf(true)

	reqBodyBytes, err := json.Marshal(reqBody)
	if err != nil {
		return err
	}

	req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/chat", o.baseUrl), bytes.NewReader(reqBodyBytes))
	if err != nil {
		return err
	}
	for key, val := range o.customHeaders {
		req.Header.Set(key, val)
	}
	// Set after custom headers so a custom Content-Type cannot override it.
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status code %d", resp.StatusCode)
	}

	scanner := bufio.NewScanner(resp.Body)
	// Chunks carrying tool calls or base64 images can exceed bufio.Scanner's
	// default 64 KiB token limit; allow lines up to 1 MiB.
	scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)

	for scanner.Scan() {
		line := bytes.TrimSpace(scanner.Bytes())
		if len(line) == 0 {
			// Skip blank separator lines; json.Unmarshal errors on empty input.
			continue
		}

		var chunk GenerateChatMessageResponseStream
		if err := json.Unmarshal(line, &chunk); err != nil {
			return err
		}
		onChunk(chunk)
		if chunk.Done {
			return nil
		}
	}
	return scanner.Err()
}