From 97c41ee233987264bab63c0dcc8123e457c21ffa Mon Sep 17 00:00:00 2001
From: zhouyu
Date: Sun, 7 Apr 2024 13:37:32 +0800
Subject: [PATCH] add: support OpenAI
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 README.md          | 121 +++++++++++++++++++++------------------------
 ai.go              |  19 +++++--
 ai_flow.go         |  12 +++--
 aliyun.ai.go       |  18 ++++---
 config.go          |  33 ++++++++-----
 config.sample.yaml |  39 +++++++++------
 main.go            |   9 ++--
 model.go           |  68 ------------------------
 ollama.ai.go       |  49 ++++++++++++++++--
 openai.ai.go       |  50 +++++++++++++++++--
 prompt.go          |   8 +++
 11 files changed, 242 insertions(+), 184 deletions(-)
 delete mode 100644 model.go

diff --git a/README.md b/README.md
index b79debe..439d716 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,16 @@
-# Git to DailyReport
-
-Turn Git logs into a daily work report automatically with a large language model
-
-[![Release](https://img.shields.io/github/release/muyu66/git-to-dailyreport.svg?style=flat-square)](https://github.com/muyu66/git-to-dailyreport/releases)
-[![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT)
+<div align="center">
+
+<h1>Git to DailyReport</h1>
+
+<p>Turn Git logs into daily/weekly work reports automatically with a large language model</p>
+
+<p>
+<a href="https://github.com/muyu66/git-to-dailyreport/releases"><img src="https://img.shields.io/github/release/muyu66/git-to-dailyreport.svg?style=flat-square" alt="Release"></a>
+<a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-green.svg" alt="License: MIT"></a>
+</p>
+
+</div>
+
 
 ## Features
 
@@ -18,22 +25,48 @@
 * Weekly report support
 * Show the report in the terminal, or write it to a text file
 * AI Flow mode to improve the accuracy of the generated report
+* OpenAI integration, with support for China-reachable proxy endpoints
+* Ollama integration
 
 ![](public/pic1.gif "to boss")
 
-## One-Click Start
+## Getting Started
+
+1. **Get access to an AI model**
+
+Three backends are supported: Ollama, Aliyun DashScope, and OpenAI.
+
+Ollama: install it from the [Ollama GitHub](https://github.com/ollama/ollama)
+
+Aliyun DashScope: enable the service on the [DashScope homepage](https://dashscope.aliyun.com)
+
+OpenAI: get an API key from the [OpenAI dashboard](https://platform.openai.com/api-keys), or try the free China-reachable proxy [Chatanywhere](https://github.com/chatanywhere/GPT_API_free)
+
+2. **Configure**
+
+Fill in the parameters following `config.sample.yaml`, rename it to `config.yaml`, and put it in the same directory as report.exe.
+
+3. **Run on Windows**
+
+```shell
+ .\一键生成日报.bat  # one-click daily report
+```
 
-Windows
+```shell
+ .\一键生成周报.bat  # one-click weekly report
+```
 
- Double-click 一键生成日报.bat
- OR
- Double-click 一键生成周报.bat
+4. **Run on Linux or Docker**
 
-* Enable the Aliyun model service: https://dashscope.aliyun.com
-* Config file: rename `config.sample.yaml` to `config.yaml` and fill it in
-* Make sure `config.yaml` sits next to `report.exe`
+```shell
+# Docker usage is still TODO. On Linux the tool can be built and run
+# directly with Go 1.22+ (a sketch, not an official script):
+go build -o report .
+./report -c day
+```
 
-## Command-Line Usage
+## Advanced Usage
 
 $ .\bin\report.exe
 
@@ -45,6 +78,14 @@
 |----|-----|----------|--------|
 | -c | day | day week | report period |
 
+## Build for Win64 (optional)
+
+Requires Go 1.22+
+
+```shell
+ .\build.bat # run the build script
+```
+
 ## Sample Output
 
 Daily report:
@@ -105,54 +146,6 @@
 Best regards,
 [Your Title]
 ```
 
-## Build for Win64
-
- .\build.bat # build script
- .\bin\report.exe # start using it
-
-## Example Configuration
-
-config.yaml (same directory as report.exe)
-
-```yaml
-ai:
-  # [aliyun|ollama]
-  name: aliyun
-  ak: sk-xxxxxxxxxxxxxxxxxxx
-  # qwen1.5-72b-chat ⭐⭐⭐⭐
-  # qwen1.5-14b-chat ⭐⭐⭐
-  # qwen1.5-7b-chat ⭐⭐
-  # qwen-1.8b-chat ⭐
-  # qwen-plus unknown
-  # qwen-turbo unknown
-  # qwen-max-1201 unknown
-  # qwen-max-longcontext ⭐⭐⭐⭐
-  model: qwen-max-longcontext
-git:
-  # If not empty, this username is used (prevents claiming someone else's work)
-  # If an empty string, it is detected automatically
-  username:
-  repo:
-    # [array]
-    # every git repository under a directory is scanned automatically
-    # a directory of projects works: C:\Web
-    # so does a single repository: C:\Web\niubiGame
-    - C:\Web
-report:
-  # report mode
-  # [normal: detailed] [simple: brief summary]
-  mode: normal
-  # how many days each report covers; default is one
-  # 1 = today only, 2 = today and yesterday
-  intervalDay: 1
-  # output mode
-  # file: write a text file
-  # print: print to the terminal
-  out: file
-  # report language [chs|en]
-  lang: chs
-```
-
 ## Roadmap
 
 * Include the working tree and stage area in the reported scope
@@ -164,8 +157,6 @@
 * Give the user a brief preview of the work that will be submitted
 * Handle daily reports that may cross midnight (e.g. overtime)
 * TTY-mode configuration wizard
-* Local LLM support
-* OpenAI integration
 
 ## Contributors
 
diff --git a/ai.go b/ai.go
index 106761e..c34fd80 100644
--- a/ai.go
+++ b/ai.go
@@ -6,15 +6,26 @@ import (
 )
 
 type Ai interface {
+	before()
 	request(*resty.Client, []AiReqBodyMessage) AiReqBodyMessage
+	after()
 }
 
-func AiFactory(aiName string) (Ai, error) {
+type AiConfig struct {
+	Model          string
+	BaseUrl        string
+	ApiKey         string
+	MaxInputTokens uint32
+}
+
+func AiFactory(aiName string, aiConf AiConfig) (Ai, error) {
 	switch aiName {
-	case "aliyun":
-		return AliyunAi{}, nil
+	case "aliyun-dashscope":
+		return AliyunAi{Config: aiConf}, nil
 	case "ollama":
-		return OllamaAi{}, nil
+		return OllamaAi{Config: aiConf}, nil
+	case "openai":
+		return OpenAi{Config: aiConf}, nil
 	default:
 		return nil, errors.New("找不到合适的AI引擎")
 	}
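The factory above is what `main()` wires up and what `flow()` ultimately issues requests through. As orientation, a minimal sketch of the intended call pattern; this is an illustration, not part of the patch, and the model, base URL, API key and message are placeholders:

```go
// Illustration only, not part of the patch. Uses the Ai interface, AiConfig
// and AiFactory introduced in ai.go above; assumes the package's existing
// resty/logrus/fmt imports.
func exampleFactoryUsage() {
	client := resty.New()
	ai, err := AiFactory("openai", AiConfig{ // "aliyun-dashscope" and "ollama" work the same way
		Model:          "gpt-3.5-turbo",
		BaseUrl:        "https://api.chatanywhere.com.cn", // placeholder; any OpenAI-compatible base URL
		ApiKey:         "sk-xxxxxxxx",                     // placeholder
		MaxInputTokens: 6000,
	})
	if err != nil {
		log.Fatal(err)
	}
	reply := ai.request(client, []AiReqBodyMessage{{Role: "user", Content: "ping"}})
	fmt.Println(reply.Content)
}
```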
diff --git a/ai_flow.go b/ai_flow.go
index 8e87629..bb6de0c 100644
--- a/ai_flow.go
+++ b/ai_flow.go
@@ -6,11 +6,13 @@ import (
 )
 
 func flow(ai Ai, client *resty.Client, prompt string) AiReqBodyMessage {
-	log.Info("开始AI-FLOW......")
+	var flowPrompts = getFlowPrompt()
+
+	log.Info("开启AI-FLOW......")
 	var userMsgs = []AiReqBodyMessage{
 		{
 			Role:    "system",
-			Content: "你是一个擅于帮助程序员分析GIT日志,并整理归纳今天工作内容的助理。",
+			Content: flowPrompts[0],
 		},
 		{
 			Role: "user",
@@ -22,7 +24,7 @@ func flow(ai Ai, client *resty.Client, prompt string) AiReqBodyMessage {
 	var messagesBoss = []AiReqBodyMessage{
 		{
 			Role:    "system",
-			Content: "你是一个程序员的领导,你要审查他提交的今日工作报告,并提出修改意见。",
+			Content: flowPrompts[1],
 		},
 		{
 			Role: "user",
@@ -34,9 +36,9 @@ func flow(ai Ai, client *resty.Client, prompt string) AiReqBodyMessage {
 	userMsgs = append(userMsgs, msgs1)
 	userMsgs = append(userMsgs, AiReqBodyMessage{
 		Role:    "user",
-		Content: "我将你的报告提交给了我的领导,他反馈了一些修改建议,请你进行补充,并重新提交。建议如下:" + msgs2.Content,
+		Content: flowPrompts[2] + msgs2.Content,
 	})
 	var res = ai.request(client, userMsgs)
-	log.Info("已结束AI-FLOW......")
+	log.Info("已关闭AI-FLOW......")
 	return res
 }
diff --git a/aliyun.ai.go b/aliyun.ai.go
index ae06dd2..17c0724 100644
--- a/aliyun.ai.go
+++ b/aliyun.ai.go
@@ -7,15 +7,23 @@ import (
 )
 
 type AliyunAi struct {
+	Config AiConfig
 }
 
-func (ai AliyunAi) request(client *resty.Client, messages []AiReqBodyMessage) AiReqBodyMessage {
+func (ai AliyunAi) before() {
 	log.Info("请求大模型中......")
+}
+
+func (ai AliyunAi) after() {
+	log.Info("请求完成......")
+}
+
+func (ai AliyunAi) request(client *resty.Client, messages []AiReqBodyMessage) AiReqBodyMessage {
 	resp, err := client.R().
 		SetHeader("Content-Type", "application/json").
-		SetAuthToken("Bearer " + getAiAkConf()).
+		SetAuthToken(ai.Config.ApiKey).
 		SetBody(AiReqBody{
-			Model: getAiModelConf(),
+			Model: ai.Config.Model,
 			Input: AiReqBodyInput{
 				Messages: messages,
 			},
@@ -23,7 +31,7 @@ func (ai AliyunAi) request(client *resty.Client, messages []AiReqBodyMessage) Ai
 			ResultFormat: Message,
 		},
 	}).
-		Post("https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation")
+		Post(ai.Config.BaseUrl + "/api/v1/services/aigc/text-generation/generation")
 
 	if err != nil {
 		log.Fatal(err)
@@ -32,8 +40,6 @@ func (ai AliyunAi) request(client *resty.Client, messages []AiReqBodyMessage) Ai
 		log.Fatal(resp)
 	}
 
-	log.Info("请求完成......")
-
 	result := AiRes{}
 	jsonErr := json.Unmarshal(resp.Body(), &result)
 	if jsonErr != nil {
diff --git a/config.go b/config.go
index 23c5b5b..93e7e97 100644
--- a/config.go
+++ b/config.go
@@ -1,18 +1,9 @@
 package main
 
-import "github.com/spf13/viper"
-
-func getAiNameConf() string {
-	return viper.GetString("ai.name")
-}
-
-func getAiAkConf() string {
-	return viper.GetString("ai.ak")
-}
-
-func getAiModelConf() string {
-	return viper.GetString("ai.model")
-}
+import (
+	"github.com/spf13/viper"
+	"strconv"
+)
 
 func getGitUsernameConf() string {
 	return viper.GetString("git.username")
@@ -41,3 +32,19 @@ func getReportLangConf() string {
 func getReportFlowConf() bool {
 	return viper.GetBool("report.flow")
 }
+
+func getAiConf() AiConfig {
+	usedAi := getUseAiConf()
+	m := viper.GetStringMapString("ai." + usedAi)
+	maxInputTokens, _ := strconv.ParseUint(m["maxinputtokens"], 10, 32)
+	return AiConfig{
+		Model:          m["model"],
+		BaseUrl:        m["baseurl"],
+		ApiKey:         m["apikey"],
+		MaxInputTokens: uint32(maxInputTokens),
+	}
+}
+
+func getUseAiConf() string {
+	return viper.GetString("useAi")
+}
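One subtlety in `getAiConf` above: viper stores configuration keys case-insensitively by lower-casing them, so the camel-cased `maxInputTokens` key from the YAML comes back from `GetStringMapString` as `maxinputtokens`. A self-contained sketch (illustration only, not part of the patch):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	viper.SetConfigType("yaml")
	// The same shape as the ai.openai block in config.sample.yaml.
	_ = viper.ReadConfig(strings.NewReader("ai:\n  openai:\n    maxInputTokens: 6000\n"))
	m := viper.GetStringMapString("ai.openai")
	fmt.Println(m["maxinputtokens"]) // prints 6000; note the lower-cased key
}
```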
diff --git a/config.sample.yaml b/config.sample.yaml
index 90842ff..5dc9c52 100644
--- a/config.sample.yaml
+++ b/config.sample.yaml
@@ -1,16 +1,25 @@
+# Backends you do not use can be left unfilled
 ai:
-  # [aliyun|ollama]
-  name: aliyun
-  ak: sk-xxxxxxxxxxxxxxxxxxx
-  # qwen1.5-72b-chat ⭐⭐⭐⭐
-  # qwen1.5-14b-chat ⭐⭐⭐
-  # qwen1.5-7b-chat ⭐⭐
-  # qwen-1.8b-chat ⭐
-  # qwen-plus unknown
-  # qwen-turbo unknown
-  # qwen-max-1201 unknown
-  # qwen-max-longcontext ⭐⭐⭐⭐
-  model: qwen-max-longcontext
+  aliyun-dashscope:
+    model: qwen1.5-72b-chat
+    apiKey: sk-xxxxxxxxxxxxxxxxxxx
+    baseUrl: https://dashscope.aliyuncs.com
+    # Set this to the token limit the chosen model supports (see its documentation);
+    # a smaller value also works if you want to save tokens
+    maxInputTokens: 6000
+  ollama:
+    model: qwen:latest
+    apiKey:
+    baseUrl: http://localhost:11434
+    # Same as above
+    maxInputTokens: 6000
+  openai:
+    model: gpt-3.5-turbo
+    apiKey: sk-xxxxxxxxxxxxxxxxxxx
+    baseUrl: https://api.chatanywhere.com.cn
+    # Same as above
+    maxInputTokens: 6000
+# Which AI backend to use
+useAi: openai
 git:
   # If not empty, this username is used (prevents claiming someone else's work)
   # If an empty string, it is detected automatically
   username:
@@ -31,6 +40,8 @@ report:
   # output mode
   # file: write a text file
   # print: print to the terminal
-  out: file
+  out: print
   # report language [chs|en]
-  lang: chs
\ No newline at end of file
+  lang: chs
+  # Whether to enable AI-FLOW for more accurate reports (costs a few extra tokens and some time)
+  flow: true
diff --git a/main.go b/main.go
index 5951aee..9a8426f 100644
--- a/main.go
+++ b/main.go
@@ -22,6 +22,8 @@ var (
 	reportModeConf string
 	reportLangConf string
 	reportFlowConf bool
+	aiConf         AiConfig
+	useAiConf      string
 )
 
 func init() {
@@ -45,6 +47,8 @@ func init() {
 	reportModeConf = getReportModeConf()
 	reportLangConf = getReportLangConf()
 	reportFlowConf = getReportFlowConf()
+	aiConf = getAiConf()
+	useAiConf = getUseAiConf()
 }
 
 func loadCmdParams() {
@@ -63,7 +67,7 @@ func main() {
 
 	client := resty.New()
 
-	ai, err := AiFactory(getAiNameConf())
+	ai, err := AiFactory(useAiConf, aiConf)
 	if err != nil {
 		log.Fatal(err.Error())
 	}
@@ -178,9 +182,8 @@ func makeAiReq(gitLogMap *sync.Map, maxLogLen int64, prompt string) string {
 	})
 
 	gitLogLarge := ""
-	aiModel := getAiModel(getAiModelConf())
 	// maximum number of tokens the model accepts in a single request
-	maxInputTokenCount := int(aiModel.MaxInputTokenCount*1000) - len(prompt) - (gitLogMapKeyCount * 15) - 10
+	maxInputTokenCount := int(aiConf.MaxInputTokens) - len(prompt) - (gitLogMapKeyCount * 15) - 10
 	// share of the token budget allotted to each git repository
 	ratio := float64(maxInputTokenCount) / float64(maxLogLen)
 	log.Debug("maxInputTokenCount=", maxInputTokenCount)
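The last main.go hunk swaps the per-model token table for the configured `maxInputTokens`. A worked example of the budget arithmetic in `makeAiReq` with assumed numbers; string length stands in for a true token count here, exactly as it does in the surrounding code:

```go
package main

import "fmt"

func main() {
	// Assumed numbers: maxInputTokens = 6000, a 500-character prompt,
	// gitLogMapKeyCount = 2 repositories, combined log length maxLogLen = 9000.
	budget := 6000 - 500 - 2*15 - 10 // 5460 characters left for the git logs
	ratio := float64(budget) / 9000  // each repository keeps ~61% of its own log
	fmt.Println(budget, ratio)
}
```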
Name: "qwen-max-longcontext", - MaxInputTokenCount: 28, - }, -} - -func getAiModel(name string) AiModel { - for _, model := range aiModels { - if name == model.Name { - return model - } - } - return aiModels[0] -} diff --git a/ollama.ai.go b/ollama.ai.go index b0daf0d..d6ef5e8 100644 --- a/ollama.ai.go +++ b/ollama.ai.go @@ -1,11 +1,54 @@ package main -import "github.com/go-resty/resty/v2" +import ( + "encoding/json" + "github.com/go-resty/resty/v2" + log "github.com/sirupsen/logrus" +) type OllamaAi struct { + Config AiConfig +} + +type OllamaAiReqBody struct { + Model string `json:"model"` + Messages []AiReqBodyMessage `json:"messages"` + Stream bool `json:"stream"` +} + +type OllamaAiRes struct { + Message AiReqBodyMessage `json:"message"` +} + +func (ai OllamaAi) before() { + log.Info("请求大模型中......") +} + +func (ai OllamaAi) after() { + log.Info("请求完成......") } -// TODO: func (ai OllamaAi) request(client *resty.Client, messages []AiReqBodyMessage) AiReqBodyMessage { - return AiReqBodyMessage{} + resp, err := client.R(). + SetHeader("Content-Type", "application/json"). + SetBody(OllamaAiReqBody{ + Model: ai.Config.Model, + Messages: messages, + Stream: false, + }). + Post(ai.Config.BaseUrl + "/api/chat") + + if err != nil { + log.Fatal(err) + } + if resp.StatusCode() != 200 { + log.Fatal(resp) + } + + result := OllamaAiRes{} + jsonErr := json.Unmarshal(resp.Body(), &result) + if jsonErr != nil { + log.Fatal(jsonErr) + } + return result.Message } diff --git a/openai.ai.go b/openai.ai.go index fff3e49..bf41f2d 100644 --- a/openai.ai.go +++ b/openai.ai.go @@ -1,11 +1,55 @@ package main -import "github.com/go-resty/resty/v2" +import ( + "encoding/json" + "github.com/go-resty/resty/v2" + log "github.com/sirupsen/logrus" +) type OpenAi struct { + Config AiConfig +} + +type OpenAiReqBody struct { + Model string `json:"model"` + Messages []AiReqBodyMessage `json:"messages"` + Temperature float64 `json:"temperature"` +} + +type OpenAiRes struct { + Choices []AiResChoice `json:"choices"` +} + +func (ai OpenAi) before() { + log.Info("请求大模型中......") +} + +func (ai OpenAi) after() { + log.Info("请求完成......") } -// TODO: func (ai OpenAi) request(client *resty.Client, messages []AiReqBodyMessage) AiReqBodyMessage { - return AiReqBodyMessage{} + resp, err := client.R(). + SetHeader("Content-Type", "application/json"). + SetAuthToken(ai.Config.ApiKey). + SetBody(OpenAiReqBody{ + Model: ai.Config.Model, + Messages: messages, + Temperature: 0.7, + }). + Post(ai.Config.BaseUrl + "/v1/chat/completions") + + if err != nil { + log.Fatal(err) + } + if resp.StatusCode() != 200 { + log.Fatal(resp) + } + + result := OpenAiRes{} + jsonErr := json.Unmarshal(resp.Body(), &result) + if jsonErr != nil { + log.Fatal(jsonErr) + } + return result.Choices[0].Message } diff --git a/prompt.go b/prompt.go index 9822583..2ad38fd 100644 --- a/prompt.go +++ b/prompt.go @@ -33,3 +33,11 @@ func getWeekPrompt(lang string) string { %s 我的git日志如下:`, basePrompt(), getLangPrompt(lang)) } + +func getFlowPrompt() [3]string { + return [3]string{ + "你是一个擅于帮助程序员分析GIT日志,并整理归纳今天工作内容的助理。", + "你是一个程序员的领导,你要审查他提交的今日工作报告,并提出修改意见。", + "我将你的报告提交给了我的领导,他反馈了一些修改建议,请你进行补充,并重新提交。建议如下:", + } +}