123
This commit is contained in:
@@ -874,10 +874,37 @@ func (s *AIBookkeepingService) ProcessChat(ctx context.Context, userID uint, ses
|
||||
Content: message,
|
||||
})
|
||||
|
||||
// 检测是否为消费建议意图(想吃/想买/想喝等)
|
||||
// 1. 获取财务上下文(用于所有高级功能)
|
||||
fc, err := s.GetUserFinancialContext(ctx, userID)
|
||||
if err != nil {
|
||||
// 降级处理,不中断流程
|
||||
fmt.Printf("Failed to get financial context: %v\n", err)
|
||||
}
|
||||
|
||||
// 2. 检测纯查询意图(预算、资产、统计)
|
||||
queryIntent := s.detectQueryIntent(message)
|
||||
if queryIntent != "" && fc != nil {
|
||||
responseMsg := s.handleQueryIntent(ctx, queryIntent, message, fc)
|
||||
|
||||
response := &AIChatResponse{
|
||||
SessionID: session.ID,
|
||||
Message: responseMsg,
|
||||
Intent: queryIntent,
|
||||
Params: session.Params, // 保持参数上下文
|
||||
}
|
||||
|
||||
// 记录 AI 回复
|
||||
session.Messages = append(session.Messages, ChatMessage{
|
||||
Role: "assistant",
|
||||
Content: response.Message,
|
||||
})
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// 3. 检测消费建议意图(想吃/想买/想喝等)
|
||||
isSpendingAdvice := s.isSpendingAdviceIntent(message)
|
||||
|
||||
// Parse intent
|
||||
// Parse intent for transaction
|
||||
params, responseMsg, err := s.llmService.ParseIntent(ctx, message, session.Messages[:len(session.Messages)-1])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse intent: %w", err)
|
||||
@@ -933,14 +960,9 @@ func (s *AIBookkeepingService) ProcessChat(ctx context.Context, userID uint, ses
|
||||
Params: session.Params,
|
||||
}
|
||||
|
||||
// 如果是消费建议意图且有金额,获取财务上下文并综合分析
|
||||
// 4. 处理消费建议意图
|
||||
if isSpendingAdvice && session.Params.Amount != nil {
|
||||
response.Intent = "spending_advice"
|
||||
|
||||
// 获取财务上下文
|
||||
fc, _ := s.GetUserFinancialContext(ctx, userID)
|
||||
|
||||
// 生成综合分析建议
|
||||
advice := s.generateSpendingAdvice(ctx, message, session.Params, fc)
|
||||
if advice != "" {
|
||||
response.Message = advice
|
||||
@@ -952,6 +974,7 @@ func (s *AIBookkeepingService) ProcessChat(ctx context.Context, userID uint, ses
|
||||
if len(missingFields) > 0 {
|
||||
response.NeedsFollowUp = true
|
||||
response.FollowUpQuestion = s.generateFollowUpQuestion(missingFields)
|
||||
// 如果有了更好的建议回复(来自 handleQuery 或 spendingAdvice),且是 FollowUp,优先保留建议的部分内容或组合
|
||||
if response.Message == "" || response.Message == responseMsg {
|
||||
response.Message = response.FollowUpQuestion
|
||||
}
|
||||
@@ -984,6 +1007,117 @@ func (s *AIBookkeepingService) ProcessChat(ctx context.Context, userID uint, ses
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// detectQueryIntent 检测用户查询意图
|
||||
func (s *AIBookkeepingService) detectQueryIntent(message string) string {
|
||||
budgetKeywords := []string{"预算", "剩多少", "还能花", "余额"} // 注意:余额可能指账户余额,这里简化处理
|
||||
assetKeywords := []string{"资产", "多少钱", "家底", "存款", "身家", "总钱"}
|
||||
statsKeywords := []string{"花了多少", "支出", "账单", "消费", "统计"}
|
||||
|
||||
for _, kw := range budgetKeywords {
|
||||
if strings.Contains(message, kw) {
|
||||
return "query_budget"
|
||||
}
|
||||
}
|
||||
for _, kw := range assetKeywords {
|
||||
if strings.Contains(message, kw) {
|
||||
return "query_assets"
|
||||
}
|
||||
}
|
||||
for _, kw := range statsKeywords {
|
||||
if strings.Contains(message, kw) {
|
||||
return "query_stats"
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// handleQueryIntent answers a detected read-only query intent by asking the
// LLM to reply with the relevant figures from the user's financial context.
//
// intent is one of the values produced by detectQueryIntent ("query_budget",
// "query_assets", "query_stats"); message is the raw user text; fc is assumed
// non-nil (the visible caller guards this — TODO confirm no other call sites).
// Returns the assistant reply, or a canned apology when no API key is set.
func (s *AIBookkeepingService) handleQueryIntent(ctx context.Context, intent string, message string, fc *FinancialContext) string {
	if s.config.OpenAIAPIKey == "" {
		return "抱歉,我的大脑(API Key)似乎离家出走了,无法思考..."
	}

	// Derive the persona mode from a rough financial-health score.
	personaMode := "balance" // default: balanced tone
	healthScore := 60        // default: a passing grade

	// Rough health-score estimate (should stay directionally consistent
	// with the frontend's scoring algorithm).
	if fc.TotalAssets > 0 {
		ratio := (fc.TotalAssets - fc.TotalLiabilities) / fc.TotalAssets
		healthScore = 40 + int(ratio*50)
	}

	if healthScore > 80 {
		personaMode = "rich"
	} else if healthScore <= 40 {
		personaMode = "poor"
	}

	// Build the system prompt: persona rules + intent + raw question +
	// serialized financial context.
	systemPrompt := fmt.Sprintf(`你是「小金」,Novault 的首席财务 AI。
当前模式:%s (根据用户财务健康分 %d 判定)

角色设定:
- **rich (富裕)**:撒娇卖萌,夸用户会赚钱,鼓励适度享受。用词:哎哟、不错哦、老板大气。
- **balance (平衡)**:理性贴心,温和提醒。用词:虽然、但是、建议。
- **poor (吃土)**:毒舌、阴阳怪气、恨铁不成钢。用词:啧啧、清醒点、吃土、西北风。

用户意图:%s
用户问题:「%s」

财务数据上下文:
%s

要求:
1. 根据意图提取并回答关键数据(预算剩余、总资产、或本月支出)。
2. 必须符合当前人设模式的语气。
3. 回复简短有力(100字以内)。
4. 不要罗列所有数据,只回答用户问的。`,
		personaMode, healthScore, intent, message, s.formatFinancialContextForLLM(fc))

	messages := []ChatMessage{
		{Role: "system", Content: systemPrompt},
		{Role: "user", Content: message},
	}

	return s.callLLM(ctx, messages)
}
|
||||
|
||||
// formatFinancialContextForLLM 格式化上下文给 LLM
|
||||
func (s *AIBookkeepingService) formatFinancialContextForLLM(fc *FinancialContext) string {
|
||||
data, _ := json.MarshalIndent(fc, "", " ")
|
||||
return string(data)
|
||||
}
|
||||
|
||||
// callLLM 通用 LLM 调用 helper
|
||||
func (s *AIBookkeepingService) callLLM(ctx context.Context, messages []ChatMessage) string {
|
||||
reqBody := ChatCompletionRequest{
|
||||
Model: s.config.ChatModel,
|
||||
Messages: messages,
|
||||
Temperature: 0.8, // 稍微调高以增加人设表现力
|
||||
}
|
||||
|
||||
jsonBody, _ := json.Marshal(reqBody)
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", s.config.OpenAIBaseURL+"/chat/completions", bytes.NewReader(jsonBody))
|
||||
if err != nil {
|
||||
return "思考中断..."
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+s.config.OpenAIAPIKey)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := s.llmService.httpClient.Do(req)
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
return "大脑短路了..."
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var chatResp ChatCompletionResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&chatResp); err != nil || len(chatResp.Choices) == 0 {
|
||||
return "..."
|
||||
}
|
||||
return strings.TrimSpace(chatResp.Choices[0].Message.Content)
|
||||
}
|
||||
|
||||
// isSpendingAdviceIntent 检测是否为消费建议意图
|
||||
func (s *AIBookkeepingService) isSpendingAdviceIntent(message string) bool {
|
||||
keywords := []string{"想吃", "想喝", "想买", "想花", "打算买", "准备买", "要不要", "可以买", "能买", "想要"}
|
||||
@@ -998,63 +1132,44 @@ func (s *AIBookkeepingService) isSpendingAdviceIntent(message string) bool {
|
||||
// generateSpendingAdvice 生成消费建议
|
||||
func (s *AIBookkeepingService) generateSpendingAdvice(ctx context.Context, message string, params *AITransactionParams, fc *FinancialContext) string {
|
||||
if s.config.OpenAIAPIKey == "" || fc == nil {
|
||||
// 无 API 或无上下文,返回简单建议
|
||||
if params.Amount != nil {
|
||||
return fmt.Sprintf("记下来!%.0f元的%s", *params.Amount, params.Note)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// 构建综合分析 prompt
|
||||
fcJSON, _ := json.Marshal(fc)
|
||||
// 动态人设逻辑
|
||||
personaMode := "balance"
|
||||
healthScore := 60
|
||||
if fc.TotalAssets > 0 {
|
||||
ratio := (fc.TotalAssets - fc.TotalLiabilities) / fc.TotalAssets
|
||||
healthScore = 40 + int(ratio*50)
|
||||
}
|
||||
if healthScore > 80 {
|
||||
personaMode = "rich"
|
||||
} else if healthScore <= 40 {
|
||||
personaMode = "poor"
|
||||
}
|
||||
|
||||
prompt := fmt.Sprintf(`你是「小金」,用户的贴心理财助手。性格活泼、接地气、偶尔毒舌但心软。
|
||||
prompt := fmt.Sprintf(`你是「小金」,Novault 的首席财务 AI。
|
||||
当前模式:%s (根据用户财务健康分 %d 判定)
|
||||
角色设定:
|
||||
- **rich**: 鼓励享受,语气轻松。
|
||||
- **balance**: 理性建议,温和提醒。
|
||||
- **poor**: 毒舌劝阻,语气严厉。
|
||||
|
||||
用户说:「%s」
|
||||
|
||||
用户财务状况:
|
||||
财务数据:
|
||||
%s
|
||||
|
||||
请综合分析后给出建议,要求:
|
||||
1. 根据预算剩余和消费趋势判断是否应该消费
|
||||
2. 如果预算紧张,委婉劝阻或建议替代方案
|
||||
3. 如果预算充足,可以鼓励适度消费
|
||||
4. 用轻松幽默的语气,像朋友聊天一样
|
||||
5. 回复60-100字左右,不要太长
|
||||
|
||||
直接输出建议,不要加前缀。`, message, string(fcJSON))
|
||||
请分析消费请求,给出建议。不要加前缀,直接回复。`,
|
||||
personaMode, healthScore, message, s.formatFinancialContextForLLM(fc))
|
||||
|
||||
messages := []ChatMessage{
|
||||
{Role: "user", Content: prompt},
|
||||
}
|
||||
|
||||
reqBody := ChatCompletionRequest{
|
||||
Model: s.config.ChatModel,
|
||||
Messages: messages,
|
||||
Temperature: 0.7,
|
||||
}
|
||||
|
||||
jsonBody, _ := json.Marshal(reqBody)
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", s.config.OpenAIBaseURL+"/chat/completions", bytes.NewReader(jsonBody))
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
req.Header.Set("Authorization", "Bearer "+s.config.OpenAIAPIKey)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := s.llmService.httpClient.Do(req)
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
return ""
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var chatResp ChatCompletionResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&chatResp); err != nil || len(chatResp.Choices) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
return strings.TrimSpace(chatResp.Choices[0].Message.Content)
|
||||
return s.callLLM(ctx, messages)
|
||||
}
|
||||
|
||||
// mergeParams merges new params into existing params
|
||||
|
||||
Reference in New Issue
Block a user