fix: check LLM env

This commit is contained in:
lilong.129
2025-03-19 21:28:28 +08:00
parent 55acaceb09
commit a4d3c08a6a
8 changed files with 44 additions and 16 deletions

View File

@@ -104,7 +104,8 @@ var (
CVResponseError = errors.New("CV parse response error") // 83
CVResultNotFoundError = errors.New("CV result not found") // 84
StateUnknowError = errors.New("detect state failed") // 85
LLMEnvMissedError = errors.New("LLM env missed error") // 85
StateUnknowError = errors.New("detect state failed") // 89
)
// trackings related: [90, 100)
@@ -199,7 +200,8 @@ var errorsMap = map[error]int{
CVServiceConnectionError: 82,
CVResponseError: 83,
CVResultNotFoundError: 84,
StateUnknowError: 85,
LLMEnvMissedError: 85,
StateUnknowError: 89,
// trackings related
TrackingGetError: 90,

View File

@@ -1 +1 @@
v5.0.0-beta-2503192116
v5.0.0-beta-2503192228

View File

@@ -53,6 +53,11 @@ const (
)
func WithLLMService(service LLMServiceType) AIServiceOption {
if err := checkEnvLLM(); err != nil {
log.Error().Err(err).Msg("check LLM env failed")
os.Exit(code.GetErrorCode(err))
}
return func(opts *AIServices) {
if service == LLMServiceTypeGPT4o {
var err error

View File

@@ -1,6 +1,8 @@
package ai
import "testing"
import (
"testing"
)
func TestOption(t *testing.T) {
options := NewAIService(

View File

@@ -9,7 +9,9 @@ import (
"time"
"github.com/cloudwego/eino-ext/components/model/openai"
"github.com/httprunner/httprunner/v5/code"
"github.com/joho/godotenv"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
@@ -25,11 +27,29 @@ type OpenAIInitConfig struct {
const (
EnvOpenAIBaseURL = "OPENAI_BASE_URL"
EnvOpenAIAPIKey = "OPENAI_API_KEY"
EnvModelName = "MIDSCENE_MODEL_NAME"
EnvOpenAIInitConfigJSON = "MIDSCENE_OPENAI_INIT_CONFIG_JSON"
EnvUseVLMUITars = "MIDSCENE_USE_VLM_UI_TARS"
EnvModelName = "LLM_MODEL_NAME"
EnvOpenAIInitConfigJSON = "OPENAI_INIT_CONFIG_JSON"
)
func checkEnvLLM() error {
openaiBaseURL := os.Getenv("OPENAI_BASE_URL")
if openaiBaseURL == "" {
return errors.Wrap(code.LLMEnvMissedError, "OPENAI_BASE_URL missed")
}
log.Info().Str("OPENAI_BASE_URL", openaiBaseURL).Msg("get env")
openaiAPIKey := os.Getenv("OPENAI_API_KEY")
if openaiAPIKey == "" {
return errors.Wrap(code.LLMEnvMissedError, "OPENAI_API_KEY missed")
}
log.Info().Str("OPENAI_API_KEY", maskAPIKey(openaiAPIKey)).Msg("get env")
modelName := os.Getenv("LLM_MODEL_NAME")
if modelName == "" {
return errors.Wrap(code.LLMEnvMissedError, "LLM_MODEL_NAME missed")
}
log.Info().Str("LLM_MODEL_NAME", modelName).Msg("get env")
return nil
}
// loadEnv loads environment variables from a file
func loadEnv(envPath string) error {
err := godotenv.Load(envPath)
@@ -164,7 +184,3 @@ func maskAPIKey(key string) string {
return key[:4] + "******" + key[len(key)-4:]
}
// IsUseVLMUITars reports whether the UI-TARS VLM mode is enabled via the
// MIDSCENE_USE_VLM_UI_TARS environment variable (EnvUseVLMUITars).
// NOTE(review): the truthiness rules ("true"/"1"/etc.) are defined by
// GetEnvConfigInBool, which is not visible here — confirm its parsing
// before relying on specific values.
func IsUseVLMUITars() bool {
	return GetEnvConfigInBool(EnvUseVLMUITars)
}

View File

@@ -1,6 +1,8 @@
package ai
import "github.com/cloudwego/eino/schema"
import (
"github.com/cloudwego/eino/schema"
)
type ILLMService interface {
Call(opts *PlanningOptions) (*PlanningResult, error)

View File

@@ -11,7 +11,7 @@ import (
)
func TestVLMPlanning(t *testing.T) {
imageBase64, err := loadImage("testdata/llk_3.jpg")
imageBase64, err := loadImage("testdata/llk_1.png")
require.NoError(t, err)
userInstruction := `连连看是一款经典的益智消除类小游戏,通常以图案或图标为主要元素。以下是连连看的基本规则说明:
@@ -25,9 +25,7 @@ func TestVLMPlanning(t *testing.T) {
5. 得分机制: 每成功连接并消除一对图案,玩家会获得相应的分数。完成游戏后,根据剩余时间和消除效率计算总分。
6. 关卡设计: 游戏可能包含多个关卡,随着关卡的推进,图案的复杂度和数量会增加。`
// userInstruction += "\n\n请基于以上游戏规则请先点击第一个图标"
userInstruction += "\n\n点击[3排1列]果酱图案"
userInstruction += "\n\n请基于以上游戏规则给出消除所有图案的行动序列"
planner, err := NewPlanner(context.Background())
require.NoError(t, err)

View File

@@ -69,8 +69,11 @@ func (dExt *XTDriver) GetScreenShotBase64() (base64Str string, err error) {
if err != nil {
return "", err
}
// convert buffer to base64 string
base64Str = "data:image/jpeg;base64," +
base64.StdEncoding.EncodeToString(compressedBufSource.Bytes())
return base64Str, nil
}