refactor: select model type by env LLM_MODEL_USE
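The model type is no longer passed in as an explicit LLMServiceType argument; instead, the LLM_MODEL_USE environment variable is matched by substring to pick the ui-tars, qwen-vl, or gpt branch. Below is a minimal, self-contained sketch of that selection logic, for illustration only: the constant values come from this diff, while the function and variable names are hypothetical and not part of the repository's API.

package main

import (
	"fmt"
	"os"
	"strings"
)

// Values mirror the LLMServiceType constants introduced by this commit.
const (
	modelUITARS = "ui-tars"
	modelGPT    = "gpt"
	modelQwenVL = "qwen-vl"
)

// selectAssertionFormat sketches the env-driven branching that replaces the
// old modelType parameter: LLM_MODEL_USE is matched by substring, so a full
// model name that merely contains "ui-tars" still selects that branch.
func selectAssertionFormat(envModelUse string) (string, error) {
	switch {
	case strings.Contains(envModelUse, modelUITARS):
		return "ui-tars assertion response format", nil
	case strings.Contains(envModelUse, modelQwenVL):
		return "default assertion response JSON format", nil
	case strings.Contains(envModelUse, modelGPT):
		return "structured output via response_format", nil
	default:
		return "", fmt.Errorf("model type %s not supported for asserter", envModelUse)
	}
}

func main() {
	format, err := selectAssertionFormat(os.Getenv("LLM_MODEL_USE"))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("selected:", format)
}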
@@ -1 +1 @@
-v5.0.0-beta-2504292233
+v5.0.0-beta-2504292314
@@ -49,10 +49,9 @@ func WithCVService(service CVServiceType) AIServiceOption {
 type LLMServiceType string
 
 const (
-	LLMServiceTypeUITARS     LLMServiceType = "ui-tars"
-	LLMServiceTypeGPT4o      LLMServiceType = "gpt-4o"
-	LLMServiceTypeGPT4Vision LLMServiceType = "gpt-4-vision"
-	LLMServiceTypeQwenVL     LLMServiceType = "qwen-vl"
+	LLMServiceTypeUITARS LLMServiceType = "ui-tars"
+	LLMServiceTypeGPT    LLMServiceType = "gpt"
+	LLMServiceTypeQwenVL LLMServiceType = "qwen-vl"
 )
 
 // ILLMService defines the LLM service interface, including planning and assertion capabilities
@@ -67,7 +66,7 @@ func WithLLMService(modelType LLMServiceType) AIServiceOption {
 		var planner IPlanner
 		var err error
 		switch modelType {
-		case LLMServiceTypeGPT4o:
+		case LLMServiceTypeGPT:
 			// TODO: implement gpt-4o planner and asserter
 			planner, err = NewPlanner(context.Background())
 		case LLMServiceTypeUITARS:
@@ -79,9 +78,9 @@ func WithLLMService(modelType LLMServiceType) AIServiceOption {
 		}
 
 		// init asserter
-		asserter, err := NewAsserter(context.Background(), modelType)
+		asserter, err := NewAsserter(context.Background())
 		if err != nil {
-			log.Error().Err(err).Msgf("init %s asserter failed", modelType)
+			log.Error().Err(err).Msg("init asserter failed")
 			os.Exit(code.GetErrorCode(err))
 		}
 
@@ -115,11 +114,14 @@ const (
 	EnvModelName = "LLM_MODEL_NAME"
 )
 
+var EnvModelUse string
+
 // GetOpenAIModelConfig get OpenAI config
 func GetOpenAIModelConfig() (*openai.ChatModelConfig, error) {
 	if err := config.LoadEnv(); err != nil {
 		return nil, errors.Wrap(code.LoadEnvError, err.Error())
 	}
+	EnvModelUse = os.Getenv("LLM_MODEL_USE")
 
 	openaiBaseURL := os.Getenv(EnvOpenAIBaseURL)
 	if openaiBaseURL == "" {
@@ -43,14 +43,12 @@ type Asserter struct {
 	model        model.ToolCallingChatModel
 	systemPrompt string
 	history      ConversationHistory
-	modelType    LLMServiceType
 }
 
 // NewAsserter creates a new Asserter instance
-func NewAsserter(ctx context.Context, modelType LLMServiceType) (*Asserter, error) {
+func NewAsserter(ctx context.Context) (*Asserter, error) {
 	asserter := &Asserter{
 		ctx:          ctx,
-		modelType:    modelType,
 		systemPrompt: defaultAssertionPrompt,
 	}
 
@@ -59,12 +57,11 @@ func NewAsserter(ctx context.Context, modelType LLMServiceType) (*Asserter, erro
 		return nil, err
 	}
 
-	switch modelType {
-	case LLMServiceTypeUITARS:
+	if strings.Contains(EnvModelUse, string(LLMServiceTypeUITARS)) {
 		asserter.systemPrompt += "\n\n" + uiTarsAssertionResponseFormat
-	case LLMServiceTypeQwenVL:
+	} else if strings.Contains(EnvModelUse, string(LLMServiceTypeQwenVL)) {
 		asserter.systemPrompt += "\n\n" + defaultAssertionResponseJsonFormat
-	case LLMServiceTypeGPT4Vision, LLMServiceTypeGPT4o:
+	} else if strings.Contains(EnvModelUse, string(LLMServiceTypeGPT)) {
 		// define output format
 		type OutputFormat struct {
 			Thought string `json:"thought"`
@@ -86,8 +83,8 @@ func NewAsserter(ctx context.Context, modelType LLMServiceType) (*Asserter, erro
 				Strict: false,
 			},
 		}
-	default:
-		return nil, errors.New("not supported model type for asserter")
+	} else {
+		return nil, fmt.Errorf("model type %s not supported for asserter", EnvModelUse)
 	}
 
 	asserter.model, err = openai.NewChatModel(ctx, config)
@@ -144,7 +141,7 @@ Here is the assertion. Please tell whether it is truthy according to the screens
 	startTime := time.Now()
 	resp, err := a.model.Generate(a.ctx, a.history)
 	log.Info().Float64("elapsed(s)", time.Since(startTime).Seconds()).
-		Str("model", string(a.modelType)).Msg("call model service for assertion")
+		Str("model", EnvModelUse).Msg("call model service for assertion")
 	if err != nil {
 		return nil, errors.Wrap(code.LLMRequestServiceError, err.Error())
 	}
@@ -10,7 +10,7 @@ import (
 )
 
 func createAsserter(t *testing.T) *Asserter {
-	asserter, err := NewAsserter(context.Background(), LLMServiceTypeUITARS)
+	asserter, err := NewAsserter(context.Background())
 	require.NoError(t, err)
 	return asserter
 }
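Because createAsserter no longer takes a model type, tests also pick up the model from LLM_MODEL_USE. Below is a hedged sketch of how a test could pin that choice per case with t.Setenv (standard library, Go 1.17+); the package name is hypothetical and the remaining model settings are assumed to be supplied via config.LoadEnv as in the production path.

// Hypothetical test sketch; "ai" stands in for the package that owns
// NewAsserter, whose real name is not visible in this diff.
package ai

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestAsserterUsesUITarsFromEnv(t *testing.T) {
	// Force the substring match in NewAsserter to take the ui-tars branch.
	t.Setenv("LLM_MODEL_USE", "ui-tars")

	// Base URL, API key and model name are assumed to be loaded by
	// config.LoadEnv, exactly as in the production code path.
	asserter, err := NewAsserter(context.Background())
	require.NoError(t, err)
	require.NotNil(t, asserter)
}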
@@ -30,7 +30,7 @@ func NewPlanner(ctx context.Context) (*Planner, error) {
 	return &Planner{
 		ctx:          ctx,
 		model:        model,
-		modelType:    LLMServiceTypeGPT4o,
+		modelType:    LLMServiceTypeGPT,
 		systemPrompt: uiTarsPlanningPrompt, // TODO: change prompt with function calling
 	}, nil
 }