This commit is contained in:
debugtalk
2022-08-21 19:10:06 +08:00
4 changed files with 52 additions and 0 deletions

View File

@@ -65,6 +65,7 @@ func (b *HRPBoomer) InitBoomer() {
}
b.SetSpawnCount(b.GetProfile().SpawnCount)
b.SetSpawnRate(b.GetProfile().SpawnRate)
b.SetRunTime(b.GetProfile().RunTime)
if b.GetProfile().LoopCount > 0 {
b.SetLoopCount(b.GetProfile().LoopCount)
}
@@ -251,6 +252,7 @@ func (b *HRPBoomer) rebalanceRunner(profile *boomer.Profile) {
log.Info().Interface("profile", profile).Msg("rebalance tasks successfully")
}
func (b *HRPBoomer) PollTasks(ctx context.Context) {
for {
select {

View File

@@ -67,6 +67,7 @@ var boomCmd = &cobra.Command{
hrpBoomer.SetExpectWorkers(boomArgs.expectWorkers, boomArgs.expectWorkersMaxWait)
hrpBoomer.SetSpawnCount(boomArgs.SpawnCount)
hrpBoomer.SetSpawnRate(boomArgs.SpawnRate)
hrpBoomer.SetRunTime(boomArgs.RunTime)
}
if boomArgs.autoStart {
hrpBoomer.InitBoomer()
@@ -116,6 +117,7 @@ func init() {
boomCmd.Flags().StringVar(&boomArgs.RequestIncreaseRate, "request-increase-rate", "-1", "Request increase rate, disabled by default.")
boomCmd.Flags().Int64Var(&boomArgs.SpawnCount, "spawn-count", 1, "The number of users to spawn for load testing")
boomCmd.Flags().Float64Var(&boomArgs.SpawnRate, "spawn-rate", 1, "The rate for spawning users")
boomCmd.Flags().Int64Var(&boomArgs.RunTime, "run-time", 0, "Stop after the specified amount of time(s), Only used --autostart. Defaults to run forever.")
boomCmd.Flags().Int64Var(&boomArgs.LoopCount, "loop-count", -1, "The specify running cycles for load testing")
boomCmd.Flags().StringVar(&boomArgs.MemoryProfile, "mem-profile", "", "Enable memory profiling.")
boomCmd.Flags().DurationVar(&boomArgs.MemoryProfileDuration, "mem-profile-duration", 30*time.Second, "Memory profile duration.")

View File

@@ -50,6 +50,7 @@ type Boomer struct {
type Profile struct {
SpawnCount int64 `json:"spawn-count,omitempty" yaml:"spawn-count,omitempty" mapstructure:"spawn-count,omitempty"`
SpawnRate float64 `json:"spawn-rate,omitempty" yaml:"spawn-rate,omitempty" mapstructure:"spawn-rate,omitempty"`
RunTime int64 `json:"run-time,omitempty" yaml:"run-time,omitempty" mapstructure:"run-time,omitempty"`
MaxRPS int64 `json:"max-rps,omitempty" yaml:"max-rps,omitempty" mapstructure:"max-rps,omitempty"`
LoopCount int64 `json:"loop-count,omitempty" yaml:"loop-count,omitempty" mapstructure:"loop-count,omitempty"`
RequestIncreaseRate string `json:"request-increase-rate,omitempty" yaml:"request-increase-rate,omitempty" mapstructure:"request-increase-rate,omitempty"`
@@ -274,6 +275,18 @@ func (b *Boomer) SetSpawnRate(spawnRate float64) {
}
}
// SetRunTime sets the maximum run time (in seconds) on the runner that
// corresponds to the boomer's current mode.
func (b *Boomer) SetRunTime(runTime int64) {
	if b.mode == DistributedMasterMode {
		b.masterRunner.setRunTime(runTime)
		return
	}
	if b.mode == DistributedWorkerMode {
		b.workerRunner.setRunTime(runTime)
		return
	}
	b.localRunner.setRunTime(runTime)
}
// SetExpectWorkers sets expect workers while load testing
func (b *Boomer) SetExpectWorkers(expectWorkers int, expectWorkersMaxWait int) {
b.masterRunner.setExpectWorkers(expectWorkers, expectWorkersMaxWait)
@@ -497,6 +510,7 @@ func (b *Boomer) Start(Args *Profile) error {
}
b.SetSpawnCount(Args.SpawnCount)
b.SetSpawnRate(Args.SpawnRate)
b.SetRunTime(Args.RunTime)
b.SetProfile(Args)
err := b.masterRunner.start()
return err
@@ -512,6 +526,7 @@ func (b *Boomer) ReBalance(Args *Profile) error {
}
b.SetSpawnCount(Args.SpawnCount)
b.SetSpawnRate(Args.SpawnRate)
b.SetRunTime(Args.RunTime)
b.SetProfile(Args)
err := b.masterRunner.rebalance()
if err != nil {

View File

@@ -203,6 +203,7 @@ type runner struct {
spawnCount int64 // target clients to spawn
spawnRate float64
runTime int64
controller *Controller
loop *Loop // specify loop count for testcase, count = loopCount * spawnCount
@@ -243,6 +244,14 @@ func (r *runner) getSpawnRate() float64 {
return r.spawnRate
}
// setRunTime records the run-time deadline as an absolute unix timestamp
// (now + runTime seconds). A non-positive runTime means "run forever" and
// is stored as 0 so runTimeCheck skips the deadline check entirely.
//
// Without the guard, the default run time of 0 would store the current
// timestamp as the "deadline", causing runTimeCheck to stop the runner a
// few seconds after start instead of running forever.
func (r *runner) setRunTime(runTime int64) {
	if runTime <= 0 {
		atomic.StoreInt64(&r.runTime, 0)
		return
	}
	atomic.StoreInt64(&r.runTime, time.Now().Unix()+runTime)
}
// getRunTime returns the run-time deadline (unix seconds) recorded by
// setRunTime.
func (r *runner) getRunTime() int64 {
	deadline := atomic.LoadInt64(&r.runTime)
	return deadline
}
// getSpawnCount returns the target number of clients to spawn.
func (r *runner) getSpawnCount() int64 {
	count := atomic.LoadInt64(&r.spawnCount)
	return count
}
@@ -368,6 +377,26 @@ func (r *runner) reset() {
r.reportedChan = make(chan bool)
}
// runTimeCheck stops the runner once the wall clock passes the given
// deadline (an absolute unix timestamp in seconds, as produced by
// setRunTime). A non-positive deadline disables the check. The deadline is
// polled every 3 seconds, and the goroutine exits early when the runner's
// stopChan is closed.
func (r *runner) runTimeCheck(runTime int64) {
	if runTime <= 0 {
		return
	}
	ticker := time.NewTicker(time.Second * 3)
	// Release the ticker's resources when this goroutine exits; without
	// Stop the ticker keeps firing after the check goroutine returns.
	defer ticker.Stop()
	for {
		select {
		case <-r.stopChan:
			return
		case <-ticker.C:
			nowTime := time.Now().Unix()
			if nowTime > runTime {
				r.stop()
				return
			}
		}
	}
}
func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan bool, spawnCompleteFunc func()) {
r.updateState(StateSpawning)
log.Info().
@@ -628,6 +657,8 @@ func (r *localRunner) start() {
// output setup
r.outputOnStart()
go r.runTimeCheck(r.getRunTime())
go r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stoppingChan, nil)
defer func() {
@@ -923,6 +954,8 @@ func (r *workerRunner) start() {
r.outputOnStart()
go r.runTimeCheck(r.getRunTime())
go r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stoppingChan, r.spawnComplete)
defer func() {