From 0ab2017f93d258b3f2e700f2912a713d71638295 Mon Sep 17 00:00:00 2001 From: xucong053 Date: Tue, 29 Mar 2022 22:10:39 +0800 Subject: [PATCH 01/31] feat: support multi-machine collaborative distributed load testing #1193 --- docs/cmd/hrp_boom.md | 9 + go.mod | 11 +- go.sum | 16 +- hrp/boomer.go | 129 ++- hrp/boomer_test.go | 2 +- hrp/cmd/boom.go | 85 +- hrp/config.go | 2 +- hrp/internal/boomer/boomer.go | 333 ++++++- hrp/internal/boomer/boomer_test.go | 4 +- hrp/internal/boomer/client.go | 9 + hrp/internal/boomer/client_grpc.go | 224 +++++ hrp/internal/boomer/client_grpc_test.go | 1 + hrp/internal/boomer/message.go | 51 ++ hrp/internal/boomer/message_test.go | 1 + hrp/internal/boomer/output.go | 12 +- hrp/internal/boomer/runner.go | 830 +++++++++++++++--- hrp/internal/boomer/runner_test.go | 414 +++++++++ hrp/internal/boomer/server.go | 1 + hrp/internal/boomer/server_grpc.go | 343 ++++++++ hrp/internal/boomer/server_grpc_test.go | 1 + hrp/internal/boomer/utils.go | 29 + hrp/internal/builtin/utils.go | 167 ++++ hrp/internal/grpc/messager/messager.pb.go | 276 ++++++ .../grpc/messager/messager_grpc.pb.go | 137 +++ hrp/internal/grpc/proto/messager.proto | 22 + hrp/parameters.go | 8 + hrp/server.go | 299 +++++++ hrp/step_rendezvous.go | 16 +- hrp/testcase.go | 93 +- 29 files changed, 3354 insertions(+), 171 deletions(-) create mode 100644 hrp/internal/boomer/client.go create mode 100644 hrp/internal/boomer/client_grpc.go create mode 100644 hrp/internal/boomer/client_grpc_test.go create mode 100644 hrp/internal/boomer/message.go create mode 100644 hrp/internal/boomer/message_test.go create mode 100644 hrp/internal/boomer/server.go create mode 100644 hrp/internal/boomer/server_grpc.go create mode 100644 hrp/internal/boomer/server_grpc_test.go create mode 100644 hrp/internal/grpc/messager/messager.pb.go create mode 100644 hrp/internal/grpc/messager/messager_grpc.pb.go create mode 100644 hrp/internal/grpc/proto/messager.proto create mode 100644 hrp/server.go diff --git 
a/docs/cmd/hrp_boom.md b/docs/cmd/hrp_boom.md index 85ceb90e..d79d7f14 100644 --- a/docs/cmd/hrp_boom.md +++ b/docs/cmd/hrp_boom.md @@ -21,13 +21,21 @@ hrp boom [flags] ### Options ``` + --autostart Starts the test immediately (without disabling the web UI). Use --spawn-count and --spawn-rate to control user count and run time --cpu-profile string Enable CPU profiling. --cpu-profile-duration duration CPU profile duration. (default 30s) --disable-compression Disable compression --disable-console-output Disable console output. --disable-keepalive Disable keepalive + --expect-workers int How many workers master should expect to connect before starting the test (only when --autostart is used (default 1) + --expect-workers-max-wait int How many workers master should expect to connect before starting the test (only when --autostart is used -h, --help help for boom --loop-count int The specify running cycles for load testing (default -1) + --master master of distributed testing + --master-bind-host string Interfaces (hostname, ip) that hrp master should bind to. Only used when running with --master. Defaults to * (all available interfaces). (default "127.0.0.1") + --master-bind-port int Port that hrp master should bind to. Only used when running with --master. Defaults to 5557. (default 5557) + --master-host string Host or IP address of hrp master for distributed load testing. (default "127.0.0.1") + --master-port int The port to connect to that is used by the hrp master for distributed load testing. (default 5557) --max-rps int Max RPS that boomer can generate, disabled by default. --mem-profile string Enable memory profiling. --mem-profile-duration duration Memory profile duration. (default 30s) @@ -36,6 +44,7 @@ hrp boom [flags] --request-increase-rate string Request increase rate, disabled by default. 
(default "-1") --spawn-count int The number of users to spawn for load testing (default 1) --spawn-rate float The rate for spawning users (default 1) + --worker worker of distributed testing ``` ### SEE ALSO diff --git a/go.mod b/go.mod index f4d02e95..1bc11398 100644 --- a/go.mod +++ b/go.mod @@ -1,12 +1,13 @@ module github.com/httprunner/httprunner/v4 -go 1.16 +go 1.18 require ( github.com/andybalholm/brotli v1.0.4 github.com/denisbrodbeck/machineid v1.0.1 github.com/fatih/color v1.13.0 github.com/getsentry/sentry-go v0.13.0 + github.com/go-errors/errors v1.0.1 github.com/go-openapi/spec v0.20.6 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.3.0 @@ -17,14 +18,20 @@ require ( github.com/json-iterator/go v1.1.12 github.com/maja42/goval v1.2.1 github.com/mattn/go-runewidth v0.0.13 // indirect + github.com/mitchellh/mapstructure v1.4.1 github.com/olekukonko/tablewriter v0.0.5 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.11.0 github.com/rs/zerolog v1.26.1 + github.com/shirou/gopsutil v3.21.11+incompatible github.com/spf13/cobra v1.2.1 github.com/stretchr/testify v1.7.0 + github.com/tklauser/go-sysconf v0.3.10 // indirect + github.com/yusufpapurcu/wmi v1.2.2 // indirect golang.org/x/net v0.0.0-20220225172249-27dd8689420f - gopkg.in/yaml.v3 v3.0.0 + google.golang.org/grpc v1.45.0 + google.golang.org/protobuf v1.27.1 + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b ) // replace github.com/httprunner/funplugin => ../funplugin diff --git a/go.sum b/go.sum index b7292409..a36ed4ad 100644 --- a/go.sum +++ b/go.sum @@ -132,6 +132,8 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= 
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -352,6 +354,7 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -421,6 +424,8 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= 
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -452,6 +457,10 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= @@ -474,6 +483,8 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod 
h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= @@ -624,6 +635,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -668,6 +680,7 @@ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5 h1:y/woIyUBFbpQGKS0u1aHF/40WUDnek3fPOyD08H5Vng= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -877,9 +890,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/hrp/boomer.go b/hrp/boomer.go index c44448f2..57d3695f 100644 --- a/hrp/boomer.go +++ b/hrp/boomer.go @@ -6,13 +6,13 @@ import ( "time" "github.com/httprunner/funplugin" - "github.com/rs/zerolog/log" - "github.com/httprunner/httprunner/v4/hrp/internal/boomer" + "github.com/httprunner/httprunner/v4/hrp/internal/json" "github.com/httprunner/httprunner/v4/hrp/internal/sdk" + "github.com/rs/zerolog/log" ) -func NewBoomer(spawnCount int, spawnRate float64) *HRPBoomer { +func NewStandaloneBoomer(spawnCount int, spawnRate float64) *HRPBoomer { b := &HRPBoomer{ Boomer: boomer.NewStandaloneBoomer(spawnCount, spawnRate), pluginsMutex: new(sync.RWMutex), @@ -22,6 +22,27 @@ func NewBoomer(spawnCount int, spawnRate float64) *HRPBoomer { return b } +func NewMasterBoomer(masterBindHost string, masterBindPort int) *HRPBoomer { + b := &HRPBoomer{ + Boomer: boomer.NewMasterBoomer(masterBindHost, masterBindPort), + pluginsMutex: new(sync.RWMutex), + } + b.hrpRunner = NewRunner(nil) + return b +} + +func NewWorkerBoomer(masterHost string, masterPort int) *HRPBoomer { + b := &HRPBoomer{ + Boomer: 
boomer.NewWorkerBoomer(masterHost, masterPort), + pluginsMutex: new(sync.RWMutex), + } + + b.hrpRunner = NewRunner(nil) + // set client transport for high concurrency load testing + b.hrpRunner.SetClientTransport(b.GetSpawnCount(), b.GetDisableKeepAlive(), b.GetDisableCompression()) + return b +} + type HRPBoomer struct { *boomer.Boomer hrpRunner *HRPRunner @@ -52,8 +73,12 @@ func (b *HRPBoomer) Run(testcases ...ITestCase) { // report execution timing event defer sdk.SendEvent(event.StartTiming("execution")) - var taskSlice []*boomer.Task + taskSlice := b.ConvertTestCasesToTasks(testcases...) + b.Boomer.Run(taskSlice...) +} + +func (b *HRPBoomer) ConvertTestCasesToTasks(testcases ...ITestCase) (taskSlice []*boomer.Task) { // load all testcases testCases, err := LoadTestCases(testcases...) if err != nil { @@ -74,15 +99,107 @@ func (b *HRPBoomer) Run(testcases ...ITestCase) { rendezvousList := initRendezvous(testcase, int64(b.GetSpawnCount())) task := b.convertBoomerTask(testcase, rendezvousList) taskSlice = append(taskSlice, task) - waitRendezvous(rendezvousList) + waitRendezvous(rendezvousList, b) } - b.Boomer.Run(taskSlice...) + return taskSlice +} + +func (b *HRPBoomer) LoopTestCases() { + for { + select { + case <-b.Boomer.ParseTestCasesChan(): + var tcs []ITestCase + for _, tc := range b.GetTestCasesPath() { + tcp := TestCasePath(tc) + tcs = append(tcs, &tcp) + } + b.GetTestCaseBytesChan() <- b.TestCasesToBytes(tcs...) 
+ log.Info().Msg("put testcase successful") + case <-b.Boomer.GetCloseChan(): + return + } + } +} + +func (b *HRPBoomer) OutTestCases(testCases []*TestCase) []*TCase { + var outTestCases []*TCase + for _, tc := range testCases { + caseRunner, err := b.hrpRunner.newCaseRunner(tc) + if err != nil { + log.Error().Err(err).Msg("failed to create runner") + os.Exit(1) + } + caseRunner.parsedConfig.Parameters = caseRunner.parametersIterator.outParameters() + outTestCases = append(outTestCases, &TCase{ + Config: caseRunner.parsedConfig, + TestSteps: caseRunner.testCase.ToTCase().TestSteps, + }) + } + return outTestCases +} + +func (b *HRPBoomer) TestCasesToBytes(testcases ...ITestCase) []byte { + // load all testcases + testCases, err := LoadTestCases(testcases...) + if err != nil { + log.Error().Err(err).Msg("failed to load testcases") + os.Exit(1) + } + tcs := b.OutTestCases(testCases) + testCasesBytes, err := json.Marshal(tcs) + if err != nil { + log.Error().Err(err).Msg("failed to marshal testcases") + return nil + } + return testCasesBytes +} + +func (b *HRPBoomer) BytesToTestCases(testCasesBytes []byte) []*TCase { + var testcase []*TCase + err := json.Unmarshal(testCasesBytes, &testcase) + if err != nil { + log.Error().Err(err).Msg("failed to unmarshal testcases") + } + return testcase } func (b *HRPBoomer) Quit() { b.Boomer.Quit() } +func (b *HRPBoomer) handleTasks(tcs []byte) { + //Todo: 过滤掉已经传输过的task + testCases := b.BytesToTestCases(tcs) + var testcases []ITestCase + for _, tc := range testCases { + tesecase, err := tc.toTestCase() + if err != nil { + log.Error().Err(err).Msg("failed to load testcases") + } + testcases = append(testcases, tesecase) + } + log.Info().Interface("testcases", testcases).Msg("loop tasks successful") + if b.Boomer.GetState() == boomer.StateRunning || b.Boomer.GetState() == boomer.StateSpawning { + b.Boomer.SetTasks(b.ConvertTestCasesToTasks(testcases...)...) + } else { + b.Run(testcases...) 
+ } +} + +func (b *HRPBoomer) LoopTasks() { + for { + select { + case tcs := <-b.Boomer.GetTestCaseBytesChan(): + if len(b.Boomer.GetTestCaseBytesChan()) > 0 { + continue + } + go b.handleTasks(tcs) + case <-b.Boomer.GetCloseChan(): + return + } + } +} + func (b *HRPBoomer) convertBoomerTask(testcase *TestCase, rendezvousList []*Rendezvous) *boomer.Task { // init runner for testcase // this runner is shared by multiple session runners diff --git a/hrp/boomer_test.go b/hrp/boomer_test.go index 547a4618..83151b5e 100644 --- a/hrp/boomer_test.go +++ b/hrp/boomer_test.go @@ -27,7 +27,7 @@ func TestBoomerStandaloneRun(t *testing.T) { } testcase2 := TestCasePath(demoTestCaseWithPluginJSONPath) - b := NewBoomer(2, 1) + b := NewStandaloneBoomer(2, 1) go b.Run(testcase1, &testcase2) time.Sleep(5 * time.Second) b.Quit() diff --git a/hrp/cmd/boom.go b/hrp/cmd/boom.go index 120109e0..09d87f2a 100644 --- a/hrp/cmd/boom.go +++ b/hrp/cmd/boom.go @@ -21,7 +21,7 @@ var boomCmd = &cobra.Command{ Example: ` $ hrp boom demo.json # run specified json testcase file $ hrp boom demo.yaml # run specified yaml testcase file $ hrp boom examples/ # run testcases in specified folder`, - Args: cobra.MinimumNArgs(1), + Args: cobra.MinimumNArgs(0), PreRun: func(cmd *cobra.Command, args []string) { boomer.SetUlimit(10240) // ulimit -n 10240 if !strings.EqualFold(logLevel, "DEBUG") { @@ -35,8 +35,65 @@ var boomCmd = &cobra.Command{ path := hrp.TestCasePath(arg) paths = append(paths, &path) } - hrpBoomer := makeHRPBoomer() - hrpBoomer.Run(paths...) 
+ + // if set profile, the priority is higher than the other commands + if boomArgs.profile != "" { + err := builtin.LoadFile(boomArgs.profile, &boomArgs) + if err != nil { + log.Error().Err(err).Msg("failed to load profile") + os.Exit(1) + } + } + + var hrpBoomer *hrp.HRPBoomer + if boomArgs.master { + hrpBoomer = hrp.NewMasterBoomer(boomArgs.masterBindHost, boomArgs.masterBindPort) + hrpBoomer.SetTestCasesPath(args) + if boomArgs.autoStart { + hrpBoomer.SetAutoStart() + hrpBoomer.SetExpectWorkers(boomArgs.expectWorkers, boomArgs.expectWorkersMaxWait) + hrpBoomer.SetSpawnCount(boomArgs.SpawnCount) + hrpBoomer.SetSpawnRate(boomArgs.SpawnRate) + } + hrpBoomer.EnableGracefulQuit() + go hrpBoomer.StartServer() + go hrpBoomer.RunMaster() + hrpBoomer.LoopTestCases() + return + } else if boomArgs.worker { + hrpBoomer = hrp.NewWorkerBoomer(boomArgs.masterHost, boomArgs.masterPort) + if boomArgs.ignoreQuit { + hrpBoomer.SetIgnoreQuit() + } + go hrpBoomer.RunWorker() + } else { + hrpBoomer = hrp.NewStandaloneBoomer(boomArgs.SpawnCount, boomArgs.SpawnRate) + if boomArgs.LoopCount > 0 { + hrpBoomer.SetLoopCount(boomArgs.LoopCount) + } + } + hrpBoomer.SetRateLimiter(boomArgs.MaxRPS, boomArgs.RequestIncreaseRate) + if !boomArgs.DisableConsoleOutput { + + hrpBoomer.AddOutput(boomer.NewConsoleOutput()) + } + if boomArgs.PrometheusPushgatewayURL != "" { + hrpBoomer.AddOutput(boomer.NewPrometheusPusherOutput(boomArgs.PrometheusPushgatewayURL, "hrp", hrpBoomer.GetMode())) + } + hrpBoomer.SetDisableKeepAlive(boomArgs.DisableKeepalive) + hrpBoomer.SetDisableCompression(boomArgs.DisableCompression) + hrpBoomer.SetClientTransport() + if venv != "" { + hrpBoomer.SetPython3Venv(venv) + } + hrpBoomer.EnableCPUProfile(boomArgs.CPUProfile, boomArgs.CPUProfileDuration) + hrpBoomer.EnableMemoryProfile(boomArgs.MemoryProfile, boomArgs.MemoryProfileDuration) + hrpBoomer.EnableGracefulQuit() + if boomArgs.worker { + hrpBoomer.LoopTasks() + } else { + hrpBoomer.Run(paths...) 
+ } }, } @@ -55,6 +112,16 @@ type BoomArgs struct { DisableCompression bool `json:"disable-compression,omitempty" yaml:"disable-compression,omitempty"` DisableKeepalive bool `json:"disable-keepalive,omitempty" yaml:"disable-keepalive,omitempty"` profile string + master bool + worker bool + ignoreQuit bool + masterHost string + masterPort int + masterBindHost string + masterBindPort int + autoStart bool + expectWorkers int + expectWorkersMaxWait int } var boomArgs BoomArgs @@ -76,6 +143,16 @@ func init() { boomCmd.Flags().BoolVar(&boomArgs.DisableCompression, "disable-compression", false, "Disable compression") boomCmd.Flags().BoolVar(&boomArgs.DisableKeepalive, "disable-keepalive", false, "Disable keepalive") boomCmd.Flags().StringVar(&boomArgs.profile, "profile", "", "profile for load testing") + boomCmd.Flags().BoolVar(&boomArgs.master, "master", false, "master of distributed testing") + boomCmd.Flags().StringVar(&boomArgs.masterBindHost, "master-bind-host", "127.0.0.1", "Interfaces (hostname, ip) that hrp master should bind to. Only used when running with --master. Defaults to * (all available interfaces).") + boomCmd.Flags().IntVar(&boomArgs.masterBindPort, "master-bind-port", 5557, "Port that hrp master should bind to. Only used when running with --master. Defaults to 5557.") + boomCmd.Flags().BoolVar(&boomArgs.worker, "worker", false, "worker of distributed testing") + boomCmd.Flags().BoolVar(&boomArgs.ignoreQuit, "ignore-quit", false, "ignores quit from master (only when --worker is used)") + boomCmd.Flags().StringVar(&boomArgs.masterHost, "master-host", "127.0.0.1", "Host or IP address of hrp master for distributed load testing.") + boomCmd.Flags().IntVar(&boomArgs.masterPort, "master-port", 5557, "The port to connect to that is used by the hrp master for distributed load testing.") + boomCmd.Flags().BoolVar(&boomArgs.autoStart, "autostart", false, "Starts the test immediately (without disabling the web UI). 
Use --spawn-count and --spawn-rate to control user count and run time") + boomCmd.Flags().IntVar(&boomArgs.expectWorkers, "expect-workers", 1, "How many workers master should expect to connect before starting the test (only when --autostart is used)") + boomCmd.Flags().IntVar(&boomArgs.expectWorkersMaxWait, "expect-workers-max-wait", 0, "How many workers master should expect to connect before starting the test (only when --autostart is used") } func makeHRPBoomer() *hrp.HRPBoomer { @@ -88,7 +165,7 @@ func makeHRPBoomer() *hrp.HRPBoomer { } } - hrpBoomer := hrp.NewBoomer(boomArgs.SpawnCount, boomArgs.SpawnRate) + hrpBoomer := hrp.NewStandaloneBoomer(boomArgs.SpawnCount, boomArgs.SpawnRate) hrpBoomer.SetRateLimiter(boomArgs.MaxRPS, boomArgs.RequestIncreaseRate) if boomArgs.LoopCount > 0 { hrpBoomer.SetLoopCount(boomArgs.LoopCount) diff --git a/hrp/config.go b/hrp/config.go index 8d50c690..06930564 100644 --- a/hrp/config.go +++ b/hrp/config.go @@ -98,7 +98,7 @@ func (c *TConfig) SetWebSocket(times, interval, timeout, size int64) { } type ThinkTimeConfig struct { - Strategy thinkTimeStrategy `json:"strategy,omitempty" yaml:"strategy,omitempty"` // default、random、limit、multiply、ignore + Strategy thinkTimeStrategy `json:"strategy,omitempty" yaml:"strategy,omitempty"` // default、random、multiply、ignore Setting interface{} `json:"setting,omitempty" yaml:"setting,omitempty"` // random(map): {"min_percentage": 0.5, "max_percentage": 1.5}; 10、multiply(float64): 1.5 Limit float64 `json:"limit,omitempty" yaml:"limit,omitempty"` // limit think time no more than specific time, ignore if value <= 0 } diff --git a/hrp/internal/boomer/boomer.go b/hrp/internal/boomer/boomer.go index cc424b12..a4e9bb72 100644 --- a/hrp/internal/boomer/boomer.go +++ b/hrp/internal/boomer/boomer.go @@ -4,9 +4,13 @@ import ( "math" "os" "os/signal" + "strconv" + "strings" "syscall" "time" + "github.com/httprunner/httprunner/v4/hrp/internal/builtin" + "github.com/pkg/errors" "github.com/rs/zerolog/log" ) 
@@ -25,9 +29,18 @@ const ( // A Boomer is used to run tasks. type Boomer struct { - mode Mode + masterHost string + masterPort int + mode Mode - localRunner *localRunner + localRunner *localRunner + workerRunner *workerRunner + masterRunner *masterRunner + + testcasePath []string + + spawnCount int // target clients to spawn + spawnRate float64 cpuProfile string cpuProfileDuration time.Duration @@ -73,9 +86,101 @@ func NewStandaloneBoomer(spawnCount int, spawnRate float64) *Boomer { return &Boomer{ mode: StandaloneMode, localRunner: newLocalRunner(spawnCount, spawnRate), + spawnCount: spawnCount, + spawnRate: spawnRate, } } +// NewMasterBoomer returns a new Boomer. +func NewMasterBoomer(masterBindHost string, masterBindPort int) *Boomer { + return &Boomer{ + masterRunner: newMasterRunner(masterBindHost, masterBindPort), + mode: DistributedMasterMode, + } +} + +// NewWorkerBoomer returns a new Boomer. +func NewWorkerBoomer(masterHost string, masterPort int) *Boomer { + return &Boomer{ + workerRunner: newWorkerRunner(masterHost, masterPort), + masterHost: masterHost, + masterPort: masterPort, + mode: DistributedWorkerMode, + } +} + +// SetAutoStart auto start to load testing +func (b *Boomer) SetAutoStart() { + b.masterRunner.autoStart = true + +} + +// RunMaster start to run master runner +func (b *Boomer) RunMaster() { + b.masterRunner.run() +} + +// RunWorker start to run worker runner +func (b *Boomer) RunWorker() { + b.workerRunner.run() +} + +// GetTestCaseBytesChan gets test case bytes chan +func (b *Boomer) GetTestCaseBytesChan() chan []byte { + switch b.mode { + case DistributedMasterMode: + return b.masterRunner.testCaseBytes + case DistributedWorkerMode: + return b.workerRunner.testCaseBytes + } + return nil +} + +func (b *Boomer) SetTestCasesPath(paths []string) { + b.testcasePath = paths +} + +func (b *Boomer) GetTestCasesPath() []string { + return b.testcasePath +} + +func (b *Boomer) ParseTestCasesChan() chan bool { + return 
b.masterRunner.parseTestCasesChan +} + +// GetState gets worker state +func (b *Boomer) GetState() int32 { + switch b.mode { + case DistributedWorkerMode: + return b.workerRunner.getState() + case DistributedMasterMode: + return b.masterRunner.getState() + default: + return b.localRunner.getState() + } +} + +// SetSpawnCount sets spawn count +func (b *Boomer) SetSpawnCount(spawnCount int) { + b.spawnCount = spawnCount + if b.mode == DistributedMasterMode { + b.masterRunner.spawn.setSpawn(int64(spawnCount), -1) + } +} + +// SetSpawnRate sets spawn rate +func (b *Boomer) SetSpawnRate(spawnRate float64) { + b.spawnRate = spawnRate + if b.mode == DistributedMasterMode { + b.masterRunner.spawn.setSpawn(-1, spawnRate) + } +} + +// SetExpectWorkers sets expect workers while load testing +func (b *Boomer) SetExpectWorkers(expectWorkers int, expectWorkersMaxWait int) { + b.masterRunner.setExpectWorkers(expectWorkers, expectWorkersMaxWait) +} + // SetRateLimiter creates rate limiter with the given limit and burst. 
func (b *Boomer) SetRateLimiter(maxRPS int64, requestIncreaseRate string) { var rateLimiter RateLimiter @@ -98,8 +203,14 @@ func (b *Boomer) SetRateLimiter(maxRPS int64, requestIncreaseRate string) { } if rateLimiter != nil { - b.localRunner.rateLimitEnabled = true - b.localRunner.rateLimiter = rateLimiter + switch b.mode { + case DistributedWorkerMode: + b.workerRunner.rateLimitEnabled = true + b.workerRunner.rateLimiter = rateLimiter + case StandaloneMode: + b.localRunner.rateLimitEnabled = true + b.localRunner.rateLimiter = rateLimiter + } } } @@ -108,6 +219,11 @@ func (b *Boomer) SetDisableKeepAlive(disableKeepalive bool) { b.disableKeepalive = disableKeepalive } +// SetIgnoreQuit not quit while master quit +func (b *Boomer) SetIgnoreQuit() { + b.workerRunner.ignoreQuit = true +} + // SetDisableCompression disable compression to prevent the Transport from requesting compression with an "Accept-Encoding: gzip" func (b *Boomer) SetDisableCompression(disableCompression bool) { b.disableCompression = disableCompression @@ -124,12 +240,26 @@ func (b *Boomer) GetDisableCompression() bool { // SetLoopCount set loop count for test. func (b *Boomer) SetLoopCount(loopCount int64) { // total loop count for testcase, it will be evenly distributed to each worker - b.localRunner.loop = &Loop{loopCount: loopCount * int64(b.localRunner.spawnCount)} + switch b.mode { + case DistributedWorkerMode: + b.workerRunner.loop = &Loop{loopCount: loopCount * b.workerRunner.spawn.getSpawnCount()} + case DistributedMasterMode: + b.masterRunner.loop = &Loop{loopCount: loopCount * b.masterRunner.spawn.getSpawnCount()} + case StandaloneMode: + b.localRunner.loop = &Loop{loopCount: loopCount * b.localRunner.spawn.getSpawnCount()} + } } // AddOutput accepts outputs which implements the boomer.Output interface. 
func (b *Boomer) AddOutput(o Output) { - b.localRunner.addOutput(o) + switch b.mode { + case DistributedWorkerMode: + b.workerRunner.addOutput(o) + case DistributedMasterMode: + b.masterRunner.addOutput(o) + case StandaloneMode: + b.localRunner.addOutput(o) + } } // EnableCPUProfile will start cpu profiling after run. @@ -150,6 +280,9 @@ func (b *Boomer) EnableGracefulQuit() { signal.Notify(c, syscall.SIGTERM, syscall.SIGINT) go func() { <-c + if b.mode == DistributedWorkerMode { + b.workerRunner.ignoreQuit = false + } b.Quit() }() } @@ -169,13 +302,45 @@ func (b *Boomer) Run(tasks ...*Task) { } } - b.localRunner.setTasks(tasks) - b.localRunner.start() + switch b.mode { + case DistributedWorkerMode: + log.Info().Msg("running in worker mode") + b.workerRunner.setTasks(tasks) + b.workerRunner.start() + case StandaloneMode: + log.Info().Msg("running in standalone mode") + b.localRunner.setTasks(tasks) + b.localRunner.start() + default: + log.Error().Err(errors.New("Invalid mode, expected boomer.DistributedMode or boomer.StandaloneMode")) + } +} + +func (b *Boomer) SetTasks(tasks ...*Task) { + switch b.mode { + case DistributedWorkerMode: + log.Info().Msg("set tasks to worker") + b.workerRunner.setTasks(tasks) + case StandaloneMode: + log.Info().Msg("set tasks to standalone") + b.localRunner.setTasks(tasks) + default: + log.Error().Err(errors.New("Invalid mode, expected boomer.DistributedMode or boomer.StandaloneMode")) + } } // RecordTransaction reports a transaction stat. 
func (b *Boomer) RecordTransaction(name string, success bool, elapsedTime int64, contentSize int64) { - b.localRunner.stats.transactionChan <- &transaction{ + var runnerStats *requestStats + switch b.mode { + case DistributedWorkerMode: + runnerStats = b.workerRunner.stats + case DistributedMasterMode: + runnerStats = b.masterRunner.stats + case StandaloneMode: + runnerStats = b.localRunner.stats + } + runnerStats.transactionChan <- &transaction{ name: name, success: success, elapsedTime: elapsedTime, @@ -185,7 +350,16 @@ func (b *Boomer) RecordTransaction(name string, success bool, elapsedTime int64, // RecordSuccess reports a success. func (b *Boomer) RecordSuccess(requestType, name string, responseTime int64, responseLength int64) { - b.localRunner.stats.requestSuccessChan <- &requestSuccess{ + var runnerStats *requestStats + switch b.mode { + case DistributedWorkerMode: + runnerStats = b.workerRunner.stats + case DistributedMasterMode: + runnerStats = b.masterRunner.stats + case StandaloneMode: + runnerStats = b.localRunner.stats + } + runnerStats.requestSuccessChan <- &requestSuccess{ requestType: requestType, name: name, responseTime: responseTime, @@ -195,7 +369,16 @@ func (b *Boomer) RecordSuccess(requestType, name string, responseTime int64, res // RecordFailure reports a failure. 
func (b *Boomer) RecordFailure(requestType, name string, responseTime int64, exception string) { - b.localRunner.stats.requestFailureChan <- &requestFailure{ + var runnerStats *requestStats + switch b.mode { + case DistributedWorkerMode: + runnerStats = b.workerRunner.stats + case DistributedMasterMode: + runnerStats = b.masterRunner.stats + case StandaloneMode: + runnerStats = b.localRunner.stats + } + runnerStats.requestFailureChan <- &requestFailure{ requestType: requestType, name: name, responseTime: responseTime, @@ -203,19 +386,139 @@ func (b *Boomer) RecordFailure(requestType, name string, responseTime int64, exc } } +// Start starts to run +func (b *Boomer) Start(Args map[string]interface{}) error { + spawnCount, ok := Args["spawn_count"] + if ok { + v, err := strconv.Atoi(spawnCount.(string)) + if err != nil { + log.Error().Err(err).Msg("spawn_count sets error") + return err + } + b.SetSpawnCount(v) + } else { + return errors.New("spawn count error") + } + spawnRate, ok := Args["spawn_rate"] + if ok { + v, err := builtin.Interface2Float64(spawnRate) + if err != nil { + log.Error().Err(err).Msg("spawn_count sets error") + return err + } + b.SetSpawnRate(v) + } else { + b.SetSpawnRate(float64(b.GetSpawnCount())) + } + path, ok := Args["path"].(string) + if ok { + paths := strings.Split(path, ",") + b.SetTestCasesPath(paths) + } else { + return errors.New("testcase path error") + } + err := b.masterRunner.start() + return err +} + +// ReBalance starts to rebalance load test +func (b *Boomer) ReBalance(Args map[string]interface{}) error { + spawnCount, ok := Args["spawn_count"] + if ok { + v, err := strconv.Atoi(spawnCount.(string)) + if err != nil { + log.Error().Err(err).Msg("spawn_count sets error") + return err + } + b.SetSpawnCount(v) + } + spawnRate, ok := Args["spawn_rate"] + if ok { + v, err := builtin.Interface2Float64(spawnRate) + if err != nil { + log.Error().Err(err).Msg("spawn_count sets error") + return err + } + b.SetSpawnRate(v) + } + path, ok 
:= Args["path"].(string) + if ok { + paths := strings.Split(path, ",") + b.SetTestCasesPath(paths) + } + err := b.masterRunner.rebalance() + if err != nil { + log.Error().Err(err).Msg("failed to rebalance") + } + return err +} + +// Stop stops to load test +func (b *Boomer) Stop() { + switch b.mode { + case DistributedMasterMode: + b.masterRunner.stop() + default: + } +} + +// GetWorkersInfo gets workers +func (b *Boomer) GetWorkersInfo() []WorkerNode { + return b.masterRunner.server.getAllWorkers() +} + +func (b *Boomer) GetCloseChan() chan bool { + switch b.mode { + case DistributedWorkerMode: + return b.workerRunner.closeChan + case DistributedMasterMode: + return b.masterRunner.closeChan + default: + return b.localRunner.closeChan + } +} + // Quit will send a quit message to the master. func (b *Boomer) Quit() { - b.localRunner.stop() + switch b.mode { + case DistributedWorkerMode: + b.workerRunner.close() + case DistributedMasterMode: + b.masterRunner.close() + case StandaloneMode: + b.localRunner.stop() + } } func (b *Boomer) GetSpawnDoneChan() chan struct{} { - return b.localRunner.spawnDone + switch b.mode { + case DistributedWorkerMode: + return b.workerRunner.spawn.getSpawnDone() + case DistributedMasterMode: + return b.masterRunner.spawn.getSpawnDone() + default: + return b.localRunner.spawn.getSpawnDone() + } } func (b *Boomer) GetSpawnCount() int { - return b.localRunner.spawnCount + switch b.mode { + case DistributedWorkerMode: + return int(b.workerRunner.spawn.getSpawnCount()) + case DistributedMasterMode: + return int(b.masterRunner.spawn.getSpawnCount()) + default: + return int(b.localRunner.spawn.getSpawnCount()) + } } func (b *Boomer) ResetStartTime() { - b.localRunner.stats.total.resetStartTime() + switch b.mode { + case DistributedWorkerMode: + b.workerRunner.stats.total.resetStartTime() + case DistributedMasterMode: + b.masterRunner.stats.total.resetStartTime() + default: + b.localRunner.stats.total.resetStartTime() + } } diff --git 
a/hrp/internal/boomer/boomer_test.go b/hrp/internal/boomer/boomer_test.go index 7f113f87..fde9b37b 100644 --- a/hrp/internal/boomer/boomer_test.go +++ b/hrp/internal/boomer/boomer_test.go @@ -12,11 +12,11 @@ import ( func TestNewStandaloneBoomer(t *testing.T) { b := NewStandaloneBoomer(100, 10) - if b.localRunner.spawnCount != 100 { + if b.localRunner.spawn.spawnCount != 100 { t.Error("spawnCount should be 100") } - if b.localRunner.spawnRate != 10 { + if b.localRunner.spawn.spawnRate != 10 { t.Error("spawnRate should be 10") } } diff --git a/hrp/internal/boomer/client.go b/hrp/internal/boomer/client.go new file mode 100644 index 00000000..b3bf6def --- /dev/null +++ b/hrp/internal/boomer/client.go @@ -0,0 +1,9 @@ +package boomer + +type client interface { + connect() (err error) + close() + recvChannel() chan *genericMessage + sendChannel() chan *genericMessage + disconnectedChannel() chan bool +} diff --git a/hrp/internal/boomer/client_grpc.go b/hrp/internal/boomer/client_grpc.go new file mode 100644 index 00000000..5fa33cc1 --- /dev/null +++ b/hrp/internal/boomer/client_grpc.go @@ -0,0 +1,224 @@ +package boomer + +import ( + "context" + "fmt" + "io" + "sync" + "sync/atomic" + "time" + + "github.com/httprunner/httprunner/v4/hrp/internal/grpc/messager" + "github.com/rs/zerolog/log" + "google.golang.org/grpc" +) + +type grpcClient struct { + masterHost string + masterPort int + identity string // nodeID + + config *grpcClientConfig + + fromMaster chan *genericMessage + toMaster chan *genericMessage + disconnectedFromMaster chan bool + shutdownChan chan bool + + failCount int32 + + wg sync.WaitGroup +} + +type grpcClientConfig struct { + ctx context.Context + cancel context.CancelFunc // use cancel() to stop client + conn *grpc.ClientConn + biStream messager.Message_BidirectionalStreamingMessageClient + + mutex sync.RWMutex +} + +func (c *grpcClientConfig) getBiStreamClient() messager.Message_BidirectionalStreamingMessageClient { + c.mutex.RLock() + defer 
c.mutex.RUnlock() + return c.biStream +} + +func (c *grpcClientConfig) setBiStreamClient(s messager.Message_BidirectionalStreamingMessageClient) { + c.mutex.Lock() + defer c.mutex.Unlock() + c.biStream = s +} + +func newClient(masterHost string, masterPort int, identity string) (client *grpcClient) { + log.Info().Msg("Boomer is built with grpc support.") + // Initiate the stream with a context that supports cancellation. + ctx, cancel := context.WithCancel(context.Background()) + client = &grpcClient{ + masterHost: masterHost, + masterPort: masterPort, + identity: identity, + fromMaster: make(chan *genericMessage, 100), + toMaster: make(chan *genericMessage, 100), + disconnectedFromMaster: make(chan bool), + shutdownChan: make(chan bool), + config: &grpcClientConfig{ + ctx: ctx, + cancel: cancel, + mutex: sync.RWMutex{}, + }, + } + return client +} + +func (c *grpcClient) connect() (err error) { + addr := fmt.Sprintf("%v:%v", c.masterHost, c.masterPort) + c.config.conn, err = grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + log.Error().Err(err).Msg("failed to connect") + return err + } + + biStream, err := messager.NewMessageClient(c.config.conn).BidirectionalStreamingMessage(c.config.ctx) + if err != nil { + log.Error().Err(err).Msg("call bidirectional streaming message err") + return err + } + c.config.setBiStreamClient(biStream) + log.Info().Msg(fmt.Sprintf("Boomer is connected to master(%s) press Ctrl+c to quit.\n", addr)) + go c.recv() + go c.send() + + return nil +} + +func (c *grpcClient) reConnect() (err error) { + addr := fmt.Sprintf("%v:%v", c.masterHost, c.masterPort) + c.config.conn, err = grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + return + } + + biStream, err := messager.NewMessageClient(c.config.conn).BidirectionalStreamingMessage(c.config.ctx) + if err != nil { + return + } + c.config.setBiStreamClient(biStream) + + // register worker information to master + c.sendChannel() <- newGenericMessage("register", nil, c.identity) + 
//// tell master, I'm ready + //log.Info().Msg("send client ready signal") + //c.sendChannel() <- newClientReadyMessageToMaster(c.identity) + log.Info().Msg(fmt.Sprintf("Boomer is reConnected to master(%s) press Ctrl+c to quit.\n", addr)) + return +} + +func (c *grpcClient) close() { + close(c.shutdownChan) + c.config.cancel() + if c.config.conn != nil { + c.config.conn.Close() + } +} + +func (c *grpcClient) recvChannel() chan *genericMessage { + return c.fromMaster +} + +func (c *grpcClient) recv() { + c.wg.Add(1) + defer c.wg.Done() + for { + select { + case <-c.shutdownChan: + return + default: + if c.config.getBiStreamClient() == nil { + continue + } + msg, err := c.config.getBiStreamClient().Recv() + if err != nil { + time.Sleep(1 * time.Second) + //log.Error().Err(err).Msg("failed to get message") + continue + } + if msg == nil { + continue + } + + if msg.NodeID != c.identity { + log.Warn(). + Str("nodeID", msg.NodeID). + Str("type", msg.Type). + Interface("data", msg.Data). + Msg(fmt.Sprintf("not for me(%s)", c.identity)) + continue + } + + c.fromMaster <- &genericMessage{ + Type: msg.Type, + Data: msg.Data, + NodeID: msg.NodeID, + Tasks: msg.Tasks, + } + + log.Info(). + Str("nodeID", msg.NodeID). + Str("type", msg.Type). + Interface("data", msg.Data). + Interface("tasks", msg.Tasks). + Msg("receive data from master") + } + } +} + +func (c *grpcClient) sendChannel() chan *genericMessage { + return c.toMaster +} + +func (c *grpcClient) send() { + c.wg.Add(1) + defer c.wg.Done() + for { + select { + case <-c.shutdownChan: + return + case msg := <-c.toMaster: + c.sendMessage(msg) + + // We may send genericMessage to master. + switch msg.Type { + case "quit": + c.disconnectedFromMaster <- true + } + } + } +} + +func (c *grpcClient) sendMessage(msg *genericMessage) { + log.Info(). + Str("nodeID", msg.NodeID). + Str("type", msg.Type). + Interface("data", msg.Data). 
+ Msg("send data to server") + if c.config.getBiStreamClient() == nil { + return + } + err := c.config.getBiStreamClient().Send(&messager.StreamRequest{Type: msg.Type, Data: msg.Data, NodeID: msg.NodeID}) + switch err { + case nil: + atomic.StoreInt32(&c.failCount, 0) + break + case io.EOF: + fallthrough + default: + //log.Error().Err(err).Interface("genericMessage", *msg).Msg("failed to send message") + atomic.AddInt32(&c.failCount, 1) + } +} + +func (c *grpcClient) disconnectedChannel() chan bool { + return c.disconnectedFromMaster +} diff --git a/hrp/internal/boomer/client_grpc_test.go b/hrp/internal/boomer/client_grpc_test.go new file mode 100644 index 00000000..853e847e --- /dev/null +++ b/hrp/internal/boomer/client_grpc_test.go @@ -0,0 +1 @@ +package boomer diff --git a/hrp/internal/boomer/message.go b/hrp/internal/boomer/message.go new file mode 100644 index 00000000..93b9a0b3 --- /dev/null +++ b/hrp/internal/boomer/message.go @@ -0,0 +1,51 @@ +package boomer + +const ( + typeClientReady = "client_ready" + typeClientStopped = "client_stopped" + typeHeartbeat = "heartbeat" + typeSpawning = "spawning" + typeSpawningComplete = "spawning_complete" + typeQuit = "quit" + typeException = "exception" +) + +type message interface { +} + +type genericMessage struct { + Type string `json:"type,omitempty"` + Data map[string]int64 `json:"data,omitempty"` + NodeID string `json:"node_id,omitempty"` + Tasks []byte `json:"tasks,omitempty"` +} + +func newGenericMessage(t string, data map[string]int64, nodeID string) (msg *genericMessage) { + return &genericMessage{ + Type: t, + Data: data, + NodeID: nodeID, + } +} + +func newQuitMessage(nodeID string) (msg *genericMessage) { + return &genericMessage{ + Type: "quit", + NodeID: nodeID, + } +} + +func newSpawnMessageToWorker(t string, data map[string]int64, tasks []byte) (msg *genericMessage) { + return &genericMessage{ + Type: t, + Data: data, + Tasks: tasks, + } +} + +func newClientReadyMessageToMaster(nodeID string) (msg 
*genericMessage) { + return &genericMessage{ + Type: "client_ready", + NodeID: nodeID, + } +} diff --git a/hrp/internal/boomer/message_test.go b/hrp/internal/boomer/message_test.go new file mode 100644 index 00000000..853e847e --- /dev/null +++ b/hrp/internal/boomer/message_test.go @@ -0,0 +1 @@ +package boomer diff --git a/hrp/internal/boomer/output.go b/hrp/internal/boomer/output.go index db77b053..3ef8bdc9 100644 --- a/hrp/internal/boomer/output.go +++ b/hrp/internal/boomer/output.go @@ -118,15 +118,15 @@ func (o *ConsoleOutput) OnEvent(data map[string]interface{}) { var state string switch output.State { - case stateInit: + case StateInit: state = "initializing" - case stateSpawning: + case StateSpawning: state = "spawning" - case stateRunning: + case StateRunning: state = "running" - case stateQuitting: + case StateQuitting: state = "quitting" - case stateStopped: + case StateStopped: state = "stopped" } @@ -525,7 +525,7 @@ func (o *PrometheusPusherOutput) OnStart() { // OnStop of PrometheusPusherOutput has nothing to do. 
func (o *PrometheusPusherOutput) OnStop() { // update runner state: stopped - gaugeState.Set(float64(stateStopped)) + gaugeState.Set(float64(StateStopped)) if err := o.pusher.Push(); err != nil { log.Error().Err(err).Msg("push to Pushgateway failed") } diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 8419f6ab..1181d6c2 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -10,20 +10,26 @@ import ( "sync/atomic" "time" + "github.com/go-errors/errors" + "github.com/olekukonko/tablewriter" "github.com/rs/zerolog/log" ) const ( - stateInit = iota + 1 // initializing - stateSpawning // spawning - stateRunning // running - stateQuitting // quitting - stateStopped // stopped + StateInit = iota + 1 // initializing + StateSpawning // spawning + StateRunning // running + StateStopping // stopping + StateStopped // stopped + StateQuitting // quitting + StateMissing // missing ) const ( reportStatsInterval = 3 * time.Second + heartbeatInterval = 1 * time.Second + heartbeatLiveness = 3 * time.Second ) type Loop struct { @@ -51,23 +57,113 @@ func (l *Loop) increaseFinishedCount() { atomic.AddInt64(&l.finishedCount, 1) } +type SpawnInfo struct { + spawnCount int64 // target clients to spawn + acquiredCount int64 // count acquired of workers + spawnRate float64 + spawnDone chan struct{} + + mutex sync.RWMutex +} + +func (s *SpawnInfo) setSpawn(spawnCount int64, spawnRate float64) { + s.mutex.Lock() + defer s.mutex.Unlock() + if spawnCount > 0 { + atomic.StoreInt64(&s.spawnCount, spawnCount) + } + if spawnRate > 0 { + s.spawnRate = spawnRate + } +} + +func (s *SpawnInfo) getSpawnCount() int64 { + s.mutex.RLock() + defer s.mutex.RUnlock() + return atomic.LoadInt64(&s.spawnCount) +} + +func (s *SpawnInfo) getSpawnRate() float64 { + s.mutex.RLock() + defer s.mutex.RUnlock() + return s.spawnRate +} + +func (s *SpawnInfo) getSpawnDone() chan struct{} { + s.mutex.RLock() + defer s.mutex.RUnlock() + return s.spawnDone +} + +func 
(s *SpawnInfo) done() { + close(s.spawnDone) +} + +func (s *SpawnInfo) isFinished() bool { + // return true when workers acquired + return atomic.LoadInt64(&s.acquiredCount) == atomic.LoadInt64(&s.spawnCount) +} + +func (s *SpawnInfo) acquire() bool { + // get one ticket when there are still remaining spawn count to test + // return true when getting ticket successfully + if atomic.LoadInt64(&s.acquiredCount) < atomic.LoadInt64(&s.spawnCount) { + atomic.AddInt64(&s.acquiredCount, 1) + return true + } + return false +} + +func (s *SpawnInfo) erase() bool { + // return true if acquiredCount > spawnCount + if atomic.LoadInt64(&s.acquiredCount) > atomic.LoadInt64(&s.spawnCount) { + atomic.AddInt64(&s.acquiredCount, -1) + return true + } + return false +} + +func (s *SpawnInfo) increaseFinishedCount() { + atomic.AddInt64(&s.acquiredCount, -1) +} + +func (s *SpawnInfo) reset() { + s.mutex.Lock() + defer s.mutex.Unlock() + s.spawnCount = 0 + s.spawnRate = 0 + s.acquiredCount = 0 + s.spawnDone = make(chan struct{}) +} + type runner struct { state int32 tasks []*Task totalTaskWeight int + mutex sync.RWMutex rateLimiter RateLimiter rateLimitEnabled bool stats *requestStats currentClientsNum int32 // current clients count - spawnCount int // target clients to spawn - spawnRate float64 + spawn *SpawnInfo loop *Loop // specify loop count for testcase, count = loopCount * spawnCount - spawnDone chan struct{} + + // when this channel is closed, all statistics are reported successfully + reportedChan chan bool + + // all running workers(goroutines) will select on this channel. + // close this channel will stop all running workers. + stopChan chan bool + + // close this channel will stop all goroutines used in runner. + closeChan chan bool outputs []Output + + once *sync.Once } // safeRun runs fn and recovers from unexpected panics. 
@@ -176,75 +272,104 @@ func (r *runner) reportTestResult() { println() } -func (r *localRunner) spawnWorkers(spawnCount int, spawnRate float64, quit chan bool, spawnCompleteFunc func()) { +func (r *runner) startSpawning(spawnCount int64, spawnRate float64, spawnCompleteFunc func()) { + r.stopChan = make(chan bool) + r.reportedChan = make(chan bool) + r.spawn.reset() + + r.spawn.setSpawn(spawnCount, spawnRate) + + atomic.StoreInt32(&r.currentClientsNum, 0) + + go r.spawnWorkers(spawnCount, spawnRate, r.stopChan, spawnCompleteFunc) +} + +func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan bool, spawnCompleteFunc func()) { log.Info(). - Int("spawnCount", spawnCount). + Int64("spawnCount", spawnCount). Float64("spawnRate", spawnRate). Msg("Spawning workers") - atomic.StoreInt32(&r.state, stateSpawning) - for i := 1; i <= spawnCount; i++ { - // spawn workers with rate limit - sleepTime := time.Duration(1000000/r.spawnRate) * time.Microsecond - time.Sleep(sleepTime) - - // loop count per worker - var workerLoop *Loop - if r.loop != nil { - workerLoop = &Loop{loopCount: atomic.LoadInt64(&r.loop.loopCount) / int64(r.spawnCount)} - } - + r.updateState(StateSpawning) + for { select { case <-quit: // quit spawning goroutine log.Info().Msg("Quitting spawning workers") return default: - atomic.AddInt32(&r.currentClientsNum, 1) - go func() { - for { - select { - case <-quit: - return - default: - if workerLoop != nil && !workerLoop.acquire() { + if r.isStarted() && r.spawn.acquire() { + // spawn workers with rate limit + sleepTime := time.Duration(1000000/r.spawn.getSpawnRate()) * time.Microsecond + time.Sleep(sleepTime) + + // loop count per worker + var workerLoop *Loop + if r.loop != nil { + workerLoop = &Loop{loopCount: atomic.LoadInt64(&r.loop.loopCount) / int64(r.spawn.spawnCount)} + } + atomic.AddInt32(&r.currentClientsNum, 1) + go func() { + for { + select { + case <-quit: + atomic.AddInt64(&r.spawn.acquiredCount, -1) + 
atomic.AddInt32(&r.currentClientsNum, -1) return - } - if r.rateLimitEnabled { - blocked := r.rateLimiter.Acquire() - if !blocked { + default: + if workerLoop != nil && !workerLoop.acquire() { + return + } + if r.rateLimitEnabled { + blocked := r.rateLimiter.Acquire() + if !blocked { + task := r.getTask() + r.safeRun(task.Fn) + } + } else { task := r.getTask() r.safeRun(task.Fn) } - } else { - task := r.getTask() - r.safeRun(task.Fn) - } - if workerLoop != nil { - // finished count of total - r.loop.increaseFinishedCount() - // finished count of single worker - workerLoop.increaseFinishedCount() - if r.loop.isFinished() { - r.stop() + if workerLoop != nil { + // finished count of total + r.loop.increaseFinishedCount() + // finished count of single worker + workerLoop.increaseFinishedCount() + if r.loop.isFinished() { + r.stop() + } + } + if r.spawn.erase() { + atomic.AddInt32(&r.currentClientsNum, -1) + return + } + if !r.isStarted() { + atomic.AddInt64(&r.spawn.acquiredCount, -1) + atomic.AddInt32(&r.currentClientsNum, -1) + return } } } + }() + } else { + if r.getState() == StateSpawning { + r.spawn.done() + if spawnCompleteFunc != nil { + spawnCompleteFunc() + } + r.updateState(StateRunning) } - }() + time.Sleep(1 * time.Second) + } } } - - close(r.spawnDone) - if spawnCompleteFunc != nil { - spawnCompleteFunc() - } - atomic.StoreInt32(&r.state, stateRunning) } // setTasks will set the runner's task list AND the total task weight // which is used to get a random task later func (r *runner) setTasks(t []*Task) { + r.mutex.Lock() + defer r.mutex.Unlock() r.tasks = t weightSum := 0 @@ -255,6 +380,8 @@ func (r *runner) setTasks(t []*Task) { } func (r *runner) getTask() *Task { + r.mutex.RLock() + defer r.mutex.RUnlock() tasksCount := len(r.tasks) if tasksCount == 0 { log.Error().Msg("no valid testcase found") @@ -285,30 +412,78 @@ func (r *runner) getTask() *Task { return nil } +func (r *runner) statsStart() { + var ticker = time.NewTicker(reportStatsInterval) + for 
{ + select { + // record stats + case t := <-r.stats.transactionChan: + r.stats.logTransaction(t.name, t.success, t.elapsedTime, t.contentSize) + case m := <-r.stats.requestSuccessChan: + r.stats.logRequest(m.requestType, m.name, m.responseTime, m.responseLength) + case n := <-r.stats.requestFailureChan: + r.stats.logRequest(n.requestType, n.name, n.responseTime, 0) + r.stats.logError(n.requestType, n.name, n.errMsg) + // report stats + case <-ticker.C: + r.reportStats() + // close reportedChan and return if the last stats is reported successfully + if !r.isStarted() { + close(r.reportedChan) + return + } + } + } +} + +func (r *runner) stop() { + // stop previous goroutines without blocking + // those goroutines will exit when r.safeRun returns + close(r.stopChan) + if r.rateLimitEnabled { + r.rateLimiter.Stop() + } +} + +func (r *runner) getState() int32 { + return atomic.LoadInt32(&r.state) +} + +func (r *runner) updateState(state int32) { + log.Debug().Int32("from", atomic.LoadInt32(&r.state)).Int32("to", state).Msg("update runner state") + atomic.StoreInt32(&r.state, state) +} + +func (r *runner) isStarted() bool { + return r.getState() == StateRunning || r.getState() == StateSpawning +} + type localRunner struct { runner - - // close this channel will stop all goroutines used in runner. 
- stopChan chan bool } func newLocalRunner(spawnCount int, spawnRate float64) *localRunner { return &localRunner{ runner: runner{ - state: stateInit, - spawnRate: spawnRate, - spawnCount: spawnCount, - stats: newRequestStats(), - outputs: make([]Output, 0), - spawnDone: make(chan struct{}), + state: StateInit, + stats: newRequestStats(), + outputs: make([]Output, 0), + spawn: &SpawnInfo{ + spawnCount: int64(spawnCount), + spawnRate: spawnRate, + spawnDone: make(chan struct{}), + }, + reportedChan: make(chan bool), + stopChan: make(chan bool), + closeChan: make(chan bool), + once: &sync.Once{}, }, - stopChan: make(chan bool), } } func (r *localRunner) start() { // init state - atomic.StoreInt32(&r.state, stateInit) + r.updateState(StateInit) atomic.StoreInt32(&r.currentClientsNum, 0) r.stats.clearAll() @@ -317,51 +492,20 @@ func (r *localRunner) start() { r.rateLimiter.Start() } - // all running workers(goroutines) will select on this channel. - // close this channel will stop all running workers. 
- quitChan := make(chan bool) - // when this channel is closed, all statistics are reported successfully - reportedChan := make(chan bool) - go r.spawnWorkers(r.spawnCount, r.spawnRate, quitChan, nil) + go r.spawnWorkers(r.spawn.spawnCount, r.spawn.spawnRate, r.stopChan, nil) // output setup r.outputOnStart() - // start running - go func() { - ticker := time.NewTicker(reportStatsInterval) - for { - select { - // record stats - case t := <-r.stats.transactionChan: - r.stats.logTransaction(t.name, t.success, t.elapsedTime, t.contentSize) - case m := <-r.stats.requestSuccessChan: - r.stats.logRequest(m.requestType, m.name, m.responseTime, m.responseLength) - case n := <-r.stats.requestFailureChan: - r.stats.logRequest(n.requestType, n.name, n.responseTime, 0) - r.stats.logError(n.requestType, n.name, n.errMsg) - // report stats - case <-ticker.C: - r.reportStats() - // close reportedChan and return if the last stats is reported successfully - if atomic.LoadInt32(&r.state) == stateQuitting { - close(reportedChan) - return - } - } - } - }() + // start stats report + go r.runner.statsStart() // stop <-r.stopChan - atomic.StoreInt32(&r.state, stateQuitting) - - // stop previous goroutines without blocking - // those goroutines will exit when r.safeRun returns - close(quitChan) + r.updateState(StateStopped) // wait until all stats are reported successfully - <-reportedChan + <-r.reportedChan // stop rate limiter if r.rateLimitEnabled { @@ -374,10 +518,494 @@ func (r *localRunner) start() { // output teardown r.outputOnStop() - atomic.StoreInt32(&r.state, stateStopped) + r.updateState(StateQuitting) return } func (r *localRunner) stop() { - close(r.stopChan) + if r.runner.isStarted() { + r.runner.stop() + } +} + +// workerRunner connects to the master, spawns goroutines and collects stats. +type workerRunner struct { + runner + + nodeID string + masterHost string + masterPort int + client *grpcClient + + // this channel will start worker for spawning. 
+ spawnStartChan chan bool + // get testcase from master + testCaseBytes chan []byte + + startFlag bool + + ignoreQuit bool +} + +func newWorkerRunner(masterHost string, masterPort int) (r *workerRunner) { + r = &workerRunner{ + runner: runner{ + stats: newRequestStats(), + spawn: &SpawnInfo{ + spawnDone: make(chan struct{}), + }, + stopChan: make(chan bool), + reportedChan: make(chan bool), + closeChan: make(chan bool), + once: &sync.Once{}, + }, + masterHost: masterHost, + masterPort: masterPort, + nodeID: getNodeID(), + spawnStartChan: make(chan bool), + testCaseBytes: make(chan []byte, 10), + } + return r +} + +func (r *workerRunner) spawnComplete() { + data := make(map[string]int64) + data["count"] = r.spawn.getSpawnCount() + r.client.sendChannel() <- newGenericMessage("spawning_complete", data, r.nodeID) + r.updateState(StateRunning) +} + +func (r *workerRunner) onSpawnMessage(msg *genericMessage) { + r.client.sendChannel() <- newGenericMessage("spawning", nil, r.nodeID) + spawnCount, ok := msg.Data["spawn_count"] + if ok { + r.spawn.setSpawn(spawnCount, -1) + } + spawnRate, ok := msg.Data["spawn_rate"] + if ok { + r.spawn.setSpawn(-1, float64(spawnRate)) + } + if msg.Tasks != nil { + r.testCaseBytes <- msg.Tasks + } + log.Info().Msg("on spawn message successful") +} + +// Runner acts as a state machine. 
+func (r *workerRunner) onMessage(msg *genericMessage) { + switch r.getState() { + case StateInit: + switch msg.Type { + case "spawn": + r.onSpawnMessage(msg) + case "quit": + r.close() + } + case StateSpawning: + fallthrough + case StateRunning: + switch msg.Type { + case "spawn": + r.onSpawnMessage(msg) + case "stop": + r.stop() + log.Info().Msg("Recv stop message from master, all the goroutines are stopped") + r.client.sendChannel() <- newGenericMessage("client_stopped", nil, r.nodeID) + case "quit": + r.close() + log.Info().Msg("Recv quit message from master, all the goroutines are stopped") + } + case StateStopped: + switch msg.Type { + case "spawn": + r.onSpawnMessage(msg) + go r.start() + case "quit": + r.close() + } + } +} + +func (r *workerRunner) onQuiting() { + if r.getState() != StateQuitting { + r.client.sendChannel() <- newQuitMessage(r.nodeID) + } + r.updateState(StateQuitting) +} + +func (r *workerRunner) startListener() { + for { + select { + case msg := <-r.client.recvChannel(): + r.onMessage(msg) + case <-r.closeChan: + return + } + } +} + +// run starts service +func (r *workerRunner) run() { + r.updateState(StateInit) + r.client = newClient(r.masterHost, r.masterPort, r.nodeID) + + err := r.client.connect() + if err != nil { + log.Printf("Failed to connect to master(%s:%d) with error %v\n", r.masterHost, r.masterPort, err) + return + } + + // listen to master + go r.startListener() + + // register worker information to master + r.client.sendChannel() <- newGenericMessage("register", nil, r.nodeID) + // tell master, I'm ready + log.Info().Msg("send client ready signal") + r.client.sendChannel() <- newClientReadyMessageToMaster(r.nodeID) + + // heartbeat + // See: https://github.com/locustio/locust/commit/a8c0d7d8c588f3980303358298870f2ea394ab93 + go func() { + var ticker = time.NewTicker(heartbeatInterval) + for { + select { + case <-ticker.C: + if atomic.LoadInt32(&r.client.failCount) > 2 { + r.updateState(StateMissing) + } + if r.getState() == 
StateMissing { + if r.client.reConnect() == nil { + r.updateState(StateInit) + } + } + CPUUsage := GetCurrentCPUUsage() + data := map[string]int64{ + "state": int64(r.getState()), + "current_cpu_usage": int64(CPUUsage), + "spawn_count": int64(atomic.LoadInt32(&r.currentClientsNum)), + } + r.client.sendChannel() <- newGenericMessage("heartbeat", data, r.nodeID) + case <-r.closeChan: + return + } + } + }() + <-r.closeChan +} + +func (r *workerRunner) start() { + r.startFlag = true + defer func() { + r.startFlag = false + }() + r.stats.clearAll() + + // start rate limiter + if r.rateLimitEnabled { + r.rateLimiter.Start() + } + + r.once.Do(r.outputOnStart) + + r.startSpawning(r.spawn.getSpawnCount(), r.spawn.getSpawnRate(), r.spawnComplete) + + // start stats report + go r.runner.statsStart() + + <-r.reportedChan + + r.reportTestResult() + r.outputOnStop() +} + +func (r *workerRunner) stop() { + if r.isStarted() { + close(r.stopChan) + // stop rate limiter + if r.rateLimitEnabled { + r.rateLimiter.Stop() + } + r.updateState(StateStopped) + } +} + +func (r *workerRunner) close() { + r.stop() + if r.ignoreQuit { + return + } + for r.startFlag == true { + time.Sleep(1 * time.Second) + } + close(r.closeChan) + var ticker = time.NewTicker(1 * time.Second) + if r.client != nil { + // waitting for quit message is sent to master + select { + case <-r.client.disconnectedChannel(): + break + case <-ticker.C: + log.Warn().Msg("Timeout waiting for sending quit message to master, boomer will quit any way.") + r.onQuiting() + } + r.client.close() + } +} + +// masterRunner controls worker to spawn goroutines and collect stats. 
+type masterRunner struct { + runner + + masterBindHost string + masterBindPort int + server *grpcServer + + autoStart bool + expectWorkers int + expectWorkersMaxWait int + + parseTestCasesChan chan bool + startFlag bool + testCaseBytes chan []byte + + mutex sync.Mutex +} + +func newMasterRunner(masterBindHost string, masterBindPort int) *masterRunner { + return &masterRunner{ + runner: runner{ + state: StateInit, + spawn: &SpawnInfo{ + spawnDone: make(chan struct{}), + }, + closeChan: make(chan bool), + }, + masterBindHost: masterBindHost, + masterBindPort: masterBindPort, + server: newServer(masterBindHost, masterBindPort), + parseTestCasesChan: make(chan bool), + startFlag: false, + testCaseBytes: make(chan []byte), + } +} + +func (r *masterRunner) setExpectWorkers(expectWorkers int, expectWorkersMaxWait int) { + r.expectWorkers = expectWorkers + r.expectWorkersMaxWait = expectWorkersMaxWait +} + +func (r *masterRunner) heartbeatWorker() { + log.Info().Msg("heartbeatWorker, listen and record heartbeat from worker") + var ticker = time.NewTicker(heartbeatInterval) + for { + select { + case <-r.closeChan: + return + case <-ticker.C: + r.server.clients.Range(func(key, value interface{}) bool { + workerInfo, ok := value.(*WorkerNode) + if !ok { + log.Error().Msg("failed to get worker information") + } + if atomic.LoadInt32(&workerInfo.Heartbeat) <= 0 && workerInfo.getState() != StateMissing { + workerInfo.setState(StateMissing) + if r.getState() == StateRunning { + // all running workers missed, stopping runner + if r.server.getClientsLength() <= 0 { + r.updateState(StateStopped) + } + } + } else { + atomic.AddInt32(&workerInfo.Heartbeat, -1) + } + return true + }) + } + } +} + +func (r *masterRunner) clientListener() { + log.Info().Msg("clientListener, start to deal message from worker") + for { + select { + case <-r.closeChan: + return + case msg := <-r.server.recvChannel(): + worker, ok := r.server.getClients().Load(msg.NodeID) + if !ok { + continue + } + 
workerInfo, ok := worker.(*WorkerNode) + if !ok { + continue + } + switch msg.Type { + case typeClientReady: + if workerInfo.getState() == StateInit { + break + } + workerInfo.setState(StateInit) + if r.getState() == StateRunning { + println(fmt.Sprintf("worker(%s) joined, ready to rebalance the load of each worker", workerInfo.ID)) + err := r.rebalance() + if err != nil { + log.Error().Err(err).Msg("failed to rebalance") + } + } + case typeClientStopped: + workerInfo.setState(StateStopped) + if r.server.getWorkersLengthByState(StateStopped)+r.server.getWorkersLengthByState(StateInit) == r.server.getClientsLength() { + r.updateState(StateStopped) + } + case typeHeartbeat: + if workerInfo.getState() != int32(msg.Data["state"]) { + workerInfo.setState(int32(msg.Data["state"])) + } + workerInfo.updateHeartbeat(3) + if workerInfo.getCPUUsage() != float64(msg.Data["current_cpu_usage"]) { + workerInfo.updateCPUUsage(float64(msg.Data["current_cpu_usage"])) + } + if workerInfo.getSpawnCount() != msg.Data["spawn_count"] { + workerInfo.updateSpawnCount(msg.Data["spawn_count"]) + } + case typeSpawning: + workerInfo.setState(StateSpawning) + case typeSpawningComplete: + workerInfo.setState(StateRunning) + if r.server.getWorkersLengthByState(StateRunning) == r.server.getClientsLength() { + println(fmt.Sprintf("all(%v) workers spawn done, setting state as running", r.server.getClientsLength())) + r.updateState(StateRunning) + } + case typeQuit: + if workerInfo.getState() == StateQuitting { + break + } + workerInfo.setState(StateQuitting) + if r.isStarted() { + if r.server.getClientsLength() > 0 { + println(fmt.Sprintf("worker(%s) quited, ready to rebalance the load of each worker", workerInfo.ID)) + err := r.rebalance() + if err != nil { + log.Error().Err(err).Msg("failed to rebalance") + } + } + } + case typeException: + // Todo + default: + } + } + } +} + +func (r *masterRunner) run() { + r.updateState(StateInit) + + // start grpc server + err := r.server.start() + if err != 
nil { + log.Error().Err(err).Msg("failed to start grpc server") + return + } + + // listen and deal message from worker + go r.clientListener() + // listen and record heartbeat from worker + go r.heartbeatWorker() + + if r.autoStart { + log.Info().Msg("auto start, waiting expected workers joined") + var ticker = time.NewTicker(1 * time.Second) + var tickerMaxWait = time.NewTicker(time.Duration(r.expectWorkersMaxWait) * time.Second) + FOR: + for { + select { + case <-r.closeChan: + return + case <-ticker.C: + c := r.server.getClientsLength() + log.Info().Msg(fmt.Sprintf("expected worker number: %v, current worker count: %v", r.expectWorkers, c)) + if c >= r.expectWorkers { + go func() { + err = r.start() + if err != nil { + log.Error().Err(err).Msg("failed to run") + os.Exit(1) + } + }() + break FOR + } + case <-tickerMaxWait.C: + log.Warn().Msg("reached max wait time, quiting") + r.onQuiting() + os.Exit(1) + } + } + } + <-r.closeChan +} + +func (r *masterRunner) start() error { + numWorkers := r.server.getClientsLength() + if numWorkers == 0 { + return errors.New("current workers: 0") + } + workerSpawnRate := r.spawn.spawnRate / float64(numWorkers) + workerSpawnCount := r.spawn.getSpawnCount() / int64(numWorkers) + + log.Info().Msg("send spawn data to worker") + r.updateState(StateSpawning) + // waiting to fetch testcase + testcase, ok := r.fetchTestCase() + if !ok { + return errors.New("starting, do not retry frequently") + } + r.server.sendChannel() <- newSpawnMessageToWorker("spawn", map[string]int64{ + "spawn_count": workerSpawnCount, + "spawn_rate": int64(workerSpawnRate), + }, testcase) + println("send spawn data to worker successful") + log.Info().Msg("send spawn data to worker successful") + return nil +} + +func (r *masterRunner) fetchTestCase() ([]byte, bool) { + if r.startFlag { + return nil, false + } + r.startFlag = true + defer func() { + r.startFlag = false + }() + r.parseTestCasesChan <- true + return <-r.testCaseBytes, true +} + +func (r 
*masterRunner) rebalance() error { + return r.start() +} + +func (r *masterRunner) stop() { + if r.isStarted() { + r.updateState(StateStopping) + r.server.sendChannel() <- &genericMessage{Type: "stop", Data: map[string]int64{}} + r.updateState(StateStopped) + } +} + +func (r *masterRunner) onQuiting() { + if r.getState() != StateQuitting { + r.server.sendChannel() <- &genericMessage{ + Type: "quit", + } + } + r.updateState(StateQuitting) +} + +func (r *masterRunner) close() { + r.onQuiting() + r.server.wg.Wait() + close(r.closeChan) + r.server.close() } diff --git a/hrp/internal/boomer/runner_test.go b/hrp/internal/boomer/runner_test.go index 549980c9..28305752 100644 --- a/hrp/internal/boomer/runner_test.go +++ b/hrp/internal/boomer/runner_test.go @@ -1,6 +1,7 @@ package boomer import ( + "sync" "sync/atomic" "testing" "time" @@ -112,3 +113,416 @@ func TestLoopCount(t *testing.T) { t.Fatal() } } + +func TestSpawnWorkers(t *testing.T) { + taskA := &Task{ + Weight: 10, + Fn: func() { + time.Sleep(time.Second) + }, + Name: "TaskA", + } + tasks := []*Task{taskA} + + runner := newWorkerRunner("localhost", 5557) + defer runner.close() + + runner.client = newClient("localhost", 5557, runner.nodeID) + runner.setTasks(tasks) + go runner.spawnWorkers(10, 10, runner.stopChan, runner.spawnComplete) + time.Sleep(10 * time.Millisecond) + + currentClients := atomic.LoadInt32(&runner.currentClientsNum) + if currentClients != 10 { + t.Error("Unexpected count", currentClients) + } +} + +func TestSpawnWorkersWithManyTasks(t *testing.T) { + var lock sync.Mutex + taskCalls := map[string]int{} + + createTask := func(name string, weight int) *Task { + return &Task{ + Name: name, + Weight: weight, + Fn: func() { + lock.Lock() + taskCalls[name]++ + lock.Unlock() + }, + } + } + tasks := []*Task{ + createTask("one hundred", 100), + createTask("ten", 10), + createTask("one", 1), + } + + runner := newWorkerRunner("localhost", 5557) + defer runner.close() + + runner.setTasks(tasks) + 
runner.client = newClient("localhost", 5557, runner.nodeID) + + const numToSpawn int64 = 30 + + runner.spawnWorkers(numToSpawn, float64(numToSpawn), runner.stopChan, runner.spawnComplete) + time.Sleep(2 * time.Second) + + currentClients := atomic.LoadInt32(&runner.currentClientsNum) + + assert.Equal(t, numToSpawn, int(currentClients)) + lock.Lock() + hundreds := taskCalls["one hundred"] + tens := taskCalls["ten"] + ones := taskCalls["one"] + lock.Unlock() + + total := hundreds + tens + ones + t.Logf("total tasks run: %d\n", total) + + assert.True(t, total > 111) + + assert.True(t, ones > 1) + actPercentage := float64(ones) / float64(total) + expectedPercentage := 1.0 / 111.0 + if actPercentage > 2*expectedPercentage || actPercentage < 0.5*expectedPercentage { + t.Errorf("Unexpected percentage of ones task: exp %v, act %v", expectedPercentage, actPercentage) + } + + assert.True(t, tens > 10) + actPercentage = float64(tens) / float64(total) + expectedPercentage = 10.0 / 111.0 + if actPercentage > 2*expectedPercentage || actPercentage < 0.5*expectedPercentage { + t.Errorf("Unexpected percentage of tens task: exp %v, act %v", expectedPercentage, actPercentage) + } + + assert.True(t, hundreds > 100) + actPercentage = float64(hundreds) / float64(total) + expectedPercentage = 100.0 / 111.0 + if actPercentage > 2*expectedPercentage || actPercentage < 0.5*expectedPercentage { + t.Errorf("Unexpected percentage of hundreds task: exp %v, act %v", expectedPercentage, actPercentage) + } +} + +func TestSpawnAndStop(t *testing.T) { + taskA := &Task{ + Fn: func() { + time.Sleep(time.Second) + }, + } + taskB := &Task{ + Fn: func() { + time.Sleep(2 * time.Second) + }, + } + tasks := []*Task{taskA, taskB} + runner := newWorkerRunner("localhost", 5557) + defer runner.close() + runner.client = newClient("localhost", 5557, runner.nodeID) + + runner.setTasks(tasks) + runner.spawn.setSpawn(10, 10) + runner.updateState(StateSpawning) + + go runner.start() + + // wait for spawning goroutines 
+ time.Sleep(2 * time.Second) + if atomic.LoadInt32(&runner.currentClientsNum) != 10 { + t.Error("Number of goroutines mismatches, expected: 10, current count", atomic.LoadInt32(&runner.currentClientsNum)) + } + + msg := <-runner.client.sendChannel() + if msg.Type != "spawning_complete" { + t.Error("Runner should send spawning_complete message when spawning completed, got", msg.Type) + } + runner.stop() + + runner.onQuiting() + msg = <-runner.client.sendChannel() + if msg.Type != "quit" { + t.Error("Runner should send quit message on quitting, got", msg.Type) + } +} + +func TestStop(t *testing.T) { + taskA := &Task{ + Fn: func() { + time.Sleep(time.Second) + }, + } + tasks := []*Task{taskA} + runner := newWorkerRunner("localhost", 5557) + runner.setTasks(tasks) + runner.spawn.setSpawn(10, 10) + runner.updateState(StateSpawning) + + runner.stop() + + if runner.getState() != StateStopped { + t.Error("Expected runner state to be 5, was", runner.getState()) + } +} + +func TestOnSpawnMessage(t *testing.T) { + taskA := &Task{ + Fn: func() { + time.Sleep(time.Second) + }, + } + runner := newWorkerRunner("localhost", 5557) + defer runner.close() + runner.client = newClient("localhost", 5557, runner.nodeID) + runner.updateState(StateInit) + runner.setTasks([]*Task{taskA}) + runner.spawn.spawnCount = 100 + runner.spawn.spawnRate = 100 + + runner.onSpawnMessage(newGenericMessage("spawn", map[string]int64{ + "spawn_count": 20, + "spawn_rate": 20, + }, runner.nodeID)) + + if runner.spawn.spawnCount != 20 { + t.Error("workers should be overwrote by onSpawnMessage, expected: 20, was:", runner.spawn.spawnCount) + } + if runner.spawn.spawnRate != 20 { + t.Error("spawnRate should be overwrote by onSpawnMessage, expected: 20, was:", runner.spawn.spawnRate) + } + + runner.onMessage(newGenericMessage("stop", nil, runner.nodeID)) +} + +func TestOnQuitMessage(t *testing.T) { + runner := newWorkerRunner("localhost", 5557) + runner.client = newClient("localhost", 5557, "test") + 
runner.updateState(StateInit) + + runner.onMessage(newGenericMessage("quit", nil, runner.nodeID)) + <-runner.closeChan + + runner.updateState(StateRunning) + runner.closeChan = make(chan bool) + runner.stopChan = make(chan bool) + runner.client.shutdownChan = make(chan bool) + runner.onMessage(newGenericMessage("quit", nil, runner.nodeID)) + <-runner.closeChan + if runner.getState() != StateQuitting { + t.Error("Runner's state should be StateQuitting") + } + + runner.updateState(StateStopped) + runner.closeChan = make(chan bool) + runner.stopChan = make(chan bool) + runner.client.shutdownChan = make(chan bool) + runner.onMessage(newGenericMessage("quit", nil, runner.nodeID)) + <-runner.closeChan + if runner.getState() != StateQuitting { + t.Error("Runner's state should be StateQuitting") + } +} + +func TestOnMessage(t *testing.T) { + taskA := &Task{ + Fn: func() { + time.Sleep(time.Second) + }, + } + taskB := &Task{ + Fn: func() { + time.Sleep(2 * time.Second) + }, + } + tasks := []*Task{taskA, taskB} + + runner := newWorkerRunner("localhost", 5557) + defer runner.close() + runner.client = newClient("localhost", 5557, runner.nodeID) + runner.updateState(StateInit) + runner.setTasks(tasks) + + go runner.start() + + // start spawning + runner.onMessage(newGenericMessage("spawn", map[string]int64{ + "spawn_count": 10, + "spawn_rate": 10, + }, runner.nodeID)) + + msg := <-runner.client.sendChannel() + if msg.Type != "spawning" { + t.Error("Runner should send spawning message when starting spawn, got", msg.Type) + } + + // spawn complete and running + time.Sleep(2 * time.Second) + if runner.getState() != StateRunning { + t.Error("State of runner is not running after spawn, got", runner.getState()) + } + if atomic.LoadInt32(&runner.currentClientsNum) != 10 { + t.Error("Number of goroutines mismatches, expected: 10, current count:", atomic.LoadInt32(&runner.currentClientsNum)) + } + msg = <-runner.client.sendChannel() + if msg.Type != "spawning_complete" { + 
t.Error("Runner should send spawning_complete message when spawn completed, got", msg.Type) + } + + // increase goroutines while running + runner.onMessage(newGenericMessage("spawn", map[string]int64{ + "spawn_count": 15, + "spawn_rate": 15, + }, runner.nodeID)) + + msg = <-runner.client.sendChannel() + if msg.Type != "spawning" { + t.Error("Runner should send spawning message when starting spawn, got", msg.Type) + } + + time.Sleep(2 * time.Second) + msg = <-runner.client.sendChannel() + if msg.Type != "spawning_complete" { + t.Error("Runner should send spawning_complete message, got", msg.Type) + } + if runner.getState() != StateRunning { + t.Error("State of runner is not running after spawn, got", runner.getState()) + } + if atomic.LoadInt32(&runner.currentClientsNum) != 15 { + t.Error("Number of goroutines mismatches, expected: 20, current count:", atomic.LoadInt32(&runner.currentClientsNum)) + } + + // stop all the workers + runner.onMessage(newGenericMessage("stop", nil, runner.nodeID)) + if runner.getState() != StateStopped { + t.Error("State of runner is not stopped, got", runner.getState()) + } + msg = <-runner.client.sendChannel() + if msg.Type != "client_stopped" { + t.Error("Runner should send client_stopped message, got", msg.Type) + } + + // spawn again + runner.onMessage(newGenericMessage("spawn", map[string]int64{ + "spawn_count": 10, + "spawn_rate": 10, + }, runner.nodeID)) + + msg = <-runner.client.sendChannel() + if msg.Type != "spawning" { + t.Error("Runner should send spawning message when starting spawn, got", msg.Type) + } + + // spawn complete and running + time.Sleep(2 * time.Second) + if runner.getState() != StateRunning { + t.Error("State of runner is not running after spawn, got", runner.getState()) + } + if atomic.LoadInt32(&runner.currentClientsNum) != 10 { + t.Error("Number of goroutines mismatches, expected: 10, current count:", atomic.LoadInt32(&runner.currentClientsNum)) + } + msg = <-runner.client.sendChannel() + if msg.Type != 
"spawning_complete" { + t.Error("Runner should send spawning_complete message when spawn completed, got", msg.Type) + } + + // stop all the workers + runner.onMessage(newGenericMessage("stop", nil, runner.nodeID)) + if runner.getState() != StateStopped { + t.Error("State of runner is not stopped, got", runner.getState()) + } + msg = <-runner.client.sendChannel() + if msg.Type != "client_stopped" { + t.Error("Runner should send client_stopped message, got", msg.Type) + } +} + +func TestClientListener(t *testing.T) { + runner := newMasterRunner("localhost", 5557) + defer runner.close() + runner.updateState(StateInit) + runner.spawn.setSpawn(10, 10) + go runner.clientListener() + runner.server.clients.Store("testID1", &WorkerNode{ID: "testID1", Heartbeat: 3}) + runner.server.clients.Store("testID2", &WorkerNode{ID: "testID2", Heartbeat: 3}) + runner.server.recvChannel() <- &genericMessage{ + Type: typeClientReady, + NodeID: "testID1", + } + worker1, ok := runner.server.getClients().Load("testID1") + if !ok { + t.Fatal("error") + } + workerInfo1, ok := worker1.(*WorkerNode) + if !ok { + t.Fatal("error") + } + time.Sleep(time.Second) + if workerInfo1.getState() != StateInit { + t.Error("State of worker runner is not init, got", workerInfo1.getState()) + } + runner.server.recvChannel() <- &genericMessage{ + Type: typeClientStopped, + NodeID: "testID2", + } + worker2, ok := runner.server.getClients().Load("testID2") + if !ok { + t.Fatal("error") + } + workerInfo2, ok := worker2.(*WorkerNode) + if !ok { + t.Fatal("error") + } + time.Sleep(time.Second) + if workerInfo2.getState() != StateStopped { + t.Error("State of worker runner is not stopped, got", workerInfo2.getState()) + } + runner.server.recvChannel() <- &genericMessage{ + Type: typeClientStopped, + NodeID: "testID1", + } + time.Sleep(time.Second) + if runner.getState() != StateStopped { + t.Error("State of master runner is not stopped, got", runner.getState()) + } +} + +func TestHeartbeatWorker(t *testing.T) { + 
runner := newMasterRunner("localhost", 5557) + defer runner.close() + runner.updateState(StateInit) + runner.spawn.setSpawn(10, 10) + runner.server.clients.Store("testID1", &WorkerNode{ID: "testID1", Heartbeat: 1, State: StateInit}) + runner.server.clients.Store("testID2", &WorkerNode{ID: "testID2", Heartbeat: 1, State: StateInit}) + go runner.clientListener() + go runner.heartbeatWorker() + time.Sleep(3 * time.Second) + worker1, ok := runner.server.getClients().Load("testID1") + if !ok { + t.Fatal() + } + workerInfo1, ok := worker1.(*WorkerNode) + if !ok { + t.Fatal() + } + if workerInfo1.getState() != StateMissing { + t.Error("expected state of worker runner is missing, but got", workerInfo1.getState()) + } + runner.server.recvChannel() <- &genericMessage{ + Type: typeHeartbeat, + NodeID: "testID2", + Data: map[string]int64{"state": 3}, + } + worker2, ok := runner.server.getClients().Load("testID2") + if !ok { + t.Fatal() + } + workerInfo2, ok := worker2.(*WorkerNode) + if !ok { + t.Fatal() + } + time.Sleep(time.Second) + if workerInfo2.getState() == StateMissing { + t.Error("expected state of worker runner is not missing, but got missing") + } +} diff --git a/hrp/internal/boomer/server.go b/hrp/internal/boomer/server.go new file mode 100644 index 00000000..853e847e --- /dev/null +++ b/hrp/internal/boomer/server.go @@ -0,0 +1 @@ +package boomer diff --git a/hrp/internal/boomer/server_grpc.go b/hrp/internal/boomer/server_grpc.go new file mode 100644 index 00000000..7eb92104 --- /dev/null +++ b/hrp/internal/boomer/server_grpc.go @@ -0,0 +1,343 @@ +package boomer + +import ( + "context" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/reflection" + "google.golang.org/grpc/status" + + "github.com/httprunner/httprunner/v4/hrp/internal/grpc/messager" + "github.com/rs/zerolog/log" +) + +func (s *grpcServer) BidirectionalStreamingMessage(srv 
messager.Message_BidirectionalStreamingMessageServer) error { + s.wg.Add(1) + defer s.wg.Done() + req, err := srv.Recv() + switch err { + case nil: + break + case io.EOF: + return nil + default: + if err.Error() == status.Error(codes.Canceled, context.Canceled.Error()).Error() { + return nil + } + log.Error().Err(err).Msg("failed to get stream from client") + return err + } + wn := &WorkerNode{messenger: srv, ID: req.NodeID, Heartbeat: 3} + s.clients.Store(req.NodeID, wn) + println(fmt.Sprintf("worker(%v) joined, current worker count: %v", req.NodeID, s.getClientsLength())) + <-s.disconnectedChannel() + s.clients.Delete(req.NodeID) + println(fmt.Sprintf("worker(%v) quited, current worker count: %v", req.NodeID, s.getClientsLength())) + return nil +} + +type WorkerNode struct { + ID string `json:"id"` + State int32 `json:"state"` + Heartbeat int32 `json:"heartbeat"` + SpawnCount int64 `json:"spawn_count"` + CPUUsage float64 `json:"cpu_usage"` + CPUWarningEmitted bool `json:"cpu_warning_emitted"` + MemoryUsage float64 `json:"memory_usage"` + messenger messager.Message_BidirectionalStreamingMessageServer + mutex sync.RWMutex +} + +func (w *WorkerNode) getState() int32 { + return atomic.LoadInt32(&w.State) +} + +func (w *WorkerNode) setState(state int32) { + atomic.StoreInt32(&w.State, state) +} + +func (w *WorkerNode) updateHeartbeat(heartbeat int32) { + atomic.StoreInt32(&w.Heartbeat, heartbeat) +} + +func (w *WorkerNode) getHeartbeat() int32 { + return atomic.LoadInt32(&w.Heartbeat) +} + +func (w *WorkerNode) updateSpawnCount(spawnCount int64) { + atomic.StoreInt64(&w.SpawnCount, spawnCount) +} + +func (w *WorkerNode) getSpawnCount() int64 { + return atomic.LoadInt64(&w.SpawnCount) +} + +func (w *WorkerNode) updateCPUUsage(cpuUsage float64) { + w.mutex.Lock() + defer w.mutex.Unlock() + w.CPUUsage = cpuUsage +} + +func (w *WorkerNode) getCPUUsage() float64 { + w.mutex.RLock() + defer w.mutex.RUnlock() + return w.CPUUsage +} + +func (w *WorkerNode) 
updateCPUWarningEmitted(cpuWarningEmitted bool) { + w.mutex.Lock() + defer w.mutex.Unlock() + w.CPUWarningEmitted = cpuWarningEmitted +} + +func (w *WorkerNode) getCPUWarningEmitted() bool { + w.mutex.RLock() + defer w.mutex.RUnlock() + return w.CPUWarningEmitted +} + +func (w *WorkerNode) updateMemoryUsage(memoryUsage float64) { + w.mutex.Lock() + defer w.mutex.Unlock() + w.MemoryUsage = memoryUsage +} + +func (w *WorkerNode) getMemoryUsage() float64 { + w.mutex.RLock() + defer w.mutex.RUnlock() + return w.MemoryUsage +} + +func (w *WorkerNode) getWorkerInfo() WorkerNode { + w.mutex.RLock() + defer w.mutex.RUnlock() + return WorkerNode{ + ID: w.ID, + State: w.getState(), + Heartbeat: w.getHeartbeat(), + SpawnCount: w.getSpawnCount(), + CPUUsage: w.getCPUUsage(), + CPUWarningEmitted: w.getCPUWarningEmitted(), + MemoryUsage: w.getMemoryUsage(), + } +} + +type grpcServer struct { + messager.UnimplementedMessageServer + masterHost string + masterPort int + server *grpc.Server + clients *sync.Map + + fromWorker chan *genericMessage + toWorker chan *genericMessage + disconnectedToWorker chan bool + shutdownChan chan bool + wg sync.WaitGroup +} + +func newServer(masterHost string, masterPort int) (server *grpcServer) { + log.Info().Msg("Boomer is built with grpc support.") + server = &grpcServer{ + masterHost: masterHost, + masterPort: masterPort, + clients: &sync.Map{}, + fromWorker: make(chan *genericMessage, 100), + toWorker: make(chan *genericMessage, 100), + disconnectedToWorker: make(chan bool), + shutdownChan: make(chan bool), + } + return server +} + +func (s *grpcServer) start() (err error) { + addr := fmt.Sprintf("%v:%v", s.masterHost, s.masterPort) + lis, err := net.Listen("tcp", addr) + if err != nil { + log.Error().Err(err).Msg("failed to listen") + return + } + // create gRPC server + serv := grpc.NewServer() + // register message server + messager.RegisterMessageServer(serv, s) + reflection.Register(serv) + // start grpc server + go func() { + err = 
serv.Serve(lis) + if err != nil { + log.Error().Err(err).Msg("failed to serve") + return + } + }() + + go s.recv() + go s.send() + + return nil +} + +func (s *grpcServer) getWorkersByState(state int32) (wns []*WorkerNode) { + s.clients.Range(func(key, value interface{}) bool { + if workerInfo, ok := value.(*WorkerNode); ok { + if workerInfo.getState() == state { + wns = append(wns, workerInfo) + } + } + return true + }) + return wns +} + +func (s *grpcServer) getWorkersLengthByState(state int32) (l int) { + s.clients.Range(func(key, value interface{}) bool { + if workerInfo, ok := value.(*WorkerNode); ok { + if workerInfo.getState() == state { + l++ + } + } + return true + }) + return +} + +func (s *grpcServer) getAllWorkers() (wns []WorkerNode) { + s.clients.Range(func(key, value interface{}) bool { + if workerInfo, ok := value.(*WorkerNode); ok { + wns = append(wns, workerInfo.getWorkerInfo()) + } + return true + }) + return wns +} + +func (s *grpcServer) getClients() *sync.Map { + return s.clients +} + +func (s *grpcServer) getClientsLength() (l int) { + s.clients.Range(func(key, value interface{}) bool { + if workerInfo, ok := value.(*WorkerNode); ok { + if workerInfo.getState() != StateQuitting && workerInfo.getState() != StateMissing { + l++ + } + } + return true + }) + return +} + +func (s *grpcServer) close() { + close(s.shutdownChan) +} + +func (s *grpcServer) recvChannel() chan *genericMessage { + return s.fromWorker +} + +func (s *grpcServer) shutdownChannel() chan bool { + return s.shutdownChan +} + +func (s *grpcServer) recv() { + for { + select { + case <-s.shutdownChan: + return + default: + s.clients.Range(func(key, value interface{}) bool { + if workerInfo, ok := value.(*WorkerNode); ok { + if workerInfo.getState() == StateQuitting || workerInfo.getState() == StateMissing { + return true + } + msg, err := workerInfo.messenger.Recv() + switch err { + case nil: + if msg == nil { + return true + } + s.fromWorker <- newGenericMessage(msg.Type, 
msg.Data, msg.NodeID) + log.Info(). + Str("nodeID", msg.NodeID). + Str("type", msg.Type). + Interface("data", msg.Data). + Msg("receive data from worker") + case io.EOF: + s.fromWorker <- newQuitMessage(workerInfo.ID) + default: + if err.Error() == status.Error(codes.Canceled, context.Canceled.Error()).Error() { + s.fromWorker <- newQuitMessage(workerInfo.ID) + return true + } + log.Error().Err(err).Msg("failed to get stream from client") + } + } + return true + }) + } + } +} + +func (s *grpcServer) sendChannel() chan *genericMessage { + return s.toWorker +} + +func (s *grpcServer) send() { + for { + select { + case <-s.shutdownChan: + return + case msg := <-s.toWorker: + s.sendMessage(msg) + + // We may send genericMessage to Worker. + if msg.Type == "quit" { + close(s.disconnectedToWorker) + } + } + } +} + +func (s *grpcServer) sendMessage(msg *genericMessage) { + s.clients.Range(func(key, value interface{}) bool { + if workerInfo, ok := value.(*WorkerNode); ok { + if workerInfo.getState() == StateQuitting || workerInfo.getState() == StateMissing { + return true + } + err := workerInfo.messenger.Send( + &messager.StreamResponse{ + Type: msg.Type, + Data: msg.Data, + NodeID: workerInfo.ID, + Tasks: msg.Tasks}, + ) + switch err { + case nil: + break + case io.EOF: + fallthrough + default: + s.fromWorker <- newQuitMessage(workerInfo.ID) + log.Error().Err(err).Msg("failed to send message") + return true + } + log.Info(). + Str("nodeID", workerInfo.ID). + Str("type", msg.Type). + Interface("data", msg.Data). + Int32("state", workerInfo.getState()). 
+ Msg("send data to worker") + } + return true + }) +} + +func (s *grpcServer) disconnectedChannel() chan bool { + return s.disconnectedToWorker +} diff --git a/hrp/internal/boomer/server_grpc_test.go b/hrp/internal/boomer/server_grpc_test.go new file mode 100644 index 00000000..853e847e --- /dev/null +++ b/hrp/internal/boomer/server_grpc_test.go @@ -0,0 +1 @@ +package boomer diff --git a/hrp/internal/boomer/utils.go b/hrp/internal/boomer/utils.go index 9a6f3fef..bc376ca6 100644 --- a/hrp/internal/boomer/utils.go +++ b/hrp/internal/boomer/utils.go @@ -6,10 +6,15 @@ import ( "io" "math" "os" + "runtime" "runtime/pprof" + "strings" "time" + "github.com/google/uuid" + "github.com/rs/zerolog/log" + "github.com/shirou/gopsutil/process" ) func round(val float64, roundOn float64, places int) (newVal float64) { @@ -75,3 +80,27 @@ func startCPUProfile(file string, duration time.Duration) (err error) { }) return nil } + +// generate a random nodeID like locust does, using the same algorithm. +func getNodeID() (nodeID string) { + hostname, _ := os.Hostname() + id := strings.Replace(uuid.New().String(), "-", "", -1) + nodeID = fmt.Sprintf("%s_%s", hostname, id) + return +} + +// GetCurrentCPUUsage get current CPU usage +func GetCurrentCPUUsage() float64 { + currentPid := os.Getpid() + p, err := process.NewProcess(int32(currentPid)) + if err != nil { + log.Printf("Fail to get CPU percent, %v\n", err) + return 0.0 + } + percent, err := p.CPUPercent() + if err != nil { + log.Printf("Fail to get CPU percent, %v\n", err) + return 0.0 + } + return percent / float64(runtime.NumCPU()) +} diff --git a/hrp/internal/builtin/utils.go b/hrp/internal/builtin/utils.go index abea592e..876eaff3 100644 --- a/hrp/internal/builtin/utils.go +++ b/hrp/internal/builtin/utils.go @@ -1,11 +1,13 @@ package builtin import ( + "archive/zip" "bufio" "bytes" "encoding/csv" builtinJSON "encoding/json" "fmt" + "io" "math/rand" "os" "os/exec" @@ -490,3 +492,168 @@ func GetFileNameWithoutExtension(path string) 
string { ext := filepath.Ext(base) return base[0 : len(base)-len(ext)] } + +func ZipDir(filename string, root string) error { + p, err := os.Getwd() + if err != nil { + return err + } + if strings.Contains(root, p) { + root, err = filepath.Rel(p, root) + if err != nil { + return err + } + } + err = os.RemoveAll(filename) + if err != nil { + return err + } + var files []string + err = filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + files = append(files, path) + return nil + }) + if err != nil { + return err + } + err = ZipFiles(filename, files) + return err +} + +// ZipFiles compresses one or many files into a single zip archive file. +// Param 1: filename is the output zip file's name. +// Param 2: files is a list of files to add to the zip. +func ZipFiles(filename string, files []string) error { + newZipFile, err := os.Create(filename) + if err != nil { + return err + } + defer newZipFile.Close() + + zipWriter := zip.NewWriter(newZipFile) + defer zipWriter.Close() + + // Add files to zip + for _, file := range files { + if err = AddFileToZip(zipWriter, file); err != nil { + return err + } + } + return nil +} + +func AddFileToZip(zipWriter *zip.Writer, filename string) error { + fileToZip, err := os.Open(filename) + if err != nil { + return err + } + defer fileToZip.Close() + + // Get the file information + info, err := fileToZip.Stat() + if err != nil { + return err + } + + header, err := zip.FileInfoHeader(info) + if err != nil { + return err + } + + // Using FileInfoHeader() above only uses the basename of the file. If we want + // to preserve the folder structure we can overwrite this with the full path. 
+ header.Name = filename + + // if dir + if info.IsDir() { + header.Name += `/` + } else { + // Change to deflate to gain better compression + // see http://golang.org/pkg/archive/zip/#pkg-constants + header.Method = zip.Deflate + } + + writer, err := zipWriter.CreateHeader(header) + if err != nil { + return err + } + if !info.IsDir() { + _, err = io.Copy(writer, fileToZip) + } + return err +} + +func UnZip(dst, src string) (err error) { + zr, err := zip.OpenReader(src) + defer zr.Close() + if err != nil { + return + } + if dst != "" { + if err := os.MkdirAll(dst, 0755); err != nil { + return err + } + } + for _, file := range zr.File { + path := filepath.Join(dst, file.Name) + if file.FileInfo().IsDir() { + if err := os.MkdirAll(path, file.Mode()); err != nil { + return err + } + continue + } + fr, err := file.Open() + if err != nil { + return err + } + fw, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, file.Mode()) + if err != nil { + return err + } + _, err = io.Copy(fw, fr) + if err != nil { + return err + } + log.Info().Msg(fmt.Sprintf("unzip %s successful\n", path)) + _ = fw.Close() + _ = fr.Close() + } + return nil +} + +func File2Bytes(filename string) ([]byte, error) { + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + stats, err := file.Stat() + if err != nil { + return nil, err + } + + data := make([]byte, stats.Size()) + count, err := file.Read(data) + if err != nil { + return nil, err + } + log.Info().Msg(fmt.Sprintf("read file %s len: %d \n", filename, count)) + + return data, nil +} + +func Bytes2File(data []byte, filename string) error { + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + + count, err := file.Write(data) + if err != nil { + return err + } + log.Info().Msg(fmt.Sprintf("write file %s len: %d \n", filename, count)) + return nil +} diff --git a/hrp/internal/grpc/messager/messager.pb.go b/hrp/internal/grpc/messager/messager.pb.go new file 
mode 100644 index 00000000..bb389289 --- /dev/null +++ b/hrp/internal/grpc/messager/messager.pb.go @@ -0,0 +1,276 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.20.0 +// source: grpc/proto/messager.proto + +package messager + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StreamRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Data map[string]int64 `protobuf:"bytes,2,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + NodeID string `protobuf:"bytes,3,opt,name=NodeID,proto3" json:"NodeID,omitempty"` +} + +func (x *StreamRequest) Reset() { + *x = StreamRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_proto_messager_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamRequest) ProtoMessage() {} + +func (x *StreamRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_proto_messager_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamRequest.ProtoReflect.Descriptor instead. 
+func (*StreamRequest) Descriptor() ([]byte, []int) { + return file_grpc_proto_messager_proto_rawDescGZIP(), []int{0} +} + +func (x *StreamRequest) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *StreamRequest) GetData() map[string]int64 { + if x != nil { + return x.Data + } + return nil +} + +func (x *StreamRequest) GetNodeID() string { + if x != nil { + return x.NodeID + } + return "" +} + +type StreamResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Data map[string]int64 `protobuf:"bytes,2,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + NodeID string `protobuf:"bytes,3,opt,name=NodeID,proto3" json:"NodeID,omitempty"` + Tasks []byte `protobuf:"bytes,4,opt,name=tasks,proto3" json:"tasks,omitempty"` +} + +func (x *StreamResponse) Reset() { + *x = StreamResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_proto_messager_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamResponse) ProtoMessage() {} + +func (x *StreamResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_proto_messager_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamResponse.ProtoReflect.Descriptor instead. 
+func (*StreamResponse) Descriptor() ([]byte, []int) { + return file_grpc_proto_messager_proto_rawDescGZIP(), []int{1} +} + +func (x *StreamResponse) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *StreamResponse) GetData() map[string]int64 { + if x != nil { + return x.Data + } + return nil +} + +func (x *StreamResponse) GetNodeID() string { + if x != nil { + return x.NodeID + } + return "" +} + +func (x *StreamResponse) GetTasks() []byte { + if x != nil { + return x.Tasks + } + return nil +} + +var File_grpc_proto_messager_proto protoreflect.FileDescriptor + +var file_grpc_proto_messager_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0xaa, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xc2, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, 0x61, 
0x6d, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x1a, 0x37, 0x0a, + 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0x61, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x56, 0x0a, 0x1d, 0x42, 0x69, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x16, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x0f, 0x5a, 0x0d, 0x67, 0x72, 0x70, + 0x63, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_grpc_proto_messager_proto_rawDescOnce sync.Once + 
file_grpc_proto_messager_proto_rawDescData = file_grpc_proto_messager_proto_rawDesc +) + +func file_grpc_proto_messager_proto_rawDescGZIP() []byte { + file_grpc_proto_messager_proto_rawDescOnce.Do(func() { + file_grpc_proto_messager_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_proto_messager_proto_rawDescData) + }) + return file_grpc_proto_messager_proto_rawDescData +} + +var file_grpc_proto_messager_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_grpc_proto_messager_proto_goTypes = []interface{}{ + (*StreamRequest)(nil), // 0: message.StreamRequest + (*StreamResponse)(nil), // 1: message.StreamResponse + nil, // 2: message.StreamRequest.DataEntry + nil, // 3: message.StreamResponse.DataEntry +} +var file_grpc_proto_messager_proto_depIdxs = []int32{ + 2, // 0: message.StreamRequest.data:type_name -> message.StreamRequest.DataEntry + 3, // 1: message.StreamResponse.data:type_name -> message.StreamResponse.DataEntry + 0, // 2: message.Message.BidirectionalStreamingMessage:input_type -> message.StreamRequest + 1, // 3: message.Message.BidirectionalStreamingMessage:output_type -> message.StreamResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_grpc_proto_messager_proto_init() } +func file_grpc_proto_messager_proto_init() { + if File_grpc_proto_messager_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_proto_messager_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_proto_messager_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamResponse); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_proto_messager_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_proto_messager_proto_goTypes, + DependencyIndexes: file_grpc_proto_messager_proto_depIdxs, + MessageInfos: file_grpc_proto_messager_proto_msgTypes, + }.Build() + File_grpc_proto_messager_proto = out.File + file_grpc_proto_messager_proto_rawDesc = nil + file_grpc_proto_messager_proto_goTypes = nil + file_grpc_proto_messager_proto_depIdxs = nil +} diff --git a/hrp/internal/grpc/messager/messager_grpc.pb.go b/hrp/internal/grpc/messager/messager_grpc.pb.go new file mode 100644 index 00000000..8237aa3c --- /dev/null +++ b/hrp/internal/grpc/messager/messager_grpc.pb.go @@ -0,0 +1,137 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.0 +// source: grpc/proto/messager.proto + +package messager + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// MessageClient is the client API for Message service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type MessageClient interface { + BidirectionalStreamingMessage(ctx context.Context, opts ...grpc.CallOption) (Message_BidirectionalStreamingMessageClient, error) +} + +type messageClient struct { + cc grpc.ClientConnInterface +} + +func NewMessageClient(cc grpc.ClientConnInterface) MessageClient { + return &messageClient{cc} +} + +func (c *messageClient) BidirectionalStreamingMessage(ctx context.Context, opts ...grpc.CallOption) (Message_BidirectionalStreamingMessageClient, error) { + stream, err := c.cc.NewStream(ctx, &Message_ServiceDesc.Streams[0], "/message.Message/BidirectionalStreamingMessage", opts...) + if err != nil { + return nil, err + } + x := &messageBidirectionalStreamingMessageClient{stream} + return x, nil +} + +type Message_BidirectionalStreamingMessageClient interface { + Send(*StreamRequest) error + Recv() (*StreamResponse, error) + grpc.ClientStream +} + +type messageBidirectionalStreamingMessageClient struct { + grpc.ClientStream +} + +func (x *messageBidirectionalStreamingMessageClient) Send(m *StreamRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *messageBidirectionalStreamingMessageClient) Recv() (*StreamResponse, error) { + m := new(StreamResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// MessageServer is the server API for Message service. +// All implementations must embed UnimplementedMessageServer +// for forward compatibility +type MessageServer interface { + BidirectionalStreamingMessage(Message_BidirectionalStreamingMessageServer) error + mustEmbedUnimplementedMessageServer() +} + +// UnimplementedMessageServer must be embedded to have forward compatible implementations. 
+type UnimplementedMessageServer struct { +} + +func (UnimplementedMessageServer) BidirectionalStreamingMessage(Message_BidirectionalStreamingMessageServer) error { + return status.Errorf(codes.Unimplemented, "method BidirectionalStreamingMessage not implemented") +} +func (UnimplementedMessageServer) mustEmbedUnimplementedMessageServer() {} + +// UnsafeMessageServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to MessageServer will +// result in compilation errors. +type UnsafeMessageServer interface { + mustEmbedUnimplementedMessageServer() +} + +func RegisterMessageServer(s grpc.ServiceRegistrar, srv MessageServer) { + s.RegisterService(&Message_ServiceDesc, srv) +} + +func _Message_BidirectionalStreamingMessage_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(MessageServer).BidirectionalStreamingMessage(&messageBidirectionalStreamingMessageServer{stream}) +} + +type Message_BidirectionalStreamingMessageServer interface { + Send(*StreamResponse) error + Recv() (*StreamRequest, error) + grpc.ServerStream +} + +type messageBidirectionalStreamingMessageServer struct { + grpc.ServerStream +} + +func (x *messageBidirectionalStreamingMessageServer) Send(m *StreamResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *messageBidirectionalStreamingMessageServer) Recv() (*StreamRequest, error) { + m := new(StreamRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Message_ServiceDesc is the grpc.ServiceDesc for Message service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Message_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "message.Message", + HandlerType: (*MessageServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "BidirectionalStreamingMessage", + Handler: _Message_BidirectionalStreamingMessage_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/proto/messager.proto", +} diff --git a/hrp/internal/grpc/proto/messager.proto b/hrp/internal/grpc/proto/messager.proto new file mode 100644 index 00000000..ef311339 --- /dev/null +++ b/hrp/internal/grpc/proto/messager.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package message; + +option go_package = "grpc/messager"; + +service Message { + rpc BidirectionalStreamingMessage(stream StreamRequest) returns (stream StreamResponse){}; +} + +message StreamRequest{ + string type = 1; + map data = 2; + string NodeID = 3; +} + +message StreamResponse{ + string type = 1; + map data = 2; + string NodeID = 3; + bytes tasks = 4; +} \ No newline at end of file diff --git a/hrp/parameters.go b/hrp/parameters.go index ae8e1b9f..af54afa0 100644 --- a/hrp/parameters.go +++ b/hrp/parameters.go @@ -178,6 +178,14 @@ func (iter *ParametersIterator) Next() map[string]interface{} { return selectedParameters } +func (iter *ParametersIterator) outParameters() map[string]interface{} { + res := map[string]interface{}{} + for key, params := range iter.data { + res[key] = params + } + return res +} + func genCartesianProduct(multiParameters []Parameters) Parameters { if len(multiParameters) == 0 { return nil diff --git a/hrp/server.go b/hrp/server.go new file mode 100644 index 00000000..dc51e8c4 --- /dev/null +++ b/hrp/server.go @@ -0,0 +1,299 @@ +package hrp + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "net/http" + + "github.com/httprunner/httprunner/v4/hrp/internal/boomer" + 
"github.com/httprunner/httprunner/v4/hrp/internal/json" +) + +const jsonContentType = "application/json; encoding=utf-8" + +func methods(h http.HandlerFunc, methods ...string) http.HandlerFunc { + methodMap := make(map[string]struct{}, len(methods)) + for _, m := range methods { + methodMap[m] = struct{}{} + // GET implies support for HEAD + if m == "GET" { + methodMap["HEAD"] = struct{}{} + } + } + return func(w http.ResponseWriter, r *http.Request) { + if _, ok := methodMap[r.Method]; !ok { + http.Error(w, fmt.Sprintf("method %s not allowed", r.Method), http.StatusMethodNotAllowed) + return + } + h.ServeHTTP(w, r) + } +} + +func parseBody(r *http.Request) (data map[string]interface{}, err error) { + if r.Body == nil { + return nil, nil + } + + // Always set resp.Data to the incoming request body, in case we don't know + // how to handle the content type + body, err := ioutil.ReadAll(r.Body) + if err != nil { + r.Body.Close() + return nil, err + } + err = json.Unmarshal(body, data) + if err != nil { + return nil, err + } + return data, nil +} + +func writeResponse(w http.ResponseWriter, status int, contentType string, body []byte) { + w.Header().Set("Content-Type", contentType) + w.Header().Set("Content-Length", fmt.Sprintf("%d", len(body))) + w.WriteHeader(status) + w.Write(body) +} + +func writeJSON(w http.ResponseWriter, body []byte, status int) { + writeResponse(w, status, jsonContentType, body) +} + +type StartRequestBody struct { + Worker string `json:"worker"` // all + SpawnCount int64 `json:"spawn_count"` + SpawnRate int64 `json:"spawn_rate"` + TestCasePath string `json:"testcase_path"` +} + +type ServerCode int + +// server response code +const ( + Success ServerCode = iota + ParamsError + ServerError + StopError +) + +// ServerStatus stores http response code and message +type ServerStatus struct { + Code ServerCode `json:"code"` + Message string `json:"message"` +} + +var EnumAPIResponseSuccess = ServerStatus{ + Code: Success, + Message: "success", +} + 
+func EnumAPIResponseParamError(errMsg string) ServerStatus { + return ServerStatus{ + Code: ParamsError, + Message: errMsg, + } +} + +func EnumAPIResponseServerError(errMsg string) ServerStatus { + return ServerStatus{ + Code: ServerError, + Message: errMsg, + } +} + +func EnumAPIResponseStopError(errMsg string) ServerStatus { + return ServerStatus{ + Code: StopError, + Message: errMsg, + } +} + +func CustomAPIResponse(errCode ServerCode, errMsg string) ServerStatus { + return ServerStatus{ + Code: errCode, + Message: errMsg, + } +} + +type RebalanceRequestBody struct { + Worker string `json:"worker"` + SpawnCount int64 `json:"spawn_count"` + SpawnRate int64 `json:"spawn_rate"` + TestCasePath string `json:"testcase_path"` +} + +type StopRequestBody struct { + Worker string `json:"worker"` +} + +type QuitRequestBody struct { + Worker string `json:"worker"` +} + +type CommonResponseBody struct { + ServerStatus +} + +type APIGetWorkersRequestBody struct { + ID string `json:"id"` + State int32 `json:"state"` + CPUUsage float64 `json:"cpu_usage"` + MemoryUsage float64 `json:"memory_usage"` +} + +type APIGetWorkersResponseBody struct { + ServerStatus + Data []boomer.WorkerNode `json:"data"` +} + +type apiHandler struct { + boomer *HRPBoomer +} + +func (b *HRPBoomer) NewAPIHandler() *apiHandler { + return &apiHandler{boomer: b} +} + +// Index renders an HTML index page +func (api *apiHandler) Index(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.Error(w, "Not Found", http.StatusNotFound) + return + } + w.Header().Set("Content-Security-Policy", "default-src 'self'; style-src 'self' 'unsafe-inline'; img-src 'self' camo.githubusercontent.com") + fmt.Fprintf(w, "Welcome to httprunner page!") +} + +func (api *apiHandler) Start(w http.ResponseWriter, r *http.Request) { + data := map[string]interface{}{} + args := r.URL.Query() + for k, vs := range args { + for _, v := range vs { + data[k] = v + } + } + var resp *CommonResponseBody + err := 
api.boomer.Start(data) + if err != nil { + resp = &CommonResponseBody{ + ServerStatus: EnumAPIResponseServerError(err.Error()), + } + } else { + resp = &CommonResponseBody{ + ServerStatus: EnumAPIResponseSuccess, + } + } + body, _ := json.Marshal(resp) + writeJSON(w, body, http.StatusOK) +} + +func (api *apiHandler) Stop(w http.ResponseWriter, r *http.Request) { + data := map[string]interface{}{} + args := r.URL.Query() + for k, vs := range args { + for _, v := range vs { + data[k] = v + } + } + + api.boomer.Stop() + resp := &CommonResponseBody{ + ServerStatus: EnumAPIResponseSuccess, + } + body, _ := json.Marshal(resp) + writeJSON(w, body, http.StatusOK) +} + +func (api *apiHandler) Quit(w http.ResponseWriter, r *http.Request) { + data := map[string]interface{}{} + args := r.URL.Query() + for k, vs := range args { + for _, v := range vs { + data[k] = v + } + } + + resp := &CommonResponseBody{ + ServerStatus: EnumAPIResponseSuccess, + } + body, _ := json.Marshal(resp) + writeJSON(w, body, http.StatusOK) + api.boomer.Quit() +} + +func (api *apiHandler) ReBalance(w http.ResponseWriter, r *http.Request) { + data := map[string]interface{}{} + args := r.URL.Query() + for k, vs := range args { + for _, v := range vs { + data[k] = v + } + } + var resp *CommonResponseBody + err := api.boomer.ReBalance(data) + if err != nil { + resp = &CommonResponseBody{ + ServerStatus: EnumAPIResponseParamError(err.Error()), + } + } else { + resp = &CommonResponseBody{ + ServerStatus: EnumAPIResponseSuccess, + } + } + body, _ := json.Marshal(resp) + writeJSON(w, body, http.StatusOK) +} + +func (api *apiHandler) GetWorkersInfo(w http.ResponseWriter, r *http.Request) { + resp := &APIGetWorkersResponseBody{ + ServerStatus: EnumAPIResponseSuccess, + Data: api.boomer.GetWorkersInfo(), + } + + body, _ := json.Marshal(resp) + writeJSON(w, body, http.StatusOK) +} + +func (api *apiHandler) Handler() http.Handler { + mux := http.NewServeMux() + + mux.HandleFunc("/", methods(api.Index, "GET")) + 
mux.HandleFunc("/start", methods(api.Start, "GET")) + mux.HandleFunc("/stop", methods(api.Stop, "GET")) + mux.HandleFunc("/quit", methods(api.Quit, "GET")) + mux.HandleFunc("/rebalance", methods(api.ReBalance, "GET")) + mux.HandleFunc("/workers", methods(api.GetWorkersInfo, "GET")) + + return mux +} + +func (apiHandler) ServeHTTP(http.ResponseWriter, *http.Request) {} + +func (b *HRPBoomer) StartServer() { + h := b.NewAPIHandler() + mux := h.Handler() + + server := &http.Server{ + Addr: ":9771", + Handler: mux, + } + + go func() { + <-b.GetCloseChan() + if err := server.Shutdown(context.Background()); err != nil { + log.Fatal("shutdown server:", err) + } + }() + + log.Println("Starting HTTP server...") + err := server.ListenAndServe() + if err != nil { + if err == http.ErrServerClosed { + log.Print("server closed under request") + } else { + log.Fatal("server closed unexpected") + } + } +} diff --git a/hrp/step_rendezvous.go b/hrp/step_rendezvous.go index 77291a36..edd9cf84 100644 --- a/hrp/step_rendezvous.go +++ b/hrp/step_rendezvous.go @@ -155,9 +155,9 @@ func (r *Rendezvous) setReleased() { } func initRendezvous(testcase *TestCase, total int64) []*Rendezvous { - tCase := testcase.ToTCase() var rendezvousList []*Rendezvous - for _, step := range tCase.TestSteps { + for _, s := range testcase.TestSteps { + step := s.Struct() if step.Rendezvous == nil { continue } @@ -188,16 +188,20 @@ func initRendezvous(testcase *TestCase, total int64) []*Rendezvous { return rendezvousList } -func waitRendezvous(rendezvousList []*Rendezvous) { +func (r *Rendezvous) updateRendezvousNumber(number int64) { + atomic.StoreInt64(&r.Number, int64(float32(number)*r.Percent)) +} + +func waitRendezvous(rendezvousList []*Rendezvous, b *HRPBoomer) { if rendezvousList != nil { lastRendezvous := rendezvousList[len(rendezvousList)-1] for _, rendezvous := range rendezvousList { - go waitSingleRendezvous(rendezvous, rendezvousList, lastRendezvous) + go waitSingleRendezvous(rendezvous, 
rendezvousList, lastRendezvous, b) } } } -func waitSingleRendezvous(rendezvous *Rendezvous, rendezvousList []*Rendezvous, lastRendezvous *Rendezvous) { +func waitSingleRendezvous(rendezvous *Rendezvous, rendezvousList []*Rendezvous, lastRendezvous *Rendezvous, b *HRPBoomer) { for { // cycle start: block current checking until current rendezvous activated <-rendezvous.activateChan @@ -241,6 +245,8 @@ func waitSingleRendezvous(rendezvous *Rendezvous, rendezvousList []*Rendezvous, if rendezvous == lastRendezvous { for _, r := range rendezvousList { r.reset() + // dynamic adjustment based on the number of concurrent users + r.updateRendezvousNumber(int64(b.GetSpawnCount())) } } else { <-lastRendezvous.releaseChan diff --git a/hrp/testcase.go b/hrp/testcase.go index afe03713..ffedd829 100644 --- a/hrp/testcase.go +++ b/hrp/testcase.go @@ -11,6 +11,7 @@ import ( "github.com/rs/zerolog/log" "github.com/httprunner/httprunner/v4/hrp/internal/builtin" + "github.com/mitchellh/mapstructure" ) // ITestCase represents interface for testcases, @@ -40,6 +41,11 @@ func (tc *TestCase) ToTCase() *TCase { Config: tc.Config, } for _, step := range tc.TestSteps { + if step.Type() == stepTypeTestCase { + if testcase, ok := step.Struct().TestCase.(*TestCase); ok { + step.Struct().TestCase = testcase.ToTCase() + } + } tCase.TestSteps = append(tCase.TestSteps, step.Struct()) } return tCase @@ -106,13 +112,17 @@ func (tc *TCase) ToTestCase(casePath string) (*TestCase, error) { tc.Config = &TConfig{Name: "please input testcase name"} } tc.Config.Path = casePath + return tc.toTestCase() +} +// toTestCase converts *TCase to *TestCase +func (tc *TCase) toTestCase() (*TestCase, error) { testCase := &TestCase{ Config: tc.Config, } // locate project root dir by plugin path - projectRootDir, err := GetProjectRootDirPath(casePath) + projectRootDir, err := GetProjectRootDirPath(tc.Config.Path) if err != nil { return nil, errors.Wrap(err, "failed to get project root dir") } @@ -139,40 +149,71 @@ func 
(tc *TCase) ToTestCase(casePath string) (*TestCase, error) { for _, step := range tc.TestSteps { if step.API != nil { apiPath, ok := step.API.(string) + if ok { + path := filepath.Join(projectRootDir, apiPath) + if !builtin.IsFilePathExists(path) { + return nil, errors.New("referenced api file not found: " + path) + } + + refAPI := APIPath(path) + apiContent, err := refAPI.ToAPI() + if err != nil { + return nil, err + } + step.API = apiContent + } else { + apiMap, ok := step.API.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("referenced api should be map or path(string), got %v", step.API) + } + api := &API{} + err = mapstructure.Decode(apiMap, api) + if err != nil { + return nil, err + } + step.API = api + } + _, ok = step.API.(*API) if !ok { - return nil, fmt.Errorf("referenced api path should be string, got %v", step.API) + return nil, fmt.Errorf("failed to handle referenced API, got %v", step.TestCase) } - path := filepath.Join(projectRootDir, apiPath) - if !builtin.IsFilePathExists(path) { - return nil, errors.New("referenced api file not found: " + path) - } - - refAPI := APIPath(path) - apiContent, err := refAPI.ToAPI() - if err != nil { - return nil, err - } - step.API = apiContent - testCase.TestSteps = append(testCase.TestSteps, &StepAPIWithOptionalArgs{ step: step, }) } else if step.TestCase != nil { casePath, ok := step.TestCase.(string) - if !ok { - return nil, fmt.Errorf("referenced testcase path should be string, got %v", step.TestCase) - } - path := filepath.Join(projectRootDir, casePath) - if !builtin.IsFilePathExists(path) { - return nil, errors.New("referenced testcase file not found: " + path) - } + if ok { + path := filepath.Join(projectRootDir, casePath) + if !builtin.IsFilePathExists(path) { + return nil, errors.New("referenced testcase file not found: " + path) + } - refTestCase := TestCasePath(path) - tc, err := refTestCase.ToTestCase() - if err != nil { - return nil, err + refTestCase := TestCasePath(path) + tc, err := 
refTestCase.ToTestCase() + if err != nil { + return nil, err + } + step.TestCase = tc + } else { + testCaseMap, ok := step.TestCase.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("referenced testcase should be map or path(string), got %v", step.TestCase) + } + tCase := &TCase{} + err = mapstructure.Decode(testCaseMap, tCase) + if err != nil { + return nil, err + } + tc, err := tCase.toTestCase() + if err != nil { + return nil, err + } + step.TestCase = tc + } + _, ok = step.TestCase.(*TestCase) + if !ok { + return nil, fmt.Errorf("failed to handle referenced testcase, got %v", step.TestCase) } - step.TestCase = tc testCase.TestSteps = append(testCase.TestSteps, &StepTestCaseWithOptionalArgs{ step: step, }) From 6ad03e81bb0c66ab2b419aee5d1ee552b5167532 Mon Sep 17 00:00:00 2001 From: xucong053 Date: Thu, 19 May 2022 13:02:41 +0800 Subject: [PATCH 02/31] fix: unittest --- hrp/cmd/boom.go | 58 ++++++++++++------------ hrp/internal/boomer/runner.go | 71 ++++++++++++++---------------- hrp/internal/boomer/runner_test.go | 35 ++++++++------- 3 files changed, 82 insertions(+), 82 deletions(-) diff --git a/hrp/cmd/boom.go b/hrp/cmd/boom.go index 09d87f2a..1b465d36 100644 --- a/hrp/cmd/boom.go +++ b/hrp/cmd/boom.go @@ -45,9 +45,28 @@ var boomCmd = &cobra.Command{ } } + // init boomer var hrpBoomer *hrp.HRPBoomer if boomArgs.master { hrpBoomer = hrp.NewMasterBoomer(boomArgs.masterBindHost, boomArgs.masterBindPort) + } else if boomArgs.worker { + hrpBoomer = hrp.NewWorkerBoomer(boomArgs.masterHost, boomArgs.masterPort) + } else { + hrpBoomer = hrp.NewStandaloneBoomer(boomArgs.SpawnCount, boomArgs.SpawnRate) + } + hrpBoomer.EnableGracefulQuit() + + // init output + if !boomArgs.DisableConsoleOutput { + hrpBoomer.AddOutput(boomer.NewConsoleOutput()) + } + if boomArgs.PrometheusPushgatewayURL != "" { + hrpBoomer.AddOutput(boomer.NewPrometheusPusherOutput(boomArgs.PrometheusPushgatewayURL, "hrp", hrpBoomer.GetMode())) + } + + // run boomer + switch hrpBoomer.GetMode() 
{ + case "master": hrpBoomer.SetTestCasesPath(args) if boomArgs.autoStart { hrpBoomer.SetAutoStart() @@ -55,43 +74,28 @@ var boomCmd = &cobra.Command{ hrpBoomer.SetSpawnCount(boomArgs.SpawnCount) hrpBoomer.SetSpawnRate(boomArgs.SpawnRate) } - hrpBoomer.EnableGracefulQuit() go hrpBoomer.StartServer() go hrpBoomer.RunMaster() hrpBoomer.LoopTestCases() - return - } else if boomArgs.worker { - hrpBoomer = hrp.NewWorkerBoomer(boomArgs.masterHost, boomArgs.masterPort) + case "worker": if boomArgs.ignoreQuit { hrpBoomer.SetIgnoreQuit() } go hrpBoomer.RunWorker() - } else { - hrpBoomer = hrp.NewStandaloneBoomer(boomArgs.SpawnCount, boomArgs.SpawnRate) + hrpBoomer.LoopTasks() + case "standalone": if boomArgs.LoopCount > 0 { hrpBoomer.SetLoopCount(boomArgs.LoopCount) } - } - hrpBoomer.SetRateLimiter(boomArgs.MaxRPS, boomArgs.RequestIncreaseRate) - if !boomArgs.DisableConsoleOutput { - - hrpBoomer.AddOutput(boomer.NewConsoleOutput()) - } - if boomArgs.PrometheusPushgatewayURL != "" { - hrpBoomer.AddOutput(boomer.NewPrometheusPusherOutput(boomArgs.PrometheusPushgatewayURL, "hrp", hrpBoomer.GetMode())) - } - hrpBoomer.SetDisableKeepAlive(boomArgs.DisableKeepalive) - hrpBoomer.SetDisableCompression(boomArgs.DisableCompression) - hrpBoomer.SetClientTransport() - if venv != "" { - hrpBoomer.SetPython3Venv(venv) - } - hrpBoomer.EnableCPUProfile(boomArgs.CPUProfile, boomArgs.CPUProfileDuration) - hrpBoomer.EnableMemoryProfile(boomArgs.MemoryProfile, boomArgs.MemoryProfileDuration) - hrpBoomer.EnableGracefulQuit() - if boomArgs.worker { - hrpBoomer.LoopTasks() - } else { + hrpBoomer.SetRateLimiter(boomArgs.MaxRPS, boomArgs.RequestIncreaseRate) + hrpBoomer.SetDisableKeepAlive(boomArgs.DisableKeepalive) + hrpBoomer.SetDisableCompression(boomArgs.DisableCompression) + hrpBoomer.SetClientTransport() + if venv != "" { + hrpBoomer.SetPython3Venv(venv) + } + hrpBoomer.EnableCPUProfile(boomArgs.CPUProfile, boomArgs.CPUProfileDuration) + hrpBoomer.EnableMemoryProfile(boomArgs.MemoryProfile, 
boomArgs.MemoryProfileDuration) hrpBoomer.Run(paths...) } }, diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 1181d6c2..67b89a19 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -58,12 +58,11 @@ func (l *Loop) increaseFinishedCount() { } type SpawnInfo struct { + mutex sync.RWMutex spawnCount int64 // target clients to spawn acquiredCount int64 // count acquired of workers spawnRate float64 spawnDone chan struct{} - - mutex sync.RWMutex } func (s *SpawnInfo) setSpawn(spawnCount int64, spawnRate float64) { @@ -154,6 +153,9 @@ type runner struct { // when this channel is closed, all statistics are reported successfully reportedChan chan bool + // rebalance spawn + rebalance chan bool + // all running workers(goroutines) will select on this channel. // close this channel will stop all running workers. stopChan chan bool @@ -273,12 +275,7 @@ func (r *runner) reportTestResult() { } func (r *runner) startSpawning(spawnCount int64, spawnRate float64, spawnCompleteFunc func()) { - r.stopChan = make(chan bool) - r.reportedChan = make(chan bool) r.spawn.reset() - - r.spawn.setSpawn(spawnCount, spawnRate) - atomic.StoreInt32(&r.currentClientsNum, 0) go r.spawnWorkers(spawnCount, spawnRate, r.stopChan, spawnCompleteFunc) @@ -290,6 +287,8 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo Float64("spawnRate", spawnRate). 
Msg("Spawning workers") + r.spawn.setSpawn(spawnCount, spawnRate) + r.updateState(StateSpawning) for { select { @@ -306,7 +305,7 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo // loop count per worker var workerLoop *Loop if r.loop != nil { - workerLoop = &Loop{loopCount: atomic.LoadInt64(&r.loop.loopCount) / int64(r.spawn.spawnCount)} + workerLoop = &Loop{loopCount: atomic.LoadInt64(&r.loop.loopCount) / r.spawn.spawnCount} } atomic.AddInt32(&r.currentClientsNum, 1) go func() { @@ -343,23 +342,19 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo atomic.AddInt32(&r.currentClientsNum, -1) return } - if !r.isStarted() { - atomic.AddInt64(&r.spawn.acquiredCount, -1) - atomic.AddInt32(&r.currentClientsNum, -1) - return - } } } }() - } else { - if r.getState() == StateSpawning { - r.spawn.done() - if spawnCompleteFunc != nil { - spawnCompleteFunc() - } - r.updateState(StateRunning) + } else if r.getState() == StateSpawning { + // spawning compete + r.spawn.done() + if spawnCompleteFunc != nil { + spawnCompleteFunc() } - time.Sleep(1 * time.Second) + r.updateState(StateRunning) + } else { + // continue if rebalance + <-r.rebalance } } } @@ -492,6 +487,10 @@ func (r *localRunner) start() { r.rateLimiter.Start() } + r.stopChan = make(chan bool) + r.reportedChan = make(chan bool) + r.rebalance = make(chan bool) + go r.spawnWorkers(r.spawn.spawnCount, r.spawn.spawnRate, r.stopChan, nil) // output setup @@ -525,6 +524,7 @@ func (r *localRunner) start() { func (r *localRunner) stop() { if r.runner.isStarted() { r.runner.stop() + close(r.rebalance) } } @@ -542,8 +542,6 @@ type workerRunner struct { // get testcase from master testCaseBytes chan []byte - startFlag bool - ignoreQuit bool } @@ -554,10 +552,8 @@ func newWorkerRunner(masterHost string, masterPort int) (r *workerRunner) { spawn: &SpawnInfo{ spawnDone: make(chan struct{}), }, - stopChan: make(chan bool), - reportedChan: make(chan bool), - closeChan: 
make(chan bool), - once: &sync.Once{}, + closeChan: make(chan bool), + once: &sync.Once{}, }, masterHost: masterHost, masterPort: masterPort, @@ -572,7 +568,6 @@ func (r *workerRunner) spawnComplete() { data := make(map[string]int64) data["count"] = r.spawn.getSpawnCount() r.client.sendChannel() <- newGenericMessage("spawning_complete", data, r.nodeID) - r.updateState(StateRunning) } func (r *workerRunner) onSpawnMessage(msg *genericMessage) { @@ -607,6 +602,7 @@ func (r *workerRunner) onMessage(msg *genericMessage) { switch msg.Type { case "spawn": r.onSpawnMessage(msg) + r.rebalance <- true case "stop": r.stop() log.Info().Msg("Recv stop message from master, all the goroutines are stopped") @@ -644,7 +640,7 @@ func (r *workerRunner) startListener() { } } -// run starts service +// run worker service func (r *workerRunner) run() { r.updateState(StateInit) r.client = newClient(r.masterHost, r.masterPort, r.nodeID) @@ -694,11 +690,8 @@ func (r *workerRunner) run() { <-r.closeChan } +// start load test func (r *workerRunner) start() { - r.startFlag = true - defer func() { - r.startFlag = false - }() r.stats.clearAll() // start rate limiter @@ -706,6 +699,10 @@ func (r *workerRunner) start() { r.rateLimiter.Start() } + r.stopChan = make(chan bool) + r.reportedChan = make(chan bool) + r.rebalance = make(chan bool) + r.once.Do(r.outputOnStart) r.startSpawning(r.spawn.getSpawnCount(), r.spawn.getSpawnRate(), r.spawnComplete) @@ -722,6 +719,7 @@ func (r *workerRunner) start() { func (r *workerRunner) stop() { if r.isStarted() { close(r.stopChan) + close(r.rebalance) // stop rate limiter if r.rateLimitEnabled { r.rateLimiter.Stop() @@ -735,9 +733,8 @@ func (r *workerRunner) close() { if r.ignoreQuit { return } - for r.startFlag == true { - time.Sleep(1 * time.Second) - } + // waiting report finished + time.Sleep(3 * time.Second) close(r.closeChan) var ticker = time.NewTicker(1 * time.Second) if r.client != nil { @@ -768,8 +765,6 @@ type masterRunner struct { 
parseTestCasesChan chan bool startFlag bool testCaseBytes chan []byte - - mutex sync.Mutex } func newMasterRunner(masterBindHost string, masterBindPort int) *masterRunner { diff --git a/hrp/internal/boomer/runner_test.go b/hrp/internal/boomer/runner_test.go index 28305752..e48856ac 100644 --- a/hrp/internal/boomer/runner_test.go +++ b/hrp/internal/boomer/runner_test.go @@ -107,9 +107,8 @@ func TestLoopCount(t *testing.T) { runner := newLocalRunner(2, 2) runner.loop = &Loop{loopCount: 4} runner.setTasks(tasks) - go runner.start() - <-runner.stopChan - if !assert.Equal(t, runner.loop.loopCount, atomic.LoadInt64(&runner.loop.finishedCount)) { + runner.start() + if !assert.Equal(t, atomic.LoadInt64(&runner.loop.loopCount), atomic.LoadInt64(&runner.loop.finishedCount)) { t.Fatal() } } @@ -129,8 +128,9 @@ func TestSpawnWorkers(t *testing.T) { runner.client = newClient("localhost", 5557, runner.nodeID) runner.setTasks(tasks) + runner.stopChan = make(chan bool) go runner.spawnWorkers(10, 10, runner.stopChan, runner.spawnComplete) - time.Sleep(10 * time.Millisecond) + time.Sleep(2 * time.Second) currentClients := atomic.LoadInt32(&runner.currentClientsNum) if currentClients != 10 { @@ -166,13 +166,14 @@ func TestSpawnWorkersWithManyTasks(t *testing.T) { runner.client = newClient("localhost", 5557, runner.nodeID) const numToSpawn int64 = 30 + runner.stopChan = make(chan bool) - runner.spawnWorkers(numToSpawn, float64(numToSpawn), runner.stopChan, runner.spawnComplete) + go runner.spawnWorkers(numToSpawn, float64(numToSpawn), runner.stopChan, runner.spawnComplete) time.Sleep(2 * time.Second) currentClients := atomic.LoadInt32(&runner.currentClientsNum) - assert.Equal(t, numToSpawn, int(currentClients)) + assert.Equal(t, numToSpawn, int64(currentClients)) lock.Lock() hundreds := taskCalls["one hundred"] tens := taskCalls["ten"] @@ -255,6 +256,7 @@ func TestStop(t *testing.T) { } tasks := []*Task{taskA} runner := newWorkerRunner("localhost", 5557) + runner.stopChan = make(chan 
bool) runner.setTasks(tasks) runner.spawn.setSpawn(10, 10) runner.updateState(StateSpawning) @@ -358,9 +360,6 @@ func TestOnMessage(t *testing.T) { // spawn complete and running time.Sleep(2 * time.Second) - if runner.getState() != StateRunning { - t.Error("State of runner is not running after spawn, got", runner.getState()) - } if atomic.LoadInt32(&runner.currentClientsNum) != 10 { t.Error("Number of goroutines mismatches, expected: 10, current count:", atomic.LoadInt32(&runner.currentClientsNum)) } @@ -368,6 +367,9 @@ func TestOnMessage(t *testing.T) { if msg.Type != "spawning_complete" { t.Error("Runner should send spawning_complete message when spawn completed, got", msg.Type) } + if runner.getState() != StateRunning { + t.Error("State of runner is not running after spawn, got", runner.getState()) + } // increase goroutines while running runner.onMessage(newGenericMessage("spawn", map[string]int64{ @@ -381,10 +383,6 @@ func TestOnMessage(t *testing.T) { } time.Sleep(2 * time.Second) - msg = <-runner.client.sendChannel() - if msg.Type != "spawning_complete" { - t.Error("Runner should send spawning_complete message, got", msg.Type) - } if runner.getState() != StateRunning { t.Error("State of runner is not running after spawn, got", runner.getState()) } @@ -402,6 +400,9 @@ func TestOnMessage(t *testing.T) { t.Error("Runner should send client_stopped message, got", msg.Type) } + time.Sleep(3 * time.Second) + + go runner.start() // spawn again runner.onMessage(newGenericMessage("spawn", map[string]int64{ "spawn_count": 10, @@ -414,13 +415,13 @@ func TestOnMessage(t *testing.T) { } // spawn complete and running - time.Sleep(2 * time.Second) - if runner.getState() != StateRunning { - t.Error("State of runner is not running after spawn, got", runner.getState()) - } + time.Sleep(3 * time.Second) if atomic.LoadInt32(&runner.currentClientsNum) != 10 { t.Error("Number of goroutines mismatches, expected: 10, current count:", atomic.LoadInt32(&runner.currentClientsNum)) } + 
if runner.getState() != StateRunning { + t.Error("State of runner is not running after spawn, got", runner.getState()) + } msg = <-runner.client.sendChannel() if msg.Type != "spawning_complete" { t.Error("Runner should send spawning_complete message when spawn completed, got", msg.Type) From ab91feedbcecc79f4d795aabf5687f7ceb3e359c Mon Sep 17 00:00:00 2001 From: xucong053 Date: Fri, 20 May 2022 14:48:55 +0800 Subject: [PATCH 03/31] fix: unittest --- hrp/internal/boomer/runner_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hrp/internal/boomer/runner_test.go b/hrp/internal/boomer/runner_test.go index e48856ac..356b5c74 100644 --- a/hrp/internal/boomer/runner_test.go +++ b/hrp/internal/boomer/runner_test.go @@ -129,6 +129,7 @@ func TestSpawnWorkers(t *testing.T) { runner.client = newClient("localhost", 5557, runner.nodeID) runner.setTasks(tasks) runner.stopChan = make(chan bool) + runner.rebalance = make(chan bool) go runner.spawnWorkers(10, 10, runner.stopChan, runner.spawnComplete) time.Sleep(2 * time.Second) @@ -167,6 +168,7 @@ func TestSpawnWorkersWithManyTasks(t *testing.T) { const numToSpawn int64 = 30 runner.stopChan = make(chan bool) + runner.rebalance = make(chan bool) go runner.spawnWorkers(numToSpawn, float64(numToSpawn), runner.stopChan, runner.spawnComplete) time.Sleep(2 * time.Second) @@ -257,6 +259,7 @@ func TestStop(t *testing.T) { tasks := []*Task{taskA} runner := newWorkerRunner("localhost", 5557) runner.stopChan = make(chan bool) + runner.rebalance = make(chan bool) runner.setTasks(tasks) runner.spawn.setSpawn(10, 10) runner.updateState(StateSpawning) @@ -308,6 +311,7 @@ func TestOnQuitMessage(t *testing.T) { runner.updateState(StateRunning) runner.closeChan = make(chan bool) runner.stopChan = make(chan bool) + runner.rebalance = make(chan bool) runner.client.shutdownChan = make(chan bool) runner.onMessage(newGenericMessage("quit", nil, runner.nodeID)) <-runner.closeChan From ae8fc70ff6e2ae11be4e8650332eaf6caa3fd450 Mon Sep 17 
00:00:00 2001 From: xucong053 Date: Sun, 22 May 2022 12:44:18 +0800 Subject: [PATCH 04/31] refactor --- hrp/cmd/boom.go | 2 +- hrp/internal/boomer/boomer.go | 68 +++--- hrp/internal/boomer/boomer_test.go | 4 +- hrp/internal/boomer/output.go | 4 +- hrp/internal/boomer/runner.go | 331 +++++++++++++++++------------ hrp/internal/boomer/runner_test.go | 71 +++---- hrp/server.go | 13 +- 7 files changed, 278 insertions(+), 215 deletions(-) diff --git a/hrp/cmd/boom.go b/hrp/cmd/boom.go index 1b465d36..a0be404c 100644 --- a/hrp/cmd/boom.go +++ b/hrp/cmd/boom.go @@ -71,7 +71,7 @@ var boomCmd = &cobra.Command{ if boomArgs.autoStart { hrpBoomer.SetAutoStart() hrpBoomer.SetExpectWorkers(boomArgs.expectWorkers, boomArgs.expectWorkersMaxWait) - hrpBoomer.SetSpawnCount(boomArgs.SpawnCount) + hrpBoomer.SetSpawnCount(int64(boomArgs.SpawnCount)) hrpBoomer.SetSpawnRate(boomArgs.SpawnRate) } go hrpBoomer.StartServer() diff --git a/hrp/internal/boomer/boomer.go b/hrp/internal/boomer/boomer.go index a4e9bb72..16cb0e5c 100644 --- a/hrp/internal/boomer/boomer.go +++ b/hrp/internal/boomer/boomer.go @@ -39,9 +39,6 @@ type Boomer struct { testcasePath []string - spawnCount int // target clients to spawn - spawnRate float64 - cpuProfile string cpuProfileDuration time.Duration @@ -86,8 +83,6 @@ func NewStandaloneBoomer(spawnCount int, spawnRate float64) *Boomer { return &Boomer{ mode: StandaloneMode, localRunner: newLocalRunner(spawnCount, spawnRate), - spawnCount: spawnCount, - spawnRate: spawnRate, } } @@ -161,18 +156,26 @@ func (b *Boomer) GetState() int32 { } // SetSpawnCount sets spawn count -func (b *Boomer) SetSpawnCount(spawnCount int) { - b.spawnCount = spawnCount - if b.mode == DistributedMasterMode { - b.masterRunner.spawn.setSpawn(int64(spawnCount), -1) +func (b *Boomer) SetSpawnCount(spawnCount int64) { + switch b.mode { + case DistributedMasterMode: + b.masterRunner.setSpawnCount(spawnCount) + case DistributedWorkerMode: + b.workerRunner.setSpawnCount(spawnCount) + default: + 
b.localRunner.setSpawnCount(spawnCount) } } // SetSpawnRate sets spawn rate func (b *Boomer) SetSpawnRate(spawnRate float64) { - b.spawnRate = spawnRate - if b.mode == DistributedMasterMode { - b.masterRunner.spawn.setSpawn(-1, spawnRate) + switch b.mode { + case DistributedMasterMode: + b.masterRunner.setSpawnRate(spawnRate) + case DistributedWorkerMode: + b.workerRunner.setSpawnRate(spawnRate) + default: + b.localRunner.setSpawnRate(spawnRate) } } @@ -242,11 +245,11 @@ func (b *Boomer) SetLoopCount(loopCount int64) { // total loop count for testcase, it will be evenly distributed to each worker switch b.mode { case DistributedWorkerMode: - b.workerRunner.loop = &Loop{loopCount: loopCount * b.workerRunner.spawn.getSpawnCount()} + b.workerRunner.loop = &Loop{loopCount: loopCount * b.workerRunner.getSpawnCount()} case DistributedMasterMode: - b.masterRunner.loop = &Loop{loopCount: loopCount * b.masterRunner.spawn.getSpawnCount()} + b.masterRunner.loop = &Loop{loopCount: loopCount * b.masterRunner.getSpawnCount()} case StandaloneMode: - b.localRunner.loop = &Loop{loopCount: loopCount * b.localRunner.spawn.getSpawnCount()} + b.localRunner.loop = &Loop{loopCount: loopCount * b.localRunner.getSpawnCount()} } } @@ -388,6 +391,9 @@ func (b *Boomer) RecordFailure(requestType, name string, responseTime int64, exc // Start starts to run func (b *Boomer) Start(Args map[string]interface{}) error { + if b.masterRunner.isStarted() { + return errors.New("already started") + } spawnCount, ok := Args["spawn_count"] if ok { v, err := strconv.Atoi(spawnCount.(string)) @@ -395,7 +401,7 @@ func (b *Boomer) Start(Args map[string]interface{}) error { log.Error().Err(err).Msg("spawn_count sets error") return err } - b.SetSpawnCount(v) + b.SetSpawnCount(int64(v)) } else { return errors.New("spawn count error") } @@ -423,6 +429,9 @@ func (b *Boomer) Start(Args map[string]interface{}) error { // ReBalance starts to rebalance load test func (b *Boomer) ReBalance(Args map[string]interface{}) 
error { + if !b.masterRunner.isStarted() { + return errors.New("no start") + } spawnCount, ok := Args["spawn_count"] if ok { v, err := strconv.Atoi(spawnCount.(string)) @@ -430,7 +439,7 @@ func (b *Boomer) ReBalance(Args map[string]interface{}) error { log.Error().Err(err).Msg("spawn_count sets error") return err } - b.SetSpawnCount(v) + b.SetSpawnCount(int64(v)) } spawnRate, ok := Args["spawn_rate"] if ok { @@ -441,11 +450,6 @@ func (b *Boomer) ReBalance(Args map[string]interface{}) error { } b.SetSpawnRate(v) } - path, ok := Args["path"].(string) - if ok { - paths := strings.Split(path, ",") - b.SetTestCasesPath(paths) - } err := b.masterRunner.rebalance() if err != nil { log.Error().Err(err).Msg("failed to rebalance") @@ -454,12 +458,8 @@ func (b *Boomer) ReBalance(Args map[string]interface{}) error { } // Stop stops to load test -func (b *Boomer) Stop() { - switch b.mode { - case DistributedMasterMode: - b.masterRunner.stop() - default: - } +func (b *Boomer) Stop() error { + return b.masterRunner.stop() } // GetWorkersInfo gets workers @@ -493,22 +493,22 @@ func (b *Boomer) Quit() { func (b *Boomer) GetSpawnDoneChan() chan struct{} { switch b.mode { case DistributedWorkerMode: - return b.workerRunner.spawn.getSpawnDone() + return b.workerRunner.controller.getSpawnDone() case DistributedMasterMode: - return b.masterRunner.spawn.getSpawnDone() + return b.masterRunner.controller.getSpawnDone() default: - return b.localRunner.spawn.getSpawnDone() + return b.localRunner.controller.getSpawnDone() } } func (b *Boomer) GetSpawnCount() int { switch b.mode { case DistributedWorkerMode: - return int(b.workerRunner.spawn.getSpawnCount()) + return int(b.workerRunner.getSpawnCount()) case DistributedMasterMode: - return int(b.masterRunner.spawn.getSpawnCount()) + return int(b.masterRunner.getSpawnCount()) default: - return int(b.localRunner.spawn.getSpawnCount()) + return int(b.localRunner.getSpawnCount()) } } diff --git a/hrp/internal/boomer/boomer_test.go 
b/hrp/internal/boomer/boomer_test.go index fde9b37b..7f113f87 100644 --- a/hrp/internal/boomer/boomer_test.go +++ b/hrp/internal/boomer/boomer_test.go @@ -12,11 +12,11 @@ import ( func TestNewStandaloneBoomer(t *testing.T) { b := NewStandaloneBoomer(100, 10) - if b.localRunner.spawn.spawnCount != 100 { + if b.localRunner.spawnCount != 100 { t.Error("spawnCount should be 100") } - if b.localRunner.spawn.spawnRate != 10 { + if b.localRunner.spawnRate != 10 { t.Error("spawnRate should be 10") } } diff --git a/hrp/internal/boomer/output.go b/hrp/internal/boomer/output.go index 3ef8bdc9..a0866e02 100644 --- a/hrp/internal/boomer/output.go +++ b/hrp/internal/boomer/output.go @@ -169,7 +169,7 @@ type statsEntryOutput struct { } type dataOutput struct { - UserCount int32 `json:"user_count"` + UserCount int64 `json:"user_count"` State int32 `json:"state"` TotalStats *statsEntryOutput `json:"stats_total"` TransactionsPassed int64 `json:"transactions_passed"` @@ -186,7 +186,7 @@ type dataOutput struct { } func convertData(data map[string]interface{}) (output *dataOutput, err error) { - userCount, ok := data["user_count"].(int32) + userCount, ok := data["user_count"].(int64) if !ok { return nil, fmt.Errorf("user_count is not int32") } diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 67b89a19..38c37b7c 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -57,82 +57,106 @@ func (l *Loop) increaseFinishedCount() { atomic.AddInt64(&l.finishedCount, 1) } -type SpawnInfo struct { - mutex sync.RWMutex - spawnCount int64 // target clients to spawn - acquiredCount int64 // count acquired of workers - spawnRate float64 - spawnDone chan struct{} +type Controller struct { + mutex sync.RWMutex + once sync.Once + currentClientsNum int64 // current clients count + spawnCount int64 // target clients to spawn + spawnRate float64 + spawnDone chan struct{} + tasks []*Task } -func (s *SpawnInfo) setSpawn(spawnCount int64, spawnRate 
float64) { - s.mutex.Lock() - defer s.mutex.Unlock() +func (c *Controller) setSpawn(spawnCount int64, spawnRate float64) { + c.mutex.Lock() + defer c.mutex.Unlock() if spawnCount > 0 { - atomic.StoreInt64(&s.spawnCount, spawnCount) + atomic.StoreInt64(&c.spawnCount, spawnCount) } if spawnRate > 0 { - s.spawnRate = spawnRate + c.spawnRate = spawnRate } } -func (s *SpawnInfo) getSpawnCount() int64 { - s.mutex.RLock() - defer s.mutex.RUnlock() - return atomic.LoadInt64(&s.spawnCount) +func (c *Controller) setSpawnCount(spawnCount int64) { + if spawnCount > 0 { + atomic.StoreInt64(&c.spawnCount, spawnCount) + } } -func (s *SpawnInfo) getSpawnRate() float64 { - s.mutex.RLock() - defer s.mutex.RUnlock() - return s.spawnRate +func (c *Controller) setSpawnRate(spawnRate float64) { + c.mutex.Lock() + defer c.mutex.Unlock() + if spawnRate > 0 { + c.spawnRate = spawnRate + } } -func (s *SpawnInfo) getSpawnDone() chan struct{} { - s.mutex.RLock() - defer s.mutex.RUnlock() - return s.spawnDone +func (c *Controller) getSpawnCount() int64 { + c.mutex.RLock() + defer c.mutex.RUnlock() + return atomic.LoadInt64(&c.spawnCount) } -func (s *SpawnInfo) done() { - close(s.spawnDone) +func (c *Controller) getSpawnRate() float64 { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.spawnRate } -func (s *SpawnInfo) isFinished() bool { +func (c *Controller) getSpawnDone() chan struct{} { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.spawnDone +} + +func (c *Controller) getCurrentClientsNum() int64 { + c.mutex.RLock() + defer c.mutex.RUnlock() + return atomic.LoadInt64(&c.currentClientsNum) +} + +func (c *Controller) spawnCompete() { + close(c.spawnDone) +} + +func (c *Controller) isFinished() bool { // return true when workers acquired - return atomic.LoadInt64(&s.acquiredCount) == atomic.LoadInt64(&s.spawnCount) + return atomic.LoadInt64(&c.currentClientsNum) == atomic.LoadInt64(&c.spawnCount) } -func (s *SpawnInfo) acquire() bool { +func (c *Controller) acquire() bool { // get 
one ticket when there are still remaining spawn count to test // return true when getting ticket successfully - if atomic.LoadInt64(&s.acquiredCount) < atomic.LoadInt64(&s.spawnCount) { - atomic.AddInt64(&s.acquiredCount, 1) + if atomic.LoadInt64(&c.currentClientsNum) < atomic.LoadInt64(&c.spawnCount) { + atomic.AddInt64(&c.currentClientsNum, 1) return true } return false } -func (s *SpawnInfo) erase() bool { +func (c *Controller) erase() bool { // return true if acquiredCount > spawnCount - if atomic.LoadInt64(&s.acquiredCount) > atomic.LoadInt64(&s.spawnCount) { - atomic.AddInt64(&s.acquiredCount, -1) + if atomic.LoadInt64(&c.currentClientsNum) > atomic.LoadInt64(&c.spawnCount) { + atomic.AddInt64(&c.currentClientsNum, -1) return true } return false } -func (s *SpawnInfo) increaseFinishedCount() { - atomic.AddInt64(&s.acquiredCount, -1) +func (c *Controller) increaseFinishedCount() { + atomic.AddInt64(&c.currentClientsNum, -1) } -func (s *SpawnInfo) reset() { - s.mutex.Lock() - defer s.mutex.Unlock() - s.spawnCount = 0 - s.spawnRate = 0 - s.acquiredCount = 0 - s.spawnDone = make(chan struct{}) +func (c *Controller) reset() { + c.mutex.Lock() + defer c.mutex.Unlock() + c.spawnCount = 0 + c.spawnRate = 0 + c.currentClientsNum = 0 + c.spawnDone = make(chan struct{}) + c.tasks = []*Task{} + c.once = sync.Once{} } type runner struct { @@ -146,9 +170,11 @@ type runner struct { rateLimitEnabled bool stats *requestStats - currentClientsNum int32 // current clients count - spawn *SpawnInfo - loop *Loop // specify loop count for testcase, count = loopCount * spawnCount + spawnCount int64 // target clients to spawn + spawnRate float64 + + controller *Controller + loop *Loop // specify loop count for testcase, count = loopCount * spawnCount // when this channel is closed, all statistics are reported successfully reportedChan chan bool @@ -168,6 +194,28 @@ type runner struct { once *sync.Once } +func (r *runner) setSpawnRate(spawnRate float64) { + r.mutex.Lock() + defer 
r.mutex.Unlock() + if spawnRate > 0 { + r.spawnRate = spawnRate + } +} + +func (r *runner) getSpawnRate() float64 { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.spawnRate +} + +func (r *runner) getSpawnCount() int64 { + return atomic.LoadInt64(&r.spawnCount) +} + +func (r *runner) setSpawnCount(spawnCount int64) { + atomic.StoreInt64(&r.spawnCount, spawnCount) +} + // safeRun runs fn and recovers from unexpected panics. // it prevents panics from Task.Fn crashing boomer. func (r *runner) safeRun(fn func()) { @@ -239,7 +287,7 @@ func (r *runner) outputOnStop() { func (r *runner) reportStats() { data := r.stats.collectReportData() - data["user_count"] = atomic.LoadInt32(&r.currentClientsNum) + data["user_count"] = r.controller.getCurrentClientsNum() data["state"] = atomic.LoadInt32(&r.state) r.outputOnEvent(data) } @@ -255,7 +303,7 @@ func (r *runner) reportTestResult() { currentTime := time.Now() println(fmt.Sprint("=========================================== Statistics Summary ==========================================")) println(fmt.Sprintf("Current time: %s, Users: %v, Duration: %v, Accumulated Transactions: %d Passed, %d Failed", - currentTime.Format("2006/01/02 15:04:05"), atomic.LoadInt32(&r.currentClientsNum), duration, r.stats.transactionPassed, r.stats.transactionFailed)) + currentTime.Format("2006/01/02 15:04:05"), r.controller.getCurrentClientsNum(), duration, r.stats.transactionPassed, r.stats.transactionFailed)) table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{"Name", "# requests", "# fails", "Median", "Average", "Min", "Max", "Content Size", "# reqs/sec", "# fails/sec"}) row := make([]string, 10) @@ -274,11 +322,13 @@ func (r *runner) reportTestResult() { println() } -func (r *runner) startSpawning(spawnCount int64, spawnRate float64, spawnCompleteFunc func()) { - r.spawn.reset() - atomic.StoreInt32(&r.currentClientsNum, 0) - - go r.spawnWorkers(spawnCount, spawnRate, r.stopChan, spawnCompleteFunc) +func (r *runner) reset() 
{ + r.updateState(StateInit) + r.controller.reset() + r.stats.clearAll() + r.rebalance = make(chan bool) + r.stopChan = make(chan bool) + r.reportedChan = make(chan bool) } func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan bool, spawnCompleteFunc func()) { @@ -287,7 +337,7 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo Float64("spawnRate", spawnRate). Msg("Spawning workers") - r.spawn.setSpawn(spawnCount, spawnRate) + r.controller.setSpawn(spawnCount, spawnRate) r.updateState(StateSpawning) for { @@ -297,23 +347,21 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo log.Info().Msg("Quitting spawning workers") return default: - if r.isStarted() && r.spawn.acquire() { + if r.isStarted() && r.controller.acquire() { // spawn workers with rate limit - sleepTime := time.Duration(1000000/r.spawn.getSpawnRate()) * time.Microsecond + sleepTime := time.Duration(1000000/r.controller.getSpawnRate()) * time.Microsecond time.Sleep(sleepTime) // loop count per worker var workerLoop *Loop if r.loop != nil { - workerLoop = &Loop{loopCount: atomic.LoadInt64(&r.loop.loopCount) / r.spawn.spawnCount} + workerLoop = &Loop{loopCount: atomic.LoadInt64(&r.loop.loopCount) / r.controller.spawnCount} } - atomic.AddInt32(&r.currentClientsNum, 1) go func() { for { select { case <-quit: - atomic.AddInt64(&r.spawn.acquiredCount, -1) - atomic.AddInt32(&r.currentClientsNum, -1) + atomic.AddInt64(&r.controller.currentClientsNum, -1) return default: if workerLoop != nil && !workerLoop.acquire() { @@ -336,25 +384,31 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo workerLoop.increaseFinishedCount() if r.loop.isFinished() { r.stop() + close(r.rebalance) } } - if r.spawn.erase() { - atomic.AddInt32(&r.currentClientsNum, -1) + if r.controller.erase() { return } } } }() - } else if r.getState() == StateSpawning { + continue + } + + r.controller.once.Do(func() { // spawning 
compete - r.spawn.done() + r.controller.spawnCompete() if spawnCompleteFunc != nil { spawnCompleteFunc() } r.updateState(StateRunning) - } else { - // continue if rebalance - <-r.rebalance + }) + + <-r.rebalance + if r.isStarted() { + // rebalance spawn count + r.controller.setSpawn(r.getSpawnCount(), r.getSpawnRate()) } } } @@ -425,6 +479,7 @@ func (r *runner) statsStart() { // close reportedChan and return if the last stats is reported successfully if !r.isStarted() { close(r.reportedChan) + log.Info().Msg("Quitting statsStart") return } } @@ -460,38 +515,28 @@ type localRunner struct { func newLocalRunner(spawnCount int, spawnRate float64) *localRunner { return &localRunner{ runner: runner{ - state: StateInit, - stats: newRequestStats(), - outputs: make([]Output, 0), - spawn: &SpawnInfo{ - spawnCount: int64(spawnCount), - spawnRate: spawnRate, - spawnDone: make(chan struct{}), - }, - reportedChan: make(chan bool), - stopChan: make(chan bool), - closeChan: make(chan bool), - once: &sync.Once{}, + state: StateInit, + stats: newRequestStats(), + spawnCount: int64(spawnCount), + spawnRate: spawnRate, + controller: &Controller{}, + outputs: make([]Output, 0), + closeChan: make(chan bool), + once: &sync.Once{}, }, } } func (r *localRunner) start() { - // init state - r.updateState(StateInit) - atomic.StoreInt32(&r.currentClientsNum, 0) - r.stats.clearAll() + // init localRunner + r.reset() // start rate limiter if r.rateLimitEnabled { r.rateLimiter.Start() } - r.stopChan = make(chan bool) - r.reportedChan = make(chan bool) - r.rebalance = make(chan bool) - - go r.spawnWorkers(r.spawn.spawnCount, r.spawn.spawnRate, r.stopChan, nil) + r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stopChan, nil) // output setup r.outputOnStart() @@ -548,12 +593,10 @@ type workerRunner struct { func newWorkerRunner(masterHost string, masterPort int) (r *workerRunner) { r = &workerRunner{ runner: runner{ - stats: newRequestStats(), - spawn: &SpawnInfo{ - spawnDone: make(chan 
struct{}), - }, - closeChan: make(chan bool), - once: &sync.Once{}, + stats: newRequestStats(), + controller: &Controller{}, + closeChan: make(chan bool), + once: &sync.Once{}, }, masterHost: masterHost, masterPort: masterPort, @@ -566,7 +609,7 @@ func newWorkerRunner(masterHost string, masterPort int) (r *workerRunner) { func (r *workerRunner) spawnComplete() { data := make(map[string]int64) - data["count"] = r.spawn.getSpawnCount() + data["count"] = r.controller.getSpawnCount() r.client.sendChannel() <- newGenericMessage("spawning_complete", data, r.nodeID) } @@ -574,11 +617,11 @@ func (r *workerRunner) onSpawnMessage(msg *genericMessage) { r.client.sendChannel() <- newGenericMessage("spawning", nil, r.nodeID) spawnCount, ok := msg.Data["spawn_count"] if ok { - r.spawn.setSpawn(spawnCount, -1) + r.setSpawnCount(spawnCount) } spawnRate, ok := msg.Data["spawn_rate"] if ok { - r.spawn.setSpawn(-1, float64(spawnRate)) + r.setSpawnRate(float64(spawnRate)) } if msg.Tasks != nil { r.testCaseBytes <- msg.Tasks @@ -586,6 +629,19 @@ func (r *workerRunner) onSpawnMessage(msg *genericMessage) { log.Info().Msg("on spawn message successful") } +func (r *workerRunner) onRebalanceMessage(msg *genericMessage) { + spawnCount, ok := msg.Data["spawn_count"] + if ok { + r.setSpawnCount(spawnCount) + } + spawnRate, ok := msg.Data["spawn_rate"] + if ok { + r.setSpawnRate(float64(spawnRate)) + } + r.rebalance <- true + log.Info().Msg("on rebalance message successful") +} + // Runner acts as a state machine. 
func (r *workerRunner) onMessage(msg *genericMessage) { switch r.getState() { @@ -602,7 +658,8 @@ func (r *workerRunner) onMessage(msg *genericMessage) { switch msg.Type { case "spawn": r.onSpawnMessage(msg) - r.rebalance <- true + case "rebalance": + r.onRebalanceMessage(msg) case "stop": r.stop() log.Info().Msg("Recv stop message from master, all the goroutines are stopped") @@ -679,7 +736,7 @@ func (r *workerRunner) run() { data := map[string]int64{ "state": int64(r.getState()), "current_cpu_usage": int64(CPUUsage), - "spawn_count": int64(atomic.LoadInt32(&r.currentClientsNum)), + "spawn_count": r.controller.getCurrentClientsNum(), } r.client.sendChannel() <- newGenericMessage("heartbeat", data, r.nodeID) case <-r.closeChan: @@ -692,23 +749,19 @@ func (r *workerRunner) run() { // start load test func (r *workerRunner) start() { - r.stats.clearAll() + r.reset() // start rate limiter if r.rateLimitEnabled { r.rateLimiter.Start() } - r.stopChan = make(chan bool) - r.reportedChan = make(chan bool) - r.rebalance = make(chan bool) - r.once.Do(r.outputOnStart) - r.startSpawning(r.spawn.getSpawnCount(), r.spawn.getSpawnRate(), r.spawnComplete) + r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stopChan, r.spawnComplete) // start stats report - go r.runner.statsStart() + go r.statsStart() <-r.reportedChan @@ -718,12 +771,8 @@ func (r *workerRunner) start() { func (r *workerRunner) stop() { if r.isStarted() { - close(r.stopChan) + r.runner.stop() close(r.rebalance) - // stop rate limiter - if r.rateLimitEnabled { - r.rateLimiter.Stop() - } r.updateState(StateStopped) } } @@ -763,24 +812,19 @@ type masterRunner struct { expectWorkersMaxWait int parseTestCasesChan chan bool - startFlag bool testCaseBytes chan []byte } func newMasterRunner(masterBindHost string, masterBindPort int) *masterRunner { return &masterRunner{ runner: runner{ - state: StateInit, - spawn: &SpawnInfo{ - spawnDone: make(chan struct{}), - }, + state: StateInit, closeChan: make(chan bool), }, 
masterBindHost: masterBindHost, masterBindPort: masterBindPort, server: newServer(masterBindHost, masterBindPort), parseTestCasesChan: make(chan bool), - startFlag: false, testCaseBytes: make(chan []byte), } } @@ -946,15 +990,15 @@ func (r *masterRunner) start() error { if numWorkers == 0 { return errors.New("current workers: 0") } - workerSpawnRate := r.spawn.spawnRate / float64(numWorkers) - workerSpawnCount := r.spawn.getSpawnCount() / int64(numWorkers) + workerSpawnRate := r.getSpawnRate() / float64(numWorkers) + workerSpawnCount := r.getSpawnCount() / int64(numWorkers) log.Info().Msg("send spawn data to worker") r.updateState(StateSpawning) // waitting to fetch testcase - testcase, ok := r.fetchTestCase() - if !ok { - return errors.New("starting, do not retry frequently") + testcase, err := r.fetchTestCase() + if err != nil { + return err } r.server.sendChannel() <- newSpawnMessageToWorker("spawn", map[string]int64{ "spawn_count": workerSpawnCount, @@ -965,27 +1009,44 @@ func (r *masterRunner) start() error { return nil } -func (r *masterRunner) fetchTestCase() ([]byte, bool) { - if r.startFlag { - return nil, false - } - r.startFlag = true - defer func() { - r.startFlag = false - }() - r.parseTestCasesChan <- true - return <-r.testCaseBytes, true -} - func (r *masterRunner) rebalance() error { - return r.start() + numWorkers := r.server.getClientsLength() + if numWorkers == 0 { + return errors.New("current workers: 0") + } + workerSpawnRate := r.getSpawnRate() / float64(numWorkers) + workerSpawnCount := r.getSpawnCount() / int64(numWorkers) + + r.server.sendChannel() <- newSpawnMessageToWorker("rebalance", map[string]int64{ + "spawn_count": workerSpawnCount, + "spawn_rate": int64(workerSpawnRate), + }, nil) + println("send rebalance data to worker successful") + return nil } -func (r *masterRunner) stop() { +func (r *masterRunner) fetchTestCase() ([]byte, error) { + ticker := time.NewTicker(30 * time.Second) + if len(r.testCaseBytes) > 0 { + <-r.testCaseBytes 
+ } + r.parseTestCasesChan <- true + select { + case <-ticker.C: + return nil, errors.New("parse testcases timeout") + case tcb := <-r.testCaseBytes: + return tcb, nil + } +} + +func (r *masterRunner) stop() error { if r.isStarted() { r.updateState(StateStopping) r.server.sendChannel() <- &genericMessage{Type: "stop", Data: map[string]int64{}} r.updateState(StateStopped) + return nil + } else { + return errors.New("already stopped") } } diff --git a/hrp/internal/boomer/runner_test.go b/hrp/internal/boomer/runner_test.go index 356b5c74..ae41b5dd 100644 --- a/hrp/internal/boomer/runner_test.go +++ b/hrp/internal/boomer/runner_test.go @@ -127,13 +127,12 @@ func TestSpawnWorkers(t *testing.T) { defer runner.close() runner.client = newClient("localhost", 5557, runner.nodeID) + runner.reset() runner.setTasks(tasks) - runner.stopChan = make(chan bool) - runner.rebalance = make(chan bool) go runner.spawnWorkers(10, 10, runner.stopChan, runner.spawnComplete) time.Sleep(2 * time.Second) - currentClients := atomic.LoadInt32(&runner.currentClientsNum) + currentClients := runner.controller.getCurrentClientsNum() if currentClients != 10 { t.Error("Unexpected count", currentClients) } @@ -163,17 +162,16 @@ func TestSpawnWorkersWithManyTasks(t *testing.T) { runner := newWorkerRunner("localhost", 5557) defer runner.close() + runner.reset() runner.setTasks(tasks) runner.client = newClient("localhost", 5557, runner.nodeID) const numToSpawn int64 = 30 - runner.stopChan = make(chan bool) - runner.rebalance = make(chan bool) go runner.spawnWorkers(numToSpawn, float64(numToSpawn), runner.stopChan, runner.spawnComplete) time.Sleep(2 * time.Second) - currentClients := atomic.LoadInt32(&runner.currentClientsNum) + currentClients := runner.controller.getCurrentClientsNum() assert.Equal(t, numToSpawn, int64(currentClients)) lock.Lock() @@ -226,15 +224,15 @@ func TestSpawnAndStop(t *testing.T) { runner.client = newClient("localhost", 5557, runner.nodeID) runner.setTasks(tasks) - 
runner.spawn.setSpawn(10, 10) - runner.updateState(StateSpawning) + runner.setSpawnCount(10) + runner.setSpawnRate(10) go runner.start() // wait for spawning goroutines time.Sleep(2 * time.Second) - if atomic.LoadInt32(&runner.currentClientsNum) != 10 { - t.Error("Number of goroutines mismatches, expected: 10, current count", atomic.LoadInt32(&runner.currentClientsNum)) + if runner.controller.getCurrentClientsNum() != 10 { + t.Error("Number of goroutines mismatches, expected: 10, current count", runner.controller.getCurrentClientsNum()) } msg := <-runner.client.sendChannel() @@ -258,10 +256,8 @@ func TestStop(t *testing.T) { } tasks := []*Task{taskA} runner := newWorkerRunner("localhost", 5557) - runner.stopChan = make(chan bool) - runner.rebalance = make(chan bool) runner.setTasks(tasks) - runner.spawn.setSpawn(10, 10) + runner.reset() runner.updateState(StateSpawning) runner.stop() @@ -281,20 +277,21 @@ func TestOnSpawnMessage(t *testing.T) { defer runner.close() runner.client = newClient("localhost", 5557, runner.nodeID) runner.updateState(StateInit) + runner.reset() runner.setTasks([]*Task{taskA}) - runner.spawn.spawnCount = 100 - runner.spawn.spawnRate = 100 + runner.setSpawnCount(100) + runner.setSpawnRate(100) runner.onSpawnMessage(newGenericMessage("spawn", map[string]int64{ "spawn_count": 20, "spawn_rate": 20, }, runner.nodeID)) - if runner.spawn.spawnCount != 20 { - t.Error("workers should be overwrote by onSpawnMessage, expected: 20, was:", runner.spawn.spawnCount) + if runner.getSpawnCount() != 20 { + t.Error("workers should be overwrote by onSpawnMessage, expected: 20, was:", runner.controller.spawnCount) } - if runner.spawn.spawnRate != 20 { - t.Error("spawnRate should be overwrote by onSpawnMessage, expected: 20, was:", runner.spawn.spawnRate) + if runner.getSpawnRate() != 20 { + t.Error("spawnRate should be overwrote by onSpawnMessage, expected: 20, was:", runner.controller.spawnRate) } runner.onMessage(newGenericMessage("stop", nil, runner.nodeID)) 
@@ -309,9 +306,8 @@ func TestOnQuitMessage(t *testing.T) { <-runner.closeChan runner.updateState(StateRunning) + runner.reset() runner.closeChan = make(chan bool) - runner.stopChan = make(chan bool) - runner.rebalance = make(chan bool) runner.client.shutdownChan = make(chan bool) runner.onMessage(newGenericMessage("quit", nil, runner.nodeID)) <-runner.closeChan @@ -321,7 +317,7 @@ func TestOnQuitMessage(t *testing.T) { runner.updateState(StateStopped) runner.closeChan = make(chan bool) - runner.stopChan = make(chan bool) + runner.reset() runner.client.shutdownChan = make(chan bool) runner.onMessage(newGenericMessage("quit", nil, runner.nodeID)) <-runner.closeChan @@ -344,7 +340,6 @@ func TestOnMessage(t *testing.T) { tasks := []*Task{taskA, taskB} runner := newWorkerRunner("localhost", 5557) - defer runner.close() runner.client = newClient("localhost", 5557, runner.nodeID) runner.updateState(StateInit) runner.setTasks(tasks) @@ -364,8 +359,8 @@ func TestOnMessage(t *testing.T) { // spawn complete and running time.Sleep(2 * time.Second) - if atomic.LoadInt32(&runner.currentClientsNum) != 10 { - t.Error("Number of goroutines mismatches, expected: 10, current count:", atomic.LoadInt32(&runner.currentClientsNum)) + if runner.controller.getCurrentClientsNum() != 10 { + t.Error("Number of goroutines mismatches, expected: 10, current count:", runner.controller.getCurrentClientsNum()) } msg = <-runner.client.sendChannel() if msg.Type != "spawning_complete" { @@ -376,22 +371,17 @@ func TestOnMessage(t *testing.T) { } // increase goroutines while running - runner.onMessage(newGenericMessage("spawn", map[string]int64{ + runner.onMessage(newGenericMessage("rebalance", map[string]int64{ "spawn_count": 15, "spawn_rate": 15, }, runner.nodeID)) - msg = <-runner.client.sendChannel() - if msg.Type != "spawning" { - t.Error("Runner should send spawning message when starting spawn, got", msg.Type) - } - time.Sleep(2 * time.Second) if runner.getState() != StateRunning { t.Error("State 
of runner is not running after spawn, got", runner.getState()) } - if atomic.LoadInt32(&runner.currentClientsNum) != 15 { - t.Error("Number of goroutines mismatches, expected: 20, current count:", atomic.LoadInt32(&runner.currentClientsNum)) + if runner.controller.getCurrentClientsNum() != 15 { + t.Error("Number of goroutines mismatches, expected: 15, current count:", runner.controller.getCurrentClientsNum()) } // stop all the workers @@ -404,7 +394,7 @@ func TestOnMessage(t *testing.T) { t.Error("Runner should send client_stopped message, got", msg.Type) } - time.Sleep(3 * time.Second) + time.Sleep(4 * time.Second) go runner.start() // spawn again @@ -420,8 +410,8 @@ func TestOnMessage(t *testing.T) { // spawn complete and running time.Sleep(3 * time.Second) - if atomic.LoadInt32(&runner.currentClientsNum) != 10 { - t.Error("Number of goroutines mismatches, expected: 10, current count:", atomic.LoadInt32(&runner.currentClientsNum)) + if runner.controller.getCurrentClientsNum() != 10 { + t.Error("Number of goroutines mismatches, expected: 10, current count:", runner.controller.getCurrentClientsNum()) } if runner.getState() != StateRunning { t.Error("State of runner is not running after spawn, got", runner.getState()) @@ -440,13 +430,17 @@ func TestOnMessage(t *testing.T) { if msg.Type != "client_stopped" { t.Error("Runner should send client_stopped message, got", msg.Type) } + + // quit + runner.onMessage(newGenericMessage("quit", nil, runner.nodeID)) } func TestClientListener(t *testing.T) { runner := newMasterRunner("localhost", 5557) defer runner.close() runner.updateState(StateInit) - runner.spawn.setSpawn(10, 10) + runner.setSpawnCount(10) + runner.setSpawnRate(10) go runner.clientListener() runner.server.clients.Store("testID1", &WorkerNode{ID: "testID1", Heartbeat: 3}) runner.server.clients.Store("testID2", &WorkerNode{ID: "testID2", Heartbeat: 3}) @@ -496,7 +490,8 @@ func TestHeartbeatWorker(t *testing.T) { runner := newMasterRunner("localhost", 5557) defer 
runner.close() runner.updateState(StateInit) - runner.spawn.setSpawn(10, 10) + runner.setSpawnCount(10) + runner.setSpawnRate(10) runner.server.clients.Store("testID1", &WorkerNode{ID: "testID1", Heartbeat: 1, State: StateInit}) runner.server.clients.Store("testID2", &WorkerNode{ID: "testID2", Heartbeat: 1, State: StateInit}) go runner.clientListener() diff --git a/hrp/server.go b/hrp/server.go index dc51e8c4..68db4815 100644 --- a/hrp/server.go +++ b/hrp/server.go @@ -198,9 +198,16 @@ func (api *apiHandler) Stop(w http.ResponseWriter, r *http.Request) { } } - api.boomer.Stop() - resp := &CommonResponseBody{ - ServerStatus: EnumAPIResponseSuccess, + var resp *CommonResponseBody + err := api.boomer.Stop() + if err != nil { + resp = &CommonResponseBody{ + ServerStatus: EnumAPIResponseStopError(err.Error()), + } + } else { + resp = &CommonResponseBody{ + ServerStatus: EnumAPIResponseSuccess, + } } body, _ := json.Marshal(resp) writeJSON(w, body, http.StatusOK) From 58de0852b8978e053a0cb5d394814a3d2579c5c3 Mon Sep 17 00:00:00 2001 From: xucong053 Date: Mon, 23 May 2022 16:45:47 +0800 Subject: [PATCH 05/31] feat: support dispatch profile to worker --- hrp/boomer.go | 64 +++++-- hrp/cmd/boom.go | 65 ++----- hrp/internal/boomer/boomer.go | 168 ++++++++++++------ hrp/internal/boomer/client_grpc.go | 24 ++- hrp/internal/boomer/message.go | 24 +-- hrp/internal/boomer/runner.go | 89 +++++----- hrp/internal/boomer/server_grpc.go | 9 +- hrp/internal/grpc/messager/messager.pb.go | 63 ++++--- .../grpc/messager/messager_grpc.pb.go | 31 +--- hrp/internal/grpc/proto/messager.proto | 9 +- hrp/server.go | 91 +++++++--- 11 files changed, 367 insertions(+), 270 deletions(-) diff --git a/hrp/boomer.go b/hrp/boomer.go index 57d3695f..5bb37f4c 100644 --- a/hrp/boomer.go +++ b/hrp/boomer.go @@ -12,7 +12,7 @@ import ( "github.com/rs/zerolog/log" ) -func NewStandaloneBoomer(spawnCount int, spawnRate float64) *HRPBoomer { +func NewStandaloneBoomer(spawnCount int64, spawnRate float64) 
*HRPBoomer { b := &HRPBoomer{ Boomer: boomer.NewStandaloneBoomer(spawnCount, spawnRate), pluginsMutex: new(sync.RWMutex), @@ -50,6 +50,27 @@ type HRPBoomer struct { pluginsMutex *sync.RWMutex // avoid data race } +func (b *HRPBoomer) InitBoomer() { + // init output + if !b.GetProfile().DisableConsoleOutput { + b.AddOutput(boomer.NewConsoleOutput()) + } + if b.GetProfile().PrometheusPushgatewayURL != "" { + b.AddOutput(boomer.NewPrometheusPusherOutput(b.GetProfile().PrometheusPushgatewayURL, "hrp", b.GetMode())) + } + b.SetSpawnCount(b.GetProfile().SpawnCount) + b.SetSpawnRate(b.GetProfile().SpawnRate) + if b.GetProfile().LoopCount > 0 { + b.SetLoopCount(b.GetProfile().LoopCount) + } + b.SetRateLimiter(b.GetProfile().MaxRPS, b.GetProfile().RequestIncreaseRate) + b.SetDisableKeepAlive(b.GetProfile().DisableKeepalive) + b.SetDisableCompression(b.GetProfile().DisableCompression) + b.SetClientTransport() + b.EnableCPUProfile(b.GetProfile().CPUProfile, b.GetProfile().CPUProfileDuration) + b.EnableMemoryProfile(b.GetProfile().MemoryProfile, b.GetProfile().MemoryProfileDuration) +} + func (b *HRPBoomer) SetClientTransport() *HRPBoomer { // set client transport for high concurrency load testing b.hrpRunner.SetClientTransport(b.GetSpawnCount(), b.GetDisableKeepAlive(), b.GetDisableCompression()) @@ -104,7 +125,7 @@ func (b *HRPBoomer) ConvertTestCasesToTasks(testcases ...ITestCase) (taskSlice [ return taskSlice } -func (b *HRPBoomer) LoopTestCases() { +func (b *HRPBoomer) PollTestCases() { for { select { case <-b.Boomer.ParseTestCasesChan(): @@ -167,9 +188,7 @@ func (b *HRPBoomer) Quit() { b.Boomer.Quit() } -func (b *HRPBoomer) handleTasks(tcs []byte) { - //Todo: 过滤掉已经传输过的task - testCases := b.BytesToTestCases(tcs) +func (b *HRPBoomer) runTasks(testCases []*TCase, profile *boomer.Profile) { var testcases []ITestCase for _, tc := range testCases { tesecase, err := tc.toTestCase() @@ -178,22 +197,37 @@ func (b *HRPBoomer) handleTasks(tcs []byte) { } testcases = 
append(testcases, tesecase) } - log.Info().Interface("testcases", testcases).Msg("loop tasks successful") - if b.Boomer.GetState() == boomer.StateRunning || b.Boomer.GetState() == boomer.StateSpawning { - b.Boomer.SetTasks(b.ConvertTestCasesToTasks(testcases...)...) - } else { - b.Run(testcases...) - } + b.SetProfile(profile) + b.InitBoomer() + log.Info().Interface("testcases", testcases).Interface("profile", profile).Msg("run tasks successful") + b.Run(testcases...) } -func (b *HRPBoomer) LoopTasks() { +func (b *HRPBoomer) rebalanceTasks(profile *boomer.Profile) { + b.SetProfile(profile) + b.SetSpawnCount(b.GetProfile().SpawnCount) + b.SetSpawnRate(b.GetProfile().SpawnRate) + b.GetRebalanceChan() <- true + log.Info().Interface("profile", profile).Msg("rebalance tasks successful") +} + +func (b *HRPBoomer) PollTasks() { for { select { - case tcs := <-b.Boomer.GetTestCaseBytesChan(): - if len(b.Boomer.GetTestCaseBytesChan()) > 0 { + case tasks := <-b.Boomer.GetTasksChan(): + // 清理过时测试用例任务 + if len(b.Boomer.GetTasksChan()) > 0 { continue } - go b.handleTasks(tcs) + profile := boomer.BytesToProfile(tasks.Profile) + //Todo: 过滤掉已经传输过的task + if tasks.Tasks != nil { + testCases := b.BytesToTestCases(tasks.Tasks) + go b.runTasks(testCases, profile) + } else { + go b.rebalanceTasks(profile) + } + case <-b.Boomer.GetCloseChan(): return } diff --git a/hrp/cmd/boom.go b/hrp/cmd/boom.go index a0be404c..7fe7c93a 100644 --- a/hrp/cmd/boom.go +++ b/hrp/cmd/boom.go @@ -38,7 +38,7 @@ var boomCmd = &cobra.Command{ // if set profile, the priority is higher than the other commands if boomArgs.profile != "" { - err := builtin.LoadFile(boomArgs.profile, &boomArgs) + err := builtin.LoadFile(boomArgs.profile, &boomArgs.profile) if err != nil { log.Error().Err(err).Msg("failed to load profile") os.Exit(1) @@ -54,16 +54,9 @@ var boomCmd = &cobra.Command{ } else { hrpBoomer = hrp.NewStandaloneBoomer(boomArgs.SpawnCount, boomArgs.SpawnRate) } + hrpBoomer.SetProfile(&boomArgs.Profile) 
hrpBoomer.EnableGracefulQuit() - // init output - if !boomArgs.DisableConsoleOutput { - hrpBoomer.AddOutput(boomer.NewConsoleOutput()) - } - if boomArgs.PrometheusPushgatewayURL != "" { - hrpBoomer.AddOutput(boomer.NewPrometheusPusherOutput(boomArgs.PrometheusPushgatewayURL, "hrp", hrpBoomer.GetMode())) - } - // run boomer switch hrpBoomer.GetMode() { case "master": @@ -71,61 +64,41 @@ var boomCmd = &cobra.Command{ if boomArgs.autoStart { hrpBoomer.SetAutoStart() hrpBoomer.SetExpectWorkers(boomArgs.expectWorkers, boomArgs.expectWorkersMaxWait) - hrpBoomer.SetSpawnCount(int64(boomArgs.SpawnCount)) + hrpBoomer.SetSpawnCount(boomArgs.SpawnCount) hrpBoomer.SetSpawnRate(boomArgs.SpawnRate) } go hrpBoomer.StartServer() go hrpBoomer.RunMaster() - hrpBoomer.LoopTestCases() + hrpBoomer.PollTestCases() case "worker": if boomArgs.ignoreQuit { hrpBoomer.SetIgnoreQuit() } go hrpBoomer.RunWorker() - hrpBoomer.LoopTasks() + hrpBoomer.PollTasks() case "standalone": - if boomArgs.LoopCount > 0 { - hrpBoomer.SetLoopCount(boomArgs.LoopCount) - } - hrpBoomer.SetRateLimiter(boomArgs.MaxRPS, boomArgs.RequestIncreaseRate) - hrpBoomer.SetDisableKeepAlive(boomArgs.DisableKeepalive) - hrpBoomer.SetDisableCompression(boomArgs.DisableCompression) - hrpBoomer.SetClientTransport() if venv != "" { hrpBoomer.SetPython3Venv(venv) } - hrpBoomer.EnableCPUProfile(boomArgs.CPUProfile, boomArgs.CPUProfileDuration) - hrpBoomer.EnableMemoryProfile(boomArgs.MemoryProfile, boomArgs.MemoryProfileDuration) + hrpBoomer.InitBoomer() hrpBoomer.Run(paths...) 
} }, } type BoomArgs struct { - SpawnCount int `json:"spawn-count,omitempty" yaml:"spawn-count,omitempty"` - SpawnRate float64 `json:"spawn-rate,omitempty" yaml:"spawn-rate,omitempty"` - MaxRPS int64 `json:"max-rps,omitempty" yaml:"max-rps,omitempty"` - LoopCount int64 `json:"loop-count,omitempty" yaml:"loop-count,omitempty"` - RequestIncreaseRate string `json:"request-increase-rate,omitempty" yaml:"request-increase-rate,omitempty"` - MemoryProfile string `json:"memory-profile,omitempty" yaml:"memory-profile,omitempty"` - MemoryProfileDuration time.Duration `json:"memory-profile-duration" yaml:"memory-profile-duration"` - CPUProfile string `json:"cpu-profile,omitempty" yaml:"cpu-profile,omitempty"` - CPUProfileDuration time.Duration `json:"cpu-profile-duration,omitempty" yaml:"cpu-profile-duration,omitempty"` - PrometheusPushgatewayURL string `json:"prometheus-gateway,omitempty" yaml:"prometheus-gateway,omitempty"` - DisableConsoleOutput bool `json:"disable-console-output,omitempty" yaml:"disable-console-output,omitempty"` - DisableCompression bool `json:"disable-compression,omitempty" yaml:"disable-compression,omitempty"` - DisableKeepalive bool `json:"disable-keepalive,omitempty" yaml:"disable-keepalive,omitempty"` - profile string - master bool - worker bool - ignoreQuit bool - masterHost string - masterPort int - masterBindHost string - masterBindPort int - autoStart bool - expectWorkers int - expectWorkersMaxWait int + boomer.Profile + profile string + master bool + worker bool + ignoreQuit bool + masterHost string + masterPort int + masterBindHost string + masterBindPort int + autoStart bool + expectWorkers int + expectWorkersMaxWait int } var boomArgs BoomArgs @@ -135,7 +108,7 @@ func init() { boomCmd.Flags().Int64Var(&boomArgs.MaxRPS, "max-rps", 0, "Max RPS that boomer can generate, disabled by default.") boomCmd.Flags().StringVar(&boomArgs.RequestIncreaseRate, "request-increase-rate", "-1", "Request increase rate, disabled by default.") - 
boomCmd.Flags().IntVar(&boomArgs.SpawnCount, "spawn-count", 1, "The number of users to spawn for load testing") + boomCmd.Flags().Int64Var(&boomArgs.SpawnCount, "spawn-count", 1, "The number of users to spawn for load testing") boomCmd.Flags().Float64Var(&boomArgs.SpawnRate, "spawn-rate", 1, "The rate for spawning users") boomCmd.Flags().Int64Var(&boomArgs.LoopCount, "loop-count", -1, "The specify running cycles for load testing") boomCmd.Flags().StringVar(&boomArgs.MemoryProfile, "mem-profile", "", "Enable memory profiling.") diff --git a/hrp/internal/boomer/boomer.go b/hrp/internal/boomer/boomer.go index 16cb0e5c..ed314249 100644 --- a/hrp/internal/boomer/boomer.go +++ b/hrp/internal/boomer/boomer.go @@ -1,16 +1,13 @@ package boomer import ( + "github.com/httprunner/httprunner/v4/hrp/internal/json" "math" "os" "os/signal" - "strconv" - "strings" "syscall" "time" - "github.com/httprunner/httprunner/v4/hrp/internal/builtin" - "github.com/pkg/errors" "github.com/rs/zerolog/log" ) @@ -49,6 +46,58 @@ type Boomer struct { disableCompression bool } +type Profile struct { + SpawnCount int64 `json:"spawn-count,omitempty" yaml:"spawn-count,omitempty" mapstructure:"spawn-count,omitempty"` + SpawnRate float64 `json:"spawn-rate,omitempty" yaml:"spawn-rate,omitempty" mapstructure:"spawn-rate,omitempty"` + MaxRPS int64 `json:"max-rps,omitempty" yaml:"max-rps,omitempty" mapstructure:"max-rps,omitempty"` + LoopCount int64 `json:"loop-count,omitempty" yaml:"loop-count,omitempty" mapstructure:"loop-count,omitempty"` + RequestIncreaseRate string `json:"request-increase-rate,omitempty" yaml:"request-increase-rate,omitempty" mapstructure:"request-increase-rate,omitempty"` + MemoryProfile string `json:"memory-profile,omitempty" yaml:"memory-profile,omitempty" mapstructure:"memory-profile,omitempty"` + MemoryProfileDuration time.Duration `json:"memory-profile-duration,omitempty" yaml:"memory-profile-duration,omitempty" mapstructure:"memory-profile-duration,omitempty"` + CPUProfile 
string `json:"cpu-profile,omitempty" yaml:"cpu-profile,omitempty" mapstructure:"cpu-profile,omitempty"` + CPUProfileDuration time.Duration `json:"cpu-profile-duration,omitempty" yaml:"cpu-profile-duration,omitempty" mapstructure:"cpu-profile-duration,omitempty"` + PrometheusPushgatewayURL string `json:"prometheus-gateway,omitempty" yaml:"prometheus-gateway,omitempty" mapstructure:"prometheus-gateway,omitempty"` + DisableConsoleOutput bool `json:"disable-console-output,omitempty" yaml:"disable-console-output,omitempty" mapstructure:"disable-console-output,omitempty"` + DisableCompression bool `json:"disable-compression,omitempty" yaml:"disable-compression,omitempty" mapstructure:"disable-compression,omitempty"` + DisableKeepalive bool `json:"disable-keepalive,omitempty" yaml:"disable-keepalive,omitempty" mapstructure:"disable-keepalive,omitempty"` +} + +func (b *Boomer) GetProfile() *Profile { + switch b.mode { + case DistributedMasterMode: + return b.masterRunner.profile + case DistributedWorkerMode: + return b.workerRunner.profile + default: + return b.localRunner.profile + } +} + +func (b *Boomer) SetProfile(profile *Profile) { + switch b.mode { + case DistributedMasterMode: + b.masterRunner.profile = profile + case DistributedWorkerMode: + b.workerRunner.profile = profile + default: + b.localRunner.profile = profile + } +} + +func (p *Profile) dispatch(workers int64) *Profile { + workerProfile := *p + if p.SpawnCount > 0 { + workerProfile.SpawnCount = p.SpawnCount / workers + } + if p.SpawnRate > 0 { + workerProfile.SpawnRate = p.SpawnRate / float64(workers) + } + if p.MaxRPS > 0 { + workerProfile.MaxRPS = p.MaxRPS / workers + } + return &workerProfile +} + // SetMode only accepts boomer.DistributedMasterMode、boomer.DistributedWorkerMode and boomer.StandaloneMode. func (b *Boomer) SetMode(mode Mode) { switch mode { @@ -79,7 +128,7 @@ func (b *Boomer) GetMode() string { } // NewStandaloneBoomer returns a new Boomer, which can run without master. 
-func NewStandaloneBoomer(spawnCount int, spawnRate float64) *Boomer { +func NewStandaloneBoomer(spawnCount int64, spawnRate float64) *Boomer { return &Boomer{ mode: StandaloneMode, localRunner: newLocalRunner(spawnCount, spawnRate), @@ -125,10 +174,56 @@ func (b *Boomer) GetTestCaseBytesChan() chan []byte { switch b.mode { case DistributedMasterMode: return b.masterRunner.testCaseBytes - case DistributedWorkerMode: - return b.workerRunner.testCaseBytes + default: + return nil + } +} + +func ProfileToBytes(profile *Profile) []byte { + profileBytes, err := json.Marshal(profile) + if err != nil { + log.Error().Err(err).Msg("failed to marshal testcases") + return nil + } + return profileBytes +} + +func BytesToProfile(profileBytes []byte) *Profile { + var profile *Profile + err := json.Unmarshal(profileBytes, &profile) + if err != nil { + log.Error().Err(err).Msg("failed to unmarshal testcases") + } + return profile +} + +// GetProfileBytesChan gets profile bytes chan +func (b *Boomer) GetProfileBytesChan() chan []byte { + switch b.mode { + case DistributedMasterMode: + return b.masterRunner.profileBytes + default: + return nil + } +} + +// GetTasksChan gets profile bytes chan +func (b *Boomer) GetTasksChan() chan *profileMessage { + switch b.mode { + case DistributedWorkerMode: + return b.workerRunner.tasksChan + default: + return nil + } +} + +func (b *Boomer) GetRebalanceChan() chan bool { + switch b.mode { + case DistributedWorkerMode: + return b.workerRunner.rebalance + default: + return nil } - return nil } func (b *Boomer) SetTestCasesPath(paths []string) { @@ -390,66 +485,25 @@ func (b *Boomer) RecordFailure(requestType, name string, responseTime int64, exc } // Start starts to run -func (b *Boomer) Start(Args map[string]interface{}) error { +func (b *Boomer) Start(Args *Profile) error { if b.masterRunner.isStarted() { return errors.New("already started") } - spawnCount, ok := Args["spawn_count"] - if ok { - v, err := strconv.Atoi(spawnCount.(string)) - if err 
!= nil { - log.Error().Err(err).Msg("spawn_count sets error") - return err - } - b.SetSpawnCount(int64(v)) - } else { - return errors.New("spawn count error") - } - spawnRate, ok := Args["spawn_rate"] - if ok { - v, err := builtin.Interface2Float64(spawnRate) - if err != nil { - log.Error().Err(err).Msg("spawn_count sets error") - return err - } - b.SetSpawnRate(v) - } else { - b.SetSpawnRate(float64(b.GetSpawnCount())) - } - path, ok := Args["path"].(string) - if ok { - paths := strings.Split(path, ",") - b.SetTestCasesPath(paths) - } else { - return errors.New("testcase path error") - } + b.SetSpawnCount(Args.SpawnCount) + b.SetSpawnRate(Args.SpawnRate) + b.SetProfile(Args) err := b.masterRunner.start() return err } // ReBalance starts to rebalance load test -func (b *Boomer) ReBalance(Args map[string]interface{}) error { +func (b *Boomer) ReBalance(Args *Profile) error { if !b.masterRunner.isStarted() { return errors.New("no start") } - spawnCount, ok := Args["spawn_count"] - if ok { - v, err := strconv.Atoi(spawnCount.(string)) - if err != nil { - log.Error().Err(err).Msg("spawn_count sets error") - return err - } - b.SetSpawnCount(int64(v)) - } - spawnRate, ok := Args["spawn_rate"] - if ok { - v, err := builtin.Interface2Float64(spawnRate) - if err != nil { - log.Error().Err(err).Msg("spawn_count sets error") - return err - } - b.SetSpawnRate(v) - } + b.SetSpawnCount(Args.SpawnCount) + b.SetSpawnRate(Args.SpawnRate) + b.SetProfile(Args) err := b.masterRunner.rebalance() if err != nil { log.Error().Err(err).Msg("failed to rebalance") diff --git a/hrp/internal/boomer/client_grpc.go b/hrp/internal/boomer/client_grpc.go index 5fa33cc1..8082074d 100644 --- a/hrp/internal/boomer/client_grpc.go +++ b/hrp/internal/boomer/client_grpc.go @@ -80,6 +80,9 @@ func (c *grpcClient) connect() (err error) { return err } + go c.recv() + go c.send() + biStream, err := messager.NewMessageClient(c.config.conn).BidirectionalStreamingMessage(c.config.ctx) if err != nil { 
log.Error().Err(err).Msg("call bidirectional streaming message err") @@ -87,19 +90,11 @@ func (c *grpcClient) connect() (err error) { } c.config.setBiStreamClient(biStream) log.Info().Msg(fmt.Sprintf("Boomer is connected to master(%s) press Ctrl+c to quit.\n", addr)) - go c.recv() - go c.send() return nil } func (c *grpcClient) reConnect() (err error) { - addr := fmt.Sprintf("%v:%v", c.masterHost, c.masterPort) - c.config.conn, err = grpc.Dial(addr, grpc.WithInsecure()) - if err != nil { - return - } - biStream, err := messager.NewMessageClient(c.config.conn).BidirectionalStreamingMessage(c.config.ctx) if err != nil { return @@ -111,7 +106,7 @@ func (c *grpcClient) reConnect() (err error) { //// tell master, I'm ready //log.Info().Msg("send client ready signal") //c.sendChannel() <- newClientReadyMessageToMaster(c.identity) - log.Info().Msg(fmt.Sprintf("Boomer is reConnected to master(%s) press Ctrl+c to quit.\n", addr)) + log.Info().Msg(fmt.Sprintf("Boomer is reConnected to master press Ctrl+c to quit.\n")) return } @@ -136,6 +131,7 @@ func (c *grpcClient) recv() { return default: if c.config.getBiStreamClient() == nil { + time.Sleep(1 * time.Second) continue } msg, err := c.config.getBiStreamClient().Recv() @@ -158,10 +154,11 @@ func (c *grpcClient) recv() { } c.fromMaster <- &genericMessage{ - Type: msg.Type, - Data: msg.Data, - NodeID: msg.NodeID, - Tasks: msg.Tasks, + Type: msg.Type, + Profile: msg.Profile, + Data: msg.Data, + NodeID: msg.NodeID, + Tasks: msg.Tasks, } log.Info(). @@ -204,6 +201,7 @@ func (c *grpcClient) sendMessage(msg *genericMessage) { Interface("data", msg.Data). 
Msg("send data to server") if c.config.getBiStreamClient() == nil { + atomic.AddInt32(&c.failCount, 1) return } err := c.config.getBiStreamClient().Send(&messager.StreamRequest{Type: msg.Type, Data: msg.Data, NodeID: msg.NodeID}) diff --git a/hrp/internal/boomer/message.go b/hrp/internal/boomer/message.go index 93b9a0b3..69819854 100644 --- a/hrp/internal/boomer/message.go +++ b/hrp/internal/boomer/message.go @@ -10,14 +10,17 @@ const ( typeException = "exception" ) -type message interface { +type genericMessage struct { + Type string `json:"type,omitempty"` + Profile []byte `json:"profile,omitempty"` + Data map[string]int64 `json:"data,omitempty"` + NodeID string `json:"node_id,omitempty"` + Tasks []byte `json:"tasks,omitempty"` } -type genericMessage struct { - Type string `json:"type,omitempty"` - Data map[string]int64 `json:"data,omitempty"` - NodeID string `json:"node_id,omitempty"` - Tasks []byte `json:"tasks,omitempty"` +type profileMessage struct { + Profile []byte `json:"profile,omitempty"` + Tasks []byte `json:"tasks,omitempty"` } func newGenericMessage(t string, data map[string]int64, nodeID string) (msg *genericMessage) { @@ -35,11 +38,12 @@ func newQuitMessage(nodeID string) (msg *genericMessage) { } } -func newSpawnMessageToWorker(t string, data map[string]int64, tasks []byte) (msg *genericMessage) { +func newMessageToWorker(t string, profile []byte, data map[string]int64, tasks []byte) (msg *genericMessage) { return &genericMessage{ - Type: t, - Data: data, - Tasks: tasks, + Type: t, + Profile: profile, + Data: data, + Tasks: tasks, } } diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 38c37b7c..61506db2 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -270,6 +270,9 @@ func (r *runner) outputOnEvent(data map[string]interface{}) { } func (r *runner) outputOnStop() { + defer func() { + r.outputs = make([]Output, 0) + }() size := len(r.outputs) if size == 0 { return @@ -332,6 +335,7 @@ func 
(r *runner) reset() { } func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan bool, spawnCompleteFunc func()) { + r.updateState(StateSpawning) log.Info(). Int64("spawnCount", spawnCount). Float64("spawnRate", spawnRate). @@ -339,7 +343,6 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo r.controller.setSpawn(spawnCount, spawnRate) - r.updateState(StateSpawning) for { select { case <-quit: @@ -510,14 +513,16 @@ func (r *runner) isStarted() bool { type localRunner struct { runner + + profile *Profile } -func newLocalRunner(spawnCount int, spawnRate float64) *localRunner { +func newLocalRunner(spawnCount int64, spawnRate float64) *localRunner { return &localRunner{ runner: runner{ state: StateInit, stats: newRequestStats(), - spawnCount: int64(spawnCount), + spawnCount: spawnCount, spawnRate: spawnRate, controller: &Controller{}, outputs: make([]Output, 0), @@ -535,14 +540,13 @@ func (r *localRunner) start() { if r.rateLimitEnabled { r.rateLimiter.Start() } - - r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stopChan, nil) - // output setup r.outputOnStart() + go r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stopChan, nil) + // start stats report - go r.runner.statsStart() + go r.statsStart() // stop <-r.stopChan @@ -582,10 +586,9 @@ type workerRunner struct { masterPort int client *grpcClient - // this channel will start worker for spawning. 
- spawnStartChan chan bool - // get testcase from master - testCaseBytes chan []byte + profile *Profile + + tasksChan chan *profileMessage ignoreQuit bool } @@ -594,15 +597,15 @@ func newWorkerRunner(masterHost string, masterPort int) (r *workerRunner) { r = &workerRunner{ runner: runner{ stats: newRequestStats(), + outputs: make([]Output, 0), controller: &Controller{}, closeChan: make(chan bool), once: &sync.Once{}, }, - masterHost: masterHost, - masterPort: masterPort, - nodeID: getNodeID(), - spawnStartChan: make(chan bool), - testCaseBytes: make(chan []byte, 10), + masterHost: masterHost, + masterPort: masterPort, + nodeID: getNodeID(), + tasksChan: make(chan *profileMessage, 10), } return r } @@ -615,30 +618,26 @@ func (r *workerRunner) spawnComplete() { func (r *workerRunner) onSpawnMessage(msg *genericMessage) { r.client.sendChannel() <- newGenericMessage("spawning", nil, r.nodeID) - spawnCount, ok := msg.Data["spawn_count"] - if ok { - r.setSpawnCount(spawnCount) + if msg.Profile == nil { + log.Error().Msg("miss profile") } - spawnRate, ok := msg.Data["spawn_rate"] - if ok { - r.setSpawnRate(float64(spawnRate)) + if msg.Tasks == nil { + log.Error().Msg("miss tasks") } - if msg.Tasks != nil { - r.testCaseBytes <- msg.Tasks + r.tasksChan <- &profileMessage{ + Profile: msg.Profile, + Tasks: msg.Tasks, } log.Info().Msg("on spawn message successful") } func (r *workerRunner) onRebalanceMessage(msg *genericMessage) { - spawnCount, ok := msg.Data["spawn_count"] - if ok { - r.setSpawnCount(spawnCount) + if msg.Profile == nil { + log.Error().Msg("miss profile") } - spawnRate, ok := msg.Data["spawn_rate"] - if ok { - r.setSpawnRate(float64(spawnRate)) + r.tasksChan <- &profileMessage{ + Profile: msg.Profile, } - r.rebalance <- true log.Info().Msg("on rebalance message successful") } @@ -705,7 +704,6 @@ func (r *workerRunner) run() { err := r.client.connect() if err != nil { log.Printf("Failed to connect to master(%s:%d) with error %v\n", r.masterHost, r.masterPort, 
err) - return } // listen to master @@ -758,7 +756,7 @@ func (r *workerRunner) start() { r.once.Do(r.outputOnStart) - r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stopChan, r.spawnComplete) + go r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stopChan, r.spawnComplete) // start stats report go r.statsStart() @@ -783,7 +781,7 @@ func (r *workerRunner) close() { return } // waiting report finished - time.Sleep(3 * time.Second) + time.Sleep(1 * time.Second) close(r.closeChan) var ticker = time.NewTicker(1 * time.Second) if r.client != nil { @@ -811,8 +809,12 @@ type masterRunner struct { expectWorkers int expectWorkersMaxWait int + profile *Profile + parseTestCasesChan chan bool testCaseBytes chan []byte + // set profile to worker + profileBytes chan []byte } func newMasterRunner(masterBindHost string, masterBindPort int) *masterRunner { @@ -990,20 +992,17 @@ func (r *masterRunner) start() error { if numWorkers == 0 { return errors.New("current workers: 0") } - workerSpawnRate := r.getSpawnRate() / float64(numWorkers) - workerSpawnCount := r.getSpawnCount() / int64(numWorkers) log.Info().Msg("send spawn data to worker") r.updateState(StateSpawning) - // waitting to fetch testcase + // fetching testcase testcase, err := r.fetchTestCase() if err != nil { return err } - r.server.sendChannel() <- newSpawnMessageToWorker("spawn", map[string]int64{ - "spawn_count": workerSpawnCount, - "spawn_rate": int64(workerSpawnRate), - }, testcase) + profile := r.profile.dispatch(int64(numWorkers)) + + r.server.sendChannel() <- newMessageToWorker("spawn", ProfileToBytes(profile), nil, testcase) println("send spawn data to worker successful") log.Info().Msg("send spawn data to worker successful") return nil @@ -1014,13 +1013,9 @@ func (r *masterRunner) rebalance() error { if numWorkers == 0 { return errors.New("current workers: 0") } - workerSpawnRate := r.getSpawnRate() / float64(numWorkers) - workerSpawnCount := r.getSpawnCount() / int64(numWorkers) + profile := 
r.profile.dispatch(int64(numWorkers)) - r.server.sendChannel() <- newSpawnMessageToWorker("rebalance", map[string]int64{ - "spawn_count": workerSpawnCount, - "spawn_rate": int64(workerSpawnRate), - }, nil) + r.server.sendChannel() <- newMessageToWorker("rebalance", ProfileToBytes(profile), nil, nil) println("send rebalance data to worker successful") return nil } diff --git a/hrp/internal/boomer/server_grpc.go b/hrp/internal/boomer/server_grpc.go index 7eb92104..bcd85001 100644 --- a/hrp/internal/boomer/server_grpc.go +++ b/hrp/internal/boomer/server_grpc.go @@ -312,10 +312,11 @@ func (s *grpcServer) sendMessage(msg *genericMessage) { } err := workerInfo.messenger.Send( &messager.StreamResponse{ - Type: msg.Type, - Data: msg.Data, - NodeID: workerInfo.ID, - Tasks: msg.Tasks}, + Type: msg.Type, + Profile: msg.Profile, + Data: msg.Data, + NodeID: workerInfo.ID, + Tasks: msg.Tasks}, ) switch err { case nil: diff --git a/hrp/internal/grpc/messager/messager.pb.go b/hrp/internal/grpc/messager/messager.pb.go index bb389289..a9d2efde 100644 --- a/hrp/internal/grpc/messager/messager.pb.go +++ b/hrp/internal/grpc/messager/messager.pb.go @@ -7,6 +7,8 @@ package messager import ( + context "context" + grpc "google.golang.org/grpc" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -88,10 +90,11 @@ type StreamResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Data map[string]int64 `protobuf:"bytes,2,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - NodeID string `protobuf:"bytes,3,opt,name=NodeID,proto3" json:"NodeID,omitempty"` - Tasks []byte `protobuf:"bytes,4,opt,name=tasks,proto3" json:"tasks,omitempty"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + 
Profile []byte `protobuf:"bytes,2,opt,name=profile,proto3" json:"profile,omitempty"` + Data map[string]int64 `protobuf:"bytes,3,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + NodeID string `protobuf:"bytes,4,opt,name=NodeID,proto3" json:"NodeID,omitempty"` + Tasks []byte `protobuf:"bytes,5,opt,name=tasks,proto3" json:"tasks,omitempty"` } func (x *StreamResponse) Reset() { @@ -133,6 +136,13 @@ func (x *StreamResponse) GetType() string { return "" } +func (x *StreamResponse) GetProfile() []byte { + if x != nil { + return x.Profile + } + return nil +} + func (x *StreamResponse) GetData() map[string]int64 { if x != nil { return x.Data @@ -170,27 +180,28 @@ var file_grpc_proto_messager_proto_rawDesc = []byte{ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0xc2, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, + 0x01, 0x22, 0xdc, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, - 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, - 0x18, 0x04, 0x20, 0x01, 
0x28, 0x0c, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x1a, 0x37, 0x0a, - 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0x61, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x12, 0x56, 0x0a, 0x1d, 0x42, 0x69, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x12, 0x16, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x0f, 0x5a, 0x0d, 0x67, 0x72, 0x70, - 0x63, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, + 0x65, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, + 0x44, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x1a, 
0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x32, 0x61, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x56, 0x0a, 0x1d, 0x42, + 0x69, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x2e, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, + 0x01, 0x30, 0x01, 0x42, 0x0f, 0x5a, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -274,3 +285,7 @@ func file_grpc_proto_messager_proto_init() { file_grpc_proto_messager_proto_goTypes = nil file_grpc_proto_messager_proto_depIdxs = nil } + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface diff --git a/hrp/internal/grpc/messager/messager_grpc.pb.go b/hrp/internal/grpc/messager/messager_grpc.pb.go index 8237aa3c..d59a25e8 100644 --- a/hrp/internal/grpc/messager/messager_grpc.pb.go +++ b/hrp/internal/grpc/messager/messager_grpc.pb.go @@ -15,12 +15,11 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +const _ = grpc.SupportPackageIsVersion6 // MessageClient is the client API for Message service. 
// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type MessageClient interface { BidirectionalStreamingMessage(ctx context.Context, opts ...grpc.CallOption) (Message_BidirectionalStreamingMessageClient, error) } @@ -34,7 +33,7 @@ func NewMessageClient(cc grpc.ClientConnInterface) MessageClient { } func (c *messageClient) BidirectionalStreamingMessage(ctx context.Context, opts ...grpc.CallOption) (Message_BidirectionalStreamingMessageClient, error) { - stream, err := c.cc.NewStream(ctx, &Message_ServiceDesc.Streams[0], "/message.Message/BidirectionalStreamingMessage", opts...) + stream, err := c.cc.NewStream(ctx, &_Message_serviceDesc.Streams[0], "/message.Message/BidirectionalStreamingMessage", opts...) if err != nil { return nil, err } @@ -65,31 +64,20 @@ func (x *messageBidirectionalStreamingMessageClient) Recv() (*StreamResponse, er } // MessageServer is the server API for Message service. -// All implementations must embed UnimplementedMessageServer -// for forward compatibility type MessageServer interface { BidirectionalStreamingMessage(Message_BidirectionalStreamingMessageServer) error - mustEmbedUnimplementedMessageServer() } -// UnimplementedMessageServer must be embedded to have forward compatible implementations. +// UnimplementedMessageServer can be embedded to have forward compatible implementations. 
type UnimplementedMessageServer struct { } -func (UnimplementedMessageServer) BidirectionalStreamingMessage(Message_BidirectionalStreamingMessageServer) error { +func (*UnimplementedMessageServer) BidirectionalStreamingMessage(Message_BidirectionalStreamingMessageServer) error { return status.Errorf(codes.Unimplemented, "method BidirectionalStreamingMessage not implemented") } -func (UnimplementedMessageServer) mustEmbedUnimplementedMessageServer() {} -// UnsafeMessageServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to MessageServer will -// result in compilation errors. -type UnsafeMessageServer interface { - mustEmbedUnimplementedMessageServer() -} - -func RegisterMessageServer(s grpc.ServiceRegistrar, srv MessageServer) { - s.RegisterService(&Message_ServiceDesc, srv) +func RegisterMessageServer(s *grpc.Server, srv MessageServer) { + s.RegisterService(&_Message_serviceDesc, srv) } func _Message_BidirectionalStreamingMessage_Handler(srv interface{}, stream grpc.ServerStream) error { @@ -118,10 +106,7 @@ func (x *messageBidirectionalStreamingMessageServer) Recv() (*StreamRequest, err return m, nil } -// Message_ServiceDesc is the grpc.ServiceDesc for Message service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Message_ServiceDesc = grpc.ServiceDesc{ +var _Message_serviceDesc = grpc.ServiceDesc{ ServiceName: "message.Message", HandlerType: (*MessageServer)(nil), Methods: []grpc.MethodDesc{}, diff --git a/hrp/internal/grpc/proto/messager.proto b/hrp/internal/grpc/proto/messager.proto index ef311339..fa6bfd49 100644 --- a/hrp/internal/grpc/proto/messager.proto +++ b/hrp/internal/grpc/proto/messager.proto @@ -10,13 +10,14 @@ service Message { message StreamRequest{ string type = 1; - map data = 2; + map data = 2; string NodeID = 3; } message StreamResponse{ string type = 1; - map data = 2; - string NodeID = 3; - bytes tasks = 4; + bytes profile = 2; + map data = 3; + string NodeID = 4; + bytes tasks = 5; } \ No newline at end of file diff --git a/hrp/server.go b/hrp/server.go index 68db4815..faacfb95 100644 --- a/hrp/server.go +++ b/hrp/server.go @@ -6,9 +6,11 @@ import ( "io/ioutil" "log" "net/http" + "strings" "github.com/httprunner/httprunner/v4/hrp/internal/boomer" "github.com/httprunner/httprunner/v4/hrp/internal/json" + "github.com/mitchellh/mapstructure" ) const jsonContentType = "application/json; encoding=utf-8" @@ -43,7 +45,7 @@ func parseBody(r *http.Request) (data map[string]interface{}, err error) { r.Body.Close() return nil, err } - err = json.Unmarshal(body, data) + err = json.Unmarshal(body, &data) if err != nil { return nil, err } @@ -62,10 +64,10 @@ func writeJSON(w http.ResponseWriter, body []byte, status int) { } type StartRequestBody struct { - Worker string `json:"worker"` // all - SpawnCount int64 `json:"spawn_count"` - SpawnRate int64 `json:"spawn_rate"` - TestCasePath string `json:"testcase_path"` + boomer.Profile `mapstructure:",squash"` + Worker string `json:"worker,omitempty" yaml:"worker,omitempty" mapstructure:"worker"` // all + TestCasePath string `json:"testcase-path" yaml:"testcase-path" 
mapstructure:"testcase-path"` + Other map[string]interface{} `mapstructure:",remain"` } type ServerCode int @@ -118,10 +120,9 @@ func CustomAPIResponse(errCode ServerCode, errMsg string) ServerStatus { } type RebalanceRequestBody struct { - Worker string `json:"worker"` - SpawnCount int64 `json:"spawn_count"` - SpawnRate int64 `json:"spawn_rate"` - TestCasePath string `json:"testcase_path"` + boomer.Profile `mapstructure:",squash"` + Worker string `json:"worker,omitempty" yaml:"worker,omitempty" mapstructure:"worker"` + Other map[string]interface{} `mapstructure:",remain"` } type StopRequestBody struct { @@ -167,15 +168,38 @@ func (api *apiHandler) Index(w http.ResponseWriter, r *http.Request) { } func (api *apiHandler) Start(w http.ResponseWriter, r *http.Request) { - data := map[string]interface{}{} - args := r.URL.Query() - for k, vs := range args { - for _, v := range vs { - data[k] = v - } - } var resp *CommonResponseBody - err := api.boomer.Start(data) + data, err := parseBody(r) + + req := StartRequestBody{ + Profile: *api.boomer.GetProfile(), + } + err = mapstructure.Decode(data, &req) + if len(req.Other) > 0 { + keys := make([]string, 0, len(req.Other)) + for k := range req.Other { + keys = append(keys, k) + } + resp = &CommonResponseBody{ + ServerStatus: EnumAPIResponseParamError(fmt.Sprintf("failed to recognize params: %v", keys)), + } + body, _ := json.Marshal(resp) + writeJSON(w, body, http.StatusOK) + return + } + if req.TestCasePath == "" { + resp = &CommonResponseBody{ + ServerStatus: EnumAPIResponseParamError(fmt.Sprint("missing testcases path")), + } + body, _ := json.Marshal(resp) + writeJSON(w, body, http.StatusOK) + return + } + paths := strings.Split(req.TestCasePath, ",") + api.boomer.SetTestCasesPath(paths) + if err == nil { + err = api.boomer.Start(&req.Profile) + } if err != nil { resp = &CommonResponseBody{ ServerStatus: EnumAPIResponseServerError(err.Error()), @@ -231,15 +255,28 @@ func (api *apiHandler) Quit(w http.ResponseWriter, r 
*http.Request) { } func (api *apiHandler) ReBalance(w http.ResponseWriter, r *http.Request) { - data := map[string]interface{}{} - args := r.URL.Query() - for k, vs := range args { - for _, v := range vs { - data[k] = v - } - } var resp *CommonResponseBody - err := api.boomer.ReBalance(data) + data, err := parseBody(r) + + req := RebalanceRequestBody{ + Profile: *api.boomer.GetProfile(), + } + err = mapstructure.Decode(data, &req) + if len(req.Other) > 0 { + keys := make([]string, 0, len(req.Other)) + for k := range req.Other { + keys = append(keys, k) + } + resp = &CommonResponseBody{ + ServerStatus: EnumAPIResponseParamError(fmt.Sprintf("failed to recognize params: %v", keys)), + } + body, _ := json.Marshal(resp) + writeJSON(w, body, http.StatusOK) + return + } + if err == nil { + err = api.boomer.ReBalance(&req.Profile) + } if err != nil { resp = &CommonResponseBody{ ServerStatus: EnumAPIResponseParamError(err.Error()), @@ -267,10 +304,10 @@ func (api *apiHandler) Handler() http.Handler { mux := http.NewServeMux() mux.HandleFunc("/", methods(api.Index, "GET")) - mux.HandleFunc("/start", methods(api.Start, "GET")) + mux.HandleFunc("/start", methods(api.Start, "POST")) mux.HandleFunc("/stop", methods(api.Stop, "GET")) mux.HandleFunc("/quit", methods(api.Quit, "GET")) - mux.HandleFunc("/rebalance", methods(api.ReBalance, "GET")) + mux.HandleFunc("/rebalance", methods(api.ReBalance, "POST")) mux.HandleFunc("/workers", methods(api.GetWorkersInfo, "GET")) return mux From 4ff3179a226524b823c8abf55db4d209a62f71a3 Mon Sep 17 00:00:00 2001 From: xucong053 Date: Mon, 23 May 2022 18:05:48 +0800 Subject: [PATCH 06/31] fix: unitest --- hrp/boomer.go | 5 ++--- hrp/internal/boomer/message.go | 4 ++-- hrp/internal/boomer/runner.go | 29 +++++++++++++---------------- hrp/internal/boomer/runner_test.go | 30 +++++++++--------------------- 4 files changed, 26 insertions(+), 42 deletions(-) diff --git a/hrp/boomer.go b/hrp/boomer.go index 5bb37f4c..8a076a4a 100644 --- a/hrp/boomer.go 
+++ b/hrp/boomer.go @@ -219,13 +219,12 @@ func (b *HRPBoomer) PollTasks() { if len(b.Boomer.GetTasksChan()) > 0 { continue } - profile := boomer.BytesToProfile(tasks.Profile) //Todo: 过滤掉已经传输过的task if tasks.Tasks != nil { testCases := b.BytesToTestCases(tasks.Tasks) - go b.runTasks(testCases, profile) + go b.runTasks(testCases, tasks.Profile) } else { - go b.rebalanceTasks(profile) + go b.rebalanceTasks(tasks.Profile) } case <-b.Boomer.GetCloseChan(): diff --git a/hrp/internal/boomer/message.go b/hrp/internal/boomer/message.go index 69819854..a9168384 100644 --- a/hrp/internal/boomer/message.go +++ b/hrp/internal/boomer/message.go @@ -19,8 +19,8 @@ type genericMessage struct { } type profileMessage struct { - Profile []byte `json:"profile,omitempty"` - Tasks []byte `json:"tasks,omitempty"` + Profile *Profile `json:"profile,omitempty"` + Tasks []byte `json:"tasks,omitempty"` } func newGenericMessage(t string, data map[string]int64, nodeID string) (msg *genericMessage) { diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 61506db2..bc6bab58 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -176,9 +176,6 @@ type runner struct { controller *Controller loop *Loop // specify loop count for testcase, count = loopCount * spawnCount - // when this channel is closed, all statistics are reported successfully - reportedChan chan bool - // rebalance spawn rebalance chan bool @@ -331,7 +328,6 @@ func (r *runner) reset() { r.stats.clearAll() r.rebalance = make(chan bool) r.stopChan = make(chan bool) - r.reportedChan = make(chan bool) } func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan bool, spawnCompleteFunc func()) { @@ -479,9 +475,7 @@ func (r *runner) statsStart() { // report stats case <-ticker.C: r.reportStats() - // close reportedChan and return if the last stats is reported successfully if !r.isStarted() { - close(r.reportedChan) log.Info().Msg("Quitting statsStart") return } @@ -546,15 
+540,12 @@ func (r *localRunner) start() { go r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stopChan, nil) // start stats report - go r.statsStart() + r.statsStart() // stop <-r.stopChan r.updateState(StateStopped) - // wait until all stats are reported successfully - <-r.reportedChan - // stop rate limiter if r.rateLimitEnabled { r.rateLimiter.Stop() @@ -621,11 +612,15 @@ func (r *workerRunner) onSpawnMessage(msg *genericMessage) { if msg.Profile == nil { log.Error().Msg("miss profile") } - if msg.Tasks == nil { + profile := BytesToProfile(msg.Profile) + r.setSpawnCount(profile.SpawnCount) + r.setSpawnRate(profile.SpawnRate) + + if msg.Tasks == nil && len(r.tasks) == 0 { log.Error().Msg("miss tasks") } r.tasksChan <- &profileMessage{ - Profile: msg.Profile, + Profile: profile, Tasks: msg.Tasks, } log.Info().Msg("on spawn message successful") @@ -635,8 +630,12 @@ func (r *workerRunner) onRebalanceMessage(msg *genericMessage) { if msg.Profile == nil { log.Error().Msg("miss profile") } + profile := BytesToProfile(msg.Profile) + r.setSpawnCount(profile.SpawnCount) + r.setSpawnRate(profile.SpawnRate) + r.tasksChan <- &profileMessage{ - Profile: msg.Profile, + Profile: profile, } log.Info().Msg("on rebalance message successful") } @@ -759,9 +758,7 @@ func (r *workerRunner) start() { go r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stopChan, r.spawnComplete) // start stats report - go r.statsStart() - - <-r.reportedChan + r.statsStart() r.reportTestResult() r.outputOnStop() diff --git a/hrp/internal/boomer/runner_test.go b/hrp/internal/boomer/runner_test.go index ae41b5dd..5bfccf6e 100644 --- a/hrp/internal/boomer/runner_test.go +++ b/hrp/internal/boomer/runner_test.go @@ -281,11 +281,7 @@ func TestOnSpawnMessage(t *testing.T) { runner.setTasks([]*Task{taskA}) runner.setSpawnCount(100) runner.setSpawnRate(100) - - runner.onSpawnMessage(newGenericMessage("spawn", map[string]int64{ - "spawn_count": 20, - "spawn_rate": 20, - }, runner.nodeID)) + 
runner.onSpawnMessage(newMessageToWorker("spawn", ProfileToBytes(&Profile{SpawnCount: 20, SpawnRate: 20}), nil, nil)) if runner.getSpawnCount() != 20 { t.Error("workers should be overwrote by onSpawnMessage, expected: 20, was:", runner.controller.spawnCount) @@ -344,13 +340,9 @@ func TestOnMessage(t *testing.T) { runner.updateState(StateInit) runner.setTasks(tasks) - go runner.start() - // start spawning - runner.onMessage(newGenericMessage("spawn", map[string]int64{ - "spawn_count": 10, - "spawn_rate": 10, - }, runner.nodeID)) + runner.onMessage(newMessageToWorker("spawn", ProfileToBytes(&Profile{SpawnCount: 10, SpawnRate: 10}), nil, nil)) + go runner.start() msg := <-runner.client.sendChannel() if msg.Type != "spawning" { @@ -371,10 +363,8 @@ func TestOnMessage(t *testing.T) { } // increase goroutines while running - runner.onMessage(newGenericMessage("rebalance", map[string]int64{ - "spawn_count": 15, - "spawn_rate": 15, - }, runner.nodeID)) + runner.onMessage(newMessageToWorker("rebalance", ProfileToBytes(&Profile{SpawnCount: 15, SpawnRate: 15}), nil, nil)) + runner.rebalance <- true time.Sleep(2 * time.Second) if runner.getState() != StateRunning { @@ -394,14 +384,11 @@ func TestOnMessage(t *testing.T) { t.Error("Runner should send client_stopped message, got", msg.Type) } - time.Sleep(4 * time.Second) + time.Sleep(3 * time.Second) - go runner.start() // spawn again - runner.onMessage(newGenericMessage("spawn", map[string]int64{ - "spawn_count": 10, - "spawn_rate": 10, - }, runner.nodeID)) + runner.onMessage(newMessageToWorker("spawn", ProfileToBytes(&Profile{SpawnCount: 10, SpawnRate: 10}), nil, nil)) + go runner.start() msg = <-runner.client.sendChannel() if msg.Type != "spawning" { @@ -431,6 +418,7 @@ func TestOnMessage(t *testing.T) { t.Error("Runner should send client_stopped message, got", msg.Type) } + time.Sleep(3 * time.Second) // quit runner.onMessage(newGenericMessage("quit", nil, runner.nodeID)) } From c72dc38bd320067500c5ca169e0d9c6463086bb3 Mon 
Sep 17 00:00:00 2001 From: xucong053 Date: Thu, 26 May 2022 10:51:59 +0800 Subject: [PATCH 07/31] suport for distributing tasks that contain plugins --- hrp/boomer.go | 27 +++++++++++++++++++++++++++ hrp/config.go | 9 ++++++++- hrp/internal/boomer/client_grpc.go | 4 ++-- hrp/runner.go | 14 ++++++++++++++ 4 files changed, 51 insertions(+), 3 deletions(-) diff --git a/hrp/boomer.go b/hrp/boomer.go index 8a076a4a..87c6c809 100644 --- a/hrp/boomer.go +++ b/hrp/boomer.go @@ -1,7 +1,11 @@ package hrp import ( + "fmt" + "github.com/httprunner/httprunner/v4/hrp/internal/builtin" + "io/ioutil" "os" + "path/filepath" "sync" "time" @@ -194,7 +198,30 @@ func (b *HRPBoomer) runTasks(testCases []*TCase, profile *boomer.Profile) { tesecase, err := tc.toTestCase() if err != nil { log.Error().Err(err).Msg("failed to load testcases") + return } + // create temp dir to save testcase + tempDir, err := ioutil.TempDir("", "hrp_testcases") + if err != nil { + log.Error().Err(err).Msg("failed to save testcases") + return + } + + tesecase.Config.Path = filepath.Join(tempDir, "test-case.json") + if tesecase.Config.PluginSetting != nil { + tesecase.Config.PluginSetting.Path = filepath.Join(tempDir, fmt.Sprintf("debugtalk.%s", tesecase.Config.PluginSetting.Type)) + err = builtin.Bytes2File(tesecase.Config.PluginSetting.Content, tesecase.Config.PluginSetting.Path) + if err != nil { + log.Error().Err(err).Msg("failed to save plugin file") + return + } + } + err = builtin.Dump2JSON(tesecase, tesecase.Config.Path) + if err != nil { + log.Error().Err(err).Msg("failed to dump testcases") + return + } + testcases = append(testcases, tesecase) } b.SetProfile(profile) diff --git a/hrp/config.go b/hrp/config.go index 06930564..3ee1264a 100644 --- a/hrp/config.go +++ b/hrp/config.go @@ -32,7 +32,8 @@ type TConfig struct { Timeout float64 `json:"timeout,omitempty" yaml:"timeout,omitempty"` // global timeout in seconds Export []string `json:"export,omitempty" yaml:"export,omitempty"` Weight int 
`json:"weight,omitempty" yaml:"weight,omitempty"` - Path string `json:"path,omitempty" yaml:"path,omitempty"` // testcase file path + Path string `json:"path,omitempty" yaml:"path,omitempty"` // testcase file path + PluginSetting *PluginConfig `json:"plugin,omitempty" yaml:"plugin,omitempty"` // plugin config } // WithVariables sets variables for current testcase. @@ -172,3 +173,9 @@ const ( ) var thinkTimeDefaultRandom = map[string]float64{"min_percentage": 0.5, "max_percentage": 1.5} + +type PluginConfig struct { + Path string + Type string // bin、so、py + Content []byte +} diff --git a/hrp/internal/boomer/client_grpc.go b/hrp/internal/boomer/client_grpc.go index 8082074d..892f9c89 100644 --- a/hrp/internal/boomer/client_grpc.go +++ b/hrp/internal/boomer/client_grpc.go @@ -74,12 +74,12 @@ func newClient(masterHost string, masterPort int, identity string) (client *grpc func (c *grpcClient) connect() (err error) { addr := fmt.Sprintf("%v:%v", c.masterHost, c.masterPort) - c.config.conn, err = grpc.Dial(addr, grpc.WithInsecure()) + c.config.conn, err = grpc.Dial(addr, grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(1024*1024*1024))) if err != nil { log.Error().Err(err).Msg("failed to connect") return err } - + grpc.MaxCallRecvMsgSize(32 * 10e9) go c.recv() go c.send() diff --git a/hrp/runner.go b/hrp/runner.go index 12272d4b..63a3098a 100644 --- a/hrp/runner.go +++ b/hrp/runner.go @@ -7,6 +7,7 @@ import ( "net/http/cookiejar" "net/url" "path/filepath" + "strings" "testing" "time" @@ -16,6 +17,7 @@ import ( "github.com/rs/zerolog/log" "golang.org/x/net/http2" + "github.com/httprunner/httprunner/v4/hrp/internal/builtin" "github.com/httprunner/httprunner/v4/hrp/internal/sdk" ) @@ -279,6 +281,18 @@ func (r *HRPRunner) newCaseRunner(testcase *TestCase) (*testCaseRunner, error) { timeout := time.Duration(runner.testCase.Config.Timeout*1000) * time.Millisecond runner.hrpRunner.SetTimeout(timeout) } + if plugin.Path() != "" { + pluginContent, err := 
builtin.ReadFile(plugin.Path()) + if err != nil { + return nil, err + } + tp := strings.Split(plugin.Path(), ".") + runner.parsedConfig.PluginSetting = &PluginConfig{ + Path: plugin.Path(), + Content: pluginContent, + Type: tp[len(tp)-1], + } + } return runner, nil } From b654f32cc31678481a5ee9446c1d783d097693c7 Mon Sep 17 00:00:00 2001 From: xucong053 Date: Thu, 26 May 2022 11:13:40 +0800 Subject: [PATCH 08/31] fix: unittest --- docs/cmd/hrp_boom.md | 3 +- hrp/internal/boomer/runner.go | 10 ++- hrp/internal/builtin/utils.go | 152 ---------------------------------- 3 files changed, 9 insertions(+), 156 deletions(-) diff --git a/docs/cmd/hrp_boom.md b/docs/cmd/hrp_boom.md index d79d7f14..a51770b5 100644 --- a/docs/cmd/hrp_boom.md +++ b/docs/cmd/hrp_boom.md @@ -27,9 +27,10 @@ hrp boom [flags] --disable-compression Disable compression --disable-console-output Disable console output. --disable-keepalive Disable keepalive - --expect-workers int How many workers master should expect to connect before starting the test (only when --autostart is used (default 1) + --expect-workers int How many workers master should expect to connect before starting the test (only when --autostart is used) (default 1) --expect-workers-max-wait int How many workers master should expect to connect before starting the test (only when --autostart is used -h, --help help for boom + --ignore-quit ignores quit from master (only when --worker is used) --loop-count int The specify running cycles for load testing (default -1) --master master of distributed testing --master-bind-host string Interfaces (hostname, ip) that hrp master should bind to. Only used when running with --master. Defaults to * (all available interfaces). 
(default "127.0.0.1") diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index bc6bab58..8b87d6a7 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -151,9 +151,9 @@ func (c *Controller) increaseFinishedCount() { func (c *Controller) reset() { c.mutex.Lock() defer c.mutex.Unlock() - c.spawnCount = 0 + atomic.StoreInt64(&c.spawnCount, 0) c.spawnRate = 0 - c.currentClientsNum = 0 + atomic.StoreInt64(&c.currentClientsNum, 0) c.spawnDone = make(chan struct{}) c.tasks = []*Task{} c.once = sync.Once{} @@ -490,6 +490,7 @@ func (r *runner) stop() { if r.rateLimitEnabled { r.rateLimiter.Stop() } + r.updateState(StateStopped) } func (r *runner) getState() int32 { @@ -581,6 +582,7 @@ type workerRunner struct { tasksChan chan *profileMessage + mutex sync.Mutex ignoreQuit bool } @@ -597,6 +599,7 @@ func newWorkerRunner(masterHost string, masterPort int) (r *workerRunner) { masterPort: masterPort, nodeID: getNodeID(), tasksChan: make(chan *profileMessage, 10), + mutex: sync.Mutex{}, } return r } @@ -746,6 +749,8 @@ func (r *workerRunner) run() { // start load test func (r *workerRunner) start() { + r.mutex.Lock() + defer r.mutex.Unlock() r.reset() // start rate limiter @@ -768,7 +773,6 @@ func (r *workerRunner) stop() { if r.isStarted() { r.runner.stop() close(r.rebalance) - r.updateState(StateStopped) } } diff --git a/hrp/internal/builtin/utils.go b/hrp/internal/builtin/utils.go index 876eaff3..10aad1e3 100644 --- a/hrp/internal/builtin/utils.go +++ b/hrp/internal/builtin/utils.go @@ -1,13 +1,11 @@ package builtin import ( - "archive/zip" "bufio" "bytes" "encoding/csv" builtinJSON "encoding/json" "fmt" - "io" "math/rand" "os" "os/exec" @@ -493,156 +491,6 @@ func GetFileNameWithoutExtension(path string) string { return base[0 : len(base)-len(ext)] } -func ZipDir(filename string, root string) error { - p, err := os.Getwd() - if err != nil { - return err - } - if strings.Contains(root, p) { - root, err = filepath.Rel(p, root) 
- if err != nil { - return err - } - } - err = os.RemoveAll(filename) - if err != nil { - return err - } - var files []string - err = filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - files = append(files, path) - return nil - }) - if err != nil { - return err - } - err = ZipFiles(filename, files) - return err -} - -// ZipFiles compresses one or many files into a single zip archive file. -// Param 1: filename is the output zip file's name. -// Param 2: files is a list of files to add to the zip. -func ZipFiles(filename string, files []string) error { - newZipFile, err := os.Create(filename) - if err != nil { - return err - } - defer newZipFile.Close() - - zipWriter := zip.NewWriter(newZipFile) - defer zipWriter.Close() - - // Add files to zip - for _, file := range files { - if err = AddFileToZip(zipWriter, file); err != nil { - return err - } - } - return nil -} - -func AddFileToZip(zipWriter *zip.Writer, filename string) error { - fileToZip, err := os.Open(filename) - if err != nil { - return err - } - defer fileToZip.Close() - - // Get the file information - info, err := fileToZip.Stat() - if err != nil { - return err - } - - header, err := zip.FileInfoHeader(info) - if err != nil { - return err - } - - // Using FileInfoHeader() above only uses the basename of the file. If we want - // to preserve the folder structure we can overwrite this with the full path. 
- header.Name = filename - - // if dir - if info.IsDir() { - header.Name += `/` - } else { - // Change to deflate to gain better compression - // see http://golang.org/pkg/archive/zip/#pkg-constants - header.Method = zip.Deflate - } - - writer, err := zipWriter.CreateHeader(header) - if err != nil { - return err - } - if !info.IsDir() { - _, err = io.Copy(writer, fileToZip) - } - return err -} - -func UnZip(dst, src string) (err error) { - zr, err := zip.OpenReader(src) - defer zr.Close() - if err != nil { - return - } - if dst != "" { - if err := os.MkdirAll(dst, 0755); err != nil { - return err - } - } - for _, file := range zr.File { - path := filepath.Join(dst, file.Name) - if file.FileInfo().IsDir() { - if err := os.MkdirAll(path, file.Mode()); err != nil { - return err - } - continue - } - fr, err := file.Open() - if err != nil { - return err - } - fw, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, file.Mode()) - if err != nil { - return err - } - _, err = io.Copy(fw, fr) - if err != nil { - return err - } - log.Info().Msg(fmt.Sprintf("unzip %s successful\n", path)) - _ = fw.Close() - _ = fr.Close() - } - return nil -} - -func File2Bytes(filename string) ([]byte, error) { - file, err := os.Open(filename) - if err != nil { - return nil, err - } - defer file.Close() - - stats, err := file.Stat() - if err != nil { - return nil, err - } - - data := make([]byte, stats.Size()) - count, err := file.Read(data) - if err != nil { - return nil, err - } - log.Info().Msg(fmt.Sprintf("read file %s len: %d \n", filename, count)) - - return data, nil -} - func Bytes2File(data []byte, filename string) error { file, err := os.Create(filename) if err != nil { From d67764e49c2310e9104f2c266369260f239cdd4f Mon Sep 17 00:00:00 2001 From: xucong053 Date: Mon, 4 Jul 2022 17:14:39 +0800 Subject: [PATCH 09/31] fix: data race --- go.mod | 3 +-- go.sum | 5 +---- hrp/boomer.go | 5 +++++ hrp/runner.go | 2 +- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/go.mod 
b/go.mod index 1bc11398..9f476f80 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/httprunner/httprunner/v4 -go 1.18 +go 1.16 require ( github.com/andybalholm/brotli v1.0.4 @@ -17,7 +17,6 @@ require ( github.com/jmespath/go-jmespath v0.4.0 github.com/json-iterator/go v1.1.12 github.com/maja42/goval v1.2.1 - github.com/mattn/go-runewidth v0.0.13 // indirect github.com/mitchellh/mapstructure v1.4.1 github.com/olekukonko/tablewriter v0.0.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index a36ed4ad..97bee3a8 100644 --- a/go.sum +++ b/go.sum @@ -335,9 +335,8 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= -github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -408,8 +407,6 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/rivo/uniseg v0.2.0 
h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= diff --git a/hrp/boomer.go b/hrp/boomer.go index 87c6c809..0f66c1ff 100644 --- a/hrp/boomer.go +++ b/hrp/boomer.go @@ -289,6 +289,9 @@ func (b *HRPBoomer) convertBoomerTask(testcase *TestCase, rendezvousList []*Rend // reset start time only once once := sync.Once{} + // update session variables mutex + mutex := sync.Mutex{} + return &boomer.Task{ Name: testcase.Config.Name, Weight: testcase.Config.Weight, @@ -299,9 +302,11 @@ func (b *HRPBoomer) convertBoomerTask(testcase *TestCase, rendezvousList []*Rend // init session runner sessionRunner := caseRunner.newSession() + mutex.Lock() if parametersIterator.HasNext() { sessionRunner.updateSessionVariables(parametersIterator.Next()) } + mutex.Unlock() startTime := time.Now() for _, step := range testcase.TestSteps { diff --git a/hrp/runner.go b/hrp/runner.go index 63a3098a..60389d03 100644 --- a/hrp/runner.go +++ b/hrp/runner.go @@ -281,7 +281,7 @@ func (r *HRPRunner) newCaseRunner(testcase *TestCase) (*testCaseRunner, error) { timeout := time.Duration(runner.testCase.Config.Timeout*1000) * time.Millisecond runner.hrpRunner.SetTimeout(timeout) } - if plugin.Path() != "" { + if plugin != nil { pluginContent, err := builtin.ReadFile(plugin.Path()) if err != nil { return nil, err From 2ca94381efa2249c8086eb85afd136db8af81489 Mon Sep 17 00:00:00 2001 From: xucong053 Date: Mon, 4 Jul 2022 22:00:39 +0800 Subject: [PATCH 10/31] fix: report stats for httprunner master --- hrp/internal/boomer/output.go | 14 +------ hrp/internal/boomer/runner.go | 62 ++++++++++++++++++++++++++---- hrp/internal/boomer/server_grpc.go | 4 +- 3 files 
changed, 57 insertions(+), 23 deletions(-) diff --git a/hrp/internal/boomer/output.go b/hrp/internal/boomer/output.go index a0866e02..152f0768 100644 --- a/hrp/internal/boomer/output.go +++ b/hrp/internal/boomer/output.go @@ -116,19 +116,7 @@ func (o *ConsoleOutput) OnEvent(data map[string]interface{}) { return } - var state string - switch output.State { - case StateInit: - state = "initializing" - case StateSpawning: - state = "spawning" - case StateRunning: - state = "running" - case StateQuitting: - state = "quitting" - case StateStopped: - state = "stopped" - } + state := getStateName(output.State) currentTime := time.Now() println(fmt.Sprintf("Current time: %s, Users: %d, State: %s, Total RPS: %.1f, Total Average Response Time: %.1fms, Total Fail Ratio: %.1f%%", diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 8b87d6a7..80d8552e 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -26,6 +26,26 @@ const ( StateMissing // missing ) +func getStateName(state int32) (stateName string) { + switch state { + case StateInit: + stateName = "initializing" + case StateSpawning: + stateName = "spawning" + case StateRunning: + stateName = "running" + case StateStopping: + stateName = "stopping" + case StateStopped: + stateName = "stopped" + case StateQuitting: + stateName = "quitting" + case StateMissing: + stateName = "stopped" + } + return +} + const ( reportStatsInterval = 3 * time.Second heartbeatInterval = 1 * time.Second @@ -839,12 +859,13 @@ func (r *masterRunner) setExpectWorkers(expectWorkers int, expectWorkersMaxWait func (r *masterRunner) heartbeatWorker() { log.Info().Msg("heartbeatWorker, listen and record heartbeat from worker") - var ticker = time.NewTicker(heartbeatInterval) + var heartBeatTicker = time.NewTicker(heartbeatInterval) + var reportTicker = time.NewTicker(heartbeatLiveness) for { select { case <-r.closeChan: return - case <-ticker.C: + case <-heartBeatTicker.C: 
r.server.clients.Range(func(key, value interface{}) bool { workerInfo, ok := value.(*WorkerNode) if !ok { @@ -863,6 +884,8 @@ func (r *masterRunner) heartbeatWorker() { } return true }) + case <-reportTicker.C: + r.reportStats() } } } @@ -889,7 +912,7 @@ func (r *masterRunner) clientListener() { } workerInfo.setState(StateInit) if r.getState() == StateRunning { - println(fmt.Sprintf("worker(%s) joined, ready to rebalance the load of each worker", workerInfo.ID)) + log.Warn().Str("worker id", workerInfo.ID).Msg("worker joined, ready to rebalance the load of each worker") err := r.rebalance() if err != nil { log.Error().Err(err).Msg("failed to rebalance") @@ -916,7 +939,7 @@ func (r *masterRunner) clientListener() { case typeSpawningComplete: workerInfo.setState(StateRunning) if r.server.getWorkersLengthByState(StateRunning) == r.server.getClientsLength() { - println(fmt.Sprintf("all(%v) workers spawn done, setting state as running", r.server.getClientsLength())) + log.Warn().Msg("all workers spawn done, setting state as running") r.updateState(StateRunning) } case typeQuit: @@ -926,7 +949,7 @@ func (r *masterRunner) clientListener() { workerInfo.setState(StateQuitting) if r.isStarted() { if r.server.getClientsLength() > 0 { - println(fmt.Sprintf("worker(%s) quited, ready to rebalance the load of each worker", workerInfo.ID)) + log.Warn().Str("worker id", workerInfo.ID).Msg("worker quited, ready to rebalance the load of each worker") err := r.rebalance() if err != nil { log.Error().Err(err).Msg("failed to rebalance") @@ -1004,8 +1027,7 @@ func (r *masterRunner) start() error { profile := r.profile.dispatch(int64(numWorkers)) r.server.sendChannel() <- newMessageToWorker("spawn", ProfileToBytes(profile), nil, testcase) - println("send spawn data to worker successful") - log.Info().Msg("send spawn data to worker successful") + log.Warn().Interface("profile", profile).Msg("send spawn data to worker successful") return nil } @@ -1017,7 +1039,7 @@ func (r *masterRunner) 
rebalance() error { profile := r.profile.dispatch(int64(numWorkers)) r.server.sendChannel() <- newMessageToWorker("rebalance", ProfileToBytes(profile), nil, nil) - println("send rebalance data to worker successful") + log.Warn().Msg("send rebalance data to worker successful") return nil } @@ -1061,3 +1083,27 @@ func (r *masterRunner) close() { close(r.closeChan) r.server.close() } + +func (r *masterRunner) reportStats() { + currentTime := time.Now() + println() + println("===================== HttpRunner Master for Distributed Load Testing ===================== ") + println(fmt.Sprintf("Current time: %s, State: %v, Current Valid Workers: %v, Target Users: %v", + currentTime.Format("2006/01/02 15:04:05"), getStateName(r.getState()), r.server.getClientsLength(), r.getSpawnCount())) + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Worker ID", "State", "Current Users", "CPU Usage", "CPU Warning Emitted", "Memory Usage", "Heartbeat"}) + + for _, worker := range r.server.getAllWorkers() { + row := make([]string, 7) + row[0] = worker.ID + row[1] = fmt.Sprintf("%v", getStateName(worker.getState())) + row[2] = fmt.Sprintf("%v", worker.getSpawnCount()) + row[3] = fmt.Sprintf("%v", worker.getCPUUsage()) + row[4] = fmt.Sprintf("%v", worker.getCPUWarningEmitted()) + row[5] = fmt.Sprintf("%v", worker.getMemoryUsage()) + row[6] = fmt.Sprintf("%v", worker.getHeartbeat()) + table.Append(row) + } + table.Render() + println() +} diff --git a/hrp/internal/boomer/server_grpc.go b/hrp/internal/boomer/server_grpc.go index bcd85001..0bf593ea 100644 --- a/hrp/internal/boomer/server_grpc.go +++ b/hrp/internal/boomer/server_grpc.go @@ -35,10 +35,10 @@ func (s *grpcServer) BidirectionalStreamingMessage(srv messager.Message_Bidirect } wn := &WorkerNode{messenger: srv, ID: req.NodeID, Heartbeat: 3} s.clients.Store(req.NodeID, wn) - println(fmt.Sprintf("worker(%v) joined, current worker count: %v", req.NodeID, s.getClientsLength())) + log.Warn().Str("worker id", 
req.NodeID).Msg("worker joined") <-s.disconnectedChannel() s.clients.Delete(req.NodeID) - println(fmt.Sprintf("worker(%v) quited, current worker count: %v", req.NodeID, s.getClientsLength())) + log.Warn().Str("worker id", req.NodeID).Msg("worker quited") return nil } From caed68d6dd82e35e4f30723c1b3804182d3e75c8 Mon Sep 17 00:00:00 2001 From: xucong053 Date: Tue, 5 Jul 2022 16:37:42 +0800 Subject: [PATCH 11/31] fix: graceful stop --- hrp/internal/boomer/boomer.go | 4 +--- hrp/internal/boomer/runner.go | 9 +++++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/hrp/internal/boomer/boomer.go b/hrp/internal/boomer/boomer.go index ed314249..8ec9536b 100644 --- a/hrp/internal/boomer/boomer.go +++ b/hrp/internal/boomer/boomer.go @@ -378,9 +378,6 @@ func (b *Boomer) EnableGracefulQuit() { signal.Notify(c, syscall.SIGTERM, syscall.SIGINT) go func() { <-c - if b.mode == DistributedWorkerMode { - b.workerRunner.ignoreQuit = false - } b.Quit() }() } @@ -536,6 +533,7 @@ func (b *Boomer) GetCloseChan() chan bool { func (b *Boomer) Quit() { switch b.mode { case DistributedWorkerMode: + b.workerRunner.stop() b.workerRunner.close() case DistributedMasterMode: b.masterRunner.close() diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 80d8552e..e30d0d7a 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -620,6 +620,7 @@ func newWorkerRunner(masterHost string, masterPort int) (r *workerRunner) { nodeID: getNodeID(), tasksChan: make(chan *profileMessage, 10), mutex: sync.Mutex{}, + ignoreQuit: false, } return r } @@ -686,6 +687,10 @@ func (r *workerRunner) onMessage(msg *genericMessage) { log.Info().Msg("Recv stop message from master, all the goroutines are stopped") r.client.sendChannel() <- newGenericMessage("client_stopped", nil, r.nodeID) case "quit": + r.stop() + if r.ignoreQuit { + break + } r.close() log.Info().Msg("Recv quit message from master, all the goroutines are stopped") } @@ -797,10 +802,6 @@ 
func (r *workerRunner) stop() { } func (r *workerRunner) close() { - r.stop() - if r.ignoreQuit { - return - } // waiting report finished time.Sleep(1 * time.Second) close(r.closeChan) From b325232a6cbf61bbc4a137761811646cf0cdcfe6 Mon Sep 17 00:00:00 2001 From: xucong053 Date: Wed, 6 Jul 2022 13:22:18 +0800 Subject: [PATCH 12/31] fix: plugin file distribution --- examples/demo-with-go-plugin/plugin/debugtalk_gen.go | 2 +- hrp/internal/boomer/runner.go | 2 +- hrp/internal/builtin/utils.go | 9 ++++----- hrp/runner.go | 7 +++++-- 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/examples/demo-with-go-plugin/plugin/debugtalk_gen.go b/examples/demo-with-go-plugin/plugin/debugtalk_gen.go index 6e6c9067..0ee1ae22 100644 --- a/examples/demo-with-go-plugin/plugin/debugtalk_gen.go +++ b/examples/demo-with-go-plugin/plugin/debugtalk_gen.go @@ -1,4 +1,4 @@ -// NOTE: Generated By hrp v4.1.4, DO NOT EDIT! +// NOTE: Generated By hrp v4.1.5, DO NOT EDIT! package main import ( diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index e30d0d7a..b020bbfa 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -1089,7 +1089,7 @@ func (r *masterRunner) reportStats() { currentTime := time.Now() println() println("===================== HttpRunner Master for Distributed Load Testing ===================== ") - println(fmt.Sprintf("Current time: %s, State: %v, Current Valid Workers: %v, Target Users: %v", + println(fmt.Sprintf("Current time: %s, State: %v, Current Available Workers: %v, Target Users: %v", currentTime.Format("2006/01/02 15:04:05"), getStateName(r.getState()), r.server.getClientsLength(), r.getSpawnCount())) table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{"Worker ID", "State", "Current Users", "CPU Usage", "CPU Warning Emitted", "Memory Usage", "Heartbeat"}) diff --git a/hrp/internal/builtin/utils.go b/hrp/internal/builtin/utils.go index 10aad1e3..cfc660c2 100644 --- 
a/hrp/internal/builtin/utils.go +++ b/hrp/internal/builtin/utils.go @@ -492,12 +492,11 @@ func GetFileNameWithoutExtension(path string) string { } func Bytes2File(data []byte, filename string) error { - file, err := os.Create(filename) - if err != nil { - return err - } + file, err := os.OpenFile(filename, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0755) defer file.Close() - + if err != nil { + log.Error().Err(err).Msg("failed to generate file") + } count, err := file.Write(data) if err != nil { return err diff --git a/hrp/runner.go b/hrp/runner.go index 60389d03..e6662613 100644 --- a/hrp/runner.go +++ b/hrp/runner.go @@ -281,14 +281,17 @@ func (r *HRPRunner) newCaseRunner(testcase *TestCase) (*testCaseRunner, error) { timeout := time.Duration(runner.testCase.Config.Timeout*1000) * time.Millisecond runner.hrpRunner.SetTimeout(timeout) } + + // load plugin info to testcase config if plugin != nil { - pluginContent, err := builtin.ReadFile(plugin.Path()) + pluginPath, _ := locatePlugin(testcase.Config.Path) + pluginContent, err := builtin.ReadFile(pluginPath) if err != nil { return nil, err } tp := strings.Split(plugin.Path(), ".") runner.parsedConfig.PluginSetting = &PluginConfig{ - Path: plugin.Path(), + Path: pluginPath, Content: pluginContent, Type: tp[len(tp)-1], } From b121282525cdd0ea7fe79e82b6e44a2c3891e409 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Wed, 6 Jul 2022 22:11:29 +0800 Subject: [PATCH 13/31] feat: add interceptors for grpc --- go.mod | 1 + go.sum | 3 + hrp/internal/boomer/client_grpc.go | 110 ++++++++++++++++++++-- hrp/internal/boomer/server_grpc.go | 88 ++++++++++++++++- hrp/internal/data/data.go | 44 +++++++++ hrp/internal/data/x509/README.md | 6 ++ hrp/internal/data/x509/ca_cert.pem | 34 +++++++ hrp/internal/data/x509/ca_key.pem | 52 ++++++++++ hrp/internal/data/x509/client_ca_cert.pem | 34 +++++++ hrp/internal/data/x509/client_ca_key.pem | 52 ++++++++++ hrp/internal/data/x509/client_cert.pem | 32 +++++++ 
hrp/internal/data/x509/client_key.pem | 51 ++++++++++ hrp/internal/data/x509/create.sh | 69 ++++++++++++++ hrp/internal/data/x509/openssl.cnf | 28 ++++++ hrp/internal/data/x509/server_cert.pem | 32 +++++++ hrp/internal/data/x509/server_key.pem | 51 ++++++++++ 16 files changed, 676 insertions(+), 11 deletions(-) create mode 100644 hrp/internal/data/data.go create mode 100644 hrp/internal/data/x509/README.md create mode 100644 hrp/internal/data/x509/ca_cert.pem create mode 100644 hrp/internal/data/x509/ca_key.pem create mode 100644 hrp/internal/data/x509/client_ca_cert.pem create mode 100644 hrp/internal/data/x509/client_ca_key.pem create mode 100644 hrp/internal/data/x509/client_cert.pem create mode 100644 hrp/internal/data/x509/client_key.pem create mode 100755 hrp/internal/data/x509/create.sh create mode 100644 hrp/internal/data/x509/openssl.cnf create mode 100644 hrp/internal/data/x509/server_cert.pem create mode 100644 hrp/internal/data/x509/server_key.pem diff --git a/go.mod b/go.mod index 9f476f80..0ed1fd7a 100644 --- a/go.mod +++ b/go.mod @@ -28,6 +28,7 @@ require ( github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect golang.org/x/net v0.0.0-20220225172249-27dd8689420f + golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 google.golang.org/grpc v1.45.0 google.golang.org/protobuf v1.27.1 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b diff --git a/go.sum b/go.sum index 97bee3a8..d774c3f4 100644 --- a/go.sum +++ b/go.sum @@ -17,6 +17,7 @@ cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKP cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= cloud.google.com/go v0.81.0/go.mod 
h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= @@ -602,6 +603,7 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 h1:0Ja1LBD+yisY6RWM/BH7TJVXWsSjs2VwBSmvSX4HdBc= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -783,6 +785,7 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= diff --git a/hrp/internal/boomer/client_grpc.go b/hrp/internal/boomer/client_grpc.go index 892f9c89..7f82227b 100644 --- 
a/hrp/internal/boomer/client_grpc.go +++ b/hrp/internal/boomer/client_grpc.go @@ -8,9 +8,14 @@ import ( "sync/atomic" "time" + "golang.org/x/oauth2" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/oauth" + + "github.com/httprunner/httprunner/v4/hrp/internal/data" "github.com/httprunner/httprunner/v4/hrp/internal/grpc/messager" "github.com/rs/zerolog/log" - "google.golang.org/grpc" ) type grpcClient struct { @@ -31,14 +36,86 @@ type grpcClient struct { } type grpcClientConfig struct { - ctx context.Context - cancel context.CancelFunc // use cancel() to stop client - conn *grpc.ClientConn - biStream messager.Message_BidirectionalStreamingMessageClient + // ctx is used for the lifetime of the stream that may need to be canceled + // on client shutdown. + ctx context.Context + ctxCancel context.CancelFunc + conn *grpc.ClientConn + biStream messager.Message_BidirectionalStreamingMessageClient mutex sync.RWMutex } +const token = "httprunner-secret-token" + +func logger(format string, a ...interface{}) { + log.Logger.Log().Msg(fmt.Sprintf(format, a...)) +} + +// unaryInterceptor is an example unary interceptor. +func unaryInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + var credsConfigured bool + for _, o := range opts { + _, ok := o.(grpc.PerRPCCredsCallOption) + if ok { + credsConfigured = true + break + } + } + if !credsConfigured { + opts = append(opts, grpc.PerRPCCredentials(oauth.NewOauthAccess(&oauth2.Token{ + AccessToken: token, + }))) + } + start := time.Now() + err := invoker(ctx, method, req, reply, cc, opts...) + end := time.Now() + logger("RPC: %s, start time: %s, end time: %s, err: %v", method, start.Format("Basic"), end.Format(time.RFC3339), err) + return err +} + +// wrappedStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and +// SendMsg method call. 
+type wrappedStream struct { + grpc.ClientStream +} + +func (w *wrappedStream) RecvMsg(m interface{}) error { + logger("Receive a message (Type: %T) at %v", m, time.Now().Format(time.RFC3339)) + return w.ClientStream.RecvMsg(m) +} + +func (w *wrappedStream) SendMsg(m interface{}) error { + logger("Send a message (Type: %T) at %v", m, time.Now().Format(time.RFC3339)) + return w.ClientStream.SendMsg(m) +} + +func newWrappedStream(s grpc.ClientStream) grpc.ClientStream { + return &wrappedStream{s} +} + +// streamInterceptor is an example stream interceptor. +func streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + var credsConfigured bool + for _, o := range opts { + _, ok := o.(*grpc.PerRPCCredsCallOption) + if ok { + credsConfigured = true + break + } + } + if !credsConfigured { + opts = append(opts, grpc.PerRPCCredentials(oauth.NewOauthAccess(&oauth2.Token{ + AccessToken: token, + }))) + } + s, err := streamer(ctx, desc, cc, method, opts...) 
+ if err != nil { + return nil, err + } + return newWrappedStream(s), nil +} + func (c *grpcClientConfig) getBiStreamClient() messager.Message_BidirectionalStreamingMessageClient { c.mutex.RLock() defer c.mutex.RUnlock() @@ -64,9 +141,9 @@ func newClient(masterHost string, masterPort int, identity string) (client *grpc disconnectedFromMaster: make(chan bool), shutdownChan: make(chan bool), config: &grpcClientConfig{ - ctx: ctx, - cancel: cancel, - mutex: sync.RWMutex{}, + ctx: ctx, + ctxCancel: cancel, + mutex: sync.RWMutex{}, }, } return client @@ -74,7 +151,20 @@ func newClient(masterHost string, masterPort int, identity string) (client *grpc func (c *grpcClient) connect() (err error) { addr := fmt.Sprintf("%v:%v", c.masterHost, c.masterPort) - c.config.conn, err = grpc.Dial(addr, grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(1024*1024*1024))) + // Create tls based credential. + creds, err := credentials.NewClientTLSFromFile(data.Path("x509/ca_cert.pem"), "x.test.example.com") + if err != nil { + log.Fatal().Msg(fmt.Sprintf("failed to load credentials: %v", err)) + } + opts := []grpc.DialOption{ + // oauth.NewOauthAccess requires the configuration of transport + // credentials. + grpc.WithTransportCredentials(creds), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(1024 * 1024 * 1024)), + grpc.WithUnaryInterceptor(unaryInterceptor), + grpc.WithStreamInterceptor(streamInterceptor), + } + c.config.conn, err = grpc.Dial(addr, opts...) 
if err != nil { log.Error().Err(err).Msg("failed to connect") return err @@ -112,7 +202,7 @@ func (c *grpcClient) reConnect() (err error) { func (c *grpcClient) close() { close(c.shutdownChan) - c.config.cancel() + c.config.ctxCancel() if c.config.conn != nil { c.config.conn.Close() } diff --git a/hrp/internal/boomer/server_grpc.go b/hrp/internal/boomer/server_grpc.go index 0bf593ea..d6d062f4 100644 --- a/hrp/internal/boomer/server_grpc.go +++ b/hrp/internal/boomer/server_grpc.go @@ -5,18 +5,93 @@ import ( "fmt" "io" "net" + "strings" "sync" "sync/atomic" + "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/reflection" "google.golang.org/grpc/status" + "github.com/httprunner/httprunner/v4/hrp/internal/data" "github.com/httprunner/httprunner/v4/hrp/internal/grpc/messager" "github.com/rs/zerolog/log" ) +var ( + errMissingMetadata = status.Errorf(codes.InvalidArgument, "missing metadata") + errInvalidToken = status.Errorf(codes.Unauthenticated, "invalid token") +) + +// valid validates the authorization. +func valid(authorization []string) bool { + if len(authorization) < 1 { + return false + } + token := strings.TrimPrefix(authorization[0], "Bearer ") + // Perform the token validation here. For the sake of this example, the code + // here forgoes any of the usual OAuth2 token validation and instead checks + // for a token matching an arbitrary string. 
+ return token == "httprunner-secret-token" +} + +func serverUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + // authentication (token verification) + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, errMissingMetadata + } + if !valid(md["authorization"]) { + return nil, errInvalidToken + } + m, err := handler(ctx, req) + if err != nil { + logger("RPC failed with error %v", err) + } + return m, err +} + +// serverWrappedStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and +// SendMsg method call. +type serverWrappedStream struct { + grpc.ServerStream +} + +func (w *serverWrappedStream) RecvMsg(m interface{}) error { + logger("Receive a message (Type: %T) at %s", m, time.Now().Format(time.RFC3339)) + return w.ServerStream.RecvMsg(m) +} + +func (w *serverWrappedStream) SendMsg(m interface{}) error { + logger("Send a message (Type: %T) at %v", m, time.Now().Format(time.RFC3339)) + return w.ServerStream.SendMsg(m) +} + +func newServerWrappedStream(s grpc.ServerStream) grpc.ServerStream { + return &serverWrappedStream{s} +} + +func serverStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + // authentication (token verification) + md, ok := metadata.FromIncomingContext(ss.Context()) + if !ok { + return errMissingMetadata + } + if !valid(md["authorization"]) { + return errInvalidToken + } + + err := handler(srv, newServerWrappedStream(ss)) + if err != nil { + logger("RPC failed with error %v", err) + } + return err +} + func (s *grpcServer) BidirectionalStreamingMessage(srv messager.Message_BidirectionalStreamingMessageServer) error { s.wg.Add(1) defer s.wg.Done() @@ -158,13 +233,24 @@ func newServer(masterHost string, masterPort int) (server *grpcServer) { func (s *grpcServer) start() (err error) { addr := fmt.Sprintf("%v:%v", s.masterHost, s.masterPort) + // Create 
tls based credential. + creds, err := credentials.NewServerTLSFromFile(data.Path("x509/server_cert.pem"), data.Path("x509/server_key.pem")) + if err != nil { + log.Fatal().Msg(fmt.Sprintf("failed to load key pair: %s", err)) + } + opts := []grpc.ServerOption{ + grpc.UnaryInterceptor(serverUnaryInterceptor), + grpc.StreamInterceptor(serverStreamInterceptor), + // Enable TLS for all incoming connections. + grpc.Creds(creds), + } lis, err := net.Listen("tcp", addr) if err != nil { log.Error().Err(err).Msg("failed to listen") return } // create gRPC server - serv := grpc.NewServer() + serv := grpc.NewServer(opts...) // register message server messager.RegisterMessageServer(serv, s) reflection.Register(serv) diff --git a/hrp/internal/data/data.go b/hrp/internal/data/data.go new file mode 100644 index 00000000..c583755f --- /dev/null +++ b/hrp/internal/data/data.go @@ -0,0 +1,44 @@ +/* + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package data provides convenience routines to access files in the data +// directory. +package data + +import ( + "path/filepath" + "runtime" +) + +// basepath is the root directory of this package. 
+var basepath string + +func init() { + _, currentFile, _, _ := runtime.Caller(0) + basepath = filepath.Dir(currentFile) +} + +// Path returns the absolute path the given relative file or directory path, +// relative to the google.golang.org/grpc/examples/data directory in the +// user's GOPATH. If rel is already absolute, it is returned unmodified. +func Path(rel string) string { + if filepath.IsAbs(rel) { + return rel + } + + return filepath.Join(basepath, rel) +} diff --git a/hrp/internal/data/x509/README.md b/hrp/internal/data/x509/README.md new file mode 100644 index 00000000..3b9a05da --- /dev/null +++ b/hrp/internal/data/x509/README.md @@ -0,0 +1,6 @@ +This directory contains x509 certificates and associated private keys used in +examples. + +How were these test certs/keys generated ? +------------------------------------------ +Run `./create.sh` diff --git a/hrp/internal/data/x509/ca_cert.pem b/hrp/internal/data/x509/ca_cert.pem new file mode 100644 index 00000000..868a01eb --- /dev/null +++ b/hrp/internal/data/x509/ca_cert.pem @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- +MIIF6jCCA9KgAwIBAgIJANQvyb7tgLDkMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD +MRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTAeFw0yMjAzMTgyMTQ0NTZaFw0zMjAz +MTUyMTQ0NTZaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD +U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANGmhBQQ5f3n4UhgJLsXHh3CE3ej +Ox36ob+Hnny9Gb/OquA4FMKjTTaSrhKIQapqlCLODai50XKSRBJcgsvsqWk9UdL2 +3zf7CzAPmg5CmzpWWwgpKPTuK5W+gLA1+uMKecBdH5gqSswQ3TD1fMfnJuq9mNfC +GsMkplaqS5VATNFPVnqS7us3OXKEITmBaQP4wOpGP1PgqX7K08aZEeAyQJaTS5um +4MNlBLYa/nQ9Wca0Uk5tzoNjE6mWH7bTuwdoZgOIwKFmBbmsC9y/HzwV/zRsL8Yp ++7FwfIYuZ5j8gBNqSFQjDFkm6Q7RcQ/lyHHj9YduOgTciIFVgx+j8aZvFqH127h8 +WIb7Jppy0DEDJE1hRP6iV2uVoaUxhXWrCWLBUU+naLix7SJ8rqw8gHwRNWfM/Lwg +I3rGXdw5WIHVQcuxevN6qVSZeWVYAlAgfxjKtM5cKZyM+W80CSdVKEku1XA0sq6h 
+jaiJdo6hpm8BLIB2k7LWafc5MASst7XULk4uDC/OYcEz3+C3Ryn1qBltr1gA3+5K +ANuhjYCZH4P0pX08I1MpeVP6h8XhbBPEZg2txbVGlnDXEFoJN9Eg5iEKRBo/HKhf +lP84ljtBSmCnsF6K/y3vnRiu+BVNP5KMq179DNqEy7tSygzgY41m3pSFojdvA59N +JWJoy9/NZzdlU4nzAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUW5AMXXg/zPSaLHwSO/7LwoBeZYUwgYAGA1UdIwR5MHeAFFuQDF14P8z0mix8 +Ejv+y8KAXmWFoVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV +BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1zZXJ2ZXJfY2GC +CQDUL8m+7YCw5DAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBAKTh +Ofg4WospSN7Gg/q3bQqfSMT5XTFC7cj0j3cWDZBnmqb0HAFPmzHT+w3kBVNCyx1r +iatOhaZRH7RA0vacZQT5pD2MGU48/zFfwBV/qHENQWuRLD2WOOEU3cjjoINBclfP +im7ml/xgz0ACOgUyf+/2hkS7VLq4p9QQVGf2TQt65DZA9mUylZTdsBf4AfEg7IXv +gaYpq6tYmNi7fXDzR/LT+fPd4ejQARy9U7uVhecyH9zTUMzm2Fr/p7HhydSXNwhF +JUfPWw7XYO0lyA+8PxUSAKXOfsT44WNtHAeRm/Gkmn8inBdedFia/+M67k45b/wY +RF11QzvaMR33jmrdZWxCc0Xjg8oZIP7T9MfGFULEGCpB3NY4YjnRrid/JZ/edhPR +2iOiEiek4qAaxeIne3CR2dqCM+n+FV1zCs4n3S0os4+kknnS5aNR5wZpqpZfG0Co +FyWE+dE51cGcub1wT1oi5Xrxg/iRteCfd33Ky668FYKA/tHHdqkVfBflATU6iOtw +dIzvFJk1H1mUwpJrH/aNOHzVCQ5KSpcc+kXcOQPafTHFB6zMVJ6O+Vm7SrqiSENM +2b1fBKxHIsxOtwrKuzbRhU5+eAICqwMd6gcIpT/JSR1r+UfHVcrXalbeazmT2DS5 +CFOeinj4WQvtPYOdbYsWg8Y9zGN4L9zH6GovM1wD +-----END CERTIFICATE----- diff --git a/hrp/internal/data/x509/ca_key.pem b/hrp/internal/data/x509/ca_key.pem new file mode 100644 index 00000000..4dccea1b --- /dev/null +++ b/hrp/internal/data/x509/ca_key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDRpoQUEOX95+FI +YCS7Fx4dwhN3ozsd+qG/h558vRm/zqrgOBTCo002kq4SiEGqapQizg2oudFykkQS +XILL7KlpPVHS9t83+wswD5oOQps6VlsIKSj07iuVvoCwNfrjCnnAXR+YKkrMEN0w +9XzH5ybqvZjXwhrDJKZWqkuVQEzRT1Z6ku7rNzlyhCE5gWkD+MDqRj9T4Kl+ytPG +mRHgMkCWk0ubpuDDZQS2Gv50PVnGtFJObc6DYxOplh+207sHaGYDiMChZgW5rAvc +vx88Ff80bC/GKfuxcHyGLmeY/IATakhUIwxZJukO0XEP5chx4/WHbjoE3IiBVYMf +o/Gmbxah9du4fFiG+yaactAxAyRNYUT+oldrlaGlMYV1qwliwVFPp2i4se0ifK6s 
+PIB8ETVnzPy8ICN6xl3cOViB1UHLsXrzeqlUmXllWAJQIH8YyrTOXCmcjPlvNAkn +VShJLtVwNLKuoY2oiXaOoaZvASyAdpOy1mn3OTAErLe11C5OLgwvzmHBM9/gt0cp +9agZba9YAN/uSgDboY2AmR+D9KV9PCNTKXlT+ofF4WwTxGYNrcW1RpZw1xBaCTfR +IOYhCkQaPxyoX5T/OJY7QUpgp7Beiv8t750YrvgVTT+SjKte/QzahMu7UsoM4GON +Zt6UhaI3bwOfTSViaMvfzWc3ZVOJ8wIDAQABAoICAQCxi7A9AhaUUWRzE6DnpGtH +zk0IO39cIx4KAsNQZiDBVDdXzYafUwaX2d57KVNbDAlJ9HCS3FKpEX9+gUPviQvr +aRe7boCZewv9dqkDvJqS7AEJxzm9O1pD5WI8WGqRDhUPuI2CIwbXDM0VokA7VuGZ +WFlxFxvs+UO5D10VF7A2blcRVQ/quQj4lzc/6P1TdL2DaVxGH3PLQd/ZR1ZhJI2Y +N0OHnOqp7wnvYqrtK+u0oI83hjym/ifvrYhMH8E7Q8lo4s4noSvmEvK0zlKYYxSO +g7RtwK47lcSPKgtn/yZDyvVX85qIgbBLcUmrqfB3qxMKz2lpJo6f4Rg7mm6SgW+K +zxYnGNCTPfiyPKiufM3rQPfJ4giqQ1XDKiZEKUJBo4mzzV6LcAoDaEqhHBlySpi3 +Z38I0rmAT62PRJ1sMkQl6j1Ben9TpwTzJmLX1sEO1Jsabsk8rRdV+ni5oRRUdW4H ++ratyQ8pmegLYyhAZqkD7FzKBLdznLmWXVTcBQkRoD5lQkCP2OF78TdL4twNvoTH +X4kQ3cNysWFXsm+yf4jSCHl4BEtGA2jOU690T0trtMf13aI3wEULmcBgc2ix+tch +wX79hwBYcjGGDfTMb39r/DrcgWMVFXawru78QFoN9vVxznit9LrOERBm6zN2ok4X +E1kD4YZGr8dxUHax0or4CQKCAQEA7W1Sxeqc0gV0ANQf3eCsFNjvT97z/RSzzUYF +wCe4rpzQ9ZNsY2UYMYmEzUuRBuQxYKCNTWot3hu+6OPMCp4pLuu2l8ha/wCM2TkY +6hceduvXkdUNUG1xZNSR8waw4PTXNeoOD30+GB4OpHdjzsF5pEzx853/Qo/ERJFx +A+aZZJy/Sfw82KTseYTniWYjH4iYUbC8TVLfRjPw6V2VcF78pYkdAQenGglqw/sI +4a3FhJspN9xV/PoPbb7PjBJFHUt7ZRQt+D3WPuhLSjyPxwV+3u2OsQ1/J/sxcih6 +rW2g+OJYrK4YkOqX9tLRB39RjO4H6Eiv5eUAw/+vHHufKRu1HwKCAQEA4gzxZNzm +r1X/5GAwwyBJ4eQUHFvEQsC2L4GTJnNNAvmJzSIWnmxGfFLhfJSabnlCMYelMhKS +Ntxokk5ItOhxlUbA1CucEtQgehJwREpUljlk7cii5MLZEkz11QxIVoAhGlq3svFG +B/gwYWNVWl2CXcK2o6BBD9sIgzgp7qhmdJej16h8YkWn7HibKs+OBcdCu+ri7wU+ +VdLpdhN3uqo1b1tO58Gv+40vuQE3ZKDdMy55V30+0qEqg6dXvDQ9nwYFkw6C31Ad +Wpa9ZB0A0HNSou1xTWyl/hDie6dlN84RHGX8on4sjgPrb8A8WVis+R2abvh9ApZA +fRZ3H/ZYXB1crQKCAQBgjgEHc+3qi0UtwRZkiSXyJHbOKIFY/r5QUJWuG3lDqYph +FF8T3N0F6EMVqhGEl/Bst14/iVq15Nqyo1ErUD63UiyjdVtsMLEW9d1n9ZbyDd9Q +8y/C8X8X3kqsZqAwG+IZjuHA8tH5xN93iwYP4yaw5onO5QYV75mFuRAY4gKnpAc2 +81lbUVbJ5H60pdDK1iX7ssAhQf6C8kSa4vAPDtH4D9a3wID4WbQNl115Sc31q5QL 
+n5NomdkEbIDDGfr5euTnqlk3hw5F7voPaqmd6mI6Dqnk3vRDMihdoJCjTt4T2Rju +wK5E4OKEAh/3yJNFmNemY0kFWSgCjUyNbMjBUv9JAoIBAQCYS9QO+m1JUA2ZVd1E +eWqNkFakTIdL2f5kv03ep+wIxwq6c+79SUGr3UMh5hStvXCFYjhAJhbwc0rY13lQ +uRJdWk/sIn2CifxfgjC1MccPdxeyxGxK56PMGqG9qgrKjITA9sGxA7EFCYe+9We5 +/Coq9VaLoxpyjkWL8rj9m+N7RfcTAubaZseeIBuamj+7UOZ7KOM/2i6HMBQugys1 +Thu2LLRanDnups6yPEmPuHmPVA5YjX9X9VFpZcNMf33MuAflbe9qeNVuBQUQgCHe +TvQr5QFjAoJLTCDq4nrlQCZzFZtB9vQZsjZbEg8WuxG+vN0hSrUemxBTtmEH3bbm +SLn5AoIBABGxznQFXXlF3eLIZqLvItDMSTpFp8YPk8GQWPT2V3pNNjvK/j7eg+tn +VouXv5LjyLTzWLKnPjIU4t+qwu6R9nohZ62OjGl6lssVdjPnf4R6UKzRa0iIZtH4 +BlGncnAbzb6TJuLX7dNwICoUCGyvk9tdnThH1FY3ZAEhOi1G8LEh7aBrj9/vUZ2d +S5jzZ7kLh04AB8OP1MXM3sZE7VlIxUtT/NLlwC8zRsg84pAjg3U7PygIDYQDzCRB +4yIvDziTPqDB/vdCKt7/Xary5Xj4NwqcPCRf6HvdHYCVeW7V+mWcMKZgodQARQhv +qQCK9iiN08MAFNia/0/Bj4D7XKurNRY= +-----END PRIVATE KEY----- diff --git a/hrp/internal/data/x509/client_ca_cert.pem b/hrp/internal/data/x509/client_ca_cert.pem new file mode 100644 index 00000000..62a0ce05 --- /dev/null +++ b/hrp/internal/data/x509/client_ca_cert.pem @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- +MIIF6jCCA9KgAwIBAgIJAOhoXtjjP6JdMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD +MRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTAeFw0yMjAzMTgyMTQ0NThaFw0zMjAz +MTUyMTQ0NThaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD +U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAO7fTqeU+8OfKMwXABNF90+RYL4X +YS4ULx4rpf14Ntp1SF6o3itCSM3jJfHzexj2Pm16aL+OQll8ODtvTadqVSMndMCn +UN/jVjxiMmjkSNKpwUGG69CsQzCKoueKBCEy/CZSopQae6Wxn7mqTAzhFlh3idNL +J+12UtdqDxnPDsiG2XBET3UrKyJeBxMgRyPi/g4wHfhH9oJ97jkdacUlLko8l22s +ZiMSSwwOlWxtTY5t0FbHu08ufP4eYTqC0LL3z1Fon4v+4BqUyK7BT3dISwPBmSd1 +uTD7Wbaa/QmfU6Y18dkNlK00GUAcKWgPfLcm7EH/AAz5XkqozVR3z5FLBYFTxVrA +Ly/Gu5HLx/uwoYWeYRWBOSkqvdgf9PT57imO4fOi1CTQuq/1LAdaxGkm7yXaz0YP +ySTiT6PvcLWFEbjrbufxdBrF4/ZsQz5vdJiKq2IQmCIKONJOFHWqgoF4AA7Ze1cl 
+mrK0eLzUlG1WmSy5mpjByRanahQWYvK1s0tc8IwMRRJY4DS6Dp99EVyteKZP/jc0 +x+ILet2ThDhjY3AxtkzlejyylABgl2AyGoGzZzbaf1q/0LfM6SfYBSVZK3TFR3Kt +8lQnG0tztoM+bnM/JZ8UZ61s16jJVxWzlZ+rx8rCpIvh3Cnl52DGo6oA4Kt60uDP +3iiTLGNYqEyHmzgnAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUdOqNqaSjcn7BRN3fLs4eTIp1W9MwgYAGA1UdIwR5MHeAFHTqjamko3J+wUTd +3y7OHkyKdVvToVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV +BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1jbGllbnRfY2GC +CQDoaF7Y4z+iXTAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBAOnH +CrwiJd51oBic5PwjQBhQcUtGOfR1BJe/PACpLXTf1Fbo8bLT5GxZLATlw9+EVO9P +JhhH+oiUuvA7dE2SRiZXpY7faqtDgvVfssyCrvACkM7pcP9A5kM4LiunX7dpY2xp +naJAqDV5Av1mOohHuVEZHqV6xQSREQFW2IusfpCsPP+P+RPKM2o571e6oz5RGbuP +dQ39QycBTK8ezccxaDaH614peAnBi4Q1GuxzgNmXq2FPDcf7F1QcWMrW3jUI8npi +Q9rXRwrqUYP7Yzz+dIziGdpOfZd7x/MyCXuqRdFdA+bulGM2Es5lvtguPOFhcWp0 +3hzLJ+yolxyqxnNNdaU0r+TDbgxOBjw0VxahuhzFDeZsP6Civzp+Y6MRdvofNXBm +IBD4uqmQtUUyE2uoznXvZkXaSc+0VIGgs04AMS9irBC2oVEGDp0AbelcIhdgToam +/NTuOmxgadwDuEn3TIFYkzx84J81kL8g0HQ1N09nSXChkSVb+XlxC+Wosxoazydr +M4FOvaa1V4vnmIdA2aF1nWTzJNcc9FC23zTmQkV2YJ1IKNmxGd3xBZzUtUBu5OgZ +vPXECtUjRcraNuXeL6gSX0qBaaVkcdxhp8CpI8k6Qb+mgOaq/ixrVEKtczBVXjHD +pO6QmwMZtqR8JsStbMCYXa2owt4k8F3yMlIKE6qX +-----END CERTIFICATE----- diff --git a/hrp/internal/data/x509/client_ca_key.pem b/hrp/internal/data/x509/client_ca_key.pem new file mode 100644 index 00000000..77065d5c --- /dev/null +++ b/hrp/internal/data/x509/client_ca_key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDu306nlPvDnyjM +FwATRfdPkWC+F2EuFC8eK6X9eDbadUheqN4rQkjN4yXx83sY9j5temi/jkJZfDg7 +b02nalUjJ3TAp1Df41Y8YjJo5EjSqcFBhuvQrEMwiqLnigQhMvwmUqKUGnulsZ+5 +qkwM4RZYd4nTSyftdlLXag8Zzw7IhtlwRE91KysiXgcTIEcj4v4OMB34R/aCfe45 +HWnFJS5KPJdtrGYjEksMDpVsbU2ObdBWx7tPLnz+HmE6gtCy989RaJ+L/uAalMiu +wU93SEsDwZkndbkw+1m2mv0Jn1OmNfHZDZStNBlAHCloD3y3JuxB/wAM+V5KqM1U +d8+RSwWBU8VawC8vxruRy8f7sKGFnmEVgTkpKr3YH/T0+e4pjuHzotQk0Lqv9SwH 
+WsRpJu8l2s9GD8kk4k+j73C1hRG4627n8XQaxeP2bEM+b3SYiqtiEJgiCjjSThR1 +qoKBeAAO2XtXJZqytHi81JRtVpksuZqYwckWp2oUFmLytbNLXPCMDEUSWOA0ug6f +fRFcrXimT/43NMfiC3rdk4Q4Y2NwMbZM5Xo8spQAYJdgMhqBs2c22n9av9C3zOkn +2AUlWSt0xUdyrfJUJxtLc7aDPm5zPyWfFGetbNeoyVcVs5Wfq8fKwqSL4dwp5edg +xqOqAOCretLgz94okyxjWKhMh5s4JwIDAQABAoICAAmMq9xPPHFpn3vpP3uFxIlN +yoxO6veonumZ3Rzw/WBmZ+pA3gDkuXxhpFaz4SvyTDScPCvMSCLDsIvPu08CFT0+ +ipBZIAaTVBM96b3/wlmJp8wy1KKXAGikYjbXcarSGvp9OzqohGDvZO9LO5cYOIh4 +3u2vh30ayd0KxGfHu1OQ8IhocrTAcQ0CrU26cJ2iqX1vtwMB/XziA/AMmPnkrqER +IwyjY8HrLUziGF8pT3xuL3IIshhMR3rxQ/nO2QEOnx8mC5rRKaxmXk9+MusV3Mnd +p33IWwr2QXPnZk5ILFPsvCptPJBgENJbTdx3IglAaRmKVDowjfB2Jx9FWur4ENQy ++yCzf0ygRoXnugtwE48/L7P8mlqZlZsxQbUUjXEPtht8rtM4CR5b0v7PHXiLh1oM +igfy1RDAQAZQRGIlWCOeV2soiyKLnCGyAaVXcM2ksDkYOSH4ObE4KwF1Ph87lNaG +ywolsPvQD0ygymXcuStrYHWamTp8qRjNvZBcThs3SaKN+lxXxPng2tBPUwU0S6nj +e0pjWco74elBk+fjjd0wNolKjUD7FhRXlWiXz9BgcCjRD9TLoVk8mp9cFL7OLzJc +735JmNKP8C5Qs91Ugo6Z9tWQQTdGHZe9ElUY0fWP0bs+4iBaadl63R26tchLncZE +LnYsi2AjDdV908cEkAiBAoIBAQD6LbGeyFHZA42nuSw/NFsMVldqU6QwmADQI3Tw +JEdw2thS8VIX2c8aeJkVL++dNmSPcqs4NqhzgJSm9o1xNqGZovAPK/B3NmLl1kzG +JPwSr8QwNxmKwUlbt1K48qIV0JmetOgRG/ll5ux2CxgWHzwgRwtvpbnxDa7Gf7BA +UfH7AfZJ3iV+HlJSxr9XxNgFoNEtpP9sqbOgt10f5JJlIELCTa38iMBojAGxlzyj +7DGYY/diQDr+6mRNnv2pY57dOnmdvN1w+p1W7saaeRCeltva/G+5n5AWMFl5qBjT +LDktBE+okH5wapkUsZzZTByTgFXdBC2wY2qBrOexBAyS8/F3AoIBAQD0bkNBc1ya +KYmWlCsVSUZxUGSOp9g7ZdzlB/1G523s3PltXSphsC4mACs7ZAs5OAO/bu05kurp +dOqEAxsC05IxD2/gGoarC6QfTum9CMNoKrvtczA7Gl+6D5djum17lULY6YSBO75J +L0FQK6nCVGfAbBRAqhiFi+9kXvNThuqjgoiCNwQYxaG8aovoAKTFdkzQjDw2tUgM +jqCM6ifOBJIRolFq2CBom8nB+wpsI1naFLaOdg0Luz/Ds03gD9nWa6a4XIowKCml +Tek1Q+S2hZoTgfOlKRbCcM1KyoaI9LKI/pbKmpNyyrADw/kZKevfsKnYwMpHlaTR +NSuQ2VJKuxrRAoIBAQCBQ3bQ+eQAYyugC7dm+OBKYZpNH+ZoDUHuSUO0iKo5D3pS +cMnf9PRjUwiVv+zoqCARVkhNhUBIXZlxI1c1teqNfXjX/fYDQqCa7L1Ca/2qkhKm +bvHNlc0XjIM7eHJzHxMgw4xcur2D/2sSGu1ZEM56RvsLtu96M32opnUk5rJG5V6i +EBwDLBuRFYvsB5MuZUdvdB9dv9lGIzgEsI9LnP2hc42APBBedGizn9b/Q5zkhlJd 
++53/9I/a41lhWk3NNNd9vwYTyAnfzwPi8Ma7imsSnPgFSwKh1F2G1GnvQpxQPDgE +epQ59XofDR5j0EW7mMXEqtIIn3V6hyI3fkYY795FAoIBAQCsx7x26YsN1krRzA7g +TxmiQ8exJ2gsJIcOxqT8l98WTeVqry6kOxuD9R6aLs/YNIZBrbG2vuma+PBFPMS9 +LLzsPRNCAL4s7l+nWerTmvw2B+8rm/796Fi+dwL2lfOKJipIllj52TdbGDI874Bi +Q7PLSxrN0u7eh9pCwvORmY8G4eCI20bkE9+OBmq7JqlSg5ss19RAf8hcR/2pXmOg +t45hNLIEqp3OFEF8A26MnjiHdZjN/xidsFEUjwx/U/USIqqJK7Dq9ZjqprYw1rs3 +Yh1VqMiHeRIDhCU5twt+iCojuILy2G1d+XSOVNsiNIXtaz3EYBMcouUMlV8kVtpa +xQPhAoIBAEr8U7ZaAxN2Ptgb6B8M1CVNE6q7S1VuX+T8xkciadW2aRjJ3PufFfsk +Zo12fP9K/NeOPTIz0dQB6Gy/CKzDLb8NnJCJnCUUaO8E45C2L9r6qvIJpXWHp3vo +neGO49y/5st7suOZkWU2B6ZGwNWH90296mfSKcUNxSRMaHCotPdVDyvOgLC24ZWR +6teRaxB2sVZYqmoz+4+G8SOK40bHJKf1kwujbrS3OqzDzEeC/STtqYZWPW03MFkk +MBPQvwCWMJINv4zz4YrnOaA9COc1/fTXCG5kKYyalPD8VKxi1usas1pZwIqZkuwm +D6kBMuZ4gkKW24IYzXzOni0/BOnpOfM= +-----END PRIVATE KEY----- diff --git a/hrp/internal/data/x509/client_cert.pem b/hrp/internal/data/x509/client_cert.pem new file mode 100644 index 00000000..e35b94b1 --- /dev/null +++ b/hrp/internal/data/x509/client_cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFcTCCA1mgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx +CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV +BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIyMDMxODIxNDQ1OVoXDTMyMDMxNTIxNDQ1 +OVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAL2ec6a93OYIioefCs3KRz752E5VfJPyVuxalBMc +7Dx84NsdwpbUyDT6fO7ePYM8IvYAsLc5coLCP1HKGGRmYm423WZf8Kn93BDl0XcN +4bgtW9ZrekvYcXqSzygz3ifdQeZljZrqW43dkkYR2vWc+uJXs+vrRVZyUSLLbe97 +9zUbWbOfHBc1jK1vTUakl08VhllYbO0m0SYZIni0sioItVdVWTz9XE2COavLqwwL +MIq8N7JXEdYJC49JWfdzvqZYTxOn5FSTCWen7/mcZmuLYPwUCkSu05M5T2o1ygkd +ohA+/X9yjToPJ7NO509lKHWo7+sp9if6jZsiOU45/t84pD6juVZSZ20/A9i6hjtj +C0SqYk2iQEtRp+lT6yYa5ffeNllFUGtM+xq2are2n93PnXwMTUlYGuTtkyRPG717 +ZtQjKQuwfdJNoNbJl2cfQpmtLdm4Jzrg5cWiiFro+aqnZxIfUEEDkIBaUjYmwMkS 
+Qq+S32L4f4u7rtbnzdo/jVwq0wpSjTGQJEab+v2wZpDhVbQblTyI30A+TvBIzLil +09OX49/teZCp05kOJy0V/yXdQtPwlQGXdsCUmD6dnGav17fB1witXDdG+4SNoyF/ +PN+8wtlMQ8fWvLdxLsd/Rq6CEZQV9mBhrQxXUmFFDhd0O6wfxR/lVFxIWg70Fz7P ++z7tAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFG0psrHrGny8ziVm +RtulG3f9ROrhMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD +AjANBgkqhkiG9w0BAQsFAAOCAgEAtr1dzSQswIOlEGlLtoAwkL7ys/gP2fcdh7Jl +ggiPs266yzZFyGGdd2GKo6tcjdBNjfnO8T5h8eLzj7QlzKPqA/l0BgAW7s7WX9QF +wCivw1DHE815ujlQNo3yve38pd2/I0hdf9GtQLGyOirYpwW5YcHvpmLezrW6J3UU +CWIfYhqO6bSs+HCLkvQdsCG1TpveWYXfC9aXHjw+ZGOjBMEt6AgdWctwzTjQfZub +VjZosBC3ZkDjkA9LTqKP5f8XSWt89J4JCYkiFRiJuYYiNYcZpb0Ug93XjEHIHXMG +N/cD9fCB2HovoVu8YnezpSrqEhqEikHSq80fwbf+NaT0CEbPMx3UMzt8d8gwUiwE +nzzf/o4uOwoofNWfka0J1VPY1AtjUDvz44LyVhp4uvkEJEK1WQ46mM68H/EOUmpd +fHANEbV8HLq2iOjR78n5+MCHRcX7duScp5wT0ajfDg41VrhvV/u7YctFj8ynQJg5 +cqbH+GgTrEfAFFm5mZH1SGqNPyxr1eQFWXMRGE7R/NoyQo2uqrSRmz6JFXlnWtxF +YmLhnOdQaytcpiYN2YVyC/rLK3l3Tbh4u5axvlZP/hi+nQluiZzkH97iUqXcBU/9 +jYNohnJzXMHTIZM8FQY+9uGw9ErdDo7FmX5Xkp4TzEz9k10m1fnt0njSEzITtqpg +MoO9n00= +-----END CERTIFICATE----- diff --git a/hrp/internal/data/x509/client_key.pem b/hrp/internal/data/x509/client_key.pem new file mode 100644 index 00000000..d9c4bae3 --- /dev/null +++ b/hrp/internal/data/x509/client_key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAvZ5zpr3c5giKh58KzcpHPvnYTlV8k/JW7FqUExzsPHzg2x3C +ltTINPp87t49gzwi9gCwtzlygsI/UcoYZGZibjbdZl/wqf3cEOXRdw3huC1b1mt6 +S9hxepLPKDPeJ91B5mWNmupbjd2SRhHa9Zz64lez6+tFVnJRIstt73v3NRtZs58c +FzWMrW9NRqSXTxWGWVhs7SbRJhkieLSyKgi1V1VZPP1cTYI5q8urDAswirw3slcR +1gkLj0lZ93O+plhPE6fkVJMJZ6fv+Zxma4tg/BQKRK7TkzlPajXKCR2iED79f3KN +Og8ns07nT2Uodajv6yn2J/qNmyI5Tjn+3zikPqO5VlJnbT8D2LqGO2MLRKpiTaJA +S1Gn6VPrJhrl9942WUVQa0z7GrZqt7af3c+dfAxNSVga5O2TJE8bvXtm1CMpC7B9 +0k2g1smXZx9Cma0t2bgnOuDlxaKIWuj5qqdnEh9QQQOQgFpSNibAyRJCr5LfYvh/ +i7uu1ufN2j+NXCrTClKNMZAkRpv6/bBmkOFVtBuVPIjfQD5O8EjMuKXT05fj3+15 +kKnTmQ4nLRX/Jd1C0/CVAZd2wJSYPp2cZq/Xt8HXCK1cN0b7hI2jIX8837zC2UxD 
+x9a8t3Eux39GroIRlBX2YGGtDFdSYUUOF3Q7rB/FH+VUXEhaDvQXPs/7Pu0CAwEA +AQKCAgAtlwQ9adbLo/ASrYV+dwzsMkv0gY9DTvfhOeHyOnj+DhRN+njHpP9B5ZvW +Hq7xd6r8NKxIUVKb57Irqwh0Uz2FPEG9FIIbjQK1OVxEYJ0NmDJFem/b/n1CODwA +cYAPW541k+MZBRHgKQ67NB3OAeE8PFPw/A8euruRPxH+i3KjXSETE8VAO0rIhEMz +Ie2TQRydLKp71mJg45grJ17Sxmc7STT8efoQVKgjCwPkEGiqYpiNk2uhZ2lVGRC9 +cyG6gu74TdyTDQss1e7Xt+fUIZ2+3d6eJt6NvjC+25Ho4SwO9eYjF1qnQ++KqATr +TOoOaADPLLaXZCFZ1D+s9Dq4Vrj+QGk8Fajotj4gBpUtc0JxtvYM9EhlW7DpchYm +Cxe8vmEi/54YErXKawTUXYBB8IeDzwtvi3v3ktmH8BsGJ6Y3RXDI9KIG/6IE5Xeu +hkPCJnB0e3G2nlaffNSrVknxF+z74DB3T2kj0zC/4H4/hHo4W5D/pswcGWlhREWG +E7ViXJjBRkc5tpS9HfNdZ2wHiccioDIdGSHGqGMF4rLCUE2n+zc4m6pvvNCjN5KB +S4+zps50Gqtbp3DH2h1YLtkzuzvDhgpMPyJ1qZsdgelRSi2IaE5oekuBGP2WeXFw +DLI/cijc13cCacH+kpllQL//zBP8mMGmussWGgrVXdm9ZqD+rQKCAQEA6OG+s8sa +QZJ8W1nukcaS5rSvJBeZO6neCd6EB4oew5UGJsSz+x4RtJ7aJhdTGtyCXqiR2uFw +SBYdTcOgNbBUXg39vWAv+k2lmxiMGuLnAcNcGYyDLXr1SUJwe4Be984WNFdqzY0z +LCd9NvutWWX0Xd1VBdhlDuu3eBenzPBKIxTk3N2gLvzYxC/62e29Trsm7Sur11ut +Jay/CRdomjaqIiZ8q8qgdSU+pPe2DZYzUOutySJhLUegrrgWvPS/i8FHf7AGRgki +wpFn3gy5zCsFzr6n/TzJ5zQvlz+PcbUHHb06U1cnT45fkFNAJJvBYa4vi/tRx92E +Bi8d4bn40fUo3wKCAQEA0HFDHzhRxN/RbzBkymGlgfrsKcBdaAzgClo5uAXr8sdi +efsgBFo228I5lK6ywfzOfD/UxGB6ucdkZb/tRLtoK0OqOGiNx2Q1yazRVbuhrBrR +Y7DDbh7164o/MAYqPGxTMUxzXia7WBtNm00Tv9pDsw+NTzbrk7OxkLZWbjQEj99T +A9pcqXYA1RJtD/6io/43/oVscWPdRrbrNrJz+27Bsau20MBheVmX5sLTO2iWKTN4 +/ofrvOv0ru0I3ACHiLIaQFXs4snQjlhJm5MJ6kuZVdYKAzyNE+YOPnAxoiQAlHau +E1aV8ON7jmjhwxa2QICCwVcUNmwXU4UztGyGZ5a1swKCAQAi90Ia3LPkhIoHbUlU +uev0l8x0LtbjDm44LSDFwQc9dnKl/4LGgY1HAVLfxUDFF7a7X7QGmTKyoB9mPakg +ZolEVfVzKa4Kdv4We2kN4GOu8BYz/9TyTzPk/ATHhk68BkVvNnDizACS8JrsVn2A +nr5CGalaZ1NFGj9B2MtpCesXuVtjjiMu6ufhDRMtBXUXDSKbGaODglBNB9LnGoyq +GusQlZbCdHoDHMR7IHZFM/ggfkJpoK/WjJqjoSBI3raj1TFXCqbmfRiq/goKXP7I +mO0WTaoLa8Uk4cEDhJeVCwk2feL0AHH2j/npQZav6HLwp6ab7fApgikAhLKH4dRq +MdUhAoIBAQC7svJVf7qqRT3sGTD5yXpnlJPreOzj0IxC5kKJgtOYuJDl9Qw8vxwd +QkXlrHcOFl++JSCsgZCiEHpI4c6AER5Zr0HuL8BUJ9oDtJqA0EhimXeqhLdHR5v9 
+sWz7CuInrQgxIX3V75zOVy/IRF0fayWBbeS6y2LRi4O/I2KrNC5TfC/eDVlZxAg1 +1rTdLVg5wqebi3w+k0Xj8r3WcFXeuTq0ikNCsapUwyf1RcU+/wwRJ+exlKXkZrnc +d1h9/AAQSQk4m+eHxWIHfFs0O/E2yULXt7kmdvU3UPfMo+0d67uV9VUF1veIhuBx +OeLqcV5GsTKNdaOe6jELJayMsRlK2LzfAoIBAEoWFSUdf3ruvj+ONju0TDtdvvTb ++i+3ttqMK/duYM2TlD3Lvqyx3kNxlMTAArfvnwtKVSw0ZIGSPc/5KHnxldcdALgT +4Ub1YesUv5585thMw1EWyXAPognLhfTEVSLYKcMPoBNCv7FvAT3Mk5SZPReRkbT9 +oqDAzg7r+0+pjD9LmnIXfCxfbSV6zcBFF8/iGAmzh3CanDqVkUds1+Ia8018cfDS +KW5PQAEnJC/BZAI7SQsxH0J9M7NYxJRN0bua5Be0N+uuYSOa+d9yecugfmvga6jf +9nEcohJShacCSkQvIXlq5Uy/WBb6sbiTmHjjW14FG25B0rrQUjmFAUiYceI= +-----END RSA PRIVATE KEY----- diff --git a/hrp/internal/data/x509/create.sh b/hrp/internal/data/x509/create.sh new file mode 100755 index 00000000..2b5aa5cf --- /dev/null +++ b/hrp/internal/data/x509/create.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +# Create the server CA certs. +openssl req -x509 \ + -newkey rsa:4096 \ + -nodes \ + -days 3650 \ + -keyout ca_key.pem \ + -out ca_cert.pem \ + -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-server_ca/ \ + -config ./openssl.cnf \ + -extensions test_ca \ + -sha256 + +# Create the client CA certs. +openssl req -x509 \ + -newkey rsa:4096 \ + -nodes \ + -days 3650 \ + -keyout client_ca_key.pem \ + -out client_ca_cert.pem \ + -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client_ca/ \ + -config ./openssl.cnf \ + -extensions test_ca \ + -sha256 + +# Generate a server cert. +openssl genrsa -out server_key.pem 4096 +openssl req -new \ + -key server_key.pem \ + -days 3650 \ + -out server_csr.pem \ + -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-server1/ \ + -config ./openssl.cnf \ + -reqexts test_server +openssl x509 -req \ + -in server_csr.pem \ + -CAkey ca_key.pem \ + -CA ca_cert.pem \ + -days 3650 \ + -set_serial 1000 \ + -out server_cert.pem \ + -extfile ./openssl.cnf \ + -extensions test_server \ + -sha256 +openssl verify -verbose -CAfile ca_cert.pem server_cert.pem + +# Generate a client cert. 
+openssl genrsa -out client_key.pem 4096 +openssl req -new \ + -key client_key.pem \ + -days 3650 \ + -out client_csr.pem \ + -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client1/ \ + -config ./openssl.cnf \ + -reqexts test_client +openssl x509 -req \ + -in client_csr.pem \ + -CAkey client_ca_key.pem \ + -CA client_ca_cert.pem \ + -days 3650 \ + -set_serial 1000 \ + -out client_cert.pem \ + -extfile ./openssl.cnf \ + -extensions test_client \ + -sha256 +openssl verify -verbose -CAfile client_ca_cert.pem client_cert.pem + +rm *_csr.pem diff --git a/hrp/internal/data/x509/openssl.cnf b/hrp/internal/data/x509/openssl.cnf new file mode 100644 index 00000000..d1034214 --- /dev/null +++ b/hrp/internal/data/x509/openssl.cnf @@ -0,0 +1,28 @@ +[req] +distinguished_name = req_distinguished_name +attributes = req_attributes + +[req_distinguished_name] + +[req_attributes] + +[test_ca] +basicConstraints = critical,CA:TRUE +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer:always +keyUsage = critical,keyCertSign + +[test_server] +basicConstraints = critical,CA:FALSE +subjectKeyIdentifier = hash +keyUsage = critical,digitalSignature,keyEncipherment,keyAgreement +subjectAltName = @server_alt_names + +[server_alt_names] +DNS.1 = *.test.example.com + +[test_client] +basicConstraints = critical,CA:FALSE +subjectKeyIdentifier = hash +keyUsage = critical,nonRepudiation,digitalSignature,keyEncipherment +extendedKeyUsage = critical,clientAuth diff --git a/hrp/internal/data/x509/server_cert.pem b/hrp/internal/data/x509/server_cert.pem new file mode 100644 index 00000000..f1a37400 --- /dev/null +++ b/hrp/internal/data/x509/server_cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFeDCCA2CgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx +CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV +BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIyMDMxODIxNDQ1OFoXDTMyMDMxNTIxNDQ1 +OFowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL 
+BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3Qtc2VydmVyMTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAL5GBWw+qfXyelelYL/RDA/Fk4GA8DlcBQgBOjBa +XCVDMAJj63sN+ubKBtphWe6Y9SWLJa2mt8a/ZTQZm2R5FPSp9rwdr04UQgmL11wh +DCmO+wkRUeTYwsqcidEHRwOxoctyO+lwgYw983T/fp83qtNS4bw+1kJwrLtFdgok +Kd9UGIugs8BTFqE/7CxFRXTYsNy/gj0pp411Dtgknl1UefPdjco2Qon8f3Dm5iDf +AyUM1oL8+fnRQj/r6P3XC4AOiBsF3duxiBzUp87YgmwDOaa8paKOx2UNLA/eP/aP +Uhd7HkygqOX+tc3H8dvYONo6lhwQD1JqyG6IOOWe2uf5YXKK2TphPPRnCW4QIED4 +PuXYHjIvGYA4Kf0Wmb2hPk6bxJidNoLp9lsJyqGfk3QnT5PRJVgO0mlzo/UsZo77 +5j+yq87yLe5OL2HrZd1KTfg7SKOtMJ9N6tm2Hw2jwypKz+x2jlEZOgXHmYb5aUaI ++4xG+9fqc8x3ScoHQGNujF3qHO5SxnXkufNUSVbWbv1Ble8peiKyG6AFQvtcs7KG +pEoFztGSlaABwSvxO8J3aJPAEok4OI5IAGJNy92XaBMLtyt270FC8JtUnL+JEubV +t8tY5cCcGK7EtRHb47mM0K8HEq+IU2nAq6/29Ka0IZlkb5fPoWzQAZEIVKgLNHt4 +96g9AgMBAAGjXjBcMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFNx36JXsCIzVWCOw +1ETtaxlN79XrMA4GA1UdDwEB/wQEAwIDqDAdBgNVHREEFjAUghIqLnRlc3QuZXhh +bXBsZS5jb20wDQYJKoZIhvcNAQELBQADggIBAAEEZln7lsS/HIysNPJktc0Gdu3n +X1BcA3wXh95YTugcxSSeLLx2SykXnwX+cJncc1OKbboO9DA5mZ+huCesGIOKeUkg +azQZL6FAdw9PQKdqKg3RgSQ4XhK990fPcmmBhSXY24jNNhRHxGw5lGBrD6X2SdW3 +m66yYzn9hMXL4yrweGO7OC4bdyISDrJiP+St/xeCoIcXP2s07dE6jl2VorJCWn4J +SxKfDhPPohZKl6dL9npkmPcpz2zRAYpo4tsVdAAQDBRui44Vvm1eBPUo7EH2UOEh +/3JtTeDUpldM8fDaKE0kTa1Ttxzs2e0Jm3M4/FMOxqSesyJldw54F4+4m24e/iQU +gceArYMFVFTipgrLfUuRvRxx/7D7V92pqTyuD3T78+KdTqrlxvCTOqSHhFE05jWD +RdynS6Ev/1QZLlnWgMwhQAnjhc1NKkso+namF1ZmHH9owiTRBlWDMNcHMDReaELd +QmFUvutHUpjidt1z+G6lzbP0XB5w+0vW4BsT0FqaYsFbK5ftryj1/K0VctrSd/ke +GI0vxrErAyLG2B8bdK88u2w7DCuXjAOp+CeA7HUmk93TsPEAhrxQ6lR51IC6LcK0 +gACSdnQDPGtkoRX00DTvdcOpzmkSgaGr/mXTqp2lR9IuZIhwKbhS3lDKsAZ/hinB +yaBwLiXfcvZrZOwy +-----END CERTIFICATE----- diff --git a/hrp/internal/data/x509/server_key.pem b/hrp/internal/data/x509/server_key.pem new file mode 100644 index 00000000..1c778db7 --- /dev/null +++ b/hrp/internal/data/x509/server_key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- 
+MIIJKQIBAAKCAgEAvkYFbD6p9fJ6V6Vgv9EMD8WTgYDwOVwFCAE6MFpcJUMwAmPr +ew365soG2mFZ7pj1JYslraa3xr9lNBmbZHkU9Kn2vB2vThRCCYvXXCEMKY77CRFR +5NjCypyJ0QdHA7Ghy3I76XCBjD3zdP9+nzeq01LhvD7WQnCsu0V2CiQp31QYi6Cz +wFMWoT/sLEVFdNiw3L+CPSmnjXUO2CSeXVR5892NyjZCifx/cObmIN8DJQzWgvz5 ++dFCP+vo/dcLgA6IGwXd27GIHNSnztiCbAM5pryloo7HZQ0sD94/9o9SF3seTKCo +5f61zcfx29g42jqWHBAPUmrIbog45Z7a5/lhcorZOmE89GcJbhAgQPg+5dgeMi8Z +gDgp/RaZvaE+TpvEmJ02gun2WwnKoZ+TdCdPk9ElWA7SaXOj9SxmjvvmP7KrzvIt +7k4vYetl3UpN+DtIo60wn03q2bYfDaPDKkrP7HaOURk6BceZhvlpRoj7jEb71+pz +zHdJygdAY26MXeoc7lLGdeS581RJVtZu/UGV7yl6IrIboAVC+1yzsoakSgXO0ZKV +oAHBK/E7wndok8ASiTg4jkgAYk3L3ZdoEwu3K3bvQULwm1Scv4kS5tW3y1jlwJwY +rsS1EdvjuYzQrwcSr4hTacCrr/b0prQhmWRvl8+hbNABkQhUqAs0e3j3qD0CAwEA +AQKCAgBnR3CoGbd9hZl8u4qxc5IdeXwgflFmgRlGCAyCtHlxzG9hzMTD7Ymz/hMM +NG1xQltGfqn8AROd8MPJLOEY/1QtnZgM8fv24K4bqmlCW7nTUQXYHSubkUDiY2e3 +K0ETszaETMRSaLwY2IOujQQ4/ilePY3D9UOtmqVXnVN+G7USwP31xEvtZ+xPqHfU +a+FQlFIj8FuMQXDuKozdK7s+I51yjl7pVNx3M7QlH1/olcSKNta1EQXK4RgZxD6a +kkBuyPR93ohXOJ0OMSvI7eKVKIcBh0JM4z0+D5FMJ7IGbjL8Bdsjcs1a0g/y28Xf +NBVf9w8Fun3mmYmj3ZMsqDZgVg/bAfP2z7O9kMzbuqmjelOz8HXxTm/+GIHuseMx +b/nDZgB0ZN+FhATv/onshJcjr2L3SJYzEWqjYiqaCQo5qtib+/kxh6SHPhAY2o8l +zzMhKFsJMhmwW91FXqeDS9FTlcRXtYH1EJxNGa01GpyVa6plvvFTGBNkEUJnVuEp +ULohJw0NJQYQOz5omYaQVJ49lpzVhwLEolgSlIBiM3s9nSDvVBYu+bB1ovw5OTIJ +Wlc9cBrYmdxYdAj5n6JzIC1wixgxrFw1jBm8cL/2FQYtR7daZabTMyZj5vAUqjxr +OV+uvkSFcIyBs1ty9TnnKC3yd5Ma+5chR5u7JPc1lSSor6AwQQKCAQEA4d5XrCq5 +EikGII/unhkVZsh9xmILp/4PRKc+fV7TFEpGyn8HFCBToZk6nXv99roUBdeZFobw +gDuZqBa4ougm2zgBbhdQXGaW4yZdChJlSs9yY7OAVvnG9gjuHGmWsLhvmhaeXSr2 +auxVGRaltr3r8hP9eHhloDM6qdSSAQpsdeTBQD8Ep3//aL/BLqGcF0gLrZLPwo0+ +cku8jQoVXSSOW1+YSaXRGxueuIR8lldU4I3yp2DO++DGLsOZoGFT/+ZXc2B4nE1h +o1hCWt6RKw0q2rCkZ+i6SiPGsVgb9xn6W8wHFIPA/0sOwOdtbKqKd0xwn5DnX+vt +d8shlRRUDF7HDQKCAQEA16gR/2n59HZiQQhHU9BCvGFi4nxlsuij+nqDx9fUerDU +fK79NaOuraWNkCqz+2lqfu5o3e3XNFHlVsj98SyfmTdMZ8Fj19awqN20nCOmfRkk +/MDuEzRzvNlOYBa0PpMkKJn2sahEiXGNVI4g3cGip1c5wJ1HL3jF61io4F/auBLP 
+grLtw8CoTqc6VpJUvsWFjopTmNdAze8WMf3vK6AKu7PKkXH7mFQZusacpO/E61Ud +euiG9BYDIIkrnWIQdLpODgliLZzPNcJDTKTFJAfIzr3WQvUaFc1+tHyX3XhpicvP +J4zyNfHd2dZMK1csXQJvFSnPgXpy531Wca0riAYZ8QKCAQEAhaVEBxE4dLBlebrw +nAeHjEuxcELvVrWTXzH+XbxP9T+F56eGDriaA5JhBnIpcWXlFxfc82FgyN97KeRX +17y50Riwb+3HlQT23u0CPEVqPfvFWY0KsWwV99qM2a74hRR8pJYhmksjh1zTdYbb +AugZxiFh53iF2Wa2nWq0AX2jc5apalRfcqTgAaEEs4zYiUYN8uRdnmZovsRliqae +wYAx44sK1vkQY5PSNKff+C0wgbY8ECHOF2eGnIEMU8ODKnWm5RP+Ca4Xyckdahsr +lmeyJbhDb2BbaicFGEZkNa/fXZW50r+q4OQOlMHbE2NNjw1hzmi1HyLAXhOJiWZ/ +3NnvuQKCAQEAg04a/zeocBcwhcYjn717FLX6/kmdpkwNo3G7EQ+xmK5YAj6Nf35U +2fel9PR7N4WcyQIiKZYp5PpEOA4SyChSWHiZ9caDIyTd1UOAN11hfmOz6I0Tp+/U +1FQ/azQHtN3kMzBjSxJYAJN56NTM4BiJD3iFemiIsjfH0h7eXBcg1djmLf8B06FX +GOSrGZDpNmqPghVpBvNwyrJbAj9Jw3cjcdvrZ5lOBhaWv+kz8Rzn+h2N4Ir5uF46 +szGxs5bEzD2vTs6Zz4ndhC7uyRi9y81Nj8t4TLZtln7TOdNup/Mr1zGXxM4Fn6DP +YlYfdHgUU+Eqf2lApeZHVfkzi+1TRvPoEQKCAQAELU/d33TNwQ/Ylo2VhwAscY3s +hv31O4tpu5koHHjOo3RDPzjuEfwy006u8NVAoj97LrU2n+XTIlnXf14TKuKWQ+8q +ajIVNj+ZAbD3djCmYXbIEL+u6aL4K1ENdjo6DNTGgPMfISE79WrmGBIKtB//uMqy +fGTUSPeo+R5WmTGN29YxAnRE/jtwOgAcicACTc0e9nghHj3c2raI0IazY5XFP0/h +LszTNUQzWx6DjWsbB+Ymuhu4fHZTYftCrIMpjmjC9pkNggeJnkxylQz/pwO73uWg +ycDgJhRyaVhM8sJXiBk+OC/ySP2Lxo60aPa514LEYJKQxUCukCTXth/6p0Qo +-----END RSA PRIVATE KEY----- From 848d72fe3ab9bc6ba43873e2a91e63914f1a88af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Mon, 11 Jul 2022 00:02:22 +0800 Subject: [PATCH 14/31] refactor: distributed load testing --- go.mod | 2 +- go.sum | 3 +- hrp/boomer.go | 65 +- hrp/cmd/boom.go | 18 +- hrp/internal/boomer/boomer.go | 45 +- hrp/internal/boomer/client.go | 9 - hrp/internal/boomer/client_grpc.go | 135 +++-- hrp/internal/{ => boomer}/data/data.go | 0 hrp/internal/{ => boomer}/data/x509/README.md | 0 hrp/internal/boomer/data/x509/ca_cert.pem | 34 ++ hrp/internal/boomer/data/x509/ca_key.pem | 52 ++ .../boomer/data/x509/client_ca_cert.pem | 34 ++ .../boomer/data/x509/client_ca_key.pem | 52 ++ 
hrp/internal/boomer/data/x509/client_cert.pem | 32 + hrp/internal/boomer/data/x509/client_key.pem | 51 ++ hrp/internal/{ => boomer}/data/x509/create.sh | 0 .../{ => boomer}/data/x509/openssl.cnf | 2 +- hrp/internal/boomer/data/x509/server_cert.pem | 32 + hrp/internal/boomer/data/x509/server_key.pem | 51 ++ .../boomer/grpc/messager/messager.pb.go | 567 ++++++++++++++++++ .../boomer/grpc/messager/messager_grpc.pb.go | 210 +++++++ .../{ => boomer}/grpc/proto/messager.proto | 20 + hrp/internal/boomer/message.go | 6 +- hrp/internal/boomer/runner.go | 460 ++++++++++---- hrp/internal/boomer/runner_test.go | 60 +- hrp/internal/boomer/server.go | 1 - hrp/internal/boomer/server_grpc.go | 506 +++++++++------- hrp/internal/boomer/utils.go | 4 +- hrp/internal/builtin/utils.go | 22 + hrp/internal/data/x509/ca_cert.pem | 34 -- hrp/internal/data/x509/ca_key.pem | 52 -- hrp/internal/data/x509/client_ca_cert.pem | 34 -- hrp/internal/data/x509/client_ca_key.pem | 52 -- hrp/internal/data/x509/client_cert.pem | 32 - hrp/internal/data/x509/client_key.pem | 51 -- hrp/internal/data/x509/server_cert.pem | 32 - hrp/internal/data/x509/server_key.pem | 51 -- hrp/internal/grpc/messager/messager.pb.go | 291 --------- .../grpc/messager/messager_grpc.pb.go | 122 ---- hrp/server.go | 14 +- 40 files changed, 1958 insertions(+), 1280 deletions(-) delete mode 100644 hrp/internal/boomer/client.go rename hrp/internal/{ => boomer}/data/data.go (100%) rename hrp/internal/{ => boomer}/data/x509/README.md (100%) create mode 100644 hrp/internal/boomer/data/x509/ca_cert.pem create mode 100644 hrp/internal/boomer/data/x509/ca_key.pem create mode 100644 hrp/internal/boomer/data/x509/client_ca_cert.pem create mode 100644 hrp/internal/boomer/data/x509/client_ca_key.pem create mode 100644 hrp/internal/boomer/data/x509/client_cert.pem create mode 100644 hrp/internal/boomer/data/x509/client_key.pem rename hrp/internal/{ => boomer}/data/x509/create.sh (100%) rename hrp/internal/{ => boomer}/data/x509/openssl.cnf 
(96%) create mode 100644 hrp/internal/boomer/data/x509/server_cert.pem create mode 100644 hrp/internal/boomer/data/x509/server_key.pem create mode 100644 hrp/internal/boomer/grpc/messager/messager.pb.go create mode 100644 hrp/internal/boomer/grpc/messager/messager_grpc.pb.go rename hrp/internal/{ => boomer}/grpc/proto/messager.proto (54%) delete mode 100644 hrp/internal/boomer/server.go delete mode 100644 hrp/internal/data/x509/ca_cert.pem delete mode 100644 hrp/internal/data/x509/ca_key.pem delete mode 100644 hrp/internal/data/x509/client_ca_cert.pem delete mode 100644 hrp/internal/data/x509/client_ca_key.pem delete mode 100644 hrp/internal/data/x509/client_cert.pem delete mode 100644 hrp/internal/data/x509/client_key.pem delete mode 100644 hrp/internal/data/x509/server_cert.pem delete mode 100644 hrp/internal/data/x509/server_key.pem delete mode 100644 hrp/internal/grpc/messager/messager.pb.go delete mode 100644 hrp/internal/grpc/messager/messager_grpc.pb.go diff --git a/go.mod b/go.mod index 0ed1fd7a..c175a55c 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,7 @@ require ( golang.org/x/net v0.0.0-20220225172249-27dd8689420f golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 google.golang.org/grpc v1.45.0 - google.golang.org/protobuf v1.27.1 + google.golang.org/protobuf v1.28.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b ) diff --git a/go.sum b/go.sum index d774c3f4..600942d3 100644 --- a/go.sum +++ b/go.sum @@ -866,8 +866,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
+google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/hrp/boomer.go b/hrp/boomer.go index 0f66c1ff..460fd2ed 100644 --- a/hrp/boomer.go +++ b/hrp/boomer.go @@ -3,6 +3,7 @@ package hrp import ( "fmt" "github.com/httprunner/httprunner/v4/hrp/internal/builtin" + "golang.org/x/net/context" "io/ioutil" "os" "path/filepath" @@ -98,12 +99,12 @@ func (b *HRPBoomer) Run(testcases ...ITestCase) { // report execution timing event defer sdk.SendEvent(event.StartTiming("execution")) - taskSlice := b.ConvertTestCasesToTasks(testcases...) + taskSlice := b.ConvertTestCasesToBoomerTasks(testcases...) b.Boomer.Run(taskSlice...) } -func (b *HRPBoomer) ConvertTestCasesToTasks(testcases ...ITestCase) (taskSlice []*boomer.Task) { +func (b *HRPBoomer) ConvertTestCasesToBoomerTasks(testcases ...ITestCase) (taskSlice []*boomer.Task) { // load all testcases testCases, err := LoadTestCases(testcases...) if err != nil { @@ -129,25 +130,8 @@ func (b *HRPBoomer) ConvertTestCasesToTasks(testcases ...ITestCase) (taskSlice [ return taskSlice } -func (b *HRPBoomer) PollTestCases() { - for { - select { - case <-b.Boomer.ParseTestCasesChan(): - var tcs []ITestCase - for _, tc := range b.GetTestCasesPath() { - tcp := TestCasePath(tc) - tcs = append(tcs, &tcp) - } - b.GetTestCaseBytesChan() <- b.TestCasesToBytes(tcs...) 
- log.Info().Msg("put testcase successful") - case <-b.Boomer.GetCloseChan(): - return - } - } -} - -func (b *HRPBoomer) OutTestCases(testCases []*TestCase) []*TCase { - var outTestCases []*TCase +func (b *HRPBoomer) ParseTestCases(testCases []*TestCase) []*TCase { + var parsedTestCases []*TCase for _, tc := range testCases { caseRunner, err := b.hrpRunner.newCaseRunner(tc) if err != nil { @@ -155,12 +139,12 @@ func (b *HRPBoomer) OutTestCases(testCases []*TestCase) []*TCase { os.Exit(1) } caseRunner.parsedConfig.Parameters = caseRunner.parametersIterator.outParameters() - outTestCases = append(outTestCases, &TCase{ + parsedTestCases = append(parsedTestCases, &TCase{ Config: caseRunner.parsedConfig, TestSteps: caseRunner.testCase.ToTCase().TestSteps, }) } - return outTestCases + return parsedTestCases } func (b *HRPBoomer) TestCasesToBytes(testcases ...ITestCase) []byte { @@ -170,7 +154,7 @@ func (b *HRPBoomer) TestCasesToBytes(testcases ...ITestCase) []byte { log.Error().Err(err).Msg("failed to load testcases") os.Exit(1) } - tcs := b.OutTestCases(testCases) + tcs := b.ParseTestCases(testCases) testCasesBytes, err := json.Marshal(tcs) if err != nil { log.Error().Err(err).Msg("failed to marshal testcases") @@ -192,7 +176,7 @@ func (b *HRPBoomer) Quit() { b.Boomer.Quit() } -func (b *HRPBoomer) runTasks(testCases []*TCase, profile *boomer.Profile) { +func (b *HRPBoomer) runTestCases(testCases []*TCase, profile *boomer.Profile) { var testcases []ITestCase for _, tc := range testCases { tesecase, err := tc.toTestCase() @@ -230,7 +214,7 @@ func (b *HRPBoomer) runTasks(testCases []*TCase, profile *boomer.Profile) { b.Run(testcases...) 
} -func (b *HRPBoomer) rebalanceTasks(profile *boomer.Profile) { +func (b *HRPBoomer) rebalanceBoomer(profile *boomer.Profile) { b.SetProfile(profile) b.SetSpawnCount(b.GetProfile().SpawnCount) b.SetSpawnRate(b.GetProfile().SpawnRate) @@ -241,17 +225,17 @@ func (b *HRPBoomer) rebalanceTasks(profile *boomer.Profile) { func (b *HRPBoomer) PollTasks() { for { select { - case tasks := <-b.Boomer.GetTasksChan(): + case task := <-b.Boomer.GetTasksChan(): // 清理过时测试用例任务 if len(b.Boomer.GetTasksChan()) > 0 { continue } //Todo: 过滤掉已经传输过的task - if tasks.Tasks != nil { - testCases := b.BytesToTestCases(tasks.Tasks) - go b.runTasks(testCases, tasks.Profile) + if task.TestCases != nil { + testCases := b.BytesToTestCases(task.TestCases) + go b.runTestCases(testCases, task.Profile) } else { - go b.rebalanceTasks(tasks.Profile) + go b.rebalanceBoomer(task.Profile) } case <-b.Boomer.GetCloseChan(): @@ -260,6 +244,25 @@ func (b *HRPBoomer) PollTasks() { } } +func (b *HRPBoomer) PollTestCases(ctx context.Context) { + for { + select { + case <-b.Boomer.ParseTestCasesChan(): + var tcs []ITestCase + for _, tc := range b.GetTestCasesPath() { + tcp := TestCasePath(tc) + tcs = append(tcs, &tcp) + } + b.TestCaseBytesChan() <- b.TestCasesToBytes(tcs...) 
+ log.Info().Msg("put testcase successful") + case <-b.Boomer.GetCloseChan(): + return + case <-ctx.Done(): + return + } + } +} + func (b *HRPBoomer) convertBoomerTask(testcase *TestCase, rendezvousList []*Rendezvous) *boomer.Task { // init runner for testcase // this runner is shared by multiple session runners diff --git a/hrp/cmd/boom.go b/hrp/cmd/boom.go index 7fe7c93a..5df6cd26 100644 --- a/hrp/cmd/boom.go +++ b/hrp/cmd/boom.go @@ -1,6 +1,7 @@ package cmd import ( + "golang.org/x/net/context" "os" "strings" "time" @@ -55,7 +56,7 @@ var boomCmd = &cobra.Command{ hrpBoomer = hrp.NewStandaloneBoomer(boomArgs.SpawnCount, boomArgs.SpawnRate) } hrpBoomer.SetProfile(&boomArgs.Profile) - hrpBoomer.EnableGracefulQuit() + ctx := hrpBoomer.EnableGracefulQuit(context.Background()) // run boomer switch hrpBoomer.GetMode() { @@ -67,15 +68,20 @@ var boomCmd = &cobra.Command{ hrpBoomer.SetSpawnCount(boomArgs.SpawnCount) hrpBoomer.SetSpawnRate(boomArgs.SpawnRate) } - go hrpBoomer.StartServer() - go hrpBoomer.RunMaster() - hrpBoomer.PollTestCases() + if boomArgs.autoStart { + hrpBoomer.InitBoomer() + } else { + go hrpBoomer.StartServer() + } + go hrpBoomer.PollTestCases(ctx) + hrpBoomer.RunMaster() case "worker": if boomArgs.ignoreQuit { hrpBoomer.SetIgnoreQuit() } - go hrpBoomer.RunWorker() - hrpBoomer.PollTasks() + go hrpBoomer.PollTasks() + hrpBoomer.RunWorker() + time.Sleep(3 * time.Second) case "standalone": if venv != "" { hrpBoomer.SetPython3Venv(venv) diff --git a/hrp/internal/boomer/boomer.go b/hrp/internal/boomer/boomer.go index 8ec9536b..5b6a7f2a 100644 --- a/hrp/internal/boomer/boomer.go +++ b/hrp/internal/boomer/boomer.go @@ -2,6 +2,7 @@ package boomer import ( "github.com/httprunner/httprunner/v4/hrp/internal/json" + "golang.org/x/net/context" "math" "os" "os/signal" @@ -84,20 +85,6 @@ func (b *Boomer) SetProfile(profile *Profile) { } } -func (p *Profile) dispatch(workers int64) *Profile { - workerProfile := *p - if p.SpawnCount > 0 { - workerProfile.SpawnCount = 
p.SpawnCount / workers - } - if p.SpawnRate > 0 { - workerProfile.SpawnRate = p.SpawnRate / float64(workers) - } - if p.MaxRPS > 0 { - workerProfile.MaxRPS = p.MaxRPS / workers - } - return &workerProfile -} - // SetMode only accepts boomer.DistributedMasterMode、boomer.DistributedWorkerMode and boomer.StandaloneMode. func (b *Boomer) SetMode(mode Mode) { switch mode { @@ -169,14 +156,9 @@ func (b *Boomer) RunWorker() { b.workerRunner.run() } -// GetTestCaseBytesChan gets test case bytes chan -func (b *Boomer) GetTestCaseBytesChan() chan []byte { - switch b.mode { - case DistributedMasterMode: - return b.masterRunner.testCaseBytes - default: - return nil - } +// TestCaseBytesChan gets test case bytes chan +func (b *Boomer) TestCaseBytesChan() chan []byte { + return b.masterRunner.testCaseBytes } func ProfileToBytes(profile *Profile) []byte { @@ -197,18 +179,8 @@ func BytesToProfile(profileBytes []byte) *Profile { return profile } -// GetProfileBytesChan gets profile bytes chan -func (b *Boomer) GetProfileBytesChan() chan []byte { - switch b.mode { - case DistributedMasterMode: - return b.masterRunner.profileBytes - default: - return nil - } -} - -// GetTasksChan gets profile bytes chan -func (b *Boomer) GetTasksChan() chan *profileMessage { +// GetTasksChan getsTasks chan +func (b *Boomer) GetTasksChan() chan *task { switch b.mode { case DistributedWorkerMode: return b.workerRunner.tasksChan @@ -373,13 +345,16 @@ func (b *Boomer) EnableMemoryProfile(memoryProfile string, duration time.Duratio } // EnableGracefulQuit catch SIGINT and SIGTERM signals to quit gracefully -func (b *Boomer) EnableGracefulQuit() { +func (b *Boomer) EnableGracefulQuit(ctx context.Context) context.Context { + ctx, cancel := context.WithCancel(ctx) c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGTERM, syscall.SIGINT) go func() { <-c b.Quit() + cancel() }() + return ctx } // Run accepts a slice of Task and connects to the locust master. 
diff --git a/hrp/internal/boomer/client.go b/hrp/internal/boomer/client.go deleted file mode 100644 index b3bf6def..00000000 --- a/hrp/internal/boomer/client.go +++ /dev/null @@ -1,9 +0,0 @@ -package boomer - -type client interface { - connect() (err error) - close() - recvChannel() chan *genericMessage - sendChannel() chan *genericMessage - disconnectedChannel() chan bool -} diff --git a/hrp/internal/boomer/client_grpc.go b/hrp/internal/boomer/client_grpc.go index 7f82227b..434b88e9 100644 --- a/hrp/internal/boomer/client_grpc.go +++ b/hrp/internal/boomer/client_grpc.go @@ -3,7 +3,6 @@ package boomer import ( "context" "fmt" - "io" "sync" "sync/atomic" "time" @@ -12,27 +11,30 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/oauth" + "google.golang.org/grpc/metadata" - "github.com/httprunner/httprunner/v4/hrp/internal/data" - "github.com/httprunner/httprunner/v4/hrp/internal/grpc/messager" + "github.com/httprunner/httprunner/v4/hrp/internal/boomer/data" + "github.com/httprunner/httprunner/v4/hrp/internal/boomer/grpc/messager" + "github.com/pkg/errors" "github.com/rs/zerolog/log" ) type grpcClient struct { + messager.MessageClient masterHost string masterPort int identity string // nodeID config *grpcClientConfig - fromMaster chan *genericMessage - toMaster chan *genericMessage - disconnectedFromMaster chan bool - shutdownChan chan bool + fromMaster chan *genericMessage + toMaster chan *genericMessage + disconnectedChan chan bool + shutdownChan chan bool failCount int32 - wg sync.WaitGroup + wg *sync.WaitGroup } type grpcClientConfig struct { @@ -48,10 +50,6 @@ type grpcClientConfig struct { const token = "httprunner-secret-token" -func logger(format string, a ...interface{}) { - log.Logger.Log().Msg(fmt.Sprintf(format, a...)) -} - // unaryInterceptor is an example unary interceptor. 
func unaryInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { var credsConfigured bool @@ -94,6 +92,15 @@ func newWrappedStream(s grpc.ClientStream) grpc.ClientStream { return &wrappedStream{s} } +func extractToken(ctx context.Context) (tkn string, ok bool) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok || len(md[token]) == 0 { + return "", false + } + + return md[token][0], true +} + // streamInterceptor is an example stream interceptor. func streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { var credsConfigured bool @@ -133,26 +140,27 @@ func newClient(masterHost string, masterPort int, identity string) (client *grpc // Initiate the stream with a context that supports cancellation. ctx, cancel := context.WithCancel(context.Background()) client = &grpcClient{ - masterHost: masterHost, - masterPort: masterPort, - identity: identity, - fromMaster: make(chan *genericMessage, 100), - toMaster: make(chan *genericMessage, 100), - disconnectedFromMaster: make(chan bool), - shutdownChan: make(chan bool), + masterHost: masterHost, + masterPort: masterPort, + identity: identity, + fromMaster: make(chan *genericMessage, 100), + toMaster: make(chan *genericMessage, 100), + disconnectedChan: make(chan bool), + shutdownChan: make(chan bool), config: &grpcClientConfig{ ctx: ctx, ctxCancel: cancel, mutex: sync.RWMutex{}, }, + wg: &sync.WaitGroup{}, } return client } -func (c *grpcClient) connect() (err error) { +func (c *grpcClient) start() (err error) { addr := fmt.Sprintf("%v:%v", c.masterHost, c.masterPort) // Create tls based credential. 
- creds, err := credentials.NewClientTLSFromFile(data.Path("x509/ca_cert.pem"), "x.test.example.com") + creds, err := credentials.NewClientTLSFromFile(data.Path("x509/ca_cert.pem"), "www.httprunner.com") if err != nil { log.Fatal().Msg(fmt.Sprintf("failed to load credentials: %v", err)) } @@ -160,7 +168,7 @@ func (c *grpcClient) connect() (err error) { // oauth.NewOauthAccess requires the configuration of transport // credentials. grpc.WithTransportCredentials(creds), - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(1024 * 1024 * 1024)), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(32 * 10e9)), grpc.WithUnaryInterceptor(unaryInterceptor), grpc.WithStreamInterceptor(streamInterceptor), } @@ -169,43 +177,47 @@ func (c *grpcClient) connect() (err error) { log.Error().Err(err).Msg("failed to connect") return err } - grpc.MaxCallRecvMsgSize(32 * 10e9) - go c.recv() - go c.send() - - biStream, err := messager.NewMessageClient(c.config.conn).BidirectionalStreamingMessage(c.config.ctx) - if err != nil { - log.Error().Err(err).Msg("call bidirectional streaming message err") - return err - } - c.config.setBiStreamClient(biStream) - log.Info().Msg(fmt.Sprintf("Boomer is connected to master(%s) press Ctrl+c to quit.\n", addr)) - + c.MessageClient = messager.NewMessageClient(c.config.conn) return nil } -func (c *grpcClient) reConnect() (err error) { - biStream, err := messager.NewMessageClient(c.config.conn).BidirectionalStreamingMessage(c.config.ctx) +func (c *grpcClient) register(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + res, err := c.Register(ctx, &messager.RegisterRequest{NodeID: c.identity}) if err != nil { - return + return err } - c.config.setBiStreamClient(biStream) - - // register worker information to master - c.sendChannel() <- newGenericMessage("register", nil, c.identity) - //// tell master, I'm ready - //log.Info().Msg("send client ready signal") - //c.sendChannel() <- 
newClientReadyMessageToMaster(c.identity) - log.Info().Msg(fmt.Sprintf("Boomer is reConnected to master press Ctrl+c to quit.\n")) - return + if res.Code != "0" { + return errors.New(res.Message) + } + return nil } -func (c *grpcClient) close() { - close(c.shutdownChan) - c.config.ctxCancel() - if c.config.conn != nil { - c.config.conn.Close() +func (c *grpcClient) signOut(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + + res, err := c.SignOut(ctx, &messager.SignOutRequest{NodeID: c.identity}) + if err != nil { + return err } + if res.Code != "0" { + return errors.New(res.Message) + } + return nil +} + +func (c *grpcClient) newBiStreamClient() (err error) { + md := metadata.New(map[string]string{token: c.identity}) + ctx := metadata.NewOutgoingContext(c.config.ctx, md) + biStream, err := c.BidirectionalStreamingMessage(ctx) + if err != nil { + return err + } + c.config.setBiStreamClient(biStream) + println("successful to establish bidirectional stream with master, press Ctrl+c to quit.\n") + return nil } func (c *grpcClient) recvChannel() chan *genericMessage { @@ -213,8 +225,6 @@ func (c *grpcClient) recvChannel() chan *genericMessage { } func (c *grpcClient) recv() { - c.wg.Add(1) - defer c.wg.Done() for { select { case <-c.shutdownChan: @@ -235,7 +245,7 @@ func (c *grpcClient) recv() { } if msg.NodeID != c.identity { - log.Warn(). + log.Info(). Str("nodeID", msg.NodeID). Str("type", msg.Type). Interface("data", msg.Data). @@ -266,8 +276,6 @@ func (c *grpcClient) sendChannel() chan *genericMessage { } func (c *grpcClient) send() { - c.wg.Add(1) - defer c.wg.Done() for { select { case <-c.shutdownChan: @@ -278,7 +286,7 @@ func (c *grpcClient) send() { // We may send genericMessage to master. 
switch msg.Type { case "quit": - c.disconnectedFromMaster <- true + c.disconnectedChan <- true } } } @@ -298,9 +306,6 @@ func (c *grpcClient) sendMessage(msg *genericMessage) { switch err { case nil: atomic.StoreInt32(&c.failCount, 0) - break - case io.EOF: - fallthrough default: //log.Error().Err(err).Interface("genericMessage", *msg).Msg("failed to send message") atomic.AddInt32(&c.failCount, 1) @@ -308,5 +313,13 @@ func (c *grpcClient) sendMessage(msg *genericMessage) { } func (c *grpcClient) disconnectedChannel() chan bool { - return c.disconnectedFromMaster + return c.disconnectedChan +} + +func (c *grpcClient) close() { + close(c.shutdownChan) + c.config.ctxCancel() + if c.config.conn != nil { + c.config.conn.Close() + } } diff --git a/hrp/internal/data/data.go b/hrp/internal/boomer/data/data.go similarity index 100% rename from hrp/internal/data/data.go rename to hrp/internal/boomer/data/data.go diff --git a/hrp/internal/data/x509/README.md b/hrp/internal/boomer/data/x509/README.md similarity index 100% rename from hrp/internal/data/x509/README.md rename to hrp/internal/boomer/data/x509/README.md diff --git a/hrp/internal/boomer/data/x509/ca_cert.pem b/hrp/internal/boomer/data/x509/ca_cert.pem new file mode 100644 index 00000000..14db0ba7 --- /dev/null +++ b/hrp/internal/boomer/data/x509/ca_cert.pem @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- +MIIF6jCCA9KgAwIBAgIJAKg0eWNBWobLMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD +MRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTAeFw0yMjA3MTAwNDMwMTJaFw0zMjA3 +MDcwNDMwMTJaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD +U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANpgfrPDdZAqqXrRbjmiXYbBdCvL +Oh4B/1p6yNulFspn8wTm0V1V1pPqUBWolSOpSUuxT9XDnkGq89loYaMGnRm8V6un +tNLQx3zzLjLoVeyEajztIIg1p/k9Boe4g90eLbF/Dirg9tOI1yw50Ay0v/Wvp6/d ++h3kTAXXfB4Rc78dh40/FlnEjqeywLObHQftxojC4CcwvMLVqxEZgz8/ZUoBw1Rd 
+I7muiMItMw8vyf3yhSpTntNoa1dqZ6a1tZzdvPlnvdP3ByEdh7MI7PKthlLZhPoU +zjFhI3+vgHq+U8yuyEpbBILBJqQ2Kd5H7x6EGiRMpeCWzIdl/PwcXhgwuUSDVUTy +6w/qKTmhzPytIiC/wyuHcX8Cvhe0Ch54x1YAPK07BB9dnaLVsStAsw7O22eSvWG7 +aAFFaXUhBGWvkRz/7bWlAlRL/Rt87oXrjF0hCDotcaWRMnH5mSY9N9LsGbLd0iVP +H5zAKFr3iytF9F0T1FcXcKcMEJbjFeUP0lKUpZ5J/Ei9Nw9AQ72xHE7mqJj/UQNf +G/hfCNGVhlcsmQmwGdtobUHrIOJYkESs1H/91r/rDYO4s0z5PEKKOx1xFPnhPcs7 +3/0ZYDocCjqIKcigN2Zowr6KgSB4l+t0xjZZp+2QjfMQ22e0NZkc+cjsrcLmJQ1n +jE4aVM/Vl2leNesjAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQU/BcimdJ/xrkakVLfuYPzEa22aY0wgYAGA1UdIwR5MHeAFPwXIpnSf8a5GpFS +37mD8xGttmmNoVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV +BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1zZXJ2ZXJfY2GC +CQCoNHljQVqGyzAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBAJmU +v0gjSkzzRIGEQTA9jZzOrZq6H+Gh6r+UtFzVtRmN9Xga0myNuxzXNkxI/Ew0nToR +uTYvnQBE7JkyEVELjN5QXByXNme/km5yP6mZJs6shF4u3szZ9E/zSJvVZ6Mp1Dw1 +LJj/WLyJnord0zyYxkpX2ukTpvb5D+UsDu4QxJ7Kkq1YZUFss6/wHsUgnheI64Ez +DV8FoqhiMmIwcI9QdNY3udNCvp3oHSgi777WEDoZUIJZEF/rO/i/oojuGWjYBha9 ++jO6E4jhqGE9ZwvXYOx9agMZJtZ7N4a+7tuBmmYkB8r+A60uIqocni8fzU0F7hdN +R3RIS3kWW+o/4Xz8a3fE19+RFSZd4vUgS1U+8eTeVvuCw4KaAQsEUDv8pEH6GjD+ +xQwtPbg4grufTmC1a3PmEjeeYagP0BdSbuvRqXCl4i6QK/Yp2lPUWmGVC27+X0UL +xXibxUfcgT26eIAddepO2RUVG6QAtYC6GMgCbANAIVm37Sc8JV+quF/gloBIKCY9 +dSi+x8wOTAsmJkceyAt+UOhayn1+u6+6YGqIiRt4/wBpuZj0UyvaZLmDcxdNXDBc +cZAAUwvcsa0yt/QiF7IE+/GS1mja0NcuzBjamnf/LqTcgQin9bEpVTw5suKUqmCR +BdUlu7drONjYIhMb3zY/QFmTGD7rPu/DaHE63ThL +-----END CERTIFICATE----- diff --git a/hrp/internal/boomer/data/x509/ca_key.pem b/hrp/internal/boomer/data/x509/ca_key.pem new file mode 100644 index 00000000..c1320a1f --- /dev/null +++ b/hrp/internal/boomer/data/x509/ca_key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDaYH6zw3WQKql6 +0W45ol2GwXQryzoeAf9aesjbpRbKZ/ME5tFdVdaT6lAVqJUjqUlLsU/Vw55BqvPZ +aGGjBp0ZvFerp7TS0Md88y4y6FXshGo87SCINaf5PQaHuIPdHi2xfw4q4PbTiNcs 
+OdAMtL/1r6ev3fod5EwF13weEXO/HYeNPxZZxI6nssCzmx0H7caIwuAnMLzC1asR +GYM/P2VKAcNUXSO5rojCLTMPL8n98oUqU57TaGtXamemtbWc3bz5Z73T9wchHYez +COzyrYZS2YT6FM4xYSN/r4B6vlPMrshKWwSCwSakNineR+8ehBokTKXglsyHZfz8 +HF4YMLlEg1VE8usP6ik5ocz8rSIgv8Mrh3F/Ar4XtAoeeMdWADytOwQfXZ2i1bEr +QLMOzttnkr1hu2gBRWl1IQRlr5Ec/+21pQJUS/0bfO6F64xdIQg6LXGlkTJx+Zkm +PTfS7Bmy3dIlTx+cwCha94srRfRdE9RXF3CnDBCW4xXlD9JSlKWeSfxIvTcPQEO9 +sRxO5qiY/1EDXxv4XwjRlYZXLJkJsBnbaG1B6yDiWJBErNR//da/6w2DuLNM+TxC +ijsdcRT54T3LO9/9GWA6HAo6iCnIoDdmaMK+ioEgeJfrdMY2WaftkI3zENtntDWZ +HPnI7K3C5iUNZ4xOGlTP1ZdpXjXrIwIDAQABAoICAQDMwwwq7MywaIBP7E5pdkgy +EfUnF0EgYAkawuTRp2POWFfzsaaA2PsB6QQ8ur1VGefjNJhCPVGIC47ovUpHvezS +89pU10TjI+bZz3/zNg1TX/nptQL7FSyytDkKS8ZBMInx08vqAtUOFlKEYpUlRNp1 +ucYHTqG3I5jxJVN5Mi4Q9tRiadRASeDld+PexUQcaiTtmaTqunVUT1s/Bmgdhwkn +sq1/znGwKuqLACzPQaUqHBwnSw8y9ccoyVn1ZI6tTvFh/pdtSEUEFRdnlafwCStZ +RiK9B4MrpATQNjTHYu1akEy4A84f+JKOCUeK6HJbb8y/WqtzApM3JjdoAgVss0sT +Kb7bP0cXkG+RnP0+XAklT5/KidUX6At8KavI5/oQA9JY/qQs6xEtUyrDHhAxfpgm +2pTkyUcW71QLJKlNH1i6j7it0u0s/6Ezjo/MF9pfF5yqBxCPskNDJEzTYXNCzMp8 +ki1F47ypwQawpVTQqP0Bgjqujvta64CWl7qt8FL7cKu0068ykHpN27qXQhYSNk5s +jax6V429npjCARRUVl+0+jiyP5LQmBcDFQbmPfe5p9CZcZiZ1EQnT/MKTKR/pTVc +IyEBaUIGGy/OojQreIOO39HYIBaV0sNvnrvBO9Fjbg60mRZDY91BARhoQAjHPMGC +5xFrfggLjW4a6j0SM6vJOQKCAQEA+3agIxYArZ2y7qNudc5jBI+eJejE9kAofznP +WP5cs9HnQnI5zSUGdX3ZPAdC18m8TLDCdtTVh9o/sCadGTIIlsGmFiae3yI93mN8 +eVw73gtNW3qYJGZe+yZwsTZ+33rG+z6YFBhOGn+EUF7h4McPOLAl94EQmjRmwwy8 +pfXlyPGle5NfoBBN5qSBwJtmBNaF+TxoeP+zmOxnF0HZpBIot0lEZDwN83OL8GC/ +KLlti0mByUJs4e7dcmv+xBKFsUBD5AUMMaVHlh0ALqpGg4vmMqUzX/vAoJHiHHt4 +iWo2eqy/dGEYwSoKJpwLVferb+S9fTWmdZEruUQluSMi87JXrwKCAQEA3lEPk1RF +TtZHfO5Twj3m5UsdMb6Ch2wmMzGBhTI50QzXRafIOygnHKy481btIHE3e6QJAJzR +eLe4ahyNaGSLuZ+VajXsCX4jzbZdKWQJm451d7l+XjVSAVw12hjMToUyAuvV6dHo +CaCVP3s22oDQ9wPHGny6v0gY8dOE030AWqS7G3zRiT69wkjkLWdeAFEQjY5cxKhh +XgpiJTlIROJ3EPH3Hm7dwzJL3OTb2eP5pC3lbR39QJ14KYIIKTqq4WZd4L0Zdt7d +mbvjhZcNkrdXP0fSPDgkjjEJ3lYUlGfay/As2UEieQymTznXIQrCIokos3/oQfkH 
+L6vTsrcAwS6MzQKCAQEAi9qI65qUG/smBgUNLSXw+htqCIlx6cb6/u9G+6bUJgpq +xRDERuz9r6Cjjfg3283OFRUFwpNSgvEGFNEU9GtYTYg79/vYxh7ELAhGtTRv82lz +x5niPfRVhPb3HAhD/cTKH/fLGvn9jk03aH+svpfXRl7pbsLwWeMk9/wAe4jMGLsU +nyrytxH6UXlS1K1Yyv4ImvpW3FzSJQ3ttAiio9aZoH52NA0WcTzlKnaUOnEOlLX4 +Idf4uJthu/6GPcRTaKZmW83W31GeA8XzUQDQoN7Q03//l7Vrh6I7ED43Zq2UyRuE +i5Ro8R2RcbG9uD07ssqT/Kw2/RIVMD/Pfy0khka87wKCAQEA1eycl0F0+9q3qaDP +2k6kmyl/azmN8u//hi1yG5BsEBxSHcXIqBwIHtCZnBaeUSSApin/O6aq7oWjIABf +lf+CcFj+dthyS+QkYbPEy6pmkFgx8sX8snyOb56idz57gmcq66KyEbAZnwH1+8L9 +0p439imdcoBpVtzym+jUnIlhSNfQ8C9Ylb9Y69YmMwaPbrCSxBQkclwwbUSCkp0f +TKG6vwSGrbMzE7yXQXS7lVyJARHk/e3onz+nvBFS9xFsEz7kwPhVw4vLIz6oPglP +V0my28Kpq6a+jlDj1R1x6ihRYwK2tUu291JTylK3DyWCD6d6EdfXz3vpDVdDe2ob +gMjhVQKCAQBWmWrIdyglsetIKAT/j6Z4hJSWA6L77ii1gMeMv6Cw2XKc19gm8fnF +DfPh531pNaKjxBgwJTz6UrtVq1RcOqY/EWxDKeW5WU79RMV0duXE20EWnMqN7eXp +gZLso8ChZtz5BF4UAeXHfIskIt1KCnF6ubbmyUTa9aeJcqUwcr9Ymtu3fy5e1uCP +PdRxkpU/Q+xhR85g46GMIbjzwruTSMV7btuGh5WBjPeV2OBS6+aj2bWG3yeVAwar +w1zj0Vbxw7VMcblPm1EQ0hyZ/Q24ZSoLZL2l4FoaOhPXaYj1HuKQjiPbabj2zUZY +8xnynnp57i3BHHHbjY4R02Mqsfi1nNoN +-----END PRIVATE KEY----- diff --git a/hrp/internal/boomer/data/x509/client_ca_cert.pem b/hrp/internal/boomer/data/x509/client_ca_cert.pem new file mode 100644 index 00000000..52ae25ec --- /dev/null +++ b/hrp/internal/boomer/data/x509/client_ca_cert.pem @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- +MIIF6jCCA9KgAwIBAgIJAKRZXNeAdHXzMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD +MRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTAeFw0yMjA3MTAwNDMwMTNaFw0zMjA3 +MDcwNDMwMTNaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD +U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANMfUOCyV55rvt/nELLym4CSL1/X +eg7NPWoXcAkjZt0P4j5/PRzf1i5kvleb9KXjKLyxBFd+S1+FnGg34Cq5YZWwkpfc +23qNFZobzk11QvhMJs+mJDGRYMmQ3T274wv2QQJ2zD5Qx5ZjOpDHLHauxW/3lD3t +D9f52svKuoVoeOHRR3kDYOmPj3BHJJu0RdLxWA0HwVnpy2dqnJyyMU+czm800DL+ 
+HfaQFPwsPvdgQnlVRa0J9GMAtY4vqpRhgvoN7kKidG75i0BRG1BNrgFhZ/Qackmx +hLvCYCQqBHUAkg1rFXr6FdsOcK+GUD9N5Hvq24v3U1nsRIo7MH56EdhERsGKFuYK +pVppBZXnNT89ji3TDZ1j/TourAdi9XiPbiqMvZrF8VEwcnewLYnfIfpv03w8TDlt +NoGVy6WIWtL9LC4blH6/riyrVnC+J1sElPiUqebtsoP/vuTLTBoM4kaCGeDjRmR1 +Q0EZDSMFODk6BaMjrigyab+KaoHc98aX740vTEl1VTvtFCeGCgbbWaBBI2z/qz1r +MNYMvGM68G7vbH3thM1KGWGnL7CTYjpz8nAvQliUxhUvE1LUK0LMdpl2pMrvjDog +f7h8/ZCAzwN8QrknYpVvgU6CKtDZz/YwZg49ew7sdUIIorntQ1hL0j1RwnGxWKJ+ +GKuwPkSL6jAHauPDAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUWNurDpJ7V480NBKoiMUlFBG5Pa4wgYAGA1UdIwR5MHeAFFjbqw6Se1ePNDQS +qIjFJRQRuT2uoVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV +BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1jbGllbnRfY2GC +CQCkWVzXgHR18zAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBAHpl +MizBOtEWJ7WGhCWFpbrZJPMx+vQ0ixY2Uz/wjj2jiE7O4kIR45OxgQws/LdG/D8v +nhumeau8JjYPXZHF2wVa/CbF183OHzJEgL7DRteL5qfR+simSMWdXkKXrGK6riCl +IWT2CET1u//fa9I0245KdBDlzmkxpYUB2If+jOYKIzJ3o041zWGVx7+uQ8wQuNSU +6WWNP+g9k4hgNPO8kPkbOq+YX+mcxgKslKP2HfIonzeTtLcnvBCDY7fsag9wVfTT +bP84k3c5ocvQIta/S+3rSLo6Q1EvYclV8qkI0meap91DisCVsKWekNQgnRoWjMrZ +QpSuFjnfM6rWRBlZD+Vq47WaxzxkWarOX9+XuHXf1K5VyAVbe9n7QLeXFm42eRBr +lZtwTH7aDifdyuGzG3/xu06NzLSFi+G4WedG46j3GVGj0Uche3sCx5K5HE5dIJQN +iQ7hV7hAkPyCkY8uviQWwA91ffPIJJb/bBSySo354IgRtfmPqhpfLrf75lUuy9kE +/HgRHZf916JL4A52XEX7S66JcZGqtram2/Vo64ksjnyM9ZRKE+jWRIS8YYAnDmkX +NZCAQFD3CE0zlwQQLCPtMqeSk7MrXj58y80e3mUZoZQoPWYuBIktlbCmCiRKmNGm +WHrY9obxbjh5CBJb3Ilior3lnm24S9M9bClr6RpY +-----END CERTIFICATE----- diff --git a/hrp/internal/boomer/data/x509/client_ca_key.pem b/hrp/internal/boomer/data/x509/client_ca_key.pem new file mode 100644 index 00000000..b546d6e2 --- /dev/null +++ b/hrp/internal/boomer/data/x509/client_ca_key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDTH1Dgsleea77f +5xCy8puAki9f13oOzT1qF3AJI2bdD+I+fz0c39YuZL5Xm/Sl4yi8sQRXfktfhZxo +N+AquWGVsJKX3Nt6jRWaG85NdUL4TCbPpiQxkWDJkN09u+ML9kECdsw+UMeWYzqQ 
+xyx2rsVv95Q97Q/X+drLyrqFaHjh0Ud5A2Dpj49wRySbtEXS8VgNB8FZ6ctnapyc +sjFPnM5vNNAy/h32kBT8LD73YEJ5VUWtCfRjALWOL6qUYYL6De5ConRu+YtAURtQ +Ta4BYWf0GnJJsYS7wmAkKgR1AJINaxV6+hXbDnCvhlA/TeR76tuL91NZ7ESKOzB+ +ehHYREbBihbmCqVaaQWV5zU/PY4t0w2dY/06LqwHYvV4j24qjL2axfFRMHJ3sC2J +3yH6b9N8PEw5bTaBlculiFrS/SwuG5R+v64sq1ZwvidbBJT4lKnm7bKD/77ky0wa +DOJGghng40ZkdUNBGQ0jBTg5OgWjI64oMmm/imqB3PfGl++NL0xJdVU77RQnhgoG +21mgQSNs/6s9azDWDLxjOvBu72x97YTNShlhpy+wk2I6c/JwL0JYlMYVLxNS1CtC +zHaZdqTK74w6IH+4fP2QgM8DfEK5J2KVb4FOgirQ2c/2MGYOPXsO7HVCCKK57UNY +S9I9UcJxsViifhirsD5Ei+owB2rjwwIDAQABAoICAQDDrPTDLciz1l1VHM6HbQDf +i55JEGfarDNNz2dRsPQ30+73yeqUhon2+fzJKoz367DoIpFJno6xfB7ZIWCteKCP +otZb1qG91mG9MiRl+lcV107piq1lG78/UvsbqrbncVgTtpPa9ffm1RWE9nWpkpcA +DdHiC4RxwuwdkkqKN6hCdDvwV0dNcneZsvalMdK9jl7zxMpaUazqrw901FuL1GQp +AiQt/wU6b5RjnYbGtPsnhfdMSDuwPwoHPPq3CCHjLWI1dGjCKpv8ArB0H2s1cFhv +EMv4rYW+mIuPOTpkTyEPOr7v+jajj6C1rqFV6xXoHGdcNOGWKLvl+rIZp34+mhmQ +vQRkmcOzoSkdTERAOtYfKYcylzBch6WHmgVE2ZRntiQTAp56pXxUq5lEnAtTc0jo +3J2fItVgzT9ZGxNOgzA5VOoQA1as2Xr+v6YeUibn4/I8KKHV/FXTFk7ojb3EObF1 +n39OZXw6a28QNP9/7TYmB7F41fzHcRPzl48lx4rPXyUXOwYh0qwqTixmgl/HcGD7 +i2XUyJ0CHi/uzvxo6Bqg+VMdQzfqT5npf22axays9xRk0nxwvY1wHwiRQCHcT8dU +ovoLTZJFWzNik7EthMgPT+3Ec0eAs4j1N03Hb7KXUVBn70QChf2uaDEuAXJh/pOB +T8OsSN+9k0/VF3Wxni/TgQKCAQEA8DIam0wpwzabwKdpWntdhGpP6ak+o++bsNyL +hyBBT7RlmbNtKtfZAdUNT1PicYZ/yFR+4DhrfPHsIMAdTuP5uq6JpBVWYb132Hv3 +9rXZiyhRPZJmL0ZIRcY/K6jqNHlQJp5ov9yAVmFEChPdI0JagVGy52a/lbctcKaQ +lSFMSaVl1EKqXM8LljgANRTRv9Hr1Owx/IdjT+M1FqjHXWO51AWPxDAmINIo9UrR +SAOK8/kMyULG8FvEhk/g0KtpwQcW4HRZVeATyrOIcxBmSfAQ46+fpfs6qa4AB2U7 +lpxDWPuY43DUZMY7uLTEoFraya3dj42mwyvKK4UeyiKn6uNI4wKCAQEA4QN+usMh +InAdPC9cMQyvjZ5asWqmTGk6jCvUJWvr8R2z0Si8nbPuh7ciz8g6rS+ggqym3e0w +AWZt+rlXvrC9cpfvERDxQosFaWD7w0+h8h+URtRJJchlLPMxjaxtHx3mhArfsgTI +MkIFHS4Q7p+H3IyeqlALTVFwnLBNaD9RSI6T/Zn0AOhxqMTDnjnonADL2wbK1pfw +GTsjk4FNNVmSOY+ZRbobgTkAegbyra8+oa+GR97U/hT8Pii6FwX+iR7PXjBjgvHD +m7AKkcdorvleFH3Yxz1Z9Fje8rAOGf6hWJFTU1qMmaLNdvATSJu5ne5CrSN2m3FK 
+qr2uPmrIJdRPoQKCAQAZIKS36lfUHDpfBSR4Wr+FwrlpcFMlQ0O+VNQj5rPuaqjW +U3bwLHR/RJKH4famebOUeYJsYnqcL5LMOkzWm/LcHLY5fCH1R6Tp+M4P+SYw8J7P +GimmeGvHIN4q6xjVNHu2DoxWxfKHFtXPWBSiQ6bEMI/OtWkFeIxAZKxrbXhVm//z +HKZF30MPC/y5kNwAfS12sN7p1CAHk3VSUYXJt00RaSOJGqBifpnaT2FlbzlyHHPB ++kJlkrQUePbD3arKjrtN794IpdBsPCviHa0Vvw+FQjIpYwbYCWPnYifBscc539g2 +su8FO9ezkvWe8OJChvXOtrrjYAleVCbMbqOyZuSRAoIBAQCnrfkUqDDa/v1qSkjD +bJauTGF9cOJ4crpklozDTkdHKUFFDrxwMRQCIuFYQfgn8yQD/TFklEp/4Jr4ioHu +4rpq2PoYl62STxM7UkCLbZ5bVlki5zOTamCrPJei4el3lMqhf5Dvkky11ykEc72+ +dTfDjS738Cpb9eKbgW5Nz1F9ZnK2O7Hvs0hv4iF8md7T0mwXzln9zL/prX53f5XP +ue4T4wTvRx8UDyxhwye5cqyTxL+mc1H5/h1zHNqAKcFi4YjaweiGPi/spyVZOWaz +bbVEQ/v1jaypQEj0RWpcyLnnzHRx2zqHiyDeD03vf8y0+kbJy3GpqKVh03Qzo1N/ +jVXBAoIBAGEvsOGIBFUiDLihDEIUTBdQHzzKXN+zjzxUnmcrLn2MKBx8gjlpgZrO +pAgK0depxWA9RAuQBgqqodi8CY82h6kMaK7ANYOfgC+UDMCJ+XJKqKaa4MG4xOiv +BqJZCYIhB5ALs4DDLwWNCYQqVg3ErVk7hDgKQugQviBXGQFbEwkSHgf6MUxbe99/ +DkSgkil3TWKcVE82auY4ud04tJOBIFl+fnMysF99FqOLJTwqHDK5pC6A63zyBHgm +3hL5vjRn6DWb8wBgQo6/K8pbYQ+7dADGbNvQxUj7nqjhH3I+vEBHAg+oVt3ZPr96 ++3KzjPLML31OD8TN22FUzsYcdw2prEU= +-----END PRIVATE KEY----- diff --git a/hrp/internal/boomer/data/x509/client_cert.pem b/hrp/internal/boomer/data/x509/client_cert.pem new file mode 100644 index 00000000..c9dbafe9 --- /dev/null +++ b/hrp/internal/boomer/data/x509/client_cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFcTCCA1mgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx +CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV +BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIyMDcxMDA0MzAxNVoXDTMyMDcwNzA0MzAx +NVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAK36523v5SEM+J8ReNt3USwylERoUMqygoQRTIy7 +ipzfO2dmo5OANFsJtPb3CH+YB6kS9llAioLa9UNrD6SBlR23No/QJeXBiXgpUXAE +DCLhQ/aj0fEy8AEnW+a6mM5jmsEHOy/O3q/KF1JdjNA1T7HuBS6cIvp5+7rF1rG9 +tzJLLrXwUZLKlMjdCDuLxp/qtYUoH81CIuveWAODH3oad559HgD6UBgDRntdT902 
+IUnTejCAOY9Q0yTlcMMbz+FEMZ43Xq4E89YQ7Mel+xkb0lL7H6mNabvfZTX+5qm9 +RDtxrNvLH+hZ+OPOp2qrfyJBaj/yP+4TTN4pC4y5Vqkq7sZ1fjfx9gZTsQLAvmr6 +/c/Z59IlsAIvttbam7FFNrwVlWsD5uRP2DZyKXTjRRCA8NnBo9fltD1FbKKevcqu +PilMiyg8+dJnhKxOeMlw1WSx0h8FFU+jf4MFFX+qFsJB7Ecss1bWpnoYsaeKGMG7 +mcOx5weglRlVccDQollZBXoIM/pDKJNrAbA8otKXbGGl1LJY20HZLNYPIRRlH2pe +YoLyhUi1AKFMecHxcGOIxlHVZ0gfEoWcChYvlWi6M/09c2Qtqq/QfKhD7DAXmMDS +xYFskyAAYSxgX2Q/5Y6mP+qRzDxT0Qm5JyN+UV0laqQ1KBA11+BF8RKriMGYSXy4 +afDJAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFNv1CHU7dlRoE4Lh +/elJzmaSFpU5MA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD +AjANBgkqhkiG9w0BAQsFAAOCAgEAoF0Jc770+dMNNiDyKsGOPgUJBsYMTyGqPmpd +7Nu7wmI+PBlgDkTvVZjU3EO/Y5Ez6fum5gCtf7OKPIYLfV95WBxgkkEvBEYaX4To +eL9nr9jP9AQ9sZocPTSCrlVrIeOT3tV683BY+N8sfHW6xIeI9tqTXTExCKmwuKyZ ++qyokn35Kkydyn47J4bclPD56UWctQinO2cXm2RVHkJlmQSoFREdb0S3xiFt8aAW +olB2xWMCwXb7LDyi5M0HCvz3lGErCTnpL9GBPjsWCSZOK55D/BSxL4NRSBqzsv4N +25SQOP2NgIqabRsYqYhTCRWK0n1h3IBAVh6fVQ2CCStd4gkuDUepTfM+R7mcYR9g +u2hn4kn+1i8y+Uj0z6yN48/i9Cnz3Sq/e8Z48Rbjut5Rx32ldFvHIkdtFjjkgv47 +LbVKaYH4uqQF2xs3tAPuqq/QXNOn8Ie9yHv0MeJiPymIPAk6GBrUOA/Br4kof15v +uEbxeR/nnrzm+eyWMn4dsE0n7GA6wm2gMGENK4E8WK0sYujIAPtG8LHfShEv5f/j +77+3tAcigec39bau4yTkXBV8op1iMPBtEejLD0B5RKZig17Bfdw5v2TP+yGbzD5d +PwhAxn4aVK8zXFdYmwNfXNXBpLaEILxYFpeExaA9Gr5Mn/h+vD987GTW9F4fBhht +MtkfvRA= +-----END CERTIFICATE----- diff --git a/hrp/internal/boomer/data/x509/client_key.pem b/hrp/internal/boomer/data/x509/client_key.pem new file mode 100644 index 00000000..51fdcde0 --- /dev/null +++ b/hrp/internal/boomer/data/x509/client_key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEArfrnbe/lIQz4nxF423dRLDKURGhQyrKChBFMjLuKnN87Z2aj +k4A0Wwm09vcIf5gHqRL2WUCKgtr1Q2sPpIGVHbc2j9Al5cGJeClRcAQMIuFD9qPR +8TLwASdb5rqYzmOawQc7L87er8oXUl2M0DVPse4FLpwi+nn7usXWsb23MksutfBR +ksqUyN0IO4vGn+q1hSgfzUIi695YA4Mfehp3nn0eAPpQGANGe11P3TYhSdN6MIA5 +j1DTJOVwwxvP4UQxnjdergTz1hDsx6X7GRvSUvsfqY1pu99lNf7mqb1EO3Gs28sf 
+6Fn4486naqt/IkFqP/I/7hNM3ikLjLlWqSruxnV+N/H2BlOxAsC+avr9z9nn0iWw +Ai+21tqbsUU2vBWVawPm5E/YNnIpdONFEIDw2cGj1+W0PUVsop69yq4+KUyLKDz5 +0meErE54yXDVZLHSHwUVT6N/gwUVf6oWwkHsRyyzVtamehixp4oYwbuZw7HnB6CV +GVVxwNCiWVkFeggz+kMok2sBsDyi0pdsYaXUsljbQdks1g8hFGUfal5igvKFSLUA +oUx5wfFwY4jGUdVnSB8ShZwKFi+VaLoz/T1zZC2qr9B8qEPsMBeYwNLFgWyTIABh +LGBfZD/ljqY/6pHMPFPRCbknI35RXSVqpDUoEDXX4EXxEquIwZhJfLhp8MkCAwEA +AQKCAgA2Vgo5d5bj/50WcOqCAH3Fg/ZydvHknGPOw2hY+6mK3N08qf2kb4HqfNmb +2AM7dkvOLjHqJhIcVC4NZD56bk4X/cR4ndV4MD2y3ZSlm13+9sO3H+rNnc7/TT+S +i+x1aP5IEu4VPFKoLEGkY7s6u6usMl5D9FeoSrin2Gn5EPtKJdjs0aVoZwSYxw9v +KXRbNX6Dm8hy3pjxeXubfTQzelipkwHv5D1ngn5cwQPUXrd+yyF6TFGtxNxsxYu2 +I9WE0Tt94mUbjEhrLtYEdH47lUjWyb9VwOio2FhPyNBZatcIibQm4QWSF6d33m7D +DdSi6jM4zXvR6w0yxTbqOGgsZVA0/y6419tfigKOV1JlPI0X7xJFLmywHcC6zA0C +GstZGU3igxtbTdkq2lUWYhTTbxAR+TAZd/FLq6y+48lWEIWhon9xDryHHCnNtYwP +ZbYJXf++V6I8LnamVNw+TCdaehMjxoEqUNuzfgm1XdOD1xlNeRSRM0y40wiTAAHj +WIRV66TEQ/y66sbp58lGyvtxcUj3iWz5loFA+gXEnvK1eFcJqRfmEx+dz+EZeKSS +rgt86RJweAuebtGZnOSj5grxPwhsS46KKWH3KEvOZ7ZEduxCgAONy7VAoSLoKMaE +/XADVUj2HukgRxRR4yIE61fVwWlb8XEm6WbhsMCcRu9wR4e50QKCAQEA3v531aFH +gzwrjMQ+6LdDNbQf9QUK5qVf/WG6f/eXcq5x6E5OptoqdYl1B2QqbnbdYCQWga+W +21YnlSOgmo0trS4Zr8LMcyvdiHL2LyYNoo6nE8qI2xYfrdpJZwkR0X+eNJFRa1/X +mha3x0oUAm559ROuaRto6HL3V1nUUiGDmPSqSyOJgTrOI29hBcvWUgpCr/1CL0uL +NmqtkMya9/0Xn0o+BTbdg3PogTIElGgWtStDx3mj67ORGPUqI7nx0TmCxYeWN4OT +779gmc6lleth1+L1RRm1hT+tMSty7fTEivU4Sj7sGmivQzAyiD9Lqg3lOeaRYBGD +UmAWbI9uaYDrQwKCAQEAx7s8Jq80t1DD3kSPCuRiw8r7RjUD3L6CQtag+QJCPts3 +7qV2RtQ2qwcmpFsZ9DcIn08xmx2rZ7sx8CJrys1sL9Wu3krpbdtPjp5AstVS6CBx +mLulGrl5nCO1bnVRKlz6S97FgZ0hjBkeMalJLoYIuD9VUOqNwi19K/oU7mFOPHvm +Jbvo2ZgygwXvSg6nSNqvd5T33ZMnL0dnUhsFsZV47nO8QMB/ZsdlWUEuV+Y5RJBY +3FLo3NBJLA9zIpLEm0hlvA0D/GEvBCQfJOEEgm8K9x7CVGF5rYDBd7R7oGrB5t4T +zFgkUkqskiG3VFE0TnpOq4gkZB/1g0E4W/VmhdelAwKCAQB/Xyaf1cF9So8tlqLA +Vn2DXWGrmLfDSs7rcjkPAyN0lAPoR2JRl+gMvvkjwaki8647TiG07dDjc/CkFXeV +D/L5Ko3tgP07A+FEITZRdBDxuz3f5h4J1jc+HKM0wU92NMjvCdpR1KrYDwXmRX/s 
+a6IpxJYo30krDRAOyval+xKp+YaT6LaQJEC+qM3oe6ftsIKq96QoU6Qu7vw460XR +RLWLfOK0I8SfY0N5GFLZWiMuVIoglHB3H1hPwynQwlNHyOvTXEEHcJa9qLjK4ehf +G9YFdFPYpniypc6NeV3qYZcqMCt47Tv7UbRaUltqy4yyk8FNM0/yac5y7QOh+sN8 +a/D1AoIBABnCJ+vFRMMvg1My/E+nTKV7lBRl2e2qFBqSm4gBppF8rCX26N4RmEtO +TMl9hkdcoZwKFpeup+Bk3/fcOJKbE4zHvhmlB53HXudBuY5WvK57IKtV5+EecnSU +ll18e88+1njabhZdMWpkAuTctDdvycgZQuOAnG+idjYptnFX00Mxp2jOZyVI35rO +NSIT6bcXnPGLILxOsgsC5mxMV9ujL0lxW6HuMYALzyJHqbZkVpZlF1Cy0J1Jr2Yj +R/H5g6mTGKu78fumfO3HysxyyKYZtAxSxzUirRKXPFw3xonVutQPZ/Y+l9CVGNRv +zLjvEBPe6i5tDGRtSrh2vNH/QA2a1gkCggEBAJi2TtRxR4YRgRlzP1NLbAO82OdO +1opIzPaxb+9JxvFm+xILb8kvNe0THkLhLM2nNImTydshCqLXGP6/jahw7Vh9NJNj +QrCHEx9RnJYdcdaayWeDzSJO8oGARs0CXMZXzgPYiFnNXcFFG/R+Ughv0yctIz4o +af6elMwheOPXEyNu1yV0ALlvO/xkPpBRs3HuffJ5EiMkT5SKFa4ErFUaAlDaYpRz +EITcEh6UKnZiAhQADl9rHSymWUlt88xhXw4wEDTBvNmzgOgQvfjnoud8JXO8a7S0 +ihaKprOq1WFRss1USidGfm7lBxIPM60AeSHKt2VsVgpf+KgXgNs3RONhY8c= +-----END RSA PRIVATE KEY----- diff --git a/hrp/internal/data/x509/create.sh b/hrp/internal/boomer/data/x509/create.sh similarity index 100% rename from hrp/internal/data/x509/create.sh rename to hrp/internal/boomer/data/x509/create.sh diff --git a/hrp/internal/data/x509/openssl.cnf b/hrp/internal/boomer/data/x509/openssl.cnf similarity index 96% rename from hrp/internal/data/x509/openssl.cnf rename to hrp/internal/boomer/data/x509/openssl.cnf index d1034214..cdafc663 100644 --- a/hrp/internal/data/x509/openssl.cnf +++ b/hrp/internal/boomer/data/x509/openssl.cnf @@ -19,7 +19,7 @@ keyUsage = critical,digitalSignature,keyEncipherment,keyAgreement subjectAltName = @server_alt_names [server_alt_names] -DNS.1 = *.test.example.com +DNS.1 = *.httprunner.com [test_client] basicConstraints = critical,CA:FALSE diff --git a/hrp/internal/boomer/data/x509/server_cert.pem b/hrp/internal/boomer/data/x509/server_cert.pem new file mode 100644 index 00000000..8f4661a0 --- /dev/null +++ b/hrp/internal/boomer/data/x509/server_cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- 
+MIIFdjCCA16gAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx +CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV +BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIyMDcxMDA0MzAxNFoXDTMyMDcwNzA0MzAx +NFowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3Qtc2VydmVyMTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAL0HTaTaYQ1GbvZ/Py3NJf3WSOzXdm/qh9Fv7hAs +8FGPEEDCRhrvFMjWqAwp3EiQkRavLgTv4t1hkga9y/hc7t/q9ATFm8SC3Dtdkg2X +0YdxsyotPaWgUSmsIJ0uwCIMkU5oGE1J2fopdBxG87T+QGUo1r4QxDQGQ2H9CMsD +217Ca+PdrdldctNs/D2AVkXTew1Bd/nNaOXh3vc14/4b86Y7A2HOFFyRi3QaemJJ +ksnH0CmhydRob5rAZQRClftzjri9gaUfJW5LSUYBXn3Yx1gam6lM5LcPlgWLmXs9 +wthfksY6YlpCa1NtdnNbZIY+6cCHN6ytSPj/1BY8+C954cySSuNVSsAAvm8C80Zz +hnNaivhdouvmWTZM8febnrrt6qo0SEtnn+RkzUznOjVVxyPffgjI8s4gNc3DAIbf +oDwrAgxNF9nXAoeYTVOUxeGcjeG8fIKcfC7pxfI6/ejMiUU7LkL5rEIbfT2bF6EW +ntGyrxYRNdw+VX2MxNNvPKHUUu90JTCxzjaUCSnR4lhatcQPKeYVnn5I+jv6kMm5 +FAkjVwk4U/u7W1DtCedaN9nUJNRWwptHqX2VXcnM0k9tA5yBtBM55yf0zYHz/fOz +RJ/bqHzbs5+il07u1uedMUJ9X9pp85Pm0PFD1zbv8MwZetTJigA4CdU4XU8K56Nz +Avc1AgMBAAGjXDBaMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFI0rfKZ3rjLJZ3R4 +tv5NeYgJyiaqMA4GA1UdDwEB/wQEAwIDqDAbBgNVHREEFDASghAqLmh0dHBydW5u +ZXIuY29tMA0GCSqGSIb3DQEBCwUAA4ICAQCYbWsz11jUxABZDkQDNqGGpdAEJuaD +gAe3Ko28ntT+pjEdInD/YrfEjGI3KQhT00yMVkiWXiK8bBynZB3TpDUfG4OTBhAV +PZy/jQ08wOfmgFQco3asxQovimmKXVwbeJBOlZBfZoseB3h4zz7PcfLI9Xr8dz34 +Pbilg+XOZywoxdHWd1To13ycKi9DPh81cRWu7QACS92wGGsX/eYVW7YKFmjcnj0I +2+WJl7nHD7h+Qyy6QiHmHa6/ZKAx2vkf2ALAHr4zKvIf+LLlQVTKGxtkyRMusiP+ +sZuDq7RN5oYE5G1P5tF6Xb6AUGFrazaiC3kI0K3njs0xifjxiM+7KyfXQHOWV/a6 +NNk9CX9twaKhq8Ay5jjILSUoXWgyl1OXOyIHIpWmsJMyGrQCapS5BZHGwc/K/6yW +TETmn6frJUh8VHJ+gjLvoUVMQvkJbV5IecMQaIfHBegRobi9TDkmjGC1v6+rpfjc +tVhQ7rUQgYtkuoOfRjwvCvY0UQ3hf73u/FCG/+Lw1b/Wcp8PMU+6vpZqlAaaFGVr +WHdrPHC0B0Sc3Pr6dmJp70KVb4gx45icRaJnPLR7sr5CBkorZs9NKXUzNnf9oZWF +Nfm5/isLCqLfwA+VTk78vyWqRycdDJ0lswxZt5pvwI3gXitOhlE6zXtsA883TwZ9 +TxGOtJdjo0IEAQ== +-----END CERTIFICATE----- diff --git 
a/hrp/internal/boomer/data/x509/server_key.pem b/hrp/internal/boomer/data/x509/server_key.pem new file mode 100644 index 00000000..aa1850a7 --- /dev/null +++ b/hrp/internal/boomer/data/x509/server_key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAvQdNpNphDUZu9n8/Lc0l/dZI7Nd2b+qH0W/uECzwUY8QQMJG +Gu8UyNaoDCncSJCRFq8uBO/i3WGSBr3L+Fzu3+r0BMWbxILcO12SDZfRh3GzKi09 +paBRKawgnS7AIgyRTmgYTUnZ+il0HEbztP5AZSjWvhDENAZDYf0IywPbXsJr492t +2V1y02z8PYBWRdN7DUF3+c1o5eHe9zXj/hvzpjsDYc4UXJGLdBp6YkmSycfQKaHJ +1GhvmsBlBEKV+3OOuL2BpR8lbktJRgFefdjHWBqbqUzktw+WBYuZez3C2F+Sxjpi +WkJrU212c1tkhj7pwIc3rK1I+P/UFjz4L3nhzJJK41VKwAC+bwLzRnOGc1qK+F2i +6+ZZNkzx95ueuu3qqjRIS2ef5GTNTOc6NVXHI99+CMjyziA1zcMAht+gPCsCDE0X +2dcCh5hNU5TF4ZyN4bx8gpx8LunF8jr96MyJRTsuQvmsQht9PZsXoRae0bKvFhE1 +3D5VfYzE0288odRS73QlMLHONpQJKdHiWFq1xA8p5hWefkj6O/qQybkUCSNXCThT ++7tbUO0J51o32dQk1FbCm0epfZVdyczST20DnIG0EznnJ/TNgfP987NEn9uofNuz +n6KXTu7W550xQn1f2mnzk+bQ8UPXNu/wzBl61MmKADgJ1ThdTwrno3MC9zUCAwEA +AQKCAgBXlDapFnS4zdVDZ5lCAzaC8PFAqmM5XxQmORG3dNqzLvF8z4XjnLmog6vA +VvS0uiY+uFM9/lbB8x7Q+Maz/3q9TAJa46NT3L1k0+mDWr+9XTSBagyR3EE+aX2C +1dI29FOuXBRGWt0fRm2BXG41gUccl1tHHEWLRQubLr0QMm1E7hdGr8KIXv+AbZJA +fGF8YIs2jQqlNkJPn+LJ7rH/Xbv5XIYonm5YpSZTWKEzQJs92dHcOBVm0CxFKrai +zqbmpZeOiF60vkV9YGxGfwPkkrdpXoqYWgPtvM7pKtClhOvti/pY1VwULYnEUYb7 +03AzsppilUN6QZ75nq4Iz569gF7YuUCTqFwYt8eX2TIpXctkvHeTIdqLUEn1JMTh +Iqr8xmnsGPTICiLc1bHPXDfOyg9wI1zcFAdS9FAzdlYyGPSZt4KwgBG7e+9daz0A +whUaim4OV4mpHQMi/Tx0aF4NPRz/BQbzfKrjvaeHVI+VBJUWR46MwjpjwaDpUiIe +fkgJf6wVeFbdzJMOCP1xZys5N/UkC9V371J6kLywPrzeVuljqSVGxP2SHzRlEnlE +cSff5sbLAHz30y5y+HMVePC/svZ93/vzdZNU6PeHvEcvGu82/KRuy4iKgCJIeSa2 +DjxlfZn6AnQCllHwVJjit1SxPYn4nmNGoCMqNln6STT47jj9UQKCAQEA5KScPzAR +2u8Wsyhfl2wqd5lfTsNqZ0VM7RuGMEKGIx3WpoSrUiTRLwalHRuJUudbePRlnYze +gQLgiEgmv2d9RaPEckMzgQwG5EIEY4z8RYaB5zadcNUla8M467vHFHDpPZ5TPHg5 +HpbREi0J0sL+Oa8M5Nf/XRO8x/uL25f9sQUoE2nSfr4PnV/ysbvoBn5sE6cn4jsr +/HDrPjksgx2/uQcnmUq70Kxhm7iCUUcbTlxoWDCV/g0UNJcZ/6PgDN7fVaXItXVK 
+QHnCS0yQkJERHDg5mBWGS8SChqPUTKC1O7KYEanenoxqm0mpJAvMG+DznvqClmiT +kDxJAcX31kakkwKCAQEA06VKwR8Wy3XYHpsX094ZeLXGTcnHFEM/jOBGQZvQjV2d +39dhGKj7dqw08RQGAVZ5KK3coMNk6uIO3VuYwYBjEG47a8q9FeWP0tBcDIPCGibV +HLwGgExJDyFdgWtLnI6yPWKsoZjMppstVcQZK0ouWpLvgK8nrg7WoUeJmvCfnw7f +p9pxj9S98ja8Q2uajvo8SWaV04YKm6jW0+fxwlMBqaNZbIxXyXGfO3qMGczAbCne +oPxzkHI9AZ97qevBzMAh/IXqUr7e+8BM/5vxoszEXtLgfIQL+owsy6ALe5N0UUuq +LYrauuzjaYMjkEZ1Ow2aRmrkOaStMLXPI78CW4faFwKCAQBHzV34BfuFepHxX1tt +rR1FA9hHXtz6Y2v+BifE3g9L1eID1yQKHt/GWdreYjhk3Zz/RhjnOkbh0up6QdZR +Q4m2pfBaRbpV61X6trS0IqFSoCQJXUBiH72pstwcQ5MIW1ET9bWEBulBLvGnOJee +JXg62zs8XoymST1+vAM2yet0fP8R4ail/r/elzQbFryN1YPRRCwlQpnUpA1sM/5D +isMbsyB/ZlXG+WuJwI7EQYVUvXZTQ6bG6oqO3WjfvDHvOMqAFhkKyzOvPc2DYh8A +F159Mzb7CL9s6eBnselIyys+/R3+Zg8wUT5lV+OTG1VU5/b51QfPfjXhFN2EfgwP +sY2bAoIBAQCLNB978BfNEKBqWPYOGvnD5EMe7MUs9aI55VUwV+yO3nE1RfMOBi8G ++fMEUXg1rwuXjusbLgkVWEQQoetR8kC2ENqyZjGB0nCLZxH0BUFIdBwdfyoDfqla +80YOFmUv/scLCviifN62AkCKNaWcTHk6h4RRrmK53/aZM3U1XGiQdHb0bv/caz/X +rNqcuYx51+qJGJkY/APEKAPMcrUXbAMe8Vqiw5gF3d6uf0bgvUQeoFdWqVTVP94S +UDRFKdRY+FIiRm49qF7/VJcQVCBVRLsv5yFRpIRAcawQ7h4/VFfgFJVEyRxeb+qP +fnqIrV7zzVmYUTv1EfP7oskwKLTDQRJXAoIBAG2pAsyv963Bxy4cUq2v2c1tSHSV +Pi65N/0ynhWqh7tYGmgUigEhRwbuVCmC4nFOat0b9uXauFpUWth29JKOKO3Tdaze +Nb6Nrlb2AYHAs4x1LSd73mf2GR82eahcBNpFkG5NN7vg/mySN3DoBuFx2ZvrlYuw +yjvNf51QcIlOFEWcbfOvsE9/2aXGkdmySqUZ+BJato/FMmuvSdjVOsb2zdtRG/j8 +D3nvxRqJITI849PHWVEMWeDOFT4dRTqgzd1yDB7UUggQwHExujAn9ZbTOivjn6H5 +j/aLw4IjkKge1qz9c5a13LMulYkYE8bn2GZ7Jali1v5dV5gIWtq+wtZ+32s= +-----END RSA PRIVATE KEY----- diff --git a/hrp/internal/boomer/grpc/messager/messager.pb.go b/hrp/internal/boomer/grpc/messager/messager.pb.go new file mode 100644 index 00000000..79419e23 --- /dev/null +++ b/hrp/internal/boomer/grpc/messager/messager.pb.go @@ -0,0 +1,567 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.0 +// protoc v3.19.4 +// source: grpc/proto/messager.proto + +package messager + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StreamRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Data map[string]int64 `protobuf:"bytes,2,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + NodeID string `protobuf:"bytes,3,opt,name=NodeID,proto3" json:"NodeID,omitempty"` +} + +func (x *StreamRequest) Reset() { + *x = StreamRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_proto_messager_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamRequest) ProtoMessage() {} + +func (x *StreamRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_proto_messager_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamRequest.ProtoReflect.Descriptor instead. 
+func (*StreamRequest) Descriptor() ([]byte, []int) { + return file_grpc_proto_messager_proto_rawDescGZIP(), []int{0} +} + +func (x *StreamRequest) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *StreamRequest) GetData() map[string]int64 { + if x != nil { + return x.Data + } + return nil +} + +func (x *StreamRequest) GetNodeID() string { + if x != nil { + return x.NodeID + } + return "" +} + +type StreamResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Profile []byte `protobuf:"bytes,2,opt,name=profile,proto3" json:"profile,omitempty"` + Data map[string]int64 `protobuf:"bytes,3,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + NodeID string `protobuf:"bytes,4,opt,name=NodeID,proto3" json:"NodeID,omitempty"` + Tasks []byte `protobuf:"bytes,5,opt,name=tasks,proto3" json:"tasks,omitempty"` +} + +func (x *StreamResponse) Reset() { + *x = StreamResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_proto_messager_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamResponse) ProtoMessage() {} + +func (x *StreamResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_proto_messager_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamResponse.ProtoReflect.Descriptor instead. 
+func (*StreamResponse) Descriptor() ([]byte, []int) { + return file_grpc_proto_messager_proto_rawDescGZIP(), []int{1} +} + +func (x *StreamResponse) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *StreamResponse) GetProfile() []byte { + if x != nil { + return x.Profile + } + return nil +} + +func (x *StreamResponse) GetData() map[string]int64 { + if x != nil { + return x.Data + } + return nil +} + +func (x *StreamResponse) GetNodeID() string { + if x != nil { + return x.NodeID + } + return "" +} + +func (x *StreamResponse) GetTasks() []byte { + if x != nil { + return x.Tasks + } + return nil +} + +type RegisterRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NodeID string `protobuf:"bytes,1,opt,name=NodeID,proto3" json:"NodeID,omitempty"` +} + +func (x *RegisterRequest) Reset() { + *x = RegisterRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_proto_messager_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RegisterRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegisterRequest) ProtoMessage() {} + +func (x *RegisterRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_proto_messager_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegisterRequest.ProtoReflect.Descriptor instead. 
+func (*RegisterRequest) Descriptor() ([]byte, []int) { + return file_grpc_proto_messager_proto_rawDescGZIP(), []int{2} +} + +func (x *RegisterRequest) GetNodeID() string { + if x != nil { + return x.NodeID + } + return "" +} + +type RegisterResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *RegisterResponse) Reset() { + *x = RegisterResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_proto_messager_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RegisterResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegisterResponse) ProtoMessage() {} + +func (x *RegisterResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_proto_messager_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegisterResponse.ProtoReflect.Descriptor instead. 
+func (*RegisterResponse) Descriptor() ([]byte, []int) { + return file_grpc_proto_messager_proto_rawDescGZIP(), []int{3} +} + +func (x *RegisterResponse) GetCode() string { + if x != nil { + return x.Code + } + return "" +} + +func (x *RegisterResponse) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +type SignOutRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NodeID string `protobuf:"bytes,1,opt,name=NodeID,proto3" json:"NodeID,omitempty"` +} + +func (x *SignOutRequest) Reset() { + *x = SignOutRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_proto_messager_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SignOutRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignOutRequest) ProtoMessage() {} + +func (x *SignOutRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_proto_messager_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignOutRequest.ProtoReflect.Descriptor instead. 
+func (*SignOutRequest) Descriptor() ([]byte, []int) { + return file_grpc_proto_messager_proto_rawDescGZIP(), []int{4} +} + +func (x *SignOutRequest) GetNodeID() string { + if x != nil { + return x.NodeID + } + return "" +} + +type SignOutResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *SignOutResponse) Reset() { + *x = SignOutResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_proto_messager_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SignOutResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignOutResponse) ProtoMessage() {} + +func (x *SignOutResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_proto_messager_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignOutResponse.ProtoReflect.Descriptor instead. 
+func (*SignOutResponse) Descriptor() ([]byte, []int) { + return file_grpc_proto_messager_proto_rawDescGZIP(), []int{5} +} + +func (x *SignOutResponse) GetCode() string { + if x != nil { + return x.Code + } + return "" +} + +func (x *SignOutResponse) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +var File_grpc_proto_messager_proto protoreflect.FileDescriptor + +var file_grpc_proto_messager_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0xaa, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xdc, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 
0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, + 0x65, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, + 0x44, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x29, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x22, 0x40, 0x0a, 0x10, 0x52, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x28, 0x0a, + 0x0e, 0x53, 0x69, 0x67, 0x6e, 0x4f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x4e, 0x6f, 
0x64, 0x65, 0x49, 0x44, 0x22, 0x3f, 0x0a, 0x0f, 0x53, 0x69, 0x67, 0x6e, 0x4f, + 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0xe4, 0x01, 0x0a, 0x07, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, + 0x12, 0x18, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x07, 0x53, 0x69, 0x67, 0x6e, 0x4f, + 0x75, 0x74, 0x12, 0x17, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x69, 0x67, + 0x6e, 0x4f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x4f, 0x75, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x1d, 0x42, 0x69, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x17, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, + 0x0f, 0x5a, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x72, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var 
( + file_grpc_proto_messager_proto_rawDescOnce sync.Once + file_grpc_proto_messager_proto_rawDescData = file_grpc_proto_messager_proto_rawDesc +) + +func file_grpc_proto_messager_proto_rawDescGZIP() []byte { + file_grpc_proto_messager_proto_rawDescOnce.Do(func() { + file_grpc_proto_messager_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_proto_messager_proto_rawDescData) + }) + return file_grpc_proto_messager_proto_rawDescData +} + +var file_grpc_proto_messager_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_proto_messager_proto_goTypes = []interface{}{ + (*StreamRequest)(nil), // 0: message.StreamRequest + (*StreamResponse)(nil), // 1: message.StreamResponse + (*RegisterRequest)(nil), // 2: message.RegisterRequest + (*RegisterResponse)(nil), // 3: message.RegisterResponse + (*SignOutRequest)(nil), // 4: message.SignOutRequest + (*SignOutResponse)(nil), // 5: message.SignOutResponse + nil, // 6: message.StreamRequest.DataEntry + nil, // 7: message.StreamResponse.DataEntry +} +var file_grpc_proto_messager_proto_depIdxs = []int32{ + 6, // 0: message.StreamRequest.data:type_name -> message.StreamRequest.DataEntry + 7, // 1: message.StreamResponse.data:type_name -> message.StreamResponse.DataEntry + 2, // 2: message.Message.Register:input_type -> message.RegisterRequest + 4, // 3: message.Message.SignOut:input_type -> message.SignOutRequest + 0, // 4: message.Message.BidirectionalStreamingMessage:input_type -> message.StreamRequest + 3, // 5: message.Message.Register:output_type -> message.RegisterResponse + 5, // 6: message.Message.SignOut:output_type -> message.SignOutResponse + 1, // 7: message.Message.BidirectionalStreamingMessage:output_type -> message.StreamResponse + 5, // [5:8] is the sub-list for method output_type + 2, // [2:5] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func 
init() { file_grpc_proto_messager_proto_init() } +func file_grpc_proto_messager_proto_init() { + if File_grpc_proto_messager_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_proto_messager_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_proto_messager_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_proto_messager_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegisterRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_proto_messager_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegisterResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_proto_messager_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SignOutRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_proto_messager_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SignOutResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_proto_messager_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + 
NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_proto_messager_proto_goTypes, + DependencyIndexes: file_grpc_proto_messager_proto_depIdxs, + MessageInfos: file_grpc_proto_messager_proto_msgTypes, + }.Build() + File_grpc_proto_messager_proto = out.File + file_grpc_proto_messager_proto_rawDesc = nil + file_grpc_proto_messager_proto_goTypes = nil + file_grpc_proto_messager_proto_depIdxs = nil +} diff --git a/hrp/internal/boomer/grpc/messager/messager_grpc.pb.go b/hrp/internal/boomer/grpc/messager/messager_grpc.pb.go new file mode 100644 index 00000000..b4bbad60 --- /dev/null +++ b/hrp/internal/boomer/grpc/messager/messager_grpc.pb.go @@ -0,0 +1,210 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.19.4 +// source: grpc/proto/messager.proto + +package messager + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// MessageClient is the client API for Message service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type MessageClient interface { + Register(ctx context.Context, in *RegisterRequest, opts ...grpc.CallOption) (*RegisterResponse, error) + SignOut(ctx context.Context, in *SignOutRequest, opts ...grpc.CallOption) (*SignOutResponse, error) + BidirectionalStreamingMessage(ctx context.Context, opts ...grpc.CallOption) (Message_BidirectionalStreamingMessageClient, error) +} + +type messageClient struct { + cc grpc.ClientConnInterface +} + +func NewMessageClient(cc grpc.ClientConnInterface) MessageClient { + return &messageClient{cc} +} + +func (c *messageClient) Register(ctx context.Context, in *RegisterRequest, opts ...grpc.CallOption) (*RegisterResponse, error) { + out := new(RegisterResponse) + err := c.cc.Invoke(ctx, "/message.Message/Register", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *messageClient) SignOut(ctx context.Context, in *SignOutRequest, opts ...grpc.CallOption) (*SignOutResponse, error) { + out := new(SignOutResponse) + err := c.cc.Invoke(ctx, "/message.Message/SignOut", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *messageClient) BidirectionalStreamingMessage(ctx context.Context, opts ...grpc.CallOption) (Message_BidirectionalStreamingMessageClient, error) { + stream, err := c.cc.NewStream(ctx, &Message_ServiceDesc.Streams[0], "/message.Message/BidirectionalStreamingMessage", opts...) 
+ if err != nil { + return nil, err + } + x := &messageBidirectionalStreamingMessageClient{stream} + return x, nil +} + +type Message_BidirectionalStreamingMessageClient interface { + Send(*StreamRequest) error + Recv() (*StreamResponse, error) + grpc.ClientStream +} + +type messageBidirectionalStreamingMessageClient struct { + grpc.ClientStream +} + +func (x *messageBidirectionalStreamingMessageClient) Send(m *StreamRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *messageBidirectionalStreamingMessageClient) Recv() (*StreamResponse, error) { + m := new(StreamResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// MessageServer is the server API for Message service. +// All implementations must embed UnimplementedMessageServer +// for forward compatibility +type MessageServer interface { + Register(context.Context, *RegisterRequest) (*RegisterResponse, error) + SignOut(context.Context, *SignOutRequest) (*SignOutResponse, error) + BidirectionalStreamingMessage(Message_BidirectionalStreamingMessageServer) error + mustEmbedUnimplementedMessageServer() +} + +// UnimplementedMessageServer must be embedded to have forward compatible implementations. 
+type UnimplementedMessageServer struct { +} + +func (UnimplementedMessageServer) Register(context.Context, *RegisterRequest) (*RegisterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Register not implemented") +} +func (UnimplementedMessageServer) SignOut(context.Context, *SignOutRequest) (*SignOutResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SignOut not implemented") +} +func (UnimplementedMessageServer) BidirectionalStreamingMessage(Message_BidirectionalStreamingMessageServer) error { + return status.Errorf(codes.Unimplemented, "method BidirectionalStreamingMessage not implemented") +} +func (UnimplementedMessageServer) mustEmbedUnimplementedMessageServer() {} + +// UnsafeMessageServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to MessageServer will +// result in compilation errors. +type UnsafeMessageServer interface { + mustEmbedUnimplementedMessageServer() +} + +func RegisterMessageServer(s grpc.ServiceRegistrar, srv MessageServer) { + s.RegisterService(&Message_ServiceDesc, srv) +} + +func _Message_Register_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RegisterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MessageServer).Register(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/message.Message/Register", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MessageServer).Register(ctx, req.(*RegisterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Message_SignOut_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SignOutRequest) + if err := dec(in); err != nil { + 
return nil, err + } + if interceptor == nil { + return srv.(MessageServer).SignOut(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/message.Message/SignOut", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MessageServer).SignOut(ctx, req.(*SignOutRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Message_BidirectionalStreamingMessage_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(MessageServer).BidirectionalStreamingMessage(&messageBidirectionalStreamingMessageServer{stream}) +} + +type Message_BidirectionalStreamingMessageServer interface { + Send(*StreamResponse) error + Recv() (*StreamRequest, error) + grpc.ServerStream +} + +type messageBidirectionalStreamingMessageServer struct { + grpc.ServerStream +} + +func (x *messageBidirectionalStreamingMessageServer) Send(m *StreamResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *messageBidirectionalStreamingMessageServer) Recv() (*StreamRequest, error) { + m := new(StreamRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Message_ServiceDesc is the grpc.ServiceDesc for Message service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Message_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "message.Message", + HandlerType: (*MessageServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Register", + Handler: _Message_Register_Handler, + }, + { + MethodName: "SignOut", + Handler: _Message_SignOut_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "BidirectionalStreamingMessage", + Handler: _Message_BidirectionalStreamingMessage_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/proto/messager.proto", +} diff --git a/hrp/internal/grpc/proto/messager.proto b/hrp/internal/boomer/grpc/proto/messager.proto similarity index 54% rename from hrp/internal/grpc/proto/messager.proto rename to hrp/internal/boomer/grpc/proto/messager.proto index fa6bfd49..8ddbbfa0 100644 --- a/hrp/internal/grpc/proto/messager.proto +++ b/hrp/internal/boomer/grpc/proto/messager.proto @@ -5,6 +5,8 @@ package message; option go_package = "grpc/messager"; service Message { + rpc Register(RegisterRequest) returns (RegisterResponse) {} + rpc SignOut(SignOutRequest) returns (SignOutResponse) {} rpc BidirectionalStreamingMessage(stream StreamRequest) returns (stream StreamResponse){}; } @@ -20,4 +22,22 @@ message StreamResponse{ map data = 3; string NodeID = 4; bytes tasks = 5; +} + +message RegisterRequest{ + string NodeID = 1; +} + +message RegisterResponse{ + string code = 1; + string message = 2; +} + +message SignOutRequest{ + string NodeID = 1; +} + +message SignOutResponse{ + string code = 1; + string message = 2; } \ No newline at end of file diff --git a/hrp/internal/boomer/message.go b/hrp/internal/boomer/message.go index a9168384..afee82ba 100644 --- a/hrp/internal/boomer/message.go +++ b/hrp/internal/boomer/message.go @@ -18,9 +18,9 @@ type genericMessage struct { Tasks []byte `json:"tasks,omitempty"` } -type profileMessage struct { - 
Profile *Profile `json:"profile,omitempty"` - Tasks []byte `json:"tasks,omitempty"` +type task struct { + Profile *Profile `json:"profile,omitempty"` + TestCases []byte `json:"testcases,omitempty"` } func newGenericMessage(t string, data map[string]int64, nodeID string) (msg *genericMessage) { diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index b020bbfa..100a7bca 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -10,6 +10,10 @@ import ( "sync/atomic" "time" + "github.com/httprunner/httprunner/v4/hrp/internal/boomer/grpc/messager" + "github.com/httprunner/httprunner/v4/hrp/internal/builtin" + "github.com/jinzhu/copier" + "github.com/go-errors/errors" "github.com/olekukonko/tablewriter" @@ -203,9 +207,21 @@ type runner struct { // close this channel will stop all running workers. stopChan chan bool + stoppingChan chan bool + + doneChan chan bool + + reportChan chan bool + // close this channel will stop all goroutines used in runner. closeChan chan bool + // wgMu blocks concurrent waitgroup mutation while server stopping + wgMu sync.RWMutex + // wg is used to wait for the goroutines that depends on the server state + // to exit when stopping the server. 
+ wg sync.WaitGroup + outputs []Output once *sync.Once @@ -343,11 +359,12 @@ func (r *runner) reportTestResult() { } func (r *runner) reset() { - r.updateState(StateInit) r.controller.reset() r.stats.clearAll() r.rebalance = make(chan bool) - r.stopChan = make(chan bool) + r.stoppingChan = make(chan bool) + r.doneChan = make(chan bool) + r.reportChan = make(chan bool) } func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan bool, spawnCompleteFunc func()) { @@ -376,7 +393,7 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo if r.loop != nil { workerLoop = &Loop{loopCount: atomic.LoadInt64(&r.loop.loopCount) / r.controller.spawnCount} } - go func() { + r.goAttach(func() { for { select { case <-quit: @@ -402,8 +419,7 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo // finished count of single worker workerLoop.increaseFinishedCount() if r.loop.isFinished() { - r.stop() - close(r.rebalance) + go r.stop() } } if r.controller.erase() { @@ -411,7 +427,7 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo } } } - }() + }) continue } @@ -433,6 +449,27 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo } } +// goAttach creates a goroutine on a given function and tracks it using +// the runner waitgroup. +// The passed function should interrupt on r.StoppingNotify(). 
+func (r *runner) goAttach(f func()) {
+	r.wgMu.RLock() // this blocks with an ongoing close(r.stoppingChan)
+	defer r.wgMu.RUnlock()
+	select {
+	case <-r.stoppingChan:
+		log.Warn().Msg("server has stopped; skipping GoAttach")
+		return
+	default:
+	}
+
+	// now safe to add since waitgroup wait has not started yet
+	r.wg.Add(1)
+	go func() {
+		defer r.wg.Done()
+		f()
+	}()
+}
+
 // setTasks will set the runner's task list AND the total task weight
 // which is used to get a random task later
 func (r *runner) setTasks(t []*Task) {
@@ -496,6 +533,7 @@ func (r *runner) statsStart() {
 		case <-ticker.C:
 			r.reportStats()
 			if !r.isStarted() {
+				close(r.reportChan)
 				log.Info().Msg("Quitting statsStart")
 				return
 			}
@@ -506,13 +544,49 @@ func (r *runner) stop() {
 	// stop previous goroutines without blocking
 	// those goroutines will exit when r.safeRun returns
-	close(r.stopChan)
+	r.Stop()
 	if r.rateLimitEnabled {
 		r.rateLimiter.Stop()
 	}
 	r.updateState(StateStopped)
 }
 
+// hardStop stops the runner immediately, without waiting for graceful shutdown to finish.
+func (r *runner) hardStop() {
+	select {
+	case r.stopChan <- true:
+	case <-r.doneChan:
+		return
+	}
+	<-r.doneChan
+}
+
+// Stop stops the runner gracefully, and shuts down the running goroutines.
+// Stop should be called after a start, otherwise it will block forever.
+// It requests a stop by sending on stopChan, then waits for doneChan to
+// be closed by the runner's shutdown path.
+// Stop terminates the runner and performs any necessary finalization.
+// The runner must not be reused after Stop has been invoked.
+func (r *runner) Stop() {
+	r.hardStop()
+}
+
+// StopNotify returns a channel that receives a value
+// when the runner is asked to stop.
+func (r *runner) StopNotify() <-chan bool { return r.stopChan }
+
+// DoneNotify returns a channel that is closed
+// when the runner has completely stopped.
+func (r *runner) DoneNotify() <-chan bool { return r.doneChan }
+
+// StoppingNotify returns a channel that is closed
+// when the runner begins stopping.
+func (r *runner) StoppingNotify() <-chan bool { return r.stoppingChan }
+
+// RebalanceNotify returns a channel that is closed when the load of
+// the running workers needs to be rebalanced.
+func (r *runner) RebalanceNotify() <-chan bool { return r.rebalance }
+
 func (r *runner) getState() int32 {
 	return atomic.LoadInt32(&r.state)
 }
@@ -541,13 +615,17 @@ func newLocalRunner(spawnCount int64, spawnRate float64) *localRunner {
 			spawnRate:  spawnRate,
 			controller: &Controller{},
 			outputs:    make([]Output, 0),
+			stopChan:   make(chan bool),
 			closeChan:  make(chan bool),
 			once:       &sync.Once{},
+			wg:         sync.WaitGroup{},
+			wgMu:       sync.RWMutex{},
 		},
 	}
 }
 
 func (r *localRunner) start() {
+	r.updateState(StateInit)
 	// init localRunner
 	r.reset()
@@ -558,34 +636,40 @@ func (r *localRunner) start() {
 	// output setup
 	r.outputOnStart()
 
-	go r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stopChan, nil)
+	go r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stoppingChan, nil)
+
+	defer func() {
+		r.wgMu.Lock() // block concurrent waitgroup adds in GoAttach while stopping
+		close(r.stoppingChan)
+		close(r.rebalance)
+		r.wgMu.Unlock()
+
+		// wait for goroutines before closing
+		r.wg.Wait()
+
+		r.updateState(StateStopping)
+
+		<-r.reportChan
+
+		// report test result
+		r.reportTestResult()
+
+		// output teardown
+		r.outputOnStop()
+
+		close(r.doneChan)
+		r.updateState(StateQuitting)
+	}()
 
 	// start stats report
-	r.statsStart()
+	go r.statsStart()
 
-	// stop
 	<-r.stopChan
-	r.updateState(StateStopped)
-
-	// stop rate limiter
-	if r.rateLimitEnabled {
-		r.rateLimiter.Stop()
-	}
-
-	// report test result
-	r.reportTestResult()
-
-	// output teardown
-	r.outputOnStop()
-
-	r.updateState(StateQuitting)
-	return
 }
 
 func (r *localRunner) stop() {
 	if r.runner.isStarted() {
 		r.runner.stop()
-		close(r.rebalance)
 	}
 }
 
@@ -600,7 +684,7 @@ type 
workerRunner struct { profile *Profile - tasksChan chan *profileMessage + tasksChan chan *task mutex sync.Mutex ignoreQuit bool @@ -612,13 +696,14 @@ func newWorkerRunner(masterHost string, masterPort int) (r *workerRunner) { stats: newRequestStats(), outputs: make([]Output, 0), controller: &Controller{}, + stopChan: make(chan bool), closeChan: make(chan bool), once: &sync.Once{}, }, masterHost: masterHost, masterPort: masterPort, nodeID: getNodeID(), - tasksChan: make(chan *profileMessage, 10), + tasksChan: make(chan *task, 10), mutex: sync.Mutex{}, ignoreQuit: false, } @@ -643,9 +728,9 @@ func (r *workerRunner) onSpawnMessage(msg *genericMessage) { if msg.Tasks == nil && len(r.tasks) == 0 { log.Error().Msg("miss tasks") } - r.tasksChan <- &profileMessage{ - Profile: profile, - Tasks: msg.Tasks, + r.tasksChan <- &task{ + Profile: profile, + TestCases: msg.Tasks, } log.Info().Msg("on spawn message successful") } @@ -658,7 +743,7 @@ func (r *workerRunner) onRebalanceMessage(msg *genericMessage) { r.setSpawnCount(profile.SpawnCount) r.setSpawnRate(profile.SpawnRate) - r.tasksChan <- &profileMessage{ + r.tasksChan <- &task{ Profile: profile, } log.Info().Msg("on rebalance message successful") @@ -672,6 +757,9 @@ func (r *workerRunner) onMessage(msg *genericMessage) { case "spawn": r.onSpawnMessage(msg) case "quit": + if r.ignoreQuit { + break + } r.close() } case StateSpawning: @@ -698,8 +786,10 @@ func (r *workerRunner) onMessage(msg *genericMessage) { switch msg.Type { case "spawn": r.onSpawnMessage(msg) - go r.start() case "quit": + if r.ignoreQuit { + break + } r.close() } } @@ -725,57 +815,88 @@ func (r *workerRunner) startListener() { // run worker service func (r *workerRunner) run() { + println("\n========================= HttpRunner Worker for Distributed Load Testing ========================= ") r.updateState(StateInit) r.client = newClient(r.masterHost, r.masterPort, r.nodeID) - - err := r.client.connect() + println(fmt.Sprintf("ready to connect master to 
%s:%d", r.masterHost, r.masterPort)) + err := r.client.start() if err != nil { - log.Printf("Failed to connect to master(%s:%d) with error %v\n", r.masterHost, r.masterPort, err) + log.Error().Err(err).Msg(fmt.Sprintf("failed to connect to master(%s:%d) with error %v\n", r.masterHost, r.masterPort)) } + if err = r.client.register(r.client.config.ctx); err != nil { + log.Error().Err(err).Msg("failed to register") + } + + err = r.client.newBiStreamClient() + if err != nil { + log.Error().Err(err).Msg("failed to establish bidirectional stream, waiting master launched") + } + + go r.client.recv() + go r.client.send() + + defer func() { + r.wg.Wait() + + var ticker = time.NewTicker(1 * time.Second) + if r.client != nil { + // waitting for quit message is sent to master + select { + case <-r.client.disconnectedChannel(): + case <-ticker.C: + log.Warn().Msg("Timeout waiting for sending quit message to master, boomer will quit any way.") + } + + if err = r.client.signOut(r.client.config.ctx); err != nil { + log.Error().Err(err).Msg("failed to sign out") + } + r.client.close() + } + }() + // listen to master go r.startListener() - // register worker information to master - r.client.sendChannel() <- newGenericMessage("register", nil, r.nodeID) // tell master, I'm ready log.Info().Msg("send client ready signal") r.client.sendChannel() <- newClientReadyMessageToMaster(r.nodeID) // heartbeat // See: https://github.com/locustio/locust/commit/a8c0d7d8c588f3980303358298870f2ea394ab93 - go func() { - var ticker = time.NewTicker(heartbeatInterval) - for { - select { - case <-ticker.C: - if atomic.LoadInt32(&r.client.failCount) > 2 { - r.updateState(StateMissing) - } - if r.getState() == StateMissing { - if r.client.reConnect() == nil { - r.updateState(StateInit) - } - } - CPUUsage := GetCurrentCPUUsage() - data := map[string]int64{ - "state": int64(r.getState()), - "current_cpu_usage": int64(CPUUsage), - "spawn_count": r.controller.getCurrentClientsNum(), - } - 
r.client.sendChannel() <- newGenericMessage("heartbeat", data, r.nodeID) - case <-r.closeChan: - return + var ticker = time.NewTicker(heartbeatInterval) + for { + select { + case <-ticker.C: + if atomic.LoadInt32(&r.client.failCount) > 2 { + r.updateState(StateMissing) } + if r.getState() == StateMissing { + err = r.client.register(r.client.config.ctx) + if err != nil { + continue + } + if r.client.newBiStreamClient() == nil { + r.updateState(StateInit) + } + } + CPUUsage := GetCurrentCPUUsage() + data := map[string]int64{ + "state": int64(r.getState()), + "current_cpu_usage": int64(CPUUsage), + "spawn_count": r.controller.getCurrentClientsNum(), + } + r.client.sendChannel() <- newGenericMessage("heartbeat", data, r.nodeID) + case <-r.closeChan: + return } - }() - <-r.closeChan + } } -// start load test func (r *workerRunner) start() { r.mutex.Lock() defer r.mutex.Unlock() + r.updateState(StateInit) r.reset() // start rate limiter @@ -785,38 +906,42 @@ func (r *workerRunner) start() { r.once.Do(r.outputOnStart) - go r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stopChan, r.spawnComplete) + go r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stoppingChan, r.spawnComplete) + + defer func() { + r.wgMu.Lock() // block concurrent waitgroup adds in GoAttach while stopping + close(r.stoppingChan) + close(r.rebalance) + r.wgMu.Unlock() + + // wait for goroutines before closing + r.wg.Wait() + + r.updateState(StateStopping) + + <-r.reportChan + + r.reportTestResult() + r.outputOnStop() + + close(r.doneChan) + }() // start stats report - r.statsStart() + go r.statsStart() - r.reportTestResult() - r.outputOnStop() + <-r.stopChan } func (r *workerRunner) stop() { if r.isStarted() { r.runner.stop() - close(r.rebalance) } } func (r *workerRunner) close() { - // waiting report finished - time.Sleep(1 * time.Second) + r.onQuiting() close(r.closeChan) - var ticker = time.NewTicker(1 * time.Second) - if r.client != nil { - // waitting for quit message is sent to master 
- select { - case <-r.client.disconnectedChannel(): - break - case <-ticker.C: - log.Warn().Msg("Timeout waiting for sending quit message to master, boomer will quit any way.") - r.onQuiting() - } - r.client.close() - } } // masterRunner controls worker to spawn goroutines and collect stats. @@ -835,15 +960,18 @@ type masterRunner struct { parseTestCasesChan chan bool testCaseBytes chan []byte - // set profile to worker - profileBytes chan []byte + tcb []byte } func newMasterRunner(masterBindHost string, masterBindPort int) *masterRunner { return &masterRunner{ runner: runner{ - state: StateInit, - closeChan: make(chan bool), + state: StateInit, + stoppingChan: make(chan bool), + doneChan: make(chan bool), + closeChan: make(chan bool), + wg: sync.WaitGroup{}, + wgMu: sync.RWMutex{}, }, masterBindHost: masterBindHost, masterBindPort: masterBindPort, @@ -908,9 +1036,6 @@ func (r *masterRunner) clientListener() { } switch msg.Type { case typeClientReady: - if workerInfo.getState() == StateInit { - break - } workerInfo.setState(StateInit) if r.getState() == StateRunning { log.Warn().Str("worker id", workerInfo.ID).Msg("worker joined, ready to rebalance the load of each worker") @@ -975,40 +1100,52 @@ func (r *masterRunner) run() { return } - // listen and deal message from worker - go r.clientListener() - // listen and record heartbeat from worker - go r.heartbeatWorker() + defer func() { + r.wgMu.Lock() // block concurrent waitgroup adds in GoAttach while stopping + close(r.stoppingChan) + r.wgMu.Unlock() + + r.wg.Wait() + + r.server.close() + + close(r.doneChan) + }() if r.autoStart { - log.Info().Msg("auto start, waiting expected workers joined") - var ticker = time.NewTicker(1 * time.Second) - var tickerMaxWait = time.NewTicker(time.Duration(r.expectWorkersMaxWait) * time.Second) - FOR: - for { - select { - case <-r.closeChan: - return - case <-ticker.C: - c := r.server.getClientsLength() - log.Info().Msg(fmt.Sprintf("expected worker number: %v, current worker 
count: %v", r.expectWorkers, c)) - if c >= r.expectWorkers { - go func() { + r.goAttach(func() { + log.Info().Msg("auto start, waiting expected workers joined") + var ticker = time.NewTicker(1 * time.Second) + var tickerMaxWait = time.NewTicker(time.Duration(r.expectWorkersMaxWait) * time.Second) + for { + select { + case <-r.closeChan: + return + case <-ticker.C: + c := r.server.getClientsLength() + log.Info().Msg(fmt.Sprintf("expected worker number: %v, current worker count: %v", r.expectWorkers, c)) + if c >= r.expectWorkers { err = r.start() if err != nil { log.Error().Err(err).Msg("failed to run") os.Exit(1) } - }() - break FOR + return + } + case <-tickerMaxWait.C: + log.Warn().Msg("reached max wait time, quiting") + r.onQuiting() + os.Exit(1) } - case <-tickerMaxWait.C: - log.Warn().Msg("reached max wait time, quiting") - r.onQuiting() - os.Exit(1) } - } + }) } + + // listen and deal message from worker + r.goAttach(r.clientListener) + + // listen and record heartbeat from worker + r.heartbeatWorker() <-r.closeChan } @@ -1018,17 +1155,48 @@ func (r *masterRunner) start() error { return errors.New("current workers: 0") } - log.Info().Msg("send spawn data to worker") - r.updateState(StateSpawning) // fetching testcase testcase, err := r.fetchTestCase() if err != nil { return err } - profile := r.profile.dispatch(int64(numWorkers)) - r.server.sendChannel() <- newMessageToWorker("spawn", ProfileToBytes(profile), nil, testcase) - log.Warn().Interface("profile", profile).Msg("send spawn data to worker successful") + workerProfile := &Profile{} + if err := copier.Copy(workerProfile, r.profile); err != nil { + log.Error().Err(err).Msg("copy workerProfile failed") + return err + } + cur := 0 + ints := builtin.SplitInteger(int(r.profile.SpawnCount), numWorkers) + log.Info().Msg("send spawn data to worker") + r.updateState(StateSpawning) + r.server.clients.Range(func(key, value interface{}) bool { + if workerInfo, ok := value.(*WorkerNode); ok { + if 
workerInfo.getState() == StateQuitting || workerInfo.getState() == StateMissing { + return true + } + if workerProfile.SpawnCount > 0 { + workerProfile.SpawnCount = int64(ints[cur]) + } + if workerProfile.SpawnRate > 0 { + workerProfile.SpawnRate = workerProfile.SpawnRate / float64(numWorkers) + } + if workerProfile.MaxRPS > 0 { + workerProfile.MaxRPS = workerProfile.MaxRPS / int64(numWorkers) + } + workerInfo.getStream() <- &messager.StreamResponse{ + Type: "spawn", + Profile: ProfileToBytes(workerProfile), + Data: map[string]int64{}, + NodeID: workerInfo.ID, + Tasks: testcase, + } + cur++ + } + return true + }) + + log.Warn().Interface("profile", r.profile).Msg("send spawn data to worker successful") return nil } @@ -1037,9 +1205,49 @@ func (r *masterRunner) rebalance() error { if numWorkers == 0 { return errors.New("current workers: 0") } - profile := r.profile.dispatch(int64(numWorkers)) + workerProfile := &Profile{} + if err := copier.Copy(workerProfile, r.profile); err != nil { + log.Error().Err(err).Msg("copy workerProfile failed") + return err + } + cur := 0 + ints := builtin.SplitInteger(int(r.profile.SpawnCount), numWorkers) + log.Info().Msg("send spawn data to worker") + r.server.clients.Range(func(key, value interface{}) bool { + if workerInfo, ok := value.(*WorkerNode); ok { + if workerInfo.getState() == StateQuitting || workerInfo.getState() == StateMissing { + return true + } + if workerProfile.SpawnCount > 0 { + workerProfile.SpawnCount = int64(ints[cur]) + } + if workerProfile.SpawnRate > 0 { + workerProfile.SpawnRate = workerProfile.SpawnRate / float64(numWorkers) + } + if workerProfile.MaxRPS > 0 { + workerProfile.MaxRPS = workerProfile.MaxRPS / int64(numWorkers) + } + if workerInfo.getState() == StateInit { + workerInfo.getStream() <- &messager.StreamResponse{ + Type: "spawn", + Profile: ProfileToBytes(workerProfile), + Data: map[string]int64{}, + NodeID: workerInfo.ID, + Tasks: r.tcb, + } + } else { + workerInfo.getStream() <- 
&messager.StreamResponse{ + Type: "rebalance", + Profile: ProfileToBytes(workerProfile), + Data: map[string]int64{}, + NodeID: workerInfo.ID, + } + } + cur++ + } + return true + }) - r.server.sendChannel() <- newMessageToWorker("rebalance", ProfileToBytes(profile), nil, nil) log.Warn().Msg("send rebalance data to worker successful") return nil } @@ -1054,6 +1262,7 @@ func (r *masterRunner) fetchTestCase() ([]byte, error) { case <-ticker.C: return nil, errors.New("parse testcases timeout") case tcb := <-r.testCaseBytes: + r.tcb = tcb return tcb, nil } } @@ -1061,8 +1270,7 @@ func (r *masterRunner) fetchTestCase() ([]byte, error) { func (r *masterRunner) stop() error { if r.isStarted() { r.updateState(StateStopping) - r.server.sendChannel() <- &genericMessage{Type: "stop", Data: map[string]int64{}} - r.updateState(StateStopped) + r.server.sendBroadcasts(&genericMessage{Type: "stop", Data: map[string]int64{}}) return nil } else { return errors.New("already stopped") @@ -1071,24 +1279,22 @@ func (r *masterRunner) stop() error { func (r *masterRunner) onQuiting() { if r.getState() != StateQuitting { - r.server.sendChannel() <- &genericMessage{ + r.server.sendBroadcasts(&genericMessage{ Type: "quit", - } + }) } r.updateState(StateQuitting) } func (r *masterRunner) close() { r.onQuiting() - r.server.wg.Wait() close(r.closeChan) - r.server.close() } func (r *masterRunner) reportStats() { currentTime := time.Now() println() - println("===================== HttpRunner Master for Distributed Load Testing ===================== ") + println("========================= HttpRunner Master for Distributed Load Testing ========================= ") println(fmt.Sprintf("Current time: %s, State: %v, Current Available Workers: %v, Target Users: %v", currentTime.Format("2006/01/02 15:04:05"), getStateName(r.getState()), r.server.getClientsLength(), r.getSpawnCount())) table := tablewriter.NewWriter(os.Stdout) diff --git a/hrp/internal/boomer/runner_test.go 
b/hrp/internal/boomer/runner_test.go index 5bfccf6e..8c139d58 100644 --- a/hrp/internal/boomer/runner_test.go +++ b/hrp/internal/boomer/runner_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/httprunner/httprunner/v4/hrp/internal/boomer/grpc/messager" "github.com/stretchr/testify/assert" ) @@ -113,6 +114,32 @@ func TestLoopCount(t *testing.T) { } } +func TestStopNotify(t *testing.T) { + r := &localRunner{ + runner: runner{ + stopChan: make(chan bool), + doneChan: make(chan bool), + }, + } + go func() { + <-r.stopChan + close(r.doneChan) + }() + + notifier := r.DoneNotify() + select { + case <-notifier: + t.Fatalf("received unexpected stop notification") + default: + } + r.Stop() + select { + case <-notifier: + default: + t.Fatalf("cannot receive stop notification") + } +} + func TestSpawnWorkers(t *testing.T) { taskA := &Task{ Weight: 10, @@ -239,7 +266,8 @@ func TestSpawnAndStop(t *testing.T) { if msg.Type != "spawning_complete" { t.Error("Runner should send spawning_complete message when spawning completed, got", msg.Type) } - runner.stop() + go runner.stop() + close(runner.doneChan) runner.onQuiting() msg = <-runner.client.sendChannel() @@ -260,10 +288,11 @@ func TestStop(t *testing.T) { runner.reset() runner.updateState(StateSpawning) - runner.stop() - + go runner.stop() + close(runner.doneChan) + time.Sleep(1 * time.Second) if runner.getState() != StateStopped { - t.Error("Expected runner state to be 5, was", runner.getState()) + t.Error("Expected runner state to be 5, was", getStateName(runner.getState())) } } @@ -305,7 +334,8 @@ func TestOnQuitMessage(t *testing.T) { runner.reset() runner.closeChan = make(chan bool) runner.client.shutdownChan = make(chan bool) - runner.onMessage(newGenericMessage("quit", nil, runner.nodeID)) + go runner.onMessage(newGenericMessage("quit", nil, runner.nodeID)) + close(runner.doneChan) <-runner.closeChan if runner.getState() != StateQuitting { t.Error("Runner's state should be StateQuitting") @@ -359,7 +389,7 @@ 
func TestOnMessage(t *testing.T) { t.Error("Runner should send spawning_complete message when spawn completed, got", msg.Type) } if runner.getState() != StateRunning { - t.Error("State of runner is not running after spawn, got", runner.getState()) + t.Error("State of runner is not running after spawn, got", getStateName(runner.getState())) } // increase goroutines while running @@ -368,7 +398,7 @@ func TestOnMessage(t *testing.T) { time.Sleep(2 * time.Second) if runner.getState() != StateRunning { - t.Error("State of runner is not running after spawn, got", runner.getState()) + t.Error("State of runner is not running after spawn, got", getStateName(runner.getState())) } if runner.controller.getCurrentClientsNum() != 15 { t.Error("Number of goroutines mismatches, expected: 15, current count:", runner.controller.getCurrentClientsNum()) @@ -377,7 +407,7 @@ func TestOnMessage(t *testing.T) { // stop all the workers runner.onMessage(newGenericMessage("stop", nil, runner.nodeID)) if runner.getState() != StateStopped { - t.Error("State of runner is not stopped, got", runner.getState()) + t.Error("State of runner is not stopped, got", getStateName(runner.getState())) } msg = <-runner.client.sendChannel() if msg.Type != "client_stopped" { @@ -401,7 +431,7 @@ func TestOnMessage(t *testing.T) { t.Error("Number of goroutines mismatches, expected: 10, current count:", runner.controller.getCurrentClientsNum()) } if runner.getState() != StateRunning { - t.Error("State of runner is not running after spawn, got", runner.getState()) + t.Error("State of runner is not running after spawn, got", getStateName(runner.getState())) } msg = <-runner.client.sendChannel() if msg.Type != "spawning_complete" { @@ -411,7 +441,7 @@ func TestOnMessage(t *testing.T) { // stop all the workers runner.onMessage(newGenericMessage("stop", nil, runner.nodeID)) if runner.getState() != StateStopped { - t.Error("State of runner is not stopped, got", runner.getState()) + t.Error("State of runner is not 
stopped, got", getStateName(runner.getState())) } msg = <-runner.client.sendChannel() if msg.Type != "client_stopped" { @@ -430,8 +460,8 @@ func TestClientListener(t *testing.T) { runner.setSpawnCount(10) runner.setSpawnRate(10) go runner.clientListener() - runner.server.clients.Store("testID1", &WorkerNode{ID: "testID1", Heartbeat: 3}) - runner.server.clients.Store("testID2", &WorkerNode{ID: "testID2", Heartbeat: 3}) + runner.server.clients.Store("testID1", &WorkerNode{ID: "testID1", Heartbeat: 3, stream: make(chan *messager.StreamResponse, 10)}) + runner.server.clients.Store("testID2", &WorkerNode{ID: "testID2", Heartbeat: 3, stream: make(chan *messager.StreamResponse, 10)}) runner.server.recvChannel() <- &genericMessage{ Type: typeClientReady, NodeID: "testID1", @@ -470,7 +500,7 @@ func TestClientListener(t *testing.T) { } time.Sleep(time.Second) if runner.getState() != StateStopped { - t.Error("State of master runner is not stopped, got", runner.getState()) + t.Error("State of master runner is not stopped, got", getStateName(runner.getState())) } } @@ -480,8 +510,8 @@ func TestHeartbeatWorker(t *testing.T) { runner.updateState(StateInit) runner.setSpawnCount(10) runner.setSpawnRate(10) - runner.server.clients.Store("testID1", &WorkerNode{ID: "testID1", Heartbeat: 1, State: StateInit}) - runner.server.clients.Store("testID2", &WorkerNode{ID: "testID2", Heartbeat: 1, State: StateInit}) + runner.server.clients.Store("testID1", &WorkerNode{ID: "testID1", Heartbeat: 1, State: StateInit, stream: make(chan *messager.StreamResponse, 10)}) + runner.server.clients.Store("testID2", &WorkerNode{ID: "testID2", Heartbeat: 1, State: StateInit, stream: make(chan *messager.StreamResponse, 10)}) go runner.clientListener() go runner.heartbeatWorker() time.Sleep(3 * time.Second) diff --git a/hrp/internal/boomer/server.go b/hrp/internal/boomer/server.go deleted file mode 100644 index 853e847e..00000000 --- a/hrp/internal/boomer/server.go +++ /dev/null @@ -1 +0,0 @@ -package boomer 
diff --git a/hrp/internal/boomer/server_grpc.go b/hrp/internal/boomer/server_grpc.go index d6d062f4..0c2f36f5 100644 --- a/hrp/internal/boomer/server_grpc.go +++ b/hrp/internal/boomer/server_grpc.go @@ -3,7 +3,6 @@ package boomer import ( "context" "fmt" - "io" "net" "strings" "sync" @@ -17,106 +16,11 @@ import ( "google.golang.org/grpc/reflection" "google.golang.org/grpc/status" - "github.com/httprunner/httprunner/v4/hrp/internal/data" - "github.com/httprunner/httprunner/v4/hrp/internal/grpc/messager" + "github.com/httprunner/httprunner/v4/hrp/internal/boomer/data" + "github.com/httprunner/httprunner/v4/hrp/internal/boomer/grpc/messager" "github.com/rs/zerolog/log" ) -var ( - errMissingMetadata = status.Errorf(codes.InvalidArgument, "missing metadata") - errInvalidToken = status.Errorf(codes.Unauthenticated, "invalid token") -) - -// valid validates the authorization. -func valid(authorization []string) bool { - if len(authorization) < 1 { - return false - } - token := strings.TrimPrefix(authorization[0], "Bearer ") - // Perform the token validation here. For the sake of this example, the code - // here forgoes any of the usual OAuth2 token validation and instead checks - // for a token matching an arbitrary string. - return token == "httprunner-secret-token" -} - -func serverUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - // authentication (token verification) - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, errMissingMetadata - } - if !valid(md["authorization"]) { - return nil, errInvalidToken - } - m, err := handler(ctx, req) - if err != nil { - logger("RPC failed with error %v", err) - } - return m, err -} - -// serverWrappedStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and -// SendMsg method call. 
-type serverWrappedStream struct { - grpc.ServerStream -} - -func (w *serverWrappedStream) RecvMsg(m interface{}) error { - logger("Receive a message (Type: %T) at %s", m, time.Now().Format(time.RFC3339)) - return w.ServerStream.RecvMsg(m) -} - -func (w *serverWrappedStream) SendMsg(m interface{}) error { - logger("Send a message (Type: %T) at %v", m, time.Now().Format(time.RFC3339)) - return w.ServerStream.SendMsg(m) -} - -func newServerWrappedStream(s grpc.ServerStream) grpc.ServerStream { - return &serverWrappedStream{s} -} - -func serverStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - // authentication (token verification) - md, ok := metadata.FromIncomingContext(ss.Context()) - if !ok { - return errMissingMetadata - } - if !valid(md["authorization"]) { - return errInvalidToken - } - - err := handler(srv, newServerWrappedStream(ss)) - if err != nil { - logger("RPC failed with error %v", err) - } - return err -} - -func (s *grpcServer) BidirectionalStreamingMessage(srv messager.Message_BidirectionalStreamingMessageServer) error { - s.wg.Add(1) - defer s.wg.Done() - req, err := srv.Recv() - switch err { - case nil: - break - case io.EOF: - return nil - default: - if err.Error() == status.Error(codes.Canceled, context.Canceled.Error()).Error() { - return nil - } - log.Error().Err(err).Msg("failed to get stream from client") - return err - } - wn := &WorkerNode{messenger: srv, ID: req.NodeID, Heartbeat: 3} - s.clients.Store(req.NodeID, wn) - log.Warn().Str("worker id", req.NodeID).Msg("worker joined") - <-s.disconnectedChannel() - s.clients.Delete(req.NodeID) - log.Warn().Str("worker id", req.NodeID).Msg("worker quited") - return nil -} - type WorkerNode struct { ID string `json:"id"` State int32 `json:"state"` @@ -125,8 +29,14 @@ type WorkerNode struct { CPUUsage float64 `json:"cpu_usage"` CPUWarningEmitted bool `json:"cpu_warning_emitted"` MemoryUsage float64 `json:"memory_usage"` - 
messenger messager.Message_BidirectionalStreamingMessageServer + stream chan *messager.StreamResponse mutex sync.RWMutex + disconnectedChan chan bool +} + +func newWorkerNode(id string) *WorkerNode { + stream := make(chan *messager.StreamResponse, 100) + return &WorkerNode{State: StateInit, ID: id, Heartbeat: 3, stream: stream, disconnectedChan: make(chan bool)} } func (w *WorkerNode) getState() int32 { @@ -189,6 +99,18 @@ func (w *WorkerNode) getMemoryUsage() float64 { return w.MemoryUsage } +func (w *WorkerNode) setStream(stream chan *messager.StreamResponse) { + w.mutex.RLock() + defer w.mutex.RUnlock() + w.stream = stream +} + +func (w *WorkerNode) getStream() chan *messager.StreamResponse { + w.mutex.RLock() + defer w.mutex.RUnlock() + return w.stream +} + func (w *WorkerNode) getWorkerInfo() WorkerNode { w.mutex.RLock() defer w.mutex.RUnlock() @@ -208,25 +130,95 @@ type grpcServer struct { masterHost string masterPort int server *grpc.Server + secure bool clients *sync.Map - fromWorker chan *genericMessage - toWorker chan *genericMessage - disconnectedToWorker chan bool - shutdownChan chan bool - wg sync.WaitGroup + fromWorker chan *genericMessage + disconnectedChan chan bool + shutdownChan chan bool + wg *sync.WaitGroup +} + +var ( + errMissingMetadata = status.Errorf(codes.InvalidArgument, "missing metadata") + errInvalidToken = status.Errorf(codes.Unauthenticated, "invalid token") +) + +func logger(format string, a ...interface{}) { + log.Info().Msg(fmt.Sprintf(format, a...)) +} + +// valid validates the authorization. 
+func valid(authorization []string) bool { + if len(authorization) < 1 { + return false + } + token := strings.TrimPrefix(authorization[0], "Bearer ") + return token == "httprunner-secret-token" +} + +func serverUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + // authentication (token verification) + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, errMissingMetadata + } + if !valid(md["authorization"]) { + return nil, errInvalidToken + } + m, err := handler(ctx, req) + if err != nil { + logger("RPC failed with error %v", err) + } + return m, err +} + +// serverWrappedStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and +// SendMsg method call. +type serverWrappedStream struct { + grpc.ServerStream +} + +func (w *serverWrappedStream) RecvMsg(m interface{}) error { + logger("Receive a message (Type: %T) at %s", m, time.Now().Format(time.RFC3339)) + return w.ServerStream.RecvMsg(m) +} + +func (w *serverWrappedStream) SendMsg(m interface{}) error { + logger("Send a message (Type: %T) at %v", m, time.Now().Format(time.RFC3339)) + return w.ServerStream.SendMsg(m) +} + +func newServerWrappedStream(s grpc.ServerStream) grpc.ServerStream { + return &serverWrappedStream{s} +} + +func serverStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + // authentication (token verification) + md, ok := metadata.FromIncomingContext(ss.Context()) + if !ok { + return errMissingMetadata + } + if !valid(md["authorization"]) { + return errInvalidToken + } + + err := handler(srv, newServerWrappedStream(ss)) + if err != nil { + logger("RPC failed with error %v", err) + } + return err } func newServer(masterHost string, masterPort int) (server *grpcServer) { log.Info().Msg("Boomer is built with grpc support.") server = &grpcServer{ - masterHost: masterHost, - masterPort: masterPort, - 
clients: &sync.Map{}, - fromWorker: make(chan *genericMessage, 100), - toWorker: make(chan *genericMessage, 100), - disconnectedToWorker: make(chan bool), - shutdownChan: make(chan bool), + masterHost: masterHost, + masterPort: masterPort, + clients: &sync.Map{}, + fromWorker: make(chan *genericMessage, 100), + disconnectedChan: make(chan bool), + shutdownChan: make(chan bool), } return server } @@ -250,25 +242,179 @@ func (s *grpcServer) start() (err error) { return } // create gRPC server - serv := grpc.NewServer(opts...) + s.server = grpc.NewServer(opts...) // register message server - messager.RegisterMessageServer(serv, s) - reflection.Register(serv) + messager.RegisterMessageServer(s.server, s) + reflection.Register(s.server) // start grpc server go func() { - err = serv.Serve(lis) + err = s.server.Serve(lis) if err != nil { log.Error().Err(err).Msg("failed to serve") return } }() - - go s.recv() - go s.send() - return nil } +func (s *grpcServer) Register(_ context.Context, req *messager.RegisterRequest) (*messager.RegisterResponse, error) { + // store worker information + wn := newWorkerNode(req.NodeID) + s.clients.Store(req.NodeID, wn) + log.Warn().Str("worker id", req.NodeID).Msg("worker joined") + return &messager.RegisterResponse{Code: "0", Message: "register successfully"}, nil +} + +func (s *grpcServer) SignOut(_ context.Context, req *messager.SignOutRequest) (*messager.SignOutResponse, error) { + // delete worker information + s.clients.Delete(req.NodeID) + log.Warn().Str("worker id", req.NodeID).Msg("worker quited") + return &messager.SignOutResponse{Code: "0", Message: "sign out successfully"}, nil +} + +func (s *grpcServer) valid(token string) (isValid bool) { + s.clients.Range(func(key, value interface{}) bool { + if workerInfo, ok := value.(*WorkerNode); ok { + if workerInfo.ID == token { + isValid = true + } + } + return true + }) + return +} + +func (s *grpcServer) BidirectionalStreamingMessage(srv 
messager.Message_BidirectionalStreamingMessageServer) error { + token, ok := extractToken(srv.Context()) + if !ok { + return status.Error(codes.Unauthenticated, "missing token header") + } + + ok = s.valid(token) + if !ok { + return status.Error(codes.Unauthenticated, "invalid token") + } + + go s.sendMsg(srv, token) +FOR: + for { + msg, err := srv.Recv() + if st, ok := status.FromError(err); ok { + switch st.Code() { + case codes.OK: + s.fromWorker <- newGenericMessage(msg.Type, msg.Data, msg.NodeID) + log.Info(). + Str("nodeID", msg.NodeID). + Str("type", msg.Type). + Interface("data", msg.Data). + Msg("receive data from worker") + case codes.Unavailable, codes.Canceled, codes.DeadlineExceeded: + s.fromWorker <- newQuitMessage(token) + break FOR + default: + log.Error().Err(err).Msg("failed to get stream from client") + break FOR + } + } + } + // disconnected to worker + select { + case <-srv.Context().Done(): + return srv.Context().Err() + case <-s.disconnectedChan: + } + log.Warn().Str("worker id", token).Msg("worker quited") + return nil +} + +func (s *grpcServer) sendMsg(srv messager.Message_BidirectionalStreamingMessageServer, id string) { + stream := s.getWorkersByID(id).getStream() + for { + select { + case <-srv.Context().Done(): + return + case res := <-stream: + if s, ok := status.FromError(srv.Send(res)); ok { + switch s.Code() { + case codes.OK: + log.Info(). + Str("nodeID", res.NodeID). + Str("type", res.Type). + Interface("data", res.Data). + Interface("profile", res.Profile). 
+ Msg("send data to worker") + case codes.Unavailable, codes.Canceled, codes.DeadlineExceeded: + log.Warn().Msg(fmt.Sprintf("client (%s) terminated connection", id)) + return + default: + log.Warn().Msg(fmt.Sprintf("failed to send to client (%s): %v", id, s.Err())) + return + } + } + } + } +} + +func (s *grpcServer) sendBroadcasts(msg *genericMessage) { + s.clients.Range(func(key, value interface{}) bool { + if workerInfo, ok := value.(*WorkerNode); ok { + if workerInfo.getState() == StateQuitting || workerInfo.getState() == StateMissing { + return true + } + workerInfo.getStream() <- &messager.StreamResponse{ + Type: msg.Type, + Profile: msg.Profile, + Data: msg.Data, + NodeID: workerInfo.ID, + Tasks: msg.Tasks, + } + } + return true + }) +} + +func (s *grpcServer) stopServer(ctx context.Context) { + ch := make(chan struct{}) + go func() { + defer close(ch) + // close listeners to stop accepting new connections, + // will block on any existing transports + s.server.GracefulStop() + }() + + // wait until all pending RPCs are finished + select { + case <-ch: + case <-ctx.Done(): + // took too long, manually close open transports + // e.g. 
watch streams + s.server.Stop() + + // concurrent GracefulStop should be interrupted + <-ch + } +} + +func (s *grpcServer) close() { + // close client requests with request timeout + timeout := 2 * time.Second + ctx, cancel := context.WithTimeout(context.Background(), timeout) + s.stopServer(ctx) + cancel() +} + +func (s *grpcServer) recvChannel() chan *genericMessage { + return s.fromWorker +} + +func (s *grpcServer) shutdownChannel() chan bool { + return s.shutdownChan +} + +func (s *grpcServer) disconnectedChannel() chan bool { + return s.disconnectedChan +} + func (s *grpcServer) getWorkersByState(state int32) (wns []*WorkerNode) { s.clients.Range(func(key, value interface{}) bool { if workerInfo, ok := value.(*WorkerNode); ok { @@ -281,6 +427,18 @@ func (s *grpcServer) getWorkersByState(state int32) (wns []*WorkerNode) { return wns } +func (s *grpcServer) getWorkersByID(id string) (wn *WorkerNode) { + s.clients.Range(func(key, value interface{}) bool { + if workerInfo, ok := value.(*WorkerNode); ok { + if workerInfo.ID == id { + wn = workerInfo + } + } + return true + }) + return wn +} + func (s *grpcServer) getWorkersLengthByState(state int32) (l int) { s.clients.Range(func(key, value interface{}) bool { if workerInfo, ok := value.(*WorkerNode); ok { @@ -318,113 +476,3 @@ func (s *grpcServer) getClientsLength() (l int) { }) return } - -func (s *grpcServer) close() { - close(s.shutdownChan) -} - -func (s *grpcServer) recvChannel() chan *genericMessage { - return s.fromWorker -} - -func (s *grpcServer) shutdownChannel() chan bool { - return s.shutdownChan -} - -func (s *grpcServer) recv() { - for { - select { - case <-s.shutdownChan: - return - default: - s.clients.Range(func(key, value interface{}) bool { - if workerInfo, ok := value.(*WorkerNode); ok { - if workerInfo.getState() == StateQuitting || workerInfo.getState() == StateMissing { - return true - } - msg, err := workerInfo.messenger.Recv() - switch err { - case nil: - if msg == nil { - return true - } 
- s.fromWorker <- newGenericMessage(msg.Type, msg.Data, msg.NodeID) - log.Info(). - Str("nodeID", msg.NodeID). - Str("type", msg.Type). - Interface("data", msg.Data). - Msg("receive data from worker") - case io.EOF: - s.fromWorker <- newQuitMessage(workerInfo.ID) - default: - if err.Error() == status.Error(codes.Canceled, context.Canceled.Error()).Error() { - s.fromWorker <- newQuitMessage(workerInfo.ID) - return true - } - log.Error().Err(err).Msg("failed to get stream from client") - } - } - return true - }) - } - } -} - -func (s *grpcServer) sendChannel() chan *genericMessage { - return s.toWorker -} - -func (s *grpcServer) send() { - for { - select { - case <-s.shutdownChan: - return - case msg := <-s.toWorker: - s.sendMessage(msg) - - // We may send genericMessage to Worker. - if msg.Type == "quit" { - close(s.disconnectedToWorker) - } - } - } -} - -func (s *grpcServer) sendMessage(msg *genericMessage) { - s.clients.Range(func(key, value interface{}) bool { - if workerInfo, ok := value.(*WorkerNode); ok { - if workerInfo.getState() == StateQuitting || workerInfo.getState() == StateMissing { - return true - } - err := workerInfo.messenger.Send( - &messager.StreamResponse{ - Type: msg.Type, - Profile: msg.Profile, - Data: msg.Data, - NodeID: workerInfo.ID, - Tasks: msg.Tasks}, - ) - switch err { - case nil: - break - case io.EOF: - fallthrough - default: - s.fromWorker <- newQuitMessage(workerInfo.ID) - log.Error().Err(err).Msg("failed to send message") - return true - } - log.Info(). - Str("nodeID", workerInfo.ID). - Str("type", msg.Type). - Interface("data", msg.Data). - Int32("state", workerInfo.getState()). 
- Msg("send data to worker") - } - return true - }) -} - -func (s *grpcServer) disconnectedChannel() chan bool { - return s.disconnectedToWorker -} diff --git a/hrp/internal/boomer/utils.go b/hrp/internal/boomer/utils.go index bc376ca6..b277078f 100644 --- a/hrp/internal/boomer/utils.go +++ b/hrp/internal/boomer/utils.go @@ -94,12 +94,12 @@ func GetCurrentCPUUsage() float64 { currentPid := os.Getpid() p, err := process.NewProcess(int32(currentPid)) if err != nil { - log.Printf("Fail to get CPU percent, %v\n", err) + log.Error().Err(err).Msg(fmt.Sprintf("failed to get CPU percent\n")) return 0.0 } percent, err := p.CPUPercent() if err != nil { - log.Printf("Fail to get CPU percent, %v\n", err) + log.Error().Err(err).Msg(fmt.Sprintf("failed to get CPU percent\n")) return 0.0 } return percent / float64(runtime.NumCPU()) diff --git a/hrp/internal/builtin/utils.go b/hrp/internal/builtin/utils.go index cfc660c2..da437cc6 100644 --- a/hrp/internal/builtin/utils.go +++ b/hrp/internal/builtin/utils.go @@ -504,3 +504,25 @@ func Bytes2File(data []byte, filename string) error { log.Info().Msg(fmt.Sprintf("write file %s len: %d \n", filename, count)) return nil } + +func SplitInteger(m, n int) (ints []int) { + quotient := m / n + remainder := m % n + if remainder >= 0 { + for i := 0; i < n-remainder; i++ { + ints = append(ints, quotient) + } + for i := 0; i < remainder; i++ { + ints = append(ints, quotient+1) + } + return + } else if remainder < 0 { + for i := 0; i < -remainder; i++ { + ints = append(ints, quotient-1) + } + for i := 0; i < n+remainder; i++ { + ints = append(ints, quotient) + } + } + return +} diff --git a/hrp/internal/data/x509/ca_cert.pem b/hrp/internal/data/x509/ca_cert.pem deleted file mode 100644 index 868a01eb..00000000 --- a/hrp/internal/data/x509/ca_cert.pem +++ /dev/null @@ -1,34 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIF6jCCA9KgAwIBAgIJANQvyb7tgLDkMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD 
-MRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTAeFw0yMjAzMTgyMTQ0NTZaFw0zMjAz -MTUyMTQ0NTZaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD -U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANGmhBQQ5f3n4UhgJLsXHh3CE3ej -Ox36ob+Hnny9Gb/OquA4FMKjTTaSrhKIQapqlCLODai50XKSRBJcgsvsqWk9UdL2 -3zf7CzAPmg5CmzpWWwgpKPTuK5W+gLA1+uMKecBdH5gqSswQ3TD1fMfnJuq9mNfC -GsMkplaqS5VATNFPVnqS7us3OXKEITmBaQP4wOpGP1PgqX7K08aZEeAyQJaTS5um -4MNlBLYa/nQ9Wca0Uk5tzoNjE6mWH7bTuwdoZgOIwKFmBbmsC9y/HzwV/zRsL8Yp -+7FwfIYuZ5j8gBNqSFQjDFkm6Q7RcQ/lyHHj9YduOgTciIFVgx+j8aZvFqH127h8 -WIb7Jppy0DEDJE1hRP6iV2uVoaUxhXWrCWLBUU+naLix7SJ8rqw8gHwRNWfM/Lwg -I3rGXdw5WIHVQcuxevN6qVSZeWVYAlAgfxjKtM5cKZyM+W80CSdVKEku1XA0sq6h -jaiJdo6hpm8BLIB2k7LWafc5MASst7XULk4uDC/OYcEz3+C3Ryn1qBltr1gA3+5K -ANuhjYCZH4P0pX08I1MpeVP6h8XhbBPEZg2txbVGlnDXEFoJN9Eg5iEKRBo/HKhf -lP84ljtBSmCnsF6K/y3vnRiu+BVNP5KMq179DNqEy7tSygzgY41m3pSFojdvA59N -JWJoy9/NZzdlU4nzAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E -FgQUW5AMXXg/zPSaLHwSO/7LwoBeZYUwgYAGA1UdIwR5MHeAFFuQDF14P8z0mix8 -Ejv+y8KAXmWFoVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV -BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1zZXJ2ZXJfY2GC -CQDUL8m+7YCw5DAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBAKTh -Ofg4WospSN7Gg/q3bQqfSMT5XTFC7cj0j3cWDZBnmqb0HAFPmzHT+w3kBVNCyx1r -iatOhaZRH7RA0vacZQT5pD2MGU48/zFfwBV/qHENQWuRLD2WOOEU3cjjoINBclfP -im7ml/xgz0ACOgUyf+/2hkS7VLq4p9QQVGf2TQt65DZA9mUylZTdsBf4AfEg7IXv -gaYpq6tYmNi7fXDzR/LT+fPd4ejQARy9U7uVhecyH9zTUMzm2Fr/p7HhydSXNwhF -JUfPWw7XYO0lyA+8PxUSAKXOfsT44WNtHAeRm/Gkmn8inBdedFia/+M67k45b/wY -RF11QzvaMR33jmrdZWxCc0Xjg8oZIP7T9MfGFULEGCpB3NY4YjnRrid/JZ/edhPR -2iOiEiek4qAaxeIne3CR2dqCM+n+FV1zCs4n3S0os4+kknnS5aNR5wZpqpZfG0Co -FyWE+dE51cGcub1wT1oi5Xrxg/iRteCfd33Ky668FYKA/tHHdqkVfBflATU6iOtw -dIzvFJk1H1mUwpJrH/aNOHzVCQ5KSpcc+kXcOQPafTHFB6zMVJ6O+Vm7SrqiSENM -2b1fBKxHIsxOtwrKuzbRhU5+eAICqwMd6gcIpT/JSR1r+UfHVcrXalbeazmT2DS5 -CFOeinj4WQvtPYOdbYsWg8Y9zGN4L9zH6GovM1wD ------END CERTIFICATE----- diff --git 
a/hrp/internal/data/x509/ca_key.pem b/hrp/internal/data/x509/ca_key.pem deleted file mode 100644 index 4dccea1b..00000000 --- a/hrp/internal/data/x509/ca_key.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDRpoQUEOX95+FI -YCS7Fx4dwhN3ozsd+qG/h558vRm/zqrgOBTCo002kq4SiEGqapQizg2oudFykkQS -XILL7KlpPVHS9t83+wswD5oOQps6VlsIKSj07iuVvoCwNfrjCnnAXR+YKkrMEN0w -9XzH5ybqvZjXwhrDJKZWqkuVQEzRT1Z6ku7rNzlyhCE5gWkD+MDqRj9T4Kl+ytPG -mRHgMkCWk0ubpuDDZQS2Gv50PVnGtFJObc6DYxOplh+207sHaGYDiMChZgW5rAvc -vx88Ff80bC/GKfuxcHyGLmeY/IATakhUIwxZJukO0XEP5chx4/WHbjoE3IiBVYMf -o/Gmbxah9du4fFiG+yaactAxAyRNYUT+oldrlaGlMYV1qwliwVFPp2i4se0ifK6s -PIB8ETVnzPy8ICN6xl3cOViB1UHLsXrzeqlUmXllWAJQIH8YyrTOXCmcjPlvNAkn -VShJLtVwNLKuoY2oiXaOoaZvASyAdpOy1mn3OTAErLe11C5OLgwvzmHBM9/gt0cp -9agZba9YAN/uSgDboY2AmR+D9KV9PCNTKXlT+ofF4WwTxGYNrcW1RpZw1xBaCTfR -IOYhCkQaPxyoX5T/OJY7QUpgp7Beiv8t750YrvgVTT+SjKte/QzahMu7UsoM4GON -Zt6UhaI3bwOfTSViaMvfzWc3ZVOJ8wIDAQABAoICAQCxi7A9AhaUUWRzE6DnpGtH -zk0IO39cIx4KAsNQZiDBVDdXzYafUwaX2d57KVNbDAlJ9HCS3FKpEX9+gUPviQvr -aRe7boCZewv9dqkDvJqS7AEJxzm9O1pD5WI8WGqRDhUPuI2CIwbXDM0VokA7VuGZ -WFlxFxvs+UO5D10VF7A2blcRVQ/quQj4lzc/6P1TdL2DaVxGH3PLQd/ZR1ZhJI2Y -N0OHnOqp7wnvYqrtK+u0oI83hjym/ifvrYhMH8E7Q8lo4s4noSvmEvK0zlKYYxSO -g7RtwK47lcSPKgtn/yZDyvVX85qIgbBLcUmrqfB3qxMKz2lpJo6f4Rg7mm6SgW+K -zxYnGNCTPfiyPKiufM3rQPfJ4giqQ1XDKiZEKUJBo4mzzV6LcAoDaEqhHBlySpi3 -Z38I0rmAT62PRJ1sMkQl6j1Ben9TpwTzJmLX1sEO1Jsabsk8rRdV+ni5oRRUdW4H -+ratyQ8pmegLYyhAZqkD7FzKBLdznLmWXVTcBQkRoD5lQkCP2OF78TdL4twNvoTH -X4kQ3cNysWFXsm+yf4jSCHl4BEtGA2jOU690T0trtMf13aI3wEULmcBgc2ix+tch -wX79hwBYcjGGDfTMb39r/DrcgWMVFXawru78QFoN9vVxznit9LrOERBm6zN2ok4X -E1kD4YZGr8dxUHax0or4CQKCAQEA7W1Sxeqc0gV0ANQf3eCsFNjvT97z/RSzzUYF -wCe4rpzQ9ZNsY2UYMYmEzUuRBuQxYKCNTWot3hu+6OPMCp4pLuu2l8ha/wCM2TkY -6hceduvXkdUNUG1xZNSR8waw4PTXNeoOD30+GB4OpHdjzsF5pEzx853/Qo/ERJFx -A+aZZJy/Sfw82KTseYTniWYjH4iYUbC8TVLfRjPw6V2VcF78pYkdAQenGglqw/sI 
-4a3FhJspN9xV/PoPbb7PjBJFHUt7ZRQt+D3WPuhLSjyPxwV+3u2OsQ1/J/sxcih6 -rW2g+OJYrK4YkOqX9tLRB39RjO4H6Eiv5eUAw/+vHHufKRu1HwKCAQEA4gzxZNzm -r1X/5GAwwyBJ4eQUHFvEQsC2L4GTJnNNAvmJzSIWnmxGfFLhfJSabnlCMYelMhKS -Ntxokk5ItOhxlUbA1CucEtQgehJwREpUljlk7cii5MLZEkz11QxIVoAhGlq3svFG -B/gwYWNVWl2CXcK2o6BBD9sIgzgp7qhmdJej16h8YkWn7HibKs+OBcdCu+ri7wU+ -VdLpdhN3uqo1b1tO58Gv+40vuQE3ZKDdMy55V30+0qEqg6dXvDQ9nwYFkw6C31Ad -Wpa9ZB0A0HNSou1xTWyl/hDie6dlN84RHGX8on4sjgPrb8A8WVis+R2abvh9ApZA -fRZ3H/ZYXB1crQKCAQBgjgEHc+3qi0UtwRZkiSXyJHbOKIFY/r5QUJWuG3lDqYph -FF8T3N0F6EMVqhGEl/Bst14/iVq15Nqyo1ErUD63UiyjdVtsMLEW9d1n9ZbyDd9Q -8y/C8X8X3kqsZqAwG+IZjuHA8tH5xN93iwYP4yaw5onO5QYV75mFuRAY4gKnpAc2 -81lbUVbJ5H60pdDK1iX7ssAhQf6C8kSa4vAPDtH4D9a3wID4WbQNl115Sc31q5QL -n5NomdkEbIDDGfr5euTnqlk3hw5F7voPaqmd6mI6Dqnk3vRDMihdoJCjTt4T2Rju -wK5E4OKEAh/3yJNFmNemY0kFWSgCjUyNbMjBUv9JAoIBAQCYS9QO+m1JUA2ZVd1E -eWqNkFakTIdL2f5kv03ep+wIxwq6c+79SUGr3UMh5hStvXCFYjhAJhbwc0rY13lQ -uRJdWk/sIn2CifxfgjC1MccPdxeyxGxK56PMGqG9qgrKjITA9sGxA7EFCYe+9We5 -/Coq9VaLoxpyjkWL8rj9m+N7RfcTAubaZseeIBuamj+7UOZ7KOM/2i6HMBQugys1 -Thu2LLRanDnups6yPEmPuHmPVA5YjX9X9VFpZcNMf33MuAflbe9qeNVuBQUQgCHe -TvQr5QFjAoJLTCDq4nrlQCZzFZtB9vQZsjZbEg8WuxG+vN0hSrUemxBTtmEH3bbm -SLn5AoIBABGxznQFXXlF3eLIZqLvItDMSTpFp8YPk8GQWPT2V3pNNjvK/j7eg+tn -VouXv5LjyLTzWLKnPjIU4t+qwu6R9nohZ62OjGl6lssVdjPnf4R6UKzRa0iIZtH4 -BlGncnAbzb6TJuLX7dNwICoUCGyvk9tdnThH1FY3ZAEhOi1G8LEh7aBrj9/vUZ2d -S5jzZ7kLh04AB8OP1MXM3sZE7VlIxUtT/NLlwC8zRsg84pAjg3U7PygIDYQDzCRB -4yIvDziTPqDB/vdCKt7/Xary5Xj4NwqcPCRf6HvdHYCVeW7V+mWcMKZgodQARQhv -qQCK9iiN08MAFNia/0/Bj4D7XKurNRY= ------END PRIVATE KEY----- diff --git a/hrp/internal/data/x509/client_ca_cert.pem b/hrp/internal/data/x509/client_ca_cert.pem deleted file mode 100644 index 62a0ce05..00000000 --- a/hrp/internal/data/x509/client_ca_cert.pem +++ /dev/null @@ -1,34 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIF6jCCA9KgAwIBAgIJAOhoXtjjP6JdMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD 
-MRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTAeFw0yMjAzMTgyMTQ0NThaFw0zMjAz -MTUyMTQ0NThaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD -U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAO7fTqeU+8OfKMwXABNF90+RYL4X -YS4ULx4rpf14Ntp1SF6o3itCSM3jJfHzexj2Pm16aL+OQll8ODtvTadqVSMndMCn -UN/jVjxiMmjkSNKpwUGG69CsQzCKoueKBCEy/CZSopQae6Wxn7mqTAzhFlh3idNL -J+12UtdqDxnPDsiG2XBET3UrKyJeBxMgRyPi/g4wHfhH9oJ97jkdacUlLko8l22s -ZiMSSwwOlWxtTY5t0FbHu08ufP4eYTqC0LL3z1Fon4v+4BqUyK7BT3dISwPBmSd1 -uTD7Wbaa/QmfU6Y18dkNlK00GUAcKWgPfLcm7EH/AAz5XkqozVR3z5FLBYFTxVrA -Ly/Gu5HLx/uwoYWeYRWBOSkqvdgf9PT57imO4fOi1CTQuq/1LAdaxGkm7yXaz0YP -ySTiT6PvcLWFEbjrbufxdBrF4/ZsQz5vdJiKq2IQmCIKONJOFHWqgoF4AA7Ze1cl -mrK0eLzUlG1WmSy5mpjByRanahQWYvK1s0tc8IwMRRJY4DS6Dp99EVyteKZP/jc0 -x+ILet2ThDhjY3AxtkzlejyylABgl2AyGoGzZzbaf1q/0LfM6SfYBSVZK3TFR3Kt -8lQnG0tztoM+bnM/JZ8UZ61s16jJVxWzlZ+rx8rCpIvh3Cnl52DGo6oA4Kt60uDP -3iiTLGNYqEyHmzgnAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E -FgQUdOqNqaSjcn7BRN3fLs4eTIp1W9MwgYAGA1UdIwR5MHeAFHTqjamko3J+wUTd -3y7OHkyKdVvToVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV -BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1jbGllbnRfY2GC -CQDoaF7Y4z+iXTAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBAOnH -CrwiJd51oBic5PwjQBhQcUtGOfR1BJe/PACpLXTf1Fbo8bLT5GxZLATlw9+EVO9P -JhhH+oiUuvA7dE2SRiZXpY7faqtDgvVfssyCrvACkM7pcP9A5kM4LiunX7dpY2xp -naJAqDV5Av1mOohHuVEZHqV6xQSREQFW2IusfpCsPP+P+RPKM2o571e6oz5RGbuP -dQ39QycBTK8ezccxaDaH614peAnBi4Q1GuxzgNmXq2FPDcf7F1QcWMrW3jUI8npi -Q9rXRwrqUYP7Yzz+dIziGdpOfZd7x/MyCXuqRdFdA+bulGM2Es5lvtguPOFhcWp0 -3hzLJ+yolxyqxnNNdaU0r+TDbgxOBjw0VxahuhzFDeZsP6Civzp+Y6MRdvofNXBm -IBD4uqmQtUUyE2uoznXvZkXaSc+0VIGgs04AMS9irBC2oVEGDp0AbelcIhdgToam -/NTuOmxgadwDuEn3TIFYkzx84J81kL8g0HQ1N09nSXChkSVb+XlxC+Wosxoazydr -M4FOvaa1V4vnmIdA2aF1nWTzJNcc9FC23zTmQkV2YJ1IKNmxGd3xBZzUtUBu5OgZ -vPXECtUjRcraNuXeL6gSX0qBaaVkcdxhp8CpI8k6Qb+mgOaq/ixrVEKtczBVXjHD -pO6QmwMZtqR8JsStbMCYXa2owt4k8F3yMlIKE6qX ------END CERTIFICATE----- diff --git 
a/hrp/internal/data/x509/client_ca_key.pem b/hrp/internal/data/x509/client_ca_key.pem deleted file mode 100644 index 77065d5c..00000000 --- a/hrp/internal/data/x509/client_ca_key.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDu306nlPvDnyjM -FwATRfdPkWC+F2EuFC8eK6X9eDbadUheqN4rQkjN4yXx83sY9j5temi/jkJZfDg7 -b02nalUjJ3TAp1Df41Y8YjJo5EjSqcFBhuvQrEMwiqLnigQhMvwmUqKUGnulsZ+5 -qkwM4RZYd4nTSyftdlLXag8Zzw7IhtlwRE91KysiXgcTIEcj4v4OMB34R/aCfe45 -HWnFJS5KPJdtrGYjEksMDpVsbU2ObdBWx7tPLnz+HmE6gtCy989RaJ+L/uAalMiu -wU93SEsDwZkndbkw+1m2mv0Jn1OmNfHZDZStNBlAHCloD3y3JuxB/wAM+V5KqM1U -d8+RSwWBU8VawC8vxruRy8f7sKGFnmEVgTkpKr3YH/T0+e4pjuHzotQk0Lqv9SwH -WsRpJu8l2s9GD8kk4k+j73C1hRG4627n8XQaxeP2bEM+b3SYiqtiEJgiCjjSThR1 -qoKBeAAO2XtXJZqytHi81JRtVpksuZqYwckWp2oUFmLytbNLXPCMDEUSWOA0ug6f -fRFcrXimT/43NMfiC3rdk4Q4Y2NwMbZM5Xo8spQAYJdgMhqBs2c22n9av9C3zOkn -2AUlWSt0xUdyrfJUJxtLc7aDPm5zPyWfFGetbNeoyVcVs5Wfq8fKwqSL4dwp5edg -xqOqAOCretLgz94okyxjWKhMh5s4JwIDAQABAoICAAmMq9xPPHFpn3vpP3uFxIlN -yoxO6veonumZ3Rzw/WBmZ+pA3gDkuXxhpFaz4SvyTDScPCvMSCLDsIvPu08CFT0+ -ipBZIAaTVBM96b3/wlmJp8wy1KKXAGikYjbXcarSGvp9OzqohGDvZO9LO5cYOIh4 -3u2vh30ayd0KxGfHu1OQ8IhocrTAcQ0CrU26cJ2iqX1vtwMB/XziA/AMmPnkrqER -IwyjY8HrLUziGF8pT3xuL3IIshhMR3rxQ/nO2QEOnx8mC5rRKaxmXk9+MusV3Mnd -p33IWwr2QXPnZk5ILFPsvCptPJBgENJbTdx3IglAaRmKVDowjfB2Jx9FWur4ENQy -+yCzf0ygRoXnugtwE48/L7P8mlqZlZsxQbUUjXEPtht8rtM4CR5b0v7PHXiLh1oM -igfy1RDAQAZQRGIlWCOeV2soiyKLnCGyAaVXcM2ksDkYOSH4ObE4KwF1Ph87lNaG -ywolsPvQD0ygymXcuStrYHWamTp8qRjNvZBcThs3SaKN+lxXxPng2tBPUwU0S6nj -e0pjWco74elBk+fjjd0wNolKjUD7FhRXlWiXz9BgcCjRD9TLoVk8mp9cFL7OLzJc -735JmNKP8C5Qs91Ugo6Z9tWQQTdGHZe9ElUY0fWP0bs+4iBaadl63R26tchLncZE -LnYsi2AjDdV908cEkAiBAoIBAQD6LbGeyFHZA42nuSw/NFsMVldqU6QwmADQI3Tw -JEdw2thS8VIX2c8aeJkVL++dNmSPcqs4NqhzgJSm9o1xNqGZovAPK/B3NmLl1kzG -JPwSr8QwNxmKwUlbt1K48qIV0JmetOgRG/ll5ux2CxgWHzwgRwtvpbnxDa7Gf7BA -UfH7AfZJ3iV+HlJSxr9XxNgFoNEtpP9sqbOgt10f5JJlIELCTa38iMBojAGxlzyj 
-7DGYY/diQDr+6mRNnv2pY57dOnmdvN1w+p1W7saaeRCeltva/G+5n5AWMFl5qBjT -LDktBE+okH5wapkUsZzZTByTgFXdBC2wY2qBrOexBAyS8/F3AoIBAQD0bkNBc1ya -KYmWlCsVSUZxUGSOp9g7ZdzlB/1G523s3PltXSphsC4mACs7ZAs5OAO/bu05kurp -dOqEAxsC05IxD2/gGoarC6QfTum9CMNoKrvtczA7Gl+6D5djum17lULY6YSBO75J -L0FQK6nCVGfAbBRAqhiFi+9kXvNThuqjgoiCNwQYxaG8aovoAKTFdkzQjDw2tUgM -jqCM6ifOBJIRolFq2CBom8nB+wpsI1naFLaOdg0Luz/Ds03gD9nWa6a4XIowKCml -Tek1Q+S2hZoTgfOlKRbCcM1KyoaI9LKI/pbKmpNyyrADw/kZKevfsKnYwMpHlaTR -NSuQ2VJKuxrRAoIBAQCBQ3bQ+eQAYyugC7dm+OBKYZpNH+ZoDUHuSUO0iKo5D3pS -cMnf9PRjUwiVv+zoqCARVkhNhUBIXZlxI1c1teqNfXjX/fYDQqCa7L1Ca/2qkhKm -bvHNlc0XjIM7eHJzHxMgw4xcur2D/2sSGu1ZEM56RvsLtu96M32opnUk5rJG5V6i -EBwDLBuRFYvsB5MuZUdvdB9dv9lGIzgEsI9LnP2hc42APBBedGizn9b/Q5zkhlJd -+53/9I/a41lhWk3NNNd9vwYTyAnfzwPi8Ma7imsSnPgFSwKh1F2G1GnvQpxQPDgE -epQ59XofDR5j0EW7mMXEqtIIn3V6hyI3fkYY795FAoIBAQCsx7x26YsN1krRzA7g -TxmiQ8exJ2gsJIcOxqT8l98WTeVqry6kOxuD9R6aLs/YNIZBrbG2vuma+PBFPMS9 -LLzsPRNCAL4s7l+nWerTmvw2B+8rm/796Fi+dwL2lfOKJipIllj52TdbGDI874Bi -Q7PLSxrN0u7eh9pCwvORmY8G4eCI20bkE9+OBmq7JqlSg5ss19RAf8hcR/2pXmOg -t45hNLIEqp3OFEF8A26MnjiHdZjN/xidsFEUjwx/U/USIqqJK7Dq9ZjqprYw1rs3 -Yh1VqMiHeRIDhCU5twt+iCojuILy2G1d+XSOVNsiNIXtaz3EYBMcouUMlV8kVtpa -xQPhAoIBAEr8U7ZaAxN2Ptgb6B8M1CVNE6q7S1VuX+T8xkciadW2aRjJ3PufFfsk -Zo12fP9K/NeOPTIz0dQB6Gy/CKzDLb8NnJCJnCUUaO8E45C2L9r6qvIJpXWHp3vo -neGO49y/5st7suOZkWU2B6ZGwNWH90296mfSKcUNxSRMaHCotPdVDyvOgLC24ZWR -6teRaxB2sVZYqmoz+4+G8SOK40bHJKf1kwujbrS3OqzDzEeC/STtqYZWPW03MFkk -MBPQvwCWMJINv4zz4YrnOaA9COc1/fTXCG5kKYyalPD8VKxi1usas1pZwIqZkuwm -D6kBMuZ4gkKW24IYzXzOni0/BOnpOfM= ------END PRIVATE KEY----- diff --git a/hrp/internal/data/x509/client_cert.pem b/hrp/internal/data/x509/client_cert.pem deleted file mode 100644 index e35b94b1..00000000 --- a/hrp/internal/data/x509/client_cert.pem +++ /dev/null @@ -1,32 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFcTCCA1mgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx -CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV 
-BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIyMDMxODIxNDQ1OVoXDTMyMDMxNTIxNDQ1 -OVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL -BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTCCAiIwDQYJKoZIhvcN -AQEBBQADggIPADCCAgoCggIBAL2ec6a93OYIioefCs3KRz752E5VfJPyVuxalBMc -7Dx84NsdwpbUyDT6fO7ePYM8IvYAsLc5coLCP1HKGGRmYm423WZf8Kn93BDl0XcN -4bgtW9ZrekvYcXqSzygz3ifdQeZljZrqW43dkkYR2vWc+uJXs+vrRVZyUSLLbe97 -9zUbWbOfHBc1jK1vTUakl08VhllYbO0m0SYZIni0sioItVdVWTz9XE2COavLqwwL -MIq8N7JXEdYJC49JWfdzvqZYTxOn5FSTCWen7/mcZmuLYPwUCkSu05M5T2o1ygkd -ohA+/X9yjToPJ7NO509lKHWo7+sp9if6jZsiOU45/t84pD6juVZSZ20/A9i6hjtj -C0SqYk2iQEtRp+lT6yYa5ffeNllFUGtM+xq2are2n93PnXwMTUlYGuTtkyRPG717 -ZtQjKQuwfdJNoNbJl2cfQpmtLdm4Jzrg5cWiiFro+aqnZxIfUEEDkIBaUjYmwMkS -Qq+S32L4f4u7rtbnzdo/jVwq0wpSjTGQJEab+v2wZpDhVbQblTyI30A+TvBIzLil -09OX49/teZCp05kOJy0V/yXdQtPwlQGXdsCUmD6dnGav17fB1witXDdG+4SNoyF/ -PN+8wtlMQ8fWvLdxLsd/Rq6CEZQV9mBhrQxXUmFFDhd0O6wfxR/lVFxIWg70Fz7P -+z7tAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFG0psrHrGny8ziVm -RtulG3f9ROrhMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD -AjANBgkqhkiG9w0BAQsFAAOCAgEAtr1dzSQswIOlEGlLtoAwkL7ys/gP2fcdh7Jl -ggiPs266yzZFyGGdd2GKo6tcjdBNjfnO8T5h8eLzj7QlzKPqA/l0BgAW7s7WX9QF -wCivw1DHE815ujlQNo3yve38pd2/I0hdf9GtQLGyOirYpwW5YcHvpmLezrW6J3UU -CWIfYhqO6bSs+HCLkvQdsCG1TpveWYXfC9aXHjw+ZGOjBMEt6AgdWctwzTjQfZub -VjZosBC3ZkDjkA9LTqKP5f8XSWt89J4JCYkiFRiJuYYiNYcZpb0Ug93XjEHIHXMG -N/cD9fCB2HovoVu8YnezpSrqEhqEikHSq80fwbf+NaT0CEbPMx3UMzt8d8gwUiwE -nzzf/o4uOwoofNWfka0J1VPY1AtjUDvz44LyVhp4uvkEJEK1WQ46mM68H/EOUmpd -fHANEbV8HLq2iOjR78n5+MCHRcX7duScp5wT0ajfDg41VrhvV/u7YctFj8ynQJg5 -cqbH+GgTrEfAFFm5mZH1SGqNPyxr1eQFWXMRGE7R/NoyQo2uqrSRmz6JFXlnWtxF -YmLhnOdQaytcpiYN2YVyC/rLK3l3Tbh4u5axvlZP/hi+nQluiZzkH97iUqXcBU/9 -jYNohnJzXMHTIZM8FQY+9uGw9ErdDo7FmX5Xkp4TzEz9k10m1fnt0njSEzITtqpg -MoO9n00= ------END CERTIFICATE----- diff --git a/hrp/internal/data/x509/client_key.pem b/hrp/internal/data/x509/client_key.pem deleted file mode 100644 index d9c4bae3..00000000 --- 
a/hrp/internal/data/x509/client_key.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKAIBAAKCAgEAvZ5zpr3c5giKh58KzcpHPvnYTlV8k/JW7FqUExzsPHzg2x3C -ltTINPp87t49gzwi9gCwtzlygsI/UcoYZGZibjbdZl/wqf3cEOXRdw3huC1b1mt6 -S9hxepLPKDPeJ91B5mWNmupbjd2SRhHa9Zz64lez6+tFVnJRIstt73v3NRtZs58c -FzWMrW9NRqSXTxWGWVhs7SbRJhkieLSyKgi1V1VZPP1cTYI5q8urDAswirw3slcR -1gkLj0lZ93O+plhPE6fkVJMJZ6fv+Zxma4tg/BQKRK7TkzlPajXKCR2iED79f3KN -Og8ns07nT2Uodajv6yn2J/qNmyI5Tjn+3zikPqO5VlJnbT8D2LqGO2MLRKpiTaJA -S1Gn6VPrJhrl9942WUVQa0z7GrZqt7af3c+dfAxNSVga5O2TJE8bvXtm1CMpC7B9 -0k2g1smXZx9Cma0t2bgnOuDlxaKIWuj5qqdnEh9QQQOQgFpSNibAyRJCr5LfYvh/ -i7uu1ufN2j+NXCrTClKNMZAkRpv6/bBmkOFVtBuVPIjfQD5O8EjMuKXT05fj3+15 -kKnTmQ4nLRX/Jd1C0/CVAZd2wJSYPp2cZq/Xt8HXCK1cN0b7hI2jIX8837zC2UxD -x9a8t3Eux39GroIRlBX2YGGtDFdSYUUOF3Q7rB/FH+VUXEhaDvQXPs/7Pu0CAwEA -AQKCAgAtlwQ9adbLo/ASrYV+dwzsMkv0gY9DTvfhOeHyOnj+DhRN+njHpP9B5ZvW -Hq7xd6r8NKxIUVKb57Irqwh0Uz2FPEG9FIIbjQK1OVxEYJ0NmDJFem/b/n1CODwA -cYAPW541k+MZBRHgKQ67NB3OAeE8PFPw/A8euruRPxH+i3KjXSETE8VAO0rIhEMz -Ie2TQRydLKp71mJg45grJ17Sxmc7STT8efoQVKgjCwPkEGiqYpiNk2uhZ2lVGRC9 -cyG6gu74TdyTDQss1e7Xt+fUIZ2+3d6eJt6NvjC+25Ho4SwO9eYjF1qnQ++KqATr -TOoOaADPLLaXZCFZ1D+s9Dq4Vrj+QGk8Fajotj4gBpUtc0JxtvYM9EhlW7DpchYm -Cxe8vmEi/54YErXKawTUXYBB8IeDzwtvi3v3ktmH8BsGJ6Y3RXDI9KIG/6IE5Xeu -hkPCJnB0e3G2nlaffNSrVknxF+z74DB3T2kj0zC/4H4/hHo4W5D/pswcGWlhREWG -E7ViXJjBRkc5tpS9HfNdZ2wHiccioDIdGSHGqGMF4rLCUE2n+zc4m6pvvNCjN5KB -S4+zps50Gqtbp3DH2h1YLtkzuzvDhgpMPyJ1qZsdgelRSi2IaE5oekuBGP2WeXFw -DLI/cijc13cCacH+kpllQL//zBP8mMGmussWGgrVXdm9ZqD+rQKCAQEA6OG+s8sa -QZJ8W1nukcaS5rSvJBeZO6neCd6EB4oew5UGJsSz+x4RtJ7aJhdTGtyCXqiR2uFw -SBYdTcOgNbBUXg39vWAv+k2lmxiMGuLnAcNcGYyDLXr1SUJwe4Be984WNFdqzY0z -LCd9NvutWWX0Xd1VBdhlDuu3eBenzPBKIxTk3N2gLvzYxC/62e29Trsm7Sur11ut -Jay/CRdomjaqIiZ8q8qgdSU+pPe2DZYzUOutySJhLUegrrgWvPS/i8FHf7AGRgki -wpFn3gy5zCsFzr6n/TzJ5zQvlz+PcbUHHb06U1cnT45fkFNAJJvBYa4vi/tRx92E -Bi8d4bn40fUo3wKCAQEA0HFDHzhRxN/RbzBkymGlgfrsKcBdaAzgClo5uAXr8sdi 
-efsgBFo228I5lK6ywfzOfD/UxGB6ucdkZb/tRLtoK0OqOGiNx2Q1yazRVbuhrBrR -Y7DDbh7164o/MAYqPGxTMUxzXia7WBtNm00Tv9pDsw+NTzbrk7OxkLZWbjQEj99T -A9pcqXYA1RJtD/6io/43/oVscWPdRrbrNrJz+27Bsau20MBheVmX5sLTO2iWKTN4 -/ofrvOv0ru0I3ACHiLIaQFXs4snQjlhJm5MJ6kuZVdYKAzyNE+YOPnAxoiQAlHau -E1aV8ON7jmjhwxa2QICCwVcUNmwXU4UztGyGZ5a1swKCAQAi90Ia3LPkhIoHbUlU -uev0l8x0LtbjDm44LSDFwQc9dnKl/4LGgY1HAVLfxUDFF7a7X7QGmTKyoB9mPakg -ZolEVfVzKa4Kdv4We2kN4GOu8BYz/9TyTzPk/ATHhk68BkVvNnDizACS8JrsVn2A -nr5CGalaZ1NFGj9B2MtpCesXuVtjjiMu6ufhDRMtBXUXDSKbGaODglBNB9LnGoyq -GusQlZbCdHoDHMR7IHZFM/ggfkJpoK/WjJqjoSBI3raj1TFXCqbmfRiq/goKXP7I -mO0WTaoLa8Uk4cEDhJeVCwk2feL0AHH2j/npQZav6HLwp6ab7fApgikAhLKH4dRq -MdUhAoIBAQC7svJVf7qqRT3sGTD5yXpnlJPreOzj0IxC5kKJgtOYuJDl9Qw8vxwd -QkXlrHcOFl++JSCsgZCiEHpI4c6AER5Zr0HuL8BUJ9oDtJqA0EhimXeqhLdHR5v9 -sWz7CuInrQgxIX3V75zOVy/IRF0fayWBbeS6y2LRi4O/I2KrNC5TfC/eDVlZxAg1 -1rTdLVg5wqebi3w+k0Xj8r3WcFXeuTq0ikNCsapUwyf1RcU+/wwRJ+exlKXkZrnc -d1h9/AAQSQk4m+eHxWIHfFs0O/E2yULXt7kmdvU3UPfMo+0d67uV9VUF1veIhuBx -OeLqcV5GsTKNdaOe6jELJayMsRlK2LzfAoIBAEoWFSUdf3ruvj+ONju0TDtdvvTb -+i+3ttqMK/duYM2TlD3Lvqyx3kNxlMTAArfvnwtKVSw0ZIGSPc/5KHnxldcdALgT -4Ub1YesUv5585thMw1EWyXAPognLhfTEVSLYKcMPoBNCv7FvAT3Mk5SZPReRkbT9 -oqDAzg7r+0+pjD9LmnIXfCxfbSV6zcBFF8/iGAmzh3CanDqVkUds1+Ia8018cfDS -KW5PQAEnJC/BZAI7SQsxH0J9M7NYxJRN0bua5Be0N+uuYSOa+d9yecugfmvga6jf -9nEcohJShacCSkQvIXlq5Uy/WBb6sbiTmHjjW14FG25B0rrQUjmFAUiYceI= ------END RSA PRIVATE KEY----- diff --git a/hrp/internal/data/x509/server_cert.pem b/hrp/internal/data/x509/server_cert.pem deleted file mode 100644 index f1a37400..00000000 --- a/hrp/internal/data/x509/server_cert.pem +++ /dev/null @@ -1,32 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFeDCCA2CgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx -CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV -BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIyMDMxODIxNDQ1OFoXDTMyMDMxNTIxNDQ1 -OFowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL -BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3Qtc2VydmVyMTCCAiIwDQYJKoZIhvcN 
-AQEBBQADggIPADCCAgoCggIBAL5GBWw+qfXyelelYL/RDA/Fk4GA8DlcBQgBOjBa -XCVDMAJj63sN+ubKBtphWe6Y9SWLJa2mt8a/ZTQZm2R5FPSp9rwdr04UQgmL11wh -DCmO+wkRUeTYwsqcidEHRwOxoctyO+lwgYw983T/fp83qtNS4bw+1kJwrLtFdgok -Kd9UGIugs8BTFqE/7CxFRXTYsNy/gj0pp411Dtgknl1UefPdjco2Qon8f3Dm5iDf -AyUM1oL8+fnRQj/r6P3XC4AOiBsF3duxiBzUp87YgmwDOaa8paKOx2UNLA/eP/aP -Uhd7HkygqOX+tc3H8dvYONo6lhwQD1JqyG6IOOWe2uf5YXKK2TphPPRnCW4QIED4 -PuXYHjIvGYA4Kf0Wmb2hPk6bxJidNoLp9lsJyqGfk3QnT5PRJVgO0mlzo/UsZo77 -5j+yq87yLe5OL2HrZd1KTfg7SKOtMJ9N6tm2Hw2jwypKz+x2jlEZOgXHmYb5aUaI -+4xG+9fqc8x3ScoHQGNujF3qHO5SxnXkufNUSVbWbv1Ble8peiKyG6AFQvtcs7KG -pEoFztGSlaABwSvxO8J3aJPAEok4OI5IAGJNy92XaBMLtyt270FC8JtUnL+JEubV -t8tY5cCcGK7EtRHb47mM0K8HEq+IU2nAq6/29Ka0IZlkb5fPoWzQAZEIVKgLNHt4 -96g9AgMBAAGjXjBcMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFNx36JXsCIzVWCOw -1ETtaxlN79XrMA4GA1UdDwEB/wQEAwIDqDAdBgNVHREEFjAUghIqLnRlc3QuZXhh -bXBsZS5jb20wDQYJKoZIhvcNAQELBQADggIBAAEEZln7lsS/HIysNPJktc0Gdu3n -X1BcA3wXh95YTugcxSSeLLx2SykXnwX+cJncc1OKbboO9DA5mZ+huCesGIOKeUkg -azQZL6FAdw9PQKdqKg3RgSQ4XhK990fPcmmBhSXY24jNNhRHxGw5lGBrD6X2SdW3 -m66yYzn9hMXL4yrweGO7OC4bdyISDrJiP+St/xeCoIcXP2s07dE6jl2VorJCWn4J -SxKfDhPPohZKl6dL9npkmPcpz2zRAYpo4tsVdAAQDBRui44Vvm1eBPUo7EH2UOEh -/3JtTeDUpldM8fDaKE0kTa1Ttxzs2e0Jm3M4/FMOxqSesyJldw54F4+4m24e/iQU -gceArYMFVFTipgrLfUuRvRxx/7D7V92pqTyuD3T78+KdTqrlxvCTOqSHhFE05jWD -RdynS6Ev/1QZLlnWgMwhQAnjhc1NKkso+namF1ZmHH9owiTRBlWDMNcHMDReaELd -QmFUvutHUpjidt1z+G6lzbP0XB5w+0vW4BsT0FqaYsFbK5ftryj1/K0VctrSd/ke -GI0vxrErAyLG2B8bdK88u2w7DCuXjAOp+CeA7HUmk93TsPEAhrxQ6lR51IC6LcK0 -gACSdnQDPGtkoRX00DTvdcOpzmkSgaGr/mXTqp2lR9IuZIhwKbhS3lDKsAZ/hinB -yaBwLiXfcvZrZOwy ------END CERTIFICATE----- diff --git a/hrp/internal/data/x509/server_key.pem b/hrp/internal/data/x509/server_key.pem deleted file mode 100644 index 1c778db7..00000000 --- a/hrp/internal/data/x509/server_key.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAvkYFbD6p9fJ6V6Vgv9EMD8WTgYDwOVwFCAE6MFpcJUMwAmPr 
-ew365soG2mFZ7pj1JYslraa3xr9lNBmbZHkU9Kn2vB2vThRCCYvXXCEMKY77CRFR -5NjCypyJ0QdHA7Ghy3I76XCBjD3zdP9+nzeq01LhvD7WQnCsu0V2CiQp31QYi6Cz -wFMWoT/sLEVFdNiw3L+CPSmnjXUO2CSeXVR5892NyjZCifx/cObmIN8DJQzWgvz5 -+dFCP+vo/dcLgA6IGwXd27GIHNSnztiCbAM5pryloo7HZQ0sD94/9o9SF3seTKCo -5f61zcfx29g42jqWHBAPUmrIbog45Z7a5/lhcorZOmE89GcJbhAgQPg+5dgeMi8Z -gDgp/RaZvaE+TpvEmJ02gun2WwnKoZ+TdCdPk9ElWA7SaXOj9SxmjvvmP7KrzvIt -7k4vYetl3UpN+DtIo60wn03q2bYfDaPDKkrP7HaOURk6BceZhvlpRoj7jEb71+pz -zHdJygdAY26MXeoc7lLGdeS581RJVtZu/UGV7yl6IrIboAVC+1yzsoakSgXO0ZKV -oAHBK/E7wndok8ASiTg4jkgAYk3L3ZdoEwu3K3bvQULwm1Scv4kS5tW3y1jlwJwY -rsS1EdvjuYzQrwcSr4hTacCrr/b0prQhmWRvl8+hbNABkQhUqAs0e3j3qD0CAwEA -AQKCAgBnR3CoGbd9hZl8u4qxc5IdeXwgflFmgRlGCAyCtHlxzG9hzMTD7Ymz/hMM -NG1xQltGfqn8AROd8MPJLOEY/1QtnZgM8fv24K4bqmlCW7nTUQXYHSubkUDiY2e3 -K0ETszaETMRSaLwY2IOujQQ4/ilePY3D9UOtmqVXnVN+G7USwP31xEvtZ+xPqHfU -a+FQlFIj8FuMQXDuKozdK7s+I51yjl7pVNx3M7QlH1/olcSKNta1EQXK4RgZxD6a -kkBuyPR93ohXOJ0OMSvI7eKVKIcBh0JM4z0+D5FMJ7IGbjL8Bdsjcs1a0g/y28Xf -NBVf9w8Fun3mmYmj3ZMsqDZgVg/bAfP2z7O9kMzbuqmjelOz8HXxTm/+GIHuseMx -b/nDZgB0ZN+FhATv/onshJcjr2L3SJYzEWqjYiqaCQo5qtib+/kxh6SHPhAY2o8l -zzMhKFsJMhmwW91FXqeDS9FTlcRXtYH1EJxNGa01GpyVa6plvvFTGBNkEUJnVuEp -ULohJw0NJQYQOz5omYaQVJ49lpzVhwLEolgSlIBiM3s9nSDvVBYu+bB1ovw5OTIJ -Wlc9cBrYmdxYdAj5n6JzIC1wixgxrFw1jBm8cL/2FQYtR7daZabTMyZj5vAUqjxr -OV+uvkSFcIyBs1ty9TnnKC3yd5Ma+5chR5u7JPc1lSSor6AwQQKCAQEA4d5XrCq5 -EikGII/unhkVZsh9xmILp/4PRKc+fV7TFEpGyn8HFCBToZk6nXv99roUBdeZFobw -gDuZqBa4ougm2zgBbhdQXGaW4yZdChJlSs9yY7OAVvnG9gjuHGmWsLhvmhaeXSr2 -auxVGRaltr3r8hP9eHhloDM6qdSSAQpsdeTBQD8Ep3//aL/BLqGcF0gLrZLPwo0+ -cku8jQoVXSSOW1+YSaXRGxueuIR8lldU4I3yp2DO++DGLsOZoGFT/+ZXc2B4nE1h -o1hCWt6RKw0q2rCkZ+i6SiPGsVgb9xn6W8wHFIPA/0sOwOdtbKqKd0xwn5DnX+vt -d8shlRRUDF7HDQKCAQEA16gR/2n59HZiQQhHU9BCvGFi4nxlsuij+nqDx9fUerDU -fK79NaOuraWNkCqz+2lqfu5o3e3XNFHlVsj98SyfmTdMZ8Fj19awqN20nCOmfRkk -/MDuEzRzvNlOYBa0PpMkKJn2sahEiXGNVI4g3cGip1c5wJ1HL3jF61io4F/auBLP -grLtw8CoTqc6VpJUvsWFjopTmNdAze8WMf3vK6AKu7PKkXH7mFQZusacpO/E61Ud 
-euiG9BYDIIkrnWIQdLpODgliLZzPNcJDTKTFJAfIzr3WQvUaFc1+tHyX3XhpicvP -J4zyNfHd2dZMK1csXQJvFSnPgXpy531Wca0riAYZ8QKCAQEAhaVEBxE4dLBlebrw -nAeHjEuxcELvVrWTXzH+XbxP9T+F56eGDriaA5JhBnIpcWXlFxfc82FgyN97KeRX -17y50Riwb+3HlQT23u0CPEVqPfvFWY0KsWwV99qM2a74hRR8pJYhmksjh1zTdYbb -AugZxiFh53iF2Wa2nWq0AX2jc5apalRfcqTgAaEEs4zYiUYN8uRdnmZovsRliqae -wYAx44sK1vkQY5PSNKff+C0wgbY8ECHOF2eGnIEMU8ODKnWm5RP+Ca4Xyckdahsr -lmeyJbhDb2BbaicFGEZkNa/fXZW50r+q4OQOlMHbE2NNjw1hzmi1HyLAXhOJiWZ/ -3NnvuQKCAQEAg04a/zeocBcwhcYjn717FLX6/kmdpkwNo3G7EQ+xmK5YAj6Nf35U -2fel9PR7N4WcyQIiKZYp5PpEOA4SyChSWHiZ9caDIyTd1UOAN11hfmOz6I0Tp+/U -1FQ/azQHtN3kMzBjSxJYAJN56NTM4BiJD3iFemiIsjfH0h7eXBcg1djmLf8B06FX -GOSrGZDpNmqPghVpBvNwyrJbAj9Jw3cjcdvrZ5lOBhaWv+kz8Rzn+h2N4Ir5uF46 -szGxs5bEzD2vTs6Zz4ndhC7uyRi9y81Nj8t4TLZtln7TOdNup/Mr1zGXxM4Fn6DP -YlYfdHgUU+Eqf2lApeZHVfkzi+1TRvPoEQKCAQAELU/d33TNwQ/Ylo2VhwAscY3s -hv31O4tpu5koHHjOo3RDPzjuEfwy006u8NVAoj97LrU2n+XTIlnXf14TKuKWQ+8q -ajIVNj+ZAbD3djCmYXbIEL+u6aL4K1ENdjo6DNTGgPMfISE79WrmGBIKtB//uMqy -fGTUSPeo+R5WmTGN29YxAnRE/jtwOgAcicACTc0e9nghHj3c2raI0IazY5XFP0/h -LszTNUQzWx6DjWsbB+Ymuhu4fHZTYftCrIMpjmjC9pkNggeJnkxylQz/pwO73uWg -ycDgJhRyaVhM8sJXiBk+OC/ySP2Lxo60aPa514LEYJKQxUCukCTXth/6p0Qo ------END RSA PRIVATE KEY----- diff --git a/hrp/internal/grpc/messager/messager.pb.go b/hrp/internal/grpc/messager/messager.pb.go deleted file mode 100644 index a9d2efde..00000000 --- a/hrp/internal/grpc/messager/messager.pb.go +++ /dev/null @@ -1,291 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.20.0 -// source: grpc/proto/messager.proto - -package messager - -import ( - context "context" - grpc "google.golang.org/grpc" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type StreamRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Data map[string]int64 `protobuf:"bytes,2,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - NodeID string `protobuf:"bytes,3,opt,name=NodeID,proto3" json:"NodeID,omitempty"` -} - -func (x *StreamRequest) Reset() { - *x = StreamRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_proto_messager_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StreamRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamRequest) ProtoMessage() {} - -func (x *StreamRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_proto_messager_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamRequest.ProtoReflect.Descriptor instead. 
-func (*StreamRequest) Descriptor() ([]byte, []int) { - return file_grpc_proto_messager_proto_rawDescGZIP(), []int{0} -} - -func (x *StreamRequest) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *StreamRequest) GetData() map[string]int64 { - if x != nil { - return x.Data - } - return nil -} - -func (x *StreamRequest) GetNodeID() string { - if x != nil { - return x.NodeID - } - return "" -} - -type StreamResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Profile []byte `protobuf:"bytes,2,opt,name=profile,proto3" json:"profile,omitempty"` - Data map[string]int64 `protobuf:"bytes,3,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - NodeID string `protobuf:"bytes,4,opt,name=NodeID,proto3" json:"NodeID,omitempty"` - Tasks []byte `protobuf:"bytes,5,opt,name=tasks,proto3" json:"tasks,omitempty"` -} - -func (x *StreamResponse) Reset() { - *x = StreamResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_proto_messager_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StreamResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamResponse) ProtoMessage() {} - -func (x *StreamResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_proto_messager_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamResponse.ProtoReflect.Descriptor instead. 
-func (*StreamResponse) Descriptor() ([]byte, []int) { - return file_grpc_proto_messager_proto_rawDescGZIP(), []int{1} -} - -func (x *StreamResponse) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *StreamResponse) GetProfile() []byte { - if x != nil { - return x.Profile - } - return nil -} - -func (x *StreamResponse) GetData() map[string]int64 { - if x != nil { - return x.Data - } - return nil -} - -func (x *StreamResponse) GetNodeID() string { - if x != nil { - return x.NodeID - } - return "" -} - -func (x *StreamResponse) GetTasks() []byte { - if x != nil { - return x.Tasks - } - return nil -} - -var File_grpc_proto_messager_proto protoreflect.FileDescriptor - -var file_grpc_proto_messager_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x22, 0xaa, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 
0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0xdc, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x21, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, - 0x65, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, - 0x44, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x32, 0x61, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x56, 0x0a, 0x1d, 0x42, - 0x69, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x2e, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, - 
0x01, 0x30, 0x01, 0x42, 0x0f, 0x5a, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_proto_messager_proto_rawDescOnce sync.Once - file_grpc_proto_messager_proto_rawDescData = file_grpc_proto_messager_proto_rawDesc -) - -func file_grpc_proto_messager_proto_rawDescGZIP() []byte { - file_grpc_proto_messager_proto_rawDescOnce.Do(func() { - file_grpc_proto_messager_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_proto_messager_proto_rawDescData) - }) - return file_grpc_proto_messager_proto_rawDescData -} - -var file_grpc_proto_messager_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_grpc_proto_messager_proto_goTypes = []interface{}{ - (*StreamRequest)(nil), // 0: message.StreamRequest - (*StreamResponse)(nil), // 1: message.StreamResponse - nil, // 2: message.StreamRequest.DataEntry - nil, // 3: message.StreamResponse.DataEntry -} -var file_grpc_proto_messager_proto_depIdxs = []int32{ - 2, // 0: message.StreamRequest.data:type_name -> message.StreamRequest.DataEntry - 3, // 1: message.StreamResponse.data:type_name -> message.StreamResponse.DataEntry - 0, // 2: message.Message.BidirectionalStreamingMessage:input_type -> message.StreamRequest - 1, // 3: message.Message.BidirectionalStreamingMessage:output_type -> message.StreamResponse - 3, // [3:4] is the sub-list for method output_type - 2, // [2:3] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_grpc_proto_messager_proto_init() } -func file_grpc_proto_messager_proto_init() { - if File_grpc_proto_messager_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_grpc_proto_messager_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StreamRequest); i { - case 0: - return &v.state - 
case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_proto_messager_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StreamResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_proto_messager_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_grpc_proto_messager_proto_goTypes, - DependencyIndexes: file_grpc_proto_messager_proto_depIdxs, - MessageInfos: file_grpc_proto_messager_proto_msgTypes, - }.Build() - File_grpc_proto_messager_proto = out.File - file_grpc_proto_messager_proto_rawDesc = nil - file_grpc_proto_messager_proto_goTypes = nil - file_grpc_proto_messager_proto_depIdxs = nil -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface diff --git a/hrp/internal/grpc/messager/messager_grpc.pb.go b/hrp/internal/grpc/messager/messager_grpc.pb.go deleted file mode 100644 index d59a25e8..00000000 --- a/hrp/internal/grpc/messager/messager_grpc.pb.go +++ /dev/null @@ -1,122 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.20.0 -// source: grpc/proto/messager.proto - -package messager - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// MessageClient is the client API for Message service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MessageClient interface { - BidirectionalStreamingMessage(ctx context.Context, opts ...grpc.CallOption) (Message_BidirectionalStreamingMessageClient, error) -} - -type messageClient struct { - cc grpc.ClientConnInterface -} - -func NewMessageClient(cc grpc.ClientConnInterface) MessageClient { - return &messageClient{cc} -} - -func (c *messageClient) BidirectionalStreamingMessage(ctx context.Context, opts ...grpc.CallOption) (Message_BidirectionalStreamingMessageClient, error) { - stream, err := c.cc.NewStream(ctx, &_Message_serviceDesc.Streams[0], "/message.Message/BidirectionalStreamingMessage", opts...) - if err != nil { - return nil, err - } - x := &messageBidirectionalStreamingMessageClient{stream} - return x, nil -} - -type Message_BidirectionalStreamingMessageClient interface { - Send(*StreamRequest) error - Recv() (*StreamResponse, error) - grpc.ClientStream -} - -type messageBidirectionalStreamingMessageClient struct { - grpc.ClientStream -} - -func (x *messageBidirectionalStreamingMessageClient) Send(m *StreamRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *messageBidirectionalStreamingMessageClient) Recv() (*StreamResponse, error) { - m := new(StreamResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// MessageServer is the server API for Message service. -type MessageServer interface { - BidirectionalStreamingMessage(Message_BidirectionalStreamingMessageServer) error -} - -// UnimplementedMessageServer can be embedded to have forward compatible implementations. 
-type UnimplementedMessageServer struct { -} - -func (*UnimplementedMessageServer) BidirectionalStreamingMessage(Message_BidirectionalStreamingMessageServer) error { - return status.Errorf(codes.Unimplemented, "method BidirectionalStreamingMessage not implemented") -} - -func RegisterMessageServer(s *grpc.Server, srv MessageServer) { - s.RegisterService(&_Message_serviceDesc, srv) -} - -func _Message_BidirectionalStreamingMessage_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(MessageServer).BidirectionalStreamingMessage(&messageBidirectionalStreamingMessageServer{stream}) -} - -type Message_BidirectionalStreamingMessageServer interface { - Send(*StreamResponse) error - Recv() (*StreamRequest, error) - grpc.ServerStream -} - -type messageBidirectionalStreamingMessageServer struct { - grpc.ServerStream -} - -func (x *messageBidirectionalStreamingMessageServer) Send(m *StreamResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *messageBidirectionalStreamingMessageServer) Recv() (*StreamRequest, error) { - m := new(StreamRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Message_serviceDesc = grpc.ServiceDesc{ - ServiceName: "message.Message", - HandlerType: (*MessageServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "BidirectionalStreamingMessage", - Handler: _Message_BidirectionalStreamingMessage_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpc/proto/messager.proto", -} diff --git a/hrp/server.go b/hrp/server.go index faacfb95..1f811bd7 100644 --- a/hrp/server.go +++ b/hrp/server.go @@ -63,13 +63,6 @@ func writeJSON(w http.ResponseWriter, body []byte, status int) { writeResponse(w, status, jsonContentType, body) } -type StartRequestBody struct { - boomer.Profile `mapstructure:",squash"` - Worker string `json:"worker,omitempty" yaml:"worker,omitempty" mapstructure:"worker"` // all - TestCasePath 
string `json:"testcase-path" yaml:"testcase-path" mapstructure:"testcase-path"` - Other map[string]interface{} `mapstructure:",remain"` -} - type ServerCode int // server response code @@ -119,6 +112,13 @@ func CustomAPIResponse(errCode ServerCode, errMsg string) ServerStatus { } } +type StartRequestBody struct { + boomer.Profile `mapstructure:",squash"` + Worker string `json:"worker,omitempty" yaml:"worker,omitempty" mapstructure:"worker"` // all + TestCasePath string `json:"testcase-path" yaml:"testcase-path" mapstructure:"testcase-path"` + Other map[string]interface{} `mapstructure:",remain"` +} + type RebalanceRequestBody struct { boomer.Profile `mapstructure:",squash"` Worker string `json:"worker,omitempty" yaml:"worker,omitempty" mapstructure:"worker"` From 0b60b7a507c3373f153b4fb69b89530371ed861b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Mon, 11 Jul 2022 11:56:36 +0800 Subject: [PATCH 15/31] change: update docs --- docs/CHANGELOG.md | 38 +++++++++-- docs/cmd/hrp_boom.md | 3 +- examples/demo-empty-project/proj.json | 4 +- examples/demo-with-go-plugin/proj.json | 2 +- .../demo-with-py-plugin/.debugtalk_gen.py | 2 +- examples/demo-with-py-plugin/proj.json | 2 +- examples/demo-without-plugin/proj.json | 4 +- hrp/boomer.go | 5 +- hrp/cmd/boom.go | 9 +-- hrp/internal/boomer/boomer.go | 3 + hrp/internal/boomer/client_grpc.go | 2 +- hrp/internal/boomer/runner.go | 68 ++++++++----------- hrp/internal/boomer/runner_test.go | 4 +- hrp/server.go | 13 ++-- 14 files changed, 93 insertions(+), 66 deletions(-) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index bf0ea847..61780740 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -1,17 +1,25 @@ # Release History +## v4.2.0 (2022-07-22) + +**go version** + +- feat: support multi-machine collaborative distributed load testing + ## v4.1.7 (2022-07-18) **go version** -- fix: using `@FILEPATH` to indicate the path of the file - feat: support indicating type and filename when uploading file - 
feat: support to infer MIME type of the file automatically - feat: support omitting websocket url if not necessary - feat: support multiple websocket connections each session -- fix: optimize websocket step initialization - feat: support convert curl command(s) to testcase(s) - feat: support run curl as subcommand of run/boom/convert +- fix: optimize websocket step initialization +- fix: using `@FILEPATH` to indicate the path of the file +- fix: reuse plugin instance if it already initialized +- fix: deep copy api step to avoid data racing ## v4.1.6 (2022-07-04) @@ -276,7 +284,8 @@ - feat: implement `transaction` mechanism for load test - feat: continue running next step when failure occurs with `--continue-on-failure` flag, default to failfast - feat: report GA events with version -- feat: run load test with the given limit and burst as rate limiter, use `--spawn-count`, `--spawn-rate` and `--request-increase-rate` flag +- feat: run load test with the given limit and burst as rate limiter, use `--spawn-count`, `--spawn-rate` + and `--request-increase-rate` flag - feat: report runner state to prometheus - refactor: fork [boomer] as submodule initially and made a lot of changes - change: update API models @@ -330,7 +339,8 @@ ## 3.1.8 (2022-03-22) -- feat: add `--profile` flag for har2case to support overwrite headers/cookies with specified yaml/json configuration file +- feat: add `--profile` flag for har2case to support overwrite headers/cookies with specified yaml/json configuration + file - feat: support variable and function in response extract expression - fix: keep negative index in jmespath unchanged when converting pytest files, e.g. 
body.users[-1] - fix: variable should not start with digit @@ -462,9 +472,9 @@ **Changed** - change: override variables - (1) testcase: session variables > step variables > config variables - (2) testsuite: testcase variables > config variables - (3) testsuite testcase variables > testcase config variables + (1) testcase: session variables > step variables > config variables + (2) testsuite: testcase variables > config variables + (3) testsuite testcase variables > testcase config variables **Fixed** @@ -648,17 +658,31 @@ reference: [v2-changelog] [hrp]: https://github.com/httprunner/hrp + [hashicorp/go-plugin]: https://github.com/hashicorp/go-plugin + [go plugin]: https://pkg.go.dev/plugin + [docs repo]: https://github.com/httprunner/httprunner.github.io + [zerolog]: https://github.com/rs/zerolog + [jmespath]: https://jmespath.org/ + [mkdocs]: https://www.mkdocs.org/ + [github-actions]: https://github.com/httprunner/hrp/actions + [boomer]: github.com/myzhan/boomer + [sentry sdk]: https://github.com/getsentry/sentry-go + [pushgateway]: https://github.com/prometheus/pushgateway + [locust]: https://locust.io/ + [black]: https://github.com/psf/black + [loguru]: https://github.com/Delgan/loguru + [v2-changelog]: https://github.com/httprunner/httprunner/blob/v2/docs/CHANGELOG.md diff --git a/docs/cmd/hrp_boom.md b/docs/cmd/hrp_boom.md index a51770b5..f61722a1 100644 --- a/docs/cmd/hrp_boom.md +++ b/docs/cmd/hrp_boom.md @@ -21,7 +21,7 @@ hrp boom [flags] ### Options ``` - --autostart Starts the test immediately (without disabling the web UI). Use --spawn-count and --spawn-rate to control user count and run time + --autostart Starts the test immediately (without disabling the web UI). Use --spawn-count and --spawn-rate to control user count and increase rate --cpu-profile string Enable CPU profiling. --cpu-profile-duration duration CPU profile duration. 
(default 30s) --disable-compression Disable compression @@ -36,6 +36,7 @@ hrp boom [flags] --master-bind-host string Interfaces (hostname, ip) that hrp master should bind to. Only used when running with --master. Defaults to * (all available interfaces). (default "127.0.0.1") --master-bind-port int Port that hrp master should bind to. Only used when running with --master. Defaults to 5557. (default 5557) --master-host string Host or IP address of hrp master for distributed load testing. (default "127.0.0.1") + --master-http-address string Interfaces (ip:port) that hrp master should control by user. Only used when running with --master. Defaults to *:9771. (default ":9771") --master-port int The port to connect to that is used by the hrp master for distributed load testing. (default 5557) --max-rps int Max RPS that boomer can generate, disabled by default. --mem-profile string Enable memory profiling. diff --git a/examples/demo-empty-project/proj.json b/examples/demo-empty-project/proj.json index fe59965d..b2b376f6 100644 --- a/examples/demo-empty-project/proj.json +++ b/examples/demo-empty-project/proj.json @@ -1,5 +1,5 @@ { "project_name": "demo-empty-project", - "create_time": "2022-07-04T14:54:33.795693+08:00", - "hrp_version": "v4.1.5" + "create_time": "2022-07-11T11:45:29.942532+08:00", + "hrp_version": "v4.1.6" } diff --git a/examples/demo-with-go-plugin/proj.json b/examples/demo-with-go-plugin/proj.json index 13b1eab0..1899c546 100644 --- a/examples/demo-with-go-plugin/proj.json +++ b/examples/demo-with-go-plugin/proj.json @@ -1,5 +1,5 @@ { "project_name": "demo-with-go-plugin", - "create_time": "2022-07-06T13:57:04.054424+08:00", + "create_time": "2022-07-11T11:44:36.214909+08:00", "hrp_version": "v4.1.6" } diff --git a/examples/demo-with-py-plugin/.debugtalk_gen.py b/examples/demo-with-py-plugin/.debugtalk_gen.py index 70910180..9588e35e 100644 --- a/examples/demo-with-py-plugin/.debugtalk_gen.py +++ b/examples/demo-with-py-plugin/.debugtalk_gen.py @@ 
-20,4 +20,4 @@ if __name__ == "__main__": funppy.register("concatenate", concatenate) funppy.register("setup_hook_example", setup_hook_example) funppy.register("teardown_hook_example", teardown_hook_example) - funppy.serve() + funppy.serve() \ No newline at end of file diff --git a/examples/demo-with-py-plugin/proj.json b/examples/demo-with-py-plugin/proj.json index 73d9a31c..f2336020 100644 --- a/examples/demo-with-py-plugin/proj.json +++ b/examples/demo-with-py-plugin/proj.json @@ -1,5 +1,5 @@ { "project_name": "demo-with-py-plugin", - "create_time": "2022-07-06T13:57:04.482633+08:00", + "create_time": "2022-07-11T11:44:37.021634+08:00", "hrp_version": "v4.1.6" } diff --git a/examples/demo-without-plugin/proj.json b/examples/demo-without-plugin/proj.json index 24b61c18..50c06186 100644 --- a/examples/demo-without-plugin/proj.json +++ b/examples/demo-without-plugin/proj.json @@ -1,5 +1,5 @@ { "project_name": "demo-without-plugin", - "create_time": "2022-07-04T14:54:33.495643+08:00", - "hrp_version": "v4.1.5" + "create_time": "2022-07-11T11:45:29.800018+08:00", + "hrp_version": "v4.1.6" } diff --git a/hrp/boomer.go b/hrp/boomer.go index 460fd2ed..91094341 100644 --- a/hrp/boomer.go +++ b/hrp/boomer.go @@ -208,6 +208,7 @@ func (b *HRPBoomer) runTestCases(testCases []*TCase, profile *boomer.Profile) { testcases = append(testcases, tesecase) } + b.SetProfile(profile) b.InitBoomer() log.Info().Interface("testcases", testcases).Interface("profile", profile).Msg("run tasks successful") @@ -222,7 +223,7 @@ func (b *HRPBoomer) rebalanceBoomer(profile *boomer.Profile) { log.Info().Interface("profile", profile).Msg("rebalance tasks successful") } -func (b *HRPBoomer) PollTasks() { +func (b *HRPBoomer) PollTasks(ctx context.Context) { for { select { case task := <-b.Boomer.GetTasksChan(): @@ -240,6 +241,8 @@ func (b *HRPBoomer) PollTasks() { case <-b.Boomer.GetCloseChan(): return + case <-ctx.Done(): + return } } } diff --git a/hrp/cmd/boom.go b/hrp/cmd/boom.go index 
5df6cd26..a13232c3 100644 --- a/hrp/cmd/boom.go +++ b/hrp/cmd/boom.go @@ -71,7 +71,7 @@ var boomCmd = &cobra.Command{ if boomArgs.autoStart { hrpBoomer.InitBoomer() } else { - go hrpBoomer.StartServer() + go hrpBoomer.StartServer(ctx, boomArgs.masterHttpAddress) } go hrpBoomer.PollTestCases(ctx) hrpBoomer.RunMaster() @@ -79,9 +79,8 @@ var boomCmd = &cobra.Command{ if boomArgs.ignoreQuit { hrpBoomer.SetIgnoreQuit() } - go hrpBoomer.PollTasks() + go hrpBoomer.PollTasks(ctx) hrpBoomer.RunWorker() - time.Sleep(3 * time.Second) case "standalone": if venv != "" { hrpBoomer.SetPython3Venv(venv) @@ -102,6 +101,7 @@ type BoomArgs struct { masterPort int masterBindHost string masterBindPort int + masterHttpAddress string autoStart bool expectWorkers int expectWorkersMaxWait int @@ -129,11 +129,12 @@ func init() { boomCmd.Flags().BoolVar(&boomArgs.master, "master", false, "master of distributed testing") boomCmd.Flags().StringVar(&boomArgs.masterBindHost, "master-bind-host", "127.0.0.1", "Interfaces (hostname, ip) that hrp master should bind to. Only used when running with --master. Defaults to * (all available interfaces).") boomCmd.Flags().IntVar(&boomArgs.masterBindPort, "master-bind-port", 5557, "Port that hrp master should bind to. Only used when running with --master. Defaults to 5557.") + boomCmd.Flags().StringVar(&boomArgs.masterHttpAddress, "master-http-address", ":9771", "Interfaces (ip:port) that hrp master should control by user. Only used when running with --master. 
Defaults to *:9771.") boomCmd.Flags().BoolVar(&boomArgs.worker, "worker", false, "worker of distributed testing") boomCmd.Flags().BoolVar(&boomArgs.ignoreQuit, "ignore-quit", false, "ignores quit from master (only when --worker is used)") boomCmd.Flags().StringVar(&boomArgs.masterHost, "master-host", "127.0.0.1", "Host or IP address of hrp master for distributed load testing.") boomCmd.Flags().IntVar(&boomArgs.masterPort, "master-port", 5557, "The port to connect to that is used by the hrp master for distributed load testing.") - boomCmd.Flags().BoolVar(&boomArgs.autoStart, "autostart", false, "Starts the test immediately (without disabling the web UI). Use --spawn-count and --spawn-rate to control user count and run time") + boomCmd.Flags().BoolVar(&boomArgs.autoStart, "autostart", false, "Starts the test immediately (without disabling the web UI). Use --spawn-count and --spawn-rate to control user count and increase rate") boomCmd.Flags().IntVar(&boomArgs.expectWorkers, "expect-workers", 1, "How many workers master should expect to connect before starting the test (only when --autostart is used)") boomCmd.Flags().IntVar(&boomArgs.expectWorkersMaxWait, "expect-workers-max-wait", 0, "How many workers master should expect to connect before starting the test (only when --autostart is used") } diff --git a/hrp/internal/boomer/boomer.go b/hrp/internal/boomer/boomer.go index 5b6a7f2a..2dc40056 100644 --- a/hrp/internal/boomer/boomer.go +++ b/hrp/internal/boomer/boomer.go @@ -461,6 +461,9 @@ func (b *Boomer) Start(Args *Profile) error { if b.masterRunner.isStarted() { return errors.New("already started") } + if b.masterRunner.getState() == StateStopping { + return errors.New("Please wait for all workers to finish") + } b.SetSpawnCount(Args.SpawnCount) b.SetSpawnRate(Args.SpawnRate) b.SetProfile(Args) diff --git a/hrp/internal/boomer/client_grpc.go b/hrp/internal/boomer/client_grpc.go index 434b88e9..f96f789e 100644 --- a/hrp/internal/boomer/client_grpc.go +++ 
b/hrp/internal/boomer/client_grpc.go @@ -216,7 +216,7 @@ func (c *grpcClient) newBiStreamClient() (err error) { return err } c.config.setBiStreamClient(biStream) - println("successful to establish bidirectional stream with master, press Ctrl+c to quit.\n") + println("successful to establish bidirectional stream with master, press Ctrl+c to quit.") return nil } diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 100a7bca..869244dd 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -10,12 +10,10 @@ import ( "sync/atomic" "time" + "github.com/go-errors/errors" "github.com/httprunner/httprunner/v4/hrp/internal/boomer/grpc/messager" "github.com/httprunner/httprunner/v4/hrp/internal/builtin" "github.com/jinzhu/copier" - - "github.com/go-errors/errors" - "github.com/olekukonko/tablewriter" "github.com/rs/zerolog/log" ) @@ -200,15 +198,15 @@ type runner struct { controller *Controller loop *Loop // specify loop count for testcase, count = loopCount * spawnCount - // rebalance spawn + // dynamically balance boomer running parameters rebalance chan bool - // all running workers(goroutines) will select on this channel. - // close this channel will stop all running workers. + // stop signals the run goroutine should shutdown. stopChan chan bool - + // all running workers(goroutines) will select on this channel. + // stopping is closed by run goroutine on shutdown. stoppingChan chan bool - + // done is closed when all goroutines from start() complete. doneChan chan bool reportChan chan bool @@ -216,10 +214,10 @@ type runner struct { // close this channel will stop all goroutines used in runner. closeChan chan bool - // wgMu blocks concurrent waitgroup mutation while server stopping + // wgMu blocks concurrent waitgroup mutation while boomer stopping wgMu sync.RWMutex - // wg is used to wait for the goroutines that depends on the server state - // to exit when stopping the server. 
+ // wg is used to wait for all running workers(goroutines) that depends on the boomer state + // to exit when stopping the boomer. wg sync.WaitGroup outputs []Output @@ -544,15 +542,20 @@ func (r *runner) statsStart() { func (r *runner) stop() { // stop previous goroutines without blocking // those goroutines will exit when r.safeRun returns - r.Stop() + r.gracefulStop() if r.rateLimitEnabled { r.rateLimiter.Stop() } r.updateState(StateStopped) } -// HardStop stops the server without coordination with other members in the cluster. -func (r *runner) hardStop() { +// gracefulStop stops the boomer gracefully, and shuts down the running goroutine. +// gracefulStop should be called after a start(), otherwise it will block forever. +// When stopping leader, Stop transfers its leadership to one of its peers +// before stopping the boomer. +// gracefulStop terminates the boomer and performs any necessary finalization. +// Do and Process cannot be called after Stop has been invoked. +func (r *runner) gracefulStop() { select { case r.stopChan <- true: case <-r.doneChan: @@ -561,30 +564,16 @@ func (r *runner) hardStop() { <-r.doneChan } -// Stop stops the server gracefully, and shuts down the running goroutine. -// Stop should be called after a Start(s), otherwise it will block forever. -// When stopping leader, Stop transfers its leadership to one of its peers -// before stopping the server. -// Stop terminates the Server and performs any necessary finalization. -// Do and Process cannot be called after Stop has been invoked. -func (r *runner) Stop() { - r.hardStop() -} +// StopNotify returns a channel that receives a bool type value +// when the runner is stopped. +func (r *runner) StopNotify() <-chan bool { return r.doneChan } -// StopNotify returns a channel that receives a empty struct -// when the server is stopped. 
-func (r *runner) StopNotify() <-chan bool { return r.stopChan } - -// DoneNotify returns a channel that receives a empty struct -// when the server is stopped. -func (r *runner) DoneNotify() <-chan bool { return r.doneChan } - -// StoppingNotify returns a channel that receives a empty struct -// when the server is being stopped. +// StoppingNotify returns a channel that receives a bool type value +// when the runner is being stopped. func (r *runner) StoppingNotify() <-chan bool { return r.stoppingChan } -// RebalanceNotify returns a channel that receives a empty struct -// when the server is being stopped. +// RebalanceNotify returns a channel that receives a bool type value +// when the runner is being rebalance. func (r *runner) RebalanceNotify() <-chan bool { return r.rebalance } func (r *runner) getState() int32 { @@ -758,6 +747,7 @@ func (r *workerRunner) onMessage(msg *genericMessage) { r.onSpawnMessage(msg) case "quit": if r.ignoreQuit { + log.Warn().Msg("master already quit, waiting to reconnect master.") break } r.close() @@ -777,6 +767,7 @@ func (r *workerRunner) onMessage(msg *genericMessage) { case "quit": r.stop() if r.ignoreQuit { + log.Warn().Msg("master already quit, waiting to reconnect master.") break } r.close() @@ -788,6 +779,7 @@ func (r *workerRunner) onMessage(msg *genericMessage) { r.onSpawnMessage(msg) case "quit": if r.ignoreQuit { + log.Warn().Msg("master already quit, waiting to reconnect master.") break } r.close() @@ -815,13 +807,13 @@ func (r *workerRunner) startListener() { // run worker service func (r *workerRunner) run() { - println("\n========================= HttpRunner Worker for Distributed Load Testing ========================= ") + println("==================== HttpRunner Worker for Distributed Load Testing ==================== ") r.updateState(StateInit) r.client = newClient(r.masterHost, r.masterPort, r.nodeID) println(fmt.Sprintf("ready to connect master to %s:%d", r.masterHost, r.masterPort)) err := r.client.start() if 
err != nil { - log.Error().Err(err).Msg(fmt.Sprintf("failed to connect to master(%s:%d) with error %v\n", r.masterHost, r.masterPort)) + log.Error().Err(err).Msg(fmt.Sprintf("failed to connect to master(%s:%d)", r.masterHost, r.masterPort)) } if err = r.client.register(r.client.config.ctx); err != nil { @@ -904,7 +896,7 @@ func (r *workerRunner) start() { r.rateLimiter.Start() } - r.once.Do(r.outputOnStart) + r.outputOnStart() go r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stoppingChan, r.spawnComplete) diff --git a/hrp/internal/boomer/runner_test.go b/hrp/internal/boomer/runner_test.go index 8c139d58..2a250800 100644 --- a/hrp/internal/boomer/runner_test.go +++ b/hrp/internal/boomer/runner_test.go @@ -126,13 +126,13 @@ func TestStopNotify(t *testing.T) { close(r.doneChan) }() - notifier := r.DoneNotify() + notifier := r.StopNotify() select { case <-notifier: t.Fatalf("received unexpected stop notification") default: } - r.Stop() + r.gracefulStop() select { case <-notifier: default: diff --git a/hrp/server.go b/hrp/server.go index 1f811bd7..a42d9a33 100644 --- a/hrp/server.go +++ b/hrp/server.go @@ -163,7 +163,7 @@ func (api *apiHandler) Index(w http.ResponseWriter, r *http.Request) { http.Error(w, "Not Found", http.StatusNotFound) return } - w.Header().Set("Content-Security-Policy", "default-src 'self'; style-src 'self' 'unsafe-inline'; img-src 'self' camo.githubusercontent.com") + w.Header().Set("Content-Security-Policy", "default-src 'self'; style-src 'self' 'unsafe-inline'; img-src 'self' www.httprunner.com") fmt.Fprintf(w, "Welcome to httprunner page!") } @@ -315,23 +315,26 @@ func (api *apiHandler) Handler() http.Handler { func (apiHandler) ServeHTTP(http.ResponseWriter, *http.Request) {} -func (b *HRPBoomer) StartServer() { +func (b *HRPBoomer) StartServer(ctx context.Context, addr string) { h := b.NewAPIHandler() mux := h.Handler() server := &http.Server{ - Addr: ":9771", + Addr: addr, Handler: mux, } go func() { - <-b.GetCloseChan() + select { + 
case <-ctx.Done(): + case <-b.GetCloseChan(): + } if err := server.Shutdown(context.Background()); err != nil { log.Fatal("shutdown server:", err) } }() - log.Println("Starting HTTP server...") + log.Println(fmt.Sprintf("starting HTTP server (%v), please use the API to control master", server.Addr)) err := server.ListenAndServe() if err != nil { if err == http.ErrServerClosed { From 2cdebbf448497f568a77982b995e2b8dd72e8701 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Mon, 11 Jul 2022 12:31:52 +0800 Subject: [PATCH 16/31] fix: unittest --- examples/demo-with-py-plugin/.debugtalk_gen.py | 2 +- hrp/cmd/boom.go | 8 ++++---- hrp/internal/boomer/output.go | 2 +- hrp/internal/boomer/runner.go | 9 ++++++--- hrp/internal/boomer/runner_test.go | 2 +- hrp/internal/boomer/ulimit.go | 1 + hrp/internal/boomer/ulimit_windows.go | 1 + 7 files changed, 15 insertions(+), 10 deletions(-) diff --git a/examples/demo-with-py-plugin/.debugtalk_gen.py b/examples/demo-with-py-plugin/.debugtalk_gen.py index 9588e35e..dbf8aed9 100644 --- a/examples/demo-with-py-plugin/.debugtalk_gen.py +++ b/examples/demo-with-py-plugin/.debugtalk_gen.py @@ -1,4 +1,4 @@ -# NOTE: Generated By hrp v4.1.5, DO NOT EDIT! +# NOTE: Generated By hrp v4.1.6, DO NOT EDIT! 
import sys import os diff --git a/hrp/cmd/boom.go b/hrp/cmd/boom.go index a13232c3..6b13bdcf 100644 --- a/hrp/cmd/boom.go +++ b/hrp/cmd/boom.go @@ -1,13 +1,13 @@ package cmd import ( - "golang.org/x/net/context" "os" "strings" "time" "github.com/rs/zerolog/log" "github.com/spf13/cobra" + "golang.org/x/net/context" "github.com/httprunner/httprunner/v4/hrp" "github.com/httprunner/httprunner/v4/hrp/internal/boomer" @@ -39,7 +39,7 @@ var boomCmd = &cobra.Command{ // if set profile, the priority is higher than the other commands if boomArgs.profile != "" { - err := builtin.LoadFile(boomArgs.profile, &boomArgs.profile) + err := builtin.LoadFile(boomArgs.profile, &boomArgs.Profile) if err != nil { log.Error().Err(err).Msg("failed to load profile") os.Exit(1) @@ -134,9 +134,9 @@ func init() { boomCmd.Flags().BoolVar(&boomArgs.ignoreQuit, "ignore-quit", false, "ignores quit from master (only when --worker is used)") boomCmd.Flags().StringVar(&boomArgs.masterHost, "master-host", "127.0.0.1", "Host or IP address of hrp master for distributed load testing.") boomCmd.Flags().IntVar(&boomArgs.masterPort, "master-port", 5557, "The port to connect to that is used by the hrp master for distributed load testing.") - boomCmd.Flags().BoolVar(&boomArgs.autoStart, "autostart", false, "Starts the test immediately (without disabling the web UI). Use --spawn-count and --spawn-rate to control user count and increase rate") + boomCmd.Flags().BoolVar(&boomArgs.autoStart, "auto-start", false, "Starts the test immediately. 
Use --spawn-count and --spawn-rate to control user count and increase rate") boomCmd.Flags().IntVar(&boomArgs.expectWorkers, "expect-workers", 1, "How many workers master should expect to connect before starting the test (only when --autostart is used)") - boomCmd.Flags().IntVar(&boomArgs.expectWorkersMaxWait, "expect-workers-max-wait", 0, "How many workers master should expect to connect before starting the test (only when --autostart is used") + boomCmd.Flags().IntVar(&boomArgs.expectWorkersMaxWait, "expect-workers-max-wait", 120, "How many workers master should expect to connect before starting the test (only when --autostart is used") } func makeHRPBoomer() *hrp.HRPBoomer { diff --git a/hrp/internal/boomer/output.go b/hrp/internal/boomer/output.go index 152f0768..72e185b6 100644 --- a/hrp/internal/boomer/output.go +++ b/hrp/internal/boomer/output.go @@ -392,7 +392,7 @@ var ( gaugeState = prometheus.NewGauge( prometheus.GaugeOpts{ Name: "state", - Help: "The current runner state, 1=initializing, 2=spawning, 3=running, 4=quitting, 5=stopped", + Help: "The current runner state, 1=initializing, 2=spawning, 3=running, 4=stopping, 5=stopped, 6=quitting, 7=missing", }, ) gaugeDuration = prometheus.NewGauge( diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 869244dd..c6e87937 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -43,7 +43,7 @@ func getStateName(state int32) (stateName string) { case StateQuitting: stateName = "quitting" case StateMissing: - stateName = "stopped" + stateName = "missing" } return } @@ -840,9 +840,12 @@ func (r *workerRunner) run() { log.Warn().Msg("Timeout waiting for sending quit message to master, boomer will quit any way.") } - if err = r.client.signOut(r.client.config.ctx); err != nil { - log.Error().Err(err).Msg("failed to sign out") + if r.getState() != StateMissing { + if err = r.client.signOut(r.client.config.ctx); err != nil { + log.Error().Err(err).Msg("failed to sign 
out") + } } + r.client.close() } }() diff --git a/hrp/internal/boomer/runner_test.go b/hrp/internal/boomer/runner_test.go index 2a250800..67089c1f 100644 --- a/hrp/internal/boomer/runner_test.go +++ b/hrp/internal/boomer/runner_test.go @@ -196,7 +196,7 @@ func TestSpawnWorkersWithManyTasks(t *testing.T) { const numToSpawn int64 = 30 go runner.spawnWorkers(numToSpawn, float64(numToSpawn), runner.stopChan, runner.spawnComplete) - time.Sleep(2 * time.Second) + time.Sleep(3 * time.Second) currentClients := runner.controller.getCurrentClientsNum() diff --git a/hrp/internal/boomer/ulimit.go b/hrp/internal/boomer/ulimit.go index 504a534d..40f0c0cc 100644 --- a/hrp/internal/boomer/ulimit.go +++ b/hrp/internal/boomer/ulimit.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package boomer diff --git a/hrp/internal/boomer/ulimit_windows.go b/hrp/internal/boomer/ulimit_windows.go index 76ca69fc..d02840dc 100644 --- a/hrp/internal/boomer/ulimit_windows.go +++ b/hrp/internal/boomer/ulimit_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package boomer From 476b3ab9d9064714d0cbf3766906bdbb3e7be7dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Tue, 12 Jul 2022 21:12:33 +0800 Subject: [PATCH 17/31] fix: http server and grpc with cert --- .../demo-with-py-plugin/.debugtalk_gen.py | 2 +- hrp/internal/boomer/data/data.go | 50 +++-- hrp/server.go | 183 ++++++++++-------- 3 files changed, 137 insertions(+), 98 deletions(-) diff --git a/examples/demo-with-py-plugin/.debugtalk_gen.py b/examples/demo-with-py-plugin/.debugtalk_gen.py index dbf8aed9..50f50e5f 100644 --- a/examples/demo-with-py-plugin/.debugtalk_gen.py +++ b/examples/demo-with-py-plugin/.debugtalk_gen.py @@ -20,4 +20,4 @@ if __name__ == "__main__": funppy.register("concatenate", concatenate) funppy.register("setup_hook_example", setup_hook_example) funppy.register("teardown_hook_example", teardown_hook_example) - funppy.serve() \ No newline at end of file + funppy.serve() diff 
--git a/hrp/internal/boomer/data/data.go b/hrp/internal/boomer/data/data.go index c583755f..9e0a21ea 100644 --- a/hrp/internal/boomer/data/data.go +++ b/hrp/internal/boomer/data/data.go @@ -15,30 +15,48 @@ * */ -// Package data provides convenience routines to access files in the data -// directory. package data import ( + "embed" + "os" "path/filepath" - "runtime" + + "github.com/httprunner/httprunner/v4/hrp/internal/builtin" ) -// basepath is the root directory of this package. -var basepath string +// hrpPath is .hrp directory under the user directory. +var hrpPath string + +//go:embed x509/* +var x509Dir embed.FS func init() { - _, currentFile, _, _ := runtime.Caller(0) - basepath = filepath.Dir(currentFile) -} - -// Path returns the absolute path the given relative file or directory path, -// relative to the google.golang.org/grpc/examples/data directory in the -// user's GOPATH. If rel is already absolute, it is returned unmodified. -func Path(rel string) string { - if filepath.IsAbs(rel) { - return rel + home, err := os.UserHomeDir() + if err != nil { + return } + hrpPath = filepath.Join(home, ".hrp") + _ = builtin.EnsureFolderExists(filepath.Join(hrpPath, "x509")) - return filepath.Join(basepath, rel) +} + +// Path returns the absolute path the given relative file or directory path +func Path(rel string) (destPath string) { + destPath = rel + if !filepath.IsAbs(rel) { + destPath = filepath.Join(hrpPath, rel) + } + if !builtin.IsFilePathExists(destPath) { + content, err := x509Dir.ReadFile(rel) + if err != nil { + return + } + + err = os.WriteFile(destPath, content, 0o644) + if err != nil { + return + } + } + return } diff --git a/hrp/server.go b/hrp/server.go index a42d9a33..cb1c07b9 100644 --- a/hrp/server.go +++ b/hrp/server.go @@ -2,6 +2,7 @@ package hrp import ( "context" + "errors" "fmt" "io/ioutil" "log" @@ -52,17 +53,13 @@ func parseBody(r *http.Request) (data map[string]interface{}, err error) { return data, nil } -func writeResponse(w 
http.ResponseWriter, status int, contentType string, body []byte) { - w.Header().Set("Content-Type", contentType) +func writeJSON(w http.ResponseWriter, body []byte, status int) { + w.Header().Set("Content-Type", jsonContentType) w.Header().Set("Content-Length", fmt.Sprintf("%d", len(body))) w.WriteHeader(status) w.Write(body) } -func writeJSON(w http.ResponseWriter, body []byte, status int) { - writeResponse(w, status, jsonContentType, body) -} - type ServerCode int // server response code @@ -169,48 +166,100 @@ func (api *apiHandler) Index(w http.ResponseWriter, r *http.Request) { func (api *apiHandler) Start(w http.ResponseWriter, r *http.Request) { var resp *CommonResponseBody - data, err := parseBody(r) + var err error + defer func() { + if err != nil { + resp = &CommonResponseBody{ + ServerStatus: EnumAPIResponseServerError(err.Error()), + } + } else { + resp = &CommonResponseBody{ + ServerStatus: EnumAPIResponseSuccess, + } + } + body, _ := json.Marshal(resp) + writeJSON(w, body, http.StatusOK) + }() + // parse body + data, err := parseBody(r) + if err != nil { + return + } req := StartRequestBody{ Profile: *api.boomer.GetProfile(), } err = mapstructure.Decode(data, &req) + if err != nil { + return + } + + // recognize invalid parameters if len(req.Other) > 0 { keys := make([]string, 0, len(req.Other)) for k := range req.Other { keys = append(keys, k) } - resp = &CommonResponseBody{ - ServerStatus: EnumAPIResponseParamError(fmt.Sprintf("failed to recognize params: %v", keys)), - } - body, _ := json.Marshal(resp) - writeJSON(w, body, http.StatusOK) + err = errors.New(fmt.Sprintf("failed to recognize params: %v", keys)) return } + + // parse testcase path if req.TestCasePath == "" { - resp = &CommonResponseBody{ - ServerStatus: EnumAPIResponseParamError(fmt.Sprint("missing testcases path")), - } - body, _ := json.Marshal(resp) - writeJSON(w, body, http.StatusOK) + err = errors.New("missing testcases path") return } paths := strings.Split(req.TestCasePath, ",") 
+ + // set testcase path api.boomer.SetTestCasesPath(paths) - if err == nil { - err = api.boomer.Start(&req.Profile) - } + + // start boomer with profile + err = api.boomer.Start(&req.Profile) +} + +func (api *apiHandler) ReBalance(w http.ResponseWriter, r *http.Request) { + var resp *CommonResponseBody + var err error + defer func() { + if err != nil { + resp = &CommonResponseBody{ + ServerStatus: EnumAPIResponseServerError(err.Error()), + } + } else { + resp = &CommonResponseBody{ + ServerStatus: EnumAPIResponseSuccess, + } + } + body, _ := json.Marshal(resp) + writeJSON(w, body, http.StatusOK) + }() + + // parse body + data, err := parseBody(r) if err != nil { - resp = &CommonResponseBody{ - ServerStatus: EnumAPIResponseServerError(err.Error()), - } - } else { - resp = &CommonResponseBody{ - ServerStatus: EnumAPIResponseSuccess, - } + return } - body, _ := json.Marshal(resp) - writeJSON(w, body, http.StatusOK) + req := RebalanceRequestBody{ + Profile: *api.boomer.GetProfile(), + } + err = mapstructure.Decode(data, &req) + if err != nil { + return + } + + // recognize invalid parameters + if len(req.Other) > 0 { + keys := make([]string, 0, len(req.Other)) + for k := range req.Other { + keys = append(keys, k) + } + err = errors.New(fmt.Sprintf("failed to recognize params: %v", keys)) + return + } + + // rebalance boomer with profile + err = api.boomer.ReBalance(&req.Profile) } func (api *apiHandler) Stop(w http.ResponseWriter, r *http.Request) { @@ -223,18 +272,23 @@ func (api *apiHandler) Stop(w http.ResponseWriter, r *http.Request) { } var resp *CommonResponseBody - err := api.boomer.Stop() - if err != nil { - resp = &CommonResponseBody{ - ServerStatus: EnumAPIResponseStopError(err.Error()), + var err error + defer func() { + if err != nil { + resp = &CommonResponseBody{ + ServerStatus: EnumAPIResponseStopError(err.Error()), + } + } else { + resp = &CommonResponseBody{ + ServerStatus: EnumAPIResponseSuccess, + } } - } else { - resp = &CommonResponseBody{ - 
ServerStatus: EnumAPIResponseSuccess, - } - } - body, _ := json.Marshal(resp) - writeJSON(w, body, http.StatusOK) + body, _ := json.Marshal(resp) + writeJSON(w, body, http.StatusOK) + }() + + // stop boomer + err = api.boomer.Stop() } func (api *apiHandler) Quit(w http.ResponseWriter, r *http.Request) { @@ -245,49 +299,16 @@ func (api *apiHandler) Quit(w http.ResponseWriter, r *http.Request) { data[k] = v } } - - resp := &CommonResponseBody{ - ServerStatus: EnumAPIResponseSuccess, - } - body, _ := json.Marshal(resp) - writeJSON(w, body, http.StatusOK) - api.boomer.Quit() -} - -func (api *apiHandler) ReBalance(w http.ResponseWriter, r *http.Request) { - var resp *CommonResponseBody - data, err := parseBody(r) - - req := RebalanceRequestBody{ - Profile: *api.boomer.GetProfile(), - } - err = mapstructure.Decode(data, &req) - if len(req.Other) > 0 { - keys := make([]string, 0, len(req.Other)) - for k := range req.Other { - keys = append(keys, k) - } - resp = &CommonResponseBody{ - ServerStatus: EnumAPIResponseParamError(fmt.Sprintf("failed to recognize params: %v", keys)), + defer func() { + resp := &CommonResponseBody{ + ServerStatus: EnumAPIResponseSuccess, } body, _ := json.Marshal(resp) writeJSON(w, body, http.StatusOK) - return - } - if err == nil { - err = api.boomer.ReBalance(&req.Profile) - } - if err != nil { - resp = &CommonResponseBody{ - ServerStatus: EnumAPIResponseParamError(err.Error()), - } - } else { - resp = &CommonResponseBody{ - ServerStatus: EnumAPIResponseSuccess, - } - } - body, _ := json.Marshal(resp) - writeJSON(w, body, http.StatusOK) + }() + + // quit boomer + api.boomer.Quit() } func (api *apiHandler) GetWorkersInfo(w http.ResponseWriter, r *http.Request) { @@ -305,9 +326,9 @@ func (api *apiHandler) Handler() http.Handler { mux.HandleFunc("/", methods(api.Index, "GET")) mux.HandleFunc("/start", methods(api.Start, "POST")) + mux.HandleFunc("/rebalance", methods(api.ReBalance, "POST")) mux.HandleFunc("/stop", methods(api.Stop, "GET")) 
mux.HandleFunc("/quit", methods(api.Quit, "GET")) - mux.HandleFunc("/rebalance", methods(api.ReBalance, "POST")) mux.HandleFunc("/workers", methods(api.GetWorkersInfo, "GET")) return mux From 11c5a754428aa2f24b22973a433178938b70a3f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Tue, 12 Jul 2022 22:01:12 +0800 Subject: [PATCH 18/31] feat: get client export ip on the server --- hrp/internal/boomer/runner.go | 19 ++++++++++--------- hrp/internal/boomer/server_grpc.go | 14 ++++++++++---- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index c6e87937..ffbcce72 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -840,7 +840,7 @@ func (r *workerRunner) run() { log.Warn().Msg("Timeout waiting for sending quit message to master, boomer will quit any way.") } - if r.getState() != StateMissing { + if atomic.LoadInt32(&r.client.failCount) < 2 { if err = r.client.signOut(r.client.config.ctx); err != nil { log.Error().Err(err).Msg("failed to sign out") } @@ -1293,17 +1293,18 @@ func (r *masterRunner) reportStats() { println(fmt.Sprintf("Current time: %s, State: %v, Current Available Workers: %v, Target Users: %v", currentTime.Format("2006/01/02 15:04:05"), getStateName(r.getState()), r.server.getClientsLength(), r.getSpawnCount())) table := tablewriter.NewWriter(os.Stdout) - table.SetHeader([]string{"Worker ID", "State", "Current Users", "CPU Usage", "CPU Warning Emitted", "Memory Usage", "Heartbeat"}) + table.SetHeader([]string{"Worker ID", "IP", "State", "Current Users", "CPU Usage", "CPU Warning Emitted", "Memory Usage", "Heartbeat"}) for _, worker := range r.server.getAllWorkers() { - row := make([]string, 7) + row := make([]string, 8) row[0] = worker.ID - row[1] = fmt.Sprintf("%v", getStateName(worker.getState())) - row[2] = fmt.Sprintf("%v", worker.getSpawnCount()) - row[3] = fmt.Sprintf("%v", worker.getCPUUsage()) - row[4] = fmt.Sprintf("%v", 
worker.getCPUWarningEmitted()) - row[5] = fmt.Sprintf("%v", worker.getMemoryUsage()) - row[6] = fmt.Sprintf("%v", worker.getHeartbeat()) + row[1] = worker.IP + row[2] = fmt.Sprintf("%v", getStateName(worker.getState())) + row[3] = fmt.Sprintf("%v", worker.getSpawnCount()) + row[4] = fmt.Sprintf("%v", worker.getCPUUsage()) + row[5] = fmt.Sprintf("%v", worker.getCPUWarningEmitted()) + row[6] = fmt.Sprintf("%v", worker.getMemoryUsage()) + row[7] = fmt.Sprintf("%v", worker.getHeartbeat()) table.Append(row) } table.Render() diff --git a/hrp/internal/boomer/server_grpc.go b/hrp/internal/boomer/server_grpc.go index 0c2f36f5..8702ef74 100644 --- a/hrp/internal/boomer/server_grpc.go +++ b/hrp/internal/boomer/server_grpc.go @@ -13,6 +13,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/reflection" "google.golang.org/grpc/status" @@ -23,6 +24,7 @@ import ( type WorkerNode struct { ID string `json:"id"` + IP string `json:"ip"` State int32 `json:"state"` Heartbeat int32 `json:"heartbeat"` SpawnCount int64 `json:"spawn_count"` @@ -34,9 +36,9 @@ type WorkerNode struct { disconnectedChan chan bool } -func newWorkerNode(id string) *WorkerNode { +func newWorkerNode(id, ip string) *WorkerNode { stream := make(chan *messager.StreamResponse, 100) - return &WorkerNode{State: StateInit, ID: id, Heartbeat: 3, stream: stream, disconnectedChan: make(chan bool)} + return &WorkerNode{State: StateInit, ID: id, IP: ip, Heartbeat: 3, stream: stream, disconnectedChan: make(chan bool)} } func (w *WorkerNode) getState() int32 { @@ -116,6 +118,7 @@ func (w *WorkerNode) getWorkerInfo() WorkerNode { defer w.mutex.RUnlock() return WorkerNode{ ID: w.ID, + IP: w.IP, State: w.getState(), Heartbeat: w.getHeartbeat(), SpawnCount: w.getSpawnCount(), @@ -257,9 +260,12 @@ func (s *grpcServer) start() (err error) { return nil } -func (s *grpcServer) Register(_ context.Context, req 
*messager.RegisterRequest) (*messager.RegisterResponse, error) { +func (s *grpcServer) Register(ctx context.Context, req *messager.RegisterRequest) (*messager.RegisterResponse, error) { + // get client ip + p, _ := peer.FromContext(ctx) + clientIp := strings.Split(p.Addr.String(), ":")[0] // store worker information - wn := newWorkerNode(req.NodeID) + wn := newWorkerNode(req.NodeID, clientIp) s.clients.Store(req.NodeID, wn) log.Warn().Str("worker id", req.NodeID).Msg("worker joined") return &messager.RegisterResponse{Code: "0", Message: "register successfully"}, nil From c381062a8dbb2cfa50993fd72065ce525a53ff89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Wed, 13 Jul 2022 16:45:43 +0800 Subject: [PATCH 19/31] fix: remove unnecessary content --- hrp/internal/boomer/client_grpc.go | 3 --- hrp/internal/boomer/runner.go | 34 +++++++++++------------------- hrp/internal/boomer/runner_test.go | 2 +- hrp/internal/boomer/server_grpc.go | 3 +-- 4 files changed, 14 insertions(+), 28 deletions(-) diff --git a/hrp/internal/boomer/client_grpc.go b/hrp/internal/boomer/client_grpc.go index f96f789e..17310e42 100644 --- a/hrp/internal/boomer/client_grpc.go +++ b/hrp/internal/boomer/client_grpc.go @@ -33,8 +33,6 @@ type grpcClient struct { shutdownChan chan bool failCount int32 - - wg *sync.WaitGroup } type grpcClientConfig struct { @@ -152,7 +150,6 @@ func newClient(masterHost string, masterPort int, identity string) (client *grpc ctxCancel: cancel, mutex: sync.RWMutex{}, }, - wg: &sync.WaitGroup{}, } return client } diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index ffbcce72..933a2a72 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -208,8 +208,8 @@ type runner struct { stoppingChan chan bool // done is closed when all goroutines from start() complete. 
doneChan chan bool - - reportChan chan bool + // when this channel is closed, all statistics are reported successfully + reportedChan chan bool // close this channel will stop all goroutines used in runner. closeChan chan bool @@ -221,8 +221,6 @@ type runner struct { wg sync.WaitGroup outputs []Output - - once *sync.Once } func (r *runner) setSpawnRate(spawnRate float64) { @@ -362,7 +360,7 @@ func (r *runner) reset() { r.rebalance = make(chan bool) r.stoppingChan = make(chan bool) r.doneChan = make(chan bool) - r.reportChan = make(chan bool) + r.reportedChan = make(chan bool) } func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan bool, spawnCompleteFunc func()) { @@ -449,13 +447,13 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo // goAttach creates a goroutine on a given function and tracks it using // the runner waitgroup. -// The passed function should interrupt on r.StoppingNotify(). +// The passed function should interrupt on r.stoppingNotify(). func (r *runner) goAttach(f func()) { r.wgMu.RLock() // this blocks with ongoing close(s.stopping) defer r.wgMu.RUnlock() select { case <-r.stoppingChan: - log.Warn().Msg("server has stopped; skipping GoAttach") + log.Warn().Msg("runner has stopped; skipping GoAttach") return default: } @@ -530,8 +528,9 @@ func (r *runner) statsStart() { // report stats case <-ticker.C: r.reportStats() + // close reportedChan and return if the last stats is reported successfully if !r.isStarted() { - close(r.reportChan) + close(r.reportedChan) log.Info().Msg("Quitting statsStart") return } @@ -564,17 +563,9 @@ func (r *runner) gracefulStop() { <-r.doneChan } -// StopNotify returns a channel that receives a bool type value +// stopNotify returns a channel that receives a bool type value // when the runner is stopped. 
-func (r *runner) StopNotify() <-chan bool { return r.doneChan } - -// StoppingNotify returns a channel that receives a bool type value -// when the runner is being stopped. -func (r *runner) StoppingNotify() <-chan bool { return r.stoppingChan } - -// RebalanceNotify returns a channel that receives a bool type value -// when the runner is being rebalance. -func (r *runner) RebalanceNotify() <-chan bool { return r.rebalance } +func (r *runner) stopNotify() <-chan bool { return r.doneChan } func (r *runner) getState() int32 { return atomic.LoadInt32(&r.state) @@ -606,7 +597,6 @@ func newLocalRunner(spawnCount int64, spawnRate float64) *localRunner { outputs: make([]Output, 0), stopChan: make(chan bool), closeChan: make(chan bool), - once: &sync.Once{}, wg: sync.WaitGroup{}, wgMu: sync.RWMutex{}, }, @@ -638,7 +628,8 @@ func (r *localRunner) start() { r.updateState(StateStopping) - <-r.reportChan + // wait until all stats are reported successfully + <-r.reportedChan // report test result r.reportTestResult() @@ -687,7 +678,6 @@ func newWorkerRunner(masterHost string, masterPort int) (r *workerRunner) { controller: &Controller{}, stopChan: make(chan bool), closeChan: make(chan bool), - once: &sync.Once{}, }, masterHost: masterHost, masterPort: masterPort, @@ -914,7 +904,7 @@ func (r *workerRunner) start() { r.updateState(StateStopping) - <-r.reportChan + <-r.reportedChan r.reportTestResult() r.outputOnStop() diff --git a/hrp/internal/boomer/runner_test.go b/hrp/internal/boomer/runner_test.go index 67089c1f..4293103b 100644 --- a/hrp/internal/boomer/runner_test.go +++ b/hrp/internal/boomer/runner_test.go @@ -126,7 +126,7 @@ func TestStopNotify(t *testing.T) { close(r.doneChan) }() - notifier := r.StopNotify() + notifier := r.stopNotify() select { case <-notifier: t.Fatalf("received unexpected stop notification") diff --git a/hrp/internal/boomer/server_grpc.go b/hrp/internal/boomer/server_grpc.go index 8702ef74..b9d61cc6 100644 --- a/hrp/internal/boomer/server_grpc.go 
+++ b/hrp/internal/boomer/server_grpc.go @@ -133,13 +133,11 @@ type grpcServer struct { masterHost string masterPort int server *grpc.Server - secure bool clients *sync.Map fromWorker chan *genericMessage disconnectedChan chan bool shutdownChan chan bool - wg *sync.WaitGroup } var ( @@ -148,6 +146,7 @@ var ( ) func logger(format string, a ...interface{}) { + // FIXME: support server-side and client-side logging to files log.Info().Msg(fmt.Sprintf(format, a...)) } From 846fb3e46b9e8e0dc6b7951235ddaee24a9323fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Thu, 14 Jul 2022 10:50:28 +0800 Subject: [PATCH 20/31] fix: mask error logging that worker fails to sign out --- hrp/internal/boomer/runner.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 933a2a72..3bd2a9b4 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -830,10 +830,8 @@ func (r *workerRunner) run() { log.Warn().Msg("Timeout waiting for sending quit message to master, boomer will quit any way.") } - if atomic.LoadInt32(&r.client.failCount) < 2 { - if err = r.client.signOut(r.client.config.ctx); err != nil { - log.Error().Err(err).Msg("failed to sign out") - } + if err = r.client.signOut(r.client.config.ctx); err != nil { + log.Info().Err(err).Msg("failed to sign out") } r.client.close() From c4919451663f15bab6ca38ce93b396df83c7267c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Thu, 14 Jul 2022 13:22:11 +0800 Subject: [PATCH 21/31] fix: cpu and memory usage --- hrp/internal/boomer/runner.go | 29 +++++++++++++++++++---------- hrp/internal/boomer/utils.go | 35 +++++++++++++++++++++++++++++++++-- 2 files changed, 52 insertions(+), 12 deletions(-) diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 3bd2a9b4..9867a1d3 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -806,6 +806,7 @@ func (r 
*workerRunner) run() { log.Error().Err(err).Msg(fmt.Sprintf("failed to connect to master(%s:%d)", r.masterHost, r.masterPort)) } + // register worker information to master if err = r.client.register(r.client.config.ctx); err != nil { log.Error().Err(err).Msg("failed to register") } @@ -819,6 +820,7 @@ func (r *workerRunner) run() { go r.client.send() defer func() { + // wait for goroutines before closing r.wg.Wait() var ticker = time.NewTicker(1 * time.Second) @@ -830,10 +832,12 @@ func (r *workerRunner) run() { log.Warn().Msg("Timeout waiting for sending quit message to master, boomer will quit any way.") } + // sign out from master if err = r.client.signOut(r.client.config.ctx); err != nil { log.Info().Err(err).Msg("failed to sign out") } + // close grpc client r.client.close() } }() @@ -864,10 +868,12 @@ func (r *workerRunner) run() { } } CPUUsage := GetCurrentCPUUsage() + MemoryUsage := GetCurrentMemoryUsage() data := map[string]int64{ - "state": int64(r.getState()), - "current_cpu_usage": int64(CPUUsage), - "spawn_count": r.controller.getCurrentClientsNum(), + "state": int64(r.getState()), + "current_cpu_usage": int64(CPUUsage), // percentage + "current_memory_usage": int64(MemoryUsage), // percentage + "current_users": r.controller.getCurrentClientsNum(), } r.client.sendChannel() <- newGenericMessage("heartbeat", data, r.nodeID) case <-r.closeChan: @@ -1040,8 +1046,11 @@ func (r *masterRunner) clientListener() { if workerInfo.getCPUUsage() != float64(msg.Data["current_cpu_usage"]) { workerInfo.updateCPUUsage(float64(msg.Data["current_cpu_usage"])) } - if workerInfo.getSpawnCount() != msg.Data["spawn_count"] { - workerInfo.updateSpawnCount(msg.Data["spawn_count"]) + if workerInfo.getMemoryUsage() != float64(msg.Data["current_memory_usage"]) { + workerInfo.updateMemoryUsage(float64(msg.Data["current_memory_usage"])) + } + if workerInfo.getSpawnCount() != msg.Data["current_users"] { + workerInfo.updateSpawnCount(msg.Data["current_users"]) } case typeSpawning: 
workerInfo.setState(StateSpawning) @@ -1281,18 +1290,18 @@ func (r *masterRunner) reportStats() { println(fmt.Sprintf("Current time: %s, State: %v, Current Available Workers: %v, Target Users: %v", currentTime.Format("2006/01/02 15:04:05"), getStateName(r.getState()), r.server.getClientsLength(), r.getSpawnCount())) table := tablewriter.NewWriter(os.Stdout) - table.SetHeader([]string{"Worker ID", "IP", "State", "Current Users", "CPU Usage", "CPU Warning Emitted", "Memory Usage", "Heartbeat"}) + table.SetColMinWidth(0, 20) + table.SetColMinWidth(1, 10) + table.SetHeader([]string{"Worker ID", "IP", "State", "Current Users", "CPU Usage (%)", "Memory Usage (%)"}) for _, worker := range r.server.getAllWorkers() { - row := make([]string, 8) + row := make([]string, 6) row[0] = worker.ID row[1] = worker.IP row[2] = fmt.Sprintf("%v", getStateName(worker.getState())) row[3] = fmt.Sprintf("%v", worker.getSpawnCount()) row[4] = fmt.Sprintf("%v", worker.getCPUUsage()) - row[5] = fmt.Sprintf("%v", worker.getCPUWarningEmitted()) - row[6] = fmt.Sprintf("%v", worker.getMemoryUsage()) - row[7] = fmt.Sprintf("%v", worker.getHeartbeat()) + row[5] = fmt.Sprintf("%v", worker.getMemoryUsage()) table.Append(row) } table.Render() diff --git a/hrp/internal/boomer/utils.go b/hrp/internal/boomer/utils.go index b277078f..c18400f8 100644 --- a/hrp/internal/boomer/utils.go +++ b/hrp/internal/boomer/utils.go @@ -6,7 +6,6 @@ import ( "io" "math" "os" - "runtime" "runtime/pprof" "strings" "time" @@ -102,5 +101,37 @@ func GetCurrentCPUUsage() float64 { log.Error().Err(err).Msg(fmt.Sprintf("failed to get CPU percent\n")) return 0.0 } - return percent / float64(runtime.NumCPU()) + return percent +} + +// GetCurrentCPUPercent get the percentage of current cpu used +func GetCurrentCPUPercent() float64 { + currentPid := os.Getpid() + p, err := process.NewProcess(int32(currentPid)) + if err != nil { + log.Error().Err(err).Msg(fmt.Sprintf("failed to get CPU percent\n")) + return 0.0 + } + percent, err := 
p.Percent(time.Second) + if err != nil { + log.Error().Err(err).Msg(fmt.Sprintf("failed to get CPU percent\n")) + return 0.0 + } + return percent +} + +// GetCurrentMemoryUsage get current Memory usage +func GetCurrentMemoryUsage() float64 { + currentPid := os.Getpid() + p, err := process.NewProcess(int32(currentPid)) + if err != nil { + log.Error().Err(err).Msg(fmt.Sprintf("failed to get CPU percent\n")) + return 0.0 + } + percent, err := p.MemoryPercent() + if err != nil { + log.Error().Err(err).Msg(fmt.Sprintf("failed to get CPU percent\n")) + return 0.0 + } + return float64(percent) } From ef71d614134c5e4544571cbdce7ec711bdccc409 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Thu, 14 Jul 2022 14:26:55 +0800 Subject: [PATCH 22/31] fix: failed to evenly distribute the spawn-rate to each worker --- hrp/internal/boomer/runner.go | 31 ++++++++++++++++++++---------- hrp/internal/boomer/server_grpc.go | 1 + 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 9867a1d3..c75d6769 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -1158,24 +1158,35 @@ func (r *masterRunner) start() error { log.Error().Err(err).Msg("copy workerProfile failed") return err } - cur := 0 - ints := builtin.SplitInteger(int(r.profile.SpawnCount), numWorkers) - log.Info().Msg("send spawn data to worker") + + // spawn count + spawnCounts := builtin.SplitInteger(int(r.profile.SpawnCount), numWorkers) + + // spawn rate + spawnRate := workerProfile.SpawnRate / float64(numWorkers) + if spawnRate < 1 { + spawnRate = 1 + } + + // max RPS + maxRPSs := builtin.SplitInteger(int(workerProfile.MaxRPS), numWorkers) + r.updateState(StateSpawning) + log.Info().Msg("send spawn data to worker") + + cur := 0 r.server.clients.Range(func(key, value interface{}) bool { if workerInfo, ok := value.(*WorkerNode); ok { if workerInfo.getState() == StateQuitting || workerInfo.getState() == 
StateMissing { return true } + if workerProfile.SpawnCount > 0 { - workerProfile.SpawnCount = int64(ints[cur]) - } - if workerProfile.SpawnRate > 0 { - workerProfile.SpawnRate = workerProfile.SpawnRate / float64(numWorkers) - } - if workerProfile.MaxRPS > 0 { - workerProfile.MaxRPS = workerProfile.MaxRPS / int64(numWorkers) + workerProfile.SpawnCount = int64(spawnCounts[cur]) } + workerProfile.MaxRPS = int64(maxRPSs[cur]) + workerProfile.SpawnRate = spawnRate + workerInfo.getStream() <- &messager.StreamResponse{ Type: "spawn", Profile: ProfileToBytes(workerProfile), diff --git a/hrp/internal/boomer/server_grpc.go b/hrp/internal/boomer/server_grpc.go index b9d61cc6..08323aa6 100644 --- a/hrp/internal/boomer/server_grpc.go +++ b/hrp/internal/boomer/server_grpc.go @@ -406,6 +406,7 @@ func (s *grpcServer) close() { ctx, cancel := context.WithTimeout(context.Background(), timeout) s.stopServer(ctx) cancel() + close(s.disconnectedChan) } func (s *grpcServer) recvChannel() chan *genericMessage { From 1a5523bc2ead29b4e2705e84247392fdc111bbe6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Thu, 14 Jul 2022 15:24:43 +0800 Subject: [PATCH 23/31] fix: unittest --- hrp/internal/boomer/runner.go | 1 - hrp/internal/boomer/runner_test.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index c75d6769..675f69b9 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -383,7 +383,6 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo // spawn workers with rate limit sleepTime := time.Duration(1000000/r.controller.getSpawnRate()) * time.Microsecond time.Sleep(sleepTime) - // loop count per worker var workerLoop *Loop if r.loop != nil { diff --git a/hrp/internal/boomer/runner_test.go b/hrp/internal/boomer/runner_test.go index 4293103b..5b71e451 100644 --- a/hrp/internal/boomer/runner_test.go +++ 
b/hrp/internal/boomer/runner_test.go @@ -193,7 +193,7 @@ func TestSpawnWorkersWithManyTasks(t *testing.T) { runner.setTasks(tasks) runner.client = newClient("localhost", 5557, runner.nodeID) - const numToSpawn int64 = 30 + const numToSpawn int64 = 20 go runner.spawnWorkers(numToSpawn, float64(numToSpawn), runner.stopChan, runner.spawnComplete) time.Sleep(3 * time.Second) From a8e23ebf77ae761a635189bb44c10679341a6d30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Thu, 14 Jul 2022 19:56:27 +0800 Subject: [PATCH 24/31] fix: state machine --- hrp/internal/boomer/boomer.go | 6 +-- hrp/internal/boomer/runner.go | 74 +++++++++++++++++------------- hrp/internal/boomer/server_grpc.go | 56 ++++++++++++---------- 3 files changed, 78 insertions(+), 58 deletions(-) diff --git a/hrp/internal/boomer/boomer.go b/hrp/internal/boomer/boomer.go index 2dc40056..cfbd17ea 100644 --- a/hrp/internal/boomer/boomer.go +++ b/hrp/internal/boomer/boomer.go @@ -458,10 +458,10 @@ func (b *Boomer) RecordFailure(requestType, name string, responseTime int64, exc // Start starts to run func (b *Boomer) Start(Args *Profile) error { - if b.masterRunner.isStarted() { + if b.masterRunner.isStarting() { return errors.New("already started") } - if b.masterRunner.getState() == StateStopping { + if b.masterRunner.isStopping() { return errors.New("Please wait for all workers to finish") } b.SetSpawnCount(Args.SpawnCount) @@ -473,7 +473,7 @@ func (b *Boomer) Start(Args *Profile) error { // ReBalance starts to rebalance load test func (b *Boomer) ReBalance(Args *Profile) error { - if !b.masterRunner.isStarted() { + if !b.masterRunner.isStarting() { return errors.New("no start") } b.SetSpawnCount(Args.SpawnCount) diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 675f69b9..207c18dd 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -379,7 +379,7 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo 
log.Info().Msg("Quitting spawning workers") return default: - if r.isStarted() && r.controller.acquire() { + if r.isStarting() && r.controller.acquire() { // spawn workers with rate limit sleepTime := time.Duration(1000000/r.controller.getSpawnRate()) * time.Microsecond time.Sleep(sleepTime) @@ -436,7 +436,7 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo }) <-r.rebalance - if r.isStarted() { + if r.isStarting() { // rebalance spawn count r.controller.setSpawn(r.getSpawnCount(), r.getSpawnRate()) } @@ -528,7 +528,7 @@ func (r *runner) statsStart() { case <-ticker.C: r.reportStats() // close reportedChan and return if the last stats is reported successfully - if !r.isStarted() { + if !r.isStarting() && !r.isStopping() { close(r.reportedChan) log.Info().Msg("Quitting statsStart") return @@ -575,10 +575,14 @@ func (r *runner) updateState(state int32) { atomic.StoreInt32(&r.state, state) } -func (r *runner) isStarted() bool { +func (r *runner) isStarting() bool { return r.getState() == StateRunning || r.getState() == StateSpawning } +func (r *runner) isStopping() bool { + return r.getState() == StateStopping +} + type localRunner struct { runner @@ -618,6 +622,7 @@ func (r *localRunner) start() { defer func() { r.wgMu.Lock() // block concurrent waitgroup adds in GoAttach while stopping + r.updateState(StateStopping) close(r.stoppingChan) close(r.rebalance) r.wgMu.Unlock() @@ -625,7 +630,7 @@ func (r *localRunner) start() { // wait for goroutines before closing r.wg.Wait() - r.updateState(StateStopping) + close(r.doneChan) // wait until all stats are reported successfully <-r.reportedChan @@ -636,7 +641,6 @@ func (r *localRunner) start() { // output teardown r.outputOnStop() - close(r.doneChan) r.updateState(StateQuitting) }() @@ -647,7 +651,7 @@ func (r *localRunner) start() { } func (r *localRunner) stop() { - if r.runner.isStarted() { + if r.runner.isStarting() { r.runner.stop() } } @@ -898,6 +902,7 @@ func (r *workerRunner) 
start() { defer func() { r.wgMu.Lock() // block concurrent waitgroup adds in GoAttach while stopping + r.updateState(StateStopping) close(r.stoppingChan) close(r.rebalance) r.wgMu.Unlock() @@ -905,14 +910,12 @@ func (r *workerRunner) start() { // wait for goroutines before closing r.wg.Wait() - r.updateState(StateStopping) + close(r.doneChan) <-r.reportedChan r.reportTestResult() r.outputOnStop() - - close(r.doneChan) }() // start stats report @@ -922,7 +925,7 @@ func (r *workerRunner) start() { } func (r *workerRunner) stop() { - if r.isStarted() { + if r.isStarting() { r.runner.stop() } } @@ -1064,7 +1067,7 @@ func (r *masterRunner) clientListener() { break } workerInfo.setState(StateQuitting) - if r.isStarted() { + if r.isStarting() { if r.server.getClientsLength() > 0 { log.Warn().Str("worker id", workerInfo.ID).Msg("worker quited, ready to rebalance the load of each worker") err := r.rebalance() @@ -1092,19 +1095,18 @@ func (r *masterRunner) run() { } defer func() { - r.wgMu.Lock() // block concurrent waitgroup adds in GoAttach while stopping - close(r.stoppingChan) - r.wgMu.Unlock() + // disconnecting workers + close(r.server.disconnectedChan) - r.wg.Wait() + // waiting to close bidirectional stream + r.server.wg.Wait() + // close server r.server.close() - - close(r.doneChan) }() if r.autoStart { - r.goAttach(func() { + go func() { log.Info().Msg("auto start, waiting expected workers joined") var ticker = time.NewTicker(1 * time.Second) var tickerMaxWait = time.NewTicker(time.Duration(r.expectWorkersMaxWait) * time.Second) @@ -1129,7 +1131,7 @@ func (r *masterRunner) run() { os.Exit(1) } } - }) + }() } // listen and deal message from worker @@ -1143,7 +1145,7 @@ func (r *masterRunner) run() { func (r *masterRunner) start() error { numWorkers := r.server.getClientsLength() if numWorkers == 0 { - return errors.New("current workers: 0") + return errors.New("current available workers: 0") } // fetching testcase @@ -1205,30 +1207,40 @@ func (r *masterRunner) 
start() error { func (r *masterRunner) rebalance() error { numWorkers := r.server.getClientsLength() if numWorkers == 0 { - return errors.New("current workers: 0") + return errors.New("current available workers: 0") } workerProfile := &Profile{} if err := copier.Copy(workerProfile, r.profile); err != nil { log.Error().Err(err).Msg("copy workerProfile failed") return err } + + // spawn count + spawnCounts := builtin.SplitInteger(int(r.profile.SpawnCount), numWorkers) + + // spawn rate + spawnRate := workerProfile.SpawnRate / float64(numWorkers) + if spawnRate < 1 { + spawnRate = 1 + } + + // max RPS + maxRPSs := builtin.SplitInteger(int(workerProfile.MaxRPS), numWorkers) + cur := 0 - ints := builtin.SplitInteger(int(r.profile.SpawnCount), numWorkers) log.Info().Msg("send spawn data to worker") r.server.clients.Range(func(key, value interface{}) bool { if workerInfo, ok := value.(*WorkerNode); ok { if workerInfo.getState() == StateQuitting || workerInfo.getState() == StateMissing { return true } + if workerProfile.SpawnCount > 0 { - workerProfile.SpawnCount = int64(ints[cur]) - } - if workerProfile.SpawnRate > 0 { - workerProfile.SpawnRate = workerProfile.SpawnRate / float64(numWorkers) - } - if workerProfile.MaxRPS > 0 { - workerProfile.MaxRPS = workerProfile.MaxRPS / int64(numWorkers) + workerProfile.SpawnCount = int64(spawnCounts[cur]) } + workerProfile.MaxRPS = int64(maxRPSs[cur]) + workerProfile.SpawnRate = spawnRate + if workerInfo.getState() == StateInit { workerInfo.getStream() <- &messager.StreamResponse{ Type: "spawn", @@ -1270,7 +1282,7 @@ func (r *masterRunner) fetchTestCase() ([]byte, error) { } func (r *masterRunner) stop() error { - if r.isStarted() { + if r.isStarting() { r.updateState(StateStopping) r.server.sendBroadcasts(&genericMessage{Type: "stop", Data: map[string]int64{}}) return nil diff --git a/hrp/internal/boomer/server_grpc.go b/hrp/internal/boomer/server_grpc.go index 08323aa6..0b2943da 100644 --- a/hrp/internal/boomer/server_grpc.go +++ 
b/hrp/internal/boomer/server_grpc.go @@ -138,6 +138,8 @@ type grpcServer struct { fromWorker chan *genericMessage disconnectedChan chan bool shutdownChan chan bool + + wg sync.WaitGroup } var ( @@ -221,6 +223,7 @@ func newServer(masterHost string, masterPort int) (server *grpcServer) { fromWorker: make(chan *genericMessage, 100), disconnectedChan: make(chan bool), shutdownChan: make(chan bool), + wg: sync.WaitGroup{}, } return server } @@ -290,6 +293,8 @@ func (s *grpcServer) valid(token string) (isValid bool) { } func (s *grpcServer) BidirectionalStreamingMessage(srv messager.Message_BidirectionalStreamingMessageServer) error { + s.wg.Add(1) + defer s.wg.Done() token, ok := extractToken(srv.Context()) if !ok { return status.Error(codes.Unauthenticated, "missing token header") @@ -303,32 +308,34 @@ func (s *grpcServer) BidirectionalStreamingMessage(srv messager.Message_Bidirect go s.sendMsg(srv, token) FOR: for { - msg, err := srv.Recv() - if st, ok := status.FromError(err); ok { - switch st.Code() { - case codes.OK: - s.fromWorker <- newGenericMessage(msg.Type, msg.Data, msg.NodeID) - log.Info(). - Str("nodeID", msg.NodeID). - Str("type", msg.Type). - Interface("data", msg.Data). - Msg("receive data from worker") - case codes.Unavailable, codes.Canceled, codes.DeadlineExceeded: - s.fromWorker <- newQuitMessage(token) - break FOR - default: - log.Error().Err(err).Msg("failed to get stream from client") - break FOR + select { + case <-srv.Context().Done(): + break FOR + case <-s.disconnectedChannel(): + break FOR + default: + msg, err := srv.Recv() + if st, ok := status.FromError(err); ok { + switch st.Code() { + case codes.OK: + s.fromWorker <- newGenericMessage(msg.Type, msg.Data, msg.NodeID) + log.Info(). + Str("nodeID", msg.NodeID). + Str("type", msg.Type). + Interface("data", msg.Data). 
+ Msg("receive data from worker") + case codes.Unavailable, codes.Canceled, codes.DeadlineExceeded: + s.fromWorker <- newQuitMessage(token) + break FOR + default: + log.Error().Err(err).Msg("failed to get stream from client") + break FOR + } } } } - // disconnected to worker - select { - case <-srv.Context().Done(): - return srv.Context().Err() - case <-s.disconnectedChan: - } - log.Warn().Str("worker id", token).Msg("worker quited") + + log.Info().Str("worker id", token).Msg("bidirectional stream closed") return nil } @@ -338,6 +345,8 @@ func (s *grpcServer) sendMsg(srv messager.Message_BidirectionalStreamingMessageS select { case <-srv.Context().Done(): return + case <-s.disconnectedChannel(): + return case res := <-stream: if s, ok := status.FromError(srv.Send(res)); ok { switch s.Code() { @@ -406,7 +415,6 @@ func (s *grpcServer) close() { ctx, cancel := context.WithTimeout(context.Background(), timeout) s.stopServer(ctx) cancel() - close(s.disconnectedChan) } func (s *grpcServer) recvChannel() chan *genericMessage { From a459d6ac2f9af0bb6600db0f5c9db672cec027e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Fri, 15 Jul 2022 13:46:09 +0800 Subject: [PATCH 25/31] feat: report worker system information to the master --- hrp/internal/boomer/client_grpc.go | 6 +- .../boomer/grpc/messager/messager.pb.go | 174 ++++++++++-------- hrp/internal/boomer/grpc/proto/messager.proto | 18 +- hrp/internal/boomer/message.go | 14 +- hrp/internal/boomer/runner.go | 106 ++++++----- hrp/internal/boomer/server_grpc.go | 56 +++++- hrp/internal/boomer/utils.go | 26 ++- hrp/internal/builtin/utils.go | 38 ++++ 8 files changed, 279 insertions(+), 159 deletions(-) diff --git a/hrp/internal/boomer/client_grpc.go b/hrp/internal/boomer/client_grpc.go index 17310e42..cdf32111 100644 --- a/hrp/internal/boomer/client_grpc.go +++ b/hrp/internal/boomer/client_grpc.go @@ -3,6 +3,7 @@ package boomer import ( "context" "fmt" + "runtime" "sync" "sync/atomic" "time" @@ -181,7 +182,7 
@@ func (c *grpcClient) start() (err error) { func (c *grpcClient) register(ctx context.Context) error { ctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() - res, err := c.Register(ctx, &messager.RegisterRequest{NodeID: c.identity}) + res, err := c.Register(ctx, &messager.RegisterRequest{NodeID: c.identity, Os: runtime.GOOS, Arch: runtime.GOARCH}) if err != nil { return err } @@ -212,6 +213,9 @@ func (c *grpcClient) newBiStreamClient() (err error) { if err != nil { return err } + // reset failCount + atomic.StoreInt32(&c.failCount, 0) + // set bidirectional stream client c.config.setBiStreamClient(biStream) println("successful to establish bidirectional stream with master, press Ctrl+c to quit.") return nil diff --git a/hrp/internal/boomer/grpc/messager/messager.pb.go b/hrp/internal/boomer/grpc/messager/messager.pb.go index 79419e23..66a20108 100644 --- a/hrp/internal/boomer/grpc/messager/messager.pb.go +++ b/hrp/internal/boomer/grpc/messager/messager.pb.go @@ -25,9 +25,9 @@ type StreamRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Data map[string]int64 `protobuf:"bytes,2,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - NodeID string `protobuf:"bytes,3,opt,name=NodeID,proto3" json:"NodeID,omitempty"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + NodeID string `protobuf:"bytes,2,opt,name=nodeID,proto3" json:"nodeID,omitempty"` + Data map[string][]byte `protobuf:"bytes,3,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *StreamRequest) Reset() { @@ -69,13 +69,6 @@ func (x *StreamRequest) GetType() string { return "" } -func (x *StreamRequest) GetData() map[string]int64 { - if x != nil { - return x.Data - } - return nil -} - 
func (x *StreamRequest) GetNodeID() string { if x != nil { return x.NodeID @@ -83,16 +76,23 @@ func (x *StreamRequest) GetNodeID() string { return "" } +func (x *StreamRequest) GetData() map[string][]byte { + if x != nil { + return x.Data + } + return nil +} + type StreamResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Profile []byte `protobuf:"bytes,2,opt,name=profile,proto3" json:"profile,omitempty"` - Data map[string]int64 `protobuf:"bytes,3,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - NodeID string `protobuf:"bytes,4,opt,name=NodeID,proto3" json:"NodeID,omitempty"` - Tasks []byte `protobuf:"bytes,5,opt,name=tasks,proto3" json:"tasks,omitempty"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + NodeID string `protobuf:"bytes,2,opt,name=nodeID,proto3" json:"nodeID,omitempty"` + Profile []byte `protobuf:"bytes,3,opt,name=profile,proto3" json:"profile,omitempty"` + Tasks []byte `protobuf:"bytes,4,opt,name=tasks,proto3" json:"tasks,omitempty"` + Data map[string][]byte `protobuf:"bytes,5,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *StreamResponse) Reset() { @@ -134,20 +134,6 @@ func (x *StreamResponse) GetType() string { return "" } -func (x *StreamResponse) GetProfile() []byte { - if x != nil { - return x.Profile - } - return nil -} - -func (x *StreamResponse) GetData() map[string]int64 { - if x != nil { - return x.Data - } - return nil -} - func (x *StreamResponse) GetNodeID() string { if x != nil { return x.NodeID @@ -155,6 +141,13 @@ func (x *StreamResponse) GetNodeID() string { return "" } +func (x *StreamResponse) GetProfile() []byte { + if x != nil { + return x.Profile + } + return nil +} + func (x 
*StreamResponse) GetTasks() []byte { if x != nil { return x.Tasks @@ -162,12 +155,21 @@ func (x *StreamResponse) GetTasks() []byte { return nil } +func (x *StreamResponse) GetData() map[string][]byte { + if x != nil { + return x.Data + } + return nil +} + type RegisterRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - NodeID string `protobuf:"bytes,1,opt,name=NodeID,proto3" json:"NodeID,omitempty"` + NodeID string `protobuf:"bytes,1,opt,name=nodeID,proto3" json:"nodeID,omitempty"` + Os string `protobuf:"bytes,2,opt,name=os,proto3" json:"os,omitempty"` + Arch string `protobuf:"bytes,3,opt,name=arch,proto3" json:"arch,omitempty"` } func (x *RegisterRequest) Reset() { @@ -209,6 +211,20 @@ func (x *RegisterRequest) GetNodeID() string { return "" } +func (x *RegisterRequest) GetOs() string { + if x != nil { + return x.Os + } + return "" +} + +func (x *RegisterRequest) GetArch() string { + if x != nil { + return x.Arch + } + return "" +} + type RegisterResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -269,7 +285,7 @@ type SignOutRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - NodeID string `protobuf:"bytes,1,opt,name=NodeID,proto3" json:"NodeID,omitempty"` + NodeID string `protobuf:"bytes,1,opt,name=nodeID,proto3" json:"nodeID,omitempty"` } func (x *SignOutRequest) Reset() { @@ -373,59 +389,61 @@ var file_grpc_proto_messager_proto_rawDesc = []byte{ 0x73, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xaa, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 
0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x6f, + 0x64, 0x65, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, + 0x49, 0x44, 0x12, 0x34, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xdc, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x21, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 
0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, - 0x65, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, - 0x44, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x6f, 0x64, 0x65, + 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, + 0x73, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, + 0x12, 0x35, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0x29, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x22, 0x40, 0x0a, 0x10, 0x52, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x12, 
0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, - 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x28, 0x0a, - 0x0e, 0x53, 0x69, 0x67, 0x6e, 0x4f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x16, 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x22, 0x3f, 0x0a, 0x0f, 0x53, 0x69, 0x67, 0x6e, 0x4f, - 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, - 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0xe4, 0x01, 0x0a, 0x07, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x12, 0x18, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x07, 0x53, 0x69, 0x67, 0x6e, 0x4f, - 0x75, 0x74, 0x12, 0x17, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x69, 0x67, - 0x6e, 0x4f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x4f, 0x75, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x1d, 0x42, 0x69, 0x64, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 
0x65, 0x12, 0x16, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x17, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, - 0x0f, 0x5a, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x72, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x4d, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x6f, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x6f, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x61, + 0x72, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x61, 0x72, 0x63, 0x68, 0x22, + 0x40, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x22, 0x28, 0x0a, 0x0e, 0x53, 0x69, 0x67, 0x6e, 0x4f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x22, 0x3f, 0x0a, 0x0f, 0x53, + 0x69, 0x67, 0x6e, 0x4f, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x18, 0x0a, 
0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0xe4, 0x01, 0x0a, + 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x18, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, + 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x07, 0x53, + 0x69, 0x67, 0x6e, 0x4f, 0x75, 0x74, 0x12, 0x17, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x4f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x18, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x4f, 0x75, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x1d, 0x42, + 0x69, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x2e, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, + 0x01, 0x30, 0x01, 0x42, 0x0f, 0x5a, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/hrp/internal/boomer/grpc/proto/messager.proto b/hrp/internal/boomer/grpc/proto/messager.proto index 8ddbbfa0..2dc77839 100644 --- a/hrp/internal/boomer/grpc/proto/messager.proto +++ b/hrp/internal/boomer/grpc/proto/messager.proto @@ -12,20 +12,22 @@ service Message { 
message StreamRequest{ string type = 1; - map data = 2; - string NodeID = 3; + string nodeID = 2; + map data = 3; } message StreamResponse{ string type = 1; - bytes profile = 2; - map data = 3; - string NodeID = 4; - bytes tasks = 5; + string nodeID = 2; + bytes profile = 3; + bytes tasks = 4; + map data = 5; } message RegisterRequest{ - string NodeID = 1; + string nodeID = 1; + string os = 2; + string arch = 3; } message RegisterResponse{ @@ -34,7 +36,7 @@ message RegisterResponse{ } message SignOutRequest{ - string NodeID = 1; + string nodeID = 1; } message SignOutResponse{ diff --git a/hrp/internal/boomer/message.go b/hrp/internal/boomer/message.go index afee82ba..975aaef1 100644 --- a/hrp/internal/boomer/message.go +++ b/hrp/internal/boomer/message.go @@ -11,11 +11,11 @@ const ( ) type genericMessage struct { - Type string `json:"type,omitempty"` - Profile []byte `json:"profile,omitempty"` - Data map[string]int64 `json:"data,omitempty"` - NodeID string `json:"node_id,omitempty"` - Tasks []byte `json:"tasks,omitempty"` + Type string `json:"type,omitempty"` + Profile []byte `json:"profile,omitempty"` + Data map[string][]byte `json:"data,omitempty"` + NodeID string `json:"node_id,omitempty"` + Tasks []byte `json:"tasks,omitempty"` } type task struct { @@ -23,7 +23,7 @@ type task struct { TestCases []byte `json:"testcases,omitempty"` } -func newGenericMessage(t string, data map[string]int64, nodeID string) (msg *genericMessage) { +func newGenericMessage(t string, data map[string][]byte, nodeID string) (msg *genericMessage) { return &genericMessage{ Type: t, Data: data, @@ -38,7 +38,7 @@ func newQuitMessage(nodeID string) (msg *genericMessage) { } } -func newMessageToWorker(t string, profile []byte, data map[string]int64, tasks []byte) (msg *genericMessage) { +func newMessageToWorker(t string, profile []byte, data map[string][]byte, tasks []byte) (msg *genericMessage) { return &genericMessage{ Type: t, Profile: profile, diff --git a/hrp/internal/boomer/runner.go 
b/hrp/internal/boomer/runner.go index 207c18dd..2462be36 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -52,6 +52,7 @@ const ( reportStatsInterval = 3 * time.Second heartbeatInterval = 1 * time.Second heartbeatLiveness = 3 * time.Second + reconnectInterval = 3 * time.Second ) type Loop struct { @@ -392,10 +393,11 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo for { select { case <-quit: - atomic.AddInt64(&r.controller.currentClientsNum, -1) + r.controller.increaseFinishedCount() return default: if workerLoop != nil && !workerLoop.acquire() { + r.controller.increaseFinishedCount() return } if r.rateLimitEnabled { @@ -415,6 +417,8 @@ func (r *runner) spawnWorkers(spawnCount int64, spawnRate float64, quit chan boo workerLoop.increaseFinishedCount() if r.loop.isFinished() { go r.stop() + r.controller.increaseFinishedCount() + return } } if r.controller.erase() { @@ -621,7 +625,8 @@ func (r *localRunner) start() { go r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stoppingChan, nil) defer func() { - r.wgMu.Lock() // block concurrent waitgroup adds in GoAttach while stopping + // block concurrent waitgroup adds in GoAttach while stopping + r.wgMu.Lock() r.updateState(StateStopping) close(r.stoppingChan) close(r.rebalance) @@ -634,10 +639,8 @@ func (r *localRunner) start() { // wait until all stats are reported successfully <-r.reportedChan - // report test result r.reportTestResult() - // output teardown r.outputOnStop() @@ -693,8 +696,8 @@ func newWorkerRunner(masterHost string, masterPort int) (r *workerRunner) { } func (r *workerRunner) spawnComplete() { - data := make(map[string]int64) - data["count"] = r.controller.getSpawnCount() + data := make(map[string][]byte) + data["count"] = builtin.Int64ToBytes(r.controller.getSpawnCount()) r.client.sendChannel() <- newGenericMessage("spawning_complete", data, r.nodeID) } @@ -755,8 +758,6 @@ func (r *workerRunner) onMessage(msg *genericMessage) { 
r.onRebalanceMessage(msg) case "stop": r.stop() - log.Info().Msg("Recv stop message from master, all the goroutines are stopped") - r.client.sendChannel() <- newGenericMessage("client_stopped", nil, r.nodeID) case "quit": r.stop() if r.ignoreQuit { @@ -780,6 +781,10 @@ func (r *workerRunner) onMessage(msg *genericMessage) { } } +func (r *workerRunner) onStopped() { + r.client.sendChannel() <- newGenericMessage("client_stopped", nil, r.nodeID) +} + func (r *workerRunner) onQuiting() { if r.getState() != StateQuitting { r.client.sendChannel() <- newQuitMessage(r.nodeID) @@ -832,7 +837,7 @@ func (r *workerRunner) run() { select { case <-r.client.disconnectedChannel(): case <-ticker.C: - log.Warn().Msg("Timeout waiting for sending quit message to master, boomer will quit any way.") + log.Warn().Msg("timeout waiting for sending quit message to master, boomer will quit any way.") } // sign out from master @@ -858,25 +863,31 @@ func (r *workerRunner) run() { for { select { case <-ticker.C: - if atomic.LoadInt32(&r.client.failCount) > 2 { - r.updateState(StateMissing) - } if r.getState() == StateMissing { err = r.client.register(r.client.config.ctx) if err != nil { continue } - if r.client.newBiStreamClient() == nil { - r.updateState(StateInit) + err = r.client.newBiStreamClient() + if err != nil { + continue } + r.updateState(StateInit) } - CPUUsage := GetCurrentCPUUsage() - MemoryUsage := GetCurrentMemoryUsage() - data := map[string]int64{ - "state": int64(r.getState()), - "current_cpu_usage": int64(CPUUsage), // percentage - "current_memory_usage": int64(MemoryUsage), // percentage - "current_users": r.controller.getCurrentClientsNum(), + if atomic.LoadInt32(&r.client.failCount) > 2 { + r.updateState(StateMissing) + } + CPUUsage := GetCurrentCPUPercent() + MemoryUsage := GetCurrentMemoryPercent() + PidCPUUsage := GetCurrentPidCPUUsage() + PidMemoryUsage := GetCurrentPidMemoryUsage() + data := map[string][]byte{ + "state": builtin.Int64ToBytes(int64(r.getState())), + 
"current_cpu_usage": builtin.Float64ToByte(CPUUsage), + "current_pid_cpu_usage": builtin.Float64ToByte(PidCPUUsage), + "current_memory_usage": builtin.Float64ToByte(MemoryUsage), + "current_pid_memory_usage": builtin.Float64ToByte(PidMemoryUsage), + "current_users": builtin.Int64ToBytes(r.controller.getCurrentClientsNum()), } r.client.sendChannel() <- newGenericMessage("heartbeat", data, r.nodeID) case <-r.closeChan: @@ -901,7 +912,8 @@ func (r *workerRunner) start() { go r.spawnWorkers(r.getSpawnCount(), r.getSpawnRate(), r.stoppingChan, r.spawnComplete) defer func() { - r.wgMu.Lock() // block concurrent waitgroup adds in GoAttach while stopping + // block concurrent waitgroup adds in GoAttach while stopping + r.wgMu.Lock() r.updateState(StateStopping) close(r.stoppingChan) close(r.rebalance) @@ -912,9 +924,13 @@ func (r *workerRunner) start() { close(r.doneChan) + // notify master that worker is stopped + r.onStopped() + // wait until all stats are reported successfully <-r.reportedChan - + // report test result r.reportTestResult() + // output teardown r.outputOnStop() }() @@ -991,13 +1007,16 @@ func (r *masterRunner) heartbeatWorker() { if !ok { log.Error().Msg("failed to get worker information") } - if atomic.LoadInt32(&workerInfo.Heartbeat) <= 0 && workerInfo.getState() != StateMissing { - workerInfo.setState(StateMissing) + if atomic.LoadInt32(&workerInfo.Heartbeat) <= 0 { + if workerInfo.getState() != StateMissing { + workerInfo.setState(StateMissing) + } if r.getState() == StateRunning { // all running workers missed, stopping runner if r.server.getClientsLength() <= 0 { r.updateState(StateStopped) } + return true } } else { atomic.AddInt32(&workerInfo.Heartbeat, -1) @@ -1041,19 +1060,15 @@ func (r *masterRunner) clientListener() { r.updateState(StateStopped) } case typeHeartbeat: - if workerInfo.getState() != int32(msg.Data["state"]) { - workerInfo.setState(int32(msg.Data["state"])) + if workerInfo.getState() != 
int32(builtin.BytesToInt64(msg.Data["state"])) { + workerInfo.setState(int32(builtin.BytesToInt64(msg.Data["state"]))) } workerInfo.updateHeartbeat(3) - if workerInfo.getCPUUsage() != float64(msg.Data["current_cpu_usage"]) { - workerInfo.updateCPUUsage(float64(msg.Data["current_cpu_usage"])) - } - if workerInfo.getMemoryUsage() != float64(msg.Data["current_memory_usage"]) { - workerInfo.updateMemoryUsage(float64(msg.Data["current_memory_usage"])) - } - if workerInfo.getSpawnCount() != msg.Data["current_users"] { - workerInfo.updateSpawnCount(msg.Data["current_users"]) - } + workerInfo.updateCPUUsage(builtin.ByteToFloat64(msg.Data["current_cpu_usage"])) + workerInfo.updateWorkerCPUUsage(builtin.ByteToFloat64(msg.Data["current_pid_cpu_usage"])) + workerInfo.updateMemoryUsage(builtin.ByteToFloat64(msg.Data["current_memory_usage"])) + workerInfo.updateWorkerMemoryUsage(builtin.ByteToFloat64(msg.Data["current_pid_memory_usage"])) + workerInfo.updateUserCount(builtin.BytesToInt64(msg.Data["current_users"])) case typeSpawning: workerInfo.setState(StateSpawning) case typeSpawningComplete: @@ -1095,12 +1110,6 @@ func (r *masterRunner) run() { } defer func() { - // disconnecting workers - close(r.server.disconnectedChan) - - // waiting to close bidirectional stream - r.server.wg.Wait() - // close server r.server.close() }() @@ -1191,7 +1200,6 @@ func (r *masterRunner) start() error { workerInfo.getStream() <- &messager.StreamResponse{ Type: "spawn", Profile: ProfileToBytes(workerProfile), - Data: map[string]int64{}, NodeID: workerInfo.ID, Tasks: testcase, } @@ -1245,7 +1253,6 @@ func (r *masterRunner) rebalance() error { workerInfo.getStream() <- &messager.StreamResponse{ Type: "spawn", Profile: ProfileToBytes(workerProfile), - Data: map[string]int64{}, NodeID: workerInfo.ID, Tasks: r.tcb, } @@ -1253,7 +1260,6 @@ func (r *masterRunner) rebalance() error { workerInfo.getStream() <- &messager.StreamResponse{ Type: "rebalance", Profile: ProfileToBytes(workerProfile), - Data: 
map[string]int64{}, NodeID: workerInfo.ID, } } @@ -1284,7 +1290,7 @@ func (r *masterRunner) fetchTestCase() ([]byte, error) { func (r *masterRunner) stop() error { if r.isStarting() { r.updateState(StateStopping) - r.server.sendBroadcasts(&genericMessage{Type: "stop", Data: map[string]int64{}}) + r.server.sendBroadcasts(&genericMessage{Type: "stop"}) return nil } else { return errors.New("already stopped") @@ -1314,16 +1320,16 @@ func (r *masterRunner) reportStats() { table := tablewriter.NewWriter(os.Stdout) table.SetColMinWidth(0, 20) table.SetColMinWidth(1, 10) - table.SetHeader([]string{"Worker ID", "IP", "State", "Current Users", "CPU Usage (%)", "Memory Usage (%)"}) + table.SetHeader([]string{"Worker ID", "IP", "State", "Current Users", "CPU (%)", "Memory (%)"}) for _, worker := range r.server.getAllWorkers() { row := make([]string, 6) row[0] = worker.ID row[1] = worker.IP row[2] = fmt.Sprintf("%v", getStateName(worker.getState())) - row[3] = fmt.Sprintf("%v", worker.getSpawnCount()) - row[4] = fmt.Sprintf("%v", worker.getCPUUsage()) - row[5] = fmt.Sprintf("%v", worker.getMemoryUsage()) + row[3] = fmt.Sprintf("%v", worker.getUserCount()) + row[4] = fmt.Sprintf("%.2f", worker.getCPUUsage()) + row[5] = fmt.Sprintf("%.2f", worker.getMemoryUsage()) table.Append(row) } table.Render() diff --git a/hrp/internal/boomer/server_grpc.go b/hrp/internal/boomer/server_grpc.go index 0b2943da..5ad07267 100644 --- a/hrp/internal/boomer/server_grpc.go +++ b/hrp/internal/boomer/server_grpc.go @@ -25,20 +25,24 @@ import ( type WorkerNode struct { ID string `json:"id"` IP string `json:"ip"` + OS string `json:"os"` + Arch string `json:"arch"` State int32 `json:"state"` Heartbeat int32 `json:"heartbeat"` - SpawnCount int64 `json:"spawn_count"` + UserCount int64 `json:"user_count"` + WorkerCPUUsage float64 `json:"worker_cpu_usage"` CPUUsage float64 `json:"cpu_usage"` CPUWarningEmitted bool `json:"cpu_warning_emitted"` + WorkerMemoryUsage float64 `json:"worker_memory_usage"` 
MemoryUsage float64 `json:"memory_usage"` stream chan *messager.StreamResponse mutex sync.RWMutex disconnectedChan chan bool } -func newWorkerNode(id, ip string) *WorkerNode { +func newWorkerNode(id, ip, os, arch string) *WorkerNode { stream := make(chan *messager.StreamResponse, 100) - return &WorkerNode{State: StateInit, ID: id, IP: ip, Heartbeat: 3, stream: stream, disconnectedChan: make(chan bool)} + return &WorkerNode{State: StateInit, ID: id, IP: ip, OS: os, Arch: arch, Heartbeat: 3, stream: stream, disconnectedChan: make(chan bool)} } func (w *WorkerNode) getState() int32 { @@ -57,12 +61,12 @@ func (w *WorkerNode) getHeartbeat() int32 { return atomic.LoadInt32(&w.Heartbeat) } -func (w *WorkerNode) updateSpawnCount(spawnCount int64) { - atomic.StoreInt64(&w.SpawnCount, spawnCount) +func (w *WorkerNode) updateUserCount(spawnCount int64) { + atomic.StoreInt64(&w.UserCount, spawnCount) } -func (w *WorkerNode) getSpawnCount() int64 { - return atomic.LoadInt64(&w.SpawnCount) +func (w *WorkerNode) getUserCount() int64 { + return atomic.LoadInt64(&w.UserCount) } func (w *WorkerNode) updateCPUUsage(cpuUsage float64) { @@ -77,6 +81,18 @@ func (w *WorkerNode) getCPUUsage() float64 { return w.CPUUsage } +func (w *WorkerNode) updateWorkerCPUUsage(workerCPUUsage float64) { + w.mutex.Lock() + defer w.mutex.Unlock() + w.WorkerCPUUsage = workerCPUUsage +} + +func (w *WorkerNode) getWorkerCPUUsage() float64 { + w.mutex.RLock() + defer w.mutex.RUnlock() + return w.WorkerCPUUsage +} + func (w *WorkerNode) updateCPUWarningEmitted(cpuWarningEmitted bool) { w.mutex.Lock() defer w.mutex.Unlock() @@ -89,6 +105,18 @@ func (w *WorkerNode) getCPUWarningEmitted() bool { return w.CPUWarningEmitted } +func (w *WorkerNode) updateWorkerMemoryUsage(workerMemoryUsage float64) { + w.mutex.Lock() + defer w.mutex.Unlock() + w.WorkerMemoryUsage = workerMemoryUsage +} + +func (w *WorkerNode) getWorkerMemoryUsage() float64 { + w.mutex.RLock() + defer w.mutex.RUnlock() + return w.WorkerMemoryUsage 
+} + func (w *WorkerNode) updateMemoryUsage(memoryUsage float64) { w.mutex.Lock() defer w.mutex.Unlock() @@ -119,11 +147,15 @@ func (w *WorkerNode) getWorkerInfo() WorkerNode { return WorkerNode{ ID: w.ID, IP: w.IP, + OS: w.OS, + Arch: w.Arch, State: w.getState(), Heartbeat: w.getHeartbeat(), - SpawnCount: w.getSpawnCount(), + UserCount: w.getUserCount(), + WorkerCPUUsage: w.getWorkerCPUUsage(), CPUUsage: w.getCPUUsage(), CPUWarningEmitted: w.getCPUWarningEmitted(), + WorkerMemoryUsage: w.getWorkerMemoryUsage(), MemoryUsage: w.getMemoryUsage(), } } @@ -267,7 +299,7 @@ func (s *grpcServer) Register(ctx context.Context, req *messager.RegisterRequest p, _ := peer.FromContext(ctx) clientIp := strings.Split(p.Addr.String(), ":")[0] // store worker information - wn := newWorkerNode(req.NodeID, clientIp) + wn := newWorkerNode(req.NodeID, clientIp, req.Os, req.Arch) s.clients.Store(req.NodeID, wn) log.Warn().Str("worker id", req.NodeID).Msg("worker joined") return &messager.RegisterResponse{Code: "0", Message: "register successfully"}, nil @@ -415,6 +447,12 @@ func (s *grpcServer) close() { ctx, cancel := context.WithTimeout(context.Background(), timeout) s.stopServer(ctx) cancel() + + // disconnecting workers + close(s.disconnectedChan) + + // waiting to close bidirectional stream + s.wg.Wait() } func (s *grpcServer) recvChannel() chan *genericMessage { diff --git a/hrp/internal/boomer/utils.go b/hrp/internal/boomer/utils.go index c18400f8..94fad13b 100644 --- a/hrp/internal/boomer/utils.go +++ b/hrp/internal/boomer/utils.go @@ -13,6 +13,8 @@ import ( "github.com/google/uuid" "github.com/rs/zerolog/log" + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/mem" "github.com/shirou/gopsutil/process" ) @@ -88,8 +90,8 @@ func getNodeID() (nodeID string) { return } -// GetCurrentCPUUsage get current CPU usage -func GetCurrentCPUUsage() float64 { +// GetCurrentPidCPUUsage get current pid CPU usage +func GetCurrentPidCPUUsage() float64 { currentPid := os.Getpid() p, 
err := process.NewProcess(int32(currentPid)) if err != nil { @@ -104,8 +106,8 @@ func GetCurrentCPUUsage() float64 { return percent } -// GetCurrentCPUPercent get the percentage of current cpu used -func GetCurrentCPUPercent() float64 { +// GetCurrentPidCPUPercent get the percentage of current pid cpu used +func GetCurrentPidCPUPercent() float64 { currentPid := os.Getpid() p, err := process.NewProcess(int32(currentPid)) if err != nil { @@ -120,8 +122,20 @@ func GetCurrentCPUPercent() float64 { return percent } -// GetCurrentMemoryUsage get current Memory usage -func GetCurrentMemoryUsage() float64 { +// GetCurrentCPUPercent get the percentage of current cpu used +func GetCurrentCPUPercent() float64 { + percent, _ := cpu.Percent(time.Second, false) + return percent[0] +} + +// GetCurrentMemoryPercent get the percentage of current memory used +func GetCurrentMemoryPercent() float64 { + memInfo, _ := mem.VirtualMemory() + return memInfo.UsedPercent +} + +// GetCurrentPidMemoryUsage get current Memory usage +func GetCurrentPidMemoryUsage() float64 { currentPid := os.Getpid() p, err := process.NewProcess(int32(currentPid)) if err != nil { diff --git a/hrp/internal/builtin/utils.go b/hrp/internal/builtin/utils.go index da437cc6..07157cdb 100644 --- a/hrp/internal/builtin/utils.go +++ b/hrp/internal/builtin/utils.go @@ -3,9 +3,11 @@ package builtin import ( "bufio" "bytes" + "encoding/binary" "encoding/csv" builtinJSON "encoding/json" "fmt" + "math" "math/rand" "os" "os/exec" @@ -505,6 +507,42 @@ func Bytes2File(data []byte, filename string) error { return nil } +func Float32ToByte(v float32) []byte { + bits := math.Float32bits(v) + bytes := make([]byte, 4) + binary.LittleEndian.PutUint32(bytes, bits) + return bytes +} + +func ByteToFloat32(v []byte) float32 { + bits := binary.LittleEndian.Uint32(v) + return math.Float32frombits(bits) +} + +func Float64ToByte(v float64) []byte { + bits := math.Float64bits(v) + bts := make([]byte, 8) + binary.LittleEndian.PutUint64(bts, 
bits) + return bts +} + +func ByteToFloat64(v []byte) float64 { + bits := binary.LittleEndian.Uint64(v) + return math.Float64frombits(bits) +} + +func Int64ToBytes(n int64) []byte { + bytesBuf := bytes.NewBuffer([]byte{}) + _ = binary.Write(bytesBuf, binary.BigEndian, n) + return bytesBuf.Bytes() +} + +func BytesToInt64(bys []byte) (data int64) { + byteBuff := bytes.NewBuffer(bys) + _ = binary.Read(byteBuff, binary.BigEndian, &data) + return +} + func SplitInteger(m, n int) (ints []int) { quotient := m / n remainder := m % n From 17617ee13e1bcf42dbe5c79e14d7e276236d3780 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Fri, 15 Jul 2022 14:49:47 +0800 Subject: [PATCH 26/31] fix: grpc reconnect backoff --- hrp/internal/boomer/client_grpc.go | 9 +++++++++ hrp/internal/boomer/runner.go | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/hrp/internal/boomer/client_grpc.go b/hrp/internal/boomer/client_grpc.go index cdf32111..6e014aee 100644 --- a/hrp/internal/boomer/client_grpc.go +++ b/hrp/internal/boomer/client_grpc.go @@ -10,6 +10,7 @@ import ( "golang.org/x/oauth2" "google.golang.org/grpc" + "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/oauth" "google.golang.org/grpc/metadata" @@ -169,6 +170,14 @@ func (c *grpcClient) start() (err error) { grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(32 * 10e9)), grpc.WithUnaryInterceptor(unaryInterceptor), grpc.WithStreamInterceptor(streamInterceptor), + grpc.WithConnectParams(grpc.ConnectParams{ + Backoff: backoff.Config{ + BaseDelay: 1 * time.Second, + Multiplier: 1.2, + MaxDelay: 3 * time.Second, + }, + MinConnectTimeout: 3 * time.Second, + }), } c.config.conn, err = grpc.Dial(addr, opts...) 
if err != nil { diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 2462be36..df68c155 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -1320,7 +1320,7 @@ func (r *masterRunner) reportStats() { table := tablewriter.NewWriter(os.Stdout) table.SetColMinWidth(0, 20) table.SetColMinWidth(1, 10) - table.SetHeader([]string{"Worker ID", "IP", "State", "Current Users", "CPU (%)", "Memory (%)"}) + table.SetHeader([]string{"Worker ID", "IP", "State", "Current Users", "CPU Usage (%)", "Memory Usage (%)"}) for _, worker := range r.server.getAllWorkers() { row := make([]string, 6) From f2799aef53662503f362ed3d0d6ff653b03746cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Fri, 15 Jul 2022 15:33:53 +0800 Subject: [PATCH 27/31] feat: call the HTTP API to get master state information --- hrp/internal/boomer/boomer.go | 11 ++++++++++- hrp/internal/boomer/runner.go | 25 ++++++++++++++++++++----- hrp/internal/boomer/runner_test.go | 3 ++- hrp/server.go | 23 +++++++++++++++++++---- 4 files changed, 51 insertions(+), 11 deletions(-) diff --git a/hrp/internal/boomer/boomer.go b/hrp/internal/boomer/boomer.go index cfbd17ea..69b782b4 100644 --- a/hrp/internal/boomer/boomer.go +++ b/hrp/internal/boomer/boomer.go @@ -491,11 +491,20 @@ func (b *Boomer) Stop() error { return b.masterRunner.stop() } -// GetWorkersInfo gets workers +// GetWorkersInfo gets workers information func (b *Boomer) GetWorkersInfo() []WorkerNode { return b.masterRunner.server.getAllWorkers() } +// GetMasterInfo gets master information +func (b *Boomer) GetMasterInfo() map[string]interface{} { + masterInfo := make(map[string]interface{}) + masterInfo["state"] = getStateName(b.masterRunner.getState()) + masterInfo["workers"] = b.masterRunner.server.getClientsLength() + masterInfo["target_users"] = b.masterRunner.getSpawnCount() + return masterInfo +} + func (b *Boomer) GetCloseChan() chan bool { switch b.mode { case DistributedWorkerMode: 
diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index df68c155..9a2f8794 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -1064,11 +1064,26 @@ func (r *masterRunner) clientListener() { workerInfo.setState(int32(builtin.BytesToInt64(msg.Data["state"]))) } workerInfo.updateHeartbeat(3) - workerInfo.updateCPUUsage(builtin.ByteToFloat64(msg.Data["current_cpu_usage"])) - workerInfo.updateWorkerCPUUsage(builtin.ByteToFloat64(msg.Data["current_pid_cpu_usage"])) - workerInfo.updateMemoryUsage(builtin.ByteToFloat64(msg.Data["current_memory_usage"])) - workerInfo.updateWorkerMemoryUsage(builtin.ByteToFloat64(msg.Data["current_pid_memory_usage"])) - workerInfo.updateUserCount(builtin.BytesToInt64(msg.Data["current_users"])) + currentCPUUsage, ok := msg.Data["current_cpu_usage"] + if ok { + workerInfo.updateCPUUsage(builtin.ByteToFloat64(currentCPUUsage)) + } + currentPidCpuUsage, ok := msg.Data["current_pid_cpu_usage"] + if ok { + workerInfo.updateWorkerCPUUsage(builtin.ByteToFloat64(currentPidCpuUsage)) + } + currentMemoryUsage, ok := msg.Data["current_memory_usage"] + if ok { + workerInfo.updateMemoryUsage(builtin.ByteToFloat64(currentMemoryUsage)) + } + currentPidMemoryUsage, ok := msg.Data["current_pid_memory_usage"] + if ok { + workerInfo.updateWorkerMemoryUsage(builtin.ByteToFloat64(currentPidMemoryUsage)) + } + currentUsers, ok := msg.Data["current_users"] + if ok { + workerInfo.updateUserCount(builtin.BytesToInt64(currentUsers)) + } case typeSpawning: workerInfo.setState(StateSpawning) case typeSpawningComplete: diff --git a/hrp/internal/boomer/runner_test.go b/hrp/internal/boomer/runner_test.go index 5b71e451..f8a9e228 100644 --- a/hrp/internal/boomer/runner_test.go +++ b/hrp/internal/boomer/runner_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/httprunner/httprunner/v4/hrp/internal/boomer/grpc/messager" + "github.com/httprunner/httprunner/v4/hrp/internal/builtin" "github.com/stretchr/testify/assert" ) 
@@ -529,7 +530,7 @@ func TestHeartbeatWorker(t *testing.T) { runner.server.recvChannel() <- &genericMessage{ Type: typeHeartbeat, NodeID: "testID2", - Data: map[string]int64{"state": 3}, + Data: map[string][]byte{"state": builtin.Int64ToBytes(3)}, } worker2, ok := runner.server.getClients().Load("testID2") if !ok { diff --git a/hrp/server.go b/hrp/server.go index cb1c07b9..c060d409 100644 --- a/hrp/server.go +++ b/hrp/server.go @@ -135,10 +135,6 @@ type CommonResponseBody struct { } type APIGetWorkersRequestBody struct { - ID string `json:"id"` - State int32 `json:"state"` - CPUUsage float64 `json:"cpu_usage"` - MemoryUsage float64 `json:"memory_usage"` } type APIGetWorkersResponseBody struct { @@ -146,6 +142,14 @@ type APIGetWorkersResponseBody struct { Data []boomer.WorkerNode `json:"data"` } +type APIGetMasterRequestBody struct { +} + +type APIGetMasterResponseBody struct { + ServerStatus + Data map[string]interface{} `json:"data"` +} + type apiHandler struct { boomer *HRPBoomer } @@ -321,6 +325,16 @@ func (api *apiHandler) GetWorkersInfo(w http.ResponseWriter, r *http.Request) { writeJSON(w, body, http.StatusOK) } +func (api *apiHandler) GetMasterInfo(w http.ResponseWriter, r *http.Request) { + resp := &APIGetMasterResponseBody{ + ServerStatus: EnumAPIResponseSuccess, + Data: api.boomer.GetMasterInfo(), + } + + body, _ := json.Marshal(resp) + writeJSON(w, body, http.StatusOK) +} + func (api *apiHandler) Handler() http.Handler { mux := http.NewServeMux() @@ -330,6 +344,7 @@ func (api *apiHandler) Handler() http.Handler { mux.HandleFunc("/stop", methods(api.Stop, "GET")) mux.HandleFunc("/quit", methods(api.Quit, "GET")) mux.HandleFunc("/workers", methods(api.GetWorkersInfo, "GET")) + mux.HandleFunc("/master", methods(api.GetMasterInfo, "GET")) return mux } From ccd03c4ec58c3c6a741dd524952e52c73908fc5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Fri, 15 Jul 2022 17:47:19 +0800 Subject: [PATCH 28/31] fix: worker missing state --- 
hrp/internal/boomer/boomer.go | 2 +- hrp/internal/boomer/runner.go | 16 +++++++++++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/hrp/internal/boomer/boomer.go b/hrp/internal/boomer/boomer.go index 69b782b4..b4fe7256 100644 --- a/hrp/internal/boomer/boomer.go +++ b/hrp/internal/boomer/boomer.go @@ -499,7 +499,7 @@ func (b *Boomer) GetWorkersInfo() []WorkerNode { // GetMasterInfo gets master information func (b *Boomer) GetMasterInfo() map[string]interface{} { masterInfo := make(map[string]interface{}) - masterInfo["state"] = getStateName(b.masterRunner.getState()) + masterInfo["state"] = b.masterRunner.getState() masterInfo["workers"] = b.masterRunner.server.getClientsLength() masterInfo["target_users"] = b.masterRunner.getSpawnCount() return masterInfo diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 9a2f8794..cf6ceb11 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -874,8 +874,11 @@ func (r *workerRunner) run() { } r.updateState(StateInit) } - if atomic.LoadInt32(&r.client.failCount) > 2 { - r.updateState(StateMissing) + if atomic.LoadInt32(&r.client.failCount) > 3 { + go r.stop() + if !r.isStarting() && !r.isStopping() { + r.updateState(StateMissing) + } } CPUUsage := GetCurrentCPUPercent() MemoryUsage := GetCurrentMemoryPercent() @@ -1007,12 +1010,15 @@ func (r *masterRunner) heartbeatWorker() { if !ok { log.Error().Msg("failed to get worker information") } - if atomic.LoadInt32(&workerInfo.Heartbeat) <= 0 { + if atomic.LoadInt32(&workerInfo.Heartbeat) < 0 { + if workerInfo.getState() == StateQuitting { + return true + } if workerInfo.getState() != StateMissing { workerInfo.setState(StateMissing) } - if r.getState() == StateRunning { - // all running workers missed, stopping runner + if r.isStopping() { + // all running workers missed, setting state to stopped if r.server.getClientsLength() <= 0 { r.updateState(StateStopped) } From 6a812c637415a233c3dccc811ad4b8e394769c19 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Thu, 21 Jul 2022 15:53:35 +0800 Subject: [PATCH 29/31] fix: reset all metrics before starting worker --- hrp/internal/boomer/output.go | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/hrp/internal/boomer/output.go b/hrp/internal/boomer/output.go index 72e185b6..ec4ab82e 100644 --- a/hrp/internal/boomer/output.go +++ b/hrp/internal/boomer/output.go @@ -475,6 +475,7 @@ type PrometheusPusherOutput struct { // OnStart will register all prometheus metric collectors func (o *PrometheusPusherOutput) OnStart() { + o.reset() log.Info().Msg("register prometheus metric collectors") registry := prometheus.NewRegistry() registry.MustRegister( @@ -606,3 +607,35 @@ func (o *PrometheusPusherOutput) OnEvent(data map[string]interface{}) { log.Error().Err(err).Msg("push to Pushgateway failed") } } + +// reset will reset all metrics +func (o *PrometheusPusherOutput) reset() { + log.Info().Msg("reset metrics") + gaugeNumRequests.Reset() + gaugeNumFailures.Reset() + gaugeMedianResponseTime.Reset() + gaugeAverageResponseTime.Reset() + gaugeMinResponseTime.Reset() + gaugeMaxResponseTime.Reset() + gaugeAverageContentLength.Reset() + gaugeCurrentRPS.Reset() + gaugeCurrentFailPerSec.Reset() + // counter for total + counterErrors.Reset() + counterTotalNumRequests.Reset() + counterTotalNumFailures.Reset() + // summary for total + summaryResponseTime.Reset() + // gauges for total + gaugeUsers.Set(0) + gaugeState.Set(1) + gaugeDuration.Set(0) + gaugeTotalAverageResponseTime.Set(0) + gaugeTotalMinResponseTime.Reset() + gaugeTotalMaxResponseTime.Reset() + gaugeTotalRPS.Set(0) + gaugeTotalFailRatio.Set(0) + gaugeTotalFailPerSec.Set(0) + gaugeTransactionsPassed.Set(0) + gaugeTransactionsFailed.Set(0) +} From 4784d166392decf869991122cbbc3cdaa3d4dbbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Thu, 21 Jul 2022 18:01:27 +0800 Subject: [PATCH 30/31] fix: report metrics to 
prometheus --- docs/CHANGELOG.md | 27 +++------------------------ hrp/internal/boomer/output.go | 9 +++++---- hrp/internal/boomer/runner.go | 5 +++-- hrp/plugin.go | 3 +-- 4 files changed, 12 insertions(+), 32 deletions(-) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 61780740..f918cbcc 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -5,11 +5,6 @@ **go version** - feat: support multi-machine collaborative distributed load testing - -## v4.1.7 (2022-07-18) - -**go version** - - feat: support indicating type and filename when uploading file - feat: support to infer MIME type of the file automatically - feat: support omitting websocket url if not necessary @@ -284,8 +279,7 @@ - feat: implement `transaction` mechanism for load test - feat: continue running next step when failure occurs with `--continue-on-failure` flag, default to failfast - feat: report GA events with version -- feat: run load test with the given limit and burst as rate limiter, use `--spawn-count`, `--spawn-rate` - and `--request-increase-rate` flag +- feat: run load test with the given limit and burst as rate limiter, use `--spawn-count`, `--spawn-rate` and `--request-increase-rate` flag - feat: report runner state to prometheus - refactor: fork [boomer] as submodule initially and made a lot of changes - change: update API models @@ -339,8 +333,7 @@ ## 3.1.8 (2022-03-22) -- feat: add `--profile` flag for har2case to support overwrite headers/cookies with specified yaml/json configuration - file +- feat: add `--profile` flag for har2case to support overwrite headers/cookies with specified yaml/json configuration file - feat: support variable and function in response extract expression - fix: keep negative index in jmespath unchanged when converting pytest files, e.g. 
body.users[-1] - fix: variable should not start with digit @@ -658,31 +651,17 @@ reference: [v2-changelog] [hrp]: https://github.com/httprunner/hrp - [hashicorp/go-plugin]: https://github.com/hashicorp/go-plugin - [go plugin]: https://pkg.go.dev/plugin - [docs repo]: https://github.com/httprunner/httprunner.github.io - [zerolog]: https://github.com/rs/zerolog - [jmespath]: https://jmespath.org/ - [mkdocs]: https://www.mkdocs.org/ - [github-actions]: https://github.com/httprunner/hrp/actions - [boomer]: github.com/myzhan/boomer - [sentry sdk]: https://github.com/getsentry/sentry-go - [pushgateway]: https://github.com/prometheus/pushgateway - [locust]: https://locust.io/ - [black]: https://github.com/psf/black - [loguru]: https://github.com/Delgan/loguru - -[v2-changelog]: https://github.com/httprunner/httprunner/blob/v2/docs/CHANGELOG.md +[v2-changelog]: https://github.com/httprunner/httprunner/blob/v2/docs/CHANGELOG.md \ No newline at end of file diff --git a/hrp/internal/boomer/output.go b/hrp/internal/boomer/output.go index ec4ab82e..55dec290 100644 --- a/hrp/internal/boomer/output.go +++ b/hrp/internal/boomer/output.go @@ -475,7 +475,8 @@ type PrometheusPusherOutput struct { // OnStart will register all prometheus metric collectors func (o *PrometheusPusherOutput) OnStart() { - o.reset() + // reset all prometheus metrics + resetPrometheusMetrics() log.Info().Msg("register prometheus metric collectors") registry := prometheus.NewRegistry() registry.MustRegister( @@ -608,9 +609,9 @@ func (o *PrometheusPusherOutput) OnEvent(data map[string]interface{}) { } } -// reset will reset all metrics -func (o *PrometheusPusherOutput) reset() { - log.Info().Msg("reset metrics") +// resetPrometheusMetrics will reset all metrics +func resetPrometheusMetrics() { + log.Info().Msg("reset all prometheus metrics") gaugeNumRequests.Reset() gaugeNumFailures.Reset() gaugeMedianResponseTime.Reset() diff --git a/hrp/internal/boomer/runner.go b/hrp/internal/boomer/runner.go index 
cf6ceb11..503ffbaa 100644 --- a/hrp/internal/boomer/runner.go +++ b/hrp/internal/boomer/runner.go @@ -927,14 +927,15 @@ func (r *workerRunner) start() { close(r.doneChan) - // notify master that worker is stopped - r.onStopped() // wait until all stats are reported successfully <-r.reportedChan // report test result r.reportTestResult() // output teardown r.outputOnStop() + + // notify master that worker is stopped + r.onStopped() }() // start stats report diff --git a/hrp/plugin.go b/hrp/plugin.go index ff8252b5..d98437ac 100644 --- a/hrp/plugin.go +++ b/hrp/plugin.go @@ -8,10 +8,9 @@ import ( "github.com/httprunner/funplugin" "github.com/httprunner/funplugin/fungo" - "github.com/rs/zerolog/log" - "github.com/httprunner/httprunner/v4/hrp/internal/builtin" "github.com/httprunner/httprunner/v4/hrp/internal/sdk" + "github.com/rs/zerolog/log" ) const ( From 6138d80fb01ba75c216e7fdbf1f3ed14d7e31d12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BE=90=E8=81=AA?= Date: Mon, 25 Jul 2022 22:47:31 +0800 Subject: [PATCH 31/31] fix: use master IP if PrometheusPushgatewayURL IP is not set in worker --- hrp/boomer.go | 13 +++++++++++++ hrp/cmd/boom.go | 20 +++----------------- hrp/internal/boomer/boomer.go | 5 +++++ 3 files changed, 21 insertions(+), 17 deletions(-) diff --git a/hrp/boomer.go b/hrp/boomer.go index 91094341..c8cd9349 100644 --- a/hrp/boomer.go +++ b/hrp/boomer.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "os" "path/filepath" + "strings" "sync" "time" @@ -209,6 +210,18 @@ func (b *HRPBoomer) runTestCases(testCases []*TCase, profile *boomer.Profile) { testcases = append(testcases, tesecase) } + if profile.PrometheusPushgatewayURL != "" { + urlSlice := strings.Split(profile.PrometheusPushgatewayURL, ":") + if len(urlSlice) != 2 { + profile.PrometheusPushgatewayURL = "" + } else { + if urlSlice[0] == "" { + urlSlice[0] = b.Boomer.GetMasterHost() + } + } + profile.PrometheusPushgatewayURL = strings.Join(urlSlice, ":") + } + b.SetProfile(profile) b.InitBoomer() 
log.Info().Interface("testcases", testcases).Interface("profile", profile).Msg("run tasks successful") diff --git a/hrp/cmd/boom.go b/hrp/cmd/boom.go index 6b13bdcf..7a267b4d 100644 --- a/hrp/cmd/boom.go +++ b/hrp/cmd/boom.go @@ -148,26 +148,12 @@ func makeHRPBoomer() *hrp.HRPBoomer { os.Exit(1) } } - hrpBoomer := hrp.NewStandaloneBoomer(boomArgs.SpawnCount, boomArgs.SpawnRate) - hrpBoomer.SetRateLimiter(boomArgs.MaxRPS, boomArgs.RequestIncreaseRate) - if boomArgs.LoopCount > 0 { - hrpBoomer.SetLoopCount(boomArgs.LoopCount) - } - if !boomArgs.DisableConsoleOutput { - hrpBoomer.AddOutput(boomer.NewConsoleOutput()) - } - if boomArgs.PrometheusPushgatewayURL != "" { - hrpBoomer.AddOutput(boomer.NewPrometheusPusherOutput(boomArgs.PrometheusPushgatewayURL, "hrp", hrpBoomer.GetMode())) - } - hrpBoomer.SetDisableKeepAlive(boomArgs.DisableKeepalive) - hrpBoomer.SetDisableCompression(boomArgs.DisableCompression) - hrpBoomer.SetClientTransport() if venv != "" { hrpBoomer.SetPython3Venv(venv) } - hrpBoomer.EnableCPUProfile(boomArgs.CPUProfile, boomArgs.CPUProfileDuration) - hrpBoomer.EnableMemoryProfile(boomArgs.MemoryProfile, boomArgs.MemoryProfileDuration) - hrpBoomer.EnableGracefulQuit() + hrpBoomer.SetProfile(&boomArgs.Profile) + hrpBoomer.EnableGracefulQuit(context.Background()) + hrpBoomer.InitBoomer() return hrpBoomer } diff --git a/hrp/internal/boomer/boomer.go b/hrp/internal/boomer/boomer.go index b4fe7256..da7ac054 100644 --- a/hrp/internal/boomer/boomer.go +++ b/hrp/internal/boomer/boomer.go @@ -210,6 +210,11 @@ func (b *Boomer) ParseTestCasesChan() chan bool { return b.masterRunner.parseTestCasesChan } +// GetMasterHost returns master IP +func (b *Boomer) GetMasterHost() string { + return b.masterHost +} + // GetState gets worker state func (b *Boomer) GetState() int32 { switch b.mode {