Merge pull request #1303 from xucong053/support-distributed-load-testing-controlled-by-http-refactoring

feat: support distributed load testing on multi-machines
This commit is contained in:
debugtalk
2022-07-26 10:10:14 +08:00
committed by GitHub
49 changed files with 5189 additions and 253 deletions

View File

@@ -1,17 +1,20 @@
# Release History
## v4.1.7 (2022-07-18)
## v4.2.0 (2022-07-22)
**go version**
- fix: using `@FILEPATH` to indicate the path of the file
- feat: support multi-machine collaborative distributed load testing
- feat: support indicating type and filename when uploading file
- feat: support to infer MIME type of the file automatically
- feat: support omitting websocket url if not necessary
- feat: support multiple websocket connections each session
- fix: optimize websocket step initialization
- feat: support convert curl command(s) to testcase(s)
- feat: support run curl as subcommand of run/boom/convert
- fix: optimize websocket step initialization
- fix: using `@FILEPATH` to indicate the path of the file
- fix: reuse plugin instance if it already initialized
- fix: deep copy api step to avoid data racing
## v4.1.6 (2022-07-04)
@@ -462,9 +465,9 @@
**Changed**
- change: override variables
(1) testcase: session variables > step variables > config variables
(2) testsuite: testcase variables > config variables
(3) testsuite testcase variables > testcase config variables
(1) testcase: session variables > step variables > config variables
(2) testsuite: testcase variables > config variables
(3) testsuite testcase variables > testcase config variables
**Fixed**
@@ -661,4 +664,4 @@ reference: [v2-changelog]
[locust]: https://locust.io/
[black]: https://github.com/psf/black
[loguru]: https://github.com/Delgan/loguru
[v2-changelog]: https://github.com/httprunner/httprunner/blob/v2/docs/CHANGELOG.md
[v2-changelog]: https://github.com/httprunner/httprunner/blob/v2/docs/CHANGELOG.md

View File

@@ -21,13 +21,23 @@ hrp boom [flags]
### Options
```
--autostart Starts the test immediately (without disabling the web UI). Use --spawn-count and --spawn-rate to control user count and increase rate
--cpu-profile string Enable CPU profiling.
--cpu-profile-duration duration CPU profile duration. (default 30s)
--disable-compression Disable compression
--disable-console-output Disable console output.
--disable-keepalive Disable keepalive
--expect-workers int How many workers master should expect to connect before starting the test (only when --autostart is used) (default 1)
      --expect-workers-max-wait int            Maximum time the master should wait for the expected number of workers to connect before starting the test anyway (only when --autostart is used) (default 120)
-h, --help help for boom
--ignore-quit ignores quit from master (only when --worker is used)
--loop-count int The specify running cycles for load testing (default -1)
--master master of distributed testing
--master-bind-host string Interfaces (hostname, ip) that hrp master should bind to. Only used when running with --master. Defaults to * (all available interfaces). (default "127.0.0.1")
--master-bind-port int Port that hrp master should bind to. Only used when running with --master. Defaults to 5557. (default 5557)
--master-host string Host or IP address of hrp master for distributed load testing. (default "127.0.0.1")
--master-http-address string Interfaces (ip:port) that hrp master should control by user. Only used when running with --master. Defaults to *:9771. (default ":9771")
--master-port int The port to connect to that is used by the hrp master for distributed load testing. (default 5557)
--max-rps int Max RPS that boomer can generate, disabled by default.
--mem-profile string Enable memory profiling.
--mem-profile-duration duration Memory profile duration. (default 30s)
@@ -36,6 +46,7 @@ hrp boom [flags]
--request-increase-rate string Request increase rate, disabled by default. (default "-1")
--spawn-count int The number of users to spawn for load testing (default 1)
--spawn-rate float The rate for spawning users (default 1)
--worker worker of distributed testing
```
### SEE ALSO

View File

@@ -1,5 +1,5 @@
{
"project_name": "demo-empty-project",
"create_time": "2022-07-04T14:54:33.795693+08:00",
"hrp_version": "v4.1.5"
"create_time": "2022-07-11T11:45:29.942532+08:00",
"hrp_version": "v4.1.6"
}

View File

@@ -1,4 +1,4 @@
// NOTE: Generated By hrp v4.1.4, DO NOT EDIT!
// NOTE: Generated By hrp v4.1.5, DO NOT EDIT!
package main
import (

View File

@@ -1,5 +1,5 @@
{
"project_name": "demo-with-go-plugin",
"create_time": "2022-07-06T13:57:04.054424+08:00",
"create_time": "2022-07-11T11:44:36.214909+08:00",
"hrp_version": "v4.1.6"
}

View File

@@ -1,4 +1,4 @@
# NOTE: Generated By hrp v4.1.5, DO NOT EDIT!
# NOTE: Generated By hrp v4.1.6, DO NOT EDIT!
import sys
import os

View File

@@ -1,5 +1,5 @@
{
"project_name": "demo-with-py-plugin",
"create_time": "2022-07-06T13:57:04.482633+08:00",
"create_time": "2022-07-11T11:44:37.021634+08:00",
"hrp_version": "v4.1.6"
}

View File

@@ -1,5 +1,5 @@
{
"project_name": "demo-without-plugin",
"create_time": "2022-07-04T14:54:33.495643+08:00",
"hrp_version": "v4.1.5"
"create_time": "2022-07-11T11:45:29.800018+08:00",
"hrp_version": "v4.1.6"
}

11
go.mod
View File

@@ -7,6 +7,7 @@ require (
github.com/denisbrodbeck/machineid v1.0.1
github.com/fatih/color v1.13.0
github.com/getsentry/sentry-go v0.13.0
github.com/go-errors/errors v1.0.1
github.com/go-openapi/spec v0.20.6
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/google/uuid v1.3.0
@@ -16,15 +17,21 @@ require (
github.com/jmespath/go-jmespath v0.4.0
github.com/json-iterator/go v1.1.12
github.com/maja42/goval v1.2.1
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/mitchellh/mapstructure v1.4.1
github.com/olekukonko/tablewriter v0.0.5
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.11.0
github.com/rs/zerolog v1.26.1
github.com/shirou/gopsutil v3.21.11+incompatible
github.com/spf13/cobra v1.2.1
github.com/stretchr/testify v1.7.0
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/net v0.0.0-20220225172249-27dd8689420f
gopkg.in/yaml.v3 v3.0.0
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602
google.golang.org/grpc v1.45.0
google.golang.org/protobuf v1.28.0
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
)
// replace github.com/httprunner/funplugin => ../funplugin

27
go.sum
View File

@@ -17,6 +17,7 @@ cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKP
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
@@ -132,6 +133,8 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
@@ -333,9 +336,8 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -352,6 +354,7 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@@ -405,8 +408,6 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
@@ -421,6 +422,8 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo
github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -452,6 +455,10 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
@@ -474,6 +481,8 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
@@ -594,6 +603,7 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 h1:0Ja1LBD+yisY6RWM/BH7TJVXWsSjs2VwBSmvSX4HdBc=
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -624,6 +634,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -668,6 +679,7 @@ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5 h1:y/woIyUBFbpQGKS0u1aHF/40WUDnek3fPOyD08H5Vng=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -773,6 +785,7 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
@@ -853,8 +866,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -877,9 +891,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@@ -1,18 +1,24 @@
package hrp
import (
"fmt"
"github.com/httprunner/httprunner/v4/hrp/internal/builtin"
"golang.org/x/net/context"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/httprunner/funplugin"
"github.com/rs/zerolog/log"
"github.com/httprunner/httprunner/v4/hrp/internal/boomer"
"github.com/httprunner/httprunner/v4/hrp/internal/json"
"github.com/httprunner/httprunner/v4/hrp/internal/sdk"
"github.com/rs/zerolog/log"
)
func NewBoomer(spawnCount int, spawnRate float64) *HRPBoomer {
func NewStandaloneBoomer(spawnCount int64, spawnRate float64) *HRPBoomer {
b := &HRPBoomer{
Boomer: boomer.NewStandaloneBoomer(spawnCount, spawnRate),
pluginsMutex: new(sync.RWMutex),
@@ -22,6 +28,27 @@ func NewBoomer(spawnCount int, spawnRate float64) *HRPBoomer {
return b
}
// NewMasterBoomer creates an HRPBoomer acting as the master node of a
// distributed load test, bound to the given host and port.
func NewMasterBoomer(masterBindHost string, masterBindPort int) *HRPBoomer {
	master := boomer.NewMasterBoomer(masterBindHost, masterBindPort)
	hb := &HRPBoomer{
		Boomer:       master,
		pluginsMutex: new(sync.RWMutex),
	}
	hb.hrpRunner = NewRunner(nil)
	return hb
}
// NewWorkerBoomer creates an HRPBoomer acting as a worker node of a
// distributed load test, connecting to the master at the given host/port.
func NewWorkerBoomer(masterHost string, masterPort int) *HRPBoomer {
	worker := boomer.NewWorkerBoomer(masterHost, masterPort)
	hb := &HRPBoomer{
		Boomer:       worker,
		pluginsMutex: new(sync.RWMutex),
	}
	hb.hrpRunner = NewRunner(nil)
	// set client transport for high concurrency load testing
	hb.hrpRunner.SetClientTransport(hb.GetSpawnCount(), hb.GetDisableKeepAlive(), hb.GetDisableCompression())
	return hb
}
type HRPBoomer struct {
*boomer.Boomer
hrpRunner *HRPRunner
@@ -29,6 +56,27 @@ type HRPBoomer struct {
pluginsMutex *sync.RWMutex // avoid data race
}
// InitBoomer configures the boomer instance from the active profile:
// outputs (console, Prometheus pushgateway), spawn count/rate, loop count,
// rate limiting, HTTP transport options, and optional CPU/memory profiling.
// It must be called after SetProfile and before running tasks.
func (b *HRPBoomer) InitBoomer() {
	// init output
	if !b.GetProfile().DisableConsoleOutput {
		b.AddOutput(boomer.NewConsoleOutput())
	}
	if b.GetProfile().PrometheusPushgatewayURL != "" {
		b.AddOutput(boomer.NewPrometheusPusherOutput(b.GetProfile().PrometheusPushgatewayURL, "hrp", b.GetMode()))
	}
	b.SetSpawnCount(b.GetProfile().SpawnCount)
	b.SetSpawnRate(b.GetProfile().SpawnRate)
	// only apply LoopCount when positive — a non-positive value presumably
	// means "run until stopped" (default is -1); confirm in boomer package
	if b.GetProfile().LoopCount > 0 {
		b.SetLoopCount(b.GetProfile().LoopCount)
	}
	b.SetRateLimiter(b.GetProfile().MaxRPS, b.GetProfile().RequestIncreaseRate)
	b.SetDisableKeepAlive(b.GetProfile().DisableKeepalive)
	b.SetDisableCompression(b.GetProfile().DisableCompression)
	b.SetClientTransport()
	b.EnableCPUProfile(b.GetProfile().CPUProfile, b.GetProfile().CPUProfileDuration)
	b.EnableMemoryProfile(b.GetProfile().MemoryProfile, b.GetProfile().MemoryProfileDuration)
}
func (b *HRPBoomer) SetClientTransport() *HRPBoomer {
// set client transport for high concurrency load testing
b.hrpRunner.SetClientTransport(b.GetSpawnCount(), b.GetDisableKeepAlive(), b.GetDisableCompression())
@@ -52,8 +100,12 @@ func (b *HRPBoomer) Run(testcases ...ITestCase) {
// report execution timing event
defer sdk.SendEvent(event.StartTiming("execution"))
var taskSlice []*boomer.Task
taskSlice := b.ConvertTestCasesToBoomerTasks(testcases...)
b.Boomer.Run(taskSlice...)
}
func (b *HRPBoomer) ConvertTestCasesToBoomerTasks(testcases ...ITestCase) (taskSlice []*boomer.Task) {
// load all testcases
testCases, err := LoadTestCases(testcases...)
if err != nil {
@@ -74,15 +126,159 @@ func (b *HRPBoomer) Run(testcases ...ITestCase) {
rendezvousList := initRendezvous(testcase, int64(b.GetSpawnCount()))
task := b.convertBoomerTask(testcase, rendezvousList)
taskSlice = append(taskSlice, task)
waitRendezvous(rendezvousList)
waitRendezvous(rendezvousList, b)
}
b.Boomer.Run(taskSlice...)
return taskSlice
}
// ParseTestCases renders each loaded TestCase into its serializable TCase
// form, resolving parameters through a dedicated case runner.
// NOTE: exits the process when a case runner cannot be created, matching the
// CLI-oriented error handling used elsewhere in this file.
func (b *HRPBoomer) ParseTestCases(testCases []*TestCase) []*TCase {
	var parsed []*TCase
	for _, testCase := range testCases {
		runner, err := b.hrpRunner.newCaseRunner(testCase)
		if err != nil {
			log.Error().Err(err).Msg("failed to create runner")
			os.Exit(1)
		}
		// expose the fully-expanded parameters in the parsed config
		runner.parsedConfig.Parameters = runner.parametersIterator.outParameters()
		tCase := &TCase{
			Config:    runner.parsedConfig,
			TestSteps: runner.testCase.ToTCase().TestSteps,
		}
		parsed = append(parsed, tCase)
	}
	return parsed
}
// TestCasesToBytes loads and parses the given testcases, then marshals the
// parsed result to JSON bytes for transfer to workers.
// Exits the process when loading fails; returns nil when marshaling fails.
func (b *HRPBoomer) TestCasesToBytes(testcases ...ITestCase) []byte {
	// load all testcases
	loaded, err := LoadTestCases(testcases...)
	if err != nil {
		log.Error().Err(err).Msg("failed to load testcases")
		os.Exit(1)
	}
	parsed := b.ParseTestCases(loaded)
	data, marshalErr := json.Marshal(parsed)
	if marshalErr != nil {
		log.Error().Err(marshalErr).Msg("failed to marshal testcases")
		return nil
	}
	return data
}
// BytesToTestCases deserializes JSON bytes (as produced by TestCasesToBytes)
// back into a slice of TCase. On unmarshal failure the error is logged and
// the partially-decoded (possibly nil) slice is returned.
func (b *HRPBoomer) BytesToTestCases(testCasesBytes []byte) []*TCase {
	var tCases []*TCase
	if err := json.Unmarshal(testCasesBytes, &tCases); err != nil {
		log.Error().Err(err).Msg("failed to unmarshal testcases")
	}
	return tCases
}
// Quit stops the underlying boomer instance, terminating the load test.
func (b *HRPBoomer) Quit() {
	b.Boomer.Quit()
}
// runTestCases materializes the TCases received from the master onto local
// disk (testcase JSON plus optional plugin file), normalizes the Prometheus
// pushgateway URL, then initializes the boomer from the given profile and
// runs the testcases. Errors are logged and abort the run (no process exit,
// since this runs on a live worker).
func (b *HRPBoomer) runTestCases(testCases []*TCase, profile *boomer.Profile) {
	var testcases []ITestCase
	for _, tc := range testCases {
		testcase, err := tc.toTestCase()
		if err != nil {
			log.Error().Err(err).Msg("failed to load testcases")
			return
		}
		// create temp dir to save testcase
		tempDir, err := ioutil.TempDir("", "hrp_testcases")
		if err != nil {
			log.Error().Err(err).Msg("failed to save testcases")
			return
		}
		testcase.Config.Path = filepath.Join(tempDir, "test-case.json")
		if testcase.Config.PluginSetting != nil {
			// persist the transferred plugin content next to the testcase
			testcase.Config.PluginSetting.Path = filepath.Join(tempDir, fmt.Sprintf("debugtalk.%s", testcase.Config.PluginSetting.Type))
			err = builtin.Bytes2File(testcase.Config.PluginSetting.Content, testcase.Config.PluginSetting.Path)
			if err != nil {
				log.Error().Err(err).Msg("failed to save plugin file")
				return
			}
		}
		err = builtin.Dump2JSON(testcase, testcase.Config.Path)
		if err != nil {
			log.Error().Err(err).Msg("failed to dump testcases")
			return
		}
		testcases = append(testcases, testcase)
	}
	if profile.PrometheusPushgatewayURL != "" {
		urlSlice := strings.Split(profile.PrometheusPushgatewayURL, ":")
		if len(urlSlice) != 2 {
			// malformed "host:port" value: disable the pushgateway output.
			// BUGFIX: previously the strings.Join below ran unconditionally
			// and clobbered this reset, restoring the malformed URL.
			profile.PrometheusPushgatewayURL = ""
		} else {
			if urlSlice[0] == "" {
				// no host given (e.g. ":9091"): fall back to the master host
				urlSlice[0] = b.Boomer.GetMasterHost()
			}
			profile.PrometheusPushgatewayURL = strings.Join(urlSlice, ":")
		}
	}
	b.SetProfile(profile)
	b.InitBoomer()
	log.Info().Interface("testcases", testcases).Interface("profile", profile).Msg("run tasks successful")
	b.Run(testcases...)
}
// rebalanceBoomer applies an updated profile to a running boomer and signals
// the core (via the rebalance channel) to re-spawn users with the new spawn
// count and rate. Runs on a worker in response to a rebalance task.
func (b *HRPBoomer) rebalanceBoomer(profile *boomer.Profile) {
	b.SetProfile(profile)
	b.SetSpawnCount(b.GetProfile().SpawnCount)
	b.SetSpawnRate(b.GetProfile().SpawnRate)
	// notify the running boomer to rebalance with the updated settings;
	// NOTE(review): this send blocks until the boomer core consumes it
	b.GetRebalanceChan() <- true
	log.Info().Interface("profile", profile).Msg("rebalance tasks successful")
}
// PollTasks runs on a worker: it consumes task messages pushed by the master
// and either runs the contained testcases or rebalances the current run.
// The loop exits when the boomer close channel fires or ctx is cancelled.
func (b *HRPBoomer) PollTasks(ctx context.Context) {
	for {
		select {
		case task := <-b.Boomer.GetTasksChan():
			// drop stale testcase tasks: if more tasks are already queued,
			// skip this one and only process the most recent
			if len(b.Boomer.GetTasksChan()) > 0 {
				continue
			}
			// TODO: filter out tasks that have already been transferred
			if task.TestCases != nil {
				testCases := b.BytesToTestCases(task.TestCases)
				go b.runTestCases(testCases, task.Profile)
			} else {
				go b.rebalanceBoomer(task.Profile)
			}
		case <-b.Boomer.GetCloseChan():
			return
		case <-ctx.Done():
			return
		}
	}
}
// PollTestCases runs on the master: whenever a parse request arrives on the
// parse channel, it serializes the configured testcase paths to JSON bytes
// and publishes them for workers. Returns when the boomer close channel
// fires or ctx is cancelled.
func (b *HRPBoomer) PollTestCases(ctx context.Context) {
	for {
		select {
		case <-b.Boomer.ParseTestCasesChan():
			var testcases []ITestCase
			paths := b.GetTestCasesPath()
			for i := range paths {
				// take the address of a fresh local so each entry points
				// to its own TestCasePath value
				casePath := TestCasePath(paths[i])
				testcases = append(testcases, &casePath)
			}
			b.TestCaseBytesChan() <- b.TestCasesToBytes(testcases...)
			log.Info().Msg("put testcase successful")
		case <-b.Boomer.GetCloseChan():
			return
		case <-ctx.Done():
			return
		}
	}
}
func (b *HRPBoomer) convertBoomerTask(testcase *TestCase, rendezvousList []*Rendezvous) *boomer.Task {
// init runner for testcase
// this runner is shared by multiple session runners
@@ -112,6 +308,9 @@ func (b *HRPBoomer) convertBoomerTask(testcase *TestCase, rendezvousList []*Rend
// reset start time only once
once := sync.Once{}
// update session variables mutex
mutex := sync.Mutex{}
return &boomer.Task{
Name: testcase.Config.Name,
Weight: testcase.Config.Weight,
@@ -122,9 +321,11 @@ func (b *HRPBoomer) convertBoomerTask(testcase *TestCase, rendezvousList []*Rend
// init session runner
sessionRunner := caseRunner.newSession()
mutex.Lock()
if parametersIterator.HasNext() {
sessionRunner.updateSessionVariables(parametersIterator.Next())
}
mutex.Unlock()
startTime := time.Now()
for _, step := range testcase.TestSteps {

View File

@@ -27,7 +27,7 @@ func TestBoomerStandaloneRun(t *testing.T) {
}
testcase2 := TestCasePath(demoTestCaseWithPluginJSONPath)
b := NewBoomer(2, 1)
b := NewStandaloneBoomer(2, 1)
go b.Run(testcase1, &testcase2)
time.Sleep(5 * time.Second)
b.Quit()

View File

@@ -7,6 +7,7 @@ import (
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"golang.org/x/net/context"
"github.com/httprunner/httprunner/v4/hrp"
"github.com/httprunner/httprunner/v4/hrp/internal/boomer"
@@ -21,7 +22,7 @@ var boomCmd = &cobra.Command{
Example: ` $ hrp boom demo.json # run specified json testcase file
$ hrp boom demo.yaml # run specified yaml testcase file
$ hrp boom examples/ # run testcases in specified folder`,
Args: cobra.MinimumNArgs(1),
Args: cobra.MinimumNArgs(0),
PreRun: func(cmd *cobra.Command, args []string) {
boomer.SetUlimit(10240) // ulimit -n 10240
if !strings.EqualFold(logLevel, "DEBUG") {
@@ -35,26 +36,75 @@ var boomCmd = &cobra.Command{
path := hrp.TestCasePath(arg)
paths = append(paths, &path)
}
hrpBoomer := makeHRPBoomer()
hrpBoomer.Run(paths...)
// if set profile, the priority is higher than the other commands
if boomArgs.profile != "" {
err := builtin.LoadFile(boomArgs.profile, &boomArgs.Profile)
if err != nil {
log.Error().Err(err).Msg("failed to load profile")
os.Exit(1)
}
}
// init boomer
var hrpBoomer *hrp.HRPBoomer
if boomArgs.master {
hrpBoomer = hrp.NewMasterBoomer(boomArgs.masterBindHost, boomArgs.masterBindPort)
} else if boomArgs.worker {
hrpBoomer = hrp.NewWorkerBoomer(boomArgs.masterHost, boomArgs.masterPort)
} else {
hrpBoomer = hrp.NewStandaloneBoomer(boomArgs.SpawnCount, boomArgs.SpawnRate)
}
hrpBoomer.SetProfile(&boomArgs.Profile)
ctx := hrpBoomer.EnableGracefulQuit(context.Background())
// run boomer
switch hrpBoomer.GetMode() {
case "master":
hrpBoomer.SetTestCasesPath(args)
if boomArgs.autoStart {
hrpBoomer.SetAutoStart()
hrpBoomer.SetExpectWorkers(boomArgs.expectWorkers, boomArgs.expectWorkersMaxWait)
hrpBoomer.SetSpawnCount(boomArgs.SpawnCount)
hrpBoomer.SetSpawnRate(boomArgs.SpawnRate)
}
if boomArgs.autoStart {
hrpBoomer.InitBoomer()
} else {
go hrpBoomer.StartServer(ctx, boomArgs.masterHttpAddress)
}
go hrpBoomer.PollTestCases(ctx)
hrpBoomer.RunMaster()
case "worker":
if boomArgs.ignoreQuit {
hrpBoomer.SetIgnoreQuit()
}
go hrpBoomer.PollTasks(ctx)
hrpBoomer.RunWorker()
case "standalone":
if venv != "" {
hrpBoomer.SetPython3Venv(venv)
}
hrpBoomer.InitBoomer()
hrpBoomer.Run(paths...)
}
},
}
type BoomArgs struct {
SpawnCount int `json:"spawn-count,omitempty" yaml:"spawn-count,omitempty"`
SpawnRate float64 `json:"spawn-rate,omitempty" yaml:"spawn-rate,omitempty"`
MaxRPS int64 `json:"max-rps,omitempty" yaml:"max-rps,omitempty"`
LoopCount int64 `json:"loop-count,omitempty" yaml:"loop-count,omitempty"`
RequestIncreaseRate string `json:"request-increase-rate,omitempty" yaml:"request-increase-rate,omitempty"`
MemoryProfile string `json:"memory-profile,omitempty" yaml:"memory-profile,omitempty"`
MemoryProfileDuration time.Duration `json:"memory-profile-duration" yaml:"memory-profile-duration"`
CPUProfile string `json:"cpu-profile,omitempty" yaml:"cpu-profile,omitempty"`
CPUProfileDuration time.Duration `json:"cpu-profile-duration,omitempty" yaml:"cpu-profile-duration,omitempty"`
PrometheusPushgatewayURL string `json:"prometheus-gateway,omitempty" yaml:"prometheus-gateway,omitempty"`
DisableConsoleOutput bool `json:"disable-console-output,omitempty" yaml:"disable-console-output,omitempty"`
DisableCompression bool `json:"disable-compression,omitempty" yaml:"disable-compression,omitempty"`
DisableKeepalive bool `json:"disable-keepalive,omitempty" yaml:"disable-keepalive,omitempty"`
profile string
boomer.Profile
profile string
master bool
worker bool
ignoreQuit bool
masterHost string
masterPort int
masterBindHost string
masterBindPort int
masterHttpAddress string
autoStart bool
expectWorkers int
expectWorkersMaxWait int
}
var boomArgs BoomArgs
@@ -64,7 +114,7 @@ func init() {
boomCmd.Flags().Int64Var(&boomArgs.MaxRPS, "max-rps", 0, "Max RPS that boomer can generate, disabled by default.")
boomCmd.Flags().StringVar(&boomArgs.RequestIncreaseRate, "request-increase-rate", "-1", "Request increase rate, disabled by default.")
boomCmd.Flags().IntVar(&boomArgs.SpawnCount, "spawn-count", 1, "The number of users to spawn for load testing")
boomCmd.Flags().Int64Var(&boomArgs.SpawnCount, "spawn-count", 1, "The number of users to spawn for load testing")
boomCmd.Flags().Float64Var(&boomArgs.SpawnRate, "spawn-rate", 1, "The rate for spawning users")
boomCmd.Flags().Int64Var(&boomArgs.LoopCount, "loop-count", -1, "The specify running cycles for load testing")
boomCmd.Flags().StringVar(&boomArgs.MemoryProfile, "mem-profile", "", "Enable memory profiling.")
@@ -76,6 +126,17 @@ func init() {
boomCmd.Flags().BoolVar(&boomArgs.DisableCompression, "disable-compression", false, "Disable compression")
boomCmd.Flags().BoolVar(&boomArgs.DisableKeepalive, "disable-keepalive", false, "Disable keepalive")
boomCmd.Flags().StringVar(&boomArgs.profile, "profile", "", "profile for load testing")
boomCmd.Flags().BoolVar(&boomArgs.master, "master", false, "master of distributed testing")
boomCmd.Flags().StringVar(&boomArgs.masterBindHost, "master-bind-host", "127.0.0.1", "Interfaces (hostname, ip) that hrp master should bind to. Only used when running with --master. Defaults to * (all available interfaces).")
boomCmd.Flags().IntVar(&boomArgs.masterBindPort, "master-bind-port", 5557, "Port that hrp master should bind to. Only used when running with --master. Defaults to 5557.")
boomCmd.Flags().StringVar(&boomArgs.masterHttpAddress, "master-http-address", ":9771", "Interfaces (ip:port) that hrp master should control by user. Only used when running with --master. Defaults to *:9771.")
boomCmd.Flags().BoolVar(&boomArgs.worker, "worker", false, "worker of distributed testing")
boomCmd.Flags().BoolVar(&boomArgs.ignoreQuit, "ignore-quit", false, "ignores quit from master (only when --worker is used)")
boomCmd.Flags().StringVar(&boomArgs.masterHost, "master-host", "127.0.0.1", "Host or IP address of hrp master for distributed load testing.")
boomCmd.Flags().IntVar(&boomArgs.masterPort, "master-port", 5557, "The port to connect to that is used by the hrp master for distributed load testing.")
boomCmd.Flags().BoolVar(&boomArgs.autoStart, "auto-start", false, "Starts the test immediately. Use --spawn-count and --spawn-rate to control user count and increase rate")
boomCmd.Flags().IntVar(&boomArgs.expectWorkers, "expect-workers", 1, "How many workers master should expect to connect before starting the test (only when --autostart is used)")
boomCmd.Flags().IntVar(&boomArgs.expectWorkersMaxWait, "expect-workers-max-wait", 120, "How many workers master should expect to connect before starting the test (only when --autostart is used")
}
func makeHRPBoomer() *hrp.HRPBoomer {
@@ -87,26 +148,12 @@ func makeHRPBoomer() *hrp.HRPBoomer {
os.Exit(1)
}
}
hrpBoomer := hrp.NewBoomer(boomArgs.SpawnCount, boomArgs.SpawnRate)
hrpBoomer.SetRateLimiter(boomArgs.MaxRPS, boomArgs.RequestIncreaseRate)
if boomArgs.LoopCount > 0 {
hrpBoomer.SetLoopCount(boomArgs.LoopCount)
}
if !boomArgs.DisableConsoleOutput {
hrpBoomer.AddOutput(boomer.NewConsoleOutput())
}
if boomArgs.PrometheusPushgatewayURL != "" {
hrpBoomer.AddOutput(boomer.NewPrometheusPusherOutput(boomArgs.PrometheusPushgatewayURL, "hrp", hrpBoomer.GetMode()))
}
hrpBoomer.SetDisableKeepAlive(boomArgs.DisableKeepalive)
hrpBoomer.SetDisableCompression(boomArgs.DisableCompression)
hrpBoomer.SetClientTransport()
hrpBoomer := hrp.NewStandaloneBoomer(boomArgs.SpawnCount, boomArgs.SpawnRate)
if venv != "" {
hrpBoomer.SetPython3Venv(venv)
}
hrpBoomer.EnableCPUProfile(boomArgs.CPUProfile, boomArgs.CPUProfileDuration)
hrpBoomer.EnableMemoryProfile(boomArgs.MemoryProfile, boomArgs.MemoryProfileDuration)
hrpBoomer.EnableGracefulQuit()
hrpBoomer.SetProfile(&boomArgs.Profile)
hrpBoomer.EnableGracefulQuit(context.Background())
hrpBoomer.InitBoomer()
return hrpBoomer
}

View File

@@ -32,7 +32,8 @@ type TConfig struct {
Timeout float64 `json:"timeout,omitempty" yaml:"timeout,omitempty"` // global timeout in seconds
Export []string `json:"export,omitempty" yaml:"export,omitempty"`
Weight int `json:"weight,omitempty" yaml:"weight,omitempty"`
Path string `json:"path,omitempty" yaml:"path,omitempty"` // testcase file path
Path string `json:"path,omitempty" yaml:"path,omitempty"` // testcase file path
PluginSetting *PluginConfig `json:"plugin,omitempty" yaml:"plugin,omitempty"` // plugin config
}
// WithVariables sets variables for current testcase.
@@ -98,7 +99,7 @@ func (c *TConfig) SetWebSocket(times, interval, timeout, size int64) {
}
type ThinkTimeConfig struct {
Strategy thinkTimeStrategy `json:"strategy,omitempty" yaml:"strategy,omitempty"` // default、random、limit、multiply、ignore
Strategy thinkTimeStrategy `json:"strategy,omitempty" yaml:"strategy,omitempty"` // default、random、multiply、ignore
Setting interface{} `json:"setting,omitempty" yaml:"setting,omitempty"` // random(map): {"min_percentage": 0.5, "max_percentage": 1.5}; 10、multiply(float64): 1.5
Limit float64 `json:"limit,omitempty" yaml:"limit,omitempty"` // limit think time no more than specific time, ignore if value <= 0
}
@@ -172,3 +173,9 @@ const (
)
var thinkTimeDefaultRandom = map[string]float64{"min_percentage": 0.5, "max_percentage": 1.5}
type PluginConfig struct {
Path string
Type string // bin、so、py
Content []byte
}

View File

@@ -1,6 +1,8 @@
package boomer
import (
"github.com/httprunner/httprunner/v4/hrp/internal/json"
"golang.org/x/net/context"
"math"
"os"
"os/signal"
@@ -25,9 +27,15 @@ const (
// A Boomer is used to run tasks.
type Boomer struct {
mode Mode
masterHost string
masterPort int
mode Mode
localRunner *localRunner
localRunner *localRunner
workerRunner *workerRunner
masterRunner *masterRunner
testcasePath []string
cpuProfile string
cpuProfileDuration time.Duration
@@ -39,6 +47,44 @@ type Boomer struct {
disableCompression bool
}
// Profile bundles every load-test tuning parameter (spawn count/rate, rate
// limits, loop count, profiling switches and output toggles) into one unit
// so it can be serialized and shipped between master and workers.
// The triple json/yaml/mapstructure tags let the same struct round-trip
// through CLI flags, config files and wire messages.
type Profile struct {
	SpawnCount               int64         `json:"spawn-count,omitempty" yaml:"spawn-count,omitempty" mapstructure:"spawn-count,omitempty"`
	SpawnRate                float64       `json:"spawn-rate,omitempty" yaml:"spawn-rate,omitempty" mapstructure:"spawn-rate,omitempty"`
	MaxRPS                   int64         `json:"max-rps,omitempty" yaml:"max-rps,omitempty" mapstructure:"max-rps,omitempty"`
	LoopCount                int64         `json:"loop-count,omitempty" yaml:"loop-count,omitempty" mapstructure:"loop-count,omitempty"`
	RequestIncreaseRate      string        `json:"request-increase-rate,omitempty" yaml:"request-increase-rate,omitempty" mapstructure:"request-increase-rate,omitempty"`
	MemoryProfile            string        `json:"memory-profile,omitempty" yaml:"memory-profile,omitempty" mapstructure:"memory-profile,omitempty"`
	MemoryProfileDuration    time.Duration `json:"memory-profile-duration,omitempty" yaml:"memory-profile-duration,omitempty" mapstructure:"memory-profile-duration,omitempty"`
	CPUProfile               string        `json:"cpu-profile,omitempty" yaml:"cpu-profile,omitempty" mapstructure:"cpu-profile,omitempty"`
	CPUProfileDuration       time.Duration `json:"cpu-profile-duration,omitempty" yaml:"cpu-profile-duration,omitempty" mapstructure:"cpu-profile-duration,omitempty"`
	PrometheusPushgatewayURL string        `json:"prometheus-gateway,omitempty" yaml:"prometheus-gateway,omitempty" mapstructure:"prometheus-gateway,omitempty"`
	DisableConsoleOutput     bool          `json:"disable-console-output,omitempty" yaml:"disable-console-output,omitempty" mapstructure:"disable-console-output,omitempty"`
	DisableCompression       bool          `json:"disable-compression,omitempty" yaml:"disable-compression,omitempty" mapstructure:"disable-compression,omitempty"`
	DisableKeepalive         bool          `json:"disable-keepalive,omitempty" yaml:"disable-keepalive,omitempty" mapstructure:"disable-keepalive,omitempty"`
}
// GetProfile returns the profile held by whichever runner matches the
// current mode: master, worker, or (by default) standalone.
func (b *Boomer) GetProfile() *Profile {
	if b.mode == DistributedMasterMode {
		return b.masterRunner.profile
	}
	if b.mode == DistributedWorkerMode {
		return b.workerRunner.profile
	}
	return b.localRunner.profile
}
// SetProfile installs the given profile on the runner that matches the
// current mode: master, worker, or (by default) standalone.
func (b *Boomer) SetProfile(profile *Profile) {
	if b.mode == DistributedMasterMode {
		b.masterRunner.profile = profile
		return
	}
	if b.mode == DistributedWorkerMode {
		b.workerRunner.profile = profile
		return
	}
	b.localRunner.profile = profile
}
// SetMode only accepts boomer.DistributedMasterMode、boomer.DistributedWorkerMode and boomer.StandaloneMode.
func (b *Boomer) SetMode(mode Mode) {
switch mode {
@@ -69,13 +115,147 @@ func (b *Boomer) GetMode() string {
}
// NewStandaloneBoomer returns a new Boomer, which can run without master.
func NewStandaloneBoomer(spawnCount int, spawnRate float64) *Boomer {
func NewStandaloneBoomer(spawnCount int64, spawnRate float64) *Boomer {
return &Boomer{
mode: StandaloneMode,
localRunner: newLocalRunner(spawnCount, spawnRate),
}
}
// NewMasterBoomer returns a new Boomer.
func NewMasterBoomer(masterBindHost string, masterBindPort int) *Boomer {
return &Boomer{
masterRunner: newMasterRunner(masterBindHost, masterBindPort),
mode: DistributedMasterMode,
}
}
// NewWorkerBoomer returns a Boomer configured as a distributed worker that
// connects to the master at the given host and port.
func NewWorkerBoomer(masterHost string, masterPort int) *Boomer {
	b := &Boomer{
		mode:       DistributedWorkerMode,
		masterHost: masterHost,
		masterPort: masterPort,
	}
	b.workerRunner = newWorkerRunner(masterHost, masterPort)
	return b
}
// SetAutoStart auto start to load testing
func (b *Boomer) SetAutoStart() {
b.masterRunner.autoStart = true
}
// RunMaster start to run master runner
func (b *Boomer) RunMaster() {
b.masterRunner.run()
}
// RunWorker start to run worker runner
func (b *Boomer) RunWorker() {
b.workerRunner.run()
}
// TestCaseBytesChan gets test case bytes chan
func (b *Boomer) TestCaseBytesChan() chan []byte {
return b.masterRunner.testCaseBytes
}
// ProfileToBytes serializes a Profile to JSON so it can be shipped to
// workers over the wire. On a marshal error it logs the failure and
// returns nil rather than propagating the error.
func ProfileToBytes(profile *Profile) []byte {
	profileBytes, err := json.Marshal(profile)
	if err != nil {
		// fix: the message previously said "testcases", but this function
		// marshals a profile (copy-paste from the testcase variant).
		log.Error().Err(err).Msg("failed to marshal profile")
		return nil
	}
	return profileBytes
}
// BytesToProfile deserializes JSON bytes back into a Profile. On a decode
// error the failure is logged and the (possibly nil) partial result is
// returned, mirroring ProfileToBytes' best-effort contract.
func BytesToProfile(profileBytes []byte) *Profile {
	var profile *Profile
	err := json.Unmarshal(profileBytes, &profile)
	if err != nil {
		// fix: the message previously said "testcases", but this function
		// unmarshals a profile (copy-paste from the testcase variant).
		log.Error().Err(err).Msg("failed to unmarshal profile")
	}
	return profile
}
// GetTasksChan getsTasks chan
func (b *Boomer) GetTasksChan() chan *task {
switch b.mode {
case DistributedWorkerMode:
return b.workerRunner.tasksChan
default:
return nil
}
}
func (b *Boomer) GetRebalanceChan() chan bool {
switch b.mode {
case DistributedWorkerMode:
return b.workerRunner.rebalance
default:
return nil
}
}
func (b *Boomer) SetTestCasesPath(paths []string) {
b.testcasePath = paths
}
func (b *Boomer) GetTestCasesPath() []string {
return b.testcasePath
}
func (b *Boomer) ParseTestCasesChan() chan bool {
return b.masterRunner.parseTestCasesChan
}
// GetMasterHost returns master IP
func (b *Boomer) GetMasterHost() string {
return b.masterHost
}
// GetState returns the state of the runner that matches the current mode
// (worker, master, or standalone).
func (b *Boomer) GetState() int32 {
	if b.mode == DistributedWorkerMode {
		return b.workerRunner.getState()
	}
	if b.mode == DistributedMasterMode {
		return b.masterRunner.getState()
	}
	return b.localRunner.getState()
}
// SetSpawnCount forwards the target user count to the runner that matches
// the current mode (master, worker, or standalone).
func (b *Boomer) SetSpawnCount(spawnCount int64) {
	if b.mode == DistributedMasterMode {
		b.masterRunner.setSpawnCount(spawnCount)
		return
	}
	if b.mode == DistributedWorkerMode {
		b.workerRunner.setSpawnCount(spawnCount)
		return
	}
	b.localRunner.setSpawnCount(spawnCount)
}
// SetSpawnRate sets spawn rate
func (b *Boomer) SetSpawnRate(spawnRate float64) {
switch b.mode {
case DistributedMasterMode:
b.masterRunner.setSpawnRate(spawnRate)
case DistributedWorkerMode:
b.workerRunner.setSpawnRate(spawnRate)
default:
b.localRunner.setSpawnRate(spawnRate)
}
}
// SetExpectWorkers sets expect workers while load testing
func (b *Boomer) SetExpectWorkers(expectWorkers int, expectWorkersMaxWait int) {
b.masterRunner.setExpectWorkers(expectWorkers, expectWorkersMaxWait)
}
// SetRateLimiter creates rate limiter with the given limit and burst.
func (b *Boomer) SetRateLimiter(maxRPS int64, requestIncreaseRate string) {
var rateLimiter RateLimiter
@@ -98,8 +278,14 @@ func (b *Boomer) SetRateLimiter(maxRPS int64, requestIncreaseRate string) {
}
if rateLimiter != nil {
b.localRunner.rateLimitEnabled = true
b.localRunner.rateLimiter = rateLimiter
switch b.mode {
case DistributedWorkerMode:
b.workerRunner.rateLimitEnabled = true
b.workerRunner.rateLimiter = rateLimiter
case StandaloneMode:
b.localRunner.rateLimitEnabled = true
b.localRunner.rateLimiter = rateLimiter
}
}
}
@@ -108,6 +294,11 @@ func (b *Boomer) SetDisableKeepAlive(disableKeepalive bool) {
b.disableKeepalive = disableKeepalive
}
// SetIgnoreQuit not quit while master quit
func (b *Boomer) SetIgnoreQuit() {
b.workerRunner.ignoreQuit = true
}
// SetDisableCompression disable compression to prevent the Transport from requesting compression with an "Accept-Encoding: gzip"
func (b *Boomer) SetDisableCompression(disableCompression bool) {
b.disableCompression = disableCompression
@@ -124,12 +315,26 @@ func (b *Boomer) GetDisableCompression() bool {
// SetLoopCount set loop count for test.
func (b *Boomer) SetLoopCount(loopCount int64) {
// total loop count for testcase, it will be evenly distributed to each worker
b.localRunner.loop = &Loop{loopCount: loopCount * int64(b.localRunner.spawnCount)}
switch b.mode {
case DistributedWorkerMode:
b.workerRunner.loop = &Loop{loopCount: loopCount * b.workerRunner.getSpawnCount()}
case DistributedMasterMode:
b.masterRunner.loop = &Loop{loopCount: loopCount * b.masterRunner.getSpawnCount()}
case StandaloneMode:
b.localRunner.loop = &Loop{loopCount: loopCount * b.localRunner.getSpawnCount()}
}
}
// AddOutput accepts outputs which implements the boomer.Output interface.
func (b *Boomer) AddOutput(o Output) {
b.localRunner.addOutput(o)
switch b.mode {
case DistributedWorkerMode:
b.workerRunner.addOutput(o)
case DistributedMasterMode:
b.masterRunner.addOutput(o)
case StandaloneMode:
b.localRunner.addOutput(o)
}
}
// EnableCPUProfile will start cpu profiling after run.
@@ -145,13 +350,16 @@ func (b *Boomer) EnableMemoryProfile(memoryProfile string, duration time.Duratio
}
// EnableGracefulQuit catch SIGINT and SIGTERM signals to quit gracefully
func (b *Boomer) EnableGracefulQuit() {
func (b *Boomer) EnableGracefulQuit(ctx context.Context) context.Context {
ctx, cancel := context.WithCancel(ctx)
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
go func() {
<-c
b.Quit()
cancel()
}()
return ctx
}
// Run accepts a slice of Task and connects to the locust master.
@@ -169,13 +377,45 @@ func (b *Boomer) Run(tasks ...*Task) {
}
}
b.localRunner.setTasks(tasks)
b.localRunner.start()
switch b.mode {
case DistributedWorkerMode:
log.Info().Msg("running in worker mode")
b.workerRunner.setTasks(tasks)
b.workerRunner.start()
case StandaloneMode:
log.Info().Msg("running in standalone mode")
b.localRunner.setTasks(tasks)
b.localRunner.start()
default:
log.Error().Err(errors.New("Invalid mode, expected boomer.DistributedMode or boomer.StandaloneMode"))
}
}
func (b *Boomer) SetTasks(tasks ...*Task) {
switch b.mode {
case DistributedWorkerMode:
log.Info().Msg("set tasks to worker")
b.workerRunner.setTasks(tasks)
case StandaloneMode:
log.Info().Msg("set tasks to standalone")
b.localRunner.setTasks(tasks)
default:
log.Error().Err(errors.New("Invalid mode, expected boomer.DistributedMode or boomer.StandaloneMode"))
}
}
// RecordTransaction reports a transaction stat.
func (b *Boomer) RecordTransaction(name string, success bool, elapsedTime int64, contentSize int64) {
b.localRunner.stats.transactionChan <- &transaction{
var runnerStats *requestStats
switch b.mode {
case DistributedWorkerMode:
runnerStats = b.workerRunner.stats
case DistributedMasterMode:
runnerStats = b.masterRunner.stats
case StandaloneMode:
runnerStats = b.localRunner.stats
}
runnerStats.transactionChan <- &transaction{
name: name,
success: success,
elapsedTime: elapsedTime,
@@ -185,7 +425,16 @@ func (b *Boomer) RecordTransaction(name string, success bool, elapsedTime int64,
// RecordSuccess reports a success.
func (b *Boomer) RecordSuccess(requestType, name string, responseTime int64, responseLength int64) {
b.localRunner.stats.requestSuccessChan <- &requestSuccess{
var runnerStats *requestStats
switch b.mode {
case DistributedWorkerMode:
runnerStats = b.workerRunner.stats
case DistributedMasterMode:
runnerStats = b.masterRunner.stats
case StandaloneMode:
runnerStats = b.localRunner.stats
}
runnerStats.requestSuccessChan <- &requestSuccess{
requestType: requestType,
name: name,
responseTime: responseTime,
@@ -195,7 +444,16 @@ func (b *Boomer) RecordSuccess(requestType, name string, responseTime int64, res
// RecordFailure reports a failure.
func (b *Boomer) RecordFailure(requestType, name string, responseTime int64, exception string) {
b.localRunner.stats.requestFailureChan <- &requestFailure{
var runnerStats *requestStats
switch b.mode {
case DistributedWorkerMode:
runnerStats = b.workerRunner.stats
case DistributedMasterMode:
runnerStats = b.masterRunner.stats
case StandaloneMode:
runnerStats = b.localRunner.stats
}
runnerStats.requestFailureChan <- &requestFailure{
requestType: requestType,
name: name,
responseTime: responseTime,
@@ -203,19 +461,108 @@ func (b *Boomer) RecordFailure(requestType, name string, responseTime int64, exc
}
}
// Start begins a distributed load test on the master using the given
// profile. It rejects the request when a test is already running, or when
// the previous test is still waiting for workers to stop.
func (b *Boomer) Start(profile *Profile) error {
	if b.masterRunner.isStarting() {
		return errors.New("already started")
	}
	if b.masterRunner.isStopping() {
		// fix: error strings are lowercase by Go convention
		return errors.New("please wait for all workers to finish")
	}
	// fix: parameter renamed Args -> profile (exported-style name for a
	// local parameter); callers are unaffected since arguments are positional.
	b.SetSpawnCount(profile.SpawnCount)
	b.SetSpawnRate(profile.SpawnRate)
	b.SetProfile(profile)
	return b.masterRunner.start()
}
// ReBalance re-distributes a running load test across workers using the
// new profile. It fails if no test has been started yet. The rebalance
// error is both logged here and returned to the caller.
func (b *Boomer) ReBalance(profile *Profile) error {
	if !b.masterRunner.isStarting() {
		return errors.New("no start")
	}
	// fix: parameter renamed Args -> profile (exported-style name for a
	// local parameter); callers are unaffected since arguments are positional.
	b.SetSpawnCount(profile.SpawnCount)
	b.SetSpawnRate(profile.SpawnRate)
	b.SetProfile(profile)
	err := b.masterRunner.rebalance()
	if err != nil {
		log.Error().Err(err).Msg("failed to rebalance")
	}
	return err
}
// Stop stops to load test
func (b *Boomer) Stop() error {
return b.masterRunner.stop()
}
// GetWorkersInfo gets workers information
func (b *Boomer) GetWorkersInfo() []WorkerNode {
return b.masterRunner.server.getAllWorkers()
}
// GetMasterInfo reports the master runner's state, the number of connected
// workers, and the target user count as a generic map.
func (b *Boomer) GetMasterInfo() map[string]interface{} {
	return map[string]interface{}{
		"state":        b.masterRunner.getState(),
		"workers":      b.masterRunner.server.getClientsLength(),
		"target_users": b.masterRunner.getSpawnCount(),
	}
}
func (b *Boomer) GetCloseChan() chan bool {
switch b.mode {
case DistributedWorkerMode:
return b.workerRunner.closeChan
case DistributedMasterMode:
return b.masterRunner.closeChan
default:
return b.localRunner.closeChan
}
}
// Quit will send a quit message to the master.
func (b *Boomer) Quit() {
b.localRunner.stop()
switch b.mode {
case DistributedWorkerMode:
b.workerRunner.stop()
b.workerRunner.close()
case DistributedMasterMode:
b.masterRunner.close()
case StandaloneMode:
b.localRunner.stop()
}
}
func (b *Boomer) GetSpawnDoneChan() chan struct{} {
return b.localRunner.spawnDone
switch b.mode {
case DistributedWorkerMode:
return b.workerRunner.controller.getSpawnDone()
case DistributedMasterMode:
return b.masterRunner.controller.getSpawnDone()
default:
return b.localRunner.controller.getSpawnDone()
}
}
func (b *Boomer) GetSpawnCount() int {
return b.localRunner.spawnCount
switch b.mode {
case DistributedWorkerMode:
return int(b.workerRunner.getSpawnCount())
case DistributedMasterMode:
return int(b.masterRunner.getSpawnCount())
default:
return int(b.localRunner.getSpawnCount())
}
}
func (b *Boomer) ResetStartTime() {
b.localRunner.stats.total.resetStartTime()
switch b.mode {
case DistributedWorkerMode:
b.workerRunner.stats.total.resetStartTime()
case DistributedMasterMode:
b.masterRunner.stats.total.resetStartTime()
default:
b.localRunner.stats.total.resetStartTime()
}
}

View File

@@ -0,0 +1,335 @@
package boomer
import (
"context"
"fmt"
"runtime"
"sync"
"sync/atomic"
"time"
"golang.org/x/oauth2"
"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/oauth"
"google.golang.org/grpc/metadata"
"github.com/httprunner/httprunner/v4/hrp/internal/boomer/data"
"github.com/httprunner/httprunner/v4/hrp/internal/boomer/grpc/messager"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
// grpcClient is the worker-side gRPC endpoint: it owns the connection and
// bidirectional stream to the master, plus buffered channels that decouple
// message I/O from the worker runner.
type grpcClient struct {
	messager.MessageClient // embedded generated client for unary RPCs (Register, SignOut, ...)
	masterHost             string
	masterPort             int
	identity               string // nodeID
	config                 *grpcClientConfig // connection/stream state, guarded by its own mutex
	fromMaster             chan *genericMessage // messages received from the master (buffered)
	toMaster               chan *genericMessage // messages queued for the master (buffered)
	disconnectedChan       chan bool // signaled by send() after a "quit" message goes out
	shutdownChan           chan bool // closed by close() to stop the send/recv loops
	failCount              int32 // consecutive send failures; accessed atomically
}
// grpcClientConfig holds the mutable connection state of a grpcClient.
// The bidirectional stream can be replaced at runtime, so all access to it
// must go through getBiStreamClient/setBiStreamClient under the RWMutex.
type grpcClientConfig struct {
	// ctx is used for the lifetime of the stream that may need to be canceled
	// on client shutdown.
	ctx       context.Context
	ctxCancel context.CancelFunc
	conn      *grpc.ClientConn
	biStream  messager.Message_BidirectionalStreamingMessageClient
	mutex     sync.RWMutex // guards biStream
}
const token = "httprunner-secret-token"
// unaryInterceptor attaches the shared OAuth token to every unary RPC that
// does not already carry per-RPC credentials, then invokes the call and
// logs its timing.
func unaryInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	var credsConfigured bool
	for _, o := range opts {
		// grpc.PerRPCCredentials returns a value of this type, so a value
		// type assertion is the correct check.
		_, ok := o.(grpc.PerRPCCredsCallOption)
		if ok {
			credsConfigured = true
			break
		}
	}
	if !credsConfigured {
		opts = append(opts, grpc.PerRPCCredentials(oauth.NewOauthAccess(&oauth2.Token{
			AccessToken: token,
		})))
	}
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	end := time.Now()
	// fix: "Basic" is not a valid time layout — Format("Basic") prints the
	// literal string "Basic". Use RFC3339 for both timestamps.
	logger("RPC: %s, start time: %s, end time: %s, err: %v", method, start.Format(time.RFC3339), end.Format(time.RFC3339), err)
	return err
}
// wrappedStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and
// SendMsg method call.
type wrappedStream struct {
grpc.ClientStream
}
func (w *wrappedStream) RecvMsg(m interface{}) error {
logger("Receive a message (Type: %T) at %v", m, time.Now().Format(time.RFC3339))
return w.ClientStream.RecvMsg(m)
}
func (w *wrappedStream) SendMsg(m interface{}) error {
logger("Send a message (Type: %T) at %v", m, time.Now().Format(time.RFC3339))
return w.ClientStream.SendMsg(m)
}
func newWrappedStream(s grpc.ClientStream) grpc.ClientStream {
return &wrappedStream{s}
}
// extractToken pulls the value stored under the token key out of the
// incoming gRPC metadata; ok is false when no such value exists.
func extractToken(ctx context.Context) (tkn string, ok bool) {
	md, found := metadata.FromIncomingContext(ctx)
	if !found {
		return "", false
	}
	values := md[token]
	if len(values) == 0 {
		return "", false
	}
	return values[0], true
}
// streamInterceptor attaches the shared OAuth token to every streaming RPC
// that does not already carry per-RPC credentials, then wraps the stream
// so sends and receives are logged.
func streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	var credsConfigured bool
	for _, o := range opts {
		// fix: grpc.PerRPCCredentials returns a value of type
		// grpc.PerRPCCredsCallOption, so the previous pointer assertion
		// (*grpc.PerRPCCredsCallOption) could never match; assert on the
		// value type, consistent with unaryInterceptor.
		_, ok := o.(grpc.PerRPCCredsCallOption)
		if ok {
			credsConfigured = true
			break
		}
	}
	if !credsConfigured {
		opts = append(opts, grpc.PerRPCCredentials(oauth.NewOauthAccess(&oauth2.Token{
			AccessToken: token,
		})))
	}
	s, err := streamer(ctx, desc, cc, method, opts...)
	if err != nil {
		return nil, err
	}
	return newWrappedStream(s), nil
}
func (c *grpcClientConfig) getBiStreamClient() messager.Message_BidirectionalStreamingMessageClient {
c.mutex.RLock()
defer c.mutex.RUnlock()
return c.biStream
}
func (c *grpcClientConfig) setBiStreamClient(s messager.Message_BidirectionalStreamingMessageClient) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.biStream = s
}
// newClient builds a grpcClient for the master at masterHost:masterPort,
// identified by the given nodeID. The returned client is not yet connected;
// call start() to dial. The embedded context bounds the lifetime of the
// bidirectional stream and is canceled by close().
func newClient(masterHost string, masterPort int, identity string) (client *grpcClient) {
	log.Info().Msg("Boomer is built with grpc support.")
	// Initiate the stream with a context that supports cancellation.
	ctx, cancel := context.WithCancel(context.Background())
	client = &grpcClient{
		masterHost: masterHost,
		masterPort: masterPort,
		identity:   identity,
		// channels are buffered so slow I/O does not block the runner
		fromMaster:       make(chan *genericMessage, 100),
		toMaster:         make(chan *genericMessage, 100),
		disconnectedChan: make(chan bool),
		shutdownChan:     make(chan bool),
		config: &grpcClientConfig{
			ctx:       ctx,
			ctxCancel: cancel,
			mutex:     sync.RWMutex{},
		},
	}
	return client
}
// start dials the master over TLS and instantiates the generated message
// client. The CA certificate is resolved via data.Path, which materializes
// the embedded cert under ~/.hrp/x509 on first use. A bad certificate is
// fatal (log.Fatal exits the process); a failed dial is returned as error.
func (c *grpcClient) start() (err error) {
	addr := fmt.Sprintf("%v:%v", c.masterHost, c.masterPort)
	// Create tls based credential.
	creds, err := credentials.NewClientTLSFromFile(data.Path("x509/ca_cert.pem"), "www.httprunner.com")
	if err != nil {
		log.Fatal().Msg(fmt.Sprintf("failed to load credentials: %v", err))
	}
	opts := []grpc.DialOption{
		// oauth.NewOauthAccess requires the configuration of transport
		// credentials.
		grpc.WithTransportCredentials(creds),
		// NOTE(review): 32 * 10e9 evaluates to 3.2e11 (~320 GB) as the max
		// receive message size — presumably "effectively unlimited"; confirm
		// this was not intended to be 32 MB.
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(32 * 10e9)),
		grpc.WithUnaryInterceptor(unaryInterceptor),
		grpc.WithStreamInterceptor(streamInterceptor),
		// reconnect with capped exponential backoff
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff: backoff.Config{
				BaseDelay:  1 * time.Second,
				Multiplier: 1.2,
				MaxDelay:   3 * time.Second,
			},
			MinConnectTimeout: 3 * time.Second,
		}),
	}
	c.config.conn, err = grpc.Dial(addr, opts...)
	if err != nil {
		log.Error().Err(err).Msg("failed to connect")
		return err
	}
	c.MessageClient = messager.NewMessageClient(c.config.conn)
	return nil
}
// register announces this worker (node ID, OS, arch) to the master with a
// one-second timeout. A non-"0" response code from the master is surfaced
// as an error carrying the master's message.
func (c *grpcClient) register(ctx context.Context) error {
	reqCtx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	req := &messager.RegisterRequest{NodeID: c.identity, Os: runtime.GOOS, Arch: runtime.GOARCH}
	res, err := c.Register(reqCtx, req)
	if err != nil {
		return err
	}
	if res.Code == "0" {
		return nil
	}
	return errors.New(res.Message)
}
// signOut deregisters this worker from the master with a one-second
// timeout. A non-"0" response code from the master is surfaced as an error
// carrying the master's message.
func (c *grpcClient) signOut(ctx context.Context) error {
	reqCtx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	res, err := c.SignOut(reqCtx, &messager.SignOutRequest{NodeID: c.identity})
	if err != nil {
		return err
	}
	if res.Code == "0" {
		return nil
	}
	return errors.New(res.Message)
}
// newBiStreamClient opens the bidirectional gRPC stream to the master,
// carrying this worker's identity in the outgoing metadata, stores the
// stream for the send/recv loops, and resets the consecutive-failure
// counter on success.
func (c *grpcClient) newBiStreamClient() (err error) {
	md := metadata.New(map[string]string{token: c.identity})
	ctx := metadata.NewOutgoingContext(c.config.ctx, md)
	biStream, err := c.BidirectionalStreamingMessage(ctx)
	if err != nil {
		return err
	}
	// reset failCount
	atomic.StoreInt32(&c.failCount, 0)
	// set bidirectional stream client
	c.config.setBiStreamClient(biStream)
	// fix: use the project's structured logger instead of the builtin
	// println, which is implementation-defined and meant for bootstrapping.
	log.Info().Msg("successful to establish bidirectional stream with master, press Ctrl+c to quit.")
	return nil
}
func (c *grpcClient) recvChannel() chan *genericMessage {
return c.fromMaster
}
// recv continuously drains the bidirectional stream from the master and
// forwards messages addressed to this node onto the fromMaster channel.
// While the stream is absent or erroring it backs off by sleeping one
// second per attempt; the loop exits when shutdownChan is closed.
func (c *grpcClient) recv() {
	for {
		select {
		case <-c.shutdownChan:
			return
		default:
			// stream may not be established yet (or is being rebuilt);
			// avoid busy-spinning
			if c.config.getBiStreamClient() == nil {
				time.Sleep(1 * time.Second)
				continue
			}
			msg, err := c.config.getBiStreamClient().Recv()
			if err != nil {
				time.Sleep(1 * time.Second)
				//log.Error().Err(err).Msg("failed to get message")
				continue
			}
			if msg == nil {
				continue
			}
			// drop anything not addressed to this node
			if msg.NodeID != c.identity {
				log.Info().
					Str("nodeID", msg.NodeID).
					Str("type", msg.Type).
					Interface("data", msg.Data).
					Msg(fmt.Sprintf("not for me(%s)", c.identity))
				continue
			}
			// hand the full payload (including profile and tasks) to the runner
			c.fromMaster <- &genericMessage{
				Type:    msg.Type,
				Profile: msg.Profile,
				Data:    msg.Data,
				NodeID:  msg.NodeID,
				Tasks:   msg.Tasks,
			}
			log.Info().
				Str("nodeID", msg.NodeID).
				Str("type", msg.Type).
				Interface("data", msg.Data).
				Interface("tasks", msg.Tasks).
				Msg("receive data from master")
		}
	}
}
func (c *grpcClient) sendChannel() chan *genericMessage {
return c.toMaster
}
func (c *grpcClient) send() {
for {
select {
case <-c.shutdownChan:
return
case msg := <-c.toMaster:
c.sendMessage(msg)
// We may send genericMessage to master.
switch msg.Type {
case "quit":
c.disconnectedChan <- true
}
}
}
}
// sendMessage pushes a single message onto the bidirectional stream.
// Failures (including a missing stream) are not returned to the caller;
// they only increment failCount atomically, while a success resets it.
func (c *grpcClient) sendMessage(msg *genericMessage) {
	log.Info().
		Str("nodeID", msg.NodeID).
		Str("type", msg.Type).
		Interface("data", msg.Data).
		Msg("send data to server")
	if c.config.getBiStreamClient() == nil {
		atomic.AddInt32(&c.failCount, 1)
		return
	}
	// NOTE(review): only Type/Data/NodeID are forwarded here, while recv()
	// also reads Profile and Tasks — confirm workers never need to send those.
	err := c.config.getBiStreamClient().Send(&messager.StreamRequest{Type: msg.Type, Data: msg.Data, NodeID: msg.NodeID})
	switch err {
	case nil:
		atomic.StoreInt32(&c.failCount, 0)
	default:
		//log.Error().Err(err).Interface("genericMessage", *msg).Msg("failed to send message")
		atomic.AddInt32(&c.failCount, 1)
	}
}
func (c *grpcClient) disconnectedChannel() chan bool {
return c.disconnectedChan
}
// close stops the send/recv loops (by closing shutdownChan), cancels the
// stream context, and tears down the underlying connection. It must be
// called at most once: a second call would panic on the closed channel.
func (c *grpcClient) close() {
	close(c.shutdownChan)
	c.config.ctxCancel()
	if c.config.conn != nil {
		// best-effort teardown: the Close error is intentionally ignored
		c.config.conn.Close()
	}
}

View File

@@ -0,0 +1 @@
package boomer

View File

@@ -0,0 +1,62 @@
/*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package data
import (
"embed"
"os"
"path/filepath"
"github.com/httprunner/httprunner/v4/hrp/internal/builtin"
)
// hrpPath is .hrp directory under the user directory.
var hrpPath string
//go:embed x509/*
var x509Dir embed.FS
// init resolves the user's ~/.hrp directory and pre-creates its x509
// subdirectory so that Path can materialize embedded certificates there.
func init() {
	home, err := os.UserHomeDir()
	if err != nil {
		// best-effort: hrpPath stays empty, so Path will resolve relative
		// names against the current working directory instead
		return
	}
	hrpPath = filepath.Join(home, ".hrp")
	_ = builtin.EnsureFolderExists(filepath.Join(hrpPath, "x509"))
}
// Path returns the absolute path the given relative file or directory path
// resolves to under the .hrp home directory (absolute inputs pass through
// unchanged). If the file does not exist yet it is materialized from the
// embedded x509 data. Best-effort: on any read/write error the computed
// path is returned even though the file may be missing on disk.
func Path(rel string) (destPath string) {
	destPath = rel
	if !filepath.IsAbs(rel) {
		destPath = filepath.Join(hrpPath, rel)
	}
	if !builtin.IsFilePathExists(destPath) {
		// rel doubles as the lookup key into the embedded FS,
		// e.g. "x509/ca_cert.pem"
		content, err := x509Dir.ReadFile(rel)
		if err != nil {
			return
		}
		// NOTE(review): 0o644 is world-readable — fine for certificates, but
		// confirm private keys written through this path shouldn't be 0o600.
		err = os.WriteFile(destPath, content, 0o644)
		if err != nil {
			return
		}
	}
	return
}

View File

@@ -0,0 +1,6 @@
This directory contains x509 certificates and associated private keys used in
examples.
How were these test certs/keys generated?
------------------------------------------
Run `./create.sh`

View File

@@ -0,0 +1,34 @@
-----BEGIN CERTIFICATE-----
MIIF6jCCA9KgAwIBAgIJAKg0eWNBWobLMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV
BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD
MRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTAeFw0yMjA3MTAwNDMwMTJaFw0zMjA3
MDcwNDMwMTJaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD
U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTCCAiIw
DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANpgfrPDdZAqqXrRbjmiXYbBdCvL
Oh4B/1p6yNulFspn8wTm0V1V1pPqUBWolSOpSUuxT9XDnkGq89loYaMGnRm8V6un
tNLQx3zzLjLoVeyEajztIIg1p/k9Boe4g90eLbF/Dirg9tOI1yw50Ay0v/Wvp6/d
+h3kTAXXfB4Rc78dh40/FlnEjqeywLObHQftxojC4CcwvMLVqxEZgz8/ZUoBw1Rd
I7muiMItMw8vyf3yhSpTntNoa1dqZ6a1tZzdvPlnvdP3ByEdh7MI7PKthlLZhPoU
zjFhI3+vgHq+U8yuyEpbBILBJqQ2Kd5H7x6EGiRMpeCWzIdl/PwcXhgwuUSDVUTy
6w/qKTmhzPytIiC/wyuHcX8Cvhe0Ch54x1YAPK07BB9dnaLVsStAsw7O22eSvWG7
aAFFaXUhBGWvkRz/7bWlAlRL/Rt87oXrjF0hCDotcaWRMnH5mSY9N9LsGbLd0iVP
H5zAKFr3iytF9F0T1FcXcKcMEJbjFeUP0lKUpZ5J/Ei9Nw9AQ72xHE7mqJj/UQNf
G/hfCNGVhlcsmQmwGdtobUHrIOJYkESs1H/91r/rDYO4s0z5PEKKOx1xFPnhPcs7
3/0ZYDocCjqIKcigN2Zowr6KgSB4l+t0xjZZp+2QjfMQ22e0NZkc+cjsrcLmJQ1n
jE4aVM/Vl2leNesjAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
FgQU/BcimdJ/xrkakVLfuYPzEa22aY0wgYAGA1UdIwR5MHeAFPwXIpnSf8a5GpFS
37mD8xGttmmNoVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV
BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1zZXJ2ZXJfY2GC
CQCoNHljQVqGyzAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBAJmU
v0gjSkzzRIGEQTA9jZzOrZq6H+Gh6r+UtFzVtRmN9Xga0myNuxzXNkxI/Ew0nToR
uTYvnQBE7JkyEVELjN5QXByXNme/km5yP6mZJs6shF4u3szZ9E/zSJvVZ6Mp1Dw1
LJj/WLyJnord0zyYxkpX2ukTpvb5D+UsDu4QxJ7Kkq1YZUFss6/wHsUgnheI64Ez
DV8FoqhiMmIwcI9QdNY3udNCvp3oHSgi777WEDoZUIJZEF/rO/i/oojuGWjYBha9
+jO6E4jhqGE9ZwvXYOx9agMZJtZ7N4a+7tuBmmYkB8r+A60uIqocni8fzU0F7hdN
R3RIS3kWW+o/4Xz8a3fE19+RFSZd4vUgS1U+8eTeVvuCw4KaAQsEUDv8pEH6GjD+
xQwtPbg4grufTmC1a3PmEjeeYagP0BdSbuvRqXCl4i6QK/Yp2lPUWmGVC27+X0UL
xXibxUfcgT26eIAddepO2RUVG6QAtYC6GMgCbANAIVm37Sc8JV+quF/gloBIKCY9
dSi+x8wOTAsmJkceyAt+UOhayn1+u6+6YGqIiRt4/wBpuZj0UyvaZLmDcxdNXDBc
cZAAUwvcsa0yt/QiF7IE+/GS1mja0NcuzBjamnf/LqTcgQin9bEpVTw5suKUqmCR
BdUlu7drONjYIhMb3zY/QFmTGD7rPu/DaHE63ThL
-----END CERTIFICATE-----

View File

@@ -0,0 +1,52 @@
-----BEGIN PRIVATE KEY-----
MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDaYH6zw3WQKql6
0W45ol2GwXQryzoeAf9aesjbpRbKZ/ME5tFdVdaT6lAVqJUjqUlLsU/Vw55BqvPZ
aGGjBp0ZvFerp7TS0Md88y4y6FXshGo87SCINaf5PQaHuIPdHi2xfw4q4PbTiNcs
OdAMtL/1r6ev3fod5EwF13weEXO/HYeNPxZZxI6nssCzmx0H7caIwuAnMLzC1asR
GYM/P2VKAcNUXSO5rojCLTMPL8n98oUqU57TaGtXamemtbWc3bz5Z73T9wchHYez
COzyrYZS2YT6FM4xYSN/r4B6vlPMrshKWwSCwSakNineR+8ehBokTKXglsyHZfz8
HF4YMLlEg1VE8usP6ik5ocz8rSIgv8Mrh3F/Ar4XtAoeeMdWADytOwQfXZ2i1bEr
QLMOzttnkr1hu2gBRWl1IQRlr5Ec/+21pQJUS/0bfO6F64xdIQg6LXGlkTJx+Zkm
PTfS7Bmy3dIlTx+cwCha94srRfRdE9RXF3CnDBCW4xXlD9JSlKWeSfxIvTcPQEO9
sRxO5qiY/1EDXxv4XwjRlYZXLJkJsBnbaG1B6yDiWJBErNR//da/6w2DuLNM+TxC
ijsdcRT54T3LO9/9GWA6HAo6iCnIoDdmaMK+ioEgeJfrdMY2WaftkI3zENtntDWZ
HPnI7K3C5iUNZ4xOGlTP1ZdpXjXrIwIDAQABAoICAQDMwwwq7MywaIBP7E5pdkgy
EfUnF0EgYAkawuTRp2POWFfzsaaA2PsB6QQ8ur1VGefjNJhCPVGIC47ovUpHvezS
89pU10TjI+bZz3/zNg1TX/nptQL7FSyytDkKS8ZBMInx08vqAtUOFlKEYpUlRNp1
ucYHTqG3I5jxJVN5Mi4Q9tRiadRASeDld+PexUQcaiTtmaTqunVUT1s/Bmgdhwkn
sq1/znGwKuqLACzPQaUqHBwnSw8y9ccoyVn1ZI6tTvFh/pdtSEUEFRdnlafwCStZ
RiK9B4MrpATQNjTHYu1akEy4A84f+JKOCUeK6HJbb8y/WqtzApM3JjdoAgVss0sT
Kb7bP0cXkG+RnP0+XAklT5/KidUX6At8KavI5/oQA9JY/qQs6xEtUyrDHhAxfpgm
2pTkyUcW71QLJKlNH1i6j7it0u0s/6Ezjo/MF9pfF5yqBxCPskNDJEzTYXNCzMp8
ki1F47ypwQawpVTQqP0Bgjqujvta64CWl7qt8FL7cKu0068ykHpN27qXQhYSNk5s
jax6V429npjCARRUVl+0+jiyP5LQmBcDFQbmPfe5p9CZcZiZ1EQnT/MKTKR/pTVc
IyEBaUIGGy/OojQreIOO39HYIBaV0sNvnrvBO9Fjbg60mRZDY91BARhoQAjHPMGC
5xFrfggLjW4a6j0SM6vJOQKCAQEA+3agIxYArZ2y7qNudc5jBI+eJejE9kAofznP
WP5cs9HnQnI5zSUGdX3ZPAdC18m8TLDCdtTVh9o/sCadGTIIlsGmFiae3yI93mN8
eVw73gtNW3qYJGZe+yZwsTZ+33rG+z6YFBhOGn+EUF7h4McPOLAl94EQmjRmwwy8
pfXlyPGle5NfoBBN5qSBwJtmBNaF+TxoeP+zmOxnF0HZpBIot0lEZDwN83OL8GC/
KLlti0mByUJs4e7dcmv+xBKFsUBD5AUMMaVHlh0ALqpGg4vmMqUzX/vAoJHiHHt4
iWo2eqy/dGEYwSoKJpwLVferb+S9fTWmdZEruUQluSMi87JXrwKCAQEA3lEPk1RF
TtZHfO5Twj3m5UsdMb6Ch2wmMzGBhTI50QzXRafIOygnHKy481btIHE3e6QJAJzR
eLe4ahyNaGSLuZ+VajXsCX4jzbZdKWQJm451d7l+XjVSAVw12hjMToUyAuvV6dHo
CaCVP3s22oDQ9wPHGny6v0gY8dOE030AWqS7G3zRiT69wkjkLWdeAFEQjY5cxKhh
XgpiJTlIROJ3EPH3Hm7dwzJL3OTb2eP5pC3lbR39QJ14KYIIKTqq4WZd4L0Zdt7d
mbvjhZcNkrdXP0fSPDgkjjEJ3lYUlGfay/As2UEieQymTznXIQrCIokos3/oQfkH
L6vTsrcAwS6MzQKCAQEAi9qI65qUG/smBgUNLSXw+htqCIlx6cb6/u9G+6bUJgpq
xRDERuz9r6Cjjfg3283OFRUFwpNSgvEGFNEU9GtYTYg79/vYxh7ELAhGtTRv82lz
x5niPfRVhPb3HAhD/cTKH/fLGvn9jk03aH+svpfXRl7pbsLwWeMk9/wAe4jMGLsU
nyrytxH6UXlS1K1Yyv4ImvpW3FzSJQ3ttAiio9aZoH52NA0WcTzlKnaUOnEOlLX4
Idf4uJthu/6GPcRTaKZmW83W31GeA8XzUQDQoN7Q03//l7Vrh6I7ED43Zq2UyRuE
i5Ro8R2RcbG9uD07ssqT/Kw2/RIVMD/Pfy0khka87wKCAQEA1eycl0F0+9q3qaDP
2k6kmyl/azmN8u//hi1yG5BsEBxSHcXIqBwIHtCZnBaeUSSApin/O6aq7oWjIABf
lf+CcFj+dthyS+QkYbPEy6pmkFgx8sX8snyOb56idz57gmcq66KyEbAZnwH1+8L9
0p439imdcoBpVtzym+jUnIlhSNfQ8C9Ylb9Y69YmMwaPbrCSxBQkclwwbUSCkp0f
TKG6vwSGrbMzE7yXQXS7lVyJARHk/e3onz+nvBFS9xFsEz7kwPhVw4vLIz6oPglP
V0my28Kpq6a+jlDj1R1x6ihRYwK2tUu291JTylK3DyWCD6d6EdfXz3vpDVdDe2ob
gMjhVQKCAQBWmWrIdyglsetIKAT/j6Z4hJSWA6L77ii1gMeMv6Cw2XKc19gm8fnF
DfPh531pNaKjxBgwJTz6UrtVq1RcOqY/EWxDKeW5WU79RMV0duXE20EWnMqN7eXp
gZLso8ChZtz5BF4UAeXHfIskIt1KCnF6ubbmyUTa9aeJcqUwcr9Ymtu3fy5e1uCP
PdRxkpU/Q+xhR85g46GMIbjzwruTSMV7btuGh5WBjPeV2OBS6+aj2bWG3yeVAwar
w1zj0Vbxw7VMcblPm1EQ0hyZ/Q24ZSoLZL2l4FoaOhPXaYj1HuKQjiPbabj2zUZY
8xnynnp57i3BHHHbjY4R02Mqsfi1nNoN
-----END PRIVATE KEY-----

View File

@@ -0,0 +1,34 @@
-----BEGIN CERTIFICATE-----
MIIF6jCCA9KgAwIBAgIJAKRZXNeAdHXzMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV
BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD
MRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTAeFw0yMjA3MTAwNDMwMTNaFw0zMjA3
MDcwNDMwMTNaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD
U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTCCAiIw
DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANMfUOCyV55rvt/nELLym4CSL1/X
eg7NPWoXcAkjZt0P4j5/PRzf1i5kvleb9KXjKLyxBFd+S1+FnGg34Cq5YZWwkpfc
23qNFZobzk11QvhMJs+mJDGRYMmQ3T274wv2QQJ2zD5Qx5ZjOpDHLHauxW/3lD3t
D9f52svKuoVoeOHRR3kDYOmPj3BHJJu0RdLxWA0HwVnpy2dqnJyyMU+czm800DL+
HfaQFPwsPvdgQnlVRa0J9GMAtY4vqpRhgvoN7kKidG75i0BRG1BNrgFhZ/Qackmx
hLvCYCQqBHUAkg1rFXr6FdsOcK+GUD9N5Hvq24v3U1nsRIo7MH56EdhERsGKFuYK
pVppBZXnNT89ji3TDZ1j/TourAdi9XiPbiqMvZrF8VEwcnewLYnfIfpv03w8TDlt
NoGVy6WIWtL9LC4blH6/riyrVnC+J1sElPiUqebtsoP/vuTLTBoM4kaCGeDjRmR1
Q0EZDSMFODk6BaMjrigyab+KaoHc98aX740vTEl1VTvtFCeGCgbbWaBBI2z/qz1r
MNYMvGM68G7vbH3thM1KGWGnL7CTYjpz8nAvQliUxhUvE1LUK0LMdpl2pMrvjDog
f7h8/ZCAzwN8QrknYpVvgU6CKtDZz/YwZg49ew7sdUIIorntQ1hL0j1RwnGxWKJ+
GKuwPkSL6jAHauPDAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
FgQUWNurDpJ7V480NBKoiMUlFBG5Pa4wgYAGA1UdIwR5MHeAFFjbqw6Se1ePNDQS
qIjFJRQRuT2uoVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV
BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1jbGllbnRfY2GC
CQCkWVzXgHR18zAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBAHpl
MizBOtEWJ7WGhCWFpbrZJPMx+vQ0ixY2Uz/wjj2jiE7O4kIR45OxgQws/LdG/D8v
nhumeau8JjYPXZHF2wVa/CbF183OHzJEgL7DRteL5qfR+simSMWdXkKXrGK6riCl
IWT2CET1u//fa9I0245KdBDlzmkxpYUB2If+jOYKIzJ3o041zWGVx7+uQ8wQuNSU
6WWNP+g9k4hgNPO8kPkbOq+YX+mcxgKslKP2HfIonzeTtLcnvBCDY7fsag9wVfTT
bP84k3c5ocvQIta/S+3rSLo6Q1EvYclV8qkI0meap91DisCVsKWekNQgnRoWjMrZ
QpSuFjnfM6rWRBlZD+Vq47WaxzxkWarOX9+XuHXf1K5VyAVbe9n7QLeXFm42eRBr
lZtwTH7aDifdyuGzG3/xu06NzLSFi+G4WedG46j3GVGj0Uche3sCx5K5HE5dIJQN
iQ7hV7hAkPyCkY8uviQWwA91ffPIJJb/bBSySo354IgRtfmPqhpfLrf75lUuy9kE
/HgRHZf916JL4A52XEX7S66JcZGqtram2/Vo64ksjnyM9ZRKE+jWRIS8YYAnDmkX
NZCAQFD3CE0zlwQQLCPtMqeSk7MrXj58y80e3mUZoZQoPWYuBIktlbCmCiRKmNGm
WHrY9obxbjh5CBJb3Ilior3lnm24S9M9bClr6RpY
-----END CERTIFICATE-----

View File

@@ -0,0 +1,52 @@
-----BEGIN PRIVATE KEY-----
MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDTH1Dgsleea77f
5xCy8puAki9f13oOzT1qF3AJI2bdD+I+fz0c39YuZL5Xm/Sl4yi8sQRXfktfhZxo
N+AquWGVsJKX3Nt6jRWaG85NdUL4TCbPpiQxkWDJkN09u+ML9kECdsw+UMeWYzqQ
xyx2rsVv95Q97Q/X+drLyrqFaHjh0Ud5A2Dpj49wRySbtEXS8VgNB8FZ6ctnapyc
sjFPnM5vNNAy/h32kBT8LD73YEJ5VUWtCfRjALWOL6qUYYL6De5ConRu+YtAURtQ
Ta4BYWf0GnJJsYS7wmAkKgR1AJINaxV6+hXbDnCvhlA/TeR76tuL91NZ7ESKOzB+
ehHYREbBihbmCqVaaQWV5zU/PY4t0w2dY/06LqwHYvV4j24qjL2axfFRMHJ3sC2J
3yH6b9N8PEw5bTaBlculiFrS/SwuG5R+v64sq1ZwvidbBJT4lKnm7bKD/77ky0wa
DOJGghng40ZkdUNBGQ0jBTg5OgWjI64oMmm/imqB3PfGl++NL0xJdVU77RQnhgoG
21mgQSNs/6s9azDWDLxjOvBu72x97YTNShlhpy+wk2I6c/JwL0JYlMYVLxNS1CtC
zHaZdqTK74w6IH+4fP2QgM8DfEK5J2KVb4FOgirQ2c/2MGYOPXsO7HVCCKK57UNY
S9I9UcJxsViifhirsD5Ei+owB2rjwwIDAQABAoICAQDDrPTDLciz1l1VHM6HbQDf
i55JEGfarDNNz2dRsPQ30+73yeqUhon2+fzJKoz367DoIpFJno6xfB7ZIWCteKCP
otZb1qG91mG9MiRl+lcV107piq1lG78/UvsbqrbncVgTtpPa9ffm1RWE9nWpkpcA
DdHiC4RxwuwdkkqKN6hCdDvwV0dNcneZsvalMdK9jl7zxMpaUazqrw901FuL1GQp
AiQt/wU6b5RjnYbGtPsnhfdMSDuwPwoHPPq3CCHjLWI1dGjCKpv8ArB0H2s1cFhv
EMv4rYW+mIuPOTpkTyEPOr7v+jajj6C1rqFV6xXoHGdcNOGWKLvl+rIZp34+mhmQ
vQRkmcOzoSkdTERAOtYfKYcylzBch6WHmgVE2ZRntiQTAp56pXxUq5lEnAtTc0jo
3J2fItVgzT9ZGxNOgzA5VOoQA1as2Xr+v6YeUibn4/I8KKHV/FXTFk7ojb3EObF1
n39OZXw6a28QNP9/7TYmB7F41fzHcRPzl48lx4rPXyUXOwYh0qwqTixmgl/HcGD7
i2XUyJ0CHi/uzvxo6Bqg+VMdQzfqT5npf22axays9xRk0nxwvY1wHwiRQCHcT8dU
ovoLTZJFWzNik7EthMgPT+3Ec0eAs4j1N03Hb7KXUVBn70QChf2uaDEuAXJh/pOB
T8OsSN+9k0/VF3Wxni/TgQKCAQEA8DIam0wpwzabwKdpWntdhGpP6ak+o++bsNyL
hyBBT7RlmbNtKtfZAdUNT1PicYZ/yFR+4DhrfPHsIMAdTuP5uq6JpBVWYb132Hv3
9rXZiyhRPZJmL0ZIRcY/K6jqNHlQJp5ov9yAVmFEChPdI0JagVGy52a/lbctcKaQ
lSFMSaVl1EKqXM8LljgANRTRv9Hr1Owx/IdjT+M1FqjHXWO51AWPxDAmINIo9UrR
SAOK8/kMyULG8FvEhk/g0KtpwQcW4HRZVeATyrOIcxBmSfAQ46+fpfs6qa4AB2U7
lpxDWPuY43DUZMY7uLTEoFraya3dj42mwyvKK4UeyiKn6uNI4wKCAQEA4QN+usMh
InAdPC9cMQyvjZ5asWqmTGk6jCvUJWvr8R2z0Si8nbPuh7ciz8g6rS+ggqym3e0w
AWZt+rlXvrC9cpfvERDxQosFaWD7w0+h8h+URtRJJchlLPMxjaxtHx3mhArfsgTI
MkIFHS4Q7p+H3IyeqlALTVFwnLBNaD9RSI6T/Zn0AOhxqMTDnjnonADL2wbK1pfw
GTsjk4FNNVmSOY+ZRbobgTkAegbyra8+oa+GR97U/hT8Pii6FwX+iR7PXjBjgvHD
m7AKkcdorvleFH3Yxz1Z9Fje8rAOGf6hWJFTU1qMmaLNdvATSJu5ne5CrSN2m3FK
qr2uPmrIJdRPoQKCAQAZIKS36lfUHDpfBSR4Wr+FwrlpcFMlQ0O+VNQj5rPuaqjW
U3bwLHR/RJKH4famebOUeYJsYnqcL5LMOkzWm/LcHLY5fCH1R6Tp+M4P+SYw8J7P
GimmeGvHIN4q6xjVNHu2DoxWxfKHFtXPWBSiQ6bEMI/OtWkFeIxAZKxrbXhVm//z
HKZF30MPC/y5kNwAfS12sN7p1CAHk3VSUYXJt00RaSOJGqBifpnaT2FlbzlyHHPB
+kJlkrQUePbD3arKjrtN794IpdBsPCviHa0Vvw+FQjIpYwbYCWPnYifBscc539g2
su8FO9ezkvWe8OJChvXOtrrjYAleVCbMbqOyZuSRAoIBAQCnrfkUqDDa/v1qSkjD
bJauTGF9cOJ4crpklozDTkdHKUFFDrxwMRQCIuFYQfgn8yQD/TFklEp/4Jr4ioHu
4rpq2PoYl62STxM7UkCLbZ5bVlki5zOTamCrPJei4el3lMqhf5Dvkky11ykEc72+
dTfDjS738Cpb9eKbgW5Nz1F9ZnK2O7Hvs0hv4iF8md7T0mwXzln9zL/prX53f5XP
ue4T4wTvRx8UDyxhwye5cqyTxL+mc1H5/h1zHNqAKcFi4YjaweiGPi/spyVZOWaz
bbVEQ/v1jaypQEj0RWpcyLnnzHRx2zqHiyDeD03vf8y0+kbJy3GpqKVh03Qzo1N/
jVXBAoIBAGEvsOGIBFUiDLihDEIUTBdQHzzKXN+zjzxUnmcrLn2MKBx8gjlpgZrO
pAgK0depxWA9RAuQBgqqodi8CY82h6kMaK7ANYOfgC+UDMCJ+XJKqKaa4MG4xOiv
BqJZCYIhB5ALs4DDLwWNCYQqVg3ErVk7hDgKQugQviBXGQFbEwkSHgf6MUxbe99/
DkSgkil3TWKcVE82auY4ud04tJOBIFl+fnMysF99FqOLJTwqHDK5pC6A63zyBHgm
3hL5vjRn6DWb8wBgQo6/K8pbYQ+7dADGbNvQxUj7nqjhH3I+vEBHAg+oVt3ZPr96
+3KzjPLML31OD8TN22FUzsYcdw2prEU=
-----END PRIVATE KEY-----

View File

@@ -0,0 +1,32 @@
-----BEGIN CERTIFICATE-----
MIIFcTCCA1mgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx
CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV
BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIyMDcxMDA0MzAxNVoXDTMyMDcwNzA0MzAx
NVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL
BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTCCAiIwDQYJKoZIhvcN
AQEBBQADggIPADCCAgoCggIBAK36523v5SEM+J8ReNt3USwylERoUMqygoQRTIy7
ipzfO2dmo5OANFsJtPb3CH+YB6kS9llAioLa9UNrD6SBlR23No/QJeXBiXgpUXAE
DCLhQ/aj0fEy8AEnW+a6mM5jmsEHOy/O3q/KF1JdjNA1T7HuBS6cIvp5+7rF1rG9
tzJLLrXwUZLKlMjdCDuLxp/qtYUoH81CIuveWAODH3oad559HgD6UBgDRntdT902
IUnTejCAOY9Q0yTlcMMbz+FEMZ43Xq4E89YQ7Mel+xkb0lL7H6mNabvfZTX+5qm9
RDtxrNvLH+hZ+OPOp2qrfyJBaj/yP+4TTN4pC4y5Vqkq7sZ1fjfx9gZTsQLAvmr6
/c/Z59IlsAIvttbam7FFNrwVlWsD5uRP2DZyKXTjRRCA8NnBo9fltD1FbKKevcqu
PilMiyg8+dJnhKxOeMlw1WSx0h8FFU+jf4MFFX+qFsJB7Ecss1bWpnoYsaeKGMG7
mcOx5weglRlVccDQollZBXoIM/pDKJNrAbA8otKXbGGl1LJY20HZLNYPIRRlH2pe
YoLyhUi1AKFMecHxcGOIxlHVZ0gfEoWcChYvlWi6M/09c2Qtqq/QfKhD7DAXmMDS
xYFskyAAYSxgX2Q/5Y6mP+qRzDxT0Qm5JyN+UV0laqQ1KBA11+BF8RKriMGYSXy4
afDJAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFNv1CHU7dlRoE4Lh
/elJzmaSFpU5MA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD
AjANBgkqhkiG9w0BAQsFAAOCAgEAoF0Jc770+dMNNiDyKsGOPgUJBsYMTyGqPmpd
7Nu7wmI+PBlgDkTvVZjU3EO/Y5Ez6fum5gCtf7OKPIYLfV95WBxgkkEvBEYaX4To
eL9nr9jP9AQ9sZocPTSCrlVrIeOT3tV683BY+N8sfHW6xIeI9tqTXTExCKmwuKyZ
+qyokn35Kkydyn47J4bclPD56UWctQinO2cXm2RVHkJlmQSoFREdb0S3xiFt8aAW
olB2xWMCwXb7LDyi5M0HCvz3lGErCTnpL9GBPjsWCSZOK55D/BSxL4NRSBqzsv4N
25SQOP2NgIqabRsYqYhTCRWK0n1h3IBAVh6fVQ2CCStd4gkuDUepTfM+R7mcYR9g
u2hn4kn+1i8y+Uj0z6yN48/i9Cnz3Sq/e8Z48Rbjut5Rx32ldFvHIkdtFjjkgv47
LbVKaYH4uqQF2xs3tAPuqq/QXNOn8Ie9yHv0MeJiPymIPAk6GBrUOA/Br4kof15v
uEbxeR/nnrzm+eyWMn4dsE0n7GA6wm2gMGENK4E8WK0sYujIAPtG8LHfShEv5f/j
77+3tAcigec39bau4yTkXBV8op1iMPBtEejLD0B5RKZig17Bfdw5v2TP+yGbzD5d
PwhAxn4aVK8zXFdYmwNfXNXBpLaEILxYFpeExaA9Gr5Mn/h+vD987GTW9F4fBhht
MtkfvRA=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,51 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKAIBAAKCAgEArfrnbe/lIQz4nxF423dRLDKURGhQyrKChBFMjLuKnN87Z2aj
k4A0Wwm09vcIf5gHqRL2WUCKgtr1Q2sPpIGVHbc2j9Al5cGJeClRcAQMIuFD9qPR
8TLwASdb5rqYzmOawQc7L87er8oXUl2M0DVPse4FLpwi+nn7usXWsb23MksutfBR
ksqUyN0IO4vGn+q1hSgfzUIi695YA4Mfehp3nn0eAPpQGANGe11P3TYhSdN6MIA5
j1DTJOVwwxvP4UQxnjdergTz1hDsx6X7GRvSUvsfqY1pu99lNf7mqb1EO3Gs28sf
6Fn4486naqt/IkFqP/I/7hNM3ikLjLlWqSruxnV+N/H2BlOxAsC+avr9z9nn0iWw
Ai+21tqbsUU2vBWVawPm5E/YNnIpdONFEIDw2cGj1+W0PUVsop69yq4+KUyLKDz5
0meErE54yXDVZLHSHwUVT6N/gwUVf6oWwkHsRyyzVtamehixp4oYwbuZw7HnB6CV
GVVxwNCiWVkFeggz+kMok2sBsDyi0pdsYaXUsljbQdks1g8hFGUfal5igvKFSLUA
oUx5wfFwY4jGUdVnSB8ShZwKFi+VaLoz/T1zZC2qr9B8qEPsMBeYwNLFgWyTIABh
LGBfZD/ljqY/6pHMPFPRCbknI35RXSVqpDUoEDXX4EXxEquIwZhJfLhp8MkCAwEA
AQKCAgA2Vgo5d5bj/50WcOqCAH3Fg/ZydvHknGPOw2hY+6mK3N08qf2kb4HqfNmb
2AM7dkvOLjHqJhIcVC4NZD56bk4X/cR4ndV4MD2y3ZSlm13+9sO3H+rNnc7/TT+S
i+x1aP5IEu4VPFKoLEGkY7s6u6usMl5D9FeoSrin2Gn5EPtKJdjs0aVoZwSYxw9v
KXRbNX6Dm8hy3pjxeXubfTQzelipkwHv5D1ngn5cwQPUXrd+yyF6TFGtxNxsxYu2
I9WE0Tt94mUbjEhrLtYEdH47lUjWyb9VwOio2FhPyNBZatcIibQm4QWSF6d33m7D
DdSi6jM4zXvR6w0yxTbqOGgsZVA0/y6419tfigKOV1JlPI0X7xJFLmywHcC6zA0C
GstZGU3igxtbTdkq2lUWYhTTbxAR+TAZd/FLq6y+48lWEIWhon9xDryHHCnNtYwP
ZbYJXf++V6I8LnamVNw+TCdaehMjxoEqUNuzfgm1XdOD1xlNeRSRM0y40wiTAAHj
WIRV66TEQ/y66sbp58lGyvtxcUj3iWz5loFA+gXEnvK1eFcJqRfmEx+dz+EZeKSS
rgt86RJweAuebtGZnOSj5grxPwhsS46KKWH3KEvOZ7ZEduxCgAONy7VAoSLoKMaE
/XADVUj2HukgRxRR4yIE61fVwWlb8XEm6WbhsMCcRu9wR4e50QKCAQEA3v531aFH
gzwrjMQ+6LdDNbQf9QUK5qVf/WG6f/eXcq5x6E5OptoqdYl1B2QqbnbdYCQWga+W
21YnlSOgmo0trS4Zr8LMcyvdiHL2LyYNoo6nE8qI2xYfrdpJZwkR0X+eNJFRa1/X
mha3x0oUAm559ROuaRto6HL3V1nUUiGDmPSqSyOJgTrOI29hBcvWUgpCr/1CL0uL
NmqtkMya9/0Xn0o+BTbdg3PogTIElGgWtStDx3mj67ORGPUqI7nx0TmCxYeWN4OT
779gmc6lleth1+L1RRm1hT+tMSty7fTEivU4Sj7sGmivQzAyiD9Lqg3lOeaRYBGD
UmAWbI9uaYDrQwKCAQEAx7s8Jq80t1DD3kSPCuRiw8r7RjUD3L6CQtag+QJCPts3
7qV2RtQ2qwcmpFsZ9DcIn08xmx2rZ7sx8CJrys1sL9Wu3krpbdtPjp5AstVS6CBx
mLulGrl5nCO1bnVRKlz6S97FgZ0hjBkeMalJLoYIuD9VUOqNwi19K/oU7mFOPHvm
Jbvo2ZgygwXvSg6nSNqvd5T33ZMnL0dnUhsFsZV47nO8QMB/ZsdlWUEuV+Y5RJBY
3FLo3NBJLA9zIpLEm0hlvA0D/GEvBCQfJOEEgm8K9x7CVGF5rYDBd7R7oGrB5t4T
zFgkUkqskiG3VFE0TnpOq4gkZB/1g0E4W/VmhdelAwKCAQB/Xyaf1cF9So8tlqLA
Vn2DXWGrmLfDSs7rcjkPAyN0lAPoR2JRl+gMvvkjwaki8647TiG07dDjc/CkFXeV
D/L5Ko3tgP07A+FEITZRdBDxuz3f5h4J1jc+HKM0wU92NMjvCdpR1KrYDwXmRX/s
a6IpxJYo30krDRAOyval+xKp+YaT6LaQJEC+qM3oe6ftsIKq96QoU6Qu7vw460XR
RLWLfOK0I8SfY0N5GFLZWiMuVIoglHB3H1hPwynQwlNHyOvTXEEHcJa9qLjK4ehf
G9YFdFPYpniypc6NeV3qYZcqMCt47Tv7UbRaUltqy4yyk8FNM0/yac5y7QOh+sN8
a/D1AoIBABnCJ+vFRMMvg1My/E+nTKV7lBRl2e2qFBqSm4gBppF8rCX26N4RmEtO
TMl9hkdcoZwKFpeup+Bk3/fcOJKbE4zHvhmlB53HXudBuY5WvK57IKtV5+EecnSU
ll18e88+1njabhZdMWpkAuTctDdvycgZQuOAnG+idjYptnFX00Mxp2jOZyVI35rO
NSIT6bcXnPGLILxOsgsC5mxMV9ujL0lxW6HuMYALzyJHqbZkVpZlF1Cy0J1Jr2Yj
R/H5g6mTGKu78fumfO3HysxyyKYZtAxSxzUirRKXPFw3xonVutQPZ/Y+l9CVGNRv
zLjvEBPe6i5tDGRtSrh2vNH/QA2a1gkCggEBAJi2TtRxR4YRgRlzP1NLbAO82OdO
1opIzPaxb+9JxvFm+xILb8kvNe0THkLhLM2nNImTydshCqLXGP6/jahw7Vh9NJNj
QrCHEx9RnJYdcdaayWeDzSJO8oGARs0CXMZXzgPYiFnNXcFFG/R+Ughv0yctIz4o
af6elMwheOPXEyNu1yV0ALlvO/xkPpBRs3HuffJ5EiMkT5SKFa4ErFUaAlDaYpRz
EITcEh6UKnZiAhQADl9rHSymWUlt88xhXw4wEDTBvNmzgOgQvfjnoud8JXO8a7S0
ihaKprOq1WFRss1USidGfm7lBxIPM60AeSHKt2VsVgpf+KgXgNs3RONhY8c=
-----END RSA PRIVATE KEY-----

View File

@@ -0,0 +1,69 @@
#!/bin/bash
# Generates a complete set of self-signed TLS credentials for mutual-TLS
# (mTLS) testing: a server CA, a client CA, one server certificate signed
# by the server CA, and one client certificate signed by the client CA.
# Extension profiles (test_ca / test_server / test_client) come from the
# adjacent ./openssl.cnf; all certificates are valid for 3650 days.

# Create the server CA certs.
openssl req -x509 \
  -newkey rsa:4096 \
  -nodes \
  -days 3650 \
  -keyout ca_key.pem \
  -out ca_cert.pem \
  -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-server_ca/ \
  -config ./openssl.cnf \
  -extensions test_ca \
  -sha256

# Create the client CA certs.
openssl req -x509 \
  -newkey rsa:4096 \
  -nodes \
  -days 3650 \
  -keyout client_ca_key.pem \
  -out client_ca_cert.pem \
  -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client_ca/ \
  -config ./openssl.cnf \
  -extensions test_ca \
  -sha256

# Generate a server cert:
# fresh 4096-bit RSA key, then a CSR for CN=test-server1.
openssl genrsa -out server_key.pem 4096
openssl req -new \
  -key server_key.pem \
  -days 3650 \
  -out server_csr.pem \
  -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-server1/ \
  -config ./openssl.cnf \
  -reqexts test_server
# Sign the CSR with the server CA, applying the test_server extensions.
openssl x509 -req \
  -in server_csr.pem \
  -CAkey ca_key.pem \
  -CA ca_cert.pem \
  -days 3650 \
  -set_serial 1000 \
  -out server_cert.pem \
  -extfile ./openssl.cnf \
  -extensions test_server \
  -sha256
# Sanity-check that the server cert chains to the server CA.
openssl verify -verbose -CAfile ca_cert.pem server_cert.pem

# Generate a client cert (same flow, signed by the client CA instead).
openssl genrsa -out client_key.pem 4096
openssl req -new \
  -key client_key.pem \
  -days 3650 \
  -out client_csr.pem \
  -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client1/ \
  -config ./openssl.cnf \
  -reqexts test_client
openssl x509 -req \
  -in client_csr.pem \
  -CAkey client_ca_key.pem \
  -CA client_ca_cert.pem \
  -days 3650 \
  -set_serial 1000 \
  -out client_cert.pem \
  -extfile ./openssl.cnf \
  -extensions test_client \
  -sha256
openssl verify -verbose -CAfile client_ca_cert.pem client_cert.pem

# Drop the intermediate signing requests; only keys and certs are kept.
rm *_csr.pem
View File

@@ -0,0 +1,28 @@
# OpenSSL request/extension profiles used by create.sh to mint the test
# CAs and leaf certificates for mTLS testing.

[req]
distinguished_name = req_distinguished_name
attributes = req_attributes

# Both sections below are intentionally empty: all subject fields are
# supplied on the openssl command line via -subj.
[req_distinguished_name]
[req_attributes]

# Profile for the self-signed CA certificates (server CA and client CA).
[test_ca]
basicConstraints = critical,CA:TRUE
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer:always
keyUsage = critical,keyCertSign

# Profile for the server leaf certificate.
[test_server]
basicConstraints = critical,CA:FALSE
subjectKeyIdentifier = hash
keyUsage = critical,digitalSignature,keyEncipherment,keyAgreement
subjectAltName = @server_alt_names

[server_alt_names]
# Wildcard SAN: the server cert matches any single-label *.httprunner.com host.
DNS.1 = *.httprunner.com

# Profile for the client leaf certificate; clientAuth EKU enables its use
# for client authentication in mTLS handshakes.
[test_client]
basicConstraints = critical,CA:FALSE
subjectKeyIdentifier = hash
keyUsage = critical,nonRepudiation,digitalSignature,keyEncipherment
extendedKeyUsage = critical,clientAuth

View File

@@ -0,0 +1,32 @@
-----BEGIN CERTIFICATE-----
MIIFdjCCA16gAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx
CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV
BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIyMDcxMDA0MzAxNFoXDTMyMDcwNzA0MzAx
NFowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL
BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3Qtc2VydmVyMTCCAiIwDQYJKoZIhvcN
AQEBBQADggIPADCCAgoCggIBAL0HTaTaYQ1GbvZ/Py3NJf3WSOzXdm/qh9Fv7hAs
8FGPEEDCRhrvFMjWqAwp3EiQkRavLgTv4t1hkga9y/hc7t/q9ATFm8SC3Dtdkg2X
0YdxsyotPaWgUSmsIJ0uwCIMkU5oGE1J2fopdBxG87T+QGUo1r4QxDQGQ2H9CMsD
217Ca+PdrdldctNs/D2AVkXTew1Bd/nNaOXh3vc14/4b86Y7A2HOFFyRi3QaemJJ
ksnH0CmhydRob5rAZQRClftzjri9gaUfJW5LSUYBXn3Yx1gam6lM5LcPlgWLmXs9
wthfksY6YlpCa1NtdnNbZIY+6cCHN6ytSPj/1BY8+C954cySSuNVSsAAvm8C80Zz
hnNaivhdouvmWTZM8febnrrt6qo0SEtnn+RkzUznOjVVxyPffgjI8s4gNc3DAIbf
oDwrAgxNF9nXAoeYTVOUxeGcjeG8fIKcfC7pxfI6/ejMiUU7LkL5rEIbfT2bF6EW
ntGyrxYRNdw+VX2MxNNvPKHUUu90JTCxzjaUCSnR4lhatcQPKeYVnn5I+jv6kMm5
FAkjVwk4U/u7W1DtCedaN9nUJNRWwptHqX2VXcnM0k9tA5yBtBM55yf0zYHz/fOz
RJ/bqHzbs5+il07u1uedMUJ9X9pp85Pm0PFD1zbv8MwZetTJigA4CdU4XU8K56Nz
Avc1AgMBAAGjXDBaMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFI0rfKZ3rjLJZ3R4
tv5NeYgJyiaqMA4GA1UdDwEB/wQEAwIDqDAbBgNVHREEFDASghAqLmh0dHBydW5u
ZXIuY29tMA0GCSqGSIb3DQEBCwUAA4ICAQCYbWsz11jUxABZDkQDNqGGpdAEJuaD
gAe3Ko28ntT+pjEdInD/YrfEjGI3KQhT00yMVkiWXiK8bBynZB3TpDUfG4OTBhAV
PZy/jQ08wOfmgFQco3asxQovimmKXVwbeJBOlZBfZoseB3h4zz7PcfLI9Xr8dz34
Pbilg+XOZywoxdHWd1To13ycKi9DPh81cRWu7QACS92wGGsX/eYVW7YKFmjcnj0I
2+WJl7nHD7h+Qyy6QiHmHa6/ZKAx2vkf2ALAHr4zKvIf+LLlQVTKGxtkyRMusiP+
sZuDq7RN5oYE5G1P5tF6Xb6AUGFrazaiC3kI0K3njs0xifjxiM+7KyfXQHOWV/a6
NNk9CX9twaKhq8Ay5jjILSUoXWgyl1OXOyIHIpWmsJMyGrQCapS5BZHGwc/K/6yW
TETmn6frJUh8VHJ+gjLvoUVMQvkJbV5IecMQaIfHBegRobi9TDkmjGC1v6+rpfjc
tVhQ7rUQgYtkuoOfRjwvCvY0UQ3hf73u/FCG/+Lw1b/Wcp8PMU+6vpZqlAaaFGVr
WHdrPHC0B0Sc3Pr6dmJp70KVb4gx45icRaJnPLR7sr5CBkorZs9NKXUzNnf9oZWF
Nfm5/isLCqLfwA+VTk78vyWqRycdDJ0lswxZt5pvwI3gXitOhlE6zXtsA883TwZ9
TxGOtJdjo0IEAQ==
-----END CERTIFICATE-----

View File

@@ -0,0 +1,51 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKAIBAAKCAgEAvQdNpNphDUZu9n8/Lc0l/dZI7Nd2b+qH0W/uECzwUY8QQMJG
Gu8UyNaoDCncSJCRFq8uBO/i3WGSBr3L+Fzu3+r0BMWbxILcO12SDZfRh3GzKi09
paBRKawgnS7AIgyRTmgYTUnZ+il0HEbztP5AZSjWvhDENAZDYf0IywPbXsJr492t
2V1y02z8PYBWRdN7DUF3+c1o5eHe9zXj/hvzpjsDYc4UXJGLdBp6YkmSycfQKaHJ
1GhvmsBlBEKV+3OOuL2BpR8lbktJRgFefdjHWBqbqUzktw+WBYuZez3C2F+Sxjpi
WkJrU212c1tkhj7pwIc3rK1I+P/UFjz4L3nhzJJK41VKwAC+bwLzRnOGc1qK+F2i
6+ZZNkzx95ueuu3qqjRIS2ef5GTNTOc6NVXHI99+CMjyziA1zcMAht+gPCsCDE0X
2dcCh5hNU5TF4ZyN4bx8gpx8LunF8jr96MyJRTsuQvmsQht9PZsXoRae0bKvFhE1
3D5VfYzE0288odRS73QlMLHONpQJKdHiWFq1xA8p5hWefkj6O/qQybkUCSNXCThT
+7tbUO0J51o32dQk1FbCm0epfZVdyczST20DnIG0EznnJ/TNgfP987NEn9uofNuz
n6KXTu7W550xQn1f2mnzk+bQ8UPXNu/wzBl61MmKADgJ1ThdTwrno3MC9zUCAwEA
AQKCAgBXlDapFnS4zdVDZ5lCAzaC8PFAqmM5XxQmORG3dNqzLvF8z4XjnLmog6vA
VvS0uiY+uFM9/lbB8x7Q+Maz/3q9TAJa46NT3L1k0+mDWr+9XTSBagyR3EE+aX2C
1dI29FOuXBRGWt0fRm2BXG41gUccl1tHHEWLRQubLr0QMm1E7hdGr8KIXv+AbZJA
fGF8YIs2jQqlNkJPn+LJ7rH/Xbv5XIYonm5YpSZTWKEzQJs92dHcOBVm0CxFKrai
zqbmpZeOiF60vkV9YGxGfwPkkrdpXoqYWgPtvM7pKtClhOvti/pY1VwULYnEUYb7
03AzsppilUN6QZ75nq4Iz569gF7YuUCTqFwYt8eX2TIpXctkvHeTIdqLUEn1JMTh
Iqr8xmnsGPTICiLc1bHPXDfOyg9wI1zcFAdS9FAzdlYyGPSZt4KwgBG7e+9daz0A
whUaim4OV4mpHQMi/Tx0aF4NPRz/BQbzfKrjvaeHVI+VBJUWR46MwjpjwaDpUiIe
fkgJf6wVeFbdzJMOCP1xZys5N/UkC9V371J6kLywPrzeVuljqSVGxP2SHzRlEnlE
cSff5sbLAHz30y5y+HMVePC/svZ93/vzdZNU6PeHvEcvGu82/KRuy4iKgCJIeSa2
DjxlfZn6AnQCllHwVJjit1SxPYn4nmNGoCMqNln6STT47jj9UQKCAQEA5KScPzAR
2u8Wsyhfl2wqd5lfTsNqZ0VM7RuGMEKGIx3WpoSrUiTRLwalHRuJUudbePRlnYze
gQLgiEgmv2d9RaPEckMzgQwG5EIEY4z8RYaB5zadcNUla8M467vHFHDpPZ5TPHg5
HpbREi0J0sL+Oa8M5Nf/XRO8x/uL25f9sQUoE2nSfr4PnV/ysbvoBn5sE6cn4jsr
/HDrPjksgx2/uQcnmUq70Kxhm7iCUUcbTlxoWDCV/g0UNJcZ/6PgDN7fVaXItXVK
QHnCS0yQkJERHDg5mBWGS8SChqPUTKC1O7KYEanenoxqm0mpJAvMG+DznvqClmiT
kDxJAcX31kakkwKCAQEA06VKwR8Wy3XYHpsX094ZeLXGTcnHFEM/jOBGQZvQjV2d
39dhGKj7dqw08RQGAVZ5KK3coMNk6uIO3VuYwYBjEG47a8q9FeWP0tBcDIPCGibV
HLwGgExJDyFdgWtLnI6yPWKsoZjMppstVcQZK0ouWpLvgK8nrg7WoUeJmvCfnw7f
p9pxj9S98ja8Q2uajvo8SWaV04YKm6jW0+fxwlMBqaNZbIxXyXGfO3qMGczAbCne
oPxzkHI9AZ97qevBzMAh/IXqUr7e+8BM/5vxoszEXtLgfIQL+owsy6ALe5N0UUuq
LYrauuzjaYMjkEZ1Ow2aRmrkOaStMLXPI78CW4faFwKCAQBHzV34BfuFepHxX1tt
rR1FA9hHXtz6Y2v+BifE3g9L1eID1yQKHt/GWdreYjhk3Zz/RhjnOkbh0up6QdZR
Q4m2pfBaRbpV61X6trS0IqFSoCQJXUBiH72pstwcQ5MIW1ET9bWEBulBLvGnOJee
JXg62zs8XoymST1+vAM2yet0fP8R4ail/r/elzQbFryN1YPRRCwlQpnUpA1sM/5D
isMbsyB/ZlXG+WuJwI7EQYVUvXZTQ6bG6oqO3WjfvDHvOMqAFhkKyzOvPc2DYh8A
F159Mzb7CL9s6eBnselIyys+/R3+Zg8wUT5lV+OTG1VU5/b51QfPfjXhFN2EfgwP
sY2bAoIBAQCLNB978BfNEKBqWPYOGvnD5EMe7MUs9aI55VUwV+yO3nE1RfMOBi8G
+fMEUXg1rwuXjusbLgkVWEQQoetR8kC2ENqyZjGB0nCLZxH0BUFIdBwdfyoDfqla
80YOFmUv/scLCviifN62AkCKNaWcTHk6h4RRrmK53/aZM3U1XGiQdHb0bv/caz/X
rNqcuYx51+qJGJkY/APEKAPMcrUXbAMe8Vqiw5gF3d6uf0bgvUQeoFdWqVTVP94S
UDRFKdRY+FIiRm49qF7/VJcQVCBVRLsv5yFRpIRAcawQ7h4/VFfgFJVEyRxeb+qP
fnqIrV7zzVmYUTv1EfP7oskwKLTDQRJXAoIBAG2pAsyv963Bxy4cUq2v2c1tSHSV
Pi65N/0ynhWqh7tYGmgUigEhRwbuVCmC4nFOat0b9uXauFpUWth29JKOKO3Tdaze
Nb6Nrlb2AYHAs4x1LSd73mf2GR82eahcBNpFkG5NN7vg/mySN3DoBuFx2ZvrlYuw
yjvNf51QcIlOFEWcbfOvsE9/2aXGkdmySqUZ+BJato/FMmuvSdjVOsb2zdtRG/j8
D3nvxRqJITI849PHWVEMWeDOFT4dRTqgzd1yDB7UUggQwHExujAn9ZbTOivjn6H5
j/aLw4IjkKge1qz9c5a13LMulYkYE8bn2GZ7Jali1v5dV5gIWtq+wtZ+32s=
-----END RSA PRIVATE KEY-----

View File

@@ -0,0 +1,585 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
// protoc v3.19.4
// source: grpc/proto/messager.proto
package messager
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
// Compile-time guards: these expressions fail to compile if the generated
// code and the linked protobuf runtime drift outside each other's supported
// version range.
// NOTE(review): this file is generated by protoc-gen-go from
// grpc/proto/messager.proto — regenerate rather than hand-editing.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// StreamRequest carries a message type discriminator, the sender's node ID,
// and an opaque named byte-payload map. Generated by protoc-gen-go from
// grpc/proto/messager.proto; do not hand-edit.
type StreamRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Type discriminates the kind of stream message.
	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
	// NodeID identifies the node that sent this message.
	NodeID string `protobuf:"bytes,2,opt,name=nodeID,proto3" json:"nodeID,omitempty"`
	// Data holds arbitrary byte payloads keyed by name.
	Data map[string][]byte `protobuf:"bytes,3,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}

// Reset restores the message to its zero value.
func (x *StreamRequest) Reset() {
	*x = StreamRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_grpc_proto_messager_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format.
func (x *StreamRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks StreamRequest as a protobuf message.
func (*StreamRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *StreamRequest) ProtoReflect() protoreflect.Message {
	mi := &file_grpc_proto_messager_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use StreamRequest.ProtoReflect.Descriptor instead.
func (*StreamRequest) Descriptor() ([]byte, []int) {
	return file_grpc_proto_messager_proto_rawDescGZIP(), []int{0}
}

// GetType returns Type, or "" if the receiver is nil.
func (x *StreamRequest) GetType() string {
	if x != nil {
		return x.Type
	}
	return ""
}

// GetNodeID returns NodeID, or "" if the receiver is nil.
func (x *StreamRequest) GetNodeID() string {
	if x != nil {
		return x.NodeID
	}
	return ""
}

// GetData returns Data, or nil if the receiver is nil.
func (x *StreamRequest) GetData() map[string][]byte {
	if x != nil {
		return x.Data
	}
	return nil
}
// StreamResponse carries a message type discriminator, a node ID, serialized
// Profile and Tasks payloads, and an opaque named byte-payload map.
// NOTE(review): Profile/Tasks appear to be serialized load-test profile and
// task definitions — confirm against grpc/proto/messager.proto.
// Generated by protoc-gen-go; do not hand-edit.
type StreamResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Type discriminates the kind of stream message.
	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
	// NodeID identifies the node this message is addressed to or about.
	NodeID string `protobuf:"bytes,2,opt,name=nodeID,proto3" json:"nodeID,omitempty"`
	// Profile is an opaque serialized payload.
	Profile []byte `protobuf:"bytes,3,opt,name=profile,proto3" json:"profile,omitempty"`
	// Tasks is an opaque serialized payload.
	Tasks []byte `protobuf:"bytes,4,opt,name=tasks,proto3" json:"tasks,omitempty"`
	// Data holds arbitrary byte payloads keyed by name.
	Data map[string][]byte `protobuf:"bytes,5,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}

// Reset restores the message to its zero value.
func (x *StreamResponse) Reset() {
	*x = StreamResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_grpc_proto_messager_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format.
func (x *StreamResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks StreamResponse as a protobuf message.
func (*StreamResponse) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *StreamResponse) ProtoReflect() protoreflect.Message {
	mi := &file_grpc_proto_messager_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use StreamResponse.ProtoReflect.Descriptor instead.
func (*StreamResponse) Descriptor() ([]byte, []int) {
	return file_grpc_proto_messager_proto_rawDescGZIP(), []int{1}
}

// GetType returns Type, or "" if the receiver is nil.
func (x *StreamResponse) GetType() string {
	if x != nil {
		return x.Type
	}
	return ""
}

// GetNodeID returns NodeID, or "" if the receiver is nil.
func (x *StreamResponse) GetNodeID() string {
	if x != nil {
		return x.NodeID
	}
	return ""
}

// GetProfile returns Profile, or nil if the receiver is nil.
func (x *StreamResponse) GetProfile() []byte {
	if x != nil {
		return x.Profile
	}
	return nil
}

// GetTasks returns Tasks, or nil if the receiver is nil.
func (x *StreamResponse) GetTasks() []byte {
	if x != nil {
		return x.Tasks
	}
	return nil
}

// GetData returns Data, or nil if the receiver is nil.
func (x *StreamResponse) GetData() map[string][]byte {
	if x != nil {
		return x.Data
	}
	return nil
}
// RegisterRequest announces a node by its ID along with its operating
// system and CPU architecture. Generated by protoc-gen-go; do not hand-edit.
type RegisterRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// NodeID identifies the registering node.
	NodeID string `protobuf:"bytes,1,opt,name=nodeID,proto3" json:"nodeID,omitempty"`
	// Os is the node's operating system.
	Os string `protobuf:"bytes,2,opt,name=os,proto3" json:"os,omitempty"`
	// Arch is the node's CPU architecture.
	Arch string `protobuf:"bytes,3,opt,name=arch,proto3" json:"arch,omitempty"`
}

// Reset restores the message to its zero value.
func (x *RegisterRequest) Reset() {
	*x = RegisterRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_grpc_proto_messager_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format.
func (x *RegisterRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks RegisterRequest as a protobuf message.
func (*RegisterRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *RegisterRequest) ProtoReflect() protoreflect.Message {
	mi := &file_grpc_proto_messager_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RegisterRequest.ProtoReflect.Descriptor instead.
func (*RegisterRequest) Descriptor() ([]byte, []int) {
	return file_grpc_proto_messager_proto_rawDescGZIP(), []int{2}
}

// GetNodeID returns NodeID, or "" if the receiver is nil.
func (x *RegisterRequest) GetNodeID() string {
	if x != nil {
		return x.NodeID
	}
	return ""
}

// GetOs returns Os, or "" if the receiver is nil.
func (x *RegisterRequest) GetOs() string {
	if x != nil {
		return x.Os
	}
	return ""
}

// GetArch returns Arch, or "" if the receiver is nil.
func (x *RegisterRequest) GetArch() string {
	if x != nil {
		return x.Arch
	}
	return ""
}
// RegisterResponse reports the outcome of a registration attempt as a
// code plus a human-readable message. Generated by protoc-gen-go; do not
// hand-edit.
type RegisterResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Code is the result code of the registration.
	Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"`
	// Message is a human-readable description of the result.
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
}

// Reset restores the message to its zero value.
func (x *RegisterResponse) Reset() {
	*x = RegisterResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_grpc_proto_messager_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format.
func (x *RegisterResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks RegisterResponse as a protobuf message.
func (*RegisterResponse) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *RegisterResponse) ProtoReflect() protoreflect.Message {
	mi := &file_grpc_proto_messager_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RegisterResponse.ProtoReflect.Descriptor instead.
func (*RegisterResponse) Descriptor() ([]byte, []int) {
	return file_grpc_proto_messager_proto_rawDescGZIP(), []int{3}
}

// GetCode returns Code, or "" if the receiver is nil.
func (x *RegisterResponse) GetCode() string {
	if x != nil {
		return x.Code
	}
	return ""
}

// GetMessage returns Message, or "" if the receiver is nil.
func (x *RegisterResponse) GetMessage() string {
	if x != nil {
		return x.Message
	}
	return ""
}
// SignOutRequest identifies, by node ID, the node that is signing out.
// Generated by protoc-gen-go; do not hand-edit.
type SignOutRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// NodeID identifies the node signing out.
	NodeID string `protobuf:"bytes,1,opt,name=nodeID,proto3" json:"nodeID,omitempty"`
}

// Reset restores the message to its zero value.
func (x *SignOutRequest) Reset() {
	*x = SignOutRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_grpc_proto_messager_proto_msgTypes[4]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format.
func (x *SignOutRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SignOutRequest as a protobuf message.
func (*SignOutRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *SignOutRequest) ProtoReflect() protoreflect.Message {
	mi := &file_grpc_proto_messager_proto_msgTypes[4]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SignOutRequest.ProtoReflect.Descriptor instead.
func (*SignOutRequest) Descriptor() ([]byte, []int) {
	return file_grpc_proto_messager_proto_rawDescGZIP(), []int{4}
}

// GetNodeID returns NodeID, or "" if the receiver is nil.
func (x *SignOutRequest) GetNodeID() string {
	if x != nil {
		return x.NodeID
	}
	return ""
}
// SignOutResponse reports the outcome of a sign-out attempt as a code plus
// a human-readable message. Generated by protoc-gen-go; do not hand-edit.
type SignOutResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Code is the result code of the sign-out.
	Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"`
	// Message is a human-readable description of the result.
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
}

// Reset restores the message to its zero value.
func (x *SignOutResponse) Reset() {
	*x = SignOutResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_grpc_proto_messager_proto_msgTypes[5]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format.
func (x *SignOutResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SignOutResponse as a protobuf message.
func (*SignOutResponse) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *SignOutResponse) ProtoReflect() protoreflect.Message {
	mi := &file_grpc_proto_messager_proto_msgTypes[5]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SignOutResponse.ProtoReflect.Descriptor instead.
func (*SignOutResponse) Descriptor() ([]byte, []int) {
	return file_grpc_proto_messager_proto_rawDescGZIP(), []int{5}
}

// GetCode returns Code, or "" if the receiver is nil.
func (x *SignOutResponse) GetCode() string {
	if x != nil {
		return x.Code
	}
	return ""
}

// GetMessage returns Message, or "" if the receiver is nil.
func (x *SignOutResponse) GetMessage() string {
	if x != nil {
		return x.Message
	}
	return ""
}
var File_grpc_proto_messager_proto protoreflect.FileDescriptor
var file_grpc_proto_messager_proto_rawDesc = []byte{
0x0a, 0x19, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x22, 0xaa, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x6f,
0x64, 0x65, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65,
0x49, 0x44, 0x12, 0x34, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61,
0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74,
0x72, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61,
0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
0x01, 0x22, 0xdc, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x6f, 0x64, 0x65,
0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44,
0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0c, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61,
0x73, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73,
0x12, 0x35, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21,
0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45,
0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
0x22, 0x4d, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x6f,
0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x6f, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x61,
0x72, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x61, 0x72, 0x63, 0x68, 0x22,
0x40, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x22, 0x28, 0x0a, 0x0e, 0x53, 0x69, 0x67, 0x6e, 0x4f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x22, 0x3f, 0x0a, 0x0f, 0x53,
0x69, 0x67, 0x6e, 0x4f, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12,
0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f,
0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0xe4, 0x01, 0x0a,
0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69,
0x73, 0x74, 0x65, 0x72, 0x12, 0x18, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52,
0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19,
0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65,
0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x07, 0x53,
0x69, 0x67, 0x6e, 0x4f, 0x75, 0x74, 0x12, 0x17, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
0x2e, 0x53, 0x69, 0x67, 0x6e, 0x4f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x18, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x4f, 0x75,
0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x1d, 0x42,
0x69, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x2e, 0x6d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53,
0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28,
0x01, 0x30, 0x01, 0x42, 0x0f, 0x5a, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_grpc_proto_messager_proto_rawDescOnce sync.Once
file_grpc_proto_messager_proto_rawDescData = file_grpc_proto_messager_proto_rawDesc
)
func file_grpc_proto_messager_proto_rawDescGZIP() []byte {
file_grpc_proto_messager_proto_rawDescOnce.Do(func() {
file_grpc_proto_messager_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_proto_messager_proto_rawDescData)
})
return file_grpc_proto_messager_proto_rawDescData
}
var file_grpc_proto_messager_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_grpc_proto_messager_proto_goTypes = []interface{}{
(*StreamRequest)(nil), // 0: message.StreamRequest
(*StreamResponse)(nil), // 1: message.StreamResponse
(*RegisterRequest)(nil), // 2: message.RegisterRequest
(*RegisterResponse)(nil), // 3: message.RegisterResponse
(*SignOutRequest)(nil), // 4: message.SignOutRequest
(*SignOutResponse)(nil), // 5: message.SignOutResponse
nil, // 6: message.StreamRequest.DataEntry
nil, // 7: message.StreamResponse.DataEntry
}
var file_grpc_proto_messager_proto_depIdxs = []int32{
6, // 0: message.StreamRequest.data:type_name -> message.StreamRequest.DataEntry
7, // 1: message.StreamResponse.data:type_name -> message.StreamResponse.DataEntry
2, // 2: message.Message.Register:input_type -> message.RegisterRequest
4, // 3: message.Message.SignOut:input_type -> message.SignOutRequest
0, // 4: message.Message.BidirectionalStreamingMessage:input_type -> message.StreamRequest
3, // 5: message.Message.Register:output_type -> message.RegisterResponse
5, // 6: message.Message.SignOut:output_type -> message.SignOutResponse
1, // 7: message.Message.BidirectionalStreamingMessage:output_type -> message.StreamResponse
5, // [5:8] is the sub-list for method output_type
2, // [2:5] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_grpc_proto_messager_proto_init() }
func file_grpc_proto_messager_proto_init() {
if File_grpc_proto_messager_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_grpc_proto_messager_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StreamRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_grpc_proto_messager_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StreamResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_grpc_proto_messager_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RegisterRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_grpc_proto_messager_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RegisterResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_grpc_proto_messager_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SignOutRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_grpc_proto_messager_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SignOutResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_grpc_proto_messager_proto_rawDesc,
NumEnums: 0,
NumMessages: 8,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_grpc_proto_messager_proto_goTypes,
DependencyIndexes: file_grpc_proto_messager_proto_depIdxs,
MessageInfos: file_grpc_proto_messager_proto_msgTypes,
}.Build()
File_grpc_proto_messager_proto = out.File
file_grpc_proto_messager_proto_rawDesc = nil
file_grpc_proto_messager_proto_goTypes = nil
file_grpc_proto_messager_proto_depIdxs = nil
}

View File

@@ -0,0 +1,210 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.19.4
// source: grpc/proto/messager.proto
package messager
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// MessageClient is the client API for Message service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type MessageClient interface {
Register(ctx context.Context, in *RegisterRequest, opts ...grpc.CallOption) (*RegisterResponse, error)
SignOut(ctx context.Context, in *SignOutRequest, opts ...grpc.CallOption) (*SignOutResponse, error)
BidirectionalStreamingMessage(ctx context.Context, opts ...grpc.CallOption) (Message_BidirectionalStreamingMessageClient, error)
}
type messageClient struct {
cc grpc.ClientConnInterface
}
func NewMessageClient(cc grpc.ClientConnInterface) MessageClient {
return &messageClient{cc}
}
func (c *messageClient) Register(ctx context.Context, in *RegisterRequest, opts ...grpc.CallOption) (*RegisterResponse, error) {
out := new(RegisterResponse)
err := c.cc.Invoke(ctx, "/message.Message/Register", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *messageClient) SignOut(ctx context.Context, in *SignOutRequest, opts ...grpc.CallOption) (*SignOutResponse, error) {
out := new(SignOutResponse)
err := c.cc.Invoke(ctx, "/message.Message/SignOut", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *messageClient) BidirectionalStreamingMessage(ctx context.Context, opts ...grpc.CallOption) (Message_BidirectionalStreamingMessageClient, error) {
stream, err := c.cc.NewStream(ctx, &Message_ServiceDesc.Streams[0], "/message.Message/BidirectionalStreamingMessage", opts...)
if err != nil {
return nil, err
}
x := &messageBidirectionalStreamingMessageClient{stream}
return x, nil
}
type Message_BidirectionalStreamingMessageClient interface {
Send(*StreamRequest) error
Recv() (*StreamResponse, error)
grpc.ClientStream
}
type messageBidirectionalStreamingMessageClient struct {
grpc.ClientStream
}
func (x *messageBidirectionalStreamingMessageClient) Send(m *StreamRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *messageBidirectionalStreamingMessageClient) Recv() (*StreamResponse, error) {
m := new(StreamResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// MessageServer is the server API for Message service.
// All implementations must embed UnimplementedMessageServer
// for forward compatibility
type MessageServer interface {
Register(context.Context, *RegisterRequest) (*RegisterResponse, error)
SignOut(context.Context, *SignOutRequest) (*SignOutResponse, error)
BidirectionalStreamingMessage(Message_BidirectionalStreamingMessageServer) error
mustEmbedUnimplementedMessageServer()
}
// UnimplementedMessageServer must be embedded to have forward compatible implementations.
type UnimplementedMessageServer struct {
}
func (UnimplementedMessageServer) Register(context.Context, *RegisterRequest) (*RegisterResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Register not implemented")
}
func (UnimplementedMessageServer) SignOut(context.Context, *SignOutRequest) (*SignOutResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SignOut not implemented")
}
func (UnimplementedMessageServer) BidirectionalStreamingMessage(Message_BidirectionalStreamingMessageServer) error {
return status.Errorf(codes.Unimplemented, "method BidirectionalStreamingMessage not implemented")
}
func (UnimplementedMessageServer) mustEmbedUnimplementedMessageServer() {}
// UnsafeMessageServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to MessageServer will
// result in compilation errors.
type UnsafeMessageServer interface {
mustEmbedUnimplementedMessageServer()
}
func RegisterMessageServer(s grpc.ServiceRegistrar, srv MessageServer) {
s.RegisterService(&Message_ServiceDesc, srv)
}
func _Message_Register_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(RegisterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MessageServer).Register(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/message.Message/Register",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MessageServer).Register(ctx, req.(*RegisterRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Message_SignOut_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SignOutRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MessageServer).SignOut(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/message.Message/SignOut",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MessageServer).SignOut(ctx, req.(*SignOutRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Message_BidirectionalStreamingMessage_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(MessageServer).BidirectionalStreamingMessage(&messageBidirectionalStreamingMessageServer{stream})
}
type Message_BidirectionalStreamingMessageServer interface {
Send(*StreamResponse) error
Recv() (*StreamRequest, error)
grpc.ServerStream
}
type messageBidirectionalStreamingMessageServer struct {
grpc.ServerStream
}
func (x *messageBidirectionalStreamingMessageServer) Send(m *StreamResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *messageBidirectionalStreamingMessageServer) Recv() (*StreamRequest, error) {
m := new(StreamRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Message_ServiceDesc is the grpc.ServiceDesc for Message service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Message_ServiceDesc = grpc.ServiceDesc{
ServiceName: "message.Message",
HandlerType: (*MessageServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Register",
Handler: _Message_Register_Handler,
},
{
MethodName: "SignOut",
Handler: _Message_SignOut_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "BidirectionalStreamingMessage",
Handler: _Message_BidirectionalStreamingMessage_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "grpc/proto/messager.proto",
}

View File

@@ -0,0 +1,45 @@
// messager.proto defines the control-plane service for distributed load
// testing: worker nodes register with a master, exchange a long-lived
// bidirectional message stream, and sign out when done.
syntax = "proto3";

package message;

option go_package = "grpc/messager";

// Message is the master<->worker control service.
service Message {
// Register announces a worker node (ID, OS, arch) to the master.
rpc Register(RegisterRequest) returns (RegisterResponse) {}
// SignOut removes a worker node from the master.
rpc SignOut(SignOutRequest) returns (SignOutResponse) {}
// BidirectionalStreamingMessage is the long-lived control/stats stream
// between a node and the master.
rpc BidirectionalStreamingMessage(stream StreamRequest) returns (stream StreamResponse){};
}

// StreamRequest is a frame sent by the stream's client side.
message StreamRequest{
// message kind (e.g. "heartbeat"); mirrors the boomer type* constants.
string type = 1;
// ID of the sending node.
string nodeID = 2;
// opaque per-type payload, keyed by field name.
map<string, bytes> data = 3;
}

// StreamResponse is a frame sent by the stream's server side.
message StreamResponse{
string type = 1;
string nodeID = 2;
// serialized load-test profile — encoding decided by the caller (presumably JSON); verify against sender.
bytes profile = 3;
// serialized testcases to run — TODO confirm encoding.
bytes tasks = 4;
map<string, bytes> data = 5;
}

// RegisterRequest identifies a joining worker node.
message RegisterRequest{
string nodeID = 1;
string os = 2;
string arch = 3;
}

// RegisterResponse reports the outcome of a Register call.
message RegisterResponse{
string code = 1;
string message = 2;
}

// SignOutRequest identifies the node leaving the cluster.
message SignOutRequest{
string nodeID = 1;
}

// SignOutResponse reports the outcome of a SignOut call.
message SignOutResponse{
string code = 1;
string message = 2;
}

View File

@@ -0,0 +1,55 @@
package boomer
// Message types exchanged between master and worker nodes; carried in
// genericMessage.Type.
const (
	typeClientReady      = "client_ready"      // worker -> master: worker is ready to receive work
	typeClientStopped    = "client_stopped"    // worker -> master: worker stopped its load generators
	typeHeartbeat        = "heartbeat"         // periodic liveness signal
	typeSpawning         = "spawning"          // worker started spawning load goroutines
	typeSpawningComplete = "spawning_complete" // worker finished spawning
	typeQuit             = "quit"              // node is quitting
	typeException        = "exception"         // an error occurred — NOTE(review): sender not visible here; confirm usage
)
// genericMessage is the envelope for all master<->worker control messages.
// Only the fields relevant to a given Type are populated; the rest are
// omitted from the JSON encoding via omitempty.
type genericMessage struct {
	// Type identifies the message kind (e.g. "quit", "client_ready").
	Type string `json:"type,omitempty"`
	// Profile is a serialized load-test profile — encoding presumably JSON; confirm against sender.
	Profile []byte `json:"profile,omitempty"`
	// Data is an opaque per-type payload keyed by field name.
	Data map[string][]byte `json:"data,omitempty"`
	// NodeID identifies the sending node.
	NodeID string `json:"node_id,omitempty"`
	// Tasks holds serialized testcases for workers — TODO confirm encoding.
	Tasks []byte `json:"tasks,omitempty"`
}
// task bundles a load-test profile with serialized testcases for
// distribution to worker nodes.
type task struct {
	Profile *Profile `json:"profile,omitempty"`
	// TestCases holds serialized testcases — encoding decided elsewhere (presumably JSON); verify against caller.
	TestCases []byte `json:"testcases,omitempty"`
}
// newGenericMessage builds a control message of the given type carrying an
// optional payload and the sender's node ID.
func newGenericMessage(t string, data map[string][]byte, nodeID string) (msg *genericMessage) {
	msg = &genericMessage{}
	msg.Type = t
	msg.Data = data
	msg.NodeID = nodeID
	return msg
}
// newQuitMessage builds the message a node sends when it is quitting.
// Fix: use the typeQuit constant instead of the raw string literal "quit"
// so the value cannot drift from the type* constants declared in this file.
func newQuitMessage(nodeID string) (msg *genericMessage) {
	return &genericMessage{
		Type:   typeQuit,
		NodeID: nodeID,
	}
}
// newMessageToWorker builds a master->worker message carrying a serialized
// profile, an optional payload, and serialized tasks.
func newMessageToWorker(t string, profile []byte, data map[string][]byte, tasks []byte) (msg *genericMessage) {
	m := genericMessage{
		Type:    t,
		Profile: profile,
		Data:    data,
		Tasks:   tasks,
	}
	return &m
}
// newClientReadyMessageToMaster builds the message a worker sends to the
// master once it is ready to receive work.
// Fix: use the typeClientReady constant instead of the raw string literal
// "client_ready" so the value stays in sync with the type* constants
// declared in this file.
func newClientReadyMessageToMaster(nodeID string) (msg *genericMessage) {
	return &genericMessage{
		Type:   typeClientReady,
		NodeID: nodeID,
	}
}

View File

@@ -0,0 +1 @@
package boomer

View File

@@ -116,19 +116,7 @@ func (o *ConsoleOutput) OnEvent(data map[string]interface{}) {
return
}
var state string
switch output.State {
case stateInit:
state = "initializing"
case stateSpawning:
state = "spawning"
case stateRunning:
state = "running"
case stateQuitting:
state = "quitting"
case stateStopped:
state = "stopped"
}
state := getStateName(output.State)
currentTime := time.Now()
println(fmt.Sprintf("Current time: %s, Users: %d, State: %s, Total RPS: %.1f, Total Average Response Time: %.1fms, Total Fail Ratio: %.1f%%",
@@ -169,7 +157,7 @@ type statsEntryOutput struct {
}
type dataOutput struct {
UserCount int32 `json:"user_count"`
UserCount int64 `json:"user_count"`
State int32 `json:"state"`
TotalStats *statsEntryOutput `json:"stats_total"`
TransactionsPassed int64 `json:"transactions_passed"`
@@ -186,7 +174,7 @@ type dataOutput struct {
}
func convertData(data map[string]interface{}) (output *dataOutput, err error) {
userCount, ok := data["user_count"].(int32)
userCount, ok := data["user_count"].(int64)
if !ok {
return nil, fmt.Errorf("user_count is not int32")
}
@@ -404,7 +392,7 @@ var (
gaugeState = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "state",
Help: "The current runner state, 1=initializing, 2=spawning, 3=running, 4=quitting, 5=stopped",
Help: "The current runner state, 1=initializing, 2=spawning, 3=running, 4=stopping, 5=stopped, 6=quitting, 7=missing",
},
)
gaugeDuration = prometheus.NewGauge(
@@ -487,6 +475,8 @@ type PrometheusPusherOutput struct {
// OnStart will register all prometheus metric collectors
func (o *PrometheusPusherOutput) OnStart() {
// reset all prometheus metrics
resetPrometheusMetrics()
log.Info().Msg("register prometheus metric collectors")
registry := prometheus.NewRegistry()
registry.MustRegister(
@@ -525,7 +515,7 @@ func (o *PrometheusPusherOutput) OnStart() {
// OnStop of PrometheusPusherOutput has nothing to do.
func (o *PrometheusPusherOutput) OnStop() {
// update runner state: stopped
gaugeState.Set(float64(stateStopped))
gaugeState.Set(float64(StateStopped))
if err := o.pusher.Push(); err != nil {
log.Error().Err(err).Msg("push to Pushgateway failed")
}
@@ -618,3 +608,35 @@ func (o *PrometheusPusherOutput) OnEvent(data map[string]interface{}) {
log.Error().Err(err).Msg("push to Pushgateway failed")
}
}
// resetPrometheusMetrics will reset all metrics
func resetPrometheusMetrics() {
log.Info().Msg("reset all prometheus metrics")
gaugeNumRequests.Reset()
gaugeNumFailures.Reset()
gaugeMedianResponseTime.Reset()
gaugeAverageResponseTime.Reset()
gaugeMinResponseTime.Reset()
gaugeMaxResponseTime.Reset()
gaugeAverageContentLength.Reset()
gaugeCurrentRPS.Reset()
gaugeCurrentFailPerSec.Reset()
// counter for total
counterErrors.Reset()
counterTotalNumRequests.Reset()
counterTotalNumFailures.Reset()
// summary for total
summaryResponseTime.Reset()
// gauges for total
gaugeUsers.Set(0)
gaugeState.Set(1)
gaugeDuration.Set(0)
gaugeTotalAverageResponseTime.Set(0)
gaugeTotalMinResponseTime.Reset()
gaugeTotalMaxResponseTime.Reset()
gaugeTotalRPS.Set(0)
gaugeTotalFailRatio.Set(0)
gaugeTotalFailPerSec.Set(0)
gaugeTransactionsPassed.Set(0)
gaugeTransactionsFailed.Set(0)
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,10 +1,13 @@
package boomer
import (
"sync"
"sync/atomic"
"testing"
"time"
"github.com/httprunner/httprunner/v4/hrp/internal/boomer/grpc/messager"
"github.com/httprunner/httprunner/v4/hrp/internal/builtin"
"github.com/stretchr/testify/assert"
)
@@ -106,9 +109,439 @@ func TestLoopCount(t *testing.T) {
runner := newLocalRunner(2, 2)
runner.loop = &Loop{loopCount: 4}
runner.setTasks(tasks)
go runner.start()
<-runner.stopChan
if !assert.Equal(t, runner.loop.loopCount, atomic.LoadInt64(&runner.loop.finishedCount)) {
runner.start()
if !assert.Equal(t, atomic.LoadInt64(&runner.loop.loopCount), atomic.LoadInt64(&runner.loop.finishedCount)) {
t.Fatal()
}
}
// TestStopNotify verifies that stopNotify returns a channel that fires only
// after the runner has been gracefully stopped.
func TestStopNotify(t *testing.T) {
	r := &localRunner{
		runner: runner{
			stopChan: make(chan bool),
			doneChan: make(chan bool),
		},
	}
	// Simulate the run loop: once a stop is requested, mark the runner done.
	go func() {
		<-r.stopChan
		close(r.doneChan)
	}()
	notifier := r.stopNotify()
	// Before gracefulStop the notifier must not have fired.
	select {
	case <-notifier:
		t.Fatalf("received unexpected stop notification")
	default:
	}
	r.gracefulStop()
	// After gracefulStop the notifier must fire.
	select {
	case <-notifier:
	default:
		t.Fatalf("cannot receive stop notification")
	}
}
// TestSpawnWorkers checks that spawnWorkers brings up exactly the requested
// number of load-generating goroutines. No master is started; the client is
// created but presumably tolerates the absent server — TODO confirm.
func TestSpawnWorkers(t *testing.T) {
	taskA := &Task{
		Weight: 10,
		Fn: func() {
			time.Sleep(time.Second)
		},
		Name: "TaskA",
	}
	tasks := []*Task{taskA}
	runner := newWorkerRunner("localhost", 5557)
	defer runner.close()
	runner.client = newClient("localhost", 5557, runner.nodeID)
	runner.reset()
	runner.setTasks(tasks)
	go runner.spawnWorkers(10, 10, runner.stopChan, runner.spawnComplete)
	// Give the spawner time to reach the target count (timing-sensitive).
	time.Sleep(2 * time.Second)
	currentClients := runner.controller.getCurrentClientsNum()
	if currentClients != 10 {
		t.Error("Unexpected count", currentClients)
	}
}
// TestSpawnWorkersWithManyTasks spawns workers over three weighted tasks
// (100:10:1) and asserts each task's observed call share is within a factor
// of two of its weight share. The bounds are loose because scheduling is
// nondeterministic.
func TestSpawnWorkersWithManyTasks(t *testing.T) {
	// taskCalls counts executions per task name; guarded by lock because
	// task Fns run concurrently across worker goroutines.
	var lock sync.Mutex
	taskCalls := map[string]int{}
	createTask := func(name string, weight int) *Task {
		return &Task{
			Name:   name,
			Weight: weight,
			Fn: func() {
				lock.Lock()
				taskCalls[name]++
				lock.Unlock()
			},
		}
	}
	tasks := []*Task{
		createTask("one hundred", 100),
		createTask("ten", 10),
		createTask("one", 1),
	}
	runner := newWorkerRunner("localhost", 5557)
	defer runner.close()
	runner.reset()
	runner.setTasks(tasks)
	runner.client = newClient("localhost", 5557, runner.nodeID)
	const numToSpawn int64 = 20
	go runner.spawnWorkers(numToSpawn, float64(numToSpawn), runner.stopChan, runner.spawnComplete)
	// Let the workers run long enough to accumulate a meaningful sample.
	time.Sleep(3 * time.Second)
	currentClients := runner.controller.getCurrentClientsNum()
	assert.Equal(t, numToSpawn, int64(currentClients))
	lock.Lock()
	hundreds := taskCalls["one hundred"]
	tens := taskCalls["ten"]
	ones := taskCalls["one"]
	lock.Unlock()
	total := hundreds + tens + ones
	t.Logf("total tasks run: %d\n", total)
	// 111 = sum of weights; expect at least one full weighted round.
	assert.True(t, total > 111)
	assert.True(t, ones > 1)
	actPercentage := float64(ones) / float64(total)
	expectedPercentage := 1.0 / 111.0
	if actPercentage > 2*expectedPercentage || actPercentage < 0.5*expectedPercentage {
		t.Errorf("Unexpected percentage of ones task: exp %v, act %v", expectedPercentage, actPercentage)
	}
	assert.True(t, tens > 10)
	actPercentage = float64(tens) / float64(total)
	expectedPercentage = 10.0 / 111.0
	if actPercentage > 2*expectedPercentage || actPercentage < 0.5*expectedPercentage {
		t.Errorf("Unexpected percentage of tens task: exp %v, act %v", expectedPercentage, actPercentage)
	}
	assert.True(t, hundreds > 100)
	actPercentage = float64(hundreds) / float64(total)
	expectedPercentage = 100.0 / 111.0
	if actPercentage > 2*expectedPercentage || actPercentage < 0.5*expectedPercentage {
		t.Errorf("Unexpected percentage of hundreds task: exp %v, act %v", expectedPercentage, actPercentage)
	}
}
// TestSpawnAndStop drives a worker runner through start -> spawning_complete
// and then through stop/quit, asserting the messages it queues for the
// master in order.
func TestSpawnAndStop(t *testing.T) {
	taskA := &Task{
		Fn: func() {
			time.Sleep(time.Second)
		},
	}
	taskB := &Task{
		Fn: func() {
			time.Sleep(2 * time.Second)
		},
	}
	tasks := []*Task{taskA, taskB}
	runner := newWorkerRunner("localhost", 5557)
	defer runner.close()
	runner.client = newClient("localhost", 5557, runner.nodeID)
	runner.setTasks(tasks)
	runner.setSpawnCount(10)
	runner.setSpawnRate(10)
	go runner.start()
	// wait for spawning goroutines
	time.Sleep(2 * time.Second)
	if runner.controller.getCurrentClientsNum() != 10 {
		t.Error("Number of goroutines mismatches, expected: 10, current count", runner.controller.getCurrentClientsNum())
	}
	// The runner reports completion to the master via its send channel.
	msg := <-runner.client.sendChannel()
	if msg.Type != "spawning_complete" {
		t.Error("Runner should send spawning_complete message when spawning completed, got", msg.Type)
	}
	go runner.stop()
	close(runner.doneChan)
	runner.onQuiting()
	msg = <-runner.client.sendChannel()
	if msg.Type != "quit" {
		t.Error("Runner should send quit message on quitting, got", msg.Type)
	}
}
// TestStop verifies that stopping a spawning worker runner transitions it to
// StateStopped once doneChan is closed.
func TestStop(t *testing.T) {
	taskA := &Task{
		Fn: func() {
			time.Sleep(time.Second)
		},
	}
	tasks := []*Task{taskA}
	runner := newWorkerRunner("localhost", 5557)
	runner.setTasks(tasks)
	runner.reset()
	runner.updateState(StateSpawning)
	go runner.stop()
	// stop() presumably waits on doneChan — closing it unblocks the stop; confirm in runner.go.
	close(runner.doneChan)
	time.Sleep(1 * time.Second)
	if runner.getState() != StateStopped {
		t.Error("Expected runner state to be 5, was", getStateName(runner.getState()))
	}
}
// TestOnSpawnMessage checks that a "spawn" message from the master
// overwrites the locally configured spawn count and rate with the values in
// the transmitted profile.
func TestOnSpawnMessage(t *testing.T) {
	taskA := &Task{
		Fn: func() {
			time.Sleep(time.Second)
		},
	}
	runner := newWorkerRunner("localhost", 5557)
	defer runner.close()
	runner.client = newClient("localhost", 5557, runner.nodeID)
	runner.updateState(StateInit)
	runner.reset()
	runner.setTasks([]*Task{taskA})
	// Local settings (100) should be replaced by the profile (20).
	runner.setSpawnCount(100)
	runner.setSpawnRate(100)
	runner.onSpawnMessage(newMessageToWorker("spawn", ProfileToBytes(&Profile{SpawnCount: 20, SpawnRate: 20}), nil, nil))
	if runner.getSpawnCount() != 20 {
		t.Error("workers should be overwrote by onSpawnMessage, expected: 20, was:", runner.controller.spawnCount)
	}
	if runner.getSpawnRate() != 20 {
		t.Error("spawnRate should be overwrote by onSpawnMessage, expected: 20, was:", runner.controller.spawnRate)
	}
	// Clean up: tell the runner to stop.
	runner.onMessage(newGenericMessage("stop", nil, runner.nodeID))
}
// TestOnQuitMessage exercises the "quit" message in three runner states
// (init, running, stopped); in each case the runner must close closeChan and
// end in StateQuitting.
func TestOnQuitMessage(t *testing.T) {
	runner := newWorkerRunner("localhost", 5557)
	runner.client = newClient("localhost", 5557, "test")
	// Case 1: quit from StateInit.
	runner.updateState(StateInit)
	runner.onMessage(newGenericMessage("quit", nil, runner.nodeID))
	<-runner.closeChan
	// Case 2: quit from StateRunning — channels must be re-created because
	// the previous quit closed them.
	runner.updateState(StateRunning)
	runner.reset()
	runner.closeChan = make(chan bool)
	runner.client.shutdownChan = make(chan bool)
	go runner.onMessage(newGenericMessage("quit", nil, runner.nodeID))
	close(runner.doneChan)
	<-runner.closeChan
	if runner.getState() != StateQuitting {
		t.Error("Runner's state should be StateQuitting")
	}
	// Case 3: quit from StateStopped.
	runner.updateState(StateStopped)
	runner.closeChan = make(chan bool)
	runner.reset()
	runner.client.shutdownChan = make(chan bool)
	runner.onMessage(newGenericMessage("quit", nil, runner.nodeID))
	<-runner.closeChan
	if runner.getState() != StateQuitting {
		t.Error("Runner's state should be StateQuitting")
	}
}
// TestOnMessage walks a worker runner through a full lifecycle driven only
// by master messages: spawn -> running -> rebalance (10 -> 15 workers) ->
// stop -> spawn again -> stop -> quit, asserting the status messages queued
// for the master and the runner state at each step. Heavily timing-sensitive.
func TestOnMessage(t *testing.T) {
	taskA := &Task{
		Fn: func() {
			time.Sleep(time.Second)
		},
	}
	taskB := &Task{
		Fn: func() {
			time.Sleep(2 * time.Second)
		},
	}
	tasks := []*Task{taskA, taskB}
	runner := newWorkerRunner("localhost", 5557)
	runner.client = newClient("localhost", 5557, runner.nodeID)
	runner.updateState(StateInit)
	runner.setTasks(tasks)
	// start spawning
	runner.onMessage(newMessageToWorker("spawn", ProfileToBytes(&Profile{SpawnCount: 10, SpawnRate: 10}), nil, nil))
	go runner.start()
	msg := <-runner.client.sendChannel()
	if msg.Type != "spawning" {
		t.Error("Runner should send spawning message when starting spawn, got", msg.Type)
	}
	// spawn complete and running
	time.Sleep(2 * time.Second)
	if runner.controller.getCurrentClientsNum() != 10 {
		t.Error("Number of goroutines mismatches, expected: 10, current count:", runner.controller.getCurrentClientsNum())
	}
	msg = <-runner.client.sendChannel()
	if msg.Type != "spawning_complete" {
		t.Error("Runner should send spawning_complete message when spawn completed, got", msg.Type)
	}
	if runner.getState() != StateRunning {
		t.Error("State of runner is not running after spawn, got", getStateName(runner.getState()))
	}
	// increase goroutines while running
	runner.onMessage(newMessageToWorker("rebalance", ProfileToBytes(&Profile{SpawnCount: 15, SpawnRate: 15}), nil, nil))
	runner.rebalance <- true
	time.Sleep(2 * time.Second)
	if runner.getState() != StateRunning {
		t.Error("State of runner is not running after spawn, got", getStateName(runner.getState()))
	}
	if runner.controller.getCurrentClientsNum() != 15 {
		t.Error("Number of goroutines mismatches, expected: 15, current count:", runner.controller.getCurrentClientsNum())
	}
	// stop all the workers
	runner.onMessage(newGenericMessage("stop", nil, runner.nodeID))
	if runner.getState() != StateStopped {
		t.Error("State of runner is not stopped, got", getStateName(runner.getState()))
	}
	msg = <-runner.client.sendChannel()
	if msg.Type != "client_stopped" {
		t.Error("Runner should send client_stopped message, got", msg.Type)
	}
	time.Sleep(3 * time.Second)
	// spawn again
	runner.onMessage(newMessageToWorker("spawn", ProfileToBytes(&Profile{SpawnCount: 10, SpawnRate: 10}), nil, nil))
	go runner.start()
	msg = <-runner.client.sendChannel()
	if msg.Type != "spawning" {
		t.Error("Runner should send spawning message when starting spawn, got", msg.Type)
	}
	// spawn complete and running
	time.Sleep(3 * time.Second)
	if runner.controller.getCurrentClientsNum() != 10 {
		t.Error("Number of goroutines mismatches, expected: 10, current count:", runner.controller.getCurrentClientsNum())
	}
	if runner.getState() != StateRunning {
		t.Error("State of runner is not running after spawn, got", getStateName(runner.getState()))
	}
	msg = <-runner.client.sendChannel()
	if msg.Type != "spawning_complete" {
		t.Error("Runner should send spawning_complete message when spawn completed, got", msg.Type)
	}
	// stop all the workers
	runner.onMessage(newGenericMessage("stop", nil, runner.nodeID))
	if runner.getState() != StateStopped {
		t.Error("State of runner is not stopped, got", getStateName(runner.getState()))
	}
	msg = <-runner.client.sendChannel()
	if msg.Type != "client_stopped" {
		t.Error("Runner should send client_stopped message, got", msg.Type)
	}
	time.Sleep(3 * time.Second)
	// quit
	runner.onMessage(newGenericMessage("quit", nil, runner.nodeID))
}
func TestClientListener(t *testing.T) {
runner := newMasterRunner("localhost", 5557)
defer runner.close()
runner.updateState(StateInit)
runner.setSpawnCount(10)
runner.setSpawnRate(10)
go runner.clientListener()
runner.server.clients.Store("testID1", &WorkerNode{ID: "testID1", Heartbeat: 3, stream: make(chan *messager.StreamResponse, 10)})
runner.server.clients.Store("testID2", &WorkerNode{ID: "testID2", Heartbeat: 3, stream: make(chan *messager.StreamResponse, 10)})
runner.server.recvChannel() <- &genericMessage{
Type: typeClientReady,
NodeID: "testID1",
}
worker1, ok := runner.server.getClients().Load("testID1")
if !ok {
t.Fatal("error")
}
workerInfo1, ok := worker1.(*WorkerNode)
if !ok {
t.Fatal("error")
}
time.Sleep(time.Second)
if workerInfo1.getState() != StateInit {
t.Error("State of worker runner is not init, got", workerInfo1.getState())
}
runner.server.recvChannel() <- &genericMessage{
Type: typeClientStopped,
NodeID: "testID2",
}
worker2, ok := runner.server.getClients().Load("testID2")
if !ok {
t.Fatal("error")
}
workerInfo2, ok := worker2.(*WorkerNode)
if !ok {
t.Fatal("error")
}
time.Sleep(time.Second)
if workerInfo2.getState() != StateStopped {
t.Error("State of worker runner is not stopped, got", workerInfo2.getState())
}
runner.server.recvChannel() <- &genericMessage{
Type: typeClientStopped,
NodeID: "testID1",
}
time.Sleep(time.Second)
if runner.getState() != StateStopped {
t.Error("State of master runner is not stopped, got", getStateName(runner.getState()))
}
}
func TestHeartbeatWorker(t *testing.T) {
runner := newMasterRunner("localhost", 5557)
defer runner.close()
runner.updateState(StateInit)
runner.setSpawnCount(10)
runner.setSpawnRate(10)
runner.server.clients.Store("testID1", &WorkerNode{ID: "testID1", Heartbeat: 1, State: StateInit, stream: make(chan *messager.StreamResponse, 10)})
runner.server.clients.Store("testID2", &WorkerNode{ID: "testID2", Heartbeat: 1, State: StateInit, stream: make(chan *messager.StreamResponse, 10)})
go runner.clientListener()
go runner.heartbeatWorker()
time.Sleep(3 * time.Second)
worker1, ok := runner.server.getClients().Load("testID1")
if !ok {
t.Fatal()
}
workerInfo1, ok := worker1.(*WorkerNode)
if !ok {
t.Fatal()
}
if workerInfo1.getState() != StateMissing {
t.Error("expected state of worker runner is missing, but got", workerInfo1.getState())
}
runner.server.recvChannel() <- &genericMessage{
Type: typeHeartbeat,
NodeID: "testID2",
Data: map[string][]byte{"state": builtin.Int64ToBytes(3)},
}
worker2, ok := runner.server.getClients().Load("testID2")
if !ok {
t.Fatal()
}
workerInfo2, ok := worker2.(*WorkerNode)
if !ok {
t.Fatal()
}
time.Sleep(time.Second)
if workerInfo2.getState() == StateMissing {
t.Error("expected state of worker runner is not missing, but got missing")
}
}

View File

@@ -0,0 +1,530 @@
package boomer
import (
"context"
"fmt"
"net"
"strings"
"sync"
"sync/atomic"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/status"
"github.com/httprunner/httprunner/v4/hrp/internal/boomer/data"
"github.com/httprunner/httprunner/v4/hrp/internal/boomer/grpc/messager"
"github.com/rs/zerolog/log"
)
type WorkerNode struct {
ID string `json:"id"`
IP string `json:"ip"`
OS string `json:"os"`
Arch string `json:"arch"`
State int32 `json:"state"`
Heartbeat int32 `json:"heartbeat"`
UserCount int64 `json:"user_count"`
WorkerCPUUsage float64 `json:"worker_cpu_usage"`
CPUUsage float64 `json:"cpu_usage"`
CPUWarningEmitted bool `json:"cpu_warning_emitted"`
WorkerMemoryUsage float64 `json:"worker_memory_usage"`
MemoryUsage float64 `json:"memory_usage"`
stream chan *messager.StreamResponse
mutex sync.RWMutex
disconnectedChan chan bool
}
func newWorkerNode(id, ip, os, arch string) *WorkerNode {
stream := make(chan *messager.StreamResponse, 100)
return &WorkerNode{State: StateInit, ID: id, IP: ip, OS: os, Arch: arch, Heartbeat: 3, stream: stream, disconnectedChan: make(chan bool)}
}
func (w *WorkerNode) getState() int32 {
return atomic.LoadInt32(&w.State)
}
func (w *WorkerNode) setState(state int32) {
atomic.StoreInt32(&w.State, state)
}
func (w *WorkerNode) updateHeartbeat(heartbeat int32) {
atomic.StoreInt32(&w.Heartbeat, heartbeat)
}
func (w *WorkerNode) getHeartbeat() int32 {
return atomic.LoadInt32(&w.Heartbeat)
}
func (w *WorkerNode) updateUserCount(spawnCount int64) {
atomic.StoreInt64(&w.UserCount, spawnCount)
}
func (w *WorkerNode) getUserCount() int64 {
return atomic.LoadInt64(&w.UserCount)
}
func (w *WorkerNode) updateCPUUsage(cpuUsage float64) {
w.mutex.Lock()
defer w.mutex.Unlock()
w.CPUUsage = cpuUsage
}
func (w *WorkerNode) getCPUUsage() float64 {
w.mutex.RLock()
defer w.mutex.RUnlock()
return w.CPUUsage
}
func (w *WorkerNode) updateWorkerCPUUsage(workerCPUUsage float64) {
w.mutex.Lock()
defer w.mutex.Unlock()
w.WorkerCPUUsage = workerCPUUsage
}
func (w *WorkerNode) getWorkerCPUUsage() float64 {
w.mutex.RLock()
defer w.mutex.RUnlock()
return w.WorkerCPUUsage
}
func (w *WorkerNode) updateCPUWarningEmitted(cpuWarningEmitted bool) {
w.mutex.Lock()
defer w.mutex.Unlock()
w.CPUWarningEmitted = cpuWarningEmitted
}
func (w *WorkerNode) getCPUWarningEmitted() bool {
w.mutex.RLock()
defer w.mutex.RUnlock()
return w.CPUWarningEmitted
}
func (w *WorkerNode) updateWorkerMemoryUsage(workerMemoryUsage float64) {
w.mutex.Lock()
defer w.mutex.Unlock()
w.WorkerMemoryUsage = workerMemoryUsage
}
func (w *WorkerNode) getWorkerMemoryUsage() float64 {
w.mutex.RLock()
defer w.mutex.RUnlock()
return w.WorkerMemoryUsage
}
func (w *WorkerNode) updateMemoryUsage(memoryUsage float64) {
w.mutex.Lock()
defer w.mutex.Unlock()
w.MemoryUsage = memoryUsage
}
func (w *WorkerNode) getMemoryUsage() float64 {
w.mutex.RLock()
defer w.mutex.RUnlock()
return w.MemoryUsage
}
// setStream replaces the worker's outgoing message channel.
// This is a write to shared state, so it must take the write lock;
// the original implementation incorrectly used RLock/RUnlock here,
// which does not exclude concurrent writers.
func (w *WorkerNode) setStream(stream chan *messager.StreamResponse) {
	w.mutex.Lock()
	defer w.mutex.Unlock()
	w.stream = stream
}
func (w *WorkerNode) getStream() chan *messager.StreamResponse {
w.mutex.RLock()
defer w.mutex.RUnlock()
return w.stream
}
func (w *WorkerNode) getWorkerInfo() WorkerNode {
w.mutex.RLock()
defer w.mutex.RUnlock()
return WorkerNode{
ID: w.ID,
IP: w.IP,
OS: w.OS,
Arch: w.Arch,
State: w.getState(),
Heartbeat: w.getHeartbeat(),
UserCount: w.getUserCount(),
WorkerCPUUsage: w.getWorkerCPUUsage(),
CPUUsage: w.getCPUUsage(),
CPUWarningEmitted: w.getCPUWarningEmitted(),
WorkerMemoryUsage: w.getWorkerMemoryUsage(),
MemoryUsage: w.getMemoryUsage(),
}
}
type grpcServer struct {
messager.UnimplementedMessageServer
masterHost string
masterPort int
server *grpc.Server
clients *sync.Map
fromWorker chan *genericMessage
disconnectedChan chan bool
shutdownChan chan bool
wg sync.WaitGroup
}
var (
errMissingMetadata = status.Errorf(codes.InvalidArgument, "missing metadata")
errInvalidToken = status.Errorf(codes.Unauthenticated, "invalid token")
)
func logger(format string, a ...interface{}) {
// FIXME: support server-side and client-side logging to files
log.Info().Msg(fmt.Sprintf(format, a...))
}
// valid validates the authorization.
// valid reports whether the metadata's authorization values carry the
// expected bearer token.
func valid(authorization []string) bool {
	if len(authorization) == 0 {
		return false
	}
	return strings.TrimPrefix(authorization[0], "Bearer ") == "httprunner-secret-token"
}
func serverUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
// authentication (token verification)
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return nil, errMissingMetadata
}
if !valid(md["authorization"]) {
return nil, errInvalidToken
}
m, err := handler(ctx, req)
if err != nil {
logger("RPC failed with error %v", err)
}
return m, err
}
// serverWrappedStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and
// SendMsg method call.
type serverWrappedStream struct {
grpc.ServerStream
}
func (w *serverWrappedStream) RecvMsg(m interface{}) error {
logger("Receive a message (Type: %T) at %s", m, time.Now().Format(time.RFC3339))
return w.ServerStream.RecvMsg(m)
}
func (w *serverWrappedStream) SendMsg(m interface{}) error {
logger("Send a message (Type: %T) at %v", m, time.Now().Format(time.RFC3339))
return w.ServerStream.SendMsg(m)
}
func newServerWrappedStream(s grpc.ServerStream) grpc.ServerStream {
return &serverWrappedStream{s}
}
func serverStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
// authentication (token verification)
md, ok := metadata.FromIncomingContext(ss.Context())
if !ok {
return errMissingMetadata
}
if !valid(md["authorization"]) {
return errInvalidToken
}
err := handler(srv, newServerWrappedStream(ss))
if err != nil {
logger("RPC failed with error %v", err)
}
return err
}
func newServer(masterHost string, masterPort int) (server *grpcServer) {
log.Info().Msg("Boomer is built with grpc support.")
server = &grpcServer{
masterHost: masterHost,
masterPort: masterPort,
clients: &sync.Map{},
fromWorker: make(chan *genericMessage, 100),
disconnectedChan: make(chan bool),
shutdownChan: make(chan bool),
wg: sync.WaitGroup{},
}
return server
}
// start creates the TLS-secured gRPC server, registers the message
// service and begins serving on masterHost:masterPort in a background
// goroutine. It returns an error when the TCP listener cannot be
// created; serve-time failures are only logged because Serve runs
// asynchronously after start has returned.
func (s *grpcServer) start() error {
	addr := fmt.Sprintf("%v:%v", s.masterHost, s.masterPort)
	// Create tls based credential.
	creds, err := credentials.NewServerTLSFromFile(data.Path("x509/server_cert.pem"), data.Path("x509/server_key.pem"))
	if err != nil {
		log.Fatal().Msg(fmt.Sprintf("failed to load key pair: %s", err))
	}
	opts := []grpc.ServerOption{
		grpc.UnaryInterceptor(serverUnaryInterceptor),
		grpc.StreamInterceptor(serverStreamInterceptor),
		// Enable TLS for all incoming connections.
		grpc.Creds(creds),
	}
	lis, err := net.Listen("tcp", addr)
	if err != nil {
		log.Error().Err(err).Msg("failed to listen")
		return err
	}
	// create gRPC server
	s.server = grpc.NewServer(opts...)
	// register message server
	messager.RegisterMessageServer(s.server, s)
	reflection.Register(s.server)
	// Start serving in the background. The original assigned Serve's
	// result to the outer named return value from inside the goroutine,
	// which is both useless (the caller has already returned) and a
	// data race; use a goroutine-local variable instead.
	go func() {
		if serveErr := s.server.Serve(lis); serveErr != nil {
			log.Error().Err(serveErr).Msg("failed to serve")
		}
	}()
	return nil
}
// Register stores a newly joined worker node, keyed by its node ID.
// The worker's IP is derived from the gRPC peer address.
func (s *grpcServer) Register(ctx context.Context, req *messager.RegisterRequest) (*messager.RegisterResponse, error) {
	// Extract the client IP. Use net.SplitHostPort so that IPv6
	// addresses (e.g. "[::1]:12345") are handled correctly — the
	// original strings.Split(addr, ":")[0] breaks on them. Also guard
	// against a missing peer instead of dereferencing a nil pointer.
	var clientIp string
	if p, ok := peer.FromContext(ctx); ok {
		if host, _, err := net.SplitHostPort(p.Addr.String()); err == nil {
			clientIp = host
		} else {
			clientIp = p.Addr.String()
		}
	}
	// store worker information
	wn := newWorkerNode(req.NodeID, clientIp, req.Os, req.Arch)
	s.clients.Store(req.NodeID, wn)
	log.Warn().Str("worker id", req.NodeID).Msg("worker joined")
	return &messager.RegisterResponse{Code: "0", Message: "register successfully"}, nil
}
func (s *grpcServer) SignOut(_ context.Context, req *messager.SignOutRequest) (*messager.SignOutResponse, error) {
// delete worker information
s.clients.Delete(req.NodeID)
log.Warn().Str("worker id", req.NodeID).Msg("worker quited")
return &messager.SignOutResponse{Code: "0", Message: "sign out successfully"}, nil
}
// valid reports whether token matches the ID of any registered worker.
func (s *grpcServer) valid(token string) bool {
	found := false
	s.clients.Range(func(_, value interface{}) bool {
		wn, ok := value.(*WorkerNode)
		if ok && wn.ID == token {
			found = true
			return false // match found, stop scanning
		}
		return true
	})
	return found
}
func (s *grpcServer) BidirectionalStreamingMessage(srv messager.Message_BidirectionalStreamingMessageServer) error {
s.wg.Add(1)
defer s.wg.Done()
token, ok := extractToken(srv.Context())
if !ok {
return status.Error(codes.Unauthenticated, "missing token header")
}
ok = s.valid(token)
if !ok {
return status.Error(codes.Unauthenticated, "invalid token")
}
go s.sendMsg(srv, token)
FOR:
for {
select {
case <-srv.Context().Done():
break FOR
case <-s.disconnectedChannel():
break FOR
default:
msg, err := srv.Recv()
if st, ok := status.FromError(err); ok {
switch st.Code() {
case codes.OK:
s.fromWorker <- newGenericMessage(msg.Type, msg.Data, msg.NodeID)
log.Info().
Str("nodeID", msg.NodeID).
Str("type", msg.Type).
Interface("data", msg.Data).
Msg("receive data from worker")
case codes.Unavailable, codes.Canceled, codes.DeadlineExceeded:
s.fromWorker <- newQuitMessage(token)
break FOR
default:
log.Error().Err(err).Msg("failed to get stream from client")
break FOR
}
}
}
}
log.Info().Str("worker id", token).Msg("bidirectional stream closed")
return nil
}
func (s *grpcServer) sendMsg(srv messager.Message_BidirectionalStreamingMessageServer, id string) {
stream := s.getWorkersByID(id).getStream()
for {
select {
case <-srv.Context().Done():
return
case <-s.disconnectedChannel():
return
case res := <-stream:
if s, ok := status.FromError(srv.Send(res)); ok {
switch s.Code() {
case codes.OK:
log.Info().
Str("nodeID", res.NodeID).
Str("type", res.Type).
Interface("data", res.Data).
Interface("profile", res.Profile).
Msg("send data to worker")
case codes.Unavailable, codes.Canceled, codes.DeadlineExceeded:
log.Warn().Msg(fmt.Sprintf("client (%s) terminated connection", id))
return
default:
log.Warn().Msg(fmt.Sprintf("failed to send to client (%s): %v", id, s.Err()))
return
}
}
}
}
}
// sendBroadcasts fans msg out to every registered worker, skipping
// workers that are quitting or already considered missing. Each
// worker receives its own StreamResponse carrying its node ID.
// NOTE(review): the send into each worker's stream channel blocks
// once the channel buffer (capacity 100 at creation) is full — a
// stalled worker stream could stall the whole broadcast; confirm this
// is acceptable for the expected worker count.
func (s *grpcServer) sendBroadcasts(msg *genericMessage) {
	s.clients.Range(func(key, value interface{}) bool {
		if workerInfo, ok := value.(*WorkerNode); ok {
			// do not deliver to workers that are leaving or unreachable
			if workerInfo.getState() == StateQuitting || workerInfo.getState() == StateMissing {
				return true
			}
			workerInfo.getStream() <- &messager.StreamResponse{
				Type:    msg.Type,
				Profile: msg.Profile,
				Data:    msg.Data,
				NodeID:  workerInfo.ID,
				Tasks:   msg.Tasks,
			}
		}
		return true
	})
}
// stopServer gracefully stops the gRPC server, waiting for in-flight
// RPCs to drain. If ctx expires before the drain completes, it falls
// back to a hard Stop to force long-lived transports closed.
func (s *grpcServer) stopServer(ctx context.Context) {
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		// close listeners to stop accepting new connections,
		// will block on any existing transports
		s.server.GracefulStop()
	}()
	// wait until all pending RPCs are finished
	select {
	case <-ch:
	case <-ctx.Done():
		// took too long, manually close open transports
		// e.g. watch streams
		s.server.Stop()
		// concurrent GracefulStop should be interrupted
		<-ch
	}
}
func (s *grpcServer) close() {
// close client requests with request timeout
timeout := 2 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
s.stopServer(ctx)
cancel()
// disconnecting workers
close(s.disconnectedChan)
// waiting to close bidirectional stream
s.wg.Wait()
}
func (s *grpcServer) recvChannel() chan *genericMessage {
return s.fromWorker
}
func (s *grpcServer) shutdownChannel() chan bool {
return s.shutdownChan
}
func (s *grpcServer) disconnectedChannel() chan bool {
return s.disconnectedChan
}
func (s *grpcServer) getWorkersByState(state int32) (wns []*WorkerNode) {
s.clients.Range(func(key, value interface{}) bool {
if workerInfo, ok := value.(*WorkerNode); ok {
if workerInfo.getState() == state {
wns = append(wns, workerInfo)
}
}
return true
})
return wns
}
// getWorkersByID returns the registered worker whose ID equals id,
// or nil when no such worker exists.
func (s *grpcServer) getWorkersByID(id string) *WorkerNode {
	var found *WorkerNode
	s.clients.Range(func(_, value interface{}) bool {
		if candidate, ok := value.(*WorkerNode); ok && candidate.ID == id {
			found = candidate
			return false // match found, stop scanning
		}
		return true
	})
	return found
}
func (s *grpcServer) getWorkersLengthByState(state int32) (l int) {
s.clients.Range(func(key, value interface{}) bool {
if workerInfo, ok := value.(*WorkerNode); ok {
if workerInfo.getState() == state {
l++
}
}
return true
})
return
}
func (s *grpcServer) getAllWorkers() (wns []WorkerNode) {
s.clients.Range(func(key, value interface{}) bool {
if workerInfo, ok := value.(*WorkerNode); ok {
wns = append(wns, workerInfo.getWorkerInfo())
}
return true
})
return wns
}
func (s *grpcServer) getClients() *sync.Map {
return s.clients
}
func (s *grpcServer) getClientsLength() (l int) {
s.clients.Range(func(key, value interface{}) bool {
if workerInfo, ok := value.(*WorkerNode); ok {
if workerInfo.getState() != StateQuitting && workerInfo.getState() != StateMissing {
l++
}
}
return true
})
return
}

View File

@@ -0,0 +1 @@
package boomer

View File

@@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package boomer

View File

@@ -1,3 +1,4 @@
//go:build windows
// +build windows
package boomer

View File

@@ -7,9 +7,15 @@ import (
"math"
"os"
"runtime/pprof"
"strings"
"time"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
"github.com/shirou/gopsutil/cpu"
"github.com/shirou/gopsutil/mem"
"github.com/shirou/gopsutil/process"
)
func round(val float64, roundOn float64, places int) (newVal float64) {
@@ -75,3 +81,71 @@ func startCPUProfile(file string, duration time.Duration) (err error) {
})
return nil
}
// generate a random nodeID like locust does, using the same algorithm:
// "<hostname>_<uuid4 without dashes>".
func getNodeID() string {
	hostname, _ := os.Hostname()
	compactUUID := strings.ReplaceAll(uuid.New().String(), "-", "")
	return fmt.Sprintf("%s_%s", hostname, compactUUID)
}
// GetCurrentPidCPUUsage get current pid CPU usage
func GetCurrentPidCPUUsage() float64 {
currentPid := os.Getpid()
p, err := process.NewProcess(int32(currentPid))
if err != nil {
log.Error().Err(err).Msg(fmt.Sprintf("failed to get CPU percent\n"))
return 0.0
}
percent, err := p.CPUPercent()
if err != nil {
log.Error().Err(err).Msg(fmt.Sprintf("failed to get CPU percent\n"))
return 0.0
}
return percent
}
// GetCurrentPidCPUPercent get the percentage of current pid cpu used
func GetCurrentPidCPUPercent() float64 {
currentPid := os.Getpid()
p, err := process.NewProcess(int32(currentPid))
if err != nil {
log.Error().Err(err).Msg(fmt.Sprintf("failed to get CPU percent\n"))
return 0.0
}
percent, err := p.Percent(time.Second)
if err != nil {
log.Error().Err(err).Msg(fmt.Sprintf("failed to get CPU percent\n"))
return 0.0
}
return percent
}
// GetCurrentCPUPercent get the percentage of current cpu used,
// sampled over a one-second window. It returns 0.0 when sampling
// fails — the original ignored the error and indexed percent[0],
// which panics on an empty result.
func GetCurrentCPUPercent() float64 {
	percent, err := cpu.Percent(time.Second, false)
	if err != nil || len(percent) == 0 {
		log.Error().Err(err).Msg("failed to get CPU percent")
		return 0.0
	}
	return percent[0]
}
// GetCurrentMemoryPercent get the percentage of current memory used.
// It returns 0.0 when system memory stats cannot be read — the
// original ignored the error and would nil-deref memInfo on failure.
func GetCurrentMemoryPercent() float64 {
	memInfo, err := mem.VirtualMemory()
	if err != nil || memInfo == nil {
		log.Error().Err(err).Msg("failed to get memory percent")
		return 0.0
	}
	return memInfo.UsedPercent
}
// GetCurrentPidMemoryUsage get current Memory usage of this process,
// as a percentage of total system memory. Returns 0.0 on failure.
func GetCurrentPidMemoryUsage() float64 {
	currentPid := os.Getpid()
	p, err := process.NewProcess(int32(currentPid))
	if err != nil {
		// fixed: the original logged "failed to get CPU percent" here,
		// a copy-paste mistake in a memory function
		log.Error().Err(err).Msg("failed to get memory percent")
		return 0.0
	}
	percent, err := p.MemoryPercent()
	if err != nil {
		log.Error().Err(err).Msg("failed to get memory percent")
		return 0.0
	}
	return float64(percent)
}

View File

@@ -3,9 +3,11 @@ package builtin
import (
"bufio"
"bytes"
"encoding/binary"
"encoding/csv"
builtinJSON "encoding/json"
"fmt"
"math"
"math/rand"
"os"
"os/exec"
@@ -490,3 +492,75 @@ func GetFileNameWithoutExtension(path string) string {
ext := filepath.Ext(base)
return base[0 : len(base)-len(ext)]
}
func Bytes2File(data []byte, filename string) error {
file, err := os.OpenFile(filename, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0755)
defer file.Close()
if err != nil {
log.Error().Err(err).Msg("failed to generate file")
}
count, err := file.Write(data)
if err != nil {
return err
}
log.Info().Msg(fmt.Sprintf("write file %s len: %d \n", filename, count))
return nil
}
// Float32ToByte encodes v as its 4-byte little-endian IEEE-754 bit
// pattern.
func Float32ToByte(v float32) []byte {
	out := make([]byte, 4)
	binary.LittleEndian.PutUint32(out, math.Float32bits(v))
	return out
}
// ByteToFloat32 decodes a 4-byte little-endian IEEE-754 bit pattern
// into a float32. v must hold at least 4 bytes.
func ByteToFloat32(v []byte) float32 {
	return math.Float32frombits(binary.LittleEndian.Uint32(v))
}
// Float64ToByte encodes v as its 8-byte little-endian IEEE-754 bit
// pattern.
func Float64ToByte(v float64) []byte {
	out := make([]byte, 8)
	binary.LittleEndian.PutUint64(out, math.Float64bits(v))
	return out
}
// ByteToFloat64 decodes an 8-byte little-endian IEEE-754 bit pattern
// into a float64. v must hold at least 8 bytes.
func ByteToFloat64(v []byte) float64 {
	return math.Float64frombits(binary.LittleEndian.Uint64(v))
}
// Int64ToBytes encodes n as 8 big-endian bytes (two's complement),
// identical to binary.Write with binary.BigEndian.
func Int64ToBytes(n int64) []byte {
	out := make([]byte, 8)
	binary.BigEndian.PutUint64(out, uint64(n))
	return out
}
// BytesToInt64 decodes the first 8 bytes of bys as a big-endian
// int64. Matching the original binary.Read semantics, input shorter
// than 8 bytes yields 0.
func BytesToInt64(bys []byte) int64 {
	if len(bys) < 8 {
		return 0
	}
	return int64(binary.BigEndian.Uint64(bys))
}
// SplitInteger splits m into n integer parts that sum to m, keeping
// the parts as even as possible. For a non-negative remainder the
// smaller parts come first; for a negative remainder the decremented
// parts come first. n must be non-zero (division panics otherwise).
func SplitInteger(m, n int) []int {
	quotient, remainder := m/n, m%n
	parts := make([]int, 0, n)
	if remainder >= 0 {
		for i := 0; i < n-remainder; i++ {
			parts = append(parts, quotient)
		}
		for i := 0; i < remainder; i++ {
			parts = append(parts, quotient+1)
		}
		return parts
	}
	// negative remainder (possible when m is negative)
	for i := 0; i < -remainder; i++ {
		parts = append(parts, quotient-1)
	}
	for i := 0; i < n+remainder; i++ {
		parts = append(parts, quotient)
	}
	return parts
}

View File

@@ -178,6 +178,14 @@ func (iter *ParametersIterator) Next() map[string]interface{} {
return selectedParameters
}
func (iter *ParametersIterator) outParameters() map[string]interface{} {
res := map[string]interface{}{}
for key, params := range iter.data {
res[key] = params
}
return res
}
func genCartesianProduct(multiParameters []Parameters) Parameters {
if len(multiParameters) == 0 {
return nil

View File

@@ -8,10 +8,9 @@ import (
"github.com/httprunner/funplugin"
"github.com/httprunner/funplugin/fungo"
"github.com/rs/zerolog/log"
"github.com/httprunner/httprunner/v4/hrp/internal/builtin"
"github.com/httprunner/httprunner/v4/hrp/internal/sdk"
"github.com/rs/zerolog/log"
)
const (

View File

@@ -7,6 +7,7 @@ import (
"net/http/cookiejar"
"net/url"
"path/filepath"
"strings"
"testing"
"time"
@@ -16,6 +17,7 @@ import (
"github.com/rs/zerolog/log"
"golang.org/x/net/http2"
"github.com/httprunner/httprunner/v4/hrp/internal/builtin"
"github.com/httprunner/httprunner/v4/hrp/internal/sdk"
)
@@ -280,6 +282,21 @@ func (r *HRPRunner) newCaseRunner(testcase *TestCase) (*testCaseRunner, error) {
runner.hrpRunner.SetTimeout(timeout)
}
// load plugin info to testcase config
if plugin != nil {
pluginPath, _ := locatePlugin(testcase.Config.Path)
pluginContent, err := builtin.ReadFile(pluginPath)
if err != nil {
return nil, err
}
tp := strings.Split(plugin.Path(), ".")
runner.parsedConfig.PluginSetting = &PluginConfig{
Path: pluginPath,
Content: pluginContent,
Type: tp[len(tp)-1],
}
}
return runner, nil
}

382
hrp/server.go Normal file
View File

@@ -0,0 +1,382 @@
package hrp
import (
"context"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"github.com/httprunner/httprunner/v4/hrp/internal/boomer"
"github.com/httprunner/httprunner/v4/hrp/internal/json"
"github.com/mitchellh/mapstructure"
)
const jsonContentType = "application/json; encoding=utf-8"
func methods(h http.HandlerFunc, methods ...string) http.HandlerFunc {
methodMap := make(map[string]struct{}, len(methods))
for _, m := range methods {
methodMap[m] = struct{}{}
// GET implies support for HEAD
if m == "GET" {
methodMap["HEAD"] = struct{}{}
}
}
return func(w http.ResponseWriter, r *http.Request) {
if _, ok := methodMap[r.Method]; !ok {
http.Error(w, fmt.Sprintf("method %s not allowed", r.Method), http.StatusMethodNotAllowed)
return
}
h.ServeHTTP(w, r)
}
}
// parseBody decodes the request body as a JSON object. It returns
// (nil, nil) when the request carries no body at all.
// NOTE(review): the body is only Closed on the read-error path; for
// server-side handlers this is tolerable because net/http closes the
// request body after the handler returns — confirm this function is
// never used on client-side responses.
func parseBody(r *http.Request) (data map[string]interface{}, err error) {
	if r.Body == nil {
		return nil, nil
	}
	// read the full body before decoding
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		r.Body.Close()
		return nil, err
	}
	err = json.Unmarshal(body, &data)
	if err != nil {
		return nil, err
	}
	return data, nil
}
func writeJSON(w http.ResponseWriter, body []byte, status int) {
w.Header().Set("Content-Type", jsonContentType)
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(body)))
w.WriteHeader(status)
w.Write(body)
}
type ServerCode int
// server response code
const (
Success ServerCode = iota
ParamsError
ServerError
StopError
)
// ServerStatus stores http response code and message
type ServerStatus struct {
Code ServerCode `json:"code"`
Message string `json:"message"`
}
var EnumAPIResponseSuccess = ServerStatus{
Code: Success,
Message: "success",
}
func EnumAPIResponseParamError(errMsg string) ServerStatus {
return ServerStatus{
Code: ParamsError,
Message: errMsg,
}
}
func EnumAPIResponseServerError(errMsg string) ServerStatus {
return ServerStatus{
Code: ServerError,
Message: errMsg,
}
}
func EnumAPIResponseStopError(errMsg string) ServerStatus {
return ServerStatus{
Code: StopError,
Message: errMsg,
}
}
func CustomAPIResponse(errCode ServerCode, errMsg string) ServerStatus {
return ServerStatus{
Code: errCode,
Message: errMsg,
}
}
type StartRequestBody struct {
boomer.Profile `mapstructure:",squash"`
Worker string `json:"worker,omitempty" yaml:"worker,omitempty" mapstructure:"worker"` // all
TestCasePath string `json:"testcase-path" yaml:"testcase-path" mapstructure:"testcase-path"`
Other map[string]interface{} `mapstructure:",remain"`
}
type RebalanceRequestBody struct {
boomer.Profile `mapstructure:",squash"`
Worker string `json:"worker,omitempty" yaml:"worker,omitempty" mapstructure:"worker"`
Other map[string]interface{} `mapstructure:",remain"`
}
type StopRequestBody struct {
Worker string `json:"worker"`
}
type QuitRequestBody struct {
Worker string `json:"worker"`
}
type CommonResponseBody struct {
ServerStatus
}
type APIGetWorkersRequestBody struct {
}
type APIGetWorkersResponseBody struct {
ServerStatus
Data []boomer.WorkerNode `json:"data"`
}
type APIGetMasterRequestBody struct {
}
type APIGetMasterResponseBody struct {
ServerStatus
Data map[string]interface{} `json:"data"`
}
type apiHandler struct {
boomer *HRPBoomer
}
func (b *HRPBoomer) NewAPIHandler() *apiHandler {
return &apiHandler{boomer: b}
}
// Index renders an HTML index page
func (api *apiHandler) Index(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.Error(w, "Not Found", http.StatusNotFound)
return
}
w.Header().Set("Content-Security-Policy", "default-src 'self'; style-src 'self' 'unsafe-inline'; img-src 'self' www.httprunner.com")
fmt.Fprintf(w, "Welcome to httprunner page!")
}
// Start launches a distributed load test. The request body is decoded
// into a StartRequestBody pre-filled with the boomer's current
// profile, unknown parameters are rejected, and the boomer is started
// with the resulting profile against the supplied testcase path(s).
// The deferred block always writes the JSON response, carrying either
// success or the error message.
func (api *apiHandler) Start(w http.ResponseWriter, r *http.Request) {
	var resp *CommonResponseBody
	var err error
	defer func() {
		if err != nil {
			resp = &CommonResponseBody{
				ServerStatus: EnumAPIResponseServerError(err.Error()),
			}
		} else {
			resp = &CommonResponseBody{
				ServerStatus: EnumAPIResponseSuccess,
			}
		}
		body, _ := json.Marshal(resp)
		writeJSON(w, body, http.StatusOK)
	}()
	// parse body
	data, err := parseBody(r)
	if err != nil {
		return
	}
	// pre-fill with the current profile so omitted fields keep their values
	req := StartRequestBody{
		Profile: *api.boomer.GetProfile(),
	}
	err = mapstructure.Decode(data, &req)
	if err != nil {
		return
	}
	// reject unrecognized parameters captured by the ",remain" field
	if len(req.Other) > 0 {
		keys := make([]string, 0, len(req.Other))
		for k := range req.Other {
			keys = append(keys, k)
		}
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028)
		err = fmt.Errorf("failed to recognize params: %v", keys)
		return
	}
	// parse testcase path (comma-separated list)
	if req.TestCasePath == "" {
		err = errors.New("missing testcases path")
		return
	}
	paths := strings.Split(req.TestCasePath, ",")
	// set testcase path
	api.boomer.SetTestCasesPath(paths)
	// start boomer with profile
	err = api.boomer.Start(&req.Profile)
}
// ReBalance redistributes the running load across workers using the
// profile supplied in the request body (pre-filled with the boomer's
// current profile). Unknown parameters are rejected. The deferred
// block always writes the JSON response.
func (api *apiHandler) ReBalance(w http.ResponseWriter, r *http.Request) {
	var resp *CommonResponseBody
	var err error
	defer func() {
		if err != nil {
			resp = &CommonResponseBody{
				ServerStatus: EnumAPIResponseServerError(err.Error()),
			}
		} else {
			resp = &CommonResponseBody{
				ServerStatus: EnumAPIResponseSuccess,
			}
		}
		body, _ := json.Marshal(resp)
		writeJSON(w, body, http.StatusOK)
	}()
	// parse body
	data, err := parseBody(r)
	if err != nil {
		return
	}
	req := RebalanceRequestBody{
		Profile: *api.boomer.GetProfile(),
	}
	err = mapstructure.Decode(data, &req)
	if err != nil {
		return
	}
	// reject unrecognized parameters captured by the ",remain" field
	if len(req.Other) > 0 {
		keys := make([]string, 0, len(req.Other))
		for k := range req.Other {
			keys = append(keys, k)
		}
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028)
		err = fmt.Errorf("failed to recognize params: %v", keys)
		return
	}
	// rebalance boomer with profile
	err = api.boomer.ReBalance(&req.Profile)
}
// Stop halts the current load test on all workers and reports the
// outcome as JSON (StopError status on failure, success otherwise).
// The original version copied the URL query parameters into a map
// that was never read; that dead code has been removed.
func (api *apiHandler) Stop(w http.ResponseWriter, r *http.Request) {
	var resp *CommonResponseBody
	var err error
	defer func() {
		if err != nil {
			resp = &CommonResponseBody{
				ServerStatus: EnumAPIResponseStopError(err.Error()),
			}
		} else {
			resp = &CommonResponseBody{
				ServerStatus: EnumAPIResponseSuccess,
			}
		}
		body, _ := json.Marshal(resp)
		writeJSON(w, body, http.StatusOK)
	}()
	// stop boomer
	err = api.boomer.Stop()
}
// Quit shuts down the master and all workers. A success response is
// always written (via the deferred block) after Quit is invoked. The
// original version copied the URL query parameters into a map that
// was never read; that dead code has been removed.
func (api *apiHandler) Quit(w http.ResponseWriter, r *http.Request) {
	defer func() {
		resp := &CommonResponseBody{
			ServerStatus: EnumAPIResponseSuccess,
		}
		body, _ := json.Marshal(resp)
		writeJSON(w, body, http.StatusOK)
	}()
	// quit boomer
	api.boomer.Quit()
}
func (api *apiHandler) GetWorkersInfo(w http.ResponseWriter, r *http.Request) {
resp := &APIGetWorkersResponseBody{
ServerStatus: EnumAPIResponseSuccess,
Data: api.boomer.GetWorkersInfo(),
}
body, _ := json.Marshal(resp)
writeJSON(w, body, http.StatusOK)
}
func (api *apiHandler) GetMasterInfo(w http.ResponseWriter, r *http.Request) {
resp := &APIGetMasterResponseBody{
ServerStatus: EnumAPIResponseSuccess,
Data: api.boomer.GetMasterInfo(),
}
body, _ := json.Marshal(resp)
writeJSON(w, body, http.StatusOK)
}
func (api *apiHandler) Handler() http.Handler {
mux := http.NewServeMux()
mux.HandleFunc("/", methods(api.Index, "GET"))
mux.HandleFunc("/start", methods(api.Start, "POST"))
mux.HandleFunc("/rebalance", methods(api.ReBalance, "POST"))
mux.HandleFunc("/stop", methods(api.Stop, "GET"))
mux.HandleFunc("/quit", methods(api.Quit, "GET"))
mux.HandleFunc("/workers", methods(api.GetWorkersInfo, "GET"))
mux.HandleFunc("/master", methods(api.GetMasterInfo, "GET"))
return mux
}
func (apiHandler) ServeHTTP(http.ResponseWriter, *http.Request) {}
// StartServer runs the HTTP control API on addr until ctx is
// cancelled or the boomer's close channel fires, then shuts the
// server down gracefully. It blocks until the server exits.
func (b *HRPBoomer) StartServer(ctx context.Context, addr string) {
	h := b.NewAPIHandler()
	server := &http.Server{
		Addr:    addr,
		Handler: h.Handler(),
	}
	// watch for shutdown triggers in the background
	go func() {
		select {
		case <-ctx.Done():
		case <-b.GetCloseChan():
		}
		if err := server.Shutdown(context.Background()); err != nil {
			log.Fatal("shutdown server:", err)
		}
	}()
	// log.Printf replaces log.Println(fmt.Sprintf(...))
	log.Printf("starting HTTP server (%v), please use the API to control master", server.Addr)
	if err := server.ListenAndServe(); err != nil {
		if err == http.ErrServerClosed {
			log.Print("server closed under request")
		} else {
			// include the actual error (the original discarded it)
			log.Fatal("server closed unexpected: ", err)
		}
	}
}

View File

@@ -155,9 +155,9 @@ func (r *Rendezvous) setReleased() {
}
func initRendezvous(testcase *TestCase, total int64) []*Rendezvous {
tCase := testcase.ToTCase()
var rendezvousList []*Rendezvous
for _, step := range tCase.TestSteps {
for _, s := range testcase.TestSteps {
step := s.Struct()
if step.Rendezvous == nil {
continue
}
@@ -188,16 +188,20 @@ func initRendezvous(testcase *TestCase, total int64) []*Rendezvous {
return rendezvousList
}
func waitRendezvous(rendezvousList []*Rendezvous) {
func (r *Rendezvous) updateRendezvousNumber(number int64) {
atomic.StoreInt64(&r.Number, int64(float32(number)*r.Percent))
}
func waitRendezvous(rendezvousList []*Rendezvous, b *HRPBoomer) {
if rendezvousList != nil {
lastRendezvous := rendezvousList[len(rendezvousList)-1]
for _, rendezvous := range rendezvousList {
go waitSingleRendezvous(rendezvous, rendezvousList, lastRendezvous)
go waitSingleRendezvous(rendezvous, rendezvousList, lastRendezvous, b)
}
}
}
func waitSingleRendezvous(rendezvous *Rendezvous, rendezvousList []*Rendezvous, lastRendezvous *Rendezvous) {
func waitSingleRendezvous(rendezvous *Rendezvous, rendezvousList []*Rendezvous, lastRendezvous *Rendezvous, b *HRPBoomer) {
for {
// cycle start: block current checking until current rendezvous activated
<-rendezvous.activateChan
@@ -241,6 +245,8 @@ func waitSingleRendezvous(rendezvous *Rendezvous, rendezvousList []*Rendezvous,
if rendezvous == lastRendezvous {
for _, r := range rendezvousList {
r.reset()
// dynamic adjustment based on the number of concurrent users
r.updateRendezvousNumber(int64(b.GetSpawnCount()))
}
} else {
<-lastRendezvous.releaseChan

View File

@@ -11,6 +11,7 @@ import (
"github.com/rs/zerolog/log"
"github.com/httprunner/httprunner/v4/hrp/internal/builtin"
"github.com/mitchellh/mapstructure"
)
// ITestCase represents interface for testcases,
@@ -40,6 +41,11 @@ func (tc *TestCase) ToTCase() *TCase {
Config: tc.Config,
}
for _, step := range tc.TestSteps {
if step.Type() == stepTypeTestCase {
if testcase, ok := step.Struct().TestCase.(*TestCase); ok {
step.Struct().TestCase = testcase.ToTCase()
}
}
tCase.TestSteps = append(tCase.TestSteps, step.Struct())
}
return tCase
@@ -106,13 +112,17 @@ func (tc *TCase) ToTestCase(casePath string) (*TestCase, error) {
tc.Config = &TConfig{Name: "please input testcase name"}
}
tc.Config.Path = casePath
return tc.toTestCase()
}
// toTestCase converts *TCase to *TestCase
func (tc *TCase) toTestCase() (*TestCase, error) {
testCase := &TestCase{
Config: tc.Config,
}
// locate project root dir by plugin path
projectRootDir, err := GetProjectRootDirPath(casePath)
projectRootDir, err := GetProjectRootDirPath(tc.Config.Path)
if err != nil {
return nil, errors.Wrap(err, "failed to get project root dir")
}
@@ -139,40 +149,71 @@ func (tc *TCase) ToTestCase(casePath string) (*TestCase, error) {
for _, step := range tc.TestSteps {
if step.API != nil {
apiPath, ok := step.API.(string)
if ok {
path := filepath.Join(projectRootDir, apiPath)
if !builtin.IsFilePathExists(path) {
return nil, errors.New("referenced api file not found: " + path)
}
refAPI := APIPath(path)
apiContent, err := refAPI.ToAPI()
if err != nil {
return nil, err
}
step.API = apiContent
} else {
apiMap, ok := step.API.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("referenced api should be map or path(string), got %v", step.API)
}
api := &API{}
err = mapstructure.Decode(apiMap, api)
if err != nil {
return nil, err
}
step.API = api
}
_, ok = step.API.(*API)
if !ok {
return nil, fmt.Errorf("referenced api path should be string, got %v", step.API)
return nil, fmt.Errorf("failed to handle referenced API, got %v", step.TestCase)
}
path := filepath.Join(projectRootDir, apiPath)
if !builtin.IsFilePathExists(path) {
return nil, errors.New("referenced api file not found: " + path)
}
refAPI := APIPath(path)
apiContent, err := refAPI.ToAPI()
if err != nil {
return nil, err
}
step.API = apiContent
testCase.TestSteps = append(testCase.TestSteps, &StepAPIWithOptionalArgs{
step: step,
})
} else if step.TestCase != nil {
casePath, ok := step.TestCase.(string)
if !ok {
return nil, fmt.Errorf("referenced testcase path should be string, got %v", step.TestCase)
}
path := filepath.Join(projectRootDir, casePath)
if !builtin.IsFilePathExists(path) {
return nil, errors.New("referenced testcase file not found: " + path)
}
if ok {
path := filepath.Join(projectRootDir, casePath)
if !builtin.IsFilePathExists(path) {
return nil, errors.New("referenced testcase file not found: " + path)
}
refTestCase := TestCasePath(path)
tc, err := refTestCase.ToTestCase()
if err != nil {
return nil, err
refTestCase := TestCasePath(path)
tc, err := refTestCase.ToTestCase()
if err != nil {
return nil, err
}
step.TestCase = tc
} else {
testCaseMap, ok := step.TestCase.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("referenced testcase should be map or path(string), got %v", step.TestCase)
}
tCase := &TCase{}
err = mapstructure.Decode(testCaseMap, tCase)
if err != nil {
return nil, err
}
tc, err := tCase.toTestCase()
if err != nil {
return nil, err
}
step.TestCase = tc
}
_, ok = step.TestCase.(*TestCase)
if !ok {
return nil, fmt.Errorf("failed to handle referenced testcase, got %v", step.TestCase)
}
step.TestCase = tc
testCase.TestSteps = append(testCase.TestSteps, &StepTestCaseWithOptionalArgs{
step: step,
})