Merge pull request #896 from httprunner/v3

## 3.0.2 (2020-05-16)

**Added**

- feat: add `make` sub-command to generate python testcases from YAML/JSON  
- feat: format generated python testcases with [`black`](https://github.com/psf/black)
- test: add postman echo & httpbin as testcase examples

**Changed**

- refactor all
- replace jsonschema validation with pydantic
- remove compatibility with testcase/testsuite format v1
- replace unittest with pytest
- remove builtin html report, allure will be used with pytest later
- remove locust support temporarily
- update command line interface
This commit is contained in:
debugtalk
2020-05-16 16:45:51 +08:00
committed by GitHub
155 changed files with 4813 additions and 12522 deletions

View File

@@ -30,7 +30,14 @@ jobs:
poetry build
ls dist/*.whl | xargs pip install # test installation
hrun -V
locusts -V
- name: Run smoketest for hrun command
hrun run -h
hrun startproject -h
hrun har2case -h
pip install locustio
hrun locusts -h
- name: Run smoketest - postman echo
run: |
cd tests/httpbin && hrun basic.yml --failfast && cd -
hrun examples/postman_echo/request_methods
- name: Run smoketest - httpbin
run: |
hrun examples/httpbin/

View File

@@ -10,8 +10,8 @@ jobs:
strategy:
max-parallel: 12
matrix:
python-version: [3.6, 3.7] # TODO: 3.8
os: [ubuntu-latest, macos-latest] # TODO: windows-latest
python-version: [3.6, 3.7, 3.8]
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1
@@ -27,9 +27,11 @@ jobs:
poetry install -vv
- name: Run unittest for httprunner
run: |
poetry run python -m httprunner.cli hrun -V
poetry run python -m httprunner.cli hrun -h
poetry run coverage run --source=httprunner -m unittest discover
poetry run httprunner
poetry run hmake
poetry run hrun
poetry run har2case
poetry run coverage run --source=httprunner -m pytest httprunner
poetry run coverage xml
poetry run coverage report -m
- name: Codecov

View File

@@ -1,5 +1,29 @@
# Release History
## 3.0.2 (2020-05-16)
**Added**
- feat: add `make` sub-command to generate python testcases from YAML/JSON
- feat: format generated python testcases with [`black`](https://github.com/psf/black)
- test: add postman echo & httpbin as testcase examples
**Changed**
- refactor all
- replace jsonschema validation with pydantic
- remove compatibility with testcase/testsuite format v1
- replace unittest with pytest
- remove builtin html report, allure will be used with pytest later
- remove locust support temporarily
- update command line interface
## 3.0.1 (2020-03-24)
**Changed**
- remove sentry sdk
## 3.0.0 (2020-03-10)
**Added**
@@ -16,7 +40,6 @@
- generate reports/logs folder in current working directory
- remove cli `--validate`
- remove cli `--pretty`
- remove sentry sdk
## 2.5.7 (2020-02-21)

View File

@@ -45,7 +45,7 @@
- eq: ["status_code", 200]
```
参考案例:[httprunner/tests/httpbin/upload.v2.yml][2]
参考案例:[httprunner/tests/httpbin/upload.yml][2]
[1]: https://toolbelt.readthedocs.io/en/latest/uploading-data.html
[2]: https://github.com/httprunner/httprunner/blob/master/tests/httpbin/upload.v2.yml
[2]: https://github.com/httprunner/httprunner/blob/master/tests/httpbin/upload.yml

View File

@@ -1,54 +1,45 @@
- config:
config:
name: basic test with httpbin
base_url: https://httpbin.org/
#- test:
# TODO: fix compatibility with Python 2.7, UnicodeDecodeError
# name: index
# request:
# url: /
# method: GET
# validate:
# - eq: ["status_code", 200]
# - contains: [content, "HTTP Request & Response Service"]
- test:
teststeps:
-
name: headers
request:
url: /headers
method: GET
validate:
- eq: ["status_code", 200]
- eq: [content.headers.Host, "httpbin.org"]
- eq: [body.headers.Host, "httpbin.org"]
- test:
-
name: user-agent
request:
url: /user-agent
method: GET
validate:
- eq: ["status_code", 200]
- startswith: [content.user-agent, "python-requests"]
# - startswith: [body.user-agent, "python-requests"]
- test:
-
name: get without params
request:
url: /get
method: GET
validate:
- eq: ["status_code", 200]
- eq: [content.args, {}]
- eq: [body.args, {}]
- test:
-
name: get with params in url
request:
url: /get?a=1&b=2
method: GET
validate:
- eq: ["status_code", 200]
- eq: [content.args, {'a': '1', 'b': '2'}]
- eq: [body.args, {'a': '1', 'b': '2'}]
- test:
-
name: get with params in params field
request:
url: /get
@@ -58,9 +49,9 @@
method: GET
validate:
- eq: ["status_code", 200]
- eq: [content.args, {'a': '1', 'b': '2'}]
- eq: [body.args, {'a': '1', 'b': '2'}]
- test:
-
name: set cookie
request:
url: /cookies/set?name=value
@@ -69,7 +60,7 @@
- eq: ["status_code", 200]
# - eq: [cookies.name, "value"]
- test:
-
name: extract cookie
request:
url: /cookies
@@ -78,7 +69,7 @@
- eq: ["status_code", 200]
# - eq: [cookies.name, "value"]
- test:
-
name: post data
request:
url: /post
@@ -89,12 +80,10 @@
validate:
- eq: ["status_code", 200]
- test:
name: validate content length
-
name: validate body length
request:
url: /spec.json
method: GET
validate:
- len_eq: ["content", 9]
- len_eq: ["json", 9]
- len_eq: ["text", 9]
- len_eq: ["body", 9]

View File

@@ -0,0 +1,96 @@
# NOTICE: Generated By HttpRunner. DO NOT EDIT!
from httprunner import HttpRunner, TConfig, TStep
class TestCaseBasic(HttpRunner):
    """Generated testcase: basic httpbin endpoints (headers, args, cookies, post, body length)."""
    config = TConfig(
        **{
            "name": "basic test with httpbin",
            "base_url": "https://httpbin.org/",
            "path": "examples/httpbin/basic_test.py",
        }
    )
    teststeps = [
        TStep(
            **{
                "name": "headers",
                "request": {"url": "/headers", "method": "GET"},
                "validate": [
                    {"eq": ["status_code", 200]},
                    {"eq": ["body.headers.Host", "httpbin.org"]},
                ],
            }
        ),
        TStep(
            **{
                "name": "user-agent",
                "request": {"url": "/user-agent", "method": "GET"},
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
        TStep(
            **{
                "name": "get without params",
                "request": {"url": "/get", "method": "GET"},
                # empty args dict expected when no query params are sent
                "validate": [{"eq": ["status_code", 200]}, {"eq": ["body.args", {}]}],
            }
        ),
        TStep(
            **{
                "name": "get with params in url",
                "request": {"url": "/get?a=1&b=2", "method": "GET"},
                "validate": [
                    {"eq": ["status_code", 200]},
                    {"eq": ["body.args", {"a": "1", "b": "2"}]},
                ],
            }
        ),
        TStep(
            **{
                "name": "get with params in params field",
                "request": {"url": "/get", "params": {"a": 1, "b": 2}, "method": "GET"},
                # httpbin echoes query params back as strings
                "validate": [
                    {"eq": ["status_code", 200]},
                    {"eq": ["body.args", {"a": "1", "b": "2"}]},
                ],
            }
        ),
        TStep(
            **{
                "name": "set cookie",
                "request": {"url": "/cookies/set?name=value", "method": "GET"},
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
        TStep(
            **{
                "name": "extract cookie",
                "request": {"url": "/cookies", "method": "GET"},
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
        TStep(
            **{
                "name": "post data",
                "request": {
                    "url": "/post",
                    "method": "POST",
                    "headers": {"Content-Type": "application/json"},
                    "data": "abc",
                },
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
        TStep(
            **{
                "name": "validate body length",
                "request": {"url": "/spec.json", "method": "GET"},
                "validate": [{"len_eq": ["body", 9]}],
            }
        ),
    ]
if __name__ == "__main__":
    TestCaseBasic().test_start()

View File

@@ -2,27 +2,33 @@ import os
import random
import string
import time
import uuid
from tests.api_server import HTTPBIN_SERVER, gen_md5, get_sign
BASE_URL = "http://127.0.0.1:5000"
from loguru import logger
def get_httpbin_server():
return HTTPBIN_SERVER
return "https://httpbin.org"
def get_base_url():
return BASE_URL
def setup_testcase(variables):
logger.info(f"setup_testcase, variables: {variables}")
variables["request_id_prefix"] = str(int(time.time()))
def get_default_request():
return {
"base_url": BASE_URL,
"headers": {
"content-type": "application/json"
}
}
def teardown_testcase():
logger.info(f"teardown_testcase.")
def setup_teststep(request, variables):
logger.info(f"setup_teststep, request: {request}, variables: {variables}")
request.setdefault("headers", {})
request_id_prefix = variables["request_id_prefix"]
request["headers"]["HRUN-Request-ID"] = request_id_prefix + "-" + str(uuid.uuid4())
def teardown_teststep(response):
logger.info(f"teardown_teststep, response status code: {response.status_code}")
def sum_two(m, n):
@@ -58,16 +64,13 @@ def get_user_agent():
def gen_app_version():
return [
{"app_version": "2.8.5"},
{"app_version": "2.8.6"}
]
return [{"app_version": "2.8.5"}, {"app_version": "2.8.6"}]
def get_account():
return [
{"username": "user1", "password": "111111"},
{"username": "user2", "password": "222222"}
{"username": "user2", "password": "222222"},
]
@@ -81,7 +84,7 @@ def gen_random_string(str_len):
random_char = random.choice(string.ascii_letters + string.digits)
random_char_list.append(random_char)
random_string = ''.join(random_char_list)
random_string = "".join(random_char_list)
return random_string
@@ -113,9 +116,11 @@ def modify_request_json(request, os_platform):
def setup_hook_httpntlmauth(request):
if "httpntlmauth" in request:
from requests_ntlm import HttpNtlmAuth
auth_account = request.pop("httpntlmauth")
request["auth"] = HttpNtlmAuth(
auth_account["username"], auth_account["password"])
auth_account["username"], auth_account["password"]
)
def alter_response(response):
@@ -123,18 +128,15 @@ def alter_response(response):
response.headers["Content-Type"] = "html/text"
response.json["headers"]["Host"] = "127.0.0.1:8888"
response.new_attribute = "new_attribute_value"
response.new_attribute_dict = {
"key": 123
}
response.new_attribute_dict = {"key": 123}
def alter_response_302(response):
response.status_code = 500
response.headers["Content-Type"] = "html/text"
response.text = "abcdef"
response.new_attribute = "new_attribute_value"
response.new_attribute_dict = {
"key": 123
}
response.new_attribute_dict = {"key": 123}
def alter_response_error(response):
@@ -143,7 +145,4 @@ def alter_response_error(response):
def gen_variables():
return {
"var_a": 1,
"var_b": 2
}
return {"var_a": 1, "var_b": 2}

View File

@@ -1,4 +1,4 @@
- config:
config:
name: basic test with httpbin
base_url: ${get_httpbin_server()}
setup_hooks:
@@ -6,8 +6,11 @@
teardown_hooks:
- ${hook_print(teardown)}
- test:
teststeps:
-
name: headers
variables:
a: 123
request:
url: /headers
method: GET
@@ -18,9 +21,9 @@
- ${teardown_hook_sleep_N_secs($response, 1)}
validate:
- eq: ["status_code", 200]
- contained_by: [content.headers.Host, "${get_httpbin_server()}"]
- contained_by: [body.headers.Host, "${get_httpbin_server()}"]
- test:
-
name: alter response
request:
url: /headers
@@ -28,8 +31,6 @@
teardown_hooks:
- ${alter_response($response)}
validate:
- eq: ["status_code", 500]
- eq: ["headers.content-type", "html/text"]
- eq: [json.headers.Host, "127.0.0.1:8888"]
- eq: [content.headers.Host, "127.0.0.1:8888"]
- eq: [text.headers.Host, "127.0.0.1:8888"]
- eq: ["status_code", 200]
# - eq: ["headers.content-type", "html/text"]
- eq: [body.headers.Host, "httpbin.org"]

View File

@@ -0,0 +1,48 @@
# NOTICE: Generated By HttpRunner. DO NOT EDIT!
from httprunner import HttpRunner, TConfig, TStep
class TestCaseHooks(HttpRunner):
    """Generated testcase: setup/teardown hooks at both config and step level."""
    config = TConfig(
        **{
            "name": "basic test with httpbin",
            # base_url resolved at runtime via debugtalk function
            "base_url": "${get_httpbin_server()}",
            "setup_hooks": ["${hook_print(setup)}"],
            "teardown_hooks": ["${hook_print(teardown)}"],
            "path": "examples/httpbin/hooks_test.py",
        }
    )
    teststeps = [
        TStep(
            **{
                "name": "headers",
                "variables": {"a": 123},
                "request": {"url": "/headers", "method": "GET"},
                "setup_hooks": [
                    "${setup_hook_add_kwargs($request)}",
                    "${setup_hook_remove_kwargs($request)}",
                ],
                "teardown_hooks": ["${teardown_hook_sleep_N_secs($response, 1)}"],
                "validate": [
                    {"eq": ["status_code", 200]},
                    {"contained_by": ["body.headers.Host", "${get_httpbin_server()}"]},
                ],
            }
        ),
        TStep(
            **{
                "name": "alter response",
                # teardown hook mutates the response object before validation
                "request": {"url": "/headers", "method": "GET"},
                "teardown_hooks": ["${alter_response($response)}"],
                "validate": [
                    {"eq": ["status_code", 200]},
                    {"eq": ["body.headers.Host", "httpbin.org"]},
                ],
            }
        ),
    ]
if __name__ == "__main__":
    TestCaseHooks().test_start()

View File

@@ -1,8 +1,9 @@
- config:
config:
name: load images
base_url: ${get_httpbin_server()}
- test:
teststeps:
-
name: get png image
request:
url: /image/png
@@ -10,7 +11,7 @@
validate:
- eq: ["status_code", 200]
- test:
-
name: get jpeg image
request:
url: /image/jpeg
@@ -18,7 +19,7 @@
validate:
- eq: ["status_code", 200]
- test:
-
name: get webp image
request:
url: /image/webp
@@ -26,7 +27,7 @@
validate:
- eq: ["status_code", 200]
- test:
-
name: get svg image
request:
url: /image/svg

View File

@@ -0,0 +1,47 @@
# NOTICE: Generated By HttpRunner. DO NOT EDIT!
from httprunner import HttpRunner, TConfig, TStep
class TestCaseLoadImage(HttpRunner):
    """Generated testcase: fetch png/jpeg/webp/svg images from httpbin image endpoints."""
    config = TConfig(
        **{
            "name": "load images",
            # base_url resolved at runtime via debugtalk function
            "base_url": "${get_httpbin_server()}",
            "path": "examples/httpbin/load_image_test.py",
        }
    )
    teststeps = [
        TStep(
            **{
                "name": "get png image",
                "request": {"url": "/image/png", "method": "GET"},
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
        TStep(
            **{
                "name": "get jpeg image",
                "request": {"url": "/image/jpeg", "method": "GET"},
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
        TStep(
            **{
                "name": "get webp image",
                "request": {"url": "/image/webp", "method": "GET"},
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
        TStep(
            **{
                "name": "get svg image",
                "request": {"url": "/image/svg", "method": "GET"},
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
    ]
if __name__ == "__main__":
    TestCaseLoadImage().test_start()

View File

@@ -6,7 +6,7 @@ teststeps:
-
name: upload file
variables:
file_path: "data/test.env"
file_path: "test.env"
m_encoder: ${multipart_encoder(file=$file_path)}
request:
url: /post
@@ -16,7 +16,7 @@ teststeps:
data: $m_encoder
validate:
- eq: ["status_code", 200]
- startswith: ["content.files.file", "UserName=test"]
- startswith: ["body.files.file", "UserName=test"]
-
name: upload file with keyword
@@ -24,7 +24,7 @@ teststeps:
url: /post
method: POST
upload:
file: "data/test.env"
file: "test.env"
validate:
- eq: ["status_code", 200]
- startswith: ["content.files.file", "UserName=test"]
- startswith: ["body.files.file", "UserName=test"]

View File

@@ -0,0 +1,54 @@
# NOTICE: Generated By HttpRunner. DO NOT EDIT!
from httprunner import HttpRunner, TConfig, TStep
class TestCaseUpload(HttpRunner):
    """Generated testcase: multipart file upload, both via explicit encoder and `upload` keyword."""
    config = TConfig(
        **{
            "name": "test upload file with httpbin",
            "base_url": "${get_httpbin_server()}",
            "path": "examples/httpbin/upload_test.py",
        }
    )
    teststeps = [
        TStep(
            **{
                "name": "upload file",
                # m_encoder built from file_path via debugtalk multipart helper
                "variables": {
                    "file_path": "test.env",
                    "m_encoder": "${multipart_encoder(file=$file_path)}",
                },
                "request": {
                    "url": "/post",
                    "method": "POST",
                    "headers": {
                        "Content-Type": "${multipart_content_type($m_encoder)}"
                    },
                    "data": "$m_encoder",
                },
                "validate": [
                    {"eq": ["status_code", 200]},
                    {"startswith": ["body.files.file", "UserName=test"]},
                ],
            }
        ),
        TStep(
            **{
                "name": "upload file with keyword",
                # same upload expressed with the built-in "upload" shortcut
                "request": {
                    "url": "/post",
                    "method": "POST",
                    "upload": {"file": "test.env"},
                },
                "validate": [
                    {"eq": ["status_code", 200]},
                    {"startswith": ["body.files.file", "UserName=test"]},
                ],
            }
        ),
    ]
if __name__ == "__main__":
    TestCaseUpload().test_start()

View File

@@ -1,8 +1,9 @@
- config:
config:
name: basic test with httpbin
base_url: http://httpbin.org/
- test:
teststeps:
-
name: validate response with json path
request:
url: /get
@@ -12,13 +13,13 @@
method: GET
validate:
- eq: ["status_code", 200]
- eq: ["json.args.a", '1']
- eq: ["json.args.b", '2']
- eq: ["body.args.a", 1]
- eq: ["body.args.b", 2]
validate_script:
- "assert status_code == 200"
- test:
-
name: validate response with python script
request:
url: /get

View File

@@ -0,0 +1,43 @@
# NOTICE: Generated By HttpRunner. DO NOT EDIT!
from httprunner import HttpRunner, TConfig, TStep
class TestCaseValidate(HttpRunner):
    """Generated testcase: declarative validators plus inline python validate_script."""
    config = TConfig(
        **{
            "name": "basic test with httpbin",
            "base_url": "http://httpbin.org/",
            "path": "examples/httpbin/validate_test.py",
        }
    )
    teststeps = [
        TStep(
            **{
                "name": "validate response with json path",
                "request": {"url": "/get", "params": {"a": 1, "b": 2}, "method": "GET"},
                "validate": [
                    {"eq": ["status_code", 200]},
                    {"eq": ["body.args.a", 1]},
                    {"eq": ["body.args.b", 2]},
                ],
                "validate_script": ["assert status_code == 200"],
            }
        ),
        TStep(
            **{
                "name": "validate response with python script",
                "request": {"url": "/get", "params": {"a": 1, "b": 2}, "method": "GET"},
                "validate": [{"eq": ["status_code", 200]}],
                "validate_script": [
                    # NOTE(review): script asserts 201 while "validate" above expects 200 —
                    # looks like a deliberately failing example; confirm intent.
                    "assert status_code == 201",
                    "a = response_json.get('args').get('a')",
                    "assert a == '1'",
                ],
            }
        ),
    ]
if __name__ == "__main__":
    TestCaseValidate().test_start()

View File

@@ -0,0 +1,41 @@
config:
name: "set & delete cookies."
variables:
foo1: bar1
foo2: bar2
base_url: "https://postman-echo.com"
verify: False
export: ["cookie_foo1", "cookie_foo3"]
teststeps:
-
name: set cookie foo1 & foo2 & foo3
variables:
foo3: bar3
request:
method: GET
url: /cookies/set
params:
foo1: bar111
foo2: $foo2
foo3: $foo3
headers:
User-Agent: HttpRunner/${get_httprunner_version()}
extract:
cookie_foo1: $.cookies.foo1
cookie_foo3: $.cookies.foo3
validate:
- eq: ["status_code", 200]
- ne: ["$.cookies.foo3", "$foo3"]
-
name: delete cookie foo2
request:
method: GET
url: /cookies/delete?foo2
headers:
User-Agent: HttpRunner/${get_httprunner_version()}
validate:
- eq: ["status_code", 200]
- ne: ["$.cookies.foo1", "$foo1"]
- eq: ["$.cookies.foo1", "$cookie_foo1"]
- eq: ["$.cookies.foo3", "$cookie_foo3"]

View File

@@ -0,0 +1,9 @@
from httprunner import __version__
def get_httprunner_version():
    """Return the installed HttpRunner package version string (from httprunner.__version__)."""
    return __version__
def sum_two(m, n):
    """Return the sum of *m* and *n*.

    Debugtalk helper referenced from testcases as ``${sum_two(a, b)}``.
    """
    total = m + n
    return total

View File

@@ -0,0 +1,51 @@
config:
name: "request methods testcase in hardcode"
base_url: "https://postman-echo.com"
verify: False
teststeps:
-
name: get with params
request:
method: GET
url: /get
params:
foo1: bar1
foo2: bar2
headers:
User-Agent: HttpRunner/3.0
validate:
- eq: ["status_code", 200]
-
name: post raw text
request:
method: POST
url: /post
headers:
User-Agent: HttpRunner/3.0
Content-Type: "text/plain"
data: "This is expected to be sent back as part of response body."
validate:
- eq: ["status_code", 200]
-
name: post form data
request:
method: POST
url: /post
headers:
User-Agent: HttpRunner/3.0
Content-Type: "application/x-www-form-urlencoded"
data: "foo1=bar1&foo2=bar2"
validate:
- eq: ["status_code", 200]
-
name: put request
request:
method: PUT
url: /put
headers:
User-Agent: HttpRunner/3.0
Content-Type: "text/plain"
data: "This is expected to be sent back as part of response body."
validate:
- eq: ["status_code", 200]

View File

@@ -0,0 +1,77 @@
# NOTICE: Generated By HttpRunner. DO NOT EDIT!
from httprunner import HttpRunner, TConfig, TStep
class TestCaseHardcode(HttpRunner):
    """Generated testcase: GET/POST/PUT against postman-echo with hard-coded values only."""
    config = TConfig(
        **{
            "name": "request methods testcase in hardcode",
            "base_url": "https://postman-echo.com",
            # verify=False: skip TLS certificate verification
            "verify": False,
            "path": "examples/postman_echo/request_methods/hardcode_test.py",
        }
    )
    teststeps = [
        TStep(
            **{
                "name": "get with params",
                "request": {
                    "method": "GET",
                    "url": "/get",
                    "params": {"foo1": "bar1", "foo2": "bar2"},
                    "headers": {"User-Agent": "HttpRunner/3.0"},
                },
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
        TStep(
            **{
                "name": "post raw text",
                "request": {
                    "method": "POST",
                    "url": "/post",
                    "headers": {
                        "User-Agent": "HttpRunner/3.0",
                        "Content-Type": "text/plain",
                    },
                    "data": "This is expected to be sent back as part of response body.",
                },
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
        TStep(
            **{
                "name": "post form data",
                "request": {
                    "method": "POST",
                    "url": "/post",
                    "headers": {
                        "User-Agent": "HttpRunner/3.0",
                        "Content-Type": "application/x-www-form-urlencoded",
                    },
                    "data": "foo1=bar1&foo2=bar2",
                },
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
        TStep(
            **{
                "name": "put request",
                "request": {
                    "method": "PUT",
                    "url": "/put",
                    "headers": {
                        "User-Agent": "HttpRunner/3.0",
                        "Content-Type": "text/plain",
                    },
                    "data": "This is expected to be sent back as part of response body.",
                },
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
    ]
if __name__ == "__main__":
    TestCaseHardcode().test_start()

View File

@@ -0,0 +1,61 @@
config:
name: "request methods testcase with functions"
variables:
foo1: session_bar1
base_url: "https://postman-echo.com"
verify: False
teststeps:
-
name: get with params
variables:
foo1: bar1
foo2: session_bar2
sum_v: "${sum_two(1, 2)}"
request:
method: GET
url: /get
params:
foo1: $foo1
foo2: $foo2
sum_v: $sum_v
headers:
User-Agent: HttpRunner/${get_httprunner_version()}
extract:
session_foo2: "body.args.foo2"
validate:
- eq: ["status_code", 200]
- eq: ["body.args.foo1", "session_bar1"]
- eq: ["body.args.sum_v", 3]
- eq: ["body.args.foo2", "session_bar2"]
-
name: post raw text
variables:
foo1: "hello world"
foo3: "$session_foo2"
request:
method: POST
url: /post
headers:
User-Agent: HttpRunner/${get_httprunner_version()}
Content-Type: "text/plain"
data: "This is expected to be sent back as part of response body: $foo1-$foo3."
validate:
- eq: ["status_code", 200]
- eq: ["body.data", "This is expected to be sent back as part of response body: session_bar1-session_bar2."]
-
name: post form data
variables:
foo1: bar1
foo2: bar2
request:
method: POST
url: /post
headers:
User-Agent: HttpRunner/${get_httprunner_version()}
Content-Type: "application/x-www-form-urlencoded"
data: "foo1=$foo1&foo2=$foo2"
validate:
- eq: ["status_code", 200]
- eq: ["body.form.foo1", "session_bar1"]
- eq: ["body.form.foo2", "bar2"]

View File

@@ -0,0 +1,88 @@
# NOTICE: Generated By HttpRunner. DO NOT EDIT!
from httprunner import HttpRunner, TConfig, TStep
class TestCaseRequestWithFunctions(HttpRunner):
    """Generated testcase: debugtalk function calls, variable layering and cross-step extract."""
    config = TConfig(
        **{
            # config-level variables are overridden by step-level ones of the same name
            "name": "request methods testcase with functions",
            "variables": {"foo1": "session_bar1"},
            "base_url": "https://postman-echo.com",
            "verify": False,
            "path": "examples/postman_echo/request_methods/request_with_functions_test.py",
        }
    )
    teststeps = [
        TStep(
            **{
                "name": "get with params",
                "variables": {
                    "foo1": "bar1",
                    "foo2": "session_bar2",
                    "sum_v": "${sum_two(1, 2)}",
                },
                "request": {
                    "method": "GET",
                    "url": "/get",
                    "params": {"foo1": "$foo1", "foo2": "$foo2", "sum_v": "$sum_v"},
                    "headers": {"User-Agent": "HttpRunner/${get_httprunner_version()}"},
                },
                # session_foo2 is consumed by the next step via $session_foo2
                "extract": {"session_foo2": "body.args.foo2"},
                "validate": [
                    {"eq": ["status_code", 200]},
                    {"eq": ["body.args.foo1", "session_bar1"]},
                    {"eq": ["body.args.sum_v", 3]},
                    {"eq": ["body.args.foo2", "session_bar2"]},
                ],
            }
        ),
        TStep(
            **{
                "name": "post raw text",
                "variables": {"foo1": "hello world", "foo3": "$session_foo2"},
                "request": {
                    "method": "POST",
                    "url": "/post",
                    "headers": {
                        "User-Agent": "HttpRunner/${get_httprunner_version()}",
                        "Content-Type": "text/plain",
                    },
                    "data": "This is expected to be sent back as part of response body: $foo1-$foo3.",
                },
                "validate": [
                    {"eq": ["status_code", 200]},
                    {
                        "eq": [
                            "body.data",
                            "This is expected to be sent back as part of response body: session_bar1-session_bar2.",
                        ]
                    },
                ],
            }
        ),
        TStep(
            **{
                "name": "post form data",
                "variables": {"foo1": "bar1", "foo2": "bar2"},
                "request": {
                    "method": "POST",
                    "url": "/post",
                    "headers": {
                        "User-Agent": "HttpRunner/${get_httprunner_version()}",
                        "Content-Type": "application/x-www-form-urlencoded",
                    },
                    "data": "foo1=$foo1&foo2=$foo2",
                },
                "validate": [
                    {"eq": ["status_code", 200]},
                    {"eq": ["body.form.foo1", "session_bar1"]},
                    {"eq": ["body.form.foo2", "bar2"]},
                ],
            }
        ),
    ]
if __name__ == "__main__":
    TestCaseRequestWithFunctions().test_start()

View File

@@ -0,0 +1,13 @@
config:
name: "request methods testcase: reference testcase"
variables:
foo1: session_bar1
base_url: "https://postman-echo.com"
verify: False
teststeps:
-
name: request with variables
variables:
foo1: override_bar1
testcase: request_methods/request_with_variables.yml

View File

@@ -0,0 +1,28 @@
# NOTICE: Generated By HttpRunner. DO NOT EDIT!
from httprunner import HttpRunner, TConfig, TStep
class TestCaseRequestWithTestcaseReference(HttpRunner):
    """Generated testcase: a step that references another testcase file instead of a request."""
    config = TConfig(
        **{
            "name": "request methods testcase: reference testcase",
            "variables": {"foo1": "session_bar1"},
            "base_url": "https://postman-echo.com",
            "verify": False,
            "path": "examples/postman_echo/request_methods/request_with_testcase_reference_test.py",
        }
    )
    teststeps = [
        TStep(
            **{
                "name": "request with variables",
                # step-level foo1 overrides the referenced testcase's config variable
                "variables": {"foo1": "override_bar1"},
                "testcase": "request_methods/request_with_variables.yml",
            }
        ),
    ]
if __name__ == "__main__":
    TestCaseRequestWithTestcaseReference().test_start()

View File

@@ -0,0 +1,58 @@
config:
name: "request methods testcase with variables"
variables:
foo1: session_bar1
base_url: "https://postman-echo.com"
verify: False
teststeps:
-
name: get with params
variables:
foo1: bar1
foo2: session_bar2
request:
method: GET
url: /get
params:
foo1: $foo1
foo2: $foo2
headers:
User-Agent: HttpRunner/3.0
extract:
session_foo2: "body.args.foo2"
validate:
- eq: ["status_code", 200]
- eq: ["body.args.foo1", "session_bar1"]
- eq: ["body.args.foo2", "session_bar2"]
-
name: post raw text
variables:
foo1: "hello world"
foo3: "$session_foo2"
request:
method: POST
url: /post
headers:
User-Agent: HttpRunner/3.0
Content-Type: "text/plain"
data: "This is expected to be sent back as part of response body: $foo1-$foo3."
validate:
- eq: ["status_code", 200]
- eq: ["body.data", "This is expected to be sent back as part of response body: session_bar1-session_bar2."]
-
name: post form data
variables:
foo1: bar1
foo2: bar2
request:
method: POST
url: /post
headers:
User-Agent: HttpRunner/3.0
Content-Type: "application/x-www-form-urlencoded"
data: "foo1=$foo1&foo2=$foo2"
validate:
- eq: ["status_code", 200]
- eq: ["body.form.foo1", "session_bar1"]
- eq: ["body.form.foo2", "bar2"]

View File

@@ -0,0 +1,83 @@
# NOTICE: Generated By HttpRunner. DO NOT EDIT!
from httprunner import HttpRunner, TConfig, TStep
class TestCaseRequestWithVariables(HttpRunner):
    """Generated testcase: config/step variable precedence and cross-step extract."""
    config = TConfig(
        **{
            "name": "request methods testcase with variables",
            "variables": {"foo1": "session_bar1"},
            "base_url": "https://postman-echo.com",
            "verify": False,
            "path": "examples/postman_echo/request_methods/request_with_variables_test.py",
        }
    )
    teststeps = [
        TStep(
            **{
                "name": "get with params",
                "variables": {"foo1": "bar1", "foo2": "session_bar2"},
                "request": {
                    "method": "GET",
                    "url": "/get",
                    "params": {"foo1": "$foo1", "foo2": "$foo2"},
                    "headers": {"User-Agent": "HttpRunner/3.0"},
                },
                # session_foo2 is consumed by the following step via $session_foo2
                "extract": {"session_foo2": "body.args.foo2"},
                "validate": [
                    {"eq": ["status_code", 200]},
                    {"eq": ["body.args.foo1", "session_bar1"]},
                    {"eq": ["body.args.foo2", "session_bar2"]},
                ],
            }
        ),
        TStep(
            **{
                "name": "post raw text",
                "variables": {"foo1": "hello world", "foo3": "$session_foo2"},
                "request": {
                    "method": "POST",
                    "url": "/post",
                    "headers": {
                        "User-Agent": "HttpRunner/3.0",
                        "Content-Type": "text/plain",
                    },
                    "data": "This is expected to be sent back as part of response body: $foo1-$foo3.",
                },
                "validate": [
                    {"eq": ["status_code", 200]},
                    {
                        "eq": [
                            "body.data",
                            "This is expected to be sent back as part of response body: session_bar1-session_bar2.",
                        ]
                    },
                ],
            }
        ),
        TStep(
            **{
                "name": "post form data",
                "variables": {"foo1": "bar1", "foo2": "bar2"},
                "request": {
                    "method": "POST",
                    "url": "/post",
                    "headers": {
                        "User-Agent": "HttpRunner/3.0",
                        "Content-Type": "application/x-www-form-urlencoded",
                    },
                    "data": "foo1=$foo1&foo2=$foo2",
                },
                "validate": [
                    {"eq": ["status_code", 200]},
                    {"eq": ["body.form.foo1", "session_bar1"]},
                    {"eq": ["body.form.foo2", "bar2"]},
                ],
            }
        ),
    ]
if __name__ == "__main__":
    TestCaseRequestWithVariables().test_start()

View File

@@ -0,0 +1,29 @@
config:
name: "request methods testcase: validate with functions"
variables:
foo1: session_bar1
base_url: "https://postman-echo.com"
verify: False
teststeps:
-
name: get with params
variables:
foo1: bar1
foo2: session_bar2
sum_v: "${sum_two(1, 2)}"
request:
method: GET
url: /get
params:
foo1: $foo1
foo2: $foo2
sum_v: $sum_v
headers:
User-Agent: HttpRunner/${get_httprunner_version()}
extract:
session_foo2: "body.args.foo2"
validate:
- eq: ["status_code", 200]
- eq: ["body.args.sum_v", 3]
- less_than: ["body.args.sum_v", "${sum_two(2, 2)}"]

View File

@@ -0,0 +1,43 @@
# NOTICE: Generated By HttpRunner. DO NOT EDIT!
from httprunner import HttpRunner, TConfig, TStep
class TestCaseValidateWithFunctions(HttpRunner):
    """Generated testcase: validators whose expected values come from debugtalk function calls."""
    config = TConfig(
        **{
            "name": "request methods testcase: validate with functions",
            "variables": {"foo1": "session_bar1"},
            "base_url": "https://postman-echo.com",
            "verify": False,
            "path": "examples/postman_echo/request_methods/validate_with_functions_test.py",
        }
    )
    teststeps = [
        TStep(
            **{
                "name": "get with params",
                "variables": {
                    "foo1": "bar1",
                    "foo2": "session_bar2",
                    "sum_v": "${sum_two(1, 2)}",
                },
                "request": {
                    "method": "GET",
                    "url": "/get",
                    "params": {"foo1": "$foo1", "foo2": "$foo2", "sum_v": "$sum_v"},
                    "headers": {"User-Agent": "HttpRunner/${get_httprunner_version()}"},
                },
                "extract": {"session_foo2": "body.args.foo2"},
                "validate": [
                    {"eq": ["status_code", 200]},
                    {"eq": ["body.args.sum_v", 3]},
                    # expected value itself is a function call: sum_two(2, 2) == 4
                    {"less_than": ["body.args.sum_v", "${sum_two(2, 2)}"]},
                ],
            }
        ),
    ]
if __name__ == "__main__":
    TestCaseValidateWithFunctions().test_start()

View File

@@ -0,0 +1,58 @@
config:
name: "request methods testcase: validate with variables"
variables:
foo1: session_bar1
base_url: "https://postman-echo.com"
verify: False
teststeps:
-
name: get with params
variables:
foo1: bar1
foo2: session_bar2
request:
method: GET
url: /get
params:
foo1: $foo1
foo2: $foo2
headers:
User-Agent: HttpRunner/3.0
extract:
session_foo2: "body.args.foo2"
validate:
- eq: ["status_code", 200]
- eq: ["body.args.foo1", "$foo1"]
- eq: ["body.args.foo2", "$foo2"]
-
name: post raw text
variables:
foo1: "hello world"
foo3: "$session_foo2"
request:
method: POST
url: /post
headers:
User-Agent: HttpRunner/3.0
Content-Type: "text/plain"
data: "This is expected to be sent back as part of response body: $foo1-$foo3."
validate:
- eq: ["status_code", 200]
- eq: ["body.data", "This is expected to be sent back as part of response body: session_bar1-$foo3."]
-
name: post form data
variables:
foo1: bar1
foo2: bar2
request:
method: POST
url: /post
headers:
User-Agent: HttpRunner/3.0
Content-Type: "application/x-www-form-urlencoded"
data: "foo1=$foo1&foo2=$foo2"
validate:
- eq: ["status_code", 200]
- eq: ["body.form.foo1", "$foo1"]
- eq: ["body.form.foo2", "$foo2"]

View File

@@ -0,0 +1,83 @@
# NOTICE: Generated By HttpRunner. DO NOT EDIT!
from httprunner import HttpRunner, TConfig, TStep
class TestCaseValidateWithVariables(HttpRunner):
    """Generated testcase: validators whose expected values reference step variables."""
    config = TConfig(
        **{
            "name": "request methods testcase: validate with variables",
            "variables": {"foo1": "session_bar1"},
            "base_url": "https://postman-echo.com",
            "verify": False,
            "path": "examples/postman_echo/request_methods/validate_with_variables_test.py",
        }
    )
    teststeps = [
        TStep(
            **{
                "name": "get with params",
                "variables": {"foo1": "bar1", "foo2": "session_bar2"},
                "request": {
                    "method": "GET",
                    "url": "/get",
                    "params": {"foo1": "$foo1", "foo2": "$foo2"},
                    "headers": {"User-Agent": "HttpRunner/3.0"},
                },
                "extract": {"session_foo2": "body.args.foo2"},
                # expected values are $variables, resolved per-step
                "validate": [
                    {"eq": ["status_code", 200]},
                    {"eq": ["body.args.foo1", "$foo1"]},
                    {"eq": ["body.args.foo2", "$foo2"]},
                ],
            }
        ),
        TStep(
            **{
                "name": "post raw text",
                "variables": {"foo1": "hello world", "foo3": "$session_foo2"},
                "request": {
                    "method": "POST",
                    "url": "/post",
                    "headers": {
                        "User-Agent": "HttpRunner/3.0",
                        "Content-Type": "text/plain",
                    },
                    "data": "This is expected to be sent back as part of response body: $foo1-$foo3.",
                },
                "validate": [
                    {"eq": ["status_code", 200]},
                    {
                        "eq": [
                            "body.data",
                            "This is expected to be sent back as part of response body: session_bar1-$foo3.",
                        ]
                    },
                ],
            }
        ),
        TStep(
            **{
                "name": "post form data",
                "variables": {"foo1": "bar1", "foo2": "bar2"},
                "request": {
                    "method": "POST",
                    "url": "/post",
                    "headers": {
                        "User-Agent": "HttpRunner/3.0",
                        "Content-Type": "application/x-www-form-urlencoded",
                    },
                    "data": "foo1=$foo1&foo2=$foo2",
                },
                "validate": [
                    {"eq": ["status_code", 200]},
                    {"eq": ["body.form.foo1", "$foo1"]},
                    {"eq": ["body.form.foo2", "$foo2"]},
                ],
            }
        ),
    ]
if __name__ == "__main__":
    TestCaseValidateWithVariables().test_start()

View File

@@ -1,4 +1,13 @@
__version__ = "3.0.0"
__version__ = "3.0.2"
__description__ = "One-stop solution for HTTP(S) testing."
__all__ = ["__version__", "__description__"]
from httprunner.runner import HttpRunner
from httprunner.schema import TConfig, TStep
__all__ = [
"__version__",
"__description__",
"HttpRunner",
"TConfig",
"TStep",
]

View File

@@ -1,342 +0,0 @@
import os
import sys
import unittest
from loguru import logger
from httprunner import (__version__, exceptions, loader, parser,
report, runner, utils)
class HttpRunner(object):
    """ Developer Interface: Main Interface

    Wraps loading, parsing, running (via unittest) and aggregating of
    testcases/testsuites into a single object.

    Usage:
        from httprunner.api import HttpRunner
        runner = HttpRunner(
            failfast=True,
            save_tests=True,
            log_level="INFO",
            log_file="test.log"
        )
        summary = runner.run(path_or_tests)
    """
    def __init__(self, failfast=False, save_tests=False, log_level="WARNING", log_file=None):
        """ initialize HttpRunner.
        Args:
            failfast (bool): stop the test run on the first error or failure.
            save_tests (bool): save loaded/parsed tests to JSON file.
            log_level (str): logging level.
            log_file (str): log file path.
        """
        # updated before each phase so error messages can report where a
        # failure happened (load/parse/run/aggregate/report)
        self.exception_stage = "initialize HttpRunner()"
        kwargs = {
            "failfast": failfast,
            "resultclass": report.HtmlTestResult
        }
        # reconfigure loguru from scratch: drop default sinks, then add stdout
        # (and optionally a file) at the requested level
        logger.remove()
        log_level = log_level.upper()
        logger.add(sys.stdout, level=log_level)
        if log_file:
            logger.add(log_file, level=log_level)
        self.unittest_runner = unittest.TextTestRunner(**kwargs)
        self.test_loader = unittest.TestLoader()
        self.save_tests = save_tests
        self._summary = None
        self.test_path = None

    def _add_tests(self, testcases):
        """ initialize testcase with Runner() and add to test suite.
        Args:
            testcases (list): testcases list.
        Returns:
            unittest.TestSuite()
        """
        def _add_test(test_runner, test_dict):
            """ build one unittest test method bound to (test_runner, test_dict).
            """
            def test(self):
                try:
                    test_runner.run_test(test_dict)
                except exceptions.MyBaseFailure as ex:
                    self.fail(str(ex))
                finally:
                    # expose per-step meta data on the TestCase instance for
                    # later reporting, even when the step failed
                    self.meta_datas = test_runner.meta_datas

            if "config" in test_dict:
                # run nested testcase
                test.__doc__ = test_dict["config"].get("name")
                variables = test_dict["config"].get("variables", {})
            else:
                # run api test
                test.__doc__ = test_dict.get("name")
                variables = test_dict.get("variables", {})

            # test name may contain $variable / ${function()} references;
            # resolve them now so reports show the concrete name
            if isinstance(test.__doc__, parser.LazyString):
                try:
                    parsed_variables = parser.parse_variables_mapping(variables)
                    test.__doc__ = parser.parse_lazy_data(
                        test.__doc__, parsed_variables
                    )
                except exceptions.VariableNotFound:
                    # fall back to the raw (unresolved) name
                    test.__doc__ = str(test.__doc__)

            return test

        test_suite = unittest.TestSuite()
        for testcase in testcases:
            config = testcase.get("config", {})
            test_runner = runner.Runner(config)
            # one fresh TestCase subclass per testcase; steps become methods
            TestSequense = type('TestSequense', (unittest.TestCase,), {})

            tests = testcase.get("teststeps", [])
            for index, test_dict in enumerate(tests):
                # each step may declare `times` to be executed repeatedly
                times = test_dict.get("times", 1)
                try:
                    times = int(times)
                except ValueError:
                    raise exceptions.ParamsError(
                        f"times should be digit, given: {times}")

                for times_index in range(times):
                    # suppose one testcase should not have more than 9999 steps,
                    # and one step should not run more than 999 times.
                    test_method_name = 'test_{:04}_{:03}'.format(index, times_index)
                    test_method = _add_test(test_runner, test_dict)
                    setattr(TestSequense, test_method_name, test_method)

            loaded_testcase = self.test_loader.loadTestsFromTestCase(TestSequense)
            # attach originals so _run_suite/_aggregate can reach them later
            setattr(loaded_testcase, "config", config)
            setattr(loaded_testcase, "teststeps", tests)
            setattr(loaded_testcase, "runner", test_runner)
            test_suite.addTest(loaded_testcase)

        return test_suite

    def _run_suite(self, test_suite):
        """ run tests in test_suite
        Args:
            test_suite: unittest.TestSuite()
        Returns:
            list: tests_results, failed testcases are moved to the front
        """
        tests_results = []

        for index, testcase in enumerate(test_suite):
            log_handler = None
            if self.save_tests:
                # one DEBUG-level log file per testcase
                logs_file_abs_path = utils.prepare_log_file_abs_path(
                    self.test_path, f"testcase_{index+1}.log"
                )
                log_handler = logger.add(logs_file_abs_path, level="DEBUG")

            testcase_name = testcase.config.get("name")
            logger.info(f"Start to run testcase: {testcase_name}")

            result = self.unittest_runner.run(testcase)
            if result.wasSuccessful():
                tests_results.append((testcase, result))
            else:
                # failures are prepended so they show up first in reports
                tests_results.insert(0, (testcase, result))

            if self.save_tests and log_handler:
                logger.remove(log_handler)

        return tests_results

    def _aggregate(self, tests_results):
        """ aggregate results of all testcases into one summary dict.
        Args:
            tests_results (list): list of (testcase, result)
        Returns:
            dict: overall summary with success flag, stats, time and details
        """
        summary = {
            "success": True,
            "stat": {
                "testcases": {
                    "total": len(tests_results),
                    "success": 0,
                    "fail": 0
                },
                "teststeps": {}
            },
            "time": {},
            "platform": report.get_platform(),
            "details": []
        }

        for index, tests_result in enumerate(tests_results):
            testcase, result = tests_result
            testcase_summary = report.get_summary(result)

            if testcase_summary["success"]:
                summary["stat"]["testcases"]["success"] += 1
            else:
                summary["stat"]["testcases"]["fail"] += 1

            # overall run succeeds only if every testcase succeeded
            summary["success"] &= testcase_summary["success"]
            testcase_summary["name"] = testcase.config.get("name")
            testcase_summary["in_out"] = utils.get_testcase_io(testcase)

            report.aggregate_stat(summary["stat"]["teststeps"], testcase_summary["stat"])
            report.aggregate_stat(summary["time"], testcase_summary["time"])

            if self.save_tests:
                # link the per-testcase log file written in _run_suite
                logs_file_abs_path = utils.prepare_log_file_abs_path(
                    self.test_path, f"testcase_{index+1}.log"
                )
                testcase_summary["log"] = logs_file_abs_path

            testcase_summary["HRUN-Request-ID"] = testcase.runner.hrun_request_id
            summary["details"].append(testcase_summary)

        return summary

    def run_tests(self, tests_mapping):
        """ run testcase/testsuite data: parse -> build suite -> run -> aggregate.
        Args:
            tests_mapping (dict): loaded project/testcases mapping.
        Returns:
            dict: result summary
        Raises:
            exceptions.ParseTestsFailure: if no testcase could be parsed.
        """
        self.test_path = tests_mapping.get("project_mapping", {}).get("test_path", "")
        if self.save_tests:
            utils.dump_json_file(
                tests_mapping,
                utils.prepare_log_file_abs_path(self.test_path, "loaded.json")
            )

        # parse tests
        self.exception_stage = "parse tests"
        parsed_testcases = parser.parse_tests(tests_mapping)
        parse_failed_testfiles = parser.get_parse_failed_testfiles()
        if parse_failed_testfiles:
            # partial failures are tolerated but recorded for debugging
            logger.warning("parse failures occurred ...")
            utils.dump_json_file(
                parse_failed_testfiles,
                utils.prepare_log_file_abs_path(self.test_path, "parse_failed.json")
            )

        if len(parsed_testcases) == 0:
            logger.error("failed to parse all cases, abort.")
            raise exceptions.ParseTestsFailure

        if self.save_tests:
            utils.dump_json_file(
                parsed_testcases,
                utils.prepare_log_file_abs_path(self.test_path, "parsed.json")
            )

        # add tests to test suite
        self.exception_stage = "add tests to test suite"
        test_suite = self._add_tests(parsed_testcases)

        # run test suite
        self.exception_stage = "run test suite"
        results = self._run_suite(test_suite)

        # aggregate results
        self.exception_stage = "aggregate results"
        self._summary = self._aggregate(results)

        # generate html report
        self.exception_stage = "generate html report"
        report.stringify_summary(self._summary)

        if self.save_tests:
            utils.dump_json_file(
                self._summary,
                utils.prepare_log_file_abs_path(self.test_path, "summary.json")
            )
            # save variables and export data
            vars_out = self.get_vars_out()
            utils.dump_json_file(
                vars_out,
                utils.prepare_log_file_abs_path(self.test_path, "io.json")
            )

        return self._summary

    def get_vars_out(self):
        """ get variables and output
        Returns:
            list: list of variables and output.
                if tests are parameterized, list items are corresponded to parameters.
                [
                    {
                        "in": {
                            "user1": "leo"
                        },
                        "out": {
                            "out1": "out_value_1"
                        }
                    },
                    {...}
                ]
            None: returns None if tests not started or finished or corrupted.
        """
        if not self._summary:
            return None

        return [
            summary["in_out"]
            for summary in self._summary["details"]
        ]

    def run_path(self, path, dot_env_path=None, mapping=None):
        """ run testcase/testsuite file or folder.
        Args:
            path (str): testcase/testsuite file/foler path.
            dot_env_path (str): specified .env file path.
            mapping (dict): if mapping is specified, it will override variables in config block.
        Returns:
            dict: result summary
        """
        # load tests
        self.exception_stage = "load tests"
        tests_mapping = loader.load_cases(path, dot_env_path)
        if mapping:
            tests_mapping["project_mapping"]["variables"] = mapping

        return self.run_tests(tests_mapping)

    def run(self, path_or_tests, dot_env_path=None, mapping=None):
        """ main interface.
        Args:
            path_or_tests:
                str: testcase/testsuite file/foler path
                dict: valid testcase/testsuite data
            dot_env_path (str): specified .env file path.
            mapping (dict): if mapping is specified, it will override variables in config block.
        Returns:
            dict: result summary
        Raises:
            exceptions.ParamsError: if path_or_tests is neither a valid path
                nor valid testcase content.
        """
        logger.info(f"HttpRunner version: {__version__}")
        if loader.is_test_path(path_or_tests):
            return self.run_path(path_or_tests, dot_env_path, mapping)
        elif loader.is_test_content(path_or_tests):
            # dict input: restore the project working directory it was loaded from
            project_working_directory = path_or_tests.get("project_mapping", {}).get("PWD", os.getcwd())
            loader.init_pwd(project_working_directory)
            return self.run_tests(path_or_tests)
        else:
            raise exceptions.ParamsError(f"Invalid testcase path or testcases: {path_or_tests}")

View File

@@ -8,13 +8,7 @@ app = FastAPI()
@app.get("/hrun/version")
async def get_hrun_version():
return {
"code": 0,
"message": "success",
"result": {
"HttpRunner": __version__
}
}
return {"code": 0, "message": "success", "result": {"HttpRunner": __version__}}
app.include_router(deps.router)

View File

@@ -1,6 +1,6 @@
from fastapi import APIRouter
from httprunner.api import HttpRunner
from httprunner.runner import HttpRunner
from httprunner.schema import ProjectMeta, TestCase
router = APIRouter()
@@ -9,35 +9,25 @@ runner = HttpRunner()
@router.post("/hrun/debug/testcase", tags=["debug"])
async def debug_single_testcase(project_meta: ProjectMeta, testcase: TestCase):
resp = {
"code": 0,
"message": "success",
"result": {}
}
resp = {"code": 0, "message": "success", "result": {}}
project_meta_json = project_meta.dict(by_alias=True)
if project_meta.debugtalk_py:
origin_local_keys = list(locals().keys()).copy()
exec(project_meta.debugtalk_py, {}, locals())
new_local_keys = list(locals().keys()).copy()
new_added_keys = set(new_local_keys) - set(origin_local_keys)
new_added_keys.remove("origin_local_keys")
project_meta_json["functions"] = {}
for func_name in new_added_keys:
project_meta_json["functions"][func_name] = locals()[func_name]
project_meta.functions[func_name] = locals()[func_name]
testcase_json = testcase.dict(by_alias=True)
tests_mapping = {
"project_mapping": project_meta_json,
"testcases": [testcase_json]
}
runner.with_project_meta(project_meta).run(testcase)
summary = runner.get_summary()
summary = runner.run_tests(tests_mapping)
if not summary["success"]:
if not summary.success:
resp["code"] = 1
resp["message"] = "fail"
resp["result"] = summary
resp["result"] = summary.dict()
return resp
@@ -59,6 +49,6 @@ async def debug_single_testcase(project_meta: ProjectMeta, testcase: TestCase):
# @router.post("/hrun/debug/testcases", tags=["debug"])
# async def debug_multiple_testcases(project_meta: ProjectMeta, testcases: TestCases):
# tests_mapping = {
# "project_mapping": project_meta,
# "project_meta": project_meta,
# "testcases": testcases
# }

View File

@@ -8,13 +8,12 @@ client = TestClient(app)
class TestDebug(unittest.TestCase):
def test_debug_single_testcase(self):
json_data = {
"project_meta": {
"debugtalk_py": "\ndef hello(name):\n print(f'hello, {name}')\n",
"variables": {},
"env": {}
"env": {},
},
"testcase": {
"config": {
@@ -24,7 +23,7 @@ class TestDebug(unittest.TestCase):
"variables": {},
"setup_hooks": [],
"teardown_hooks": [],
"export": []
"export": [],
},
"teststeps": [
{
@@ -38,13 +37,13 @@ class TestDebug(unittest.TestCase):
"cookies": {},
"timeout": 30,
"allow_redirects": True,
"verify": False
"verify": False,
},
"extract": {},
"validate": []
"validate": [],
}
]
}
],
},
}
response = client.post("/hrun/debug/testcase", json=json_data)
assert response.status_code == 200

View File

@@ -23,15 +23,11 @@ def stdout_io(stdout=None):
async def debug_python(request: Request):
body = await request.body()
if request.headers.get('content-transfer-encoding') == "base64":
if request.headers.get("content-transfer-encoding") == "base64":
# TODO: decode base64
pass
resp = {
"code": 0,
"message": "success",
"result": ""
}
resp = {"code": 0, "message": "success", "result": ""}
try:
with stdout_io() as s:
exec(body, globals())

View File

@@ -10,11 +10,7 @@ router = APIRouter()
@router.get("/hrun/deps", tags=["deps"])
async def get_installed_dependenies():
resp = {
"code": 0,
"message": "success",
"result": {}
}
resp = {"code": 0, "message": "success", "result": {}}
for p in pkg_resources.working_set:
resp["result"][p.project_name] = p.version
@@ -23,11 +19,7 @@ async def get_installed_dependenies():
@router.post("/hrun/deps", tags=["deps"])
async def install_dependenies(deps: List[str]):
resp = {
"code": 0,
"message": "success",
"result": {}
}
resp = {"code": 0, "message": "success", "result": {}}
for dep in deps:
try:
p = subprocess.run(["pip", "install", dep])

View File

@@ -13,8 +13,9 @@ from httprunner.exceptions import ParamsError
def gen_random_string(str_len):
""" generate random string with specified length
"""
return ''.join(
random.choice(string.ascii_letters + string.digits) for _ in range(str_len))
return "".join(
random.choice(string.ascii_letters + string.digits) for _ in range(str_len)
)
def get_timestamp(str_len=13):
@@ -36,4 +37,3 @@ def sleep(n_secs):
""" sleep n seconds
"""
time.sleep(n_secs)

View File

@@ -2,12 +2,42 @@ import argparse
import os
import sys
from loguru import logger
import pytest
from httprunner import __description__, __version__
from httprunner.api import HttpRunner
from httprunner.report import gen_html_report
from httprunner.utils import create_scaffold
from httprunner import __description__, __version__, exceptions
from httprunner.ext.har2case import init_har2case_parser, main_har2case
from httprunner.ext.make import init_make_parser, main_make, convert_testcase_path
from httprunner.ext.scaffold import init_parser_scaffold, main_scaffold
def init_parser_run(subparsers):
    """ Register the `run` sub-command on the given argparse subparsers object.

    Returns the newly created sub-parser.
    """
    # All real argument handling is delegated to pytest, so the sub-parser
    # declares no options of its own.
    return subparsers.add_parser(
        "run", help="Make HttpRunner testcases and run with pytest."
    )
def main_run(extra_args):
    """ Entry for `httprunner run`.

    Regenerates python testcases from any YAML/JSON paths found in
    `extra_args`, then forwards all arguments to pytest.

    Raises:
        exceptions.ParamsError: if no existing testcase path was given.
    """
    testcase_paths = []
    for pos, candidate in enumerate(extra_args):
        if not os.path.exists(candidate):
            # anything that is not an existing file/folder is assumed to be a
            # pytest option and passed through untouched
            continue
        if os.path.isfile(candidate):
            # pytest must receive the generated python module, not the raw
            # YAML/JSON testcase file
            extra_args[pos] = convert_testcase_path(candidate)
        # remember the original path (file or folder) for regeneration
        testcase_paths.append(candidate)

    if not testcase_paths:
        # has not specified any testcase path
        raise exceptions.ParamsError("Missed testcase path")

    main_make(testcase_paths)

    # default to showing captured output, matching hrun's historical behavior
    if "-s" not in extra_args:
        extra_args.insert(0, "-s")

    pytest.main(extra_args)
def main():
@@ -15,80 +45,102 @@ def main():
"""
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument(
'-V', '--version', dest='version', action='store_true',
help="show version")
parser.add_argument(
'testfile_paths', nargs='*',
help="Specify api/testcase/testsuite file paths to run.")
parser.add_argument(
'--log-level', default='INFO',
help="Specify logging level, default is INFO.")
parser.add_argument(
'--log-file',
help="Write logs to specified file path.")
parser.add_argument(
'--dot-env-path',
help="Specify .env file path, which is useful for keeping sensitive data.")
parser.add_argument(
'--report-template',
help="Specify report template path.")
parser.add_argument(
'--report-dir',
help="Specify report save directory.")
parser.add_argument(
'--report-file',
help="Specify report file path, this has higher priority than specifying report dir.")
parser.add_argument(
'--save-tests', action='store_true', default=False,
help="Save loaded/parsed/vars_out/summary json data to JSON files.")
parser.add_argument(
'--failfast', action='store_true', default=False,
help="Stop the test run on the first error or failure.")
parser.add_argument(
'--startproject',
help="Specify new project name.")
"-V", "--version", dest="version", action="store_true", help="show version"
)
args = parser.parse_args()
subparsers = parser.add_subparsers(help="sub-command help")
sub_parser_run = init_parser_run(subparsers)
sub_parser_scaffold = init_parser_scaffold(subparsers)
sub_parser_har2case = init_har2case_parser(subparsers)
sub_parser_make = init_make_parser(subparsers)
if len(sys.argv) == 1:
# no argument passed
# httprunner
parser.print_help()
sys.exit(0)
elif len(sys.argv) == 2:
# print help for sub-commands
if sys.argv[1] in ["-V", "--version"]:
# httprunner -V
print(f"{__version__}")
elif sys.argv[1] in ["-h", "--help"]:
# httprunner -h
parser.print_help()
elif sys.argv[1] == "startproject":
# httprunner startproject
sub_parser_scaffold.print_help()
elif sys.argv[1] == "har2case":
# httprunner har2case
sub_parser_har2case.print_help()
elif sys.argv[1] == "run":
# httprunner run
pytest.main(["-h"])
elif sys.argv[1] == "make":
# httprunner make
sub_parser_make.print_help()
sys.exit(0)
elif (
len(sys.argv) == 3 and sys.argv[1] == "run" and sys.argv[2] in ["-h", "--help"]
):
# httprunner run -h
pytest.main(["-h"])
sys.exit(0)
extra_args = []
if len(sys.argv) >= 2 and sys.argv[1] in ["run", "locusts"]:
args, extra_args = parser.parse_known_args()
else:
args = parser.parse_args()
if args.version:
print(f"{__version__}")
sys.exit(0)
project_name = args.startproject
if project_name:
create_scaffold(project_name)
sys.exit(0)
runner = HttpRunner(
failfast=args.failfast,
save_tests=args.save_tests,
log_level=args.log_level,
log_file=args.log_file
)
err_code = 0
try:
for path in args.testfile_paths:
summary = runner.run(path, dot_env_path=args.dot_env_path)
report_dir = args.report_dir or os.path.join(os.getcwd(), "reports")
gen_html_report(
summary,
report_template=args.report_template,
report_dir=report_dir,
report_file=args.report_file
)
err_code |= (0 if summary and summary["success"] else 1)
except Exception as ex:
logger.error(f"!!!!!!!!!! exception stage: {runner.exception_stage} !!!!!!!!!!\n{str(ex)}")
err_code = 1
sys.exit(err_code)
if sys.argv[1] == "run":
main_run(extra_args)
elif sys.argv[1] == "startproject":
main_scaffold(args)
elif sys.argv[1] == "har2case":
main_har2case(args)
elif sys.argv[1] == "make":
main_make(args.testcase_path)
if __name__ == '__main__':
def main_hrun_alias():
    """ command alias: ``hrun`` behaves like ``httprunner run``.
    """
    argc = len(sys.argv)
    if argc == 2 and sys.argv[1] in ["-h", "--help"]:
        # `hrun -h`: run-mode help comes straight from pytest
        pytest.main(["-h"])
        sys.exit(0)
    if argc == 2 and sys.argv[1] in ["-V", "--version"]:
        # `hrun -V` -> `httprunner -V`
        sys.argv = ["httprunner", "-V"]
    else:
        # every other invocation is forwarded as `httprunner run ...`
        sys.argv.insert(1, "run")
    main()
def main_make_alias():
    """ command alias: ``hmake`` behaves like ``httprunner make``.
    """
    # rewrite argv so the shared entry point sees the `make` sub-command
    sys.argv = [sys.argv[0], "make"] + sys.argv[1:]
    main()
def main_har2case_alias():
    """ command alias: ``har2case`` behaves like ``httprunner har2case``.
    """
    # rewrite argv so the shared entry point sees the `har2case` sub-command
    sys.argv = [sys.argv[0], "har2case"] + sys.argv[1:]
    main()
if __name__ == "__main__":
main()

View File

@@ -6,7 +6,6 @@ from httprunner.cli import main
class TestCli(unittest.TestCase):
def setUp(self):
self.captured_output = io.StringIO()
sys.stdout = self.captured_output
@@ -23,6 +22,7 @@ class TestCli(unittest.TestCase):
self.assertEqual(cm.exception.code, 0)
from httprunner import __version__
self.assertIn(__version__, self.captured_output.getvalue().strip())
def test_show_help(self):
@@ -34,4 +34,5 @@ class TestCli(unittest.TestCase):
self.assertEqual(cm.exception.code, 0)
from httprunner import __description__
self.assertIn(__description__, self.captured_output.getvalue().strip())

View File

@@ -1,94 +1,91 @@
# encoding: utf-8
import time
import requests
import urllib3
from loguru import logger
from requests import Request, Response
from requests.exceptions import (InvalidSchema, InvalidURL, MissingSchema,
RequestException)
from requests.exceptions import (
InvalidSchema,
InvalidURL,
MissingSchema,
RequestException,
)
from httprunner import response
from httprunner.schema import RequestData, ResponseData
from httprunner.schema import SessionData, ReqRespData
from httprunner.utils import lower_dict_keys, omit_long_data
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def get_req_resp_record(resp_obj):
class ApiResponse(Response):
    # requests.Response subclass that lets a locally attached exception
    # (stored on `self.error`) surface through the standard
    # raise_for_status() call instead of HTTP status inspection.
    def raise_for_status(self):
        # prefer the recorded error, if any, over the status-code check
        if hasattr(self, "error") and self.error:
            raise self.error
        Response.raise_for_status(self)
def get_req_resp_record(resp_obj: Response) -> ReqRespData:
""" get request and response info from Response() object.
"""
def log_print(req_resp_dict, r_type):
def log_print(req_or_resp, r_type):
msg = f"\n================== {r_type} details ==================\n"
for key, value in req_resp_dict[r_type].items():
for key, value in req_or_resp.dict().items():
msg += "{:<16} : {}\n".format(key, repr(value))
logger.debug(msg)
req_resp_dict = {
"request": {},
"response": {}
}
# record actual request info
req_resp_dict["request"]["url"] = resp_obj.request.url
req_resp_dict["request"]["method"] = resp_obj.request.method
req_resp_dict["request"]["headers"] = dict(resp_obj.request.headers)
request_headers = dict(resp_obj.request.headers)
request_body = resp_obj.request.body
if request_body:
request_content_type = lower_dict_keys(
req_resp_dict["request"]["headers"]
).get("content-type")
request_content_type = lower_dict_keys(request_headers).get("content-type")
if request_content_type and "multipart/form-data" in request_content_type:
# upload file type
req_resp_dict["request"]["body"] = "upload file stream (OMITTED)"
else:
req_resp_dict["request"]["body"] = request_body
request_body = "upload file stream (OMITTED)"
request_data = RequestData(
method=resp_obj.request.method,
url=resp_obj.request.url,
headers=request_headers,
body=request_body,
)
# log request details in debug mode
log_print(req_resp_dict, "request")
log_print(request_data, "request")
# record response info
req_resp_dict["response"]["ok"] = resp_obj.ok
req_resp_dict["response"]["url"] = resp_obj.url
req_resp_dict["response"]["status_code"] = resp_obj.status_code
req_resp_dict["response"]["reason"] = resp_obj.reason
req_resp_dict["response"]["cookies"] = resp_obj.cookies or {}
req_resp_dict["response"]["encoding"] = resp_obj.encoding
resp_headers = dict(resp_obj.headers)
req_resp_dict["response"]["headers"] = resp_headers
lower_resp_headers = lower_dict_keys(resp_headers)
content_type = lower_resp_headers.get("content-type", "")
req_resp_dict["response"]["content_type"] = content_type
if "image" in content_type:
# response is image type, record bytes content only
req_resp_dict["response"]["body"] = resp_obj.content
response_body = resp_obj.content
else:
try:
# try to record json data
if isinstance(resp_obj, response.ResponseObject):
req_resp_dict["response"]["body"] = resp_obj.json
else:
req_resp_dict["response"]["body"] = resp_obj.json()
response_body = resp_obj.json()
except ValueError:
# only record at most 512 text charactors
resp_text = resp_obj.text
req_resp_dict["response"]["body"] = omit_long_data(resp_text)
response_body = omit_long_data(resp_text)
response_data = ResponseData(
status_code=resp_obj.status_code,
cookies=resp_obj.cookies or {},
encoding=resp_obj.encoding,
headers=resp_headers,
content_type=content_type,
body=response_body,
)
# log response details in debug mode
log_print(req_resp_dict, "response")
log_print(response_data, "response")
return req_resp_dict
class ApiResponse(Response):
def raise_for_status(self):
if hasattr(self, 'error') and self.error:
raise self.error
Response.raise_for_status(self)
req_resp_data = ReqRespData(request=request_data, response=response_data)
return req_resp_data
class HttpSession(requests.Session):
@@ -100,43 +97,18 @@ class HttpSession(requests.Session):
This is a slightly extended version of `python-request <http://python-requests.org>`_'s
:py:class:`requests.Session` class and mostly this class works exactly the same.
"""
def __init__(self):
super(HttpSession, self).__init__()
self.init_meta_data()
def init_meta_data(self):
""" initialize meta_data, it will store detail data of request and response
"""
self.meta_data = {
"name": "",
"data": [
{
"request": {
"url": "N/A",
"method": "N/A",
"headers": {}
},
"response": {
"status_code": "N/A",
"headers": {},
"encoding": None,
"content_type": ""
}
}
],
"stat": {
"content_size": "N/A",
"response_time_ms": "N/A",
"elapsed_ms": "N/A",
}
}
self.data = SessionData()
def update_last_req_resp_record(self, resp_obj):
"""
update request and response info from Response() object.
"""
self.meta_data["data"].pop()
self.meta_data["data"].append(get_req_resp_record(resp_obj))
# TODO: fix
self.data.req_resps.pop()
self.data.req_resps.append(get_req_resp_record(resp_obj))
def request(self, method, url, name=None, **kwargs):
"""
@@ -177,16 +149,10 @@ class HttpSession(requests.Session):
:param cert: (optional)
if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
"""
self.init_meta_data()
self.data = SessionData()
# record test name
self.meta_data["name"] = name
# record original request info
self.meta_data["data"][0]["request"]["method"] = method
self.meta_data["data"][0]["request"]["url"] = url
# timeout default to 120 seconds
kwargs.setdefault("timeout", 120)
self.meta_data["data"][0]["request"].update(kwargs)
start_timestamp = time.time()
response = self._send_request_safe_mode(method, url, **kwargs)
@@ -200,17 +166,14 @@ class HttpSession(requests.Session):
content_size = len(response.content or "")
# record the consumed time
self.meta_data["stat"] = {
"response_time_ms": response_time_ms,
"elapsed_ms": response.elapsed.microseconds / 1000.0,
"content_size": content_size
}
self.data.stat.response_time_ms = response_time_ms
self.data.stat.elapsed_ms = response.elapsed.microseconds / 1000.0
self.data.stat.content_size = content_size
# record request and response histories, include 30X redirection
response_list = response.history + [response]
self.meta_data["data"] = [
get_req_resp_record(resp_obj)
for resp_obj in response_list
self.data.req_resps = [
get_req_resp_record(resp_obj) for resp_obj in response_list
]
try:

View File

@@ -1,64 +0,0 @@
from httprunner import parser, utils
class SessionContext(object):
    """ HttpRunner session, store runtime variables.

    Maintains two variable scopes:
        - session variables: live for the whole running session (extracted values)
        - test variables: rebuilt at the start of each test (api) step

    Examples:
        >>> variables = {"SECRET_KEY": "DebugTalk"}
        >>> context = SessionContext(variables)
        Equivalent to:
        >>> context = SessionContext()
        >>> context.update_session_variables(variables)
    """
    def __init__(self, variables=None):
        variables_mapping = utils.ensure_mapping_format(variables or {})
        self.session_variables_mapping = parser.parse_variables_mapping(variables_mapping)
        self.test_variables_mapping = {}
        self.init_test_variables()

    def init_test_variables(self, variables_mapping=None):
        """ init test variables, called when each test(api) starts.
            variables_mapping will be evaluated first.
        Args:
            variables_mapping (dict)
                {
                    "random": "${gen_random_string(5)}",
                    "authorization": "${gen_md5($TOKEN, $data, $random)}",
                    "data": '{"name": "user", "password": "123456"}',
                    "TOKEN": "debugtalk",
                }
        """
        variables_mapping = variables_mapping or {}
        variables_mapping = utils.ensure_mapping_format(variables_mapping)
        # NOTE(review): this update() mutates the mapping returned by
        # ensure_mapping_format, which may be the caller's own dict — confirm
        variables_mapping.update(self.session_variables_mapping)
        parsed_variables_mapping = parser.parse_variables_mapping(variables_mapping)
        self.test_variables_mapping = {}
        # priority: extracted variable > teststep variable
        self.test_variables_mapping.update(parsed_variables_mapping)
        self.test_variables_mapping.update(self.session_variables_mapping)

    def update_test_variables(self, variable_name, variable_value):
        """ update test variables, these variables are only valid in the current test.
        """
        self.test_variables_mapping[variable_name] = variable_value

    def update_session_variables(self, variables_mapping):
        """ update session with extracted variables mapping.
            these variables are valid in the whole running session.
        """
        variables_mapping = utils.ensure_mapping_format(variables_mapping)
        self.session_variables_mapping.update(variables_mapping)
        # keep the test scope in sync so extracted values win immediately
        self.test_variables_mapping.update(self.session_variables_mapping)

    def eval_content(self, content):
        """ evaluate content recursively, take effect on each variable and function in content.
            content may be in any data structure, include dict, list, tuple, number, string, etc.
        """
        return parser.parse_lazy_data(content, self.test_variables_mapping)

View File

@@ -40,6 +40,10 @@ class FileFormatError(MyBaseError):
pass
class TestCaseFormatError(MyBaseError):
    # raised when a loaded testcase file does not match the expected structure
    pass
class ParamsError(MyBaseError):
    # raised when user-supplied parameters are missing or malformed,
    # e.g. no testcase path passed to `httprunner run`
    pass

View File

@@ -0,0 +1,61 @@
""" Convert HAR (HTTP Archive) to YAML/JSON testcase for HttpRunner.
Usage:
# convert to JSON format testcase
$ hrun har2case demo.har
# convert to YAML format testcase
$ hrun har2case demo.har -2y
"""
import os
import sys
from loguru import logger
from httprunner.ext.har2case.core import HarParser
def init_har2case_parser(subparsers):
    """ Register the `har2case` sub-command and its options.

    Returns the newly created sub-parser.
    """
    har2case = subparsers.add_parser(
        "har2case",
        help="Convert HAR(HTTP Archive) to YAML/JSON testcases for HttpRunner.",
    )
    # positional HAR file is optional so `httprunner har2case` can print help
    har2case.add_argument("har_source_file", nargs="?", help="Specify HAR source file")
    har2case.add_argument(
        "-2y",
        "--to-yml",
        "--to-yaml",
        dest="to_yaml",
        action="store_true",
        help="Convert to YAML format, if not specified, convert to JSON format by default.",
    )
    har2case.add_argument(
        "--filter",
        help="Specify filter keyword, only url include filter string will be converted.",
    )
    har2case.add_argument(
        "--exclude",
        help="Specify exclude keyword, url that includes exclude string will be ignored, "
        "multiple keywords can be joined with '|'",
    )
    return har2case
def main_har2case(args):
    """ Entry for `httprunner har2case`: validate input and run the converter.

    Exits with status 1 when the HAR file is missing or invalid.
    """
    source = args.har_source_file
    # bail out early on a missing path or one without the .har extension
    if not (source and source.endswith(".har")):
        logger.error("HAR file not specified.")
        sys.exit(1)
    if not os.path.isfile(source):
        logger.error(f"HAR file not exists: {source}")
        sys.exit(1)

    if args.to_yaml:
        output_file_type = "YML"
    else:
        output_file_type = "JSON"

    har_parser = HarParser(source, args.filter, args.exclude)
    har_parser.gen_testcase(output_file_type)
    return 0

View File

@@ -0,0 +1,356 @@
import base64
import json
import os
import sys
import urllib.parse as urlparse
from loguru import logger
from httprunner.ext.har2case import utils
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
IGNORE_REQUEST_HEADERS = [
"host",
"accept",
"content-length",
"connection",
"accept-encoding",
"accept-language",
"origin",
"cache-control",
"pragma",
"upgrade-insecure-requests",
":authority",
":method",
":scheme",
":path",
]
class HarParser(object):
def __init__(self, har_file_path, filter_str=None, exclude_str=None):
self.har_file_path = har_file_path
self.filter_str = filter_str
self.exclude_str = exclude_str or ""
def __make_request_url(self, teststep_dict, entry_json):
""" parse HAR entry request url and queryString, and make teststep url and params
Args:
entry_json (dict):
{
"request": {
"url": "https://httprunner.top/home?v=1&w=2",
"queryString": [
{"name": "v", "value": "1"},
{"name": "w", "value": "2"}
],
},
"response": {}
}
Returns:
{
"name: "/home",
"request": {
url: "https://httprunner.top/home",
params: {"v": "1", "w": "2"}
}
}
"""
request_params = utils.convert_list_to_dict(
entry_json["request"].get("queryString", [])
)
url = entry_json["request"].get("url")
if not url:
logger.exception("url missed in request.")
sys.exit(1)
parsed_object = urlparse.urlparse(url)
if request_params:
parsed_object = parsed_object._replace(query="")
teststep_dict["request"]["url"] = parsed_object.geturl()
teststep_dict["request"]["params"] = request_params
else:
teststep_dict["request"]["url"] = url
teststep_dict["name"] = parsed_object.path
def __make_request_method(self, teststep_dict, entry_json):
""" parse HAR entry request method, and make teststep method.
"""
method = entry_json["request"].get("method")
if not method:
logger.exception("method missed in request.")
sys.exit(1)
teststep_dict["request"]["method"] = method
def __make_request_headers(self, teststep_dict, entry_json):
""" parse HAR entry request headers, and make teststep headers.
header in IGNORE_REQUEST_HEADERS will be ignored.
Args:
entry_json (dict):
{
"request": {
"headers": [
{"name": "Host", "value": "httprunner.top"},
{"name": "Content-Type", "value": "application/json"},
{"name": "User-Agent", "value": "iOS/10.3"}
],
},
"response": {}
}
Returns:
{
"request": {
headers: {"Content-Type": "application/json"}
}
"""
teststep_headers = {}
for header in entry_json["request"].get("headers", []):
if header["name"].lower() in IGNORE_REQUEST_HEADERS:
continue
teststep_headers[header["name"]] = header["value"]
if teststep_headers:
teststep_dict["request"]["headers"] = teststep_headers
def _make_request_data(self, teststep_dict, entry_json):
    """ Parse HAR entry request postData and fill in teststep request data.

    Only POST/PUT/PATCH requests carry a body. JSON payloads are stored
    under ``request.json``; everything else under ``request.data``.

    Args:
        entry_json (dict):
            {
                "request": {
                    "method": "POST",
                    "postData": {
                        "mimeType": "application/x-www-form-urlencoded; charset=utf-8",
                        "params": [
                            {"name": "a", "value": 1},
                            {"name": "b", "value": "2"}
                        ]
                    },
                },
                "response": {...}
            }
    """
    method = entry_json["request"].get("method")
    if method not in ["POST", "PUT", "PATCH"]:
        return

    postData = entry_json["request"].get("postData", {})
    mimeType = postData.get("mimeType")

    # Note that text and params fields are mutually exclusive.
    if "text" in postData:
        post_data = postData.get("text")
    else:
        params = postData.get("params", [])
        post_data = utils.convert_list_to_dict(params)

    request_data_key = "data"
    if not mimeType:
        pass
    elif mimeType.startswith("application/json"):
        try:
            post_data = json.loads(post_data)
            request_data_key = "json"
        except (JSONDecodeError, TypeError):
            # TypeError: post_data may already be a dict (built from the
            # "params" list) — keep it under "data" as-is instead of crashing
            pass
    elif mimeType.startswith("application/x-www-form-urlencoded"):
        post_data = utils.convert_x_www_form_urlencoded_to_dict(post_data)
    else:
        # TODO: make compatible with more mimeType
        pass

    teststep_dict["request"][request_data_key] = post_data
def _make_validate(self, teststep_dict, entry_json):
    """ Parse HAR entry response and append teststep validators.

    Always validates status_code; validates Content-Type when the response
    carries that header; for JSON responses, validates every scalar
    top-level field of the body.

    Args:
        entry_json (dict):
            {
                "request": {},
                "response": {
                    "status": 200,
                    "headers": [
                        {"name": "Content-Type", "value": "application/json; charset=utf-8"},
                    ],
                    "content": {
                        "size": 71,
                        "mimeType": "application/json; charset=utf-8",
                        "text": "...",
                        "encoding": "base64"
                    }
                }
            }

    Returns (appended into teststep_dict):
        {"validate": [{"eq": ["status_code", 200]}, ...]}
    """
    teststep_dict["validate"].append(
        {"eq": ["status_code", entry_json["response"].get("status")]}
    )

    # guard: "content" may be missing in a truncated/malformed HAR entry;
    # previously this raised AttributeError on None.get("text")
    resp_content_dict = entry_json["response"].get("content") or {}

    headers_mapping = utils.convert_list_to_dict(
        entry_json["response"].get("headers", [])
    )
    if "Content-Type" in headers_mapping:
        teststep_dict["validate"].append(
            {"eq": ["headers.Content-Type", headers_mapping["Content-Type"]]}
        )

    text = resp_content_dict.get("text")
    if not text:
        return

    mime_type = resp_content_dict.get("mimeType")
    if mime_type and mime_type.startswith("application/json"):
        encoding = resp_content_dict.get("encoding")
        if encoding and encoding == "base64":
            content = base64.b64decode(text).decode("utf-8")
        else:
            content = text

        try:
            resp_content_json = json.loads(content)
        except JSONDecodeError:
            logger.warning(
                "response content can not be loaded as json: {}".format(
                    content.encode("utf-8")
                )
            )
            return

        if not isinstance(resp_content_json, dict):
            return

        for key, value in resp_content_json.items():
            if isinstance(value, (dict, list)):
                # only scalar top-level fields become validators
                continue
            teststep_dict["validate"].append(
                {"eq": ["content.{}".format(key), value]}
            )
def _prepare_teststep(self, entry_json):
    """ Build one teststep dict from a single HAR log entry.

    Args:
        entry_json (dict): one entry from the HAR "entries" list, holding
            "request" (method/url/headers/queryString/postData) and
            "response" (status/headers/content) sections.
    """
    teststep_dict = {"name": "", "request": {}, "validate": []}

    # each preparer fills its own slice of the teststep in place
    preparers = (
        self.__make_request_url,
        self.__make_request_method,
        self.__make_request_headers,
        self._make_request_data,
        self._make_validate,
    )
    for prepare in preparers:
        prepare(teststep_dict, entry_json)

    return teststep_dict
def _prepare_config(self):
    """ Build the default config block for the generated testcase. """
    config = dict(name="testcase description", variables={})
    return config
def _prepare_teststeps(self):
    """ Build the teststep list from HAR log entries.

    Entries whose url does not contain self.filter_str, or matches any
    "|"-separated fragment of self.exclude_str, are skipped.
    """

    def matches_exclude(url, exclude_str):
        # exclude_str may hold multiple fragments joined by "|"
        return any(
            fragment and fragment in url
            for fragment in exclude_str.split("|")
        )

    log_entries = utils.load_har_log_entries(self.har_file_path)

    teststeps = []
    for entry_json in log_entries:
        url = entry_json["request"].get("url")
        if self.filter_str and self.filter_str not in url:
            continue
        if matches_exclude(url, self.exclude_str):
            continue
        teststeps.append(self._prepare_teststep(entry_json))

    return teststeps
def _make_testcase(self):
    """ Assemble config and teststeps into a complete testcase structure. """
    logger.info("Extract info from HAR file and prepare for testcase.")
    return {
        "config": self._prepare_config(),
        "teststeps": self._prepare_teststeps(),
    }
def gen_testcase(self, file_type="JSON"):
    """ Generate a testcase file (JSON or YAML) next to the HAR file. """
    logger.info(f"Start to generate testcase from {self.har_file_path}")

    base_path, _ = os.path.splitext(self.har_file_path)
    output_testcase_file = f"{base_path}.{file_type.lower()}"

    testcase = self._make_testcase()
    logger.debug(f"prepared testcase: {testcase}")

    # dump in the requested format; anything other than JSON means YAML
    dump = utils.dump_json if file_type == "JSON" else utils.dump_yaml
    dump(testcase, output_testcase_file)

    logger.info(f"generated testcase: {output_testcase_file}")

View File

@@ -0,0 +1,166 @@
import os
from httprunner.ext.har2case.core import HarParser
from httprunner.ext.har2case.utils import load_har_log_entries
from httprunner.ext.har2case.utils_test import TestUtils
class TestHar(TestUtils):
    """ Tests for HarParser: teststep extraction, filter/exclude handling
    and testcase file generation, based on the data/demo.har fixture.
    """

    def setUp(self):
        # demo.har fixture recorded against https://httprunner.top
        self.har_path = os.path.join(os.path.dirname(__file__), "data", "demo.har")
        self.har_parser = HarParser(self.har_path)

    def test_prepare_teststep(self):
        """ a single HAR entry should yield name/request/validate fields. """
        log_entries = load_har_log_entries(self.har_path)
        teststep_dict = self.har_parser._prepare_teststep(log_entries[0])
        self.assertIn("name", teststep_dict)
        self.assertIn("request", teststep_dict)
        self.assertIn("validate", teststep_dict)
        # map validator target -> expected value for easy lookup
        validators_mapping = {
            validator["eq"][0]: validator["eq"][1]
            for validator in teststep_dict["validate"]
        }
        self.assertEqual(validators_mapping["status_code"], 200)
        self.assertEqual(validators_mapping["content.IsSuccess"], True)
        self.assertEqual(validators_mapping["content.Code"], 200)
        self.assertEqual(validators_mapping["content.Message"], None)

    def test_prepare_teststeps(self):
        """ the whole HAR file should produce a list of teststeps. """
        teststeps = self.har_parser._prepare_teststeps()
        self.assertIsInstance(teststeps, list)
        self.assertIn("name", teststeps[0])
        self.assertIn("request", teststeps[0])
        self.assertIn("validate", teststeps[0])

    def test_gen_testcase_yaml(self):
        """ YAML generation creates a sibling .yaml file. """
        yaml_file = os.path.join(os.path.dirname(__file__), "data", "demo.yaml")
        self.har_parser.gen_testcase(file_type="YAML")
        self.assertTrue(os.path.isfile(yaml_file))
        os.remove(yaml_file)

    def test_gen_testcase_json(self):
        """ JSON generation creates a sibling .json file. """
        json_file = os.path.join(os.path.dirname(__file__), "data", "demo.json")
        self.har_parser.gen_testcase(file_type="JSON")
        self.assertTrue(os.path.isfile(json_file))
        os.remove(json_file)

    def test_filter(self):
        """ only entries whose url contains filter_str are kept. """
        filter_str = "httprunner"
        har_parser = HarParser(self.har_path, filter_str)
        teststeps = har_parser._prepare_teststeps()
        self.assertEqual(
            teststeps[0]["request"]["url"],
            "https://httprunner.top/api/v1/Account/Login",
        )
        # a non-matching filter yields no teststeps
        filter_str = "debugtalk"
        har_parser = HarParser(self.har_path, filter_str)
        teststeps = har_parser._prepare_teststeps()
        self.assertEqual(teststeps, [])

    def test_exclude(self):
        """ entries whose url contains exclude_str are dropped. """
        exclude_str = "debugtalk"
        har_parser = HarParser(self.har_path, exclude_str=exclude_str)
        teststeps = har_parser._prepare_teststeps()
        self.assertEqual(
            teststeps[0]["request"]["url"],
            "https://httprunner.top/api/v1/Account/Login",
        )
        exclude_str = "httprunner"
        har_parser = HarParser(self.har_path, exclude_str=exclude_str)
        teststeps = har_parser._prepare_teststeps()
        self.assertEqual(teststeps, [])

    def test_exclude_multiple(self):
        """ exclude_str supports multiple fragments joined by "|". """
        exclude_str = "httprunner|v2"
        har_parser = HarParser(self.har_path, exclude_str=exclude_str)
        teststeps = har_parser._prepare_teststeps()
        self.assertEqual(teststeps, [])
        exclude_str = "http2|v1"
        har_parser = HarParser(self.har_path, exclude_str=exclude_str)
        teststeps = har_parser._prepare_teststeps()
        self.assertEqual(teststeps, [])

    def test_make_request_data_params(self):
        """ form-urlencoded "params" list becomes request.data mapping. """
        testcase_dict = {"name": "", "request": {}, "validate": []}
        entry_json = {
            "request": {
                "method": "POST",
                "postData": {
                    "mimeType": "application/x-www-form-urlencoded; charset=utf-8",
                    "params": [{"name": "a", "value": 1}, {"name": "b", "value": "2"}],
                },
            }
        }
        self.har_parser._make_request_data(testcase_dict, entry_json)
        self.assertEqual(testcase_dict["request"]["data"]["a"], 1)
        self.assertEqual(testcase_dict["request"]["data"]["b"], "2")

    def test_make_request_data_json(self):
        """ JSON text body is parsed and stored under request.json. """
        testcase_dict = {"name": "", "request": {}, "validate": []}
        entry_json = {
            "request": {
                "method": "POST",
                "postData": {
                    "mimeType": "application/json; charset=utf-8",
                    "text": '{"a":"1","b":"2"}',
                },
            }
        }
        self.har_parser._make_request_data(testcase_dict, entry_json)
        self.assertEqual(testcase_dict["request"]["json"], {"a": "1", "b": "2"})

    def test_make_request_data_text_empty(self):
        """ empty JSON text is kept verbatim under request.data. """
        testcase_dict = {"name": "", "request": {}, "validate": []}
        entry_json = {
            "request": {
                "method": "POST",
                "postData": {"mimeType": "application/json; charset=utf-8", "text": ""},
            }
        }
        self.har_parser._make_request_data(testcase_dict, entry_json)
        self.assertEqual(testcase_dict["request"]["data"], "")

    def test_make_validate(self):
        """ undecodable JSON body still yields status/header validators. """
        testcase_dict = {"name": "", "request": {}, "validate": []}
        entry_json = {
            "request": {},
            "response": {
                "status": 200,
                "headers": [
                    {
                        "name": "Content-Type",
                        "value": "application/json; charset=utf-8",
                    },
                ],
                "content": {
                    "size": 71,
                    "mimeType": "application/json; charset=utf-8",
                    # raw response content text is application/jose type
                    "text": "ZXlKaGJHY2lPaUpTVTBFeFh6VWlMQ0psYm1NaU9pSkJNVEk0UTBKRExV",
                    "encoding": "base64",
                },
            },
        }
        self.har_parser._make_validate(testcase_dict, entry_json)
        self.assertEqual(testcase_dict["validate"][0], {"eq": ["status_code", 200]})
        self.assertEqual(
            testcase_dict["validate"][1],
            {"eq": ["headers.Content-Type", "application/json; charset=utf-8"]},
        )

    def test_make_testcase(self):
        """ full conversion of demo-quickstart.har into a testcase dict. """
        har_path = os.path.join(
            os.path.dirname(__file__), "data", "demo-quickstart.har"
        )
        har_parser = HarParser(har_path)
        testcase = har_parser._make_testcase()
        self.assertIsInstance(testcase, dict)
        self.assertIn("config", testcase)
        self.assertIn("teststeps", testcase)
        self.assertEqual(len(testcase["teststeps"]), 2)

View File

@@ -0,0 +1,223 @@
{
"log": {
"version": "1.2",
"creator": {
"name": "Charles Proxy",
"version": "4.2.1"
},
"entries": [
{
"startedDateTime": "2018-02-19T17:30:00.904+08:00",
"time": 3,
"request": {
"method": "POST",
"url": "http://127.0.0.1:5000/api/get-token",
"httpVersion": "HTTP/1.1",
"cookies": [],
"headers": [
{
"name": "Host",
"value": "127.0.0.1:5000"
},
{
"name": "User-Agent",
"value": "python-requests/2.18.4"
},
{
"name": "Accept-Encoding",
"value": "gzip, deflate"
},
{
"name": "Accept",
"value": "*/*"
},
{
"name": "Connection",
"value": "keep-alive"
},
{
"name": "device_sn",
"value": "FwgRiO7CNA50DSU"
},
{
"name": "user_agent",
"value": "iOS/10.3"
},
{
"name": "os_platform",
"value": "ios"
},
{
"name": "app_version",
"value": "2.8.6"
},
{
"name": "Content-Length",
"value": "52"
},
{
"name": "Content-Type",
"value": "application/json"
}
],
"queryString": [],
"postData": {
"mimeType": "application/json",
"text": "{\"sign\": \"958a05393efef0ac7c0fb80a7eac45e24fd40c27\"}"
},
"headersSize": 299,
"bodySize": 52
},
"response": {
"_charlesStatus": "COMPLETE",
"status": 200,
"statusText": "OK",
"httpVersion": "HTTP/1.0",
"cookies": [],
"headers": [
{
"name": "Content-Type",
"value": "application/json"
},
{
"name": "Content-Length",
"value": "46"
},
{
"name": "Server",
"value": "Werkzeug/0.14.1 Python/3.6.4"
},
{
"name": "Date",
"value": "Mon, 19 Feb 2018 09:30:00 GMT"
},
{
"name": "Proxy-Connection",
"value": "Close"
}
],
"content": {
"size": 46,
"mimeType": "application/json",
"text": "eyJzdWNjZXNzIjogdHJ1ZSwgInRva2VuIjogImJhTkxYMXpoRllQMTFTZWIifQ\u003d\u003d",
"encoding": "base64"
},
"headersSize": 175,
"bodySize": 46
},
"serverIPAddress": "127.0.0.1",
"cache": {},
"timings": {
"dns": 1,
"connect": 0,
"ssl": -1,
"send": 0,
"wait": 1,
"receive": 1
}
},
{
"startedDateTime": "2018-02-19T17:30:00.911+08:00",
"time": 3,
"request": {
"method": "POST",
"url": "http://127.0.0.1:5000/api/users/1000",
"httpVersion": "HTTP/1.1",
"cookies": [],
"headers": [
{
"name": "Host",
"value": "127.0.0.1:5000"
},
{
"name": "User-Agent",
"value": "python-requests/2.18.4"
},
{
"name": "Accept-Encoding",
"value": "gzip, deflate"
},
{
"name": "Accept",
"value": "*/*"
},
{
"name": "Connection",
"value": "keep-alive"
},
{
"name": "device_sn",
"value": "FwgRiO7CNA50DSU"
},
{
"name": "token",
"value": "baNLX1zhFYP11Seb"
},
{
"name": "Content-Length",
"value": "39"
},
{
"name": "Content-Type",
"value": "application/json"
}
],
"queryString": [],
"postData": {
"mimeType": "application/json",
"text": "{\"name\": \"user1\", \"password\": \"123456\"}"
},
"headersSize": 265,
"bodySize": 39
},
"response": {
"_charlesStatus": "COMPLETE",
"status": 201,
"statusText": "CREATED",
"httpVersion": "HTTP/1.0",
"cookies": [],
"headers": [
{
"name": "Content-Type",
"value": "application/json"
},
{
"name": "Content-Length",
"value": "54"
},
{
"name": "Server",
"value": "Werkzeug/0.14.1 Python/3.6.4"
},
{
"name": "Date",
"value": "Mon, 19 Feb 2018 09:30:00 GMT"
},
{
"name": "Proxy-Connection",
"value": "Close"
}
],
"content": {
"size": 54,
"mimeType": "application/json",
"text": "eyJzdWNjZXNzIjogdHJ1ZSwgIm1zZyI6ICJ1c2VyIGNyZWF0ZWQgc3VjY2Vzc2Z1bGx5LiJ9",
"encoding": "base64"
},
"headersSize": 77,
"bodySize": 54
},
"serverIPAddress": "127.0.0.1",
"cache": {},
"timings": {
"dns": 0,
"connect": 0,
"ssl": -1,
"send": 0,
"wait": 3,
"receive": 0
}
}
]
}
}

View File

@@ -0,0 +1,148 @@
{
"log": {
"version": "1.2",
"creator": {
"name": "Charles Proxy",
"version": "4.2"
},
"entries": [
{
"startedDateTime": "2017-11-13T11:40:07.212+08:00",
"time": 35,
"request": {
"method": "POST",
"url": "https://httprunner.top/api/v1/Account/Login",
"httpVersion": "HTTP/1.1",
"cookies": [
{
"name": "lang",
"value": "zh"
}
],
"headers": [
{
"name": "Host",
"value": "httprunner.top"
},
{
"name": "Connection",
"value": "keep-alive"
},
{
"name": "Content-Length",
"value": "50"
},
{
"name": "Accept",
"value": "application/json"
},
{
"name": "Origin",
"value": "https://httprunner.top"
},
{
"name": "User-Agent",
"value": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
},
{
"name": "Content-Type",
"value": "application/json"
},
{
"name": "Referer",
"value": "https://httprunner.top/login"
},
{
"name": "Accept-Encoding",
"value": "gzip, deflate, br"
},
{
"name": "Accept-Language",
"value": "en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4"
}
],
"queryString": [],
"postData": {
"mimeType": "application/json",
"text": "{\"UserName\":\"test001\",\"Pwd\":\"123\",\"VerCode\":\"\"}"
},
"headersSize": 640,
"bodySize": 50
},
"response": {
"_charlesStatus": "COMPLETE",
"status": 200,
"statusText": "OK",
"httpVersion": "HTTP/1.1",
"cookies": [
{
"name": "lang",
"value": "zh",
"path": "/",
"domain": ".httprunner.top",
"expires": null,
"httpOnly": false,
"secure": false,
"comment": null,
"_maxAge": null
}
],
"headers": [
{
"name": "Date",
"value": "Mon, 13 Nov 2017 03:40:07 GMT"
},
{
"name": "Content-Type",
"value": "application/json; charset=utf-8"
},
{
"name": "Content-Length",
"value": "71"
},
{
"name": "Cache-Control",
"value": "no-cache"
},
{
"name": "Pragma",
"value": "no-cache"
},
{
"name": "Expires",
"value": "-1"
},
{
"name": "Server",
"value": "Microsoft-IIS/8.5"
},
{
"name": "X-AspNet-Version",
"value": "4.0.30319"
}
],
"content": {
"size": 71,
"mimeType": "application/json; charset=utf-8",
"text": "eyJJc1N1Y2Nlc3MiOnRydWUsIkNvZGUiOjIwMCwiTWVzc2FnZSI6bnVsbCwiVmFsdWUiOnsiQmxuUmVzdWx0Ijp0cnVlfX0=",
"encoding": "base64"
},
"redirectURL": null,
"headersSize": 0,
"bodySize": 71
},
"serverIPAddress": "192.168.1.169",
"cache": {},
"timings": {
"dns": -1,
"connect": -1,
"ssl": -1,
"send": 6,
"wait": 28,
"receive": 1
}
}
]
}
}

View File

@@ -0,0 +1,128 @@
import io
import json
import logging
import sys
from json.decoder import JSONDecodeError
from urllib.parse import unquote
import yaml
def load_har_log_entries(file_path):
    """ load HAR file and return log entries list

    Args:
        file_path (str): path to the HAR (JSON) file

    Returns:
        list: entries
            [
                {
                    "request": {},
                    "response": {}
                },
                {
                    "request": {},
                    "response": {}
                }
            ]

    Raises:
        SystemExit: if the file is not valid HAR content.
    """
    # open read-only; the previous "r+" mode required write permission
    # for no reason and failed on read-only HAR files
    with io.open(file_path, "r", encoding="utf-8-sig") as f:
        try:
            content_json = json.load(f)
            return content_json["log"]["entries"]
        except (KeyError, TypeError, JSONDecodeError):
            logging.error("HAR file content error: {}".format(file_path))
            sys.exit(1)
def x_www_form_urlencoded(post_data):
    """ convert origin dict to x-www-form-urlencoded

    Args:
        post_data (dict):
            {"a": 1, "b": 2}

    Returns:
        str:
            a=1&b=2
        Non-dict input is returned unchanged.
    """
    if not isinstance(post_data, dict):
        return post_data

    pairs = (u"{}={}".format(key, value) for key, value in post_data.items())
    return "&".join(pairs)
def convert_x_www_form_urlencoded_to_dict(post_data):
    """ convert x_www_form_urlencoded data to dict

    Args:
        post_data (str): a=1&b=2

    Returns:
        dict: {"a": "1", "b": "2"}
        Non-str input is returned unchanged.

    Raises:
        Exception: if a pair contains no "=" separator at all.
    """
    if not isinstance(post_data, str):
        return post_data

    converted_dict = {}
    for k_v in post_data.split("&"):
        try:
            # split only on the first "=" so values that themselves contain
            # "=" (e.g. base64 padding) are preserved; the unlimited split
            # used previously raised on such values
            key, value = k_v.split("=", 1)
        except ValueError:
            raise Exception(
                "Invalid x_www_form_urlencoded data format: {}".format(post_data)
            )
        converted_dict[key] = unquote(value)
    return converted_dict
def convert_list_to_dict(origin_list):
    """ convert HAR data list to mapping

    Args:
        origin_list (list):
            [
                {"name": "v", "value": "1"},
                {"name": "w", "value": "2"}
            ]

    Returns:
        dict:
            {"v": "1", "w": "2"}
    """
    converted = {}
    for item in origin_list:
        # "value" may be absent in malformed HAR items; map to None then
        converted[item["name"]] = item.get("value")
    return converted
def dump_yaml(testcase, yaml_file):
    """ dump testcase dict to a YAML file (UTF-8, block style). """
    logging.info("dump testcase to YAML format.")

    dump_kwargs = dict(allow_unicode=True, default_flow_style=False, indent=4)
    with io.open(yaml_file, "w", encoding="utf-8") as out:
        yaml.dump(testcase, out, **dump_kwargs)

    logging.info("Generate YAML testcase successfully: {}".format(yaml_file))
def dump_json(testcase, json_file):
    """ dump testcase dict to a JSON file (UTF-8, 4-space indented). """
    logging.info("dump testcase to JSON format.")
    with io.open(json_file, "w", encoding="utf-8") as outfile:
        # json.dumps always returns str on Python 3; the former
        # bytes-decoding branch was dead Python 2 compatibility code
        outfile.write(json.dumps(testcase, ensure_ascii=False, indent=4))
    logging.info("Generate JSON testcase successfully: {}".format(json_file))

View File

@@ -0,0 +1,54 @@
import json
import os
import unittest
from httprunner.ext.har2case import utils
class TestUtils(unittest.TestCase):
    """ Tests for har2case helper utilities; also provides the HAR-fixture
    factory reused by TestHar.
    """

    @staticmethod
    def create_har_file(file_name, content):
        """ write `content` as JSON into data/<file_name>.har; return its path. """
        file_path = os.path.join(
            os.path.dirname(__file__), "data", "{}.har".format(file_name)
        )
        with open(file_path, "w") as f:
            f.write(json.dumps(content))
        return file_path

    def test_load_har_log_entries(self):
        """ a valid HAR file yields its entries list. """
        har_path = os.path.join(os.path.dirname(__file__), "data", "demo.har")
        log_entries = utils.load_har_log_entries(har_path)
        self.assertIsInstance(log_entries, list)
        self.assertIn("request", log_entries[0])
        self.assertIn("response", log_entries[0])

    def test_load_har_log_key_error(self):
        # JSON without the log/entries keys aborts with SystemExit
        empty_json_file_path = TestUtils.create_har_file(
            file_name="empty_json", content={}
        )
        with self.assertRaises(SystemExit):
            utils.load_har_log_entries(empty_json_file_path)
        os.remove(empty_json_file_path)

    def test_load_har_log_empty_error(self):
        # non-dict HAR content also aborts with SystemExit
        empty_file_path = TestUtils.create_har_file(file_name="empty", content="")
        with self.assertRaises(SystemExit):
            utils.load_har_log_entries(empty_file_path)
        os.remove(empty_file_path)

    # def test_x_www_form_urlencoded(self):
    #     origin_dict = {"a":1, "b": "2"}
    #     self.assertIn("a=1", utils.x_www_form_urlencoded(origin_dict))
    #     self.assertIn("b=2", utils.x_www_form_urlencoded(origin_dict))

    def test_convert_list_to_dict(self):
        """ HAR name/value pair list converts to a plain mapping. """
        origin_list = [{"name": "v", "value": "1"}, {"name": "w", "value": "2"}]
        self.assertEqual(utils.convert_list_to_dict(origin_list), {"v": "1", "w": "2"})

    def test_convert_x_www_form_urlencoded_to_dict(self):
        """ urlencoded string converts to a dict of str values. """
        origin_str = "a=1&b=2"
        converted_dict = utils.convert_x_www_form_urlencoded_to_dict(origin_str)
        self.assertIsInstance(converted_dict, dict)
        self.assertEqual(converted_dict["a"], "1")
        self.assertEqual(converted_dict["b"], "2")

View File

@@ -1,104 +0,0 @@
# locusts
## Installation
```shell script
$ pip install locustio
```
## Usage
```shell script
$ locusts -f xxx.yml
```
```shell script
$ locusts -f xxx.yml --processes
```
```shell script
$ python3 -m httprunner.ext.locusts -h
Usage: locust [options] [LocustClass [LocustClass2 ... ]]
Options:
-h, --help show this help message and exit
-H HOST, --host=HOST Host to load test in the following format:
http://10.21.32.33
--web-host=WEB_HOST Host to bind the web interface to. Defaults to '' (all
interfaces)
-P PORT, --port=PORT, --web-port=PORT
Port on which to run web host
-f LOCUSTFILE, --locustfile=LOCUSTFILE
Python module file to import, e.g. '../other.py'.
Default: locustfile
--csv=CSVFILEBASE, --csv-base-name=CSVFILEBASE
Store current request stats to files in CSV format.
--master Set locust to run in distributed mode with this
process as master
--slave Set locust to run in distributed mode with this
process as slave
--master-host=MASTER_HOST
Host or IP address of locust master for distributed
load testing. Only used when running with --slave.
Defaults to 127.0.0.1.
--master-port=MASTER_PORT
The port to connect to that is used by the locust
master for distributed load testing. Only used when
running with --slave. Defaults to 5557. Note that
slaves will also connect to the master node on this
port + 1.
--master-bind-host=MASTER_BIND_HOST
Interfaces (hostname, ip) that locust master should
bind to. Only used when running with --master.
Defaults to * (all available interfaces).
--master-bind-port=MASTER_BIND_PORT
Port that locust master should bind to. Only used when
running with --master. Defaults to 5557. Note that
Locust will also use this port + 1, so by default the
master node will bind to 5557 and 5558.
--heartbeat-liveness=HEARTBEAT_LIVENESS
set number of seconds before failed heartbeat from
slave
--heartbeat-interval=HEARTBEAT_INTERVAL
set number of seconds delay between slave heartbeats
to master
--expect-slaves=EXPECT_SLAVES
How many slaves master should expect to connect before
starting the test (only when --no-web used).
--no-web Disable the web interface, and instead start running
the test immediately. Requires -c and -r to be
specified.
-c NUM_CLIENTS, --clients=NUM_CLIENTS
Number of concurrent Locust users. Only used together
with --no-web
-r HATCH_RATE, --hatch-rate=HATCH_RATE
The rate per second in which clients are spawned. Only
used together with --no-web
-t RUN_TIME, --run-time=RUN_TIME
Stop after the specified amount of time, e.g. (300s,
20m, 3h, 1h30m, etc.). Only used together with --no-
web
-L LOGLEVEL, --loglevel=LOGLEVEL
Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL.
Default is INFO.
--logfile=LOGFILE Path to log file. If not set, log will go to
stdout/stderr
--print-stats Print stats in the console
--only-summary Only print the summary stats
--no-reset-stats [DEPRECATED] Do not reset statistics once hatching has
been completed. This is now the default behavior. See
--reset-stats to disable
--reset-stats Reset statistics once hatching has been completed.
Should be set on both master and slaves when running
in distributed mode
-l, --list Show list of possible locust classes and exit
--show-task-ratio print table of the locust classes' task execution
ratio
--show-task-ratio-json
print json data of the locust classes' task execution
ratio
-V, --version show program's version number and exit
--exit-code-on-error=EXIT_CODE_ON_ERROR
sets the exit code to post on error
```

View File

@@ -1,4 +0,0 @@
# Entry point for `python -m httprunner.ext.locusts`.
from httprunner.ext.locusts.cli import main
if __name__ == "__main__":
    main()

View File

@@ -1,174 +0,0 @@
# locust is an optional dependency: patch ssl early (gevent must monkey-patch
# before locust imports it, otherwise RecursionError occurs), and exit with a
# friendly installation hint when locust is not available.
try:
    # monkey patch ssl at beginning to avoid RecursionError when running locust.
    from gevent import monkey
    monkey.patch_ssl()
    from locust import main as locust_main
except ImportError:
    msg = """
Locust is not installed, install first and try again.
install with pip:
$ pip install locustio
"""
    print(msg)
    import sys
    # exit code 0: missing optional dependency is not treated as an error
    sys.exit(0)
import io
import multiprocessing
import os
import sys
from loguru import logger
from httprunner import __version__
def parse_locustfile(file_path):
    """ parse testcase file and return locustfile path.
    if file_path is a Python file, assume it is a locustfile
    if file_path is a YAML/JSON file, convert it to locustfile
    """
    if not os.path.isfile(file_path):
        logger.error("file path invalid, exit.")
        sys.exit(1)

    file_suffix = os.path.splitext(file_path)[1]
    if file_suffix == ".py":
        return file_path
    if file_suffix in ['.yaml', '.yml', '.json']:
        return gen_locustfile(file_path)

    # '' or other suffix
    logger.error("file type should be YAML/JSON/Python, exit.")
    sys.exit(1)
def gen_locustfile(testcase_file_path):
    """ generate locustfile from template.

    Substitutes the $TESTCASE_FILE placeholder with the given testcase
    path and writes locustfile.py into the current working directory.
    """
    locustfile_path = 'locustfile.py'
    template_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "locustfile_template.py"
    )

    with io.open(template_path, encoding='utf-8') as template:
        rendered = template.read().replace("$TESTCASE_FILE", testcase_file_path)

    with io.open(locustfile_path, 'w', encoding='utf-8') as locustfile:
        locustfile.write(rendered)

    return locustfile_path
def start_locust_main():
    """ Delegate to locust's own main entry point (reads sys.argv). """
    locust_main.main()
def start_master(sys_argv):
    """ Run a locust master in this process with the given argv. """
    sys_argv.append("--master")
    # locust reads its options from sys.argv
    sys.argv = sys_argv
    start_locust_main()
def start_slave(sys_argv):
    """ Run a locust slave in this process with the given argv. """
    if "--slave" not in sys_argv:
        sys_argv.extend(["--slave"])
    # locust reads its options from sys.argv
    sys.argv = sys_argv
    start_locust_main()
def run_locusts_with_processes(sys_argv, processes_count):
    """ Spawn `processes_count` slave processes, then run the master in-process.

    Slave processes are daemonized so they terminate with the parent. When
    "--slave" is already present in argv, only slaves run and are joined.
    """
    processes = []
    manager = multiprocessing.Manager()
    for _ in range(processes_count):
        p_slave = multiprocessing.Process(target=start_slave, args=(sys_argv,))
        p_slave.daemon = True
        p_slave.start()
        processes.append(p_slave)
    try:
        if "--slave" in sys_argv:
            [process.join() for process in processes]
        else:
            start_master(sys_argv)
    except KeyboardInterrupt:
        # Ctrl-C: tear down the manager (daemonized slaves die with us)
        manager.shutdown()
def main():
    """ Performance test with locust: parse command line options and run commands.

    Rewrites sys.argv in place into a valid `locust` invocation: converts a
    YAML/JSON testcase into a generated locustfile, and optionally spawns
    one slave process per core when --processes is given.
    """
    print(f"HttpRunner version: {__version__}")
    sys.argv[0] = 'locust'
    if len(sys.argv) == 1:
        sys.argv.extend(["-h"])
    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        start_locust_main()
    def get_arg_index(*target_args):
        # index of the VALUE following the first matching flag, or None
        for arg in target_args:
            if arg not in sys.argv:
                continue
            return sys.argv.index(arg) + 1
        return None
    # set logging level
    loglevel_index = get_arg_index("-L", "--loglevel")
    if loglevel_index and loglevel_index < len(sys.argv):
        loglevel = sys.argv[loglevel_index]
        loglevel = loglevel.upper()
    else:
        # default
        loglevel = "WARNING"
    logger.remove()
    logger.add(sys.stdout, level=loglevel)
    # get testcase file path
    try:
        testcase_index = get_arg_index("-f", "--locustfile")
        assert testcase_index and testcase_index < len(sys.argv)
    except AssertionError:
        print("Testcase file is not specified, exit.")
        sys.exit(1)
    testcase_file_path = sys.argv[testcase_index]
    # YAML/JSON testcases are converted to a generated locustfile path
    sys.argv[testcase_index] = parse_locustfile(testcase_file_path)
    if "--processes" in sys.argv:
        """ locusts -f locustfile.py --processes 4
        """
        if "--no-web" in sys.argv:
            logger.error("conflict parameter args: --processes & --no-web. \nexit.")
            sys.exit(1)
        processes_index = sys.argv.index('--processes')
        processes_count_index = processes_index + 1
        if processes_count_index >= len(sys.argv):
            """ do not specify processes count explicitly
            locusts -f locustfile.py --processes
            """
            processes_count = multiprocessing.cpu_count()
            logger.warning(f"processes count not specified, use {processes_count} by default.")
        else:
            try:
                """ locusts -f locustfile.py --processes 4 """
                processes_count = int(sys.argv[processes_count_index])
                sys.argv.pop(processes_count_index)
            except ValueError:
                """ locusts -f locustfile.py --processes -P 8888 """
                processes_count = multiprocessing.cpu_count()
                logger.warning(f"processes count not specified, use {processes_count} by default.")
        # strip --processes so locust itself never sees it
        sys.argv.pop(processes_index)
        run_locusts_with_processes(sys.argv, processes_count)
    else:
        start_locust_main()

View File

@@ -1,43 +0,0 @@
import logging
import random
from locust import HttpLocust, TaskSet, task
from locust.events import request_failure
from httprunner.exceptions import MyBaseError, MyBaseFailure
from httprunner.ext.locusts.utils import prepare_locust_tests
from httprunner.runner import Runner
# silence the root logger; keep locust's own loggers at INFO level
logging.getLogger().setLevel(logging.CRITICAL)
logging.getLogger('locust.main').setLevel(logging.INFO)
logging.getLogger('locust.runners').setLevel(logging.INFO)
class WebPageTasks(TaskSet):
    """ Locust task set: each task executes one randomly chosen HttpRunner test. """

    def on_start(self):
        # reuse locust's HTTP session (self.client) inside the HttpRunner runner
        config = {}
        self.test_runner = Runner(config, self.client)

    @task
    def test_any(self):
        # weights were applied when self.locust.tests was built, so a
        # uniform random choice honors the configured testcase weights
        test_dict = random.choice(self.locust.tests)
        try:
            self.test_runner.run_test(test_dict)
        except (AssertionError, MyBaseError, MyBaseFailure) as ex:
            # surface HttpRunner failures in locust's failure statistics
            request_failure.fire(
                request_type=self.test_runner.exception_request_type,
                name=self.test_runner.exception_name,
                response_time=0,
                exception=ex
            )
class WebPageUser(HttpLocust):
    """ Locust user wired to WebPageTasks; tests are loaded from the
    testcase file substituted into this template at generation time.
    """
    host = ""
    task_set = WebPageTasks
    # wait 10-30 ms between tasks
    min_wait = 10
    max_wait = 30
    # file_path is generated on locusts startup
    file_path = "$TESTCASE_FILE"
    tests = prepare_locust_tests(file_path)

View File

@@ -1,29 +0,0 @@
from httprunner import loader, parser
def prepare_locust_tests(path):
    """ prepare locust testcases

    Args:
        path (str): testcase file path.

    Returns:
        list: locust tests data, each testcase duplicated `weight` times
            [
                testcase1_dict,
                testcase2_dict
            ]
    """
    tests_mapping = loader.load_cases(path)
    testcases = parser.parse_tests(tests_mapping)

    locust_tests = []
    for testcase in testcases:
        # "weight" (default 1) controls how often this testcase is picked;
        # it is popped so the runner never sees it
        weight = testcase.get("config", {}).pop("weight", 1)
        locust_tests.extend([testcase] * weight)
    return locust_tests

View File

@@ -0,0 +1,128 @@
import os
import subprocess
from typing import Union, Text, List
import jinja2
from loguru import logger
from httprunner import exceptions
from httprunner.exceptions import TestCaseFormatError
from httprunner.loader import load_testcase_file, load_folder_files
__TMPL__ = """# NOTICE: Generated By HttpRunner. DO'NOT EDIT!
from httprunner import HttpRunner, TConfig, TStep
class {{ class_name }}(HttpRunner):
config = TConfig(**{{ config }})
teststeps = [
{% for teststep in teststeps %}
TStep(**{{ teststep }}),
{% endfor %}
]
if __name__ == "__main__":
{{ class_name }}().test_start()
"""
def make_testcase(testcase_path: str) -> Union[str, None]:
    """ Render one YAML/JSON testcase into a pytest python module.

    Returns the generated python file path, or None when the testcase
    file is malformed.
    """
    logger.info(f"start to make testcase: {testcase_path}")

    try:
        testcase, _ = load_testcase_file(testcase_path)
    except TestCaseFormatError:
        return None

    raw_file_name, _ = os.path.splitext(os.path.basename(testcase_path))
    # convert title case, e.g. request_with_variables => RequestWithVariables
    name_in_title_case = raw_file_name.title().replace("_", "")
    testcase_python_path = os.path.join(
        os.path.dirname(testcase_path), f"{raw_file_name}_test.py"
    )

    config = testcase["config"]
    config["path"] = testcase_python_path

    content = jinja2.Template(__TMPL__).render(
        {
            "class_name": f"TestCase{name_in_title_case}",
            "config": config,
            "teststeps": testcase["teststeps"],
        }
    )
    with open(testcase_python_path, "w") as f:
        f.write(content)

    logger.info(f"generated testcase: {testcase_python_path}")
    return testcase_python_path
def convert_testcase_path(testcase_path: Text) -> Text:
    """convert single YAML/JSON testcase path to python file path.

    A folder path is returned unchanged; for a file path the YAML/JSON
    extension is replaced with "_test.py".

    Raises:
        exceptions.ParamsError: if the file suffix is not YAML/JSON.
    """
    if os.path.isdir(testcase_path):
        # folder does not need to convert
        return testcase_path

    root, file_suffix = os.path.splitext(testcase_path)
    if file_suffix.lower() in (".json", ".yaml", ".yml"):
        # replace only the extension: the previous str.replace() could also
        # rewrite a matching substring earlier in the path (e.g. a folder
        # named "dir.json") and silently missed upper-case suffixes
        return f"{root}_test.py"

    raise exceptions.ParamsError(
        f"testcase file should have YAML/JSON suffix: {testcase_path}"
    )
def format_with_black(tests_path: Text):
    """ Run black over the python testcase(s) generated from tests_path. """
    logger.info("format testcases with black ...")
    target = convert_testcase_path(tests_path)
    try:
        subprocess.run(["black", target])
    except subprocess.CalledProcessError as ex:
        logger.error(ex)
def make(tests_path: Text) -> List:
    """ convert YAML/JSON testcases under the given path to python files.

    Args:
        tests_path: testcase file or folder path

    Returns:
        list of generated python testcase file paths

    Raises:
        exceptions.TestcaseNotFound: if tests_path is neither file nor folder
    """
    if os.path.isdir(tests_path):
        candidate_files = load_folder_files(tests_path)
    elif os.path.isfile(tests_path):
        candidate_files = [tests_path]
    else:
        raise exceptions.TestcaseNotFound(f"Invalid tests path: {tests_path}")
    generated_paths = []
    for candidate in candidate_files:
        generated = make_testcase(candidate)
        # make_testcase returns None for invalid testcases; skip those
        if generated:
            generated_paths.append(generated)
    format_with_black(tests_path)
    return generated_paths
def main_make(tests_paths: List[Text]) -> List:
    """ make every specified path and collect all generated python files. """
    return [
        generated_path
        for tests_path in tests_paths
        for generated_path in make(tests_path)
    ]
def init_make_parser(subparsers):
    """ register the `make` sub-command on the given argparse subparsers.

    Returns:
        the newly created sub-command parser
    """
    make_parser = subparsers.add_parser(
        "make", help="Convert YAML/JSON testcases to Python unittests.",
    )
    make_parser.add_argument(
        "testcase_path", nargs="*", help="Specify YAML/JSON testcase file/folder path"
    )
    return make_parser

View File

@@ -0,0 +1,20 @@
import unittest
from httprunner.ext.make import make_testcase, main_make
class TestLoader(unittest.TestCase):
    """integration tests for the `make` sub-command.

    NOTE(review): these tests reference example files under
    examples/postman_echo/ and therefore must run from the repository root;
    they also write generated *_test.py files next to the examples.
    """
    def test_make_testcase(self):
        # one YAML testcase converts to a sibling *_test.py file
        path = "examples/postman_echo/request_methods/request_with_variables.yml"
        testcase_python_path = make_testcase(path)
        self.assertEqual(
            testcase_python_path,
            "examples/postman_echo/request_methods/request_with_variables_test.py",
        )
    def test_make_testcase_folder(self):
        # a folder converts every contained YAML/JSON testcase
        path = ["examples/postman_echo/request_methods/"]
        testcase_python_list = main_make(path)
        self.assertIn(
            "examples/postman_echo/request_methods/request_with_functions_test.py",
            testcase_python_list,
        )

View File

@@ -0,0 +1,132 @@
import os.path
import sys
from loguru import logger
def init_parser_scaffold(subparsers):
    """ register the `startproject` sub-command on the given argparse subparsers.

    Returns:
        the newly created sub-command parser
    """
    scaffold_parser = subparsers.add_parser(
        "startproject", help="Create a new project with template structure."
    )
    scaffold_parser.add_argument(
        "project_name", type=str, nargs="?", help="Specify new project name."
    )
    return scaffold_parser
def create_scaffold(project_name):
    """ create scaffold with specified project name.

    Generates a demo project layout under the current working directory:
    api/, testcases/, testsuites/, reports/ folders plus demo YAML files,
    debugtalk.py, .env and .gitignore. Refuses to touch an existing folder.

    Args:
        project_name (str): new project folder name, relative to CWD
    """
    if os.path.isdir(project_name):
        logger.warning(
            f"Folder {project_name} exists, please specify a new folder name."
        )
        return
    logger.info(f"Start to create new project: {project_name}")
    logger.info(f"CWD: {os.getcwd()}")
    def create_folder(path):
        # helper: make one folder and log the action
        os.makedirs(path)
        msg = f"created folder: {path}"
        logger.info(msg)
    def create_file(path, file_content=""):
        # helper: write one text file and log the action
        with open(path, "w") as f:
            f.write(file_content)
        msg = f"created file: {path}"
        logger.info(msg)
    # demo api definition, referenced from testcases via `api: path/to/api.yml`
    demo_api_content = """
name: demo api
variables:
    var1: value1
    var2: value2
request:
    url: /api/path/$var1
    method: POST
    headers:
        Content-Type: "application/json"
    json:
        key: $var2
validate:
    - eq: ["status_code", 200]
"""
    # demo testcase referencing api files, with per-step variable overrides
    demo_testcase_content = """
config:
    name: "demo testcase"
    variables:
        device_sn: "ABC"
        username: ${ENV(USERNAME)}
        password: ${ENV(PASSWORD)}
    base_url: "http://127.0.0.1:5000"
teststeps:
-
    name: demo step 1
    api: path/to/api1.yml
    variables:
        user_agent: 'iOS/10.3'
        device_sn: $device_sn
    extract:
        token: content.token
    validate:
        - eq: ["status_code", 200]
-
    name: demo step 2
    api: path/to/api2.yml
    variables:
        token: $token
"""
    # demo testsuite running one testcase with two data sets
    demo_testsuite_content = """
config:
    name: "demo testsuite"
    variables:
        device_sn: "XYZ"
    base_url: "http://127.0.0.1:5000"
testcases:
-
    name: call demo_testcase with data 1
    testcase: path/to/demo_testcase.yml
    variables:
        device_sn: $device_sn
-
    name: call demo_testcase with data 2
    testcase: path/to/demo_testcase.yml
    variables:
        device_sn: $device_sn
"""
    # keep generated artifacts and local environment out of version control
    ignore_content = "\n".join(
        [".env", "reports/*", "__pycache__/*", "*.pyc", ".python-version", "logs/*"]
    )
    # minimal debugtalk.py so users get a working custom-function example
    demo_debugtalk_content = """
import time
def sleep(n_secs):
    time.sleep(n_secs)
"""
    # demo env vars consumed by ${ENV(...)} in the demo testcase above
    demo_env_content = "\n".join(["USERNAME=leolee", "PASSWORD=123456"])
    create_folder(project_name)
    create_folder(os.path.join(project_name, "api"))
    create_folder(os.path.join(project_name, "testcases"))
    create_folder(os.path.join(project_name, "testsuites"))
    create_folder(os.path.join(project_name, "reports"))
    create_file(os.path.join(project_name, "api", "demo_api.yml"), demo_api_content)
    create_file(
        os.path.join(project_name, "testcases", "demo_testcase.yml"),
        demo_testcase_content,
    )
    create_file(
        os.path.join(project_name, "testsuites", "demo_testsuite.yml"),
        demo_testsuite_content,
    )
    create_file(os.path.join(project_name, "debugtalk.py"), demo_debugtalk_content)
    create_file(os.path.join(project_name, ".env"), demo_env_content)
    create_file(os.path.join(project_name, ".gitignore"), ignore_content)
def main_scaffold(args):
    # entry point for the `startproject` sub-command: generate the scaffold,
    # then terminate the process (exit code 0 even if the folder already
    # existed — create_scaffold only logs a warning in that case)
    create_scaffold(args.project_name)
    sys.exit(0)

View File

@@ -0,0 +1,18 @@
import os
import shutil
import unittest
from httprunner.ext.scaffold import create_scaffold
class TestUtils(unittest.TestCase):
    """tests for the `startproject` scaffold generator."""
    def test_create_scaffold(self):
        # scaffold should create the full demo directory layout and demo files
        project_name = "projectABC"
        create_scaffold(project_name)
        self.assertTrue(os.path.isdir(os.path.join(project_name, "api")))
        self.assertTrue(os.path.isdir(os.path.join(project_name, "testcases")))
        self.assertTrue(os.path.isdir(os.path.join(project_name, "testsuites")))
        self.assertTrue(os.path.isdir(os.path.join(project_name, "reports")))
        self.assertTrue(os.path.isfile(os.path.join(project_name, "debugtalk.py")))
        self.assertTrue(os.path.isfile(os.path.join(project_name, ".env")))
        # clean up the generated project folder
        shutil.rmtree(project_name)

View File

@@ -44,6 +44,10 @@ For compatibility, you can also write upload test script in old way:
import os
import sys
from typing import Text, NoReturn
from httprunner.parser import parse_variables_mapping
from httprunner.schema import TStep, FunctionsMapping
try:
import filetype
@@ -57,16 +61,13 @@ $ pip install requests_toolbelt filetype
print(msg)
sys.exit(0)
from httprunner.exceptions import ParamsError
def prepare_upload_test(test_dict):
def prepare_upload_step(step: TStep, functions: FunctionsMapping) -> "NoReturn":
""" preprocess for upload test
replace `upload` info with MultipartEncoder
Args:
test_dict (dict):
step: teststep
{
"variables": {},
"request": {
@@ -81,27 +82,29 @@ def prepare_upload_test(test_dict):
}
}
}
functions: functions mapping
"""
upload_json = test_dict["request"].pop("upload", {})
if not upload_json:
raise ParamsError(f"invalid upload info: {upload_json}")
if not step.request.upload:
return
params_list = []
for key, value in upload_json.items():
test_dict["variables"][key] = value
for key, value in step.request.upload.items():
step.variables[key] = value
params_list.append(f"{key}=${key}")
params_str = ", ".join(params_list)
test_dict["variables"]["m_encoder"] = "${multipart_encoder(" + params_str + ")}"
step.variables["m_encoder"] = "${multipart_encoder(" + params_str + ")}"
test_dict["request"].setdefault("headers", {})
test_dict["request"]["headers"]["Content-Type"] = "${multipart_content_type($m_encoder)}"
# parse variables
step.variables = parse_variables_mapping(step.variables, functions)
test_dict["request"]["data"] = "$m_encoder"
step.request.headers["Content-Type"] = "${multipart_content_type($m_encoder)}"
step.request.data = "$m_encoder"
def multipart_encoder(**kwargs):
def multipart_encoder(**kwargs) -> MultipartEncoder:
""" initialize MultipartEncoder with uploading fields.
"""
@@ -121,8 +124,9 @@ def multipart_encoder(**kwargs):
is_exists_file = os.path.isfile(value)
else:
# value is not absolute file path, check if it is relative file path
from httprunner.loader import get_pwd
_file_path = os.path.join(get_pwd(), value)
from httprunner.loader import project_working_directory
_file_path = os.path.join(project_working_directory, value)
is_exists_file = os.path.isfile(_file_path)
if is_exists_file:
@@ -130,7 +134,7 @@ def multipart_encoder(**kwargs):
filename = os.path.basename(_file_path)
mime_type = get_filetype(_file_path)
# TODO: fix ResourceWarning for unclosed file
file_handler = open(_file_path, 'rb')
file_handler = open(_file_path, "rb")
fields_dict[key] = (filename, file_handler, mime_type)
else:
fields_dict[key] = value
@@ -138,7 +142,7 @@ def multipart_encoder(**kwargs):
return MultipartEncoder(fields=fields_dict)
def multipart_content_type(m_encoder):
def multipart_content_type(m_encoder: MultipartEncoder) -> Text:
""" prepare Content-Type for request headers
"""
return m_encoder.content_type

411
httprunner/loader.py Normal file
View File

@@ -0,0 +1,411 @@
import csv
import importlib
import io
import json
import os
import sys
import types
from typing import Tuple, Dict, Union, Text, List, Callable
import yaml
from loguru import logger
from pydantic import ValidationError
from httprunner import builtin, utils
from httprunner import exceptions
from httprunner.schema import TestCase, ProjectMeta
try:
    # PyYAML version >= 5.1
    # ref: https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
    yaml.warnings({"YAMLLoadWarning": False})
except AttributeError:
    # older PyYAML has no yaml.warnings(); nothing to silence
    pass
# cache of loaded ProjectMeta objects keyed by test path,
# filled and read by load_project_meta()
project_meta_cached_mapping: Dict[Text, ProjectMeta] = {}
# project root directory (folder containing debugtalk.py);
# set by init_project_working_directory(), None until then
project_working_directory: Union[Text, None] = None
def _load_yaml_file(yaml_file: Text) -> Dict:
    """ load yaml file and check file content format

    Args:
        yaml_file: YAML file path

    Returns:
        loaded YAML content

    Raises:
        exceptions.FileFormatError: if the file is not valid YAML.
    """
    with io.open(yaml_file, "r", encoding="utf-8") as stream:
        try:
            # pass an explicit Loader: yaml.load() without one is deprecated
            # since PyYAML 5.1; SafeLoader restricts loading to plain data
            # types, which is all a YAML testcase file should contain
            yaml_content = yaml.load(stream, Loader=yaml.SafeLoader)
        except yaml.YAMLError as ex:
            logger.error(str(ex))
            # attach a message so callers can tell which file was broken
            raise exceptions.FileFormatError(
                f"YAML file format error: {yaml_file}"
            ) from ex
    return yaml_content
def _load_json_file(json_file: Text) -> Dict:
""" load json file and check file content format
"""
with io.open(json_file, encoding="utf-8") as data_file:
try:
json_content = json.load(data_file)
except json.JSONDecodeError:
err_msg = f"JSONDecodeError: JSON file format error: {json_file}"
logger.error(err_msg)
raise exceptions.FileFormatError(err_msg)
return json_content
def load_testcase_file(testcase_file: Text) -> Tuple[Dict, TestCase]:
    """load testcase file and validate with pydantic TestCase model.

    Returns:
        (raw testcase dict, validated TestCase object); the file path is
        recorded in the config section of both.
    """
    if not os.path.isfile(testcase_file):
        raise exceptions.FileNotFound(f"testcase file not exists: {testcase_file}")
    file_suffix = os.path.splitext(testcase_file)[1].lower()
    # dispatch loader by suffix; anything else is rejected
    suffix_loaders = {
        ".json": _load_json_file,
        ".yaml": _load_yaml_file,
        ".yml": _load_yaml_file,
    }
    if file_suffix not in suffix_loaders:
        raise exceptions.FileFormatError(
            f"testcase file should be YAML/JSON format, invalid testcase file: {testcase_file}"
        )
    testcase_content = suffix_loaders[file_suffix](testcase_file)
    try:
        # validate with pydantic TestCase model
        testcase_obj = TestCase.parse_obj(testcase_content)
    except ValidationError as ex:
        err_msg = f"Invalid testcase format: {testcase_file}"
        logger.error(f"{err_msg}\n{ex}")
        raise exceptions.TestCaseFormatError(err_msg)
    # record source path on both representations
    testcase_content["config"]["path"] = testcase_file
    testcase_obj.config.path = testcase_file
    return testcase_content, testcase_obj
def load_dot_env_file(dot_env_path: Text) -> Dict:
    """ load .env file.

    Args:
        dot_env_path (str): .env file path

    Returns:
        dict: environment variables mapping
            {
                "UserName": "debugtalk",
                "Password": "123456",
                "PROJECT_KEY": "ABCDEFGH"
            }

    Raises:
        exceptions.FileFormatError: If .env file format is invalid.
    """
    if not os.path.isfile(dot_env_path):
        return {}
    logger.info(f"Loading environment variables from {dot_env_path}")
    env_variables_mapping = {}
    with io.open(dot_env_path, "r", encoding="utf-8") as fp:
        for line in fp:
            line = line.strip()
            if not line or line.startswith("#"):
                # tolerate blank and comment lines, both common in .env files;
                # previously a blank line raised FileFormatError
                continue
            # maxsplit=1: the value itself may contain the delimiter
            if "=" in line:
                variable, value = line.split("=", 1)
            elif ":" in line:
                variable, value = line.split(":", 1)
            else:
                raise exceptions.FileFormatError(".env format error")
            env_variables_mapping[variable.strip()] = value.strip()
    # export the loaded variables into os.environ for later ${ENV(...)} use
    utils.set_os_environ(env_variables_mapping)
    return env_variables_mapping
def load_csv_file(csv_file: Text) -> List[Dict]:
    """ load csv file and check file content format

    Args:
        csv_file (str): csv file path; a relative path is resolved against
            the project working directory.

    Returns:
        list: list of parameters, each parameter is in dict format

    Examples:
        >>> cat csv_file
        username,password
        test1,111111
        test2,222222
        test3,333333
        >>> load_csv_file(csv_file)
        [
            {'username': 'test1', 'password': '111111'},
            {'username': 'test2', 'password': '222222'},
            {'username': 'test3', 'password': '333333'}
        ]
    """
    if not os.path.isabs(csv_file):
        global project_working_directory
        if project_working_directory is None:
            raise exceptions.MyBaseFailure("load_project_meta() has not been called!")
        # make compatible with Windows/Linux
        csv_file = os.path.join(project_working_directory, *csv_file.split("/"))
    if not os.path.isfile(csv_file):
        # file path not exist
        raise exceptions.CSVNotFound(csv_file)
    with io.open(csv_file, encoding="utf-8") as csv_handle:
        return [dict(row) for row in csv.DictReader(csv_handle)]
def load_folder_files(folder_path: Text, recursive: bool = True) -> List:
    """ load folder path, return all files ending with yml/yaml/json.

    Args:
        folder_path (str): folder path to scan; a list/set of paths is
            also accepted and scanned path by path.
        recursive (bool): descend into sub-folders if True

    Returns:
        list: file paths ending with yml/yaml/json
    """
    if isinstance(folder_path, (list, set)):
        # fan out over the (deduplicated) collection of paths
        collected = []
        for sub_path in set(folder_path):
            collected.extend(load_folder_files(sub_path, recursive))
        return collected
    if not os.path.exists(folder_path):
        return []
    result = []
    for dirpath, _, filenames in os.walk(folder_path):
        result.extend(
            os.path.join(dirpath, fname)
            for fname in filenames
            if fname.endswith((".yml", ".yaml", ".json"))
        )
        if not recursive:
            # only the top-level directory was requested
            break
    return result
def load_module_functions(module) -> Dict[Text, Callable]:
    """ load python module functions.

    Args:
        module: python module

    Returns:
        dict: name => function mapping for every plain python function
            defined on the module (builtins and classes are excluded)
    """
    return {
        name: attr
        for name, attr in vars(module).items()
        if isinstance(attr, types.FunctionType)
    }
def load_builtin_functions() -> Dict[Text, Callable]:
    """ load builtin module functions

    Returns:
        dict: name => function mapping for the httprunner.builtin helpers
    """
    return load_module_functions(builtin)
def locate_file(start_path: Text, file_name: Text) -> Text:
    """ locate filename and return absolute file path.
    searching will be recursive upward until current working directory or system root dir.

    Args:
        file_name (str): target locate file name
        start_path (str): start locating path, maybe file path or directory path

    Returns:
        str: located absolute file path

    Raises:
        exceptions.FileNotFound: If failed to locate file.
    """
    if os.path.isfile(start_path):
        current_dir = os.path.dirname(start_path)
    elif os.path.isdir(start_path):
        current_dir = start_path
    else:
        raise exceptions.FileNotFound(f"invalid path: {start_path}")
    candidate = os.path.join(current_dir, file_name)
    if os.path.isfile(candidate):
        return os.path.abspath(candidate)
    # stop at the current working directory or the filesystem root
    # (at the root, dirname() returns the directory itself:
    #  Windows 'E:\\', Linux/Darwin '/')
    parent_dir = os.path.dirname(current_dir)
    reached_cwd = os.path.abspath(current_dir) == os.getcwd()
    reached_root = parent_dir == current_dir
    if reached_cwd or reached_root:
        raise exceptions.FileNotFound(f"{file_name} not found in {start_path}")
    # keep locating recursively upward
    return locate_file(parent_dir, file_name)
def locate_debugtalk_py(start_path: Text) -> Text:
    """ locate debugtalk.py file by searching upward from start_path.

    Args:
        start_path (str): testcase file path or directory path to search from

    Returns:
        str: debugtalk.py file path, None if not found
    """
    try:
        return locate_file(start_path, "debugtalk.py")
    except exceptions.FileNotFound:
        # a missing debugtalk.py is legal: project has no custom functions
        return None
def init_project_working_directory(test_path: Text) -> Tuple[Text, Text]:
    """ this should be called at startup

        run test file:
            run_path -> load_cases -> load_project_data -> init_project_working_directory
        or run passed in data structure:
            run -> init_project_working_directory

    Side effects:
        sets the module-level `project_working_directory` global and
        inserts it at the front of sys.path (so `import debugtalk` works).

    Args:
        test_path: specified testfile path

    Returns:
        (str, str): debugtalk.py path (None if not found), project_working_directory
    """
    def prepare_path(path):
        # validate existence and normalize to an absolute path
        if not os.path.exists(path):
            err_msg = f"path not exist: {path}"
            logger.error(err_msg)
            raise exceptions.FileNotFound(err_msg)
        if not os.path.isabs(path):
            path = os.path.join(os.getcwd(), path)
        return path
    test_path = prepare_path(test_path)
    # locate debugtalk.py file
    debugtalk_path = locate_debugtalk_py(test_path)
    global project_working_directory
    if debugtalk_path:
        # The folder contains debugtalk.py will be treated as PWD.
        project_working_directory = os.path.dirname(debugtalk_path)
    else:
        # debugtalk.py not found, use os.getcwd() as PWD.
        project_working_directory = os.getcwd()
    # add PWD to sys.path so `import debugtalk` resolves later
    sys.path.insert(0, project_working_directory)
    return debugtalk_path, project_working_directory
def load_debugtalk_functions() -> Dict[Text, Callable]:
    """ load project debugtalk.py module functions

        debugtalk.py should be located in project working directory.
        NOTE: relies on init_project_working_directory() having inserted the
        project working directory into sys.path, otherwise the import fails.

    Returns:
        dict: debugtalk module functions mapping
            {
                "func1_name": func1,
                "func2_name": func2
            }
    """
    # load debugtalk.py module
    imported_module = importlib.import_module("debugtalk")
    return load_module_functions(imported_module)
def load_project_meta(test_path: Text) -> ProjectMeta:
    """ load api, testcases, .env, debugtalk.py functions.
        api/testcases folder is relative to project_working_directory

    Results are cached per test_path in project_meta_cached_mapping.

    Args:
        test_path (str): test file/folder path, locate pwd from this path.

    Returns:
        project loaded api/testcases definitions,
            environments and debugtalk.py functions.
    """
    if test_path in project_meta_cached_mapping:
        # already loaded for this path; reuse cached ProjectMeta
        return project_meta_cached_mapping[test_path]
    # NOTE(review): this local name shadows the module-level
    # `project_working_directory`; the global itself is assigned inside
    # init_project_working_directory(), so both stay consistent.
    debugtalk_path, project_working_directory = init_project_working_directory(
        test_path
    )
    project_meta = ProjectMeta()
    # load .env file
    # NOTICE:
    # environment variable maybe loaded in debugtalk.py
    # thus .env file should be loaded before loading debugtalk.py
    dot_env_path = os.path.join(project_working_directory, ".env")
    project_meta.env = load_dot_env_file(dot_env_path)
    if debugtalk_path:
        # load debugtalk.py functions
        debugtalk_functions = load_debugtalk_functions()
    else:
        debugtalk_functions = {}
    # record PWD and the loaded debugtalk.py functions
    project_meta.PWD = project_working_directory
    project_meta.functions = debugtalk_functions
    # test_path made relative to PWD by stripping the PWD prefix + separator
    project_meta.test_path = os.path.abspath(test_path)[
        len(project_working_directory) + 1 :
    ]
    project_meta_cached_mapping[test_path] = project_meta
    return project_meta

View File

@@ -1,27 +0,0 @@
"""
HttpRunner loader
- check: validate api/testcase/testsuite data structure with JSON schema
- locate: locate debugtalk.py, make it's dir as project root path
- load: load testcase files and relevant data, including debugtalk.py, .env, yaml/json api/testcases, csv, etc.
- buildup: assemble loaded content to httprunner testcase/testsuite data structure
"""
from httprunner.loader.check import is_test_path, is_test_content, JsonSchemaChecker
from httprunner.loader.locate import get_project_working_directory as get_pwd, \
init_project_working_directory as init_pwd
from httprunner.loader.load import load_csv_file, load_builtin_functions
from httprunner.loader.buildup import load_cases, load_project_data
__all__ = [
"is_test_path",
"is_test_content",
"JsonSchemaChecker",
"get_pwd",
"init_pwd",
"load_csv_file",
"load_builtin_functions",
"load_project_data",
"load_cases"
]

View File

@@ -1,511 +0,0 @@
import importlib
import os
from loguru import logger
from httprunner import exceptions, utils
from httprunner.loader.check import JsonSchemaChecker
from httprunner.loader.load import load_module_functions, load_file, load_dot_env_file, \
load_folder_files
from httprunner.loader.locate import init_project_working_directory, get_project_working_directory
tests_def_mapping = {
"api": {},
"testcases": {}
}
def load_debugtalk_functions():
""" load project debugtalk.py module functions
debugtalk.py should be located in project working directory.
Returns:
dict: debugtalk module functions mapping
{
"func1_name": func1,
"func2_name": func2
}
"""
# load debugtalk.py module
imported_module = importlib.import_module("debugtalk")
return load_module_functions(imported_module)
def __extend_with_api_ref(raw_testinfo):
""" extend with api reference
Raises:
exceptions.ApiNotFound: api not found
"""
api_name = raw_testinfo["api"]
# api maybe defined in two types:
# 1, individual file: each file is corresponding to one api definition
# 2, api sets file: one file contains a list of api definitions
if not os.path.isabs(api_name):
# make compatible with Windows/Linux
pwd = get_project_working_directory()
api_path = os.path.join(pwd, *api_name.split("/"))
if os.path.isfile(api_path):
# type 1: api is defined in individual file
api_name = api_path
if api_name in tests_def_mapping["api"]:
block = tests_def_mapping["api"][api_name]
elif not os.path.isfile(api_name):
raise exceptions.ApiNotFound(f"{api_name} not found!")
else:
block = load_file(api_name)
# NOTICE: avoid project_mapping been changed during iteration.
raw_testinfo["api_def"] = utils.deepcopy_dict(block)
tests_def_mapping["api"][api_name] = block
def __extend_with_testcase_ref(raw_testinfo):
""" extend with testcase reference
"""
testcase_path = raw_testinfo["testcase"]
if testcase_path not in tests_def_mapping["testcases"]:
# make compatible with Windows/Linux
pwd = get_project_working_directory()
testcase_path = os.path.join(
pwd,
*testcase_path.split("/")
)
loaded_testcase = load_file(testcase_path)
if isinstance(loaded_testcase, list):
# make compatible with version < 2.2.0
testcase_dict = load_testcase(loaded_testcase)
elif isinstance(loaded_testcase, dict) and "teststeps" in loaded_testcase:
# format version 2, implemented in 2.2.0
testcase_dict = load_testcase_v2(loaded_testcase)
else:
raise exceptions.FileFormatError(
f"Invalid format testcase: {testcase_path}")
tests_def_mapping["testcases"][testcase_path] = testcase_dict
else:
testcase_dict = tests_def_mapping["testcases"][testcase_path]
raw_testinfo["testcase_def"] = testcase_dict
def load_teststep(raw_testinfo):
""" load testcase step content.
teststep maybe defined directly, or reference api/testcase.
Args:
raw_testinfo (dict): test data, maybe in 3 formats.
# api reference
{
"name": "add product to cart",
"api": "/path/to/api",
"variables": {},
"validate": [],
"extract": {}
}
# testcase reference
{
"name": "add product to cart",
"testcase": "/path/to/testcase",
"variables": {}
}
# define directly
{
"name": "checkout cart",
"request": {},
"variables": {},
"validate": [],
"extract": {}
}
Returns:
dict: loaded teststep content
"""
# reference api
if "api" in raw_testinfo:
__extend_with_api_ref(raw_testinfo)
# TODO: reference proc functions
# elif "func" in raw_testinfo:
# pass
# reference testcase
elif "testcase" in raw_testinfo:
__extend_with_testcase_ref(raw_testinfo)
# define directly
else:
pass
return raw_testinfo
def load_testcase(raw_testcase):
""" load testcase with api/testcase references.
Args:
raw_testcase (list): raw testcase content loaded from JSON/YAML file:
[
# config part
{
"config": {
"name": "XXXX",
"base_url": "https://debugtalk.com"
}
},
# teststeps part
{
"test": {...}
},
{
"test": {...}
}
]
Returns:
dict: loaded testcase content
{
"config": {},
"teststeps": [test11, test12]
}
"""
JsonSchemaChecker.validate_testcase_v1_format(raw_testcase)
config = {}
tests = []
for item in raw_testcase:
key, test_block = item.popitem()
if key == "config":
config.update(test_block)
elif key == "test":
tests.append(load_teststep(test_block))
else:
logger.warning(
f"unexpected block key: {key}. block key should only be 'config' or 'test'."
)
return {
"config": config,
"teststeps": tests
}
def load_testcase_v2(raw_testcase):
""" load testcase in format version 2.
Args:
raw_testcase (dict): raw testcase content loaded from JSON/YAML file:
{
"config": {
"name": "xxx",
"variables": {}
}
"teststeps": [
{
"name": "teststep 1",
"request" {...}
},
{
"name": "teststep 2",
"request" {...}
},
]
}
Returns:
dict: loaded testcase content
{
"config": {},
"teststeps": [test11, test12]
}
"""
JsonSchemaChecker.validate_testcase_v2_format(raw_testcase)
raw_teststeps = raw_testcase.pop("teststeps")
raw_testcase["teststeps"] = [
load_teststep(teststep)
for teststep in raw_teststeps
]
return raw_testcase
def load_testsuite(raw_testsuite):
""" load testsuite with testcase references.
support two different formats.
Args:
raw_testsuite (dict): raw testsuite content loaded from JSON/YAML file:
# version 1, compatible with version < 2.2.0
{
"config": {
"name": "xxx",
"variables": {}
}
"testcases": {
"testcase1": {
"testcase": "/path/to/testcase",
"variables": {...},
"parameters": {...}
},
"testcase2": {}
}
}
# version 2, implemented in 2.2.0
{
"config": {
"name": "xxx",
"variables": {}
}
"testcases": [
{
"name": "testcase1",
"testcase": "/path/to/testcase",
"variables": {...},
"parameters": {...}
},
{}
]
}
Returns:
dict: loaded testsuite content
{
"config": {},
"testcases": [testcase1, testcase2]
}
"""
raw_testcases = raw_testsuite["testcases"]
if isinstance(raw_testcases, dict):
# format version 1, make compatible with version < 2.2.0
JsonSchemaChecker.validate_testsuite_v1_format(raw_testsuite)
raw_testsuite["testcases"] = {}
for name, raw_testcase in raw_testcases.items():
__extend_with_testcase_ref(raw_testcase)
raw_testcase.setdefault("name", name)
raw_testsuite["testcases"][name] = raw_testcase
elif isinstance(raw_testcases, list):
# format version 2, implemented in 2.2.0
JsonSchemaChecker.validate_testsuite_v2_format(raw_testsuite)
raw_testsuite["testcases"] = {}
for raw_testcase in raw_testcases:
__extend_with_testcase_ref(raw_testcase)
testcase_name = raw_testcase["name"]
raw_testsuite["testcases"][testcase_name] = raw_testcase
else:
# invalid format
raise exceptions.FileFormatError("Invalid testsuite format!")
return raw_testsuite
def load_test_file(path):
""" load test file, file maybe testcase/testsuite/api
Args:
path (str): test file path
Returns:
dict: loaded test content
# api
{
"path": path,
"type": "api",
"name": "",
"request": {}
}
# testcase
{
"path": path,
"type": "testcase",
"config": {},
"teststeps": []
}
# testsuite
{
"path": path,
"type": "testsuite",
"config": {},
"testcases": {}
}
"""
raw_content = load_file(path)
if isinstance(raw_content, dict):
if "testcases" in raw_content:
# file_type: testsuite
loaded_content = load_testsuite(raw_content)
loaded_content["path"] = path
loaded_content["type"] = "testsuite"
elif "teststeps" in raw_content:
# file_type: testcase (format version 2)
loaded_content = load_testcase_v2(raw_content)
loaded_content["path"] = path
loaded_content["type"] = "testcase"
elif "request" in raw_content:
# file_type: api
JsonSchemaChecker.validate_api_format(raw_content)
loaded_content = raw_content
loaded_content["path"] = path
loaded_content["type"] = "api"
else:
# invalid format
raise exceptions.FileFormatError("Invalid test file format!")
elif isinstance(raw_content, list) and len(raw_content) > 0:
# file_type: testcase
# make compatible with version < 2.2.0
loaded_content = load_testcase(raw_content)
loaded_content["path"] = path
loaded_content["type"] = "testcase"
else:
# invalid format
raise exceptions.FileFormatError("Invalid test file format!")
return loaded_content
def load_project_data(test_path, dot_env_path=None):
""" load api, testcases, .env, debugtalk.py functions.
api/testcases folder is relative to project_working_directory
Args:
test_path (str): test file/folder path, locate pwd from this path.
dot_env_path (str): specified .env file path
Returns:
dict: project loaded api/testcases definitions,
environments and debugtalk.py functions.
"""
debugtalk_path, project_working_directory = init_project_working_directory(test_path)
project_mapping = {}
# load .env file
# NOTICE:
# environment variable maybe loaded in debugtalk.py
# thus .env file should be loaded before loading debugtalk.py
dot_env_path = dot_env_path or os.path.join(project_working_directory, ".env")
project_mapping["env"] = load_dot_env_file(dot_env_path)
if debugtalk_path:
# load debugtalk.py functions
debugtalk_functions = load_debugtalk_functions()
else:
debugtalk_functions = {}
# locate PWD and load debugtalk.py functions
project_mapping["PWD"] = project_working_directory
project_mapping["functions"] = debugtalk_functions
project_mapping["test_path"] = os.path.abspath(test_path)[len(project_working_directory)+1:]
return project_mapping
def load_cases(path, dot_env_path=None):
""" load testcases from file path, extend and merge with api/testcase definitions.
Args:
path (str): testcase/testsuite file/foler path.
path could be in 2 types:
- absolute/relative file path
- absolute/relative folder path
dot_env_path (str): specified .env file path
Returns:
dict: tests mapping, include project_mapping and testcases.
each testcase is corresponding to a file.
{
"project_mapping": {
"PWD": "XXXXX",
"functions": {},
"env": {}
},
"testcases": [
{ # testcase data structure
"config": {
"name": "desc1",
"path": "testcase1_path",
"variables": [], # optional
},
"teststeps": [
# test data structure
{
'name': 'test desc1',
'variables': [], # optional
'extract': [], # optional
'validate': [],
'request': {}
},
test_dict_2 # another test dict
]
},
testcase_2_dict # another testcase dict
],
"testsuites": [
{ # testsuite data structure
"config": {},
"testcases": {
"testcase1": {},
"testcase2": {},
}
},
testsuite_2_dict
]
}
"""
tests_mapping = {
"project_mapping": load_project_data(path, dot_env_path)
}
def __load_file_content(path):
loaded_content = None
try:
loaded_content = load_test_file(path)
except exceptions.ApiNotFound as ex:
logger.warning(f"Invalid api reference in {path}: {ex}")
except exceptions.FileFormatError:
logger.warning(f"Invalid test file format: {path}")
if not loaded_content:
pass
elif loaded_content["type"] == "testsuite":
tests_mapping.setdefault("testsuites", []).append(loaded_content)
elif loaded_content["type"] == "testcase":
tests_mapping.setdefault("testcases", []).append(loaded_content)
elif loaded_content["type"] == "api":
tests_mapping.setdefault("apis", []).append(loaded_content)
if os.path.isdir(path):
files_list = load_folder_files(path)
for path in files_list:
__load_file_content(path)
elif os.path.isfile(path):
__load_file_content(path)
return tests_mapping

View File

@@ -1,291 +0,0 @@
import os
import unittest
from httprunner import exceptions, loader
from httprunner.loader import buildup
class TestModuleLoader(unittest.TestCase):
def test_filter_module_functions(self):
module_functions = buildup.load_module_functions(buildup)
self.assertIn("load_module_functions", module_functions)
self.assertNotIn("is_py3", module_functions)
def test_load_debugtalk_module(self):
project_mapping = buildup.load_project_data(os.path.join(os.getcwd(), "httprunner"))
self.assertNotIn("alter_response", project_mapping["functions"])
project_mapping = buildup.load_project_data(os.path.join(os.getcwd(), "tests"))
self.assertIn("alter_response", project_mapping["functions"])
is_status_code_200 = project_mapping["functions"]["is_status_code_200"]
self.assertTrue(is_status_code_200(200))
self.assertFalse(is_status_code_200(500))
def test_load_debugtalk_py(self):
project_mapping = buildup.load_project_data("tests/data/demo_testcase.yml")
project_working_directory = project_mapping["PWD"]
debugtalk_functions = project_mapping["functions"]
self.assertEqual(
project_working_directory,
os.path.join(os.getcwd(), "tests")
)
self.assertIn("gen_md5", debugtalk_functions)
project_mapping = buildup.load_project_data("tests/base.py")
project_working_directory = project_mapping["PWD"]
debugtalk_functions = project_mapping["functions"]
self.assertEqual(
project_working_directory,
os.path.join(os.getcwd(), "tests")
)
self.assertIn("gen_md5", debugtalk_functions)
project_mapping = buildup.load_project_data("httprunner/__init__.py")
project_working_directory = project_mapping["PWD"]
debugtalk_functions = project_mapping["functions"]
self.assertEqual(
project_working_directory,
os.getcwd()
)
self.assertEqual(debugtalk_functions, {})
class TestSuiteLoader(unittest.TestCase):
    """Tests for loading api/testcase/testsuite files via loader/buildup."""

    @classmethod
    def setUpClass(cls):
        # load project meta (functions, env, PWD) once for the whole class
        cls.project_mapping = buildup.load_project_data(os.path.join(os.getcwd(), "tests"))
        cls.tests_def_mapping = buildup.tests_def_mapping

    def test_load_teststep_api(self):
        # a teststep referencing an api file gets the referenced definition
        # attached under "api_def" while keeping the overriding name
        raw_test = {
            "name": "create user (override).",
            "api": "api/create_user.yml",
            "variables": [
                {"uid": "999"}
            ]
        }
        teststep = buildup.load_teststep(raw_test)
        self.assertEqual(
            "create user (override).",
            teststep["name"]
        )
        self.assertIn("api_def", teststep)
        api_def = teststep["api_def"]
        self.assertEqual(api_def["name"], "create user")
        self.assertEqual(api_def["request"]["url"], "/api/users/$uid")

    def test_load_teststep_testcase(self):
        # a teststep referencing another testcase file gets "testcase_def"
        raw_test = {
            "name": "setup and reset all (override).",
            "testcase": "testcases/setup.yml",
            "variables": [
                {"device_sn": "$device_sn"}
            ]
        }
        testcase = buildup.load_teststep(raw_test)
        self.assertEqual(
            "setup and reset all (override).",
            testcase["name"]
        )
        tests = testcase["testcase_def"]["teststeps"]
        self.assertEqual(len(tests), 2)
        self.assertEqual(tests[0]["name"], "get token (setup)")
        self.assertEqual(tests[1]["name"], "reset all users")

    def test_load_test_file_api(self):
        loaded_content = buildup.load_test_file("tests/api/create_user.yml")
        self.assertEqual(loaded_content["type"], "api")
        self.assertIn("path", loaded_content)
        self.assertIn("request", loaded_content)
        self.assertEqual(loaded_content["request"]["url"], "/api/users/$uid")

    def test_load_test_file_testcase(self):
        # YAML and JSON variants must load to the same structure
        for loaded_content in [
            buildup.load_test_file("tests/testcases/setup.yml"),
            buildup.load_test_file("tests/testcases/setup.json")
        ]:
            self.assertEqual(loaded_content["type"], "testcase")
            self.assertIn("path", loaded_content)
            self.assertIn("config", loaded_content)
            self.assertEqual(loaded_content["config"]["name"], "setup and reset all.")
            self.assertIn("teststeps", loaded_content)
            self.assertEqual(len(loaded_content["teststeps"]), 2)

    def test_load_test_file_testcase_v2(self):
        # v2 file format should load to the same structure as v1
        for loaded_content in [
            buildup.load_test_file("tests/testcases/setup.v2.yml"),
            buildup.load_test_file("tests/testcases/setup.v2.json")
        ]:
            self.assertEqual(loaded_content["type"], "testcase")
            self.assertIn("path", loaded_content)
            self.assertIn("config", loaded_content)
            self.assertEqual(loaded_content["config"]["name"], "setup and reset all.")
            self.assertIn("teststeps", loaded_content)
            self.assertEqual(len(loaded_content["teststeps"]), 2)

    def test_load_test_file_testsuite(self):
        for loaded_content in [
            buildup.load_test_file("tests/testsuites/create_users.yml"),
            buildup.load_test_file("tests/testsuites/create_users.json")
        ]:
            self.assertEqual(loaded_content["type"], "testsuite")
            testcases = loaded_content["testcases"]
            self.assertEqual(len(testcases), 2)
            self.assertIn('create user 1000 and check result.', testcases)
            self.assertIn('testcase_def', testcases["create user 1000 and check result."])
            self.assertEqual(
                testcases["create user 1000 and check result."]["testcase_def"]["config"]["name"],
                "create user and check result."
            )

    def test_load_test_file_testsuite_v2(self):
        for loaded_content in [
            buildup.load_test_file("tests/testsuites/create_users.v2.yml"),
            buildup.load_test_file("tests/testsuites/create_users.v2.json")
        ]:
            self.assertEqual(loaded_content["type"], "testsuite")
            testcases = loaded_content["testcases"]
            self.assertEqual(len(testcases), 2)
            self.assertIn('create user 1000 and check result.', testcases)
            self.assertIn('testcase_def', testcases["create user 1000 and check result."])
            self.assertEqual(
                testcases["create user 1000 and check result."]["testcase_def"]["config"]["name"],
                "create user and check result."
            )

    def test_load_tests_api_file(self):
        path = os.path.join(
            os.getcwd(), 'tests/api/create_user.yml')
        tests_mapping = loader.load_cases(path)
        project_mapping = tests_mapping["project_mapping"]
        api_list = tests_mapping["apis"]
        self.assertEqual(len(api_list), 1)
        self.assertEqual(api_list[0]["request"]["url"], "/api/users/$uid")

    def test_load_tests_testcase_file(self):
        # absolute file path
        path = os.path.join(
            os.getcwd(), 'tests/data/demo_testcase_hardcode.json')
        tests_mapping = loader.load_cases(path)
        project_mapping = tests_mapping["project_mapping"]
        testcases_list = tests_mapping["testcases"]
        self.assertEqual(len(testcases_list), 1)
        self.assertEqual(len(testcases_list[0]["teststeps"]), 3)
        self.assertIn("get_sign", project_mapping["functions"])

        # relative file path
        path = 'tests/data/demo_testcase_hardcode.yml'
        tests_mapping = loader.load_cases(path)
        project_mapping = tests_mapping["project_mapping"]
        testcases_list = tests_mapping["testcases"]
        self.assertEqual(len(testcases_list), 1)
        self.assertEqual(len(testcases_list[0]["teststeps"]), 3)
        self.assertIn("get_sign", project_mapping["functions"])

    def test_load_tests_testcase_file_2(self):
        # variables/functions stay as raw $var / ${func()} expressions at load
        # time; they are only parsed later at run time
        testcase_file_path = os.path.join(
            os.getcwd(), 'tests/data/demo_testcase.yml')
        tests_mapping = loader.load_cases(testcase_file_path)
        testcases = tests_mapping["testcases"]
        self.assertIsInstance(testcases, list)
        self.assertEqual(testcases[0]["config"]["name"], '123t$var_a')
        self.assertIn(
            "sum_two",
            tests_mapping["project_mapping"]["functions"]
        )
        self.assertEqual(
            testcases[0]["config"]["variables"]["var_c"],
            "${sum_two($var_a, $var_b)}"
        )
        self.assertEqual(
            testcases[0]["config"]["variables"]["PROJECT_KEY"],
            "${ENV(PROJECT_KEY)}"
        )

    def test_load_tests_testcase_file_with_api_ref(self):
        path = os.path.join(
            os.getcwd(), 'tests/data/demo_testcase_layer.yml')
        tests_mapping = loader.load_cases(path)
        project_mapping = tests_mapping["project_mapping"]
        testcases_list = tests_mapping["testcases"]
        self.assertIn('device_sn', testcases_list[0]["config"]["variables"])
        self.assertIn("gen_md5", project_mapping["functions"])
        self.assertIn("base_url", testcases_list[0]["config"])
        test_dict0 = testcases_list[0]["teststeps"][0]
        self.assertEqual(
            "get token with $user_agent, $app_version",
            test_dict0["name"]
        )
        self.assertIn("/api/get-token", test_dict0["api_def"]["request"]["url"])
        self.assertIn(
            {'eq': ['status_code', 200]},
            test_dict0["validate"]
        )

    def test_load_tests_testsuite_file_with_testcase_ref(self):
        path = os.path.join(
            os.getcwd(), 'tests/testsuites/create_users.yml')
        tests_mapping = loader.load_cases(path)
        project_mapping = tests_mapping["project_mapping"]
        testsuites_list = tests_mapping["testsuites"]
        self.assertEqual(
            "create users with uid",
            testsuites_list[0]["config"]["name"]
        )
        self.assertEqual(
            '${gen_random_string(15)}',
            testsuites_list[0]["config"]["variables"]['device_sn']
        )
        self.assertIn(
            "create user 1000 and check result.",
            testsuites_list[0]["testcases"]
        )
        self.assertEqual(
            testsuites_list[0]["testcases"]["create user 1000 and check result."]["testcase_def"]["config"]["name"],
            "create user and check result."
        )

    def test_load_tests_folder_path(self):
        # absolute folder path
        path = os.path.join(os.getcwd(), 'tests/data')
        tests_mapping = loader.load_cases(path)
        testcase_list_1 = tests_mapping["testcases"]
        self.assertGreater(len(testcase_list_1), 4)

        # relative folder path must load the same set of testcases
        path = 'tests/data/'
        tests_mapping = loader.load_cases(path)
        testcase_list_2 = tests_mapping["testcases"]
        self.assertEqual(len(testcase_list_1), len(testcase_list_2))

    def test_load_tests_path_not_exist(self):
        # absolute folder path
        path = os.path.join(os.getcwd(), 'tests/data_not_exist')
        with self.assertRaises(exceptions.FileNotFound):
            loader.load_cases(path)

        # relative folder path
        path = 'tests/data_not_exist'
        with self.assertRaises(exceptions.FileNotFound):
            loader.load_cases(path)

    def test_load_project_tests(self):
        buildup.load_project_data(os.path.join(os.getcwd(), "tests"))
        self.assertIn("gen_md5", self.project_mapping["functions"])
        self.assertEqual(self.project_mapping["env"]["PROJECT_KEY"], "ABCDEFGH")
        self.assertEqual(
            os.path.basename(self.project_mapping["PWD"]),
            "tests"
        )
        self.assertEqual(
            os.path.basename(self.project_mapping["test_path"]),
            "tests"
        )

View File

@@ -1,215 +0,0 @@
import io
import json
import os
import platform
import jsonschema
from loguru import logger
from httprunner import exceptions
# directory that bundles the JSON schema files shipped with the package
schemas_root_dir = os.path.join(os.path.dirname(__file__), "schemas")
common_schema_path = os.path.join(schemas_root_dir, "common.schema.json")
api_schema_path = os.path.join(schemas_root_dir, "api.schema.json")
testcase_schema_v1_path = os.path.join(schemas_root_dir, "testcase.schema.v1.json")
testcase_schema_v2_path = os.path.join(schemas_root_dir, "testcase.schema.v2.json")
testsuite_schema_v1_path = os.path.join(schemas_root_dir, "testsuite.schema.v1.json")
testsuite_schema_v2_path = os.path.join(schemas_root_dir, "testsuite.schema.v2.json")

with io.open(api_schema_path, encoding='utf-8') as f:
    api_schema = json.load(f)

with io.open(common_schema_path, encoding='utf-8') as f:
    # build a file:// base URI so jsonschema can resolve "$ref" links
    # between the schema files on disk
    if platform.system() == "Windows":
        # Windows absolute paths need the file:/// form with forward slashes
        absolute_base_path = 'file:///' + os.path.abspath(schemas_root_dir).replace("\\", "/") + '/'
    else:
        # Linux, Darwin
        absolute_base_path = "file://" + os.path.abspath(schemas_root_dir) + "/"
    common_schema = json.load(f)
    # shared resolver used by every validate call below
    resolver = jsonschema.RefResolver(absolute_base_path, common_schema)

with io.open(testcase_schema_v1_path, encoding='utf-8') as f:
    testcase_schema_v1 = json.load(f)
with io.open(testcase_schema_v2_path, encoding='utf-8') as f:
    testcase_schema_v2 = json.load(f)
with io.open(testsuite_schema_v1_path, encoding='utf-8') as f:
    testsuite_schema_v1 = json.load(f)
with io.open(testsuite_schema_v2_path, encoding='utf-8') as f:
    testsuite_schema_v2 = json.load(f)
class JsonSchemaChecker(object):
    """Validate api/testcase/testsuite content against the bundled JSON schemas."""

    @staticmethod
    def validate_format(content, scheme):
        """ check api/testcase/testsuite format if valid

        Args:
            content: loaded api/testcase/testsuite content
            scheme (dict): json schema to validate against

        Returns:
            bool: True if valid

        Raises:
            exceptions.FileFormatError: if content does not match the schema.
        """
        try:
            jsonschema.validate(content, scheme, resolver=resolver)
        except jsonschema.exceptions.ValidationError as ex:
            logger.error(str(ex))
            raise exceptions.FileFormatError
        return True

    @staticmethod
    def validate_api_format(content):
        """ check api format if valid
        """
        return JsonSchemaChecker.validate_format(content, api_schema)

    @staticmethod
    def validate_testcase_v1_format(content):
        """ check testcase format v1 if valid
        """
        return JsonSchemaChecker.validate_format(content, testcase_schema_v1)

    @staticmethod
    def validate_testcase_v2_format(content):
        """ check testcase format v2 if valid
        """
        return JsonSchemaChecker.validate_format(content, testcase_schema_v2)

    @staticmethod
    def validate_testsuite_v1_format(content):
        """ check testsuite format v1 if valid
        """
        return JsonSchemaChecker.validate_format(content, testsuite_schema_v1)

    @staticmethod
    def validate_testsuite_v2_format(content):
        """ check testsuite format v2 if valid
        """
        return JsonSchemaChecker.validate_format(content, testsuite_schema_v2)
def is_test_path(path):
    """ check if path is a valid json/yaml file path or an existing directory.

    Args:
        path (str/list/tuple): file path, directory path, or a list/tuple of them.

    Returns:
        bool: True if every referenced path is an existing directory or an
            existing .json/.yaml/.yml file, otherwise False.
    """
    if isinstance(path, (list, tuple)):
        # a collection is valid only if every element is valid
        return all(is_test_path(each_path) for each_path in path)

    if not isinstance(path, str):
        return False

    if os.path.isdir(path):
        return True

    if os.path.isfile(path):
        # only json/yaml files count as test files
        suffix = os.path.splitext(path)[1].lower()
        return suffix in ('.json', '.yaml', '.yml')

    # path does not exist, or is neither a regular file nor a directory
    # (e.g. a dangling symbol link)
    return False
def is_test_content(data_structure):
    """ check if data_structure is apis/testcases/testsuites.

    Args:
        data_structure (dict): should include keys, apis or testcases or testsuites

    Returns:
        bool: True if data_structure is valid apis/testcases/testsuites,
            otherwise False.
    """
    def _matches_any(item, validators):
        # an item is valid if at least one of the schema validators accepts it
        for validator in validators:
            try:
                validator(item)
                return True
            except exceptions.FileFormatError:
                continue
        return False

    if not isinstance(data_structure, dict):
        return False

    if "apis" in data_structure:
        # maybe a group of api content
        items = data_structure["apis"]
        validators = [JsonSchemaChecker.validate_api_format]
    elif "testcases" in data_structure:
        # maybe a testsuite, containing a group of testcases
        items = data_structure["testcases"]
        # bugfix: the original code validated the v2 testcase schema twice,
        # so valid v1-format testcases were always rejected; try v1 then v2
        validators = [
            JsonSchemaChecker.validate_testcase_v1_format,
            JsonSchemaChecker.validate_testcase_v2_format,
        ]
    elif "testsuites" in data_structure:
        # maybe a group of testsuites
        items = data_structure["testsuites"]
        validators = [
            JsonSchemaChecker.validate_testsuite_v1_format,
            JsonSchemaChecker.validate_testsuite_v2_format,
        ]
    else:
        return False

    if not isinstance(items, list):
        return False

    return all(_matches_any(item, validators) for item in items)

View File

@@ -1,45 +0,0 @@
import unittest
from httprunner.loader import check
class TestLoaderCheck(unittest.TestCase):
    """Tests for httprunner.loader.check.is_test_content()."""

    def test_is_testcases(self):
        # plain strings / lists of paths are not test content structures
        data_structure = "path/to/file"
        self.assertFalse(check.is_test_content(data_structure))
        data_structure = ["path/to/file1", "path/to/file2"]
        self.assertFalse(check.is_test_content(data_structure))

        # a dict with a "testcases" list of schema-valid testcase dicts passes
        data_structure = {
            "project_mapping": {
                "PWD": "XXXXX",
                "functions": {},
                "env": {}
            },
            "testcases": [
                {  # testcase data structure
                    "config": {
                        "name": "desc1",
                        "path": "testcase1_path",
                        "variables": [],  # optional
                    },
                    "teststeps": [
                        # test data structure
                        {
                            'name': 'test step desc1',
                            'variables': [],  # optional
                            'extract': {},  # optional
                            'validate': [],
                            'request': {
                                "method": "GET",
                                "url": "https://docs.httprunner.org"
                            }
                        },
                        # test_dict2 # another test dict
                    ]
                },
                # testcase_dict_2 # another testcase dict
            ]
        }
        self.assertTrue(check.is_test_content(data_structure))

View File

@@ -1,219 +0,0 @@
import csv
import io
import json
import os
import types
import yaml
from loguru import logger
from httprunner import builtin
from httprunner import exceptions, utils
from httprunner.loader.locate import get_project_working_directory
try:
    # PyYAML version >= 5.1
    # ref: https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
    # suppress the YAMLLoadWarning emitted when yaml.load() is called
    # without an explicit Loader argument
    yaml.warnings({'YAMLLoadWarning': False})
except AttributeError:
    # older PyYAML versions have no yaml.warnings() API; nothing to suppress
    pass
def _load_yaml_file(yaml_file):
    """ load yaml file and check file content format

    Args:
        yaml_file (str): yaml file path

    Returns:
        parsed yaml content (typically dict/list; None for an empty file)

    Raises:
        exceptions.FileFormatError: if the file content is not valid YAML.
    """
    with io.open(yaml_file, 'r', encoding='utf-8') as stream:
        try:
            # Pass an explicit Loader: on PyYAML >= 5.1 FullLoader avoids the
            # unsafe default (arbitrary python object construction) and the
            # YAMLLoadWarning; older versions fall back to the legacy Loader.
            yaml_loader = getattr(yaml, "FullLoader", yaml.Loader)
            yaml_content = yaml.load(stream, Loader=yaml_loader)
        except yaml.YAMLError as ex:
            logger.error(str(ex))
            raise exceptions.FileFormatError

        return yaml_content
def _load_json_file(json_file):
    """ load json file and check file content format

    Raises:
        exceptions.FileFormatError: if the file content is not valid JSON.
    """
    with io.open(json_file, encoding='utf-8') as data_file:
        try:
            return json.load(data_file)
        except json.JSONDecodeError:
            err_msg = f"JSONDecodeError: JSON file format error: {json_file}"
            logger.error(err_msg)
            raise exceptions.FileFormatError(err_msg)
def load_csv_file(csv_file):
    """ load csv file and check file content format

    Args:
        csv_file (str): csv file path; the first row is the header:

            username,password
            test1,111111
            test2,222222

    Returns:
        list: one dict per data row, keyed by the header columns, e.g.
            [
                {'username': 'test1', 'password': '111111'},
                {'username': 'test2', 'password': '222222'}
            ]

    Raises:
        exceptions.CSVNotFound: if the resolved csv path does not exist.
    """
    if not os.path.isabs(csv_file):
        pwd = get_project_working_directory()
        # split on "/" and rejoin so relative paths written in yaml/json
        # work on both Windows and Linux
        csv_file = os.path.join(pwd, *csv_file.split("/"))

    if not os.path.isfile(csv_file):
        # file path not exist
        raise exceptions.CSVNotFound(csv_file)

    with io.open(csv_file, encoding='utf-8') as csvfile:
        return list(csv.DictReader(csvfile))
def load_file(file_path):
    """Load a json/yaml/csv file, dispatching on its suffix.

    Raises:
        exceptions.FileNotFound: if file_path is not an existing file.
    """
    if not os.path.isfile(file_path):
        raise exceptions.FileNotFound(f"{file_path} does not exist.")

    loaders = {
        '.json': _load_json_file,
        '.yaml': _load_yaml_file,
        '.yml': _load_yaml_file,
        '.csv': load_csv_file,
    }
    file_suffix = os.path.splitext(file_path)[1].lower()
    loader_func = loaders.get(file_suffix)
    if loader_func is None:
        # '' or other suffix
        logger.warning(f"Unsupported file format: {file_path}")
        return []

    return loader_func(file_path)
def load_folder_files(folder_path, recursive=True):
    """ load folder path, return all files endswith yml/yaml/json in list.

    Args:
        folder_path (str/list/set): folder path to scan, or a collection of
            folder paths (scanned after de-duplication).
        recursive (bool): descend into sub-folders if True.

    Returns:
        list: file paths ending with yml/yaml/json
    """
    if isinstance(folder_path, (list, set)):
        collected = []
        for each_path in set(folder_path):
            collected.extend(load_folder_files(each_path, recursive))
        return collected

    if not os.path.exists(folder_path):
        return []

    matched_files = []
    for dirpath, _dirnames, filenames in os.walk(folder_path):
        matched_files.extend(
            os.path.join(dirpath, fname)
            for fname in filenames
            if fname.endswith(('.yml', '.yaml', '.json'))
        )
        if not recursive:
            # only the top-level directory was requested
            break

    return matched_files
def load_dot_env_file(dot_env_path):
    """ load .env file.

    Args:
        dot_env_path (str): .env file path

    Returns:
        dict: environment variables mapping
            {
                "UserName": "debugtalk",
                "Password": "123456",
                "PROJECT_KEY": "ABCDEFGH"
            }
            Returns {} if the file does not exist.

    Raises:
        exceptions.FileFormatError: If a non-empty, non-comment line contains
            neither "=" nor ":".
    """
    if not os.path.isfile(dot_env_path):
        return {}

    logger.info(f"Loading environment variables from {dot_env_path}")
    env_variables_mapping = {}

    with io.open(dot_env_path, 'r', encoding='utf-8') as fp:
        for line in fp:
            # robustness fix: tolerate blank lines and "#" comment lines,
            # both common in .env files (previously they raised FileFormatError)
            stripped = line.strip()
            if not stripped or stripped.startswith("#"):
                continue
            # maxsplit=1: only the first separator splits variable from value
            if "=" in stripped:
                variable, value = stripped.split("=", 1)
            elif ":" in stripped:
                variable, value = stripped.split(":", 1)
            else:
                raise exceptions.FileFormatError(".env format error")

            env_variables_mapping[variable.strip()] = value.strip()

    # publish loaded variables into os.environ for later ${ENV(...)} lookups
    utils.set_os_environ(env_variables_mapping)
    return env_variables_mapping
def load_module_functions(module):
    """ load python module functions.

    Args:
        module: python module

    Returns:
        dict: mapping of name -> function for every plain function defined
            in the module's namespace, e.g.
            {
                "func1_name": func1,
                "func2_name": func2
            }
    """
    # only plain python functions qualify; classes, constants and
    # builtin functions are filtered out
    return {
        name: attr
        for name, attr in vars(module).items()
        if isinstance(attr, types.FunctionType)
    }
def load_builtin_functions():
    """ load builtin module functions

    Returns:
        dict: mapping of function name to function object for every plain
            function defined in the httprunner.builtin module.
    """
    return load_module_functions(builtin)

View File

@@ -1,153 +0,0 @@
import os
import unittest
from httprunner import exceptions
from httprunner.loader import load
from httprunner.loader.buildup import load_test_file
class TestFileLoader(unittest.TestCase):
    """Tests for file loading helpers (yaml/json/csv/.env/folder scan)."""

    def test_load_yaml_file_file_format_error(self):
        yaml_tmp_file = "tests/data/tmp.yml"

        # create empty yaml file
        with open(yaml_tmp_file, 'w') as f:
            f.write("")

        with self.assertRaises(exceptions.FileFormatError):
            load_test_file(yaml_tmp_file)

        os.remove(yaml_tmp_file)

        # create invalid format yaml file
        with open(yaml_tmp_file, 'w') as f:
            f.write("abc")

        with self.assertRaises(exceptions.FileFormatError):
            load_test_file(yaml_tmp_file)

        os.remove(yaml_tmp_file)

    def test_load_json_file_file_format_error(self):
        json_tmp_file = "tests/data/tmp.json"

        # create empty file
        with open(json_tmp_file, 'w') as f:
            f.write("")

        with self.assertRaises(exceptions.FileFormatError):
            load_test_file(json_tmp_file)

        os.remove(json_tmp_file)

        # create empty json file
        with open(json_tmp_file, 'w') as f:
            f.write("{}")

        with self.assertRaises(exceptions.FileFormatError):
            load_test_file(json_tmp_file)

        os.remove(json_tmp_file)

        # create invalid format json file
        with open(json_tmp_file, 'w') as f:
            f.write("abc")

        with self.assertRaises(exceptions.FileFormatError):
            load_test_file(json_tmp_file)

        os.remove(json_tmp_file)

    def test_load_testcases_bad_filepath(self):
        # path without a recognised suffix and not an existing file
        testcase_file_path = os.path.join(os.getcwd(), 'tests/data/demo')
        with self.assertRaises(exceptions.FileNotFound):
            load.load_file(testcase_file_path)

    def test_load_json_testcases(self):
        testcase_file_path = os.path.join(
            os.getcwd(), 'tests/data/demo_testcase_hardcode.json')
        testcases = load.load_file(testcase_file_path)
        self.assertEqual(len(testcases), 3)
        test = testcases[0]["test"]
        self.assertIn('name', test)
        self.assertIn('request', test)
        self.assertIn('url', test['request'])
        self.assertIn('method', test['request'])

    def test_load_yaml_testcases(self):
        testcase_file_path = os.path.join(
            os.getcwd(), 'tests/data/demo_testcase_hardcode.yml')
        testcases = load.load_file(testcase_file_path)
        self.assertEqual(len(testcases), 3)
        test = testcases[0]["test"]
        self.assertIn('name', test)
        self.assertIn('request', test)
        self.assertIn('url', test['request'])
        self.assertIn('method', test['request'])

    def test_load_csv_file_one_parameter(self):
        csv_file_path = os.path.join(
            os.getcwd(), 'tests/data/user_agent.csv')
        csv_content = load.load_file(csv_file_path)
        self.assertEqual(
            csv_content,
            [
                {'user_agent': 'iOS/10.1'},
                {'user_agent': 'iOS/10.2'},
                {'user_agent': 'iOS/10.3'}
            ]
        )

    def test_load_csv_file_multiple_parameters(self):
        csv_file_path = os.path.join(
            os.getcwd(), 'tests/data/account.csv')
        csv_content = load.load_file(csv_file_path)
        self.assertEqual(
            csv_content,
            [
                {'username': 'test1', 'password': '111111'},
                {'username': 'test2', 'password': '222222'},
                {'username': 'test3', 'password': '333333'}
            ]
        )

    def test_load_folder_files(self):
        folder = os.path.join(os.getcwd(), 'tests')
        file1 = os.path.join(os.getcwd(), 'tests', 'test_utils.py')
        file2 = os.path.join(os.getcwd(), 'tests', 'api', 'reset_all.yml')

        # tests/ has no yml/yaml/json directly at its top level
        files = load.load_folder_files(folder, recursive=False)
        self.assertEqual(files, [])

        # recursive scan picks up yml in sub-folders but never .py files
        files = load.load_folder_files(folder)
        self.assertIn(file2, files)
        self.assertNotIn(file1, files)

        files = load.load_folder_files("not_existed_foulder", recursive=False)
        self.assertEqual([], files)

        # passing a file path instead of a folder yields nothing
        files = load.load_folder_files(file2, recursive=False)
        self.assertEqual([], files)

    def test_load_dot_env_file(self):
        dot_env_path = os.path.join(
            os.getcwd(), "tests", ".env"
        )
        env_variables_mapping = load.load_dot_env_file(dot_env_path)
        self.assertIn("PROJECT_KEY", env_variables_mapping)
        self.assertEqual(env_variables_mapping["UserName"], "debugtalk")

    def test_load_custom_dot_env_file(self):
        dot_env_path = os.path.join(
            os.getcwd(), "tests", "data", "test.env"
        )
        env_variables_mapping = load.load_dot_env_file(dot_env_path)
        self.assertIn("PROJECT_KEY", env_variables_mapping)
        self.assertEqual(env_variables_mapping["UserName"], "test")
        self.assertEqual(env_variables_mapping["content_type"], "application/json; charset=UTF-8")

    def test_load_env_path_not_exist(self):
        # a directory (not a file) is treated as "no .env file"
        dot_env_path = os.path.join(
            os.getcwd(), "tests", "data",
        )
        env_variables_mapping = load.load_dot_env_file(dot_env_path)
        self.assertEqual(env_variables_mapping, {})

View File

@@ -1,123 +0,0 @@
import os
import sys
from loguru import logger
from httprunner import exceptions
# module-level cache for the project working directory (PWD); set by
# init_project_working_directory() and read by get_project_working_directory()
project_working_directory = None
def locate_file(start_path, file_name):
    """ locate filename and return absolute file path.
    searching will be recursive upward until current working directory or system root dir.

    Args:
        file_name (str): target locate file name
        start_path (str): start locating path, maybe file path or directory path

    Returns:
        str: located absolute file path.

    Raises:
        exceptions.FileNotFound: if start_path is invalid, or searching
            reached os.getcwd() or the filesystem root without a match.
    """
    if os.path.isfile(start_path):
        current_dir = os.path.dirname(start_path)
    elif os.path.isdir(start_path):
        current_dir = start_path
    else:
        raise exceptions.FileNotFound(f"invalid path: {start_path}")

    candidate = os.path.join(current_dir, file_name)
    if os.path.isfile(candidate):
        return os.path.abspath(candidate)

    # stop at the current working directory
    reached_cwd = os.path.abspath(current_dir) == os.getcwd()
    # stop at the system root dir:
    # Windows, e.g. 'E:\\'; Linux/Darwin, '/' — its dirname is itself
    parent_dir = os.path.dirname(current_dir)
    reached_root = parent_dir == current_dir
    if reached_cwd or reached_root:
        raise exceptions.FileNotFound(f"{file_name} not found in {start_path}")

    # locate recursive upward
    return locate_file(parent_dir, file_name)
def locate_debugtalk_py(start_path):
    """ locate debugtalk.py file

    Args:
        start_path (str): start locating path,
            maybe testcase file path or directory path

    Returns:
        str: debugtalk.py file path, None if not found
    """
    try:
        return locate_file(start_path, "debugtalk.py")
    except exceptions.FileNotFound:
        # absence of debugtalk.py is a normal condition, not an error
        return None
def init_project_working_directory(test_path):
    """ this should be called at startup

        run test file:
            run_path -> load_cases -> load_project_data -> init_project_working_directory
        or run passed in data structure:
            run -> init_project_working_directory

    Args:
        test_path: specified testfile path

    Returns:
        (str, str): debugtalk.py path, project_working_directory

    Raises:
        exceptions.FileNotFound: if test_path does not exist.
    """
    def _ensure_absolute(path):
        # validate existence, then normalize to an absolute path
        if not os.path.exists(path):
            err_msg = f"path not exist: {path}"
            logger.error(err_msg)
            raise exceptions.FileNotFound(err_msg)
        if os.path.isabs(path):
            return path
        return os.path.join(os.getcwd(), path)

    test_path = _ensure_absolute(test_path)
    debugtalk_path = locate_debugtalk_py(test_path)

    global project_working_directory
    if debugtalk_path:
        # The folder contains debugtalk.py will be treated as PWD.
        project_working_directory = os.path.dirname(debugtalk_path)
    else:
        # debugtalk.py not found, use os.getcwd() as PWD.
        project_working_directory = os.getcwd()

    # add PWD to sys.path so debugtalk.py and local modules are importable
    sys.path.insert(0, project_working_directory)

    return debugtalk_path, project_working_directory
def get_project_working_directory():
    """Return the cached project working directory (PWD).

    Raises:
        exceptions.MyBaseFailure: if init_project_working_directory() has not
            run yet (i.e. loader.load_cases() was never called).
    """
    global project_working_directory
    if project_working_directory is None:
        raise exceptions.MyBaseFailure("loader.load_cases() has not been called!")
    return project_working_directory

View File

@@ -1,40 +0,0 @@
import os
import unittest
from httprunner import exceptions
from httprunner.loader import locate
class TestLoaderLocate(unittest.TestCase):
    """Tests for locate.locate_file upward-search behaviour."""

    def test_locate_file(self):
        # starting at the repo root: search stops at cwd without a match
        with self.assertRaises(exceptions.FileNotFound):
            locate.locate_file(os.getcwd(), "debugtalk.py")

        # empty string is neither a file nor a directory -> invalid path
        with self.assertRaises(exceptions.FileNotFound):
            locate.locate_file("", "debugtalk.py")

        # absolute directory start path
        start_path = os.path.join(os.getcwd(), "tests")
        self.assertEqual(
            locate.locate_file(start_path, "debugtalk.py"),
            os.path.join(
                os.getcwd(), "tests/debugtalk.py"
            )
        )
        # relative directory start paths (with and without trailing slash)
        self.assertEqual(
            locate.locate_file("tests/", "debugtalk.py"),
            os.path.join(os.getcwd(), "tests", "debugtalk.py")
        )
        self.assertEqual(
            locate.locate_file("tests", "debugtalk.py"),
            os.path.join(os.getcwd(), "tests", "debugtalk.py")
        )
        # file start path: the containing directory is searched
        self.assertEqual(
            locate.locate_file("tests/base.py", "debugtalk.py"),
            os.path.join(os.getcwd(), "tests", "debugtalk.py")
        )
        # nested file start path: search walks upward to tests/
        self.assertEqual(
            locate.locate_file("tests/data/demo_testcase.yml", "debugtalk.py"),
            os.path.join(os.getcwd(), "tests", "debugtalk.py")
        )

View File

@@ -1,59 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema",
"description": "httprunner api schema definition",
"type": "object",
"properties": {
"name": {
"$ref": "common.schema.json#/definitions/name"
},
"base_url": {
"$ref": "common.schema.json#/definitions/base_url"
},
"variables": {
"$ref": "common.schema.json#/definitions/variables"
},
"request": {
"$ref": "common.schema.json#/definitions/request"
},
"setup_hooks": {
"$ref": "common.schema.json#/definitions/hook"
},
"teardown_hooks": {
"$ref": "common.schema.json#/definitions/hook"
},
"extract": {
"$ref": "common.schema.json#/definitions/extract"
},
"validate": {
"$ref": "common.schema.json#/definitions/validate"
}
},
"required": [
"name",
"request"
],
"examples": [
{
"name": "demo api",
"variables": {
"var1": "value1",
"var2": "value2"
},
"request": {
"url": "/api/path/$var1",
"method": "POST",
"headers": {
"Content-Type": "application/json"
},
"json": {
"key": "$var2"
},
"validate": [
{
"eq": ["status_code", 200]
}
]
}
}
]
}

View File

@@ -1,392 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema",
"description": "common json schema definitions for httprunner api/testcase/testsuite",
"definitions": {
"name": {
"description": "used as api/teststep/testcase/testsuite identification",
"type": "string",
"examples": [
"basic test for httpbin"
]
},
"base_url": {
"description": "The base_url will be used with relative URI",
"type": "string",
"examples": [
"https://httpbin.org"
]
},
"variables": {
"description": "define variables for api/teststep/testcase/testsuite",
"oneOf": [
{
"type": "object",
"examples": [
{
"var1": "value1",
"var2": "value2"
}
]
},
{
"type": "array",
"items": {
"type": "object",
"maxProperties": 1,
"minProperties": 1
},
"examples": [
[
{
"var1": "value1"
},
{
"var2": "value2"
}
]
]
},
{
"type": "string",
"pattern": "^\\$.*",
"examples": [
"$prepared_variables",
"${prepare_variables()}",
"${prepare_variables($a, $b)}"
]
}
]
},
"verify": {
"description": "whether to verify the servers TLS certificate",
"type": "boolean",
"examples": [
true,
false
]
},
"hook": {
"description": "used to define setup_hooks/teardown_hooks for api/teststep/testcase",
"type": "array",
"items": {
"oneOf": [
{
"description": "call setup/teardown hook functions, return nothing",
"type": "string",
"examples": [
[
"${sleep(2)}",
"${hook_print(setup)}",
"${modify_request_json($request, android)}",
"${alter_response($response)}"
]
]
},
{
"description": "call setup/teardown hook functions, return value and assign to variable",
"type": "object",
"examples": [
{
"total": "${sum_two(1, 5)}"
},
{
"field_name": "${get_decoded_response_field($response)}"
}
]
}
]
}
},
"config": {
"description": "used in testcase/testsuite to configure common fields",
"type": "object",
"properties": {
"name": {
"$ref": "#/definitions/name"
},
"base_url": {
"$ref": "#/definitions/base_url"
},
"variables": {
"$ref": "#/definitions/variables"
},
"setup_hooks": {
"$ref": "#/definitions/hook"
},
"teardown_hooks": {
"$ref": "#/definitions/hook"
},
"verify": {
"$ref": "#/definitions/verify"
}
},
"required": ["name"]
},
"request": {
"description": "used to define a api request. properties is the same as python package `requests.request`",
"type": "object",
"properties": {
"method": {
"type": "string",
"description": "request method",
"enum": [
"GET",
"POST",
"OPTIONS",
"HEAD",
"PUT",
"PATCH",
"DELETE",
"CONNECT",
"TRACE"
]
},
"url": {
"description": "request url, may be absolute or relative URI",
"type": "string",
"examples": [
"http://httpbin.org/get?a=1&b=2",
"/get?a=1&b=2",
"get?a=1&b=2"
]
},
"params": {
"description": "query string for request url",
"type": "object",
"examples": [
{
"a": 1,
"b": 2
}
]
},
"data": {
"anyOf": [
{
"description": "request body in json format",
"type": "object",
"examples": [
{
"a": 1,
"b": 2
}
]
},
{
"description": "request body in application/x-www-form-urlencoded format",
"type": "string",
"examples": [
"a=1&b=2"
]
},
{
"description": "request body prepared with function, or reference a variable",
"type": "string",
"examples": [
"$post_data",
"${prepare_data($a, $b)}"
]
}
]
},
"json": {
"oneOf": [
{
"description": "request body in json format",
"type": "object"
},
{
"description": "request body prepared with function, or reference a variable",
"type": "string",
"pattern": "^\\$.*",
"examples": [
"$post_data",
"${prepare_post_data($a, $b)}"
]
}
]
},
"headers": {
"description": "request headers",
"oneOf": [
{
"description": "request headers in json format",
"type": "object",
"examples": [
{
"User-Agent": "python-requests/2.18.4",
"Content-Type": "application/json"
}
]
},
{
"description": "request headers prepared with function, or reference a variable",
"type": "string",
"examples": [
"$prepared_headers",
"${prepare_headers($a, $b)}"
]
}
]
},
"cookies": {
"description": "request cookies",
"type": "object"
},
"files": {
"description": "request files, used to upload files",
"type": "object"
},
"auth": {
"description": "Auth tuple to enable Basic/Digest/Custom HTTP Auth.",
"type": "array"
},
"timeout": {
"description": "How many seconds to wait for the server to send data before giving up",
"type": "number",
"examples": [
120
]
},
"allow_redirects": {
"description": "Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to True",
"type": "boolean"
},
"proxies": {
"description": "Dictionary mapping protocol to the URL of the proxy",
"type": "object"
},
"verify": {
"description": "configure verify for current api/teststep",
"$ref": "#/definitions/verify"
},
"stream": {
"description": "if False, the response content will be immediately downloaded.",
"type": "boolean"
},
"upload": {
"description": "upload files",
"type": "object",
"examples": [
{
"file": "data/file_to_upload",
"md5": "123"
}
]
}
},
"required": [
"method",
"url"
]
},
"extract": {
"description": "used to extract session variables for later requests",
"oneOf": [
{
"type": "object",
"patternProperties": {
"^[A-Za-z_][A-Za-z0-9_]*$": {
"description": "extraction rule for session variable, maybe in jsonpath/regex/jmespath",
"type": "string"
}
},
"examples": [
{
"code__by_jsonpath": "$.code",
"item_id__by_jsonpath": "$..items.*.id",
"var_name__by_regex": "\"LB[\\d]*(.*)RB[\\d]*\"",
"content_type": "headers.content-type",
"first_name": "content.person.name.first_name"
}
]
},
{
"type": "array",
"items": {
"type": "object",
"patternProperties": {
"^[A-Za-z_][A-Za-z0-9_]*$": {
"description": "extraction rule for session variable, maybe in jsonpath/regex/jmespath",
"type": "string"
}
},
"minProperties": 1,
"maxProperties": 1
},
"examples": [
{
"code__by_jsonpath": "$.code"
},
{
"item_id__by_jsonpath": "$..items.*.id"
},
{
"var_name__by_regex": "\"LB[\\d]*(.*)RB[\\d]*\""
},
{
"content_type": "headers.content-type"
},
{
"first_name": "content.person.name.first_name"
}
]
}
]
},
"validate": {
"description": "used to validate response fields",
"type": "array",
"items": {
"description": "one validator definition",
"oneOf": [
{
"type": "object",
"properties": {
"check": {
"type": "string"
},
"comparator": {
"type": "string"
},
"expect": {
"description": "expected value"
}
},
"required": ["check", "expect"],
"examples": [
{
"check": "body.code",
"comparator": "gt",
"expect": 0
},
{
"check": "status_code",
"expect": 200
}
]
},
{
"type": "object",
"patternProperties": {
"^[A-Za-z_][A-Za-z0-9_]*$": {
"description": "validate_func_name: [check_value, expect_value]",
"type": "array",
"minItems": 2,
"maxItems": 2
}
},
"examples": [
{
"eq": ["status_code", 200]
},
{
"gt": ["body.code", 0]
}
]
}
]
}
}
}
}

View File

@@ -1,138 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema",
"description": "httprunner testcase schema v1 definition",
"type": "array",
"definitions": {
"test": {
"type": "object",
"oneOf": [
{
"properties": {
"name": {
"$ref": "common.schema.json#/definitions/name"
},
"request": {
"description": "define api request directly",
"$ref": "common.schema.json#/definitions/request"
},
"variables": {
"$ref": "common.schema.json#/definitions/variables"
},
"extract": {
"$ref": "common.schema.json#/definitions/extract"
},
"validate": {
"$ref": "common.schema.json#/definitions/validate"
},
"setup_hooks": {
"$ref": "common.schema.json#/definitions/hook"
},
"teardown_hooks": {
"$ref": "common.schema.json#/definitions/hook"
}
},
"required": [
"name",
"request"
]
},
{
"properties": {
"name": {
"$ref": "common.schema.json#/definitions/name"
},
"api": {
"description": "api reference, value is api file relative path",
"type": "string"
},
"variables": {
"$ref": "common.schema.json#/definitions/variables"
},
"extract": {
"oneOf": [
{
"type": "array",
"items": {
"type": "string"
}
},
{
"$ref": "common.schema.json#/definitions/extract"
}
]
},
"validate": {
"$ref": "common.schema.json#/definitions/validate"
},
"setup_hooks": {
"$ref": "common.schema.json#/definitions/hook"
},
"teardown_hooks": {
"$ref": "common.schema.json#/definitions/hook"
}
},
"required": [
"name",
"api"
]
},
{
"properties": {
"name": {
"$ref": "common.schema.json#/definitions/name"
},
"testcase": {
"description": "testcase reference, value is testcase file relative path",
"type": "string"
},
"variables": {
"$ref": "common.schema.json#/definitions/variables"
},
"extract": {
"type": "array",
"items": {
"type": "string"
}
},
"setup_hooks": {
"$ref": "common.schema.json#/definitions/hook"
},
"teardown_hooks": {
"$ref": "common.schema.json#/definitions/hook"
}
},
"required": [
"name",
"testcase"
]
}
]
}
},
"items": {
"type": "object",
"oneOf": [
{
"type": "object",
"properties": {
"config": {
"$ref": "common.schema.json#/definitions/config"
}
},
"additionalProperties": false
},
{
"type": "object",
"properties": {
"test": {
"$ref": "testcase.schema.v1.json#/definitions/test"
}
},
"additionalProperties": false
}
],
"minProperties": 1,
"maxProperties": 1
},
"minItems": 2
}

View File

@@ -1,184 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema",
"description": "httprunner testcase schema v2 definition",
"type": "object",
"definitions": {
"teststep": {
"type": "object",
"oneOf": [
{
"properties": {
"name": {
"$ref": "common.schema.json#/definitions/name"
},
"request": {
"description": "define api request directly",
"$ref": "common.schema.json#/definitions/request"
},
"variables": {
"$ref": "common.schema.json#/definitions/variables"
},
"extract": {
"$ref": "common.schema.json#/definitions/extract"
},
"validate": {
"$ref": "common.schema.json#/definitions/validate"
},
"setup_hooks": {
"$ref": "common.schema.json#/definitions/hook"
},
"teardown_hooks": {
"$ref": "common.schema.json#/definitions/hook"
}
},
"required": [
"name",
"request"
]
},
{
"properties": {
"name": {
"$ref": "common.schema.json#/definitions/name"
},
"api": {
"description": "api reference, value is api file relative path",
"type": "string"
},
"variables": {
"$ref": "common.schema.json#/definitions/variables"
},
"extract": {
"oneOf": [
{
"type": "array",
"items": {
"type": "string"
}
},
{
"$ref": "common.schema.json#/definitions/extract"
}
]
},
"validate": {
"$ref": "common.schema.json#/definitions/validate"
},
"setup_hooks": {
"$ref": "common.schema.json#/definitions/hook"
},
"teardown_hooks": {
"$ref": "common.schema.json#/definitions/hook"
}
},
"required": [
"name",
"api"
]
},
{
"properties": {
"name": {
"$ref": "common.schema.json#/definitions/name"
},
"testcase": {
"description": "testcase reference, value is testcase file relative path",
"type": "string"
},
"variables": {
"$ref": "common.schema.json#/definitions/variables"
},
"extract": {
"type": "array",
"items": {
"type": "string"
}
},
"setup_hooks": {
"$ref": "common.schema.json#/definitions/hook"
},
"teardown_hooks": {
"$ref": "common.schema.json#/definitions/hook"
}
},
"required": [
"name",
"testcase"
]
}
]
}
},
"properties": {
"config": {
"$ref": "common.schema.json#/definitions/config"
},
"teststeps": {
"description": "teststep of a testcase",
"type": "array",
"minItems": 1,
"items": {
"$ref": "testcase.schema.v2.json#/definitions/teststep"
}
}
},
"required": [
"config",
"teststeps"
],
"examples": [
{
"config": {
"name": "testcase name"
},
"teststeps": [
{
"name": "api 1",
"api": "/path/to/api1"
},
{
"name": "api 2",
"api": "/path/to/api2"
}
]
},
{
"config": {
"name": "demo testcase",
"variables": {
"device_sn": "ABC",
"username": "${ENV(USERNAME)}",
"password": "${ENV(PASSWORD)}"
},
"base_url": "http://127.0.0.1:5000"
},
"teststeps": [
{
"name": "demo step 1",
"api": "path/to/api1.yml",
"variables": {
"user_agent": "iOS/10.3",
"device_sn": "$device_sn"
},
"extract": [
{
"token": "content.token"
}
],
"validate": [
{
"eq": ["status_code", 200]
}
]
},
{
"name": "demo step 2",
"api": "path/to/api2.yml",
"variables": {
"token": "$token"
}
}
]
}
]
}

View File

@@ -1,66 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema",
"description": "httprunner testsuite schema v1 definition",
"type": "object",
"definitions": {
"testcase": {
"type": "object",
"properties": {
"name": {
"$ref": "common.schema.json#/definitions/name"
},
"variables": {
"$ref": "common.schema.json#/definitions/variables"
},
"parameters": {
"description": "generate cartesian product variables with parameters, each group of variables will be run once",
"type": "object"
},
"testcase": {
"description": "testcase reference, value is testcase file relative path",
"type": "string"
}
},
"required": [
"testcase"
]
}
},
"properties": {
"config": {
"$ref": "common.schema.json#/definitions/config"
},
"testcases": {
"description": "testcase of a testsuite",
"type": "object",
"minProperties": 1,
"patternProperties": {
".*": {
"description": "testcase definition",
"$ref": "testsuite.schema.v1.json#/definitions/testcase"
}
}
}
},
"required": [
"config",
"testcases"
],
"examples": [
{
"config": {
"name": "testsuite name"
},
"testcases": {
"testcase 1": {
"name": "testcase 1",
"testcase": "/path/to/testcase1"
},
"testcase 2": {
"name": "testcase 2",
"testcase": "/path/to/testcase2"
}
}
}
]
}

View File

@@ -1,88 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema",
"description": "httprunner testsuite schema v2 definition",
"type": "object",
"definitions": {
"testcase": {
"type": "object",
"properties": {
"name": {
"$ref": "common.schema.json#/definitions/name"
},
"variables": {
"$ref": "common.schema.json#/definitions/variables"
},
"parameters": {
"description": "generate cartesian product variables with parameters, each group of variables will be run once",
"type": "object"
},
"testcase": {
"description": "testcase reference, value is testcase file relative path",
"type": "string"
}
},
"required": [
"testcase"
]
}
},
"properties": {
"config": {
"$ref": "common.schema.json#/definitions/config"
},
"testcases": {
"description": "testcase of a testsuite",
"type": "array",
"minItems": 1,
"items": {
"$ref": "testsuite.schema.v2.json#/definitions/testcase"
}
}
},
"required": [
"config",
"testcases"
],
"examples": [
{
"config": {
"name": "testsuite name"
},
"testcases": [
{
"name": "testcase 1",
"testcase": "/path/to/testcase1"
},
{
"name": "testcase 2",
"testcase": "/path/to/testcase2"
}
]
},
{
"config": {
"name": "demo testsuite",
"variables": {
"device_sn": "XYZ"
},
"base_url": "http://127.0.0.1:5000"
},
"testcases": [
{
"name": "call demo_testcase with data 1",
"testcase": "path/to/demo_testcase.yml",
"variables": {
"device_sn": "$device_sn"
}
},
{
"name": "call demo_testcase with data 2",
"testcase": "path/to/demo_testcase.yml",
"variables": {
"device_sn": "$device_sn"
}
}
]
}
]
}

127
httprunner/loader_test.py Normal file
View File

@@ -0,0 +1,127 @@
import os
import tempfile
import unittest

from httprunner import exceptions, loader
class TestLoader(unittest.TestCase):
    """Unit tests for file-loading helpers in httprunner.loader."""

    def test_load_testcase_file(self):
        """Loading a YAML testcase returns both the raw dict and the parsed object."""
        path = "examples/postman_echo/request_methods/request_with_variables.yml"
        testcase_json, testcase_obj = loader.load_testcase_file(path)
        self.assertEqual(
            testcase_json["config"]["name"], "request methods testcase with variables"
        )
        self.assertEqual(
            testcase_obj.config.name, "request methods testcase with variables"
        )
        self.assertEqual(len(testcase_json["teststeps"]), 3)
        self.assertEqual(len(testcase_obj.teststeps), 3)

    def test_load_json_file_file_format_error(self):
        """Empty or non-JSON content should raise FileFormatError; '{}' is valid."""
        # Use the platform temp dir instead of a hard-coded "/tmp" so this
        # test also runs on Windows (CI matrix includes windows-latest).
        json_tmp_file = os.path.join(tempfile.gettempdir(), "tmp.json")

        # create empty file
        with open(json_tmp_file, "w") as f:
            f.write("")
        with self.assertRaises(exceptions.FileFormatError):
            loader._load_json_file(json_tmp_file)
        os.remove(json_tmp_file)

        # create empty json file
        with open(json_tmp_file, "w") as f:
            f.write("{}")
        loader._load_json_file(json_tmp_file)
        os.remove(json_tmp_file)

        # create invalid format json file
        with open(json_tmp_file, "w") as f:
            f.write("abc")
        with self.assertRaises(exceptions.FileFormatError):
            loader._load_json_file(json_tmp_file)
        os.remove(json_tmp_file)

    def test_load_testcases_bad_filepath(self):
        """A non-existent path should raise FileNotFound."""
        testcase_file_path = os.path.join(os.getcwd(), "tests/data/demo")
        with self.assertRaises(exceptions.FileNotFound):
            loader.load_testcase_file(testcase_file_path)

    def test_load_csv_file_one_parameter(self):
        """A one-column CSV yields a list of single-key dicts."""
        csv_file_path = os.path.join(os.getcwd(), "examples/httpbin/user_agent.csv")
        csv_content = loader.load_csv_file(csv_file_path)
        self.assertEqual(
            csv_content,
            [
                {"user_agent": "iOS/10.1"},
                {"user_agent": "iOS/10.2"},
                {"user_agent": "iOS/10.3"},
            ],
        )

    def test_load_csv_file_multiple_parameters(self):
        """A multi-column CSV yields one dict per row, keyed by header."""
        csv_file_path = os.path.join(os.getcwd(), "examples/httpbin/account.csv")
        csv_content = loader.load_csv_file(csv_file_path)
        self.assertEqual(
            csv_content,
            [
                {"username": "test1", "password": "111111"},
                {"username": "test2", "password": "222222"},
                {"username": "test3", "password": "333333"},
            ],
        )

    def test_load_folder_files(self):
        """load_folder_files only returns YAML/JSON files, recursively on demand."""
        folder = os.path.join(os.getcwd(), "examples")
        file1 = os.path.join(os.getcwd(), "examples", "test_utils.py")
        file2 = os.path.join(os.getcwd(), "examples", "httpbin", "hooks.yml")

        files = loader.load_folder_files(folder, recursive=False)
        self.assertEqual(files, [])

        files = loader.load_folder_files(folder)
        self.assertIn(file2, files)
        self.assertNotIn(file1, files)

        # non-existent folder and a file path both yield an empty list
        files = loader.load_folder_files("not_existed_foulder", recursive=False)
        self.assertEqual([], files)

        files = loader.load_folder_files(file2, recursive=False)
        self.assertEqual([], files)

    def test_load_custom_dot_env_file(self):
        """A custom .env file is parsed into an environment mapping."""
        dot_env_path = os.path.join(os.getcwd(), "examples", "httpbin", "test.env")
        env_variables_mapping = loader.load_dot_env_file(dot_env_path)
        self.assertIn("PROJECT_KEY", env_variables_mapping)
        self.assertEqual(env_variables_mapping["UserName"], "test")
        self.assertEqual(
            env_variables_mapping["content_type"], "application/json; charset=UTF-8"
        )

    def test_load_env_path_not_exist(self):
        """A missing .env path yields an empty mapping instead of raising."""
        dot_env_path = os.path.join(os.getcwd(), "tests", "data")
        env_variables_mapping = loader.load_dot_env_file(dot_env_path)
        self.assertEqual(env_variables_mapping, {})

    def test_locate_file(self):
        """locate_file searches upward from start_path and raises when absent."""
        with self.assertRaises(exceptions.FileNotFound):
            loader.locate_file(os.getcwd(), "debugtalk.py")

        with self.assertRaises(exceptions.FileNotFound):
            loader.locate_file("", "debugtalk.py")

        start_path = os.path.join(os.getcwd(), "examples", "httpbin")
        self.assertEqual(
            loader.locate_file(start_path, "debugtalk.py"),
            os.path.join(os.getcwd(), "examples/httpbin/debugtalk.py"),
        )
        self.assertEqual(
            loader.locate_file("examples/httpbin/", "debugtalk.py"),
            os.path.join(os.getcwd(), "examples", "httpbin", "debugtalk.py"),
        )
        self.assertEqual(
            loader.locate_file("examples/httpbin/", "debugtalk.py"),
            os.path.join(os.getcwd(), "examples/httpbin/debugtalk.py"),
        )

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,20 +0,0 @@
"""
HttpRunner report
- summarize: aggregate test stat data to summary
- stringify: stringify summary, in order to dump json file and generate html report.
- html: render html report
"""
from httprunner.report.summarize import get_platform, aggregate_stat, get_summary
from httprunner.report.stringify import stringify_summary
from httprunner.report.html import HtmlTestResult, gen_html_report
__all__ = [
"get_platform",
"aggregate_stat",
"get_summary",
"stringify_summary",
"HtmlTestResult",
"gen_html_report"
]

View File

@@ -1,15 +0,0 @@
"""
HttpRunner html report
- result: define resultclass for unittest TextTestRunner
- gen_report: render html report with jinja2 template
"""
from httprunner.report.html.result import HtmlTestResult
from httprunner.report.html.gen_report import gen_html_report
__all__ = [
"HtmlTestResult",
"gen_html_report"
]

View File

@@ -1,64 +0,0 @@
import io
import os
from datetime import datetime
from jinja2 import Template
from loguru import logger
from httprunner.exceptions import SummaryEmpty
def gen_html_report(summary, report_template=None, report_dir=None, report_file=None):
    """ render html report with specified report name and template

    Args:
        summary (dict): test result summary data
        report_template (str): specify html report template path, template should be in Jinja2 format.
        report_dir (str): specify html report save directory
        report_file (str): specify html report file path, this has higher priority than specifying report dir.

    Returns:
        str: generated html report file path

    Raises:
        SummaryEmpty: if summary has no time data or zero testcases.
    """
    if not summary["time"] or summary["stat"]["testcases"]["total"] == 0:
        logger.error(f"test result summary is empty ! {summary}")
        raise SummaryEmpty

    if not report_template:
        # fall back to the template shipped alongside this module
        report_template = os.path.join(
            os.path.abspath(os.path.dirname(__file__)), "template.html"
        )
        logger.debug("No html report template specified, use default.")
    else:
        logger.info(f"render with html report template: {report_template}")

    logger.info("Start to render Html report ...")

    start_at_timestamp = summary["time"]["start_at"]
    utc_time_iso_8601_str = datetime.utcfromtimestamp(start_at_timestamp).isoformat()
    summary["time"]["start_datetime"] = utc_time_iso_8601_str

    if report_file:
        # a bare filename has an empty dirname; fall back to the current
        # directory so os.makedirs below does not fail on ""
        report_dir = os.path.dirname(report_file) or "."
        report_file_name = os.path.basename(report_file)
    else:
        report_dir = report_dir or os.path.join(os.getcwd(), "reports")
        # fix #826: Windows does not support file name include ":"
        report_file_name = "{}.html".format(
            utc_time_iso_8601_str.replace(":", "").replace("-", "")
        )

    # exist_ok avoids the isdir-then-makedirs race when reports run in parallel
    os.makedirs(report_dir, exist_ok=True)
    report_path = os.path.join(report_dir, report_file_name)

    # io.open is just an alias of the builtin open on Python 3
    with open(report_template, "r", encoding="utf-8") as fp_r:
        template_content = fp_r.read()
    with open(report_path, "w", encoding="utf-8") as fp_w:
        rendered_content = Template(
            template_content, extensions=["jinja2.ext.loopcontrols"]
        ).render(summary)
        fp_w.write(rendered_content)

    logger.info(f"Generated Html report: {report_path}")
    return report_path

View File

@@ -1,64 +0,0 @@
import time
import unittest
from loguru import logger
class HtmlTestResult(unittest.TextTestResult):
    """Result collector that records per-test data for the html report.

    Used as ``resultclass`` by ``TextTestRunner``; every finished test
    appends a record dict (name / status / attachment / meta_datas) to
    ``self.records``, which the report renderer consumes later.
    """

    def __init__(self, stream, descriptions, verbosity):
        super().__init__(stream, descriptions, verbosity)
        # accumulated per-test records, read by the report generator
        self.records = []

    def _record_test(self, test, status, attachment=''):
        # meta_datas is attached to the test instance by the runner
        self.records.append({
            'name': test.shortDescription(),
            'status': status,
            'attachment': attachment,
            "meta_datas": test.meta_datas,
        })

    def startTestRun(self):
        # remember wall-clock start so `duration` can be derived later
        self.start_at = time.time()

    def startTest(self, test):
        """ add start test time """
        super().startTest(test)
        logger.info(test.shortDescription())

    def addSuccess(self, test):
        super().addSuccess(test)
        self._record_test(test, 'success')
        print("")

    def addError(self, test, err):
        super().addError(test, err)
        self._record_test(test, 'error', self._exc_info_to_string(err, test))
        print("")

    def addFailure(self, test, err):
        super().addFailure(test, err)
        self._record_test(test, 'failure', self._exc_info_to_string(err, test))
        print("")

    def addSkip(self, test, reason):
        super().addSkip(test, reason)
        self._record_test(test, 'skipped', reason)
        print("")

    def addExpectedFailure(self, test, err):
        super().addExpectedFailure(test, err)
        self._record_test(test, 'ExpectedFailure', self._exc_info_to_string(err, test))
        print("")

    def addUnexpectedSuccess(self, test):
        super().addUnexpectedSuccess(test)
        self._record_test(test, 'UnexpectedSuccess')
        print("")

    @property
    def duration(self):
        # seconds elapsed since startTestRun() was called
        return time.time() - self.start_at

View File

@@ -1,360 +0,0 @@
<head>
<meta content="text/html; charset=utf-8" http-equiv="content-type" />
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>{{html_report_name}} - TestReport</title>
<style>
body {
background-color: #f2f2f2;
color: #333;
margin: 0 auto;
width: 960px;
}
#summary {
width: 960px;
margin-bottom: 20px;
}
#summary th {
background-color: skyblue;
padding: 5px 12px;
}
#summary td {
background-color: lightblue;
text-align: center;
padding: 4px 8px;
}
.details {
width: 960px;
margin-bottom: 20px;
}
.details th {
background-color: skyblue;
padding: 5px 12px;
}
.details tr .passed {
background-color: lightgreen;
}
.details tr .failed {
background-color: red;
}
.details tr .unchecked {
background-color: gray;
}
.details td {
background-color: lightblue;
padding: 5px 12px;
}
.details .detail {
background-color: lightgrey;
font-size: smaller;
padding: 5px 10px;
line-height: 20px;
text-align: left;
}
.details .success {
background-color: greenyellow;
}
.details .error {
background-color: red;
}
.details .failure {
background-color: salmon;
}
.details .skipped {
background-color: gray;
}
.button {
font-size: 1em;
padding: 6px;
width: 4em;
text-align: center;
background-color: #06d85f;
border-radius: 20px/50px;
cursor: pointer;
transition: all 0.3s ease-out;
}
a.button{
color: gray;
text-decoration: none;
display: inline-block;
}
.button:hover {
background: #2cffbd;
}
.overlay {
position: fixed;
top: 0;
bottom: 0;
left: 0;
right: 0;
background: rgba(0, 0, 0, 0.7);
transition: opacity 500ms;
visibility: hidden;
opacity: 0;
line-height: 25px;
}
.overlay:target {
visibility: visible;
opacity: 1;
}
.popup {
margin: 70px auto;
padding: 20px;
background: #fff;
border-radius: 10px;
width: 50%;
position: relative;
transition: all 3s ease-in-out;
}
.popup h2 {
margin-top: 0;
color: #333;
font-family: Tahoma, Arial, sans-serif;
}
.popup .close {
position: absolute;
top: 20px;
right: 30px;
transition: all 200ms;
font-size: 30px;
font-weight: bold;
text-decoration: none;
color: #333;
}
.popup .close:hover {
color: #06d85f;
}
.popup .content {
max-height: 80%;
overflow: auto;
text-align: left;
}
.popup .separator {
color:royalblue
}
@media screen and (max-width: 700px) {
.box {
width: 70%;
}
.popup {
width: 70%;
}
}
</style>
</head>
<body>
<h1>Test Report: {{html_report_name}}</h1>
<h2>Summary</h2>
<table id="summary">
<tr>
<th>START AT</th>
<td colspan="4">{{time.start_datetime}}</td>
</tr>
<tr>
<th>DURATION</th>
<td colspan="4">{{ '%0.3f'| format(time.duration|float) }} seconds</td>
</tr>
<tr>
<th>PLATFORM</th>
<td>HttpRunner {{ platform.httprunner_version }} </td>
<td>{{ platform.python_version }} </td>
<td colspan="2">{{ platform.platform }}</td>
</tr>
<tr>
<th>STAT</th>
<th colspan="2">TESTCASES (success/fail)</th>
<th colspan="2">TESTSTEPS (success/fail/error/skip)</th>
</tr>
<tr>
<td>total (details) =></td>
<td colspan="2">{{stat.testcases.total}} ({{stat.testcases.success}}/{{stat.testcases.fail}})</td>
<td colspan="2">{{stat.teststeps.total}} ({{stat.teststeps.successes}}/{{stat.teststeps.failures}}/{{stat.teststeps.errors}}/{{stat.teststeps.skipped}})</td>
</tr>
</table>
<h2>Details</h2>
{% for test_suite_summary in details %}
{% set suite_index = loop.index %}
<h3>{{test_suite_summary.name}}</h3>
<table id="suite_{{suite_index}}" class="details">
<tr>
<td>TOTAL: {{test_suite_summary.stat.total}}</td>
<td>SUCCESS: {{test_suite_summary.stat.successes}}</td>
<td>FAILED: {{test_suite_summary.stat.failures}}</td>
<td>ERROR: {{test_suite_summary.stat.errors}}</td>
<td>SKIPPED: {{test_suite_summary.stat.skipped}}</td>
</tr>
<tr>
<th>Status</th>
<th colspan="2">Name</th>
<th>Response Time</th>
<th>Detail</th>
</tr>
{% for record in test_suite_summary.records %}
{% set record_index = "{}_{}".format(suite_index, loop.index) %}
{% set record_meta_datas = record.meta_datas_expanded %}
<tr id="record_{{record_index}}">
<th class="{{record.status}}" style="width:5em;">{{record.status}}</th>
<td colspan="2">{{record.name}}</td>
<td style="text-align:center;width:6em;">{{ record.response_time }} ms</td>
<td class="detail">
{% for meta_data in record_meta_datas %}
{% set meta_data_index = "{}_{}".format(record_index, loop.index) %}
<a class="button" href="#popup_log_{{meta_data_index}}">log-{{loop.index}}</a>
<div id="popup_log_{{meta_data_index}}" class="overlay">
<div class="popup">
<h2>Request and Response data</h2>
<a class="close" href="#record_{{meta_data_index}}">&times;</a>
<div class="content">
<h3>Name: {{ meta_data.name }}</h3>
{% for req_resp in meta_data.data %}
{% if loop.index > 1 %}
<div class="separator">==================================== redirect to ====================================</div>
{% endif %}
<h3>Request:</h3>
<div style="overflow: auto">
<table>
{% for key, value in req_resp.request.items() %}
<tr>
<th>{{key}}</th>
<td>
{% if key in ["headers", "body"] %}
<pre>{{ value | e }}</pre>
{% else %}
{{value}}
{% endif %}
</td>
</tr>
{% endfor %}
</table>
</div>
<h3>Response:</h3>
<div style="overflow: auto">
<table>
{% for key, value in req_resp.response.items() %}
<tr>
<th>{{key}}</th>
<td>
{% if key == "headers" %}
<pre>{{ value | e }}</pre>
{% elif key == "body" %}
{% if "image" in req_resp.response.content_type %}
<img src="{{ req_resp.response.content }}" />
{% else %}
<pre>{{ value | e }}</pre>
{% endif %}
{% else %}
{{ value }}
{% endif %}
</td>
</tr>
{% endfor %}
</table>
</div>
{% endfor %}
<h3>Validators:</h3>
<div style="overflow: auto">
{% set validate_extractors = meta_data.validators.validate_extractor %}
{% if validate_extractors %}
<table>
<tr>
<th>check</th>
<th>comparator</th>
<th>expect value</th>
<th>actual value</th>
</tr>
{% for validator in validate_extractors %}
<tr>
{% if validator.check_result == "pass" %}
<td class="passed">
{% elif validator.check_result == "fail" %}
<td class="failed">
{% elif validator.check_result == "unchecked" %}
<td class="unchecked">
{% endif %}
{{validator.check | e}}
</td>
<td>{{validator.comparator}}</td>
<td>{{validator.expect | e}}</td>
<td>{{validator.check_value | e}}</td>
</tr>
{% endfor %}
</table>
{% endif %}
{% set validate_script = meta_data.validators.validate_script %}
{% if validate_script %}
<table>
<tr>
<th>validate script</th><th>output</th>
</tr>
<tr>
<td><pre>{{validate_script.validate_script | safe}}</pre></td>
{% if validate_script.check_result == "pass" %}
<td class="passed">
{% elif validate_script.check_result == "fail" %}
<td class="failed">
{% endif %}
{{validate_script.output}}
</td>
</tr>
</table>
{% endif %}
</div>
<h3>Statistics:</h3>
<div style="overflow: auto">
<table>
<tr>
<th>content_size(bytes)</th>
<td>{{ meta_data.stat.content_size }}</td>
</tr>
<tr>
<th>response_time(ms)</th>
<td>{{ meta_data.stat.response_time_ms }}</td>
</tr>
<tr>
<th>elapsed(ms)</th>
<td>{{ meta_data.stat.elapsed_ms }}</td>
</tr>
</table>
</div>
</div>
</div>
</div>
{% endfor %}
{% if record.attachment %}
<a class="button" href="#popup_attachment_{{record_index}}">traceback</a>
<div id="popup_attachment_{{record_index}}" class="overlay">
<div class="popup">
<h2>Traceback Message</h2>
<a class="close" href="#record_{{record_index}}">&times;</a>
<div class="content"><pre>{{ record.attachment | e }}</pre></div>
</div>
</div>
{% endif %}
</td>
</tr>
{% endfor %}
</table>
{% endfor %}
</body>

View File

@@ -1,51 +0,0 @@
import json
import platform
import time
import uuid
import requests
from httprunner import __version__
def prepare_event_kwargs(event_name, params):
    """ prepare report event kwargs

    Builds the keyword arguments (headers + json payload) passed to
    requests.post when reporting a usage-tracking event.
    """
    # one tracking event with its params serialized as a json string
    event = {
        "event": event_name,
        "params": json.dumps(params),
        "time": int(time.time()),
    }
    payload = {
        "user": {
            # MAC-address derived id, stable per machine
            "user_unique_id": str(uuid.getnode())
        },
        "header": {
            "app_id": 173519,
            "os_name": platform.system(),
            "os_version": platform.release(),
            "app_version": __version__  # HttpRunner version
        },
        "events": [event],
        "verbose": 1,
    }
    return {
        "headers": {'content-type': 'application/json'},
        "json": payload,
    }
def report_event(event_name, success=True):
    """Report a usage-tracking event, best-effort.

    Args:
        event_name (str): event identifier, e.g. "loader".
        success (bool): whether the reported action succeeded.

    Telemetry must never break the caller: the request carries a timeout
    so it cannot hang indefinitely, and network errors are swallowed.
    """
    params = {
        "success": 1 if success else 0
    }
    kwargs = prepare_event_kwargs(event_name, params)
    try:
        # timeout prevents a slow analytics endpoint from blocking the run
        requests.post("http://mcs.snssdk.com/v1/json", timeout=5, **kwargs)
    except requests.RequestException:
        # analytics failures are intentionally ignored
        pass
if __name__ == '__main__':
    report_event("loader")

View File

@@ -1,215 +0,0 @@
import json
from base64 import b64encode
from collections import Iterable
from jinja2 import escape
from requests.cookies import RequestsCookieJar
def dumps_json(value):
    """Serialize *value* to a human-readable JSON string.

    Args:
        value (dict): raw json data

    Returns:
        str: 2-space indented dump, non-ASCII characters kept as-is.
    """
    return json.dumps(value, ensure_ascii=False, indent=2)
def detect_encoding(value):
    """Guess the text encoding of a bytes payload.

    Delegates to ``json.detect_encoding`` when the running interpreter
    provides it; otherwise falls back to "utf-8".
    """
    if hasattr(json, "detect_encoding"):
        return json.detect_encoding(value)
    return "utf-8"
def __stringify_request(request_data):
    """ stringfy HTTP request data

    Args:
        request_data (dict): HTTP request data in dict.

            {
                "url": "http://127.0.0.1:5000/api/get-token",
                "method": "POST",
                "headers": {
                    "User-Agent": "python-requests/2.20.0",
                    "Accept-Encoding": "gzip, deflate",
                    "Accept": "*/*",
                    "Connection": "keep-alive",
                    "user_agent": "iOS/10.3",
                    "device_sn": "TESTCASE_CREATE_XXX",
                    "os_platform": "ios",
                    "app_version": "2.8.6",
                    "Content-Type": "application/json",
                    "Content-Length": "52"
                },
                "body": b'{"sign": "cb9d60acd09080ea66c8e63a1c78c6459ea00168"}',
                "verify": false
            }

    NOTE: mutates request_data IN PLACE, replacing every value with a
    form that is safe to embed in the html report.
    """
    for key, value in request_data.items():
        if isinstance(value, (list, dict)):
            # structured data -> pretty-printed json string
            value = dumps_json(value)
        elif isinstance(value, bytes):
            try:
                encoding = detect_encoding(value)
                value = value.decode(encoding)
                if key == "body":
                    try:
                        # request body is in json format
                        value = json.loads(value)
                        value = dumps_json(value)
                    except json.JSONDecodeError:
                        pass
                # html-escape so the value renders safely in the report
                value = escape(value)
            except UnicodeDecodeError:
                # undecodable bytes are kept as-is
                pass
        elif not isinstance(value, (str, bytes, int, float, Iterable)):
            # class instance, e.g. MultipartEncoder()
            value = repr(value)
        elif isinstance(value, RequestsCookieJar):
            # a cookie jar is Iterable, so it falls through the previous
            # (negated) arm and is converted to a plain dict here
            value = value.get_dict()
        request_data[key] = value
def __stringify_response(response_data):
    """ stringfy HTTP response data

    Args:
        response_data (dict):

            {
                "status_code": 404,
                "headers": {
                    "Content-Type": "application/json",
                    "Content-Length": "30",
                    "Server": "Werkzeug/0.14.1 Python/3.7.0",
                    "Date": "Tue, 27 Nov 2018 06:19:27 GMT"
                },
                "encoding": "None",
                "content_type": "application/json",
                "ok": false,
                "url": "http://127.0.0.1:5000/api/users/9001",
                "reason": "NOT FOUND",
                "cookies": {},
                "body": {
                    "success": false,
                    "data": {}
                }
            }

    NOTE: mutates response_data IN PLACE, replacing every value with a
    form that is safe to embed in the html report.
    """
    for key, value in response_data.items():
        if isinstance(value, (list, dict)):
            # structured data -> pretty-printed json string
            value = dumps_json(value)
        elif isinstance(value, bytes):
            try:
                # prefer the encoding reported by the response; the literal
                # string "None" appears when the encoding was stringified
                encoding = response_data.get("encoding")
                if not encoding or encoding == "None":
                    encoding = detect_encoding(value)
                if key == "body" and "image" in response_data["content_type"]:
                    # display image
                    value = "data:{};base64,{}".format(
                        response_data["content_type"],
                        b64encode(value).decode(encoding)
                    )
                else:
                    # html-escape so the value renders safely in the report
                    value = escape(value.decode(encoding))
            except UnicodeDecodeError:
                # undecodable bytes are kept as-is
                pass
        elif not isinstance(value, (str, bytes, int, float, Iterable)):
            # class instance, e.g. MultipartEncoder()
            value = repr(value)
        elif isinstance(value, RequestsCookieJar):
            # a cookie jar is Iterable, so it falls through the previous
            # (negated) arm and is converted to a plain dict here
            value = value.get_dict()
        response_data[key] = value
def __expand_meta_datas(meta_datas, meta_datas_expanded):
    """ expand meta_datas to one level

    Args:
        meta_datas (dict/list): maybe in nested format
        meta_datas_expanded (list): accumulator, extended in place

    Examples:
        [[dict1, dict2], dict3]  ->  [dict1, dict2, dict3]
    """
    if isinstance(meta_datas, dict):
        meta_datas_expanded.append(meta_datas)
        return
    if isinstance(meta_datas, list):
        # recurse into nested lists, preserving order
        for nested in meta_datas:
            __expand_meta_datas(nested, meta_datas_expanded)
def __get_total_response_time(meta_datas_expanded):
    """ caculate total response time of all meta_datas

    Returns "N/A" when any entry carries a non-numeric stat
    (which happens when a request failed).
    """
    try:
        total = sum(
            meta_data["stat"]["response_time_ms"]
            for meta_data in meta_datas_expanded
        )
    except TypeError:
        # failure exists
        return "N/A"
    return "{:.2f}".format(total)
def __stringify_meta_datas(meta_datas):
    """Recursively stringify request/response pairs inside meta_datas.

    Accepts either a single meta_data dict or an arbitrarily nested list
    of them; mutates the request/response dicts in place.
    """
    if isinstance(meta_datas, dict):
        for pair in meta_datas["data"]:
            __stringify_request(pair["request"])
            __stringify_response(pair["response"])
    elif isinstance(meta_datas, list):
        for nested in meta_datas:
            __stringify_meta_datas(nested)
def stringify_summary(summary):
    """ stringify summary, in order to dump json file and generate html report.

    Mutates summary in place: names unnamed suites, stringifies each
    record's meta_datas, and adds the flattened list plus the total
    response time to every record.
    """
    for index, suite_summary in enumerate(summary["details"]):
        # fall back to a positional name when the testcase has none
        if not suite_summary.get("name"):
            suite_summary["name"] = f"testcase {index}"
        for record in suite_summary.get("records"):
            nested = record['meta_datas']
            __stringify_meta_datas(nested)
            flattened = []
            __expand_meta_datas(nested, flattened)
            record["meta_datas_expanded"] = flattened
            record["response_time"] = __get_total_response_time(flattened)

View File

@@ -1,82 +0,0 @@
import platform
from httprunner import __version__
def get_platform():
    """Return runtime environment info shown in the report header."""
    python_version = "{} {}".format(
        platform.python_implementation(), platform.python_version()
    )
    return {
        "httprunner_version": __version__,  # HttpRunner package version
        "python_version": python_version,
        "platform": platform.platform(),
    }
def aggregate_stat(origin_stat, new_stat):
    """ aggregate new_stat to origin_stat.

    Args:
        origin_stat (dict): origin stat dict, will be updated with new_stat dict.
        new_stat (dict): new stat dict.

    Numeric counters are summed; the time window is merged so that
    start_at = min(starts) and duration = max(ends) - min(starts).

    BUGFIX: the time window is snapshotted BEFORE any key is mutated.
    Previously the merged duration was computed from the already-updated
    start_at when "start_at" happened to precede "duration" in new_stat's
    iteration order, yielding a too-short duration.
    """
    # snapshot the original window before any mutation
    orig_start = origin_stat.get("start_at")
    orig_duration = origin_stat.get("duration")

    for key, value in new_stat.items():
        if key not in origin_stat:
            origin_stat[key] = value
        elif key in ("start_at", "duration"):
            # merged below from the snapshot, order-independently
            continue
        else:
            origin_stat[key] += value

    if orig_start is not None and "start_at" in new_stat:
        # earliest start of the two runs
        origin_stat["start_at"] = min(orig_start, new_stat["start_at"])
        if orig_duration is not None and "duration" in new_stat:
            # duration = max_end_time - min_start_time
            max_end = max(
                orig_start + orig_duration,
                new_stat["start_at"] + new_stat["duration"],
            )
            origin_stat["duration"] = max_end - origin_stat["start_at"]
def get_summary(result):
    """ get summary from test result

    Args:
        result (instance): HtmlTestResult() instance

    Returns:
        dict: summary extracted from result.

            {
                "success": True,
                "stat": {},
                "time": {},
                "records": []
            }
    """
    stat = {
        'total': result.testsRun,
        'failures': len(result.failures),
        'errors': len(result.errors),
        'skipped': len(result.skipped),
        'expectedFailures': len(result.expectedFailures),
        'unexpectedSuccesses': len(result.unexpectedSuccesses),
    }
    # successes = tests not accounted for by any other bucket
    stat["successes"] = stat["total"] - sum(
        stat[bucket]
        for bucket in (
            "failures",
            "errors",
            "skipped",
            "expectedFailures",
            "unexpectedSuccesses",
        )
    )
    return {
        "success": result.wasSuccessful(),
        "stat": stat,
        "time": {
            'start_at': result.start_at,
            'duration': result.duration,
        },
        "records": result.records,
    }

View File

@@ -1,18 +1,113 @@
import json
import re
from collections import OrderedDict
from typing import Dict, Text, Any, NoReturn
import jsonpath
import jmespath
import requests
from loguru import logger
from httprunner import exceptions, utils
from httprunner.exceptions import ValidationFailure, ParamsError
from httprunner.parser import parse_data, parse_string_value, get_mapping_function
from httprunner.schema import VariablesMapping, Validators, FunctionsMapping
text_extractor_regexp_compile = re.compile(r".*\(.*\).*")
def get_uniform_comparator(comparator: Text):
    """Convert a comparator alias to its uniform (canonical) name.

    e.g. "eq"/"=="/"is" => "equals", "len_gt" => "length_greater_than".
    Unknown comparator names are returned unchanged so that custom
    comparators keep working.
    """
    alias_groups = {
        "equals": ("eq", "equals", "==", "is"),
        "less_than": ("lt", "less_than"),
        "less_than_or_equals": ("le", "less_than_or_equals"),
        "greater_than": ("gt", "greater_than"),
        "greater_than_or_equals": ("ge", "greater_than_or_equals"),
        "not_equals": ("ne", "not_equals"),
        "string_equals": ("str_eq", "string_equals"),
        "length_equals": ("len_eq", "length_equals", "count_eq"),
        "length_greater_than": (
            "len_gt",
            "count_gt",
            "length_greater_than",
            "count_greater_than",
        ),
        "length_greater_than_or_equals": (
            "len_ge",
            "count_ge",
            "length_greater_than_or_equals",
            "count_greater_than_or_equals",
        ),
        "length_less_than": (
            "len_lt",
            "count_lt",
            "length_less_than",
            "count_less_than",
        ),
        "length_less_than_or_equals": (
            "len_le",
            "count_le",
            "length_less_than_or_equals",
            "count_less_than_or_equals",
        ),
    }
    for uniform_name, aliases in alias_groups.items():
        if comparator in aliases:
            return uniform_name
    # pass custom comparator names through untouched
    return comparator
def uniform_validator(validator):
    """Normalize a validator declaration into a single canonical dict.

    Args:
        validator (dict): one of two accepted formats:
            format1 (kept for compatibility with previous versions):
                {"check": "status_code", "assert": "eq", "expect": 201}
                {"check": "$resp_body_success", "assert": "eq", "expect": True}
            format2 (recommended): {assert: [check_item, expected_value]}
                {'eq': ['status_code', 201]}
                {'eq': ['$resp_body_success', True]}

    Returns:
        dict: {"check": ..., "expect": ..., "assert": ...} with the
        comparator converted to its uniform name.

    Raises:
        ParamsError: if validator is not a dict or matches neither format.
    """
    if not isinstance(validator, dict):
        raise ParamsError(f"invalid validator: {validator}")

    if "check" in validator and "expect" in validator:
        # format1
        check_item = validator["check"]
        expect_value = validator["expect"]
        comparator = validator.get("comparator", "eq")
    elif len(validator) == 1:
        # format2: the single key is the comparator alias
        comparator, compare_values = next(iter(validator.items()))
        if not isinstance(compare_values, list) or len(compare_values) != 2:
            raise ParamsError(f"invalid validator: {validator}")
        check_item, expect_value = compare_values
    else:
        raise ParamsError(f"invalid validator: {validator}")

    return {
        "check": check_item,
        "expect": expect_value,
        # uniform comparator name, e.g. lt => less_than, eq => equals
        "assert": get_uniform_comparator(comparator),
    }
class ResponseObject(object):
def __init__(self, resp_obj):
def __init__(self, resp_obj: requests.Response):
""" initialize with a requests.Response object
Args:
@@ -21,283 +116,96 @@ class ResponseObject(object):
"""
self.resp_obj = resp_obj
def __getattr__(self, key):
try:
if key == "json":
value = self.resp_obj.json()
elif key == "cookies":
value = self.resp_obj.cookies.get_dict()
else:
value = getattr(self.resp_obj, key)
self.__dict__[key] = value
return value
except AttributeError:
err_msg = f"ResponseObject does not have attribute: {key}"
logger.error(err_msg)
raise exceptions.ParamsError(err_msg)
def _extract_field_with_jsonpath(self, field: str) -> list:
    """ extract field from response content with jsonpath expression.
    JSONPath Docs: https://goessner.net/articles/JsonPath/

    Args:
        field: jsonpath expression, e.g. $.code, $..items.*.id

    Returns:
        A list of all matched values, e.g. 1) [200] 2) [1, 2]

    Raises:
        exceptions.ExtractFailure: If the response body is not JSON, is
            empty/falsy, or no content matched the jsonpath expression.

    Examples:
        For example, response body like below:
        {
            "code": 200,
            "data": {
                "items": [{
                        "id": 1,
                        "name": "Bob"
                    },
                    {
                        "id": 2,
                        "name": "James"
                    }
                ]
            },
            "message": "success"
        }

        >>> _extract_field_with_jsonpath("$.code")
        [200]
        >>> _extract_field_with_jsonpath("$..items.*.id")
        [1, 2]

    """
    try:
        json_body = self.json
        # a falsy body or a falsy jsonpath result (no match) is rejected by
        # the asserts and reported uniformly as ExtractFailure below
        assert json_body
        result = jsonpath.jsonpath(json_body, field)
        assert result
        return result
    except (AssertionError, exceptions.JSONDecodeError):
        err_msg = f"Failed to extract data with jsonpath! => {field}\n"
        err_msg += f"response body: {self.text}\n"
        logger.error(err_msg)
        raise exceptions.ExtractFailure(err_msg)
def _extract_field_with_regex(self, field):
    """Extract a value from the response text with a regular expression.

    The response body may be JSON or HTML text; the pattern is searched in
    ``self.text`` and the first capture group is returned.

    Args:
        field (str): regex string that matched r".*\(.*\).*",
            i.e. it contains one capture group.

    Returns:
        str: content matched by the first capture group.

    Raises:
        exceptions.ExtractFailure: If the regex does not match.

    Examples:
        >>> # self.text: "LB123abcRB789"
        >>> field = "LB[\d]*(.*)RB[\d]*"
        >>> _extract_field_with_regex(field)
        abc
    """
    matched = re.search(field, self.text)
    if matched:
        return matched.group(1)

    err_msg = f"Failed to extract data with regex! => {field}\n"
    err_msg += f"response body: {self.text}\n"
    logger.error(err_msg)
    raise exceptions.ExtractFailure(err_msg)
def _extract_field_with_delimiter(self, field):
""" response content could be json or html text.
Args:
field (str): string joined by delimiter.
e.g.
"status_code"
"headers"
"cookies"
"content"
"headers.content-type"
"content.person.name.first_name"
"""
# string.split(sep=None, maxsplit=1) -> list of strings
# e.g. "content.person.name" => ["content", "person.name"]
try:
top_query, sub_query = field.split('.', 1)
body = resp_obj.json()
except ValueError:
top_query = field
sub_query = None
body = resp_obj.content
# status_code
if top_query in ["status_code", "encoding", "ok", "reason", "url"]:
if sub_query:
# status_code.XX
err_msg = f"Failed to extract: {field}\n"
logger.error(err_msg)
raise exceptions.ParamsError(err_msg)
self.resp_obj_meta = {
"status_code": resp_obj.status_code,
"headers": resp_obj.headers,
"body": body,
}
self.validation_results: Dict = {}
return getattr(self, top_query)
# cookies
elif top_query == "cookies":
cookies = self.cookies
if not sub_query:
# extract cookies
return cookies
try:
return cookies[sub_query]
except KeyError:
err_msg = f"Failed to extract cookie! => {field}\n"
err_msg += f"response cookies: {cookies}\n"
logger.error(err_msg)
raise exceptions.ExtractFailure(err_msg)
# elapsed
elif top_query == "elapsed":
available_attributes = u"available attributes: days, seconds, microseconds, total_seconds"
if not sub_query:
err_msg = "elapsed is datetime.timedelta instance, attribute should also be specified!\n"
err_msg += available_attributes
logger.error(err_msg)
raise exceptions.ParamsError(err_msg)
elif sub_query in ["days", "seconds", "microseconds"]:
return getattr(self.elapsed, sub_query)
elif sub_query == "total_seconds":
return self.elapsed.total_seconds()
else:
err_msg = f"{sub_query} is not valid datetime.timedelta attribute.\n"
err_msg += available_attributes
logger.error(err_msg)
raise exceptions.ParamsError(err_msg)
# headers
elif top_query == "headers":
headers = self.headers
if not sub_query:
# extract headers
return headers
try:
return headers[sub_query]
except KeyError:
err_msg = f"Failed to extract header! => {field}\n"
err_msg += f"response headers: {headers}\n"
logger.error(err_msg)
raise exceptions.ExtractFailure(err_msg)
# response body
elif top_query in ["body", "content", "text", "json"]:
try:
body = self.json
except json.JSONDecodeError:
body = self.text
if not sub_query:
# extract response body
return body
if isinstance(body, (dict, list)):
# content = {"xxx": 123}, content.xxx
return utils.query_json(body, sub_query)
elif sub_query.isdigit():
# content = "abcdefg", content.3 => d
return utils.query_json(body, sub_query)
else:
# content = "<html>abcdefg</html>", content.xxx
err_msg = f"Failed to extract attribute from response body! => {field}\n"
err_msg += f"response body: {body}\n"
logger.error(err_msg)
raise exceptions.ExtractFailure(err_msg)
# new set response attributes in teardown_hooks
elif top_query in self.__dict__:
attributes = self.__dict__[top_query]
if not sub_query:
# extract response attributes
return attributes
if isinstance(attributes, (dict, list)):
# attributes = {"xxx": 123}, content.xxx
return utils.query_json(attributes, sub_query)
elif sub_query.isdigit():
# attributes = "abcdefg", attributes.3 => d
return utils.query_json(attributes, sub_query)
else:
# content = "attributes.new_attribute_not_exist"
err_msg = f"Failed to extract cumstom set attribute from teardown hooks! => {field}\n"
err_msg += f"response set attributes: {attributes}\n"
logger.error(err_msg)
raise exceptions.TeardownHooksFailure(err_msg)
# others
else:
err_msg = f"Failed to extract attribute from response! => {field}\n"
err_msg += "available response attributes: status_code, cookies, elapsed, headers, content, " \
"text, json, encoding, ok, reason, url.\n\n"
err_msg += "If you want to set attribute in teardown_hooks, take the following example as reference:\n"
err_msg += "response.new_attribute = 'new_attribute_value'\n"
logger.error(err_msg)
raise exceptions.ParamsError(err_msg)
def extract_field(self, field):
    """Extract one value from the wrapped requests.Response.

    Dispatches on the extractor expression:
      - starts with "$"                      -> jsonpath extraction
      - matches a pattern with a group "()"  -> regex extraction
      - anything else                        -> delimiter ("a.b.c") extraction

    Raises:
        exceptions.ParamsError: If field is not a string.
    """
    if not isinstance(field, str):
        err_msg = f"Invalid extractor! => {field}\n"
        logger.error(err_msg)
        raise exceptions.ParamsError(err_msg)

    if field.startswith("$"):
        value = self._extract_field_with_jsonpath(field)
    elif text_extractor_regexp_compile.match(field):
        value = self._extract_field_with_regex(field)
    else:
        value = self._extract_field_with_delimiter(field)

    logger.debug(f"extract: {field}\t=> {value}")
    return value
def extract_response(self, extractors):
""" extract value from requests.Response and store in OrderedDict.
Args:
extractors (list):
[
{"resp_status_code": "status_code"},
{"resp_headers_content_type": "headers.content-type"},
{"resp_content": "content"},
{"resp_content_person_first_name": "content.person.name.first_name"}
]
Returns:
OrderDict: variable binds ordered dict
"""
def extract(self, extractors: Dict[Text, Text]) -> Dict[Text, Any]:
if not extractors:
return {}
logger.debug("start to extract from response object.")
extracted_variables_mapping = OrderedDict()
extract_binds_order_dict = utils.ensure_mapping_format(extractors)
extract_mapping = {}
for key, field in extractors.items():
field_value = jmespath.search(field, self.resp_obj_meta)
extract_mapping[key] = field_value
for key, field in extract_binds_order_dict.items():
extracted_variables_mapping[key] = self.extract_field(field)
logger.info(f"extract mapping: {extract_mapping}")
return extract_mapping
return extracted_variables_mapping
def validate(
    self,
    validators: Validators,
    variables_mapping: VariablesMapping = None,
    functions_mapping: FunctionsMapping = None,
) -> None:
    # NOTE: annotation fixed from NoReturn to None — typing.NoReturn means
    # "never returns normally (always raises)", which is wrong here: this
    # method returns None on success and only raises on failed validation.
    """Run all validators against the response and record their results.

    Each validator's outcome is appended to
    ``self.validation_results["validate_extractor"]``.

    Args:
        validators: validator declarations in format1 or format2
            (see uniform_validator).
        variables_mapping: variables used to parse expected values.
        functions_mapping: functions used to parse expected values and to
            resolve comparator implementations.

    Raises:
        ValidationFailure: if any validator fails; all failure messages are
            joined into the exception text.
    """
    self.validation_results = {}
    if not validators:
        return

    validate_pass = True
    failures = []
    # every validator's result dict is collected here for reporting
    extractor_results = self.validation_results.setdefault(
        "validate_extractor", []
    )

    for v in validators:
        u_validator = uniform_validator(v)

        # check item: jmespath expression evaluated against request/response meta
        check_item = u_validator["check"]
        check_value = jmespath.search(check_item, self.resp_obj_meta)
        check_value = parse_string_value(check_value)

        # comparator resolved to an assertion function
        assert_method = u_validator["assert"]
        assert_func = get_mapping_function(assert_method, functions_mapping)

        # expect item: parsed with config/teststep/extracted variables
        expect_item = u_validator["expect"]
        expect_value = parse_data(expect_item, variables_mapping, functions_mapping)

        validate_msg = f"assert {check_item} {assert_method} {expect_value}({type(expect_value).__name__})"

        validator_dict = {
            "comparator": assert_method,
            "check": check_item,
            "check_value": check_value,
            "expect": expect_item,
            "expect_value": expect_value,
        }

        try:
            assert_func(check_value, expect_value)
            validate_msg += "\t==> pass"
            logger.info(validate_msg)
            validator_dict["check_result"] = "pass"
        except AssertionError:
            validate_pass = False
            validator_dict["check_result"] = "fail"
            validate_msg += "\t==> fail"
            validate_msg += (
                f"\n"
                f"check_item: {check_item}\n"
                f"check_value: {check_value}({type(check_value).__name__})\n"
                f"assert_method: {assert_method}\n"
                f"expect_value: {expect_value}({type(expect_value).__name__})"
            )
            logger.error(validate_msg)
            failures.append(validate_msg)

        extractor_results.append(validator_dict)

    if not validate_pass:
        raise ValidationFailure("\n".join(failures))

View File

@@ -1,266 +1,91 @@
import uuid
from enum import Enum
from unittest.case import SkipTest
import os
import time
from datetime import datetime
from typing import List, Dict, Text
from loguru import logger
from httprunner import exceptions, response, utils
from httprunner import utils, exceptions
from httprunner.client import HttpSession
from httprunner.context import SessionContext
from httprunner.validator import Validator
from httprunner.exceptions import ValidationFailure, ParamsError
from httprunner.ext.uploader import prepare_upload_step
from httprunner.loader import load_project_meta, load_testcase_file
from httprunner.parser import build_url, parse_data, parse_variables_mapping
from httprunner.response import ResponseObject
from httprunner.schema import (
TConfig,
TStep,
VariablesMapping,
StepData,
TestCaseSummary,
TestCaseTime,
TestCaseInOut,
ProjectMeta,
TestCase,
)
class HookTypeEnum(Enum):
    """Phase in which hook actions run: before (SETUP) or after (TEARDOWN) the test."""
    SETUP = 1
    TEARDOWN = 2
class HttpRunner(object):
config: TConfig
teststeps: List[TStep]
success: bool = True # indicate testcase execution result
__project_meta: ProjectMeta = None
__step_datas: List[StepData] = None
__session: HttpSession = None
__session_variables: VariablesMapping = {}
__start_at = 0
__duration = 0
class Runner(object):
""" Running testcases.
def with_project_meta(self, project_meta: ProjectMeta) -> "HttpRunner":
    """Inject an already-loaded ProjectMeta so run() does not load it again; returns self for chaining."""
    self.__project_meta = project_meta
    return self
Examples:
>>> tests_mapping = {
"project_mapping": {
"functions": {}
},
"testcases": [
{
"config": {
"name": "XXXX",
"base_url": "http://127.0.0.1",
"verify": False
},
"teststeps": [
{
"name": "test description",
"variables": [], # optional
"request": {
"url": "http://127.0.0.1:5000/api/users/1000",
"method": "GET"
}
}
]
}
]
}
def with_session(self, session: HttpSession) -> "HttpRunner":
    """Reuse an existing HttpSession instead of creating a fresh one; returns self for chaining."""
    self.__session = session
    return self
>>> testcases = parser.parse_tests(tests_mapping)
>>> parsed_testcase = testcases[0]
def with_variables(self, variables: VariablesMapping) -> "HttpRunner":
    """Seed session variables before the run (merged into config variables by run()); returns self for chaining."""
    self.__session_variables = variables
    return self
>>> test_runner = runner.Runner(parsed_testcase["config"])
>>> test_runner.run_test(parsed_testcase["teststeps"][0])
def __run_step_request(self, step: TStep):
"""run teststep: request"""
step_data = StepData(name=step.name)
"""
# parse
prepare_upload_step(step, self.__project_meta.functions)
request_dict = step.request.dict()
request_dict.pop("upload", None)
parsed_request_dict = parse_data(
request_dict, step.variables, self.__project_meta.functions
)
def __init__(self, config, http_client_session=None):
""" run testcase or testsuite.
# prepare arguments
method = parsed_request_dict.pop("method")
url_path = parsed_request_dict.pop("url")
url = build_url(self.config.base_url, url_path)
Args:
config (dict): testcase/testsuite config dict
parsed_request_dict["json"] = parsed_request_dict.pop("req_json", {})
{
"name": "ABC",
"variables": {},
"setup_hooks", [],
"teardown_hooks", []
}
http_client_session (instance): requests.Session(), or locust.client.Session() instance.
"""
self.verify = config.get("verify", True)
self.export = config.get("export") or config.get("output", [])
config_variables = config.get("variables", {})
self.hrun_request_id = str(uuid.uuid4())
if "HRUN-Request-ID" not in config_variables:
config_variables["HRUN-Request-ID"] = self.hrun_request_id
else:
self.hrun_request_id = config_variables["HRUN-Request-ID"]
# testcase setup hooks
testcase_setup_hooks = config.get("setup_hooks", [])
# testcase teardown hooks
self.testcase_teardown_hooks = config.get("teardown_hooks", [])
self.http_client_session = http_client_session or HttpSession()
self.session_context = SessionContext(config_variables)
if testcase_setup_hooks:
self.do_hook_actions(testcase_setup_hooks, HookTypeEnum.SETUP)
def __del__(self):
    # NOTE(review): running teardown hooks from __del__ is fragile — the call
    # time depends on garbage collection / interpreter shutdown, and required
    # globals may already be torn down. Consider an explicit close() or a
    # context-manager protocol instead.
    if self.testcase_teardown_hooks:
        self.do_hook_actions(self.testcase_teardown_hooks, HookTypeEnum.TEARDOWN)
def __clear_test_data(self):
    """Reset request/response meta data so each test starts clean.

    Only applies when the underlying client is an HttpSession; other
    session types are left untouched.
    """
    if isinstance(self.http_client_session, HttpSession):
        self.http_client_session.init_meta_data()
def _handle_skip_feature(self, test_dict):
    """Raise SkipTest when the teststep declares a skip condition.

    Supported keys in test_dict (checked in this priority order):
        skip:       skip unconditionally; the value is the reason
        skipIf:     skip when the condition evaluates truthy
        skipUnless: skip when the condition evaluates falsy

    Args:
        test_dict (dict): test info

    Raises:
        SkipTest: with the computed skip reason.
    """
    # TODO: move skip to initialize
    skip_reason = None

    if "skip" in test_dict:
        skip_reason = test_dict["skip"]
    elif "skipIf" in test_dict:
        skip_if_condition = test_dict["skipIf"]
        if self.session_context.eval_content(skip_if_condition):
            skip_reason = f"{skip_if_condition} evaluate to True"
    elif "skipUnless" in test_dict:
        skip_unless_condition = test_dict["skipUnless"]
        if not self.session_context.eval_content(skip_unless_condition):
            skip_reason = f"{skip_unless_condition} evaluate to False"

    if skip_reason:
        raise SkipTest(skip_reason)
def do_hook_actions(self, actions, hook_type):
    """Execute setup/teardown hook actions.

    Args:
        actions (list): each item is one of two formats:
            dict with a single entry, {"var": "${func()}"} — call the hook
                function and assign its return value to the variable;
            str, "${func()}" — just call the hook function.
        hook_type (HookTypeEnum): SETUP or TEARDOWN (used for logging only).
    """
    logger.debug(f"call {hook_type.name} hook actions.")
    for action in actions:
        if isinstance(action, dict) and len(action) == 1:
            # format 1: {"var": "${func()}"} — evaluate and bind the result
            var_name, hook_content = next(iter(action.items()))
            hook_content_eval = self.session_context.eval_content(hook_content)
            logger.debug(
                f"assignment with hook: {var_name} = {hook_content} => {hook_content_eval}")
            self.session_context.update_test_variables(
                var_name, hook_content_eval
            )
        else:
            # format 2: plain "${func()}" — call for side effects only
            logger.debug(f"call hook function: {action}")
            # TODO: check hook function if valid
            self.session_context.eval_content(action)
def _run_test(self, test_dict):
""" run single teststep.
Args:
test_dict (dict): teststep info
{
"name": "teststep description",
"skip": "skip this test unconditionally",
"times": 3,
"variables": [], # optional, override
"request": {
"url": "http://127.0.0.1:5000/api/users/1000",
"method": "POST",
"headers": {
"Content-Type": "application/json",
"authorization": "$authorization",
"random": "$random"
},
"json": {"name": "user", "password": "123456"}
},
"extract": {}, # optional
"validate": [], # optional
"setup_hooks": [], # optional
"teardown_hooks": [] # optional
}
Raises:
exceptions.ParamsError
exceptions.ValidationFailure
exceptions.ExtractFailure
"""
# clear meta data first to ensure independence for each test
self.__clear_test_data()
# check skip
self._handle_skip_feature(test_dict)
# prepare
test_dict = utils.lower_test_dict_keys(test_dict)
test_variables = test_dict.get("variables", {})
self.session_context.init_test_variables(test_variables)
# teststep name
test_name = self.session_context.eval_content(test_dict.get("name", ""))
# parse test request
raw_request = test_dict.get('request', {})
parsed_test_request = self.session_context.eval_content(raw_request)
self.session_context.update_test_variables("request", parsed_test_request)
# setup hooks
setup_hooks = test_dict.get("setup_hooks", [])
if setup_hooks:
self.do_hook_actions(setup_hooks, HookTypeEnum.SETUP)
# prepend url with base_url unless it's already an absolute URL
url = parsed_test_request.pop('url')
base_url = self.session_context.eval_content(test_dict.get("base_url", ""))
parsed_url = utils.build_url(base_url, url)
request_headers = parsed_test_request.setdefault("headers", {})
if "HRUN-Request-ID" not in request_headers:
parsed_test_request["headers"]["HRUN-Request-ID"] = \
self.session_context.session_variables_mapping["HRUN-Request-ID"]
try:
method = parsed_test_request.pop('method')
parsed_test_request.setdefault("verify", self.verify)
group_name = parsed_test_request.pop("group", None)
except KeyError:
raise exceptions.ParamsError("URL or METHOD missed!")
logger.info(f"{method} {parsed_url}")
logger.debug(f"request kwargs(raw): {parsed_test_request}")
logger.info(f"{method} {url}")
logger.debug(f"request kwargs(raw): {parsed_request_dict}")
# request
resp = self.http_client_session.request(
method,
parsed_url,
name=(group_name or test_name),
**parsed_test_request
)
resp_obj = response.ResponseObject(resp)
self.__session = self.__session or HttpSession()
resp = self.__session.request(method, url, **parsed_request_dict)
resp_obj = ResponseObject(resp)
def log_req_resp_details():
err_msg = "{} DETAILED REQUEST & RESPONSE {}\n".format("*" * 32, "*" * 32)
err_msg = "\n{} DETAILED REQUEST & RESPONSE {}\n".format("*" * 32, "*" * 32)
# log request
err_msg += "====== request details ======\n"
err_msg += f"url: {parsed_url}\n"
err_msg += f"url: {url}\n"
err_msg += f"method: {method}\n"
headers = parsed_test_request.pop("headers", {})
headers = parsed_request_dict.pop("headers", {})
err_msg += f"headers: {headers}\n"
for k, v in parsed_test_request.items():
for k, v in parsed_request_dict.items():
v = utils.omit_long_data(v)
err_msg += f"{k}: {repr(v)}\n"
@@ -268,149 +93,158 @@ class Runner(object):
# log response
err_msg += "====== response details ======\n"
err_msg += f"status_code: {resp_obj.status_code}\n"
err_msg += f"headers: {resp_obj.headers}\n"
err_msg += f"body: {repr(resp_obj.text)}\n"
err_msg += f"status_code: {resp.status_code}\n"
err_msg += f"headers: {resp.headers}\n"
err_msg += f"body: {repr(resp.text)}\n"
logger.error(err_msg)
# teardown hooks
teardown_hooks = test_dict.get("teardown_hooks", [])
if teardown_hooks:
self.session_context.update_test_variables("response", resp_obj)
self.do_hook_actions(teardown_hooks, HookTypeEnum.TEARDOWN)
self.http_client_session.update_last_req_resp_record(resp_obj)
# extract
extractors = test_dict.get("extract", {})
try:
extracted_variables_mapping = resp_obj.extract_response(extractors)
self.session_context.update_session_variables(extracted_variables_mapping)
except (exceptions.ParamsError, exceptions.ExtractFailure):
log_req_resp_details()
raise
extractors = step.extract
extract_mapping = resp_obj.extract(extractors)
step_data.export = extract_mapping
variables_mapping = step.variables
variables_mapping.update(extract_mapping)
# validate
validators = test_dict.get("validate") or test_dict.get("validators") or []
validate_script = test_dict.get("validate_script", [])
if validate_script:
validators.append({
"type": "python_script",
"script": validate_script
})
validator = Validator(self.session_context, resp_obj)
validators = step.validators
try:
validator.validate(validators)
except exceptions.ValidationFailure:
resp_obj.validate(
validators, variables_mapping, self.__project_meta.functions
)
self.__session.data.success = True
except ValidationFailure:
self.__session.data.success = False
log_req_resp_details()
raise
finally:
self.validation_results = validator.validation_results
# save request & response meta data
self.__session.data.validators = resp_obj.validation_results
self.success &= self.__session.data.success
# save step data
step_data.success = self.__session.data.success
step_data.data = self.__session.data
def _run_testcase(self, testcase_dict):
""" run single testcase.
"""
self.meta_datas = []
config = testcase_dict.get("config", {})
return step_data
# each teststeps in one testcase (YAML/JSON) share the same session.
test_runner = Runner(config, self.http_client_session)
def __run_step_testcase(self, step):
"""run teststep: referenced testcase"""
step_data = StepData(name=step.name)
step_variables = step.variables
tests = testcase_dict.get("teststeps", [])
ref_testcase_path = os.path.join(self.__project_meta.PWD, step.testcase)
case_result = (
HttpRunner()
.with_session(self.__session)
.with_variables(step_variables)
.run_path(ref_testcase_path)
)
step_data.data = case_result.get_step_datas() # list of step data
step_data.export = case_result.get_export_variables()
step_data.success = case_result.success
self.success &= case_result.success
for index, test_dict in enumerate(tests):
return step_data
# override current teststep variables with former testcase output variables
former_output_variables = self.session_context.test_variables_mapping
if former_output_variables:
test_dict.setdefault("variables", {})
test_dict["variables"].update(former_output_variables)
def __run_step(self, step: TStep):
"""run teststep, teststep maybe a request or referenced testcase"""
logger.info(f"run step: {step.name}")
try:
test_runner.run_test(test_dict)
except Exception:
# log exception request_type and name for locust stat
self.exception_request_type = test_runner.exception_request_type
self.exception_name = test_runner.exception_name
raise
finally:
_meta_datas = test_runner.meta_datas
self.meta_datas.append(_meta_datas)
if step.request:
step_data = self.__run_step_request(step)
elif step.testcase:
step_data = self.__run_step_testcase(step)
else:
raise ParamsError(
f"teststep is neither a request nor a referenced testcase: {step.dict()}"
)
self.session_context.update_session_variables(
test_runner.export_variables(test_runner.export)
self.__step_datas.append(step_data)
return step_data.export
def run(self, testcase: TestCase):
    """Main entrance: execute all teststeps of *testcase*, then return self.

    Session variables extracted by each step are merged into the variables of
    subsequent steps.
    NOTE(review): dict.update lets config/session values override same-named
    step variables — confirm this precedence is intended.
    """
    self.config = testcase.config
    self.teststeps = testcase.teststeps
    self.config.variables.update(self.__session_variables)
    # load project meta (debugtalk functions) from the testcase path if known,
    # otherwise fall back to any injected meta, then to an empty one
    if self.config.path:
        self.__project_meta = load_project_meta(self.config.path)
    elif not self.__project_meta:
        self.__project_meta = ProjectMeta()

    def parse_config(config: TConfig):
        # resolve config variables first, then use them to parse name/base_url
        config.variables = parse_variables_mapping(
            config.variables, self.__project_meta.functions
        )
        config.name = parse_data(
            config.name, config.variables, self.__project_meta.functions
        )
        config.base_url = parse_data(
            config.base_url, config.variables, self.__project_meta.functions
        )

    parse_config(self.config)
    self.__start_at = time.time()
    self.__step_datas: List[StepData] = []
    self.__session_variables = {}
    for step in self.teststeps:
        # update with config variables
        step.variables.update(self.config.variables)
        # update with session variables extracted from pre step
        step.variables.update(self.__session_variables)
        # parse variables
        step.variables = parse_variables_mapping(
            step.variables, self.__project_meta.functions
        )
        # run step
        extract_mapping = self.__run_step(step)
        # save extracted variables to session variables
        self.__session_variables.update(extract_mapping)
    self.__duration = time.time() - self.__start_at
    return self
def run_path(self, path: Text) -> "HttpRunner":
    """Load the testcase file at *path* and run it.

    Args:
        path: path to a testcase file on disk.

    Raises:
        exceptions.ParamsError: if path is not an existing file.
    """
    if not os.path.isfile(path):
        raise exceptions.ParamsError(f"Invalid testcase path: {path}")
    _, testcase_obj = load_testcase_file(path)
    return self.run(testcase_obj)
def get_step_datas(self) -> List[StepData]:
    """Return the per-step execution data collected during the last run."""
    return self.__step_datas
def get_export_variables(self) -> Dict:
    """Collect the variables named in config.export from session variables.

    Returns:
        Dict: name -> value for every exported variable.

    Raises:
        ParamsError: if an exported name was never produced during the run.
    """
    export_vars_mapping = {}
    for var_name in self.config.export:
        if var_name not in self.__session_variables:
            raise ParamsError(
                f"failed to export variable {var_name} from session variables {self.__session_variables}"
            )
        export_vars_mapping[var_name] = self.__session_variables[var_name]
    return export_vars_mapping
def get_summary(self) -> TestCaseSummary:
    """Build a TestCaseSummary (name, success, timing, in/out vars, step data) for the last run."""
    start_at_timestamp = self.__start_at
    # NOTE(review): utcfromtimestamp produces a naive datetime; presumably
    # start timestamps are treated as UTC throughout — confirm.
    start_at_iso_format = datetime.utcfromtimestamp(start_at_timestamp).isoformat()
    return TestCaseSummary(
        name=self.config.name,
        success=self.success,
        time=TestCaseTime(
            start_at=self.__start_at,
            start_at_iso_format=start_at_iso_format,
            duration=self.__duration,
        ),
        in_out=TestCaseInOut(
            vars=self.config.variables, export=self.get_export_variables()
        ),
        step_datas=self.__step_datas,
    )
def run_test(self, test_dict):
    """ run single teststep of testcase.
    test_dict may be in 3 types.

    Args:
        test_dict (dict):

            # teststep
            {
                "name": "teststep description",
                "variables": [],        # optional
                "request": {
                    "url": "http://127.0.0.1:5000/api/users/1000",
                    "method": "GET"
                }
            }

            # nested testcase
            {
                "config": {...},
                "teststeps": [
                    {...},
                    {...}
                ]
            }

            # TODO: function
            {
                "name": "exec function",
                "function": "${func()}"
            }

    """
    self.meta_datas = None
    if "teststeps" in test_dict:
        # nested testcase: propagate current session variables into its config
        test_dict.setdefault("config", {}).setdefault("variables", {})
        test_dict["config"]["variables"].update(
            self.session_context.session_variables_mapping)
        self._run_testcase(test_dict)
    else:
        # api: a single request teststep
        self.validation_results = {}
        try:
            self._run_test(test_dict)
        except Exception:
            # log exception request_type and name for locust stat
            self.exception_request_type = test_dict["request"]["method"]
            self.exception_name = test_dict.get("name")
            raise
        finally:
            # get request/response data and validate results
            self.meta_datas = getattr(self.http_client_session, "meta_data", {})
            self.meta_datas["validators"] = self.validation_results
def export_variables(self, output_variables_list):
    """Export the requested variables from the current session.

    Variables missing from the session are logged as warnings and omitted
    from the result (the export does not fail).

    Args:
        output_variables_list (list): variable names to export.

    Returns:
        dict: name -> value for every variable found in the session.
    """
    variables_mapping = self.session_context.session_variables_mapping
    output = {}
    for variable in output_variables_list:
        try:
            output[variable] = variables_mapping[variable]
        except KeyError:
            logger.warning(
                f"variable '{variable}' can not be found in variables mapping, "
                "failed to export!"
            )
    utils.print_info(output)
    return output
def test_start(self):
    """Entry point discovered by pytest; runs the testcase built from the class-level config/teststeps."""
    return self.run(TestCase(config=self.config, teststeps=self.teststeps))

28
httprunner/runner_test.py Normal file
View File

@@ -0,0 +1,28 @@
import unittest
from httprunner.runner import HttpRunner
class TestHttpRunner(unittest.TestCase):
    """End-to-end smoke tests: run example testcase files via HttpRunner.run_path.

    NOTE(review): the example testcases target postman-echo, so these tests
    presumably require network access — confirm before adding to offline CI.
    """

    def setUp(self):
        # a fresh runner per test keeps session state isolated
        self.runner = HttpRunner()

    def test_run_testcase_by_path_request_only(self):
        # testcase composed of plain request steps only
        self.runner.run_path(
            "examples/postman_echo/request_methods/request_with_variables.yml"
        )
        result = self.runner.get_summary()
        self.assertTrue(result.success)
        self.assertEqual(result.name, "request methods testcase with variables")
        self.assertEqual(result.step_datas[0].name, "get with params")
        self.assertEqual(len(result.step_datas), 3)

    def test_run_testcase_by_path_ref_testcase(self):
        # testcase whose single step references another testcase file
        self.runner.run_path(
            "examples/postman_echo/request_methods/request_with_testcase_reference.yml"
        )
        result = self.runner.get_summary()
        self.assertTrue(result.success)
        self.assertEqual(result.name, "request methods testcase: reference testcase")
        self.assertEqual(result.step_datas[0].name, "request with variables")
        self.assertEqual(len(result.step_datas), 1)

174
httprunner/schema.py Normal file
View File

@@ -0,0 +1,174 @@
import os
from enum import Enum
from typing import Any
from typing import Dict, Text, Union, Callable
from typing import List
from pydantic import BaseModel, Field
from pydantic import HttpUrl
# Type aliases shared by the schema models below.
Name = Text
Url = Text
# base_url may be a validated HttpUrl or any plain string (e.g. the empty default)
BaseUrl = Union[HttpUrl, Text]
VariablesMapping = Dict[Text, Any]
FunctionsMapping = Dict[Text, Callable]
Headers = Dict[Text, Text]
Verify = bool
Hook = List[Text]
Export = List[Text]
Validators = List[Dict]
Env = Dict[Text, Any]
class MethodEnum(Text, Enum):
    """HTTP request methods; mixes in Text so members compare equal to plain strings."""
    GET = "GET"
    POST = "POST"
    PUT = "PUT"
    DELETE = "DELETE"
    HEAD = "HEAD"
    OPTIONS = "OPTIONS"
    PATCH = "PATCH"
    CONNECT = "CONNECT"
    TRACE = "TRACE"
class TConfig(BaseModel):
    """Testcase-level config block."""
    name: Name
    verify: Verify = False
    base_url: BaseUrl = ""
    variables: VariablesMapping = {}
    setup_hooks: Hook = []
    teardown_hooks: Hook = []
    export: Export = []  # variable names to export after the testcase run
    path: Text = None  # testcase file path; used to locate project meta when set
class Request(BaseModel):
    """requests.Request model"""

    method: MethodEnum = MethodEnum.GET
    url: Url
    params: Dict[Text, Text] = {}
    headers: Headers = {}
    # stored as req_json but read from the external key "json" via the alias
    req_json: Dict = Field({}, alias="json")
    data: Union[Text, Dict[Text, Any]] = ""
    cookies: Dict[Text, Text] = {}
    timeout: int = 120  # seconds; NOTE(review): requests also accepts float timeouts — confirm int is intended
    allow_redirects: bool = True
    verify: Verify = False
    upload: Dict = {}  # used for upload files
class TStep(BaseModel):
    """One teststep: either an HTTP request or a reference to another testcase."""
    name: Name
    request: Request = None
    testcase: Text = ""  # path of a referenced testcase; empty when this step is a request
    variables: VariablesMapping = {}
    extract: Dict[Text, Text] = {}
    # populated from the external key "validate" via the alias
    validators: Validators = Field([], alias="validate")
class TestCase(BaseModel):
    """A parsed testcase: config plus its ordered teststeps."""
    config: TConfig
    teststeps: List[TStep]
class ProjectMeta(BaseModel):
    """Project-level artifacts loaded alongside testcases."""
    debugtalk_py: Text = ""  # debugtalk.py file content
    functions: FunctionsMapping = {}  # presumably the functions defined in debugtalk.py — confirm against loader
    env: Env = {}
    # NOTE(review): default evaluated once at class-definition time, not per instance
    PWD: Text = os.getcwd()
    test_path: Text = None  # run with specified test path
class TestsMapping(BaseModel):
    """Top-level container handed to the runner: project meta plus testcases."""

    project_meta: ProjectMeta
    testcases: List[TestCase]
class TestCaseTime(BaseModel):
    """Timing information for one testcase run."""

    start_at: float = 0  # start timestamp
    start_at_iso_format: Text = ""
    duration: float = 0  # elapsed time
class TestCaseInOut(BaseModel):
    """Testcase input variables and exported output values."""

    vars: VariablesMapping = {}
    export: Dict = {}
class RequestStat(BaseModel):
    """Statistics of a single HTTP request/response cycle."""

    content_size: float = 0  # response body size
    response_time_ms: float = 0
    elapsed_ms: float = 0
class RequestData(BaseModel):
    """Captured outgoing request details, recorded for reporting."""

    method: MethodEnum = MethodEnum.GET
    url: Url
    headers: Headers = {}
    # TODO: add cookies
    body: Union[Text, bytes, Dict, None] = {}
class ResponseData(BaseModel):
    """Captured incoming response details, recorded for reporting."""

    status_code: int
    cookies: Dict
    encoding: Union[Text, None] = None  # None when the response declares no encoding
    headers: Dict
    content_type: Text
    body: Union[Text, bytes, Dict]
class ReqRespData(BaseModel):
    """One request/response pair."""

    request: RequestData
    response: ResponseData
class SessionData(BaseModel):
    """request session data, including request, response, validators and stat data"""

    success: bool = False
    # in most cases, req_resps only contains one request & response
    # while when 30X redirect occurs, req_resps will contain multiple request & response
    req_resps: List[ReqRespData] = []
    stat: RequestStat = RequestStat()
    validators: Dict = {}  # validation results for this request
class StepData(BaseModel):
    """teststep data, each step maybe corresponding to one request or one testcase"""

    success: bool = False
    name: Text = ""  # teststep name
    # a request step yields one SessionData; a referenced testcase yields a list
    # fix: default is None, so None must be part of the Union
    data: Union[SessionData, List[SessionData], None] = None
    export: Dict = {}  # variables exported by this step
class TestCaseSummary(BaseModel):
    """Aggregated result of one testcase run."""

    name: Text = ""
    success: bool = False
    time: TestCaseTime
    # fix: default was a bare dict ({}); use a model instance so attribute
    # access works on the default, consistent with SessionData.stat and
    # TestSuiteSummary.stat/.time defaults
    in_out: TestCaseInOut = TestCaseInOut()
    log: Text = ""
    step_datas: List[StepData] = []
class PlatformInfo(BaseModel):
    """Runtime environment info recorded in test reports."""

    httprunner_version: Text
    python_version: Text
    platform: Text
class Stat(BaseModel):
    """Pass/fail counters for a testsuite run."""

    total: int = 0
    success: int = 0
    fail: int = 0
class TestSuiteSummary(BaseModel):
    """Aggregated result of a whole testsuite run."""

    success: bool = False
    stat: Stat = Stat()
    time: TestCaseTime = TestCaseTime()
    platform: PlatformInfo
    testcases: List[TestCaseSummary]

View File

@@ -1 +0,0 @@
from .testcase import ProjectMeta, TestCase, TestCases

View File

@@ -1,14 +0,0 @@
from pydantic import BaseModel
from httprunner.schema import common
class Api(BaseModel):
    """Schema of a single API definition (legacy format, removed in v3)."""

    name: common.Name
    request: common.Request
    variables: common.Variables
    base_url: common.BaseUrl
    setup_hooks: common.Hook
    teardown_hooks: common.Hook
    extract: common.Extract
    # NOTE(review): shadows pydantic's BaseModel.validate() classmethod
    validate: common.Validate

View File

@@ -1,61 +0,0 @@
from enum import Enum
from typing import Dict, List, Any, Tuple
from pydantic import BaseModel, HttpUrl, Field
# Shared type aliases for the legacy schema models (removed in v3).
Name = str
Url = HttpUrl
BaseUrl = str
Variables = Dict[str, Any]
Headers = Dict[str, str]
Verify = bool
Hook = List[str]
Export = List[str]
Extract = Dict[str, str]
Validate = List[Dict]
Env = Dict[str, Any]
class MethodEnum(str, Enum):
    """Enumeration of supported HTTP request methods (legacy schema).

    Mixes in str so members compare and serialize as plain strings.
    """

    GET = 'GET'
    POST = 'POST'
    PUT = "PUT"
    DELETE = "DELETE"
    HEAD = "HEAD"
    OPTIONS = "OPTIONS"
    PATCH = "PATCH"
    CONNECT = "CONNECT"
    TRACE = "TRACE"
class TestsConfig(BaseModel):
    """Common config block shared by testcases/testsuites (legacy schema)."""

    name: Name
    verify: Verify = False
    base_url: BaseUrl = ""
    variables: Variables = {}
    setup_hooks: Hook = []
    teardown_hooks: Hook = []
    export: Export = []

    class Config:
        # example embedded into the generated JSON schema
        schema_extra = {
            "examples": [
                {
                    "name": "used in testcase/testsuite to configure common fields",
                    "verify": False,
                    "base_url": "https://httpbin.org"
                }
            ]
        }
class Request(BaseModel):
method: MethodEnum = MethodEnum.GET
url: Url
params: Dict[str, str] = {}
headers: Headers = {}
req_json: Dict = Field({}, alias="json")
cookies: Dict[str, str] = {}
timeout: int = 120
allow_redirects: bool = True
verify: Verify = False

Some files were not shown because too many files have changed in this diff Show More