mirror of
https://github.com/httprunner/httprunner.git
synced 2026-05-12 02:21:29 +08:00
refactor: split validate from context, move to validator
This commit is contained in:
@@ -277,9 +277,9 @@ class HttpRunner(object):
|
||||
|
||||
"""
|
||||
logger.log_info("HttpRunner version: {}".format(__version__))
|
||||
if validator.is_testcase_path(path_or_tests):
|
||||
if loader.is_testcase_path(path_or_tests):
|
||||
return self.run_path(path_or_tests, dot_env_path, mapping)
|
||||
elif validator.is_testcases(path_or_tests):
|
||||
elif loader.is_testcases(path_or_tests):
|
||||
return self.run_tests(path_or_tests)
|
||||
else:
|
||||
raise exceptions.ParamsError("Invalid testcase path or testcases: {}".format(path_or_tests))
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from httprunner import exceptions, logger, parser, utils
|
||||
from httprunner import parser, utils
|
||||
|
||||
|
||||
class SessionContext(object):
|
||||
@@ -19,7 +19,6 @@ class SessionContext(object):
|
||||
self.session_variables_mapping = parser.parse_variables_mapping(variables_mapping)
|
||||
self.test_variables_mapping = {}
|
||||
self.init_test_variables()
|
||||
self.validation_results = {}
|
||||
|
||||
def init_test_variables(self, variables_mapping=None):
|
||||
""" init test variables, called when each test(api) starts.
|
||||
@@ -63,175 +62,3 @@ class SessionContext(object):
|
||||
content may be in any data structure, include dict, list, tuple, number, string, etc.
|
||||
"""
|
||||
return parser.parse_lazy_data(content, self.test_variables_mapping)
|
||||
|
||||
def __eval_validator_check(self, check_item, resp_obj):
    """ Resolve the "check" side of a validator to its concrete value.

    Accepted forms of check_item:
        - variable reference, e.g. $token
        - function reference, e.g. ${is_status_code_200($status_code)}
        - dict/list possibly containing variable/function references
        - delimiter-joined field path, e.g. "status_code", "headers.content-type"
        - regex string, e.g. "LB[\d]*(.*)RB[\d]*"

    Args:
        check_item: item to resolve, in one of the forms above.
        resp_obj: response object providing extract_field().

    Returns:
        The resolved check value.
    """
    needs_lazy_eval = isinstance(check_item, (dict, list, parser.LazyString))
    if needs_lazy_eval:
        # variable/function reference or nested container:
        # evaluate against the current test variables.
        return self.eval_content(check_item)
    # plain field path or regex: extract directly from the response.
    return resp_obj.extract_field(check_item)
|
||||
|
||||
def __eval_validator_expect(self, expect_item):
    """ Resolve the "expect" side of a validator to its concrete value.

    Args:
        expect_item: either a variable reference (e.g. $expect_status_code)
            or an already-concrete value (e.g. 200).

    Returns:
        The resolved expected value.
    """
    # eval_content is a no-op for plain values and resolves references.
    return self.eval_content(expect_item)
|
||||
|
||||
def validate(self, validators, resp_obj):
    """ make validation with comparators

    Args:
        validators (list): each item is either a parsed
            parser.LazyFunction comparator, or a dict with
            type == "python_script" carrying inline validation code.
        resp_obj: response object; check values are extracted from it.

    Raises:
        exceptions.ValidationFailure: if any validator fails, or if a
            comparator validator has not been parsed into a LazyFunction.
    """
    # reset results for this teststep; consumers read this attribute
    # after validate() returns (or raises).
    self.validation_results = {}
    if not validators:
        return

    logger.log_debug("start to validate.")

    # collect every failure before raising, so all validators run.
    validate_pass = True
    failures = []

    for validator in validators:

        # inline python-script validator: delegate and record separately.
        if isinstance(validator, dict) and validator.get("type") == "python_script":
            validator_dict, ex = self.validate_script(validator["script"], resp_obj)
            if ex:
                validate_pass = False
                failures.append(ex)

            self.validation_results["validate_script"] = validator_dict
            continue

        if "validate_extractor" not in self.validation_results:
            self.validation_results["validate_extractor"] = []

        # validator should be LazyFunction object
        if not isinstance(validator, parser.LazyFunction):
            raise exceptions.ValidationFailure(
                "validator should be parsed first: {}".format(validators))

        # evaluate validator args with context variable mapping.
        validator_args = validator.get_args()
        check_item, expect_item = validator_args
        check_value = self.__eval_validator_check(
            check_item,
            resp_obj
        )
        expect_value = self.__eval_validator_expect(expect_item)
        # temporarily replace the lazy args with resolved values so
        # to_value() runs the comparator on concrete data.
        validator.update_args([check_value, expect_value])

        comparator = validator.func_name
        validator_dict = {
            "comparator": comparator,
            "check": check_item,
            "check_value": check_value,
            "expect": expect_item,
            "expect_value": expect_value
        }
        validate_msg = "\nvalidate: {} {} {}({})".format(
            check_item,
            comparator,
            expect_value,
            type(expect_value).__name__
        )

        try:
            # comparator functions raise AssertionError/TypeError on mismatch.
            validator.to_value(self.test_variables_mapping)
            validator_dict["check_result"] = "pass"
            validate_msg += "\t==> pass"
            logger.log_debug(validate_msg)
        except (AssertionError, TypeError):
            validate_pass = False
            validator_dict["check_result"] = "fail"
            validate_msg += "\t==> fail"
            validate_msg += "\n{}({}) {} {}({})".format(
                check_value,
                type(check_value).__name__,
                comparator,
                expect_value,
                type(expect_value).__name__
            )
            logger.log_error(validate_msg)
            failures.append(validate_msg)

        self.validation_results["validate_extractor"].append(validator_dict)

        # restore validator args, in case of running multiple times
        validator.update_args(validator_args)

    if not validate_pass:
        failures_string = "\n".join([failure for failure in failures])
        raise exceptions.ValidationFailure(failures_string)
|
||||
|
||||
def validate_script(self, script, resp_obj):
    """ make validation with python script

    Args:
        script (list of str): lines of user-supplied validation code;
            executed with status_code/response_json/response plus the
            current test variables in scope.
        resp_obj: response object exposed to the script.

    Returns:
        tuple: (validator_dict, error_message); error_message is ""
            when the script ran without raising.
    """
    validator_dict = {
        "validate_script": "<br/>".join(script),
        "check_result": "fail",
        "exception": ""
    }

    # NOTE(review): the scrape collapsed whitespace here; indentation
    # reconstructed — joined lines must sit 4 spaces deep inside "try:".
    script = "\n    ".join(script)
    # wrap the user script so any exception is re-raised with the
    # failing line number/content relative to the user's script.
    code = """
# encoding: utf-8

try:
    {}
except Exception as ex:
    import traceback
    import sys
    _type, _value, _tb = sys.exc_info()
    # filename, lineno, name, line
    _, _lineno, _, line_content = traceback.extract_tb(_tb, 1)[0]

    # 4 wrapper lines precede the user script in this template.
    line_no = _lineno - 4

    c_exception = _type.__name__ + "\\n"
    c_exception += "\\tError line number: " + str(line_no) + "\\n"
    c_exception += "\\tError line content: " + str(line_content) + "\\n"

    if _value.args:
        c_exception += "\\tError description: " + str(_value)
    else:
        c_exception += "\\tError description: " + _type.__name__

    raise _type(c_exception)
""".format(script)
    # names the user script may reference directly.
    variables = {
        "status_code": resp_obj.status_code,
        "response_json": resp_obj.json,
        "response": resp_obj
    }
    variables.update(self.test_variables_mapping)

    try:
        # SECURITY: exec of user-authored test code — trusted by design,
        # but never feed untrusted input through here.
        code = compile(code, '<string>', 'exec')
        exec(code, variables)
        validator_dict["check_result"] = "pass"
        return validator_dict, ""
    except Exception as ex:
        validator_dict["check_result"] = "fail"
        validator_dict["exception"] = "<br/>".join(str(ex).splitlines())
        return validator_dict, str(ex)
|
||||
|
||||
@@ -5,6 +5,7 @@ from unittest.case import SkipTest
|
||||
from httprunner import exceptions, logger, response, utils
|
||||
from httprunner.client import HttpSession
|
||||
from httprunner.context import SessionContext
|
||||
from httprunner.validator import Validator
|
||||
|
||||
|
||||
class Runner(object):
|
||||
@@ -87,16 +88,6 @@ class Runner(object):
|
||||
|
||||
self.http_client_session.init_meta_data()
|
||||
|
||||
def __get_test_data(self):
    """ Collect request/response meta data together with the
    validation results of the current teststep.

    Returns:
        dict or None: meta data with a "validators" entry added, or
            None when the client session is not an HttpSession.
    """
    session = self.http_client_session
    if not isinstance(session, HttpSession):
        # nothing to report for non-HTTP steps
        return

    meta_data = session.meta_data
    meta_data["validators"] = self.session_context.validation_results
    return meta_data
|
||||
|
||||
def _handle_skip_feature(self, test_dict):
|
||||
""" handle skip feature for test
|
||||
- skip: skip current test unconditionally
|
||||
@@ -267,7 +258,6 @@ class Runner(object):
|
||||
self.session_context.update_session_variables(extracted_variables_mapping)
|
||||
|
||||
# validate
|
||||
# TODO: split validate from context
|
||||
validators = test_dict.get("validate") or test_dict.get("validators") or []
|
||||
validate_script = test_dict.get("validate_script", [])
|
||||
if validate_script:
|
||||
@@ -276,8 +266,9 @@ class Runner(object):
|
||||
"script": validate_script
|
||||
})
|
||||
|
||||
validator = Validator(self.session_context, resp_obj)
|
||||
try:
|
||||
self.session_context.validate(validators, resp_obj)
|
||||
validator.validate(validators)
|
||||
except (exceptions.ParamsError,
|
||||
exceptions.ValidationFailure, exceptions.ExtractFailure):
|
||||
err_msg = "{} DETAILED REQUEST & RESPONSE {}\n".format("*" * 32, "*" * 32)
|
||||
@@ -302,6 +293,11 @@ class Runner(object):
|
||||
|
||||
raise
|
||||
|
||||
finally:
|
||||
# get request/response data and validate results
|
||||
self.meta_datas = getattr(self.http_client_session, "meta_data", {})
|
||||
self.meta_datas["validators"] = validator.validation_results
|
||||
|
||||
def _run_testcase(self, testcase_dict):
|
||||
""" run single testcase.
|
||||
"""
|
||||
@@ -385,8 +381,6 @@ class Runner(object):
|
||||
self.exception_request_type = test_dict["request"]["method"]
|
||||
self.exception_name = test_dict.get("name")
|
||||
raise
|
||||
finally:
|
||||
self.meta_datas = self.__get_test_data()
|
||||
|
||||
def export_variables(self, output_variables_list):
|
||||
""" export current testcase variables
|
||||
@@ -397,8 +391,8 @@ class Runner(object):
|
||||
for variable in output_variables_list:
|
||||
if variable not in variables_mapping:
|
||||
logger.log_warning(
|
||||
"variable '{}' can not be found in variables mapping, failed to export!"\
|
||||
.format(variable)
|
||||
"variable '{}' can not be found in variables mapping, "
|
||||
"failed to export!".format(variable)
|
||||
)
|
||||
continue
|
||||
|
||||
|
||||
@@ -2,136 +2,9 @@
|
||||
import collections
|
||||
import io
|
||||
import json
|
||||
import os
|
||||
import types
|
||||
|
||||
from httprunner import exceptions, logger
|
||||
|
||||
""" validate data format
|
||||
TODO: refactor with JSON schema validate
|
||||
"""
|
||||
|
||||
|
||||
def is_testcase(data_structure):
    """ Tell whether data_structure looks like a single testcase.

    A testcase is a dict shaped like::

        {
            "config": {"name": "...", "variables": [], "request": {}},
            "teststeps": [test_dict1, test_dict2, ...]
        }

    Only the presence and type of "teststeps" is checked here.

    Args:
        data_structure (dict): candidate testcase.

    Returns:
        bool: True if data_structure is a dict whose "teststeps"
            entry is a list, otherwise False.
    """
    # TODO: replace with JSON schema validation
    return (
        isinstance(data_structure, dict)
        and isinstance(data_structure.get("teststeps"), list)
    )
|
||||
|
||||
|
||||
def is_testcases(data_structure):
    """ Tell whether data_structure is a bundle of testcases.

    Expected shape::

        {
            "project_mapping": {"PWD": "...", "functions": {}, "env": {}},
            "testcases": [testcase_dict1, testcase_dict2, ...]
        }

    Args:
        data_structure (dict): candidate testcases bundle.

    Returns:
        bool: True if data_structure is a dict whose "testcases"
            entry is a list of valid testcases, otherwise False.
    """
    if not isinstance(data_structure, dict):
        return False

    if "testcases" not in data_structure:
        return False

    candidates = data_structure["testcases"]
    if not isinstance(candidates, list):
        return False

    # every entry must itself be a valid testcase.
    return all(is_testcase(candidate) for candidate in candidates)
|
||||
|
||||
|
||||
def is_testcase_path(path):
    """ Tell whether path is an existing file-system path, or a list
    in which every entry is.

    Args:
        path (str/list): file path or list of file paths.

    Returns:
        bool: True for an existing path (or a list of them, including
            the empty list), False for anything else.
    """
    if isinstance(path, list):
        # recurse: a list qualifies only if every element does.
        return all(is_testcase_path(each) for each in path)

    if isinstance(path, str):
        return os.path.exists(path)

    # neither a string nor a list
    return False
|
||||
from httprunner import exceptions, logger, parser
|
||||
|
||||
|
||||
###############################################################################
|
||||
@@ -345,3 +218,191 @@ def validate_json_file(file_list):
|
||||
raise SystemExit(e)
|
||||
|
||||
print("OK")
|
||||
|
||||
|
||||
class Validator(object):
    """Validate tests

    Runs the "validate" items of one teststep against the captured
    response, supporting comparator validators (parser.LazyFunction)
    and inline "python_script" validators.

    Attributes:
        validation_results (dict): store validation results,
            including validate_extractor and validate_script.

    """

    def __init__(self, session_context, resp_obj):
        """ initialize a Validator for each teststep (API request)

        Args:
            session_context: HttpRunner session context
            resp_obj: ResponseObject instance
        """
        self.session_context = session_context
        self.resp_obj = resp_obj
        # reset per teststep; read by the runner after validate() runs.
        self.validation_results = {}

    def __eval_validator_check(self, check_item):
        """ evaluate check item in validator.

        Args:
            check_item: check_item should only be the following 5 formats:
                1, variable reference, e.g. $token
                2, function reference, e.g. ${is_status_code_200($status_code)}
                3, dict or list, maybe containing variable/function reference, e.g. {"var": "$abc"}
                4, string joined by delimiter. e.g. "status_code", "headers.content-type"
                5, regex string, e.g. "LB[\d]*(.*)RB[\d]*"

        Returns:
            The resolved check value.
        """
        if isinstance(check_item, (dict, list)) \
                or isinstance(check_item, parser.LazyString):
            # format 1/2/3: resolve references via the session context
            check_value = self.session_context.eval_content(check_item)
        else:
            # format 4/5: extract the field straight from the response
            check_value = self.resp_obj.extract_field(check_item)

        return check_value

    def __eval_validator_expect(self, expect_item):
        """ evaluate expect item in validator.

        Args:
            expect_item: expect_item should only be in 2 types:
                1, variable reference, e.g. $expect_status_code
                2, actual value, e.g. 200

        Returns:
            The resolved expected value.
        """
        expect_value = self.session_context.eval_content(expect_item)
        return expect_value

    def validate_script(self, script):
        """ make validation with python script

        Args:
            script (list of str): lines of user validation code; run
                with status_code/response_json/response plus the
                current test variables in scope.

        Returns:
            tuple: (validator_dict, error_message); error_message is
                "" when the script ran without raising.
        """
        validator_dict = {
            "validate_script": "<br/>".join(script),
            "check_result": "fail",
            "exception": ""
        }

        # NOTE(review): whitespace reconstructed from a collapsed scrape;
        # joined lines must sit 4 spaces deep inside the "try:" below.
        script = "\n    ".join(script)
        # wrap the user script so any exception is re-raised carrying the
        # failing line number/content relative to the user's own script.
        code = """
# encoding: utf-8

try:
    {}
except Exception as ex:
    import traceback
    import sys
    _type, _value, _tb = sys.exc_info()
    # filename, lineno, name, line
    _, _lineno, _, line_content = traceback.extract_tb(_tb, 1)[0]

    # 4 wrapper lines precede the user script in this template.
    line_no = _lineno - 4

    c_exception = _type.__name__ + "\\n"
    c_exception += "\\tError line number: " + str(line_no) + "\\n"
    c_exception += "\\tError line content: " + str(line_content) + "\\n"

    if _value.args:
        c_exception += "\\tError description: " + str(_value)
    else:
        c_exception += "\\tError description: " + _type.__name__

    raise _type(c_exception)
""".format(script)
        # names the user script may reference directly.
        variables = {
            "status_code": self.resp_obj.status_code,
            "response_json": self.resp_obj.json,
            "response": self.resp_obj
        }
        variables.update(self.session_context.test_variables_mapping)

        try:
            # SECURITY: exec of user-authored test code — trusted by
            # design; never route untrusted input through here.
            code = compile(code, '<string>', 'exec')
            exec(code, variables)
            validator_dict["check_result"] = "pass"
            return validator_dict, ""
        except Exception as ex:
            validator_dict["check_result"] = "fail"
            validator_dict["exception"] = "<br/>".join(str(ex).splitlines())
            return validator_dict, str(ex)

    def validate(self, validators):
        """ make validation with comparators

        Args:
            validators (list): parsed LazyFunction comparators and/or
                dicts with type == "python_script".

        Raises:
            exceptions.ValidationFailure: if any validator fails, or
                a comparator has not been parsed into a LazyFunction.
        """
        # reset results for this run; all failures are collected before
        # raising so every validator executes.
        self.validation_results = {}
        if not validators:
            return

        logger.log_debug("start to validate.")

        validate_pass = True
        failures = []

        for validator in validators:

            # inline python-script validator: delegate and record apart.
            if isinstance(validator, dict) and validator.get("type") == "python_script":
                validator_dict, ex = self.validate_script(validator["script"])
                if ex:
                    validate_pass = False
                    failures.append(ex)

                self.validation_results["validate_script"] = validator_dict
                continue

            if "validate_extractor" not in self.validation_results:
                self.validation_results["validate_extractor"] = []

            # validator should be LazyFunction object
            if not isinstance(validator, parser.LazyFunction):
                raise exceptions.ValidationFailure(
                    "validator should be parsed first: {}".format(validators))

            # evaluate validator args with context variable mapping.
            validator_args = validator.get_args()
            check_item, expect_item = validator_args
            check_value = self.__eval_validator_check(check_item)
            expect_value = self.__eval_validator_expect(expect_item)
            # temporarily swap in resolved values so to_value() compares
            # concrete data.
            validator.update_args([check_value, expect_value])

            comparator = validator.func_name
            validator_dict = {
                "comparator": comparator,
                "check": check_item,
                "check_value": check_value,
                "expect": expect_item,
                "expect_value": expect_value
            }
            validate_msg = "\nvalidate: {} {} {}({})".format(
                check_item,
                comparator,
                expect_value,
                type(expect_value).__name__
            )

            try:
                # comparator raises AssertionError/TypeError on mismatch.
                validator.to_value(self.session_context.test_variables_mapping)
                validator_dict["check_result"] = "pass"
                validate_msg += "\t==> pass"
                logger.log_debug(validate_msg)
            except (AssertionError, TypeError):
                validate_pass = False
                validator_dict["check_result"] = "fail"
                validate_msg += "\t==> fail"
                validate_msg += "\n{}({}) {} {}({})".format(
                    check_value,
                    type(check_value).__name__,
                    comparator,
                    expect_value,
                    type(expect_value).__name__
                )
                logger.log_error(validate_msg)
                failures.append(validate_msg)

            self.validation_results["validate_extractor"].append(validator_dict)

            # restore validator args, in case of running multiple times
            validator.update_args(validator_args)

        if not validate_pass:
            failures_string = "\n".join([failure for failure in failures])
            raise exceptions.ValidationFailure(failures_string)
|
||||
|
||||
Reference in New Issue
Block a user