diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 9fcb2272..04570db4 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -1,5 +1,21 @@
# Release History
+## 2.4.0 (2019-12-04)
+
+**Added**
+
+- feat: validate with python script, ref #773
+
+## 2.3.3 (2019-12-04)
+
+**Fixed**
+
+- fix #768: dump json file path error when folder name contains dot, such as `a.b.c`
+
+**Changed**
+
+- change: rename builtin function, sleep_N_secs => sleep
+
## 2.3.2 (2019-11-01)
**Added**
diff --git a/httprunner/__init__.py b/httprunner/__init__.py
index 5b67afa6..cb8ce2a1 100644
--- a/httprunner/__init__.py
+++ b/httprunner/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.3.2"
+__version__ = "2.4.0"
__description__ = "One-stop solution for HTTP(S) testing."
__all__ = ["__version__", "__description__"]
diff --git a/httprunner/api.py b/httprunner/api.py
index 8608a189..04ae785a 100644
--- a/httprunner/api.py
+++ b/httprunner/api.py
@@ -258,7 +258,6 @@ class HttpRunner(object):
# load tests
self.exception_stage = "load tests"
tests_mapping = loader.load_tests(path, dot_env_path)
- tests_mapping["project_mapping"]["test_path"] = path
if mapping:
tests_mapping["project_mapping"]["variables"] = mapping
@@ -278,9 +277,9 @@ class HttpRunner(object):
"""
logger.log_info("HttpRunner version: {}".format(__version__))
- if validator.is_testcase_path(path_or_tests):
+ if loader.is_testcase_path(path_or_tests):
return self.run_path(path_or_tests, dot_env_path, mapping)
- elif validator.is_testcases(path_or_tests):
+ elif loader.is_testcases(path_or_tests):
return self.run_tests(path_or_tests)
else:
raise exceptions.ParamsError("Invalid testcase path or testcases: {}".format(path_or_tests))
diff --git a/httprunner/cli.py b/httprunner/cli.py
index 389fdc80..19a163f5 100644
--- a/httprunner/cli.py
+++ b/httprunner/cli.py
@@ -5,11 +5,11 @@ import sys
from httprunner import __description__, __version__
from httprunner.api import HttpRunner
from httprunner.compat import is_py2
+from httprunner.loader import validate_json_file
from httprunner.logger import color_print
from httprunner.report import gen_html_report
from httprunner.utils import (create_scaffold, get_python2_retire_msg,
prettify_json_file)
-from httprunner.validator import validate_json_file
def main():
diff --git a/httprunner/client.py b/httprunner/client.py
index bc7cd07e..0a2af89e 100644
--- a/httprunner/client.py
+++ b/httprunner/client.py
@@ -8,7 +8,7 @@ from requests import Request, Response
from requests.exceptions import (InvalidSchema, InvalidURL, MissingSchema,
RequestException)
-from httprunner import logger
+from httprunner import logger, response
from httprunner.utils import lower_dict_keys, omit_long_data
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@@ -115,7 +115,10 @@ class HttpSession(requests.Session):
else:
try:
# try to record json data
- req_resp_dict["response"]["json"] = resp_obj.json()
+ if isinstance(resp_obj, response.ResponseObject):
+ req_resp_dict["response"]["json"] = resp_obj.json
+ else:
+ req_resp_dict["response"]["json"] = resp_obj.json()
except ValueError:
# only record at most 512 text charactors
resp_text = resp_obj.text
@@ -126,6 +129,13 @@ class HttpSession(requests.Session):
return req_resp_dict
+ def update_last_req_resp_record(self, resp_obj):
+ """
+ update request and response info from Response() object.
+ """
+ self.meta_data["data"].pop()
+ self.meta_data["data"].append(self.get_req_resp_record(resp_obj))
+
def request(self, method, url, name=None, **kwargs):
"""
Constructs and sends a :py:class:`requests.Request`.
diff --git a/httprunner/context.py b/httprunner/context.py
index 980f44a9..c08af242 100644
--- a/httprunner/context.py
+++ b/httprunner/context.py
@@ -1,4 +1,4 @@
-from httprunner import exceptions, logger, parser, utils
+from httprunner import parser, utils
class SessionContext(object):
@@ -13,11 +13,12 @@ class SessionContext(object):
>>> context.update_session_variables(variables)
"""
+
def __init__(self, variables=None):
variables_mapping = utils.ensure_mapping_format(variables or {})
self.session_variables_mapping = parser.parse_variables_mapping(variables_mapping)
+ self.test_variables_mapping = {}
self.init_test_variables()
- self.validation_results = []
def init_test_variables(self, variables_mapping=None):
""" init test variables, called when each test(api) starts.
@@ -61,110 +62,3 @@ class SessionContext(object):
content may be in any data structure, include dict, list, tuple, number, string, etc.
"""
return parser.parse_lazy_data(content, self.test_variables_mapping)
-
- def __eval_validator_check(self, check_item, resp_obj):
- """ evaluate check item in validator.
-
- Args:
- check_item: check_item should only be the following 5 formats:
- 1, variable reference, e.g. $token
- 2, function reference, e.g. ${is_status_code_200($status_code)}
- 3, dict or list, maybe containing variable/function reference, e.g. {"var": "$abc"}
- 4, string joined by delimiter. e.g. "status_code", "headers.content-type"
- 5, regex string, e.g. "LB[\d]*(.*)RB[\d]*"
-
- resp_obj: response object
-
- """
- if isinstance(check_item, (dict, list)) \
- or isinstance(check_item, parser.LazyString):
- # format 1/2/3
- check_value = self.eval_content(check_item)
- else:
- # format 4/5
- check_value = resp_obj.extract_field(check_item)
-
- return check_value
-
- def __eval_validator_expect(self, expect_item):
- """ evaluate expect item in validator.
-
- Args:
- expect_item: expect_item should only be in 2 types:
- 1, variable reference, e.g. $expect_status_code
- 2, actual value, e.g. 200
-
- """
- expect_value = self.eval_content(expect_item)
- return expect_value
-
- def validate(self, validators, resp_obj):
- """ make validation with comparators
- """
- self.validation_results = []
- if not validators:
- return
-
- logger.log_debug("start to validate.")
-
- validate_pass = True
- failures = []
-
- for validator in validators:
- # validator should be LazyFunction object
- if not isinstance(validator, parser.LazyFunction):
- raise exceptions.ValidationFailure(
- "validator should be parsed first: {}".format(validators))
-
- # evaluate validator args with context variable mapping.
- validator_args = validator.get_args()
- check_item, expect_item = validator_args
- check_value = self.__eval_validator_check(
- check_item,
- resp_obj
- )
- expect_value = self.__eval_validator_expect(expect_item)
- validator.update_args([check_value, expect_value])
-
- comparator = validator.func_name
- validator_dict = {
- "comparator": comparator,
- "check": check_item,
- "check_value": check_value,
- "expect": expect_item,
- "expect_value": expect_value
- }
- validate_msg = "\nvalidate: {} {} {}({})".format(
- check_item,
- comparator,
- expect_value,
- type(expect_value).__name__
- )
-
- try:
- validator.to_value(self.test_variables_mapping)
- validator_dict["check_result"] = "pass"
- validate_msg += "\t==> pass"
- logger.log_debug(validate_msg)
- except (AssertionError, TypeError):
- validate_pass = False
- validator_dict["check_result"] = "fail"
- validate_msg += "\t==> fail"
- validate_msg += "\n{}({}) {} {}({})".format(
- check_value,
- type(check_value).__name__,
- comparator,
- expect_value,
- type(expect_value).__name__
- )
- logger.log_error(validate_msg)
- failures.append(validate_msg)
-
- self.validation_results.append(validator_dict)
-
- # restore validator args, in case of running multiple times
- validator.update_args(validator_args)
-
- if not validate_pass:
- failures_string = "\n".join([failure for failure in failures])
- raise exceptions.ValidationFailure(failures_string)
diff --git a/httprunner/loader.py b/httprunner/loader.py
index b4735ad1..428d318a 100644
--- a/httprunner/loader.py
+++ b/httprunner/loader.py
@@ -4,6 +4,7 @@ import io
import json
import os
import sys
+import types
import yaml
@@ -17,6 +18,149 @@ except AttributeError:
pass
+# TODO: validate data format with JSON schema
+
+def is_testcase(data_structure):
+ """ check if data_structure is a testcase.
+
+ Args:
+ data_structure (dict): testcase should always be in the following data structure:
+
+ {
+ "config": {
+ "name": "desc1",
+ "variables": [], # optional
+ "request": {} # optional
+ },
+ "teststeps": [
+ test_dict1,
+ { # test_dict2
+ 'name': 'test step desc2',
+ 'variables': [], # optional
+ 'extract': [], # optional
+ 'validate': [],
+ 'request': {},
+ 'function_meta': {}
+ }
+ ]
+ }
+
+ Returns:
+ bool: True if data_structure is valid testcase, otherwise False.
+
+ """
+ # TODO: replace with JSON schema validation
+ if not isinstance(data_structure, dict):
+ return False
+
+ if "teststeps" not in data_structure:
+ return False
+
+ if not isinstance(data_structure["teststeps"], list):
+ return False
+
+ return True
+
+
+def is_testcases(data_structure):
+ """ check if data_structure is testcase or testcases list.
+
+ Args:
+ data_structure (dict): testcase(s) should always be in the following data structure:
+ {
+ "project_mapping": {
+ "PWD": "XXXXX",
+ "functions": {},
+ "env": {}
+ },
+ "testcases": [
+ { # testcase data structure
+ "config": {
+ "name": "desc1",
+ "path": "testcase1_path",
+ "variables": [], # optional
+ },
+ "teststeps": [
+ # test data structure
+ {
+ 'name': 'test step desc1',
+ 'variables': [], # optional
+ 'extract': [], # optional
+ 'validate': [],
+ 'request': {}
+ },
+ test_dict_2 # another test dict
+ ]
+ },
+ testcase_dict_2 # another testcase dict
+ ]
+ }
+
+ Returns:
+ bool: True if data_structure is valid testcase(s), otherwise False.
+
+ """
+ if not isinstance(data_structure, dict):
+ return False
+
+ if "testcases" not in data_structure:
+ return False
+
+ testcases = data_structure["testcases"]
+ if not isinstance(testcases, list):
+ return False
+
+ for item in testcases:
+ if not is_testcase(item):
+ return False
+
+ return True
+
+
+def is_testcase_path(path):
+ """ check if path is testcase path or path list.
+
+ Args:
+ path (str/list): file path or file path list.
+
+ Returns:
+ bool: True if path is valid file path or path list, otherwise False.
+
+ """
+ if not isinstance(path, (str, list)):
+ return False
+
+ if isinstance(path, list):
+ for p in path:
+ if not is_testcase_path(p):
+ return False
+
+ if isinstance(path, str):
+ if not os.path.exists(path):
+ return False
+
+ return True
+
+
+def validate_json_file(file_list):
+ """ validate JSON testcase format
+ """
+ for json_file in set(file_list):
+ if not json_file.endswith(".json"):
+ logger.log_warning("Only JSON file format can be validated, skip: {}".format(json_file))
+ continue
+
+ logger.color_print("Start to validate JSON file: {}".format(json_file), "GREEN")
+
+ with io.open(json_file) as stream:
+ try:
+ json.load(stream)
+ except ValueError as e:
+ raise SystemExit(e)
+
+ print("OK")
+
+
###############################################################################
# file loader
###############################################################################
@@ -244,6 +388,31 @@ def locate_file(start_path, file_name):
###############################################################################
+def is_function(item):
+ """ Takes item object, returns True if it is a function.
+ """
+ return isinstance(item, types.FunctionType)
+
+
+def is_variable(tup):
+ """ Takes (name, object) tuple, returns True if it is a variable.
+ """
+ name, item = tup
+ if callable(item):
+ # function or class
+ return False
+
+ if isinstance(item, types.ModuleType):
+ # imported module
+ return False
+
+ if name.startswith("_"):
+ # private property
+ return False
+
+ return True
+
+
def load_module_functions(module):
""" load python module functions.
@@ -262,7 +431,7 @@ def load_module_functions(module):
module_functions = {}
for name, item in vars(module).items():
- if validator.is_function(item):
+ if is_function(item):
module_functions[name] = item
return module_functions
@@ -724,7 +893,7 @@ def load_api_folder(api_folder_path):
for api_item in api_items:
key, api_dict = api_item.popitem()
api_id = api_dict.get("id") or api_dict.get("def") \
- or api_dict.get("name")
+ or api_dict.get("name")
if key != "api" or not api_id:
raise exceptions.ParamsError(
"Invalid API defined in {}".format(api_file_path))
@@ -779,6 +948,19 @@ def load_project_tests(test_path, dot_env_path=None):
environments and debugtalk.py functions.
"""
+
+ def prepare_path(path):
+ if not os.path.exists(path):
+ err_msg = "path not exist: {}".format(path)
+ logger.log_error(err_msg)
+ raise exceptions.FileNotFound(err_msg)
+
+ if not os.path.isabs(path):
+ path = os.path.join(os.getcwd(), path)
+
+ return path
+
+ test_path = prepare_path(test_path)
# locate debugtalk.py file
debugtalk_path = locate_debugtalk_py(test_path)
@@ -810,6 +992,7 @@ def load_project_tests(test_path, dot_env_path=None):
project_mapping["PWD"] = project_working_directory
built_in.PWD = project_working_directory
project_mapping["functions"] = debugtalk_functions
+ project_mapping["test_path"] = test_path
# load api
tests_def_mapping["api"] = load_api_folder(os.path.join(project_working_directory, "api"))
@@ -869,14 +1052,6 @@ def load_tests(path, dot_env_path=None):
}
"""
- if not os.path.exists(path):
- err_msg = "path not exist: {}".format(path)
- logger.log_error(err_msg)
- raise exceptions.FileNotFound(err_msg)
-
- if not os.path.isabs(path):
- path = os.path.join(os.getcwd(), path)
-
load_project_tests(path, dot_env_path)
tests_mapping = {
"project_mapping": project_mapping
diff --git a/httprunner/runner.py b/httprunner/runner.py
index 7fe68b4f..6aa41fe9 100644
--- a/httprunner/runner.py
+++ b/httprunner/runner.py
@@ -5,6 +5,7 @@ from unittest.case import SkipTest
from httprunner import exceptions, logger, response, utils
from httprunner.client import HttpSession
from httprunner.context import SessionContext
+from httprunner.validator import Validator
class Runner(object):
@@ -62,7 +63,6 @@ class Runner(object):
"""
self.verify = config.get("verify", True)
self.export = config.get("export") or config.get("output", [])
- self.validation_results = []
config_variables = config.get("variables", {})
# testcase setup hooks
@@ -86,19 +86,8 @@ class Runner(object):
if not isinstance(self.http_client_session, HttpSession):
return
- self.validation_results = []
self.http_client_session.init_meta_data()
- def __get_test_data(self):
- """ get request/response data and validate results
- """
- if not isinstance(self.http_client_session, HttpSession):
- return
-
- meta_data = self.http_client_session.meta_data
- meta_data["validators"] = self.validation_results
- return meta_data
-
def _handle_skip_feature(self, test_dict):
""" handle skip feature for test
- skip: skip current test unconditionally
@@ -244,7 +233,8 @@ class Runner(object):
raise exceptions.ParamsError(err_msg)
logger.log_info("{method} {url}".format(method=method, url=parsed_url))
- logger.log_debug("request kwargs(raw): {kwargs}".format(kwargs=parsed_test_request))
+ logger.log_debug(
+ "request kwargs(raw): {kwargs}".format(kwargs=parsed_test_request))
# request
resp = self.http_client_session.request(
@@ -260,6 +250,7 @@ class Runner(object):
if teardown_hooks:
self.session_context.update_test_variables("response", resp_obj)
self.do_hook_actions(teardown_hooks, "teardown")
+ self.http_client_session.update_last_req_resp_record(resp_obj)
# extract
extractors = test_dict.get("extract", {})
@@ -268,9 +259,18 @@ class Runner(object):
# validate
validators = test_dict.get("validate") or test_dict.get("validators") or []
+ validate_script = test_dict.get("validate_script", [])
+ if validate_script:
+ validators.append({
+ "type": "python_script",
+ "script": validate_script
+ })
+
+ validator = Validator(self.session_context, resp_obj)
try:
- self.session_context.validate(validators, resp_obj)
- except (exceptions.ParamsError, exceptions.ValidationFailure, exceptions.ExtractFailure):
+ validator.validate(validators)
+ except (exceptions.ParamsError,
+ exceptions.ValidationFailure, exceptions.ExtractFailure):
err_msg = "{} DETAILED REQUEST & RESPONSE {}\n".format("*" * 32, "*" * 32)
# log request
@@ -294,7 +294,9 @@ class Runner(object):
raise
finally:
- self.validation_results = self.session_context.validation_results
+ # get request/response data and validate results
+ self.meta_datas = getattr(self.http_client_session, "meta_data", {})
+ self.meta_datas["validators"] = validator.validation_results
def _run_testcase(self, testcase_dict):
""" run single testcase.
@@ -379,8 +381,6 @@ class Runner(object):
self.exception_request_type = test_dict["request"]["method"]
self.exception_name = test_dict.get("name")
raise
- finally:
- self.meta_datas = self.__get_test_data()
def export_variables(self, output_variables_list):
""" export current testcase variables
@@ -391,8 +391,8 @@ class Runner(object):
for variable in output_variables_list:
if variable not in variables_mapping:
logger.log_warning(
- "variable '{}' can not be found in variables mapping, failed to export!"\
- .format(variable)
+ "variable '{}' can not be found in variables mapping, "
+ "failed to export!".format(variable)
)
continue
diff --git a/httprunner/static/report_template.html b/httprunner/static/report_template.html
index 633b97c5..209c18ae 100644
--- a/httprunner/static/report_template.html
+++ b/httprunner/static/report_template.html
@@ -279,15 +279,17 @@
{% endfor %}
Validators:
-
-
+
+ {% set validate_extractors = meta_data.validators.validate_extractor %}
+ {% if validate_extractors %}
+
| check |
comparator |
expect value |
actual value |
- {% for validator in meta_data.validators %}
+ {% for validator in validate_extractors %}
{% if validator.check_result == "pass" %}
|
@@ -303,7 +305,27 @@
| {{validator.check_value | e}} |
{% endfor %}
-
+
+ {% endif %}
+
+ {% set validate_script = meta_data.validators.validate_script %}
+ {% if validate_script %}
+
+
+ | validate script | exception |
+
+
+ | {{validate_script.validate_script | safe}} |
+ {% if validate_script.check_result == "pass" %}
+
+ {% elif validate_script.check_result == "fail" %}
+ |
+ {% endif %}
+ {{validate_script.exception}}
+ |
+
+
+ {% endif %}
Statistics:
diff --git a/httprunner/utils.py b/httprunner/utils.py
index 133329f3..928b4882 100644
--- a/httprunner/utils.py
+++ b/httprunner/utils.py
@@ -607,7 +607,7 @@ def omit_long_data(body, omit_len=512):
return omitted_body + appendix_str
-def dump_json_file(json_data, pwd_dir_path, dump_file_name):
+def dump_json_file(json_data, json_file_abs_path):
""" dump json data to file
"""
class PythonObjectEncoder(json.JSONEncoder):
@@ -617,14 +617,8 @@ def dump_json_file(json_data, pwd_dir_path, dump_file_name):
except TypeError:
return str(obj)
- logs_dir_path = os.path.join(pwd_dir_path, "logs")
- if not os.path.isdir(logs_dir_path):
- os.makedirs(logs_dir_path)
-
- dump_file_path = os.path.join(logs_dir_path, dump_file_name)
-
try:
- with io.open(dump_file_path, 'w', encoding='utf-8') as outfile:
+ with io.open(json_file_abs_path, 'w', encoding='utf-8') as outfile:
if is_py2:
outfile.write(
unicode(json.dumps(
@@ -645,23 +639,44 @@ def dump_json_file(json_data, pwd_dir_path, dump_file_name):
cls=PythonObjectEncoder
)
- msg = "dump file: {}".format(dump_file_path)
+ msg = "dump file: {}".format(json_file_abs_path)
logger.color_print(msg, "BLUE")
except TypeError as ex:
- msg = "Failed to dump json file: {}\nReason: {}".format(dump_file_path, ex)
+ msg = "Failed to dump json file: {}\nReason: {}".format(json_file_abs_path, ex)
logger.color_print(msg, "RED")
-def _prepare_dump_info(project_mapping, tag_name):
- """ prepare dump file info.
+def prepare_dump_json_file_abs_path(project_mapping, tag_name):
+ """ prepare dump json file absolute path.
"""
- test_path = project_mapping.get("test_path") or "tests_mapping"
pwd_dir_path = project_mapping.get("PWD") or os.getcwd()
- file_name, file_suffix = os.path.splitext(os.path.basename(test_path.rstrip("/")))
- dump_file_name = "{}.{}.json".format(file_name, tag_name)
+ test_path = project_mapping.get("test_path")
- return pwd_dir_path, dump_file_name
+ if not test_path:
+ # running passed in testcase/testsuite data structure
+ dump_file_name = "tests_mapping.{}.json".format(tag_name)
+ dumped_json_file_abs_path = os.path.join(pwd_dir_path, "logs", dump_file_name)
+ return dumped_json_file_abs_path
+
+ # both test_path and pwd_dir_path are absolute path
+ logs_dir_path = os.path.join(pwd_dir_path, "logs")
+ test_path_relative_path = test_path[len(pwd_dir_path)+1:]
+
+ if os.path.isdir(test_path):
+ file_foder_path = os.path.join(logs_dir_path, test_path_relative_path)
+ dump_file_name = "all.{}.json".format(tag_name)
+ else:
+ file_relative_folder_path, test_file = os.path.split(test_path_relative_path)
+ file_foder_path = os.path.join(logs_dir_path, file_relative_folder_path)
+ test_file_name, _file_suffix = os.path.splitext(test_file)
+ dump_file_name = "{}.{}.json".format(test_file_name, tag_name)
+
+ if not os.path.isdir(file_foder_path):
+ os.makedirs(file_foder_path)
+
+ dumped_json_file_abs_path = os.path.join(file_foder_path, dump_file_name)
+ return dumped_json_file_abs_path
def dump_logs(json_data, project_mapping, tag_name):
@@ -674,8 +689,8 @@ def dump_logs(json_data, project_mapping, tag_name):
tag_name (str): tag name, loaded/parsed/summary
"""
- pwd_dir_path, dump_file_name = _prepare_dump_info(project_mapping, tag_name)
- dump_json_file(json_data, pwd_dir_path, dump_file_name)
+ json_file_abs_path = prepare_dump_json_file_abs_path(project_mapping, tag_name)
+ dump_json_file(json_data, json_file_abs_path)
def get_python2_retire_msg():
diff --git a/httprunner/validator.py b/httprunner/validator.py
index 36f085a2..153eeb61 100644
--- a/httprunner/validator.py
+++ b/httprunner/validator.py
@@ -1,137 +1,8 @@
# encoding: utf-8
import collections
-import io
import json
-import os
-import types
-from httprunner import exceptions, logger
-
-""" validate data format
-TODO: refactor with JSON schema validate
-"""
-
-
-def is_testcase(data_structure):
- """ check if data_structure is a testcase.
-
- Args:
- data_structure (dict): testcase should always be in the following data structure:
-
- {
- "config": {
- "name": "desc1",
- "variables": [], # optional
- "request": {} # optional
- },
- "teststeps": [
- test_dict1,
- { # test_dict2
- 'name': 'test step desc2',
- 'variables': [], # optional
- 'extract': [], # optional
- 'validate': [],
- 'request': {},
- 'function_meta': {}
- }
- ]
- }
-
- Returns:
- bool: True if data_structure is valid testcase, otherwise False.
-
- """
- # TODO: replace with JSON schema validation
- if not isinstance(data_structure, dict):
- return False
-
- if "teststeps" not in data_structure:
- return False
-
- if not isinstance(data_structure["teststeps"], list):
- return False
-
- return True
-
-
-def is_testcases(data_structure):
- """ check if data_structure is testcase or testcases list.
-
- Args:
- data_structure (dict): testcase(s) should always be in the following data structure:
- {
- "project_mapping": {
- "PWD": "XXXXX",
- "functions": {},
- "env": {}
- },
- "testcases": [
- { # testcase data structure
- "config": {
- "name": "desc1",
- "path": "testcase1_path",
- "variables": [], # optional
- },
- "teststeps": [
- # test data structure
- {
- 'name': 'test step desc1',
- 'variables': [], # optional
- 'extract': [], # optional
- 'validate': [],
- 'request': {}
- },
- test_dict_2 # another test dict
- ]
- },
- testcase_dict_2 # another testcase dict
- ]
- }
-
- Returns:
- bool: True if data_structure is valid testcase(s), otherwise False.
-
- """
- if not isinstance(data_structure, dict):
- return False
-
- if "testcases" not in data_structure:
- return False
-
- testcases = data_structure["testcases"]
- if not isinstance(testcases, list):
- return False
-
- for item in testcases:
- if not is_testcase(item):
- return False
-
- return True
-
-
-def is_testcase_path(path):
- """ check if path is testcase path or path list.
-
- Args:
- path (str/list): file path or file path list.
-
- Returns:
- bool: True if path is valid file path or path list, otherwise False.
-
- """
- if not isinstance(path, (str, list)):
- return False
-
- if isinstance(path, list):
- for p in path:
- if not is_testcase_path(p):
- return False
-
- if isinstance(path, str):
- if not os.path.exists(path):
- return False
-
- return True
+from httprunner import exceptions, logger, parser
###############################################################################
@@ -159,13 +30,13 @@ def get_uniform_comparator(comparator):
return "length_equals"
elif comparator in ["len_gt", "count_gt", "length_greater_than", "count_greater_than"]:
return "length_greater_than"
- elif comparator in ["len_ge", "count_ge", "length_greater_than_or_equals", \
- "count_greater_than_or_equals"]:
+ elif comparator in ["len_ge", "count_ge", "length_greater_than_or_equals",
+ "count_greater_than_or_equals"]:
return "length_greater_than_or_equals"
elif comparator in ["len_lt", "count_lt", "length_less_than", "count_less_than"]:
return "length_less_than"
- elif comparator in ["len_le", "count_le", "length_less_than_or_equals", \
- "count_less_than_or_equals"]:
+ elif comparator in ["len_le", "count_le", "length_less_than_or_equals",
+ "count_less_than_or_equals"]:
return "length_less_than_or_equals"
else:
return comparator
@@ -237,14 +108,14 @@ def _convert_validators_to_mapping(validators):
Examples:
>>> validators = [
- {"check": "v1", "expect": 201, "comparator": "eq"},
- {"check": {"b": 1}, "expect": 200, "comparator": "eq"}
- ]
- >>> _convert_validators_to_mapping(validators)
- {
- ("v1", "eq"): {"check": "v1", "expect": 201, "comparator": "eq"},
- ('{"b": 1}', "eq"): {"check": {"b": 1}, "expect": 200, "comparator": "eq"}
- }
+ {"check": "v1", "expect": 201, "comparator": "eq"},
+ {"check": {"b": 1}, "expect": 200, "comparator": "eq"}
+ ]
+ >>> print(_convert_validators_to_mapping(validators))
+ {
+ ("v1", "eq"): {"check": "v1", "expect": 201, "comparator": "eq"},
+ ('{"b": 1}', "eq"): {"check": {"b": 1}, "expect": 200, "comparator": "eq"}
+ }
"""
validators_mapping = {}
@@ -298,50 +169,189 @@ def extend_validators(raw_validators, override_validators):
return list(def_validators_mapping.values())
-###############################################################################
-## validate varibles and functions
-###############################################################################
+class Validator(object):
+ """Validate tests
+ Attributes:
+ validation_results (dict): store validation results,
+ including validate_extractor and validate_script.
-def is_function(item):
- """ Takes item object, returns True if it is a function.
"""
- return isinstance(item, types.FunctionType)
+ def __init__(self, session_context, resp_obj):
+ """ initialize a Validator for each teststep (API request)
-def is_variable(tup):
- """ Takes (name, object) tuple, returns True if it is a variable.
- """
- name, item = tup
- if callable(item):
- # function or class
- return False
+ Args:
+ session_context: HttpRunner session context
+ resp_obj: ResponseObject instance
+ """
+ self.session_context = session_context
+ self.resp_obj = resp_obj
+ self.validation_results = {}
- if isinstance(item, types.ModuleType):
- # imported module
- return False
+ def __eval_validator_check(self, check_item):
+ """ evaluate check item in validator.
- if name.startswith("_"):
- # private property
- return False
+ Args:
+ check_item: check_item should only be the following 5 formats:
+ 1, variable reference, e.g. $token
+ 2, function reference, e.g. ${is_status_code_200($status_code)}
+ 3, dict or list, maybe containing variable/function reference, e.g. {"var": "$abc"}
+ 4, string joined by delimiter. e.g. "status_code", "headers.content-type"
+ 5, regex string, e.g. "LB[\d]*(.*)RB[\d]*"
- return True
+ """
+ if isinstance(check_item, (dict, list)) \
+ or isinstance(check_item, parser.LazyString):
+ # format 1/2/3
+ check_value = self.session_context.eval_content(check_item)
+ else:
+ # format 4/5
+ check_value = self.resp_obj.extract_field(check_item)
+ return check_value
-def validate_json_file(file_list):
- """ validate JSON testcase format
- """
- for json_file in set(file_list):
- if not json_file.endswith(".json"):
- logger.log_warning("Only JSON file format can be validated, skip: {}".format(json_file))
- continue
+ def __eval_validator_expect(self, expect_item):
+ """ evaluate expect item in validator.
- logger.color_print("Start to validate JSON file: {}".format(json_file), "GREEN")
+ Args:
+ expect_item: expect_item should only be in 2 types:
+ 1, variable reference, e.g. $expect_status_code
+ 2, actual value, e.g. 200
+
+ """
+ expect_value = self.session_context.eval_content(expect_item)
+ return expect_value
+
+ def validate_script(self, script):
+ """ make validation with python script
+ """
+ validator_dict = {
+            "validate_script": "<br/>".join(script),
+ "check_result": "fail",
+ "exception": ""
+ }
+
+ script = "\n ".join(script)
+ code = """
+# encoding: utf-8
+
+try:
+ {}
+except Exception as ex:
+ import traceback
+ import sys
+ _type, _value, _tb = sys.exc_info()
+ # filename, lineno, name, line
+ _, _lineno, _, line_content = traceback.extract_tb(_tb, 1)[0]
+
+ line_no = _lineno - 4
+
+ c_exception = _type.__name__ + "\\n"
+ c_exception += "\\tError line number: " + str(line_no) + "\\n"
+ c_exception += "\\tError line content: " + str(line_content) + "\\n"
+
+ if _value.args:
+ c_exception += "\\tError description: " + str(_value)
+ else:
+ c_exception += "\\tError description: " + _type.__name__
+
+ raise _type(c_exception)
+""".format(script)
+ variables = {
+ "status_code": self.resp_obj.status_code,
+ "response_json": self.resp_obj.json,
+ "response": self.resp_obj
+ }
+ variables.update(self.session_context.test_variables_mapping)
+
+ try:
+            code = compile(code, '<string>', 'exec')
+ exec(code, variables)
+ validator_dict["check_result"] = "pass"
+ return validator_dict, ""
+ except Exception as ex:
+ validator_dict["check_result"] = "fail"
+            validator_dict["exception"] = "<br/>".join(str(ex).splitlines())
+ return validator_dict, str(ex)
+
+ def validate(self, validators):
+ """ make validation with comparators
+ """
+ self.validation_results = {}
+ if not validators:
+ return
+
+ logger.log_debug("start to validate.")
+
+ validate_pass = True
+ failures = []
+
+ for validator in validators:
+
+ if isinstance(validator, dict) and validator.get("type") == "python_script":
+ validator_dict, ex = self.validate_script(validator["script"])
+ if ex:
+ validate_pass = False
+ failures.append(ex)
+
+ self.validation_results["validate_script"] = validator_dict
+ continue
+
+ if "validate_extractor" not in self.validation_results:
+ self.validation_results["validate_extractor"] = []
+
+ # validator should be LazyFunction object
+ if not isinstance(validator, parser.LazyFunction):
+ raise exceptions.ValidationFailure(
+ "validator should be parsed first: {}".format(validators))
+
+ # evaluate validator args with context variable mapping.
+ validator_args = validator.get_args()
+ check_item, expect_item = validator_args
+ check_value = self.__eval_validator_check(check_item)
+ expect_value = self.__eval_validator_expect(expect_item)
+ validator.update_args([check_value, expect_value])
+
+ comparator = validator.func_name
+ validator_dict = {
+ "comparator": comparator,
+ "check": check_item,
+ "check_value": check_value,
+ "expect": expect_item,
+ "expect_value": expect_value
+ }
+ validate_msg = "\nvalidate: {} {} {}({})".format(
+ check_item,
+ comparator,
+ expect_value,
+ type(expect_value).__name__
+ )
- with io.open(json_file) as stream:
try:
- json.load(stream)
- except ValueError as e:
- raise SystemExit(e)
+ validator.to_value(self.session_context.test_variables_mapping)
+ validator_dict["check_result"] = "pass"
+ validate_msg += "\t==> pass"
+ logger.log_debug(validate_msg)
+ except (AssertionError, TypeError):
+ validate_pass = False
+ validator_dict["check_result"] = "fail"
+ validate_msg += "\t==> fail"
+ validate_msg += "\n{}({}) {} {}({})".format(
+ check_value,
+ type(check_value).__name__,
+ comparator,
+ expect_value,
+ type(expect_value).__name__
+ )
+ logger.log_error(validate_msg)
+ failures.append(validate_msg)
- print("OK")
+ self.validation_results["validate_extractor"].append(validator_dict)
+
+ # restore validator args, in case of running multiple times
+ validator.update_args(validator_args)
+
+ if not validate_pass:
+ failures_string = "\n".join([failure for failure in failures])
+ raise exceptions.ValidationFailure(failures_string)
diff --git a/pyproject.toml b/pyproject.toml
index 08c8742c..6b14c7d1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "httprunner"
-version = "2.3.2"
+version = "2.4.0"
description = "One-stop solution for HTTP(S) testing."
license = "Apache-2.0"
readme = "README.md"
diff --git a/tests/debugtalk.py b/tests/debugtalk.py
index 7006ddf8..101d6308 100644
--- a/tests/debugtalk.py
+++ b/tests/debugtalk.py
@@ -127,6 +127,15 @@ def alter_response(response):
"key": 123
}
+def alter_response_302(response):
+ response.status_code = 500
+ response.headers["Content-Type"] = "html/text"
+ response.text = "abcdef"
+ response.new_attribute = "new_attribute_value"
+ response.new_attribute_dict = {
+ "key": 123
+ }
+
def alter_response_error(response):
# NameError
diff --git a/tests/httpbin/a.b.c/rpc.yml b/tests/httpbin/a.b.c/rpc.yml
new file mode 100644
index 00000000..ef31c546
--- /dev/null
+++ b/tests/httpbin/a.b.c/rpc.yml
@@ -0,0 +1,10 @@
+name: rpc api
+base_url: http://httpbin.org
+variables:
+ expected_status_code: 200
+request:
+ url: /headers
+ method: GET
+validate:
+ - eq: ["status_code", $expected_status_code]
+ - eq: [content.headers.Host, "httpbin.org"]
diff --git a/tests/httpbin/api/302_redirect_teardown_hook.yml b/tests/httpbin/api/302_redirect_teardown_hook.yml
new file mode 100644
index 00000000..329d6d48
--- /dev/null
+++ b/tests/httpbin/api/302_redirect_teardown_hook.yml
@@ -0,0 +1,13 @@
+name: 302 redirect
+request:
+ url: https://httpbin.org/redirect-to
+ params:
+ url: https://github.com
+ status_code: 302
+ method: GET
+ verify: False
+teardown_hooks:
+ - ${alter_response_302($response)}
+validate:
+ - eq: ["status_code", 500]
+ - eq: ["text","abcdef"]
\ No newline at end of file
diff --git a/tests/httpbin/validate.yml b/tests/httpbin/validate.yml
index 310b826b..0be60af8 100644
--- a/tests/httpbin/validate.yml
+++ b/tests/httpbin/validate.yml
@@ -1,13 +1,34 @@
- config:
name: basic test with httpbin
- request:
- base_url: http://httpbin.org/
+ base_url: http://httpbin.org/
- test:
- name: headers
+ name: validate response with json path
request:
- url: /headers
+ url: /get
+ params:
+ a: 1
+ b: 2
method: GET
validate:
- eq: ["status_code", 200]
- - assert_status_code_is_200: ["status_code"]
+ - eq: ["json.args.a", '1']
+ - eq: ["json.args.b", '2']
+ validate_script:
+ - "assert status_code == 200"
+
+
+- test:
+ name: validate response with python script
+ request:
+ url: /get
+ params:
+ a: 1
+ b: 2
+ method: GET
+ validate:
+ - eq: ["status_code", 200]
+ validate_script:
+ - "assert status_code == 201"
+ - "a = response_json.get('args').get('a')"
+ - "assert a == '1'"
diff --git a/tests/test_api.py b/tests/test_api.py
index 97c59422..9e61f179 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -289,6 +289,10 @@ class TestHttpRunner(ApiServerUnittest):
self.assertEqual(summary["stat"]["testcases"]["total"], 2)
self.assertEqual(summary["stat"]["teststeps"]["total"], 4)
+ def test_validate_script(self):
+ summary = self.runner.run("tests/httpbin/validate.yml")
+ self.assertFalse(summary["success"])
+
def test_run_httprunner_with_hooks(self):
testcase_file_path = os.path.join(
os.getcwd(), 'tests/httpbin/hooks.yml')
@@ -419,6 +423,19 @@ class TestHttpRunner(ApiServerUnittest):
self.assertEqual(req_resp_data[0]["response"]["status_code"], 302)
self.assertEqual(req_resp_data[1]["response"]["status_code"], 200)
+ def test_request_302_logs_teardown_hook(self):
+ path = "tests/httpbin/api/302_redirect_teardown_hook.yml"
+ summary = self.runner.run(path)
+ self.assertTrue(summary["success"])
+ self.assertEqual(summary["stat"]["testcases"]["total"], 1)
+ self.assertEqual(summary["stat"]["teststeps"]["total"], 1)
+ self.assertEqual(summary["stat"]["teststeps"]["successes"], 1)
+
+ req_resp_data = summary["details"][0]["records"][0]["meta_datas"]["data"]
+ self.assertEqual(len(req_resp_data), 2)
+ self.assertEqual(req_resp_data[0]["response"]["status_code"], 302)
+ self.assertEqual(req_resp_data[1]["response"]["status_code"], 500)
+
def test_request_with_params(self):
path = "tests/httpbin/api/302_redirect.yml"
summary = self.runner.run(path)
@@ -437,13 +454,17 @@ class TestHttpRunner(ApiServerUnittest):
def test_run_api_folder(self):
api_folder = "tests/httpbin/api/"
summary = self.runner.run(api_folder)
+ print(summary["stat"]["testcases"]["total"])
+ print(len(summary["details"]))
self.assertTrue(summary["success"])
- self.assertEqual(summary["stat"]["testcases"]["total"], 2)
- self.assertEqual(summary["stat"]["teststeps"]["total"], 2)
- self.assertEqual(summary["stat"]["teststeps"]["successes"], 2)
- self.assertEqual(len(summary["details"]), 2)
+ self.assertEqual(summary["stat"]["testcases"]["total"], 3)
+ self.assertEqual(summary["stat"]["teststeps"]["total"], 3)
+ self.assertEqual(summary["stat"]["teststeps"]["successes"], 3)
+ self.assertEqual(len(summary["details"]), 3)
self.assertEqual(summary["details"][0]["stat"]["total"], 1)
self.assertEqual(summary["details"][1]["stat"]["total"], 1)
+ self.assertEqual(summary["details"][2]["stat"]["total"], 1)
+
def test_run_testcase_hardcode(self):
for testcase_file_path in self.testcase_file_path_list:
diff --git a/tests/test_loader.py b/tests/test_loader.py
index 21268074..6da7b278 100644
--- a/tests/test_loader.py
+++ b/tests/test_loader.py
@@ -476,3 +476,73 @@ class TestSuiteLoader(unittest.TestCase):
api_file_path = os.path.join(os.getcwd(), "tests", "api", "get_token.yml")
self.assertIn(api_file_path, self.tests_def_mapping["api"])
self.assertEqual(self.project_mapping["env"]["PROJECT_KEY"], "ABCDEFGH")
+
+ def test_is_function(self):
+ func = lambda x: x + 1
+ self.assertTrue(loader.is_function(func))
+ self.assertTrue(loader.is_function(loader.is_testcase))
+
+ def test_is_testcases(self):
+ data_structure = "path/to/file"
+ self.assertFalse(loader.is_testcases(data_structure))
+ data_structure = ["path/to/file1", "path/to/file2"]
+ self.assertFalse(loader.is_testcases(data_structure))
+
+ data_structure = {
+ "project_mapping": {
+ "PWD": "XXXXX",
+ "functions": {},
+ "env": {}
+ },
+ "testcases": [
+ { # testcase data structure
+ "config": {
+ "name": "desc1",
+ "path": "testcase1_path",
+ "variables": [], # optional
+ },
+ "teststeps": [
+ # test data structure
+ {
+ 'name': 'test step desc1',
+ 'variables': [], # optional
+ 'extract': [], # optional
+ 'validate': [],
+ 'request': {}
+ },
+ # test_dict2 # another test dict
+ ]
+ },
+ # testcase_dict_2 # another testcase dict
+ ]
+ }
+ self.assertTrue(loader.is_testcases(data_structure))
+ data_structure = [
+ {
+ "name": "desc1",
+ "config": {},
+ "api": {},
+ "testcases": ["testcase11", "testcase12"]
+ },
+ {
+ "name": "desc2",
+ "config": {},
+ "api": {},
+ "testcases": ["testcase21", "testcase22"]
+ }
+ ]
+        self.assertTrue(loader.is_testcases(data_structure))
+
+ def test_is_variable(self):
+ var1 = 123
+ var2 = "abc"
+ self.assertTrue(loader.is_variable(("var1", var1)))
+ self.assertTrue(loader.is_variable(("var2", var2)))
+
+ __var = 123
+ self.assertFalse(loader.is_variable(("__var", __var)))
+
+ func = lambda x: x + 1
+ self.assertFalse(loader.is_variable(("func", func)))
+
+ self.assertFalse(loader.is_variable(("unittest", unittest)))
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 5a32588f..7952a5ac 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -275,3 +275,37 @@ class TestUtils(ApiServerUnittest):
"d": [4, 5]
}
utils.print_info(info_mapping)
+
+ def test_prepare_dump_json_file_path_for_folder(self):
+ # hrun tests/httpbin/a.b.c/ --save-tests
+ project_working_directory = os.path.join(os.getcwd(), "tests")
+ project_mapping = {
+ "PWD": project_working_directory,
+ "test_path": os.path.join(os.getcwd(), "tests", "httpbin", "a.b.c")
+ }
+ self.assertEqual(
+ utils.prepare_dump_json_file_abs_path(project_mapping, "loaded"),
+ os.path.join(project_working_directory, "logs", "httpbin/a.b.c/all.loaded.json")
+ )
+
+ def test_prepare_dump_json_file_path_for_file(self):
+ # hrun tests/httpbin/a.b.c/rpc.yml --save-tests
+ project_working_directory = os.path.join(os.getcwd(), "tests")
+ project_mapping = {
+ "PWD": project_working_directory,
+ "test_path": os.path.join(os.getcwd(), "tests", "httpbin", "a.b.c", "rpc.yml")
+ }
+ self.assertEqual(
+ utils.prepare_dump_json_file_abs_path(project_mapping, "loaded"),
+ os.path.join(project_working_directory, "logs", "httpbin/a.b.c/rpc.loaded.json")
+ )
+
+ def test_prepare_dump_json_file_path_for_passed_testcase(self):
+ project_working_directory = os.path.join(os.getcwd(), "tests")
+ project_mapping = {
+ "PWD": project_working_directory
+ }
+ self.assertEqual(
+ utils.prepare_dump_json_file_abs_path(project_mapping, "loaded"),
+ os.path.join(project_working_directory, "logs", "tests_mapping.loaded.json")
+ )
diff --git a/tests/test_validator.py b/tests/test_validator.py
index 63a8cc5a..312a9e74 100644
--- a/tests/test_validator.py
+++ b/tests/test_validator.py
@@ -5,76 +5,6 @@ from httprunner import validator
class TestValidator(unittest.TestCase):
- def test_is_testcases(self):
- data_structure = "path/to/file"
- self.assertFalse(validator.is_testcases(data_structure))
- data_structure = ["path/to/file1", "path/to/file2"]
- self.assertFalse(validator.is_testcases(data_structure))
-
- data_structure = {
- "project_mapping": {
- "PWD": "XXXXX",
- "functions": {},
- "env": {}
- },
- "testcases": [
- { # testcase data structure
- "config": {
- "name": "desc1",
- "path": "testcase1_path",
- "variables": [], # optional
- },
- "teststeps": [
- # test data structure
- {
- 'name': 'test step desc1',
- 'variables': [], # optional
- 'extract': [], # optional
- 'validate': [],
- 'request': {}
- },
- # test_dict2 # another test dict
- ]
- },
- # testcase_dict_2 # another testcase dict
- ]
- }
- self.assertTrue(validator.is_testcases(data_structure))
- data_structure = [
- {
- "name": "desc1",
- "config": {},
- "api": {},
- "testcases": ["testcase11", "testcase12"]
- },
- {
- "name": "desc2",
- "config": {},
- "api": {},
- "testcases": ["testcase21", "testcase22"]
- }
- ]
- self.assertTrue(data_structure)
-
- def test_is_variable(self):
- var1 = 123
- var2 = "abc"
- self.assertTrue(validator.is_variable(("var1", var1)))
- self.assertTrue(validator.is_variable(("var2", var2)))
-
- __var = 123
- self.assertFalse(validator.is_variable(("__var", __var)))
-
- func = lambda x: x + 1
- self.assertFalse(validator.is_variable(("func", func)))
-
- self.assertFalse(validator.is_variable(("unittest", unittest)))
-
- def test_is_function(self):
- func = lambda x: x + 1
- self.assertTrue(validator.is_function(func))
- self.assertTrue(validator.is_function(validator.is_testcase))
-
def test_get_uniform_comparator(self):
self.assertEqual(validator.get_uniform_comparator("eq"), "equals")
self.assertEqual(validator.get_uniform_comparator("=="), "equals")
@@ -117,7 +47,6 @@ class TestValidator(unittest.TestCase):
{"check": "status_code", "comparator": "equals", "expect": 201}
)
-
def test_extend_validators(self):
def_validators = [
{'eq': ['v1', 200]},