diff --git a/httprunner/__about__.py b/httprunner/__about__.py
index 40f1461a..9fc5c801 100644
--- a/httprunner/__about__.py
+++ b/httprunner/__about__.py
@@ -1,7 +1,7 @@
 __title__ = 'HttpRunner'
 __description__ = 'One-stop solution for HTTP(S) testing.'
 __url__ = 'https://github.com/HttpRunner/HttpRunner'
-__version__ = '1.4.8'
+__version__ = '1.5.6'
 __author__ = 'debugtalk'
 __author_email__ = 'mail@debugtalk.com'
 __license__ = 'MIT'
diff --git a/httprunner/cli.py b/httprunner/cli.py
index aeb74c2b..fdabd9a7 100644
--- a/httprunner/cli.py
+++ b/httprunner/cli.py
@@ -11,8 +11,7 @@ from httprunner.__about__ import __description__, __version__
 from httprunner.compat import is_py2
 from httprunner.task import HttpRunner
 from httprunner.utils import (create_scaffold, get_python2_retire_msg,
-                              prettify_json_file, print_output,
-                              validate_json_file)
+                              prettify_json_file, validate_json_file)


 def main_hrun():
@@ -88,7 +87,6 @@ def main_hrun():
     )

     summary = runner.summary
-    print_output(summary["output"])
     return 0 if summary["success"] else 1

 def main_locust():
diff --git a/httprunner/client.py b/httprunner/client.py
index 097fdd57..9b2ecfd5 100644
--- a/httprunner/client.py
+++ b/httprunner/client.py
@@ -55,17 +55,22 @@ class HttpSession(requests.Session):
         """ initialize meta_data, it will store detail data of request and response
         """
         self.meta_data = {
-            "url": "N/A",
-            "method": "N/A",
-            "request_time": "N/A",
-            "request_headers": {},
-            "request_body": "N/A",
-            "status_code": "N/A",
-            "response_headers": {},
-            "response_body": "N/A",
-            "content_size": "N/A",
-            "response_time_ms": "N/A",
-            "elapsed_ms": "N/A"
+            "request": {
+                "url": "N/A",
+                "method": "N/A",
+                "headers": {},
+                "start_timestamp": None
+            },
+            "response": {
+                "status_code": "N/A",
+                "headers": {},
+                "content_size": "N/A",
+                "response_time_ms": "N/A",
+                "elapsed_ms": "N/A",
+                "encoding": None,
+                "content": None,
+                "content_type": ""
+            }
         }

     def request(self, method, url, name=None, **kwargs):
@@ -107,10 +112,17 @@ class HttpSession(requests.Session):
         :param cert: (optional)
             if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
         """
+        def log_print(request_response):
+            msg = "\n================== {} details ==================\n".format(request_response)
+            for key, value in self.meta_data[request_response].items():
+                msg += "{:<16} : {}\n".format(key, value)
+            logger.log_debug(msg)
+
         # record original request info
-        self.meta_data["method"] = method
-        self.meta_data["url"] = url
-        self.meta_data["request_time"] = time.time()
+        self.meta_data["request"]["method"] = method
+        self.meta_data["request"]["url"] = url
+        self.meta_data["request"].update(kwargs)
+        self.meta_data["request"]["start_timestamp"] = time.time()

         # prepend url with hostname unless it's already an absolute URL
         url = self._build_url(url)
@@ -119,35 +131,44 @@ class HttpSession(requests.Session):
         response = self._send_request_safe_mode(method, url, **kwargs)

         # record the consumed time
-        self.meta_data["response_time_ms"] = round((time.time() - self.meta_data["request_time"]) * 1000, 2)
-        self.meta_data["elapsed_ms"] = response.elapsed.microseconds / 1000.0
+        self.meta_data["response"]["response_time_ms"] = \
+            round((time.time() - self.meta_data["request"]["start_timestamp"]) * 1000, 2)
+        self.meta_data["response"]["elapsed_ms"] = response.elapsed.microseconds / 1000.0

         # record actual request info
-        self.meta_data["url"] = (response.history and response.history[0] or response).request.url
-        self.meta_data["request_headers"] = response.request.headers
-        self.meta_data["request_body"] = response.request.body
+        self.meta_data["request"]["url"] = (response.history and response.history[0] or response).request.url
+        self.meta_data["request"]["headers"] = dict(response.request.headers)
+        self.meta_data["request"]["body"] = response.request.body
+
+        # log request details in debug mode
+        log_print("request")

         # record response info
-        self.meta_data["status_code"] = response.status_code
-        self.meta_data["response_headers"] = response.headers
-        try:
-            self.meta_data["response_body"] = response.json()
-        except ValueError:
-            self.meta_data["response_body"] = response.content
+        self.meta_data["response"]["ok"] = response.ok
+        self.meta_data["response"]["url"] = response.url
+        self.meta_data["response"]["status_code"] = response.status_code
+        self.meta_data["response"]["reason"] = response.reason
+        self.meta_data["response"]["headers"] = dict(response.headers)
+        self.meta_data["response"]["cookies"] = response.cookies or {}
+        self.meta_data["response"]["encoding"] = response.encoding
+        self.meta_data["response"]["content"] = response.content
+        self.meta_data["response"]["text"] = response.text
+        self.meta_data["response"]["content_type"] = response.headers.get("Content-Type", "")

-        # log response details in debug mode
-        msg = "response details:\n"
-        msg += "> status_code: {}\n".format(self.meta_data["status_code"])
-        msg += "> headers: {}\n".format(self.meta_data["response_headers"])
-        msg += "> body: {}".format(self.meta_data["response_body"])
-        logger.log_debug(msg)
+        try:
+            self.meta_data["response"]["json"] = response.json()
+        except ValueError:
+            self.meta_data["response"]["json"] = None

         # get the length of the content, but if the argument stream is set to True, we take
         # the size from the content-length header, in order to not trigger fetching of the body
         if kwargs.get("stream", False):
-            self.meta_data["content_size"] = int(self.meta_data["response_headers"].get("content-length") or 0)
+            self.meta_data["response"]["content_size"] = int(self.meta_data["response"]["headers"].get("content-length") or 0)
         else:
-            self.meta_data["content_size"] = len(response.content or "")
+            self.meta_data["response"]["content_size"] = len(response.content or "")
+
+        # log response details in debug mode
+        log_print("response")

         try:
             response.raise_for_status()
@@ -156,9 +177,9 @@ class HttpSession(requests.Session):
         else:
             logger.log_info(
                 """status_code: {}, response_time(ms): {} ms, response_length: {} bytes""".format(
-                    self.meta_data["status_code"],
-                    self.meta_data["response_time_ms"],
-                    self.meta_data["content_size"]
+                    self.meta_data["response"]["status_code"],
+                    self.meta_data["response"]["response_time_ms"],
+                    self.meta_data["response"]["content_size"]
                 )
             )
diff --git a/httprunner/context.py b/httprunner/context.py
index 9059242c..9decb7ca 100644
--- a/httprunner/context.py
+++ b/httprunner/context.py
@@ -216,12 +216,13 @@ class Context(object):
         # 2, actual value, e.g. 200
         expect_value = self.eval_content(validator["expect"])
         validator["expect"] = expect_value
+        validator["check_result"] = "unchecked"
         return validator

     def do_validation(self, validator_dict):
         """ validate with functions
         """
-        # TODO: move comparator uniform to init_task_suite
+        # TODO: move comparator uniform to init_test_suites
         comparator = utils.get_uniform_comparator(validator_dict["comparator"])
         validate_func = self.testcase_parser.get_bind_function(comparator)

@@ -237,6 +238,7 @@
             raise exception.ParamsError("Null value can only be compared with comparator: eq/equals/==")

         try:
+            validator_dict["check_result"] = "passed"
             validate_func(validator_dict["check_value"], validator_dict["expect"])
         except (AssertionError, TypeError):
             err_msg = "\n" + "\n".join([
@@ -245,18 +247,22 @@
                 "\tcomparator: %s;" % comparator,
                 "\texpected value: %s (%s)." % (expect_value, type(expect_value).__name__)
             ])
+            validator_dict["check_result"] = "failed"
             raise exception.ValidationError(err_msg)

-    def validate(self, validators, resp_obj):
-        """ check validators with the context variable mapping.
-        @param (list) validators
-        @param (object) resp_obj
+    def eval_validators(self, validators, resp_obj):
+        """ evaluate validators with context variable mapping.
         """
-        for validator in validators:
-            validator_dict = self.eval_check_item(
+        return [
+            self.eval_check_item(
                 testcase.parse_validator(validator),
                 resp_obj
             )
-            self.do_validation(validator_dict)
+            for validator in validators
+        ]

-        return True
+    def validate(self, validators):
+        """ make validations
+        """
+        for validator_dict in validators:
+            self.do_validation(validator_dict)
diff --git a/httprunner/report.py b/httprunner/report.py
index 714eee1b..7cb6b172 100644
--- a/httprunner/report.py
+++ b/httprunner/report.py
@@ -38,8 +38,7 @@ def get_summary(result):
             'skipped': len(result.skipped),
             'expectedFailures': len(result.expectedFailures),
             'unexpectedSuccesses': len(result.unexpectedSuccesses)
-        },
-        "platform": get_platform()
+        }
     }
     summary["stat"]["successes"] = summary["stat"]["testsRun"] \
         - summary["stat"]["failures"] \
@@ -50,7 +49,7 @@
     if getattr(result, "records", None):
         summary["time"] = {
-            'start_at': datetime.fromtimestamp(result.start_at),
+            'start_at': result.start_at,
             'duration': result.duration
         }
         summary["records"] = result.records
@@ -78,22 +77,24 @@ def render_html_report(summary, html_report_name=None, html_report_template=None):
     logger.log_debug("render data: {}".format(summary))

     report_dir_path = os.path.join(os.getcwd(), "reports")
-    start_datetime = summary["time"]["start_at"].strftime('%Y-%m-%d-%H-%M-%S')
+    start_at_timestamp = int(summary["time"]["start_at"])
+    summary["time"]["start_datetime"] = datetime.fromtimestamp(start_at_timestamp).strftime('%Y-%m-%d %H:%M:%S')

     if html_report_name:
         summary["html_report_name"] = html_report_name
         report_dir_path = os.path.join(report_dir_path, html_report_name)
-        html_report_name += "-{}.html".format(start_datetime)
+        html_report_name += "-{}.html".format(start_at_timestamp)
     else:
         summary["html_report_name"] = ""
-        html_report_name = "{}.html".format(start_datetime)
+        html_report_name = "{}.html".format(start_at_timestamp)

     if not os.path.isdir(report_dir_path):
         os.makedirs(report_dir_path)
-    for record in summary.get("records"):
-        meta_data = record['meta_data']
-        stringify_body(meta_data, 'request')
-        stringify_body(meta_data, 'response')
+    for suite_summary in summary["details"]:
+        for record in suite_summary.get("records"):
+            meta_data = record['meta_data']
+            stringify_data(meta_data, 'request')
+            stringify_data(meta_data, 'response')

     with io.open(html_report_template, "r", encoding='utf-8') as fp_r:
         template_content = fp_r.read()
@@ -106,36 +107,39 @@ def render_html_report(summary, html_report_name=None, html_report_template=None):
     return report_path


-def stringify_body(meta_data, request_or_response):
-    headers = meta_data.get('{}_headers'.format(request_or_response), {})
-    body = meta_data.get('{}_body'.format(request_or_response))
+def stringify_data(meta_data, request_or_response):
+    headers = meta_data[request_or_response]["headers"]

-    if isinstance(body, CaseInsensitiveDict):
-        body = json.dumps(dict(body), ensure_ascii=False)
+    request_or_response_dict = meta_data[request_or_response]

-    elif isinstance(body, (dict, list)):
-        body = json.dumps(body, indent=2, ensure_ascii=False)
+    for key, value in request_or_response_dict.items():

-    elif isinstance(body, bytes):
-        resp_content_type = headers.get("Content-Type", "")
-        try:
-            if "image" in resp_content_type:
-                meta_data["response_data_type"] = "image"
-                body = "data:{};base64,{}".format(
-                    resp_content_type,
-                    b64encode(body).decode('utf-8')
-                )
-            else:
-                body = escape(body.decode("utf-8"))
-        except UnicodeDecodeError:
-            pass
+        if isinstance(value, list):
+            value = json.dumps(value, indent=2, ensure_ascii=False)

-    elif not isinstance(body, (basestring, numeric_types, Iterable)):
-        # class instance, e.g. MultipartEncoder()
-        body = repr(body)
+        elif isinstance(value, bytes):
+            try:
+                encoding = meta_data["response"].get("encoding")
+                if not encoding or encoding == "None":
+                    encoding = "utf-8"

-    meta_data['{}_body'.format(request_or_response)] = body
+                content_type = meta_data["response"]["content_type"]
+                if "image" in content_type:
+                    meta_data["response"]["content_type"] = "image"
+                    value = "data:{};base64,{}".format(
+                        content_type,
+                        b64encode(value).decode(encoding)
+                    )
+                else:
+                    value = escape(value.decode(encoding))
+            except UnicodeDecodeError:
+                pass
+        elif not isinstance(value, (basestring, numeric_types, Iterable)):
+            # class instance, e.g. MultipartEncoder()
+            value = repr(value)
+
+        meta_data[request_or_response][key] = value


 class HtmlTestResult(unittest.TextTestResult):
     """A html result class that can generate formatted html results.
diff --git a/httprunner/response.py b/httprunner/response.py
index d00a397c..9a2a3b89 100644
--- a/httprunner/response.py
+++ b/httprunner/response.py
@@ -82,6 +82,16 @@ class ResponseObject(object):
                 err_msg += u"attribute: {}".format(sub_query)
                 logger.log_error(err_msg)
                 raise exception.ParamsError(err_msg)
+        elif top_query == "elapsed":
+            if sub_query in ["days", "seconds", "microseconds"]:
+                return getattr(self.elapsed, sub_query)
+            elif sub_query == "total_seconds":
+                return self.elapsed.total_seconds()
+            else:
+                err_msg = "{}: {} is not valid timedelta attribute.\n".format(field, sub_query)
+                err_msg += "elapsed only support attributes: days, seconds, microseconds, total_seconds.\n"
+                logger.log_error(err_msg)
+                raise exception.ParamsError(err_msg)

         try:
             top_query_content = getattr(self, top_query)
@@ -96,11 +106,11 @@ class ResponseObject(object):
             # TODO: remove compatibility for content, text
             if isinstance(top_query_content, bytes):
                 top_query_content = top_query_content.decode("utf-8")
-            
+
             if isinstance(top_query_content, PreparedRequest):
                 top_query_content = top_query_content.__dict__
-            else:    
-                top_query_content = json.loads(top_query_content)
+            else:
+                top_query_content = json.loads(top_query_content)
         except json.decoder.JSONDecodeError:
             err_msg = u"Failed to extract data with delimiter!\n"
             err_msg += u"response content: {}\n".format(self.content)
diff --git a/httprunner/runner.py b/httprunner/runner.py
index 00919d12..fbf8031f 100644
--- a/httprunner/runner.py
+++ b/httprunner/runner.py
@@ -11,6 +11,7 @@ class Runner(object):
     def __init__(self, config_dict=None, http_client_session=None):

         self.http_client_session = http_client_session
+        self.evaluated_validators = []
         self.context = Context()

         config_dict = config_dict or {}
@@ -178,11 +179,12 @@ class Runner(object):
         extractors = testcase_dict.get("extract", []) or testcase_dict.get("extractors", [])
         extracted_variables_mapping = resp_obj.extract_response(extractors)
         self.context.bind_extracted_variables(extracted_variables_mapping)
-        
+
         # validate
         validators = testcase_dict.get("validate", []) or testcase_dict.get("validators", [])
         try:
-            self.context.validate(validators, resp_obj)
+            self.evaluated_validators = self.context.eval_validators(validators, resp_obj)
+            self.context.validate(self.evaluated_validators)
         except (exception.ParamsError, exception.ResponseError, \
             exception.ValidationError, exception.ParseResponseError):
             # log request
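Note on the context.py and runner.py hunks above: the single `validate(validators, resp_obj)` call is split into `eval_validators()` plus `validate()`, so the evaluated validator dicts (each carrying a `check_result` of "unchecked", "passed" or "failed") survive the run and can be attached to the report. A simplified, self-contained stand-in for that two-phase flow, not the real implementation:

```python
# Simplified stand-in: the real code resolves "check" via ResponseObject and
# dispatches on the bound comparator function instead of plain equality.
def eval_validators(validators, resp):
    evaluated = []
    for validator in validators:
        item = dict(validator)
        item["check_value"] = resp.get(item["check"])   # phase 1: evaluate against the response
        item["check_result"] = "unchecked"
        evaluated.append(item)
    return evaluated

def validate(evaluated_validators):
    for item in evaluated_validators:                   # phase 2: run the comparisons
        passed = item["check_value"] == item["expect"]
        item["check_result"] = "passed" if passed else "failed"
        assert passed, item

evaluated = eval_validators(
    [{"check": "status_code", "comparator": "eq", "expect": 200}],
    {"status_code": 200},
)
validate(evaluated)
# evaluated[0]["check_result"] == "passed", ready to be rendered in the report
```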
diff --git a/httprunner/task.py b/httprunner/task.py
index 3448045c..c65a7809 100644
--- a/httprunner/task.py
+++ b/httprunner/task.py
@@ -6,9 +6,10 @@ import unittest

 from httprunner import exception, logger, runner, testcase, utils
 from httprunner.compat import is_py3
-from httprunner.report import HtmlTestResult, get_summary, render_html_report
+from httprunner.report import (HtmlTestResult, get_platform, get_summary,
+                               render_html_report)
 from httprunner.testcase import TestcaseLoader
-from httprunner.utils import load_dot_env_file
+from httprunner.utils import load_dot_env_file, print_output


 class TestCase(unittest.TestCase):
@@ -27,8 +28,10 @@ class TestCase(unittest.TestCase):
         finally:
             if hasattr(self.test_runner.http_client_session, "meta_data"):
                 self.meta_data = self.test_runner.http_client_session.meta_data
+                self.meta_data["validators"] = self.test_runner.evaluated_validators
                 self.test_runner.http_client_session.init_meta_data()


+
 class TestSuite(unittest.TestSuite):
     """ create test suite with a testset, it may include one or several testcases.
         each suite should initialize a separate Runner() with testset config.
@@ -64,12 +67,12 @@
         super(TestSuite, self).__init__()
         self.test_runner_list = []

-        config_dict = testset.get("config", {})
-        self.output_variables_list = config_dict.get("output", [])
-        self.testset_file_path = config_dict.get("path")
-        config_dict_parameters = config_dict.get("parameters", [])
+        self.config = testset.get("config", {})
+        self.output_variables_list = self.config.get("output", [])
+        self.testset_file_path = self.config.get("path")
+        config_dict_parameters = self.config.get("parameters", [])

-        config_dict_variables = config_dict.get("variables", [])
+        config_dict_variables = self.config.get("variables", [])
         variables_mapping = variables_mapping or {}
         config_dict_variables = utils.override_variables_binds(config_dict_variables, variables_mapping)

@@ -82,8 +85,8 @@

         for config_variables in config_parametered_variables_list:
             # config level
-            config_dict["variables"] = config_variables
-            test_runner = runner.Runner(config_dict, http_client_session)
+            self.config["variables"] = config_variables
+            test_runner = runner.Runner(self.config, http_client_session)

             for testcase_dict in testcases:
                 testcase_dict = copy.copy(testcase_dict)
@@ -148,55 +151,33 @@
             if not out:
                 continue

-            outputs.append({"in": variables, "out": out})
+            in_out = {
+                "in": dict(variables),
+                "out": out
+            }
+            if in_out not in outputs:
+                outputs.append(in_out)

         return outputs


-class TaskSuite(unittest.TestSuite):
-    """ create task suite with specified testcase path.
-        each task suite may include one or several test suite.
-    """
-    def __init__(self, testsets, mapping=None, http_client_session=None):
-        """
-        @params
-            testsets (dict/list): testset or list of testset
-                testset_dict
-                or
-                [
-                    testset_dict_1,
-                    testset_dict_2,
-                    {
-                        "name": "desc1",
-                        "config": {},
-                        "api": {},
-                        "testcases": [testcase11, testcase12]
-                    }
-                ]
-            mapping (dict):
-                passed in variables mapping, it will override variables in config block
-        """
-        super(TaskSuite, self).__init__()
-        mapping = mapping or {}
-
-        if not testsets:
-            raise exception.TestcaseNotFound
-
-        if isinstance(testsets, dict):
-            testsets = [testsets]
-
-        self.suite_list = []
-        for testset in testsets:
-            suite = TestSuite(testset, mapping, http_client_session)
-            self.addTest(suite)
-            self.suite_list.append(suite)
-
-    @property
-    def tasks(self):
-        return self.suite_list
-
-
-def init_task_suite(path_or_testsets, mapping=None, http_client_session=None):
-    """ initialize task suite
+def init_test_suites(path_or_testsets, mapping=None, http_client_session=None):
+    """ initialize TestSuite list with testset path or testset dict
+    @params
+        testsets (dict/list): testset or list of testset
+            testset_dict
+            or
+            [
+                testset_dict_1,
+                testset_dict_2,
+                {
+                    "config": {},
+                    "api": {},
+                    "testcases": [testcase11, testcase12]
+                }
+            ]
+        mapping (dict):
+            passed in variables mapping, it will override variables in config block
     """
     if not testcase.is_testsets(path_or_testsets):
         TestcaseLoader.load_test_dependencies()
@@ -206,7 +187,19 @@ def init_task_suite(path_or_testsets, mapping=None, http_client_session=None):

     # TODO: move comparator uniform here
     mapping = mapping or {}
-    return TaskSuite(testsets, mapping, http_client_session)
+
+    if not testsets:
+        raise exception.TestcaseNotFound
+
+    if isinstance(testsets, dict):
+        testsets = [testsets]
+
+    test_suite_list = []
+    for testset in testsets:
+        test_suite = TestSuite(testset, mapping, http_client_session)
+        test_suite_list.append(test_suite)
+
+    return test_suite_list


 class HttpRunner(object):
@@ -242,19 +235,46 @@
             if mapping specified, it will override variables in config block
         """
         try:
-            task_suite = init_task_suite(path_or_testsets, mapping)
+            test_suite_list = init_test_suites(path_or_testsets, mapping)
         except exception.TestcaseNotFound:
             logger.log_error("Testcases not found in {}".format(path_or_testsets))
             sys.exit(1)

-        result = self.runner.run(task_suite)
-        self.summary = get_summary(result)
+        self.summary = {
+            "success": True,
+            "stat": {},
+            "time": {},
+            "platform": get_platform(),
+            "details": []
+        }

-        output = []
-        for task in task_suite.tasks:
-            output.extend(task.output)
+        def accumulate_stat(origin_stat, new_stat):
+            """ accumulate new_stat to origin_stat
+            """
+            for key in new_stat:
+                if key not in origin_stat:
+                    origin_stat[key] = new_stat[key]
+                elif key == "start_at":
+                    # start datetime
+                    origin_stat[key] = min(origin_stat[key], new_stat[key])
+                else:
+                    origin_stat[key] += new_stat[key]
+
+        for test_suite in test_suite_list:
+            result = self.runner.run(test_suite)
+            test_suite_summary = get_summary(result)
+
+            self.summary["success"] &= test_suite_summary["success"]
+            test_suite_summary["name"] = test_suite.config.get("name")
+            test_suite_summary["base_url"] = test_suite.config.get("request", {}).get("base_url", "")
+            test_suite_summary["output"] = test_suite.output
+            print_output(test_suite_summary["output"])
+
+            accumulate_stat(self.summary["stat"], test_suite_summary["stat"])
+            accumulate_stat(self.summary["time"], test_suite_summary["time"])
+
+            self.summary["details"].append(test_suite_summary)

-        self.summary["output"] = output
         return self

     def gen_html_report(self, html_report_name=None, html_report_template=None):
@@ -274,11 +294,11 @@
 class LocustTask(object):

     def __init__(self, path_or_testsets, locust_client, mapping=None):
-        self.task_suite = init_task_suite(path_or_testsets, mapping, locust_client)
+        self.test_suite_list = init_test_suites(path_or_testsets, mapping, locust_client)

     def run(self):
-        for suite in self.task_suite:
-            for test in suite:
+        for test_suite in self.test_suite_list:
+            for test in test_suite:
                 try:
                     test.runTest()
                 except exception.MyBaseError as ex:
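Note on the task.py hunks above: `HttpRunner.run()` now aggregates one summary per testset under `details`, accumulates `stat` and `time` across testsets, and prints each suite's `output` directly instead of collecting a global `summary["output"]`. A rough sketch of the resulting structure; all values are illustrative, not taken from this changeset:

```python
# e.g. hrun = HttpRunner().run("path/to/testsets/")   (the path is illustrative)
# hrun.summary then looks roughly like:
summary = {
    "success": True,
    "platform": {"httprunner_version": "1.5.6", "python_version": "CPython 3.6.4"},
    "stat": {"testsRun": 3, "successes": 3, "failures": 0, "errors": 0, "skipped": 0},
    "time": {"start_at": 1519000000.0, "duration": 2.75},
    "details": [
        {
            "name": "demo testset",          # from the testset config block
            "base_url": "",
            "success": True,
            "stat": {"testsRun": 3, "successes": 3, "failures": 0,
                     "errors": 0, "skipped": 0},
            "time": {"start_at": 1519000000.0, "duration": 2.75},
            "output": [{"in": {"user": "demo"}, "out": {"token": "abc123"}}],
            "records": [],                   # per-testcase records, rendered by the template below
        }
    ],
}
```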
diff --git a/httprunner/templates/default_report_template.html b/httprunner/templates/default_report_template.html
index ccae9cad..4c7b4441 100644
--- a/httprunner/templates/default_report_template.html
+++ b/httprunner/templates/default_report_template.html
@@ -9,7 +9,7 @@
             margin: 0 auto;
             width: 960px;
         }
-        #summary, #details {
+        #summary {
             width: 960px;
             margin-bottom: 20px;
         }
@@ -22,30 +22,43 @@
             text-align: center;
             padding: 4px 8px;
         }
-        #details th {
+        .details {
+            width: 960px;
+            margin-bottom: 20px;
+        }
+        .details th {
             background-color: skyblue;
             padding: 5px 12px;
         }
-        #details td {
+        .details tr .passed {
+            background-color: lightgreen;
+        }
+        .details tr .failed {
+            background-color: red;
+        }
+        .details tr .unchecked {
+            background-color: gray;
+        }
+        .details td {
             background-color: lightblue;
             padding: 5px 12px;
         }
-        #details .detail {
+        .details .detail {
             background-color: lightgrey;
             font-size: smaller;
             padding: 5px 10px;
             text-align: center;
         }
-        #details .success {
+        .details .success {
             background-color: greenyellow;
         }
-        #details .error {
+        .details .error {
             background-color: red;
         }
-        #details .failure {
+        .details .failure {
             background-color: salmon;
         }
-        #details .skipped {
+        .details .skipped {
             background-color: gray;
         }
@@ -137,7 +150,7 @@
                 START AT
-                {{time.start_at.strftime('%Y-%m-%d %H:%M:%S')}}
+                {{time.start_datetime}}

                 DURATION

@@ -170,90 +183,145 @@
             Details

-            {% for record in records %}
+            {% for test_suite_summary in details %}
+            {% set suite_index = loop.index %}

+            {{test_suite_summary.name}}

             Status
             Name
             Response Time
             Detail

+            {% for record in test_suite_summary.records %}
+            {% set record_index = "{}_{}".format(suite_index, loop.index) %}

+            base_url
+            {{test_suite_summary.base_url}}
+            parameters & output

+            TOTAL: {{test_suite_summary.stat.testsRun}}
+            SUCCESS: {{test_suite_summary.stat.successes}}
+            FAILED: {{test_suite_summary.stat.failures}}
+            ERROR: {{test_suite_summary.stat.errors}}
+            SKIPPED: {{test_suite_summary.stat.skipped}}

             Status
             Name
             Response Time
             Detail

             {{record.status}}
-            {{record.name}}
-            {{ record.meta_data["response_time_ms"] }} ms
+            {{record.name}}
+            {{ record.meta_data.response.response_time_ms }} ms
             log
+            {{validator.comparator}}
+            {{validator.expect}}
+            {{validator.check_value}}
+            {% endfor %}

             Statistics:

@@ -261,15 +329,15 @@
                 content_size(bytes)
-                {{ record.meta_data["content_size"] }}
+                {{ record.meta_data.response.content_size }}

                 response_time(ms)
-                {{ record.meta_data["response_time_ms"] }}
+                {{ record.meta_data.response.response_time_ms }}

                 elapsed(ms)
-                {{ record.meta_data["elapsed_ms"] }}
+                {{ record.meta_data.response.elapsed_ms }}
@@ -279,11 +347,11 @@
             {% if record.attachment %}
-            traceback
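Each row in the per-suite details tables is rendered from the record's nested `meta_data` (see client.py above) plus the evaluated validators attached in task.py; `check_result` selects the new `.passed` / `.failed` / `.unchecked` cell styling. An illustrative record with made-up values:

```python
# Hypothetical entry from a test suite summary's "records" list.
record = {
    "name": "get token",
    "status": "success",
    "meta_data": {
        "request": {"method": "GET", "url": "http://127.0.0.1:5000/api/token"},
        "response": {"status_code": 200, "response_time_ms": 58.42,
                     "elapsed_ms": 55.1, "content_size": 312},
        "validators": [
            {"check": "status_code", "check_value": 200, "comparator": "eq",
             "expect": 200, "check_result": "passed"},
        ],
    },
}
```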