mirror of https://github.com/httprunner/httprunner.git
synced 2026-05-12 02:21:29 +08:00
0.9.5:
1. remove PyUnitReport dependency; 2. add built-in HTML report; 3. support passing in an HTML report template, in Jinja2 format.
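
A minimal usage sketch of the reworked Python API, pieced together from the diff below (the testset path is one of the repo's own demo files; on the CLI the same options are exposed through the new --html-report-name and --html-report-template flags):

from httprunner import HttpRunner

# gen_html_report defaults to True; failfast is forwarded to unittest.TextTestRunner
runner = HttpRunner("tests/data/demo_testset_cli.yml", failfast=True)

# run() now returns a summary dict instead of the old Result object
summary = runner.run(html_report_name="demo")

print(summary["success"])             # True/False for the whole run
print(summary["stat"]["testsRun"])    # counters assembled by get_summary()
print(summary["report_path"])         # where the rendered HTML report was written
print(summary["output"])              # extracted output variables
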
@@ -9,8 +9,6 @@ from httprunner import __version__ as hrun_version
from httprunner import logger
from httprunner.task import HttpRunner
from httprunner.utils import create_scaffold, print_output, string_type
from pyunitreport import __version__ as pyu_version
from pyunitreport import HTMLTestRunner


def main_hrun():
@@ -24,6 +22,12 @@ def main_hrun():
    parser.add_argument(
        'testset_paths', nargs='*',
        help="testset file path")
    parser.add_argument(
        '--html-report-name',
        help="specify html report name, only effective when generating html report.")
    parser.add_argument(
        '--html-report-template',
        help="specify html report template path.")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
@@ -38,8 +42,7 @@ def main_hrun():
    logger.setup_logger(args.log_level)

    if args.version:
        logger.color_print("HttpRunner version: {}".format(hrun_version), "GREEN")
        logger.color_print("PyUnitReport version: {}".format(pyu_version), "GREEN")
        logger.color_print("{}".format(hrun_version), "GREEN")
        exit(0)

    project_name = args.startproject
@@ -49,14 +52,16 @@ def main_hrun():
        exit(0)

    kwargs = {
        "output": os.path.join(os.getcwd(), "reports"),
        "failfast": args.failfast
    }
    test_runner = HTMLTestRunner(**kwargs)
    result = HttpRunner(args.testset_paths, test_runner).run()
    print_output(result.output)
    run_kwargs = {
        "html_report_name": args.html_report_name,
        "html_report_template": args.html_report_template
    }
    result = HttpRunner(args.testset_paths, **kwargs).run(**run_kwargs)

    return 0 if result.success else 1
    print_output(result["output"])
    return 0 if result["success"] else 1

def main_locust():
    """ Performance test with locust: parse command line options and run commands.

@@ -119,7 +119,6 @@ class HttpSession(requests.Session):

        # prepend url with hostname unless it's already an absolute URL
        url = self._build_url(url)
        print("")
        logger.log_info("{method} {url}".format(method=method, url=url))
        logger.log_debug("request kwargs(raw): {kwargs}".format(kwargs=kwargs))
        # store meta data that is used when reporting the request to locust's statistics

httprunner/report.py (Normal file, 150 lines)
@@ -0,0 +1,150 @@
import os
import time
import unittest
from datetime import datetime

from httprunner import logger
from jinja2 import Template


def get_summary(result):
    """ get summary from test result
    """
    summary = {
        "success": result.wasSuccessful(),
        "stat": {
            'testsRun': result.testsRun,
            'failures': len(result.failures),
            'errors': len(result.errors),
            'skipped': len(result.skipped),
            'expectedFailures': len(result.expectedFailures),
            'unexpectedSuccesses': len(result.unexpectedSuccesses)
        }
    }
    summary["stat"]["successes"] = summary["stat"]["testsRun"] \
        - summary["stat"]["failures"] \
        - summary["stat"]["errors"] \
        - summary["stat"]["skipped"] \
        - summary["stat"]["expectedFailures"] \
        - summary["stat"]["unexpectedSuccesses"]

    if getattr(result, "records", None):
        summary["time"] = {
            'start_at': datetime.fromtimestamp(result.start_at),
            'duration': result.duration
        }
        summary["records"] = result.records

    return summary


class HtmlTestResult(unittest.TextTestResult):
    """A html result class that can generate formatted html results.

    Used by TextTestRunner.
    """
    def __init__(self, stream, descriptions, verbosity):
        super(HtmlTestResult, self).__init__(stream, descriptions, verbosity)
        self.records = []
        self.default_report_template_path = os.path.join(
            os.path.abspath(os.path.dirname(__file__)),
            "templates",
            "default_report_template.html"
        )
        self.report_path = None

    def _record_test(self, test, result_type, attachment=''):
        self.records.append({
            'name': test.shortDescription(),
            'result_type': result_type,
            'start_at': datetime.fromtimestamp(test.start_at),
            'duration': time.time() - test.start_at,
            'attachment': attachment
        })

    def startTestRun(self):
        self.start_at = time.time()

    def startTest(self, test):
        """ add start test time """
        test.start_at = time.time()
        super(HtmlTestResult, self).startTest(test)
        logger.color_print(test.shortDescription(), "yellow")

    def addSuccess(self, test):
        super(HtmlTestResult, self).addSuccess(test)
        self._record_test(test, 'success')
        print("")

    def addError(self, test, err):
        super(HtmlTestResult, self).addError(test, err)
        self._record_test(test, 'error', self._exc_info_to_string(err, test))
        print("")

    def addFailure(self, test, err):
        super(HtmlTestResult, self).addFailure(test, err)
        self._record_test(test, 'failure', self._exc_info_to_string(err, test))
        print("")

    def addSkip(self, test, reason):
        super(HtmlTestResult, self).addSkip(test, reason)
        self._record_test(test, 'skipped', reason)
        print("")

    def addExpectedFailure(self, test, err):
        super(HtmlTestResult, self).addExpectedFailure(test, err)
        self._record_test(test, 'ExpectedFailure', self._exc_info_to_string(err, test))
        print("")

    def addUnexpectedSuccess(self, test):
        super(HtmlTestResult, self).addUnexpectedSuccess(test)
        self._record_test(test, 'UnexpectedSuccess')
        print("")

    @property
    def duration(self):
        return time.time() - self.start_at

    @property
    def summary(self):
        return get_summary(self)

    def render_html_report(self, html_report_name=None, html_report_template=None):
        """ render html report with specified report name and template
            if html_report_name is not specified, use current datetime
            if html_report_template is not specified, use default report template
        """
        if not html_report_template:
            html_report_template = self.default_report_template_path
            logger.log_debug("No html report template specified, use default.")
        else:
            logger.log_info("render with html report template: {}".format(html_report_template))

        with open(html_report_template, "r") as fp:
            template_content = fp.read()

        summary = self.summary
        logger.log_info("Start to render Html report ...")
        logger.log_debug("render data: {}".format(summary))

        report_dir_path = os.path.join(os.getcwd(), "reports")
        start_datetime = summary["time"]["start_at"].strftime('%Y-%m-%d-%H-%M-%S')
        if html_report_name:
            summary["html_report_name"] = html_report_name
            report_dir_path = os.path.join(report_dir_path, html_report_name)
            html_report_name += "-{}.html".format(start_datetime)
        else:
            summary["html_report_name"] = ""
            html_report_name = "{}.html".format(start_datetime)

        if not os.path.isdir(report_dir_path):
            os.makedirs(report_dir_path)

        report_path = os.path.join(report_dir_path, html_report_name)
        with open(report_path, 'w', encoding='utf-8') as fp:
            rendered_content = Template(template_content).render(summary)
            fp.write(rendered_content)

        logger.log_info("Generated Html report: {}".format(report_path))

        return report_path
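
For anyone supplying a custom template (point 3 of the commit message), here is a small sketch of how render_html_report() above feeds Jinja2. The summary values are made up, but the keys mirror what get_summary() and _record_test() produce, and any template written against them can be passed via --html-report-template:

from datetime import datetime
from jinja2 import Template

# hypothetical data, shaped like the summary dict built by get_summary() / _record_test()
summary = {
    "html_report_name": "demo",
    "time": {"start_at": datetime.now(), "duration": 1.234},
    "stat": {"testsRun": 2, "successes": 1, "failures": 1, "errors": 0, "skipped": 0,
             "expectedFailures": 0, "unexpectedSuccesses": 0},
    "records": [
        {"name": "get token", "result_type": "success", "duration": 0.512, "attachment": ""},
        {"name": "create user", "result_type": "failure", "duration": 0.721, "attachment": "AssertionError: ..."}
    ]
}

# same call render_html_report() makes, against your own template file (path is an example)
with open("my_report_template.html") as fp:
    html = Template(fp.read()).render(summary)
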
@@ -1,7 +0,0 @@
requests
PyYAML
PyUnitReport
har2case
colorama
colorlog
flask
@@ -2,6 +2,7 @@ import sys
import unittest

from httprunner import exception, logger, runner, testcase, utils
from httprunner.report import HtmlTestResult, get_summary


class TestCase(unittest.TestCase):
@@ -73,11 +74,10 @@ class TestSuite(unittest.TestSuite):
    def _add_tests_to_suite(self, testcases):
        for testcase_dict in testcases:
            testcase_name = self.test_runner.context.eval_content(testcase_dict["name"])
            testcase_name_with_color = logger.coloring(testcase_name, "yellow")
            if utils.PYTHON_VERSION == 3:
                TestCase.runTest.__doc__ = testcase_name_with_color
                TestCase.runTest.__doc__ = testcase_name
            else:
                TestCase.runTest.__func__.__doc__ = testcase_name_with_color
                TestCase.runTest.__func__.__doc__ = testcase_name

            test = TestCase(self.test_runner, testcase_dict)
            [self.addTest(test) for _ in range(int(testcase_dict.get("times", 1)))]
@@ -126,61 +126,55 @@ class TaskSuite(unittest.TestSuite):
        return self.suite_list


class Result(object):

    class Stat(object):
        def __init__(self, **stat_dict):
            for key, value in stat_dict.items():
                setattr(self, key, value)

    def __init__(self, result, output):
        self.success = result.wasSuccessful()
        self.stat = self.make_stat(result)
        self.output = output

    def make_stat(self, result):
        total = result.testsRun
        failures = len(result.failures)
        errors = len(result.errors)
        skipped = len(result.skipped)
        successes = total - failures - errors - skipped
        stat = {
            "total": total,
            "successes": successes,
            "failures": failures,
            "errors": errors,
            "skipped": skipped
        }
        return self.Stat(**stat)


class HttpRunner(object):

    def __init__(self, path, runner=None):
    def __init__(self, path, **kwargs):
        """ initialize HttpRunner with specified testset file path and test runner
        @params:
            - path: YAML/JSON testset file path
            - runner: HTMLTestRunner() or TextTestRunner()
            - gen_html_report: True/False
            - failfast: False/True, stop the test run on the first error or failure.
        """
        self.path = path
        self.runner = runner or unittest.TextTestRunner()

    def run(self, mapping=None):
        self.gen_html_report = kwargs.pop("gen_html_report", True)
        if self.gen_html_report:
            kwargs["resultclass"] = HtmlTestResult

        self.runner = unittest.TextTestRunner(**kwargs)

    def run(self, **kwargs):
        """ start to run suite
        @param mapping
            if mapping specified, it will override variables in config block
        @param html_report_name
            output html report file name
        @param html_report_template
            report template file path, template should be in Jinja2 format
        """
        try:
            mapping = mapping or {}
            mapping = kwargs.get("mapping", {})
            task_suite = TaskSuite(self.path, mapping)
        except exception.TestcaseNotFound:
            sys.exit(1)

        result = self.runner.run(task_suite)

        output = {}
        for task in task_suite.tasks:
            output.update(task.output)

        return Result(result, output)
        if self.gen_html_report:
            summary = result.summary
            summary["report_path"] = result.render_html_report(
                kwargs.get("html_report_name"),
                kwargs.get("html_report_template")
            )
        else:
            summary = get_summary(result)

        summary["output"] = output
        return summary


class LocustTask(object):

httprunner/templates/default_report_template.html (Normal file, 102 lines)
@@ -0,0 +1,102 @@
<head>
    <meta content="text/html; charset=utf-8" http-equiv="content-type" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>{{html_report_name}} - TestReport</title>
    <style>
        body {
            background-color: #f2f2f2;
            color: #333;
        }
        #summary, #details {
            width: 960px;
        }
        #summary th {
            background-color: skyblue;
            padding: 5px 12px;
        }
        #summary td {
            background-color: lightblue;
            text-align: center;
            padding: 4px 8px;
        }
        #details th {
            background-color: skyblue;
            padding: 5px 12px;
        }
        #details td {
            background-color: lightblue;
            padding: 5px 12px;
        }
        #details .info {
            background-color: lightgrey;
            font-size: smaller;
            padding-left: 2em;
            white-space: nowrap;
        }
        #details .success {
            background-color: greenyellow;
        }
        #details .error {
            background-color: red;
        }
        #details .failure {
            background-color: salmon;
        }
        #details .skipped {
            background-color: gray;
        }
    </style>
</head>

<body>
    <h1>Test Report: {{html_report_name}}</h1>

    <h2>Summary</h2>
    <table id="summary">

        <tr>
            <th>START AT</th>
            <td colspan="4">{{time.start_at.strftime('%Y-%m-%d %H:%M:%S')}}</td>
        </tr>
        <tr>
            <th>DURATION</th>
            <td colspan="4">{{ '%0.3f'| format(time.duration|float) }} seconds</td>
        </tr>
        <tr>
            <th>TOTAL</th>
            <th>SUCCESS</th>
            <th>FAILED</th>
            <th>ERROR</th>
            <th>SKIPPED</th>
            <!-- <th>ExpectedFailure</th>
            <th>UnexpectedSuccess</th> -->
        </tr>
        <tr>
            <td>{{stat.testsRun}}</td>
            <td>{{stat.successes}}</td>
            <td>{{stat.failures}}</td>
            <td>{{stat.errors}}</td>
            <td>{{stat.skipped}}</td>
            <!-- <td>{{stat.expectedFailures}}</td>
            <td>{{stat.unexpectedSuccesses}}</td> -->
        </tr>
    </table>

    <h2>Details</h2>
    <table id="details">
        <tr>
            <th>Status</th>
            <th>Name</th>
            <th>Duration</th>
            <th>Info</th>
        </tr>
        {% for record in records %}
        <tr>
            <th class="{{record.result_type}}" style="width:5em;">{{record.result_type}}</td>
            <td>{{record.name}}</td>
            <td style="text-align:right;width:6em;">{{ '%0.3f'| format(record.duration|float) }} seconds</td>
            <td class="info"><pre>{{record.attachment}}</pre></td>
        </tr>
        {% endfor %}
    </table>
</body>
@@ -1,6 +1,6 @@
requests
PyYAML
PyUnitReport
Jinja2
har2case
colorama
colorlog

setup.py (4 changed lines)
@@ -10,7 +10,7 @@ with io.open("README.rst", encoding='utf-8') as f:
install_requires = [
    "requests",
    "PyYAML",
    "PyUnitReport",
    "Jinja2",
    "har2case",
    "colorama",
    "colorlog"
@@ -27,7 +27,7 @@ setup(
    license='MIT',
    packages=find_packages(exclude=["examples", "tests", "tests.*"]),
    package_data={
        'httprunner': ['locustfile_template'],
        'httprunner': ['locustfile_template', "templates/default_report_template.html"],
    },
    keywords='api test',
    install_requires=install_requires,

@@ -1,35 +0,0 @@
import os
import shutil
import sys

from httprunner.task import TaskSuite
from pyunitreport import HTMLTestRunner
from tests.base import ApiServerUnittest


class TestCli(ApiServerUnittest):

    def setUp(self):
        testset_path = "tests/data/demo_testset_cli.yml"
        output_folder_name = os.path.basename(os.path.splitext(testset_path)[0])
        self.kwargs = {
            "output": output_folder_name
        }
        self.task_suite = TaskSuite(testset_path)
        self.report_save_dir = os.path.join(os.getcwd(), 'reports', output_folder_name)
        self.reset_all()

    def reset_all(self):
        url = "%s/api/reset-all" % self.host
        headers = self.get_authenticated_headers()
        return self.api_client.get(url, headers=headers)

    def test_run_times(self):
        result = HTMLTestRunner(**self.kwargs).run(self.task_suite)
        self.assertEqual(result.testsRun, 10)
        shutil.rmtree(self.report_save_dir)

    def test_skip(self):
        result = HTMLTestRunner(**self.kwargs).run(self.task_suite)
        self.assertEqual(len(result.skipped), 4)
        shutil.rmtree(self.report_save_dir)
tests/test_httprunner.py (Normal file, 46 lines)
@@ -0,0 +1,46 @@
import os
import shutil

from httprunner import HttpRunner
from tests.base import ApiServerUnittest


class TestHttpRunner(ApiServerUnittest):

    def setUp(self):
        self.testset_path = "tests/data/demo_testset_cli.yml"
        self.reset_all()

    def reset_all(self):
        url = "%s/api/reset-all" % self.host
        headers = self.get_authenticated_headers()
        return self.api_client.get(url, headers=headers)

    def test_text_run_times(self):
        kwargs = {
            "gen_html_report": False
        }
        result = HttpRunner(self.testset_path, **kwargs).run()
        self.assertEqual(result["stat"]["testsRun"], 10)

    def test_text_skip(self):
        kwargs = {
            "gen_html_report": False
        }
        result = HttpRunner(self.testset_path, **kwargs).run()
        self.assertEqual(result["stat"]["skipped"], 4)

    def test_html_report(self):
        kwargs = {
            "gen_html_report": True
        }
        output_folder_name = os.path.basename(os.path.splitext(self.testset_path)[0])
        run_kwargs = {
            "html_report_name": output_folder_name
        }
        result = HttpRunner(self.testset_path).run(**run_kwargs)
        self.assertEqual(result["stat"]["testsRun"], 10)
        self.assertEqual(result["stat"]["skipped"], 4)

        report_save_dir = os.path.join(os.getcwd(), 'reports', output_folder_name)
        shutil.rmtree(report_save_dir)
@@ -78,50 +78,50 @@ class TestRunner(ApiServerUnittest):
    def test_run_testset_hardcode(self):
        for testcase_file_path in self.testcase_file_path_list:
            result = HttpRunner(testcase_file_path).run()
            self.assertTrue(result.success)
            self.assertTrue(result["success"])

    def test_run_testsets_hardcode(self):
        result = HttpRunner(self.testcase_file_path_list).run()
        self.assertTrue(result.success)
        self.assertEqual(result.stat.total, 6)
        self.assertEqual(result.stat.successes, 6)
        self.assertTrue(result["success"])
        self.assertEqual(result["stat"]["testsRun"], 6)
        self.assertEqual(result["stat"]["successes"], 6)

    def test_run_testset_template_variables(self):
        testcase_file_path = os.path.join(
            os.getcwd(), 'tests/data/demo_testset_variables.yml')
        result = HttpRunner(testcase_file_path).run()
        self.assertTrue(result.success)
        self.assertTrue(result["success"])

    def test_run_testset_template_import_functions(self):
        testcase_file_path = os.path.join(
            os.getcwd(), 'tests/data/demo_testset_template_import_functions.yml')
        result = HttpRunner(testcase_file_path).run()
        self.assertTrue(result.success)
        self.assertTrue(result["success"])

    def test_run_testsets_template_import_functions(self):
        testcase_file_path = os.path.join(
            os.getcwd(), 'tests/data/demo_testset_template_import_functions.yml')
        result = HttpRunner(testcase_file_path).run()
        self.assertTrue(result.success)
        self.assertTrue(result["success"])

    def test_run_testsets_template_lambda_functions(self):
        testcase_file_path = os.path.join(
            os.getcwd(), 'tests/data/demo_testset_template_lambda_functions.yml')
        result = HttpRunner(testcase_file_path).run()
        self.assertTrue(result.success)
        self.assertTrue(result["success"])

    def test_run_testset_layered(self):
        testcase_file_path = os.path.join(
            os.getcwd(), 'tests/data/demo_testset_layer.yml')
        result = HttpRunner(testcase_file_path).run()
        self.assertTrue(result.success)
        self.assertTrue(result["success"])

    def test_run_testset_output(self):
        testcase_file_path = os.path.join(
            os.getcwd(), 'tests/data/demo_testset_layer.yml')
        result = HttpRunner(testcase_file_path).run()
        self.assertTrue(result.success)
        self.assertIn("token", result.output)
        self.assertTrue(result["success"])
        self.assertIn("token", result["output"])

    def test_run_testset_with_variables_mapping(self):
        testcase_file_path = os.path.join(
@@ -129,9 +129,9 @@ class TestRunner(ApiServerUnittest):
        variables_mapping = {
            "app_version": '2.9.7'
        }
        result = HttpRunner(testcase_file_path).run(variables_mapping)
        self.assertTrue(result.success)
        self.assertIn("token", result.output)
        result = HttpRunner(testcase_file_path).run(mapping=variables_mapping)
        self.assertTrue(result["success"])
        self.assertIn("token", result["output"])

    def test_run_testcase_with_empty_header(self):
        testcase_file_path = os.path.join(
@@ -162,6 +162,6 @@ class TestRunner(ApiServerUnittest):
        testcase_file_path = os.path.join(
            os.getcwd(), 'tests/data/demo_parameters.yml')
        result = HttpRunner(testcase_file_path).run()
        self.assertTrue(result.success)
        self.assertIn("token", result.output)
        self.assertEqual(result.stat.total, 6)
        self.assertTrue(result["success"])
        self.assertIn("token", result["output"])
        self.assertEqual(result["stat"]["testsRun"], 6)