diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 1351a5e9..936907a6 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -6,6 +6,7 @@
- replace logging with [loguru](https://github.com/Delgan/loguru)
- remove support for Python 2.7
+- replace str.format() calls with f-strings
## 2.5.7 (2020-02-21)
diff --git a/httprunner/api.py b/httprunner/api.py
index cb3c862b..517fee84 100644
--- a/httprunner/api.py
+++ b/httprunner/api.py
@@ -1,9 +1,10 @@
import os
import unittest
+from loguru import logger
from sentry_sdk import capture_message
-from httprunner import (__version__, exceptions, loader, logger, parser,
+from httprunner import (__version__, exceptions, loader, parser,
report, runner, utils)
@@ -32,8 +33,6 @@ class HttpRunner(object):
log_file (str): log file path.
"""
- logger.setup_logger(log_level, log_file)
-
self.exception_stage = "initialize HttpRunner()"
kwargs = {
"failfast": failfast,
@@ -99,7 +98,7 @@ class HttpRunner(object):
times = int(times)
except ValueError:
raise exceptions.ParamsError(
- "times should be digit, given: {}".format(times))
+ f"times should be digit, given: {times}")
for times_index in range(times):
# suppose one testcase should not have more than 9999 steps,
@@ -130,7 +129,7 @@ class HttpRunner(object):
for testcase in test_suite:
testcase_name = testcase.config.get("name")
- logger.log_info("Start to run testcase: {}".format(testcase_name))
+ logger.info(f"Start to run testcase: {testcase_name}")
result = self.unittest_runner.run(testcase)
if result.wasSuccessful():
@@ -197,11 +196,11 @@ class HttpRunner(object):
parsed_testcases = parser.parse_tests(tests_mapping)
parse_failed_testfiles = parser.get_parse_failed_testfiles()
if parse_failed_testfiles:
- logger.log_warning("parse failures occurred ...")
+ logger.warning("parse failures occurred ...")
utils.dump_logs(parse_failed_testfiles, project_mapping, "parse_failed")
if len(parsed_testcases) == 0:
- logger.log_error("failed to parse all cases, abort.")
+ logger.error("failed to parse all cases, abort.")
raise exceptions.ParseTestsFailure
if self.save_tests:
@@ -295,7 +294,7 @@ class HttpRunner(object):
dict: result summary
"""
- logger.log_info("HttpRunner version: {}".format(__version__))
+ logger.info(f"HttpRunner version: {__version__}")
if loader.is_test_path(path_or_tests):
return self.run_path(path_or_tests, dot_env_path, mapping)
elif loader.is_test_content(path_or_tests):
@@ -303,4 +302,4 @@ class HttpRunner(object):
loader.init_pwd(project_working_directory)
return self.run_tests(path_or_tests)
else:
- raise exceptions.ParamsError("Invalid testcase path or testcases: {}".format(path_or_tests))
+ raise exceptions.ParamsError(f"Invalid testcase path or testcases: {path_or_tests}")
diff --git a/httprunner/cli.py b/httprunner/cli.py
index b04951e9..3d4fb1f8 100644
--- a/httprunner/cli.py
+++ b/httprunner/cli.py
@@ -3,11 +3,11 @@ import os
import sys
import sentry_sdk
+from loguru import logger
from httprunner import __description__, __version__, exceptions
from httprunner.api import HttpRunner
from httprunner.loader import load_cases
-from httprunner.logger import color_print, log_error
from httprunner.report import gen_html_report
from httprunner.utils import (create_scaffold,
prettify_json_file, init_sentry_sdk)
@@ -67,19 +67,19 @@ def main():
sys.exit(0)
if args.version:
- color_print("{}".format(__version__), "GREEN")
+ print(f"{__version__}")
sys.exit(0)
if args.validate:
for validate_path in args.validate:
try:
- color_print("validate test file: {}".format(validate_path), "GREEN")
+ logger.info(f"validate test file: {validate_path}")
load_cases(validate_path, args.dot_env_path)
except exceptions.MyBaseError as ex:
- log_error(str(ex))
+ logger.error(str(ex))
continue
- color_print("done!", "BLUE")
+ logger.info("done!")
sys.exit(0)
if args.prettify:
@@ -111,8 +111,7 @@ def main():
)
err_code |= (0 if summary and summary["success"] else 1)
except Exception as ex:
- color_print("!!!!!!!!!! exception stage: {} !!!!!!!!!!".format(runner.exception_stage), "YELLOW")
- color_print(str(ex), "RED")
+ logger.error(f"!!!!!!!!!! exception stage: {runner.exception_stage} !!!!!!!!!!\n{str(ex)}")
sentry_sdk.capture_exception(ex)
err_code = 1
diff --git a/httprunner/client.py b/httprunner/client.py
index cbef3a32..4577a40a 100644
--- a/httprunner/client.py
+++ b/httprunner/client.py
@@ -4,11 +4,12 @@ import time
import requests
import urllib3
+from loguru import logger
from requests import Request, Response
from requests.exceptions import (InvalidSchema, InvalidURL, MissingSchema,
RequestException)
-from httprunner import logger, response
+from httprunner import response
from httprunner.utils import lower_dict_keys, omit_long_data
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@@ -18,10 +19,10 @@ def get_req_resp_record(resp_obj):
""" get request and response info from Response() object.
"""
def log_print(req_resp_dict, r_type):
- msg = "\n================== {} details ==================\n".format(r_type)
+ msg = f"\n================== {r_type} details ==================\n"
for key, value in req_resp_dict[r_type].items():
msg += "{:<16} : {}\n".format(key, repr(value))
- logger.log_debug(msg)
+ logger.debug(msg)
req_resp_dict = {
"request": {},
@@ -214,15 +215,13 @@ class HttpSession(requests.Session):
try:
response.raise_for_status()
- except RequestException as e:
- logger.log_error(u"{exception}".format(exception=str(e)))
+ except RequestException as ex:
+ logger.error(f"{str(ex)}")
else:
- logger.log_info(
- """status_code: {}, response_time(ms): {} ms, response_length: {} bytes\n""".format(
- response.status_code,
- response_time_ms,
- content_size
- )
+ logger.info(
+ f"status_code: {response.status_code}, "
+ f"response_time(ms): {response_time_ms} ms, "
+ f"response_length: {content_size} bytes\n"
)
return response
@@ -234,9 +233,9 @@ class HttpSession(requests.Session):
"""
try:
msg = "processed request:\n"
- msg += "> {method} {url}\n".format(method=method, url=url)
- msg += "> kwargs: {kwargs}".format(kwargs=kwargs)
- logger.log_debug(msg)
+ msg += f"> {method} {url}\n"
+ msg += f"> kwargs: {kwargs}"
+ logger.debug(msg)
return requests.Session.request(self, method, url, **kwargs)
except (MissingSchema, InvalidSchema, InvalidURL):
raise
diff --git a/httprunner/ext/locusts/cli.py b/httprunner/ext/locusts/cli.py
index 24e54abf..616486f8 100644
--- a/httprunner/ext/locusts/cli.py
+++ b/httprunner/ext/locusts/cli.py
@@ -18,8 +18,9 @@ import multiprocessing
import os
import sys
+from loguru import logger
+
from httprunner import __version__
-from httprunner import logger
from httprunner.utils import init_sentry_sdk
init_sentry_sdk()
@@ -31,7 +32,7 @@ def parse_locustfile(file_path):
if file_path is a YAML/JSON file, convert it to locustfile
"""
if not os.path.isfile(file_path):
- logger.color_print("file path invalid, exit.", "RED")
+ logger.error("file path invalid, exit.")
sys.exit(1)
file_suffix = os.path.splitext(file_path)[1]
@@ -41,7 +42,7 @@ def parse_locustfile(file_path):
locustfile_path = gen_locustfile(file_path)
else:
# '' or other suffix
- logger.color_print("file type should be YAML/JSON/Python, exit.", "RED")
+ logger.error("file type should be YAML/JSON/Python, exit.")
sys.exit(1)
return locustfile_path
@@ -105,7 +106,7 @@ def run_locusts_with_processes(sys_argv, processes_count):
def main():
""" Performance test with locust: parse command line options and run commands.
"""
- print("HttpRunner version: {}".format(__version__))
+ print(f"HttpRunner version: {__version__}")
sys.argv[0] = 'locust'
if len(sys.argv) == 1:
sys.argv.extend(["-h"])
@@ -130,7 +131,7 @@ def main():
# default
loglevel = "WARNING"
- logger.setup_logger(loglevel)
+ # logger.setup_logger(loglevel)
# get testcase file path
try:
@@ -147,7 +148,7 @@ def main():
""" locusts -f locustfile.py --processes 4
"""
if "--no-web" in sys.argv:
- logger.log_error("conflict parameter args: --processes & --no-web. \nexit.")
+ logger.error("conflict parameter args: --processes & --no-web. \nexit.")
sys.exit(1)
processes_index = sys.argv.index('--processes')
@@ -157,7 +158,7 @@ def main():
locusts -f locustfile.py --processes
"""
processes_count = multiprocessing.cpu_count()
- logger.log_warning("processes count not specified, use {} by default.".format(processes_count))
+ logger.warning(f"processes count not specified, use {processes_count} by default.")
else:
try:
""" locusts -f locustfile.py --processes 4 """
@@ -166,7 +167,7 @@ def main():
except ValueError:
""" locusts -f locustfile.py --processes -P 8888 """
processes_count = multiprocessing.cpu_count()
- logger.log_warning("processes count not specified, use {} by default.".format(processes_count))
+ logger.warning(f"processes count not specified, use {processes_count} by default.")
sys.argv.pop(processes_index)
run_locusts_with_processes(sys.argv, processes_count)
diff --git a/httprunner/ext/uploader/__init__.py b/httprunner/ext/uploader/__init__.py
index fb299b1d..284c10c3 100644
--- a/httprunner/ext/uploader/__init__.py
+++ b/httprunner/ext/uploader/__init__.py
@@ -85,12 +85,12 @@ def prepare_upload_test(test_dict):
"""
upload_json = test_dict["request"].pop("upload", {})
if not upload_json:
- raise ParamsError("invalid upload info: {}".format(upload_json))
+ raise ParamsError(f"invalid upload info: {upload_json}")
params_list = []
for key, value in upload_json.items():
test_dict["variables"][key] = value
- params_list.append("{}=${}".format(key, key))
+ params_list.append(f"{key}=${key}")
params_str = ", ".join(params_list)
test_dict["variables"]["m_encoder"] = "${multipart_encoder(" + params_str + ")}"
diff --git a/httprunner/loader/buildup.py b/httprunner/loader/buildup.py
index 23f772cf..df40cfe9 100644
--- a/httprunner/loader/buildup.py
+++ b/httprunner/loader/buildup.py
@@ -1,7 +1,9 @@
import importlib
import os
-from httprunner import exceptions, logger, utils
+from loguru import logger
+
+from httprunner import exceptions, utils
from httprunner.loader.check import JsonSchemaChecker
from httprunner.loader.load import load_module_functions, load_file, load_dot_env_file, \
load_folder_files
@@ -53,7 +55,7 @@ def __extend_with_api_ref(raw_testinfo):
if api_name in tests_def_mapping["api"]:
block = tests_def_mapping["api"][api_name]
elif not os.path.isfile(api_name):
- raise exceptions.ApiNotFound("{} not found!".format(api_name))
+ raise exceptions.ApiNotFound(f"{api_name} not found!")
else:
block = load_file(api_name)
@@ -84,7 +86,7 @@ def __extend_with_testcase_ref(raw_testinfo):
testcase_dict = load_testcase_v2(loaded_testcase)
else:
raise exceptions.FileFormatError(
- "Invalid format testcase: {}".format(testcase_path))
+ f"Invalid format testcase: {testcase_path}")
tests_def_mapping["testcases"][testcase_path] = testcase_dict
else:
@@ -186,8 +188,8 @@ def load_testcase(raw_testcase):
elif key == "test":
tests.append(load_teststep(test_block))
else:
- logger.log_warning(
- "unexpected block key: {}. block key should only be 'config' or 'test'.".format(key)
+ logger.warning(
+ f"unexpected block key: {key}. block key should only be 'config' or 'test'."
)
return {
@@ -485,9 +487,9 @@ def load_cases(path, dot_env_path=None):
try:
loaded_content = load_test_file(path)
except exceptions.ApiNotFound as ex:
- logger.log_warning("Invalid api reference in {}: {}".format(path, ex))
+ logger.warning(f"Invalid api reference in {path}: {ex}")
except exceptions.FileFormatError:
- logger.log_warning("Invalid test file format: {}".format(path))
+ logger.warning(f"Invalid test file format: {path}")
if not loaded_content:
pass
diff --git a/httprunner/loader/check.py b/httprunner/loader/check.py
index 35560d30..ba248ec7 100644
--- a/httprunner/loader/check.py
+++ b/httprunner/loader/check.py
@@ -4,8 +4,9 @@ import os
import platform
import jsonschema
+from loguru import logger
-from httprunner import exceptions, logger
+from httprunner import exceptions
schemas_root_dir = os.path.join(os.path.dirname(__file__), "schemas")
common_schema_path = os.path.join(schemas_root_dir, "common.schema.json")
@@ -50,7 +51,7 @@ class JsonSchemaChecker(object):
try:
jsonschema.validate(content, scheme, resolver=resolver)
except jsonschema.exceptions.ValidationError as ex:
- logger.log_error(str(ex))
+ logger.error(str(ex))
raise exceptions.FileFormatError
return True
diff --git a/httprunner/loader/load.py b/httprunner/loader/load.py
index 27fe0455..158cf5b3 100644
--- a/httprunner/loader/load.py
+++ b/httprunner/loader/load.py
@@ -5,9 +5,10 @@ import os
import types
import yaml
+from loguru import logger
from httprunner import builtin
-from httprunner import exceptions, logger, utils
+from httprunner import exceptions, utils
from httprunner.loader.locate import get_project_working_directory
try:
@@ -25,7 +26,7 @@ def _load_yaml_file(yaml_file):
try:
yaml_content = yaml.load(stream)
except yaml.YAMLError as ex:
- logger.log_error(str(ex))
+ logger.error(str(ex))
raise exceptions.FileFormatError
return yaml_content
@@ -38,8 +39,8 @@ def _load_json_file(json_file):
try:
json_content = json.load(data_file)
except exceptions.JSONDecodeError:
- err_msg = u"JSONDecodeError: JSON file format error: {}".format(json_file)
- logger.log_error(err_msg)
+ err_msg = f"JSONDecodeError: JSON file format error: {json_file}"
+ logger.error(err_msg)
raise exceptions.FileFormatError(err_msg)
return json_content
@@ -90,7 +91,7 @@ def load_csv_file(csv_file):
def load_file(file_path):
if not os.path.isfile(file_path):
- raise exceptions.FileNotFound("{} does not exist.".format(file_path))
+ raise exceptions.FileNotFound(f"{file_path} does not exist.")
file_suffix = os.path.splitext(file_path)[1].lower()
if file_suffix == '.json':
@@ -101,8 +102,7 @@ def load_file(file_path):
return load_csv_file(file_path)
else:
# '' or other suffix
- err_msg = u"Unsupported file format: {}".format(file_path)
- logger.log_warning(err_msg)
+ logger.warning(f"Unsupported file format: {file_path}")
return []
@@ -169,7 +169,7 @@ def load_dot_env_file(dot_env_path):
if not os.path.isfile(dot_env_path):
return {}
- logger.log_info("Loading environment variables from {}".format(dot_env_path))
+ logger.info(f"Loading environment variables from {dot_env_path}")
env_variables_mapping = {}
with io.open(dot_env_path, 'r', encoding='utf-8') as fp:
diff --git a/httprunner/loader/locate.py b/httprunner/loader/locate.py
index 5fe7a5b3..0d4dd056 100644
--- a/httprunner/loader/locate.py
+++ b/httprunner/loader/locate.py
@@ -1,7 +1,9 @@
import os
import sys
-from httprunner import exceptions, logger
+from loguru import logger
+
+from httprunner import exceptions
project_working_directory = None
@@ -26,7 +28,7 @@ def locate_file(start_path, file_name):
elif os.path.isdir(start_path):
start_dir_path = start_path
else:
- raise exceptions.FileNotFound("invalid path: {}".format(start_path))
+ raise exceptions.FileNotFound(f"invalid path: {start_path}")
file_path = os.path.join(start_dir_path, file_name)
if os.path.isfile(file_path):
@@ -34,14 +36,14 @@ def locate_file(start_path, file_name):
# current working directory
if os.path.abspath(start_dir_path) == os.getcwd():
- raise exceptions.FileNotFound("{} not found in {}".format(file_name, start_path))
+ raise exceptions.FileNotFound(f"{file_name} not found in {start_path}")
# system root dir
# Windows, e.g. 'E:\\'
# Linux/Darwin, '/'
parent_dir = os.path.dirname(start_dir_path)
if parent_dir == start_dir_path:
- raise exceptions.FileNotFound("{} not found in {}".format(file_name, start_path))
+ raise exceptions.FileNotFound(f"{file_name} not found in {start_path}")
# locate recursive upward
return locate_file(parent_dir, file_name)
@@ -85,8 +87,8 @@ def init_project_working_directory(test_path):
def prepare_path(path):
if not os.path.exists(path):
- err_msg = "path not exist: {}".format(path)
- logger.log_error(err_msg)
+ err_msg = f"path not exist: {path}"
+ logger.error(err_msg)
raise exceptions.FileNotFound(err_msg)
if not os.path.isabs(path):
diff --git a/httprunner/parser.py b/httprunner/parser.py
index f1600d39..75bdef9c 100644
--- a/httprunner/parser.py
+++ b/httprunner/parser.py
@@ -6,8 +6,9 @@ import collections
import json
import re
+from loguru import logger
+
from httprunner import exceptions, utils, loader
-from httprunner import logger
from httprunner.compat import basestring, numeric_types, str
# use $$ to escape $ notation
@@ -286,7 +287,7 @@ def uniform_validator(validator):
"""
if not isinstance(validator, dict):
- raise exceptions.ParamsError("invalid validator: {}".format(validator))
+ raise exceptions.ParamsError(f"invalid validator: {validator}")
if "check" in validator and "expect" in validator:
# format1
@@ -300,12 +301,12 @@ def uniform_validator(validator):
compare_values = validator[comparator]
if not isinstance(compare_values, list) or len(compare_values) != 2:
- raise exceptions.ParamsError("invalid validator: {}".format(validator))
+ raise exceptions.ParamsError(f"invalid validator: {validator}")
check_item, expect_value = compare_values
else:
- raise exceptions.ParamsError("invalid validator: {}".format(validator))
+ raise exceptions.ParamsError(f"invalid validator: {validator}")
# uniform comparator, e.g. lt => less_than, eq => equals
comparator = get_uniform_comparator(comparator)
@@ -410,7 +411,7 @@ def get_mapping_variable(variable_name, variables_mapping):
try:
return variables_mapping[variable_name]
except KeyError:
- raise exceptions.VariableNotFound("{} is not found.".format(variable_name))
+ raise exceptions.VariableNotFound(f"{variable_name} is not found.")
def get_mapping_function(function_name, functions_mapping):
@@ -455,7 +456,7 @@ def get_mapping_function(function_name, functions_mapping):
except AttributeError:
pass
- raise exceptions.FunctionNotFound("{} is not found.".format(function_name))
+ raise exceptions.FunctionNotFound(f"{function_name} is not found.")
def parse_function_params(params):
@@ -578,12 +579,12 @@ class LazyFunction(object):
if self._kwargs:
args_string += ", "
str_kwargs = [
- "{}={}".format(key, str(value))
+ f"{key}={str(value)}"
for key, value in self._kwargs.items()
]
args_string += ", ".join(str_kwargs)
- return "LazyFunction({}({}))".format(self.func_name, args_string)
+ return f"LazyFunction({self.func_name}({args_string}))"
def __prepare_cache_key(self, args, kwargs):
return self.func_name, repr(args), repr(kwargs)
@@ -698,7 +699,7 @@ class LazyString(object):
self._string += escape_braces(remain_string)
def __repr__(self):
- return "LazyString({})".format(self.raw_string)
+ return f"LazyString({self.raw_string})"
def to_value(self, variables_mapping=None):
""" parse lazy data with evaluated variables mapping.
@@ -1247,7 +1248,7 @@ def _parse_testcase(testcase, project_mapping, session_variables_set=None):
testcase_type = testcase["type"]
testcase_path = testcase.get("path")
- logger.log_error("failed to parse testcase: {}, error: {}".format(testcase_path, ex))
+ logger.error(f"failed to parse testcase: {testcase_path}, error: {ex}")
global parse_failed_testfiles
if testcase_type not in parse_failed_testfiles:
diff --git a/httprunner/report/html/gen_report.py b/httprunner/report/html/gen_report.py
index af7814c6..c7791183 100644
--- a/httprunner/report/html/gen_report.py
+++ b/httprunner/report/html/gen_report.py
@@ -3,8 +3,8 @@ import os
from datetime import datetime
from jinja2 import Template
+from loguru import logger
-from httprunner import logger
from httprunner.exceptions import SummaryEmpty
@@ -19,7 +19,7 @@ def gen_html_report(summary, report_template=None, report_dir=None, report_file=
"""
if not summary["time"] or summary["stat"]["testcases"]["total"] == 0:
- logger.log_error("test result summary is empty ! {}".format(summary))
+ logger.error(f"test result summary is empty ! {summary}")
raise SummaryEmpty
if not report_template:
@@ -27,11 +27,11 @@ def gen_html_report(summary, report_template=None, report_dir=None, report_file=
os.path.abspath(os.path.dirname(__file__)),
"template.html"
)
- logger.log_debug("No html report template specified, use default.")
+ logger.debug("No html report template specified, use default.")
else:
- logger.log_info("render with html report template: {}".format(report_template))
+ logger.info(f"render with html report template: {report_template}")
- logger.log_info("Start to render Html report ...")
+ logger.info("Start to render Html report ...")
start_at_timestamp = summary["time"]["start_at"]
utc_time_iso_8601_str = datetime.utcfromtimestamp(start_at_timestamp).isoformat()
@@ -58,7 +58,7 @@ def gen_html_report(summary, report_template=None, report_dir=None, report_file=
).render(summary)
fp_w.write(rendered_content)
- logger.log_info("Generated Html report: {}".format(report_path))
+ logger.info(f"Generated Html report: {report_path}")
return report_path
diff --git a/httprunner/report/html/result.py b/httprunner/report/html/result.py
index d4076c19..762d0bb1 100644
--- a/httprunner/report/html/result.py
+++ b/httprunner/report/html/result.py
@@ -1,7 +1,7 @@
import time
import unittest
-from httprunner import logger
+from loguru import logger
class HtmlTestResult(unittest.TextTestResult):
@@ -27,7 +27,7 @@ class HtmlTestResult(unittest.TextTestResult):
def startTest(self, test):
""" add start test time """
super(HtmlTestResult, self).startTest(test)
- logger.color_print(test.shortDescription(), "yellow")
+ logger.info(test.shortDescription())
def addSuccess(self, test):
super(HtmlTestResult, self).addSuccess(test)
diff --git a/httprunner/report/stringify.py b/httprunner/report/stringify.py
index bd9d9007..4797ce8d 100644
--- a/httprunner/report/stringify.py
+++ b/httprunner/report/stringify.py
@@ -205,7 +205,7 @@ def stringify_summary(summary):
for index, suite_summary in enumerate(summary["details"]):
if not suite_summary.get("name"):
- suite_summary["name"] = "testcase {}".format(index)
+ suite_summary["name"] = f"testcase {index}"
for record in suite_summary.get("records"):
meta_datas = record['meta_datas']
diff --git a/httprunner/response.py b/httprunner/response.py
index 80f040d1..d13f4852 100644
--- a/httprunner/response.py
+++ b/httprunner/response.py
@@ -2,8 +2,9 @@ import re
from collections import OrderedDict
import jsonpath
+from loguru import logger
-from httprunner import exceptions, logger, utils
+from httprunner import exceptions, utils
from httprunner.compat import basestring, is_py2
text_extractor_regexp_compile = re.compile(r".*\(.*\).*")
@@ -32,8 +33,8 @@ class ResponseObject(object):
self.__dict__[key] = value
return value
except AttributeError:
- err_msg = "ResponseObject does not have attribute: {}".format(key)
- logger.log_error(err_msg)
+ err_msg = f"ResponseObject does not have attribute: {key}"
+ logger.error(err_msg)
raise exceptions.ParamsError(err_msg)
def _extract_field_with_jsonpath(self, field: str) -> list:
@@ -81,9 +82,9 @@ class ResponseObject(object):
assert result
return result
except (AssertionError, exceptions.JSONDecodeError):
- err_msg = u"Failed to extract data with jsonpath! => {}\n".format(field)
- err_msg += u"response body: {}\n".format(self.text)
- logger.log_error(err_msg)
+ err_msg = f"Failed to extract data with jsonpath! => {field}\n"
+ err_msg += f"response body: {self.text}\n"
+ logger.error(err_msg)
raise exceptions.ExtractFailure(err_msg)
def _extract_field_with_regex(self, field):
@@ -108,9 +109,9 @@ class ResponseObject(object):
"""
matched = re.search(field, self.text)
if not matched:
- err_msg = u"Failed to extract data with regex! => {}\n".format(field)
- err_msg += u"response body: {}\n".format(self.text)
- logger.log_error(err_msg)
+ err_msg = f"Failed to extract data with regex! => {field}\n"
+ err_msg += f"response body: {self.text}\n"
+ logger.error(err_msg)
raise exceptions.ExtractFailure(err_msg)
return matched.group(1)
@@ -141,8 +142,8 @@ class ResponseObject(object):
if top_query in ["status_code", "encoding", "ok", "reason", "url"]:
if sub_query:
# status_code.XX
- err_msg = u"Failed to extract: {}\n".format(field)
- logger.log_error(err_msg)
+ err_msg = f"Failed to extract: {field}\n"
+ logger.error(err_msg)
raise exceptions.ParamsError(err_msg)
return getattr(self, top_query)
@@ -157,27 +158,27 @@ class ResponseObject(object):
try:
return cookies[sub_query]
except KeyError:
- err_msg = u"Failed to extract cookie! => {}\n".format(field)
- err_msg += u"response cookies: {}\n".format(cookies)
- logger.log_error(err_msg)
+ err_msg = f"Failed to extract cookie! => {field}\n"
+ err_msg += f"response cookies: {cookies}\n"
+ logger.error(err_msg)
raise exceptions.ExtractFailure(err_msg)
# elapsed
elif top_query == "elapsed":
available_attributes = u"available attributes: days, seconds, microseconds, total_seconds"
if not sub_query:
- err_msg = u"elapsed is datetime.timedelta instance, attribute should also be specified!\n"
+ err_msg = "elapsed is datetime.timedelta instance, attribute should also be specified!\n"
err_msg += available_attributes
- logger.log_error(err_msg)
+ logger.error(err_msg)
raise exceptions.ParamsError(err_msg)
elif sub_query in ["days", "seconds", "microseconds"]:
return getattr(self.elapsed, sub_query)
elif sub_query == "total_seconds":
return self.elapsed.total_seconds()
else:
- err_msg = "{} is not valid datetime.timedelta attribute.\n".format(sub_query)
+ err_msg = f"{sub_query} is not valid datetime.timedelta attribute.\n"
err_msg += available_attributes
- logger.log_error(err_msg)
+ logger.error(err_msg)
raise exceptions.ParamsError(err_msg)
# headers
@@ -190,9 +191,9 @@ class ResponseObject(object):
try:
return headers[sub_query]
except KeyError:
- err_msg = u"Failed to extract header! => {}\n".format(field)
- err_msg += u"response headers: {}\n".format(headers)
- logger.log_error(err_msg)
+ err_msg = f"Failed to extract header! => {field}\n"
+ err_msg += f"response headers: {headers}\n"
+ logger.error(err_msg)
raise exceptions.ExtractFailure(err_msg)
# response body
@@ -214,9 +215,9 @@ class ResponseObject(object):
return utils.query_json(body, sub_query)
else:
# content = "abcdefg", content.xxx
- err_msg = u"Failed to extract attribute from response body! => {}\n".format(field)
- err_msg += u"response body: {}\n".format(body)
- logger.log_error(err_msg)
+ err_msg = f"Failed to extract attribute from response body! => {field}\n"
+ err_msg += f"response body: {body}\n"
+ logger.error(err_msg)
raise exceptions.ExtractFailure(err_msg)
# new set response attributes in teardown_hooks
@@ -235,30 +236,30 @@ class ResponseObject(object):
return utils.query_json(attributes, sub_query)
else:
# content = "attributes.new_attribute_not_exist"
- err_msg = u"Failed to extract cumstom set attribute from teardown hooks! => {}\n".format(field)
- err_msg += u"response set attributes: {}\n".format(attributes)
- logger.log_error(err_msg)
+            err_msg = f"Failed to extract custom set attribute from teardown hooks! => {field}\n"
+ err_msg += f"response set attributes: {attributes}\n"
+ logger.error(err_msg)
raise exceptions.TeardownHooksFailure(err_msg)
# others
else:
- err_msg = u"Failed to extract attribute from response! => {}\n".format(field)
- err_msg += u"available response attributes: status_code, cookies, elapsed, headers, content, " \
- u"text, json, encoding, ok, reason, url.\n\n"
- err_msg += u"If you want to set attribute in teardown_hooks, take the following example as reference:\n"
- err_msg += u"response.new_attribute = 'new_attribute_value'\n"
- logger.log_error(err_msg)
+ err_msg = f"Failed to extract attribute from response! => {field}\n"
+ err_msg += "available response attributes: status_code, cookies, elapsed, headers, content, " \
+ "text, json, encoding, ok, reason, url.\n\n"
+ err_msg += "If you want to set attribute in teardown_hooks, take the following example as reference:\n"
+ err_msg += "response.new_attribute = 'new_attribute_value'\n"
+ logger.error(err_msg)
raise exceptions.ParamsError(err_msg)
def extract_field(self, field):
""" extract value from requests.Response.
"""
if not isinstance(field, basestring):
- err_msg = u"Invalid extractor! => {}\n".format(field)
- logger.log_error(err_msg)
+ err_msg = f"Invalid extractor! => {field}\n"
+ logger.error(err_msg)
raise exceptions.ParamsError(err_msg)
- msg = "extract: {}".format(field)
+ msg = f"extract: {field}"
if field.startswith("$"):
value = self._extract_field_with_jsonpath(field)
@@ -270,8 +271,8 @@ class ResponseObject(object):
if is_py2 and isinstance(value, unicode):
value = value.encode("utf-8")
- msg += "\t=> {}".format(value)
- logger.log_debug(msg)
+ msg += f"\t=> {value}"
+ logger.debug(msg)
return value
@@ -295,7 +296,7 @@ class ResponseObject(object):
if not extractors:
return {}
- logger.log_debug("start to extract from response object.")
+ logger.debug("start to extract from response object.")
extracted_variables_mapping = OrderedDict()
extract_binds_order_dict = utils.ensure_mapping_format(extractors)
diff --git a/httprunner/runner.py b/httprunner/runner.py
index aa163f52..f6ea0eb7 100644
--- a/httprunner/runner.py
+++ b/httprunner/runner.py
@@ -3,7 +3,9 @@
from enum import Enum
from unittest.case import SkipTest
-from httprunner import exceptions, logger, response, utils
+from loguru import logger
+
+from httprunner import exceptions, response, utils
from httprunner.client import HttpSession
from httprunner.context import SessionContext
from httprunner.validator import Validator
@@ -116,12 +118,12 @@ class Runner(object):
elif "skipIf" in test_dict:
skip_if_condition = test_dict["skipIf"]
if self.session_context.eval_content(skip_if_condition):
- skip_reason = "{} evaluate to True".format(skip_if_condition)
+ skip_reason = f"{skip_if_condition} evaluate to True"
elif "skipUnless" in test_dict:
skip_unless_condition = test_dict["skipUnless"]
if not self.session_context.eval_content(skip_unless_condition):
- skip_reason = "{} evaluate to False".format(skip_unless_condition)
+ skip_reason = f"{skip_unless_condition} evaluate to False"
if skip_reason:
raise SkipTest(skip_reason)
@@ -140,7 +142,7 @@ class Runner(object):
hook_type (HookTypeEnum): setup/teardown
"""
- logger.log_debug("call {} hook actions.".format(hook_type.name))
+ logger.debug(f"call {hook_type.name} hook actions.")
for action in actions:
if isinstance(action, dict) and len(action) == 1:
@@ -148,17 +150,14 @@ class Runner(object):
# {"var": "${func()}"}
var_name, hook_content = list(action.items())[0]
hook_content_eval = self.session_context.eval_content(hook_content)
- logger.log_debug(
- "assignment with hook: {} = {} => {}".format(
- var_name, hook_content, hook_content_eval
- )
- )
+ logger.debug(
+ f"assignment with hook: {var_name} = {hook_content} => {hook_content_eval}")
self.session_context.update_test_variables(
var_name, hook_content_eval
)
else:
# format 2
- logger.log_debug("call hook function: {}".format(action))
+ logger.debug(f"call hook function: {action}")
# TODO: check hook function if valid
self.session_context.eval_content(action)
@@ -230,9 +229,8 @@ class Runner(object):
except KeyError:
raise exceptions.ParamsError("URL or METHOD missed!")
- logger.log_info("{method} {url}".format(method=method, url=parsed_url))
- logger.log_debug(
- "request kwargs(raw): {kwargs}".format(kwargs=parsed_test_request))
+ logger.info(f"{method} {parsed_url}")
+ logger.debug(f"request kwargs(raw): {parsed_test_request}")
# request
resp = self.http_client_session.request(
@@ -248,21 +246,22 @@ class Runner(object):
# log request
err_msg += "====== request details ======\n"
- err_msg += "url: {}\n".format(parsed_url)
- err_msg += "method: {}\n".format(method)
- err_msg += "headers: {}\n".format(parsed_test_request.pop("headers", {}))
+ err_msg += f"url: {parsed_url}\n"
+ err_msg += f"method: {method}\n"
+ headers = parsed_test_request.pop("headers", {})
+ err_msg += f"headers: {headers}\n"
for k, v in parsed_test_request.items():
v = utils.omit_long_data(v)
- err_msg += "{}: {}\n".format(k, repr(v))
+ err_msg += f"{k}: {repr(v)}\n"
err_msg += "\n"
# log response
err_msg += "====== response details ======\n"
- err_msg += "status_code: {}\n".format(resp_obj.status_code)
- err_msg += "headers: {}\n".format(resp_obj.headers)
- err_msg += "body: {}\n".format(repr(resp_obj.text))
- logger.log_error(err_msg)
+ err_msg += f"status_code: {resp_obj.status_code}\n"
+ err_msg += f"headers: {resp_obj.headers}\n"
+ err_msg += f"body: {repr(resp_obj.text)}\n"
+ logger.error(err_msg)
# teardown hooks
teardown_hooks = test_dict.get("teardown_hooks", [])
@@ -395,9 +394,9 @@ class Runner(object):
output = {}
for variable in output_variables_list:
if variable not in variables_mapping:
- logger.log_warning(
- "variable '{}' can not be found in variables mapping, "
- "failed to export!".format(variable)
+ logger.warning(
+ f"variable '{variable}' can not be found in variables mapping, "
+ "failed to export!"
)
continue
diff --git a/httprunner/utils.py b/httprunner/utils.py
index 4ea1faaa..b9322ee3 100644
--- a/httprunner/utils.py
+++ b/httprunner/utils.py
@@ -22,7 +22,7 @@ absolute_http_url_regexp = re.compile(r"^https?://", re.I)
def init_sentry_sdk():
sentry_sdk.init(
dsn="https://cc6dd86fbe9f4e7fbd95248cfcff114d@sentry.io/1862849",
- release="httprunner@{}".format(__version__)
+ release=f"httprunner@{__version__}"
)
with sentry_sdk.configure_scope() as scope:
@@ -34,7 +34,7 @@ def set_os_environ(variables_mapping):
"""
for variable in variables_mapping:
os.environ[variable] = variables_mapping[variable]
- logger.debug("Set OS environment variable: {}".format(variable))
+ logger.debug(f"Set OS environment variable: {variable}")
def unset_os_environ(variables_mapping):
@@ -42,7 +42,7 @@ def unset_os_environ(variables_mapping):
"""
for variable in variables_mapping:
os.environ.pop(variable)
- logger.debug("Unset OS environment variable: {}".format(variable))
+ logger.debug(f"Unset OS environment variable: {variable}")
def get_os_environ(variable_name):
@@ -109,7 +109,7 @@ def query_json(json_content, query, delimiter='.'):
"""
raise_flag = False
- response_body = u"response body: {}\n".format(json_content)
+ response_body = f"response body: {json_content}\n"
try:
for key in query.split(delimiter):
if isinstance(json_content, (list, basestring)):
@@ -118,13 +118,13 @@ def query_json(json_content, query, delimiter='.'):
json_content = json_content[key]
else:
logger.error(
- "invalid type value: {}({})".format(json_content, type(json_content)))
+ f"invalid type value: {json_content}({type(json_content)})")
raise_flag = True
except (KeyError, ValueError, IndexError):
raise_flag = True
if raise_flag:
- err_msg = u"Failed to extract! => {}\n".format(query)
+ err_msg = f"Failed to extract! => {query}\n"
err_msg += response_body
logger.error(err_msg)
raise exceptions.ExtractFailure(err_msg)
@@ -371,21 +371,21 @@ def create_scaffold(project_name):
""" create scaffold with specified project name.
"""
if os.path.isdir(project_name):
- logger.warning(u"Folder {} exists, please specify a new folder name.".format(project_name))
+ logger.warning(f"Folder {project_name} exists, please specify a new folder name.")
return
- logger.info("Start to create new project: {}".format(project_name))
- logger.info("CWD: {}".format(os.getcwd()))
+ logger.info(f"Start to create new project: {project_name}")
+ logger.info(f"CWD: {os.getcwd()}")
def create_folder(path):
os.makedirs(path)
- msg = "created folder: {}".format(path)
+ msg = f"created folder: {path}"
logger.info(msg)
def create_file(path, file_content=""):
with open(path, 'w') as f:
f.write(file_content)
- msg = "created file: {}".format(path)
+ msg = f"created file: {path}"
logger.info(msg)
demo_api_content = """
@@ -526,14 +526,14 @@ def prettify_json_file(file_list):
"""
for json_file in set(file_list):
if not json_file.endswith(".json"):
- logger.warning("Only JSON file format can be prettified, skip: {}".format(json_file))
+ logger.warning(f"Only JSON file format can be prettified, skip: {json_file}")
continue
- logger.info("Start to prettify JSON file: {}".format(json_file))
+ logger.info(f"Start to prettify JSON file: {json_file}")
dir_path = os.path.dirname(json_file)
file_name, file_suffix = os.path.splitext(os.path.basename(json_file))
- outfile = os.path.join(dir_path, "{}.pretty.json".format(file_name))
+ outfile = os.path.join(dir_path, f"{file_name}.pretty.json")
with io.open(json_file, 'r', encoding='utf-8') as stream:
try:
@@ -545,7 +545,7 @@ def prettify_json_file(file_list):
json.dump(obj, out, indent=4, separators=(',', ': '))
out.write('\n')
- print("success: {}".format(outfile))
+ print(f"success: {outfile}")
def omit_long_data(body, omit_len=512):
@@ -560,7 +560,7 @@ def omit_long_data(body, omit_len=512):
omitted_body = body[0:omit_len]
- appendix_str = " ... OMITTED {} CHARACTORS ...".format(body_len - omit_len)
+ appendix_str = f" ... OMITTED {body_len - omit_len} CHARACTORS ..."
if isinstance(body, bytes):
appendix_str = appendix_str.encode("utf-8")
@@ -604,11 +604,11 @@ def dump_json_file(json_data, json_file_abs_path):
cls=PythonObjectEncoder
)
- msg = "dump file: {}".format(json_file_abs_path)
+ msg = f"dump file: {json_file_abs_path}"
logger.info(msg)
except TypeError as ex:
- msg = "Failed to dump json file: {}\nReason: {}".format(json_file_abs_path, ex)
+ msg = f"Failed to dump json file: {json_file_abs_path}\nReason: {ex}"
logger.error(msg)
@@ -620,7 +620,7 @@ def prepare_dump_json_file_abs_path(project_mapping, tag_name):
if not test_path:
# running passed in testcase/testsuite data structure
- dump_file_name = "tests_mapping.{}.json".format(tag_name)
+ dump_file_name = f"tests_mapping.{tag_name}.json"
dumped_json_file_abs_path = os.path.join(pwd_dir_path, "logs", dump_file_name)
return dumped_json_file_abs_path
@@ -630,12 +630,12 @@ def prepare_dump_json_file_abs_path(project_mapping, tag_name):
if os.path.isdir(test_path):
file_foder_path = os.path.join(logs_dir_path, test_path_relative_path)
- dump_file_name = "all.{}.json".format(tag_name)
+ dump_file_name = f"all.{tag_name}.json"
else:
file_relative_folder_path, test_file = os.path.split(test_path_relative_path)
file_foder_path = os.path.join(logs_dir_path, file_relative_folder_path)
test_file_name, _file_suffix = os.path.splitext(test_file)
- dump_file_name = "{}.{}.json".format(test_file_name, tag_name)
+ dump_file_name = f"{test_file_name}.{tag_name}.json"
dumped_json_file_abs_path = os.path.join(file_foder_path, dump_file_name)
return dumped_json_file_abs_path
diff --git a/httprunner/validator.py b/httprunner/validator.py
index eb2d52c4..9abe5699 100644
--- a/httprunner/validator.py
+++ b/httprunner/validator.py
@@ -3,7 +3,9 @@
import sys
import traceback
-from httprunner import exceptions, logger, parser
+from loguru import logger
+
+from httprunner import exceptions, parser
class Validator(object):
@@ -70,12 +72,12 @@ class Validator(object):
}
script = "\n ".join(script)
- code = """
+ code = f"""
# encoding: utf-8
def run_validate_script():
- {}
-""".format(script)
+ {script}
+"""
variables = {
"status_code": self.resp_obj.status_code,
@@ -88,12 +90,12 @@ def run_validate_script():
try:
exec(code, variables)
except SyntaxError as ex:
- logger.log_warning("SyntaxError in python validate script: {}".format(ex))
+ logger.warning(f"SyntaxError in python validate script: {ex}")
result["check_result"] = "fail"
            result["output"] = "\n".join([
- "ErrorMessage: {}".format(ex.msg),
- "ErrorLine: {}".format(ex.lineno),
- "ErrorText: {}".format(ex.text)
+ f"ErrorMessage: {ex.msg}",
+ f"ErrorLine: {ex.lineno}",
+ f"ErrorText: {ex.text}"
])
return result
@@ -101,7 +103,7 @@ def run_validate_script():
# run python validate script
variables["run_validate_script"]()
except Exception as ex:
- logger.log_warning("run python validate script failed: {}".format(ex))
+ logger.warning(f"run python validate script failed: {ex}")
result["check_result"] = "fail"
_type, _value, _tb = sys.exc_info()
@@ -118,8 +120,8 @@ def run_validate_script():
line_no = "N/A"
            result["output"] = "\n".join([
- "ErrorType: {}".format(_type.__name__),
- "ErrorLine: {}".format(line_no)
+ f"ErrorType: {_type.__name__}",
+ f"ErrorLine: {line_no}"
])
return result
@@ -131,7 +133,7 @@ def run_validate_script():
if not validators:
return
- logger.log_debug("start to validate.")
+ logger.debug("start to validate.")
validate_pass = True
failures = []
@@ -154,7 +156,7 @@ def run_validate_script():
# validator should be LazyFunction object
if not isinstance(validator, parser.LazyFunction):
raise exceptions.ValidationFailure(
- "validator should be parsed first: {}".format(validators))
+ f"validator should be parsed first: {validators}")
# evaluate validator args with context variable mapping.
validator_args = validator.get_args()
@@ -171,18 +173,13 @@ def run_validate_script():
"expect": expect_item,
"expect_value": expect_value
}
- validate_msg = "\nvalidate: {} {} {}({})".format(
- check_item,
- comparator,
- expect_value,
- type(expect_value).__name__
- )
+ validate_msg = f"\nvalidate: {check_item} {comparator} {expect_value}({type(expect_value).__name__})"
try:
validator.to_value(self.session_context.test_variables_mapping)
validator_dict["check_result"] = "pass"
validate_msg += "\t==> pass"
- logger.log_debug(validate_msg)
+ logger.debug(validate_msg)
except (AssertionError, TypeError):
validate_pass = False
validator_dict["check_result"] = "fail"
@@ -194,7 +191,7 @@ def run_validate_script():
expect_value,
type(expect_value).__name__
)
- logger.log_error(validate_msg)
+ logger.error(validate_msg)
failures.append(validate_msg)
self.validation_results["validate_extractor"].append(validator_dict)