commit f4860de5adc4308c78c9823138a41b85dba1491d Author: lilong.129 Date: Wed Feb 5 21:32:44 2025 +0800 init: move from httprunner/httprunner diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..3ece898 --- /dev/null +++ b/.gitignore @@ -0,0 +1,46 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# system or IDE generated files +__debug_bin +.vscode/ +.idea/ +.DS_Store +*.bak +.commit.txt + +# project output files +site/ +output/ +logs +*.log +*.pcap +.coverage +reports +results +*.xml +htmlcov/ +screenshots/ + +# built plugins +debugtalk.bin +debugtalk.so + +# python files +.venv +__pycache__ +*.pyc +dist +*.egg-info +.python-version +.pytest_cache diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..f63cb65 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 debugtalk + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/examples/__init__.py b/examples/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/examples/data/.csv b/examples/data/.csv new file mode 100644 index 0000000..e69de29 diff --git a/examples/data/a-b.c/1.yml b/examples/data/a-b.c/1.yml new file mode 100644 index 0000000..5754c33 --- /dev/null +++ b/examples/data/a-b.c/1.yml @@ -0,0 +1,30 @@ +config: + name: "request methods testcase with functions" + variables: + foo1: config_bar1 + foo2: config_bar2 + base_url: "https://postman-echo.com" + verify: False + +teststeps: +- + name: get with params + variables: + foo1: bar1 + sum_v: "${sum_two(1, 2)}" + request: + method: GET + url: /get + params: + foo1: $foo1 + foo2: $foo2 + sum_v: $sum_v + headers: + User-Agent: HttpRunner/${get_httprunner_version()} + extract: + session_foo2: "body.args.foo2" + validate: + - eq: ["status_code", 200] + - eq: ["body.args.foo1", "bar1"] + - eq: ["body.args.sum_v", "3"] + - eq: ["body.args.foo2", "config_bar2"] diff --git a/examples/data/a-b.c/2 3.yml b/examples/data/a-b.c/2 3.yml new file mode 100644 index 0000000..8a37b3a --- /dev/null +++ b/examples/data/a-b.c/2 3.yml @@ -0,0 +1,26 @@ +config: + name: "reference testcase unittest for abnormal folder path" + base_url: "https://postman-echo.com" + verify: False + +teststeps: +- + name: request with functions + testcase: a-b.c/1.yml + export: + - session_foo2 +- + name: post form data + variables: + foo1: bar12 + request: + method: POST + url: /post + headers: + User-Agent: HttpRunner/${get_httprunner_version()} + Content-Type: "application/x-www-form-urlencoded" + data: "foo1=$foo1&foo2=$session_foo2" + validate: + - eq: ["status_code", 200] + - eq: ["body.form.foo1", "bar12"] + - eq: ["body.form.foo2", "config_bar2"] diff --git a/examples/data/a-b.c/中文case.yml b/examples/data/a-b.c/中文case.yml new file mode 100644 index 0000000..e69de29 diff --git a/examples/data/a_b_c/T1_test.py b/examples/data/a_b_c/T1_test.py new file mode 100644 index 0000000..c65163c --- /dev/null +++ b/examples/data/a_b_c/T1_test.py @@ -0,0 +1,34 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: a-b.c/1.yml +from httprunner import HttpRunner, Config, Step, RunRequest + + +class TestCaseT1(HttpRunner): + + config = ( + Config("request methods testcase with functions") + .variables(**{"foo1": "config_bar1", "foo2": "config_bar2"}) + 
.base_url("https://postman-echo.com") + .verify(False) + ) + + teststeps = [ + Step( + RunRequest("get with params") + .with_variables(**{"foo1": "bar1", "sum_v": "${sum_two(1, 2)}"}) + .get("/get") + .with_params(**{"foo1": "$foo1", "foo2": "$foo2", "sum_v": "$sum_v"}) + .with_headers(**{"User-Agent": "HttpRunner/${get_httprunner_version()}"}) + .extract() + .with_jmespath("body.args.foo2", "session_foo2") + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.args.foo1", "bar1") + .assert_equal("body.args.sum_v", "3") + .assert_equal("body.args.foo2", "config_bar2") + ), + ] + + +if __name__ == "__main__": + TestCaseT1().test_start() diff --git a/examples/data/a_b_c/T2_3_test.py b/examples/data/a_b_c/T2_3_test.py new file mode 100644 index 0000000..c28c757 --- /dev/null +++ b/examples/data/a_b_c/T2_3_test.py @@ -0,0 +1,44 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: a-b.c/2 3.yml +from httprunner import HttpRunner, Config, Step, RunRequest +from httprunner import RunTestCase + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from a_b_c.T1_test import TestCaseT1 as T1 + + +class TestCaseT23(HttpRunner): + + config = ( + Config("reference testcase unittest for abnormal folder path") + .base_url("https://postman-echo.com") + .verify(False) + ) + + teststeps = [ + Step(RunTestCase("request with functions").call(T1).export(*["session_foo2"])), + Step( + RunRequest("post form data") + .with_variables(**{"foo1": "bar12"}) + .post("/post") + .with_headers( + **{ + "User-Agent": "HttpRunner/${get_httprunner_version()}", + "Content-Type": "application/x-www-form-urlencoded", + } + ) + .with_data("foo1=$foo1&foo2=$session_foo2") + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.form.foo1", "bar12") + .assert_equal("body.form.foo2", "config_bar2") + ), + ] + + +if __name__ == "__main__": + TestCaseT23().test_start() diff --git a/examples/data/a_b_c/__init__.py b/examples/data/a_b_c/__init__.py new file mode 100644 index 0000000..70cfba5 --- /dev/null +++ b/examples/data/a_b_c/__init__.py @@ -0,0 +1 @@ +# NOTICE: Generated By HttpRunner. DO NOT EDIT! 
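Note on the ${...} expressions used in 1.yml and the generated tests above: HttpRunner resolves tokens such as ${sum_two(1, 2)} and ${get_httprunner_version()} by calling the function of the same name defined in the project's debugtalk.py (added below). A minimal self-contained sketch of what ${sum_two(1, 2)} evaluates to, mirroring the debugtalk.py definition rather than importing it:

def sum_two(m, n):
    return m + n

sum_v = sum_two(1, 2)  # ${sum_two(1, 2)} -> 3
# postman-echo echoes query params back as strings, which is why the
# testcase asserts body.args.sum_v == "3" rather than 3
assert str(sum_v) == "3"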
diff --git a/examples/data/curl/curl_examples.txt b/examples/data/curl/curl_examples.txt
new file mode 100644
index 0000000..6d8e6a7
--- /dev/null
+++ b/examples/data/curl/curl_examples.txt
@@ -0,0 +1,11 @@
+curl httpbin.org
+
+curl "https://httpbin.org/get?key1=value1&key2=value2"
+
+curl -H "Content-Type: application/json" -H "Authorization: Bearer b7d03a6947b217efb6f3ec3bd3504582" -d '{"type":"A","name":"www","data":"162.10.66.0","priority":null,"port":null,"weight":null}' "https://httpbin.org/post"
+
+curl -F "dummyName=dummyFile" -F file1=@file1.txt -F file2=@file2.txt https://httpbin.org/post
+
+curl https://httpbin.org/post -d 'shipment[to_address][id]=adr_HrBKVA85' -d 'shipment[from_address][id]=adr_VtuTOj7o' -d 'shipment[parcel][id]=prcl_WDv2VzHp' -d 'shipment[is_return]=true' -d 'shipment[customs_info][id]=cstinfo_bl5sE20Y'
+
+curl https://httpbin.org/post -H "Content-Type: application/x-www-form-urlencoded" --data "key1=value+1&key2=value%3A2"
diff --git a/examples/data/debugtalk.py b/examples/data/debugtalk.py
new file mode 100644
index 0000000..af8b22e
--- /dev/null
+++ b/examples/data/debugtalk.py
@@ -0,0 +1,13 @@
+from httprunner import __version__
+
+
+def get_httprunner_version():
+    return __version__
+
+
+def sum_two(m, n):
+    return m + n
+
+
+def get_variables():
+    return {"foo1": "session_bar1"}
diff --git a/examples/data/har/demo.har b/examples/data/har/demo.har
new file mode 100644
index 0000000..3a94a30
--- /dev/null
+++ b/examples/data/har/demo.har
@@ -0,0 +1,356 @@
+{
+    "log": {
+        "version": "1.2",
+        "creator": {
+            "name": "Charles Proxy",
+            "version": "4.6.1"
+        },
+        "entries": [
+            {
+                "startedDateTime": "2021-10-15T20:29:14.396+08:00",
+                "time": 1528,
+                "request": {
+                    "method": "GET",
+                    "url": "https://postman-echo.com/get?foo1=HDnY8&foo2=34.5",
+                    "httpVersion": "HTTP/1.1",
+                    "cookies": [],
+                    "headers": [
+                        {
+                            "name": "Host",
+                            "value": "postman-echo.com"
+                        },
+                        {
+                            "name": "User-Agent",
+                            "value": "HttpRunnerPlus"
+                        },
+                        {
+                            "name": "Accept-Encoding",
+                            "value": "gzip"
+                        }
+                    ],
+                    "queryString": [
+                        {
+                            "name": "foo1",
+                            "value": "HDnY8"
+                        },
+                        {
+                            "name": "foo2",
+                            "value": "34.5"
+                        }
+                    ],
+                    "headersSize": 113,
+                    "bodySize": 0
+                },
+                "response": {
+                    "_charlesStatus": "COMPLETE",
+                    "status": 200,
+                    "statusText": "OK",
+                    "httpVersion": "HTTP/1.1",
+                    "cookies": [
+                        {
+                            "name": "sails.sid",
+                            "value": "s%3Az_LpglkKxTvJ_eHVUH6V67drKp0AGWW-.PidabaXOnatLRP47hVyqqepl6BdrpEQzRlJQXtbIiwk",
+                            "path": "/",
+                            "domain": null,
+                            "expires": null,
+                            "httpOnly": true,
+                            "secure": false,
+                            "comment": null,
+                            "_maxAge": null
+                        }
+                    ],
+                    "headers": [
+                        {
+                            "name": "Date",
+                            "value": "Fri, 15 Oct 2021 12:29:15 GMT"
+                        },
+                        {
+                            "name": "Content-Type",
+                            "value": "application/json; charset=utf-8"
+                        },
+                        {
+                            "name": "Content-Length",
+                            "value": "300"
+                        },
+                        {
+                            "name": "ETag",
+                            "value": "W/\"12c-1pyB4v4mv3hdBoU+8cUmx4p37qI\""
+                        },
+                        {
+                            "name": "Vary",
+                            "value": "Accept-Encoding"
+                        },
+                        {
+                            "name": "set-cookie",
+                            "value": "sails.sid=s%3Az_LpglkKxTvJ_eHVUH6V67drKp0AGWW-.PidabaXOnatLRP47hVyqqepl6BdrpEQzRlJQXtbIiwk; Path=/; HttpOnly"
+                        },
+                        {
+                            "name": "Connection",
+                            "value": "keep-alive"
+                        }
+                    ],
+                    "content": {
+                        "size": 300,
+                        "mimeType": "application/json; charset=utf-8",
+                        "text":
"eyJhcmdzIjp7ImZvbzEiOiJIRG5ZOCIsImZvbzIiOiIzNC41In0sImhlYWRlcnMiOnsieC1mb3J3YXJkZWQtcHJvdG8iOiJodHRwcyIsIngtZm9yd2FyZGVkLXBvcnQiOiI0NDMiLCJob3N0IjoicG9zdG1hbi1lY2hvLmNvbSIsIngtYW16bi10cmFjZS1pZCI6IlJvb3Q9MS02MTY5NzQxYi01YjgyNTRjZTZjZThlNTU2NTRiNzc3MmQiLCJ1c2VyLWFnZW50IjoiSHR0cEJvb21lciIsImFjY2VwdC1lbmNvZGluZyI6Imd6aXAifSwidXJsIjoiaHR0cHM6Ly9wb3N0bWFuLWVjaG8uY29tL2dldD9mb28xPUhEblk4JmZvbzI9MzQuNSJ9", + "encoding": "base64" + }, + "redirectURL": null, + "headersSize": 0, + "bodySize": 300 + }, + "serverIPAddress": "44.193.31.23", + "cache": {}, + "timings": { + "dns": 105, + "connect": 1108, + "ssl": 721, + "send": 1, + "wait": 312, + "receive": 2 + } + }, + { + "startedDateTime": "2021-10-15T20:29:16.120+08:00", + "time": 306, + "request": { + "method": "POST", + "url": "https://postman-echo.com/post", + "httpVersion": "HTTP/1.1", + "cookies": [ + { + "name": "sails.sid", + "value": "s%3Az_LpglkKxTvJ_eHVUH6V67drKp0AGWW-.PidabaXOnatLRP47hVyqqepl6BdrpEQzRlJQXtbIiwk" + } + ], + "headers": [ + { + "name": "Host", + "value": "postman-echo.com" + }, + { + "name": "User-Agent", + "value": "Go-http-client/1.1" + }, + { + "name": "Content-Length", + "value": "28" + }, + { + "name": "Content-Type", + "value": "application/json; charset=UTF-8" + }, + { + "name": "Cookie", + "value": "sails.sid=s%3Az_LpglkKxTvJ_eHVUH6V67drKp0AGWW-.PidabaXOnatLRP47hVyqqepl6BdrpEQzRlJQXtbIiwk" + }, + { + "name": "Accept-Encoding", + "value": "gzip" + } + ], + "queryString": [], + "postData": { + "mimeType": "application/json; charset=UTF-8", + "text": "{\"foo1\":\"HDnY8\",\"foo2\":12.3}" + }, + "headersSize": 269, + "bodySize": 28 + }, + "response": { + "_charlesStatus": "COMPLETE", + "status": 200, + "statusText": "OK", + "httpVersion": "HTTP/1.1", + "cookies": [ + { + "name": "sails.sid", + "value": "s%3AS5e7w0zQ0xAsCwh9L8T6R7QLYCO7_gtD.r8%2B2w9IWqEIfuVkrZjnxzm2xADIk34zKAWXRPapr%2FAw", + "path": "/", + "domain": null, + "expires": null, + "httpOnly": true, + "secure": false, + "comment": null, + "_maxAge": null + } + ], + "headers": [ + { + "name": "Date", + "value": "Fri, 15 Oct 2021 12:29:16 GMT" + }, + { + "name": "Content-Type", + "value": "application/json; charset=utf-8" + }, + { + "name": "Content-Length", + "value": "526" + }, + { + "name": "ETag", + "value": "W/\"20e-aXqJ0H6Q30sU41c/D7asB+yXWeQ\"" + }, + { + "name": "Vary", + "value": "Accept-Encoding" + }, + { + "name": "set-cookie", + "value": "sails.sid=s%3AS5e7w0zQ0xAsCwh9L8T6R7QLYCO7_gtD.r8%2B2w9IWqEIfuVkrZjnxzm2xADIk34zKAWXRPapr%2FAw; Path=/; HttpOnly" + }, + { + "name": "Connection", + "value": "keep-alive" + } + ], + "content": { + "size": 526, + "mimeType": "application/json; charset=utf-8", + "text": "eyJhcmdzIjp7fSwiZGF0YSI6eyJmb28xIjoiSERuWTgiLCJmb28yIjoxMi4zfSwiZmlsZXMiOnt9LCJmb3JtIjp7fSwiaGVhZGVycyI6eyJ4LWZvcndhcmRlZC1wcm90byI6Imh0dHBzIiwieC1mb3J3YXJkZWQtcG9ydCI6IjQ0MyIsImhvc3QiOiJwb3N0bWFuLWVjaG8uY29tIiwieC1hbXpuLXRyYWNlLWlkIjoiUm9vdD0xLTYxNjk3NDFjLTIxN2RiMGI3MWFkYjgwYmQ3ODUxOTI2OCIsImNvbnRlbnQtbGVuZ3RoIjoiMjgiLCJ1c2VyLWFnZW50IjoiR28taHR0cC1jbGllbnQvMS4xIiwiY29udGVudC10eXBlIjoiYXBwbGljYXRpb24vanNvbjsgY2hhcnNldD1VVEYtOCIsImNvb2tpZSI6InNhaWxzLnNpZD1zJTNBel9McGdsa0t4VHZKX2VIVlVINlY2N2RyS3AwQUdXVy0uUGlkYWJhWE9uYXRMUlA0N2hWeXFxZXBsNkJkcnBFUXpSbEpRWHRiSWl3ayIsImFjY2VwdC1lbmNvZGluZyI6Imd6aXAifSwianNvbiI6eyJmb28xIjoiSERuWTgiLCJmb28yIjoxMi4zfSwidXJsIjoiaHR0cHM6Ly9wb3N0bWFuLWVjaG8uY29tL3Bvc3QifQ==", + "encoding": "base64" + }, + "redirectURL": null, + "headersSize": 0, + "bodySize": 526 + }, + "serverIPAddress": "44.193.31.23", + "cache": {}, + 
"timings": { + "dns": -1, + "connect": -1, + "ssl": -1, + "send": 1, + "wait": 304, + "receive": 1 + } + }, + { + "startedDateTime": "2021-10-15T20:29:16.427+08:00", + "time": 305, + "request": { + "method": "POST", + "url": "https://postman-echo.com/post", + "httpVersion": "HTTP/1.1", + "cookies": [ + { + "name": "sails.sid", + "value": "s%3AS5e7w0zQ0xAsCwh9L8T6R7QLYCO7_gtD.r8%2B2w9IWqEIfuVkrZjnxzm2xADIk34zKAWXRPapr%2FAw" + } + ], + "headers": [ + { + "name": "Host", + "value": "postman-echo.com" + }, + { + "name": "User-Agent", + "value": "Go-http-client/1.1" + }, + { + "name": "Content-Length", + "value": "20" + }, + { + "name": "Content-Type", + "value": "application/x-www-form-urlencoded; charset=UTF-8" + }, + { + "name": "Cookie", + "value": "sails.sid=s%3AS5e7w0zQ0xAsCwh9L8T6R7QLYCO7_gtD.r8%2B2w9IWqEIfuVkrZjnxzm2xADIk34zKAWXRPapr%2FAw" + }, + { + "name": "Accept-Encoding", + "value": "gzip" + } + ], + "queryString": [], + "postData": { + "mimeType": "application/x-www-form-urlencoded; charset=UTF-8", + "params": [ + { + "name": "foo1", + "value": "HDnY8" + }, + { + "name": "foo2", + "value": "12.3" + } + ] + }, + "headersSize": 290, + "bodySize": 20 + }, + "response": { + "_charlesStatus": "COMPLETE", + "status": 200, + "statusText": "OK", + "httpVersion": "HTTP/1.1", + "cookies": [ + { + "name": "sails.sid", + "value": "s%3AMp2gGgeCCDM4sRS_MfL1q-hAkL3bAk84.9XT7TTW8QzueQqtQ6bQM%2BgHqiUBbkJSfgM5CbfhFreQ", + "path": "/", + "domain": null, + "expires": null, + "httpOnly": true, + "secure": false, + "comment": null, + "_maxAge": null + } + ], + "headers": [ + { + "name": "Date", + "value": "Fri, 15 Oct 2021 12:29:16 GMT" + }, + { + "name": "Content-Type", + "value": "application/json; charset=utf-8" + }, + { + "name": "Content-Length", + "value": "551" + }, + { + "name": "ETag", + "value": "W/\"227-micuvGYwtEZN542D1sTL0hAZaRs\"" + }, + { + "name": "Vary", + "value": "Accept-Encoding" + }, + { + "name": "set-cookie", + "value": "sails.sid=s%3AMp2gGgeCCDM4sRS_MfL1q-hAkL3bAk84.9XT7TTW8QzueQqtQ6bQM%2BgHqiUBbkJSfgM5CbfhFreQ; Path=/; HttpOnly" + }, + { + "name": "Connection", + "value": "keep-alive" + } + ], + "content": { + "size": 551, + "mimeType": "application/json; charset=utf-8", + "text": "eyJhcmdzIjp7fSwiZGF0YSI6IiIsImZpbGVzIjp7fSwiZm9ybSI6eyJmb28xIjoiSERuWTgiLCJmb28yIjoiMTIuMyJ9LCJoZWFkZXJzIjp7IngtZm9yd2FyZGVkLXByb3RvIjoiaHR0cHMiLCJ4LWZvcndhcmRlZC1wb3J0IjoiNDQzIiwiaG9zdCI6InBvc3RtYW4tZWNoby5jb20iLCJ4LWFtem4tdHJhY2UtaWQiOiJSb290PTEtNjE2OTc0MWMtNWI5ZDEyMWI2N2FlZTI0MTUyMmQzMjE2IiwiY29udGVudC1sZW5ndGgiOiIyMCIsInVzZXItYWdlbnQiOiJHby1odHRwLWNsaWVudC8xLjEiLCJjb250ZW50LXR5cGUiOiJhcHBsaWNhdGlvbi94LXd3dy1mb3JtLXVybGVuY29kZWQ7IGNoYXJzZXQ9VVRGLTgiLCJjb29raWUiOiJzYWlscy5zaWQ9cyUzQVM1ZTd3MHpRMHhBc0N3aDlMOFQ2UjdRTFlDTzdfZ3RELnI4JTJCMnc5SVdxRUlmdVZrclpqbnh6bTJ4QURJazM0ektBV1hSUGFwciUyRkF3IiwiYWNjZXB0LWVuY29kaW5nIjoiZ3ppcCJ9LCJqc29uIjp7ImZvbzEiOiJIRG5ZOCIsImZvbzIiOiIxMi4zIn0sInVybCI6Imh0dHBzOi8vcG9zdG1hbi1lY2hvLmNvbS9wb3N0In0=", + "encoding": "base64" + }, + "redirectURL": null, + "headersSize": 0, + "bodySize": 551 + }, + "serverIPAddress": "44.193.31.23", + "cache": {}, + "timings": { + "dns": -1, + "connect": -1, + "ssl": -1, + "send": 0, + "wait": 303, + "receive": 2 + } + } + ] + } +} \ No newline at end of file diff --git a/examples/data/postman/__init__.py b/examples/data/postman/__init__.py new file mode 100644 index 0000000..70cfba5 --- /dev/null +++ b/examples/data/postman/__init__.py @@ -0,0 +1 @@ +# NOTICE: Generated By HttpRunner. DO NOT EDIT! 
diff --git a/examples/data/postman/intro.txt b/examples/data/postman/intro.txt new file mode 100644 index 0000000..1ac2b9d --- /dev/null +++ b/examples/data/postman/intro.txt @@ -0,0 +1 @@ +HttpRunner is an open source API testing tool that supports HTTP(S)/HTTP2/WebSocket/RPC network protocols, covering API testing, performance testing and digital experience monitoring (DEM) test types. Enjoy! \ No newline at end of file diff --git a/examples/data/postman/logo.jpeg b/examples/data/postman/logo.jpeg new file mode 100644 index 0000000..e790a1c Binary files /dev/null and b/examples/data/postman/logo.jpeg differ diff --git a/examples/data/postman/postman_collection.json b/examples/data/postman/postman_collection.json new file mode 100644 index 0000000..0f96084 --- /dev/null +++ b/examples/data/postman/postman_collection.json @@ -0,0 +1,498 @@ +{ + "info": { + "_postman_id": "0417a445-b206-4ea2-b1d2-5441afd6c6b9", + "name": "postman collection demo", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" + }, + "item": [ + { + "name": "folder1", + "item": [ + { + "name": "folder2", + "item": [ + { + "name": "Get with params", + "request": { + "method": "GET", + "header": [], + "url": { + "raw": "https://postman-echo.com/:path?k1=v1&k2=v2", + "protocol": "https", + "host": [ + "postman-echo", + "com" + ], + "path": [ + ":path" + ], + "query": [ + { + "key": "k1", + "value": "v1" + }, + { + "key": "k2", + "value": "v2" + }, + { + "key": "k3", + "value": "v3", + "disabled": true + } + ], + "variable": [ + { + "key": "path", + "value": "get" + } + ] + } + }, + "response": [ + { + "name": "Get with params case1", + "originalRequest": { + "method": "GET", + "header": [], + "url": { + "raw": "https://postman-echo.com/:path?k1=v1&k2=v2", + "protocol": "https", + "host": [ + "postman-echo", + "com" + ], + "path": [ + ":path" + ], + "query": [ + { + "key": "k1", + "value": "v1" + }, + { + "key": "k2", + "value": "v2" + }, + { + "key": "k3", + "value": "v3", + "disabled": true + } + ], + "variable": [ + { + "key": "path", + "value": "get" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Date", + "value": "Mon, 16 May 2022 12:12:28 GMT" + }, + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + }, + { + "key": "Content-Length", + "value": "508" + }, + { + "key": "Connection", + "value": "keep-alive" + }, + { + "key": "ETag", + "value": "W/\"1fc-x4EIPFQzoLX0HenCFPx6HNfG0lc\"" + }, + { + "key": "Vary", + "value": "Accept-Encoding" + }, + { + "key": "set-cookie", + "value": "sails.sid=s%3AX2aa_Z7gbcUqIWAjlBkytBRmQ4WCvc3D.pX9Qxh8aO9Ict0BL4CrRhdDJmz81UVmwFsV5Nx30Ils; Path=/; HttpOnly" + } + ], + "cookie": [], + "body": "{\n \"args\": {\n \"k1\": \"v1\",\n \"k2\": \"v2\"\n },\n \"headers\": {\n \"x-forwarded-proto\": \"https\",\n \"x-forwarded-port\": \"443\",\n \"host\": \"postman-echo.com\",\n \"user-agent\": \"PostmanRuntime/7.29.0\",\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"cookie\": \"Cookie_1=c1; Cookie_2=c2; sails.sid=s%3AGX6aS9b_phvUSUk66w7ZBgWuOPI7IIKT.ayEGTaW4U35eAWyPz%2Fh6Q74DonNcbqw3H5Q5Zv%2BfKMY\"\n },\n \"url\": \"https://postman-echo.com/get?k1=v1&k2=v2\"\n}" + }, + { + "name": "Get with params case2", + "originalRequest": { + "method": "GET", + "header": [], + "url": { + "raw": "https://postman-echo.com/:path?k1=v1&k3=v3", + "protocol": "https", + "host": [ + "postman-echo", + "com" + ], + "path": [ + ":path" + ], + "query": [ + { + "key": "k1", + 
"value": "v1" + }, + { + "key": "k2", + "value": "v2", + "disabled": true + }, + { + "key": "k3", + "value": "v3" + } + ], + "variable": [ + { + "key": "path", + "value": "get" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Date", + "value": "Mon, 16 May 2022 12:14:04 GMT" + }, + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + }, + { + "key": "Content-Length", + "value": "504" + }, + { + "key": "Connection", + "value": "keep-alive" + }, + { + "key": "ETag", + "value": "W/\"1f8-tMaKs4xmwr+3su3I8mcgR0p+ucw\"" + }, + { + "key": "Vary", + "value": "Accept-Encoding" + }, + { + "key": "set-cookie", + "value": "sails.sid=s%3AMNuX_i0KgaP_KuuMpYB8RtCNipCGJWVw.4ETfPHxE81Omqb6Yli%2FezUU8CXyYBcN3%2Bxkx5htwh8Y; Path=/; HttpOnly" + } + ], + "cookie": [], + "body": "{\n \"args\": {\n \"k1\": \"v1\",\n \"k3\": \"v3\"\n },\n \"headers\": {\n \"x-forwarded-proto\": \"https\",\n \"x-forwarded-port\": \"443\",\n \"host\": \"postman-echo.com\",\n \"user-agent\": \"PostmanRuntime/7.29.0\",\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"cookie\": \"Cookie_1=c1; Cookie_2=c2; sails.sid=s%3AX2aa_Z7gbcUqIWAjlBkytBRmQ4WCvc3D.pX9Qxh8aO9Ict0BL4CrRhdDJmz81UVmwFsV5Nx30Ils\"\n },\n \"url\": \"https://postman-echo.com/get?k1=v1&k3=v3\"\n}" + } + ] + } + ] + } + ] + }, + { + "name": "folder3", + "item": [ + { + "name": "Post form-data", + "request": { + "method": "POST", + "header": [], + "body": { + "mode": "formdata", + "formdata": [ + { + "key": "k1", + "value": "v1", + "type": "text" + }, + { + "key": "k2", + "value": "v2", + "type": "text" + }, + { + "key": "k3", + "value": "v3", + "type": "text", + "disabled": true + }, + { + "key": "intro_key", + "type": "file", + "src": "intro.txt" + }, + { + "key": "logo_key", + "type": "file", + "src": "logo.jpeg" + } + ] + }, + "url": { + "raw": "https://postman-echo.com/:path", + "protocol": "https", + "host": [ + "postman-echo", + "com" + ], + "path": [ + ":path" + ], + "variable": [ + { + "key": "path", + "value": "post" + } + ] + } + }, + "response": [] + }, + { + "name": "Post x-www-form-urlencoded", + "request": { + "method": "POST", + "header": [], + "body": { + "mode": "urlencoded", + "urlencoded": [ + { + "key": "k1", + "value": "v1", + "type": "text" + }, + { + "key": "k2", + "value": "v2", + "type": "text" + }, + { + "key": "k3", + "value": "v3", + "type": "text", + "disabled": true + } + ] + }, + "url": { + "raw": "https://postman-echo.com/:path", + "protocol": "https", + "host": [ + "postman-echo", + "com" + ], + "path": [ + ":path" + ], + "variable": [ + { + "key": "path", + "value": "post" + } + ] + } + }, + "response": [] + }, + { + "name": "Post raw json", + "request": { + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": "{\n \"k1\": \"v1\",\n \"k2\": \"v2\"\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "https://postman-echo.com/:path", + "protocol": "https", + "host": [ + "postman-echo", + "com" + ], + "path": [ + ":path" + ], + "variable": [ + { + "key": "path", + "value": "post" + } + ] + } + }, + "response": [] + }, + { + "name": "Post raw text", + "request": { + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": "have a nice day", + "options": { + "raw": { + "language": "text" + } + } + }, + "url": { + "raw": "https://postman-echo.com/:path", + "protocol": "https", + "host": [ + "postman-echo", + "com" + ], + "path": [ + ":path" + ], + "variable": [ + { + 
"key": "path", + "value": "post" + } + ] + } + }, + "response": [] + } + ] + }, + { + "name": "Get request headers", + "request": { + "method": "GET", + "header": [ + { + "key": "User-Agent", + "value": "HttpRunner", + "type": "text" + }, + { + "key": "User-Name", + "value": "bbx", + "type": "text", + "disabled": true + }, + { + "key": "Connection", + "value": "close", + "type": "text" + } + ], + "url": { + "raw": "https://postman-echo.com/:path", + "protocol": "https", + "host": [ + "postman-echo", + "com" + ], + "path": [ + ":path" + ], + "variable": [ + { + "key": "path", + "value": "headers" + } + ] + } + }, + "response": [ + { + "name": "Get request headers case1", + "originalRequest": { + "method": "GET", + "header": [ + { + "key": "User-Agent", + "value": "HttpRunner", + "type": "text" + }, + { + "key": "User-Name", + "value": "bbx", + "type": "text", + "disabled": true + }, + { + "key": "Cookie", + "value": "Cookie_1=c1; Cookie_2=c2; sails.sid=s%3AGX6aS9b_phvUSUk66w7ZBgWuOPI7IIKT.ayEGTaW4U35eAWyPz%2Fh6Q74DonNcbqw3H5Q5Zv%2BfKMY", + "type": "text" + } + ], + "url": { + "raw": "https://postman-echo.com/:path", + "protocol": "https", + "host": [ + "postman-echo", + "com" + ], + "path": [ + ":path" + ], + "variable": [ + { + "key": "path", + "value": "headers" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Date", + "value": "Mon, 16 May 2022 12:14:25 GMT" + }, + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + }, + { + "key": "Content-Length", + "value": "541" + }, + { + "key": "Connection", + "value": "keep-alive" + }, + { + "key": "ETag", + "value": "W/\"21d-ld5UvFTaRM6lihVnvCj6mZm5Of0\"" + }, + { + "key": "Vary", + "value": "Accept-Encoding" + } + ], + "cookie": [], + "body": "{\n \"headers\": {\n \"x-forwarded-proto\": \"https\",\n \"x-forwarded-port\": \"443\",\n \"host\": \"postman-echo.com\",\n \"user-agent\": \"HttpRunner\",\n \"cookie\": \"Cookie_1=c1; Cookie_2=c2; sails.sid=s%3AGX6aS9b_phvUSUk66w7ZBgWuOPI7IIKT.ayEGTaW4U35eAWyPz%2Fh6Q74DonNcbqw3H5Q5Zv%2BfKMY\",\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\"\n }\n}" + } + ] + } + ] +} \ No newline at end of file diff --git a/examples/data/profile.yml b/examples/data/profile.yml new file mode 100644 index 0000000..69963ba --- /dev/null +++ b/examples/data/profile.yml @@ -0,0 +1,4 @@ +headers: + Content-Type: "application/x-www-form-urlencoded" +cookies: + UserName: "debugtalk" \ No newline at end of file diff --git a/examples/data/profile_override.yml b/examples/data/profile_override.yml new file mode 100644 index 0000000..35236a5 --- /dev/null +++ b/examples/data/profile_override.yml @@ -0,0 +1,5 @@ +override: true +headers: + Content-Type: "application/x-www-form-urlencoded" +cookies: + UserName: "debugtalk" \ No newline at end of file diff --git a/examples/data/sqlite.db b/examples/data/sqlite.db new file mode 100644 index 0000000..49485ef Binary files /dev/null and b/examples/data/sqlite.db differ diff --git a/examples/httpbin/__init__.py b/examples/httpbin/__init__.py new file mode 100644 index 0000000..70cfba5 --- /dev/null +++ b/examples/httpbin/__init__.py @@ -0,0 +1 @@ +# NOTICE: Generated By HttpRunner. DO NOT EDIT! 
diff --git a/examples/httpbin/account.csv b/examples/httpbin/account.csv new file mode 100644 index 0000000..67ce22c --- /dev/null +++ b/examples/httpbin/account.csv @@ -0,0 +1,4 @@ +username,password +test1,111111 +test2,222222 +test3,333333 \ No newline at end of file diff --git a/examples/httpbin/basic.yml b/examples/httpbin/basic.yml new file mode 100644 index 0000000..69a6db7 --- /dev/null +++ b/examples/httpbin/basic.yml @@ -0,0 +1,89 @@ +config: + name: basic test with httpbin + base_url: ${get_httpbin_server()} + +teststeps: +- + name: headers + request: + url: /headers + method: GET + validate: + - eq: ["status_code", 200] + - eq: [body.headers.Host, "127.0.0.1"] + +- + name: user-agent + request: + url: /user-agent + method: GET + validate: + - eq: ["status_code", 200] + - startswith: [body."user-agent", "python-requests"] + +- + name: get without params + request: + url: /get + method: GET + validate: + - eq: ["status_code", 200] + - eq: [body.args, {}] + +- + name: get with params in url + request: + url: /get?a=1&b=2 + method: GET + validate: + - eq: ["status_code", 200] + - eq: [body.args, {'a': '1', 'b': '2'}] + +- + name: get with params in params field + request: + url: /get + params: + a: 1 + b: 2 + method: GET + validate: + - eq: ["status_code", 200] + - eq: [body.args, {'a': '1', 'b': '2'}] + +- + name: set cookie + request: + url: /cookies/set?name=value + method: GET + validate: + - eq: ["status_code", 200] + - eq: [body.cookies.name, "value"] + +- + name: extract cookie + request: + url: /cookies + method: GET + validate: + - eq: ["status_code", 200] + - eq: [body.cookies.name, "value"] + +- + name: post data + request: + url: /post + method: POST + headers: + Content-Type: application/json + data: abc + validate: + - eq: ["status_code", 200] + +- + name: validate body length + request: + url: /spec.json + method: GET + validate: + - len_eq: ["body", 9] diff --git a/examples/httpbin/basic_test.py b/examples/httpbin/basic_test.py new file mode 100644 index 0000000..1933ddf --- /dev/null +++ b/examples/httpbin/basic_test.py @@ -0,0 +1,79 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: basic.yml +from httprunner import HttpRunner, Config, Step, RunRequest + + +class TestCaseBasic(HttpRunner): + + config = Config("basic test with httpbin").base_url("${get_httpbin_server()}") + + teststeps = [ + Step( + RunRequest("headers") + .get("/headers") + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.headers.Host", "127.0.0.1") + ), + Step( + RunRequest("user-agent") + .get("/user-agent") + .validate() + .assert_equal("status_code", 200) + .assert_startswith('body."user-agent"', "python-requests") + ), + Step( + RunRequest("get without params") + .get("/get") + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.args", {}) + ), + Step( + RunRequest("get with params in url") + .get("/get?a=1&b=2") + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.args", {"a": "1", "b": "2"}) + ), + Step( + RunRequest("get with params in params field") + .get("/get") + .with_params(**{"a": 1, "b": 2}) + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.args", {"a": "1", "b": "2"}) + ), + Step( + RunRequest("set cookie") + .get("/cookies/set?name=value") + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.cookies.name", "value") + ), + Step( + RunRequest("extract cookie") + .get("/cookies") + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.cookies.name", "value") + ), + 
Step( + RunRequest("post data") + .post("/post") + .with_headers(**{"Content-Type": "application/json"}) + .with_data("abc") + .validate() + .assert_equal("status_code", 200) + ), + Step( + RunRequest("validate body length") + .get("/spec.json") + .validate() + .assert_length_equal("body", 9) + ), + ] + + +if __name__ == "__main__": + TestCaseBasic().test_start() diff --git a/examples/httpbin/debugtalk.py b/examples/httpbin/debugtalk.py new file mode 100644 index 0000000..5f40844 --- /dev/null +++ b/examples/httpbin/debugtalk.py @@ -0,0 +1,148 @@ +import os +import random +import string +import time +import uuid + +from loguru import logger + +from httprunner.utils import HTTP_BIN_URL + + +def get_httpbin_server(): + return HTTP_BIN_URL + + +def setup_testcase(variables): + logger.info(f"setup_testcase, variables: {variables}") + variables["request_id_prefix"] = str(int(time.time())) + + +def teardown_testcase(): + logger.info("teardown_testcase.") + + +def setup_teststep(request, variables): + logger.info(f"setup_teststep, request: {request}, variables: {variables}") + request.setdefault("headers", {}) + request_id_prefix = variables["request_id_prefix"] + request["headers"]["HRUN-Request-ID"] = request_id_prefix + "-" + str(uuid.uuid4()) + + +def teardown_teststep(response): + logger.info(f"teardown_teststep, response status code: {response.status_code}") + + +def sum_two(m, n): + return m + n + + +def sum_status_code(status_code, expect_sum): + """sum status code digits + e.g. 400 => 4, 201 => 3 + """ + sum_value = 0 + for digit in str(status_code): + sum_value += int(digit) + + assert sum_value == expect_sum + + +def is_status_code_200(status_code): + return status_code == 200 + + +os.environ["TEST_ENV"] = "PRODUCTION" + + +def skip_test_in_production_env(): + """skip this test in production environment""" + return os.environ["TEST_ENV"] == "PRODUCTION" + + +def get_user_agent(): + return ["iOS/10.1", "iOS/10.2"] + + +def gen_app_version(): + return [{"app_version": "2.8.5"}, {"app_version": "2.8.6"}] + + +def get_account(): + return [ + {"username": "user1", "password": "111111"}, + {"username": "user2", "password": "222222"}, + ] + + +def get_account_in_tuple(): + return [("user1", "111111"), ("user2", "222222")] + + +def gen_random_string(str_len): + random_char_list = [] + for _ in range(str_len): + random_char = random.choice(string.ascii_letters + string.digits) + random_char_list.append(random_char) + + random_string = "".join(random_char_list) + return random_string + + +def setup_hook_add_kwargs(request): + request["key"] = "value" + + +def setup_hook_remove_kwargs(request): + request.pop("key") + + +def teardown_hook_sleep_N_secs(response, n_secs): + """sleep n seconds after request""" + if response.status_code == 200: + time.sleep(0.1) + else: + time.sleep(n_secs) + + +def hook_print(msg): + print(msg) + + +def modify_request_json(request, os_platform): + request["json"]["os_platform"] = os_platform + + +def setup_hook_httpntlmauth(request): + if "httpntlmauth" in request: + from requests_ntlm import HttpNtlmAuth + + auth_account = request.pop("httpntlmauth") + request["auth"] = HttpNtlmAuth( + auth_account["username"], auth_account["password"] + ) + + +def alter_response(response): + response.status_code = 500 + response.headers["Content-Type"] = "html/text" + response.body["headers"]["Host"] = "127.0.0.1:8888" + response.new_attribute = "new_attribute_value" + response.new_attribute_dict = {"key": 123} + + +def alter_response_302(response): + response.status_code = 500 + 
response.headers["Content-Type"] = "html/text" + response.text = "abcdef" + response.new_attribute = "new_attribute_value" + response.new_attribute_dict = {"key": 123} + + +def alter_response_error(response): + # NameError + not_defined_variable + + +def gen_variables(): + return {"var_a": 1, "var_b": 2} diff --git a/examples/httpbin/hooks.yml b/examples/httpbin/hooks.yml new file mode 100644 index 0000000..23f0ba5 --- /dev/null +++ b/examples/httpbin/hooks.yml @@ -0,0 +1,36 @@ +config: + name: basic test with httpbin + base_url: ${get_httpbin_server()} + setup_hooks: + - ${hook_print(setup)} + teardown_hooks: + - ${hook_print(teardown)} + +teststeps: +- + name: headers + variables: + a: 123 + request: + url: /headers + method: GET + setup_hooks: + - ${setup_hook_add_kwargs($request)} + - ${setup_hook_remove_kwargs($request)} + teardown_hooks: + - ${teardown_hook_sleep_N_secs($response, 1)} + validate: + - eq: ["status_code", 200] + - contained_by: [body.headers.Host, "${get_httpbin_server()}"] + +- + name: alter response + request: + url: /headers + method: GET + teardown_hooks: + - ${alter_response($response)} + validate: + - eq: ["status_code", 500] + - eq: [headers."Content-Type", "html/text"] + - eq: [body.headers.Host, "127.0.0.1:8888"] diff --git a/examples/httpbin/hooks_test.py b/examples/httpbin/hooks_test.py new file mode 100644 index 0000000..4a074ba --- /dev/null +++ b/examples/httpbin/hooks_test.py @@ -0,0 +1,35 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: hooks.yml +from httprunner import HttpRunner, Config, Step, RunRequest + + +class TestCaseHooks(HttpRunner): + + config = Config("basic test with httpbin").base_url("${get_httpbin_server()}") + + teststeps = [ + Step( + RunRequest("headers") + .with_variables(**{"a": 123}) + .setup_hook("${setup_hook_add_kwargs($request)}") + .setup_hook("${setup_hook_remove_kwargs($request)}") + .get("/headers") + .teardown_hook("${teardown_hook_sleep_N_secs($response, 1)}") + .validate() + .assert_equal("status_code", 200) + .assert_contained_by("body.headers.Host", "${get_httpbin_server()}") + ), + Step( + RunRequest("alter response") + .get("/headers") + .teardown_hook("${alter_response($response)}") + .validate() + .assert_equal("status_code", 500) + .assert_equal('headers."Content-Type"', "html/text") + .assert_equal("body.headers.Host", "127.0.0.1:8888") + ), + ] + + +if __name__ == "__main__": + TestCaseHooks().test_start() diff --git a/examples/httpbin/load_image.yml b/examples/httpbin/load_image.yml new file mode 100644 index 0000000..7a2ada6 --- /dev/null +++ b/examples/httpbin/load_image.yml @@ -0,0 +1,37 @@ +config: + name: load images + base_url: ${get_httpbin_server()} + +teststeps: +- + name: get png image + request: + url: /image/png + method: GET + validate: + - eq: ["status_code", 200] + +- + name: get jpeg image + request: + url: /image/jpeg + method: GET + validate: + - eq: ["status_code", 200] + +- + name: get webp image + request: + url: /image/webp + method: GET + validate: + - eq: ["status_code", 200] + +- + name: get svg image + request: + url: /image/svg + method: GET + validate: + - eq: ["status_code", 200] + diff --git a/examples/httpbin/load_image_test.py b/examples/httpbin/load_image_test.py new file mode 100644 index 0000000..324a2d5 --- /dev/null +++ b/examples/httpbin/load_image_test.py @@ -0,0 +1,39 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: load_image.yml +from httprunner import HttpRunner, Config, Step, RunRequest + + +class TestCaseLoadImage(HttpRunner): + + config = Config("load 
images").base_url("${get_httpbin_server()}") + + teststeps = [ + Step( + RunRequest("get png image") + .get("/image/png") + .validate() + .assert_equal("status_code", 200) + ), + Step( + RunRequest("get jpeg image") + .get("/image/jpeg") + .validate() + .assert_equal("status_code", 200) + ), + Step( + RunRequest("get webp image") + .get("/image/webp") + .validate() + .assert_equal("status_code", 200) + ), + Step( + RunRequest("get svg image") + .get("/image/svg") + .validate() + .assert_equal("status_code", 200) + ), + ] + + +if __name__ == "__main__": + TestCaseLoadImage().test_start() diff --git a/examples/httpbin/test.env b/examples/httpbin/test.env new file mode 100644 index 0000000..74d5d9e --- /dev/null +++ b/examples/httpbin/test.env @@ -0,0 +1,4 @@ +UserName=test +Password=654321 +PROJECT_KEY=AAABBBCCC +content_type=application/json; charset=UTF-8 \ No newline at end of file diff --git a/examples/httpbin/upload.yml b/examples/httpbin/upload.yml new file mode 100644 index 0000000..5eff4cb --- /dev/null +++ b/examples/httpbin/upload.yml @@ -0,0 +1,30 @@ +config: + name: test upload file with httpbin + base_url: ${get_httpbin_server()} + +teststeps: +- + name: upload file + variables: + file_path: "test.env" + m_encoder: ${multipart_encoder(file=$file_path)} + request: + url: /post + method: POST + headers: + Content-Type: ${multipart_content_type($m_encoder)} + data: $m_encoder + validate: + - eq: ["status_code", 200] + - startswith: ["body.files.file", "UserName=test"] + +- + name: upload file with keyword + request: + url: /post + method: POST + upload: + file: "test.env" + validate: + - eq: ["status_code", 200] + - startswith: ["body.files.file", "UserName=test"] diff --git a/examples/httpbin/upload_test.py b/examples/httpbin/upload_test.py new file mode 100644 index 0000000..860fe1b --- /dev/null +++ b/examples/httpbin/upload_test.py @@ -0,0 +1,38 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: upload.yml +from httprunner import HttpRunner, Config, Step, RunRequest + + +class TestCaseUpload(HttpRunner): + + config = Config("test upload file with httpbin").base_url("${get_httpbin_server()}") + + teststeps = [ + Step( + RunRequest("upload file") + .with_variables( + **{ + "file_path": "test.env", + "m_encoder": "${multipart_encoder(file=$file_path)}", + } + ) + .post("/post") + .with_headers(**{"Content-Type": "${multipart_content_type($m_encoder)}"}) + .with_data("$m_encoder") + .validate() + .assert_equal("status_code", 200) + .assert_startswith("body.files.file", "UserName=test") + ), + Step( + RunRequest("upload file with keyword") + .post("/post") + .upload(**{"file": "test.env"}) + .validate() + .assert_equal("status_code", 200) + .assert_startswith("body.files.file", "UserName=test") + ), + ] + + +if __name__ == "__main__": + TestCaseUpload().test_start() diff --git a/examples/httpbin/user_agent.csv b/examples/httpbin/user_agent.csv new file mode 100644 index 0000000..fa0c1df --- /dev/null +++ b/examples/httpbin/user_agent.csv @@ -0,0 +1,4 @@ +user_agent +iOS/10.1 +iOS/10.2 +iOS/10.3 diff --git a/examples/httpbin/validate.yml b/examples/httpbin/validate.yml new file mode 100644 index 0000000..3f16d3f --- /dev/null +++ b/examples/httpbin/validate.yml @@ -0,0 +1,35 @@ +config: + name: basic test with httpbin + base_url: ${get_httpbin_server()} + +teststeps: +- + name: validate response with json path + request: + url: /get + params: + a: 1 + b: 2 + method: GET + validate: + - eq: ["status_code", 200] + - eq: ["body.args.a", "1"] + - eq: ["body.args.b", "2"] + 
validate_script: + - "assert status_code == 200" + + +- + name: validate response with python script + request: + url: /get + params: + a: 1 + b: 2 + method: GET + validate: + - eq: ["status_code", 200] + validate_script: + - "assert status_code == 201" + - "a = response_json.get('args').get('a')" + - "assert a == '1'" diff --git a/examples/httpbin/validate_test.py b/examples/httpbin/validate_test.py new file mode 100644 index 0000000..3e4dfc9 --- /dev/null +++ b/examples/httpbin/validate_test.py @@ -0,0 +1,31 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: validate.yml +from httprunner import HttpRunner, Config, Step, RunRequest + + +class TestCaseValidate(HttpRunner): + + config = Config("basic test with httpbin").base_url("${get_httpbin_server()}") + + teststeps = [ + Step( + RunRequest("validate response with json path") + .get("/get") + .with_params(**{"a": 1, "b": 2}) + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.args.a", "1") + .assert_equal("body.args.b", "2") + ), + Step( + RunRequest("validate response with python script") + .get("/get") + .with_params(**{"a": 1, "b": 2}) + .validate() + .assert_equal("status_code", 200) + ), + ] + + +if __name__ == "__main__": + TestCaseValidate().test_start() diff --git a/examples/postman_echo/.debugtalk_gen.py b/examples/postman_echo/.debugtalk_gen.py new file mode 100644 index 0000000..f3e5887 --- /dev/null +++ b/examples/postman_echo/.debugtalk_gen.py @@ -0,0 +1,20 @@ +# NOTE: Generated By hrp v4.2.0, DO NOT EDIT! + +import sys +import os + +sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) + +from debugtalk import * + + +if __name__ == "__main__": + import funppy + funppy.register("get_httprunner_version", get_httprunner_version) + funppy.register("sum_two", sum_two) + funppy.register("get_testcase_config_variables", get_testcase_config_variables) + funppy.register("get_testsuite_config_variables", get_testsuite_config_variables) + funppy.register("get_app_version", get_app_version) + funppy.register("calculate_two_nums", calculate_two_nums) + funppy.register("fake_rand_count", fake_rand_count) + funppy.serve() diff --git a/examples/postman_echo/__init__.py b/examples/postman_echo/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/examples/postman_echo/conftest.py b/examples/postman_echo/conftest.py new file mode 100644 index 0000000..c894700 --- /dev/null +++ b/examples/postman_echo/conftest.py @@ -0,0 +1,65 @@ +# NOTICE: Generated By HttpRunner. 
+import json +import os +import time + +import pytest +from loguru import logger + +from httprunner.utils import get_platform, ExtendJSONEncoder + + +@pytest.fixture(scope="session", autouse=True) +def session_fixture(request): + """setup and teardown each task""" + logger.info("start running testcases ...") + + start_at = time.time() + + yield + + logger.info("task finished, generate task summary for --save-tests") + + summary = { + "success": True, + "stat": { + "testcases": {"total": 0, "success": 0, "fail": 0}, + "teststeps": {"total": 0, "failures": 0, "successes": 0}, + }, + "time": {"start_at": start_at, "duration": time.time() - start_at}, + "platform": get_platform(), + "details": [], + } + + for item in request.node.items: + testcase_summary = item.instance.get_summary() + summary["success"] &= testcase_summary.success + + summary["stat"]["testcases"]["total"] += 1 + summary["stat"]["teststeps"]["total"] += len(testcase_summary.step_results) + if testcase_summary.success: + summary["stat"]["testcases"]["success"] += 1 + summary["stat"]["teststeps"]["successes"] += len( + testcase_summary.step_results + ) + else: + summary["stat"]["testcases"]["fail"] += 1 + summary["stat"]["teststeps"]["successes"] += ( + len(testcase_summary.step_results) - 1 + ) + summary["stat"]["teststeps"]["failures"] += 1 + + testcase_summary_json = testcase_summary.dict() + testcase_summary_json["records"] = testcase_summary_json.pop("step_results") + summary["details"].append(testcase_summary_json) + + summary_path = os.path.join( + os.getcwd(), "examples/postman_echo/logs/request_methods/hardcode.summary.json" + ) + summary_dir = os.path.dirname(summary_path) + os.makedirs(summary_dir, exist_ok=True) + + with open(summary_path, "w", encoding="utf-8") as f: + json.dump(summary, f, indent=4, ensure_ascii=False, cls=ExtendJSONEncoder) + + logger.info(f"generated task summary: {summary_path}") diff --git a/examples/postman_echo/cookie_manipulation/__init__.py b/examples/postman_echo/cookie_manipulation/__init__.py new file mode 100644 index 0000000..70cfba5 --- /dev/null +++ b/examples/postman_echo/cookie_manipulation/__init__.py @@ -0,0 +1 @@ +# NOTICE: Generated By HttpRunner. DO NOT EDIT! diff --git a/examples/postman_echo/cookie_manipulation/hardcode.yml b/examples/postman_echo/cookie_manipulation/hardcode.yml new file mode 100644 index 0000000..d3b0035 --- /dev/null +++ b/examples/postman_echo/cookie_manipulation/hardcode.yml @@ -0,0 +1,34 @@ +config: + name: "set & delete cookies." 
+ base_url: "https://postman-echo.com" + verify: False + export: ["cookie_foo1"] + +teststeps: +- + name: set cookie foo1 & foo2 & foo3 + request: + method: GET + url: /cookies/set + params: + foo1: bar1 + foo2: bar2 + headers: + User-Agent: HttpRunner/${get_httprunner_version()} + extract: + cookie_foo1: body.cookies.foo1 + validate: + - eq: ["status_code", 200] + - eq: ["body.cookies.foo1", "bar1"] + - eq: ["body.cookies.foo2", "bar2"] +- + name: delete cookie foo2 + request: + method: GET + url: /cookies/delete?foo2 + headers: + User-Agent: HttpRunner/${get_httprunner_version()} + validate: + - eq: ["status_code", 200] + - eq: ["body.cookies.foo1", "bar1"] + - eq: ["body.cookies.foo2", null] diff --git a/examples/postman_echo/cookie_manipulation/hardcode_test.py b/examples/postman_echo/cookie_manipulation/hardcode_test.py new file mode 100644 index 0000000..9abb014 --- /dev/null +++ b/examples/postman_echo/cookie_manipulation/hardcode_test.py @@ -0,0 +1,41 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: cookie_manipulation/hardcode.yml +from httprunner import HttpRunner, Config, Step, RunRequest + + +class TestCaseHardcode(HttpRunner): + + config = ( + Config("set & delete cookies.") + .base_url("https://postman-echo.com") + .verify(False) + .export(*["cookie_foo1"]) + ) + + teststeps = [ + Step( + RunRequest("set cookie foo1 & foo2 & foo3") + .get("/cookies/set") + .with_params(**{"foo1": "bar1", "foo2": "bar2"}) + .with_headers(**{"User-Agent": "HttpRunner/${get_httprunner_version()}"}) + .extract() + .with_jmespath("body.cookies.foo1", "cookie_foo1") + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.cookies.foo1", "bar1") + .assert_equal("body.cookies.foo2", "bar2") + ), + Step( + RunRequest("delete cookie foo2") + .get("/cookies/delete?foo2") + .with_headers(**{"User-Agent": "HttpRunner/${get_httprunner_version()}"}) + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.cookies.foo1", "bar1") + .assert_equal("body.cookies.foo2", None) + ), + ] + + +if __name__ == "__main__": + TestCaseHardcode().test_start() diff --git a/examples/postman_echo/cookie_manipulation/set_delete_cookies.yml b/examples/postman_echo/cookie_manipulation/set_delete_cookies.yml new file mode 100644 index 0000000..f43116a --- /dev/null +++ b/examples/postman_echo/cookie_manipulation/set_delete_cookies.yml @@ -0,0 +1,41 @@ +config: + name: "set & delete cookies." 
+ variables: + foo1: bar1 + foo2: bar2 + base_url: "https://postman-echo.com" + verify: False + export: ["cookie_foo1", "cookie_foo3"] + +teststeps: +- + name: set cookie foo1 & foo2 & foo3 + variables: + foo3: bar3 + request: + method: GET + url: /cookies/set + params: + foo1: bar111 + foo2: $foo2 + foo3: $foo3 + headers: + User-Agent: HttpRunner/${get_httprunner_version()} + extract: + cookie_foo1: $.cookies.foo1 + cookie_foo3: $.cookies.foo3 + validate: + - eq: ["status_code", 200] + - ne: ["$.cookies.foo3", "$foo3"] +- + name: delete cookie foo2 + request: + method: GET + url: /cookies/delete?foo2 + headers: + User-Agent: HttpRunner/${get_httprunner_version()} + validate: + - eq: ["status_code", 200] + - ne: ["$.cookies.foo1", "$foo1"] + - eq: ["$.cookies.foo1", "$cookie_foo1"] + - eq: ["$.cookies.foo3", "$cookie_foo3"] diff --git a/examples/postman_echo/cookie_manipulation/set_delete_cookies_test.py b/examples/postman_echo/cookie_manipulation/set_delete_cookies_test.py new file mode 100644 index 0000000..fd408d1 --- /dev/null +++ b/examples/postman_echo/cookie_manipulation/set_delete_cookies_test.py @@ -0,0 +1,44 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: cookie_manipulation/set_delete_cookies.yml +from httprunner import HttpRunner, Config, Step, RunRequest + + +class TestCaseSetDeleteCookies(HttpRunner): + + config = ( + Config("set & delete cookies.") + .variables(**{"foo1": "bar1", "foo2": "bar2"}) + .base_url("https://postman-echo.com") + .verify(False) + .export(*["cookie_foo1", "cookie_foo3"]) + ) + + teststeps = [ + Step( + RunRequest("set cookie foo1 & foo2 & foo3") + .with_variables(**{"foo3": "bar3"}) + .get("/cookies/set") + .with_params(**{"foo1": "bar111", "foo2": "$foo2", "foo3": "$foo3"}) + .with_headers(**{"User-Agent": "HttpRunner/${get_httprunner_version()}"}) + .extract() + .with_jmespath("$.cookies.foo1", "cookie_foo1") + .with_jmespath("$.cookies.foo3", "cookie_foo3") + .validate() + .assert_equal("status_code", 200) + .assert_not_equal("$.cookies.foo3", "$foo3") + ), + Step( + RunRequest("delete cookie foo2") + .get("/cookies/delete?foo2") + .with_headers(**{"User-Agent": "HttpRunner/${get_httprunner_version()}"}) + .validate() + .assert_equal("status_code", 200) + .assert_not_equal("$.cookies.foo1", "$foo1") + .assert_equal("$.cookies.foo1", "$cookie_foo1") + .assert_equal("$.cookies.foo3", "$cookie_foo3") + ), + ] + + +if __name__ == "__main__": + TestCaseSetDeleteCookies().test_start() diff --git a/examples/postman_echo/debugtalk.py b/examples/postman_echo/debugtalk.py new file mode 100644 index 0000000..f17806a --- /dev/null +++ b/examples/postman_echo/debugtalk.py @@ -0,0 +1,42 @@ +from httprunner import __version__ + + +def get_httprunner_version(): + return __version__ + + +def sum_two(m, n): + return m + n + + +def get_testcase_config_variables(): + return {"foo1": "testcase_config_bar1", "foo2": "testcase_config_bar2"} + + +def get_testsuite_config_variables(): + return {"foo1": "testsuite_config_bar1", "foo2": "testsuite_config_bar2"} + + +def get_app_version(): + return [3.1, 3.0] + + +def calculate_two_nums(a, b=1): + return [a + b, b - a] + + +def fake_rand_count(): + """ + return 1 at first call + return 2 at second call + """ + l = [] + + def func(): + l.append(1) + return len(l) + + return func + + +fake_randnum = fake_rand_count() diff --git a/examples/postman_echo/request_methods/__init__.py b/examples/postman_echo/request_methods/__init__.py new file mode 100644 index 0000000..70cfba5 --- /dev/null +++ 
b/examples/postman_echo/request_methods/__init__.py @@ -0,0 +1 @@ +# NOTICE: Generated By HttpRunner. DO NOT EDIT! diff --git a/examples/postman_echo/request_methods/account.csv b/examples/postman_echo/request_methods/account.csv new file mode 100644 index 0000000..67ce22c --- /dev/null +++ b/examples/postman_echo/request_methods/account.csv @@ -0,0 +1,4 @@ +username,password +test1,111111 +test2,222222 +test3,333333 \ No newline at end of file diff --git a/examples/postman_echo/request_methods/conftest.py b/examples/postman_echo/request_methods/conftest.py new file mode 100644 index 0000000..9d872f1 --- /dev/null +++ b/examples/postman_echo/request_methods/conftest.py @@ -0,0 +1,61 @@ +import uuid +from typing import List + +import pytest +from httprunner import Config, Step +from loguru import logger + + +@pytest.fixture(scope="session", autouse=True) +def session_fixture(request): + """setup and teardown each task""" + total_testcases_num = request.node.testscollected + testcases = [] + for item in request.node.items: + testcase = { + "name": item.cls.config.name, + "path": item.cls.config.path, + "node_id": item.nodeid, + } + testcases.append(testcase) + + logger.debug(f"collected {total_testcases_num} testcases: {testcases}") + + yield + + logger.debug("teardown task fixture") + + # teardown task + # TODO: upload task summary + + +@pytest.fixture(scope="function", autouse=True) +def testcase_fixture(request): + """setup and teardown each testcase""" + config: Config = request.cls.config + teststeps: List[Step] = request.cls.teststeps + + logger.debug(f"setup testcase fixture: {config.name} - {request.module.__name__}") + + def update_request_headers(steps, index): + for teststep in steps: + if teststep.request: + index += 1 + teststep.request.headers["X-Request-ID"] = f"{prefix}-{index}" + elif teststep.testcase and hasattr(teststep.testcase, "teststeps"): + update_request_headers(teststep.testcase.teststeps, index) + + # you can update testcase teststep like this + prefix = f"HRUN-{uuid.uuid4()}" + update_request_headers(teststeps, 0) + + yield + + logger.debug( + f"teardown testcase fixture: {config.name} - {request.module.__name__}" + ) + + summary = request.instance.get_summary() + logger.debug(f"testcase result summary: {summary}") + + # TODO: upload testcase summary diff --git a/examples/postman_echo/request_methods/hardcode.yml b/examples/postman_echo/request_methods/hardcode.yml new file mode 100644 index 0000000..2a2a7a2 --- /dev/null +++ b/examples/postman_echo/request_methods/hardcode.yml @@ -0,0 +1,55 @@ +config: + name: "request methods testcase in hardcode" + base_url: "https://postman-echo.com" + verify: False + +teststeps: +- + name: get with params + request: + method: GET + url: /get + params: + foo1: bar1 + foo2: bar2 + headers: + :authority: postman-echo.com + :method: POST + :path: /get + :schema: https + User-Agent: HttpRunner/3.0 + validate: + - eq: ["status_code", 200] +- + name: post raw text + request: + method: POST + url: /post + headers: + User-Agent: HttpRunner/3.0 + Content-Type: "text/plain" + data: "This is expected to be sent back as part of response body." 
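+    # postman-echo reflects a text/plain body under "data" in its response,
+    # so a stricter body check could be added to the validate list below, e.g.
+    #   - eq: ["body.data", "This is expected to be sent back as part of response body."]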
+ validate: + - eq: ["status_code", 200] +- + name: post form data + request: + method: POST + url: /post + headers: + User-Agent: HttpRunner/3.0 + Content-Type: "application/x-www-form-urlencoded" + data: "foo1=bar1&foo2=bar2" + validate: + - eq: ["status_code", 200] +- + name: put request + request: + method: PUT + url: /put + headers: + User-Agent: HttpRunner/3.0 + Content-Type: "text/plain" + data: "This is expected to be sent back as part of response body." + validate: + - eq: ["status_code", 200] \ No newline at end of file diff --git a/examples/postman_echo/request_methods/hardcode_test.py b/examples/postman_echo/request_methods/hardcode_test.py new file mode 100644 index 0000000..ced3aa6 --- /dev/null +++ b/examples/postman_echo/request_methods/hardcode_test.py @@ -0,0 +1,68 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: request_methods/hardcode.yml +from httprunner import HttpRunner, Config, Step, RunRequest + + +class TestCaseHardcode(HttpRunner): + + config = ( + Config("request methods testcase in hardcode") + .base_url("https://postman-echo.com") + .verify(False) + ) + + teststeps = [ + Step( + RunRequest("get with params") + .get("/get") + .with_params(**{"foo1": "bar1", "foo2": "bar2"}) + .with_headers( + **{ + ":authority": "postman-echo.com", + ":method": "POST", + ":path": "/get", + ":schema": "https", + "User-Agent": "HttpRunner/3.0", + } + ) + .validate() + .assert_equal("status_code", 200) + ), + Step( + RunRequest("post raw text") + .post("/post") + .with_headers( + **{"User-Agent": "HttpRunner/3.0", "Content-Type": "text/plain"} + ) + .with_data("This is expected to be sent back as part of response body.") + .validate() + .assert_equal("status_code", 200) + ), + Step( + RunRequest("post form data") + .post("/post") + .with_headers( + **{ + "User-Agent": "HttpRunner/3.0", + "Content-Type": "application/x-www-form-urlencoded", + } + ) + .with_data("foo1=bar1&foo2=bar2") + .validate() + .assert_equal("status_code", 200) + ), + Step( + RunRequest("put request") + .put("/put") + .with_headers( + **{"User-Agent": "HttpRunner/3.0", "Content-Type": "text/plain"} + ) + .with_data("This is expected to be sent back as part of response body.") + .validate() + .assert_equal("status_code", 200) + ), + ] + + +if __name__ == "__main__": + TestCaseHardcode().test_start() diff --git a/examples/postman_echo/request_methods/request_with_functions.yml b/examples/postman_echo/request_methods/request_with_functions.yml new file mode 100644 index 0000000..98007e7 --- /dev/null +++ b/examples/postman_echo/request_methods/request_with_functions.yml @@ -0,0 +1,69 @@ +config: + name: "request methods testcase with functions" + variables: + foo1: config_bar1 + foo2: config_bar2 + expect_foo1: config_bar1 + expect_foo2: config_bar2 + base_url: "https://postman-echo.com" + verify: False + weight: 2 + export: ["foo3"] + +teststeps: +- + name: get with params + variables: + foo1: bar11 + foo2: bar21 + sum_v: "${sum_two(1, 2)}" + request: + method: GET + url: /get + params: + foo1: $foo1 + foo2: $foo2 + sum_v: $sum_v + headers: + User-Agent: HttpRunner/${get_httprunner_version()} + extract: + foo3: "body.args.foo2" + validate: + - eq: ["status_code", 200] + - eq: ["body.args.foo1", "bar11"] + - eq: ["body.args.sum_v", "3"] + - eq: ["body.args.foo2", "bar21"] +- + name: post raw text + variables: + foo1: "bar12" + foo3: "bar32" + request: + method: POST + url: /post + headers: + User-Agent: HttpRunner/${get_httprunner_version()} + Content-Type: "text/plain" + data: "This is expected to be sent 
back as part of response body: $foo1-$foo2-$foo3." + validate: + - eq: ["status_code", 200] + - eq: ["body.data", "This is expected to be sent back as part of response body: bar12-$expect_foo2-bar32."] + - type_match: ["body.json", None] + - type_match: ["body.json", NoneType] + - type_match: ["body.json", null] +- + name: post form data + variables: + foo2: bar23 + request: + method: POST + url: /post + headers: + User-Agent: HttpRunner/${get_httprunner_version()} + Content-Type: "application/x-www-form-urlencoded" + data: "foo1=$foo1&foo2=$foo2&foo3=$foo3" + validate: + - eq: ["status_code", 200, "response status code should be 200"] + - eq: ["body.form.foo1", "$expect_foo1"] + - eq: ["body.form.foo2", "bar23"] + - eq: ["body.form.foo3", "bar21"] diff --git a/examples/postman_echo/request_methods/request_with_functions_test.py b/examples/postman_echo/request_methods/request_with_functions_test.py new file mode 100644 index 0000000..24596d7 --- /dev/null +++ b/examples/postman_echo/request_methods/request_with_functions_test.py @@ -0,0 +1,84 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: request_methods/request_with_functions.yml +from httprunner import HttpRunner, Config, Step, RunRequest + + +class TestCaseRequestWithFunctions(HttpRunner): + + config = ( + Config("request methods testcase with functions") + .variables( + **{ + "foo1": "config_bar1", + "foo2": "config_bar2", + "expect_foo1": "config_bar1", + "expect_foo2": "config_bar2", + } + ) + .base_url("https://postman-echo.com") + .verify(False) + .export(*["foo3"]) + ) + + teststeps = [ + Step( + RunRequest("get with params") + .with_variables( + **{"foo1": "bar11", "foo2": "bar21", "sum_v": "${sum_two(1, 2)}"} + ) + .get("/get") + .with_params(**{"foo1": "$foo1", "foo2": "$foo2", "sum_v": "$sum_v"}) + .with_headers(**{"User-Agent": "HttpRunner/${get_httprunner_version()}"}) + .extract() + .with_jmespath("body.args.foo2", "foo3") + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.args.foo1", "bar11") + .assert_equal("body.args.sum_v", "3") + .assert_equal("body.args.foo2", "bar21") + ), + Step( + RunRequest("post raw text") + .with_variables(**{"foo1": "bar12", "foo3": "bar32"}) + .post("/post") + .with_headers( + **{ + "User-Agent": "HttpRunner/${get_httprunner_version()}", + "Content-Type": "text/plain", + } + ) + .with_data( + "This is expected to be sent back as part of response body: $foo1-$foo2-$foo3." 
+ ) + .validate() + .assert_equal("status_code", 200) + .assert_equal( + "body.data", + "This is expected to be sent back as part of response body: bar12-$expect_foo2-bar32.", + ) + .assert_type_match("body.json", "None") + .assert_type_match("body.json", "NoneType") + .assert_type_match("body.json", None) + ), + Step( + RunRequest("post form data") + .with_variables(**{"foo2": "bar23"}) + .post("/post") + .with_headers( + **{ + "User-Agent": "HttpRunner/${get_httprunner_version()}", + "Content-Type": "application/x-www-form-urlencoded", + } + ) + .with_data("foo1=$foo1&foo2=$foo2&foo3=$foo3") + .validate() + .assert_equal("status_code", 200, "response status code should be 200") + .assert_equal("body.form.foo1", "$expect_foo1") + .assert_equal("body.form.foo2", "bar23") + .assert_equal("body.form.foo3", "bar21") + ), + ] + + +if __name__ == "__main__": + TestCaseRequestWithFunctions().test_start() diff --git a/examples/postman_echo/request_methods/request_with_parameters.yml b/examples/postman_echo/request_methods/request_with_parameters.yml new file mode 100644 index 0000000..38e239c --- /dev/null +++ b/examples/postman_echo/request_methods/request_with_parameters.yml @@ -0,0 +1,33 @@ +config: + name: "request methods testcase: validate with parameters" + parameters: + user_agent: ["iOS/10.1", "iOS/10.2"] + username-password: ${parameterize(request_methods/account.csv)} + app_version: ${get_app_version()} + variables: + app_version: f1 + base_url: "https://postman-echo.com" + verify: False + +teststeps: +- + name: get with params + variables: + foo1: $username + foo2: $password + sum_v: "${sum_two(1, $app_version)}" + request: + method: GET + url: /get + params: + foo1: $foo1 + foo2: $foo2 + sum_v: $sum_v + headers: + User-Agent: $user_agent,$app_version + extract: + session_foo2: "body.args.foo2" + validate: + - eq: ["status_code", 200] + - str_eq: ["body.args.sum_v", "${sum_two(1, $app_version)}"] +# - less_than: ["body.args.sum_v", "${sum_two(2, 2)}"] FIXME: TypeError: '<' not supported between instances of 'str' and 'int' diff --git a/examples/postman_echo/request_methods/request_with_parameters_test.py b/examples/postman_echo/request_methods/request_with_parameters_test.py new file mode 100644 index 0000000..1b2d3e0 --- /dev/null +++ b/examples/postman_echo/request_methods/request_with_parameters_test.py @@ -0,0 +1,53 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: request_methods/request_with_parameters.yml +import pytest + +from httprunner import HttpRunner, Config, Step, RunRequest +from httprunner import Parameters + + +class TestCaseRequestWithParameters(HttpRunner): + @pytest.mark.parametrize( + "param", + Parameters( + { + "user_agent": ["iOS/10.1", "iOS/10.2"], + "username-password": "${parameterize(request_methods/account.csv)}", + "app_version": "${get_app_version()}", + } + ), + ) + def test_start(self, param): + super().test_start(param) + + config = ( + Config("request methods testcase: validate with parameters") + .variables(**{"app_version": "f1"}) + .base_url("https://postman-echo.com") + .verify(False) + ) + + teststeps = [ + Step( + RunRequest("get with params") + .with_variables( + **{ + "foo1": "$username", + "foo2": "$password", + "sum_v": "${sum_two(1, $app_version)}", + } + ) + .get("/get") + .with_params(**{"foo1": "$foo1", "foo2": "$foo2", "sum_v": "$sum_v"}) + .with_headers(**{"User-Agent": "$user_agent,$app_version"}) + .extract() + .with_jmespath("body.args.foo2", "session_foo2") + .validate() + .assert_equal("status_code", 200) + 
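+            # str_eq (assert_string_equals) casts both sides to str before
+            # comparing (see string_equals in builtin/comparators.py), which
+            # is why it can check the echoed sum_v; a numeric comparator such
+            # as less_than raises TypeError here, per the FIXME note in
+            # request_with_parameters.yml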
.assert_string_equals("body.args.sum_v", "${sum_two(1, $app_version)}") + ), + ] + + +if __name__ == "__main__": + TestCaseRequestWithParameters().test_start() diff --git a/examples/postman_echo/request_methods/request_with_retry_test.py b/examples/postman_echo/request_methods/request_with_retry_test.py new file mode 100644 index 0000000..af74697 --- /dev/null +++ b/examples/postman_echo/request_methods/request_with_retry_test.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +""" + + @Date : 2022/4/7 + @File : request_with_retry.py + @Author : duanchao.bill + @Desc : + +""" + +from httprunner import HttpRunner, Config, Step, RunRequest, RunTestCase + + +class TestCaseRetry(HttpRunner): + config = ( + Config("request methods testcase in hardcode") + .base_url("https://postman-echo.com") + .verify(False) + ) + + teststeps = [ + Step( + RunRequest("run with retry") + .with_retry(retry_times=1, retry_interval=1) + .get("/get") + .with_params(**{"foo1": "${fake_randnum()}"}) + .with_headers(**{"User-Agent": "HttpRunner/3.0"}) + .validate() + .assert_equal("body.args.foo1", "2") + ) + ] diff --git a/examples/postman_echo/request_methods/request_with_testcase_reference.yml b/examples/postman_echo/request_methods/request_with_testcase_reference.yml new file mode 100644 index 0000000..abb51f3 --- /dev/null +++ b/examples/postman_echo/request_methods/request_with_testcase_reference.yml @@ -0,0 +1,37 @@ +config: + name: "request methods testcase: reference testcase" + variables: + foo1: testsuite_config_bar1 + expect_foo1: testsuite_config_bar1 + expect_foo2: config_bar2 + base_url: "https://postman-echo.com" + verify: False + +teststeps: +- + name: request with functions + variables: + foo1: testcase_ref_bar1 + expect_foo1: testcase_ref_bar1 + setup_hooks: + - ${sleep(0.1)} + testcase: request_methods/request_with_functions.yml + teardown_hooks: + - ${sleep(0.2)} + export: + - foo3 +- + name: post form data + variables: + foo1: bar1 + request: + method: POST + url: /post + headers: + User-Agent: HttpRunner/${get_httprunner_version()} + Content-Type: "application/x-www-form-urlencoded" + data: "foo1=$foo1&foo2=$foo3" + validate: + - eq: ["status_code", 200] + - eq: ["body.form.foo1", "bar1"] + - eq: ["body.form.foo2", "bar21"] diff --git a/examples/postman_echo/request_methods/request_with_testcase_reference_test.py b/examples/postman_echo/request_methods/request_with_testcase_reference_test.py new file mode 100644 index 0000000..d8b5a07 --- /dev/null +++ b/examples/postman_echo/request_methods/request_with_testcase_reference_test.py @@ -0,0 +1,62 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: request_methods/request_with_testcase_reference.yml +from httprunner import HttpRunner, Config, Step, RunRequest +from httprunner import RunTestCase + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from request_methods.request_with_functions_test import ( + TestCaseRequestWithFunctions as RequestWithFunctions, +) + + +class TestCaseRequestWithTestcaseReference(HttpRunner): + + config = ( + Config("request methods testcase: reference testcase") + .variables( + **{ + "foo1": "testsuite_config_bar1", + "expect_foo1": "testsuite_config_bar1", + "expect_foo2": "config_bar2", + } + ) + .base_url("https://postman-echo.com") + .verify(False) + ) + + teststeps = [ + Step( + RunTestCase("request with functions") + .with_variables( + **{"foo1": "testcase_ref_bar1", "expect_foo1": "testcase_ref_bar1"} + ) + .setup_hook("${sleep(0.1)}") + .call(RequestWithFunctions) + 
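+            # call() runs the referenced testcase class as a single step; the
+            # variables set above override that testcase's config variables,
+            # and export() below pulls the extracted "foo3" back into this
+            # session so the next step can reference it as $foo3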
.teardown_hook("${sleep(0.2)}") + .export(*["foo3"]) + ), + Step( + RunRequest("post form data") + .with_variables(**{"foo1": "bar1"}) + .post("/post") + .with_headers( + **{ + "User-Agent": "HttpRunner/${get_httprunner_version()}", + "Content-Type": "application/x-www-form-urlencoded", + } + ) + .with_data("foo1=$foo1&foo2=$foo3") + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.form.foo1", "bar1") + .assert_equal("body.form.foo2", "bar21") + ), + ] + + +if __name__ == "__main__": + TestCaseRequestWithTestcaseReference().test_start() diff --git a/examples/postman_echo/request_methods/request_with_variables.yml b/examples/postman_echo/request_methods/request_with_variables.yml new file mode 100644 index 0000000..34c8477 --- /dev/null +++ b/examples/postman_echo/request_methods/request_with_variables.yml @@ -0,0 +1,78 @@ +config: + name: "request methods testcase with variables" + variables: ${get_testcase_config_variables()} + base_url: "https://postman-echo.com" + verify: False + +teststeps: +- + name: get with params + variables: + foo1: bar11 + foo2: bar21 + request: + method: GET + url: /get + params: + foo1: $foo1 + foo2: $foo2 + headers: + User-Agent: HttpRunner/3.0 + extract: + foo3: "body.args.foo2" + validate: + - eq: ["status_code", 200] + - eq: ["body.args.foo1", "bar11"] + - eq: ["body.args.foo2", "bar21"] +- + name: post raw text + variables: + foo1: "bar12" + foo3: "bar32" + request: + method: POST + url: /post + headers: + User-Agent: HttpRunner/3.0 + Content-Type: "text/plain" + data: "This is expected to be sent back as part of response body: $foo1-$foo2-$foo3." + validate: + - eq: ["status_code", 200] + - eq: ["body.data", "This is expected to be sent back as part of response body: bar12-testcase_config_bar2-bar32."] +- + name: post form data + variables: + foo2: bar23 + request: + method: POST + url: /post + headers: + User-Agent: HttpRunner/3.0 + Content-Type: "application/x-www-form-urlencoded" + data: "foo1=$foo1&foo2=$foo2&foo3=$foo3" + validate: + - eq: ["status_code", 200] + - eq: ["body.form.foo1", "testcase_config_bar1"] + - eq: ["body.form.foo2", "bar23"] + - eq: ["body.form.foo3", "bar21"] + +- + name: post form data using json + variables: + foo2: bar23 + jsondata: + foo1: $foo1 + foo2: $foo2 + foo3: $foo3 + request: + method: POST + url: /post + headers: + User-Agent: HttpRunner/3.0 + Content-Type: "application/json" + json: $jsondata + validate: + - eq: ["status_code", 200] + - eq: ["body.data.foo1", "testcase_config_bar1"] + - eq: ["body.data.foo2", "bar23"] + - eq: ["body.data.foo3", "bar21"] diff --git a/examples/postman_echo/request_methods/request_with_variables_test.py b/examples/postman_echo/request_methods/request_with_variables_test.py new file mode 100644 index 0000000..ae6b9dd --- /dev/null +++ b/examples/postman_echo/request_methods/request_with_variables_test.py @@ -0,0 +1,86 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: request_methods/request_with_variables.yml +from httprunner import HttpRunner, Config, Step, RunRequest + + +class TestCaseRequestWithVariables(HttpRunner): + + config = ( + Config("request methods testcase with variables") + .variables(**{"foo1": "testcase_config_bar1", "foo2": "testcase_config_bar2"}) + .base_url("https://postman-echo.com") + .verify(False) + ) + + teststeps = [ + Step( + RunRequest("get with params") + .with_variables(**{"foo1": "bar11", "foo2": "bar21"}) + .get("/get") + .with_params(**{"foo1": "$foo1", "foo2": "$foo2"}) + .with_headers(**{"User-Agent": "HttpRunner/3.0"}) + 
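+            # extract() + with_jmespath() binds a JMESPath query on the
+            # response to a named session variable ("foo3" here), which later
+            # steps in this testcase can reference as $foo3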
.extract() + .with_jmespath("body.args.foo2", "foo3") + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.args.foo1", "bar11") + .assert_equal("body.args.foo2", "bar21") + ), + Step( + RunRequest("post raw text") + .with_variables(**{"foo1": "bar12", "foo3": "bar32"}) + .post("/post") + .with_headers( + **{"User-Agent": "HttpRunner/3.0", "Content-Type": "text/plain"} + ) + .with_data( + "This is expected to be sent back as part of response body: $foo1-$foo2-$foo3." + ) + .validate() + .assert_equal("status_code", 200) + .assert_equal( + "body.data", + "This is expected to be sent back as part of response body: bar12-testcase_config_bar2-bar32.", + ) + ), + Step( + RunRequest("post form data") + .with_variables(**{"foo2": "bar23"}) + .post("/post") + .with_headers( + **{ + "User-Agent": "HttpRunner/3.0", + "Content-Type": "application/x-www-form-urlencoded", + } + ) + .with_data("foo1=$foo1&foo2=$foo2&foo3=$foo3") + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.form.foo1", "testcase_config_bar1") + .assert_equal("body.form.foo2", "bar23") + .assert_equal("body.form.foo3", "bar21") + ), + Step( + RunRequest("post form data using json") + .with_variables( + **{ + "foo2": "bar23", + "jsondata": {"foo1": "$foo1", "foo2": "$foo2", "foo3": "$foo3"}, + } + ) + .post("/post") + .with_headers( + **{"User-Agent": "HttpRunner/3.0", "Content-Type": "application/json"} + ) + .with_json("$jsondata") + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.data.foo1", "testcase_config_bar1") + .assert_equal("body.data.foo2", "bar23") + .assert_equal("body.data.foo3", "bar21") + ), + ] + + +if __name__ == "__main__": + TestCaseRequestWithVariables().test_start() diff --git a/examples/postman_echo/request_methods/validate_with_functions.yml b/examples/postman_echo/request_methods/validate_with_functions.yml new file mode 100644 index 0000000..608f061 --- /dev/null +++ b/examples/postman_echo/request_methods/validate_with_functions.yml @@ -0,0 +1,29 @@ +config: + name: "request methods testcase: validate with functions" + variables: + foo1: session_bar1 + base_url: "https://postman-echo.com" + verify: False + +teststeps: +- + name: get with params + variables: + foo1: bar1 + foo2: session_bar2 + sum_v: "${sum_two(1, 2)}" + request: + method: GET + url: /get + params: + foo1: $foo1 + foo2: $foo2 + sum_v: $sum_v + headers: + User-Agent: HttpRunner/${get_httprunner_version()} + extract: + session_foo2: "body.args.foo2" + validate: + - eq: ["status_code", 200] + - eq: ["body.args.sum_v", "3"] +# - less_than: ["body.args.sum_v", "${sum_two(2, 2)}"] FIXME: TypeError: '<' not supported between instances of 'str' and 'int' diff --git a/examples/postman_echo/request_methods/validate_with_functions_test.py b/examples/postman_echo/request_methods/validate_with_functions_test.py new file mode 100644 index 0000000..1ad2041 --- /dev/null +++ b/examples/postman_echo/request_methods/validate_with_functions_test.py @@ -0,0 +1,34 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: request_methods/validate_with_functions.yml +from httprunner import HttpRunner, Config, Step, RunRequest + + +class TestCaseValidateWithFunctions(HttpRunner): + + config = ( + Config("request methods testcase: validate with functions") + .variables(**{"foo1": "session_bar1"}) + .base_url("https://postman-echo.com") + .verify(False) + ) + + teststeps = [ + Step( + RunRequest("get with params") + .with_variables( + **{"foo1": "bar1", "foo2": "session_bar2", "sum_v": "${sum_two(1, 2)}"} + ) + 
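+            # ${sum_two(1, 2)} is resolved against debugtalk.py at runtime;
+            # postman-echo returns query params as strings, so the assertion
+            # below expects the string "3" rather than the int 3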
.get("/get") + .with_params(**{"foo1": "$foo1", "foo2": "$foo2", "sum_v": "$sum_v"}) + .with_headers(**{"User-Agent": "HttpRunner/${get_httprunner_version()}"}) + .extract() + .with_jmespath("body.args.foo2", "session_foo2") + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.args.sum_v", "3") + ), + ] + + +if __name__ == "__main__": + TestCaseValidateWithFunctions().test_start() diff --git a/examples/postman_echo/request_methods/validate_with_variables.yml b/examples/postman_echo/request_methods/validate_with_variables.yml new file mode 100644 index 0000000..3044af9 --- /dev/null +++ b/examples/postman_echo/request_methods/validate_with_variables.yml @@ -0,0 +1,58 @@ +config: + name: "request methods testcase: validate with variables" + variables: + foo1: session_bar1 + base_url: "https://postman-echo.com" + verify: False + +teststeps: +- + name: get with params + variables: + foo1: bar1 + foo2: session_bar2 + request: + method: GET + url: /get + params: + foo1: $foo1 + foo2: $foo2 + headers: + User-Agent: HttpRunner/3.0 + extract: + session_foo2: "body.args.foo2" + validate: + - eq: ["status_code", 200] + - eq: ["body.args.foo1", "$foo1"] + - eq: ["body.args.foo2", "$foo2"] +- + name: post raw text + variables: + foo1: "hello world" + foo3: "$session_foo2" + request: + method: POST + url: /post + headers: + User-Agent: HttpRunner/3.0 + Content-Type: "text/plain" + data: "This is expected to be sent back as part of response body: $foo1-$foo3." + validate: + - eq: ["status_code", 200] + - eq: ["body.data", "This is expected to be sent back as part of response body: hello world-$foo3."] +- + name: post form data + variables: + foo1: bar1 + foo2: bar2 + request: + method: POST + url: /post + headers: + User-Agent: HttpRunner/3.0 + Content-Type: "application/x-www-form-urlencoded" + data: "foo1=$foo1&foo2=$foo2" + validate: + - eq: ["status_code", 200] + - eq: ["body.form.foo1", "$foo1"] + - eq: ["body.form.foo2", "$foo2"] diff --git a/examples/postman_echo/request_methods/validate_with_variables_test.py b/examples/postman_echo/request_methods/validate_with_variables_test.py new file mode 100644 index 0000000..1fb7270 --- /dev/null +++ b/examples/postman_echo/request_methods/validate_with_variables_test.py @@ -0,0 +1,66 @@ +# NOTE: Generated By HttpRunner v4.3.5 +# FROM: request_methods/validate_with_variables.yml +from httprunner import HttpRunner, Config, Step, RunRequest + + +class TestCaseValidateWithVariables(HttpRunner): + + config = ( + Config("request methods testcase: validate with variables") + .variables(**{"foo1": "session_bar1"}) + .base_url("https://postman-echo.com") + .verify(False) + ) + + teststeps = [ + Step( + RunRequest("get with params") + .with_variables(**{"foo1": "bar1", "foo2": "session_bar2"}) + .get("/get") + .with_params(**{"foo1": "$foo1", "foo2": "$foo2"}) + .with_headers(**{"User-Agent": "HttpRunner/3.0"}) + .extract() + .with_jmespath("body.args.foo2", "session_foo2") + .validate() + .assert_equal("status_code", 200) + .assert_equal("body.args.foo1", "$foo1") + .assert_equal("body.args.foo2", "$foo2") + ), + Step( + RunRequest("post raw text") + .with_variables(**{"foo1": "hello world", "foo3": "$session_foo2"}) + .post("/post") + .with_headers( + **{"User-Agent": "HttpRunner/3.0", "Content-Type": "text/plain"} + ) + .with_data( + "This is expected to be sent back as part of response body: $foo1-$foo3." 
+            )
+            .validate()
+            .assert_equal("status_code", 200)
+            .assert_equal(
+                "body.data",
+                "This is expected to be sent back as part of response body: hello world-$foo3.",
+            )
+        ),
+        Step(
+            RunRequest("post form data")
+            .with_variables(**{"foo1": "bar1", "foo2": "bar2"})
+            .post("/post")
+            .with_headers(
+                **{
+                    "User-Agent": "HttpRunner/3.0",
+                    "Content-Type": "application/x-www-form-urlencoded",
+                }
+            )
+            .with_data("foo1=$foo1&foo2=$foo2")
+            .validate()
+            .assert_equal("status_code", 200)
+            .assert_equal("body.form.foo1", "$foo1")
+            .assert_equal("body.form.foo2", "$foo2")
+        ),
+    ]
+
+
+if __name__ == "__main__":
+    TestCaseValidateWithVariables().test_start()
diff --git a/examples/pytest.ini b/examples/pytest.ini
new file mode 100644
index 0000000..0aad31f
--- /dev/null
+++ b/examples/pytest.ini
@@ -0,0 +1,6 @@
+[pytest]
+addopts = -s
+# https://docs.pytest.org/en/latest/how-to/output.html
+junit_logging = all
+junit_duration_report = total
+log_cli = False
diff --git a/examples/sql/test_sql_demo.py b/examples/sql/test_sql_demo.py
new file mode 100644
index 0000000..b0ed6f5
--- /dev/null
+++ b/examples/sql/test_sql_demo.py
@@ -0,0 +1,36 @@
+import sys
+from pathlib import Path
+
+from httprunner.database.engine import DBEngine
+
+sys.path.insert(0, str(Path(__file__).parent.parent))
+
+from httprunner import HttpRunner, Config, Step, RunSqlRequest  # noqa:E402
+
+
+class TestCaseDemoSqlite(HttpRunner):
+    config = Config("run sqlite demo")
+
+    teststeps = [
+        Step(
+            RunSqlRequest("run a sqlite demo")
+            .fetchmany("select * from student;", 5)
+            .extract()
+            .with_jmespath("[0].name", "name")
+            .validate()
+            .assert_equal(
+                "[0]",
+                {
+                    "id": 1,
+                    "name": "Jack",
+                    "fullname": {"first_name": "Jack", "last_name": "Tomson"},
+                },
+            )
+            .assert_equal("[0].fullname.first_name", "Jack")
+        )
+    ]
+
+    def test_start(self):
+        eg = DBEngine(db_uri="sqlite:///../data/sqlite.db")
+        self.with_db_engine(eg)
+        super().test_start()
diff --git a/httprunner/README.md b/httprunner/README.md
new file mode 100644
index 0000000..c6d241b
--- /dev/null
+++ b/httprunner/README.md
@@ -0,0 +1,115 @@
+# Code Reading Guide (Python part)
+
+## Core data structures
+
+HttpRunner is built around `TestCase`, which abstracts any test scenario as an ordered collection of steps.
+
+```py
+class TestCase(BaseModel):
+    config: TConfig
+    teststeps: List[TStep]
+```
+
+Every test step type inherits from `IStep` and must implement at least the following four methods; the step's actual behavior is implemented in its `run` method.
+
+```py
+class IStep(object):
+
+    def name(self) -> str:
+        raise NotImplementedError
+
+    def type(self) -> str:
+        raise NotImplementedError
+
+    def struct(self) -> TStep:
+        raise NotImplementedError
+
+    def run(self, runner) -> StepData:
+        # runner: HttpRunner
+        raise NotImplementedError
+```
+
+By simply following the `IStep` interface definition, we can implement all kinds of test step types. The step types currently supported by the Python version are:
+
+- [request](step_request.py): performs a single HTTP request
+- [testcase](step_testcase.py): references and executes another testcase file
+
+Based on this mechanism, we can extend support to new protocol types such as HTTP/2, WebSocket and RPC, as well as new test types such as UI automation. We can even mix different Step types within a single testcase, e.g. to build combined HTTP/RPC/UI scenarios.
+
+## Writing testcases
+
+## Main execution flow
+
+### Overall controller: pytest
+
+Unlike the golang version, the control logic of the Python version is built entirely on pytest's testcase discovery and execution mechanisms.
+
+- when running JSON/YAML testcases, hrp converts them into the testcase format supported by pytest
+- when writing pytest testcases by hand, you need to follow HttpRunner's format requirements
+
+### pytest testcase format requirements
+
+All testcases are required to inherit from `HttpRunner` and to define the `config` and `teststeps` class attributes.
+
+The structure looks like this:
+
+```py
+class TestCaseRequestWithFunctions(HttpRunner):
+
+    config = (
+        Config("request methods testcase with functions")
+    )
+
+    teststeps = [
+        Step(
+            RunRequest("get with params")...
+        ),
+        Step(
+            RunRequest("post raw text")...
+        ),
+        Step(
+            RunRequest("post form data")...
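+            # every RunRequest builder chains, in order: method/url, request
+            # attributes (with_params/with_headers/with_data), an optional
+            # extract(), and finally validate() with assert_* checks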
+        ),
+    ]
+```
+
+For complete examples, see:
+
+- [request_with_functions_test.py](../examples/postman_echo/request_methods/request_with_functions_test.py): a testcase composed of request steps
+- [request_with_testcase_reference_test.py](../examples/postman_echo/request_methods/request_with_testcase_reference_test.py): a testcase that references other testcases
+
+### Testcase executor: SessionRunner
+
+The actual execution of a testcase is handled by `SessionRunner`. Each TestCase maps to one instance, which holds not only the testcase content itself but also the session data produced during the run and the final result summary.
+
+```py
+class SessionRunner(object):
+    config: Config
+    teststeps: List[object]  # list of Step
+    ...
+```
+
+One method deserves special attention:
+
+- test_start: discovered by pytest as the execution entry point; it runs all test steps in sequence
+
+```py
+def test_start(self, param: Dict = None) -> "SessionRunner":
+    """main entrance, discovered by pytest"""
+    self.__start_at = time.time()
+    try:
+        # run step in sequential order
+        for step in self.teststeps:
+            self.__run_step(step)
+    finally:
+        logger.info(f"generate testcase log: {self.__log_path}")
+
+    self.__duration = time.time() - self.__start_at
+```
+
+In the main flow, SessionRunner does not need to care about each step's concrete type; it uniformly calls `step.run(self)`, and the step-specific logic lives in the corresponding step's `run` method.
+
+```py
+def run(self, runner: HttpRunner) -> StepData:
+    return self.__step.run(runner)
+```
diff --git a/httprunner/__init__.py b/httprunner/__init__.py
new file mode 100644
index 0000000..58838e3
--- /dev/null
+++ b/httprunner/__init__.py
@@ -0,0 +1,38 @@
+__version__ = "v4.3.5"
+__description__ = "One-stop solution for HTTP(S) testing."
+
+
+from httprunner.config import Config
+from httprunner.parser import parse_parameters as Parameters
+from httprunner.runner import HttpRunner
+from httprunner.step import Step
+from httprunner.step_request import RunRequest
+from httprunner.step_sql_request import (
+    RunSqlRequest,
+    StepSqlRequestExtraction,
+    StepSqlRequestValidation,
+)
+from httprunner.step_testcase import RunTestCase
+from httprunner.step_thrift_request import (
+    RunThriftRequest,
+    StepThriftRequestExtraction,
+    StepThriftRequestValidation,
+)
+
+
+__all__ = [
+    "__version__",
+    "__description__",
+    "HttpRunner",
+    "Config",
+    "Step",
+    "RunRequest",
+    "RunSqlRequest",
+    "StepSqlRequestValidation",
+    "StepSqlRequestExtraction",
+    "RunTestCase",
+    "Parameters",
+    "RunThriftRequest",
+    "StepThriftRequestValidation",
+    "StepThriftRequestExtraction",
+]
diff --git a/httprunner/__main__.py b/httprunner/__main__.py
new file mode 100644
index 0000000..6cc9a14
--- /dev/null
+++ b/httprunner/__main__.py
@@ -0,0 +1,5 @@
+from httprunner.cli import main
+
+
+if __name__ == "__main__":
+    main()
diff --git a/httprunner/builtin/__init__.py b/httprunner/builtin/__init__.py
new file mode 100644
index 0000000..0c7cf6d
--- /dev/null
+++ b/httprunner/builtin/__init__.py
@@ -0,0 +1,2 @@
+from httprunner.builtin.comparators import *
+from httprunner.builtin.functions import *
diff --git a/httprunner/builtin/comparators.py b/httprunner/builtin/comparators.py
new file mode 100644
index 0000000..58f9f3c
--- /dev/null
+++ b/httprunner/builtin/comparators.py
@@ -0,0 +1,129 @@
+"""
+Built-in validate comparators.
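+
+The short comparator aliases used in YAML/JSON testcases (eq, ne, str_eq,
+type_match, ...) are resolved to the functions below by the validator layer;
+generated pytest code reaches the same functions through the assert_* chain
+methods (e.g. assert_equal -> equal).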
+""" + +import re +from typing import Text, Any, Union + + +def equal(check_value: Any, expect_value: Any, message: Text = ""): + assert check_value == expect_value, message + + +def greater_than( + check_value: Union[int, float], expect_value: Union[int, float], message: Text = "" +): + assert check_value > expect_value, message + + +def less_than( + check_value: Union[int, float], expect_value: Union[int, float], message: Text = "" +): + assert check_value < expect_value, message + + +def greater_or_equals( + check_value: Union[int, float], expect_value: Union[int, float], message: Text = "" +): + assert check_value >= expect_value, message + + +def less_or_equals( + check_value: Union[int, float], expect_value: Union[int, float], message: Text = "" +): + assert check_value <= expect_value, message + + +def not_equal(check_value: Any, expect_value: Any, message: Text = ""): + assert check_value != expect_value, message + + +def string_equals(check_value: Text, expect_value: Any, message: Text = ""): + assert str(check_value) == str(expect_value), message + + +def length_equal(check_value: Text, expect_value: int, message: Text = ""): + assert isinstance(expect_value, int), "expect_value should be int type" + assert len(check_value) == expect_value, message + + +def length_greater_than( + check_value: Text, expect_value: Union[int, float], message: Text = "" +): + assert isinstance( + expect_value, (int, float) + ), "expect_value should be int/float type" + assert len(check_value) > expect_value, message + + +def length_greater_or_equals( + check_value: Text, expect_value: Union[int, float], message: Text = "" +): + assert isinstance( + expect_value, (int, float) + ), "expect_value should be int/float type" + assert len(check_value) >= expect_value, message + + +def length_less_than( + check_value: Text, expect_value: Union[int, float], message: Text = "" +): + assert isinstance( + expect_value, (int, float) + ), "expect_value should be int/float type" + assert len(check_value) < expect_value, message + + +def length_less_or_equals( + check_value: Text, expect_value: Union[int, float], message: Text = "" +): + assert isinstance( + expect_value, (int, float) + ), "expect_value should be int/float type" + assert len(check_value) <= expect_value, message + + +def contains(check_value: Any, expect_value: Any, message: Text = ""): + assert isinstance( + check_value, (list, tuple, dict, str, bytes) + ), "expect_value should be list/tuple/dict/str/bytes type" + assert expect_value in check_value, message + + +def contained_by(check_value: Any, expect_value: Any, message: Text = ""): + assert isinstance( + expect_value, (list, tuple, dict, str, bytes) + ), "expect_value should be list/tuple/dict/str/bytes type" + assert check_value in expect_value, message + + +def type_match(check_value: Any, expect_value: Any, message: Text = ""): + def get_type(name): + if isinstance(name, type): + return name + elif isinstance(name, str): + try: + return __builtins__[name] + except KeyError: + raise ValueError(name) + else: + raise ValueError(name) + + if expect_value in ["None", "NoneType", None]: + assert check_value is None, message + else: + assert type(check_value) == get_type(expect_value), message + + +def regex_match(check_value: Text, expect_value: Any, message: Text = ""): + assert isinstance(expect_value, str), "expect_value should be Text type" + assert isinstance(check_value, str), "check_value should be Text type" + assert re.match(expect_value, check_value), message + + +def 
startswith(check_value: Any, expect_value: Any, message: Text = ""): + assert str(check_value).startswith(str(expect_value)), message + + +def endswith(check_value: Text, expect_value: Any, message: Text = ""): + assert str(check_value).endswith(str(expect_value)), message diff --git a/httprunner/builtin/functions.py b/httprunner/builtin/functions.py new file mode 100644 index 0000000..00f964f --- /dev/null +++ b/httprunner/builtin/functions.py @@ -0,0 +1,35 @@ +""" +Built-in functions used in YAML/JSON testcases. +""" + +import datetime +import random +import string +import time + +from httprunner.exceptions import ParamsError + + +def gen_random_string(str_len): + """generate random string with specified length""" + return "".join( + random.choice(string.ascii_letters + string.digits) for _ in range(str_len) + ) + + +def get_timestamp(str_len=13): + """get timestamp string, length can only between 0 and 16""" + if isinstance(str_len, int) and 0 < str_len < 17: + return str(time.time()).replace(".", "")[:str_len] + + raise ParamsError("timestamp length can only between 0 and 16.") + + +def get_current_date(fmt="%Y-%m-%d"): + """get current date, default format is %Y-%m-%d""" + return datetime.datetime.now().strftime(fmt) + + +def sleep(n_secs): + """sleep n seconds""" + time.sleep(n_secs) diff --git a/httprunner/cli.py b/httprunner/cli.py new file mode 100644 index 0000000..427ee4b --- /dev/null +++ b/httprunner/cli.py @@ -0,0 +1,152 @@ +import argparse +import enum +import os +import sys + +import pytest +from loguru import logger + +from httprunner import __description__, __version__ +from httprunner.compat import ensure_cli_args +from httprunner.make import init_make_parser, main_make +from httprunner.utils import ga4_client, init_logger, init_sentry_sdk + +init_sentry_sdk() + + +def init_parser_run(subparsers): + sub_parser_run = subparsers.add_parser( + "run", help="Make HttpRunner testcases and run with pytest." + ) + return sub_parser_run + + +def main_run(extra_args) -> enum.IntEnum: + ga4_client.send_event("hrun") + # keep compatibility with v2 + extra_args = ensure_cli_args(extra_args) + + tests_path_list = [] + extra_args_new = [] + for item in extra_args: + if not os.path.exists(item): + # item is not file/folder path + extra_args_new.append(item) + else: + # item is file/folder path + tests_path_list.append(item) + + if len(tests_path_list) == 0: + # has not specified any testcase path + logger.error(f"No valid testcase path in cli arguments: {extra_args}") + sys.exit(1) + + testcase_path_list = main_make(tests_path_list) + if not testcase_path_list: + logger.error("No valid testcases found, exit 1.") + sys.exit(1) + + if "--tb=short" not in extra_args_new: + extra_args_new.append("--tb=short") + + extra_args_new.extend(testcase_path_list) + logger.info(f"start to run tests with pytest. 
HttpRunner version: {__version__}") + return pytest.main(extra_args_new) + + +def main(): + """API test: parse command line options and run commands.""" + parser = argparse.ArgumentParser(description=__description__) + parser.add_argument( + "-V", "--version", dest="version", action="store_true", help="show version" + ) + + subparsers = parser.add_subparsers(help="sub-command help") + init_parser_run(subparsers) + sub_parser_make = init_make_parser(subparsers) + + if len(sys.argv) == 1: + # httprunner + parser.print_help() + sys.exit(0) + elif len(sys.argv) == 2: + # print help for sub-commands + if sys.argv[1] in ["-V", "--version"]: + # httprunner -V + print(f"{__version__}") + elif sys.argv[1] in ["-h", "--help"]: + # httprunner -h + parser.print_help() + elif sys.argv[1] == "run": + # httprunner run + pytest.main(["-h"]) + elif sys.argv[1] == "make": + # httprunner make + sub_parser_make.print_help() + sys.exit(0) + elif ( + len(sys.argv) == 3 and sys.argv[1] == "run" and sys.argv[2] in ["-h", "--help"] + ): + # httprunner run -h + pytest.main(["-h"]) + sys.exit(0) + + extra_args = [] + if len(sys.argv) >= 2 and sys.argv[1] in ["run"]: + args, extra_args = parser.parse_known_args() + else: + args = parser.parse_args() + + if args.version: + print(f"{__version__}") + sys.exit(0) + + # set log level + try: + index = extra_args.index("--log-level") + if index < len(extra_args) - 1: + level = extra_args[index + 1] + else: + # not specify log level value + level = "INFO" # default + except ValueError: + level = "INFO" # default + + init_logger(level) + + if sys.argv[1] == "run": + sys.exit(main_run(extra_args)) + elif sys.argv[1] == "make": + main_make(args.testcase_path) + + +def main_hrun_alias(): + """command alias + hrun = httprunner run + """ + if len(sys.argv) == 2: + if sys.argv[1] in ["-V", "--version"]: + # hrun -V + sys.argv = ["httprunner", "-V"] + elif sys.argv[1] in ["-h", "--help"]: + pytest.main(["-h"]) + sys.exit(0) + else: + # hrun /path/to/testcase + sys.argv.insert(1, "run") + else: + sys.argv.insert(1, "run") + + main() + + +def main_make_alias(): + """command alias + hmake = httprunner make + """ + sys.argv.insert(1, "make") + main() + + +if __name__ == "__main__": + main() diff --git a/httprunner/cli_test.py b/httprunner/cli_test.py new file mode 100644 index 0000000..8a95ef0 --- /dev/null +++ b/httprunner/cli_test.py @@ -0,0 +1,62 @@ +import io +import os +import sys +import unittest + +import pytest + +from httprunner import loader +from httprunner.cli import main, main_run + + +class TestCli(unittest.TestCase): + def setUp(self): + self.captured_output = io.StringIO() + sys.stdout = self.captured_output + + def tearDown(self): + sys.stdout = sys.__stdout__ # Reset redirect. 
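+
+    # the version/help code paths in main() terminate via sys.exit(), so the
+    # tests below assert on the SystemExit code and then inspect the stdout
+    # captured by setUp()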
+ + def test_show_version(self): + sys.argv = ["hrun", "-V"] + + with self.assertRaises(SystemExit) as cm: + main() + + self.assertEqual(cm.exception.code, 0) + + from httprunner import __version__ + + self.assertIn(__version__, self.captured_output.getvalue().strip()) + + def test_show_help(self): + sys.argv = ["hrun", "-h"] + + with self.assertRaises(SystemExit) as cm: + main() + + self.assertEqual(cm.exception.code, 0) + + from httprunner import __description__ + + self.assertIn(__description__, self.captured_output.getvalue().strip()) + + def test_debug_pytest(self): + cwd = os.getcwd() + try: + os.chdir(os.path.join(cwd, "examples", "postman_echo")) + exit_code = pytest.main( + ["-s", "request_methods/request_with_testcase_reference_test.py"] + ) + self.assertEqual(exit_code, 0) + finally: + os.chdir(cwd) + + def test_run_testcase_with_abnormal_path(self): + loader.project_meta = None + exit_code = main_run(["examples/data/a-b.c/2 3.yml"]) + self.assertEqual(exit_code, 0) + self.assertTrue(os.path.exists("examples/data/a_b_c/__init__.py")) + self.assertTrue(os.path.exists("examples/data/debugtalk.py")) + self.assertTrue(os.path.exists("examples/data/a_b_c/T1_test.py")) + self.assertTrue(os.path.exists("examples/data/a_b_c/T2_3_test.py")) diff --git a/httprunner/client.py b/httprunner/client.py new file mode 100644 index 0000000..8e45425 --- /dev/null +++ b/httprunner/client.py @@ -0,0 +1,238 @@ +import json +import time + +import requests +import urllib3 +from loguru import logger +from requests import Request, Response +from requests.exceptions import ( + InvalidSchema, + InvalidURL, + MissingSchema, + RequestException, +) + +from httprunner.models import RequestData, ResponseData +from httprunner.models import SessionData, ReqRespData +from httprunner.utils import lower_dict_keys, omit_long_data + +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + + +class ApiResponse(Response): + def raise_for_status(self): + if hasattr(self, "error") and self.error: + raise self.error + Response.raise_for_status(self) + + +def get_req_resp_record(resp_obj: Response) -> ReqRespData: + """get request and response info from Response() object.""" + + def log_print(req_or_resp, r_type): + msg = f"\n================== {r_type} details ==================\n" + for key, value in req_or_resp.dict().items(): + if isinstance(value, dict) or isinstance(value, list): + value = json.dumps(value, indent=4, ensure_ascii=False) + + msg += "{:<8} : {}\n".format(key, value) + logger.debug(msg) + + # record actual request info + request_headers = dict(resp_obj.request.headers) + request_cookies = resp_obj.request._cookies.get_dict() + + request_body = resp_obj.request.body + if request_body is not None: + try: + request_body = json.loads(request_body) + except json.JSONDecodeError: + # str: a=1&b=2 + pass + except UnicodeDecodeError: + # bytes/bytearray: request body in protobuf + pass + except TypeError: + # neither str nor bytes/bytearray, e.g. 
+ pass + + request_content_type = lower_dict_keys(request_headers).get("content-type") + if request_content_type and "multipart/form-data" in request_content_type: + # upload file type + request_body = "upload file stream (OMITTED)" + + request_data = RequestData( + method=resp_obj.request.method, + url=resp_obj.request.url, + headers=request_headers, + cookies=request_cookies, + body=request_body, + ) + + # log request details in debug mode + log_print(request_data, "request") + + # record response info + resp_headers = dict(resp_obj.headers) + lower_resp_headers = lower_dict_keys(resp_headers) + content_type = lower_resp_headers.get("content-type", "") + + if "image" in content_type: + # response is image type, record bytes content only + response_body = resp_obj.content + else: + try: + # try to record json data + response_body = resp_obj.json() + except ValueError: + # only record at most 512 text charactors + resp_text = resp_obj.text + response_body = omit_long_data(resp_text) + + response_data = ResponseData( + status_code=resp_obj.status_code, + cookies=resp_obj.cookies or {}, + encoding=resp_obj.encoding, + headers=resp_headers, + content_type=content_type, + body=response_body, + ) + + # log response details in debug mode + log_print(response_data, "response") + + req_resp_data = ReqRespData(request=request_data, response=response_data) + return req_resp_data + + +class HttpSession(requests.Session): + """ + Class for performing HTTP requests and holding (session-) cookies between requests (in order + to be able to log in and out of websites). Each request is logged so that HttpRunner can + display statistics. + + This is a slightly extended version of `python-request `_'s + :py:class:`requests.Session` class and mostly this class works exactly the same. + """ + + def __init__(self): + super(HttpSession, self).__init__() + self.data = SessionData() + + def update_last_req_resp_record(self, resp_obj): + """ + update request and response info from Response() object. + """ + # TODO: fix + self.data.req_resps.pop() + self.data.req_resps.append(get_req_resp_record(resp_obj)) + + def request(self, method, url, name=None, **kwargs): + """ + Constructs and sends a :py:class:`requests.Request`. + Returns :py:class:`requests.Response` object. + + :param method: + method for the new :class:`Request` object. + :param url: + URL for the new :class:`Request` object. + :param name: (optional) + Placeholder, make compatible with Locust's HttpSession + :param params: (optional) + Dictionary or bytes to be sent in the query string for the :class:`Request`. + :param data: (optional) + Dictionary or bytes to send in the body of the :class:`Request`. + :param headers: (optional) + Dictionary of HTTP Headers to send with the :class:`Request`. + :param cookies: (optional) + Dict or CookieJar object to send with the :class:`Request`. + :param files: (optional) + Dictionary of ``'filename': file-like-objects`` for multipart encoding upload. + :param auth: (optional) + Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth. + :param timeout: (optional) + How long to wait for the server to send data before giving up, as a float, or \ + a (`connect timeout, read timeout `_) tuple. + :type timeout: float or tuple + :param allow_redirects: (optional) + Set to True by default. + :type allow_redirects: bool + :param proxies: (optional) + Dictionary mapping protocol to the URL of the proxy. + :param stream: (optional) + whether to immediately download the response content. Defaults to ``False``. 
+ :param verify: (optional) + if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided. + :param cert: (optional) + if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. + """ + self.data = SessionData() + + # timeout default to 120 seconds + kwargs.setdefault("timeout", 120) + + # set stream to True, in order to get client/server IP/Port + kwargs["stream"] = True + + start_timestamp = time.time() + response = self._send_request_safe_mode(method, url, **kwargs) + response_time_ms = round((time.time() - start_timestamp) * 1000, 2) + + try: + client_ip, client_port = response.raw._connection.sock.getsockname() + self.data.address.client_ip = client_ip + self.data.address.client_port = client_port + logger.debug(f"client IP: {client_ip}, Port: {client_port}") + except Exception: + pass + + try: + server_ip, server_port = response.raw._connection.sock.getpeername() + self.data.address.server_ip = server_ip + self.data.address.server_port = server_port + logger.debug(f"server IP: {server_ip}, Port: {server_port}") + except Exception: + pass + + # get length of the response content + content_size = int(dict(response.headers).get("content-length") or 0) + + # record the consumed time + self.data.stat.response_time_ms = response_time_ms + self.data.stat.elapsed_ms = response.elapsed.microseconds / 1000.0 + self.data.stat.content_size = content_size + + # record request and response histories, include 30X redirection + response_list = response.history + [response] + self.data.req_resps = [ + get_req_resp_record(resp_obj) for resp_obj in response_list + ] + + try: + response.raise_for_status() + except RequestException as ex: + logger.error(f"{str(ex)}") + else: + logger.info( + f"status_code: {response.status_code}, " + f"response_time(ms): {response_time_ms} ms, " + f"response_length: {content_size} bytes" + ) + + return response + + def _send_request_safe_mode(self, method, url, **kwargs): + """ + Send a HTTP request, and catch any exception that might occur due to connection problems. + Safe mode has been removed from requests 1.x. 
+ """ + try: + return requests.Session.request(self, method, url, **kwargs) + except (MissingSchema, InvalidSchema, InvalidURL): + raise + except RequestException as ex: + resp = ApiResponse() + resp.error = ex + resp.status_code = 0 # with this status_code, content returns None + resp.request = Request(method, url).prepare() + return resp diff --git a/httprunner/client_test.py b/httprunner/client_test.py new file mode 100644 index 0000000..466d090 --- /dev/null +++ b/httprunner/client_test.py @@ -0,0 +1,73 @@ +import unittest + +from httprunner.client import HttpSession +from httprunner.utils import HTTP_BIN_URL + + +class TestHttpSession(unittest.TestCase): + def setUp(self): + self.session = HttpSession() + + def test_request_http(self): + self.session.request("get", f"{HTTP_BIN_URL}/get") + address = self.session.data.address + self.assertGreater(len(address.server_ip), 0) + self.assertEqual(address.server_port, 80) + self.assertGreater(len(address.client_ip), 0) + self.assertGreater(address.client_port, 10000) + + def test_request_https(self): + self.session.request("get", "https://postman-echo.com/get") + address = self.session.data.address + self.assertGreater(len(address.server_ip), 0) + self.assertEqual(address.server_port, 443) + self.assertGreater(len(address.client_ip), 0) + self.assertGreater(address.client_port, 10000) + + def test_request_http_allow_redirects(self): + self.session.request( + "get", + f"{HTTP_BIN_URL}/redirect-to?url=https%3A%2F%2Fgithub.com", + allow_redirects=True, + ) + address = self.session.data.address + self.assertNotEqual(address.server_ip, "N/A") + self.assertEqual(address.server_port, 443) + self.assertNotEqual(address.server_ip, "N/A") + self.assertGreater(address.client_port, 10000) + + def test_request_https_allow_redirects(self): + self.session.request( + "get", + "https://postman-echo.com/redirect-to?url=https%3A%2F%2Fgithub.com", + allow_redirects=True, + ) + address = self.session.data.address + self.assertNotEqual(address.server_ip, "N/A") + self.assertEqual(address.server_port, 443) + self.assertNotEqual(address.server_ip, "N/A") + self.assertGreater(address.client_port, 10000) + + def test_request_http_not_allow_redirects(self): + self.session.request( + "get", + f"{HTTP_BIN_URL}/redirect-to?url=https%3A%2F%2Fgithub.com", + allow_redirects=False, + ) + address = self.session.data.address + self.assertEqual(address.server_ip, "N/A") + self.assertEqual(address.server_port, 0) + self.assertEqual(address.client_ip, "N/A") + self.assertEqual(address.client_port, 0) + + def test_request_https_not_allow_redirects(self): + self.session.request( + "get", + "https://postman-echo.com/redirect-to?url=https%3A%2F%2Fgithub.com", + allow_redirects=False, + ) + address = self.session.data.address + self.assertEqual(address.server_ip, "N/A") + self.assertEqual(address.server_port, 0) + self.assertEqual(address.client_ip, "N/A") + self.assertEqual(address.client_port, 0) diff --git a/httprunner/compat.py b/httprunner/compat.py new file mode 100644 index 0000000..755d600 --- /dev/null +++ b/httprunner/compat.py @@ -0,0 +1,385 @@ +""" +This module handles compatibility issues between testcase format v2, v3 and v4. 
+""" +import os +import sys +from typing import List, Dict, Text, Union, Any + +from loguru import logger + +from httprunner import exceptions +from httprunner.loader import load_project_meta, convert_relative_project_root_dir +from httprunner.parser import parse_data +from httprunner.utils import sort_dict_by_custom_order + + +def convert_variables( + raw_variables: Union[Dict, Text], test_path: Text +) -> Dict[Text, Any]: + if isinstance(raw_variables, Dict): + return raw_variables + + elif isinstance(raw_variables, Text): + # get variables by function, e.g. ${get_variables()} + project_meta = load_project_meta(test_path) + variables = parse_data(raw_variables, {}, project_meta.functions) + + return variables + + else: + raise exceptions.TestCaseFormatError( + f"Invalid variables format: {raw_variables}" + ) + + +def _convert_request(request: Dict) -> Dict: + if "body" in request: + content_type = "" + if "headers" in request and "Content-Type" in request["headers"]: + content_type = request["headers"]["Content-Type"] + if content_type.startswith("application/json"): + request["json"] = request.pop("body") + else: + request["data"] = request.pop("body") + return _sort_request_by_custom_order(request) + + +def _convert_jmespath(raw: Text) -> Text: + if not isinstance(raw, Text): + raise exceptions.TestCaseFormatError(f"Invalid jmespath extractor: {raw}") + + # content.xx/json.xx => body.xx + if raw.startswith("content"): + raw = f"body{raw[len('content'):]}" + elif raw.startswith("json"): + raw = f"body{raw[len('json'):]}" + + raw_list = raw.split(".") + for i, item in enumerate(raw_list): + item = item.strip('"') + if item.lower().startswith("content-") or item.lower() in ["user-agent"]: + # add quotes for some field in white list + # e.g. headers.Content-Type => headers."Content-Type" + raw_list[i] = f'"{item}"' + + return ".".join(raw_list) + + +def _convert_extractors(extractors: Union[List, Dict]) -> Dict: + """convert extract list(v2) to dict(v3) + + Args: + extractors: [{"varA": "content.varA"}, {"varB": "json.varB"}] + + Returns: + {"varA": "body.varA", "varB": "body.varB"} + + """ + v3_extractors: Dict = {} + + if isinstance(extractors, List): + # [{"varA": "content.varA"}, {"varB": "json.varB"}] + for extractor in extractors: + if not isinstance(extractor, Dict): + logger.error(f"Invalid extractor: {extractors}") + sys.exit(1) + for k, v in extractor.items(): + v3_extractors[k] = v + elif isinstance(extractors, Dict): + # {"varA": "body.varA", "varB": "body.varB"} + v3_extractors = extractors + else: + logger.error(f"Invalid extractor: {extractors}") + sys.exit(1) + + for k, v in v3_extractors.items(): + v3_extractors[k] = _convert_jmespath(v) + + return v3_extractors + + +def _convert_validators(validators: List) -> List: + for v in validators: + if "check" in v and "expect" in v: + # format1: {"check": "content.abc", "assert": "eq", "expect": 201} + v["check"] = _convert_jmespath(v["check"]) + + elif len(v) == 1: + # format2: {'eq': ['status_code', 201]} + comparator = list(v.keys())[0] + v[comparator][0] = _convert_jmespath(v[comparator][0]) + + return validators + + +def _sort_request_by_custom_order(request: Dict) -> Dict: + custom_order = [ + "method", + "url", + "params", + "headers", + "cookies", + "data", + "json", + "files", + "timeout", + "allow_redirects", + "proxies", + "verify", + "stream", + "auth", + "cert", + ] + return sort_dict_by_custom_order(request, custom_order) + + +def _sort_step_by_custom_order(step: Dict) -> Dict: + custom_order = [ + "name", + 
"variables", + "request", + "testcase", + "setup_hooks", + "teardown_hooks", + "extract", + "validate", + "validate_script", + ] + return sort_dict_by_custom_order(step, custom_order) + + +def _ensure_step_attachment(step: Dict) -> Dict: + test_dict = { + "name": step["name"], + } + + if "request" in step: + test_dict["request"] = _convert_request(step["request"]) + + if "variables" in step: + test_dict["variables"] = step["variables"] + + if "setup_hooks" in step: + test_dict["setup_hooks"] = step["setup_hooks"] + + if "teardown_hooks" in step: + test_dict["teardown_hooks"] = step["teardown_hooks"] + + if "extract" in step: + test_dict["extract"] = _convert_extractors(step["extract"]) + + if "export" in step: + test_dict["export"] = step["export"] + + if "validate" in step: + if not isinstance(step["validate"], List): + raise exceptions.TestCaseFormatError( + f'Invalid teststep validate: {step["validate"]}' + ) + test_dict["validate"] = _convert_validators(step["validate"]) + + if "validate_script" in step: + test_dict["validate_script"] = step["validate_script"] + + return test_dict + + +def ensure_testcase_v4_api(api_content: Dict) -> Dict: + logger.info("convert api in v2/v3 to testcase format v4") + + teststep = { + "request": _convert_request(api_content["request"]), + } + teststep.update(_ensure_step_attachment(api_content)) + + teststep = _sort_step_by_custom_order(teststep) + + config = {"name": api_content["name"]} + extract_variable_names: List = list(teststep.get("extract", {}).keys()) + if extract_variable_names: + config["export"] = extract_variable_names + + return { + "config": config, + "teststeps": [teststep], + } + + +def ensure_testcase_v4(test_content: Dict) -> Dict: + logger.info("ensure compatibility with testcase format v2/v3") + + v3_content = {"config": test_content["config"], "teststeps": []} + + if "teststeps" not in test_content: + logger.error(f"Miss teststeps: {test_content}") + sys.exit(1) + + if not isinstance(test_content["teststeps"], list): + logger.error( + f'teststeps should be list type, got {type(test_content["teststeps"])}: {test_content["teststeps"]}' + ) + sys.exit(1) + + for step in test_content["teststeps"]: + teststep = {} + + if "request" in step: + pass + elif "api" in step: + teststep["testcase"] = step.pop("api") + elif "testcase" in step: + teststep["testcase"] = step.pop("testcase") + else: + raise exceptions.TestCaseFormatError(f"Invalid teststep: {step}") + + teststep.update(_ensure_step_attachment(step)) + + teststep = _sort_step_by_custom_order(teststep) + v3_content["teststeps"].append(teststep) + + return v3_content + + +def ensure_cli_args(args: List) -> List: + """ensure compatibility with deprecated cli args in v2""" + # remove deprecated --failfast + if "--failfast" in args: + logger.warning("remove deprecated argument: --failfast") + args.pop(args.index("--failfast")) + + # convert --report-file to --html + if "--report-file" in args: + logger.warning("replace deprecated argument --report-file with --html") + index = args.index("--report-file") + args[index] = "--html" + args.append("--self-contained-html") + + # keep compatibility with --save-tests in v2 + if "--save-tests" in args: + logger.warning( + "generate conftest.py keep compatibility with --save-tests in v2" + ) + args.pop(args.index("--save-tests")) + _generate_conftest_for_summary(args) + + return args + + +def _generate_conftest_for_summary(args: List): + + for arg in args: + if os.path.exists(arg): + test_path = arg + # FIXME: several test paths maybe specified + 
break + else: + logger.error(f"No valid test path specified! \nargs: {args}") + sys.exit(1) + + conftest_content = '''# NOTICE: Generated By HttpRunner. +import json +import os +import time + +import pytest +from loguru import logger + +from httprunner.utils import get_platform, ExtendJSONEncoder + + +@pytest.fixture(scope="session", autouse=True) +def session_fixture(request): + """setup and teardown each task""" + logger.info("start running testcases ...") + + start_at = time.time() + + yield + + logger.info("task finished, generate task summary for --save-tests") + + summary = { + "success": True, + "stat": { + "testcases": {"total": 0, "success": 0, "fail": 0}, + "teststeps": {"total": 0, "failures": 0, "successes": 0}, + }, + "time": {"start_at": start_at, "duration": time.time() - start_at}, + "platform": get_platform(), + "details": [], + } + + for item in request.node.items: + testcase_summary = item.instance.get_summary() + summary["success"] &= testcase_summary.success + + summary["stat"]["testcases"]["total"] += 1 + summary["stat"]["teststeps"]["total"] += len(testcase_summary.step_results) + if testcase_summary.success: + summary["stat"]["testcases"]["success"] += 1 + summary["stat"]["teststeps"]["successes"] += len( + testcase_summary.step_results + ) + else: + summary["stat"]["testcases"]["fail"] += 1 + summary["stat"]["teststeps"]["successes"] += ( + len(testcase_summary.step_results) - 1 + ) + summary["stat"]["teststeps"]["failures"] += 1 + + testcase_summary_json = testcase_summary.dict() + testcase_summary_json["records"] = testcase_summary_json.pop("step_results") + summary["details"].append(testcase_summary_json) + + summary_path = r"{{SUMMARY_PATH_PLACEHOLDER}}" + summary_dir = os.path.dirname(summary_path) + os.makedirs(summary_dir, exist_ok=True) + + with open(summary_path, "w", encoding="utf-8") as f: + json.dump(summary, f, indent=4, ensure_ascii=False, cls=ExtendJSONEncoder) + + logger.info(f"generated task summary: {summary_path}") + +''' + + project_meta = load_project_meta(test_path) + project_root_dir = project_meta.RootDir + conftest_path = os.path.join(project_root_dir, "conftest.py") + + test_path = os.path.abspath(test_path) + logs_dir_path = os.path.join(project_root_dir, "logs") + test_path_relative_path = convert_relative_project_root_dir(test_path) + + if os.path.isdir(test_path): + file_foder_path = os.path.join(logs_dir_path, test_path_relative_path) + dump_file_name = "all.summary.json" + else: + file_relative_folder_path, test_file = os.path.split(test_path_relative_path) + file_foder_path = os.path.join(logs_dir_path, file_relative_folder_path) + test_file_name, _ = os.path.splitext(test_file) + dump_file_name = f"{test_file_name}.summary.json" + + summary_path = os.path.join(file_foder_path, dump_file_name) + conftest_content = conftest_content.replace( + "{{SUMMARY_PATH_PLACEHOLDER}}", summary_path + ) + + dir_path = os.path.dirname(conftest_path) + if not os.path.exists(dir_path): + os.makedirs(dir_path) + + with open(conftest_path, "w", encoding="utf-8") as f: + f.write(conftest_content) + + logger.info("generated conftest.py to generate summary.json") + + +def ensure_path_sep(path: Text) -> Text: + """ensure compatibility with different path separators of Linux and Windows""" + if "/" in path: + path = os.sep.join(path.split("/")) + + if "\\" in path: + path = os.sep.join(path.split("\\")) + + return path diff --git a/httprunner/compat_test.py b/httprunner/compat_test.py new file mode 100644 index 0000000..ef4a60d --- /dev/null +++ 
b/httprunner/compat_test.py @@ -0,0 +1,266 @@ +import os +import unittest + +from httprunner import compat, exceptions, loader +from httprunner.utils import HTTP_BIN_URL + + +class TestCompat(unittest.TestCase): + def setUp(self): + loader.project_meta = None + + def test_convert_variables(self): + raw_variables = {"var1": 1, "var2": "val2"} + self.assertEqual( + compat.convert_variables(raw_variables, "examples/data/a-b.c/1.yml"), + {"var1": 1, "var2": "val2"}, + ) + raw_variables = "${get_variables()}" + self.assertEqual( + compat.convert_variables(raw_variables, "examples/data/a-b.c/1.yml"), + {"foo1": "session_bar1"}, + ) + + with self.assertRaises(exceptions.TestCaseFormatError): + raw_variables = [{"var1": 1}, {"var2": "val2", "var3": 3}] + compat.convert_variables(raw_variables, "examples/data/a-b.c/1.yml") + with self.assertRaises(exceptions.TestCaseFormatError): + compat.convert_variables(None, "examples/data/a-b.c/1.yml") + + def test_convert_request(self): + request_with_json_body = { + "method": "POST", + "url": "https://postman-echo.com/post", + "headers": {"Content-Type": "application/json"}, + "body": {"k1": "v1", "k2": "v2"}, + } + self.assertEqual( + compat._convert_request(request_with_json_body), + { + "method": "POST", + "url": "https://postman-echo.com/post", + "headers": {"Content-Type": "application/json"}, + "json": {"k1": "v1", "k2": "v2"}, + }, + ) + + request_with_text_body = { + "method": "POST", + "url": "https://postman-echo.com/post", + "headers": {"Content-Type": "text/plain"}, + "body": "have a nice day", + } + self.assertEqual( + compat._convert_request(request_with_text_body), + { + "method": "POST", + "url": "https://postman-echo.com/post", + "headers": {"Content-Type": "text/plain"}, + "data": "have a nice day", + }, + ) + + def test_convert_jmespath(self): + self.assertEqual(compat._convert_jmespath("content.abc"), "body.abc") + self.assertEqual(compat._convert_jmespath("json.abc"), "body.abc") + self.assertEqual( + compat._convert_jmespath("headers.Content-Type"), 'headers."Content-Type"' + ) + self.assertEqual( + compat._convert_jmespath("headers.User-Agent"), 'headers."User-Agent"' + ) + self.assertEqual( + compat._convert_jmespath('headers."Content-Type"'), 'headers."Content-Type"' + ) + self.assertEqual( + compat._convert_jmespath("body.users[-1]"), + "body.users[-1]", + ) + self.assertEqual( + compat._convert_jmespath("body.result.WorkNode_-1"), + "body.result.WorkNode_-1", + ) + + def test_convert_extractors(self): + self.assertEqual( + compat._convert_extractors( + [{"varA": "content.varA"}, {"varB": "json.varB"}] + ), + {"varA": "body.varA", "varB": "body.varB"}, + ) + self.assertEqual( + compat._convert_extractors([{"varA": "content[0].varA"}]), + {"varA": "body[0].varA"}, + ) + self.assertEqual( + compat._convert_extractors({"varA": "content[0].varA"}), + {"varA": "body[0].varA"}, + ) + + def test_convert_validators(self): + self.assertEqual( + compat._convert_validators( + [{"check": "content.abc", "assert": "eq", "expect": 201}] + ), + [{"check": "body.abc", "assert": "eq", "expect": 201}], + ) + self.assertEqual( + compat._convert_validators([{"eq": ["content.abc", 201]}]), + [{"eq": ["body.abc", 201]}], + ) + self.assertEqual( + compat._convert_validators([{"eq": ["content[0].name", 201]}]), + [{"eq": ["body[0].name", 201]}], + ) + + def test_ensure_testcase_v4_api(self): + api_content = { + "name": "get with params", + "request": { + "method": "GET", + "url": "/get", + "params": {"foo1": "bar1", "foo2": "bar2"}, + "headers": 
{"User-Agent": "HttpRunner/3.0"}, + }, + "extract": [{"varA": "content.varA"}, {"user_agent": "headers.User-Agent"}], + "validate": [{"eq": ["content.varB", 200]}, {"lt": ["json[0].varC", 0]}], + } + self.assertEqual( + compat.ensure_testcase_v4_api(api_content), + { + "config": { + "name": "get with params", + "export": ["varA", "user_agent"], + }, + "teststeps": [ + { + "name": "get with params", + "request": { + "method": "GET", + "url": "/get", + "params": {"foo1": "bar1", "foo2": "bar2"}, + "headers": {"User-Agent": "HttpRunner/3.0"}, + }, + "extract": { + "varA": "body.varA", + "user_agent": 'headers."User-Agent"', + }, + "validate": [ + {"eq": ["body.varB", 200]}, + {"lt": ["body[0].varC", 0]}, + ], + } + ], + }, + ) + + def test_ensure_testcase_v4(self): + testcase_content = { + "config": {"name": "xxx", "base_url": HTTP_BIN_URL}, + "teststeps": [ + { + "name": "get with params", + "request": { + "method": "GET", + "url": "/get", + "params": {"foo1": "bar1", "foo2": "bar2"}, + "headers": {"User-Agent": "HttpRunner/3.0"}, + }, + "extract": [ + {"varA": "content.varA"}, + {"user_agent": "headers.User-Agent"}, + ], + "validate": [ + {"eq": ["content.varB", 200]}, + {"lt": ["json[0].varC", 0]}, + ], + } + ], + } + self.assertEqual( + compat.ensure_testcase_v4(testcase_content), + { + "config": {"name": "xxx", "base_url": HTTP_BIN_URL}, + "teststeps": [ + { + "name": "get with params", + "request": { + "method": "GET", + "url": "/get", + "params": {"foo1": "bar1", "foo2": "bar2"}, + "headers": {"User-Agent": "HttpRunner/3.0"}, + }, + "extract": { + "varA": "body.varA", + "user_agent": 'headers."User-Agent"', + }, + "validate": [ + {"eq": ["body.varB", 200]}, + {"lt": ["body[0].varC", 0]}, + ], + } + ], + }, + ) + + def test_ensure_cli_args(self): + args1 = ["examples/postman_echo/request_methods/hardcode.yml", "--failfast"] + self.assertEqual( + compat.ensure_cli_args(args1), + ["examples/postman_echo/request_methods/hardcode.yml"], + ) + + args2 = ["examples/postman_echo/request_methods/hardcode.yml", "--save-tests"] + self.assertEqual( + compat.ensure_cli_args(args2), + ["examples/postman_echo/request_methods/hardcode.yml"], + ) + self.assertTrue(os.path.isfile("examples/postman_echo/conftest.py")) + + args3 = [ + "examples/postman_echo/request_methods/hardcode.yml", + "--report-file", + "report.html", + ] + self.assertEqual( + compat.ensure_cli_args(args3), + [ + "examples/postman_echo/request_methods/hardcode.yml", + "--html", + "report.html", + "--self-contained-html", + ], + ) + + args4 = [ + "examples/postman_echo/request_methods/hardcode.yml", + "--failfast", + "--save-tests", + "--report-file", + "report.html", + ] + self.assertEqual( + compat.ensure_cli_args(args4), + [ + "examples/postman_echo/request_methods/hardcode.yml", + "--html", + "report.html", + "--self-contained-html", + ], + ) + + def test_ensure_file_path(self): + self.assertEqual( + compat.ensure_path_sep("demo\\test.yml"), os.sep.join(["demo", "test.yml"]) + ) + self.assertEqual( + compat.ensure_path_sep(os.path.join(os.getcwd(), "demo\\test.yml")), + os.path.join(os.getcwd(), os.sep.join(["demo", "test.yml"])), + ) + self.assertEqual( + compat.ensure_path_sep("demo/test.yml"), os.sep.join(["demo", "test.yml"]) + ) + self.assertEqual( + compat.ensure_path_sep(os.path.join(os.getcwd(), "demo/test.yml")), + os.path.join(os.getcwd(), os.sep.join(["demo", "test.yml"])), + ) diff --git a/httprunner/config.py b/httprunner/config.py new file mode 100644 index 0000000..f68a5c6 --- /dev/null +++ b/httprunner/config.py @@ 
-0,0 +1,138 @@ +import copy +import inspect +from typing import Text + +from httprunner.models import TConfig, TConfigThrift, TConfigDB, ProtoType, VariablesMapping + + +class ConfigThrift(object): + def __init__(self, config: TConfig) -> None: + self.__config = config + self.__config.thrift = TConfigThrift() + + def psm(self, psm: Text) -> "ConfigThrift": + self.__config.thrift.psm = psm + return self + + def env(self, env: Text) -> "ConfigThrift": + self.__config.thrift.env = env + return self + + def cluster(self, cluster: Text) -> "ConfigThrift": + self.__config.thrift.cluster = cluster + return self + + def service_name(self, service_name: Text) -> "ConfigThrift": + self.__config.thrift.service_name = service_name + return self + + def method(self, method: Text) -> "ConfigThrift": + self.__config.thrift.method = method + return self + + def ip(self, service_name_: Text) -> "ConfigThrift": + self.__config.thrift.service_name = service_name_ + return self + + def port(self, port: int) -> "ConfigThrift": + self.__config.thrift.port = port + return self + + def timeout(self, timeout: int) -> "ConfigThrift": + self.__config.thrift.timeout = timeout + return self + + def proto_type(self, proto_type: ProtoType) -> "ConfigThrift": + self.__config.thrift.proto_type = proto_type + return self + + def trans_type(self, trans_type: ProtoType) -> "ConfigThrift": + self.__config.thrift.trans_type = trans_type + return self + + def struct(self) -> TConfig: + return self.__config + + +class ConfigDB(object): + def __init__(self, config: TConfig): + self.__config = config + self.__config.db = TConfigDB() + + def psm(self, psm): + self.__config.db.psm = psm + return self + + def user(self, user): + self.__config.db.user = user + return self + + def password(self, password): + self.__config.db.password = password + return self + + def ip(self, ip): + self.__config.db.ip = ip + return self + + def port(self, port: int): + self.__config.db.port = port + return self + + def database(self, database: Text): + self.__config.db.database = database + return self + + def struct(self) -> TConfig: + return self.__config + + +class Config(object): + def __init__(self, name: Text) -> None: + caller_frame = inspect.stack()[1] + self.__name: Text = name + self.__base_url: Text = "" + self.__variables: VariablesMapping = {} + self.__config = TConfig(name=name, path=caller_frame.filename) + + @property + def name(self) -> Text: + return self.__config.name + + @property + def path(self) -> Text: + return self.__config.path + + def variables(self, **variables) -> "Config": + self.__variables.update(variables) + return self + + def base_url(self, base_url: Text) -> "Config": + self.__base_url = base_url + return self + + def verify(self, verify: bool) -> "Config": + self.__config.verify = verify + return self + + def export(self, *export_var_name: Text) -> "Config": + self.__config.export.extend(export_var_name) + self.__config.export = list(set(self.__config.export)) + return self + + def struct(self) -> TConfig: + self.__init() + return self.__config + + def thrift(self) -> ConfigThrift: + self.__init() + return ConfigThrift(self.__config) + + def db(self) -> ConfigDB: + self.__init() + return ConfigDB(self.__config) + + def __init(self) -> None: + self.__config.name = self.__name + self.__config.base_url = self.__base_url + self.__config.variables = copy.copy(self.__variables) diff --git a/httprunner/database/engine.py b/httprunner/database/engine.py new file mode 100644 index 0000000..8a99ded --- /dev/null +++ 
b/httprunner/database/engine.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +import datetime +import json + +from sqlalchemy import create_engine +from sqlalchemy.orm import sessionmaker + + +class DBEngine(object): + def __init__(self, db_uri): + """ + db_uri = f'mysql+pymysql://{username}:{password}@{host}:{port}/{database}?charset=utf8mb4' + + """ + engine = create_engine(db_uri) + self.session = sessionmaker(bind=engine, autocommit=True)() + + @staticmethod + def value_decode(row: dict): + """ + Try to decode value of table + datetime.datetime-->string + datetime.date-->string + json str-->dict + :param row: + :return: + """ + for k, v in row.items(): + if isinstance(v, datetime.datetime): + row[k] = v.strftime("%Y-%m-%d %H:%M:%S") + elif isinstance(v, datetime.date): + row[k] = v.strftime("%Y-%m-%d") + elif isinstance(v, str): + try: + row[k] = json.loads(v) + except ValueError: + pass + + def _fetch(self, query, size=-1, commit=True): + query = query.strip() + result = self.session.execute(query) + if query.upper()[:6] == "SELECT": + if size < 0: + al = result.fetchall() + al = [dict(el) for el in al] + for el in al: + self.value_decode(el) + return al or None + elif size == 1: + on = dict(result.fetchone()) + self.value_decode(on) + return on or None + else: + mny = result.fetchmany(size) + mny = [dict(el) for el in mny] + for el in mny: + self.value_decode(el) + return mny or None + elif query.upper()[:6] in ("UPDATE", "DELETE", "INSERT"): + return {"rowcount": result.rowcount} + + def fetchone(self, query, commit=True): + return self._fetch(query, size=1, commit=commit) + + def fetchmany(self, query, size, commit=True): + return self._fetch(query=query, size=size, commit=commit) + + def fetchall(self, query, commit=True): + return self._fetch(query=query, size=-1, commit=commit) + + def insert(self, query, commit=True): + return self._fetch(query=query, commit=commit) + + def delete(self, query, commit=True): + return self._fetch(query=query, commit=commit) + + def update(self, query, commit=True): + return self._fetch(query=query, commit=commit) + + +if __name__ == "__main__": + # db = DBEngine("mysql+pymysql://xxxxx:xxxxx@10.0.0.1:3306/dbname?charset=utf8mb4") + db = DBEngine("sqlite:////Users/xxx/HttpRunner/examples/data/sqlite.db") + print(db.fetchmany(""" + select* from student""", 5)) + print(db.fetchmany("select* from student", 5)) diff --git a/httprunner/exceptions.py b/httprunner/exceptions.py new file mode 100644 index 0000000..e3969fe --- /dev/null +++ b/httprunner/exceptions.py @@ -0,0 +1,92 @@ +""" failure type exceptions + these exceptions will mark test as failure +""" + + +class MyBaseFailure(Exception): + pass + + +class ParseTestsFailure(MyBaseFailure): + pass + + +class ValidationFailure(MyBaseFailure): + pass + + +class ExtractFailure(MyBaseFailure): + pass + + +class SetupHooksFailure(MyBaseFailure): + pass + + +class TeardownHooksFailure(MyBaseFailure): + pass + + +""" error type exceptions + these exceptions will mark test as error +""" + + +class MyBaseError(Exception): + pass + + +class FileFormatError(MyBaseError): + pass + + +class TestCaseFormatError(FileFormatError): + pass + + +class TestSuiteFormatError(FileFormatError): + pass + + +class ParamsError(MyBaseError): + pass + + +class NotFoundError(MyBaseError): + pass + + +class FileNotFound(FileNotFoundError, NotFoundError): + pass + + +class FunctionNotFound(NotFoundError): + pass + + +class VariableNotFound(NotFoundError): + pass + + +class EnvNotFound(NotFoundError): + pass + + +class 
CSVNotFound(NotFoundError): + pass + + +class ApiNotFound(NotFoundError): + pass + + +class TestcaseNotFound(NotFoundError): + pass + + +class SummaryEmpty(MyBaseError): + """test result summary data is empty""" + + +class SqlMethodNotSupport(MyBaseError): + pass diff --git a/httprunner/ext/__init__.py b/httprunner/ext/__init__.py new file mode 100644 index 0000000..2ce5da7 --- /dev/null +++ b/httprunner/ext/__init__.py @@ -0,0 +1,2 @@ +# NOTICE: +# This file should not be deleted, or ImportError will be raised in Python 2.7 when importing extension diff --git a/httprunner/ext/uploader/__init__.py b/httprunner/ext/uploader/__init__.py new file mode 100644 index 0000000..d3a0bf0 --- /dev/null +++ b/httprunner/ext/uploader/__init__.py @@ -0,0 +1,178 @@ +""" upload test extension. + +If you want to use this extension, you should install the following dependencies first. + +- requests_toolbelt +- filetype + +Then you can write upload test script as below: + + - test: + name: upload file + request: + url: https://httpbin.org/upload + method: POST + headers: + Cookie: session=AAA-BBB-CCC + upload: + file: "data/file_to_upload" + field1: "value1" + field2: "value2" + validate: + - eq: ["status_code", 200] + +For compatibility, you can also write upload test script in old way: + + - test: + name: upload file + variables: + file: "data/file_to_upload" + field1: "value1" + field2: "value2" + m_encoder: ${multipart_encoder(file=$file, field1=$field1, field2=$field2)} + request: + url: https://httpbin.org/upload + method: POST + headers: + Content-Type: ${multipart_content_type($m_encoder)} + Cookie: session=AAA-BBB-CCC + data: $m_encoder + validate: + - eq: ["status_code", 200] + +""" + +import os +import sys +from typing import Text + +from httprunner.models import VariablesMapping, FunctionsMapping, TStep +from httprunner.parser import parse_data +from loguru import logger + +try: + import filetype + from requests_toolbelt import MultipartEncoder + + UPLOAD_READY = True +except ModuleNotFoundError: + UPLOAD_READY = False + + +def ensure_upload_ready(): + if UPLOAD_READY: + return + + msg = """ + uploader extension dependencies uninstalled, install first and try again. + install with pip: + $ pip install requests_toolbelt filetype + + or you can install httprunner with optional upload dependencies: + $ pip install "httprunner[upload]" + """ + logger.error(msg) + sys.exit(1) + + +def prepare_upload_step( + step: TStep, step_variables: VariablesMapping, functions: FunctionsMapping +): + """preprocess for upload test + replace `upload` info with MultipartEncoder + + Args: + step: teststep + { + "variables": {}, + "request": { + "url": "https://httpbin.org/upload", + "method": "POST", + "headers": { + "Cookie": "session=AAA-BBB-CCC" + }, + "upload": { + "file": "data/file_to_upload" + "md5": "123" + } + } + } + functions: functions mapping + + """ + if not step.request.upload: + return + + # parse upload info + step.request.upload = parse_data(step.request.upload, step_variables, functions) + + ensure_upload_ready() + params_list = [] + for key, value in step.request.upload.items(): + step_variables[key] = value + params_list.append(f"{key}=${key}") + + params_str = ", ".join(params_list) + step_variables["m_encoder"] = "${multipart_encoder(" + params_str + ")}" + + step.request.headers["Content-Type"] = "${multipart_content_type($m_encoder)}" + + step.request.data = "$m_encoder" + + +def multipart_encoder(**kwargs): + """initialize MultipartEncoder with uploading fields. 
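For context on what the helpers here build: `requests_toolbelt.MultipartEncoder` streams a multipart body and exposes the matching `Content-Type` header (boundary included) via its `content_type` attribute. A minimal hedged sketch of direct usage, with an illustrative file path and field names:

    import requests
    from requests_toolbelt import MultipartEncoder

    with open("data/report.pdf", "rb") as f:
        m = MultipartEncoder(
            fields={
                "field1": "value1",
                # a (filename, file handle, mime type) tuple marks an actual file upload
                "file": ("report.pdf", f, "application/pdf"),
            }
        )
        response = requests.post(
            "https://httpbin.org/post",
            data=m,
            headers={"Content-Type": m.content_type},  # includes the multipart boundary
        )
    print(response.status_code)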
+ + Returns: + MultipartEncoder: initialized MultipartEncoder object + + """ + + def get_filetype(file_path): + file_type = filetype.guess(file_path) + if file_type: + return file_type.mime + else: + return "text/html" + + ensure_upload_ready() + fields_dict = {} + for key, value in kwargs.items(): + if os.path.isabs(value): + # value is absolute file path + _file_path = value + is_exists_file = os.path.isfile(value) + else: + # value is not absolute file path, check if it is relative file path + from httprunner.loader import load_project_meta + + project_meta = load_project_meta("") + + _file_path = os.path.join(project_meta.RootDir, value) + is_exists_file = os.path.isfile(_file_path) + + if is_exists_file: + # value is file path to upload + filename = os.path.basename(_file_path) + mime_type = get_filetype(_file_path) + # TODO: fix ResourceWarning for unclosed file + file_handler = open(_file_path, "rb") + fields_dict[key] = (filename, file_handler, mime_type) + else: + fields_dict[key] = value + + return MultipartEncoder(fields=fields_dict) + + +def multipart_content_type(m_encoder) -> Text: + """prepare Content-Type for request headers + + Args: + m_encoder: MultipartEncoder object + + Returns: + content type + + """ + ensure_upload_ready() + return m_encoder.content_type diff --git a/httprunner/loader.py b/httprunner/loader.py new file mode 100644 index 0000000..49dc849 --- /dev/null +++ b/httprunner/loader.py @@ -0,0 +1,432 @@ +import csv +import importlib +import json +import os +import sys +import types +from typing import Callable, Dict, List, Text, Tuple, Union + +import yaml +from loguru import logger +from pydantic import ValidationError + +from httprunner import builtin, exceptions, utils +from httprunner.models import ProjectMeta, TestCase + +project_meta: Union[ProjectMeta, None] = None + + +def _load_yaml_file(yaml_file: Text) -> Dict: + """load yaml file and check file content format""" + with open(yaml_file, mode="rb") as stream: + try: + yaml_content = yaml.load(stream, Loader=yaml.FullLoader) + except yaml.YAMLError as ex: + err_msg = f"YAMLError:\nfile: {yaml_file}\nerror: {ex}" + logger.error(err_msg) + raise exceptions.FileFormatError + + return yaml_content + + +def _load_json_file(json_file: Text) -> Dict: + """load json file and check file content format""" + with open(json_file, mode="rb") as data_file: + try: + json_content = json.load(data_file) + except json.JSONDecodeError as ex: + err_msg = f"JSONDecodeError:\nfile: {json_file}\nerror: {ex}" + raise exceptions.FileFormatError(err_msg) + + return json_content + + +def load_test_file(test_file: Text) -> Dict: + """load testcase/testsuite file content""" + if not os.path.isfile(test_file): + raise exceptions.FileNotFound(f"test file not exists: {test_file}") + + file_suffix = os.path.splitext(test_file)[1].lower() + if file_suffix == ".json": + test_file_content = _load_json_file(test_file) + elif file_suffix in [".yaml", ".yml"]: + test_file_content = _load_yaml_file(test_file) + else: + # '' or other suffix + raise exceptions.FileFormatError( + f"testcase/testsuite file should be YAML/JSON format, invalid format file: {test_file}" + ) + + return test_file_content + + +def load_testcase(testcase: Dict) -> TestCase: + try: + # validate with pydantic TestCase model + testcase_obj = TestCase.parse_obj(testcase) + except ValidationError as ex: + err_msg = f"TestCase ValidationError:\nerror: {ex}\ncontent: {testcase}" + raise exceptions.TestCaseFormatError(err_msg) + + return testcase_obj + + +def 
load_testcase_file(testcase_file: Text) -> TestCase: + """load testcase file and validate with pydantic model""" + testcase_content = load_test_file(testcase_file) + testcase_obj = load_testcase(testcase_content) + testcase_obj.config.path = testcase_file + return testcase_obj + + +def load_dot_env_file(dot_env_path: Text) -> Dict: + """load .env file. + + Args: + dot_env_path (str): .env file path + + Returns: + dict: environment variables mapping + + { + "UserName": "debugtalk", + "Password": "123456", + "PROJECT_KEY": "ABCDEFGH" + } + + Raises: + exceptions.FileFormatError: If .env file format is invalid. + + """ + if not os.path.isfile(dot_env_path): + return {} + + logger.info(f"Loading environment variables from {dot_env_path}") + env_variables_mapping = {} + + with open(dot_env_path, mode="rb") as fp: + for line in fp: + # maxsplit=1 + line = line.strip() + if not len(line) or line.startswith(b"#"): + continue + if b"=" in line: + variable, value = line.split(b"=", 1) + elif b":" in line: + variable, value = line.split(b":", 1) + else: + raise exceptions.FileFormatError(".env format error") + + env_variables_mapping[ + variable.strip().decode("utf-8") + ] = value.strip().decode("utf-8") + + utils.set_os_environ(env_variables_mapping) + return env_variables_mapping + + +def load_csv_file(csv_file: Text) -> List[Dict]: + """load csv file and check file content format + + Args: + csv_file (str): csv file path, csv file content is like below: + + Returns: + list: list of parameters, each parameter is in dict format + + Examples: + >>> cat csv_file + username,password + test1,111111 + test2,222222 + test3,333333 + + >>> load_csv_file(csv_file) + [ + {'username': 'test1', 'password': '111111'}, + {'username': 'test2', 'password': '222222'}, + {'username': 'test3', 'password': '333333'} + ] + + """ + if not os.path.isabs(csv_file): + global project_meta + if project_meta is None: + raise exceptions.MyBaseFailure("load_project_meta() has not been called!") + + # make compatible with Windows/Linux + csv_file = os.path.join(project_meta.RootDir, *csv_file.split("/")) + + if not os.path.isfile(csv_file): + # file path not exist + raise exceptions.CSVNotFound(csv_file) + + csv_content_list = [] + + with open(csv_file, encoding="utf-8") as csvfile: + reader = csv.DictReader(csvfile) + for row in reader: + csv_content_list.append(row) + + return csv_content_list + + +def load_folder_files(folder_path: Text, recursive: bool = True) -> List: + """load folder path, return all files endswith .yml/.yaml/.json/_test.py in list. + + Args: + folder_path (str): specified folder path to load + recursive (bool): load files recursively if True + + Returns: + list: files endswith yml/yaml/json + """ + if isinstance(folder_path, (list, set)): + files = [] + for path in set(folder_path): + files.extend(load_folder_files(path, recursive)) + + return files + + if not os.path.exists(folder_path): + return [] + + file_list = [] + + for dirpath, dirnames, filenames in os.walk(folder_path): + filenames_list = [] + + for filename in filenames: + if not filename.lower().endswith((".yml", ".yaml", ".json", "_test.py")): + continue + + filenames_list.append(filename) + + for filename in filenames_list: + file_path = os.path.join(dirpath, filename) + file_list.append(file_path) + + if not recursive: + break + + return file_list + + +def load_module_functions(module) -> Dict[Text, Callable]: + """load python module functions. 
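`load_dot_env_file` above accepts both `KEY=value` and `KEY: value` lines, skips blanks and `#` comments, then pushes the result into `os.environ`. A small hedged sketch of the same parsing rules on an illustrative file content:

    raw = b"""# comment lines and blanks are skipped

    UserName=debugtalk
    Password: 123456
    """

    env = {}
    for line in raw.splitlines():
        line = line.strip()
        if not line or line.startswith(b"#"):
            continue
        if b"=" in line:
            key, value = line.split(b"=", 1)
        elif b":" in line:
            key, value = line.split(b":", 1)
        else:
            raise ValueError(".env format error")
        env[key.strip().decode("utf-8")] = value.strip().decode("utf-8")

    assert env == {"UserName": "debugtalk", "Password": "123456"}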
+ + Args: + module: python module + + Returns: + dict: functions mapping for specified python module + + { + "func1_name": func1, + "func2_name": func2 + } + + """ + module_functions = {} + + for name, item in vars(module).items(): + if isinstance(item, types.FunctionType): + module_functions[name] = item + + return module_functions + + +def load_builtin_functions() -> Dict[Text, Callable]: + """load builtin module functions""" + return load_module_functions(builtin) + + +def locate_file(start_path: Text, file_name: Text) -> Text: + """locate filename and return absolute file path. + searching will be recursive upward until system root dir. + + Args: + file_name (str): target locate file name + start_path (str): start locating path, maybe file path or directory path + + Returns: + str: located file path. None if file not found. + + Raises: + exceptions.FileNotFound: If failed to locate file. + + """ + if os.path.isfile(start_path): + start_dir_path = os.path.dirname(start_path) + elif os.path.isdir(start_path): + start_dir_path = start_path + else: + raise exceptions.FileNotFound(f"invalid path: {start_path}") + + file_path = os.path.join(start_dir_path, file_name) + if os.path.isfile(file_path): + # ensure absolute + return os.path.abspath(file_path) + + # system root dir + # Windows, e.g. 'E:\\' + # Linux/Darwin, '/' + parent_dir = os.path.dirname(start_dir_path) + if parent_dir == start_dir_path: + raise exceptions.FileNotFound(f"{file_name} not found in {start_path}") + + # locate recursive upward + return locate_file(parent_dir, file_name) + + +def locate_debugtalk_py(start_path: Text) -> Text: + """locate debugtalk.py file + + Args: + start_path (str): start locating path, + maybe testcase file path or directory path + + Returns: + str: debugtalk.py file path, None if not found + + """ + try: + # locate debugtalk.py file. + debugtalk_path = locate_file(start_path, "debugtalk.py") + except exceptions.FileNotFound: + debugtalk_path = None + + return debugtalk_path + + +def locate_project_root_directory(test_path: Text) -> Tuple[Text, Text]: + """locate debugtalk.py path as project root directory + + Args: + test_path: specified testfile path + + Returns: + (str, str): debugtalk.py path, project_root_directory + + """ + + def prepare_path(path): + if not os.path.exists(path): + err_msg = f"path not exist: {path}" + logger.error(err_msg) + raise exceptions.FileNotFound(err_msg) + + if not os.path.isabs(path): + path = os.path.join(os.getcwd(), path) + + return path + + test_path = prepare_path(test_path) + + # locate debugtalk.py file + debugtalk_path = locate_debugtalk_py(test_path) + + if debugtalk_path: + # The folder contains debugtalk.py will be treated as project RootDir. + project_root_directory = os.path.dirname(debugtalk_path) + else: + # debugtalk.py not found, use os.getcwd() as project RootDir. + project_root_directory = os.getcwd() + + return debugtalk_path, project_root_directory + + +def load_debugtalk_functions() -> Dict[Text, Callable]: + """load project debugtalk.py module functions + debugtalk.py should be located in project root directory. 
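`locate_file` above searches upward until the parent directory equals the current one, which is the filesystem root on both POSIX ('/') and Windows (a drive root like 'E:\\'). An equivalent iterative sketch, returning None instead of raising, with the recursion of the original left out for brevity:

    import os

    def locate_upward(start_dir: str, file_name: str):
        """walk parent directories until file_name is found or the FS root is hit"""
        current = os.path.abspath(start_dir)
        while True:
            candidate = os.path.join(current, file_name)
            if os.path.isfile(candidate):
                return candidate
            parent = os.path.dirname(current)
            if parent == current:  # reached '/' (POSIX) or a drive root like 'E:\\'
                return None
            current = parent

    print(locate_upward(".", "debugtalk.py"))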
+ + Returns: + dict: debugtalk module functions mapping + { + "func1_name": func1, + "func2_name": func2 + } + + """ + # load debugtalk.py module + try: + imported_module = importlib.import_module("debugtalk") + except Exception as ex: + logger.error(f"error occurred in debugtalk.py: {ex}") + sys.exit(1) + + # reload to refresh previously loaded module + imported_module = importlib.reload(imported_module) + return load_module_functions(imported_module) + + +def load_project_meta(test_path: Text, reload: bool = False) -> ProjectMeta: + """load testcases, .env, debugtalk.py functions. + testcases folder is relative to project_root_directory + by default, project_meta will be loaded only once, unless set reload to true. + + Args: + test_path (str): test file/folder path, locate project RootDir from this path. + reload: reload project meta if set true, default to false + + Returns: + project loaded api/testcases definitions, + environments and debugtalk.py functions. + + """ + global project_meta + if project_meta and (not reload): + return project_meta + + project_meta = ProjectMeta() + + if not test_path: + return project_meta + + debugtalk_path, project_root_directory = locate_project_root_directory(test_path) + + # add project RootDir to sys.path + sys.path.insert(0, project_root_directory) + + # load .env file + # NOTICE: + # environment variable maybe loaded in debugtalk.py + # thus .env file should be loaded before loading debugtalk.py + dot_env_path = os.path.join(project_root_directory, ".env") + dot_env = load_dot_env_file(dot_env_path) + if dot_env: + project_meta.env = dot_env + project_meta.dot_env_path = dot_env_path + + if debugtalk_path: + # load debugtalk.py functions + debugtalk_functions = load_debugtalk_functions() + else: + debugtalk_functions = {} + + # locate project RootDir and load debugtalk.py functions + project_meta.RootDir = project_root_directory + project_meta.functions = debugtalk_functions + project_meta.debugtalk_path = debugtalk_path + + return project_meta + + +def convert_relative_project_root_dir(abs_path: Text) -> Text: + """convert absolute path to relative path, based on project_meta.RootDir + + Args: + abs_path: absolute path + + Returns: relative path based on project_meta.RootDir + + """ + _project_meta = load_project_meta(abs_path) + if not abs_path.startswith(_project_meta.RootDir): + raise exceptions.ParamsError( + f"failed to convert absolute path to relative path based on project_meta.RootDir\n" + f"abs_path: {abs_path}\n" + f"project_meta.RootDir: {_project_meta.RootDir}" + ) + + return abs_path[len(_project_meta.RootDir) + 1 :] diff --git a/httprunner/loader_test.py b/httprunner/loader_test.py new file mode 100644 index 0000000..7b09d87 --- /dev/null +++ b/httprunner/loader_test.py @@ -0,0 +1,127 @@ +import os +import unittest + +from httprunner import exceptions, loader + + +class TestLoader(unittest.TestCase): + def test_load_testcase_file(self): + path = "examples/postman_echo/request_methods/request_with_variables.yml" + testcase_obj = loader.load_testcase_file(path) + self.assertEqual( + testcase_obj.config.name, "request methods testcase with variables" + ) + self.assertEqual(len(testcase_obj.teststeps), 4) + + def test_load_json_file_file_format_error(self): + json_tmp_file = "tmp.json" + # create empty file + with open(json_tmp_file, "w") as f: + f.write("") + + with self.assertRaises(exceptions.FileFormatError): + loader._load_json_file(json_tmp_file) + + os.remove(json_tmp_file) + + # create empty json file + with open(json_tmp_file, 
"w") as f: + f.write("{}") + + loader._load_json_file(json_tmp_file) + os.remove(json_tmp_file) + + # create invalid format json file + with open(json_tmp_file, "w") as f: + f.write("abc") + + with self.assertRaises(exceptions.FileFormatError): + loader._load_json_file(json_tmp_file) + + os.remove(json_tmp_file) + + def test_load_testcases_bad_filepath(self): + testcase_file_path = os.path.join(os.getcwd(), "examples/data/demo") + with self.assertRaises(exceptions.FileNotFound): + loader.load_testcase_file(testcase_file_path) + + def test_load_csv_file_one_parameter(self): + csv_file_path = os.path.join(os.getcwd(), "examples/httpbin/user_agent.csv") + csv_content = loader.load_csv_file(csv_file_path) + self.assertEqual( + csv_content, + [ + {"user_agent": "iOS/10.1"}, + {"user_agent": "iOS/10.2"}, + {"user_agent": "iOS/10.3"}, + ], + ) + + def test_load_csv_file_multiple_parameters(self): + csv_file_path = os.path.join(os.getcwd(), "examples/httpbin/account.csv") + csv_content = loader.load_csv_file(csv_file_path) + self.assertEqual( + csv_content, + [ + {"username": "test1", "password": "111111"}, + {"username": "test2", "password": "222222"}, + {"username": "test3", "password": "333333"}, + ], + ) + + def test_load_folder_files(self): + folder = os.path.join(os.getcwd(), "examples") + file1 = os.path.join(os.getcwd(), "examples", "test_utils.py") + file2 = os.path.join(os.getcwd(), "examples", "httpbin", "hooks.yml") + + files = loader.load_folder_files(folder, recursive=False) + self.assertEqual(files, []) + + files = loader.load_folder_files(folder) + self.assertIn(file2, files) + self.assertNotIn(file1, files) + + files = loader.load_folder_files("not_existed_foulder", recursive=False) + self.assertEqual([], files) + + files = loader.load_folder_files(file2, recursive=False) + self.assertEqual([], files) + + def test_load_custom_dot_env_file(self): + dot_env_path = os.path.join(os.getcwd(), "examples", "httpbin", "test.env") + env_variables_mapping = loader.load_dot_env_file(dot_env_path) + self.assertIn("PROJECT_KEY", env_variables_mapping) + self.assertEqual(env_variables_mapping["UserName"], "test") + self.assertEqual( + env_variables_mapping["content_type"], "application/json; charset=UTF-8" + ) + + def test_load_env_path_not_exist(self): + dot_env_path = os.path.join( + os.getcwd(), + "tests", + "data", + ) + env_variables_mapping = loader.load_dot_env_file(dot_env_path) + self.assertEqual(env_variables_mapping, {}) + + def test_locate_file(self): + with self.assertRaises(exceptions.FileNotFound): + loader.locate_file(os.getcwd(), "debugtalk.py") + + with self.assertRaises(exceptions.FileNotFound): + loader.locate_file("", "debugtalk.py") + + start_path = os.path.join(os.getcwd(), "examples", "httpbin") + self.assertEqual( + loader.locate_file(start_path, "debugtalk.py"), + os.path.join(os.getcwd(), "examples", "httpbin", "debugtalk.py"), + ) + self.assertEqual( + loader.locate_file("examples/httpbin/", "debugtalk.py"), + os.path.join(os.getcwd(), "examples", "httpbin", "debugtalk.py"), + ) + self.assertEqual( + loader.locate_file("examples/httpbin/", "debugtalk.py"), + os.path.join(os.getcwd(), "examples", "httpbin", "debugtalk.py"), + ) diff --git a/httprunner/make.py b/httprunner/make.py new file mode 100644 index 0000000..d2b38ed --- /dev/null +++ b/httprunner/make.py @@ -0,0 +1,574 @@ +import os +import string +import subprocess +import sys +from typing import Dict, List, Set, Text, Tuple + +import jinja2 +from loguru import logger + +from httprunner import __version__, 
exceptions +from httprunner.compat import ( + convert_variables, + ensure_path_sep, + ensure_testcase_v4, + ensure_testcase_v4_api, +) +from httprunner.loader import ( + convert_relative_project_root_dir, + load_folder_files, + load_project_meta, + load_test_file, + load_testcase, +) +from httprunner.response import uniform_validator +from httprunner.utils import ga4_client, is_support_multiprocessing + +""" cache converted pytest files, avoid duplicate making +""" +pytest_files_made_cache_mapping: Dict[Text, Text] = {} + +""" save generated pytest files to run, except referenced testcase +""" +pytest_files_run_set: Set = set() + +__TEMPLATE__ = jinja2.Template( + """# NOTE: Generated By HttpRunner {{ version }} +# FROM: {{ testcase_path }} + +{%- if parameters or skip %} +import pytest +{% endif %} +from httprunner import HttpRunner, Config, Step, RunRequest + +{%- if parameters %} +from httprunner import Parameters +{%- endif %} + +{%- if reference_testcase %} +from httprunner import RunTestCase +{%- endif %} + +{%- for import_str in imports_list %} +{{ import_str }} +{%- endfor %} + +class {{ class_name }}(HttpRunner): + + {% if parameters and skip %} + @pytest.mark.parametrize("param", Parameters({{ parameters }})) + @pytest.mark.skip(reason={{ skip }}) + def test_start(self, param): + super().test_start(param) + + {% elif parameters %} + @pytest.mark.parametrize("param", Parameters({{ parameters }})) + def test_start(self, param): + super().test_start(param) + + {% elif skip %} + @pytest.mark.skip(reason={{ skip }}) + def test_start(self): + super().test_start() + {% endif %} + + config = {{ config_chain_style }} + + teststeps = [ + {% for step_chain_style in teststeps_chain_style %} + {{ step_chain_style }}, + {% endfor %} + ] + +if __name__ == "__main__": + {{ class_name }}().test_start() + +""" +) + + +def __ensure_absolute(path: Text) -> Text: + if path.startswith("./"): + # Linux/Darwin, hrun ./test.yml + path = path[2:] + elif path.startswith(".\\"): + # Windows, hrun .\\test.yml + path = path[3:] + + path = ensure_path_sep(path) + project_meta = load_project_meta(path) + + if os.path.isabs(path): + absolute_path = path + else: + absolute_path = os.path.join(project_meta.RootDir, path) + + if not os.path.isfile(absolute_path): + logger.error(f"Invalid testcase file path: {absolute_path}") + sys.exit(1) + + return absolute_path + + +def ensure_file_abs_path_valid(file_abs_path: Text) -> Text: + """ensure file path valid for pytest, handle cases when directory name includes dot/hyphen/space + + Args: + file_abs_path: absolute file path + + Returns: + ensured valid absolute file path + + """ + project_meta = load_project_meta(file_abs_path) + raw_abs_file_name, file_suffix = os.path.splitext(file_abs_path) + file_suffix = file_suffix.lower() + + raw_file_relative_name = convert_relative_project_root_dir(raw_abs_file_name) + if raw_file_relative_name == "": + return file_abs_path + + path_names = [] + for name in raw_file_relative_name.rstrip(os.sep).split(os.sep): + + if name[0] in string.digits: + # ensure file name not startswith digit + # 19 => T19, 2C => T2C + name = f"T{name}" + + if name.startswith("."): + # avoid ".csv" been converted to "_csv" + pass + else: + # handle cases when directory name includes dot/hyphen/space + name = name.replace(" ", "_").replace(".", "_").replace("-", "_") + + path_names.append(name) + + new_file_path = os.path.join( + project_meta.RootDir, f"{os.sep.join(path_names)}{file_suffix}" + ) + return new_file_path + + +def 
__ensure_testcase_module(path: Text): + """ensure pytest files are in python module, generate __init__.py on demand""" + init_file = os.path.join(os.path.dirname(path), "__init__.py") + if os.path.isfile(init_file): + return + + with open(init_file, "w", encoding="utf-8") as f: + f.write("# NOTICE: Generated By HttpRunner. DO NOT EDIT!\n") + + +def convert_testcase_path(testcase_abs_path: Text) -> Tuple[Text, Text]: + """convert single YAML/JSON testcase path to python file""" + testcase_new_path = ensure_file_abs_path_valid(testcase_abs_path) + + dir_path = os.path.dirname(testcase_new_path) + file_name, _ = os.path.splitext(os.path.basename(testcase_new_path)) + testcase_python_abs_path = os.path.join(dir_path, f"{file_name}_test.py") + + # convert title case, e.g. request_with_variables => RequestWithVariables + name_in_title_case = file_name.title().replace("_", "") + + return testcase_python_abs_path, name_in_title_case + + +def format_pytest_with_black(*python_paths: Text): + logger.info("format pytest cases with black ...") + try: + if is_support_multiprocessing() or len(python_paths) <= 1: + subprocess.run(["black", *python_paths]) + else: + logger.warning( + "this system does not support multiprocessing well, format files one by one ..." + ) + [subprocess.run(["black", path]) for path in python_paths] + except subprocess.CalledProcessError as ex: + logger.error(ex) + sys.exit(1) + except OSError: + err_msg = """ +missing dependency tool: black +install black manually and try again: +$ pip install black +""" + logger.error(err_msg) + sys.exit(1) + + +def make_config_chain_style(config: Dict) -> Text: + config_chain_style = f'Config("{config["name"]}")' + + if config["variables"]: + variables = config["variables"] + config_chain_style += f".variables(**{variables})" + + if "base_url" in config: + config_chain_style += f'.base_url("{config["base_url"]}")' + + if "verify" in config: + config_chain_style += f'.verify({config["verify"]})' + + if "export" in config: + config_chain_style += f'.export(*{config["export"]})' + + return config_chain_style + + +def make_config_skip(config: Dict) -> Text: + if "skip" in config: + if config["skip"]: + config_chain_style = config["skip"] + else: + config_chain_style = '"skip unconditionally"' + return config_chain_style + + +def make_request_chain_style(request: Dict) -> Text: + method = request["method"].lower() + url = request["url"] + request_chain_style = f'.{method}("{url}")' + + if "params" in request: + params = request["params"] + request_chain_style += f".with_params(**{params})" + + if "headers" in request: + headers = request["headers"] + request_chain_style += f".with_headers(**{headers})" + + if "cookies" in request: + cookies = request["cookies"] + request_chain_style += f".with_cookies(**{cookies})" + + if "data" in request: + data = request["data"] + if isinstance(data, Text): + data = f'"{data}"' + request_chain_style += f".with_data({data})" + + if "json" in request: + req_json = request["json"] + if isinstance(req_json, Text): + req_json = f'"{req_json}"' + request_chain_style += f".with_json({req_json})" + + if "timeout" in request: + timeout = request["timeout"] + request_chain_style += f".set_timeout({timeout})" + + if "verify" in request: + verify = request["verify"] + request_chain_style += f".set_verify({verify})" + + if "allow_redirects" in request: + allow_redirects = request["allow_redirects"] + request_chain_style += f".set_allow_redirects({allow_redirects})" + + if "upload" in request: + upload = request["upload"] + 
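+        # render the upload dict as a chained .upload(**{...}) call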
request_chain_style += f".upload(**{upload})" + + return request_chain_style + + +def make_teststep_chain_style(teststep: Dict) -> Text: + if teststep.get("request"): + step_info = f'RunRequest("{teststep["name"]}")' + elif teststep.get("testcase"): + step_info = f'RunTestCase("{teststep["name"]}")' + else: + raise exceptions.TestCaseFormatError(f"Invalid teststep: {teststep}") + + if "variables" in teststep: + variables = teststep["variables"] + step_info += f".with_variables(**{variables})" + + if "setup_hooks" in teststep: + setup_hooks = teststep["setup_hooks"] + for hook in setup_hooks: + if isinstance(hook, Text): + step_info += f'.setup_hook("{hook}")' + elif isinstance(hook, Dict) and len(hook) == 1: + assign_var_name, hook_content = list(hook.items())[0] + step_info += f'.setup_hook("{hook_content}", "{assign_var_name}")' + else: + raise exceptions.TestCaseFormatError(f"Invalid setup hook: {hook}") + + if teststep.get("request"): + step_info += make_request_chain_style(teststep["request"]) + elif teststep.get("testcase"): + testcase = teststep["testcase"] + call_ref_testcase = f".call({testcase})" + step_info += call_ref_testcase + + if "teardown_hooks" in teststep: + teardown_hooks = teststep["teardown_hooks"] + for hook in teardown_hooks: + if isinstance(hook, Text): + step_info += f'.teardown_hook("{hook}")' + elif isinstance(hook, Dict) and len(hook) == 1: + assign_var_name, hook_content = list(hook.items())[0] + step_info += f'.teardown_hook("{hook_content}", "{assign_var_name}")' + else: + raise exceptions.TestCaseFormatError(f"Invalid teardown hook: {hook}") + + if "extract" in teststep: + # request step + step_info += ".extract()" + for extract_name, extract_path in teststep["extract"].items(): + step_info += f""".with_jmespath('{extract_path}', '{extract_name}')""" + + if "export" in teststep: + # reference testcase step + export: List[Text] = teststep["export"] + step_info += f".export(*{export})" + + if "validate" in teststep: + step_info += ".validate()" + + for v in teststep["validate"]: + validator = uniform_validator(v) + assert_method = validator["assert"] + check = validator["check"] + if '"' in check: + # e.g. 
body."user-agent" => 'body."user-agent"' + check = f"'{check}'" + else: + check = f'"{check}"' + expect = validator["expect"] + if isinstance(expect, Text): + expect = f'"{expect}"' + + message = validator["message"] + if message: + step_info += f".assert_{assert_method}({check}, {expect}, '{message}')" + else: + step_info += f".assert_{assert_method}({check}, {expect})" + + return f"Step({step_info})" + + +def make_testcase(testcase: Dict, dir_path: Text = None) -> Text: + """convert valid testcase dict to pytest file path""" + # ensure compatibility with testcase format v2/v3 + testcase = ensure_testcase_v4(testcase) + + # validate testcase format + load_testcase(testcase) + + testcase_abs_path = __ensure_absolute(testcase["config"]["path"]) + logger.info(f"start to make testcase: {testcase_abs_path}") + + testcase_python_abs_path, testcase_cls_name = convert_testcase_path( + testcase_abs_path + ) + if dir_path: + testcase_python_abs_path = os.path.join( + dir_path, os.path.basename(testcase_python_abs_path) + ) + + global pytest_files_made_cache_mapping + if testcase_python_abs_path in pytest_files_made_cache_mapping: + return testcase_python_abs_path + + config = testcase["config"] + config["path"] = convert_relative_project_root_dir(testcase_python_abs_path) + config["variables"] = convert_variables( + config.get("variables", {}), testcase_abs_path + ) + + # prepare reference testcase + imports_list = [] + teststeps = testcase["teststeps"] + for teststep in teststeps: + if not teststep.get("testcase"): + continue + + # make ref testcase pytest file + ref_testcase_path = __ensure_absolute(teststep["testcase"]) + test_content = load_test_file(ref_testcase_path) + + if not isinstance(test_content, Dict): + raise exceptions.TestCaseFormatError(f"Invalid teststep: {teststep}") + + # api in v2/v3 format, convert to v4 testcase + if "request" in test_content and "name" in test_content: + test_content = ensure_testcase_v4_api(test_content) + + test_content.setdefault("config", {})["path"] = ref_testcase_path + ref_testcase_python_abs_path = make_testcase(test_content) + + # override testcase export + ref_testcase_export: List = test_content["config"].get("export", []) + if ref_testcase_export: + step_export: List = teststep.setdefault("export", []) + step_export.extend(ref_testcase_export) + teststep["export"] = list(set(step_export)) + + # prepare ref testcase class name + ref_testcase_cls_name = pytest_files_made_cache_mapping[ + ref_testcase_python_abs_path + ] + teststep["testcase"] = ref_testcase_cls_name + + # prepare import ref testcase + ref_testcase_python_relative_path = convert_relative_project_root_dir( + ref_testcase_python_abs_path + ) + ref_module_name, _ = os.path.splitext(ref_testcase_python_relative_path) + ref_module_name = ref_module_name.replace(os.sep, ".") + import_expr = f"from {ref_module_name} import TestCase{ref_testcase_cls_name} as {ref_testcase_cls_name}" + if import_expr not in imports_list: + imports_list.append(import_expr) + + testcase_path = convert_relative_project_root_dir(testcase_abs_path) + # current file compared to ProjectRootDir + diff_levels = len(testcase_path.split(os.sep)) + if len(imports_list) > 0 and diff_levels > 0: + parent = ".parent" * diff_levels + import_deps = f""" +import sys +from pathlib import Path +sys.path.insert(0, str(Path(__file__){parent})) +""" + imports_list.insert(0, import_deps) + + data = { + "version": __version__, + "testcase_path": testcase_path, + "class_name": f"TestCase{testcase_cls_name}", + "imports_list": 
imports_list, + "config_chain_style": make_config_chain_style(config), + "skip": make_config_skip(config), + "parameters": config.get("parameters"), + "reference_testcase": any(step.get("testcase") for step in teststeps), + "teststeps_chain_style": [ + make_teststep_chain_style(step) for step in teststeps + ], + } + content = __TEMPLATE__.render(data) + + # ensure new file's directory exists + dir_path = os.path.dirname(testcase_python_abs_path) + if not os.path.exists(dir_path): + os.makedirs(dir_path) + + with open(testcase_python_abs_path, "w", encoding="utf-8") as f: + f.write(content) + + pytest_files_made_cache_mapping[testcase_python_abs_path] = testcase_cls_name + __ensure_testcase_module(testcase_python_abs_path) + + logger.info(f"generated testcase: {testcase_python_abs_path}") + + return testcase_python_abs_path + + +def __make(tests_path: Text): + """make testcase(s) with testcase/folder absolute path + generated pytest file path will be cached in pytest_files_made_cache_mapping + + Args: + tests_path: should be in absolute path + + """ + logger.info(f"make path: {tests_path}") + test_files = [] + if os.path.isdir(tests_path): + files_list = load_folder_files(tests_path) + test_files.extend(files_list) + elif os.path.isfile(tests_path): + test_files.append(tests_path) + else: + raise exceptions.TestcaseNotFound(f"Invalid tests path: {tests_path}") + + for test_file in test_files: + if test_file.lower().endswith("_test.py"): + pytest_files_run_set.add(test_file) + continue + + try: + test_content = load_test_file(test_file) + except (exceptions.FileNotFound, exceptions.FileFormatError) as ex: + logger.warning(f"Invalid test file: {test_file}\n{type(ex).__name__}: {ex}") + continue + + if not isinstance(test_content, Dict): + logger.warning( + f"Invalid test file: {test_file}\n" + f"reason: test content not in dict format." + ) + continue + + # api in v2/v3 format, convert to v4 testcase + if "request" in test_content and "name" in test_content: + test_content = ensure_testcase_v4_api(test_content) + + if "config" not in test_content: + logger.warning( + f"Invalid testcase file: {test_file}\nreason: missing config part." 
+ ) + continue + elif not isinstance(test_content["config"], Dict): + logger.warning( + f"Invalid testcase file: {test_file}\n" + f"reason: config should be dict type, got {test_content['config']}" + ) + continue + + # ensure path absolute + test_content.setdefault("config", {})["path"] = test_file + + # invalid format + if "teststeps" not in test_content: + logger.warning(f"Invalid testcase file: {test_file}") + + # testcase + try: + testcase_pytest_path = make_testcase(test_content) + pytest_files_run_set.add(testcase_pytest_path) + except exceptions.TestCaseFormatError as ex: + logger.warning( + f"Invalid testcase file: {test_file}\n{type(ex).__name__}: {ex}" + ) + continue + + +def main_make(tests_paths: List[Text]) -> List[Text]: + if not tests_paths: + return [] + + ga4_client.send_event("hmake") + + for tests_path in tests_paths: + tests_path = ensure_path_sep(tests_path) + if not os.path.isabs(tests_path): + tests_path = os.path.join(os.getcwd(), tests_path) + + try: + __make(tests_path) + except exceptions.MyBaseError as ex: + logger.error(ex) + sys.exit(1) + + # format pytest files + pytest_files_format_list = pytest_files_made_cache_mapping.keys() + format_pytest_with_black(*pytest_files_format_list) + + return list(pytest_files_run_set) + + +def init_make_parser(subparsers): + """make testcases: parse command line options and run commands.""" + parser = subparsers.add_parser( + "make", + help="Convert YAML/JSON testcases to pytest cases.", + ) + parser.add_argument( + "testcase_path", nargs="*", help="Specify YAML/JSON testcase file/folder path" + ) + + return parser diff --git a/httprunner/make_test.py b/httprunner/make_test.py new file mode 100644 index 0000000..f3a8032 --- /dev/null +++ b/httprunner/make_test.py @@ -0,0 +1,213 @@ +import os +import unittest + +from httprunner import loader +from httprunner.make import ( + main_make, + convert_testcase_path, + pytest_files_made_cache_mapping, + make_config_chain_style, + make_teststep_chain_style, + pytest_files_run_set, + ensure_file_abs_path_valid, +) + + +class TestMake(unittest.TestCase): + def setUp(self) -> None: + pytest_files_made_cache_mapping.clear() + pytest_files_run_set.clear() + loader.project_meta = None + self.data_dir = os.path.join(os.getcwd(), "examples", "data") + + def test_make_testcase(self): + path = ["examples/postman_echo/request_methods/request_with_variables.yml"] + testcase_python_list = main_make(path) + self.assertEqual( + testcase_python_list[0], + os.path.join( + os.getcwd(), + os.path.join( + "examples", + "postman_echo", + "request_methods", + "request_with_variables_test.py", + ), + ), + ) + + def test_make_testcase_with_ref(self): + path = [ + "examples/postman_echo/request_methods/request_with_testcase_reference.yml" + ] + testcase_python_list = main_make(path) + self.assertEqual(len(testcase_python_list), 1) + self.assertIn( + os.path.join( + os.getcwd(), + os.path.join( + "examples", + "postman_echo", + "request_methods", + "request_with_testcase_reference_test.py", + ), + ), + testcase_python_list, + ) + + with open( + os.path.join( + "examples", + "postman_echo", + "request_methods", + "request_with_testcase_reference_test.py", + ) + ) as f: + content = f.read() + self.assertIn( + """ +from request_methods.request_with_functions_test import ( + TestCaseRequestWithFunctions as RequestWithFunctions, +) +""", + content, + ) + self.assertIn( + ".call(RequestWithFunctions)", + content, + ) + + def test_make_testcase_folder(self): + path = ["examples/postman_echo/request_methods/"] + 
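+        # main_make() resolves each path and delegates to __make(): for a
+        # folder, files are collected via load_folder_files(), and files
+        # already named *_test.py go straight into pytest_files_run_set
+        # instead of being regenerated.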
testcase_python_list = main_make(path) + self.assertIn( + os.path.join( + os.getcwd(), + os.path.join( + "examples", + "postman_echo", + "request_methods", + "request_with_functions_test.py", + ), + ), + testcase_python_list, + ) + + def test_ensure_file_path_valid(self): + self.assertEqual( + ensure_file_abs_path_valid(os.path.join(self.data_dir, "a-b.c", "2 3.yml")), + os.path.join(self.data_dir, "a_b_c", "T2_3.yml"), + ) + loader.project_meta = None + self.assertEqual( + ensure_file_abs_path_valid( + os.path.join(os.getcwd(), "examples", "postman_echo", "request_methods") + ), + os.path.join(os.getcwd(), "examples", "postman_echo", "request_methods"), + ) + loader.project_meta = None + self.assertEqual( + ensure_file_abs_path_valid(os.path.join(os.getcwd(), "pyproject.toml")), + os.path.join(os.getcwd(), "pyproject.toml"), + ) + loader.project_meta = None + self.assertEqual( + ensure_file_abs_path_valid(os.getcwd()), + os.getcwd(), + ) + loader.project_meta = None + self.assertEqual( + ensure_file_abs_path_valid(os.path.join(self.data_dir, ".csv")), + os.path.join(self.data_dir, ".csv"), + ) + + def test_convert_testcase_path(self): + self.assertEqual( + convert_testcase_path(os.path.join(self.data_dir, "a-b.c", "2 3.yml")), + ( + os.path.join(self.data_dir, "a_b_c", "T2_3_test.py"), + "T23", + ), + ) + self.assertEqual( + convert_testcase_path(os.path.join(self.data_dir, "a-b.c", "中文case.yml")), + ( + os.path.join(self.data_dir, "a_b_c", "中文case_test.py"), + "中文Case", + ), + ) + + def test_make_config_chain_style(self): + config = { + "name": "request methods testcase: validate with functions", + "variables": {"foo1": "bar1", "foo2": 22}, + "base_url": "https://postman_echo.com", + "verify": False, + "path": "examples/postman_echo/request_methods/validate_with_functions_test.py", + } + self.assertEqual( + make_config_chain_style(config), + """Config("request methods testcase: validate with functions").variables(**{'foo1': 'bar1', 'foo2': 22}).base_url("https://postman_echo.com").verify(False)""", + ) + + def test_make_teststep_chain_style(self): + step = { + "name": "get with params", + "variables": { + "foo1": "bar1", + "foo2": 123, + "sum_v": "${sum_two(1, 2)}", + }, + "request": { + "method": "GET", + "url": "/get", + "params": {"foo1": "$foo1", "foo2": "$foo2", "sum_v": "$sum_v"}, + "headers": {"User-Agent": "HttpRunner/${get_httprunner_version()}"}, + }, + "testcase": "CLS_LB(TestCaseDemo)CLS_RB", + "extract": { + "session_foo1": "body.args.foo1", + "session_foo2": "body.args.foo2", + }, + "validate": [ + {"eq": ["status_code", 200]}, + {"eq": ["body.args.sum_v", "3"]}, + ], + } + teststep_chain_style = make_teststep_chain_style(step) + self.assertEqual( + teststep_chain_style, + """Step(RunRequest("get with params").with_variables(**{'foo1': 'bar1', 'foo2': 123, 'sum_v': '${sum_two(1, 2)}'}).get("/get").with_params(**{'foo1': '$foo1', 'foo2': '$foo2', 'sum_v': '$sum_v'}).with_headers(**{'User-Agent': 'HttpRunner/${get_httprunner_version()}'}).extract().with_jmespath('body.args.foo1', 'session_foo1').with_jmespath('body.args.foo2', 'session_foo2').validate().assert_equal("status_code", 200).assert_equal("body.args.sum_v", "3"))""", + ) + + def test_make_requests_with_json_chain_style(self): + step = { + "name": "get with params", + "variables": { + "foo1": "bar1", + "foo2": 123, + "sum_v": "${sum_two(1, 2)}", + "myjson": {"name": "user", "password": "123456"}, + }, + "request": { + "method": "GET", + "url": "/get", + "params": {"foo1": "$foo1", "foo2": "$foo2", "sum_v": "$sum_v"}, 
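+                # the "json" entry below holds a variable reference; it is
+                # rendered as .with_json("$myjson") in the expected chain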
+ "headers": {"User-Agent": "HttpRunner/${get_httprunner_version()}"}, + "json": "$myjson", + }, + "testcase": "CLS_LB(TestCaseDemo)CLS_RB", + "extract": { + "session_foo1": "body.args.foo1", + "session_foo2": "body.args.foo2", + }, + "validate": [ + {"eq": ["status_code", 200]}, + {"eq": ["body.args.sum_v", "3"]}, + ], + } + teststep_chain_style = make_teststep_chain_style(step) + self.assertEqual( + teststep_chain_style, + """Step(RunRequest("get with params").with_variables(**{'foo1': 'bar1', 'foo2': 123, 'sum_v': '${sum_two(1, 2)}', 'myjson': {'name': 'user', 'password': '123456'}}).get("/get").with_params(**{'foo1': '$foo1', 'foo2': '$foo2', 'sum_v': '$sum_v'}).with_headers(**{'User-Agent': 'HttpRunner/${get_httprunner_version()}'}).with_json("$myjson").extract().with_jmespath('body.args.foo1', 'session_foo1').with_jmespath('body.args.foo2', 'session_foo2').validate().assert_equal("status_code", 200).assert_equal("body.args.sum_v", "3"))""", + ) diff --git a/httprunner/models.py b/httprunner/models.py new file mode 100644 index 0000000..7994240 --- /dev/null +++ b/httprunner/models.py @@ -0,0 +1,305 @@ +import os +from enum import Enum +from typing import Any, Callable, Dict, List, Text, Union + +from pydantic import BaseModel, Field, HttpUrl + +Name = Text +Url = Text +BaseUrl = Union[HttpUrl, Text] +VariablesMapping = Dict[Text, Any] +FunctionsMapping = Dict[Text, Callable] +Headers = Dict[Text, Text] +Cookies = Dict[Text, Text] +Verify = bool +Hooks = List[Union[Text, Dict[Text, Text]]] +Export = List[Text] +Validators = List[Dict] +Env = Dict[Text, Any] + + +class MethodEnum(Text, Enum): + GET = "GET" + POST = "POST" + PUT = "PUT" + DELETE = "DELETE" + HEAD = "HEAD" + OPTIONS = "OPTIONS" + PATCH = "PATCH" + + +class ProtoType(Enum): + Binary = 1 + CyBinary = 2 + Compact = 3 + Json = 4 + + +class TransType(Enum): + Buffered = 1 + CyBuffered = 2 + Framed = 3 + CyFramed = 4 + + +# configs for thrift rpc +class TConfigThrift(BaseModel): + psm: Text = None + env: Text = None + cluster: Text = None + target: Text = None + include_dirs: List[Text] = None + thrift_client: Any = None + timeout: int = 10 + idl_path: Text = None + method: Text = None + ip: Text = "127.0.0.1" + port: int = 9000 + service_name: Text = None + proto_type: ProtoType = ProtoType.Binary + trans_type: TransType = TransType.Buffered + + +# configs for db +class TConfigDB(BaseModel): + psm: Text = None + user: Text = None + password: Text = None + ip: Text = None + port: int = 3306 + database: Text = None + + +class TransportEnum(Text, Enum): + BUFFERED = "buffered" + FRAMED = "framed" + + +class TThriftRequest(BaseModel): + """rpc request model""" + + method: Text = "" + params: Dict = {} + thrift_client: Any = None + idl_path: Text = "" # idl local path + timeout: int = 10 # sec + transport: TransportEnum = TransportEnum.BUFFERED + include_dirs: List[Union[Text, None]] = [] # param of thriftpy2.load + target: Text = "" # tcp://{ip}:{port} or sd://psm?cluster=xx&env=xx + env: Text = "prod" + cluster: Text = "default" + psm: Text = "" + service_name: Text = None + ip: Text = None + port: int = None + proto_type: ProtoType = None + trans_type: TransType = None + + +class SqlMethodEnum(Text, Enum): + FETCHONE = "FETCHONE" + FETCHMANY = "FETCHMANY" + FETCHALL = "FETCHALL" + INSERT = "INSERT" + UPDATE = "UPDATE" + DELETE = "DELETE" + + +class TSqlRequest(BaseModel): + """sql request model""" + + db_config: TConfigDB = TConfigDB() + method: SqlMethodEnum = None + sql: Text = None + size: int = 0 # limit nums of sql result 
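+
+# Illustrative usage sketch (sample values made up, not part of this module):
+# pydantic fills in defaults for any field left unset, e.g. TConfigDB.port
+# defaults to 3306.
+#
+#   req = TSqlRequest(
+#       db_config=TConfigDB(ip="127.0.0.1", database="demo"),
+#       method=SqlMethodEnum.FETCHONE,
+#       sql="SELECT 1",
+#   )
+#   assert req.db_config.port == 3306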
+
+
+class TConfig(BaseModel):
+    name: Name
+    verify: Verify = False
+    base_url: BaseUrl = ""
+    # Text: prepare variables in debugtalk.py, ${gen_variables()}
+    variables: Union[VariablesMapping, Text] = {}
+    parameters: Union[VariablesMapping, Text] = {}
+    # setup_hooks: Hooks = []
+    # teardown_hooks: Hooks = []
+    export: Export = []
+    path: Text = None
+    # configs for other protocols
+    thrift: TConfigThrift = None
+    db: TConfigDB = TConfigDB()
+
+
+class TRequest(BaseModel):
+    """requests.Request model"""
+
+    method: MethodEnum
+    url: Url
+    params: Dict[Text, Text] = {}
+    headers: Headers = {}
+    req_json: Union[Dict, List, Text] = Field(None, alias="json")
+    data: Union[Text, Dict[Text, Any]] = None
+    cookies: Cookies = {}
+    timeout: float = 120
+    allow_redirects: bool = True
+    verify: Verify = False
+    upload: Dict = {}  # used for uploading files
+
+
+class TStep(BaseModel):
+    name: Name
+    request: Union[TRequest, None] = None
+    testcase: Union[Text, Callable, None] = None
+    variables: VariablesMapping = {}
+    setup_hooks: Hooks = []
+    teardown_hooks: Hooks = []
+    # used to extract request's response field
+    extract: VariablesMapping = {}
+    # used to export session variables from referenced testcase
+    export: Export = []
+    validators: Validators = Field([], alias="validate")
+    validate_script: List[Text] = []
+    retry_times: int = 0
+    retry_interval: int = 0  # sec
+    thrift_request: Union[TThriftRequest, None] = None
+    sql_request: Union[TSqlRequest, None] = None
+
+
+class TestCase(BaseModel):
+    config: TConfig
+    teststeps: List[TStep]
+
+
+class ProjectMeta(BaseModel):
+    debugtalk_py: Text = ""  # debugtalk.py file content
+    debugtalk_path: Text = ""  # debugtalk.py file path
+    dot_env_path: Text = ""  # .env file path
+    functions: FunctionsMapping = {}  # functions defined in debugtalk.py
+    env: Env = {}
+    RootDir: Text = (
+        os.getcwd()
+    )  # project root directory (ensured absolute), i.e. the directory where debugtalk.py is located
+
+
+class TestsMapping(BaseModel):
+    project_meta: ProjectMeta
+    testcases: List[TestCase]
+
+
+class TestCaseTime(BaseModel):
+    start_at: float = 0
+    start_at_iso_format: Text = ""
+    duration: float = 0
+
+
+class TestCaseInOut(BaseModel):
+    config_vars: VariablesMapping = {}
+    export_vars: Dict = {}
+
+
+class RequestStat(BaseModel):
+    content_size: float = 0
+    response_time_ms: float = 0
+    elapsed_ms: float = 0
+
+
+class AddressData(BaseModel):
+    client_ip: Text = "N/A"
+    client_port: int = 0
+    server_ip: Text = "N/A"
+    server_port: int = 0
+
+
+class RequestData(BaseModel):
+    method: MethodEnum = MethodEnum.GET
+    url: Url
+    headers: Headers = {}
+    cookies: Cookies = {}
+    body: Union[Text, bytes, List, Dict, None] = {}
+
+
+class ResponseData(BaseModel):
+    status_code: int
+    headers: Dict
+    cookies: Cookies
+    encoding: Union[Text, None] = None
+    content_type: Text
+    body: Union[Text, bytes, List, Dict, None]
+
+
+class ReqRespData(BaseModel):
+    request: RequestData
+    response: ResponseData
+
+
+class SessionData(BaseModel):
+    """request session data, including request, response, validators and stat data"""
+
+    success: bool = False
+    # in most cases, req_resps contains only one request & response;
+    # when a 30X redirect occurs, req_resps will contain multiple requests & responses
+    req_resps: List[ReqRespData] = []
+    stat: RequestStat = RequestStat()
+    address: AddressData = AddressData()
+    validators: Dict = {}
+
+
+class StepResult(BaseModel):
+    """teststep data, each step may correspond to one request or one testcase"""
+
+    name: Text = ""  # teststep name
+    step_type: Text = ""  # teststep type, request or testcase
+    success: bool = False
+    data: Union[SessionData, List["StepResult"]] = None
+    elapsed: float = 0.0  # teststep elapsed time
+    content_size: float = 0  # response content size
+    export_vars: VariablesMapping = {}
+    attachment: Text = ""  # teststep attachment
+
+
+StepResult.update_forward_refs()
+
+
+class IStep(object):
+    def name(self) -> str:
+        raise NotImplementedError
+
+    def type(self) -> str:
+        raise NotImplementedError
+
+    def struct(self) -> TStep:
+        raise NotImplementedError
+
+    def run(self, runner) -> StepResult:
+        # runner: HttpRunner
+        raise NotImplementedError
+
+
+class TestCaseSummary(BaseModel):
+    name: Text
+    success: bool
+    case_id: Text
+    time: TestCaseTime
+    in_out: TestCaseInOut = {}
+    log: Text = ""
+    step_results: List[StepResult] = []
+
+
+class PlatformInfo(BaseModel):
+    httprunner_version: Text
+    python_version: Text
+    platform: Text
+
+
+class Stat(BaseModel):
+    total: int = 0
+    success: int = 0
+    fail: int = 0
+
+
+class TestSuiteSummary(BaseModel):
+    success: bool = False
+    stat: Stat = Stat()
+    time: TestCaseTime = TestCaseTime()
+    platform: PlatformInfo
+    testcases: List[TestCaseSummary]
diff --git a/httprunner/parser.py b/httprunner/parser.py
new file mode 100644
index 0000000..e1adb7b
--- /dev/null
+++ b/httprunner/parser.py
@@ -0,0 +1,606 @@
+import ast
+import builtins
+import os
+import re
+from typing import Any, Callable, Dict, List, Set, Text
+from urllib.parse import urlparse
+
+from loguru import logger
+
+from httprunner import exceptions, loader, utils
+from httprunner.models import FunctionsMapping, VariablesMapping
+
+# use $$ to escape $ notation
+dolloar_regex_compile = re.compile(r"\$\$")
+# variable notation, e.g. ${var} or $var
+# variable should start with a-zA-Z_
+variable_regex_compile = re.compile(r"\$\{([a-zA-Z_]\w*)\}|\$([a-zA-Z_]\w*)")
+# function notation, e.g. ${func1($var_1, $var_3)}
+function_regex_compile = re.compile(r"\$\{([a-zA-Z_]\w*)\(([\$\w\.\-/\s=,]*)\)\}")
+
+
+def parse_string_value(str_value: Text) -> Any:
+    """parse string to number if possible
+    e.g. "123" => 123
+         "12.2" => 12.2
+         "abc" => "abc"
+         "$var" => "$var"
+    """
+    try:
+        return ast.literal_eval(str_value)
+    except ValueError:
+        return str_value
+    except SyntaxError:
+        # e.g.
$var, ${func} + return str_value + + +def build_url(base_url, step_url): + """prepend url with base_url unless it's already an absolute URL""" + o_step_url = urlparse(step_url) + if o_step_url.netloc != "": + # step url is absolute url + return step_url + + # step url is relative, based on base url + o_base_url = urlparse(base_url) + if o_base_url.netloc == "": + # missed base url + raise exceptions.ParamsError("base url missed!") + + path = o_base_url.path.rstrip("/") + "/" + o_step_url.path.lstrip("/") + o_step_url = ( + o_step_url._replace(scheme=o_base_url.scheme) + ._replace(netloc=o_base_url.netloc) + ._replace(path=path) + ) + return o_step_url.geturl() + + +def regex_findall_variables(raw_string: Text) -> List[Text]: + """extract all variable names from content, which is in format $variable + + Args: + raw_string (str): string content + + Returns: + list: variables list extracted from string content + + Examples: + >>> regex_findall_variables("$variable") + ["variable"] + + >>> regex_findall_variables("/blog/$postid") + ["postid"] + + >>> regex_findall_variables("/$var1/$var2") + ["var1", "var2"] + + >>> regex_findall_variables("abc") + [] + + """ + try: + match_start_position = raw_string.index("$", 0) + except ValueError: + return [] + + vars_list = [] + while match_start_position < len(raw_string): + + # Notice: notation priority + # $$ > $var + + # search $$ + dollar_match = dolloar_regex_compile.match(raw_string, match_start_position) + if dollar_match: + match_start_position = dollar_match.end() + continue + + # search variable like ${var} or $var + var_match = variable_regex_compile.match(raw_string, match_start_position) + if var_match: + var_name = var_match.group(1) or var_match.group(2) + vars_list.append(var_name) + match_start_position = var_match.end() + continue + + curr_position = match_start_position + try: + # find next $ location + match_start_position = raw_string.index("$", curr_position + 1) + except ValueError: + # break while loop + break + + return vars_list + + +def regex_findall_functions(content: Text) -> List[Text]: + """extract all functions from string content, which are in format ${fun()} + + Args: + content (str): string content + + Returns: + list: functions list extracted from string content + + Examples: + >>> regex_findall_functions("${func(5)}") + ["func(5)"] + + >>> regex_findall_functions("${func(a=1, b=2)}") + ["func(a=1, b=2)"] + + >>> regex_findall_functions("/api/1000?_t=${get_timestamp()}") + ["get_timestamp()"] + + >>> regex_findall_functions("/api/${add(1, 2)}") + ["add(1, 2)"] + + >>> regex_findall_functions("/api/${add(1, 2)}?_t=${get_timestamp()}") + ["add(1, 2)", "get_timestamp()"] + + """ + try: + return function_regex_compile.findall(content) + except TypeError as ex: + logger.error(f"regex findall functions error: {ex}") + return [] + + +def extract_variables(content: Any) -> Set: + """extract all variables in content recursively.""" + if isinstance(content, (list, set, tuple)): + variables = set() + for item in content: + variables = variables | extract_variables(item) + return variables + + elif isinstance(content, dict): + variables = set() + for key, value in content.items(): + variables = variables | extract_variables(value) + return variables + + elif isinstance(content, str): + return set(regex_findall_variables(content)) + + return set() + + +def parse_function_params(params: Text) -> Dict: + """parse function params to args and kwargs. 
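+    Splitting is naive: the string is split on "," and "=" with no quote or
+    escape handling, so parameter values containing those characters are not
+    supported here.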
+ + Args: + params (str): function param in string + + Returns: + dict: function meta dict + + { + "args": [], + "kwargs": {} + } + + Examples: + >>> parse_function_params("") + {'args': [], 'kwargs': {}} + + >>> parse_function_params("5") + {'args': [5], 'kwargs': {}} + + >>> parse_function_params("1, 2") + {'args': [1, 2], 'kwargs': {}} + + >>> parse_function_params("a=1, b=2") + {'args': [], 'kwargs': {'a': 1, 'b': 2}} + + >>> parse_function_params("1, 2, a=3, b=4") + {'args': [1, 2], 'kwargs': {'a':3, 'b':4}} + + """ + function_meta = {"args": [], "kwargs": {}} + + params_str = params.strip() + if params_str == "": + return function_meta + + args_list = params_str.split(",") + for arg in args_list: + arg = arg.strip() + if "=" in arg: + key, value = arg.split("=") + function_meta["kwargs"][key.strip()] = parse_string_value(value.strip()) + else: + function_meta["args"].append(parse_string_value(arg)) + + return function_meta + + +def get_mapping_variable( + variable_name: Text, variables_mapping: VariablesMapping +) -> Any: + """get variable from variables_mapping. + + Args: + variable_name (str): variable name + variables_mapping (dict): variables mapping + + Returns: + mapping variable value. + + Raises: + exceptions.VariableNotFound: variable is not found. + + """ + # TODO: get variable from debugtalk module and environ + try: + return variables_mapping[variable_name] + except KeyError: + raise exceptions.VariableNotFound( + f"{variable_name} not found in {variables_mapping}" + ) + + +def get_mapping_function( + function_name: Text, functions_mapping: FunctionsMapping +) -> Callable: + """get function from functions_mapping, + if not found, then try to check if builtin function. + + Args: + function_name (str): function name + functions_mapping (dict): functions mapping + + Returns: + mapping function object. + + Raises: + exceptions.FunctionNotFound: function is neither defined in debugtalk.py nor builtin. + + """ + if function_name in functions_mapping: + return functions_mapping[function_name] + + elif function_name in ["parameterize", "P"]: + return loader.load_csv_file + + elif function_name in ["environ", "ENV"]: + return utils.get_os_environ + + elif function_name in ["multipart_encoder", "multipart_content_type"]: + # extension for upload test + from httprunner.ext import uploader + + return getattr(uploader, function_name) + + try: + # check if HttpRunner builtin functions + built_in_functions = loader.load_builtin_functions() + return built_in_functions[function_name] + except KeyError: + pass + + try: + # check if Python builtin functions + return getattr(builtins, function_name) + except AttributeError: + pass + + raise exceptions.FunctionNotFound(f"{function_name} is not found.") + + +def parse_string( + raw_string: Text, + variables_mapping: VariablesMapping, + functions_mapping: FunctionsMapping, +) -> Any: + """parse string content with variables and functions mapping. + + Args: + raw_string: raw string content to be parsed. + variables_mapping: variables mapping. + functions_mapping: functions mapping. + + Returns: + str: parsed string content. 
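+        Note: if raw_string is exactly one variable or function expression,
+        e.g. "$var" or "${func(1)}", its evaluated value is returned as-is,
+        preserving the original type.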
+ + Examples: + >>> raw_string = "abc${add_one($num)}def" + >>> variables_mapping = {"num": 3} + >>> functions_mapping = {"add_one": lambda x: x + 1} + >>> parse_string(raw_string, variables_mapping, functions_mapping) + "abc4def" + + """ + try: + match_start_position = raw_string.index("$", 0) + parsed_string = raw_string[0:match_start_position] + except ValueError: + parsed_string = raw_string + return parsed_string + + while match_start_position < len(raw_string): + + # Notice: notation priority + # $$ > ${func($a, $b)} > $var + + # search $$ + dollar_match = dolloar_regex_compile.match(raw_string, match_start_position) + if dollar_match: + match_start_position = dollar_match.end() + parsed_string += "$" + continue + + # search function like ${func($a, $b)} + func_match = function_regex_compile.match(raw_string, match_start_position) + if func_match: + func_name = func_match.group(1) + func = get_mapping_function(func_name, functions_mapping) + + func_params_str = func_match.group(2) + function_meta = parse_function_params(func_params_str) + args = function_meta["args"] + kwargs = function_meta["kwargs"] + parsed_args = parse_data(args, variables_mapping, functions_mapping) + parsed_kwargs = parse_data(kwargs, variables_mapping, functions_mapping) + + try: + func_eval_value = func(*parsed_args, **parsed_kwargs) + except Exception as ex: + logger.error( + f"call function error:\n" + f"func_name: {func_name}\n" + f"args: {parsed_args}\n" + f"kwargs: {parsed_kwargs}\n" + f"{type(ex).__name__}: {ex}" + ) + raise + + func_raw_str = "${" + func_name + f"({func_params_str})" + "}" + if func_raw_str == raw_string: + # raw_string is a function, e.g. "${add_one(3)}", return its eval value directly + return func_eval_value + + # raw_string contains one or many functions, e.g. "abc${add_one(3)}def" + parsed_string += str(func_eval_value) + match_start_position = func_match.end() + continue + + # search variable like ${var} or $var + var_match = variable_regex_compile.match(raw_string, match_start_position) + if var_match: + var_name = var_match.group(1) or var_match.group(2) + var_value = get_mapping_variable(var_name, variables_mapping) + + if f"${var_name}" == raw_string or "${" + var_name + "}" == raw_string: + # raw_string is a variable, $var or ${var}, return its value directly + return var_value + + # raw_string contains one or many variables, e.g. "abc${var}def" + parsed_string += str(var_value) + match_start_position = var_match.end() + continue + + curr_position = match_start_position + try: + # find next $ location + match_start_position = raw_string.index("$", curr_position + 1) + remain_string = raw_string[curr_position:match_start_position] + except ValueError: + remain_string = raw_string[curr_position:] + # break while loop + match_start_position = len(raw_string) + + parsed_string += remain_string + + return parsed_string + + +def parse_data( + raw_data: Any, + variables_mapping: VariablesMapping = None, + functions_mapping: FunctionsMapping = None, +) -> Any: + """parse raw data with evaluated variables mapping. + Notice: variables_mapping should not contain any variable or function. 
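+
+    Examples:
+        >>> parse_data("/users/$uid", {"uid": 100}, {})
+        '/users/100'
+        >>> parse_data({"msg": "hello $name"}, {"name": "HttpRunner"}, {})
+        {'msg': 'hello HttpRunner'}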
+    """
+    if isinstance(raw_data, str):
+        # content in string format may contain variables and functions
+        variables_mapping = variables_mapping or {}
+        functions_mapping = functions_mapping or {}
+        # only strip spaces and tabs; \n and \r are kept because they may be
+        # meaningful in the content
+        raw_data = raw_data.strip(" \t")
+        return parse_string(raw_data, variables_mapping, functions_mapping)
+
+    elif isinstance(raw_data, (list, set, tuple)):
+        return [
+            parse_data(item, variables_mapping, functions_mapping) for item in raw_data
+        ]
+
+    elif isinstance(raw_data, dict):
+        parsed_data = {}
+        for key, value in raw_data.items():
+            parsed_key = parse_data(key, variables_mapping, functions_mapping)
+            parsed_value = parse_data(value, variables_mapping, functions_mapping)
+            parsed_data[parsed_key] = parsed_value
+
+        return parsed_data
+
+    else:
+        # other types, e.g. None, int, float, bool
+        return raw_data
+
+
+def parse_variables_mapping(
+    variables_mapping: VariablesMapping, functions_mapping: FunctionsMapping = None
+) -> VariablesMapping:
+
+    parsed_variables: VariablesMapping = {}
+
+    while len(parsed_variables) != len(variables_mapping):
+        for var_name in variables_mapping:
+
+            if var_name in parsed_variables:
+                continue
+
+            var_value = variables_mapping[var_name]
+            variables = extract_variables(var_value)
+
+            # check if the variable references itself
+            if var_name in variables:
+                # e.g.
+                # variables_mapping = {"token": "abc$token"}
+                # variables_mapping = {"key": ["$key", 2]}
+                raise exceptions.VariableNotFound(var_name)
+
+            # check if referenced variables are missing from variables_mapping
+            not_defined_variables = [
+                v_name for v_name in variables if v_name not in variables_mapping
+            ]
+            if not_defined_variables:
+                # e.g. {"varA": "123$varB", "varB": "456$varC"}
+                # e.g. {"varC": "${sum_two($a, $b)}"}
+                raise exceptions.VariableNotFound(not_defined_variables)
+
+            try:
+                parsed_value = parse_data(
+                    var_value, parsed_variables, functions_mapping
+                )
+            except exceptions.VariableNotFound:
+                continue
+
+            parsed_variables[var_name] = parsed_value
+
+    return parsed_variables
+
+
+def parse_parameters(
+    parameters: Dict,
+) -> List[Dict]:
+    """parse parameters and generate cartesian product.
+
+    Args:
+        parameters (Dict): parameter name and value mapping
+            parameter value may be in three types:
+                (1) data list, e.g. ["iOS/10.1", "iOS/10.2", "iOS/10.3"]
+                (2) call built-in parameterize function, "${parameterize(account.csv)}"
+                (3) call custom function in debugtalk.py, "${gen_app_version()}"
+
+    Returns:
+        list: cartesian product list
+
+    Examples:
+        >>> parameters = {
+            "user_agent": ["iOS/10.1", "iOS/10.2", "iOS/10.3"],
+            "username-password": "${parameterize(account.csv)}",
+            "app_version": "${gen_app_version()}",
+        }
+        >>> parse_parameters(parameters)
+
+    """
+    parsed_parameters_list: List[List[Dict]] = []
+
+    # load project_meta functions
+    project_meta = loader.load_project_meta(os.getcwd())
+    functions_mapping = project_meta.functions
+
+    for parameter_name, parameter_content in parameters.items():
+        parameter_name_list = parameter_name.split("-")
+
+        if isinstance(parameter_content, List):
+            # (1) data list
+            # e.g. {"app_version": ["2.8.5", "2.8.6"]}
+            #   => [{"app_version": "2.8.5"}, {"app_version": "2.8.6"}]
+            # e.g. {"username-password": [["user1", "111111"], ["user2", "222222"]]}
+            #   => [{"username": "user1", "password": "111111"}, {"username": "user2", "password": "222222"}]
+            parameter_content_list: List[Dict] = []
+            for parameter_item in parameter_content:
+                if not isinstance(parameter_item, (list, tuple)):
+                    # "2.8.5" => ["2.8.5"]
+                    parameter_item = [parameter_item]
+
+                # ["app_version"], ["2.8.5"] => {"app_version": "2.8.5"}
+                # ["username", "password"], ["user1", "111111"] => {"username": "user1", "password": "111111"}
+                parameter_content_dict = dict(zip(parameter_name_list, parameter_item))
+                parameter_content_list.append(parameter_content_dict)
+
+        elif isinstance(parameter_content, Text):
+            # (2) & (3)
+            parsed_parameter_content: List = parse_data(
+                parameter_content, {}, functions_mapping
+            )
+            if not isinstance(parsed_parameter_content, List):
+                raise exceptions.ParamsError(
+                    f"parameters content should be in List type, got {parsed_parameter_content} for {parameter_content}"
+                )
+
+            parameter_content_list: List[Dict] = []
+            for parameter_item in parsed_parameter_content:
+                if isinstance(parameter_item, Dict):
+                    # get subset by parameter name
+                    # {"app_version": "${gen_app_version()}"}
+                    # gen_app_version() => [{'app_version': '2.8.5'}, {'app_version': '2.8.6'}]
+                    # {"username-password": "${get_account()}"}
+                    # get_account() => [
+                    #       {"username": "user1", "password": "111111"},
+                    #       {"username": "user2", "password": "222222"}
+                    # ]
+                    parameter_dict: Dict = {
+                        key: parameter_item[key] for key in parameter_name_list
+                    }
+                elif isinstance(parameter_item, (List, tuple)):
+                    if len(parameter_name_list) == len(parameter_item):
+                        # {"username-password": "${get_account()}"}
+                        # get_account() => [("user1", "111111"), ("user2", "222222")]
+                        parameter_dict = dict(zip(parameter_name_list, parameter_item))
+                    else:
+                        raise exceptions.ParamsError(
+                            f"length of parameter names does not match length of values.\n"
+                            f"parameter names: {parameter_name_list}\n"
+                            f"parameter values: {parameter_item}"
+                        )
+                elif len(parameter_name_list) == 1:
+                    # {"user_agent": "${get_user_agent()}"}
+                    # get_user_agent() => ["iOS/10.1", "iOS/10.2"]
+                    # parameter_dict will get: {"user_agent": "iOS/10.1"} and {"user_agent": "iOS/10.2"}
+                    parameter_dict = {parameter_name_list[0]: parameter_item}
+                else:
+                    raise exceptions.ParamsError(
+                        f"Invalid parameter names and values:\n"
+                        f"parameter names: {parameter_name_list}\n"
+                        f"parameter values: {parameter_item}"
+                    )
+
+                parameter_content_list.append(parameter_dict)
+
+        else:
+            raise exceptions.ParamsError(
+                f"parameter content should be List or Text(variables or functions call), got {parameter_content}"
+            )
+
+        parsed_parameters_list.append(parameter_content_list)
+
+    return utils.gen_cartesian_product(*parsed_parameters_list)
+
+
+class Parser(object):
+    def __init__(self, functions_mapping: FunctionsMapping = None) -> None:
+        self.functions_mapping = functions_mapping
+
+    def parse_string(
+        self, raw_string: Text, variables_mapping: VariablesMapping
+    ) -> Any:
+        return parse_string(raw_string, variables_mapping, self.functions_mapping)
+
+    def parse_variables(self, variables_mapping: VariablesMapping) -> VariablesMapping:
+        return parse_variables_mapping(variables_mapping, self.functions_mapping)
+
+    def parse_data(
+        self, raw_data: Any, variables_mapping: VariablesMapping = None
+    ) -> Any:
+        return parse_data(raw_data, variables_mapping, self.functions_mapping)
+
+    def get_mapping_function(self, func_name: Text) -> Callable:
+        return get_mapping_function(func_name,
self.functions_mapping) diff --git a/httprunner/parser_test.py b/httprunner/parser_test.py new file mode 100644 index 0000000..2ac7542 --- /dev/null +++ b/httprunner/parser_test.py @@ -0,0 +1,574 @@ +import os +import time +import unittest + +from httprunner import parser +from httprunner.exceptions import FunctionNotFound, VariableNotFound +from httprunner.loader import load_project_meta + + +class TestParserBasic(unittest.TestCase): + def test_build_url(self): + url = parser.build_url("https://postman-echo.com", "/get") + self.assertEqual(url, "https://postman-echo.com/get") + url = parser.build_url("https://postman-echo.com", "get") + self.assertEqual(url, "https://postman-echo.com/get") + url = parser.build_url("https://postman-echo.com/", "/get") + self.assertEqual(url, "https://postman-echo.com/get") + + url = parser.build_url("https://postman-echo.com/abc/", "/get?a=1&b=2") + self.assertEqual(url, "https://postman-echo.com/abc/get?a=1&b=2") + url = parser.build_url("https://postman-echo.com/abc/", "get?a=1&b=2") + self.assertEqual(url, "https://postman-echo.com/abc/get?a=1&b=2") + + # omit query string in base url + url = parser.build_url("https://postman-echo.com/abc?x=6&y=9", "/get?a=1&b=2") + self.assertEqual(url, "https://postman-echo.com/abc/get?a=1&b=2") + + url = parser.build_url("", "https://postman-echo.com/get") + self.assertEqual(url, "https://postman-echo.com/get") + + # notice: step request url > config base url + url = parser.build_url("https://postman-echo.com", "https://httpbin.org/get") + self.assertEqual(url, "https://httpbin.org/get") + + def test_parse_variables_mapping(self): + variables = {"varA": "$varB", "varB": "$varC", "varC": "123", "a": 1, "b": 2} + parsed_variables = parser.parse_variables_mapping(variables) + print(parsed_variables) + self.assertEqual(parsed_variables["varA"], "123") + self.assertEqual(parsed_variables["varB"], "123") + + def test_parse_variables_mapping_exception(self): + variables = {"varA": "$varB", "varB": "$varC", "a": 1, "b": 2} + with self.assertRaises(VariableNotFound): + parser.parse_variables_mapping(variables) + + def test_parse_string_value(self): + self.assertEqual(parser.parse_string_value("123"), 123) + self.assertEqual(parser.parse_string_value("12.3"), 12.3) + self.assertEqual(parser.parse_string_value("a123"), "a123") + self.assertEqual(parser.parse_string_value("$var"), "$var") + self.assertEqual(parser.parse_string_value("${func}"), "${func}") + + def test_regex_findall_variables(self): + self.assertEqual(parser.regex_findall_variables("$variable"), ["variable"]) + self.assertEqual(parser.regex_findall_variables("${variable}123"), ["variable"]) + self.assertEqual(parser.regex_findall_variables("/blog/$postid"), ["postid"]) + self.assertEqual( + parser.regex_findall_variables("/$var1/$var2"), ["var1", "var2"] + ) + self.assertEqual(parser.regex_findall_variables("abc"), []) + self.assertEqual(parser.regex_findall_variables("Z:2>1*0*1+1$a"), ["a"]) + self.assertEqual(parser.regex_findall_variables("Z:2>1*0*1+1$$a"), []) + self.assertEqual(parser.regex_findall_variables("Z:2>1*0*1+1$$$a"), ["a"]) + self.assertEqual(parser.regex_findall_variables("Z:2>1*0*1+1$$$$a"), []) + self.assertEqual(parser.regex_findall_variables("Z:2>1*0*1+1$$a$b"), ["b"]) + self.assertEqual(parser.regex_findall_variables("Z:2>1*0*1+1$$a$$b"), []) + # variable should not start with digit + self.assertEqual(parser.regex_findall_variables("$1a"), []) + self.assertEqual(parser.regex_findall_variables("${1a}"), []) + + def 
test_extract_variables(self): + self.assertEqual(parser.extract_variables("$var"), {"var"}) + self.assertEqual(parser.extract_variables("$var123"), {"var123"}) + self.assertEqual(parser.extract_variables("$var_name"), {"var_name"}) + self.assertEqual(parser.extract_variables("var"), set()) + self.assertEqual(parser.extract_variables("a$var"), {"var"}) + self.assertEqual(parser.extract_variables("$v ar"), {"v"}) + self.assertEqual(parser.extract_variables(" "), set()) + self.assertEqual(parser.extract_variables("$abc*"), {"abc"}) + self.assertEqual(parser.extract_variables("${func()}"), set()) + self.assertEqual(parser.extract_variables("${func(1,2)}"), set()) + self.assertEqual( + parser.extract_variables("${gen_md5($TOKEN, $data, $random)}"), + {"TOKEN", "data", "random"}, + ) + self.assertEqual(parser.extract_variables("Z:2>1*0*1+1$$1"), set()) + + def test_parse_function_params(self): + self.assertEqual(parser.parse_function_params(""), {"args": [], "kwargs": {}}) + self.assertEqual(parser.parse_function_params("5"), {"args": [5], "kwargs": {}}) + self.assertEqual( + parser.parse_function_params("1, 2"), {"args": [1, 2], "kwargs": {}} + ) + self.assertEqual( + parser.parse_function_params("a=1, b=2"), + {"args": [], "kwargs": {"a": 1, "b": 2}}, + ) + self.assertEqual( + parser.parse_function_params("a= 1, b =2"), + {"args": [], "kwargs": {"a": 1, "b": 2}}, + ) + self.assertEqual( + parser.parse_function_params("1, 2, a=3, b=4"), + {"args": [1, 2], "kwargs": {"a": 3, "b": 4}}, + ) + self.assertEqual( + parser.parse_function_params("$request, 123"), + {"args": ["$request", 123], "kwargs": {}}, + ) + self.assertEqual(parser.parse_function_params(" "), {"args": [], "kwargs": {}}) + self.assertEqual( + parser.parse_function_params("hello world, a=3, b=4"), + {"args": ["hello world"], "kwargs": {"a": 3, "b": 4}}, + ) + self.assertEqual( + parser.parse_function_params("$request, 12 3"), + {"args": ["$request", "12 3"], "kwargs": {}}, + ) + + def test_extract_functions(self): + self.assertEqual(parser.regex_findall_functions("${func()}"), [("func", "")]) + self.assertEqual(parser.regex_findall_functions("${func(5)}"), [("func", "5")]) + self.assertEqual( + parser.regex_findall_functions("${func(a=1, b=2)}"), [("func", "a=1, b=2")] + ) + self.assertEqual( + parser.regex_findall_functions("${func(1, $b, c=$x, d=4)}"), + [("func", "1, $b, c=$x, d=4")], + ) + self.assertEqual( + parser.regex_findall_functions("/api/1000?_t=${get_timestamp()}"), + [("get_timestamp", "")], + ) + self.assertEqual( + parser.regex_findall_functions("/api/${add(1, 2)}"), [("add", "1, 2")] + ) + self.assertEqual( + parser.regex_findall_functions("/api/${add(1, 2)}?_t=${get_timestamp()}"), + [("add", "1, 2"), ("get_timestamp", "")], + ) + self.assertEqual( + parser.regex_findall_functions("abc${func(1, 2, a=3, b=4)}def"), + [("func", "1, 2, a=3, b=4")], + ) + + def test_parse_data_string_with_variables(self): + variables_mapping = { + "var_1": "abc", + "var_2": "def", + "var_3": 123, + "var_4": {"a": 1}, + "var_5": True, + "var_6": None, + } + self.assertEqual(parser.parse_data("$var_1", variables_mapping), "abc") + self.assertEqual(parser.parse_data("${var_1}", variables_mapping), "abc") + self.assertEqual(parser.parse_data("var_1", variables_mapping), "var_1") + self.assertEqual(parser.parse_data("$var_1#XYZ", variables_mapping), "abc#XYZ") + self.assertEqual( + parser.parse_data("${var_1}#XYZ", variables_mapping), "abc#XYZ" + ) + self.assertEqual( + parser.parse_data("/$var_1/$var_2/var3", variables_mapping), 
"/abc/def/var3" + ) + self.assertEqual(parser.parse_data("$var_3", variables_mapping), 123) + self.assertEqual(parser.parse_data("$var_4", variables_mapping), {"a": 1}) + self.assertEqual(parser.parse_data("$var_5", variables_mapping), True) + self.assertEqual(parser.parse_data("abc$var_5", variables_mapping), "abcTrue") + self.assertEqual( + parser.parse_data("abc$var_4", variables_mapping), "abc{'a': 1}" + ) + self.assertEqual(parser.parse_data("$var_6", variables_mapping), None) + + with self.assertRaises(VariableNotFound): + parser.parse_data("/api/$SECRET_KEY", variables_mapping) + + self.assertEqual( + parser.parse_data(["$var_1", "$var_2"], variables_mapping), ["abc", "def"] + ) + self.assertEqual( + parser.parse_data({"$var_1": "$var_2"}, variables_mapping), {"abc": "def"} + ) + + # format: $var + value = parser.parse_data("ABC$var_1", variables_mapping) + self.assertEqual(value, "ABCabc") + + value = parser.parse_data("ABC$var_1$var_3", variables_mapping) + self.assertEqual(value, "ABCabc123") + + value = parser.parse_data("ABC$var_1/$var_3", variables_mapping) + self.assertEqual(value, "ABCabc/123") + + value = parser.parse_data("ABC$var_1/", variables_mapping) + self.assertEqual(value, "ABCabc/") + + value = parser.parse_data("ABC$var_1$", variables_mapping) + self.assertEqual(value, "ABCabc$") + + value = parser.parse_data("ABC$var_1/123$var_1/456", variables_mapping) + self.assertEqual(value, "ABCabc/123abc/456") + + value = parser.parse_data("ABC$var_1/$var_2/$var_1", variables_mapping) + self.assertEqual(value, "ABCabc/def/abc") + + value = parser.parse_data("func1($var_1, $var_3)", variables_mapping) + self.assertEqual(value, "func1(abc, 123)") + + # format: ${var} + value = parser.parse_data("ABC${var_1}", variables_mapping) + self.assertEqual(value, "ABCabc") + + value = parser.parse_data("ABC${var_1}${var_3}", variables_mapping) + self.assertEqual(value, "ABCabc123") + + value = parser.parse_data("ABC${var_1}/${var_3}", variables_mapping) + self.assertEqual(value, "ABCabc/123") + + value = parser.parse_data("ABC${var_1}/", variables_mapping) + self.assertEqual(value, "ABCabc/") + + value = parser.parse_data("ABC${var_1}123", variables_mapping) + self.assertEqual(value, "ABCabc123") + + value = parser.parse_data("ABC${var_1}/123${var_1}/456", variables_mapping) + self.assertEqual(value, "ABCabc/123abc/456") + + value = parser.parse_data("ABC${var_1}/${var_2}/${var_1}", variables_mapping) + self.assertEqual(value, "ABCabc/def/abc") + + value = parser.parse_data("func1(${var_1}, ${var_3})", variables_mapping) + self.assertEqual(value, "func1(abc, 123)") + + def test_parse_data_multiple_identical_variables(self): + variables_mapping = { + "var_1": "abc", + "var_2": "def", + } + self.assertEqual( + parser.parse_data("/$var_1/$var_2/$var_1", variables_mapping), + "/abc/def/abc", + ) + + variables_mapping = {"userid": 100, "data": 1498} + content = "/users/$userid/training/$data?userId=$userid&data=$data" + self.assertEqual( + parser.parse_data(content, variables_mapping), + "/users/100/training/1498?userId=100&data=1498", + ) + + variables_mapping = {"user": 100, "userid": 1000, "data": 1498} + content = "/users/$user/$userid/$data?userId=$userid&data=$data" + self.assertEqual( + parser.parse_data(content, variables_mapping), + "/users/100/1000/1498?userId=1000&data=1498", + ) + + def test_parse_data_string_with_functions(self): + import random + import string + + functions_mapping = { + "gen_random_string": lambda str_len: "".join( + random.choice(string.ascii_letters + 
string.digits) + for _ in range(str_len) + ) + } + result = parser.parse_data( + "${gen_random_string(5)}", functions_mapping=functions_mapping + ) + self.assertEqual(len(result), 5) + + functions_mapping["add_two_nums"] = lambda a, b=1: a + b + self.assertEqual( + parser.parse_data( + "${add_two_nums(1)}", functions_mapping=functions_mapping + ), + 2, + ) + self.assertEqual( + parser.parse_data( + "${add_two_nums(1, 2)}", functions_mapping=functions_mapping + ), + 3, + ) + self.assertEqual( + parser.parse_data( + "/api/${add_two_nums(1, 2)}", functions_mapping=functions_mapping + ), + "/api/3", + ) + + with self.assertRaises(FunctionNotFound): + parser.parse_data("/api/${gen_md5(abc)}") + + variables_mapping = { + "var_1": "abc", + "var_2": "def", + "var_3": 123, + "var_4": {"a": 1}, + "var_5": True, + "var_6": None, + } + functions_mapping = {"func1": lambda x, y: str(x) + str(y)} + + value = parser.parse_data( + "${func1($var_1, $var_3)}", variables_mapping, functions_mapping + ) + self.assertEqual(value, "abc123") + + value = parser.parse_data( + "ABC${func1($var_1, $var_3)}DE", variables_mapping, functions_mapping + ) + self.assertEqual(value, "ABCabc123DE") + + value = parser.parse_data( + "ABC${func1($var_1, $var_3)}$var_5", variables_mapping, functions_mapping + ) + self.assertEqual(value, "ABCabc123True") + + value = parser.parse_data( + "ABC${func1($var_1, $var_3)}DE$var_4", variables_mapping, functions_mapping + ) + self.assertEqual(value, "ABCabc123DE{'a': 1}") + + value = parser.parse_data( + "ABC$var_5${func1($var_1, $var_3)}", variables_mapping, functions_mapping + ) + self.assertEqual(value, "ABCTrueabc123") + + value = parser.parse_data( + "ABC${ord(a)}DEF${len(abcd)}", variables_mapping, functions_mapping + ) + self.assertEqual(value, "ABC97DEF4") + + def test_parse_data_func_var_duplicate(self): + variables_mapping = { + "var_1": "abc", + "var_2": "def", + "var_3": 123, + "var_4": {"a": 1}, + "var_5": True, + "var_6": None, + } + functions_mapping = {"func1": lambda x, y: str(x) + str(y)} + value = parser.parse_data( + "ABC${func1($var_1, $var_3)}--${func1($var_1, $var_3)}", + variables_mapping, + functions_mapping, + ) + self.assertEqual(value, "ABCabc123--abc123") + + value = parser.parse_data( + "ABC${func1($var_1, $var_3)}$var_1", variables_mapping, functions_mapping + ) + self.assertEqual(value, "ABCabc123abc") + + value = parser.parse_data( + "ABC${func1($var_1, $var_3)}$var_1--${func1($var_1, $var_3)}$var_1", + variables_mapping, + functions_mapping, + ) + self.assertEqual(value, "ABCabc123abc--abc123abc") + + def test_parse_data_func_abnormal(self): + variables_mapping = { + "var_1": "abc", + "var_2": "def", + "var_3": 123, + "var_4": {"a": 1}, + "var_5": True, + "var_6": None, + } + functions_mapping = {"func1": lambda x, y: str(x) + str(y)} + + # { + value = parser.parse_data("ABC$var_1{", variables_mapping, functions_mapping) + self.assertEqual(value, "ABCabc{") + + value = parser.parse_data( + "{ABC$var_1{}a}", variables_mapping, functions_mapping + ) + self.assertEqual(value, "{ABCabc{}a}") + + value = parser.parse_data( + "AB{C$var_1{}a}", variables_mapping, functions_mapping + ) + self.assertEqual(value, "AB{Cabc{}a}") + + # } + value = parser.parse_data("ABC$var_1}", variables_mapping, functions_mapping) + self.assertEqual(value, "ABCabc}") + + # $$ + value = parser.parse_data("ABC$$var_1{", variables_mapping, functions_mapping) + self.assertEqual(value, "ABC$var_1{") + + # $$$ + value = parser.parse_data("ABC$$$var_1{", variables_mapping, 
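+            # "$$" is matched first as an escaped dollar, then "$var_1"
+            # resolves to "abc", so the result is "ABC$abc{"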
functions_mapping) + self.assertEqual(value, "ABC$abc{") + + # $$$$ + value = parser.parse_data("ABC$$$$var_1{", variables_mapping, functions_mapping) + self.assertEqual(value, "ABC$$var_1{") + + # ${ + value = parser.parse_data("ABC$var_1${", variables_mapping, functions_mapping) + self.assertEqual(value, "ABCabc${") + + value = parser.parse_data("ABC$var_1${a", variables_mapping, functions_mapping) + self.assertEqual(value, "ABCabc${a") + + # $} + value = parser.parse_data("ABC$var_1$}a", variables_mapping, functions_mapping) + self.assertEqual(value, "ABCabc$}a") + + # }{ + value = parser.parse_data("ABC$var_1}{a", variables_mapping, functions_mapping) + self.assertEqual(value, "ABCabc}{a") + + # {} + value = parser.parse_data("ABC$var_1{}a", variables_mapping, functions_mapping) + self.assertEqual(value, "ABCabc{}a") + + def test_parse_data_request(self): + content = { + "request": { + "url": "/api/users/$uid", + "method": "$method", + "headers": {"token": "$token"}, + "data": { + "null": None, + "true": True, + "false": False, + "empty_str": "", + "value": "abc${add_one(3)}def", + }, + } + } + variables_mapping = {"uid": 1000, "method": "POST", "token": "abc123"} + functions_mapping = {"add_one": lambda x: x + 1} + result = parser.parse_data(content, variables_mapping, functions_mapping) + self.assertEqual("/api/users/1000", result["request"]["url"]) + self.assertEqual("abc123", result["request"]["headers"]["token"]) + self.assertEqual("POST", result["request"]["method"]) + self.assertIsNone(result["request"]["data"]["null"]) + self.assertTrue(result["request"]["data"]["true"]) + self.assertFalse(result["request"]["data"]["false"]) + self.assertEqual("", result["request"]["data"]["empty_str"]) + self.assertEqual("abc4def", result["request"]["data"]["value"]) + + def test_parse_data_testcase(self): + variables = { + "uid": "1000", + "random": "A2dEx", + "authorization": "a83de0ff8d2e896dbd8efb81ba14e17d", + "data": {"name": "user", "password": "123456"}, + } + functions = { + "add_two_nums": lambda a, b=1: a + b, + "get_timestamp": lambda: int(time.time() * 1000), + } + testcase_template = { + "url": "http://127.0.0.1:5000/api/users/$uid/${add_two_nums(1,2)}", + "method": "POST", + "headers": { + "Content-Type": "application/json", + "authorization": "$authorization", + "random": "$random", + "sum": "${add_two_nums(1, 2)}", + }, + "body": "$data", + } + parsed_testcase = parser.parse_data(testcase_template, variables, functions) + self.assertEqual( + parsed_testcase["url"], "http://127.0.0.1:5000/api/users/1000/3" + ) + self.assertEqual( + parsed_testcase["headers"]["authorization"], variables["authorization"] + ) + self.assertEqual(parsed_testcase["headers"]["random"], variables["random"]) + self.assertEqual(parsed_testcase["body"], variables["data"]) + self.assertEqual(parsed_testcase["headers"]["sum"], 3) + + def test_parse_parameters_testcase(self): + parameters = { + "user_agent": ["iOS/10.1", "iOS/10.2"], + "username-password": "${parameterize(request_methods/account.csv)}", + "sum": "${calculate_two_nums(1, 2)}", + } + load_project_meta( + os.path.join( + os.path.dirname(os.path.dirname(__file__)), + "examples", + "postman_echo", + "request_methods", + ), + ) + parsed_params = parser.parse_parameters(parameters) + self.assertEqual(len(parsed_params), 2 * 3 * 2) + + self.assertIn( + { + "username": "test1", + "password": "111111", + "user_agent": "iOS/10.1", + "sum": 3, + }, + parsed_params, + ) + self.assertIn( + { + "username": "test1", + "password": "111111", + "user_agent": 
"iOS/10.1", + "sum": 1, + }, + parsed_params, + ) + self.assertIn( + { + "username": "test1", + "password": "111111", + "user_agent": "iOS/10.2", + "sum": 3, + }, + parsed_params, + ) + self.assertIn( + { + "username": "test1", + "password": "111111", + "user_agent": "iOS/10.2", + "sum": 1, + }, + parsed_params, + ) + self.assertIn( + { + "username": "test2", + "password": "222222", + "user_agent": "iOS/10.1", + "sum": 3, + }, + parsed_params, + ) + self.assertIn( + { + "username": "test2", + "password": "222222", + "user_agent": "iOS/10.1", + "sum": 1, + }, + parsed_params, + ) + self.assertIn( + { + "username": "test2", + "password": "222222", + "user_agent": "iOS/10.2", + "sum": 3, + }, + parsed_params, + ) + self.assertIn( + { + "username": "test2", + "password": "222222", + "user_agent": "iOS/10.2", + "sum": 1, + }, + parsed_params, + ) diff --git a/httprunner/response.py b/httprunner/response.py new file mode 100644 index 0000000..f898344 --- /dev/null +++ b/httprunner/response.py @@ -0,0 +1,309 @@ +from typing import Dict, Text, Any + +import jmespath +from jmespath.exceptions import JMESPathError +from loguru import logger + +from httprunner import exceptions +from httprunner.exceptions import ValidationFailure, ParamsError +from httprunner.models import VariablesMapping, Validators +from httprunner.parser import parse_string_value, Parser + + +def get_uniform_comparator(comparator: Text): + """convert comparator alias to uniform name""" + if comparator in ["eq", "equals", "equal"]: + return "equal" + elif comparator in ["lt", "less_than"]: + return "less_than" + elif comparator in ["le", "less_or_equals"]: + return "less_or_equals" + elif comparator in ["gt", "greater_than"]: + return "greater_than" + elif comparator in ["ge", "greater_or_equals"]: + return "greater_or_equals" + elif comparator in ["ne", "not_equal"]: + return "not_equal" + elif comparator in ["str_eq", "string_equals"]: + return "string_equals" + elif comparator in ["len_eq", "length_equal"]: + return "length_equal" + elif comparator in [ + "len_gt", + "length_greater_than", + ]: + return "length_greater_than" + elif comparator in [ + "len_ge", + "length_greater_or_equals", + ]: + return "length_greater_or_equals" + elif comparator in ["len_lt", "length_less_than"]: + return "length_less_than" + elif comparator in [ + "len_le", + "length_less_or_equals", + ]: + return "length_less_or_equals" + else: + return comparator + + +def uniform_validator(validator): + """unify validator + + Args: + validator (dict): validator maybe in two formats: + + format1: this is kept for compatibility with the previous versions. 
+                {"check": "status_code", "comparator": "eq", "expect": 201, "message": "test"}
+                {"check": "status_code", "assert": "eq", "expect": 201, "msg": "test"}
+            format2: recommended new version, {assert: [check_item, expected_value, msg]}
+                {'eq': ['status_code', 201, "test"]}
+
+    Returns:
+        dict: validator info
+
+            {
+                "check": "status_code",
+                "expect": 201,
+                "assert": "equal",
+                "message": "test"
+            }
+
+    """
+    if not isinstance(validator, dict):
+        raise ParamsError(f"invalid validator: {validator}")
+
+    if "check" in validator and "expect" in validator:
+        # format1
+        check_item = validator["check"]
+        expect_value = validator["expect"]
+
+        if "assert" in validator:
+            comparator = validator.get("assert")
+        else:
+            comparator = validator.get("comparator", "eq")
+
+        if "msg" in validator:
+            message = validator.get("msg")
+        else:
+            message = validator.get("message", "")
+
+    elif len(validator) == 1:
+        # format2
+        comparator = list(validator.keys())[0]
+        compare_values = validator[comparator]
+
+        if not isinstance(compare_values, list) or len(compare_values) not in [2, 3]:
+            raise ParamsError(f"invalid validator: {validator}")
+
+        check_item = compare_values[0]
+        expect_value = compare_values[1]
+        if len(compare_values) == 3:
+            message = compare_values[2]
+        else:
+            # len(compare_values) == 2
+            message = ""
+
+    else:
+        raise ParamsError(f"invalid validator: {validator}")
+
+    # uniform comparator, e.g. lt => less_than, eq => equal
+    assert_method = get_uniform_comparator(comparator)
+
+    return {
+        "check": check_item,
+        "expect": expect_value,
+        "assert": assert_method,
+        "message": message,
+    }
+
+
+class ResponseObjectBase(object):
+    def __init__(self, resp_obj, parser: Parser):
+        """initialize with a response object
+
+        Args:
+            resp_obj (instance): requests.Response instance
+
+        """
+        self.resp_obj = resp_obj
+        self.parser = parser
+        self.validation_results: Dict = {}
+
+    def extract(
+        self,
+        extractors: Dict[Text, Text],
+        variables_mapping: VariablesMapping = None,
+    ) -> Dict[Text, Any]:
+        if not extractors:
+            return {}
+
+        extract_mapping = {}
+        for key, field in extractors.items():
+            if "$" in field:
+                # field contains variable or function
+                field = self.parser.parse_data(field, variables_mapping)
+            field_value = self._search_jmespath(field)
+            extract_mapping[key] = field_value
+
+        logger.info(f"extract mapping: {extract_mapping}")
+        return extract_mapping
+
+    def _search_jmespath(self, expr: Text) -> Any:
+        try:
+            check_value = jmespath.search(expr, self.resp_obj)
+        except JMESPathError as ex:
+            logger.error(
+                f"failed to search with jmespath\n"
+                f"expression: {expr}\n"
+                f"data: {self.resp_obj}\n"
+                f"exception: {ex}"
+            )
+            raise
+        return check_value
+
+    def validate(
+        self,
+        validators: Validators,
+        variables_mapping: VariablesMapping = None,
+    ):
+
+        variables_mapping = variables_mapping or {}
+
+        self.validation_results = {}
+        if not validators:
+            return
+
+        validate_pass = True
+        failures = []
+
+        for v in validators:
+
+            if "validate_extractor" not in self.validation_results:
+                self.validation_results["validate_extractor"] = []
+
+            u_validator = uniform_validator(v)
+
+            # check item
+            check_item = u_validator["check"]
+            if "$" in check_item:
+                # check_item is variable or function
+                check_item = self.parser.parse_data(check_item, variables_mapping)
+                check_item = parse_string_value(check_item)
+
+            if check_item and isinstance(check_item, Text):
+                check_value = self._search_jmespath(check_item)
+            else:
+                # variable or function evaluation result is "" or not text
+                check_value =
check_item + + # comparator + assert_method = u_validator["assert"] + assert_func = self.parser.get_mapping_function(assert_method) + + # expect item + expect_item = u_validator["expect"] + # parse expected value with config/teststep/extracted variables + expect_value = self.parser.parse_data(expect_item, variables_mapping) + + # message + message = u_validator["message"] + # parse message with config/teststep/extracted variables + message = self.parser.parse_data(message, variables_mapping) + + validate_msg = f"assert {check_item} {assert_method} {expect_value}({type(expect_value).__name__})" + + validator_dict = { + "comparator": assert_method, + "check": check_item, + "check_value": check_value, + "expect": expect_item, + "expect_value": expect_value, + "message": message, + } + + try: + assert_func(check_value, expect_value, message) + validate_msg += "\t==> pass" + logger.info(validate_msg) + validator_dict["check_result"] = "pass" + except AssertionError as ex: + validate_pass = False + validator_dict["check_result"] = "fail" + validate_msg += "\t==> fail" + validate_msg += ( + f"\n" + f"check_item: {check_item}\n" + f"check_value: {check_value}({type(check_value).__name__})\n" + f"assert_method: {assert_method}\n" + f"expect_value: {expect_value}({type(expect_value).__name__})" + ) + message = str(ex) + if message: + validate_msg += f"\nmessage: {message}" + + logger.error(validate_msg) + failures.append(validate_msg) + + self.validation_results["validate_extractor"].append(validator_dict) + + if not validate_pass: + failures_string = "\n".join([failure for failure in failures]) + raise ValidationFailure(failures_string) + + +class ResponseObject(ResponseObjectBase): + def __getattr__(self, key): + if key in ["json", "content", "body"]: + try: + value = self.resp_obj.json() + except ValueError: + value = self.resp_obj.content + elif key == "cookies": + value = self.resp_obj.cookies.get_dict() + else: + try: + value = getattr(self.resp_obj, key) + except AttributeError: + err_msg = "ResponseObject does not have attribute: {}".format(key) + logger.error(err_msg) + raise exceptions.ParamsError(err_msg) + + self.__dict__[key] = value + return value + + def _search_jmespath(self, expr: Text) -> Any: + resp_obj_meta = { + "status_code": self.status_code, + "headers": self.headers, + "cookies": self.cookies, + "body": self.body, + } + if not expr.startswith(tuple(resp_obj_meta.keys())): + if hasattr(self.resp_obj,expr): + return getattr(self.resp_obj,expr) + else: + return expr + + try: + check_value = jmespath.search(expr, resp_obj_meta) + except JMESPathError as ex: + logger.error( + f"failed to search with jmespath\n" + f"expression: {expr}\n" + f"data: {resp_obj_meta}\n" + f"exception: {ex}" + ) + raise + + return check_value + + +class ThriftResponseObject(ResponseObjectBase): + pass + + +class SqlResponseObject(ResponseObjectBase): + pass diff --git a/httprunner/response_test.py b/httprunner/response_test.py new file mode 100644 index 0000000..916b434 --- /dev/null +++ b/httprunner/response_test.py @@ -0,0 +1,90 @@ +import unittest + +import requests + +from httprunner.parser import Parser +from httprunner.response import ResponseObject, uniform_validator +from httprunner.utils import HTTP_BIN_URL + + +class TestResponse(unittest.TestCase): + def setUp(self) -> None: + resp = requests.post( + f"{HTTP_BIN_URL}/anything", + json={ + "locations": [ + {"name": "Seattle", "state": "WA"}, + {"name": "New York", "state": "NY"}, + {"name": "Bellevue", "state": "WA"}, + {"name": "Olympia", 
"state": "WA"}, + ] + }, + ) + parser = Parser( + functions_mapping={"get_name": lambda: "name", "get_num": lambda x: x} + ) + self.resp_obj = ResponseObject(resp, parser) + + def test_extract(self): + variables_mapping = {"body": "body"} + extract_mapping = self.resp_obj.extract( + { + "var_1": "body.json.locations[0]", + "var_2": "body.json.locations[3].name", + "var_3": "$body.json.locations[3].name", + "var_4": "$body.json.locations[3].${get_name()}", + }, + variables_mapping=variables_mapping, + ) + self.assertEqual(extract_mapping["var_1"], {"name": "Seattle", "state": "WA"}) + self.assertEqual(extract_mapping["var_2"], "Olympia") + self.assertEqual(extract_mapping["var_3"], "Olympia") + self.assertEqual(extract_mapping["var_4"], "Olympia") + + def test_validate(self): + self.resp_obj.validate( + [ + {"eq": ["body.json.locations[0].name", "Seattle"]}, + {"eq": ["body.json.locations[0]", {"name": "Seattle", "state": "WA"}]}, + ], + ) + + def test_validate_variables(self): + variables_mapping = {"index": 1, "var_empty": ""} + self.resp_obj.validate( + [ + {"eq": ["body.json.locations[$index].name", "New York"]}, + {"eq": ["$var_empty", ""]}, + ], + variables_mapping=variables_mapping, + ) + + def test_validate_functions(self): + variables_mapping = {"index": 1} + self.resp_obj.validate( + [ + {"eq": ["${get_num(0)}", 0]}, + {"eq": ["${get_num($index)}", 1]}, + ], + variables_mapping=variables_mapping, + ) + + def test_uniform_validator(self): + validators = [ + { + "check": "status_code", + "comparator": "eq", + "expect": 201, + "message": "test", + }, + {"check": "status_code", "assert": "eq", "expect": 201, "msg": "test"}, + {"eq": ["status_code", 201, "test"]}, + ] + expected = { + "check": "status_code", + "assert": "equal", + "expect": 201, + "message": "test", + } + for validator in validators: + self.assertEqual(uniform_validator(validator), expected) diff --git a/httprunner/runner.py b/httprunner/runner.py new file mode 100644 index 0000000..764c4fc --- /dev/null +++ b/httprunner/runner.py @@ -0,0 +1,248 @@ +import os +import time +import uuid +from datetime import datetime +from typing import Dict, List, Text + +try: + import allure + + ALLURE = allure +except ModuleNotFoundError: + ALLURE = None + +from loguru import logger + +from httprunner.client import HttpSession +from httprunner.config import Config +from httprunner.exceptions import ParamsError, ValidationFailure +from httprunner.loader import load_project_meta +from httprunner.models import ( + ProjectMeta, + StepResult, + TConfig, + TestCaseInOut, + TestCaseSummary, + TestCaseTime, + VariablesMapping, +) +from httprunner.parser import Parser +from httprunner.utils import LOGGER_FORMAT, merge_variables, ga4_client + + +class SessionRunner(object): + config: Config + teststeps: List[object] # list of Step + + parser: Parser = None + session: HttpSession = None + case_id: Text = "" + root_dir: Text = "" + thrift_client = None + db_engine = None + + __config: TConfig + __project_meta: ProjectMeta = None + __export: List[Text] = [] + __step_results: List[StepResult] = [] + __session_variables: VariablesMapping = {} + __is_referenced: bool = False + # time + __start_at: float = 0 + __duration: float = 0 + # log + __log_path: Text = "" + + def __init(self): + self.__config = self.config.struct() + self.__session_variables = self.__session_variables or {} + self.__start_at = 0 + self.__duration = 0 + self.__is_referenced = self.__is_referenced or False + + self.__project_meta = self.__project_meta or load_project_meta( + 
self.__config.path + ) + self.case_id = self.case_id or str(uuid.uuid4()) + self.root_dir = self.root_dir or self.__project_meta.RootDir + self.__log_path = os.path.join(self.root_dir, "logs", f"{self.case_id}.run.log") + + self.__step_results = self.__step_results or [] + self.session = self.session or HttpSession() + self.parser = self.parser or Parser(self.__project_meta.functions) + + def with_session(self, session: HttpSession) -> "SessionRunner": + self.session = session + return self + + def get_config(self) -> TConfig: + return self.__config + + def set_referenced(self) -> "SessionRunner": + self.__is_referenced = True + return self + + def with_case_id(self, case_id: Text) -> "SessionRunner": + self.case_id = case_id + return self + + def with_variables(self, variables: VariablesMapping) -> "SessionRunner": + self.__session_variables = variables + return self + + def with_export(self, export: List[Text]) -> "SessionRunner": + self.__export = export + return self + + def with_thrift_client(self, thrift_client) -> "SessionRunner": + self.thrift_client = thrift_client + return self + + def with_db_engine(self, db_engine) -> "SessionRunner": + self.db_engine = db_engine + return self + + def __parse_config(self, param: Dict = None) -> None: + # parse config variables + self.__config.variables.update(self.__session_variables) + if param: + self.__config.variables.update(param) + self.__config.variables = self.parser.parse_variables(self.__config.variables) + + # parse config name + self.__config.name = self.parser.parse_data( + self.__config.name, self.__config.variables + ) + + # parse config base url + self.__config.base_url = self.parser.parse_data( + self.__config.base_url, self.__config.variables + ) + + def get_export_variables(self) -> Dict: + # override testcase export vars with step export + export_var_names = self.__export or self.__config.export + export_vars_mapping = {} + for var_name in export_var_names: + if var_name not in self.__session_variables: + raise ParamsError( + f"failed to export variable {var_name} from session variables {self.__session_variables}" + ) + + export_vars_mapping[var_name] = self.__session_variables[var_name] + + return export_vars_mapping + + def get_summary(self) -> TestCaseSummary: + """get testcase result summary""" + start_at_timestamp = self.__start_at + start_at_iso_format = datetime.utcfromtimestamp(start_at_timestamp).isoformat() + + summary_success = True + for step_result in self.__step_results: + if not step_result.success: + summary_success = False + break + + return TestCaseSummary( + name=self.__config.name, + success=summary_success, + case_id=self.case_id, + time=TestCaseTime( + start_at=self.__start_at, + start_at_iso_format=start_at_iso_format, + duration=self.__duration, + ), + in_out=TestCaseInOut( + config_vars=self.__config.variables, + export_vars=self.get_export_variables(), + ), + log=self.__log_path, + step_results=self.__step_results, + ) + + def merge_step_variables(self, variables: VariablesMapping) -> VariablesMapping: + # override variables + # step variables > extracted variables from previous steps + variables = merge_variables(variables, self.__session_variables) + # step variables > testcase config variables + variables = merge_variables(variables, self.__config.variables) + + # parse variables + return self.parser.parse_variables(variables) + + def __run_step(self, step): + """run teststep, step maybe any kind that implements IStep interface + + Args: + step (Step): teststep + + """ + logger.info(f"run step 
begin: {step.name()} >>>>>>") + + # run step + for i in range(step.retry_times + 1): + try: + if ALLURE is not None: + with ALLURE.step(f"step: {step.name()}"): + step_result: StepResult = step.run(self) + else: + step_result: StepResult = step.run(self) + break + except ValidationFailure: + if i == step.retry_times: + raise + else: + logger.warning( + f"run step {step.name()} validation failed,wait {step.retry_interval} sec and try again" + ) + time.sleep(step.retry_interval) + logger.info( + f"run step retry ({i + 1}/{step.retry_times} time): {step.name()} >>>>>>" + ) + + # save extracted variables to session variables + self.__session_variables.update(step_result.export_vars) + # update testcase summary + self.__step_results.append(step_result) + + logger.info(f"run step end: {step.name()} <<<<<<\n") + + def test_start(self, param: Dict = None) -> "SessionRunner": + """main entrance, discovered by pytest""" + ga4_client.send_event("test_start") + print("\n") + self.__init() + self.__parse_config(param) + + if ALLURE is not None and not self.__is_referenced: + # update allure report meta + ALLURE.dynamic.title(self.__config.name) + ALLURE.dynamic.description(f"TestCase ID: {self.case_id}") + + logger.info( + f"Start to run testcase: {self.__config.name}, TestCase ID: {self.case_id}" + ) + + logger.add(self.__log_path, format=LOGGER_FORMAT, level="DEBUG") + self.__start_at = time.time() + try: + # run step in sequential order + for step in self.teststeps: + self.__run_step(step) + finally: + logger.info(f"generate testcase log: {self.__log_path}") + if ALLURE is not None: + ALLURE.attach.file( + self.__log_path, + name="all log", + attachment_type=ALLURE.attachment_type.TEXT, + ) + + self.__duration = time.time() - self.__start_at + return self + + +class HttpRunner(SessionRunner): + # split SessionRunner to keep consistent with golang version + pass diff --git a/httprunner/step.py b/httprunner/step.py new file mode 100644 index 0000000..7f0485a --- /dev/null +++ b/httprunner/step.py @@ -0,0 +1,67 @@ +from typing import Union + +from httprunner import HttpRunner +from httprunner.models import StepResult, TRequest, TStep, TestCase +from httprunner.step_request import ( + RequestWithOptionalArgs, + StepRequestExtraction, + StepRequestValidation, +) +from httprunner.step_sql_request import ( + RunSqlRequest, + StepSqlRequestExtraction, + StepSqlRequestValidation, +) +from httprunner.step_testcase import StepRefCase +from httprunner.step_thrift_request import ( + RunThriftRequest, + StepThriftRequestExtraction, + StepThriftRequestValidation, +) + + +class Step(object): + def __init__( + self, + step: Union[ + StepRequestValidation, + StepRequestExtraction, + RequestWithOptionalArgs, + StepRefCase, + RunSqlRequest, + StepSqlRequestValidation, + StepSqlRequestExtraction, + RunThriftRequest, + StepThriftRequestValidation, + StepThriftRequestExtraction, + ], + ): + self.__step = step + + @property + def request(self) -> TRequest: + return self.__step.struct().request + + @property + def testcase(self) -> TestCase: + return self.__step.struct().testcase + + @property + def retry_times(self) -> int: + return self.__step.struct().retry_times + + @property + def retry_interval(self) -> int: + return self.__step.struct().retry_interval + + def struct(self) -> TStep: + return self.__step.struct() + + def name(self) -> str: + return self.__step.name() + + def type(self) -> str: + return self.__step.type() + + def run(self, runner: HttpRunner) -> StepResult: + return self.__step.run(runner) diff --git 
a/httprunner/step_request.py b/httprunner/step_request.py new file mode 100644 index 0000000..2db1def --- /dev/null +++ b/httprunner/step_request.py @@ -0,0 +1,499 @@ +import json +import time +from typing import Any, Dict, List, Text, Union + +import requests +from loguru import logger + +from httprunner import utils +from httprunner.exceptions import ValidationFailure +from httprunner.ext.uploader import prepare_upload_step +from httprunner.models import ( + Hooks, + IStep, + MethodEnum, + StepResult, + TRequest, + TStep, + VariablesMapping, +) +from httprunner.parser import build_url, parse_variables_mapping +from httprunner.response import ResponseObject +from httprunner.runner import ALLURE, HttpRunner + + +def call_hooks( + runner: HttpRunner, hooks: Hooks, step_variables: VariablesMapping, hook_msg: Text +): + """call hook actions. + + Args: + hooks (list): each hook in hooks list maybe in two format. + + format1 (str): only call hook functions. + ${func()} + format2 (dict): assignment, the value returned by hook function will be assigned to variable. + {"var": "${func()}"} + + step_variables: current step variables to call hook, include two special variables + + request: parsed request dict + response: ResponseObject for current response + + hook_msg: setup/teardown request/testcase + + """ + logger.info(f"call hook actions: {hook_msg}") + + if not isinstance(hooks, List): + logger.error(f"Invalid hooks format: {hooks}") + return + + for hook in hooks: + if isinstance(hook, Text): + # format 1: ["${func()}"] + logger.debug(f"call hook function: {hook}") + runner.parser.parse_data(hook, step_variables) + elif isinstance(hook, Dict) and len(hook) == 1: + # format 2: {"var": "${func()}"} + var_name, hook_content = list(hook.items())[0] + hook_content_eval = runner.parser.parse_data(hook_content, step_variables) + logger.debug( + f"call hook function: {hook_content}, got value: {hook_content_eval}" + ) + logger.debug(f"assign variable: {var_name} = {hook_content_eval}") + step_variables[var_name] = hook_content_eval + else: + logger.error(f"Invalid hook format: {hook}") + + +def pretty_format(v) -> str: + if isinstance(v, dict): + return json.dumps(v, indent=4, ensure_ascii=False) + + if isinstance(v, requests.structures.CaseInsensitiveDict): + return json.dumps(dict(v.items()), indent=4, ensure_ascii=False) + + return repr(utils.omit_long_data(v)) + + +def run_step_request(runner: HttpRunner, step: TStep) -> StepResult: + """run teststep: request""" + step_result = StepResult( + name=step.name, + step_type="request", + success=False, + ) + start_time = time.time() + + # parse + functions = runner.parser.functions_mapping + step_variables = runner.merge_step_variables(step.variables) + prepare_upload_step(step, step_variables, functions) + # parse variables + step_variables = parse_variables_mapping(step_variables, functions) + + request_dict = step.request.dict() + request_dict.pop("upload", None) + parsed_request_dict = runner.parser.parse_data(request_dict, step_variables) + + request_headers = parsed_request_dict.pop("headers", {}) + # omit pseudo header names for HTTP/1, e.g. 
:authority, :method, :path, :scheme + request_headers = { + key: request_headers[key] for key in request_headers if not key.startswith(":") + } + request_headers[ + "HRUN-Request-ID" + ] = f"HRUN-{runner.case_id}-{str(int(time.time() * 1000))[-6:]}" + parsed_request_dict["headers"] = request_headers + + step_variables["request"] = parsed_request_dict + + # setup hooks + if step.setup_hooks: + call_hooks(runner, step.setup_hooks, step_variables, "setup request") + + # prepare arguments + config = runner.get_config() + method = parsed_request_dict.pop("method") + url_path = parsed_request_dict.pop("url") + url = build_url(config.base_url, url_path) + parsed_request_dict["verify"] = config.verify + parsed_request_dict["json"] = parsed_request_dict.pop("req_json", {}) + + # log request + request_print = "====== request details ======\n" + request_print += f"url: {url}\n" + request_print += f"method: {method}\n" + for k, v in parsed_request_dict.items(): + request_print += f"{k}: {pretty_format(v)}\n" + + logger.debug(request_print) + if ALLURE is not None: + ALLURE.attach( + request_print, + name="request details", + attachment_type=ALLURE.attachment_type.TEXT, + ) + resp = runner.session.request(method, url, **parsed_request_dict) + + # log response + response_print = "====== response details ======\n" + response_print += f"status_code: {resp.status_code}\n" + response_print += f"headers: {pretty_format(resp.headers)}\n" + + try: + resp_body = resp.json() + except (requests.exceptions.JSONDecodeError, json.decoder.JSONDecodeError): + resp_body = resp.content + + response_print += f"body: {pretty_format(resp_body)}\n" + logger.debug(response_print) + if ALLURE is not None: + ALLURE.attach( + response_print, + name="response details", + attachment_type=ALLURE.attachment_type.TEXT, + ) + resp_obj = ResponseObject(resp, runner.parser) + step_variables["response"] = resp_obj + + # teardown hooks + if step.teardown_hooks: + call_hooks(runner, step.teardown_hooks, step_variables, "teardown request") + + # extract + extractors = step.extract + extract_mapping = resp_obj.extract(extractors, step_variables) + step_result.export_vars = extract_mapping + + variables_mapping = step_variables + variables_mapping.update(extract_mapping) + + # validate + validators = step.validators + try: + resp_obj.validate(validators, variables_mapping) + step_result.success = True + except ValidationFailure: + raise + finally: + session_data = runner.session.data + session_data.success = step_result.success + session_data.validators = resp_obj.validation_results + + # save step data + step_result.data = session_data + step_result.elapsed = time.time() - start_time + + return step_result + + +class StepRequestValidation(IStep): + def __init__(self, step: TStep): + self.__step = step + + def assert_equal( + self, jmes_path: Text, expected_value: Any, message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append({"equal": [jmes_path, expected_value, message]}) + return self + + def assert_not_equal( + self, jmes_path: Text, expected_value: Any, message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"not_equal": [jmes_path, expected_value, message]} + ) + return self + + def assert_greater_than( + self, jmes_path: Text, expected_value: Union[int, float], message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"greater_than": [jmes_path, expected_value, message]} + ) + return self + + def assert_less_than( + self, jmes_path: Text, 
expected_value: Union[int, float], message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"less_than": [jmes_path, expected_value, message]} + ) + return self + + def assert_greater_or_equals( + self, jmes_path: Text, expected_value: Union[int, float], message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"greater_or_equals": [jmes_path, expected_value, message]} + ) + return self + + def assert_less_or_equals( + self, jmes_path: Text, expected_value: Union[int, float], message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"less_or_equals": [jmes_path, expected_value, message]} + ) + return self + + def assert_length_equal( + self, jmes_path: Text, expected_value: int, message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"length_equal": [jmes_path, expected_value, message]} + ) + return self + + def assert_length_greater_than( + self, jmes_path: Text, expected_value: int, message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"length_greater_than": [jmes_path, expected_value, message]} + ) + return self + + def assert_length_less_than( + self, jmes_path: Text, expected_value: int, message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"length_less_than": [jmes_path, expected_value, message]} + ) + return self + + def assert_length_greater_or_equals( + self, jmes_path: Text, expected_value: int, message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"length_greater_or_equals": [jmes_path, expected_value, message]} + ) + return self + + def assert_length_less_or_equals( + self, jmes_path: Text, expected_value: int, message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"length_less_or_equals": [jmes_path, expected_value, message]} + ) + return self + + def assert_string_equals( + self, jmes_path: Text, expected_value: Any, message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"string_equals": [jmes_path, expected_value, message]} + ) + return self + + def assert_startswith( + self, jmes_path: Text, expected_value: Text, message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"startswith": [jmes_path, expected_value, message]} + ) + return self + + def assert_endswith( + self, jmes_path: Text, expected_value: Text, message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"endswith": [jmes_path, expected_value, message]} + ) + return self + + def assert_regex_match( + self, jmes_path: Text, expected_value: Text, message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"regex_match": [jmes_path, expected_value, message]} + ) + return self + + def assert_contains( + self, jmes_path: Text, expected_value: Any, message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"contains": [jmes_path, expected_value, message]} + ) + return self + + def assert_contained_by( + self, jmes_path: Text, expected_value: Any, message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"contained_by": [jmes_path, expected_value, message]} + ) + return self + + def assert_type_match( + self, jmes_path: Text, expected_value: Any, message: Text = "" + ) -> "StepRequestValidation": + self.__step.validators.append( + {"type_match": [jmes_path, expected_value, message]} + ) + return self + 
+ def struct(self) -> TStep: + return self.__step + + def name(self) -> Text: + return self.__step.name + + def type(self) -> Text: + return f"request-{self.__step.request.method}" + + def run(self, runner: HttpRunner): + return run_step_request(runner, self.__step) + + +class StepRequestExtraction(IStep): + def __init__(self, step: TStep): + self.__step = step + + def with_jmespath(self, jmes_path: Text, var_name: Text) -> "StepRequestExtraction": + self.__step.extract[var_name] = jmes_path + return self + + # def with_regex(self): + # # TODO: extract response html with regex + # pass + # + # def with_jsonpath(self): + # # TODO: extract response json with jsonpath + # pass + + def validate(self) -> StepRequestValidation: + return StepRequestValidation(self.__step) + + def struct(self) -> TStep: + return self.__step + + def name(self) -> Text: + return self.__step.name + + def type(self) -> Text: + return f"request-{self.__step.request.method}" + + def run(self, runner: HttpRunner): + return run_step_request(runner, self.__step) + + +class RequestWithOptionalArgs(IStep): + def __init__(self, step: TStep): + self.__step = step + + def with_params(self, **params) -> "RequestWithOptionalArgs": + self.__step.request.params.update(params) + return self + + def with_headers(self, **headers) -> "RequestWithOptionalArgs": + self.__step.request.headers.update(headers) + return self + + def with_cookies(self, **cookies) -> "RequestWithOptionalArgs": + self.__step.request.cookies.update(cookies) + return self + + def with_data(self, data) -> "RequestWithOptionalArgs": + self.__step.request.data = data + return self + + def with_json(self, req_json) -> "RequestWithOptionalArgs": + self.__step.request.req_json = req_json + return self + + def set_timeout(self, timeout: float) -> "RequestWithOptionalArgs": + self.__step.request.timeout = timeout + return self + + def set_verify(self, verify: bool) -> "RequestWithOptionalArgs": + self.__step.request.verify = verify + return self + + def set_allow_redirects(self, allow_redirects: bool) -> "RequestWithOptionalArgs": + self.__step.request.allow_redirects = allow_redirects + return self + + def upload(self, **file_info) -> "RequestWithOptionalArgs": + self.__step.request.upload.update(file_info) + return self + + def teardown_hook( + self, hook: Text, assign_var_name: Text = None + ) -> "RequestWithOptionalArgs": + if assign_var_name: + self.__step.teardown_hooks.append({assign_var_name: hook}) + else: + self.__step.teardown_hooks.append(hook) + + return self + + def extract(self) -> StepRequestExtraction: + return StepRequestExtraction(self.__step) + + def validate(self) -> StepRequestValidation: + return StepRequestValidation(self.__step) + + def struct(self) -> TStep: + return self.__step + + def name(self) -> Text: + return self.__step.name + + def type(self) -> Text: + return f"request-{self.__step.request.method}" + + def run(self, runner: HttpRunner): + return run_step_request(runner, self.__step) + + +class RunRequest(object): + def __init__(self, name: Text): + self.__step = TStep(name=name) + + def with_variables(self, **variables) -> "RunRequest": + self.__step.variables.update(variables) + return self + + def with_retry(self, retry_times, retry_interval) -> "RunRequest": + self.__step.retry_times = retry_times + self.__step.retry_interval = retry_interval + return self + + def setup_hook(self, hook: Text, assign_var_name: Text = None) -> "RunRequest": + if assign_var_name: + self.__step.setup_hooks.append({assign_var_name: hook}) + else: + 
self.__step.setup_hooks.append(hook) + + return self + + def get(self, url: Text) -> RequestWithOptionalArgs: + self.__step.request = TRequest(method=MethodEnum.GET, url=url) + return RequestWithOptionalArgs(self.__step) + + def post(self, url: Text) -> RequestWithOptionalArgs: + self.__step.request = TRequest(method=MethodEnum.POST, url=url) + return RequestWithOptionalArgs(self.__step) + + def put(self, url: Text) -> RequestWithOptionalArgs: + self.__step.request = TRequest(method=MethodEnum.PUT, url=url) + return RequestWithOptionalArgs(self.__step) + + def head(self, url: Text) -> RequestWithOptionalArgs: + self.__step.request = TRequest(method=MethodEnum.HEAD, url=url) + return RequestWithOptionalArgs(self.__step) + + def delete(self, url: Text) -> RequestWithOptionalArgs: + self.__step.request = TRequest(method=MethodEnum.DELETE, url=url) + return RequestWithOptionalArgs(self.__step) + + def options(self, url: Text) -> RequestWithOptionalArgs: + self.__step.request = TRequest(method=MethodEnum.OPTIONS, url=url) + return RequestWithOptionalArgs(self.__step) + + def patch(self, url: Text) -> RequestWithOptionalArgs: + self.__step.request = TRequest(method=MethodEnum.PATCH, url=url) + return RequestWithOptionalArgs(self.__step) diff --git a/httprunner/step_request_test.py b/httprunner/step_request_test.py new file mode 100644 index 0000000..58164ac --- /dev/null +++ b/httprunner/step_request_test.py @@ -0,0 +1,17 @@ +import unittest + +from examples.postman_echo.request_methods.request_with_functions_test import ( + TestCaseRequestWithFunctions, +) + + +class TestRunRequest(unittest.TestCase): + def test_run_request(self): + runner = TestCaseRequestWithFunctions().test_start() + summary = runner.get_summary() + self.assertTrue(summary.success) + self.assertEqual(summary.name, "request methods testcase with functions") + self.assertEqual(len(summary.step_results), 3) + self.assertEqual(summary.step_results[0].name, "get with params") + self.assertEqual(summary.step_results[1].name, "post raw text") + self.assertEqual(summary.step_results[2].name, "post form data") diff --git a/httprunner/step_sql_request.py b/httprunner/step_sql_request.py new file mode 100644 index 0000000..47a5405 --- /dev/null +++ b/httprunner/step_sql_request.py @@ -0,0 +1,317 @@ +# -*- coding: utf-8 -*- +import sys +import time +from typing import Text + +from loguru import logger + +from httprunner import utils +from httprunner.exceptions import SqlMethodNotSupport, ValidationFailure +from httprunner.models import IStep, SqlMethodEnum, StepResult, TSqlRequest, TStep +from httprunner.response import SqlResponseObject +from httprunner.runner import ALLURE, HttpRunner +from httprunner.step_request import ( + StepRequestExtraction, + StepRequestValidation, + call_hooks, +) + +try: + import pymysql + import sqlalchemy + + SQL_READY = True +except ModuleNotFoundError: + SQL_READY = False + + +def ensure_sql_ready(): + if SQL_READY: + return + + msg = """ + uploader extension dependencies uninstalled, install first and try again. 
+ install with pip: + $ pip install sqlalchemy pymysql + + or you can install httprunner with optional upload dependencies: + $ pip install "httprunner[sql]" + """ + logger.error(msg) + sys.exit(1) + + +def run_step_sql_request(runner: HttpRunner, step: TStep) -> StepResult: + """run teststep:sql request""" + start_time = time.time() + + step_result = StepResult( + name=step.name, + step_type="sql", + success=False, + ) + step_variables = runner.merge_step_variables(step.variables) + # parse + request_dict = step.sql_request.dict() + parsed_request_dict = runner.parser.parse_data(request_dict, step_variables) + config = runner.get_config() + parsed_request_dict["db_config"]["psm"] = ( + parsed_request_dict["db_config"]["psm"] or config.db.psm + ) + parsed_request_dict["db_config"]["user"] = ( + parsed_request_dict["db_config"]["user"] or config.db.user + ) + parsed_request_dict["db_config"]["password"] = ( + parsed_request_dict["db_config"]["password"] or config.db.password + ) + parsed_request_dict["db_config"]["ip"] = ( + parsed_request_dict["db_config"]["ip"] or config.db.ip + ) + parsed_request_dict["db_config"]["port"] = ( + parsed_request_dict["db_config"]["port"] or config.db.port + ) + parsed_request_dict["db_config"]["database"] = ( + parsed_request_dict["db_config"]["database"] or config.db.database + ) + + if not runner.db_engine: + ensure_sql_ready() + from httprunner.database.engine import DBEngine + + runner.db_engine = DBEngine( + f'mysql+pymysql://{parsed_request_dict["db_config"]["user"]}:' + f'{parsed_request_dict["db_config"]["password"]}@{parsed_request_dict["db_config"]["ip"]}:' + f'{parsed_request_dict["db_config"]["port"]}/{parsed_request_dict["db_config"]["database"]}' + f"?charset=utf8mb4" + ) + + # parsed_request_dict["headers"].setdefault( + # "HRUN-Request-ID", + # f"HRUN-{self.__case_id}-{str(int(time.time() * 1000))[-6:]}", + # ) + + # setup hooks + if step.setup_hooks: + call_hooks(runner, step.setup_hooks, step_variables, "setup request") + + # log request + sql_request_print = "====== sql request details ======\n" + sql_request_print += f"sql: {step.sql_request.sql}\n" + for k, v in parsed_request_dict.items(): + v = utils.omit_long_data(v) + sql_request_print += f"{k}: {repr(v)}\n" + + sql_request_print += "\n" + + if ALLURE is not None: + ALLURE.attach( + sql_request_print, + name="sql request details", + attachment_type=ALLURE.attachment_type.TEXT, + ) + logger.info(f"Executing SQL: {parsed_request_dict['sql']}") + if step.sql_request.method == SqlMethodEnum.FETCHONE: + sql_resp = runner.db_engine.fetchone(parsed_request_dict["sql"]) + elif step.sql_request.method == SqlMethodEnum.INSERT: + sql_resp = runner.db_engine.insert(parsed_request_dict["sql"]) + elif step.sql_request.method == SqlMethodEnum.FETCHMANY: + sql_resp = runner.db_engine.fetchmany( + parsed_request_dict["sql"], parsed_request_dict["size"] + ) + elif step.sql_request.method == SqlMethodEnum.FETCHALL: + sql_resp = runner.db_engine.fetchall(parsed_request_dict["sql"]) + elif step.sql_request.method == SqlMethodEnum.UPDATE: + sql_resp = runner.db_engine.update(parsed_request_dict["sql"]) + elif step.sql_request.method == SqlMethodEnum.DELETE: + sql_resp = runner.db_engine.delete(parsed_request_dict["sql"]) + else: + raise SqlMethodNotSupport( + f"step.sql_request.method {parsed_request_dict['method']} not support" + ) + + # log response + sql_response_print = "====== sql response details ======\n" + if isinstance(sql_resp, dict): + for k, v in sql_resp.items(): + v = 
utils.omit_long_data(v) + sql_response_print += f"{k}: {repr(v)}\n" + elif isinstance(sql_resp, list): + sql_response_print += f"count: {len(sql_resp)}\n" + sql_response_print += "-" * 34 + "\n" + for el in sql_resp: + for k, v in el.items(): + v = utils.omit_long_data(v) + sql_response_print += f"{k}: {repr(v)}\n" + sql_response_print += "-" * 34 + "\n" + elif sql_resp is None: + sql_response_print += "None\n" + if ALLURE is not None: + ALLURE.attach( + sql_response_print, + name="sql response details", + attachment_type=ALLURE.attachment_type.TEXT, + ) + + resp_obj = SqlResponseObject(sql_resp, parser=runner.parser) + step_variables["sql_response"] = resp_obj + + # teardown hooks + if step.teardown_hooks: + call_hooks(runner, step.teardown_hooks, step_variables, "teardown request") + + def log_sql_req_resp_details(): + err_msg = "\n{} SQL DETAILED REQUEST & RESPONSE {}\n".format("*" * 32, "*" * 32) + err_msg += sql_request_print + sql_response_print + logger.error(err_msg) + + # extract + extractors = step.extract + extract_mapping = resp_obj.extract(extractors) + step_result.export_vars = extract_mapping + + variables_mapping = step_variables + variables_mapping.update(extract_mapping) + + # validate + validators = step.validators + try: + resp_obj.validate(validators, variables_mapping) + step_result.success = True + except ValidationFailure: + log_sql_req_resp_details() + raise + finally: + session_data = runner.session.data + session_data.success = step_result.success + session_data.validators = resp_obj.validation_results + + # save step data + step_result.data = session_data + step_result.elapsed = time.time() - start_time + return step_result + + +class StepSqlRequestValidation(StepRequestValidation): + def __init__(self, step: TStep): + self.__step = step + super().__init__(step) + + def run(self, runner: HttpRunner): + return run_step_sql_request(runner, self.__step) + + +class StepSqlRequestExtraction(StepRequestExtraction): + def __init__(self, step: TStep): + self.__step = step + super().__init__(step) + + def run(self, runner: HttpRunner): + return run_step_sql_request(runner, self.__step) + + def validate(self) -> StepSqlRequestValidation: + return StepSqlRequestValidation(self.__step) + + +class RunSqlRequest(IStep): + def __init__(self, name: Text): + self.__step = TStep(name=name) + self.__step.sql_request = TSqlRequest() + + def with_variables(self, **variables) -> "RunSqlRequest": + self.__step.variables.update(variables) + return self + + def with_db_config( + self, user=None, password=None, ip=None, port=None, database=None, psm=None + ): + if user: + self.__step.sql_request.db_config.user = user + if password: + self.__step.sql_request.db_config.password = password + if ip: + self.__step.sql_request.db_config.ip = ip + if port: + self.__step.sql_request.db_config.port = port + if database: + self.__step.sql_request.db_config.database = database + if psm: + self.__step.sql_request.db_config.psm = psm + return self + + def fetchone(self, sql) -> "RunSqlRequest": + self.__step.sql_request.method = SqlMethodEnum.FETCHONE + self.__step.sql_request.sql = sql + return self + + def fetchmany(self, sql, size) -> "RunSqlRequest": + self.__step.sql_request.method = SqlMethodEnum.FETCHMANY + self.__step.sql_request.sql = sql + self.__step.sql_request.size = size + return self + + def fetchall(self, sql) -> "RunSqlRequest": + self.__step.sql_request.method = SqlMethodEnum.FETCHALL + self.__step.sql_request.sql = sql + return self + + def update(self, sql) -> "RunSqlRequest": + 
self.__step.sql_request.method = SqlMethodEnum.UPDATE + self.__step.sql_request.sql = sql + return self + + def delete(self, sql) -> "RunSqlRequest": + self.__step.sql_request.method = SqlMethodEnum.DELETE + self.__step.sql_request.sql = sql + return self + + def insert(self, sql) -> "RunSqlRequest": + self.__step.sql_request.method = SqlMethodEnum.INSERT + self.__step.sql_request.sql = sql + return self + + def with_retry(self, retry_times, retry_interval) -> "RunSqlRequest": + self.__step.retry_times = retry_times + self.__step.retry_interval = retry_interval + return self + + def teardown_hook( + self, hook: Text, assign_var_name: Text = None + ) -> "RunSqlRequest": + if assign_var_name: + self.__step.teardown_hooks.append({assign_var_name: hook}) + else: + self.__step.teardown_hooks.append(hook) + + return self + + def setup_hook(self, hook: Text, assign_var_name: Text = None) -> "RunSqlRequest": + if assign_var_name: + self.__step.setup_hooks.append({assign_var_name: hook}) + else: + self.__step.setup_hooks.append(hook) + + return self + + def struct(self) -> TStep: + return self.__step + + def name(self) -> Text: + return self.__step.name + + def type(self) -> Text: + return f"sql-request-{self.__step.sql_request.sql}" + + def run(self, runner) -> StepResult: + return run_step_sql_request(runner, self.__step) + + def extract(self) -> StepSqlRequestExtraction: + return StepSqlRequestExtraction(self.__step) + + def validate(self) -> StepSqlRequestValidation: + return StepSqlRequestValidation(self.__step) + + def with_jmespath( + self, jmes_path: Text, var_name: Text + ) -> "StepSqlRequestExtraction": + self.__step.extract[var_name] = jmes_path + return StepSqlRequestExtraction(self.__step) diff --git a/httprunner/step_testcase.py b/httprunner/step_testcase.py new file mode 100644 index 0000000..1341cc6 --- /dev/null +++ b/httprunner/step_testcase.py @@ -0,0 +1,103 @@ +from typing import Callable, Text + +from loguru import logger + +from httprunner import exceptions +from httprunner.models import IStep, StepResult, TStep, TestCaseSummary +from httprunner.runner import HttpRunner +from httprunner.step_request import call_hooks + + +def run_step_testcase(runner: HttpRunner, step: TStep) -> StepResult: + """run teststep: referenced testcase""" + step_result = StepResult(name=step.name, step_type="testcase") + step_variables = runner.merge_step_variables(step.variables) + step_export = step.export + + # setup hooks + if step.setup_hooks: + call_hooks(runner, step.setup_hooks, step_variables, "setup testcase") + + # TODO: override testcase with current step name/variables/export + + # step.testcase is a referenced testcase, e.g. 
RequestWithFunctions + ref_case_runner = step.testcase() + ref_case_runner.set_referenced().with_session(runner.session).with_case_id( + runner.case_id + ).with_variables(step_variables).with_export(step_export).test_start() + + # teardown hooks + if step.teardown_hooks: + call_hooks(runner, step.teardown_hooks, step.variables, "teardown testcase") + + summary: TestCaseSummary = ref_case_runner.get_summary() + step_result.data = summary.step_results # list of step data + step_result.export_vars = summary.in_out.export_vars + step_result.success = summary.success + + if step_result.export_vars: + logger.info(f"export variables: {step_result.export_vars}") + + return step_result + + +class StepRefCase(IStep): + def __init__(self, step: TStep): + self.__step = step + + def teardown_hook(self, hook: Text, assign_var_name: Text = None) -> "StepRefCase": + if assign_var_name: + self.__step.teardown_hooks.append({assign_var_name: hook}) + else: + self.__step.teardown_hooks.append(hook) + + return self + + def export(self, *var_name: Text) -> "StepRefCase": + self.__step.export.extend(var_name) + return self + + def struct(self) -> TStep: + return self.__step + + def name(self) -> Text: + return self.__step.name + + def type(self) -> Text: + return f"request-{self.__step.request.method}" + + def run(self, runner: HttpRunner): + return run_step_testcase(runner, self.__step) + + +class RunTestCase(object): + def __init__(self, name: Text): + self.__step = TStep(name=name) + + def with_variables(self, **variables) -> "RunTestCase": + self.__step.variables.update(variables) + return self + + def with_retry(self, retry_times, retry_interval) -> "RunTestCase": + self.__step.retry_times = retry_times + self.__step.retry_interval = retry_interval + return self + + def setup_hook(self, hook: Text, assign_var_name: Text = None) -> "RunTestCase": + if assign_var_name: + self.__step.setup_hooks.append({assign_var_name: hook}) + else: + self.__step.setup_hooks.append(hook) + + return self + + def call(self, testcase: Callable) -> StepRefCase: + if issubclass(testcase, HttpRunner): + # referenced testcase object + self.__step.testcase = testcase + else: + raise exceptions.ParamsError( + f"Invalid teststep referenced testcase: {testcase}" + ) + + return StepRefCase(self.__step) diff --git a/httprunner/step_testcase_test.py b/httprunner/step_testcase_test.py new file mode 100644 index 0000000..40c2c59 --- /dev/null +++ b/httprunner/step_testcase_test.py @@ -0,0 +1,27 @@ +import unittest + +from httprunner.runner import HttpRunner +from httprunner.step_testcase import RunTestCase +from examples.postman_echo.request_methods.request_with_functions_test import ( + TestCaseRequestWithFunctions, +) + + +class TestRunTestCase(unittest.TestCase): + def setUp(self): + self.runner = TestCaseRequestWithFunctions() + self.runner.test_start() + + def test_run_testcase_by_path(self): + + step_result = ( + RunTestCase("run referenced testcase") + .call(TestCaseRequestWithFunctions) + .run(self.runner) + ) + self.assertTrue(step_result.success) + self.assertEqual(step_result.name, "run referenced testcase") + self.assertEqual(len(step_result.data), 3) + self.assertEqual(step_result.data[0].name, "get with params") + self.assertEqual(step_result.data[1].name, "post raw text") + self.assertEqual(step_result.data[2].name, "post form data") diff --git a/httprunner/step_thrift_request.py b/httprunner/step_thrift_request.py new file mode 100644 index 0000000..322d34e --- /dev/null +++ b/httprunner/step_thrift_request.py @@ -0,0 +1,309 
@@
+# -*- coding: utf-8 -*-
+import platform
+import sys
+import time
+from typing import Text, Union
+
+from loguru import logger
+
+from httprunner import utils
+from httprunner.exceptions import ValidationFailure
+from httprunner.models import (
+    IStep,
+    ProtoType,
+    StepResult,
+    TransType,
+    TStep,
+    TThriftRequest,
+)
+from httprunner.response import ThriftResponseObject
+from httprunner.runner import ALLURE, HttpRunner
+from httprunner.step_request import (
+    StepRequestExtraction,
+    StepRequestValidation,
+    call_hooks,
+)
+
+try:
+    import thriftpy2
+
+    from thrift.Thrift import TType
+
+    THRIFT_READY = True
+except ModuleNotFoundError:
+    THRIFT_READY = False
+
+
+def ensure_thrift_ready():
+    assert platform.system() != "Windows", "Sorry, thrift is not supported on Windows for now"
+    if THRIFT_READY:
+        return
+
+    msg = """
+    thrift extension dependencies are not installed, install them first and try again.
+    install with pip:
+    $ pip install cython thriftpy2 thrift
+
+    or you can install httprunner with optional thrift dependencies:
+    $ pip install "httprunner[thrift]"
+    """
+    logger.error(msg)
+    sys.exit(1)
+
+
+def run_step_thrift_request(runner: HttpRunner, step: TStep) -> StepResult:
+    """run teststep: thrift request"""
+    start_time = time.time()
+
+    step_result = StepResult(
+        name=step.name,
+        step_type="thrift",
+        success=False,
+    )
+    step_variables = runner.merge_step_variables(step.variables)
+    # parse
+    request_dict = step.thrift_request.dict()
+    parsed_request_dict = runner.parser.parse_data(request_dict, step_variables)
+    config = runner.get_config()
+    parsed_request_dict["psm"] = parsed_request_dict["psm"] or config.thrift.psm
+    parsed_request_dict["env"] = parsed_request_dict["env"] or config.thrift.env
+    parsed_request_dict["cluster"] = (
+        parsed_request_dict["cluster"] or config.thrift.cluster
+    )
+    parsed_request_dict["idl_path"] = (
+        parsed_request_dict["idl_path"] or config.thrift.idl_path
+    )
+    parsed_request_dict["include_dirs"] = (
+        parsed_request_dict["include_dirs"] or config.thrift.include_dirs
+    )
+    parsed_request_dict["method"] = (
+        parsed_request_dict["method"] or config.thrift.method
+    )
+    parsed_request_dict["service_name"] = (
+        parsed_request_dict["service_name"] or config.thrift.service_name
+    )
+    parsed_request_dict["ip"] = parsed_request_dict["ip"] or config.thrift.ip
+    parsed_request_dict["port"] = parsed_request_dict["port"] or config.thrift.port
+    parsed_request_dict["proto_type"] = (
+        parsed_request_dict["proto_type"] or config.thrift.proto_type
+    )
+    parsed_request_dict["trans_type"] = (
+        parsed_request_dict["trans_type"] or config.thrift.trans_type
+    )
+    parsed_request_dict["timeout"] = (
+        parsed_request_dict["timeout"] or config.thrift.timeout
+    )
+    parsed_request_dict["thrift_client"] = parsed_request_dict["thrift_client"]
+
+    # parsed_request_dict["headers"].setdefault(
+    #     "HRUN-Request-ID",
+    #     f"HRUN-{self.__case_id}-{str(int(time.time() * 1000))[-6:]}",
+    # )
+    step_variables["thrift_request"] = parsed_request_dict
+
+    psm = parsed_request_dict["psm"]
+    if not runner.thrift_client:
+        runner.thrift_client = parsed_request_dict["thrift_client"]
+    if not runner.thrift_client:
+        ensure_thrift_ready()
+        from httprunner.thrift.thrift_client import ThriftClient
+
+        runner.thrift_client = ThriftClient(
+            thrift_file=parsed_request_dict["idl_path"],
+            service_name=parsed_request_dict["service_name"],
+            ip=parsed_request_dict["ip"],
+            port=parsed_request_dict["port"],
+            include_dirs=parsed_request_dict["include_dirs"],
+            timeout=parsed_request_dict["timeout"],
+            proto_type=parsed_request_dict["proto_type"],
+            trans_type=parsed_request_dict["trans_type"],
+        )
+
+    # setup hooks
+    if step.setup_hooks:
+        call_hooks(runner, step.setup_hooks, step_variables, "setup request")
+
+    # log request
+    thrift_request_print = "====== thrift request details ======\n"
+    thrift_request_print += f"psm: {psm}\n"
+    for k, v in parsed_request_dict.items():
+        v = utils.omit_long_data(v)
+        thrift_request_print += f"{k}: {repr(v)}\n"
+    thrift_request_print += "\n"
+    if ALLURE is not None:
+        ALLURE.attach(
+            thrift_request_print,
+            name="thrift request details",
+            attachment_type=ALLURE.attachment_type.TEXT,
+        )
+
+    # thrift request
+    resp = runner.thrift_client.send_request(
+        parsed_request_dict["params"], parsed_request_dict["method"]
+    )
+    resp_obj = ThriftResponseObject(resp, parser=runner.parser)
+    step_variables["thrift_response"] = resp_obj
+
+    # log response
+    thrift_response_print = "====== thrift response details ======\n"
+    for k, v in resp.items():
+        v = utils.omit_long_data(v)
+        thrift_response_print += f"{k}: {repr(v)}\n"
+    if ALLURE is not None:
+        ALLURE.attach(
+            thrift_response_print,
+            name="thrift response details",
+            attachment_type=ALLURE.attachment_type.TEXT,
+        )
+
+    # teardown hooks
+    if step.teardown_hooks:
+        call_hooks(runner, step.teardown_hooks, step_variables, "teardown request")
+
+    def log_thrift_req_resp_details():
+        err_msg = "\n{} THRIFT DETAILED REQUEST & RESPONSE {}\n".format(
+            "*" * 32, "*" * 32
+        )
+        err_msg += thrift_request_print + thrift_response_print
+        logger.error(err_msg)
+
+    # extract
+    extractors = step.extract
+    extract_mapping = resp_obj.extract(extractors)
+    step_result.export_vars = extract_mapping
+
+    variables_mapping = step_variables
+    variables_mapping.update(extract_mapping)
+
+    # validate
+    validators = step.validators
+    try:
+        resp_obj.validate(validators, variables_mapping)
+        step_result.success = True
+    except ValidationFailure:
+        log_thrift_req_resp_details()
+        raise
+    finally:
+        session_data = runner.session.data
+        session_data.success = step_result.success
+        session_data.validators = resp_obj.validation_results
+
+    # save step data
+    step_result.data = session_data
+    step_result.elapsed = time.time() - start_time
+    return step_result
+
+
+class StepThriftRequestValidation(StepRequestValidation):
+    def __init__(self, step: TStep):
+        self.__step = step
+        super().__init__(step)
+
+    def run(self, runner: HttpRunner):
+        return run_step_thrift_request(runner, self.__step)
+
+
+class StepThriftRequestExtraction(StepRequestExtraction):
+    def __init__(self, step: TStep):
+        self.__step = step
+        super().__init__(step)
+
+    def run(self, runner: HttpRunner):
+        return run_step_thrift_request(runner, self.__step)
+
+    def validate(self) -> StepThriftRequestValidation:
+        return StepThriftRequestValidation(self.__step)
+
+
+class RunThriftRequest(IStep):
+    def __init__(self, name: Text):
+        self.__step = TStep(name=name)
+        self.__step.thrift_request = TThriftRequest()
+
+    def with_variables(self, **variables) -> "RunThriftRequest":
+        self.__step.variables.update(variables)
+        return self
+
+    def with_retry(self, retry_times, retry_interval) -> "RunThriftRequest":
+        self.__step.retry_times = retry_times
+        self.__step.retry_interval = retry_interval
+        return self
+
+    def teardown_hook(
+        self, hook: Text, assign_var_name: Text = None
+    ) -> "RunThriftRequest":
+        if assign_var_name:
+            self.__step.teardown_hooks.append({assign_var_name: hook})
+        else:
+            self.__step.teardown_hooks.append(hook)
+
+        return self
+
+    def setup_hook(
+        self, hook: Text, assign_var_name: Text = None
+    ) -> "RunThriftRequest":
+        if assign_var_name:
+            self.__step.setup_hooks.append({assign_var_name: hook})
+        else:
+            self.__step.setup_hooks.append(hook)
+
+        return self
+
+    def with_params(self, **params) -> "RunThriftRequest":
+        self.__step.thrift_request.params.update(params)
+        return self
+
+    def with_method(self, method) -> "RunThriftRequest":
+        self.__step.thrift_request.method = method
+        return self
+
+    def with_idl_path(self, idl_path, idl_root_path) -> "RunThriftRequest":
+        self.__step.thrift_request.idl_path = idl_path
+        self.__step.thrift_request.include_dirs = [idl_root_path]
+        return self
+
+    def with_thrift_client(
+        self, thrift_client: Union["ThriftClient", str]
+    ) -> "RunThriftRequest":
+        self.__step.thrift_request.thrift_client = thrift_client
+        return self
+
+    def with_ip(self, ip: str) -> "RunThriftRequest":
+        self.__step.thrift_request.ip = ip
+        return self
+
+    def with_port(self, port: int) -> "RunThriftRequest":
+        self.__step.thrift_request.port = port
+        return self
+
+    def with_proto_type(self, proto_type: ProtoType) -> "RunThriftRequest":
+        self.__step.thrift_request.proto_type = proto_type
+        return self
+
+    def with_trans_type(self, trans_type: TransType) -> "RunThriftRequest":
+        self.__step.thrift_request.trans_type = trans_type
+        return self
+
+    def struct(self) -> TStep:
+        return self.__step
+
+    def name(self) -> Text:
+        return self.__step.name
+
+    def type(self) -> Text:
+        return f"thrift-request-{self.__step.thrift_request.psm}-{self.__step.thrift_request.method}"
+
+    def run(self, runner) -> StepResult:
+        return run_step_thrift_request(runner, self.__step)
+
+    def extract(self) -> StepThriftRequestExtraction:
+        return StepThriftRequestExtraction(self.__step)
+
+    def validate(self) -> StepThriftRequestValidation:
+        return StepThriftRequestValidation(self.__step)
+
+    def with_jmespath(
+        self, jmes_path: Text, var_name: Text
+    ) -> "StepThriftRequestExtraction":
+        self.__step.extract[var_name] = jmes_path
+        return StepThriftRequestExtraction(self.__step)
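To make the builder API above concrete, here is a minimal usage sketch (not part of this diff; the IDL path, address, method name, params and jmespath are all hypothetical, and any field left unset falls back to the matching config.thrift.* value inside run_step_thrift_request):

from httprunner.step import Step
from httprunner.step_thrift_request import RunThriftRequest

# every concrete value below is illustrative only
step = Step(
    RunThriftRequest("query user profile")
    .with_idl_path("idl/user.thrift", "idl")  # IDL file plus its include root
    .with_ip("127.0.0.1")
    .with_port(8888)
    .with_method("GetUser")  # a method defined in the IDL service
    .with_params(user_id=123)
    .validate()
    .assert_equal("code", 0)  # jmespath evaluated against the thrift response
)

Wrapping the chain in Step gives the runner the uniform IStep surface (struct/name/type/run) that SessionRunner.__run_step drives.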
+ :param s_input: + :return: + """ + return not isinstance(s_input, bytes) + + +def unicode_2_utf8_keep_native(para): + # if type(para) is str: + # return ''.join(filter(lambda x: not str.isalpha(x), para)) + if type(para) is str: + return para + + if type(para) is list: + for i in range(len(para)): + para[i] = unicode_2_utf8_keep_native(para[i]) + return para + elif type(para) is dict: + newpara = {} + for (key, value) in para.items(): + key = unicode_2_utf8_keep_native(key) + value = unicode_2_utf8_keep_native(value) + newpara[key] = value + return newpara + elif type(para) is tuple: + return tuple(unicode_2_utf8_keep_native(list(para))) + elif type(para) is str: + return para.encode("utf-8") + else: + logging.debug("type========", type(para)) + # if issubclass(type(para), dict): + if isinstance(para, dict): + logging.debug("type ************in dict: %s" % (type(para))) + return unicode_2_utf8_keep_native(dict(para)) + else: + return para + + +def encode_basestring(s): + """Return a JSON representation of a Python string""" + + def replace(match): + return ESCAPE_DCT[match.group(0)] + + return '"' + ESCAPE.sub(replace, s) + '"' + + +def py_encode_basestring_ascii(s): + """Return an ASCII-only JSON representation of a Python string""" + if isinstance(s, str) and HAS_UTF8.search(s) is not None: + s = s.decode("utf-8") + + def replace(match): + s = match.group(0) + try: + return ESCAPE_DCT[s] + except KeyError: + n = ord(s) + if n < 0x10000: + return "\\u{0:04x}".format(n) + # return '\\u%04x' % (n,) + else: + # surrogate pair + n -= 0x10000 + s1 = 0xD800 | ((n >> 10) & 0x3FF) + s2 = 0xDC00 | (n & 0x3FF) + return "\\u{0:04x}\\u{1:04x}".format(s1, s2) + # return '\\u%04x\\u%04x' % (s1, s2) + + return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' + + +encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii + + +class ThriftJSONDecoder(json.JSONDecoder): + def __init__(self, *args, **kwargs): + self._thrift_class = kwargs.pop("thrift_class") + super(ThriftJSONDecoder, self).__init__(*args, **kwargs) + + def decode(self, json_str): + if isinstance(json_str, dict): + dct = json_str + else: + dct = super(ThriftJSONDecoder, self).decode(json_str) + return self._convert( + dct, + TType.STRUCT, + # (self._thrift_class, self._thrift_class.thrift_spec)) + self._thrift_class, + ) + + def _convert(self, val, ttype, ttype_info): + if ttype == TType.STRUCT: + if val is None: + ret = None + else: + # (thrift_class, thrift_spec) = ttype_info + thrift_class = ttype_info + thrift_spec = ttype_info.thrift_spec + ret = thrift_class() + for tag, field in thrift_spec.items(): + if field is None: + continue + # {1: (15, 'ad_ids', 10, False), 255: (12, 'Base', , False)} + # {1: (15, 'models', (12, ), False), 255: (12, 'BaseResp', , False)} + if len(field) <= 3: + (field_ttype, field_name, dummy) = field + field_ttype_info = None + else: + (field_ttype, field_name, field_ttype_info, dummy) = field + + if val is None or field_name not in val: + continue + converted_val = self._convert( + val[field_name], field_ttype, field_ttype_info + ) + setattr(ret, field_name, converted_val) + elif ttype == TType.LIST: + if type(ttype_info) != tuple: # 说明是基础类型了, 无法在细分 + (element_ttype, element_ttype_info) = (ttype_info, None) + else: + (element_ttype, element_ttype_info) = ttype_info + if val is not None: + ret = [self._convert(x, element_ttype, element_ttype_info) for x in val] + else: + ret = None + + elif ttype == TType.SET: + if type(ttype_info) != tuple: # 说明是基础类型了, 无法在细分 + (element_ttype, 
+
+
+class ThriftJSONDecoder(json.JSONDecoder):
+    def __init__(self, *args, **kwargs):
+        self._thrift_class = kwargs.pop("thrift_class")
+        super(ThriftJSONDecoder, self).__init__(*args, **kwargs)
+
+    def decode(self, json_str):
+        if isinstance(json_str, dict):
+            dct = json_str
+        else:
+            dct = super(ThriftJSONDecoder, self).decode(json_str)
+        return self._convert(
+            dct,
+            TType.STRUCT,
+            # (self._thrift_class, self._thrift_class.thrift_spec))
+            self._thrift_class,
+        )
+
+    def _convert(self, val, ttype, ttype_info):
+        if ttype == TType.STRUCT:
+            if val is None:
+                ret = None
+            else:
+                # (thrift_class, thrift_spec) = ttype_info
+                thrift_class = ttype_info
+                thrift_spec = ttype_info.thrift_spec
+                ret = thrift_class()
+                for tag, field in thrift_spec.items():
+                    if field is None:
+                        continue
+                    # {1: (15, 'ad_ids', 10, False), 255: (12, 'Base', , False)}
+                    # {1: (15, 'models', (12, ), False), 255: (12, 'BaseResp', , False)}
+                    if len(field) <= 3:
+                        (field_ttype, field_name, dummy) = field
+                        field_ttype_info = None
+                    else:
+                        (field_ttype, field_name, field_ttype_info, dummy) = field
+
+                    if val is None or field_name not in val:
+                        continue
+                    converted_val = self._convert(
+                        val[field_name], field_ttype, field_ttype_info
+                    )
+                    setattr(ret, field_name, converted_val)
+        elif ttype == TType.LIST:
+            if type(ttype_info) != tuple:  # primitive element type, no nested type info
+                (element_ttype, element_ttype_info) = (ttype_info, None)
+            else:
+                (element_ttype, element_ttype_info) = ttype_info
+            if val is not None:
+                ret = [self._convert(x, element_ttype, element_ttype_info) for x in val]
+            else:
+                ret = None
+
+        elif ttype == TType.SET:
+            if type(ttype_info) != tuple:  # primitive element type, no nested type info
+                (element_ttype, element_ttype_info) = (ttype_info, None)
+            else:
+                (element_ttype, element_ttype_info) = ttype_info
+            if val is not None:
+                ret = set(
+                    [self._convert(x, element_ttype, element_ttype_info) for x in val]
+                )
+            else:
+                ret = None
+
+        elif ttype == TType.MAP:
+            # key handling
+            if type(ttype_info[0]) == tuple:
+                key_ttype, key_ttype_info = ttype_info[0]
+            else:
+                key_ttype, key_ttype_info = ttype_info[0], None
+
+            # value handling
+            if type(ttype_info[1]) != tuple:  # primitive value type, no nested type info
+                val_ttype = ttype_info[1]
+                val_ttype_info = None
+            else:
+                val_ttype, val_ttype_info = ttype_info[1]
+
+            if val is not None:
+                ret = dict(
+                    [
+                        (
+                            self._convert(k, key_ttype, key_ttype_info),
+                            self._convert(v, val_ttype, val_ttype_info),
+                        )
+                        for (k, v) in val.items()
+                    ]
+                )
+            else:
+                ret = None
+        elif ttype == TType.STRING:
+            if isinstance(val, str):
+                ret = val.encode("utf8")
+            elif val is None:
+                ret = None
+            else:
+                ret = str(val)
+            # check whether the string field is base64-encoded; if so, it should
+            # be b64decoded here to restore the original string
+            # todo: not implemented yet
+
+        elif ttype == TType.DOUBLE:
+            if val is not None:
+                ret = float(val)
+            else:
+                ret = None
+        elif ttype == TType.I64:
+            if val is not None:
+                ret = int(val)
+            else:
+                ret = None
+        elif ttype == TType.I32 or ttype == TType.I16 or ttype == TType.BYTE:
+            if val is not None:
+                ret = int(val)
+            else:
+                ret = None
+        elif ttype == TType.BOOL:
+            if val is not None:
+                ret = bool(val)
+            else:
+                ret = None
+        else:
+            raise TypeError("Unrecognized thrift field type: %s" % ttype)
+        return ret
+
+
+def json2thrift(json_str, thrift_class):
+    logging.debug(json_str)
+    return json.loads(
+        json_str, cls=ThriftJSONDecoder, thrift_class=thrift_class, strict=False
+    )
+
+
+def dumper(obj):
+    try:
+        return json.dumps(obj, default=lambda o: o.__dict__, sort_keys=True, indent=2)
+    except Exception:
+        return obj.__dict__
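+
+
+# Illustrative sketch of json2thrift: in real runs thrift_class comes from a
+# thriftpy2-generated module; the hand-written spec below only mimics its
+# shape (tag -> (ttype, field_name, type_info)) for demonstration.
+#
+#     class User(object):
+#         thrift_spec = {
+#             1: (TType.STRING, "name", None),
+#             2: (TType.I64, "uid", None),
+#         }
+#
+#     user = json2thrift('{"name": "tom", "uid": 1}', User)
+#     # user.name == b"tom", user.uid == 1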
+
+
+class MyJSONEncoder(json.JSONEncoder):
+    def __init__(
+        self,
+        skipkeys=False,
+        ensure_ascii=True,
+        check_circular=True,
+        allow_nan=True,
+        indent=None,
+        separators=None,
+        encoding="utf-8",
+        default=None,
+        sort_keys=False,
+        **kw
+    ):
+        super(MyJSONEncoder, self).__init__(
+            skipkeys=skipkeys,
+            ensure_ascii=ensure_ascii,
+            check_circular=check_circular,
+            allow_nan=allow_nan,
+            indent=indent,
+            separators=separators,
+            default=default,
+            sort_keys=sort_keys,
+        )
+        # json.JSONEncoder dropped the encoding argument in python 3,
+        # so keep it on the instance instead of passing it to super()
+        self.encoding = encoding
+        self.skip_nonutf8_value = kw.get(
+            "skip_nonutf8_value", False
+        )  # by default, do not skip fields that are not utf-8 encoded
+
+    def encode(self, o):
+        """Return a JSON string representation of a Python data structure.
+        JSONEncoder().encode({"foo": ["bar", "baz"]})
+        '{"foo": ["bar", "baz"]}'
+
+        """
+        # This is for extremely simple cases and benchmarks.
+        if isinstance(o, bytes):
+            o = o.decode(self.encoding or "utf-8")
+        if isinstance(o, str):
+            if self.ensure_ascii:
+                return encode_basestring_ascii(o)
+            else:
+                return encode_basestring(o)
+        # This doesn't pass the iterator directly to ''.join() because the
+        # exceptions aren't as detailed.  The list call should be roughly
+        # equivalent to the PySequence_Fast that ''.join() would do.
+        chunks = self.iterencode(o, _one_shot=True)
+        if not isinstance(chunks, (list, tuple)):
+            chunks = list(chunks)
+        # add by braver
+        # todo: fix "'utf8' codec can't decode byte 0x91 in position 3: invalid start byte"
+        if self.skip_nonutf8_value:  # defaults to False
+            tmp_chunks = []
+            for chunk in chunks:
+                try:
+                    tmp_chunks.append(unicode_2_utf8_keep_native(chunk))
+                except Exception:
+                    logging.debug(traceback.format_exc())
+            return "".join(tmp_chunks)
+
+        # keep the legacy behaviour of the stdlib json dumps interface
+        # (json/__init__.py in python 2.7)
+        return "".join(chunks)
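+
+
+# Illustrative sketch: the skip_nonutf8_value switch is forwarded by
+# json.dumps to the encoder's **kw, e.g.
+#
+#     json.dumps({"k": "v"}, cls=MyJSONEncoder, skip_nonutf8_value=True)
+#
+# Chunks that cannot be coerced are logged and dropped instead of raising.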
+
+
+class ThriftJSONEncoder(json.JSONEncoder):
+    """
+    add by braver
+    """
+
+    def __init__(
+        self,
+        skipkeys=False,
+        ensure_ascii=True,
+        check_circular=True,
+        allow_nan=True,
+        indent=None,
+        separators=None,
+        default=None,
+        sort_keys=False,
+        **kw
+    ):
+
+        super(ThriftJSONEncoder, self).__init__(
+            skipkeys=skipkeys,
+            ensure_ascii=ensure_ascii,
+            check_circular=check_circular,
+            allow_nan=allow_nan,
+            indent=indent,
+            separators=separators,
+            default=default,
+            sort_keys=sort_keys,
+        )
+        self.skip_nonutf8_value = kw.get(
+            "skip_nonutf8_value", False
+        )  # by default, do not skip fields that are not utf-8 encoded
+
+    def encode(self, o):
+        """Return a JSON string representation of a Python data structure.
+        JSONEncoder().encode({"foo": ["bar", "baz"]})
+        '{"foo": ["bar", "baz"]}'
+
+        """
+        # This is for extremely simple cases and benchmarks.
+        if isinstance(o, bytes):
+            o = o.decode("utf-8")
+        if isinstance(o, str):
+            if self.ensure_ascii:
+                return encode_basestring_ascii(o)
+            else:
+                return encode_basestring(o)
+        # This doesn't pass the iterator directly to ''.join() because the
+        # exceptions aren't as detailed.  The list call should be roughly
+        # equivalent to the PySequence_Fast that ''.join() would do.
+        chunks = self.iterencode(o, _one_shot=True)
+        if not isinstance(chunks, (list, tuple)):
+            chunks = list(chunks)
+        # add by braver
+        # todo: fix "'utf8' codec can't decode byte 0x91 in position 3: invalid start byte"
+        if self.skip_nonutf8_value:  # defaults to False
+            tmp_chunks = []
+            for chunk in chunks:
+                try:
+                    tmp_chunks.append(unicode_2_utf8_keep_native(chunk))
+                except Exception:
+                    logging.debug(traceback.format_exc())
+            return "".join(tmp_chunks)
+
+        # keep the legacy behaviour of the stdlib json dumps interface
+        return "".join(chunks)
+
+    def default(self, o):
+        if isinstance(o, bytes):
+            return str(o, encoding="utf-8")
+        if not hasattr(o, "thrift_spec"):
+            return super(ThriftJSONEncoder, self).default(o)
+
+        spec = getattr(o, "thrift_spec")
+        ret = {}
+        for tag, field in spec.items():
+            if field is None:
+                continue
+            # (tag, field_ttype, field_name, field_ttype_info, default) = field
+            field_name = field[1]
+            default = field[-1]
+            field_type = field[0]
+            field_ttype_info = field[2]
+            # if field_type in [TType.STRING, TType.BINARY]:  # string field (plain text or binary)
+            # if field_type in [TType.STRING, TType.BYTE]:  # string field (plain text or binary)
+            if field_name in o.__dict__:
+                val = o.__dict__[field_name]
+                if field_type in [TType.LIST, TType.SET]:  # collection type
+                    if val:  # val is a non-empty list/set
+                        val = list(val)  # normalize list/set to a list
+                        is_need_binary_bs64 = False
+                        if type(field_ttype_info) != tuple:  # primitive element type
+                            if (
+                                field_ttype_info in [TType.BYTE]
+                                and type(val[0]) in [str]
+                                and not istext(val[0])
+                            ):
+                                is_need_binary_bs64 = True
+                        if is_need_binary_bs64:
+                            for index, item in enumerate(val):
+                                if item and type(item) in [str] and not istext(item):
+                                    # binary string, base64-encode it
+                                    val[index] = base64.b64encode(item)
+                if field_type in [TType.BYTE] and type(val) in [
+                    str
+                ]:  # string field (plain text or binary)
+                    # base64-encode binary byte-string fields into an
+                    # ascii-safe plain-text representation
+                    if val and not istext(val):  # field is non-empty and binary
+                        val = base64.b64encode(val.encode("utf-8"))
+                        # val = base64.b64encode(val)  # base64-encode, otherwise serializing this field to json raises
+                # if val != default:
+                ret[field_name] = val
+        if "request_id" in o.__dict__:
+            ret["request_id"] = o.__dict__["request_id"]
+        if "rpc_latency" in o.__dict__:
+            ret["rpc_latency"] = o.__dict__["rpc_latency"]
+        return ret
+
+
+def thrift2json(obj, skip_nonutf8_value=False):
+    return json.dumps(
+        obj,
+        cls=ThriftJSONEncoder,
+        ensure_ascii=False,
+        skip_nonutf8_value=skip_nonutf8_value,
+    )
+
+
+def thrift2dict(obj):
+    json_str = thrift2json(obj)
+    return json.loads(json_str)
+
+
+dict2thrift = json2thrift
+
+if __name__ == "__main__":
+    print(istext("Всего за {$price$}, а доставка - бесплатно!"))
+    print(istext(b"\xe4\xb8\xad\xe6\x96\x87"))
+    print(
+        istext(
+            '{"web_uri":"ad-site-i18n-sg/202103185d0d723d88b7f642452dac73","height":336,"width":336,"file_name":""}'
+        )
+    )
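+
+# Usage sketch for the module-level helpers (names are illustrative):
+#
+#     resp_dict = thrift2dict(resp_obj)                        # thrift struct -> dict
+#     req_obj = dict2thrift(json.dumps({"uid": 1}), ReqCls)    # json -> thrift struct
+#
+# thrift_client.py below wires these converters to a live RPC connection.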
diff --git a/httprunner/thrift/thrift_client.py b/httprunner/thrift/thrift_client.py
new file mode 100644
index 0000000..8d5a339
--- /dev/null
+++ b/httprunner/thrift/thrift_client.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import enum
+import json
+
+import thriftpy2
+from loguru import logger
+from thriftpy2.protocol import (
+    TBinaryProtocolFactory,
+    TCompactProtocolFactory,
+    TCyBinaryProtocolFactory,
+    TJSONProtocolFactory,
+)
+from thriftpy2.rpc import make_client
+from thriftpy2.transport import (
+    TBufferedTransportFactory,
+    TCyBufferedTransportFactory,
+    TCyFramedTransportFactory,
+    TFramedTransportFactory,
+)
+
+from httprunner.thrift.data_convertor import json2thrift, thrift2dict
+
+
+class ProtoType(enum.Enum):
+    Binary = 1
+    CyBinary = 2
+    Compact = 3
+    Json = 4
+
+
+class TransType(enum.Enum):
+    Buffered = 1
+    CyBuffered = 2
+    Framed = 3
+    CyFramed = 4
+
+
+class RequestFormat(enum.Enum):
+    json = 1
+    binary = 2
+
+
+def get_proto_factory(proto_type):
+    if proto_type == ProtoType.Binary:
+        return TBinaryProtocolFactory()
+    if proto_type == ProtoType.CyBinary:
+        return TCyBinaryProtocolFactory()
+    if proto_type == ProtoType.Compact:
+        return TCompactProtocolFactory()
+    if proto_type == ProtoType.Json:
+        return TJSONProtocolFactory()
+
+
+def get_trans_factory(trans_type):
+    if trans_type == TransType.Buffered:
+        return TBufferedTransportFactory()
+    if trans_type == TransType.CyBuffered:
+        return TCyBufferedTransportFactory()
+    if trans_type == TransType.Framed:
+        return TFramedTransportFactory()
+    if trans_type == TransType.CyFramed:
+        return TCyFramedTransportFactory()
+
+
+class ThriftClient(object):
+    def __init__(
+        self,
+        thrift_file,
+        service_name,
+        ip,
+        port,
+        include_dirs=None,
+        timeout=3000,
+        proto_type=ProtoType.CyBinary,
+        trans_type=TransType.CyBuffered,
+    ):
+        self.thrift_file = thrift_file
+        self.include_dirs = include_dirs
+        self.service_name = service_name
+        self.ip = ip
+        self.port = port
+        self.timeout = timeout
+        self.proto_type = proto_type
+        self.trans_type = trans_type
+        try:
+            logger.debug(
+                "init thrift module: thrift_file={}, module_name={}",
+                thrift_file,
+                str(self.service_name) + "_thrift",
+            )
+            self.thrift_module = thriftpy2.load(
+                self.thrift_file,
+                module_name=str(self.service_name) + "_thrift",
+                include_dirs=self.include_dirs,
+            )
+            self.thrift_service_obj = getattr(self.thrift_module, self.service_name)
+            logger.debug(
+                "init thrift client: service_name={}, ip={}, port={}",
+                self.thrift_service_obj,
+                ip,
+                port,
+            )
+            self.client = make_client(
+                self.thrift_service_obj,
+                self.ip,
+                int(self.port),
+                timeout=self.timeout,
+                proto_factory=get_proto_factory(self.proto_type),
+                trans_factory=get_trans_factory(self.trans_type),
+            )
+        except Exception as e:
+            self.thrift_module = None
+            self.thrift_service_obj = None
+            self.client = None
+            logger.exception("init thrift module and client failed: {}".format(e))
+        finally:
+            thriftpy2.parser.parser.thrift_stack = []
+
+    def get_client(self):
+        return self.client
+
+    def send_request(self, request_data, request_method=""):
+        thrift_req_cls = getattr(
+            self.thrift_service_obj, request_method + "_args"
+        ).thrift_spec[1][2]
+        request_obj = json2thrift(json.dumps(request_data), thrift_req_cls)
+        logger.debug(
+            "send thrift request: request_method={}, request_obj={}",
+            request_method,
+            request_obj,
+        )
+        response_obj = getattr(self.client, request_method)(request_obj)
+        logger.debug("thrift response = {}", response_obj)
+        return thrift2dict(response_obj)
+
+    def __del__(self):
+        # the client may be None if __init__ failed; guard before closing
+        if self.client is not None:
+            self.client.close()
diff --git a/httprunner/utils.py b/httprunner/utils.py
new file mode 100644
index 0000000..b189832
--- /dev/null
+++ b/httprunner/utils.py
@@ -0,0 +1,366 @@
+import collections
+import copy
+import itertools
+import json
+import os
+import os.path
+import platform
+import random
+import sys
+import time
+import uuid
+from multiprocessing import Queue
+from typing import Any, Dict, List
+
+import requests
+import sentry_sdk
+from loguru import logger
+
+from httprunner import __version__, exceptions
+from httprunner.models import VariablesMapping
+
+
+""" run httpbin as test service
+https://github.com/postmanlabs/httpbin + +$ docker pull kennethreitz/httpbin +$ docker run -p 80:80 kennethreitz/httpbin +""" +HTTP_BIN_URL = "http://127.0.0.1:80" + + +def get_platform(): + return { + "httprunner_version": __version__, + "python_version": "{} {}".format( + platform.python_implementation(), platform.python_version() + ), + "platform": platform.platform(), + } + + +def init_sentry_sdk(): + if os.getenv("DISABLE_SENTRY") == "true": + return + + sentry_sdk.init( + dsn="https://460e31339bcb428c879aafa6a2e78098@sentry.io/5263855", + release="httprunner@{}".format(__version__), + ) + with sentry_sdk.configure_scope() as scope: + scope.set_user({"id": uuid.getnode()}) + + +class GA4Client(object): + """send events to Google Analytics 4 via Measurement Protocol. + get details in hrp/internal/sdk/ga4.go + """ + + def __init__( + self, measurement_id: str, api_secret: str, debug: bool = False + ) -> None: + self.http_client = requests.Session() + + self.debug = debug + if debug: + uri = "https://www.google-analytics.com/debug/mp/collect" + else: + uri = "https://www.google-analytics.com/mp/collect" + + self.uri = f"{uri}?measurement_id={measurement_id}&api_secret={api_secret}" + self.user_id = str(uuid.getnode()) + self.common_event_params = get_platform() + + # do not send GA events in CI environment + self.__is_ci = os.getenv("DISABLE_GA") == "true" + + def send_event(self, name: str, event_params: dict = None) -> None: + if self.__is_ci: + return + + event_params = event_params or {} + event_params.update(self.common_event_params) + event = { + "name": name, + "params": event_params, + } + + payload = { + "client_id": f"{int(random.random() * 10**8)}.{int(time.time())}", + "user_id": self.user_id, + "timestamp_micros": int(time.time() * 10**6), + "events": [event], + } + + if self.debug: + logger.debug(f"send GA4 event, uri: {self.uri}, payload: {payload}") + + try: + resp = self.http_client.post(self.uri, json=payload, timeout=5) + except Exception as err: # ProxyError, SSLError, ConnectionError + logger.error(f"request GA4 failed, error: {err}") + return + + if resp.status_code >= 300: + logger.error( + f"validation response got unexpected status: {resp.status_code}" + ) + return + + if not self.debug: + return + + try: + resp_body = resp.json() + logger.debug( + "get GA4 validation response, " + f"status code: {resp.status_code}, body: {resp_body}" + ) + except Exception: + pass + + +GA4_MEASUREMENT_ID = "G-9KHR3VC2LN" +GA4_API_SECRET = "w7lKNQIrQsKNS4ikgMPp0Q" + +ga4_client = GA4Client(GA4_MEASUREMENT_ID, GA4_API_SECRET, False) + + +def set_os_environ(variables_mapping): + """set variables mapping to os.environ""" + for variable in variables_mapping: + os.environ[variable] = variables_mapping[variable] + logger.debug(f"Set OS environment variable: {variable}") + + +def unset_os_environ(variables_mapping): + """unset variables mapping to os.environ""" + for variable in variables_mapping: + os.environ.pop(variable) + logger.debug(f"Unset OS environment variable: {variable}") + + +def get_os_environ(variable_name): + """get value of environment variable. + + Args: + variable_name(str): variable name + + Returns: + value of environment variable. + + Raises: + exceptions.EnvNotFound: If environment variable not found. 
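+
+    Examples:
+        >>> os.environ["abc"] = "123"
+        >>> get_os_environ("abc")
+        '123'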
+
+    """
+    try:
+        return os.environ[variable_name]
+    except KeyError:
+        raise exceptions.EnvNotFound(variable_name)
+
+
+def lower_dict_keys(origin_dict):
+    """convert keys in dict to lower case
+
+    Args:
+        origin_dict (dict): mapping data structure
+
+    Returns:
+        dict: mapping with all keys lowered.
+
+    Examples:
+        >>> origin_dict = {
+            "Name": "",
+            "Request": "",
+            "URL": "",
+            "METHOD": "",
+            "Headers": "",
+            "Data": ""
+        }
+        >>> lower_dict_keys(origin_dict)
+        {
+            "name": "",
+            "request": "",
+            "url": "",
+            "method": "",
+            "headers": "",
+            "data": ""
+        }
+
+    """
+    if not origin_dict or not isinstance(origin_dict, dict):
+        return origin_dict
+
+    return {key.lower(): value for key, value in origin_dict.items()}
+
+
+def print_info(info_mapping):
+    """print info in mapping.
+
+    Args:
+        info_mapping (dict): input(variables) or output mapping.
+
+    Examples:
+        >>> info_mapping = {
+            "var_a": "hello",
+            "var_b": "world"
+        }
+        >>> info_mapping = {
+            "status_code": 500
+        }
+        >>> print_info(info_mapping)
+        ==================== Output ====================
+        Variable : Value
+        ---------------- : ----------------------------
+        var_a : hello
+        var_b : world
+        ------------------------------------------------
+
+    """
+    if not info_mapping:
+        return
+
+    content_format = "{:<16} : {:<}\n"
+    content = "\n==================== Output ====================\n"
+    content += content_format.format("Variable", "Value")
+    content += content_format.format("-" * 16, "-" * 29)
+
+    for key, value in info_mapping.items():
+        if isinstance(value, (tuple, collections.deque)):
+            continue
+        elif isinstance(value, (dict, list)):
+            value = json.dumps(value)
+        elif value is None:
+            value = "None"
+
+        content += content_format.format(key, value)
+
+    content += "-" * 48 + "\n"
+    logger.info(content)
+
+
+def omit_long_data(body, omit_len=512):
+    """omit too long str/bytes"""
+    if not isinstance(body, (str, bytes)):
+        return body
+
+    body_len = len(body)
+    if body_len <= omit_len:
+        return body
+
+    omitted_body = body[0:omit_len]
+
+    appendix_str = f" ... OMITTED {body_len - omit_len} CHARACTERS ..."
+    if isinstance(body, bytes):
+        appendix_str = appendix_str.encode("utf-8")
+
+    return omitted_body + appendix_str
+
+
+def sort_dict_by_custom_order(raw_dict: Dict, custom_order: List):
+    def get_index_from_list(lst: List, item: Any):
+        try:
+            return lst.index(item)
+        except ValueError:
+            # item is not in lst
+            return len(lst) + 1
+
+    return dict(
+        sorted(raw_dict.items(), key=lambda i: get_index_from_list(custom_order, i[0]))
+    )
+
+
+class ExtendJSONEncoder(json.JSONEncoder):
+    """especially used to safely dump json data with python object,
+    such as MultipartEncoder"""
+
+    def default(self, obj):
+        try:
+            return super(ExtendJSONEncoder, self).default(obj)
+        except (UnicodeDecodeError, TypeError):
+            return repr(obj)
+
+
+def merge_variables(
+    variables: VariablesMapping, variables_to_be_overridden: VariablesMapping
+) -> VariablesMapping:
+    """merge two variables mapping, the first variables have higher priority"""
+    step_new_variables = {}
+    for key, value in variables.items():
+        if f"${key}" == value or "${" + key + "}" == value:
+            # e.g.
{"base_url": "$base_url"} + # or {"base_url": "${base_url}"} + continue + + step_new_variables[key] = value + + merged_variables = copy.copy(variables_to_be_overridden) + merged_variables.update(step_new_variables) + return merged_variables + + +def is_support_multiprocessing() -> bool: + try: + Queue() + return True + except (ImportError, OSError): + # system that does not support semaphores + # (dependency of multiprocessing), like Android termux + return False + + +def gen_cartesian_product(*args: List[Dict]) -> List[Dict]: + """generate cartesian product for lists + + Args: + args (list of list): lists to be generated with cartesian product + + Returns: + list: cartesian product in list + + Examples: + + >>> arg1 = [{"a": 1}, {"a": 2}] + >>> arg2 = [{"x": 111, "y": 112}, {"x": 121, "y": 122}] + >>> args = [arg1, arg2] + >>> gen_cartesian_product(*args) + >>> # same as below + >>> gen_cartesian_product(arg1, arg2) + [ + {'a': 1, 'x': 111, 'y': 112}, + {'a': 1, 'x': 121, 'y': 122}, + {'a': 2, 'x': 111, 'y': 112}, + {'a': 2, 'x': 121, 'y': 122} + ] + + """ + if not args: + return [] + elif len(args) == 1: + return args[0] + + product_list = [] + for product_item_tuple in itertools.product(*args): + product_item_dict = {} + for item in product_item_tuple: + product_item_dict.update(item) + + product_list.append(product_item_dict) + + return product_list + + +LOGGER_FORMAT = ( + "{time:YYYY-MM-DD HH:mm:ss.SSS}" + + " | {level} | {message}" +) + + +def init_logger(level: str): + level = level.upper() + if level not in ["TRACE", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]: + level = "INFO" # default + + # set log level to INFO + logger.remove() + logger.add(sys.stdout, format=LOGGER_FORMAT, level=level) diff --git a/httprunner/utils_test.py b/httprunner/utils_test.py new file mode 100644 index 0000000..47fac6c --- /dev/null +++ b/httprunner/utils_test.py @@ -0,0 +1,171 @@ +import decimal +import json +import os +import unittest +from pathlib import Path + +import toml + +from httprunner import __version__, loader, utils +from httprunner.utils import ExtendJSONEncoder, merge_variables, ga4_client + + +class TestUtils(unittest.TestCase): + def test_set_os_environ(self): + self.assertNotIn("abc", os.environ) + variables_mapping = {"abc": "123"} + utils.set_os_environ(variables_mapping) + self.assertIn("abc", os.environ) + self.assertEqual(os.environ["abc"], "123") + + def test_validators(self): + from httprunner.builtin import comparators + + functions_mapping = loader.load_module_functions(comparators) + + functions_mapping["equal"](None, None) + functions_mapping["equal"](1, 1) + functions_mapping["equal"]("abc", "abc") + with self.assertRaises(AssertionError): + functions_mapping["equal"]("123", 123) + + functions_mapping["less_than"](1, 2) + functions_mapping["less_or_equals"](2, 2) + + functions_mapping["greater_than"](2, 1) + functions_mapping["greater_or_equals"](2, 2) + + functions_mapping["not_equal"](123, "123") + + functions_mapping["length_equal"]("123", 3) + with self.assertRaises(AssertionError): + functions_mapping["length_equal"]("123", "3") + with self.assertRaises(AssertionError): + functions_mapping["length_equal"]("123", "abc") + functions_mapping["length_greater_than"]("123", 2) + functions_mapping["length_greater_or_equals"]("123", 3) + + functions_mapping["contains"]("123abc456", "3ab") + functions_mapping["contains"](["1", "2"], "1") + functions_mapping["contains"]({"a": 1, "b": 2}, "a") + functions_mapping["contained_by"]("3ab", "123abc456") + 
functions_mapping["contained_by"](0, [0, 200]) + + functions_mapping["regex_match"]("123abc456", "^123\w+456$") + with self.assertRaises(AssertionError): + functions_mapping["regex_match"]("123abc456", "^12b.*456$") + + functions_mapping["startswith"]("abc123", "ab") + functions_mapping["startswith"]("123abc", 12) + functions_mapping["startswith"](12345, 123) + + functions_mapping["endswith"]("abc123", 23) + functions_mapping["endswith"]("123abc", "abc") + functions_mapping["endswith"](12345, 45) + + functions_mapping["type_match"](580509390, int) + functions_mapping["type_match"](580509390, "int") + functions_mapping["type_match"]([], list) + functions_mapping["type_match"]([], "list") + functions_mapping["type_match"]([1], "list") + functions_mapping["type_match"]({}, "dict") + functions_mapping["type_match"]({"a": 1}, "dict") + functions_mapping["type_match"](None, "None") + functions_mapping["type_match"](None, "NoneType") + functions_mapping["type_match"](None, None) + + def test_lower_dict_keys(self): + request_dict = { + "url": "http://127.0.0.1:5000", + "METHOD": "POST", + "Headers": {"Accept": "application/json", "User-Agent": "ios/9.3"}, + } + new_request_dict = utils.lower_dict_keys(request_dict) + self.assertIn("method", new_request_dict) + self.assertIn("headers", new_request_dict) + self.assertIn("Accept", new_request_dict["headers"]) + self.assertIn("User-Agent", new_request_dict["headers"]) + + request_dict = "$default_request" + new_request_dict = utils.lower_dict_keys(request_dict) + self.assertEqual("$default_request", request_dict) + + request_dict = None + new_request_dict = utils.lower_dict_keys(request_dict) + self.assertEqual(None, request_dict) + + def test_print_info(self): + info_mapping = {"a": 1, "t": (1, 2), "b": {"b1": 123}, "c": None, "d": [4, 5]} + utils.print_info(info_mapping) + + def test_sort_dict_by_custom_order(self): + self.assertEqual( + list( + utils.sort_dict_by_custom_order( + {"C": 3, "D": 2, "A": 1, "B": 8}, ["A", "D"] + ).keys() + ), + ["A", "D", "C", "B"], + ) + + def test_safe_dump_json(self): + class A(object): + pass + + data = {"a": A(), "b": decimal.Decimal("1.45")} + + with self.assertRaises(TypeError): + json.dumps(data) + + json.dumps(data, cls=ExtendJSONEncoder) + + def test_override_config_variables(self): + step_variables = {"base_url": "$base_url", "foo1": "bar1"} + config_variables = {"base_url": "https://postman-echo.com", "foo1": "bar111"} + self.assertEqual( + merge_variables(step_variables, config_variables), + {"base_url": "https://postman-echo.com", "foo1": "bar1"}, + ) + + def test_cartesian_product_one(self): + parameters_content_list = [[{"a": 1}, {"a": 2}]] + product_list = utils.gen_cartesian_product(*parameters_content_list) + self.assertEqual(product_list, [{"a": 1}, {"a": 2}]) + + def test_cartesian_product_multiple(self): + parameters_content_list = [ + [{"a": 1}, {"a": 2}], + [{"x": 111, "y": 112}, {"x": 121, "y": 122}], + ] + product_list = utils.gen_cartesian_product(*parameters_content_list) + self.assertEqual( + product_list, + [ + {"a": 1, "x": 111, "y": 112}, + {"a": 1, "x": 121, "y": 122}, + {"a": 2, "x": 111, "y": 112}, + {"a": 2, "x": 121, "y": 122}, + ], + ) + + def test_cartesian_product_empty(self): + parameters_content_list = [] + product_list = utils.gen_cartesian_product(*parameters_content_list) + self.assertEqual(product_list, []) + + def test_versions_are_in_sync(self): + """Checks if the pyproject.toml and __version__ in __init__.py are in sync.""" + + path = Path(__file__).resolve().parents[1] 
/ "pyproject.toml" + pyproject = toml.loads(open(str(path)).read()) + pyproject_version = pyproject["tool"]["poetry"]["version"] + self.assertEqual(pyproject_version, __version__) + + def test_ga4_send_event(self): + ga4_client.send_event( + "httprunner_debug_event", + { + "a": 123, + "b": 456, + }, + ) diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000..5dc4bf4 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,1447 @@ +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. + +[[package]] +name = "allure-pytest" +version = "2.13.2" +description = "Allure pytest integration" +optional = true +python-versions = "*" +files = [ + {file = "allure-pytest-2.13.2.tar.gz", hash = "sha256:22243159e8ec81ce2b5254b4013802198821b1b42f118f69d4a289396607c7b3"}, + {file = "allure_pytest-2.13.2-py3-none-any.whl", hash = "sha256:17de9dbee7f61c8e66a5b5e818b00e419dbcea44cb55c24319401ba813220690"}, +] + +[package.dependencies] +allure-python-commons = "2.13.2" +pytest = ">=4.5.0" + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "allure-python-commons" +version = "2.13.2" +description = "Common module for integrate allure with python-based frameworks" +optional = true +python-versions = ">=3.6" +files = [ + {file = "allure-python-commons-2.13.2.tar.gz", hash = "sha256:8a03681330231b1deadd86b97ff68841c6591320114ae638570f1ed60d7a2033"}, + {file = "allure_python_commons-2.13.2-py3-none-any.whl", hash = "sha256:2bb3646ec3fbf5b36d178a5e735002bc130ae9f9ba80f080af97d368ba375051"}, +] + +[package.dependencies] +attrs = ">=16.0.0" +pluggy = ">=0.4.0" + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "attrs" +version = "23.1.0" +description = "Classes Without Boilerplate" +optional = true +python-versions = ">=3.7" +files = [ + {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, + {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, +] + +[package.dependencies] +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "black" +version = "22.12.0" +description = "The uncompromising code formatter." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"}, + {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"}, + {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"}, + {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"}, + {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"}, + {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"}, + {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"}, + {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"}, + {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"}, + {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"}, + {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"}, + {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""} +typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""} +typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "brotli" +version = "1.0.9" +description = "Python bindings for the Brotli compression library" +optional = false +python-versions = "*" +files = [ + {file = "Brotli-1.0.9-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:268fe94547ba25b58ebc724680609c8ee3e5a843202e9a381f6f9c5e8bdb5c70"}, + {file = "Brotli-1.0.9-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:c2415d9d082152460f2bd4e382a1e85aed233abc92db5a3880da2257dc7daf7b"}, + {file = "Brotli-1.0.9-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5913a1177fc36e30fcf6dc868ce23b0453952c78c04c266d3149b3d39e1410d6"}, + {file = "Brotli-1.0.9-cp27-cp27m-win32.whl", hash = "sha256:afde17ae04d90fbe53afb628f7f2d4ca022797aa093e809de5c3cf276f61bbfa"}, + {file = "Brotli-1.0.9-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7cb81373984cc0e4682f31bc3d6be9026006d96eecd07ea49aafb06897746452"}, + {file = "Brotli-1.0.9-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:db844eb158a87ccab83e868a762ea8024ae27337fc7ddcbfcddd157f841fdfe7"}, + {file = 
"Brotli-1.0.9-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:9744a863b489c79a73aba014df554b0e7a0fc44ef3f8a0ef2a52919c7d155031"}, + {file = "Brotli-1.0.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a72661af47119a80d82fa583b554095308d6a4c356b2a554fdc2799bc19f2a43"}, + {file = "Brotli-1.0.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ee83d3e3a024a9618e5be64648d6d11c37047ac48adff25f12fa4226cf23d1c"}, + {file = "Brotli-1.0.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:19598ecddd8a212aedb1ffa15763dd52a388518c4550e615aed88dc3753c0f0c"}, + {file = "Brotli-1.0.9-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:44bb8ff420c1d19d91d79d8c3574b8954288bdff0273bf788954064d260d7ab0"}, + {file = "Brotli-1.0.9-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e23281b9a08ec338469268f98f194658abfb13658ee98e2b7f85ee9dd06caa91"}, + {file = "Brotli-1.0.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3496fc835370da351d37cada4cf744039616a6db7d13c430035e901443a34daa"}, + {file = "Brotli-1.0.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b83bb06a0192cccf1eb8d0a28672a1b79c74c3a8a5f2619625aeb6f28b3a82bb"}, + {file = "Brotli-1.0.9-cp310-cp310-win32.whl", hash = "sha256:26d168aac4aaec9a4394221240e8a5436b5634adc3cd1cdf637f6645cecbf181"}, + {file = "Brotli-1.0.9-cp310-cp310-win_amd64.whl", hash = "sha256:622a231b08899c864eb87e85f81c75e7b9ce05b001e59bbfbf43d4a71f5f32b2"}, + {file = "Brotli-1.0.9-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:cc0283a406774f465fb45ec7efb66857c09ffefbe49ec20b7882eff6d3c86d3a"}, + {file = "Brotli-1.0.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:11d3283d89af7033236fa4e73ec2cbe743d4f6a81d41bd234f24bf63dde979df"}, + {file = "Brotli-1.0.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c1306004d49b84bd0c4f90457c6f57ad109f5cc6067a9664e12b7b79a9948ad"}, + {file = "Brotli-1.0.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1375b5d17d6145c798661b67e4ae9d5496920d9265e2f00f1c2c0b5ae91fbde"}, + {file = "Brotli-1.0.9-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cab1b5964b39607a66adbba01f1c12df2e55ac36c81ec6ed44f2fca44178bf1a"}, + {file = "Brotli-1.0.9-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8ed6a5b3d23ecc00ea02e1ed8e0ff9a08f4fc87a1f58a2530e71c0f48adf882f"}, + {file = "Brotli-1.0.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cb02ed34557afde2d2da68194d12f5719ee96cfb2eacc886352cb73e3808fc5d"}, + {file = "Brotli-1.0.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b3523f51818e8f16599613edddb1ff924eeb4b53ab7e7197f85cbc321cdca32f"}, + {file = "Brotli-1.0.9-cp311-cp311-win32.whl", hash = "sha256:ba72d37e2a924717990f4d7482e8ac88e2ef43fb95491eb6e0d124d77d2a150d"}, + {file = "Brotli-1.0.9-cp311-cp311-win_amd64.whl", hash = "sha256:3ffaadcaeafe9d30a7e4e1e97ad727e4f5610b9fa2f7551998471e3736738679"}, + {file = "Brotli-1.0.9-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:c83aa123d56f2e060644427a882a36b3c12db93727ad7a7b9efd7d7f3e9cc2c4"}, + {file = "Brotli-1.0.9-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:6b2ae9f5f67f89aade1fab0f7fd8f2832501311c363a21579d02defa844d9296"}, + {file = "Brotli-1.0.9-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:68715970f16b6e92c574c30747c95cf8cf62804569647386ff032195dc89a430"}, + {file = 
"Brotli-1.0.9-cp35-cp35m-win32.whl", hash = "sha256:defed7ea5f218a9f2336301e6fd379f55c655bea65ba2476346340a0ce6f74a1"}, + {file = "Brotli-1.0.9-cp35-cp35m-win_amd64.whl", hash = "sha256:88c63a1b55f352b02c6ffd24b15ead9fc0e8bf781dbe070213039324922a2eea"}, + {file = "Brotli-1.0.9-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:503fa6af7da9f4b5780bb7e4cbe0c639b010f12be85d02c99452825dd0feef3f"}, + {file = "Brotli-1.0.9-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:40d15c79f42e0a2c72892bf407979febd9cf91f36f495ffb333d1d04cebb34e4"}, + {file = "Brotli-1.0.9-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:93130612b837103e15ac3f9cbacb4613f9e348b58b3aad53721d92e57f96d46a"}, + {file = "Brotli-1.0.9-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87fdccbb6bb589095f413b1e05734ba492c962b4a45a13ff3408fa44ffe6479b"}, + {file = "Brotli-1.0.9-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:6d847b14f7ea89f6ad3c9e3901d1bc4835f6b390a9c71df999b0162d9bb1e20f"}, + {file = "Brotli-1.0.9-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:495ba7e49c2db22b046a53b469bbecea802efce200dffb69b93dd47397edc9b6"}, + {file = "Brotli-1.0.9-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:4688c1e42968ba52e57d8670ad2306fe92e0169c6f3af0089be75bbac0c64a3b"}, + {file = "Brotli-1.0.9-cp36-cp36m-win32.whl", hash = "sha256:61a7ee1f13ab913897dac7da44a73c6d44d48a4adff42a5701e3239791c96e14"}, + {file = "Brotli-1.0.9-cp36-cp36m-win_amd64.whl", hash = "sha256:1c48472a6ba3b113452355b9af0a60da5c2ae60477f8feda8346f8fd48e3e87c"}, + {file = "Brotli-1.0.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3b78a24b5fd13c03ee2b7b86290ed20efdc95da75a3557cc06811764d5ad1126"}, + {file = "Brotli-1.0.9-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:9d12cf2851759b8de8ca5fde36a59c08210a97ffca0eb94c532ce7b17c6a3d1d"}, + {file = "Brotli-1.0.9-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:6c772d6c0a79ac0f414a9f8947cc407e119b8598de7621f39cacadae3cf57d12"}, + {file = "Brotli-1.0.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29d1d350178e5225397e28ea1b7aca3648fcbab546d20e7475805437bfb0a130"}, + {file = "Brotli-1.0.9-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7bbff90b63328013e1e8cb50650ae0b9bac54ffb4be6104378490193cd60f85a"}, + {file = "Brotli-1.0.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ec1947eabbaf8e0531e8e899fc1d9876c179fc518989461f5d24e2223395a9e3"}, + {file = "Brotli-1.0.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:12effe280b8ebfd389022aa65114e30407540ccb89b177d3fbc9a4f177c4bd5d"}, + {file = "Brotli-1.0.9-cp37-cp37m-win32.whl", hash = "sha256:f909bbbc433048b499cb9db9e713b5d8d949e8c109a2a548502fb9aa8630f0b1"}, + {file = "Brotli-1.0.9-cp37-cp37m-win_amd64.whl", hash = "sha256:97f715cf371b16ac88b8c19da00029804e20e25f30d80203417255d239f228b5"}, + {file = "Brotli-1.0.9-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e16eb9541f3dd1a3e92b89005e37b1257b157b7256df0e36bd7b33b50be73bcb"}, + {file = "Brotli-1.0.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:160c78292e98d21e73a4cc7f76a234390e516afcd982fa17e1422f7c6a9ce9c8"}, + {file = "Brotli-1.0.9-cp38-cp38-manylinux1_i686.whl", hash = "sha256:b663f1e02de5d0573610756398e44c130add0eb9a3fc912a09665332942a2efb"}, + {file = "Brotli-1.0.9-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5b6ef7d9f9c38292df3690fe3e302b5b530999fa90014853dcd0d6902fb59f26"}, + {file = "Brotli-1.0.9-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8a674ac10e0a87b683f4fa2b6fa41090edfd686a6524bd8dedbd6138b309175c"}, + {file = "Brotli-1.0.9-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e2d9e1cbc1b25e22000328702b014227737756f4b5bf5c485ac1d8091ada078b"}, + {file = "Brotli-1.0.9-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b336c5e9cf03c7be40c47b5fd694c43c9f1358a80ba384a21969e0b4e66a9b17"}, + {file = "Brotli-1.0.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:85f7912459c67eaab2fb854ed2bc1cc25772b300545fe7ed2dc03954da638649"}, + {file = "Brotli-1.0.9-cp38-cp38-win32.whl", hash = "sha256:35a3edbe18e876e596553c4007a087f8bcfd538f19bc116917b3c7522fca0429"}, + {file = "Brotli-1.0.9-cp38-cp38-win_amd64.whl", hash = "sha256:269a5743a393c65db46a7bb982644c67ecba4b8d91b392403ad8a861ba6f495f"}, + {file = "Brotli-1.0.9-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2aad0e0baa04517741c9bb5b07586c642302e5fb3e75319cb62087bd0995ab19"}, + {file = "Brotli-1.0.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5cb1e18167792d7d21e21365d7650b72d5081ed476123ff7b8cac7f45189c0c7"}, + {file = "Brotli-1.0.9-cp39-cp39-manylinux1_i686.whl", hash = "sha256:16d528a45c2e1909c2798f27f7bf0a3feec1dc9e50948e738b961618e38b6a7b"}, + {file = "Brotli-1.0.9-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:56d027eace784738457437df7331965473f2c0da2c70e1a1f6fdbae5402e0389"}, + {file = "Brotli-1.0.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bf919756d25e4114ace16a8ce91eb340eb57a08e2c6950c3cebcbe3dff2a5e7"}, + {file = "Brotli-1.0.9-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e4c4e92c14a57c9bd4cb4be678c25369bf7a092d55fd0866f759e425b9660806"}, + {file = "Brotli-1.0.9-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e48f4234f2469ed012a98f4b7874e7f7e173c167bed4934912a29e03167cf6b1"}, + {file = "Brotli-1.0.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9ed4c92a0665002ff8ea852353aeb60d9141eb04109e88928026d3c8a9e5433c"}, + {file = "Brotli-1.0.9-cp39-cp39-win32.whl", hash = "sha256:cfc391f4429ee0a9370aa93d812a52e1fee0f37a81861f4fdd1f4fb28e8547c3"}, + {file = "Brotli-1.0.9-cp39-cp39-win_amd64.whl", hash = "sha256:854c33dad5ba0fbd6ab69185fec8dab89e13cda6b7d191ba111987df74f38761"}, + {file = "Brotli-1.0.9-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9749a124280a0ada4187a6cfd1ffd35c350fb3af79c706589d98e088c5044267"}, + {file = "Brotli-1.0.9-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:73fd30d4ce0ea48010564ccee1a26bfe39323fde05cb34b5863455629db61dc7"}, + {file = "Brotli-1.0.9-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:02177603aaca36e1fd21b091cb742bb3b305a569e2402f1ca38af471777fb019"}, + {file = "Brotli-1.0.9-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:76ffebb907bec09ff511bb3acc077695e2c32bc2142819491579a695f77ffd4d"}, + {file = "Brotli-1.0.9-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b43775532a5904bc938f9c15b77c613cb6ad6fb30990f3b0afaea82797a402d8"}, + {file = "Brotli-1.0.9-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5bf37a08493232fbb0f8229f1824b366c2fc1d02d64e7e918af40acd15f3e337"}, + {file = "Brotli-1.0.9-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:330e3f10cd01da535c70d09c4283ba2df5fb78e915bea0a28becad6e2ac010be"}, + {file = "Brotli-1.0.9-pp38-pypy38_pp73-win_amd64.whl", hash = 
"sha256:e1abbeef02962596548382e393f56e4c94acd286bd0c5afba756cffc33670e8a"}, + {file = "Brotli-1.0.9-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3148362937217b7072cf80a2dcc007f09bb5ecb96dae4617316638194113d5be"}, + {file = "Brotli-1.0.9-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:336b40348269f9b91268378de5ff44dc6fbaa2268194f85177b53463d313842a"}, + {file = "Brotli-1.0.9-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b8b09a16a1950b9ef495a0f8b9d0a87599a9d1f179e2d4ac014b2ec831f87e7"}, + {file = "Brotli-1.0.9-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c8e521a0ce7cf690ca84b8cc2272ddaf9d8a50294fd086da67e517439614c755"}, + {file = "Brotli-1.0.9.zip", hash = "sha256:4d1b810aa0ed773f81dceda2cc7b403d01057458730e309856356d4ef4188438"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "certifi" +version = "2023.7.22" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, + {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "charset-normalizer" +version = "3.2.0" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-win32.whl", hash = "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-win32.whl", hash = "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346"}, + {file = 
"charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-win32.whl", hash = "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46"}, + {file = 
"charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-win32.whl", hash = "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-win32.whl", hash = "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80"}, + {file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"}, +] + +[package.source] +type = "legacy" +url = 
"https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "click" +version = "8.1.6" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.6-py3-none-any.whl", hash = "sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5"}, + {file = "click-8.1.6.tar.gz", hash = "sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "coverage" +version = "4.5.4" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, <4" +files = [ + {file = "coverage-4.5.4-cp26-cp26m-macosx_10_12_x86_64.whl", hash = "sha256:eee64c616adeff7db37cc37da4180a3a5b6177f5c46b187894e633f088fb5b28"}, + {file = "coverage-4.5.4-cp27-cp27m-macosx_10_12_x86_64.whl", hash = "sha256:ef824cad1f980d27f26166f86856efe11eff9912c4fed97d3804820d43fa550c"}, + {file = "coverage-4.5.4-cp27-cp27m-macosx_10_13_intel.whl", hash = "sha256:9a334d6c83dfeadae576b4d633a71620d40d1c379129d587faa42ee3e2a85cce"}, + {file = "coverage-4.5.4-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:7494b0b0274c5072bddbfd5b4a6c6f18fbbe1ab1d22a41e99cd2d00c8f96ecfe"}, + {file = "coverage-4.5.4-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:826f32b9547c8091679ff292a82aca9c7b9650f9fda3e2ca6bf2ac905b7ce888"}, + {file = "coverage-4.5.4-cp27-cp27m-win32.whl", hash = "sha256:63a9a5fc43b58735f65ed63d2cf43508f462dc49857da70b8980ad78d41d52fc"}, + {file = "coverage-4.5.4-cp27-cp27m-win_amd64.whl", hash = "sha256:e2ede7c1d45e65e209d6093b762e98e8318ddeff95317d07a27a2140b80cfd24"}, + {file = "coverage-4.5.4-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:dd579709a87092c6dbee09d1b7cfa81831040705ffa12a1b248935274aee0437"}, + {file = "coverage-4.5.4-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:08907593569fe59baca0bf152c43f3863201efb6113ecb38ce7e97ce339805a6"}, + {file = "coverage-4.5.4-cp33-cp33m-macosx_10_10_x86_64.whl", hash = "sha256:6b62544bb68106e3f00b21c8930e83e584fdca005d4fffd29bb39fb3ffa03cb5"}, + {file = "coverage-4.5.4-cp34-cp34m-macosx_10_12_x86_64.whl", hash = "sha256:331cb5115673a20fb131dadd22f5bcaf7677ef758741312bee4937d71a14b2ef"}, + {file = "coverage-4.5.4-cp34-cp34m-manylinux1_i686.whl", hash = "sha256:bf1ef9eb901113a9805287e090452c05547578eaab1b62e4ad456fcc049a9b7e"}, + {file = "coverage-4.5.4-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:386e2e4090f0bc5df274e720105c342263423e77ee8826002dcffe0c9533dbca"}, + {file = "coverage-4.5.4-cp34-cp34m-win32.whl", hash = "sha256:fa964bae817babece5aa2e8c1af841bebb6d0b9add8e637548809d040443fee0"}, + {file = 
"coverage-4.5.4-cp34-cp34m-win_amd64.whl", hash = "sha256:df6712284b2e44a065097846488f66840445eb987eb81b3cc6e4149e7b6982e1"}, + {file = "coverage-4.5.4-cp35-cp35m-macosx_10_12_x86_64.whl", hash = "sha256:efc89291bd5a08855829a3c522df16d856455297cf35ae827a37edac45f466a7"}, + {file = "coverage-4.5.4-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:e4ef9c164eb55123c62411f5936b5c2e521b12356037b6e1c2617cef45523d47"}, + {file = "coverage-4.5.4-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:ff37757e068ae606659c28c3bd0d923f9d29a85de79bf25b2b34b148473b5025"}, + {file = "coverage-4.5.4-cp35-cp35m-win32.whl", hash = "sha256:bf0a7aed7f5521c7ca67febd57db473af4762b9622254291fbcbb8cd0ba5e33e"}, + {file = "coverage-4.5.4-cp35-cp35m-win_amd64.whl", hash = "sha256:19e4df788a0581238e9390c85a7a09af39c7b539b29f25c89209e6c3e371270d"}, + {file = "coverage-4.5.4-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:60851187677b24c6085248f0a0b9b98d49cba7ecc7ec60ba6b9d2e5574ac1ee9"}, + {file = "coverage-4.5.4-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:245388cda02af78276b479f299bbf3783ef0a6a6273037d7c60dc73b8d8d7755"}, + {file = "coverage-4.5.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:c0afd27bc0e307a1ffc04ca5ec010a290e49e3afbe841c5cafc5c5a80ecd81c9"}, + {file = "coverage-4.5.4-cp36-cp36m-win32.whl", hash = "sha256:6ba744056423ef8d450cf627289166da65903885272055fb4b5e113137cfa14f"}, + {file = "coverage-4.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:af7ed8a8aa6957aac47b4268631fa1df984643f07ef00acd374e456364b373f5"}, + {file = "coverage-4.5.4-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:3a794ce50daee01c74a494919d5ebdc23d58873747fa0e288318728533a3e1ca"}, + {file = "coverage-4.5.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:0be0f1ed45fc0c185cfd4ecc19a1d6532d72f86a2bac9de7e24541febad72650"}, + {file = "coverage-4.5.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:eca2b7343524e7ba246cab8ff00cab47a2d6d54ada3b02772e908a45675722e2"}, + {file = "coverage-4.5.4-cp37-cp37m-win32.whl", hash = "sha256:93715dffbcd0678057f947f496484e906bf9509f5c1c38fc9ba3922893cda5f5"}, + {file = "coverage-4.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:23cc09ed395b03424d1ae30dcc292615c1372bfba7141eb85e11e50efaa6b351"}, + {file = "coverage-4.5.4-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:141f08ed3c4b1847015e2cd62ec06d35e67a3ac185c26f7635f4406b90afa9c5"}, + {file = "coverage-4.5.4.tar.gz", hash = "sha256:e07d9f1a23e9e93ab5c62902833bf3e4b1f65502927379148b6622686223125c"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "cython" +version = "0.29.36" +description = "The Cython compiler for writing C extensions for the Python language." 
+optional = true +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "Cython-0.29.36-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1ea33c1c57f331f5653baa1313e445fbe80d1da56dd9a42c8611037887897b9d"}, + {file = "Cython-0.29.36-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2fe34615c13ace29e77bf9d21c26188d23eff7ad8b3e248da70404e5f5436b95"}, + {file = "Cython-0.29.36-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ae75eac4f13cbbcb50b2097470dcea570182446a3ebd0f7e95dd425c2017a2d7"}, + {file = "Cython-0.29.36-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:847d07fc02978c4433d01b4f5ee489b75fd42fd32ccf9cc4b5fd887e8cffe822"}, + {file = "Cython-0.29.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:7cb44aeaf6c5c25bd6a7562ece4eadf50d606fc9b5f624fa95bd0281e8bf0a97"}, + {file = "Cython-0.29.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:28fb10aabd56a2e4d399273b48e106abe5a0d271728fd5eed3d36e7171000045"}, + {file = "Cython-0.29.36-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:86b7a13c6b23ab6471d40a320f573fbc8a4e39833947eebed96661145dc34771"}, + {file = "Cython-0.29.36-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:19ccf7fc527cf556e2e6a3dfeffcadfbcabd24a59a988289117795dfed8a25ad"}, + {file = "Cython-0.29.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:74bddfc7dc8958526b2018d3adc1aa6dc9cf2a24095c972e5ad06758c360b261"}, + {file = "Cython-0.29.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:6c4d7e36fe0211e394adffd296382b435ac22762d14f2fe45c506c230f91cf2d"}, + {file = "Cython-0.29.36-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:0bca6a7504e8cfc63a4d3c7c9b9a04e5d05501942a6c8cee177363b61a32c2d4"}, + {file = "Cython-0.29.36-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:17c74f80b06e2fa8ffc8acd41925f4f9922da8a219cd25c6901beab2f7c56cc5"}, + {file = "Cython-0.29.36-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:25ff471a459aad82146973b0b8c177175ab896051080713d3035ad4418739f66"}, + {file = "Cython-0.29.36-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a9738f23d00d99481797b155ad58f8fc1c72096926ea2554b8ccc46e1d356c27"}, + {file = "Cython-0.29.36-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:af2f333f08c4c279f3480532341bf70ec8010bcbc7d8a6daa5ca0bf4513af295"}, + {file = "Cython-0.29.36-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:cd77cedbcc13cb67aef39b8615fd50a67fc42b0c6defea6fc0a21e19d3a062ec"}, + {file = "Cython-0.29.36-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:50d506d73a46c4a522ef9fdafcbf7a827ba13907b18ff58f61a8fa0887d0bd8d"}, + {file = "Cython-0.29.36-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:6a571d7c7b52ee12d73bc65b4855779c069545da3bac26bec06a1389ad17ade5"}, + {file = "Cython-0.29.36-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a216b2801c7d9c3babe0a10cc25da3bc92494d7047d1f732d3c47b0cceaf0941"}, + {file = "Cython-0.29.36-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:68abee3be27f21c9642a07a93f8333d491f4c52bc70068e42f51685df9ac1a57"}, + {file = 
"Cython-0.29.36-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:1ef90023da8a9bf84cf16f06186db0906d2ce52a09f751e2cb9d3da9d54eae46"}, + {file = "Cython-0.29.36-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:9deef0761e8c798043dbb728a1c6df97b26e5edc65b8d6c7608b3c07af3eb722"}, + {file = "Cython-0.29.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:69af2365de2343b4e5a61c567e7611ddf2575ae6f6e5c01968f7d4f2747324eb"}, + {file = "Cython-0.29.36-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:fdf377b0f6e9325b73ad88933136023184afdc795caeeaaf3dca13494cffd15e"}, + {file = "Cython-0.29.36-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1ff2cc5518558c598028ae8d9a43401e0e734b74b6e598156b005328c9da3472"}, + {file = "Cython-0.29.36-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7ca921068242cd8b52544870c807fe285c1f248b12df7b6dfae25cc9957b965e"}, + {file = "Cython-0.29.36-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6058a6d04e04d790cda530e1ff675e9352359eb4b777920df3cac2b62a9a030f"}, + {file = "Cython-0.29.36-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:de2045ceae1857e56a72f08e0acfa48c994277a353b7bdab1f097db9f8803f19"}, + {file = "Cython-0.29.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:9f2a4b4587aaef08815410dc20653613ca04a120a2954a92c39e37c6b5fdf6be"}, + {file = "Cython-0.29.36-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:2edd9f8edca69178d74cbbbc180bc3e848433c9b7dc80374a11a0bb0076c926d"}, + {file = "Cython-0.29.36-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c6c0aea8491a70f98b7496b5057c9523740e02cec21cd678eef609d2aa6c1257"}, + {file = "Cython-0.29.36-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:768f65b16d23c630d8829ce1f95520ef1531a9c0489fa872d87c8c3813f65aee"}, + {file = "Cython-0.29.36-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:568625e8274ee7288ad87b0f615ec36ab446ca9b35e77481ed010027d99c7020"}, + {file = "Cython-0.29.36-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bdc0a4cb99f55e6878d4b67a4bfee23823484915cb6b7e9c9dd01002dd3592ea"}, + {file = "Cython-0.29.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f0df6552be39853b10dfb5a10dbd08f5c49023d6b390d7ce92d4792a8b6e73ee"}, + {file = "Cython-0.29.36-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:8894db6f5b6479a3c164e0454e13083ebffeaa9a0822668bb2319bdf1b783df1"}, + {file = "Cython-0.29.36-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:53f93a8c342e9445a8f0cb7039775294f2dbbe5241936573daeaf0afe30397e4"}, + {file = "Cython-0.29.36-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:ee317f9bcab901a3db39c34ee5a27716f7132e5c0de150125342694d18b30f51"}, + {file = "Cython-0.29.36-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e4b8269e5a5d127a2191b02b9df3636c0dac73f14f1ff8a831f39cb5197c4f38"}, + {file = "Cython-0.29.36-py2.py3-none-any.whl", hash = "sha256:95bb13d8be507425d03ebe051f90d4b2a9fdccc64e4f30b35645fdb7542742eb"}, + {file = "Cython-0.29.36.tar.gz", hash = "sha256:41c0cfd2d754e383c9eeb95effc9aa4ab847d0c9747077ddd7c0dcb68c3bc01f"}, +] + +[package.source] +type = "legacy" +url = 
"https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "exceptiongroup" +version = "1.1.2" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.1.2-py3-none-any.whl", hash = "sha256:e346e69d186172ca7cf029c8c1d16235aa0e04035e5750b4b95039e65204328f"}, + {file = "exceptiongroup-1.1.2.tar.gz", hash = "sha256:12c3e887d6485d16943a309616de20ae5582633e0a2eda17f4e10fd61c1e8af5"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "filetype" +version = "1.2.0" +description = "Infer file type and MIME type of any file/buffer. No external dependencies." +optional = true +python-versions = "*" +files = [ + {file = "filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25"}, + {file = "filetype-1.2.0.tar.gz", hash = "sha256:66b56cd6474bf41d8c54660347d37afcc3f7d1970648de365c102ef77548aadb"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "greenlet" +version = "2.0.2" +description = "Lightweight in-process concurrent programming" +optional = true +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" +files = [ + {file = "greenlet-2.0.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:bdfea8c661e80d3c1c99ad7c3ff74e6e87184895bbaca6ee8cc61209f8b9b85d"}, + {file = "greenlet-2.0.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:9d14b83fab60d5e8abe587d51c75b252bcc21683f24699ada8fb275d7712f5a9"}, + {file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"}, + {file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"}, + {file = "greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"}, + {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, + {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d75209eed723105f9596807495d58d10b3470fa6732dd6756595e89925ce2470"}, + {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a51c9751078733d88e013587b108f1b7a1fb106d402fb390740f002b6f6551a"}, + {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, + {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, + {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, + 
{file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, + {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, + {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:eff4eb9b7eb3e4d0cae3d28c283dc16d9bed6b193c2e1ace3ed86ce48ea8df19"}, + {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5454276c07d27a740c5892f4907c86327b632127dd9abec42ee62e12427ff7e3"}, + {file = "greenlet-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:7cafd1208fdbe93b67c7086876f061f660cfddc44f404279c1585bbf3cdc64c5"}, + {file = "greenlet-2.0.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:910841381caba4f744a44bf81bfd573c94e10b3045ee00de0cbf436fe50673a6"}, + {file = "greenlet-2.0.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:18a7f18b82b52ee85322d7a7874e676f34ab319b9f8cce5de06067384aa8ff43"}, + {file = "greenlet-2.0.2-cp35-cp35m-win32.whl", hash = "sha256:03a8f4f3430c3b3ff8d10a2a86028c660355ab637cee9333d63d66b56f09d52a"}, + {file = "greenlet-2.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:4b58adb399c4d61d912c4c331984d60eb66565175cdf4a34792cd9600f21b394"}, + {file = "greenlet-2.0.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:703f18f3fda276b9a916f0934d2fb6d989bf0b4fb5a64825260eb9bfd52d78f0"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:32e5b64b148966d9cccc2c8d35a671409e45f195864560829f395a54226408d3"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dd11f291565a81d71dab10b7033395b7a3a5456e637cf997a6f33ebdf06f8db"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0f72c9ddb8cd28532185f54cc1453f2c16fb417a08b53a855c4e6a418edd099"}, + {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd021c754b162c0fb55ad5d6b9d960db667faad0fa2ff25bb6e1301b0b6e6a75"}, + {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:3c9b12575734155d0c09d6c3e10dbd81665d5c18e1a7c6597df72fd05990c8cf"}, + {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b9ec052b06a0524f0e35bd8790686a1da006bd911dd1ef7d50b77bfbad74e292"}, + {file = "greenlet-2.0.2-cp36-cp36m-win32.whl", hash = "sha256:dbfcfc0218093a19c252ca8eb9aee3d29cfdcb586df21049b9d777fd32c14fd9"}, + {file = "greenlet-2.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:9f35ec95538f50292f6d8f2c9c9f8a3c6540bbfec21c9e5b4b751e0a7c20864f"}, + {file = "greenlet-2.0.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:d5508f0b173e6aa47273bdc0a0b5ba055b59662ba7c7ee5119528f466585526b"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:f82d4d717d8ef19188687aa32b8363e96062911e63ba22a0cff7802a8e58e5f1"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9c59a2120b55788e800d82dfa99b9e156ff8f2227f07c5e3012a45a399620b7"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2780572ec463d44c1d3ae850239508dbeb9fed38e294c68d19a24d925d9223ca"}, + {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:937e9020b514ceedb9c830c55d5c9872abc90f4b5862f89c0887033ae33c6f73"}, + {file = 
"greenlet-2.0.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:36abbf031e1c0f79dd5d596bfaf8e921c41df2bdf54ee1eed921ce1f52999a86"}, + {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:18e98fb3de7dba1c0a852731c3070cf022d14f0d68b4c87a19cc1016f3bb8b33"}, + {file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"}, + {file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"}, + {file = "greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"}, + {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acd2162a36d3de67ee896c43effcd5ee3de247eb00354db411feb025aa319857"}, + {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0bf60faf0bc2468089bdc5edd10555bab6e85152191df713e2ab1fcc86382b5a"}, + {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"}, + {file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"}, + {file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"}, + {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be4ed120b52ae4d974aa40215fcdfde9194d63541c7ded40ee12eb4dda57b76b"}, + {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94c817e84245513926588caf1152e3b559ff794d505555211ca041f032abbb6b"}, + {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1a819eef4b0e0b96bb0d98d797bef17dc1b4a10e8d7446be32d1da33e095dbb8"}, + {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7efde645ca1cc441d6dc4b48c0f7101e8d86b54c8530141b09fd31cef5149ec9"}, + {file = "greenlet-2.0.2-cp39-cp39-win32.whl", hash = "sha256:ea9872c80c132f4663822dd2a08d404073a5a9b5ba6155bea72fb2a79d1093b5"}, + {file = "greenlet-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:db1a39669102a1d8d12b57de2bb7e2ec9066a6f2b3da35ae511ff93b01b5d564"}, + {file = "greenlet-2.0.2.tar.gz", hash = "sha256:e7c8dc13af7db097bed64a051d2dd49e9f0af495c26995c00a9ee842690d34c0"}, +] + +[package.extras] +docs = ["Sphinx", "docutils (<0.18)"] +test = ["objgraph", "psutil"] + +[package.source] +type = "legacy" +url = 
"https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "idna" +version = "3.4" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "importlib-metadata" +version = "6.7.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"}, + {file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"}, +] + +[package.dependencies] +typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} +zipp = ">=0.5" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "jinja2" +version = "3.1.2" +description = "A very fast and expressive template engine." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "jmespath" +version = "0.9.5" +description = "JSON Matching Expressions" +optional = false +python-versions = "*" +files = [ + {file = "jmespath-0.9.5-py2.py3-none-any.whl", hash = "sha256:695cb76fa78a10663425d5b73ddc5714eb711157e52704d69be03b1a02ba4fec"}, + {file = "jmespath-0.9.5.tar.gz", hash = "sha256:cca55c8d153173e21baa59983015ad0daf603f9cb799904ff057bfb8ff8dc2d9"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "loguru" +version = "0.4.1" +description = "Python logging made (stupidly) simple" +optional = false +python-versions = ">=3.5" +files = [ + {file = "loguru-0.4.1-py3-none-any.whl", hash = "sha256:074b3caa6748452c1e4f2b302093c94b65d5a4c5a4d7743636b4121e06437b0e"}, + {file = "loguru-0.4.1.tar.gz", hash = "sha256:a6101fd435ac89ba5205a105a26a6ede9e4ddbb4408a6e167852efca47806d11"}, +] + +[package.dependencies] +colorama = {version = ">=0.3.4", markers = "sys_platform == \"win32\""} +win32-setctime = {version = ">=1.0.0", markers = "sys_platform == \"win32\""} + +[package.extras] +dev = ["Sphinx (>=2.2.1)", "black (>=19.3b0)", "codecov (>=2.0.15)", "colorama (>=0.3.4)", "flake8 (>=3.7.7)", "isort (>=4.3.20)", "pytest (>=4.6.2)", "pytest-cov (>=2.7.1)", "sphinx-autobuild (>=0.7.1)", "sphinx-rtd-theme (>=0.4.3)", "tox (>=3.9.0)", "tox-travis (>=0.12)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "markupsafe" +version = "2.1.3" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, + {file = 
"MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, + {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "packaging" +version = "23.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, + {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "pathspec" +version = "0.11.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.7" +files = [ + {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, + {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "platformdirs" +version = "3.9.1" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "platformdirs-3.9.1-py3-none-any.whl", hash = "sha256:ad8291ae0ae5072f66c16945166cb11c63394c7a3ad1b1bc9828ca3162da8c2f"}, + {file = "platformdirs-3.9.1.tar.gz", hash = "sha256:1b42b450ad933e981d56e59f1b97495428c9bd60698baab9f3eb3d00d5822421"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.6.3", markers = "python_version < \"3.8\""} + +[package.extras] +docs = ["furo (>=2023.5.20)", "proselint (>=0.13)", "sphinx (>=7.0.1)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.3.1)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "pluggy" +version = "1.2.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pluggy-1.2.0-py3-none-any.whl", hash = "sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849"}, + {file = "pluggy-1.2.0.tar.gz", hash = "sha256:d12f0c4b579b15f5e054301bb226ee85eeeba08ffec228092f8defbaa3a4c4b3"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "ply" +version = "3.11" +description = "Python Lex & Yacc" +optional = true +python-versions = "*" +files = [ + {file = "ply-3.11-py2.py3-none-any.whl", hash = "sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce"}, + {file = "ply-3.11.tar.gz", hash = "sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "py" +version = "1.11.0" +description = "library with cross-python path, ini-parsing, io, code, log facilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, + {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "pydantic" +version = "1.8.2" +description = "Data validation and settings management using python 3.6 type hinting" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "pydantic-1.8.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:05ddfd37c1720c392f4e0d43c484217b7521558302e7069ce8d318438d297739"}, + {file = "pydantic-1.8.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:a7c6002203fe2c5a1b5cbb141bb85060cbff88c2d78eccbc72d97eb7022c43e4"}, + {file = "pydantic-1.8.2-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:589eb6cd6361e8ac341db97602eb7f354551482368a37f4fd086c0733548308e"}, + {file = "pydantic-1.8.2-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:10e5622224245941efc193ad1d159887872776df7a8fd592ed746aa25d071840"}, + {file = "pydantic-1.8.2-cp36-cp36m-win_amd64.whl", hash = "sha256:99a9fc39470010c45c161a1dc584997f1feb13f689ecf645f59bb4ba623e586b"}, + {file = 
"pydantic-1.8.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a83db7205f60c6a86f2c44a61791d993dff4b73135df1973ecd9eed5ea0bda20"}, + {file = "pydantic-1.8.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:41b542c0b3c42dc17da70554bc6f38cbc30d7066d2c2815a94499b5684582ecb"}, + {file = "pydantic-1.8.2-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:ea5cb40a3b23b3265f6325727ddfc45141b08ed665458be8c6285e7b85bd73a1"}, + {file = "pydantic-1.8.2-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:18b5ea242dd3e62dbf89b2b0ec9ba6c7b5abaf6af85b95a97b00279f65845a23"}, + {file = "pydantic-1.8.2-cp37-cp37m-win_amd64.whl", hash = "sha256:234a6c19f1c14e25e362cb05c68afb7f183eb931dd3cd4605eafff055ebbf287"}, + {file = "pydantic-1.8.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:021ea0e4133e8c824775a0cfe098677acf6fa5a3cbf9206a376eed3fc09302cd"}, + {file = "pydantic-1.8.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:e710876437bc07bd414ff453ac8ec63d219e7690128d925c6e82889d674bb505"}, + {file = "pydantic-1.8.2-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:ac8eed4ca3bd3aadc58a13c2aa93cd8a884bcf21cb019f8cfecaae3b6ce3746e"}, + {file = "pydantic-1.8.2-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:4a03cbbe743e9c7247ceae6f0d8898f7a64bb65800a45cbdc52d65e370570820"}, + {file = "pydantic-1.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:8621559dcf5afacf0069ed194278f35c255dc1a1385c28b32dd6c110fd6531b3"}, + {file = "pydantic-1.8.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8b223557f9510cf0bfd8b01316bf6dd281cf41826607eada99662f5e4963f316"}, + {file = "pydantic-1.8.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:244ad78eeb388a43b0c927e74d3af78008e944074b7d0f4f696ddd5b2af43c62"}, + {file = "pydantic-1.8.2-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:05ef5246a7ffd2ce12a619cbb29f3307b7c4509307b1b49f456657b43529dc6f"}, + {file = "pydantic-1.8.2-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:54cd5121383f4a461ff7644c7ca20c0419d58052db70d8791eacbbe31528916b"}, + {file = "pydantic-1.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:4be75bebf676a5f0f87937c6ddb061fa39cbea067240d98e298508c1bda6f3f3"}, + {file = "pydantic-1.8.2-py3-none-any.whl", hash = "sha256:fec866a0b59f372b7e776f2d7308511784dace622e0992a0b59ea3ccee0ae833"}, + {file = "pydantic-1.8.2.tar.gz", hash = "sha256:26464e57ccaafe72b7ad156fdaa4e9b9ef051f69e175dbbb463283000c05ab7b"}, +] + +[package.dependencies] +typing-extensions = ">=3.7.4.3" + +[package.extras] +dotenv = ["python-dotenv (>=0.10.4)"] +email = ["email-validator (>=1.0.3)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "pymysql" +version = "1.1.0" +description = "Pure Python MySQL Driver" +optional = true +python-versions = ">=3.7" +files = [ + {file = "PyMySQL-1.1.0-py3-none-any.whl", hash = "sha256:8969ec6d763c856f7073c4c64662882675702efcb114b4bcbb955aea3a069fa7"}, + {file = "PyMySQL-1.1.0.tar.gz", hash = "sha256:4f13a7df8bf36a51e81dd9f3605fede45a4878fe02f9236349fd82a3f0612f96"}, +] + +[package.extras] +ed25519 = ["PyNaCl (>=1.4.0)"] +rsa = ["cryptography"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "pytest" +version = "7.4.0" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-7.4.0-py3-none-any.whl", hash = "sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32"}, + {file = "pytest-7.4.0.tar.gz", 
hash = "sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "pytest-html" +version = "3.2.0" +description = "pytest plugin for generating HTML reports" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pytest-html-3.2.0.tar.gz", hash = "sha256:c4e2f4bb0bffc437f51ad2174a8a3e71df81bbc2f6894604e604af18fbe687c3"}, + {file = "pytest_html-3.2.0-py3-none-any.whl", hash = "sha256:868c08564a68d8b2c26866f1e33178419bb35b1e127c33784a28622eb827f3f3"}, +] + +[package.dependencies] +py = ">=1.8.2" +pytest = ">=5.0,<6.0.0 || >6.0.0" +pytest-metadata = "*" + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "pytest-metadata" +version = "3.0.0" +description = "pytest plugin for test session metadata" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest_metadata-3.0.0-py3-none-any.whl", hash = "sha256:a17b1e40080401dc23177599208c52228df463db191c1a573ccdffacd885e190"}, + {file = "pytest_metadata-3.0.0.tar.gz", hash = "sha256:769a9c65d2884bd583bc626b0ace77ad15dbe02dd91a9106d47fd46d9c2569ca"}, +] + +[package.dependencies] +pytest = ">=7.0.0" + +[package.extras] +test = ["black (>=22.1.0)", "flake8 (>=4.0.1)", "pre-commit (>=2.17.0)", "tox (>=3.24.5)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "pyyaml" +version = "6.0.1" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = 
"sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "requests-toolbelt" +version = "0.10.1" +description = "A utility belt for advanced users of python-requests" +optional = true +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "requests-toolbelt-0.10.1.tar.gz", hash = "sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d"}, + {file = "requests_toolbelt-0.10.1-py2.py3-none-any.whl", hash = "sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7"}, +] + +[package.dependencies] +requests = ">=2.0.1,<3.0.0" + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "sentry-sdk" +version = "0.14.4" +description = "Python client for Sentry (https://getsentry.com)" +optional = false +python-versions = "*" +files = [ + {file = "sentry-sdk-0.14.4.tar.gz", hash = "sha256:0e5e947d0f7a969314aa23669a94a9712be5a688ff069ff7b9fc36c66adc160c"}, + {file = "sentry_sdk-0.14.4-py2.py3-none-any.whl", hash = "sha256:799a8bf76b012e3030a881be00e97bc0b922ce35dde699c6537122b751d80e2c"}, +] + +[package.dependencies] +certifi = "*" +urllib3 = ">=1.10.0" + +[package.extras] +aiohttp = ["aiohttp (>=3.5)"] +beam = ["beam (>=2.12)"] +bottle = ["bottle (>=0.12.13)"] +celery = ["celery (>=3)"] +django = ["django (>=1.8)"] +falcon = ["falcon (>=1.4)"] +flask = ["blinker (>=1.1)", "flask (>=0.11)"] +pyspark = ["pyspark (>=2.4.4)"] +rq = ["rq (>=0.6)"] 
+sanic = ["sanic (>=0.8)"] +sqlalchemy = ["sqlalchemy (>=1.2)"] +tornado = ["tornado (>=5)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = true +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "sqlalchemy" +version = "1.4.49" +description = "Database Abstraction Library" +optional = true +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "SQLAlchemy-1.4.49-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2e126cf98b7fd38f1e33c64484406b78e937b1a280e078ef558b95bf5b6895f6"}, + {file = "SQLAlchemy-1.4.49-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:03db81b89fe7ef3857b4a00b63dedd632d6183d4ea5a31c5d8a92e000a41fc71"}, + {file = "SQLAlchemy-1.4.49-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:95b9df9afd680b7a3b13b38adf6e3a38995da5e162cc7524ef08e3be4e5ed3e1"}, + {file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a63e43bf3f668c11bb0444ce6e809c1227b8f067ca1068898f3008a273f52b09"}, + {file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f835c050ebaa4e48b18403bed2c0fda986525896efd76c245bdd4db995e51a4c"}, + {file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c21b172dfb22e0db303ff6419451f0cac891d2e911bb9fbf8003d717f1bcf91"}, + {file = "SQLAlchemy-1.4.49-cp310-cp310-win32.whl", hash = "sha256:5fb1ebdfc8373b5a291485757bd6431de8d7ed42c27439f543c81f6c8febd729"}, + {file = "SQLAlchemy-1.4.49-cp310-cp310-win_amd64.whl", hash = "sha256:f8a65990c9c490f4651b5c02abccc9f113a7f56fa482031ac8cb88b70bc8ccaa"}, + {file = "SQLAlchemy-1.4.49-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8923dfdf24d5aa8a3adb59723f54118dd4fe62cf59ed0d0d65d940579c1170a4"}, + {file = "SQLAlchemy-1.4.49-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9ab2c507a7a439f13ca4499db6d3f50423d1d65dc9b5ed897e70941d9e135b0"}, + {file = "SQLAlchemy-1.4.49-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5debe7d49b8acf1f3035317e63d9ec8d5e4d904c6e75a2a9246a119f5f2fdf3d"}, + {file = "SQLAlchemy-1.4.49-cp311-cp311-win32.whl", hash = "sha256:82b08e82da3756765c2e75f327b9bf6b0f043c9c3925fb95fb51e1567fa4ee87"}, + {file = "SQLAlchemy-1.4.49-cp311-cp311-win_amd64.whl", hash = "sha256:171e04eeb5d1c0d96a544caf982621a1711d078dbc5c96f11d6469169bd003f1"}, + {file = "SQLAlchemy-1.4.49-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:36e58f8c4fe43984384e3fbe6341ac99b6b4e083de2fe838f0fdb91cebe9e9cb"}, + {file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b31e67ff419013f99ad6f8fc73ee19ea31585e1e9fe773744c0f3ce58c039c30"}, + {file = 
"SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c14b29d9e1529f99efd550cd04dbb6db6ba5d690abb96d52de2bff4ed518bc95"}, + {file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c40f3470e084d31247aea228aa1c39bbc0904c2b9ccbf5d3cfa2ea2dac06f26d"}, + {file = "SQLAlchemy-1.4.49-cp36-cp36m-win32.whl", hash = "sha256:706bfa02157b97c136547c406f263e4c6274a7b061b3eb9742915dd774bbc264"}, + {file = "SQLAlchemy-1.4.49-cp36-cp36m-win_amd64.whl", hash = "sha256:a7f7b5c07ae5c0cfd24c2db86071fb2a3d947da7bd487e359cc91e67ac1c6d2e"}, + {file = "SQLAlchemy-1.4.49-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:4afbbf5ef41ac18e02c8dc1f86c04b22b7a2125f2a030e25bbb4aff31abb224b"}, + {file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24e300c0c2147484a002b175f4e1361f102e82c345bf263242f0449672a4bccf"}, + {file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:201de072b818f8ad55c80d18d1a788729cccf9be6d9dc3b9d8613b053cd4836d"}, + {file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7653ed6817c710d0c95558232aba799307d14ae084cc9b1f4c389157ec50df5c"}, + {file = "SQLAlchemy-1.4.49-cp37-cp37m-win32.whl", hash = "sha256:647e0b309cb4512b1f1b78471fdaf72921b6fa6e750b9f891e09c6e2f0e5326f"}, + {file = "SQLAlchemy-1.4.49-cp37-cp37m-win_amd64.whl", hash = "sha256:ab73ed1a05ff539afc4a7f8cf371764cdf79768ecb7d2ec691e3ff89abbc541e"}, + {file = "SQLAlchemy-1.4.49-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:37ce517c011560d68f1ffb28af65d7e06f873f191eb3a73af5671e9c3fada08a"}, + {file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1878ce508edea4a879015ab5215546c444233881301e97ca16fe251e89f1c55"}, + {file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0e8e608983e6f85d0852ca61f97e521b62e67969e6e640fe6c6b575d4db68557"}, + {file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccf956da45290df6e809ea12c54c02ace7f8ff4d765d6d3dfb3655ee876ce58d"}, + {file = "SQLAlchemy-1.4.49-cp38-cp38-win32.whl", hash = "sha256:f167c8175ab908ce48bd6550679cc6ea20ae169379e73c7720a28f89e53aa532"}, + {file = "SQLAlchemy-1.4.49-cp38-cp38-win_amd64.whl", hash = "sha256:45806315aae81a0c202752558f0df52b42d11dd7ba0097bf71e253b4215f34f4"}, + {file = "SQLAlchemy-1.4.49-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:b6d0c4b15d65087738a6e22e0ff461b407533ff65a73b818089efc8eb2b3e1de"}, + {file = "SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a843e34abfd4c797018fd8d00ffffa99fd5184c421f190b6ca99def4087689bd"}, + {file = "SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1c890421651b45a681181301b3497e4d57c0d01dc001e10438a40e9a9c25ee77"}, + {file = "SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d26f280b8f0a8f497bc10573849ad6dc62e671d2468826e5c748d04ed9e670d5"}, + {file = "SQLAlchemy-1.4.49-cp39-cp39-win32.whl", hash = 
"sha256:ec2268de67f73b43320383947e74700e95c6770d0c68c4e615e9897e46296294"}, + {file = "SQLAlchemy-1.4.49-cp39-cp39-win_amd64.whl", hash = "sha256:bbdf16372859b8ed3f4d05f925a984771cd2abd18bd187042f24be4886c2a15f"}, + {file = "SQLAlchemy-1.4.49.tar.gz", hash = "sha256:06ff25cbae30c396c4b7737464f2a7fc37a67b7da409993b182b024cec80aed9"}, +] + +[package.dependencies] +greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\")"} +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} + +[package.extras] +aiomysql = ["aiomysql", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4)", "greenlet (!=0.4.17)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)", "sqlalchemy2-stubs"] +mysql = ["mysqlclient (>=1.4.0)", "mysqlclient (>=1.4.0,<2)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx-oracle (>=7)", "cx-oracle (>=7,<8)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-pg8000 = ["pg8000 (>=1.16.6,!=1.29.0)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +pymysql = ["pymysql", "pymysql (<1)"] +sqlcipher = ["sqlcipher3-binary"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "thrift" +version = "0.16.0" +description = "Python bindings for the Apache Thrift RPC system" +optional = true +python-versions = "*" +files = [ + {file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"}, +] + +[package.dependencies] +six = ">=1.7.2" + +[package.extras] +all = ["tornado (>=4.0)", "twisted"] +tornado = ["tornado (>=4.0)"] +twisted = ["twisted"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "thriftpy2" +version = "0.4.16" +description = "Pure python implementation of Apache Thrift." 
+optional = true +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "thriftpy2-0.4.16.tar.gz", hash = "sha256:2aa67ecda99a948e4146341d388260b48ee7da5dfb9a951c4151988e2ed2fb4c"}, +] + +[package.dependencies] +ply = ">=3.4,<4.0" +six = ">=1.15,<2.0" + +[package.extras] +dev = ["cython (>=0.28.4)", "flake8 (>=2.5)", "pytest (>=2.8)", "pytest (>=6.1.1)", "sphinx (>=1.3)", "sphinx-rtd-theme (>=0.1.9)", "tornado (>=4.0,<6.0)"] +tornado = ["tornado (>=4.0,<6.0)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "typed-ast" +version = "1.5.5" +description = "a fork of Python 2 and 3 ast modules with type comment support" +optional = false +python-versions = ">=3.6" +files = [ + {file = "typed_ast-1.5.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4bc1efe0ce3ffb74784e06460f01a223ac1f6ab31c6bc0376a21184bf5aabe3b"}, + {file = "typed_ast-1.5.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5f7a8c46a8b333f71abd61d7ab9255440d4a588f34a21f126bbfc95f6049e686"}, + {file = "typed_ast-1.5.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:597fc66b4162f959ee6a96b978c0435bd63791e31e4f410622d19f1686d5e769"}, + {file = "typed_ast-1.5.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d41b7a686ce653e06c2609075d397ebd5b969d821b9797d029fccd71fdec8e04"}, + {file = "typed_ast-1.5.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5fe83a9a44c4ce67c796a1b466c270c1272e176603d5e06f6afbc101a572859d"}, + {file = "typed_ast-1.5.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d5c0c112a74c0e5db2c75882a0adf3133adedcdbfd8cf7c9d6ed77365ab90a1d"}, + {file = "typed_ast-1.5.5-cp310-cp310-win_amd64.whl", hash = "sha256:e1a976ed4cc2d71bb073e1b2a250892a6e968ff02aa14c1f40eba4f365ffec02"}, + {file = "typed_ast-1.5.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c631da9710271cb67b08bd3f3813b7af7f4c69c319b75475436fcab8c3d21bee"}, + {file = "typed_ast-1.5.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b445c2abfecab89a932b20bd8261488d574591173d07827c1eda32c457358b18"}, + {file = "typed_ast-1.5.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc95ffaaab2be3b25eb938779e43f513e0e538a84dd14a5d844b8f2932593d88"}, + {file = "typed_ast-1.5.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:61443214d9b4c660dcf4b5307f15c12cb30bdfe9588ce6158f4a005baeb167b2"}, + {file = "typed_ast-1.5.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6eb936d107e4d474940469e8ec5b380c9b329b5f08b78282d46baeebd3692dc9"}, + {file = "typed_ast-1.5.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e48bf27022897577d8479eaed64701ecaf0467182448bd95759883300ca818c8"}, + {file = "typed_ast-1.5.5-cp311-cp311-win_amd64.whl", hash = "sha256:83509f9324011c9a39faaef0922c6f720f9623afe3fe220b6d0b15638247206b"}, + {file = "typed_ast-1.5.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:44f214394fc1af23ca6d4e9e744804d890045d1643dd7e8229951e0ef39429b5"}, + {file = "typed_ast-1.5.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:118c1ce46ce58fda78503eae14b7664163aa735b620b64b5b725453696f2a35c"}, + {file = "typed_ast-1.5.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be4919b808efa61101456e87f2d4c75b228f4e52618621c77f1ddcaae15904fa"}, + {file = "typed_ast-1.5.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:fc2b8c4e1bc5cd96c1a823a885e6b158f8451cf6f5530e1829390b4d27d0807f"}, + {file = "typed_ast-1.5.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:16f7313e0a08c7de57f2998c85e2a69a642e97cb32f87eb65fbfe88381a5e44d"}, + {file = "typed_ast-1.5.5-cp36-cp36m-win_amd64.whl", hash = "sha256:2b946ef8c04f77230489f75b4b5a4a6f24c078be4aed241cfabe9cbf4156e7e5"}, + {file = "typed_ast-1.5.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2188bc33d85951ea4ddad55d2b35598b2709d122c11c75cffd529fbc9965508e"}, + {file = "typed_ast-1.5.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0635900d16ae133cab3b26c607586131269f88266954eb04ec31535c9a12ef1e"}, + {file = "typed_ast-1.5.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57bfc3cf35a0f2fdf0a88a3044aafaec1d2f24d8ae8cd87c4f58d615fb5b6311"}, + {file = "typed_ast-1.5.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:fe58ef6a764de7b4b36edfc8592641f56e69b7163bba9f9c8089838ee596bfb2"}, + {file = "typed_ast-1.5.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d09d930c2d1d621f717bb217bf1fe2584616febb5138d9b3e8cdd26506c3f6d4"}, + {file = "typed_ast-1.5.5-cp37-cp37m-win_amd64.whl", hash = "sha256:d40c10326893ecab8a80a53039164a224984339b2c32a6baf55ecbd5b1df6431"}, + {file = "typed_ast-1.5.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fd946abf3c31fb50eee07451a6aedbfff912fcd13cf357363f5b4e834cc5e71a"}, + {file = "typed_ast-1.5.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ed4a1a42df8a3dfb6b40c3d2de109e935949f2f66b19703eafade03173f8f437"}, + {file = "typed_ast-1.5.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:045f9930a1550d9352464e5149710d56a2aed23a2ffe78946478f7b5416f1ede"}, + {file = "typed_ast-1.5.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:381eed9c95484ceef5ced626355fdc0765ab51d8553fec08661dce654a935db4"}, + {file = "typed_ast-1.5.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bfd39a41c0ef6f31684daff53befddae608f9daf6957140228a08e51f312d7e6"}, + {file = "typed_ast-1.5.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8c524eb3024edcc04e288db9541fe1f438f82d281e591c548903d5b77ad1ddd4"}, + {file = "typed_ast-1.5.5-cp38-cp38-win_amd64.whl", hash = "sha256:7f58fabdde8dcbe764cef5e1a7fcb440f2463c1bbbec1cf2a86ca7bc1f95184b"}, + {file = "typed_ast-1.5.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:042eb665ff6bf020dd2243307d11ed626306b82812aba21836096d229fdc6a10"}, + {file = "typed_ast-1.5.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:622e4a006472b05cf6ef7f9f2636edc51bda670b7bbffa18d26b255269d3d814"}, + {file = "typed_ast-1.5.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1efebbbf4604ad1283e963e8915daa240cb4bf5067053cf2f0baadc4d4fb51b8"}, + {file = "typed_ast-1.5.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0aefdd66f1784c58f65b502b6cf8b121544680456d1cebbd300c2c813899274"}, + {file = "typed_ast-1.5.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:48074261a842acf825af1968cd912f6f21357316080ebaca5f19abbb11690c8a"}, + {file = "typed_ast-1.5.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:429ae404f69dc94b9361bb62291885894b7c6fb4640d561179548c849f8492ba"}, + {file = "typed_ast-1.5.5-cp39-cp39-win_amd64.whl", hash = "sha256:335f22ccb244da2b5c296e6f96b06ee9bed46526db0de38d2f0e5a6597b81155"}, + {file = "typed_ast-1.5.5.tar.gz", hash = "sha256:94282f7a354f36ef5dbce0ef3467ebf6a258e370ab33d5b40c249fa996e590dd"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "typing-extensions" +version = "4.7.1" +description = "Backported and Experimental Type Hints for Python 3.7+" +optional = false +python-versions = ">=3.7" +files = [ + {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, + {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, +] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "urllib3" +version = "1.26.16" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "urllib3-1.26.16-py2.py3-none-any.whl", hash = "sha256:8d36afa7616d8ab714608411b4a3b13e58f463aee519024578e062e141dce20f"}, + {file = "urllib3-1.26.16.tar.gz", hash = "sha256:8f135f6502756bde6b2a9b28989df5fbe87c9970cecaa69041edcce7f0589b14"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "win32-setctime" +version = "1.1.0" +description = "A small Python utility to set file creation time on Windows" +optional = false +python-versions = ">=3.5" +files = [ + {file = "win32_setctime-1.1.0-py3-none-any.whl", hash = "sha256:231db239e959c2fe7eb1d7dc129f11172354f98361c4fa2d6d2d7e278baa8aad"}, + {file = "win32_setctime-1.1.0.tar.gz", hash = "sha256:15cf5750465118d6929ae4de4eb46e8edae9a5634350c01ba582df868e932cb2"}, +] + +[package.extras] +dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[[package]] +name = "zipp" +version = "3.15.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.7" +files = [ + {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, + {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[package.source] +type = "legacy" +url = "https://pypi.tuna.tsinghua.edu.cn/simple" +reference = "tsinghua" + +[extras] +allure = ["allure-pytest"] +sql = ["pymysql", "sqlalchemy"] +thrift = ["cython", "thrift", "thriftpy2"] +upload = ["filetype", "requests-toolbelt"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.7" +content-hash = "c7281ee1e83f6cfc1c0e341084c86be11342200c94e7fa87faaeabd0d658d41f" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..11d9ccd --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,75 @@ +[tool.poetry] +name = "httprunner" +version = "v4.3.5" +description = "One-stop solution for HTTP(S) testing." 
+license = "Apache-2.0" +readme = "README.md" +authors = ["debugtalk "] + +homepage = "https://httprunner.com" +repository = "https://github.com/httprunner/httprunner" +documentation = "https://httprunner.com/docs" + +keywords = ["HTTP", "apitest", "perftest", "requests"] + +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Topic :: Software Development :: Testing", + "Topic :: Software Development :: Quality Assurance", + "Topic :: Software Development :: Libraries :: Python Modules", + "Operating System :: MacOS", + "Operating System :: POSIX :: Linux", + "Operating System :: Microsoft :: Windows", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10" +] + +include = ["docs/CHANGELOG.md"] + +[tool.poetry.dependencies] +python = "^3.7" +pydantic = "~1.8" # >=1.8.0 <1.9.0 +loguru = "^0.4.1" +jmespath = "^0.9.5" +black = "^22.3.0" +pytest = "^7.1.1" +pytest-html = "^3.1.1" +sentry-sdk = "^0.14.4" +allure-pytest = {version = "^2.8.16", optional = true} +requests-toolbelt = {version = "^0.10.1", optional = true} +filetype = {version = "^1.0.7", optional = true} +Brotli = "^1.0.9" +jinja2 = "^3.0.3" +toml = "^0.10.2" +sqlalchemy = {version = "^1.4.36", optional = true} +pymysql = {version = "^1.0.2",optional = true} +cython = {version = "^0.29.28", optional = true} +thriftpy2 = {version = "^0.4.14", optional = true} +thrift = {version = "^0.16.0", optional = true} +pyyaml = "^6.0.1" +requests = "^2.31.0" +urllib3 = "^1.26" + +[tool.poetry.extras] +allure = ["allure-pytest"] # pip install "httprunner[allure]", poetry install -E allure +upload = ["requests-toolbelt", "filetype"] # pip install "httprunner[upload]", poetry install -E upload +sql = ["sqlalchemy","pymysql"] # pip install "httprunner[sql]", poetry install -E sql +thrift = ["cython","thrift","thriftpy2"] # pip install "httprunner[thrift]", poetry install -E sql + +[tool.poetry.dev-dependencies] +coverage = "^4.5.4" + +[tool.poetry.scripts] +httprunner = "httprunner.cli:main" +hrun = "httprunner.cli:main_hrun_alias" +hmake = "httprunner.cli:main_make_alias" + +[build-system] +requires = ["poetry>=1.0.0"] +build-backend = "poetry.masonry.api" + +[[tool.poetry.source]] +name = "tsinghua" +url = "https://pypi.tuna.tsinghua.edu.cn/simple/"