Foreword
After a test run finishes, we want to collect the execution results so that we can quickly produce statistics about the run.
The collected results can also serve as a summary report: when sending a notification email, you can report the result counts first and then attach the HTML report.
pytest_terminal_summary
About the TerminalReporter class can be viewed in _pytest.terminal
from _pytest import terminal
pytest_terminal_summary(terminalreporter, exitstatus, config)
Final results summary — all execution results are available from this hook.
Parameters:
- terminalreporter (_pytest.terminal.TerminalReporter) – the internal terminal reporter object
- exitstatus (int) – the exit code that will be returned to the operating system
- config (_pytest.config.Config) – the pytest config object
TerminalReporter part code
class TerminalReporter(object):
def __init__(self, config, file=None):
import _pytest.config
self.config = config
self._numcollected = 0
self._session = None
self._showfspath = None
self.stats = {}
self.startdir = config.invocation_dir
def report_collect(self, final=False):
if self.config.option.verbose < 0:
return
if not final:
# Only write "collecting" report every 0.5s.
t = time.time()
if (
self._collect_report_last_write is not None
and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION
):
return
self._collect_report_last_write = t
errors = len(self.stats.get("error", []))
skipped = len(self.stats.get("skipped", []))
deselected = len(self.stats.get("deselected", []))
selected = self._numcollected - errors - skipped - deselected
if final:
line = "collected "
else:
line = "collecting "
line += (
str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
)
if errors:
line += " / %d errors" % errors
if deselected:
line += " / %d deselected" % deselected
if skipped:
line += " / %d skipped" % skipped
if self._numcollected > selected > 0:
line += " / %d selected" % selected
if self.isatty:
self.rewrite(line, bold=True, erase=True)
if final:
self.write("\n")
else:
self.write_line(line)
Case reference
First write a few use cases in test_a.py
# test_a.py
import pytest
def test_1():
print("测试用例1111")
assert 1 == 1
@pytest.mark.skip("跳过")
def test_2():
print("测试用例22222")
assert 1 == 1
def test_3():
print("测试用例3333")
def test_4():
print("测试用例44444444")
assert 1 == 2
test_b.py use case reference
# test_b.py
import time
def test_5():
print("测试用例55555555")
time.sleep(3)
def test_6():
print("测试用例66666666")
time.sleep(3)
assert 1 == 2
So write a pytest_terminal_summary function in conftest.py to collect test results
import time
from _pytest import terminal
def pytest_terminal_summary(terminalreporter, exitstatus, config):
'''收集测试结果'''
print(terminalreporter.stats)
print("total:", terminalreporter._numcollected)
print('passed:', len(terminalreporter.stats.get('passed', [])))
print('failed:', len(terminalreporter.stats.get('failed', [])))
print('error:', len(terminalreporter.stats.get('error', [])))
print('skipped:', len(terminalreporter.stats.get('skipped', [])))
# terminalreporter._sessionstarttime 会话开始时间
duration = time.time() - terminalreporter._sessionstarttime
print('total times:', duration, 'seconds')
operation result
D:\soft\pytest_xuexi_demo>pytest
============================= test session starts =============================
platform win32 -- Python 3.6.0, pytest-4.5.0, py-1.5.4, pluggy-0.13.1
rootdir: D:\soft\pytest_xuexi_demo
plugins: allure-pytest-2.8.6, PyTestReport-0.1.9.3, forked-0.2, html-1.19.0, metadata-1.7.0, repeat-0.7.0, rerunfailures-8.0, xdist-1.23.2
collected 6 items
test_a.py .s.F [ 66%]
test_b.py .F [100%]
================================== FAILURES ===================================
___________________________________ test_4 ____________________________________
def test_4():
print("测试用例44444444")
> assert 1==2
E assert 1 == 2
test_a.py:21: AssertionError
---------------------------- Captured stdout call -----------------------------
测试用例44444444
___________________________________ test_6 ____________________________________
def test_6():
print("测试用例66666666")
time.sleep(3)
> assert 1 == 2
E assert 1 == 2
test_b.py:18: AssertionError
---------------------------- Captured stdout call -----------------------------
测试用例66666666
{'': [<TestReport 'test_a.py::test_1' when='setup' outcome='passed'>, <TestReport 'test_a.py::test_1' when='teardown' outcome='passed'>, <TestReport 'test_a.py::test_2' when='teardown' outcome='passed'>, <TestReport 'test_a.py::test_3' when='setup' outcome='passed'>, <TestReport 'test_a.py::test_3' when='teardown' outcome='passed'>, <TestReport 'test_a.py::test_4' when='setup' outcome='passed'>, <TestReport 'test_a.py::test_4' when='teardown' outcome='passed'>, <TestReport 'test_b.py::test_5' when='setup' outcome='passed'>, <TestReport 'test_b.py::test_5' when='teardown' outcome='passed'>, <TestReport 'test_b.py::test_6' when='setup' outcome='passed'>, <TestReport 'test_b.py::test_6' when='teardown' outcome='passed'>], 'passed': [<TestReport 'test_a.py::test_1' when='call' outcome='passed'>, <TestReport 'test_a.py::test_3' when='call' outcome='passed'>, <TestReport 'test_b.py::test_5' when='call' outcome='passed'>], 'skipped': [<TestReport 'test_a.py::test_2' when='setup' outcome='skipped'>], 'failed': [<TestReport 'test_a.py::test_4' when='call' outcome='failed'>, <TestReport 'test_b.py::test_6' when='call' outcome='failed'>]}
total: 6
passed: 3
failed: 2
error: 0
skipped: 1
total times: 6.150860786437988 seconds
================ 2 failed, 3 passed, 1 skipped in 6.15 seconds ================
Setup and teardown exceptions
If the setup is abnormal, modify the test_b.py code
# test_b.py
import time
import pytest
@pytest.fixture(scope="function")
def setup_demo():
raise TypeError("ERROR!")
def test_5(setup_demo):
print("测试用例55555555")
time.sleep(3)
def test_6():
print("测试用例66666666")
time.sleep(3)
assert 1 == 2
Re-run the use case, the result is as follows
total: 6
passed: 2
failed: 2
error: 1
skipped: 1
成功率:33.33%
total times: 3.1817877292633057 seconds
=========== 2 failed, 2 passed, 1 skipped, 1 error in 3.18 seconds ============
At this point, there is no problem with the statistical results. Next, look at the abnormal situation of teardown
# test_b.py
import time
import pytest
@pytest.fixture(scope="function")
def setup_demo():
yield
raise TypeError("ERROR!")
def test_5(setup_demo):
print("测试用例55555555")
time.sleep(3)
def test_6():
print("测试用例66666666")
time.sleep(3)
assert 1 == 2
operation result
{'': [<TestReport 'test_a.py::test_1' when='setup' outcome='passed'>, <TestReport 'test_a.py::test_1' when='teardown' outcome='passed'>, <TestReport 'test_a.py::test_2' when='teardown' outcome='passed'>, <TestReport 'test_a.py::test_3' when='setup' outcome='passed'>, <TestReport 'test_a.py::test_3' when='teardown' outcome='passed'>, <TestReport 'test_a.py::test_4' when='setup' outcome='passed'>, <TestReport 'test_a.py::test_4' when='teardown' outcome='passed'>, <TestReport 'test_b.py::test_5' when='setup' outcome='passed'>, <TestReport 'test_b.py::test_6' when='setup' outcome='passed'>, <TestReport 'test_b.py::test_6' when='teardown' outcome='passed'>], 'passed': [<TestReport 'test_a.py::test_1' when='call' outcome='passed'>, <TestReport 'test_a.py::test_3' when='call' outcome='passed'>, <TestReport 'test_b.py::test_5' when='call' outcome='passed'>], 'skipped': [<TestReport 'test_a.py::test_2' when='setup' outcome='skipped'>], 'failed': [<TestReport 'test_a.py::test_4' when='call' outcome='failed'>, <TestReport 'test_b.py::test_6' when='call' outcome='failed'>], 'error': [<TestReport 'test_b.py::test_5' when='teardown' outcome='failed'>]}
total: 6
passed: 3
failed: 2
error: 1
skipped: 1
成功率:50.00%
total times: 6.18759298324585 seconds
=========== 2 failed, 3 passed, 1 skipped, 1 error in 6.19 seconds ============
At this point 6 test cases were collected, but 2 failed + 3 passed + 1 skipped + 1 error
adds up to 7 — why?
Looking at terminalreporter.stats, test_5 is counted once under 'passed' with when='call':
<TestReport 'test_b.py::test_5' when='call' outcome='passed'>
and then counted a second time under 'error' with when='teardown':
'error': [<TestReport 'test_b.py::test_5' when='teardown' outcome='failed'>]
Reports with when='teardown' come from the post-test phase, which is usually data cleanup. An error there does not change the outcome of the test call itself, so teardown reports can be ignored when counting results.
The final code after modification is as follows
import time
from _pytest import terminal
def pytest_terminal_summary(terminalreporter, exitstatus, config):
'''收集测试结果'''
# print(terminalreporter.stats)
print("total:", terminalreporter._numcollected)
print('passed:', len([i for i in terminalreporter.stats.get('passed', []) if i.when != 'teardown']))
print('failed:', len([i for i in terminalreporter.stats.get('failed', []) if i.when != 'teardown']))
print('error:', len([i for i in terminalreporter.stats.get('error', []) if i.when != 'teardown']))
print('skipped:', len([i for i in terminalreporter.stats.get('skipped', []) if i.when != 'teardown']))
print('成功率:%.2f' % (len(terminalreporter.stats.get('passed', []))/terminalreporter._numcollected*100)+'%')
# terminalreporter._sessionstarttime 会话开始时间
duration = time.time() - terminalreporter._sessionstarttime
print('total times:', duration, 'seconds')