Multiple colors with terminal summary_stats #5061

Merged · 1 commit · Oct 23, 2019

1 change: 1 addition & 0 deletions changelog/5061.feature.rst
@@ -0,0 +1 @@
+Use multiple colors with terminal summary statistics.
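
In effect, the summary line ("1 failed, 2 passed in 0.12s") is no longer printed in one uniform color: each count is colored by its own outcome category, and the part matching the session's main color is additionally bolded. As a rough illustration of the scheme, here is a minimal sketch using standard ANSI SGR codes; markup() below is a hypothetical stand-in for pytest's TerminalWriter.markup(), whose exact escape-sequence ordering may differ:

_SGR = {"bold": 1, "red": 31, "green": 32, "yellow": 33}

def markup(text, **kw):
    # One SGR escape per truthy flag, then a reset at the end.
    on = "".join("\x1b[%dm" % _SGR[name] for name, flag in kw.items() if flag)
    return (on + text + "\x1b[0m") if on else text

parts = [
    ("1 failed", {"bold": True, "red": True}),
    ("2 passed", {"bold": False, "green": True}),
]
print(", ".join(markup(text, **kw) for text, kw in parts))
# bold red "1 failed", plain green "2 passed"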
83 changes: 60 additions & 23 deletions src/_pytest/terminal.py
@@ -864,15 +864,41 @@ def _outrep_summary(self, rep):
             self._tw.line(content)
 
     def summary_stats(self):
-        session_duration = time.time() - self._sessionstarttime
-        (line, color) = build_summary_stats_line(self.stats)
-        msg = "{} in {}".format(line, format_session_duration(session_duration))
-        markup = {color: True, "bold": True}
+        if self.verbosity < -1:
+            return
 
-        if self.verbosity >= 0:
-            self.write_sep("=", msg, **markup)
-        if self.verbosity == -1:
-            self.write_line(msg, **markup)
+        session_duration = time.time() - self._sessionstarttime
+        (parts, main_color) = build_summary_stats_line(self.stats)
+        line_parts = []
+
+        display_sep = self.verbosity >= 0
+        if display_sep:
+            fullwidth = self._tw.fullwidth
+        for text, markup in parts:
+            with_markup = self._tw.markup(text, **markup)
+            if display_sep:
+                fullwidth += len(with_markup) - len(text)
+            line_parts.append(with_markup)
+        msg = ", ".join(line_parts)
+
+        main_markup = {main_color: True}
+        duration = " in {}".format(format_session_duration(session_duration))
+        duration_with_markup = self._tw.markup(duration, **main_markup)
+        if display_sep:
+            fullwidth += len(duration_with_markup) - len(duration)
+        msg += duration_with_markup
+
+        if display_sep:
+            markup_for_end_sep = self._tw.markup("", **main_markup)
+            if markup_for_end_sep.endswith("\x1b[0m"):
+                markup_for_end_sep = markup_for_end_sep[:-4]
+            fullwidth += len(markup_for_end_sep)
+            msg += markup_for_end_sep
+
+        if display_sep:
+            self.write_sep("=", msg, fullwidth=fullwidth, **main_markup)
+        else:
+            self.write_line(msg, **main_markup)
 
     def short_test_summary(self):
         if not self.reportchars:
@@ -1011,6 +1037,15 @@ def _folded_skips(skipped):
     return values
 
 
+_color_for_type = {
+    "failed": "red",
+    "error": "red",
+    "warnings": "yellow",
+    "passed": "green",
+}
+_color_for_type_default = "yellow"
+
+
 def build_summary_stats_line(stats):
     known_types = (
         "failed passed skipped deselected xfailed xpassed warnings error".split()
@@ -1021,30 +1056,32 @@ def build_summary_stats_line(stats):
             if found_type:  # setup/teardown reports have an empty key, ignore them
                 known_types.append(found_type)
                 unknown_type_seen = True
 
+    # main color
+    if "failed" in stats or "error" in stats:
+        main_color = "red"
+    elif "warnings" in stats or unknown_type_seen:
+        main_color = "yellow"
+    elif "passed" in stats:
+        main_color = "green"
+    else:
+        main_color = "yellow"
+
     parts = []
     for key in known_types:
         reports = stats.get(key, None)
         if reports:
             count = sum(
                 1 for rep in reports if getattr(rep, "count_towards_summary", True)
             )
-            parts.append("%d %s" % (count, key))
 
-    if parts:
-        line = ", ".join(parts)
-    else:
-        line = "no tests ran"
+            color = _color_for_type.get(key, _color_for_type_default)
+            markup = {color: True, "bold": color == main_color}
+            parts.append(("%d %s" % (count, key), markup))
 
-    if "failed" in stats or "error" in stats:
-        color = "red"
-    elif "warnings" in stats or unknown_type_seen:
-        color = "yellow"
-    elif "passed" in stats:
-        color = "green"
-    else:
-        color = "yellow"
+    if not parts:
+        parts = [("no tests ran", {_color_for_type_default: True})]
 
-    return line, color
+    return parts, main_color
 
 
 def _plugin_nameversions(plugininfo):
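
The contract of build_summary_stats_line() changes here from (line, color) to (parts, main_color): parts is a list of (text, markup_dict) tuples, and a part is bold exactly when its color equals main_color. Because the joined message now contains escape sequences that occupy no terminal columns, summary_stats() inflates the fullwidth passed to write_sep() by len(with_markup) - len(text) for every part, so the "=" padding still fills the screen width. The new return value can be exercised directly; a sketch, assuming the private _pytest.terminal module is importable as it is in pytest's own test suite (the stats values only need to be non-empty iterables):

from _pytest.terminal import build_summary_stats_line

# "failed" takes priority for the main color; the matching part is bold.
parts, main_color = build_summary_stats_line({"failed": (1,), "passed": (1,)})
assert main_color == "red"
assert parts == [
    ("1 failed", {"bold": True, "red": True}),
    ("1 passed", {"bold": False, "green": True}),
]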
10 changes: 5 additions & 5 deletions testing/test_pdb.py
@@ -193,7 +193,7 @@ def test_one(self):
         )
         child = testdir.spawn_pytest("-rs --pdb %s" % p1)
         child.expect("Skipping also with pdb active")
-        child.expect("1 skipped in")
+        child.expect_exact("= \x1b[33m\x1b[1m1 skipped\x1b[0m\x1b[33m in")
         child.sendeof()
         self.flush(child)
 
@@ -221,7 +221,7 @@ def test_not_called_due_to_quit():
         child.sendeof()
         rest = child.read().decode("utf8")
         assert "Exit: Quitting debugger" in rest
-        assert "= 1 failed in" in rest
+        assert "= \x1b[31m\x1b[1m1 failed\x1b[0m\x1b[31m in" in rest
         assert "def test_1" not in rest
         assert "get rekt" not in rest
         self.flush(child)
@@ -703,7 +703,7 @@ def do_continue(self, arg):
             assert "> PDB continue (IO-capturing resumed) >" in rest
         else:
             assert "> PDB continue >" in rest
-        assert "1 passed in" in rest
+        assert "= \x1b[32m\x1b[1m1 passed\x1b[0m\x1b[32m in" in rest
 
     def test_pdb_used_outside_test(self, testdir):
         p1 = testdir.makepyfile(
@@ -1019,7 +1019,7 @@ def test_3():
         child.sendline("q")
         child.expect_exact("Exit: Quitting debugger")
         rest = child.read().decode("utf8")
-        assert "2 passed in" in rest
+        assert "= \x1b[32m\x1b[1m2 passed\x1b[0m\x1b[32m in" in rest
         assert "reading from stdin while output" not in rest
         # Only printed once - not on stderr.
         assert "Exit: Quitting debugger" not in child.before.decode("utf8")
@@ -1130,7 +1130,7 @@ def test_inner({fixture}):
 
     TestPDB.flush(child)
    assert child.exitstatus == 0
-    assert "= 1 passed in " in rest
+    assert "= \x1b[32m\x1b[1m1 passed\x1b[0m\x1b[32m in" in rest
    assert "> PDB continue (IO-capturing resumed for fixture %s) >" % (fixture) in rest
 
 
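The pexpect assertions now pin the exact byte sequence the writer emits: the count is wrapped in color plus bold, reset with \x1b[0m, and the main color is immediately re-opened so the trailing " in <duration>" stays colored but not bold. These are standard SGR codes, nothing pytest-specific:

# \x1b[1m = bold, \x1b[31m = red, \x1b[32m = green, \x1b[33m = yellow,
# \x1b[0m = reset. So the skipped assertion decodes as:
#   "= ", then yellow+bold "1 skipped", reset, then yellow " in ..." (not bold).
print("= \x1b[33m\x1b[1m1 skipped\x1b[0m\x1b[33m in 0.01s\x1b[0m")
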
131 changes: 106 additions & 25 deletions testing/test_terminal.py
@@ -164,7 +164,7 @@ def test_1():
         child.expect(r"collecting 2 items")
         child.expect(r"collected 2 items")
         rest = child.read().decode("utf8")
-        assert "2 passed in" in rest
+        assert "= \x1b[32m\x1b[1m2 passed\x1b[0m\x1b[32m in" in rest
 
     def test_itemreport_subclasses_show_subclassed_file(self, testdir):
         testdir.makepyfile(
@@ -1252,42 +1252,123 @@ def test_failure():
         # dict value, not the actual contents, so tuples of anything
         # suffice
         # Important statuses -- the highest priority of these always wins
-        ("red", "1 failed", {"failed": (1,)}),
-        ("red", "1 failed, 1 passed", {"failed": (1,), "passed": (1,)}),
-        ("red", "1 error", {"error": (1,)}),
-        ("red", "1 passed, 1 error", {"error": (1,), "passed": (1,)}),
+        ("red", [("1 failed", {"bold": True, "red": True})], {"failed": (1,)}),
+        (
+            "red",
+            [
+                ("1 failed", {"bold": True, "red": True}),
+                ("1 passed", {"bold": False, "green": True}),
+            ],
+            {"failed": (1,), "passed": (1,)},
+        ),
+        ("red", [("1 error", {"bold": True, "red": True})], {"error": (1,)}),
+        (
+            "red",
+            [
+                ("1 passed", {"bold": False, "green": True}),
+                ("1 error", {"bold": True, "red": True}),
+            ],
+            {"error": (1,), "passed": (1,)},
+        ),
         # (a status that's not known to the code)
-        ("yellow", "1 weird", {"weird": (1,)}),
-        ("yellow", "1 passed, 1 weird", {"weird": (1,), "passed": (1,)}),
-        ("yellow", "1 warnings", {"warnings": (1,)}),
-        ("yellow", "1 passed, 1 warnings", {"warnings": (1,), "passed": (1,)}),
-        ("green", "5 passed", {"passed": (1, 2, 3, 4, 5)}),
+        ("yellow", [("1 weird", {"bold": True, "yellow": True})], {"weird": (1,)}),
+        (
+            "yellow",
+            [
+                ("1 passed", {"bold": False, "green": True}),
+                ("1 weird", {"bold": True, "yellow": True}),
+            ],
+            {"weird": (1,), "passed": (1,)},
+        ),
+        (
+            "yellow",
+            [("1 warnings", {"bold": True, "yellow": True})],
+            {"warnings": (1,)},
+        ),
+        (
+            "yellow",
+            [
+                ("1 passed", {"bold": False, "green": True}),
+                ("1 warnings", {"bold": True, "yellow": True}),
+            ],
+            {"warnings": (1,), "passed": (1,)},
+        ),
+        (
+            "green",
+            [("5 passed", {"bold": True, "green": True})],
+            {"passed": (1, 2, 3, 4, 5)},
+        ),
         # "Boring" statuses. These have no effect on the color of the summary
         # line. Thus, if *every* test has a boring status, the summary line stays
         # at its default color, i.e. yellow, to warn the user that the test run
         # produced no useful information
-        ("yellow", "1 skipped", {"skipped": (1,)}),
-        ("green", "1 passed, 1 skipped", {"skipped": (1,), "passed": (1,)}),
-        ("yellow", "1 deselected", {"deselected": (1,)}),
-        ("green", "1 passed, 1 deselected", {"deselected": (1,), "passed": (1,)}),
-        ("yellow", "1 xfailed", {"xfailed": (1,)}),
-        ("green", "1 passed, 1 xfailed", {"xfailed": (1,), "passed": (1,)}),
-        ("yellow", "1 xpassed", {"xpassed": (1,)}),
-        ("green", "1 passed, 1 xpassed", {"xpassed": (1,), "passed": (1,)}),
+        ("yellow", [("1 skipped", {"bold": True, "yellow": True})], {"skipped": (1,)}),
+        (
+            "green",
+            [
+                ("1 passed", {"bold": True, "green": True}),
+                ("1 skipped", {"bold": False, "yellow": True}),
+            ],
+            {"skipped": (1,), "passed": (1,)},
+        ),
+        (
+            "yellow",
+            [("1 deselected", {"bold": True, "yellow": True})],
+            {"deselected": (1,)},
+        ),
+        (
+            "green",
+            [
+                ("1 passed", {"bold": True, "green": True}),
+                ("1 deselected", {"bold": False, "yellow": True}),
+            ],
+            {"deselected": (1,), "passed": (1,)},
+        ),
+        ("yellow", [("1 xfailed", {"bold": True, "yellow": True})], {"xfailed": (1,)}),
+        (
+            "green",
+            [
+                ("1 passed", {"bold": True, "green": True}),
+                ("1 xfailed", {"bold": False, "yellow": True}),
+            ],
+            {"xfailed": (1,), "passed": (1,)},
+        ),
+        ("yellow", [("1 xpassed", {"bold": True, "yellow": True})], {"xpassed": (1,)}),
+        (
+            "green",
+            [
+                ("1 passed", {"bold": True, "green": True}),
+                ("1 xpassed", {"bold": False, "yellow": True}),
+            ],
+            {"xpassed": (1,), "passed": (1,)},
+        ),
         # Likewise if no tests were found at all
-        ("yellow", "no tests ran", {}),
+        ("yellow", [("no tests ran", {"yellow": True})], {}),
         # Test the empty-key special case
-        ("yellow", "no tests ran", {"": (1,)}),
-        ("green", "1 passed", {"": (1,), "passed": (1,)}),
+        ("yellow", [("no tests ran", {"yellow": True})], {"": (1,)}),
+        (
+            "green",
+            [("1 passed", {"bold": True, "green": True})],
+            {"": (1,), "passed": (1,)},
+        ),
         # A couple more complex combinations
         (
             "red",
-            "1 failed, 2 passed, 3 xfailed",
+            [
+                ("1 failed", {"bold": True, "red": True}),
+                ("2 passed", {"bold": False, "green": True}),
+                ("3 xfailed", {"bold": False, "yellow": True}),
+            ],
             {"passed": (1, 2), "failed": (1,), "xfailed": (1, 2, 3)},
         ),
         (
             "green",
-            "1 passed, 2 skipped, 3 deselected, 2 xfailed",
+            [
+                ("1 passed", {"bold": True, "green": True}),
+                ("2 skipped", {"bold": False, "yellow": True}),
+                ("3 deselected", {"bold": False, "yellow": True}),
+                ("2 xfailed", {"bold": False, "yellow": True}),
+            ],
             {
                 "passed": (1,),
                 "skipped": (1, 2),
@@ -1313,11 +1394,11 @@ class DummyReport(BaseReport):
     r1 = DummyReport()
     r2 = DummyReport()
     res = build_summary_stats_line({"failed": (r1, r2)})
-    assert res == ("2 failed", "red")
+    assert res == ([("2 failed", {"bold": True, "red": True})], "red")
 
     r1.count_towards_summary = False
     res = build_summary_stats_line({"failed": (r1, r2)})
-    assert res == ("1 failed", "red")
+    assert res == ([("1 failed", {"bold": True, "red": True})], "red")
 
 
 class TestClassicOutputStyle:
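
One rule the parametrization above pins down: "boring" outcomes (skipped, deselected, xfailed, xpassed) always render yellow and never drive the main color, so a run that also contains passes goes green with only the passed part bolded. A quick check under the same import assumption as the sketch above:

from _pytest.terminal import build_summary_stats_line

parts, main_color = build_summary_stats_line({"passed": (1,), "skipped": (1, 2)})
assert main_color == "green"
assert parts == [
    ("1 passed", {"bold": True, "green": True}),
    ("2 skipped", {"bold": False, "yellow": True}),
]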