switch testing output to test result panel #22039


Merged
merged 26 commits on Oct 11, 2023

Commits (26)
f5230da
switch testing output to test result panel
eleanorjboyd Sep 20, 2023
1aebbb7
remove sleep
eleanorjboyd Sep 21, 2023
d2ffd0d
fix existing tests
eleanorjboyd Sep 25, 2023
48b68a5
add output channel msg
eleanorjboyd Sep 25, 2023
83934c7
add test python side
eleanorjboyd Sep 25, 2023
93196a3
fix new line
eleanorjboyd Sep 25, 2023
8d94714
Merge branch 'main' into switch-output-location
eleanorjboyd Sep 25, 2023
3fc1f20
update with comment
eleanorjboyd Sep 25, 2023
7455d87
remove unneeded import
eleanorjboyd Sep 25, 2023
4c891f1
edits from feedback and collect discovery
eleanorjboyd Sep 25, 2023
5d8e61f
remove color addition
eleanorjboyd Sep 25, 2023
506c320
Merge branch 'main' into switch-output-location
eleanorjboyd Oct 9, 2023
9f270d6
switch testing output to test result panel
eleanorjboyd Sep 20, 2023
09e3868
fix existing tests
eleanorjboyd Sep 25, 2023
9f4598a
add output channel msg
eleanorjboyd Sep 25, 2023
3405f27
fix new line
eleanorjboyd Sep 25, 2023
b2ab4f7
edits from feedback and collect discovery
eleanorjboyd Sep 25, 2023
9a67e66
remove color addition
eleanorjboyd Sep 25, 2023
6a1f01b
fix syntax issues from merge
eleanorjboyd Oct 9, 2023
9db80b9
fix tests
eleanorjboyd Oct 9, 2023
a7c6c61
fix linting
eleanorjboyd Oct 9, 2023
2d0830a
fix failing tests
eleanorjboyd Oct 9, 2023
6ecaa9a
fix to dynamic print
eleanorjboyd Oct 9, 2023
f9bc755
switch to using constant for deprecation msg
eleanorjboyd Oct 9, 2023
1bfc34b
Update src/client/testing/testController/common/utils.ts
eleanorjboyd Oct 10, 2023
6b8444a
Merge branch 'main' into switch-output-location
eleanorjboyd Oct 10, 2023
35 changes: 35 additions & 0 deletions pythonFiles/tests/pytestadapter/.data/test_logging.py
@@ -0,0 +1,35 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
import sys


def test_logging2(caplog):
logger = logging.getLogger(__name__)
caplog.set_level(logging.DEBUG) # Set minimum log level to capture

logger.debug("This is a debug message.")
logger.info("This is an info message.")
logger.warning("This is a warning message.")
logger.error("This is an error message.")
logger.critical("This is a critical message.")

# Printing to stdout and stderr
print("This is a stdout message.")
print("This is a stderr message.", file=sys.stderr)
assert False


def test_logging(caplog):
logger = logging.getLogger(__name__)
caplog.set_level(logging.DEBUG) # Set minimum log level to capture

logger.debug("This is a debug message.")
logger.info("This is an info message.")
logger.warning("This is a warning message.")
logger.error("This is an error message.")
logger.critical("This is a critical message.")

# Printing to stdout and stderr
print("This is a stdout message.")
print("This is a stderr message.", file=sys.stderr)
28 changes: 28 additions & 0 deletions pythonFiles/tests/pytestadapter/expected_execution_test_output.py
@@ -596,3 +596,31 @@
"subtest": None,
}
}


# This is the expected output for the test logging file.
# └── test_logging.py
# └── test_logging2: failure
# └── test_logging: success
test_logging_path = TEST_DATA_PATH / "test_logging.py"

logging_test_expected_execution_output = {
get_absolute_test_id("test_logging.py::test_logging2", test_logging_path): {
"test": get_absolute_test_id(
"test_logging.py::test_logging2", test_logging_path
),
"outcome": "failure",
"message": "ERROR MESSAGE",
"traceback": None,
"subtest": None,
},
get_absolute_test_id("test_logging.py::test_logging", test_logging_path): {
"test": get_absolute_test_id(
"test_logging.py::test_logging", test_logging_path
),
"outcome": "success",
"message": None,
"traceback": None,
"subtest": None,
},
}
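
get_absolute_test_id is a helper defined elsewhere in this test suite. Judging only from the call sites above, a plausible sketch of its behavior (an assumption, not the repository's actual implementation):

import pathlib


def get_absolute_test_id(test_id: str, test_path: pathlib.Path) -> str:
    # Swap the relative file prefix ("test_logging.py") for the absolute
    # path so ids stay unique across working directories (assumed behavior).
    _, _, node_part = test_id.partition("::")
    return f"{test_path}::{node_part}"
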
1 change: 1 addition & 0 deletions pythonFiles/tests/pytestadapter/helpers.py
@@ -129,6 +129,7 @@ def runner_with_cwd(
"pytest",
"-p",
"vscode_pytest",
"-s",
] + args
listener: socket.socket = create_server()
_, port = listener.getsockname()
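
The new -s flag is pytest's shorthand for --capture=no: it stops pytest from capturing stdout/stderr, so output printed by tests reaches the adapter process and, ultimately, the Test Results panel. A minimal sketch of the effect (assuming pytest is installed and the command runs from the repository root):

import pytest

# With "-s", print() output from tests streams straight to the parent
# process instead of being captured by pytest.
exit_code = pytest.main(["-s", "pythonFiles/tests/pytestadapter/.data/test_logging.py"])
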
27 changes: 17 additions & 10 deletions pythonFiles/tests/pytestadapter/test_execution.py
@@ -215,23 +215,30 @@ def test_bad_id_error_execution():
],
expected_execution_test_output.doctest_pytest_expected_execution_output,
),
(
["test_logging.py::test_logging2", "test_logging.py::test_logging"],
expected_execution_test_output.logging_test_expected_execution_output,
),
],
)
def test_pytest_execution(test_ids, expected_const):
"""
Test that pytest discovery works as expected where run pytest is always successful
but the actual test results are both successes and failures.:
1. uf_execution_expected_output: unittest tests run on multiple files.
2. uf_single_file_expected_output: test run on a single file.
3. uf_single_method_execution_expected_output: test run on a single method in a file.
4. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer.
5. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests.
6. dual_level_nested_folder_execution_expected_output: test run on a file with one test file
1. skip_tests_execution_expected_output: test run on a file with skipped tests.
2. error_raised_exception_execution_expected_output: test run on a file that raises an exception.
3. uf_execution_expected_output: unittest tests run on multiple files.
4. uf_single_file_expected_output: test run on a single file.
5. uf_single_method_execution_expected_output: test run on a single method in a file.
6. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer.
7. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests.
8. dual_level_nested_folder_execution_expected_output: test run on a file with one test file
at the top level and one test file in a nested folder.
7. double_nested_folder_expected_execution_output: test run on a double nested folder.
8. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs.
9. single_parametrize_tests_expected_execution_output: test run on single parametrize test.
10. doctest_pytest_expected_execution_output: test run on doctest file.
9. double_nested_folder_expected_execution_output: test run on a double nested folder.
10. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs.
11. single_parametrize_tests_expected_execution_output: test run on single parametrize test.
12. doctest_pytest_expected_execution_output: test run on doctest file.
13. logging_test_expected_execution_output: test run on a file with logging.


Keyword arguments:
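
Each (test_ids, expected_const) pair above is supplied to test_pytest_execution through pytest.mark.parametrize, as the surrounding context shows. A simplified, self-contained sketch of that wiring (structure only; the expected-output dict is a placeholder here):

import pytest

LOGGING_EXPECTED = {"placeholder": "see logging_test_expected_execution_output above"}


@pytest.mark.parametrize(
    ("test_ids", "expected_const"),
    [
        (
            ["test_logging.py::test_logging2", "test_logging.py::test_logging"],
            LOGGING_EXPECTED,
        ),
    ],
)
def test_pytest_execution_sketch(test_ids, expected_const):
    # The real harness runs the adapter on test_ids and compares the
    # emitted results against expected_const.
    assert isinstance(expected_const, dict)
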
2 changes: 0 additions & 2 deletions pythonFiles/unittestadapter/execution.py
@@ -288,8 +288,6 @@ def post_response(
)
# Clear the buffer as complete JSON object is received
buffer = b""

# Process the JSON data
break
except json.JSONDecodeError:
# JSON decoding error, the complete JSON object is not yet received
2 changes: 0 additions & 2 deletions pythonFiles/vscode_pytest/run_pytest_script.py
@@ -51,8 +51,6 @@
)
# Clear the buffer as complete JSON object is received
buffer = b""

# Process the JSON data
print("Received JSON data in run script")
break
except json.JSONDecodeError:
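
Both unittestadapter/execution.py and vscode_pytest/run_pytest_script.py share the socket-reading loop visible in the context above: accumulate bytes, attempt to decode, and reset the buffer only once a complete JSON object has arrived. A simplified, self-contained sketch of that pattern (reconstructed from the context lines, not the exact source):

import json
import socket


def read_json(sock: socket.socket) -> dict:
    buffer = b""
    while True:
        chunk = sock.recv(4096)
        if not chunk:
            raise ConnectionError("socket closed before a full JSON object arrived")
        buffer += chunk
        try:
            data = json.loads(buffer.decode("utf-8"))
        except (json.JSONDecodeError, UnicodeDecodeError):
            # Incomplete JSON (or a multi-byte character split across
            # chunks); keep accumulating.
            continue
        # Complete JSON object received; the real scripts clear the buffer
        # and break out of the loop here.
        return data
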
48 changes: 18 additions & 30 deletions src/client/testing/testController/common/resultResolver.ts
@@ -11,7 +11,7 @@ import { clearAllChildren, createErrorTestItem, getTestCaseNodes } from './testI
import { sendTelemetryEvent } from '../../../telemetry';
import { EventName } from '../../../telemetry/constants';
import { splitLines } from '../../../common/stringUtils';
import { buildErrorNodeOptions, fixLogLines, populateTestTree, splitTestNameWithRegex } from './utils';
import { buildErrorNodeOptions, populateTestTree, splitTestNameWithRegex } from './utils';
import { Deferred } from '../../../common/utils/async';

export class PythonResultResolver implements ITestResultResolver {
@@ -138,15 +138,16 @@ export class PythonResultResolver implements ITestResultResolver {
const tempArr: TestItem[] = getTestCaseNodes(i);
testCases.push(...tempArr);
});
const testItem = rawTestExecData.result[keyTemp];

if (rawTestExecData.result[keyTemp].outcome === 'error') {
const rawTraceback = rawTestExecData.result[keyTemp].traceback ?? '';
if (testItem.outcome === 'error') {
const rawTraceback = testItem.traceback ?? '';
const traceback = splitLines(rawTraceback, {
trim: false,
removeEmptyEntries: true,
}).join('\r\n');
const text = `${rawTestExecData.result[keyTemp].test} failed with error: ${
rawTestExecData.result[keyTemp].message ?? rawTestExecData.result[keyTemp].outcome
const text = `${testItem.test} failed with error: ${
testItem.message ?? testItem.outcome
}\r\n${traceback}\r\n`;
const message = new TestMessage(text);

@@ -157,23 +158,17 @@
if (indiItem.uri && indiItem.range) {
message.location = new Location(indiItem.uri, indiItem.range);
runInstance.errored(indiItem, message);
runInstance.appendOutput(fixLogLines(text));
}
}
});
} else if (
rawTestExecData.result[keyTemp].outcome === 'failure' ||
rawTestExecData.result[keyTemp].outcome === 'passed-unexpected'
) {
const rawTraceback = rawTestExecData.result[keyTemp].traceback ?? '';
} else if (testItem.outcome === 'failure' || testItem.outcome === 'passed-unexpected') {
const rawTraceback = testItem.traceback ?? '';
const traceback = splitLines(rawTraceback, {
trim: false,
removeEmptyEntries: true,
}).join('\r\n');

const text = `${rawTestExecData.result[keyTemp].test} failed: ${
rawTestExecData.result[keyTemp].message ?? rawTestExecData.result[keyTemp].outcome
}\r\n${traceback}\r\n`;
const text = `${testItem.test} failed: ${testItem.message ?? testItem.outcome}\r\n${traceback}\r\n`;
const message = new TestMessage(text);

// note that keyTemp is a runId for unittest library...
@@ -184,14 +179,10 @@
if (indiItem.uri && indiItem.range) {
message.location = new Location(indiItem.uri, indiItem.range);
runInstance.failed(indiItem, message);
runInstance.appendOutput(fixLogLines(text));
}
}
});
} else if (
rawTestExecData.result[keyTemp].outcome === 'success' ||
rawTestExecData.result[keyTemp].outcome === 'expected-failure'
) {
} else if (testItem.outcome === 'success' || testItem.outcome === 'expected-failure') {
const grabTestItem = this.runIdToTestItem.get(keyTemp);
const grabVSid = this.runIdToVSid.get(keyTemp);
if (grabTestItem !== undefined) {
@@ -203,7 +194,7 @@
}
});
}
} else if (rawTestExecData.result[keyTemp].outcome === 'skipped') {
} else if (testItem.outcome === 'skipped') {
const grabTestItem = this.runIdToTestItem.get(keyTemp);
const grabVSid = this.runIdToVSid.get(keyTemp);
if (grabTestItem !== undefined) {
Expand All @@ -215,11 +206,11 @@ export class PythonResultResolver implements ITestResultResolver {
}
});
}
} else if (rawTestExecData.result[keyTemp].outcome === 'subtest-failure') {
} else if (testItem.outcome === 'subtest-failure') {
// split on [] or () based on how the subtest is setup.
const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp);
const parentTestItem = this.runIdToTestItem.get(parentTestCaseId);
const data = rawTestExecData.result[keyTemp];
const data = testItem;
// find the subtest's parent test item
if (parentTestItem) {
const subtestStats = this.subTestStats.get(parentTestCaseId);
@@ -230,20 +221,19 @@
failed: 1,
passed: 0,
});
runInstance.appendOutput(fixLogLines(`${parentTestCaseId} [subtests]:\r\n`));
// clear since subtest items don't persist between runs
clearAllChildren(parentTestItem);
}
const subTestItem = this.testController?.createTestItem(subtestId, subtestId);
runInstance.appendOutput(fixLogLines(`${subtestId} Failed\r\n`));
// create a new test item for the subtest
if (subTestItem) {
const traceback = data.traceback ?? '';
const text = `${data.subtest} Failed: ${data.message ?? data.outcome}\r\n${traceback}\r\n`;
runInstance.appendOutput(fixLogLines(text));
const text = `${data.subtest} failed: ${
testItem.message ?? testItem.outcome
}\r\n${traceback}\r\n`;
parentTestItem.children.add(subTestItem);
runInstance.started(subTestItem);
const message = new TestMessage(rawTestExecData?.result[keyTemp].message ?? '');
const message = new TestMessage(text);
if (parentTestItem.uri && parentTestItem.range) {
message.location = new Location(parentTestItem.uri, parentTestItem.range);
}
@@ -254,7 +244,7 @@
} else {
throw new Error('Parent test item not found');
}
} else if (rawTestExecData.result[keyTemp].outcome === 'subtest-success') {
} else if (testItem.outcome === 'subtest-success') {
// split on [] or () based on how the subtest is setup.
const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp);
const parentTestItem = this.runIdToTestItem.get(parentTestCaseId);
@@ -266,7 +256,6 @@
subtestStats.passed += 1;
} else {
this.subTestStats.set(parentTestCaseId, { failed: 0, passed: 1 });
runInstance.appendOutput(fixLogLines(`${parentTestCaseId} [subtests]:\r\n`));
// clear since subtest items don't persist between runs
clearAllChildren(parentTestItem);
}
@@ -276,7 +265,6 @@
parentTestItem.children.add(subTestItem);
runInstance.started(subTestItem);
runInstance.passed(subTestItem);
runInstance.appendOutput(fixLogLines(`${subtestId} Passed\r\n`));
} else {
throw new Error('Unable to create new child node for subtest');
}