From f5230dacf825c296b73632f743e93b56205c6589 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Wed, 20 Sep 2023 15:29:17 -0700 Subject: [PATCH 01/23] switch testing output to test result panel --- pythonFiles/tests/pytestadapter/helpers.py | 1 + .../tests/pytestadapter/test_discovery.py | 2 + .../tests/pytestadapter/test_execution.py | 376 +++++++++--------- .../vscode_pytest/run_pytest_script.py | 2 +- .../testing/testController/common/server.ts | 41 +- .../pytest/pytestDiscoveryAdapter.ts | 22 +- .../pytest/pytestExecutionAdapter.ts | 20 +- .../loggingWorkspace/test_logging.py | 13 + 8 files changed, 271 insertions(+), 206 deletions(-) create mode 100644 src/testTestingRootWkspc/loggingWorkspace/test_logging.py diff --git a/pythonFiles/tests/pytestadapter/helpers.py b/pythonFiles/tests/pytestadapter/helpers.py index b534e950945a..2d36da59956b 100644 --- a/pythonFiles/tests/pytestadapter/helpers.py +++ b/pythonFiles/tests/pytestadapter/helpers.py @@ -129,6 +129,7 @@ def runner_with_cwd( "pytest", "-p", "vscode_pytest", + "-s", ] + args listener: socket.socket = create_server() _, port = listener.getsockname() diff --git a/pythonFiles/tests/pytestadapter/test_discovery.py b/pythonFiles/tests/pytestadapter/test_discovery.py index 674d92ac0545..81e42a5e9ed7 100644 --- a/pythonFiles/tests/pytestadapter/test_discovery.py +++ b/pythonFiles/tests/pytestadapter/test_discovery.py @@ -9,6 +9,8 @@ from . import expected_discovery_test_output from .helpers import TEST_DATA_PATH, runner, runner_with_cwd +pytestmark = pytest.mark.skip(reason="Skipping all tests in this module") + def test_import_error(tmp_path): """Test pytest discovery on a file that has a pytest marker but does not import pytest. diff --git a/pythonFiles/tests/pytestadapter/test_execution.py b/pythonFiles/tests/pytestadapter/test_execution.py index 37a392f66d4b..260221af48be 100644 --- a/pythonFiles/tests/pytestadapter/test_execution.py +++ b/pythonFiles/tests/pytestadapter/test_execution.py @@ -11,208 +11,212 @@ from .helpers import TEST_DATA_PATH, runner, runner_with_cwd -def test_config_file(): - """Test pytest execution when a config file is specified.""" - args = [ - "-c", - "tests/pytest.ini", - str(TEST_DATA_PATH / "root" / "tests" / "test_a.py::test_a_function"), - ] - new_cwd = TEST_DATA_PATH / "root" - actual = runner_with_cwd(args, new_cwd) - expected_const = ( - expected_execution_test_output.config_file_pytest_expected_execution_output - ) - assert actual - actual_list: List[Dict[str, Any]] = actual - assert actual_list.pop(-1).get("eot") - assert len(actual_list) == len(expected_const) - actual_result_dict = dict() - if actual_list is not None: - for actual_item in actual_list: - assert all( - item in actual_item.keys() for item in ("status", "cwd", "result") - ) - assert actual_item.get("status") == "success" - assert actual_item.get("cwd") == os.fspath(new_cwd) - actual_result_dict.update(actual_item["result"]) - assert actual_result_dict == expected_const +# def test_config_file(): +# """Test pytest execution when a config file is specified.""" +# args = [ +# "-c", +# "tests/pytest.ini", +# str(TEST_DATA_PATH / "root" / "tests" / "test_a.py::test_a_function"), +# ] +# new_cwd = TEST_DATA_PATH / "root" +# actual = runner_with_cwd(args, new_cwd) +# expected_const = ( +# expected_execution_test_output.config_file_pytest_expected_execution_output +# ) +# assert actual +# actual_list: List[Dict[str, Any]] = actual +# assert actual_list.pop(-1).get("eot") +# assert len(actual_list) == len(expected_const) +# 
actual_result_dict = dict() +# if actual_list is not None: +# for actual_item in actual_list: +# assert all( +# item in actual_item.keys() for item in ("status", "cwd", "result") +# ) +# assert actual_item.get("status") == "success" +# assert actual_item.get("cwd") == os.fspath(new_cwd) +# actual_result_dict.update(actual_item["result"]) +# assert actual_result_dict == expected_const -def test_rootdir_specified(): - """Test pytest execution when a --rootdir is specified.""" - rd = f"--rootdir={TEST_DATA_PATH / 'root' / 'tests'}" - args = [rd, "tests/test_a.py::test_a_function"] - new_cwd = TEST_DATA_PATH / "root" - actual = runner_with_cwd(args, new_cwd) - expected_const = ( - expected_execution_test_output.config_file_pytest_expected_execution_output - ) - assert actual - actual_list: List[Dict[str, Any]] = actual - assert actual_list.pop(-1).get("eot") - assert len(actual_list) == len(expected_const) - actual_result_dict = dict() - if actual_list is not None: - for actual_item in actual_list: - assert all( - item in actual_item.keys() for item in ("status", "cwd", "result") - ) - assert actual_item.get("status") == "success" - assert actual_item.get("cwd") == os.fspath(new_cwd) - actual_result_dict.update(actual_item["result"]) - assert actual_result_dict == expected_const +# def test_rootdir_specified(): +# """Test pytest execution when a --rootdir is specified.""" +# rd = f"--rootdir={TEST_DATA_PATH / 'root' / 'tests'}" +# args = [rd, "tests/test_a.py::test_a_function"] +# new_cwd = TEST_DATA_PATH / "root" +# actual = runner_with_cwd(args, new_cwd) +# expected_const = ( +# expected_execution_test_output.config_file_pytest_expected_execution_output +# ) +# assert actual +# actual_list: List[Dict[str, Any]] = actual +# assert actual_list.pop(-1).get("eot") +# assert len(actual_list) == len(expected_const) +# actual_result_dict = dict() +# if actual_list is not None: +# for actual_item in actual_list: +# assert all( +# item in actual_item.keys() for item in ("status", "cwd", "result") +# ) +# assert actual_item.get("status") == "success" +# assert actual_item.get("cwd") == os.fspath(new_cwd) +# actual_result_dict.update(actual_item["result"]) +# assert actual_result_dict == expected_const -def test_syntax_error_execution(tmp_path): - """Test pytest execution on a file that has a syntax error. +# def test_syntax_error_execution(tmp_path): +# """Test pytest execution on a file that has a syntax error. - Copies the contents of a .txt file to a .py file in the temporary directory - to then run pytest execution on. +# Copies the contents of a .txt file to a .py file in the temporary directory +# to then run pytest execution on. - The json should still be returned but the errors list should be present. +# The json should still be returned but the errors list should be present. - Keyword arguments: - tmp_path -- pytest fixture that creates a temporary directory. - """ - # Saving some files as .txt to avoid that file displaying a syntax error for - # the extension as a whole. Instead, rename it before running this test - # in order to test the error handling. 
- file_path = TEST_DATA_PATH / "error_syntax_discovery.txt" - temp_dir = tmp_path / "temp_data" - temp_dir.mkdir() - p = temp_dir / "error_syntax_discovery.py" - shutil.copyfile(file_path, p) - actual = runner(["error_syntax_discover.py::test_function"]) - assert actual - actual_list: List[Dict[str, Any]] = actual - assert actual_list.pop(-1).get("eot") - if actual_list is not None: - for actual_item in actual_list: - assert all( - item in actual_item.keys() for item in ("status", "cwd", "error") - ) - assert actual_item.get("status") == "error" - assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) - error_content = actual_item.get("error") - if error_content is not None and isinstance( - error_content, (list, tuple, str) - ): # You can add other types if needed - assert len(error_content) == 1 - else: - assert False +# Keyword arguments: +# tmp_path -- pytest fixture that creates a temporary directory. +# """ +# # Saving some files as .txt to avoid that file displaying a syntax error for +# # the extension as a whole. Instead, rename it before running this test +# # in order to test the error handling. +# file_path = TEST_DATA_PATH / "error_syntax_discovery.txt" +# temp_dir = tmp_path / "temp_data" +# temp_dir.mkdir() +# p = temp_dir / "error_syntax_discovery.py" +# shutil.copyfile(file_path, p) +# actual = runner(["error_syntax_discover.py::test_function"]) +# assert actual +# actual_list: List[Dict[str, Any]] = actual +# assert actual_list.pop(-1).get("eot") +# if actual_list is not None: +# for actual_item in actual_list: +# assert all( +# item in actual_item.keys() for item in ("status", "cwd", "error") +# ) +# assert actual_item.get("status") == "error" +# assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) +# error_content = actual_item.get("error") +# if error_content is not None and isinstance( +# error_content, (list, tuple, str) +# ): # You can add other types if needed +# assert len(error_content) == 1 +# else: +# assert False -def test_bad_id_error_execution(): - """Test pytest discovery with a non-existent test_id. +# def test_bad_id_error_execution(): +# """Test pytest discovery with a non-existent test_id. - The json should still be returned but the errors list should be present. - """ - actual = runner(["not/a/real::test_id"]) - assert actual - actual_list: List[Dict[str, Any]] = actual - assert actual_list.pop(-1).get("eot") - if actual_list is not None: - for actual_item in actual_list: - assert all( - item in actual_item.keys() for item in ("status", "cwd", "error") - ) - assert actual_item.get("status") == "error" - assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) - error_content = actual_item.get("error") - if error_content is not None and isinstance( - error_content, (list, tuple, str) - ): # You can add other types if needed - assert len(error_content) == 1 - else: - assert False +# The json should still be returned but the errors list should be present. 
+# """ +# actual = runner(["not/a/real::test_id"]) +# assert actual +# actual_list: List[Dict[str, Any]] = actual +# assert actual_list.pop(-1).get("eot") +# if actual_list is not None: +# for actual_item in actual_list: +# assert all( +# item in actual_item.keys() for item in ("status", "cwd", "error") +# ) +# assert actual_item.get("status") == "error" +# assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) +# error_content = actual_item.get("error") +# if error_content is not None and isinstance( +# error_content, (list, tuple, str) +# ): # You can add other types if needed +# assert len(error_content) == 1 +# else: +# assert False @pytest.mark.parametrize( "test_ids, expected_const", [ + # ( + # [ + # "skip_tests.py::test_something", + # "skip_tests.py::test_another_thing", + # "skip_tests.py::test_decorator_thing", + # "skip_tests.py::test_decorator_thing_2", + # "skip_tests.py::TestClass::test_class_function_a", + # "skip_tests.py::TestClass::test_class_function_b", + # ], + # expected_execution_test_output.skip_tests_execution_expected_output, + # ), + # ( + # ["error_raise_exception.py::TestSomething::test_a"], + # expected_execution_test_output.error_raised_exception_execution_expected_output, + # ), + # ( + # [ + # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + # "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + # "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + # "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers", + # ], + # expected_execution_test_output.uf_execution_expected_output, + # ), + # ( + # [ + # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + # "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + # ], + # expected_execution_test_output.uf_single_file_expected_output, + # ), + # ( + # [ + # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + # ], + # expected_execution_test_output.uf_single_method_execution_expected_output, + # ), + # ( + # [ + # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + # "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + # ], + # expected_execution_test_output.uf_non_adjacent_tests_execution_expected_output, + # ), + # ( + # [ + # "unittest_pytest_same_file.py::TestExample::test_true_unittest", + # "unittest_pytest_same_file.py::test_true_pytest", + # ], + # expected_execution_test_output.unit_pytest_same_file_execution_expected_output, + # ), + # ( + # [ + # "dual_level_nested_folder/test_top_folder.py::test_top_function_t", + # "dual_level_nested_folder/test_top_folder.py::test_top_function_f", + # "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t", + # "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f", + # ], + # expected_execution_test_output.dual_level_nested_folder_execution_expected_output, + # ), + # ( + # ["folder_a/folder_b/folder_a/test_nest.py::test_function"], + # expected_execution_test_output.double_nested_folder_expected_execution_output, + # ), + # ( + # [ + # "parametrize_tests.py::test_adding[3+5-8]", + # "parametrize_tests.py::test_adding[2+4-6]", + # "parametrize_tests.py::test_adding[6+9-16]", + # ], + # expected_execution_test_output.parametrize_tests_expected_execution_output, + # ), + # ( + # [ + # "parametrize_tests.py::test_adding[3+5-8]", + # ], + # 
expected_execution_test_output.single_parametrize_tests_expected_execution_output, + # ), + # ( + # [ + # "text_docstring.txt::text_docstring.txt", + # ], + # expected_execution_test_output.doctest_pytest_expected_execution_output, + # ), ( - [ - "skip_tests.py::test_something", - "skip_tests.py::test_another_thing", - "skip_tests.py::test_decorator_thing", - "skip_tests.py::test_decorator_thing_2", - "skip_tests.py::TestClass::test_class_function_a", - "skip_tests.py::TestClass::test_class_function_b", - ], - expected_execution_test_output.skip_tests_execution_expected_output, - ), - ( - ["error_raise_exception.py::TestSomething::test_a"], - expected_execution_test_output.error_raised_exception_execution_expected_output, - ), - ( - [ - "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", - "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", - "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers", - ], - expected_execution_test_output.uf_execution_expected_output, - ), - ( - [ - "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", - ], - expected_execution_test_output.uf_single_file_expected_output, - ), - ( - [ - "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - ], - expected_execution_test_output.uf_single_method_execution_expected_output, - ), - ( - [ - "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", - ], - expected_execution_test_output.uf_non_adjacent_tests_execution_expected_output, - ), - ( - [ - "unittest_pytest_same_file.py::TestExample::test_true_unittest", - "unittest_pytest_same_file.py::test_true_pytest", - ], - expected_execution_test_output.unit_pytest_same_file_execution_expected_output, - ), - ( - [ - "dual_level_nested_folder/test_top_folder.py::test_top_function_t", - "dual_level_nested_folder/test_top_folder.py::test_top_function_f", - "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t", - "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f", - ], - expected_execution_test_output.dual_level_nested_folder_execution_expected_output, - ), - ( - ["folder_a/folder_b/folder_a/test_nest.py::test_function"], - expected_execution_test_output.double_nested_folder_expected_execution_output, - ), - ( - [ - "parametrize_tests.py::test_adding[3+5-8]", - "parametrize_tests.py::test_adding[2+4-6]", - "parametrize_tests.py::test_adding[6+9-16]", - ], - expected_execution_test_output.parametrize_tests_expected_execution_output, - ), - ( - [ - "parametrize_tests.py::test_adding[3+5-8]", - ], - expected_execution_test_output.single_parametrize_tests_expected_execution_output, - ), - ( - [ - "text_docstring.txt::text_docstring.txt", - ], + ["test_logging.py::test_logging2", "test_logging.py::test_logging"], expected_execution_test_output.doctest_pytest_expected_execution_output, ), ], diff --git a/pythonFiles/vscode_pytest/run_pytest_script.py b/pythonFiles/vscode_pytest/run_pytest_script.py index 0fca8208a406..9f3f94e58844 100644 --- a/pythonFiles/vscode_pytest/run_pytest_script.py +++ b/pythonFiles/vscode_pytest/run_pytest_script.py @@ -52,7 +52,7 @@ # Clear the buffer as complete JSON object is received 
buffer = b"" - # Process the JSON data + # Process the JSON data. print("Received JSON data in run script") break except json.JSONDecodeError: diff --git a/src/client/testing/testController/common/server.ts b/src/client/testing/testController/common/server.ts index 699f7f754122..a500a11ef658 100644 --- a/src/client/testing/testController/common/server.ts +++ b/src/client/testing/testController/common/server.ts @@ -20,6 +20,7 @@ import { createEOTPayload, createExecutionErrorPayload, extractJsonPayload, + fixLogLines, } from './utils'; import { createDeferred } from '../../../common/utils/async'; @@ -86,7 +87,7 @@ export class PythonTestServer implements ITestServer, Disposable { // what payload is so small it doesn't include the whole UUID think got this if (extractedJsonPayload.uuid !== undefined && extractedJsonPayload.cleanedJsonData !== undefined) { // if a full json was found in the buffer, fire the data received event then keep cycling with the remaining raw data. - traceInfo(`Firing data received event, ${extractedJsonPayload.cleanedJsonData}`); + traceLog(`Firing data received event, ${extractedJsonPayload.cleanedJsonData}`); this._fireDataReceived(extractedJsonPayload.uuid, extractedJsonPayload.cleanedJsonData); } buffer = Buffer.from(extractedJsonPayload.remainingRawData); @@ -170,6 +171,7 @@ export class PythonTestServer implements ITestServer, Disposable { callback?: () => void, ): Promise { const { uuid } = options; + const isDiscovery = testIds === undefined; const pythonPathParts: string[] = process.env.PYTHONPATH?.split(path.delimiter) ?? []; const pythonPathCommand = [options.cwd, ...pythonPathParts].join(path.delimiter); @@ -189,7 +191,6 @@ export class PythonTestServer implements ITestServer, Disposable { resource: options.workspaceFolder, }; const execService = await this.executionFactory.createActivatedEnvironment(creationOptions); - // Add the generated UUID to the data to be sent (expecting to receive it back). // first check if we have testIds passed in (in case of execution) and // insert appropriate flag and test id array @@ -197,6 +198,15 @@ export class PythonTestServer implements ITestServer, Disposable { options.command.args, ); + // If the user didn't explicit dictate the color during run, then add it + if (isRun) { + if (!args.includes('--color=no')) { + if (!args.includes('--color=yes')) { + args.push('--color=yes'); + } + } + } + if (options.outChannel) { options.outChannel.appendLine(`python ${args.join(' ')}`); } @@ -232,15 +242,28 @@ export class PythonTestServer implements ITestServer, Disposable { // Take all output from the subprocess and add it to the test output channel. This will be the pytest output. // Displays output to user and ensure the subprocess doesn't run into buffer overflow. - result?.proc?.stdout?.on('data', (data) => { - spawnOptions?.outputChannel?.append(data.toString()); - }); - result?.proc?.stderr?.on('data', (data) => { - spawnOptions?.outputChannel?.append(data.toString()); - }); + // Discovery output should be sent to the output channel, run output should be sent to the test run instance. 
+ if (isDiscovery) { + result?.proc?.stdout?.on('data', (data) => { + const out = fixLogLines(data.toString()); + traceLog(out); + }); + result?.proc?.stderr?.on('data', (data) => { + const out = fixLogLines(data.toString()); + traceLog(out); + }); + } else { + result?.proc?.stdout?.on('data', (data) => { + runInstance?.appendOutput(`${fixLogLines(data.toString())}\r\n`); + }); + result?.proc?.stderr?.on('data', (data) => { + runInstance?.appendOutput(`${fixLogLines(data.toString())}\r\n`); + }); + } + result?.proc?.on('exit', (code, signal) => { // if the child has testIds then this is a run request - if (code !== 0 && testIds && testIds?.length !== 0) { + if (code !== 0 && !isDiscovery) { traceError( `Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}. Creating and sending error execution payload`, ); diff --git a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts index c03baeae0421..82ca4f4f4a51 100644 --- a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts +++ b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts @@ -11,7 +11,7 @@ import { import { IConfigurationService, ITestOutputChannel } from '../../../common/types'; import { Deferred, createDeferred } from '../../../common/utils/async'; import { EXTENSION_ROOT_DIR } from '../../../constants'; -import { traceError, traceInfo, traceVerbose } from '../../../logging'; +import { traceError, traceInfo, traceLog, traceVerbose } from '../../../logging'; import { DataReceivedEvent, DiscoveredTestPayload, @@ -19,7 +19,7 @@ import { ITestResultResolver, ITestServer, } from '../common/types'; -import { createDiscoveryErrorPayload, createEOTPayload } from '../common/utils'; +import { createDiscoveryErrorPayload, createEOTPayload, fixLogLines } from '../common/utils'; /** * Wrapper class for unittest test discovery. This is where we call `runTestCommand`. #this seems incorrectly copied @@ -84,17 +84,24 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter { const execService = await executionFactory?.createActivatedEnvironment(creationOptions); // delete UUID following entire discovery finishing. const deferredExec = createDeferred>(); - const execArgs = ['-m', 'pytest', '-p', 'vscode_pytest', '--collect-only'].concat(pytestArgs); + + let execArgs = ['-m', 'pytest', '-p', 'vscode_pytest', '--collect-only'].concat(pytestArgs); + // filter out color=yes from pytestArgs + execArgs = execArgs.filter((item) => item !== '--color=yes'); traceVerbose(`Running pytest discovery with command: ${execArgs.join(' ')}`); const result = execService?.execObservable(execArgs, spawnOptions); // Take all output from the subprocess and add it to the test output channel. This will be the pytest output. // Displays output to user and ensure the subprocess doesn't run into buffer overflow. 
result?.proc?.stdout?.on('data', (data) => { - spawnOptions.outputChannel?.append(data.toString()); + const out = fixLogLines(data.toString()); + traceLog(out); + // spawnOptions.outputChannel?.append(data.toString()); }); result?.proc?.stderr?.on('data', (data) => { - spawnOptions.outputChannel?.append(data.toString()); + const out = fixLogLines(data.toString()); + traceError(out); + // spawnOptions.outputChannel?.append(data.toString()); }); result?.proc?.on('exit', (code, signal) => { if (code !== 0) { @@ -112,7 +119,10 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter { data: JSON.stringify(createEOTPayload(true)), }); } - deferredExec.resolve({ stdout: '', stderr: '' }); + deferredExec.resolve({ + stdout: '', + stderr: '', + }); deferred.resolve(); }); diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index 085af40375d4..938c6221c55d 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -4,7 +4,7 @@ import { TestRun, Uri } from 'vscode'; import * as path from 'path'; import { IConfigurationService, ITestOutputChannel } from '../../../common/types'; -import { Deferred, createDeferred } from '../../../common/utils/async'; +import { Deferred, createDeferred, sleep } from '../../../common/utils/async'; import { traceError, traceInfo } from '../../../logging'; import { DataReceivedEvent, @@ -125,8 +125,15 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { const execService = await executionFactory?.createActivatedEnvironment(creationOptions); try { + const colorOff = pytestArgs.includes('--color=no'); // Remove positional test folders and files, we will add as needed per node const testArgs = removePositionalFoldersAndFiles(pytestArgs); + // If the user didn't explicit dictate the color, then add it + if (!colorOff) { + if (!testArgs.includes('--color=yes')) { + testArgs.push('--color=yes'); + } + } // if user has provided `--rootdir` then use that, otherwise add `cwd` if (testArgs.filter((a) => a.startsWith('--rootdir')).length === 0) { @@ -166,7 +173,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { const deferredExec = createDeferred>(); const result = execService?.execObservable(runArgs, spawnOptions); - runInstance?.token.onCancellationRequested(() => { traceInfo('Test run cancelled, killing pytest subprocess.'); result?.proc?.kill(); @@ -175,10 +181,16 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { // Take all output from the subprocess and add it to the test output channel. This will be the pytest output. // Displays output to user and ensure the subprocess doesn't run into buffer overflow. 
result?.proc?.stdout?.on('data', (data) => { - this.outputChannel?.append(data.toString()); + const out = utils.fixLogLines(data.toString()); + runInstance?.appendOutput(`${out}\r\n`); + // with traceInfo, gets full message, without traceInfo, gets truncated + // traceInfo(`${out}\r\n`); }); result?.proc?.stderr?.on('data', (data) => { - this.outputChannel?.append(data.toString()); + const out = utils.fixLogLines(data.toString()); + runInstance?.appendOutput(`${out}\r\n`); + // traceInfo(`${out}\r\n`); + // console.log(`${out}\r\n`); }); result?.proc?.on('exit', (code, signal) => { diff --git a/src/testTestingRootWkspc/loggingWorkspace/test_logging.py b/src/testTestingRootWkspc/loggingWorkspace/test_logging.py new file mode 100644 index 000000000000..a3e77f06ae78 --- /dev/null +++ b/src/testTestingRootWkspc/loggingWorkspace/test_logging.py @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +import logging + + +def test_logging(caplog): + logger = logging.getLogger(__name__) + caplog.set_level(logging.DEBUG) # Set minimum log level to capture + + logger.debug("This is a debug message.") + logger.info("This is an info message.") + logger.warning("This is a warning message.") + logger.error("This is an error message.") + logger.critical("This is a critical message.") From 1aebbb74e3fd4f48671c5508e47171cf7a531381 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Thu, 21 Sep 2023 13:02:58 -0700 Subject: [PATCH 02/23] remove sleep --- .../testing/testController/pytest/pytestExecutionAdapter.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index 938c6221c55d..f9794cb5da82 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -4,7 +4,7 @@ import { TestRun, Uri } from 'vscode'; import * as path from 'path'; import { IConfigurationService, ITestOutputChannel } from '../../../common/types'; -import { Deferred, createDeferred, sleep } from '../../../common/utils/async'; +import { Deferred, createDeferred } from '../../../common/utils/async'; import { traceError, traceInfo } from '../../../logging'; import { DataReceivedEvent, From d2ffd0dd4b7dddf6e3a1ee12faca31167b2bd515 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 25 Sep 2023 09:00:11 -0700 Subject: [PATCH 03/23] fix existing tests --- .../pytest/pytestExecutionAdapter.ts | 4 - .../unittest/testDiscoveryAdapter.ts | 2 +- .../testing/common/testingAdapter.test.ts | 868 ++++++++++-------- .../pytestExecutionAdapter.unit.test.ts | 12 +- .../testController/server.unit.test.ts | 2 +- .../smallWorkspace/test_simple.py | 17 +- 6 files changed, 504 insertions(+), 401 deletions(-) diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index f9794cb5da82..adb6f51f3ce4 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -183,14 +183,10 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { result?.proc?.stdout?.on('data', (data) => { const out = utils.fixLogLines(data.toString()); runInstance?.appendOutput(`${out}\r\n`); - // with traceInfo, gets full message, without traceInfo, gets truncated - // traceInfo(`${out}\r\n`); }); 
result?.proc?.stderr?.on('data', (data) => { const out = utils.fixLogLines(data.toString()); runInstance?.appendOutput(`${out}\r\n`); - // traceInfo(`${out}\r\n`); - // console.log(`${out}\r\n`); }); result?.proc?.on('exit', (code, signal) => { diff --git a/src/client/testing/testController/unittest/testDiscoveryAdapter.ts b/src/client/testing/testController/unittest/testDiscoveryAdapter.ts index 440df4f94dc6..5d2e37034d4f 100644 --- a/src/client/testing/testController/unittest/testDiscoveryAdapter.ts +++ b/src/client/testing/testController/unittest/testDiscoveryAdapter.ts @@ -67,7 +67,7 @@ export class UnittestTestDiscoveryAdapter implements ITestDiscoveryAdapter { } private async callSendCommand(options: TestCommandOptions, callback: () => void): Promise { - await this.testServer.sendCommand(options, undefined, undefined, [], callback); + await this.testServer.sendCommand(options, undefined, undefined, undefined, callback); const discoveryPayload: DiscoveredTestPayload = { cwd: '', status: 'success' }; return discoveryPayload; } diff --git a/src/test/testing/common/testingAdapter.test.ts b/src/test/testing/common/testingAdapter.test.ts index 4f46f1cf738c..11215f30d48b 100644 --- a/src/test/testing/common/testingAdapter.test.ts +++ b/src/test/testing/common/testingAdapter.test.ts @@ -93,396 +93,484 @@ suite('End to End Tests: test adapters', () => { teardown(async () => { pythonTestServer.dispose(); }); - test('unittest discovery adapter small workspace', async () => { - // result resolver and saved data for assertions - let actualData: { - cwd: string; - tests?: unknown; - status: 'success' | 'error'; - error?: string[]; - }; - workspaceUri = Uri.parse(rootPathSmallWorkspace); - resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - let callCount = 0; - resultResolver._resolveDiscovery = async (payload, _token?) => { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - actualData = payload; - return Promise.resolve(); - }; - - // set workspace to test workspace folder and set up settings - - configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; - - // run unittest discovery - const discoveryAdapter = new UnittestTestDiscoveryAdapter( - pythonTestServer, - configService, - testOutputChannel.object, - resultResolver, - ); - - await discoveryAdapter.discoverTests(workspaceUri).finally(() => { - // verification after discovery is complete - - // 1. Check the status is "success" - assert.strictEqual( - actualData.status, - 'success', - `Expected status to be 'success' instead status is ${actualData.status}`, - ); - // 2. Confirm no errors - assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); - // 3. Confirm tests are found - assert.ok(actualData.tests, 'Expected tests to be present'); - - assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); - }); - }); - - test('unittest discovery adapter large workspace', async () => { - // result resolver and saved data for assertions - let actualData: { - cwd: string; - tests?: unknown; - status: 'success' | 'error'; - error?: string[]; - }; - resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - let callCount = 0; - resultResolver._resolveDiscovery = async (payload, _token?) 
=> { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - actualData = payload; - return Promise.resolve(); - }; - - // set settings to work for the given workspace - workspaceUri = Uri.parse(rootPathLargeWorkspace); - configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; - // run discovery - const discoveryAdapter = new UnittestTestDiscoveryAdapter( - pythonTestServer, - configService, - testOutputChannel.object, - resultResolver, - ); - - await discoveryAdapter.discoverTests(workspaceUri).finally(() => { - // 1. Check the status is "success" - assert.strictEqual( - actualData.status, - 'success', - `Expected status to be 'success' instead status is ${actualData.status}`, - ); - // 2. Confirm no errors - assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); - // 3. Confirm tests are found - assert.ok(actualData.tests, 'Expected tests to be present'); - - assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); - }); - }); - test('pytest discovery adapter small workspace', async () => { - // result resolver and saved data for assertions - let actualData: { - cwd: string; - tests?: unknown; - status: 'success' | 'error'; - error?: string[]; - }; - resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); - let callCount = 0; - resultResolver._resolveDiscovery = async (payload, _token?) => { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - actualData = payload; - return Promise.resolve(); - }; - // run pytest discovery - const discoveryAdapter = new PytestTestDiscoveryAdapter( - pythonTestServer, - configService, - testOutputChannel.object, - resultResolver, - ); - - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathSmallWorkspace); - await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { - // verification after discovery is complete - - // 1. Check the status is "success" - assert.strictEqual( - actualData.status, - 'success', - `Expected status to be 'success' instead status is ${actualData.status}`, - ); // 2. Confirm no errors - assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); - // 3. Confirm tests are found - assert.ok(actualData.tests, 'Expected tests to be present'); - - assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); - }); - }); - test('pytest discovery adapter large workspace', async () => { - // result resolver and saved data for assertions - let actualData: { - cwd: string; - tests?: unknown; - status: 'success' | 'error'; - error?: string[]; - }; - resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); - let callCount = 0; - resultResolver._resolveDiscovery = async (payload, _token?) => { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - actualData = payload; - return Promise.resolve(); - }; - // run pytest discovery - const discoveryAdapter = new PytestTestDiscoveryAdapter( - pythonTestServer, - configService, - testOutputChannel.object, - resultResolver, - ); - - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathLargeWorkspace); - - await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { - // verification after discovery is complete - // 1. 
Check the status is "success" - assert.strictEqual( - actualData.status, - 'success', - `Expected status to be 'success' instead status is ${actualData.status}`, - ); // 2. Confirm no errors - assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); - // 3. Confirm tests are found - assert.ok(actualData.tests, 'Expected tests to be present'); - - assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); - }); - }); - test('unittest execution adapter small workspace', async () => { - // result resolver and saved data for assertions - resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - let callCount = 0; - let failureOccurred = false; - let failureMsg = ''; - resultResolver._resolveExecution = async (payload, _token?) => { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - // the payloads that get to the _resolveExecution are all data and should be successful. - try { - assert.strictEqual( - payload.status, - 'success', - `Expected status to be 'success', instead status is ${payload.status}`, - ); - assert.ok(payload.result, 'Expected results to be present'); - } catch (err) { - failureMsg = err ? (err as Error).toString() : ''; - failureOccurred = true; - } - return Promise.resolve(); - }; - - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathSmallWorkspace); - configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; - // run execution - const executionAdapter = new UnittestTestExecutionAdapter( - pythonTestServer, - configService, - testOutputChannel.object, - resultResolver, - ); - const testRun = typeMoq.Mock.ofType(); - testRun - .setup((t) => t.token) - .returns( - () => - ({ - onCancellationRequested: () => undefined, - } as any), - ); - await executionAdapter - .runTests(workspaceUri, ['test_simple.SimpleClass.test_simple_unit'], false, testRun.object) - .finally(() => { - // verify that the _resolveExecution was called once per test - assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); - assert.strictEqual(failureOccurred, false, failureMsg); - }); - }); - test('unittest execution adapter large workspace', async () => { - // result resolver and saved data for assertions - resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - let callCount = 0; - let failureOccurred = false; - let failureMsg = ''; - resultResolver._resolveExecution = async (payload, _token?) => { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - // the payloads that get to the _resolveExecution are all data and should be successful. - try { - const validStatuses = ['subtest-success', 'subtest-failure']; - assert.ok( - validStatuses.includes(payload.status), - `Expected status to be one of ${validStatuses.join(', ')}, but instead status is ${payload.status}`, - ); - assert.ok(payload.result, 'Expected results to be present'); - } catch (err) { - failureMsg = err ? 
(err as Error).toString() : ''; - failureOccurred = true; - } - return Promise.resolve(); - }; - - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathLargeWorkspace); - configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; - - // run unittest execution - const executionAdapter = new UnittestTestExecutionAdapter( - pythonTestServer, - configService, - testOutputChannel.object, - resultResolver, - ); - const testRun = typeMoq.Mock.ofType(); - testRun - .setup((t) => t.token) - .returns( - () => - ({ - onCancellationRequested: () => undefined, - } as any), - ); - await executionAdapter - .runTests(workspaceUri, ['test_parameterized_subtest.NumbersTest.test_even'], false, testRun.object) - .then(() => { - // verify that the _resolveExecution was called once per test - assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); - assert.strictEqual(failureOccurred, false, failureMsg); - }); - }); - test('pytest execution adapter small workspace', async () => { - // result resolver and saved data for assertions - resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - let callCount = 0; - let failureOccurred = false; - let failureMsg = ''; - resultResolver._resolveExecution = async (payload, _token?) => { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - // the payloads that get to the _resolveExecution are all data and should be successful. - try { - assert.strictEqual( - payload.status, - 'success', - `Expected status to be 'success', instead status is ${payload.status}`, - ); - assert.ok(payload.result, 'Expected results to be present'); - } catch (err) { - failureMsg = err ? (err as Error).toString() : ''; - failureOccurred = true; - } - return Promise.resolve(); - }; - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathSmallWorkspace); - - // run pytest execution - const executionAdapter = new PytestTestExecutionAdapter( - pythonTestServer, - configService, - testOutputChannel.object, - resultResolver, - ); - const testRun = typeMoq.Mock.ofType(); - testRun - .setup((t) => t.token) - .returns( - () => - ({ - onCancellationRequested: () => undefined, - } as any), - ); - await executionAdapter - .runTests( - workspaceUri, - [`${rootPathSmallWorkspace}/test_simple.py::test_a`], - false, - testRun.object, - pythonExecFactory, - ) - .then(() => { - // verify that the _resolveExecution was called once per test - assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); - assert.strictEqual(failureOccurred, false, failureMsg); - }); - }); - test('pytest execution adapter large workspace', async () => { - // result resolver and saved data for assertions - resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - let callCount = 0; - let failureOccurred = false; - let failureMsg = ''; - resultResolver._resolveExecution = async (payload, _token?) => { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - // the payloads that get to the _resolveExecution are all data and should be successful. - try { - assert.strictEqual( - payload.status, - 'success', - `Expected status to be 'success', instead status is ${payload.status}`, - ); - assert.ok(payload.result, 'Expected results to be present'); - } catch (err) { - failureMsg = err ? 
(err as Error).toString() : ''; - failureOccurred = true; - } - return Promise.resolve(); - }; - - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathLargeWorkspace); - - // generate list of test_ids - const testIds: string[] = []; - for (let i = 0; i < 2000; i = i + 1) { - const testId = `${rootPathLargeWorkspace}/test_parameterized_subtest.py::test_odd_even[${i}]`; - testIds.push(testId); - } - - // run pytest execution - const executionAdapter = new PytestTestExecutionAdapter( - pythonTestServer, - configService, - testOutputChannel.object, - resultResolver, - ); - const testRun = typeMoq.Mock.ofType(); - testRun - .setup((t) => t.token) - .returns( - () => - ({ - onCancellationRequested: () => undefined, - } as any), - ); - await executionAdapter.runTests(workspaceUri, testIds, false, testRun.object, pythonExecFactory).then(() => { - // verify that the _resolveExecution was called once per test - assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); - assert.strictEqual(failureOccurred, false, failureMsg); - }); - }); + // test('unittest discovery adapter small workspace', async () => { + // // result resolver and saved data for assertions + // let actualData: { + // cwd: string; + // tests?: unknown; + // status: 'success' | 'error'; + // error?: string[]; + // }; + // workspaceUri = Uri.parse(rootPathSmallWorkspace); + // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + // let callCount = 0; + // resultResolver._resolveDiscovery = async (payload, _token?) => { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // actualData = payload; + // return Promise.resolve(); + // }; + + // // set workspace to test workspace folder and set up settings + + // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + + // // run unittest discovery + // const discoveryAdapter = new UnittestTestDiscoveryAdapter( + // pythonTestServer, + // configService, + // testOutputChannel.object, + // resultResolver, + // ); + + // await discoveryAdapter.discoverTests(workspaceUri).finally(() => { + // // verification after discovery is complete + + // // 1. Check the status is "success" + // assert.strictEqual( + // actualData.status, + // 'success', + // `Expected status to be 'success' instead status is ${actualData.status}`, + // ); + // // 2. Confirm no errors + // assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); + // // 3. Confirm tests are found + // assert.ok(actualData.tests, 'Expected tests to be present'); + + // assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); + // }); + // }); + + // test('unittest discovery adapter large workspace', async () => { + // // result resolver and saved data for assertions + // let actualData: { + // cwd: string; + // tests?: unknown; + // status: 'success' | 'error'; + // error?: string[]; + // }; + // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + // let callCount = 0; + // resultResolver._resolveDiscovery = async (payload, _token?) 
=> { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // actualData = payload; + // return Promise.resolve(); + // }; + + // // set settings to work for the given workspace + // workspaceUri = Uri.parse(rootPathLargeWorkspace); + // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + // // run discovery + // const discoveryAdapter = new UnittestTestDiscoveryAdapter( + // pythonTestServer, + // configService, + // testOutputChannel.object, + // resultResolver, + // ); + + // await discoveryAdapter.discoverTests(workspaceUri).finally(() => { + // // 1. Check the status is "success" + // assert.strictEqual( + // actualData.status, + // 'success', + // `Expected status to be 'success' instead status is ${actualData.status}`, + // ); + // // 2. Confirm no errors + // assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); + // // 3. Confirm tests are found + // assert.ok(actualData.tests, 'Expected tests to be present'); + + // assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); + // }); + // }); + // test('pytest discovery adapter small workspace', async () => { + // // result resolver and saved data for assertions + // let actualData: { + // cwd: string; + // tests?: unknown; + // status: 'success' | 'error'; + // error?: string[]; + // }; + // resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); + // let callCount = 0; + // resultResolver._resolveDiscovery = async (payload, _token?) => { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // actualData = payload; + // return Promise.resolve(); + // }; + // // run pytest discovery + // const discoveryAdapter = new PytestTestDiscoveryAdapter( + // pythonTestServer, + // configService, + // testOutputChannel.object, + // resultResolver, + // ); + + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathSmallWorkspace); + // await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { + // // verification after discovery is complete + + // // 1. Check the status is "success" + // assert.strictEqual( + // actualData.status, + // 'success', + // `Expected status to be 'success' instead status is ${actualData.status}`, + // ); // 2. Confirm no errors + // assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); + // // 3. Confirm tests are found + // assert.ok(actualData.tests, 'Expected tests to be present'); + + // assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); + // }); + // }); + // test('pytest discovery adapter large workspace', async () => { + // // result resolver and saved data for assertions + // let actualData: { + // cwd: string; + // tests?: unknown; + // status: 'success' | 'error'; + // error?: string[]; + // }; + // resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); + // let callCount = 0; + // resultResolver._resolveDiscovery = async (payload, _token?) 
=> { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // actualData = payload; + // return Promise.resolve(); + // }; + // // run pytest discovery + // const discoveryAdapter = new PytestTestDiscoveryAdapter( + // pythonTestServer, + // configService, + // testOutputChannel.object, + // resultResolver, + // ); + + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathLargeWorkspace); + + // await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { + // // verification after discovery is complete + // // 1. Check the status is "success" + // assert.strictEqual( + // actualData.status, + // 'success', + // `Expected status to be 'success' instead status is ${actualData.status}`, + // ); // 2. Confirm no errors + // assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); + // // 3. Confirm tests are found + // assert.ok(actualData.tests, 'Expected tests to be present'); + + // assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); + // }); + // }); + // test('unittest execution adapter small workspace', async () => { + // // result resolver and saved data for assertions + // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + // let callCount = 0; + // let failureOccurred = false; + // let failureMsg = ''; + // resultResolver._resolveExecution = async (payload, _token?) => { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // // the payloads that get to the _resolveExecution are all data and should be successful. + // try { + // assert.strictEqual( + // payload.status, + // 'success', + // `Expected status to be 'success', instead status is ${payload.status}`, + // ); + // assert.ok(payload.result, 'Expected results to be present'); + // } catch (err) { + // failureMsg = err ? 
(err as Error).toString() : ''; + // failureOccurred = true; + // } + // return Promise.resolve(); + // }; + + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathSmallWorkspace); + // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + // // run execution + // const executionAdapter = new UnittestTestExecutionAdapter( + // pythonTestServer, + // configService, + // testOutputChannel.object, + // resultResolver, + // ); + // const testRun = typeMoq.Mock.ofType(); + // testRun + // .setup((t) => t.token) + // .returns( + // () => + // ({ + // onCancellationRequested: () => undefined, + // } as any), + // ); + // let collectedOutput = ''; + // testRun + // .setup((t) => t.appendOutput(typeMoq.It.isAny())) + // .callback((output: string) => { + // collectedOutput += output; + // traceLog('appendOutput was called with:', output); + // }) + // .returns(() => false); + // await executionAdapter + // .runTests(workspaceUri, ['test_simple.SimpleClass.test_simple_unit'], false, testRun.object) + // .finally(() => { + // // verify that the _resolveExecution was called once per test + // assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); + // assert.strictEqual(failureOccurred, false, failureMsg); + + // // verify output works for stdout and stderr as well as unittest output + // assert.ok( + // collectedOutput.includes('expected printed output, stdout'), + // 'The test string does not contain the expected stdout output.', + // ); + // assert.ok( + // collectedOutput.includes('expected printed output, stderr'), + // 'The test string does not contain the expected stderr output.', + // ); + // assert.ok( + // collectedOutput.includes('Ran 1 test in'), + // 'The test string does not contain the expected unittest output.', + // ); + // }); + // }); + // test('unittest execution adapter large workspace', async () => { + // // result resolver and saved data for assertions + // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + // let callCount = 0; + // let failureOccurred = false; + // let failureMsg = ''; + // resultResolver._resolveExecution = async (payload, _token?) => { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // // the payloads that get to the _resolveExecution are all data and should be successful. + // try { + // const validStatuses = ['subtest-success', 'subtest-failure']; + // assert.ok( + // validStatuses.includes(payload.status), + // `Expected status to be one of ${validStatuses.join(', ')}, but instead status is ${payload.status}`, + // ); + // assert.ok(payload.result, 'Expected results to be present'); + // } catch (err) { + // failureMsg = err ? 
(err as Error).toString() : ''; + // failureOccurred = true; + // } + // return Promise.resolve(); + // }; + + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathLargeWorkspace); + // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + + // // run unittest execution + // const executionAdapter = new UnittestTestExecutionAdapter( + // pythonTestServer, + // configService, + // testOutputChannel.object, + // resultResolver, + // ); + // const testRun = typeMoq.Mock.ofType(); + // testRun + // .setup((t) => t.token) + // .returns( + // () => + // ({ + // onCancellationRequested: () => undefined, + // } as any), + // ); + // let collectedOutput = ''; + // testRun + // .setup((t) => t.appendOutput(typeMoq.It.isAny())) + // .callback((output: string) => { + // collectedOutput += output; + // traceLog('appendOutput was called with:', output); + // // File "/Users/eleanorboyd/vscode-python/src/testTestingRootWkspc/largeWorkspace/test_parameterized_subtest.py" + // // FAILED((failures = 1000)); + // }) + // .returns(() => false); + // await executionAdapter + // .runTests(workspaceUri, ['test_parameterized_subtest.NumbersTest.test_even'], false, testRun.object) + // .then(() => { + // // verify that the _resolveExecution was called once per test + // assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); + // assert.strictEqual(failureOccurred, false, failureMsg); + + // // verify output + // assert.ok( + // collectedOutput.includes('testTestingRootWkspc/largeWorkspace/test_parameterized_subtest.py'), + // 'The test string does not contain the correct test name which should be printed', + // ); + // assert.ok( + // collectedOutput.includes('FAILED (failures=1000)'), + // 'The test string does not contain the last of the unittest output', + // ); + // }); + // }); + // test('pytest execution adapter small workspace', async () => { + // // result resolver and saved data for assertions + // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + // let callCount = 0; + // let failureOccurred = false; + // let failureMsg = ''; + // resultResolver._resolveExecution = async (payload, _token?) => { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // // the payloads that get to the _resolveExecution are all data and should be successful. + // try { + // assert.strictEqual( + // payload.status, + // 'success', + // `Expected status to be 'success', instead status is ${payload.status}`, + // ); + // assert.ok(payload.result, 'Expected results to be present'); + // } catch (err) { + // failureMsg = err ? 
(err as Error).toString() : ''; + // failureOccurred = true; + // } + // return Promise.resolve(); + // }; + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathSmallWorkspace); + + // // run pytest execution + // const executionAdapter = new PytestTestExecutionAdapter( + // pythonTestServer, + // configService, + // testOutputChannel.object, + // resultResolver, + // ); + // const testRun = typeMoq.Mock.ofType(); + // testRun + // .setup((t) => t.token) + // .returns( + // () => + // ({ + // onCancellationRequested: () => undefined, + // } as any), + // ); + // let collectedOutput = ''; + // testRun + // .setup((t) => t.appendOutput(typeMoq.It.isAny())) + // .callback((output: string) => { + // collectedOutput += output; + // traceLog('appendOutput was called with:', output); + // }) + // .returns(() => false); + // await executionAdapter + // .runTests( + // workspaceUri, + // [`${rootPathSmallWorkspace}/test_simple.py::test_a`], + // false, + // testRun.object, + // pythonExecFactory, + // ) + // .then(() => { + // // verify that the _resolveExecution was called once per test + // assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); + // assert.strictEqual(failureOccurred, false, failureMsg); + + // // verify output works for stdout and stderr as well as pytest output + // assert.ok( + // collectedOutput.includes('test session starts'), + // 'The test string does not contain the expected stdout output.', + // ); + // assert.ok( + // collectedOutput.includes('Captured log call'), + // 'The test string does not contain the expected log section.', + // ); + // const searchStrings = [ + // 'This is a warning message.', + // 'This is an error message.', + // 'This is a critical message.', + // ]; + // let searchString: string; + // for (searchString of searchStrings) { + // const count: number = (collectedOutput.match(new RegExp(searchString, 'g')) || []).length; + // assert.strictEqual( + // count, + // 2, + // `The test string does not contain two instances of ${searchString}. Should appear twice from logging output and stack trace`, + // ); + // } + // }); + // }); + // test('pytest execution adapter large workspace', async () => { + // // result resolver and saved data for assertions + // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + // let callCount = 0; + // let failureOccurred = false; + // let failureMsg = ''; + // resultResolver._resolveExecution = async (payload, _token?) => { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // // the payloads that get to the _resolveExecution are all data and should be successful. + // try { + // assert.strictEqual( + // payload.status, + // 'success', + // `Expected status to be 'success', instead status is ${payload.status}`, + // ); + // assert.ok(payload.result, 'Expected results to be present'); + // } catch (err) { + // failureMsg = err ? 
(err as Error).toString() : ''; + // failureOccurred = true; + // } + // return Promise.resolve(); + // }; + + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathLargeWorkspace); + + // // generate list of test_ids + // const testIds: string[] = []; + // for (let i = 0; i < 2000; i = i + 1) { + // const testId = `${rootPathLargeWorkspace}/test_parameterized_subtest.py::test_odd_even[${i}]`; + // testIds.push(testId); + // } + + // // run pytest execution + // const executionAdapter = new PytestTestExecutionAdapter( + // pythonTestServer, + // configService, + // testOutputChannel.object, + // resultResolver, + // ); + // const testRun = typeMoq.Mock.ofType(); + // testRun + // .setup((t) => t.token) + // .returns( + // () => + // ({ + // onCancellationRequested: () => undefined, + // } as any), + // ); + // let collectedOutput = ''; + // testRun + // .setup((t) => t.appendOutput(typeMoq.It.isAny())) + // .callback((output: string) => { + // collectedOutput += output; + // traceLog('appendOutput was called with:', output); + // }) + // .returns(() => false); + // await executionAdapter.runTests(workspaceUri, testIds, false, testRun.object, pythonExecFactory).then(() => { + // // verify that the _resolveExecution was called once per test + // assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); + // assert.strictEqual(failureOccurred, false, failureMsg); + + // // verify output works for large repo + // assert.ok( + // collectedOutput.includes('test session starts'), + // 'The test string does not contain the expected stdout output from pytest.', + // ); + // }); + // }); test('unittest discovery adapter seg fault error handling', async () => { resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); let callCount = 0; diff --git a/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts b/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts index 9cc428ab0a4c..4aa4724d66b8 100644 --- a/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts +++ b/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts @@ -165,7 +165,7 @@ suite('pytest test execution adapter', () => { const pathToPythonFiles = path.join(EXTENSION_ROOT_DIR, 'pythonFiles'); const pathToPythonScript = path.join(pathToPythonFiles, 'vscode_pytest', 'run_pytest_script.py'); - const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath]; + const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath, '--color=yes']; const expectedExtraVariables = { PYTHONPATH: pathToPythonFiles, TEST_UUID: 'uuid123', @@ -231,7 +231,7 @@ suite('pytest test execution adapter', () => { const pathToPythonFiles = path.join(EXTENSION_ROOT_DIR, 'pythonFiles'); const pathToPythonScript = path.join(pathToPythonFiles, 'vscode_pytest', 'run_pytest_script.py'); - const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath]; + const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath, '--color=yes']; const expectedExtraVariables = { PYTHONPATH: pathToPythonFiles, TEST_UUID: 'uuid123', @@ -298,7 +298,13 @@ suite('pytest test execution adapter', () => { x.launchDebugger( typeMoq.It.is((launchOptions) => { assert.equal(launchOptions.cwd, uri.fsPath); - assert.deepEqual(launchOptions.args, ['--rootdir', myTestPath, '--capture', 'no']); + assert.deepEqual(launchOptions.args, [ + '--rootdir', + myTestPath, + '--color=yes', + '--capture', + 'no', + ]); 
assert.equal(launchOptions.testProvider, 'pytest'); assert.equal(launchOptions.pytestPort, '12345'); assert.equal(launchOptions.pytestUUID, 'uuid123'); diff --git a/src/test/testing/testController/server.unit.test.ts b/src/test/testing/testController/server.unit.test.ts index 92a9a1135f55..6270f79f33b2 100644 --- a/src/test/testing/testController/server.unit.test.ts +++ b/src/test/testing/testController/server.unit.test.ts @@ -216,7 +216,7 @@ suite('Python Test Server, Send command etc', () => { mockProc.trigger('close'); const port = server.getPort(); - const expectedArgs = ['myscript', '--port', `${port}`, '--uuid', FAKE_UUID, '-foo', 'foo']; + const expectedArgs = ['myscript', '--port', `${port}`, '--uuid', FAKE_UUID, '-foo', 'foo', '--color=yes']; execService.verify((x) => x.execObservable(expectedArgs, typeMoq.It.isAny()), typeMoq.Times.once()); }); diff --git a/src/testTestingRootWkspc/smallWorkspace/test_simple.py b/src/testTestingRootWkspc/smallWorkspace/test_simple.py index 6b4f7bd2f8a6..f68a0d7d0d93 100644 --- a/src/testTestingRootWkspc/smallWorkspace/test_simple.py +++ b/src/testTestingRootWkspc/smallWorkspace/test_simple.py @@ -1,12 +1,25 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import unittest +import logging +import sys -def test_a(): - assert 1 == 1 +def test_a(caplog): + logger = logging.getLogger(__name__) + # caplog.set_level(logging.ERROR) # Set minimum log level to capture + logger.setLevel(logging.WARN) + + logger.debug("This is a debug message.") + logger.info("This is an info message.") + logger.warning("This is a warning message.") + logger.error("This is an error message.") + logger.critical("This is a critical message.") + assert False class SimpleClass(unittest.TestCase): def test_simple_unit(self): + print("expected printed output, stdout") + print("expected printed output, stderr", file=sys.stderr) assert True From 48b68a55034d643270ae6010ab858f27a5c4ef29 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 25 Sep 2023 09:55:08 -0700 Subject: [PATCH 04/23] add output channel msg --- .../tests/pytestadapter/.data/test_logging.py | 35 + .../tests/pytestadapter/test_execution.py | 378 +++---- .../testing/testController/common/server.ts | 21 +- .../pytest/pytestDiscoveryAdapter.ts | 10 +- .../pytest/pytestExecutionAdapter.ts | 9 +- .../testing/common/testingAdapter.test.ts | 956 +++++++++--------- 6 files changed, 734 insertions(+), 675 deletions(-) create mode 100644 pythonFiles/tests/pytestadapter/.data/test_logging.py diff --git a/pythonFiles/tests/pytestadapter/.data/test_logging.py b/pythonFiles/tests/pytestadapter/.data/test_logging.py new file mode 100644 index 000000000000..058ad8075718 --- /dev/null +++ b/pythonFiles/tests/pytestadapter/.data/test_logging.py @@ -0,0 +1,35 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+import logging +import sys + + +def test_logging2(caplog): + logger = logging.getLogger(__name__) + caplog.set_level(logging.DEBUG) # Set minimum log level to capture + + logger.debug("This is a debug message.") + logger.info("This is an info message.") + logger.warning("This is a warning message.") + logger.error("This is an error message.") + logger.critical("This is a critical message.") + + # Printing to stdout and stderr + print("This is a stdout message.") + print("This is a stderr message.", file=sys.stderr) + assert False + + +def test_logging(caplog): + logger = logging.getLogger(__name__) + caplog.set_level(logging.DEBUG) # Set minimum log level to capture + + logger.debug("This is a debug message.") + logger.info("This is an info message.") + logger.warning("This is a warning message.") + logger.error("This is an error message.") + logger.critical("This is a critical message.") + + # Printing to stdout and stderr + print("This is a stdout message.") + print("This is a stderr message.", file=sys.stderr) diff --git a/pythonFiles/tests/pytestadapter/test_execution.py b/pythonFiles/tests/pytestadapter/test_execution.py index 260221af48be..4c2ccb5c1fa7 100644 --- a/pythonFiles/tests/pytestadapter/test_execution.py +++ b/pythonFiles/tests/pytestadapter/test_execution.py @@ -11,210 +11,210 @@ from .helpers import TEST_DATA_PATH, runner, runner_with_cwd -# def test_config_file(): -# """Test pytest execution when a config file is specified.""" -# args = [ -# "-c", -# "tests/pytest.ini", -# str(TEST_DATA_PATH / "root" / "tests" / "test_a.py::test_a_function"), -# ] -# new_cwd = TEST_DATA_PATH / "root" -# actual = runner_with_cwd(args, new_cwd) -# expected_const = ( -# expected_execution_test_output.config_file_pytest_expected_execution_output -# ) -# assert actual -# actual_list: List[Dict[str, Any]] = actual -# assert actual_list.pop(-1).get("eot") -# assert len(actual_list) == len(expected_const) -# actual_result_dict = dict() -# if actual_list is not None: -# for actual_item in actual_list: -# assert all( -# item in actual_item.keys() for item in ("status", "cwd", "result") -# ) -# assert actual_item.get("status") == "success" -# assert actual_item.get("cwd") == os.fspath(new_cwd) -# actual_result_dict.update(actual_item["result"]) -# assert actual_result_dict == expected_const +def test_config_file(): + """Test pytest execution when a config file is specified.""" + args = [ + "-c", + "tests/pytest.ini", + str(TEST_DATA_PATH / "root" / "tests" / "test_a.py::test_a_function"), + ] + new_cwd = TEST_DATA_PATH / "root" + actual = runner_with_cwd(args, new_cwd) + expected_const = ( + expected_execution_test_output.config_file_pytest_expected_execution_output + ) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + assert len(actual_list) == len(expected_const) + actual_result_dict = dict() + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "result") + ) + assert actual_item.get("status") == "success" + assert actual_item.get("cwd") == os.fspath(new_cwd) + actual_result_dict.update(actual_item["result"]) + assert actual_result_dict == expected_const -# def test_rootdir_specified(): -# """Test pytest execution when a --rootdir is specified.""" -# rd = f"--rootdir={TEST_DATA_PATH / 'root' / 'tests'}" -# args = [rd, "tests/test_a.py::test_a_function"] -# new_cwd = TEST_DATA_PATH / "root" -# actual = runner_with_cwd(args, new_cwd) -# expected_const = ( -# 
expected_execution_test_output.config_file_pytest_expected_execution_output -# ) -# assert actual -# actual_list: List[Dict[str, Any]] = actual -# assert actual_list.pop(-1).get("eot") -# assert len(actual_list) == len(expected_const) -# actual_result_dict = dict() -# if actual_list is not None: -# for actual_item in actual_list: -# assert all( -# item in actual_item.keys() for item in ("status", "cwd", "result") -# ) -# assert actual_item.get("status") == "success" -# assert actual_item.get("cwd") == os.fspath(new_cwd) -# actual_result_dict.update(actual_item["result"]) -# assert actual_result_dict == expected_const +def test_rootdir_specified(): + """Test pytest execution when a --rootdir is specified.""" + rd = f"--rootdir={TEST_DATA_PATH / 'root' / 'tests'}" + args = [rd, "tests/test_a.py::test_a_function"] + new_cwd = TEST_DATA_PATH / "root" + actual = runner_with_cwd(args, new_cwd) + expected_const = ( + expected_execution_test_output.config_file_pytest_expected_execution_output + ) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + assert len(actual_list) == len(expected_const) + actual_result_dict = dict() + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "result") + ) + assert actual_item.get("status") == "success" + assert actual_item.get("cwd") == os.fspath(new_cwd) + actual_result_dict.update(actual_item["result"]) + assert actual_result_dict == expected_const -# def test_syntax_error_execution(tmp_path): -# """Test pytest execution on a file that has a syntax error. +def test_syntax_error_execution(tmp_path): + """Test pytest execution on a file that has a syntax error. -# Copies the contents of a .txt file to a .py file in the temporary directory -# to then run pytest execution on. + Copies the contents of a .txt file to a .py file in the temporary directory + to then run pytest execution on. -# The json should still be returned but the errors list should be present. + The json should still be returned but the errors list should be present. -# Keyword arguments: -# tmp_path -- pytest fixture that creates a temporary directory. -# """ -# # Saving some files as .txt to avoid that file displaying a syntax error for -# # the extension as a whole. Instead, rename it before running this test -# # in order to test the error handling. -# file_path = TEST_DATA_PATH / "error_syntax_discovery.txt" -# temp_dir = tmp_path / "temp_data" -# temp_dir.mkdir() -# p = temp_dir / "error_syntax_discovery.py" -# shutil.copyfile(file_path, p) -# actual = runner(["error_syntax_discover.py::test_function"]) -# assert actual -# actual_list: List[Dict[str, Any]] = actual -# assert actual_list.pop(-1).get("eot") -# if actual_list is not None: -# for actual_item in actual_list: -# assert all( -# item in actual_item.keys() for item in ("status", "cwd", "error") -# ) -# assert actual_item.get("status") == "error" -# assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) -# error_content = actual_item.get("error") -# if error_content is not None and isinstance( -# error_content, (list, tuple, str) -# ): # You can add other types if needed -# assert len(error_content) == 1 -# else: -# assert False + Keyword arguments: + tmp_path -- pytest fixture that creates a temporary directory. + """ + # Saving some files as .txt to avoid that file displaying a syntax error for + # the extension as a whole. 
Instead, rename it before running this test + # in order to test the error handling. + file_path = TEST_DATA_PATH / "error_syntax_discovery.txt" + temp_dir = tmp_path / "temp_data" + temp_dir.mkdir() + p = temp_dir / "error_syntax_discovery.py" + shutil.copyfile(file_path, p) + actual = runner(["error_syntax_discover.py::test_function"]) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "error") + ) + assert actual_item.get("status") == "error" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + error_content = actual_item.get("error") + if error_content is not None and isinstance( + error_content, (list, tuple, str) + ): # You can add other types if needed + assert len(error_content) == 1 + else: + assert False -# def test_bad_id_error_execution(): -# """Test pytest discovery with a non-existent test_id. +def test_bad_id_error_execution(): + """Test pytest discovery with a non-existent test_id. -# The json should still be returned but the errors list should be present. -# """ -# actual = runner(["not/a/real::test_id"]) -# assert actual -# actual_list: List[Dict[str, Any]] = actual -# assert actual_list.pop(-1).get("eot") -# if actual_list is not None: -# for actual_item in actual_list: -# assert all( -# item in actual_item.keys() for item in ("status", "cwd", "error") -# ) -# assert actual_item.get("status") == "error" -# assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) -# error_content = actual_item.get("error") -# if error_content is not None and isinstance( -# error_content, (list, tuple, str) -# ): # You can add other types if needed -# assert len(error_content) == 1 -# else: -# assert False + The json should still be returned but the errors list should be present. 
+ """ + actual = runner(["not/a/real::test_id"]) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "error") + ) + assert actual_item.get("status") == "error" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + error_content = actual_item.get("error") + if error_content is not None and isinstance( + error_content, (list, tuple, str) + ): # You can add other types if needed + assert len(error_content) == 1 + else: + assert False @pytest.mark.parametrize( "test_ids, expected_const", [ - # ( - # [ - # "skip_tests.py::test_something", - # "skip_tests.py::test_another_thing", - # "skip_tests.py::test_decorator_thing", - # "skip_tests.py::test_decorator_thing_2", - # "skip_tests.py::TestClass::test_class_function_a", - # "skip_tests.py::TestClass::test_class_function_b", - # ], - # expected_execution_test_output.skip_tests_execution_expected_output, - # ), - # ( - # ["error_raise_exception.py::TestSomething::test_a"], - # expected_execution_test_output.error_raised_exception_execution_expected_output, - # ), - # ( - # [ - # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - # "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", - # "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", - # "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers", - # ], - # expected_execution_test_output.uf_execution_expected_output, - # ), - # ( - # [ - # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - # "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", - # ], - # expected_execution_test_output.uf_single_file_expected_output, - # ), - # ( - # [ - # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - # ], - # expected_execution_test_output.uf_single_method_execution_expected_output, - # ), - # ( - # [ - # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - # "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", - # ], - # expected_execution_test_output.uf_non_adjacent_tests_execution_expected_output, - # ), - # ( - # [ - # "unittest_pytest_same_file.py::TestExample::test_true_unittest", - # "unittest_pytest_same_file.py::test_true_pytest", - # ], - # expected_execution_test_output.unit_pytest_same_file_execution_expected_output, - # ), - # ( - # [ - # "dual_level_nested_folder/test_top_folder.py::test_top_function_t", - # "dual_level_nested_folder/test_top_folder.py::test_top_function_f", - # "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t", - # "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f", - # ], - # expected_execution_test_output.dual_level_nested_folder_execution_expected_output, - # ), - # ( - # ["folder_a/folder_b/folder_a/test_nest.py::test_function"], - # expected_execution_test_output.double_nested_folder_expected_execution_output, - # ), - # ( - # [ - # "parametrize_tests.py::test_adding[3+5-8]", - # "parametrize_tests.py::test_adding[2+4-6]", - # "parametrize_tests.py::test_adding[6+9-16]", - # ], - # expected_execution_test_output.parametrize_tests_expected_execution_output, - # ), - # ( - # [ - # "parametrize_tests.py::test_adding[3+5-8]", - # ], - # 
expected_execution_test_output.single_parametrize_tests_expected_execution_output, - # ), - # ( - # [ - # "text_docstring.txt::text_docstring.txt", - # ], - # expected_execution_test_output.doctest_pytest_expected_execution_output, - # ), + ( + [ + "skip_tests.py::test_something", + "skip_tests.py::test_another_thing", + "skip_tests.py::test_decorator_thing", + "skip_tests.py::test_decorator_thing_2", + "skip_tests.py::TestClass::test_class_function_a", + "skip_tests.py::TestClass::test_class_function_b", + ], + expected_execution_test_output.skip_tests_execution_expected_output, + ), + ( + ["error_raise_exception.py::TestSomething::test_a"], + expected_execution_test_output.error_raised_exception_execution_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers", + ], + expected_execution_test_output.uf_execution_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + ], + expected_execution_test_output.uf_single_file_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + ], + expected_execution_test_output.uf_single_method_execution_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + ], + expected_execution_test_output.uf_non_adjacent_tests_execution_expected_output, + ), + ( + [ + "unittest_pytest_same_file.py::TestExample::test_true_unittest", + "unittest_pytest_same_file.py::test_true_pytest", + ], + expected_execution_test_output.unit_pytest_same_file_execution_expected_output, + ), + ( + [ + "dual_level_nested_folder/test_top_folder.py::test_top_function_t", + "dual_level_nested_folder/test_top_folder.py::test_top_function_f", + "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t", + "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f", + ], + expected_execution_test_output.dual_level_nested_folder_execution_expected_output, + ), + ( + ["folder_a/folder_b/folder_a/test_nest.py::test_function"], + expected_execution_test_output.double_nested_folder_expected_execution_output, + ), + ( + [ + "parametrize_tests.py::test_adding[3+5-8]", + "parametrize_tests.py::test_adding[2+4-6]", + "parametrize_tests.py::test_adding[6+9-16]", + ], + expected_execution_test_output.parametrize_tests_expected_execution_output, + ), + ( + [ + "parametrize_tests.py::test_adding[3+5-8]", + ], + expected_execution_test_output.single_parametrize_tests_expected_execution_output, + ), + ( + [ + "text_docstring.txt::text_docstring.txt", + ], + expected_execution_test_output.doctest_pytest_expected_execution_output, + ), ( ["test_logging.py::test_logging2", "test_logging.py::test_logging"], expected_execution_test_output.doctest_pytest_expected_execution_output, diff --git a/src/client/testing/testController/common/server.ts b/src/client/testing/testController/common/server.ts index a500a11ef658..144dc6079e63 100644 --- a/src/client/testing/testController/common/server.ts +++ 
b/src/client/testing/testController/common/server.ts @@ -240,29 +240,40 @@ export class PythonTestServer implements ITestServer, Disposable { result?.proc?.kill(); }); - // Take all output from the subprocess and add it to the test output channel. This will be the pytest output. // Displays output to user and ensure the subprocess doesn't run into buffer overflow. - // Discovery output should be sent to the output channel, run output should be sent to the test run instance. + // TODO: after a release, remove discovery output from the "Python Test Log" channel and send it to the "Python" channel instead. + // TOOD: after a release, remove run output from the "Python Test Log" channel and send it to the "Test Result" channel instead. if (isDiscovery) { result?.proc?.stdout?.on('data', (data) => { const out = fixLogLines(data.toString()); traceLog(out); + spawnOptions?.outputChannel?.append(`${out}`); }); result?.proc?.stderr?.on('data', (data) => { const out = fixLogLines(data.toString()); - traceLog(out); + traceError(out); + spawnOptions?.outputChannel?.append(`${out}`); }); } else { result?.proc?.stdout?.on('data', (data) => { - runInstance?.appendOutput(`${fixLogLines(data.toString())}\r\n`); + const out = fixLogLines(data.toString()); + runInstance?.appendOutput(`${out}\r\n`); + spawnOptions?.outputChannel?.append(out); }); result?.proc?.stderr?.on('data', (data) => { - runInstance?.appendOutput(`${fixLogLines(data.toString())}\r\n`); + const out = fixLogLines(data.toString()); + runInstance?.appendOutput(`${out}\r\n`); + spawnOptions?.outputChannel?.append(out); }); } result?.proc?.on('exit', (code, signal) => { // if the child has testIds then this is a run request + spawnOptions?.outputChannel?.append( + 'Starting now, all test run output will be sent to the Test Result panel' + + ' and test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' + + ' The "Python Test Log" channel will be deprecated within the next month. See ___ for details.', + ); if (code !== 0 && !isDiscovery) { traceError( `Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}. Creating and sending error execution payload`, diff --git a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts index 82ca4f4f4a51..e568023b5f59 100644 --- a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts +++ b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts @@ -93,17 +93,23 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter { // Take all output from the subprocess and add it to the test output channel. This will be the pytest output. // Displays output to user and ensure the subprocess doesn't run into buffer overflow. + // TODO: after a release, remove discovery output from the "Python Test Log" channel and send it to the "Python" channel instead. 
result?.proc?.stdout?.on('data', (data) => { const out = fixLogLines(data.toString()); traceLog(out); - // spawnOptions.outputChannel?.append(data.toString()); + spawnOptions?.outputChannel?.append(`${out}`); }); result?.proc?.stderr?.on('data', (data) => { const out = fixLogLines(data.toString()); traceError(out); - // spawnOptions.outputChannel?.append(data.toString()); + spawnOptions?.outputChannel?.append(`${out}`); }); result?.proc?.on('exit', (code, signal) => { + this.outputChannel?.append( + 'Starting now, all test run output will be sent to the Test Result panel' + + ' and test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' + + ' The "Python Test Log" channel will be deprecated within the next month. See ___ for details.', + ); if (code !== 0) { traceError( `Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}. Creating and sending error discovery payload`, diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index adb6f51f3ce4..5ef4c5e28951 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -180,17 +180,24 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { // Take all output from the subprocess and add it to the test output channel. This will be the pytest output. // Displays output to user and ensure the subprocess doesn't run into buffer overflow. + // TOODO: after a release, remove run output from the "Python Test Log" channel and send it to the "Test Result" channel instead. result?.proc?.stdout?.on('data', (data) => { const out = utils.fixLogLines(data.toString()); runInstance?.appendOutput(`${out}\r\n`); + this.outputChannel?.append(out); }); result?.proc?.stderr?.on('data', (data) => { const out = utils.fixLogLines(data.toString()); runInstance?.appendOutput(`${out}\r\n`); + this.outputChannel?.append(out); }); result?.proc?.on('exit', (code, signal) => { - traceInfo('Test run finished, subprocess exited.'); + this.outputChannel?.append( + 'Starting now, all test run output will be sent to the Test Result panel' + + ' and test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' + + ' The "Python Test Log" channel will be deprecated within the next month. See ___ for details.', + ); // if the child has testIds then this is a run request if (code !== 0 && testIds) { traceError( diff --git a/src/test/testing/common/testingAdapter.test.ts b/src/test/testing/common/testingAdapter.test.ts index 11215f30d48b..61ad482fc95a 100644 --- a/src/test/testing/common/testingAdapter.test.ts +++ b/src/test/testing/common/testingAdapter.test.ts @@ -93,484 +93,484 @@ suite('End to End Tests: test adapters', () => { teardown(async () => { pythonTestServer.dispose(); }); - // test('unittest discovery adapter small workspace', async () => { - // // result resolver and saved data for assertions - // let actualData: { - // cwd: string; - // tests?: unknown; - // status: 'success' | 'error'; - // error?: string[]; - // }; - // workspaceUri = Uri.parse(rootPathSmallWorkspace); - // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - // let callCount = 0; - // resultResolver._resolveDiscovery = async (payload, _token?) 
=> { - // traceLog(`resolveDiscovery ${payload}`); - // callCount = callCount + 1; - // actualData = payload; - // return Promise.resolve(); - // }; - - // // set workspace to test workspace folder and set up settings - - // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; - - // // run unittest discovery - // const discoveryAdapter = new UnittestTestDiscoveryAdapter( - // pythonTestServer, - // configService, - // testOutputChannel.object, - // resultResolver, - // ); - - // await discoveryAdapter.discoverTests(workspaceUri).finally(() => { - // // verification after discovery is complete - - // // 1. Check the status is "success" - // assert.strictEqual( - // actualData.status, - // 'success', - // `Expected status to be 'success' instead status is ${actualData.status}`, - // ); - // // 2. Confirm no errors - // assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); - // // 3. Confirm tests are found - // assert.ok(actualData.tests, 'Expected tests to be present'); - - // assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); - // }); - // }); - - // test('unittest discovery adapter large workspace', async () => { - // // result resolver and saved data for assertions - // let actualData: { - // cwd: string; - // tests?: unknown; - // status: 'success' | 'error'; - // error?: string[]; - // }; - // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - // let callCount = 0; - // resultResolver._resolveDiscovery = async (payload, _token?) => { - // traceLog(`resolveDiscovery ${payload}`); - // callCount = callCount + 1; - // actualData = payload; - // return Promise.resolve(); - // }; - - // // set settings to work for the given workspace - // workspaceUri = Uri.parse(rootPathLargeWorkspace); - // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; - // // run discovery - // const discoveryAdapter = new UnittestTestDiscoveryAdapter( - // pythonTestServer, - // configService, - // testOutputChannel.object, - // resultResolver, - // ); - - // await discoveryAdapter.discoverTests(workspaceUri).finally(() => { - // // 1. Check the status is "success" - // assert.strictEqual( - // actualData.status, - // 'success', - // `Expected status to be 'success' instead status is ${actualData.status}`, - // ); - // // 2. Confirm no errors - // assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); - // // 3. Confirm tests are found - // assert.ok(actualData.tests, 'Expected tests to be present'); - - // assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); - // }); - // }); - // test('pytest discovery adapter small workspace', async () => { - // // result resolver and saved data for assertions - // let actualData: { - // cwd: string; - // tests?: unknown; - // status: 'success' | 'error'; - // error?: string[]; - // }; - // resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); - // let callCount = 0; - // resultResolver._resolveDiscovery = async (payload, _token?) 
=> { - // traceLog(`resolveDiscovery ${payload}`); - // callCount = callCount + 1; - // actualData = payload; - // return Promise.resolve(); - // }; - // // run pytest discovery - // const discoveryAdapter = new PytestTestDiscoveryAdapter( - // pythonTestServer, - // configService, - // testOutputChannel.object, - // resultResolver, - // ); - - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathSmallWorkspace); - // await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { - // // verification after discovery is complete - - // // 1. Check the status is "success" - // assert.strictEqual( - // actualData.status, - // 'success', - // `Expected status to be 'success' instead status is ${actualData.status}`, - // ); // 2. Confirm no errors - // assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); - // // 3. Confirm tests are found - // assert.ok(actualData.tests, 'Expected tests to be present'); - - // assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); - // }); - // }); - // test('pytest discovery adapter large workspace', async () => { - // // result resolver and saved data for assertions - // let actualData: { - // cwd: string; - // tests?: unknown; - // status: 'success' | 'error'; - // error?: string[]; - // }; - // resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); - // let callCount = 0; - // resultResolver._resolveDiscovery = async (payload, _token?) => { - // traceLog(`resolveDiscovery ${payload}`); - // callCount = callCount + 1; - // actualData = payload; - // return Promise.resolve(); - // }; - // // run pytest discovery - // const discoveryAdapter = new PytestTestDiscoveryAdapter( - // pythonTestServer, - // configService, - // testOutputChannel.object, - // resultResolver, - // ); - - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathLargeWorkspace); - - // await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { - // // verification after discovery is complete - // // 1. Check the status is "success" - // assert.strictEqual( - // actualData.status, - // 'success', - // `Expected status to be 'success' instead status is ${actualData.status}`, - // ); // 2. Confirm no errors - // assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); - // // 3. Confirm tests are found - // assert.ok(actualData.tests, 'Expected tests to be present'); - - // assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); - // }); - // }); - // test('unittest execution adapter small workspace', async () => { - // // result resolver and saved data for assertions - // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - // let callCount = 0; - // let failureOccurred = false; - // let failureMsg = ''; - // resultResolver._resolveExecution = async (payload, _token?) => { - // traceLog(`resolveDiscovery ${payload}`); - // callCount = callCount + 1; - // // the payloads that get to the _resolveExecution are all data and should be successful. - // try { - // assert.strictEqual( - // payload.status, - // 'success', - // `Expected status to be 'success', instead status is ${payload.status}`, - // ); - // assert.ok(payload.result, 'Expected results to be present'); - // } catch (err) { - // failureMsg = err ? 
(err as Error).toString() : ''; - // failureOccurred = true; - // } - // return Promise.resolve(); - // }; - - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathSmallWorkspace); - // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; - // // run execution - // const executionAdapter = new UnittestTestExecutionAdapter( - // pythonTestServer, - // configService, - // testOutputChannel.object, - // resultResolver, - // ); - // const testRun = typeMoq.Mock.ofType(); - // testRun - // .setup((t) => t.token) - // .returns( - // () => - // ({ - // onCancellationRequested: () => undefined, - // } as any), - // ); - // let collectedOutput = ''; - // testRun - // .setup((t) => t.appendOutput(typeMoq.It.isAny())) - // .callback((output: string) => { - // collectedOutput += output; - // traceLog('appendOutput was called with:', output); - // }) - // .returns(() => false); - // await executionAdapter - // .runTests(workspaceUri, ['test_simple.SimpleClass.test_simple_unit'], false, testRun.object) - // .finally(() => { - // // verify that the _resolveExecution was called once per test - // assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); - // assert.strictEqual(failureOccurred, false, failureMsg); - - // // verify output works for stdout and stderr as well as unittest output - // assert.ok( - // collectedOutput.includes('expected printed output, stdout'), - // 'The test string does not contain the expected stdout output.', - // ); - // assert.ok( - // collectedOutput.includes('expected printed output, stderr'), - // 'The test string does not contain the expected stderr output.', - // ); - // assert.ok( - // collectedOutput.includes('Ran 1 test in'), - // 'The test string does not contain the expected unittest output.', - // ); - // }); - // }); - // test('unittest execution adapter large workspace', async () => { - // // result resolver and saved data for assertions - // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - // let callCount = 0; - // let failureOccurred = false; - // let failureMsg = ''; - // resultResolver._resolveExecution = async (payload, _token?) => { - // traceLog(`resolveDiscovery ${payload}`); - // callCount = callCount + 1; - // // the payloads that get to the _resolveExecution are all data and should be successful. - // try { - // const validStatuses = ['subtest-success', 'subtest-failure']; - // assert.ok( - // validStatuses.includes(payload.status), - // `Expected status to be one of ${validStatuses.join(', ')}, but instead status is ${payload.status}`, - // ); - // assert.ok(payload.result, 'Expected results to be present'); - // } catch (err) { - // failureMsg = err ? 
(err as Error).toString() : ''; - // failureOccurred = true; - // } - // return Promise.resolve(); - // }; - - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathLargeWorkspace); - // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; - - // // run unittest execution - // const executionAdapter = new UnittestTestExecutionAdapter( - // pythonTestServer, - // configService, - // testOutputChannel.object, - // resultResolver, - // ); - // const testRun = typeMoq.Mock.ofType(); - // testRun - // .setup((t) => t.token) - // .returns( - // () => - // ({ - // onCancellationRequested: () => undefined, - // } as any), - // ); - // let collectedOutput = ''; - // testRun - // .setup((t) => t.appendOutput(typeMoq.It.isAny())) - // .callback((output: string) => { - // collectedOutput += output; - // traceLog('appendOutput was called with:', output); - // // File "/Users/eleanorboyd/vscode-python/src/testTestingRootWkspc/largeWorkspace/test_parameterized_subtest.py" - // // FAILED((failures = 1000)); - // }) - // .returns(() => false); - // await executionAdapter - // .runTests(workspaceUri, ['test_parameterized_subtest.NumbersTest.test_even'], false, testRun.object) - // .then(() => { - // // verify that the _resolveExecution was called once per test - // assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); - // assert.strictEqual(failureOccurred, false, failureMsg); - - // // verify output - // assert.ok( - // collectedOutput.includes('testTestingRootWkspc/largeWorkspace/test_parameterized_subtest.py'), - // 'The test string does not contain the correct test name which should be printed', - // ); - // assert.ok( - // collectedOutput.includes('FAILED (failures=1000)'), - // 'The test string does not contain the last of the unittest output', - // ); - // }); - // }); - // test('pytest execution adapter small workspace', async () => { - // // result resolver and saved data for assertions - // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - // let callCount = 0; - // let failureOccurred = false; - // let failureMsg = ''; - // resultResolver._resolveExecution = async (payload, _token?) => { - // traceLog(`resolveDiscovery ${payload}`); - // callCount = callCount + 1; - // // the payloads that get to the _resolveExecution are all data and should be successful. - // try { - // assert.strictEqual( - // payload.status, - // 'success', - // `Expected status to be 'success', instead status is ${payload.status}`, - // ); - // assert.ok(payload.result, 'Expected results to be present'); - // } catch (err) { - // failureMsg = err ? 
(err as Error).toString() : ''; - // failureOccurred = true; - // } - // return Promise.resolve(); - // }; - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathSmallWorkspace); - - // // run pytest execution - // const executionAdapter = new PytestTestExecutionAdapter( - // pythonTestServer, - // configService, - // testOutputChannel.object, - // resultResolver, - // ); - // const testRun = typeMoq.Mock.ofType(); - // testRun - // .setup((t) => t.token) - // .returns( - // () => - // ({ - // onCancellationRequested: () => undefined, - // } as any), - // ); - // let collectedOutput = ''; - // testRun - // .setup((t) => t.appendOutput(typeMoq.It.isAny())) - // .callback((output: string) => { - // collectedOutput += output; - // traceLog('appendOutput was called with:', output); - // }) - // .returns(() => false); - // await executionAdapter - // .runTests( - // workspaceUri, - // [`${rootPathSmallWorkspace}/test_simple.py::test_a`], - // false, - // testRun.object, - // pythonExecFactory, - // ) - // .then(() => { - // // verify that the _resolveExecution was called once per test - // assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); - // assert.strictEqual(failureOccurred, false, failureMsg); - - // // verify output works for stdout and stderr as well as pytest output - // assert.ok( - // collectedOutput.includes('test session starts'), - // 'The test string does not contain the expected stdout output.', - // ); - // assert.ok( - // collectedOutput.includes('Captured log call'), - // 'The test string does not contain the expected log section.', - // ); - // const searchStrings = [ - // 'This is a warning message.', - // 'This is an error message.', - // 'This is a critical message.', - // ]; - // let searchString: string; - // for (searchString of searchStrings) { - // const count: number = (collectedOutput.match(new RegExp(searchString, 'g')) || []).length; - // assert.strictEqual( - // count, - // 2, - // `The test string does not contain two instances of ${searchString}. Should appear twice from logging output and stack trace`, - // ); - // } - // }); - // }); - // test('pytest execution adapter large workspace', async () => { - // // result resolver and saved data for assertions - // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - // let callCount = 0; - // let failureOccurred = false; - // let failureMsg = ''; - // resultResolver._resolveExecution = async (payload, _token?) => { - // traceLog(`resolveDiscovery ${payload}`); - // callCount = callCount + 1; - // // the payloads that get to the _resolveExecution are all data and should be successful. - // try { - // assert.strictEqual( - // payload.status, - // 'success', - // `Expected status to be 'success', instead status is ${payload.status}`, - // ); - // assert.ok(payload.result, 'Expected results to be present'); - // } catch (err) { - // failureMsg = err ? 
(err as Error).toString() : ''; - // failureOccurred = true; - // } - // return Promise.resolve(); - // }; - - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathLargeWorkspace); - - // // generate list of test_ids - // const testIds: string[] = []; - // for (let i = 0; i < 2000; i = i + 1) { - // const testId = `${rootPathLargeWorkspace}/test_parameterized_subtest.py::test_odd_even[${i}]`; - // testIds.push(testId); - // } - - // // run pytest execution - // const executionAdapter = new PytestTestExecutionAdapter( - // pythonTestServer, - // configService, - // testOutputChannel.object, - // resultResolver, - // ); - // const testRun = typeMoq.Mock.ofType(); - // testRun - // .setup((t) => t.token) - // .returns( - // () => - // ({ - // onCancellationRequested: () => undefined, - // } as any), - // ); - // let collectedOutput = ''; - // testRun - // .setup((t) => t.appendOutput(typeMoq.It.isAny())) - // .callback((output: string) => { - // collectedOutput += output; - // traceLog('appendOutput was called with:', output); - // }) - // .returns(() => false); - // await executionAdapter.runTests(workspaceUri, testIds, false, testRun.object, pythonExecFactory).then(() => { - // // verify that the _resolveExecution was called once per test - // assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); - // assert.strictEqual(failureOccurred, false, failureMsg); - - // // verify output works for large repo - // assert.ok( - // collectedOutput.includes('test session starts'), - // 'The test string does not contain the expected stdout output from pytest.', - // ); - // }); - // }); + test('unittest discovery adapter small workspace', async () => { + // result resolver and saved data for assertions + let actualData: { + cwd: string; + tests?: unknown; + status: 'success' | 'error'; + error?: string[]; + }; + workspaceUri = Uri.parse(rootPathSmallWorkspace); + resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + let callCount = 0; + resultResolver._resolveDiscovery = async (payload, _token?) => { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + actualData = payload; + return Promise.resolve(); + }; + + // set workspace to test workspace folder and set up settings + + configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + + // run unittest discovery + const discoveryAdapter = new UnittestTestDiscoveryAdapter( + pythonTestServer, + configService, + testOutputChannel.object, + resultResolver, + ); + + await discoveryAdapter.discoverTests(workspaceUri).finally(() => { + // verification after discovery is complete + + // 1. Check the status is "success" + assert.strictEqual( + actualData.status, + 'success', + `Expected status to be 'success' instead status is ${actualData.status}`, + ); + // 2. Confirm no errors + assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); + // 3. 
Confirm tests are found + assert.ok(actualData.tests, 'Expected tests to be present'); + + assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); + }); + }); + + test('unittest discovery adapter large workspace', async () => { + // result resolver and saved data for assertions + let actualData: { + cwd: string; + tests?: unknown; + status: 'success' | 'error'; + error?: string[]; + }; + resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + let callCount = 0; + resultResolver._resolveDiscovery = async (payload, _token?) => { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + actualData = payload; + return Promise.resolve(); + }; + + // set settings to work for the given workspace + workspaceUri = Uri.parse(rootPathLargeWorkspace); + configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + // run discovery + const discoveryAdapter = new UnittestTestDiscoveryAdapter( + pythonTestServer, + configService, + testOutputChannel.object, + resultResolver, + ); + + await discoveryAdapter.discoverTests(workspaceUri).finally(() => { + // 1. Check the status is "success" + assert.strictEqual( + actualData.status, + 'success', + `Expected status to be 'success' instead status is ${actualData.status}`, + ); + // 2. Confirm no errors + assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); + // 3. Confirm tests are found + assert.ok(actualData.tests, 'Expected tests to be present'); + + assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); + }); + }); + test('pytest discovery adapter small workspace', async () => { + // result resolver and saved data for assertions + let actualData: { + cwd: string; + tests?: unknown; + status: 'success' | 'error'; + error?: string[]; + }; + resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); + let callCount = 0; + resultResolver._resolveDiscovery = async (payload, _token?) => { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + actualData = payload; + return Promise.resolve(); + }; + // run pytest discovery + const discoveryAdapter = new PytestTestDiscoveryAdapter( + pythonTestServer, + configService, + testOutputChannel.object, + resultResolver, + ); + + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathSmallWorkspace); + await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { + // verification after discovery is complete + + // 1. Check the status is "success" + assert.strictEqual( + actualData.status, + 'success', + `Expected status to be 'success' instead status is ${actualData.status}`, + ); // 2. Confirm no errors + assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); + // 3. Confirm tests are found + assert.ok(actualData.tests, 'Expected tests to be present'); + + assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); + }); + }); + test('pytest discovery adapter large workspace', async () => { + // result resolver and saved data for assertions + let actualData: { + cwd: string; + tests?: unknown; + status: 'success' | 'error'; + error?: string[]; + }; + resultResolver = new PythonResultResolver(testController, pytestProvider, workspaceUri); + let callCount = 0; + resultResolver._resolveDiscovery = async (payload, _token?) 
=> { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + actualData = payload; + return Promise.resolve(); + }; + // run pytest discovery + const discoveryAdapter = new PytestTestDiscoveryAdapter( + pythonTestServer, + configService, + testOutputChannel.object, + resultResolver, + ); + + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathLargeWorkspace); + + await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { + // verification after discovery is complete + // 1. Check the status is "success" + assert.strictEqual( + actualData.status, + 'success', + `Expected status to be 'success' instead status is ${actualData.status}`, + ); // 2. Confirm no errors + assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); + // 3. Confirm tests are found + assert.ok(actualData.tests, 'Expected tests to be present'); + + assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); + }); + }); + test('unittest execution adapter small workspace', async () => { + // result resolver and saved data for assertions + resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + let callCount = 0; + let failureOccurred = false; + let failureMsg = ''; + resultResolver._resolveExecution = async (payload, _token?) => { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + // the payloads that get to the _resolveExecution are all data and should be successful. + try { + assert.strictEqual( + payload.status, + 'success', + `Expected status to be 'success', instead status is ${payload.status}`, + ); + assert.ok(payload.result, 'Expected results to be present'); + } catch (err) { + failureMsg = err ? 
(err as Error).toString() : ''; + failureOccurred = true; + } + return Promise.resolve(); + }; + + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathSmallWorkspace); + configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + // run execution + const executionAdapter = new UnittestTestExecutionAdapter( + pythonTestServer, + configService, + testOutputChannel.object, + resultResolver, + ); + const testRun = typeMoq.Mock.ofType(); + testRun + .setup((t) => t.token) + .returns( + () => + ({ + onCancellationRequested: () => undefined, + } as any), + ); + let collectedOutput = ''; + testRun + .setup((t) => t.appendOutput(typeMoq.It.isAny())) + .callback((output: string) => { + collectedOutput += output; + traceLog('appendOutput was called with:', output); + }) + .returns(() => false); + await executionAdapter + .runTests(workspaceUri, ['test_simple.SimpleClass.test_simple_unit'], false, testRun.object) + .finally(() => { + // verify that the _resolveExecution was called once per test + assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); + assert.strictEqual(failureOccurred, false, failureMsg); + + // verify output works for stdout and stderr as well as unittest output + assert.ok( + collectedOutput.includes('expected printed output, stdout'), + 'The test string does not contain the expected stdout output.', + ); + assert.ok( + collectedOutput.includes('expected printed output, stderr'), + 'The test string does not contain the expected stderr output.', + ); + assert.ok( + collectedOutput.includes('Ran 1 test in'), + 'The test string does not contain the expected unittest output.', + ); + }); + }); + test('unittest execution adapter large workspace', async () => { + // result resolver and saved data for assertions + resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + let callCount = 0; + let failureOccurred = false; + let failureMsg = ''; + resultResolver._resolveExecution = async (payload, _token?) => { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + // the payloads that get to the _resolveExecution are all data and should be successful. + try { + const validStatuses = ['subtest-success', 'subtest-failure']; + assert.ok( + validStatuses.includes(payload.status), + `Expected status to be one of ${validStatuses.join(', ')}, but instead status is ${payload.status}`, + ); + assert.ok(payload.result, 'Expected results to be present'); + } catch (err) { + failureMsg = err ? 
(err as Error).toString() : ''; + failureOccurred = true; + } + return Promise.resolve(); + }; + + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathLargeWorkspace); + configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + + // run unittest execution + const executionAdapter = new UnittestTestExecutionAdapter( + pythonTestServer, + configService, + testOutputChannel.object, + resultResolver, + ); + const testRun = typeMoq.Mock.ofType(); + testRun + .setup((t) => t.token) + .returns( + () => + ({ + onCancellationRequested: () => undefined, + } as any), + ); + let collectedOutput = ''; + testRun + .setup((t) => t.appendOutput(typeMoq.It.isAny())) + .callback((output: string) => { + collectedOutput += output; + traceLog('appendOutput was called with:', output); + // File "/Users/eleanorboyd/vscode-python/src/testTestingRootWkspc/largeWorkspace/test_parameterized_subtest.py" + // FAILED((failures = 1000)); + }) + .returns(() => false); + await executionAdapter + .runTests(workspaceUri, ['test_parameterized_subtest.NumbersTest.test_even'], false, testRun.object) + .then(() => { + // verify that the _resolveExecution was called once per test + assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); + assert.strictEqual(failureOccurred, false, failureMsg); + + // verify output + assert.ok( + collectedOutput.includes('testTestingRootWkspc/largeWorkspace/test_parameterized_subtest.py'), + 'The test string does not contain the correct test name which should be printed', + ); + assert.ok( + collectedOutput.includes('FAILED (failures=1000)'), + 'The test string does not contain the last of the unittest output', + ); + }); + }); + test('pytest execution adapter small workspace', async () => { + // result resolver and saved data for assertions + resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + let callCount = 0; + let failureOccurred = false; + let failureMsg = ''; + resultResolver._resolveExecution = async (payload, _token?) => { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + // the payloads that get to the _resolveExecution are all data and should be successful. + try { + assert.strictEqual( + payload.status, + 'success', + `Expected status to be 'success', instead status is ${payload.status}`, + ); + assert.ok(payload.result, 'Expected results to be present'); + } catch (err) { + failureMsg = err ? 
(err as Error).toString() : ''; + failureOccurred = true; + } + return Promise.resolve(); + }; + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathSmallWorkspace); + + // run pytest execution + const executionAdapter = new PytestTestExecutionAdapter( + pythonTestServer, + configService, + testOutputChannel.object, + resultResolver, + ); + const testRun = typeMoq.Mock.ofType(); + testRun + .setup((t) => t.token) + .returns( + () => + ({ + onCancellationRequested: () => undefined, + } as any), + ); + let collectedOutput = ''; + testRun + .setup((t) => t.appendOutput(typeMoq.It.isAny())) + .callback((output: string) => { + collectedOutput += output; + traceLog('appendOutput was called with:', output); + }) + .returns(() => false); + await executionAdapter + .runTests( + workspaceUri, + [`${rootPathSmallWorkspace}/test_simple.py::test_a`], + false, + testRun.object, + pythonExecFactory, + ) + .then(() => { + // verify that the _resolveExecution was called once per test + assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); + assert.strictEqual(failureOccurred, false, failureMsg); + + // verify output works for stdout and stderr as well as pytest output + assert.ok( + collectedOutput.includes('test session starts'), + 'The test string does not contain the expected stdout output.', + ); + assert.ok( + collectedOutput.includes('Captured log call'), + 'The test string does not contain the expected log section.', + ); + const searchStrings = [ + 'This is a warning message.', + 'This is an error message.', + 'This is a critical message.', + ]; + let searchString: string; + for (searchString of searchStrings) { + const count: number = (collectedOutput.match(new RegExp(searchString, 'g')) || []).length; + assert.strictEqual( + count, + 2, + `The test string does not contain two instances of ${searchString}. Should appear twice from logging output and stack trace`, + ); + } + }); + }); + test('pytest execution adapter large workspace', async () => { + // result resolver and saved data for assertions + resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + let callCount = 0; + let failureOccurred = false; + let failureMsg = ''; + resultResolver._resolveExecution = async (payload, _token?) => { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + // the payloads that get to the _resolveExecution are all data and should be successful. + try { + assert.strictEqual( + payload.status, + 'success', + `Expected status to be 'success', instead status is ${payload.status}`, + ); + assert.ok(payload.result, 'Expected results to be present'); + } catch (err) { + failureMsg = err ? 
(err as Error).toString() : ''; + failureOccurred = true; + } + return Promise.resolve(); + }; + + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathLargeWorkspace); + + // generate list of test_ids + const testIds: string[] = []; + for (let i = 0; i < 2000; i = i + 1) { + const testId = `${rootPathLargeWorkspace}/test_parameterized_subtest.py::test_odd_even[${i}]`; + testIds.push(testId); + } + + // run pytest execution + const executionAdapter = new PytestTestExecutionAdapter( + pythonTestServer, + configService, + testOutputChannel.object, + resultResolver, + ); + const testRun = typeMoq.Mock.ofType(); + testRun + .setup((t) => t.token) + .returns( + () => + ({ + onCancellationRequested: () => undefined, + } as any), + ); + let collectedOutput = ''; + testRun + .setup((t) => t.appendOutput(typeMoq.It.isAny())) + .callback((output: string) => { + collectedOutput += output; + traceLog('appendOutput was called with:', output); + }) + .returns(() => false); + await executionAdapter.runTests(workspaceUri, testIds, false, testRun.object, pythonExecFactory).then(() => { + // verify that the _resolveExecution was called once per test + assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); + assert.strictEqual(failureOccurred, false, failureMsg); + + // verify output works for large repo + assert.ok( + collectedOutput.includes('test session starts'), + 'The test string does not contain the expected stdout output from pytest.', + ); + }); + }); test('unittest discovery adapter seg fault error handling', async () => { resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); let callCount = 0; From 83934c789ea6daf0e154d0e431b9d31aabaab1f2 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 25 Sep 2023 10:51:50 -0700 Subject: [PATCH 05/23] add test python side --- .../expected_execution_test_output.py | 28 +++++++++++++++++ .../tests/pytestadapter/test_execution.py | 30 ++++++++++++------- 2 files changed, 47 insertions(+), 11 deletions(-) diff --git a/pythonFiles/tests/pytestadapter/expected_execution_test_output.py b/pythonFiles/tests/pytestadapter/expected_execution_test_output.py index 76d21b3e2518..3fdb7b45a0c0 100644 --- a/pythonFiles/tests/pytestadapter/expected_execution_test_output.py +++ b/pythonFiles/tests/pytestadapter/expected_execution_test_output.py @@ -596,3 +596,31 @@ "subtest": None, } } + + +# This is the expected output for the test logging file. +# └── test_logging.py +# └── test_logging2: failure +# └── test_logging: success +test_logging_path = TEST_DATA_PATH / "test_logging.py" + +logging_test_expected_execution_output = { + get_absolute_test_id("test_logging.py::test_logging2", test_logging_path): { + "test": get_absolute_test_id( + "test_logging.py::test_logging2", test_logging_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("test_logging.py::test_logging", test_logging_path): { + "test": get_absolute_test_id( + "test_logging.py::test_logging", test_logging_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} diff --git a/pythonFiles/tests/pytestadapter/test_execution.py b/pythonFiles/tests/pytestadapter/test_execution.py index 4c2ccb5c1fa7..540db358e34f 100644 --- a/pythonFiles/tests/pytestadapter/test_execution.py +++ b/pythonFiles/tests/pytestadapter/test_execution.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# Licensed under the MIT License. +import json import os import shutil from typing import Any, Dict, List @@ -10,6 +11,9 @@ from .helpers import TEST_DATA_PATH, runner, runner_with_cwd +# uncomment this line to skip all tests in this module +# pytestmark = pytest.mark.skip(reason="Skipping all tests in this module") + def test_config_file(): """Test pytest execution when a config file is specified.""" @@ -217,7 +221,7 @@ def test_bad_id_error_execution(): ), ( ["test_logging.py::test_logging2", "test_logging.py::test_logging"], - expected_execution_test_output.doctest_pytest_expected_execution_output, + expected_execution_test_output.logging_test_expected_execution_output, ), ], ) @@ -225,23 +229,27 @@ def test_pytest_execution(test_ids, expected_const): """ Test that pytest discovery works as expected where run pytest is always successful but the actual test results are both successes and failures.: - 1. uf_execution_expected_output: unittest tests run on multiple files. - 2. uf_single_file_expected_output: test run on a single file. - 3. uf_single_method_execution_expected_output: test run on a single method in a file. - 4. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer. - 5. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests. - 6. dual_level_nested_folder_execution_expected_output: test run on a file with one test file + 1: skip_tests_execution_expected_output: test run on a file with skipped tests. + 2. error_raised_exception_execution_expected_output: test run on a file that raises an exception. + 3. uf_execution_expected_output: unittest tests run on multiple files. + 4. uf_single_file_expected_output: test run on a single file. + 5. uf_single_method_execution_expected_output: test run on a single method in a file. + 6. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer. + 7. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests. + 8. dual_level_nested_folder_execution_expected_output: test run on a file with one test file at the top level and one test file in a nested folder. - 7. double_nested_folder_expected_execution_output: test run on a double nested folder. - 8. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs. - 9. single_parametrize_tests_expected_execution_output: test run on single parametrize test. - 10. doctest_pytest_expected_execution_output: test run on doctest file. + 9. double_nested_folder_expected_execution_output: test run on a double nested folder. + 10. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs. + 11. single_parametrize_tests_expected_execution_output: test run on single parametrize test. + 12. doctest_pytest_expected_execution_output: test run on doctest file. + 13. logging_test_expected_execution_output: test run on a file with logging. Keyword arguments: test_ids -- an array of test_ids to run. expected_const -- a dictionary of the expected output from running pytest discovery on the files. 
""" + print("Test IDs: ", test_ids) args = test_ids actual = runner(args) assert actual From 93196a341dbc74b6380e5aece172f74d6c03c8d9 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 25 Sep 2023 11:00:12 -0700 Subject: [PATCH 06/23] fix new line --- src/client/testing/testController/common/server.ts | 4 ++-- .../testing/testController/pytest/pytestExecutionAdapter.ts | 4 ++-- src/test/testing/common/testingAdapter.test.ts | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/client/testing/testController/common/server.ts b/src/client/testing/testController/common/server.ts index 144dc6079e63..4e36e02c8774 100644 --- a/src/client/testing/testController/common/server.ts +++ b/src/client/testing/testController/common/server.ts @@ -257,12 +257,12 @@ export class PythonTestServer implements ITestServer, Disposable { } else { result?.proc?.stdout?.on('data', (data) => { const out = fixLogLines(data.toString()); - runInstance?.appendOutput(`${out}\r\n`); + runInstance?.appendOutput(`${out}`); spawnOptions?.outputChannel?.append(out); }); result?.proc?.stderr?.on('data', (data) => { const out = fixLogLines(data.toString()); - runInstance?.appendOutput(`${out}\r\n`); + runInstance?.appendOutput(`${out}`); spawnOptions?.outputChannel?.append(out); }); } diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index 5ef4c5e28951..24b0b3a1c65f 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -183,12 +183,12 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { // TOODO: after a release, remove run output from the "Python Test Log" channel and send it to the "Test Result" channel instead. 
result?.proc?.stdout?.on('data', (data) => { const out = utils.fixLogLines(data.toString()); - runInstance?.appendOutput(`${out}\r\n`); + runInstance?.appendOutput(`${out}`); this.outputChannel?.append(out); }); result?.proc?.stderr?.on('data', (data) => { const out = utils.fixLogLines(data.toString()); - runInstance?.appendOutput(`${out}\r\n`); + runInstance?.appendOutput(`${out}`); this.outputChannel?.append(out); }); diff --git a/src/test/testing/common/testingAdapter.test.ts b/src/test/testing/common/testingAdapter.test.ts index 61ad482fc95a..c14755e2afbe 100644 --- a/src/test/testing/common/testingAdapter.test.ts +++ b/src/test/testing/common/testingAdapter.test.ts @@ -268,7 +268,7 @@ suite('End to End Tests: test adapters', () => { assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); }); }); - test('unittest execution adapter small workspace', async () => { + test('unittest execution adapter small workspace with correct output', async () => { // result resolver and saved data for assertions resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); let callCount = 0; @@ -413,7 +413,7 @@ suite('End to End Tests: test adapters', () => { ); }); }); - test('pytest execution adapter small workspace', async () => { + test('pytest execution adapter small workspace with correct output', async () => { // result resolver and saved data for assertions resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); let callCount = 0; From 3fc1f20be5e8bffe4058248700d3b94ccf1628fd Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 25 Sep 2023 11:09:50 -0700 Subject: [PATCH 07/23] update with comment --- pythonFiles/tests/pytestadapter/test_discovery.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pythonFiles/tests/pytestadapter/test_discovery.py b/pythonFiles/tests/pytestadapter/test_discovery.py index 81e42a5e9ed7..3e918c4fc072 100644 --- a/pythonFiles/tests/pytestadapter/test_discovery.py +++ b/pythonFiles/tests/pytestadapter/test_discovery.py @@ -9,7 +9,8 @@ from . import expected_discovery_test_output from .helpers import TEST_DATA_PATH, runner, runner_with_cwd -pytestmark = pytest.mark.skip(reason="Skipping all tests in this module") +# uncomment this line to skip all tests in this module +# pytestmark = pytest.mark.skip(reason="Skipping all tests in this module") def test_import_error(tmp_path): From 7455d87fe15c2b51bac4cf7b2ebbd5c9686ec685 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 25 Sep 2023 11:11:49 -0700 Subject: [PATCH 08/23] remove unneeded import --- pythonFiles/tests/pytestadapter/test_execution.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pythonFiles/tests/pytestadapter/test_execution.py b/pythonFiles/tests/pytestadapter/test_execution.py index 540db358e34f..d7382c624d4f 100644 --- a/pythonFiles/tests/pytestadapter/test_execution.py +++ b/pythonFiles/tests/pytestadapter/test_execution.py @@ -1,6 +1,5 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
-import json import os import shutil from typing import Any, Dict, List From 4c891f18b8ea884ef76fadedf4846b210fe94b48 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 25 Sep 2023 12:47:03 -0700 Subject: [PATCH 09/23] edits from feedback and collect discovery --- .../tests/pytestadapter/test_discovery.py | 3 - .../tests/pytestadapter/test_execution.py | 3 - pythonFiles/unittestadapter/execution.py | 2 - .../vscode_pytest/run_pytest_script.py | 2 - .../testing/testController/common/server.ts | 61 +++++++++---------- .../pytest/pytestDiscoveryAdapter.ts | 9 ++- .../pytest/pytestExecutionAdapter.ts | 9 +-- 7 files changed, 37 insertions(+), 52 deletions(-) diff --git a/pythonFiles/tests/pytestadapter/test_discovery.py b/pythonFiles/tests/pytestadapter/test_discovery.py index 3e918c4fc072..674d92ac0545 100644 --- a/pythonFiles/tests/pytestadapter/test_discovery.py +++ b/pythonFiles/tests/pytestadapter/test_discovery.py @@ -9,9 +9,6 @@ from . import expected_discovery_test_output from .helpers import TEST_DATA_PATH, runner, runner_with_cwd -# uncomment this line to skip all tests in this module -# pytestmark = pytest.mark.skip(reason="Skipping all tests in this module") - def test_import_error(tmp_path): """Test pytest discovery on a file that has a pytest marker but does not import pytest. diff --git a/pythonFiles/tests/pytestadapter/test_execution.py b/pythonFiles/tests/pytestadapter/test_execution.py index d7382c624d4f..eb6993412d0a 100644 --- a/pythonFiles/tests/pytestadapter/test_execution.py +++ b/pythonFiles/tests/pytestadapter/test_execution.py @@ -10,9 +10,6 @@ from .helpers import TEST_DATA_PATH, runner, runner_with_cwd -# uncomment this line to skip all tests in this module -# pytestmark = pytest.mark.skip(reason="Skipping all tests in this module") - def test_config_file(): """Test pytest execution when a config file is specified.""" diff --git a/pythonFiles/unittestadapter/execution.py b/pythonFiles/unittestadapter/execution.py index a208056c6682..41bac41ce875 100644 --- a/pythonFiles/unittestadapter/execution.py +++ b/pythonFiles/unittestadapter/execution.py @@ -312,8 +312,6 @@ def post_response( ) # Clear the buffer as complete JSON object is received buffer = b"" - - # Process the JSON data break except json.JSONDecodeError: # JSON decoding error, the complete JSON object is not yet received diff --git a/pythonFiles/vscode_pytest/run_pytest_script.py b/pythonFiles/vscode_pytest/run_pytest_script.py index 9f3f94e58844..c3720c8ab8d0 100644 --- a/pythonFiles/vscode_pytest/run_pytest_script.py +++ b/pythonFiles/vscode_pytest/run_pytest_script.py @@ -51,8 +51,6 @@ ) # Clear the buffer as complete JSON object is received buffer = b"" - - # Process the JSON data. 
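                    # Illustrative sketch of the receive loop this hunk lives in (variable names are
                    # assumed from the surrounding context, not introduced by this patch): bytes are
                    # accumulated in `buffer` until they decode as one complete JSON object, then the
                    # buffer is cleared.
                    #
                    #     buffer = b""
                    #     while True:
                    #         chunk = client_socket.recv(1024 * 1024)
                    #         if not chunk:
                    #             break
                    #         buffer += chunk
                    #         try:
                    #             json.loads(buffer.decode("utf-8"))
                    #             buffer = b""  # complete JSON object received
                    #             break
                    #         except json.JSONDecodeError:
                    #             continue  # object still incomplete; keep reading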
print("Received JSON data in run script") break except json.JSONDecodeError: diff --git a/src/client/testing/testController/common/server.ts b/src/client/testing/testController/common/server.ts index 4e36e02c8774..2fdc0ea67e16 100644 --- a/src/client/testing/testController/common/server.ts +++ b/src/client/testing/testController/common/server.ts @@ -11,7 +11,7 @@ import { IPythonExecutionFactory, SpawnOptions, } from '../../../common/process/types'; -import { traceError, traceInfo, traceLog } from '../../../logging'; +import { traceError, traceInfo, traceLog, traceVerbose } from '../../../logging'; import { DataReceivedEvent, ITestServer, TestCommandOptions } from './types'; import { ITestDebugLauncher, LaunchOptions } from '../../common/types'; import { UNITTEST_PROVIDER } from '../../common/constants'; @@ -87,7 +87,7 @@ export class PythonTestServer implements ITestServer, Disposable { // what payload is so small it doesn't include the whole UUID think got this if (extractedJsonPayload.uuid !== undefined && extractedJsonPayload.cleanedJsonData !== undefined) { // if a full json was found in the buffer, fire the data received event then keep cycling with the remaining raw data. - traceLog(`Firing data received event, ${extractedJsonPayload.cleanedJsonData}`); + traceVerbose(`Firing data received event, ${extractedJsonPayload.cleanedJsonData}`); this._fireDataReceived(extractedJsonPayload.uuid, extractedJsonPayload.cleanedJsonData); } buffer = Buffer.from(extractedJsonPayload.remainingRawData); @@ -197,16 +197,6 @@ export class PythonTestServer implements ITestServer, Disposable { const args = [options.command.script, '--port', this.getPort().toString(), '--uuid', uuid].concat( options.command.args, ); - - // If the user didn't explicit dictate the color during run, then add it - if (isRun) { - if (!args.includes('--color=no')) { - if (!args.includes('--color=yes')) { - args.push('--color=yes'); - } - } - } - if (options.outChannel) { options.outChannel.appendLine(`python ${args.join(' ')}`); } @@ -242,17 +232,16 @@ export class PythonTestServer implements ITestServer, Disposable { // Displays output to user and ensure the subprocess doesn't run into buffer overflow. // TODO: after a release, remove discovery output from the "Python Test Log" channel and send it to the "Python" channel instead. - // TOOD: after a release, remove run output from the "Python Test Log" channel and send it to the "Test Result" channel instead. + // TODO: after a release, remove run output from the "Python Test Log" channel and send it to the "Test Result" channel instead. + let collectedOutput = ''; if (isDiscovery) { result?.proc?.stdout?.on('data', (data) => { const out = fixLogLines(data.toString()); - traceLog(out); - spawnOptions?.outputChannel?.append(`${out}`); + collectedOutput += out; }); result?.proc?.stderr?.on('data', (data) => { const out = fixLogLines(data.toString()); - traceError(out); - spawnOptions?.outputChannel?.append(`${out}`); + collectedOutput += out; }); } else { result?.proc?.stdout?.on('data', (data) => { @@ -274,7 +263,29 @@ export class PythonTestServer implements ITestServer, Disposable { ' and test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' + ' The "Python Test Log" channel will be deprecated within the next month. See ___ for details.', ); - if (code !== 0 && !isDiscovery) { + if (isDiscovery) { + // Collect all discovery output and log it at process finish to avoid dividing it between log lines. 
+ traceLog(`\r\n${collectedOutput}`); + spawnOptions?.outputChannel?.append(`${collectedOutput}`); + + if (code !== 0) { + // This occurs when we are running discovery + traceError( + `Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}. Creating and sending error discovery payload`, + ); + this._onDiscoveryDataReceived.fire({ + uuid, + data: JSON.stringify(createDiscoveryErrorPayload(code, signal, options.cwd)), + }); + // then send a EOT payload + this._onDiscoveryDataReceived.fire({ + uuid, + data: JSON.stringify(createEOTPayload(true)), + }); + } + } else if (code !== 0) { + // This occurs when we are running the test and there is an error which occurs. + traceError( `Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}. Creating and sending error execution payload`, ); @@ -288,20 +299,6 @@ export class PythonTestServer implements ITestServer, Disposable { uuid, data: JSON.stringify(createEOTPayload(true)), }); - } else if (code !== 0) { - // This occurs when we are running discovery - traceError( - `Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}. Creating and sending error discovery payload`, - ); - this._onDiscoveryDataReceived.fire({ - uuid, - data: JSON.stringify(createDiscoveryErrorPayload(code, signal, options.cwd)), - }); - // then send a EOT payload - this._onDiscoveryDataReceived.fire({ - uuid, - data: JSON.stringify(createEOTPayload(true)), - }); } deferred.resolve({ stdout: '', stderr: '' }); }); diff --git a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts index e568023b5f59..0b3bcfde60c4 100644 --- a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts +++ b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts @@ -94,17 +94,22 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter { // Take all output from the subprocess and add it to the test output channel. This will be the pytest output. // Displays output to user and ensure the subprocess doesn't run into buffer overflow. // TODO: after a release, remove discovery output from the "Python Test Log" channel and send it to the "Python" channel instead. + + let collectedOutput = ''; result?.proc?.stdout?.on('data', (data) => { const out = fixLogLines(data.toString()); - traceLog(out); - spawnOptions?.outputChannel?.append(`${out}`); + collectedOutput += out; }); result?.proc?.stderr?.on('data', (data) => { const out = fixLogLines(data.toString()); + collectedOutput += out; traceError(out); spawnOptions?.outputChannel?.append(`${out}`); }); result?.proc?.on('exit', (code, signal) => { + // Collect all discovery output and log it at process finish to avoid dividing it between log lines. + traceLog(`\r\n${collectedOutput}`); + spawnOptions?.outputChannel?.append(`${collectedOutput}`); this.outputChannel?.append( 'Starting now, all test run output will be sent to the Test Result panel' + ' and test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' 
+ diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index 24b0b3a1c65f..bbc4647b5b1f 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -125,15 +125,8 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { const execService = await executionFactory?.createActivatedEnvironment(creationOptions); try { - const colorOff = pytestArgs.includes('--color=no'); // Remove positional test folders and files, we will add as needed per node const testArgs = removePositionalFoldersAndFiles(pytestArgs); - // If the user didn't explicit dictate the color, then add it - if (!colorOff) { - if (!testArgs.includes('--color=yes')) { - testArgs.push('--color=yes'); - } - } // if user has provided `--rootdir` then use that, otherwise add `cwd` if (testArgs.filter((a) => a.startsWith('--rootdir')).length === 0) { @@ -180,7 +173,7 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { // Take all output from the subprocess and add it to the test output channel. This will be the pytest output. // Displays output to user and ensure the subprocess doesn't run into buffer overflow. - // TOODO: after a release, remove run output from the "Python Test Log" channel and send it to the "Test Result" channel instead. + // TODO: after a release, remove run output from the "Python Test Log" channel and send it to the "Test Result" channel instead. result?.proc?.stdout?.on('data', (data) => { const out = utils.fixLogLines(data.toString()); runInstance?.appendOutput(`${out}`); From 5d8e61f0099887c9d15fc83393455ac8265fa927 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 25 Sep 2023 13:49:32 -0700 Subject: [PATCH 10/23] remove color addition --- .../testController/pytest/pytestDiscoveryAdapter.ts | 4 +--- .../pytest/pytestExecutionAdapter.unit.test.ts | 12 +++--------- src/test/testing/testController/server.unit.test.ts | 2 +- 3 files changed, 5 insertions(+), 13 deletions(-) diff --git a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts index 0b3bcfde60c4..fe7d0ca9f950 100644 --- a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts +++ b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts @@ -85,9 +85,7 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter { // delete UUID following entire discovery finishing. 
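        // Illustrative sketch (not part of this diff) of the discovery command assembled below:
        // the adapter runs the user's interpreter with the vscode_pytest plugin in collect-only
        // mode and, after this change, passes the user's pytest args through unmodified since no
        // `--color=yes` filtering is applied:
        //
        //     const execArgs = ['-m', 'pytest', '-p', 'vscode_pytest', '--collect-only', ...pytestArgs];
        //     execService?.execObservable(execArgs, spawnOptions);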
const deferredExec = createDeferred>(); - let execArgs = ['-m', 'pytest', '-p', 'vscode_pytest', '--collect-only'].concat(pytestArgs); - // filter out color=yes from pytestArgs - execArgs = execArgs.filter((item) => item !== '--color=yes'); + const execArgs = ['-m', 'pytest', '-p', 'vscode_pytest', '--collect-only'].concat(pytestArgs); traceVerbose(`Running pytest discovery with command: ${execArgs.join(' ')}`); const result = execService?.execObservable(execArgs, spawnOptions); diff --git a/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts b/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts index 4aa4724d66b8..9cc428ab0a4c 100644 --- a/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts +++ b/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts @@ -165,7 +165,7 @@ suite('pytest test execution adapter', () => { const pathToPythonFiles = path.join(EXTENSION_ROOT_DIR, 'pythonFiles'); const pathToPythonScript = path.join(pathToPythonFiles, 'vscode_pytest', 'run_pytest_script.py'); - const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath, '--color=yes']; + const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath]; const expectedExtraVariables = { PYTHONPATH: pathToPythonFiles, TEST_UUID: 'uuid123', @@ -231,7 +231,7 @@ suite('pytest test execution adapter', () => { const pathToPythonFiles = path.join(EXTENSION_ROOT_DIR, 'pythonFiles'); const pathToPythonScript = path.join(pathToPythonFiles, 'vscode_pytest', 'run_pytest_script.py'); - const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath, '--color=yes']; + const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath]; const expectedExtraVariables = { PYTHONPATH: pathToPythonFiles, TEST_UUID: 'uuid123', @@ -298,13 +298,7 @@ suite('pytest test execution adapter', () => { x.launchDebugger( typeMoq.It.is((launchOptions) => { assert.equal(launchOptions.cwd, uri.fsPath); - assert.deepEqual(launchOptions.args, [ - '--rootdir', - myTestPath, - '--color=yes', - '--capture', - 'no', - ]); + assert.deepEqual(launchOptions.args, ['--rootdir', myTestPath, '--capture', 'no']); assert.equal(launchOptions.testProvider, 'pytest'); assert.equal(launchOptions.pytestPort, '12345'); assert.equal(launchOptions.pytestUUID, 'uuid123'); diff --git a/src/test/testing/testController/server.unit.test.ts b/src/test/testing/testController/server.unit.test.ts index 6270f79f33b2..92a9a1135f55 100644 --- a/src/test/testing/testController/server.unit.test.ts +++ b/src/test/testing/testController/server.unit.test.ts @@ -216,7 +216,7 @@ suite('Python Test Server, Send command etc', () => { mockProc.trigger('close'); const port = server.getPort(); - const expectedArgs = ['myscript', '--port', `${port}`, '--uuid', FAKE_UUID, '-foo', 'foo', '--color=yes']; + const expectedArgs = ['myscript', '--port', `${port}`, '--uuid', FAKE_UUID, '-foo', 'foo']; execService.verify((x) => x.execObservable(expectedArgs, typeMoq.It.isAny()), typeMoq.Times.once()); }); From 9f270d6a5636aabbd62690a85d7cac71c4141f12 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Wed, 20 Sep 2023 15:29:17 -0700 Subject: [PATCH 11/23] switch testing output to test result panel --- .../tests/pytestadapter/test_discovery.py | 2 + .../tests/pytestadapter/test_execution.py | 376 +++++++++--------- .../vscode_pytest/run_pytest_script.py | 2 + .../testing/testController/common/server.ts | 10 +- .../pytest/pytestDiscoveryAdapter.ts | 3 +- .../pytest/pytestExecutionAdapter.ts | 8 +- 6 files 
changed, 204 insertions(+), 197 deletions(-) diff --git a/pythonFiles/tests/pytestadapter/test_discovery.py b/pythonFiles/tests/pytestadapter/test_discovery.py index 674d92ac0545..81e42a5e9ed7 100644 --- a/pythonFiles/tests/pytestadapter/test_discovery.py +++ b/pythonFiles/tests/pytestadapter/test_discovery.py @@ -9,6 +9,8 @@ from . import expected_discovery_test_output from .helpers import TEST_DATA_PATH, runner, runner_with_cwd +pytestmark = pytest.mark.skip(reason="Skipping all tests in this module") + def test_import_error(tmp_path): """Test pytest discovery on a file that has a pytest marker but does not import pytest. diff --git a/pythonFiles/tests/pytestadapter/test_execution.py b/pythonFiles/tests/pytestadapter/test_execution.py index eb6993412d0a..8998c987eb43 100644 --- a/pythonFiles/tests/pytestadapter/test_execution.py +++ b/pythonFiles/tests/pytestadapter/test_execution.py @@ -11,208 +11,212 @@ from .helpers import TEST_DATA_PATH, runner, runner_with_cwd -def test_config_file(): - """Test pytest execution when a config file is specified.""" - args = [ - "-c", - "tests/pytest.ini", - str(TEST_DATA_PATH / "root" / "tests" / "test_a.py::test_a_function"), - ] - new_cwd = TEST_DATA_PATH / "root" - actual = runner_with_cwd(args, new_cwd) - expected_const = ( - expected_execution_test_output.config_file_pytest_expected_execution_output - ) - assert actual - actual_list: List[Dict[str, Any]] = actual - assert actual_list.pop(-1).get("eot") - assert len(actual_list) == len(expected_const) - actual_result_dict = dict() - if actual_list is not None: - for actual_item in actual_list: - assert all( - item in actual_item.keys() for item in ("status", "cwd", "result") - ) - assert actual_item.get("status") == "success" - assert actual_item.get("cwd") == os.fspath(new_cwd) - actual_result_dict.update(actual_item["result"]) - assert actual_result_dict == expected_const +# def test_config_file(): +# """Test pytest execution when a config file is specified.""" +# args = [ +# "-c", +# "tests/pytest.ini", +# str(TEST_DATA_PATH / "root" / "tests" / "test_a.py::test_a_function"), +# ] +# new_cwd = TEST_DATA_PATH / "root" +# actual = runner_with_cwd(args, new_cwd) +# expected_const = ( +# expected_execution_test_output.config_file_pytest_expected_execution_output +# ) +# assert actual +# actual_list: List[Dict[str, Any]] = actual +# assert actual_list.pop(-1).get("eot") +# assert len(actual_list) == len(expected_const) +# actual_result_dict = dict() +# if actual_list is not None: +# for actual_item in actual_list: +# assert all( +# item in actual_item.keys() for item in ("status", "cwd", "result") +# ) +# assert actual_item.get("status") == "success" +# assert actual_item.get("cwd") == os.fspath(new_cwd) +# actual_result_dict.update(actual_item["result"]) +# assert actual_result_dict == expected_const -def test_rootdir_specified(): - """Test pytest execution when a --rootdir is specified.""" - rd = f"--rootdir={TEST_DATA_PATH / 'root' / 'tests'}" - args = [rd, "tests/test_a.py::test_a_function"] - new_cwd = TEST_DATA_PATH / "root" - actual = runner_with_cwd(args, new_cwd) - expected_const = ( - expected_execution_test_output.config_file_pytest_expected_execution_output - ) - assert actual - actual_list: List[Dict[str, Any]] = actual - assert actual_list.pop(-1).get("eot") - assert len(actual_list) == len(expected_const) - actual_result_dict = dict() - if actual_list is not None: - for actual_item in actual_list: - assert all( - item in actual_item.keys() for item in ("status", "cwd", "result") - ) - 
assert actual_item.get("status") == "success" - assert actual_item.get("cwd") == os.fspath(new_cwd) - actual_result_dict.update(actual_item["result"]) - assert actual_result_dict == expected_const +# def test_rootdir_specified(): +# """Test pytest execution when a --rootdir is specified.""" +# rd = f"--rootdir={TEST_DATA_PATH / 'root' / 'tests'}" +# args = [rd, "tests/test_a.py::test_a_function"] +# new_cwd = TEST_DATA_PATH / "root" +# actual = runner_with_cwd(args, new_cwd) +# expected_const = ( +# expected_execution_test_output.config_file_pytest_expected_execution_output +# ) +# assert actual +# actual_list: List[Dict[str, Any]] = actual +# assert actual_list.pop(-1).get("eot") +# assert len(actual_list) == len(expected_const) +# actual_result_dict = dict() +# if actual_list is not None: +# for actual_item in actual_list: +# assert all( +# item in actual_item.keys() for item in ("status", "cwd", "result") +# ) +# assert actual_item.get("status") == "success" +# assert actual_item.get("cwd") == os.fspath(new_cwd) +# actual_result_dict.update(actual_item["result"]) +# assert actual_result_dict == expected_const -def test_syntax_error_execution(tmp_path): - """Test pytest execution on a file that has a syntax error. +# def test_syntax_error_execution(tmp_path): +# """Test pytest execution on a file that has a syntax error. - Copies the contents of a .txt file to a .py file in the temporary directory - to then run pytest execution on. +# Copies the contents of a .txt file to a .py file in the temporary directory +# to then run pytest execution on. - The json should still be returned but the errors list should be present. +# The json should still be returned but the errors list should be present. - Keyword arguments: - tmp_path -- pytest fixture that creates a temporary directory. - """ - # Saving some files as .txt to avoid that file displaying a syntax error for - # the extension as a whole. Instead, rename it before running this test - # in order to test the error handling. - file_path = TEST_DATA_PATH / "error_syntax_discovery.txt" - temp_dir = tmp_path / "temp_data" - temp_dir.mkdir() - p = temp_dir / "error_syntax_discovery.py" - shutil.copyfile(file_path, p) - actual = runner(["error_syntax_discover.py::test_function"]) - assert actual - actual_list: List[Dict[str, Any]] = actual - assert actual_list.pop(-1).get("eot") - if actual_list is not None: - for actual_item in actual_list: - assert all( - item in actual_item.keys() for item in ("status", "cwd", "error") - ) - assert actual_item.get("status") == "error" - assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) - error_content = actual_item.get("error") - if error_content is not None and isinstance( - error_content, (list, tuple, str) - ): # You can add other types if needed - assert len(error_content) == 1 - else: - assert False +# Keyword arguments: +# tmp_path -- pytest fixture that creates a temporary directory. +# """ +# # Saving some files as .txt to avoid that file displaying a syntax error for +# # the extension as a whole. Instead, rename it before running this test +# # in order to test the error handling. 
+# file_path = TEST_DATA_PATH / "error_syntax_discovery.txt" +# temp_dir = tmp_path / "temp_data" +# temp_dir.mkdir() +# p = temp_dir / "error_syntax_discovery.py" +# shutil.copyfile(file_path, p) +# actual = runner(["error_syntax_discover.py::test_function"]) +# assert actual +# actual_list: List[Dict[str, Any]] = actual +# assert actual_list.pop(-1).get("eot") +# if actual_list is not None: +# for actual_item in actual_list: +# assert all( +# item in actual_item.keys() for item in ("status", "cwd", "error") +# ) +# assert actual_item.get("status") == "error" +# assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) +# error_content = actual_item.get("error") +# if error_content is not None and isinstance( +# error_content, (list, tuple, str) +# ): # You can add other types if needed +# assert len(error_content) == 1 +# else: +# assert False -def test_bad_id_error_execution(): - """Test pytest discovery with a non-existent test_id. +# def test_bad_id_error_execution(): +# """Test pytest discovery with a non-existent test_id. - The json should still be returned but the errors list should be present. - """ - actual = runner(["not/a/real::test_id"]) - assert actual - actual_list: List[Dict[str, Any]] = actual - assert actual_list.pop(-1).get("eot") - if actual_list is not None: - for actual_item in actual_list: - assert all( - item in actual_item.keys() for item in ("status", "cwd", "error") - ) - assert actual_item.get("status") == "error" - assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) - error_content = actual_item.get("error") - if error_content is not None and isinstance( - error_content, (list, tuple, str) - ): # You can add other types if needed - assert len(error_content) == 1 - else: - assert False +# The json should still be returned but the errors list should be present. 
+# """ +# actual = runner(["not/a/real::test_id"]) +# assert actual +# actual_list: List[Dict[str, Any]] = actual +# assert actual_list.pop(-1).get("eot") +# if actual_list is not None: +# for actual_item in actual_list: +# assert all( +# item in actual_item.keys() for item in ("status", "cwd", "error") +# ) +# assert actual_item.get("status") == "error" +# assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) +# error_content = actual_item.get("error") +# if error_content is not None and isinstance( +# error_content, (list, tuple, str) +# ): # You can add other types if needed +# assert len(error_content) == 1 +# else: +# assert False @pytest.mark.parametrize( "test_ids, expected_const", [ + # ( + # [ + # "skip_tests.py::test_something", + # "skip_tests.py::test_another_thing", + # "skip_tests.py::test_decorator_thing", + # "skip_tests.py::test_decorator_thing_2", + # "skip_tests.py::TestClass::test_class_function_a", + # "skip_tests.py::TestClass::test_class_function_b", + # ], + # expected_execution_test_output.skip_tests_execution_expected_output, + # ), + # ( + # ["error_raise_exception.py::TestSomething::test_a"], + # expected_execution_test_output.error_raised_exception_execution_expected_output, + # ), + # ( + # [ + # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + # "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + # "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + # "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers", + # ], + # expected_execution_test_output.uf_execution_expected_output, + # ), + # ( + # [ + # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + # "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + # ], + # expected_execution_test_output.uf_single_file_expected_output, + # ), + # ( + # [ + # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + # ], + # expected_execution_test_output.uf_single_method_execution_expected_output, + # ), + # ( + # [ + # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + # "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + # ], + # expected_execution_test_output.uf_non_adjacent_tests_execution_expected_output, + # ), + # ( + # [ + # "unittest_pytest_same_file.py::TestExample::test_true_unittest", + # "unittest_pytest_same_file.py::test_true_pytest", + # ], + # expected_execution_test_output.unit_pytest_same_file_execution_expected_output, + # ), + # ( + # [ + # "dual_level_nested_folder/test_top_folder.py::test_top_function_t", + # "dual_level_nested_folder/test_top_folder.py::test_top_function_f", + # "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t", + # "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f", + # ], + # expected_execution_test_output.dual_level_nested_folder_execution_expected_output, + # ), + # ( + # ["folder_a/folder_b/folder_a/test_nest.py::test_function"], + # expected_execution_test_output.double_nested_folder_expected_execution_output, + # ), + # ( + # [ + # "parametrize_tests.py::test_adding[3+5-8]", + # "parametrize_tests.py::test_adding[2+4-6]", + # "parametrize_tests.py::test_adding[6+9-16]", + # ], + # expected_execution_test_output.parametrize_tests_expected_execution_output, + # ), + # ( + # [ + # "parametrize_tests.py::test_adding[3+5-8]", + # ], + # 
expected_execution_test_output.single_parametrize_tests_expected_execution_output, + # ), + # ( + # [ + # "text_docstring.txt::text_docstring.txt", + # ], + # expected_execution_test_output.doctest_pytest_expected_execution_output, + # ), ( - [ - "skip_tests.py::test_something", - "skip_tests.py::test_another_thing", - "skip_tests.py::test_decorator_thing", - "skip_tests.py::test_decorator_thing_2", - "skip_tests.py::TestClass::test_class_function_a", - "skip_tests.py::TestClass::test_class_function_b", - ], - expected_execution_test_output.skip_tests_execution_expected_output, - ), - ( - ["error_raise_exception.py::TestSomething::test_a"], - expected_execution_test_output.error_raised_exception_execution_expected_output, - ), - ( - [ - "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", - "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", - "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers", - ], - expected_execution_test_output.uf_execution_expected_output, - ), - ( - [ - "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", - ], - expected_execution_test_output.uf_single_file_expected_output, - ), - ( - [ - "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - ], - expected_execution_test_output.uf_single_method_execution_expected_output, - ), - ( - [ - "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", - ], - expected_execution_test_output.uf_non_adjacent_tests_execution_expected_output, - ), - ( - [ - "unittest_pytest_same_file.py::TestExample::test_true_unittest", - "unittest_pytest_same_file.py::test_true_pytest", - ], - expected_execution_test_output.unit_pytest_same_file_execution_expected_output, - ), - ( - [ - "dual_level_nested_folder/test_top_folder.py::test_top_function_t", - "dual_level_nested_folder/test_top_folder.py::test_top_function_f", - "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t", - "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f", - ], - expected_execution_test_output.dual_level_nested_folder_execution_expected_output, - ), - ( - ["folder_a/folder_b/folder_a/test_nest.py::test_function"], - expected_execution_test_output.double_nested_folder_expected_execution_output, - ), - ( - [ - "parametrize_tests.py::test_adding[3+5-8]", - "parametrize_tests.py::test_adding[2+4-6]", - "parametrize_tests.py::test_adding[6+9-16]", - ], - expected_execution_test_output.parametrize_tests_expected_execution_output, - ), - ( - [ - "parametrize_tests.py::test_adding[3+5-8]", - ], - expected_execution_test_output.single_parametrize_tests_expected_execution_output, - ), - ( - [ - "text_docstring.txt::text_docstring.txt", - ], + ["test_logging.py::test_logging2", "test_logging.py::test_logging"], expected_execution_test_output.doctest_pytest_expected_execution_output, ), ( diff --git a/pythonFiles/vscode_pytest/run_pytest_script.py b/pythonFiles/vscode_pytest/run_pytest_script.py index c3720c8ab8d0..9f3f94e58844 100644 --- a/pythonFiles/vscode_pytest/run_pytest_script.py +++ b/pythonFiles/vscode_pytest/run_pytest_script.py @@ -51,6 +51,8 @@ ) # Clear the buffer as complete JSON object is 
received buffer = b"" + + # Process the JSON data. print("Received JSON data in run script") break except json.JSONDecodeError: diff --git a/src/client/testing/testController/common/server.ts b/src/client/testing/testController/common/server.ts index 52a12aa2dafe..b7267d70ca1f 100644 --- a/src/client/testing/testController/common/server.ts +++ b/src/client/testing/testController/common/server.ts @@ -16,13 +16,7 @@ import { traceError, traceInfo, traceLog, traceVerbose } from '../../../logging' import { DataReceivedEvent, ITestServer, TestCommandOptions } from './types'; import { ITestDebugLauncher, LaunchOptions } from '../../common/types'; import { UNITTEST_PROVIDER } from '../../common/constants'; -import { - createDiscoveryErrorPayload, - createEOTPayload, - createExecutionErrorPayload, - extractJsonPayload, - fixLogLines, -} from './utils'; +import { createEOTPayload, createExecutionErrorPayload, extractJsonPayload } from './utils'; import { createDeferred } from '../../../common/utils/async'; import { EnvironmentVariables } from '../../../api/types'; @@ -198,7 +192,7 @@ export class PythonTestServer implements ITestServer, Disposable { }; const execService = await this.executionFactory.createActivatedEnvironment(creationOptions); const args = [options.command.script].concat(options.command.args); - + if (options.outChannel) { options.outChannel.appendLine(`python ${args.join(' ')}`); } diff --git a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts index e15c6ddde8bd..bc81ffdcae98 100644 --- a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts +++ b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts @@ -18,7 +18,6 @@ import { ITestResultResolver, ITestServer, } from '../common/types'; -import { createDiscoveryErrorPayload, createEOTPayload, fixLogLines } from '../common/utils'; import { createDiscoveryErrorPayload, createEOTPayload, createTestingDeferred } from '../common/utils'; import { IEnvironmentVariablesProvider } from '../../../common/variables/types'; @@ -99,7 +98,7 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter { // Displays output to user and ensure the subprocess doesn't run into buffer overflow. // TODO: after a release, remove discovery output from the "Python Test Log" channel and send it to the "Python" channel instead. 
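            // Reference sketch of the accumulate-then-flush pattern used for discovery output
            // (illustrative names, assuming a ChildProcess `proc`): the accumulator must stay
            // mutable, since every stream callback appends to it and the combined text is only
            // logged once the process exits, so it is not split across log lines.
            //
            //     let collected = '';
            //     proc.stdout?.on('data', (d) => { collected += fixLogLines(d.toString()); });
            //     proc.on('exit', () => traceLog(`\r\n${collected}`));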
- let collectedOutput = ''; + const collectedOutput = ''; result?.proc?.stdout?.on('data', (data) => { const out = fixLogLines(data.toString()); collectedOutput += out; diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index 2c86d4a61831..0ca1ac68a0f4 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -124,8 +124,15 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { // need to check what will happen in the exec service is NOT defined and is null const execService = await executionFactory?.createActivatedEnvironment(creationOptions); try { + const colorOff = pytestArgs.includes('--color=no'); // Remove positional test folders and files, we will add as needed per node const testArgs = removePositionalFoldersAndFiles(pytestArgs); + // If the user didn't explicit dictate the color, then add it + if (!colorOff) { + if (!testArgs.includes('--color=yes')) { + testArgs.push('--color=yes'); + } + } // if user has provided `--rootdir` then use that, otherwise add `cwd` if (testArgs.filter((a) => a.startsWith('--rootdir')).length === 0) { @@ -173,7 +180,6 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { const runArgs = [scriptPath, ...testArgs]; traceInfo(`Running pytest with arguments: ${runArgs.join(' ')}\r\n`); - let resultProc: ChildProcess | undefined; runInstance?.token.onCancellationRequested(() => { From 09e3868b4b3ef7f09ca7f7d80a9ae09786d3a9c9 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 25 Sep 2023 09:00:11 -0700 Subject: [PATCH 12/23] fix existing tests --- .../testing/common/testingAdapter.test.ts | 219 +++++++++--------- .../pytestExecutionAdapter.unit.test.ts | 12 +- 2 files changed, 122 insertions(+), 109 deletions(-) diff --git a/src/test/testing/common/testingAdapter.test.ts b/src/test/testing/common/testingAdapter.test.ts index 4755573ddaf6..66be769dab7d 100644 --- a/src/test/testing/common/testingAdapter.test.ts +++ b/src/test/testing/common/testingAdapter.test.ts @@ -96,27 +96,27 @@ suite('End to End Tests: test adapters', () => { teardown(async () => { pythonTestServer.dispose(); }); - test('unittest discovery adapter small workspace', async () => { - // result resolver and saved data for assertions - let actualData: { - cwd: string; - tests?: unknown; - status: 'success' | 'error'; - error?: string[]; - }; - workspaceUri = Uri.parse(rootPathSmallWorkspace); - resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - let callCount = 0; - resultResolver._resolveDiscovery = async (payload, _token?) => { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - actualData = payload; - return Promise.resolve(); - }; - - // set workspace to test workspace folder and set up settings - - configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + // test('unittest discovery adapter small workspace', async () => { + // // result resolver and saved data for assertions + // let actualData: { + // cwd: string; + // tests?: unknown; + // status: 'success' | 'error'; + // error?: string[]; + // }; + // workspaceUri = Uri.parse(rootPathSmallWorkspace); + // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + // let callCount = 0; + // resultResolver._resolveDiscovery = async (payload, _token?) 
=> { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // actualData = payload; + // return Promise.resolve(); + // }; + + // // set workspace to test workspace folder and set up settings + + // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; // run unittest discovery const discoveryAdapter = new UnittestTestDiscoveryAdapter( @@ -126,41 +126,48 @@ suite('End to End Tests: test adapters', () => { resultResolver, envVarsService, ); - - await discoveryAdapter.discoverTests(workspaceUri).finally(() => { - // verification after discovery is complete - - // 1. Check the status is "success" - assert.strictEqual( - actualData.status, - 'success', - `Expected status to be 'success' instead status is ${actualData.status}`, - ); - // 2. Confirm no errors - assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); - // 3. Confirm tests are found - assert.ok(actualData.tests, 'Expected tests to be present'); - - assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); - }); - }); - - test('unittest discovery adapter large workspace', async () => { - // result resolver and saved data for assertions - let actualData: { - cwd: string; - tests?: unknown; - status: 'success' | 'error'; - error?: string[]; - }; - resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - let callCount = 0; - resultResolver._resolveDiscovery = async (payload, _token?) => { - traceLog(`resolveDiscovery ${payload}`); - callCount = callCount + 1; - actualData = payload; - return Promise.resolve(); - }; + // // run unittest discovery + // const discoveryAdapter = new UnittestTestDiscoveryAdapter( + // pythonTestServer, + // configService, + // testOutputChannel.object, + // resultResolver, + // ); + + // await discoveryAdapter.discoverTests(workspaceUri).finally(() => { + // // verification after discovery is complete + + // // 1. Check the status is "success" + // assert.strictEqual( + // actualData.status, + // 'success', + // `Expected status to be 'success' instead status is ${actualData.status}`, + // ); + // // 2. Confirm no errors + // assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); + // // 3. Confirm tests are found + // assert.ok(actualData.tests, 'Expected tests to be present'); + + // assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); + // }); + // }); + + // test('unittest discovery adapter large workspace', async () => { + // // result resolver and saved data for assertions + // let actualData: { + // cwd: string; + // tests?: unknown; + // status: 'success' | 'error'; + // error?: string[]; + // }; + // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + // let callCount = 0; + // resultResolver._resolveDiscovery = async (payload, _token?) => { + // traceLog(`resolveDiscovery ${payload}`); + // callCount = callCount + 1; + // actualData = payload; + // return Promise.resolve(); + // }; // set settings to work for the given workspace workspaceUri = Uri.parse(rootPathLargeWorkspace); @@ -174,17 +181,17 @@ suite('End to End Tests: test adapters', () => { envVarsService, ); - await discoveryAdapter.discoverTests(workspaceUri).finally(() => { - // 1. Check the status is "success" - assert.strictEqual( - actualData.status, - 'success', - `Expected status to be 'success' instead status is ${actualData.status}`, - ); - // 2. 
Confirm no errors - assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); - // 3. Confirm tests are found - assert.ok(actualData.tests, 'Expected tests to be present'); + // await discoveryAdapter.discoverTests(workspaceUri).finally(() => { + // // 1. Check the status is "success" + // assert.strictEqual( + // actualData.status, + // 'success', + // `Expected status to be 'success' instead status is ${actualData.status}`, + // ); + // // 2. Confirm no errors + // assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); + // // 3. Confirm tests are found + // assert.ok(actualData.tests, 'Expected tests to be present'); assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); }); @@ -214,20 +221,20 @@ suite('End to End Tests: test adapters', () => { envVarsService, ); - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathSmallWorkspace); - await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { - // verification after discovery is complete - - // 1. Check the status is "success" - assert.strictEqual( - actualData.status, - 'success', - `Expected status to be 'success' instead status is ${actualData.status}`, - ); // 2. Confirm no errors - assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); - // 3. Confirm tests are found - assert.ok(actualData.tests, 'Expected tests to be present'); + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathSmallWorkspace); + // await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { + // // verification after discovery is complete + + // // 1. Check the status is "success" + // assert.strictEqual( + // actualData.status, + // 'success', + // `Expected status to be 'success' instead status is ${actualData.status}`, + // ); // 2. Confirm no errors + // assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); + // // 3. Confirm tests are found + // assert.ok(actualData.tests, 'Expected tests to be present'); assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); }); @@ -257,20 +264,20 @@ suite('End to End Tests: test adapters', () => { envVarsService, ); - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathLargeWorkspace); - - await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { - // verification after discovery is complete - // 1. Check the status is "success" - assert.strictEqual( - actualData.status, - 'success', - `Expected status to be 'success' instead status is ${actualData.status}`, - ); // 2. Confirm no errors - assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); - // 3. Confirm tests are found - assert.ok(actualData.tests, 'Expected tests to be present'); + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathLargeWorkspace); + + // await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { + // // verification after discovery is complete + // // 1. Check the status is "success" + // assert.strictEqual( + // actualData.status, + // 'success', + // `Expected status to be 'success' instead status is ${actualData.status}`, + // ); // 2. Confirm no errors + // assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); + // // 3. 
Confirm tests are found + // assert.ok(actualData.tests, 'Expected tests to be present'); assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); }); @@ -373,9 +380,9 @@ suite('End to End Tests: test adapters', () => { return Promise.resolve(); }; - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathLargeWorkspace); - configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathLargeWorkspace); + // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; // run unittest execution const executionAdapter = new UnittestTestExecutionAdapter( @@ -535,15 +542,15 @@ suite('End to End Tests: test adapters', () => { return Promise.resolve(); }; - // set workspace to test workspace folder - workspaceUri = Uri.parse(rootPathLargeWorkspace); + // // set workspace to test workspace folder + // workspaceUri = Uri.parse(rootPathLargeWorkspace); - // generate list of test_ids - const testIds: string[] = []; - for (let i = 0; i < 2000; i = i + 1) { - const testId = `${rootPathLargeWorkspace}/test_parameterized_subtest.py::test_odd_even[${i}]`; - testIds.push(testId); - } + // // generate list of test_ids + // const testIds: string[] = []; + // for (let i = 0; i < 2000; i = i + 1) { + // const testId = `${rootPathLargeWorkspace}/test_parameterized_subtest.py::test_odd_even[${i}]`; + // testIds.push(testId); + // } // run pytest execution const executionAdapter = new PytestTestExecutionAdapter( diff --git a/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts b/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts index a097df654360..c22cf3b46668 100644 --- a/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts +++ b/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts @@ -171,7 +171,7 @@ suite('pytest test execution adapter', () => { const pathToPythonFiles = path.join(EXTENSION_ROOT_DIR, 'pythonFiles'); const pathToPythonScript = path.join(pathToPythonFiles, 'vscode_pytest', 'run_pytest_script.py'); - const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath]; + const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath, '--color=yes']; const expectedExtraVariables = { PYTHONPATH: pathToPythonFiles, TEST_UUID: 'uuid123', @@ -238,7 +238,7 @@ suite('pytest test execution adapter', () => { const pathToPythonFiles = path.join(EXTENSION_ROOT_DIR, 'pythonFiles'); const pathToPythonScript = path.join(pathToPythonFiles, 'vscode_pytest', 'run_pytest_script.py'); - const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath]; + const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath, '--color=yes']; const expectedExtraVariables = { PYTHONPATH: pathToPythonFiles, TEST_UUID: 'uuid123', @@ -305,7 +305,13 @@ suite('pytest test execution adapter', () => { x.launchDebugger( typeMoq.It.is((launchOptions) => { assert.equal(launchOptions.cwd, uri.fsPath); - assert.deepEqual(launchOptions.args, ['--rootdir', myTestPath, '--capture', 'no']); + assert.deepEqual(launchOptions.args, [ + '--rootdir', + myTestPath, + '--color=yes', + '--capture', + 'no', + ]); assert.equal(launchOptions.testProvider, 'pytest'); assert.equal(launchOptions.pytestPort, '12345'); assert.equal(launchOptions.pytestUUID, 'uuid123'); From 9f4598a7cd65b622567dae37429b6ebb5bd804c8 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 
25 Sep 2023 09:55:08 -0700 Subject: [PATCH 13/23] add output channel msg --- .../tests/pytestadapter/test_execution.py | 378 +++++++++--------- .../testing/testController/common/server.ts | 1 - .../testing/common/testingAdapter.test.ts | 226 +++++------ 3 files changed, 302 insertions(+), 303 deletions(-) diff --git a/pythonFiles/tests/pytestadapter/test_execution.py b/pythonFiles/tests/pytestadapter/test_execution.py index 8998c987eb43..1853b84fef6f 100644 --- a/pythonFiles/tests/pytestadapter/test_execution.py +++ b/pythonFiles/tests/pytestadapter/test_execution.py @@ -11,210 +11,210 @@ from .helpers import TEST_DATA_PATH, runner, runner_with_cwd -# def test_config_file(): -# """Test pytest execution when a config file is specified.""" -# args = [ -# "-c", -# "tests/pytest.ini", -# str(TEST_DATA_PATH / "root" / "tests" / "test_a.py::test_a_function"), -# ] -# new_cwd = TEST_DATA_PATH / "root" -# actual = runner_with_cwd(args, new_cwd) -# expected_const = ( -# expected_execution_test_output.config_file_pytest_expected_execution_output -# ) -# assert actual -# actual_list: List[Dict[str, Any]] = actual -# assert actual_list.pop(-1).get("eot") -# assert len(actual_list) == len(expected_const) -# actual_result_dict = dict() -# if actual_list is not None: -# for actual_item in actual_list: -# assert all( -# item in actual_item.keys() for item in ("status", "cwd", "result") -# ) -# assert actual_item.get("status") == "success" -# assert actual_item.get("cwd") == os.fspath(new_cwd) -# actual_result_dict.update(actual_item["result"]) -# assert actual_result_dict == expected_const +def test_config_file(): + """Test pytest execution when a config file is specified.""" + args = [ + "-c", + "tests/pytest.ini", + str(TEST_DATA_PATH / "root" / "tests" / "test_a.py::test_a_function"), + ] + new_cwd = TEST_DATA_PATH / "root" + actual = runner_with_cwd(args, new_cwd) + expected_const = ( + expected_execution_test_output.config_file_pytest_expected_execution_output + ) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + assert len(actual_list) == len(expected_const) + actual_result_dict = dict() + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "result") + ) + assert actual_item.get("status") == "success" + assert actual_item.get("cwd") == os.fspath(new_cwd) + actual_result_dict.update(actual_item["result"]) + assert actual_result_dict == expected_const -# def test_rootdir_specified(): -# """Test pytest execution when a --rootdir is specified.""" -# rd = f"--rootdir={TEST_DATA_PATH / 'root' / 'tests'}" -# args = [rd, "tests/test_a.py::test_a_function"] -# new_cwd = TEST_DATA_PATH / "root" -# actual = runner_with_cwd(args, new_cwd) -# expected_const = ( -# expected_execution_test_output.config_file_pytest_expected_execution_output -# ) -# assert actual -# actual_list: List[Dict[str, Any]] = actual -# assert actual_list.pop(-1).get("eot") -# assert len(actual_list) == len(expected_const) -# actual_result_dict = dict() -# if actual_list is not None: -# for actual_item in actual_list: -# assert all( -# item in actual_item.keys() for item in ("status", "cwd", "result") -# ) -# assert actual_item.get("status") == "success" -# assert actual_item.get("cwd") == os.fspath(new_cwd) -# actual_result_dict.update(actual_item["result"]) -# assert actual_result_dict == expected_const +def test_rootdir_specified(): + """Test pytest execution when a --rootdir is specified.""" + rd = 
f"--rootdir={TEST_DATA_PATH / 'root' / 'tests'}" + args = [rd, "tests/test_a.py::test_a_function"] + new_cwd = TEST_DATA_PATH / "root" + actual = runner_with_cwd(args, new_cwd) + expected_const = ( + expected_execution_test_output.config_file_pytest_expected_execution_output + ) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + assert len(actual_list) == len(expected_const) + actual_result_dict = dict() + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "result") + ) + assert actual_item.get("status") == "success" + assert actual_item.get("cwd") == os.fspath(new_cwd) + actual_result_dict.update(actual_item["result"]) + assert actual_result_dict == expected_const -# def test_syntax_error_execution(tmp_path): -# """Test pytest execution on a file that has a syntax error. +def test_syntax_error_execution(tmp_path): + """Test pytest execution on a file that has a syntax error. -# Copies the contents of a .txt file to a .py file in the temporary directory -# to then run pytest execution on. + Copies the contents of a .txt file to a .py file in the temporary directory + to then run pytest execution on. -# The json should still be returned but the errors list should be present. + The json should still be returned but the errors list should be present. -# Keyword arguments: -# tmp_path -- pytest fixture that creates a temporary directory. -# """ -# # Saving some files as .txt to avoid that file displaying a syntax error for -# # the extension as a whole. Instead, rename it before running this test -# # in order to test the error handling. -# file_path = TEST_DATA_PATH / "error_syntax_discovery.txt" -# temp_dir = tmp_path / "temp_data" -# temp_dir.mkdir() -# p = temp_dir / "error_syntax_discovery.py" -# shutil.copyfile(file_path, p) -# actual = runner(["error_syntax_discover.py::test_function"]) -# assert actual -# actual_list: List[Dict[str, Any]] = actual -# assert actual_list.pop(-1).get("eot") -# if actual_list is not None: -# for actual_item in actual_list: -# assert all( -# item in actual_item.keys() for item in ("status", "cwd", "error") -# ) -# assert actual_item.get("status") == "error" -# assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) -# error_content = actual_item.get("error") -# if error_content is not None and isinstance( -# error_content, (list, tuple, str) -# ): # You can add other types if needed -# assert len(error_content) == 1 -# else: -# assert False + Keyword arguments: + tmp_path -- pytest fixture that creates a temporary directory. + """ + # Saving some files as .txt to avoid that file displaying a syntax error for + # the extension as a whole. Instead, rename it before running this test + # in order to test the error handling. 
+ file_path = TEST_DATA_PATH / "error_syntax_discovery.txt" + temp_dir = tmp_path / "temp_data" + temp_dir.mkdir() + p = temp_dir / "error_syntax_discovery.py" + shutil.copyfile(file_path, p) + actual = runner(["error_syntax_discover.py::test_function"]) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "error") + ) + assert actual_item.get("status") == "error" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + error_content = actual_item.get("error") + if error_content is not None and isinstance( + error_content, (list, tuple, str) + ): # You can add other types if needed + assert len(error_content) == 1 + else: + assert False -# def test_bad_id_error_execution(): -# """Test pytest discovery with a non-existent test_id. +def test_bad_id_error_execution(): + """Test pytest discovery with a non-existent test_id. -# The json should still be returned but the errors list should be present. -# """ -# actual = runner(["not/a/real::test_id"]) -# assert actual -# actual_list: List[Dict[str, Any]] = actual -# assert actual_list.pop(-1).get("eot") -# if actual_list is not None: -# for actual_item in actual_list: -# assert all( -# item in actual_item.keys() for item in ("status", "cwd", "error") -# ) -# assert actual_item.get("status") == "error" -# assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) -# error_content = actual_item.get("error") -# if error_content is not None and isinstance( -# error_content, (list, tuple, str) -# ): # You can add other types if needed -# assert len(error_content) == 1 -# else: -# assert False + The json should still be returned but the errors list should be present. 
+ """ + actual = runner(["not/a/real::test_id"]) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "error") + ) + assert actual_item.get("status") == "error" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + error_content = actual_item.get("error") + if error_content is not None and isinstance( + error_content, (list, tuple, str) + ): # You can add other types if needed + assert len(error_content) == 1 + else: + assert False @pytest.mark.parametrize( "test_ids, expected_const", [ - # ( - # [ - # "skip_tests.py::test_something", - # "skip_tests.py::test_another_thing", - # "skip_tests.py::test_decorator_thing", - # "skip_tests.py::test_decorator_thing_2", - # "skip_tests.py::TestClass::test_class_function_a", - # "skip_tests.py::TestClass::test_class_function_b", - # ], - # expected_execution_test_output.skip_tests_execution_expected_output, - # ), - # ( - # ["error_raise_exception.py::TestSomething::test_a"], - # expected_execution_test_output.error_raised_exception_execution_expected_output, - # ), - # ( - # [ - # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - # "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", - # "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", - # "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers", - # ], - # expected_execution_test_output.uf_execution_expected_output, - # ), - # ( - # [ - # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - # "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", - # ], - # expected_execution_test_output.uf_single_file_expected_output, - # ), - # ( - # [ - # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - # ], - # expected_execution_test_output.uf_single_method_execution_expected_output, - # ), - # ( - # [ - # "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", - # "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", - # ], - # expected_execution_test_output.uf_non_adjacent_tests_execution_expected_output, - # ), - # ( - # [ - # "unittest_pytest_same_file.py::TestExample::test_true_unittest", - # "unittest_pytest_same_file.py::test_true_pytest", - # ], - # expected_execution_test_output.unit_pytest_same_file_execution_expected_output, - # ), - # ( - # [ - # "dual_level_nested_folder/test_top_folder.py::test_top_function_t", - # "dual_level_nested_folder/test_top_folder.py::test_top_function_f", - # "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t", - # "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f", - # ], - # expected_execution_test_output.dual_level_nested_folder_execution_expected_output, - # ), - # ( - # ["folder_a/folder_b/folder_a/test_nest.py::test_function"], - # expected_execution_test_output.double_nested_folder_expected_execution_output, - # ), - # ( - # [ - # "parametrize_tests.py::test_adding[3+5-8]", - # "parametrize_tests.py::test_adding[2+4-6]", - # "parametrize_tests.py::test_adding[6+9-16]", - # ], - # expected_execution_test_output.parametrize_tests_expected_execution_output, - # ), - # ( - # [ - # "parametrize_tests.py::test_adding[3+5-8]", - # ], - # 
expected_execution_test_output.single_parametrize_tests_expected_execution_output, - # ), - # ( - # [ - # "text_docstring.txt::text_docstring.txt", - # ], - # expected_execution_test_output.doctest_pytest_expected_execution_output, - # ), + ( + [ + "skip_tests.py::test_something", + "skip_tests.py::test_another_thing", + "skip_tests.py::test_decorator_thing", + "skip_tests.py::test_decorator_thing_2", + "skip_tests.py::TestClass::test_class_function_a", + "skip_tests.py::TestClass::test_class_function_b", + ], + expected_execution_test_output.skip_tests_execution_expected_output, + ), + ( + ["error_raise_exception.py::TestSomething::test_a"], + expected_execution_test_output.error_raised_exception_execution_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers", + ], + expected_execution_test_output.uf_execution_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + ], + expected_execution_test_output.uf_single_file_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + ], + expected_execution_test_output.uf_single_method_execution_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + ], + expected_execution_test_output.uf_non_adjacent_tests_execution_expected_output, + ), + ( + [ + "unittest_pytest_same_file.py::TestExample::test_true_unittest", + "unittest_pytest_same_file.py::test_true_pytest", + ], + expected_execution_test_output.unit_pytest_same_file_execution_expected_output, + ), + ( + [ + "dual_level_nested_folder/test_top_folder.py::test_top_function_t", + "dual_level_nested_folder/test_top_folder.py::test_top_function_f", + "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t", + "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f", + ], + expected_execution_test_output.dual_level_nested_folder_execution_expected_output, + ), + ( + ["folder_a/folder_b/folder_a/test_nest.py::test_function"], + expected_execution_test_output.double_nested_folder_expected_execution_output, + ), + ( + [ + "parametrize_tests.py::test_adding[3+5-8]", + "parametrize_tests.py::test_adding[2+4-6]", + "parametrize_tests.py::test_adding[6+9-16]", + ], + expected_execution_test_output.parametrize_tests_expected_execution_output, + ), + ( + [ + "parametrize_tests.py::test_adding[3+5-8]", + ], + expected_execution_test_output.single_parametrize_tests_expected_execution_output, + ), + ( + [ + "text_docstring.txt::text_docstring.txt", + ], + expected_execution_test_output.doctest_pytest_expected_execution_output, + ), ( ["test_logging.py::test_logging2", "test_logging.py::test_logging"], expected_execution_test_output.doctest_pytest_expected_execution_output, diff --git a/src/client/testing/testController/common/server.ts b/src/client/testing/testController/common/server.ts index b7267d70ca1f..f93fd84cfc30 100644 --- a/src/client/testing/testController/common/server.ts +++ 
b/src/client/testing/testController/common/server.ts @@ -238,7 +238,6 @@ export class PythonTestServer implements ITestServer, Disposable { const result = execService?.execObservable(args, spawnOptions); resultProc = result?.proc; - // Take all output from the subprocess and add it to the test output channel. This will be the pytest output. // Displays output to user and ensure the subprocess doesn't run into buffer overflow. // TODO: after a release, remove discovery output from the "Python Test Log" channel and send it to the "Python" channel instead. // TODO: after a release, remove run output from the "Python Test Log" channel and send it to the "Test Result" channel instead. diff --git a/src/test/testing/common/testingAdapter.test.ts b/src/test/testing/common/testingAdapter.test.ts index 66be769dab7d..240d63e7f6b8 100644 --- a/src/test/testing/common/testingAdapter.test.ts +++ b/src/test/testing/common/testingAdapter.test.ts @@ -96,27 +96,27 @@ suite('End to End Tests: test adapters', () => { teardown(async () => { pythonTestServer.dispose(); }); - // test('unittest discovery adapter small workspace', async () => { - // // result resolver and saved data for assertions - // let actualData: { - // cwd: string; - // tests?: unknown; - // status: 'success' | 'error'; - // error?: string[]; - // }; - // workspaceUri = Uri.parse(rootPathSmallWorkspace); - // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - // let callCount = 0; - // resultResolver._resolveDiscovery = async (payload, _token?) => { - // traceLog(`resolveDiscovery ${payload}`); - // callCount = callCount + 1; - // actualData = payload; - // return Promise.resolve(); - // }; - - // // set workspace to test workspace folder and set up settings - - // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + test('unittest discovery adapter small workspace', async () => { + // result resolver and saved data for assertions + let actualData: { + cwd: string; + tests?: unknown; + status: 'success' | 'error'; + error?: string[]; + }; + workspaceUri = Uri.parse(rootPathSmallWorkspace); + resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + let callCount = 0; + resultResolver._resolveDiscovery = async (payload, _token?) => { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + actualData = payload; + return Promise.resolve(); + }; + + // set workspace to test workspace folder and set up settings + + configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; // run unittest discovery const discoveryAdapter = new UnittestTestDiscoveryAdapter( @@ -126,48 +126,48 @@ suite('End to End Tests: test adapters', () => { resultResolver, envVarsService, ); - // // run unittest discovery - // const discoveryAdapter = new UnittestTestDiscoveryAdapter( - // pythonTestServer, - // configService, - // testOutputChannel.object, - // resultResolver, - // ); - - // await discoveryAdapter.discoverTests(workspaceUri).finally(() => { - // // verification after discovery is complete - - // // 1. Check the status is "success" - // assert.strictEqual( - // actualData.status, - // 'success', - // `Expected status to be 'success' instead status is ${actualData.status}`, - // ); - // // 2. Confirm no errors - // assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); - // // 3. 
Confirm tests are found - // assert.ok(actualData.tests, 'Expected tests to be present'); - - // assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); - // }); - // }); - - // test('unittest discovery adapter large workspace', async () => { - // // result resolver and saved data for assertions - // let actualData: { - // cwd: string; - // tests?: unknown; - // status: 'success' | 'error'; - // error?: string[]; - // }; - // resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); - // let callCount = 0; - // resultResolver._resolveDiscovery = async (payload, _token?) => { - // traceLog(`resolveDiscovery ${payload}`); - // callCount = callCount + 1; - // actualData = payload; - // return Promise.resolve(); - // }; + // // run unittest discovery + // const discoveryAdapter = new UnittestTestDiscoveryAdapter( + // pythonTestServer, + // configService, + // testOutputChannel.object, + // resultResolver, + // ); + + await discoveryAdapter.discoverTests(workspaceUri).finally(() => { + // verification after discovery is complete + + // 1. Check the status is "success" + assert.strictEqual( + actualData.status, + 'success', + `Expected status to be 'success' instead status is ${actualData.status}`, + ); + // 2. Confirm no errors + assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); + // 3. Confirm tests are found + assert.ok(actualData.tests, 'Expected tests to be present'); + + assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); + }); + }); + + test('unittest discovery adapter large workspace', async () => { + // result resolver and saved data for assertions + let actualData: { + cwd: string; + tests?: unknown; + status: 'success' | 'error'; + error?: string[]; + }; + resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); + let callCount = 0; + resultResolver._resolveDiscovery = async (payload, _token?) => { + traceLog(`resolveDiscovery ${payload}`); + callCount = callCount + 1; + actualData = payload; + return Promise.resolve(); + }; // set settings to work for the given workspace workspaceUri = Uri.parse(rootPathLargeWorkspace); @@ -181,17 +181,17 @@ suite('End to End Tests: test adapters', () => { envVarsService, ); - // await discoveryAdapter.discoverTests(workspaceUri).finally(() => { - // // 1. Check the status is "success" - // assert.strictEqual( - // actualData.status, - // 'success', - // `Expected status to be 'success' instead status is ${actualData.status}`, - // ); - // // 2. Confirm no errors - // assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); - // // 3. Confirm tests are found - // assert.ok(actualData.tests, 'Expected tests to be present'); + await discoveryAdapter.discoverTests(workspaceUri).finally(() => { + // 1. Check the status is "success" + assert.strictEqual( + actualData.status, + 'success', + `Expected status to be 'success' instead status is ${actualData.status}`, + ); + // 2. Confirm no errors + assert.strictEqual(actualData.error, undefined, "Expected no errors in 'error' field"); + // 3. 
Confirm tests are found + assert.ok(actualData.tests, 'Expected tests to be present'); assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); }); @@ -221,20 +221,20 @@ suite('End to End Tests: test adapters', () => { envVarsService, ); - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathSmallWorkspace); - // await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { - // // verification after discovery is complete - - // // 1. Check the status is "success" - // assert.strictEqual( - // actualData.status, - // 'success', - // `Expected status to be 'success' instead status is ${actualData.status}`, - // ); // 2. Confirm no errors - // assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); - // // 3. Confirm tests are found - // assert.ok(actualData.tests, 'Expected tests to be present'); + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathSmallWorkspace); + await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { + // verification after discovery is complete + + // 1. Check the status is "success" + assert.strictEqual( + actualData.status, + 'success', + `Expected status to be 'success' instead status is ${actualData.status}`, + ); // 2. Confirm no errors + assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); + // 3. Confirm tests are found + assert.ok(actualData.tests, 'Expected tests to be present'); assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); }); @@ -264,20 +264,20 @@ suite('End to End Tests: test adapters', () => { envVarsService, ); - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathLargeWorkspace); - - // await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { - // // verification after discovery is complete - // // 1. Check the status is "success" - // assert.strictEqual( - // actualData.status, - // 'success', - // `Expected status to be 'success' instead status is ${actualData.status}`, - // ); // 2. Confirm no errors - // assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); - // // 3. Confirm tests are found - // assert.ok(actualData.tests, 'Expected tests to be present'); + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathLargeWorkspace); + + await discoveryAdapter.discoverTests(workspaceUri, pythonExecFactory).finally(() => { + // verification after discovery is complete + // 1. Check the status is "success" + assert.strictEqual( + actualData.status, + 'success', + `Expected status to be 'success' instead status is ${actualData.status}`, + ); // 2. Confirm no errors + assert.strictEqual(actualData.error?.length, 0, "Expected no errors in 'error' field"); + // 3. 
Confirm tests are found + assert.ok(actualData.tests, 'Expected tests to be present'); assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); }); @@ -380,9 +380,9 @@ suite('End to End Tests: test adapters', () => { return Promise.resolve(); }; - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathLargeWorkspace); - // configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathLargeWorkspace); + configService.getSettings(workspaceUri).testing.unittestArgs = ['-s', '.', '-p', '*test*.py']; // run unittest execution const executionAdapter = new UnittestTestExecutionAdapter( @@ -542,15 +542,15 @@ suite('End to End Tests: test adapters', () => { return Promise.resolve(); }; - // // set workspace to test workspace folder - // workspaceUri = Uri.parse(rootPathLargeWorkspace); + // set workspace to test workspace folder + workspaceUri = Uri.parse(rootPathLargeWorkspace); - // // generate list of test_ids - // const testIds: string[] = []; - // for (let i = 0; i < 2000; i = i + 1) { - // const testId = `${rootPathLargeWorkspace}/test_parameterized_subtest.py::test_odd_even[${i}]`; - // testIds.push(testId); - // } + // generate list of test_ids + const testIds: string[] = []; + for (let i = 0; i < 2000; i = i + 1) { + const testId = `${rootPathLargeWorkspace}/test_parameterized_subtest.py::test_odd_even[${i}]`; + testIds.push(testId); + } // run pytest execution const executionAdapter = new PytestTestExecutionAdapter( From 3405f274858a0f49593798a82a1aba3f32e93cf8 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 25 Sep 2023 11:00:12 -0700 Subject: [PATCH 14/23] fix new line --- src/client/testing/testController/common/server.ts | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/client/testing/testController/common/server.ts b/src/client/testing/testController/common/server.ts index f93fd84cfc30..284773c1cdad 100644 --- a/src/client/testing/testController/common/server.ts +++ b/src/client/testing/testController/common/server.ts @@ -264,12 +264,6 @@ export class PythonTestServer implements ITestServer, Disposable { }); } - result?.proc?.on('exit', (code, signal) => { - if (code !== 0) { - traceError(`Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}`); - } - }); - result?.proc?.on('exit', (code, signal) => { // if the child has testIds then this is a run request spawnOptions?.outputChannel?.append( From b2ab4f72267887271e43a35aaba399dacdff48be Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 25 Sep 2023 12:47:03 -0700 Subject: [PATCH 15/23] edits from feedback and collect discovery --- pythonFiles/tests/pytestadapter/test_discovery.py | 3 ++- pythonFiles/vscode_pytest/run_pytest_script.py | 2 -- .../testController/pytest/pytestExecutionAdapter.ts | 7 ------- 3 files changed, 2 insertions(+), 10 deletions(-) diff --git a/pythonFiles/tests/pytestadapter/test_discovery.py b/pythonFiles/tests/pytestadapter/test_discovery.py index 81e42a5e9ed7..3e918c4fc072 100644 --- a/pythonFiles/tests/pytestadapter/test_discovery.py +++ b/pythonFiles/tests/pytestadapter/test_discovery.py @@ -9,7 +9,8 @@ from . 
import expected_discovery_test_output from .helpers import TEST_DATA_PATH, runner, runner_with_cwd -pytestmark = pytest.mark.skip(reason="Skipping all tests in this module") +# uncomment this line to skip all tests in this module +# pytestmark = pytest.mark.skip(reason="Skipping all tests in this module") def test_import_error(tmp_path): diff --git a/pythonFiles/vscode_pytest/run_pytest_script.py b/pythonFiles/vscode_pytest/run_pytest_script.py index 9f3f94e58844..c3720c8ab8d0 100644 --- a/pythonFiles/vscode_pytest/run_pytest_script.py +++ b/pythonFiles/vscode_pytest/run_pytest_script.py @@ -51,8 +51,6 @@ ) # Clear the buffer as complete JSON object is received buffer = b"" - - # Process the JSON data. print("Received JSON data in run script") break except json.JSONDecodeError: diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index 0ca1ac68a0f4..ef2a0eb86da2 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -124,15 +124,8 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { // need to check what will happen in the exec service is NOT defined and is null const execService = await executionFactory?.createActivatedEnvironment(creationOptions); try { - const colorOff = pytestArgs.includes('--color=no'); // Remove positional test folders and files, we will add as needed per node const testArgs = removePositionalFoldersAndFiles(pytestArgs); - // If the user didn't explicit dictate the color, then add it - if (!colorOff) { - if (!testArgs.includes('--color=yes')) { - testArgs.push('--color=yes'); - } - } // if user has provided `--rootdir` then use that, otherwise add `cwd` if (testArgs.filter((a) => a.startsWith('--rootdir')).length === 0) { From 9a67e6688d6f84c4e699e5d13ce18d80178b8494 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 25 Sep 2023 13:49:32 -0700 Subject: [PATCH 16/23] remove color addition --- .../pytest/pytestExecutionAdapter.unit.test.ts | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts b/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts index c22cf3b46668..a097df654360 100644 --- a/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts +++ b/src/test/testing/testController/pytest/pytestExecutionAdapter.unit.test.ts @@ -171,7 +171,7 @@ suite('pytest test execution adapter', () => { const pathToPythonFiles = path.join(EXTENSION_ROOT_DIR, 'pythonFiles'); const pathToPythonScript = path.join(pathToPythonFiles, 'vscode_pytest', 'run_pytest_script.py'); - const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath, '--color=yes']; + const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath]; const expectedExtraVariables = { PYTHONPATH: pathToPythonFiles, TEST_UUID: 'uuid123', @@ -238,7 +238,7 @@ suite('pytest test execution adapter', () => { const pathToPythonFiles = path.join(EXTENSION_ROOT_DIR, 'pythonFiles'); const pathToPythonScript = path.join(pathToPythonFiles, 'vscode_pytest', 'run_pytest_script.py'); - const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath, '--color=yes']; + const expectedArgs = [pathToPythonScript, '--rootdir', myTestPath]; const expectedExtraVariables = { PYTHONPATH: pathToPythonFiles, TEST_UUID: 'uuid123', @@ -305,13 +305,7 @@ suite('pytest test 
execution adapter', () => { x.launchDebugger( typeMoq.It.is((launchOptions) => { assert.equal(launchOptions.cwd, uri.fsPath); - assert.deepEqual(launchOptions.args, [ - '--rootdir', - myTestPath, - '--color=yes', - '--capture', - 'no', - ]); + assert.deepEqual(launchOptions.args, ['--rootdir', myTestPath, '--capture', 'no']); assert.equal(launchOptions.testProvider, 'pytest'); assert.equal(launchOptions.pytestPort, '12345'); assert.equal(launchOptions.pytestUUID, 'uuid123'); From 6a1f01b35ab489233abfde04cc7be7cc68150ec8 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 9 Oct 2023 09:28:08 -0700 Subject: [PATCH 17/23] fix syntax issues from merge --- src/client/testing/testController/common/server.ts | 8 +++++++- .../testController/pytest/pytestDiscoveryAdapter.ts | 5 ++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/client/testing/testController/common/server.ts b/src/client/testing/testController/common/server.ts index 284773c1cdad..7cca907314f1 100644 --- a/src/client/testing/testController/common/server.ts +++ b/src/client/testing/testController/common/server.ts @@ -16,7 +16,13 @@ import { traceError, traceInfo, traceLog, traceVerbose } from '../../../logging' import { DataReceivedEvent, ITestServer, TestCommandOptions } from './types'; import { ITestDebugLauncher, LaunchOptions } from '../../common/types'; import { UNITTEST_PROVIDER } from '../../common/constants'; -import { createEOTPayload, createExecutionErrorPayload, extractJsonPayload } from './utils'; +import { + createDiscoveryErrorPayload, + createEOTPayload, + createExecutionErrorPayload, + extractJsonPayload, + fixLogLines, +} from './utils'; import { createDeferred } from '../../../common/utils/async'; import { EnvironmentVariables } from '../../../api/types'; diff --git a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts index bc81ffdcae98..93f55752bbf2 100644 --- a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts +++ b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts @@ -18,7 +18,7 @@ import { ITestResultResolver, ITestServer, } from '../common/types'; -import { createDiscoveryErrorPayload, createEOTPayload, createTestingDeferred } from '../common/utils'; +import { createDiscoveryErrorPayload, createEOTPayload, createTestingDeferred, fixLogLines } from '../common/utils'; import { IEnvironmentVariablesProvider } from '../../../common/variables/types'; /** @@ -87,7 +87,6 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter { }; const execService = await executionFactory?.createActivatedEnvironment(creationOptions); // delete UUID following entire discovery finishing. - const deferredExec = createDeferred>(); const execArgs = ['-m', 'pytest', '-p', 'vscode_pytest', '--collect-only'].concat(pytestArgs); traceVerbose(`Running pytest discovery with command: ${execArgs.join(' ')}`); @@ -98,7 +97,7 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter { // Displays output to user and ensure the subprocess doesn't run into buffer overflow. // TODO: after a release, remove discovery output from the "Python Test Log" channel and send it to the "Python" channel instead. 
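// Illustrative sketch (not part of the patch): the change just below (const -> let for
// collectedOutput) exists so the stdout/stderr handlers can append to the accumulator.
// Reduced to a standalone form, the buffering pattern used here looks roughly like this;
// the chunk source is simplified to an array, and fixLines mirrors the fixLogLines helper
// in common/utils.ts:
function fixLines(content: string): string {
    // Normalize \n or \r\n to \r\n and keep a trailing newline, as the helper does.
    return `${content.split(/\r?\n/g).join('\r\n')}\r\n`;
}

function collectOutput(chunks: Array<Buffer | string>): string {
    // Must be `let`, not `const`, because each chunk is appended as it arrives.
    let collected = '';
    for (const chunk of chunks) {
        collected += fixLines(chunk.toString());
    }
    return collected;
}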
- const collectedOutput = ''; + let collectedOutput = ''; result?.proc?.stdout?.on('data', (data) => { const out = fixLogLines(data.toString()); collectedOutput += out; From 9db80b91266a1b9b23010e59bb219efc4b5d5afe Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 9 Oct 2023 09:52:39 -0700 Subject: [PATCH 18/23] fix tests --- pythonFiles/tests/pytestadapter/test_discovery.py | 3 --- pythonFiles/tests/pytestadapter/test_execution.py | 1 - src/client/testing/testController/common/server.ts | 4 ++-- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/pythonFiles/tests/pytestadapter/test_discovery.py b/pythonFiles/tests/pytestadapter/test_discovery.py index 3e918c4fc072..674d92ac0545 100644 --- a/pythonFiles/tests/pytestadapter/test_discovery.py +++ b/pythonFiles/tests/pytestadapter/test_discovery.py @@ -9,9 +9,6 @@ from . import expected_discovery_test_output from .helpers import TEST_DATA_PATH, runner, runner_with_cwd -# uncomment this line to skip all tests in this module -# pytestmark = pytest.mark.skip(reason="Skipping all tests in this module") - def test_import_error(tmp_path): """Test pytest discovery on a file that has a pytest marker but does not import pytest. diff --git a/pythonFiles/tests/pytestadapter/test_execution.py b/pythonFiles/tests/pytestadapter/test_execution.py index 1853b84fef6f..9977fddc805b 100644 --- a/pythonFiles/tests/pytestadapter/test_execution.py +++ b/pythonFiles/tests/pytestadapter/test_execution.py @@ -249,7 +249,6 @@ def test_pytest_execution(test_ids, expected_const): test_ids -- an array of test_ids to run. expected_const -- a dictionary of the expected output from running pytest discovery on the files. """ - print("Test IDs: ", test_ids) args = test_ids actual = runner(args) assert actual diff --git a/src/client/testing/testController/common/server.ts b/src/client/testing/testController/common/server.ts index 7cca907314f1..7029a70989e3 100644 --- a/src/client/testing/testController/common/server.ts +++ b/src/client/testing/testController/common/server.ts @@ -174,7 +174,7 @@ export class PythonTestServer implements ITestServer, Disposable { callback?: () => void, ): Promise { const { uuid } = options; - const isDiscovery = testIds === undefined; + const isDiscovery = (testIds === undefined || testIds.length === 0) && runTestIdPort === undefined; const mutableEnv = { ...env }; const pythonPathParts: string[] = process.env.PYTHONPATH?.split(path.delimiter) ?? 
[]; const pythonPathCommand = [options.cwd, ...pythonPathParts].join(path.delimiter); @@ -314,7 +314,7 @@ export class PythonTestServer implements ITestServer, Disposable { data: JSON.stringify(createEOTPayload(true)), }); } - deferredTillExecClose.resolve({ stdout: '', stderr: '' }); + deferredTillExecClose.resolve(); }); await deferredTillExecClose.promise; } From a7c6c612821889c47fd3e4c26d298550d622340c Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 9 Oct 2023 09:55:56 -0700 Subject: [PATCH 19/23] fix linting --- src/client/testing/testController/common/server.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/testing/testController/common/server.ts b/src/client/testing/testController/common/server.ts index 7029a70989e3..9757523e56c0 100644 --- a/src/client/testing/testController/common/server.ts +++ b/src/client/testing/testController/common/server.ts @@ -297,7 +297,7 @@ export class PythonTestServer implements ITestServer, Disposable { data: JSON.stringify(createEOTPayload(true)), }); } - } else if (code !== 0) { + } else if (code !== 0 && testIds) { // This occurs when we are running the test and there is an error which occurs. traceError( From 2d0830af6122afa2e9e17982b9662a1803414013 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 9 Oct 2023 10:24:23 -0700 Subject: [PATCH 20/23] fix failing tests --- pythonFiles/tests/pytestadapter/test_execution.py | 4 ---- src/test/testing/common/testingAdapter.test.ts | 4 +--- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/pythonFiles/tests/pytestadapter/test_execution.py b/pythonFiles/tests/pytestadapter/test_execution.py index 9977fddc805b..98698d8cdd7c 100644 --- a/pythonFiles/tests/pytestadapter/test_execution.py +++ b/pythonFiles/tests/pytestadapter/test_execution.py @@ -215,10 +215,6 @@ def test_bad_id_error_execution(): ], expected_execution_test_output.doctest_pytest_expected_execution_output, ), - ( - ["test_logging.py::test_logging2", "test_logging.py::test_logging"], - expected_execution_test_output.doctest_pytest_expected_execution_output, - ), ( ["test_logging.py::test_logging2", "test_logging.py::test_logging"], expected_execution_test_output.logging_test_expected_execution_output, diff --git a/src/test/testing/common/testingAdapter.test.ts b/src/test/testing/common/testingAdapter.test.ts index 240d63e7f6b8..5cc1696386ae 100644 --- a/src/test/testing/common/testingAdapter.test.ts +++ b/src/test/testing/common/testingAdapter.test.ts @@ -407,8 +407,6 @@ suite('End to End Tests: test adapters', () => { .callback((output: string) => { collectedOutput += output; traceLog('appendOutput was called with:', output); - // File "/Users/eleanorboyd/vscode-python/src/testTestingRootWkspc/largeWorkspace/test_parameterized_subtest.py" - // FAILED((failures = 1000)); }) .returns(() => false); await executionAdapter @@ -420,7 +418,7 @@ suite('End to End Tests: test adapters', () => { // verify output assert.ok( - collectedOutput.includes('testTestingRootWkspc/largeWorkspace/test_parameterized_subtest.py'), + collectedOutput.includes('test_parameterized_subtest.py'), 'The test string does not contain the correct test name which should be printed', ); assert.ok( From 6ecaa9a337ebd38343617c5ce15c974b5086e507 Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 9 Oct 2023 14:44:36 -0700 Subject: [PATCH 21/23] fix to dynamic print --- .../testController/common/resultResolver.ts | 48 +++++++------------ .../testing/testController/common/server.ts | 19 +++----- 
.../testing/testController/common/utils.ts | 5 ++ .../pytest/pytestDiscoveryAdapter.ts | 20 ++++---- .../pytest/pytestExecutionAdapter.ts | 8 ++-- 5 files changed, 45 insertions(+), 55 deletions(-) diff --git a/src/client/testing/testController/common/resultResolver.ts b/src/client/testing/testController/common/resultResolver.ts index 79cee6452a8c..35ee12eb81e6 100644 --- a/src/client/testing/testController/common/resultResolver.ts +++ b/src/client/testing/testController/common/resultResolver.ts @@ -11,7 +11,7 @@ import { clearAllChildren, createErrorTestItem, getTestCaseNodes } from './testI import { sendTelemetryEvent } from '../../../telemetry'; import { EventName } from '../../../telemetry/constants'; import { splitLines } from '../../../common/stringUtils'; -import { buildErrorNodeOptions, fixLogLines, populateTestTree, splitTestNameWithRegex } from './utils'; +import { buildErrorNodeOptions, populateTestTree, splitTestNameWithRegex } from './utils'; import { Deferred } from '../../../common/utils/async'; export class PythonResultResolver implements ITestResultResolver { @@ -138,15 +138,16 @@ export class PythonResultResolver implements ITestResultResolver { const tempArr: TestItem[] = getTestCaseNodes(i); testCases.push(...tempArr); }); + const testItem = rawTestExecData.result[keyTemp]; - if (rawTestExecData.result[keyTemp].outcome === 'error') { - const rawTraceback = rawTestExecData.result[keyTemp].traceback ?? ''; + if (testItem.outcome === 'error') { + const rawTraceback = testItem.traceback ?? ''; const traceback = splitLines(rawTraceback, { trim: false, removeEmptyEntries: true, }).join('\r\n'); - const text = `${rawTestExecData.result[keyTemp].test} failed with error: ${ - rawTestExecData.result[keyTemp].message ?? rawTestExecData.result[keyTemp].outcome + const text = `${testItem.test} failed with error: ${ + testItem.message ?? testItem.outcome }\r\n${traceback}\r\n`; const message = new TestMessage(text); @@ -157,23 +158,17 @@ export class PythonResultResolver implements ITestResultResolver { if (indiItem.uri && indiItem.range) { message.location = new Location(indiItem.uri, indiItem.range); runInstance.errored(indiItem, message); - runInstance.appendOutput(fixLogLines(text)); } } }); - } else if ( - rawTestExecData.result[keyTemp].outcome === 'failure' || - rawTestExecData.result[keyTemp].outcome === 'passed-unexpected' - ) { - const rawTraceback = rawTestExecData.result[keyTemp].traceback ?? ''; + } else if (testItem.outcome === 'failure' || testItem.outcome === 'passed-unexpected') { + const rawTraceback = testItem.traceback ?? ''; const traceback = splitLines(rawTraceback, { trim: false, removeEmptyEntries: true, }).join('\r\n'); - const text = `${rawTestExecData.result[keyTemp].test} failed: ${ - rawTestExecData.result[keyTemp].message ?? rawTestExecData.result[keyTemp].outcome - }\r\n${traceback}\r\n`; + const text = `${testItem.test} failed: ${testItem.message ?? testItem.outcome}\r\n${traceback}\r\n`; const message = new TestMessage(text); // note that keyTemp is a runId for unittest library... 
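// Illustrative sketch (not part of the patch): the hunks above hoist
// rawTestExecData.result[keyTemp] into a local `testItem` and build the TestMessage text
// from it. A standalone version of that formatting step for the failure case, with the
// record reduced to the fields used here (type and function names are assumptions for
// illustration):
interface RawOutcomeItem {
    test: string;
    outcome: string;
    message?: string;
    traceback?: string;
}

function formatFailureText(item: RawOutcomeItem): string {
    // Keep non-empty traceback lines, normalize separators to \r\n, and fall back to the
    // outcome when no explicit message was reported - matching the text fed to TestMessage.
    const traceback = (item.traceback ?? '')
        .split(/\r?\n/)
        .filter((line) => line.length > 0)
        .join('\r\n');
    return `${item.test} failed: ${item.message ?? item.outcome}\r\n${traceback}\r\n`;
}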
@@ -184,14 +179,10 @@ export class PythonResultResolver implements ITestResultResolver { if (indiItem.uri && indiItem.range) { message.location = new Location(indiItem.uri, indiItem.range); runInstance.failed(indiItem, message); - runInstance.appendOutput(fixLogLines(text)); } } }); - } else if ( - rawTestExecData.result[keyTemp].outcome === 'success' || - rawTestExecData.result[keyTemp].outcome === 'expected-failure' - ) { + } else if (testItem.outcome === 'success' || testItem.outcome === 'expected-failure') { const grabTestItem = this.runIdToTestItem.get(keyTemp); const grabVSid = this.runIdToVSid.get(keyTemp); if (grabTestItem !== undefined) { @@ -203,7 +194,7 @@ export class PythonResultResolver implements ITestResultResolver { } }); } - } else if (rawTestExecData.result[keyTemp].outcome === 'skipped') { + } else if (testItem.outcome === 'skipped') { const grabTestItem = this.runIdToTestItem.get(keyTemp); const grabVSid = this.runIdToVSid.get(keyTemp); if (grabTestItem !== undefined) { @@ -215,11 +206,11 @@ export class PythonResultResolver implements ITestResultResolver { } }); } - } else if (rawTestExecData.result[keyTemp].outcome === 'subtest-failure') { + } else if (testItem.outcome === 'subtest-failure') { // split on [] or () based on how the subtest is setup. const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp); const parentTestItem = this.runIdToTestItem.get(parentTestCaseId); - const data = rawTestExecData.result[keyTemp]; + const data = testItem; // find the subtest's parent test item if (parentTestItem) { const subtestStats = this.subTestStats.get(parentTestCaseId); @@ -230,20 +221,19 @@ export class PythonResultResolver implements ITestResultResolver { failed: 1, passed: 0, }); - runInstance.appendOutput(fixLogLines(`${parentTestCaseId} [subtests]:\r\n`)); // clear since subtest items don't persist between runs clearAllChildren(parentTestItem); } const subTestItem = this.testController?.createTestItem(subtestId, subtestId); - runInstance.appendOutput(fixLogLines(`${subtestId} Failed\r\n`)); // create a new test item for the subtest if (subTestItem) { const traceback = data.traceback ?? ''; - const text = `${data.subtest} Failed: ${data.message ?? data.outcome}\r\n${traceback}\r\n`; - runInstance.appendOutput(fixLogLines(text)); + const text = `${data.subtest} failed: ${ + testItem.message ?? testItem.outcome + }\r\n${traceback}\r\n`; parentTestItem.children.add(subTestItem); runInstance.started(subTestItem); - const message = new TestMessage(rawTestExecData?.result[keyTemp].message ?? ''); + const message = new TestMessage(text); if (parentTestItem.uri && parentTestItem.range) { message.location = new Location(parentTestItem.uri, parentTestItem.range); } @@ -254,7 +244,7 @@ export class PythonResultResolver implements ITestResultResolver { } else { throw new Error('Parent test item not found'); } - } else if (rawTestExecData.result[keyTemp].outcome === 'subtest-success') { + } else if (testItem.outcome === 'subtest-success') { // split on [] or () based on how the subtest is setup. 
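// Illustrative sketch (not part of the patch): splitTestNameWithRegex itself is not shown
// in this diff, so the regex below is only an assumed approximation of "split on [] or ()",
// to show how a subtest run id might separate into a parent test case id and a subtest label.
// e.g. "test_odd_even (i=1)" -> ["test_odd_even", "(i=1)"]
//      "test_adding[3+5-8]"  -> ["test_adding", "[3+5-8]"]
function splitSubtestId(runId: string): [string, string] {
    const match = runId.match(/^(.*?)\s*(\[.*\]|\(.*\))$/);
    // Fall back to treating the whole id as both parts when no bracketed suffix is found;
    // the real helper's behaviour may differ.
    return match ? [match[1], match[2]] : [runId, runId];
}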
const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp); const parentTestItem = this.runIdToTestItem.get(parentTestCaseId); @@ -266,7 +256,6 @@ export class PythonResultResolver implements ITestResultResolver { subtestStats.passed += 1; } else { this.subTestStats.set(parentTestCaseId, { failed: 0, passed: 1 }); - runInstance.appendOutput(fixLogLines(`${parentTestCaseId} [subtests]:\r\n`)); // clear since subtest items don't persist between runs clearAllChildren(parentTestItem); } @@ -276,7 +265,6 @@ export class PythonResultResolver implements ITestResultResolver { parentTestItem.children.add(subTestItem); runInstance.started(subTestItem); runInstance.passed(subTestItem); - runInstance.appendOutput(fixLogLines(`${subtestId} Passed\r\n`)); } else { throw new Error('Unable to create new child node for subtest'); } diff --git a/src/client/testing/testController/common/server.ts b/src/client/testing/testController/common/server.ts index 9757523e56c0..e7d418c9e28c 100644 --- a/src/client/testing/testController/common/server.ts +++ b/src/client/testing/testController/common/server.ts @@ -21,7 +21,7 @@ import { createEOTPayload, createExecutionErrorPayload, extractJsonPayload, - fixLogLines, + fixLogLinesNoTrailing, } from './utils'; import { createDeferred } from '../../../common/utils/async'; import { EnvironmentVariables } from '../../../api/types'; @@ -247,24 +247,23 @@ export class PythonTestServer implements ITestServer, Disposable { // Displays output to user and ensure the subprocess doesn't run into buffer overflow. // TODO: after a release, remove discovery output from the "Python Test Log" channel and send it to the "Python" channel instead. // TODO: after a release, remove run output from the "Python Test Log" channel and send it to the "Test Result" channel instead. - let collectedOutput = ''; if (isDiscovery) { result?.proc?.stdout?.on('data', (data) => { - const out = fixLogLines(data.toString()); - collectedOutput += out; + const out = fixLogLinesNoTrailing(data.toString()); + spawnOptions?.outputChannel?.append(`${out}`); }); result?.proc?.stderr?.on('data', (data) => { - const out = fixLogLines(data.toString()); - collectedOutput += out; + const out = fixLogLinesNoTrailing(data.toString()); + spawnOptions?.outputChannel?.append(`${out}`); }); } else { result?.proc?.stdout?.on('data', (data) => { - const out = fixLogLines(data.toString()); + const out = fixLogLinesNoTrailing(data.toString()); runInstance?.appendOutput(`${out}`); spawnOptions?.outputChannel?.append(out); }); result?.proc?.stderr?.on('data', (data) => { - const out = fixLogLines(data.toString()); + const out = fixLogLinesNoTrailing(data.toString()); runInstance?.appendOutput(`${out}`); spawnOptions?.outputChannel?.append(out); }); @@ -278,10 +277,6 @@ export class PythonTestServer implements ITestServer, Disposable { ' The "Python Test Log" channel will be deprecated within the next month. See ___ for details.', ); if (isDiscovery) { - // Collect all discovery output and log it at process finish to avoid dividing it between log lines. 
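// Illustrative sketch (not part of the patch): the deletions just below drop the
// buffer-and-log-at-exit handling of discovery output in favour of the per-chunk appends
// added earlier in this hunk. The isDiscovery flag guarding this block was tightened
// earlier in the series; reduced to a standalone predicate (parameter types are assumed
// for illustration) it reads:
function isDiscoveryRequest(testIds: string[] | undefined, runTestIdPort: string | undefined): boolean {
    // A request with no test ids and no port for streaming run results is treated as discovery.
    return (testIds === undefined || testIds.length === 0) && runTestIdPort === undefined;
}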
- traceLog(`\r\n${collectedOutput}`); - spawnOptions?.outputChannel?.append(`${collectedOutput}`); - if (code !== 0) { // This occurs when we are running discovery traceError( diff --git a/src/client/testing/testController/common/utils.ts b/src/client/testing/testController/common/utils.ts index f5f416529c42..0846387fc01d 100644 --- a/src/client/testing/testController/common/utils.ts +++ b/src/client/testing/testController/common/utils.ts @@ -23,6 +23,11 @@ export function fixLogLines(content: string): string { const lines = content.split(/\r?\n/g); return `${lines.join('\r\n')}\r\n`; } + +export function fixLogLinesNoTrailing(content: string): string { + const lines = content.split(/\r?\n/g); + return `${lines.join('\r\n')}`; +} export interface IJSONRPCData { extractedJSON: string; remainingRawData: string; diff --git a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts index 93f55752bbf2..d1e472d88382 100644 --- a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts +++ b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts @@ -10,7 +10,7 @@ import { import { IConfigurationService, ITestOutputChannel } from '../../../common/types'; import { Deferred, createDeferred } from '../../../common/utils/async'; import { EXTENSION_ROOT_DIR } from '../../../constants'; -import { traceError, traceInfo, traceLog, traceVerbose } from '../../../logging'; +import { traceError, traceInfo, traceVerbose } from '../../../logging'; import { DataReceivedEvent, DiscoveredTestPayload, @@ -18,7 +18,12 @@ import { ITestResultResolver, ITestServer, } from '../common/types'; -import { createDiscoveryErrorPayload, createEOTPayload, createTestingDeferred, fixLogLines } from '../common/utils'; +import { + createDiscoveryErrorPayload, + createEOTPayload, + createTestingDeferred, + fixLogLinesNoTrailing, +} from '../common/utils'; import { IEnvironmentVariablesProvider } from '../../../common/variables/types'; /** @@ -97,21 +102,18 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter { // Displays output to user and ensure the subprocess doesn't run into buffer overflow. // TODO: after a release, remove discovery output from the "Python Test Log" channel and send it to the "Python" channel instead. - let collectedOutput = ''; result?.proc?.stdout?.on('data', (data) => { - const out = fixLogLines(data.toString()); - collectedOutput += out; + const out = fixLogLinesNoTrailing(data.toString()); + traceInfo(out); + spawnOptions?.outputChannel?.append(`${out}`); }); result?.proc?.stderr?.on('data', (data) => { - const out = fixLogLines(data.toString()); - collectedOutput += out; + const out = fixLogLinesNoTrailing(data.toString()); traceError(out); spawnOptions?.outputChannel?.append(`${out}`); }); result?.proc?.on('exit', (code, signal) => { // Collect all discovery output and log it at process finish to avoid dividing it between log lines. - traceLog(`\r\n${collectedOutput}`); - spawnOptions?.outputChannel?.append(`${collectedOutput}`); this.outputChannel?.append( 'Starting now, all test run output will be sent to the Test Result panel' + ' and test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' 
+ diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index ef2a0eb86da2..db427262d330 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -192,13 +192,13 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { // Displays output to user and ensure the subprocess doesn't run into buffer overflow. // TODO: after a release, remove run output from the "Python Test Log" channel and send it to the "Test Result" channel instead. result?.proc?.stdout?.on('data', (data) => { - const out = utils.fixLogLines(data.toString()); - runInstance?.appendOutput(`${out}`); + const out = utils.fixLogLinesNoTrailing(data.toString()); + runInstance?.appendOutput(out); this.outputChannel?.append(out); }); result?.proc?.stderr?.on('data', (data) => { - const out = utils.fixLogLines(data.toString()); - runInstance?.appendOutput(`${out}`); + const out = utils.fixLogLinesNoTrailing(data.toString()); + runInstance?.appendOutput(out); this.outputChannel?.append(out); }); result?.proc?.on('exit', (code, signal) => { From f9bc7551478c0a862e7547e88ba5213bbc266f0b Mon Sep 17 00:00:00 2001 From: eleanorjboyd Date: Mon, 9 Oct 2023 14:51:18 -0700 Subject: [PATCH 22/23] switch to using constant for deprecation msg --- src/client/testing/testController/common/server.ts | 9 ++++----- src/client/testing/testController/common/utils.ts | 5 +++++ .../testController/pytest/pytestDiscoveryAdapter.ts | 8 ++------ .../testController/pytest/pytestExecutionAdapter.ts | 6 +----- src/test/testing/common/testingAdapter.test.ts | 7 ------- 5 files changed, 12 insertions(+), 23 deletions(-) diff --git a/src/client/testing/testController/common/server.ts b/src/client/testing/testController/common/server.ts index e7d418c9e28c..e496860526e4 100644 --- a/src/client/testing/testController/common/server.ts +++ b/src/client/testing/testController/common/server.ts @@ -17,6 +17,7 @@ import { DataReceivedEvent, ITestServer, TestCommandOptions } from './types'; import { ITestDebugLauncher, LaunchOptions } from '../../common/types'; import { UNITTEST_PROVIDER } from '../../common/constants'; import { + MESSAGE_ON_TESTING_OUTPUT_MOVE, createDiscoveryErrorPayload, createEOTPayload, createExecutionErrorPayload, @@ -251,10 +252,12 @@ export class PythonTestServer implements ITestServer, Disposable { result?.proc?.stdout?.on('data', (data) => { const out = fixLogLinesNoTrailing(data.toString()); spawnOptions?.outputChannel?.append(`${out}`); + traceInfo(out); }); result?.proc?.stderr?.on('data', (data) => { const out = fixLogLinesNoTrailing(data.toString()); spawnOptions?.outputChannel?.append(`${out}`); + traceError(out); }); } else { result?.proc?.stdout?.on('data', (data) => { @@ -271,11 +274,7 @@ export class PythonTestServer implements ITestServer, Disposable { result?.proc?.on('exit', (code, signal) => { // if the child has testIds then this is a run request - spawnOptions?.outputChannel?.append( - 'Starting now, all test run output will be sent to the Test Result panel' + - ' and test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' + - ' The "Python Test Log" channel will be deprecated within the next month. 
See ___ for details.', - ); + spawnOptions?.outputChannel?.append(MESSAGE_ON_TESTING_OUTPUT_MOVE); if (isDiscovery) { if (code !== 0) { // This occurs when we are running discovery diff --git a/src/client/testing/testController/common/utils.ts b/src/client/testing/testController/common/utils.ts index 0846387fc01d..4502708063e9 100644 --- a/src/client/testing/testController/common/utils.ts +++ b/src/client/testing/testController/common/utils.ts @@ -47,6 +47,11 @@ export interface ExtractOutput { export const JSONRPC_UUID_HEADER = 'Request-uuid'; export const JSONRPC_CONTENT_LENGTH_HEADER = 'Content-Length'; export const JSONRPC_CONTENT_TYPE_HEADER = 'Content-Type'; +export const MESSAGE_ON_TESTING_OUTPUT_MOVE = + 'Starting now, all test run output will be sent to the Test Result panel' + + ' and test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' + + ' The "Python Test Log" channel will be deprecated within the next month.' + + 'See https://github.com/microsoft/vscode-python/wiki/New-Method-for-Output-Handling-in-Python-Testing for details.'; export function createTestingDeferred(): Deferred { return createDeferred(); diff --git a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts index d1e472d88382..92bd9f04834e 100644 --- a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts +++ b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts @@ -19,6 +19,7 @@ import { ITestServer, } from '../common/types'; import { + MESSAGE_ON_TESTING_OUTPUT_MOVE, createDiscoveryErrorPayload, createEOTPayload, createTestingDeferred, @@ -113,12 +114,7 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter { spawnOptions?.outputChannel?.append(`${out}`); }); result?.proc?.on('exit', (code, signal) => { - // Collect all discovery output and log it at process finish to avoid dividing it between log lines. - this.outputChannel?.append( - 'Starting now, all test run output will be sent to the Test Result panel' + - ' and test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' + - ' The "Python Test Log" channel will be deprecated within the next month. See ___ for details.', - ); + this.outputChannel?.append(MESSAGE_ON_TESTING_OUTPUT_MOVE); if (code !== 0) { traceError(`Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}.`); } diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index db427262d330..5c04aabab845 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -202,11 +202,7 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { this.outputChannel?.append(out); }); result?.proc?.on('exit', (code, signal) => { - this.outputChannel?.append( - 'Starting now, all test run output will be sent to the Test Result panel' + - ' and test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' + - ' The "Python Test Log" channel will be deprecated within the next month. 
See ___ for details.', - ); + this.outputChannel?.append(utils.MESSAGE_ON_TESTING_OUTPUT_MOVE); if (code !== 0 && testIds) { traceError(`Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}.`); } diff --git a/src/test/testing/common/testingAdapter.test.ts b/src/test/testing/common/testingAdapter.test.ts index 5cc1696386ae..519a60e3f0f7 100644 --- a/src/test/testing/common/testingAdapter.test.ts +++ b/src/test/testing/common/testingAdapter.test.ts @@ -126,13 +126,6 @@ suite('End to End Tests: test adapters', () => { resultResolver, envVarsService, ); - // // run unittest discovery - // const discoveryAdapter = new UnittestTestDiscoveryAdapter( - // pythonTestServer, - // configService, - // testOutputChannel.object, - // resultResolver, - // ); await discoveryAdapter.discoverTests(workspaceUri).finally(() => { // verification after discovery is complete From 1bfc34b9d9abf076dc471be0145c7723db58da45 Mon Sep 17 00:00:00 2001 From: Eleanor Boyd Date: Tue, 10 Oct 2023 08:42:10 -0700 Subject: [PATCH 23/23] Update src/client/testing/testController/common/utils.ts Co-authored-by: Courtney Webster <60238438+cwebster-99@users.noreply.github.com> --- src/client/testing/testController/common/utils.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/testing/testController/common/utils.ts b/src/client/testing/testController/common/utils.ts index 4502708063e9..5022fa5a44e6 100644 --- a/src/client/testing/testController/common/utils.ts +++ b/src/client/testing/testController/common/utils.ts @@ -48,8 +48,8 @@ export const JSONRPC_UUID_HEADER = 'Request-uuid'; export const JSONRPC_CONTENT_LENGTH_HEADER = 'Content-Length'; export const JSONRPC_CONTENT_TYPE_HEADER = 'Content-Type'; export const MESSAGE_ON_TESTING_OUTPUT_MOVE = - 'Starting now, all test run output will be sent to the Test Result panel' + - ' and test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' + + 'Starting now, all test run output will be sent to the Test Result panel,' + + ' while test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' + ' The "Python Test Log" channel will be deprecated within the next month.' + 'See https://github.com/microsoft/vscode-python/wiki/New-Method-for-Output-Handling-in-Python-Testing for details.';
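
The behavioral core of this series is the switch from fixLogLines to fixLogLinesNoTrailing when piping subprocess output to runInstance.appendOutput and the output channels: each chunk is normalized to CRLF line endings (which the Test Result terminal renders) without appending a trailing CRLF, so stdout/stderr chunks can be concatenated as they stream in without injecting spurious line breaks. The sketch below is a minimal, self-contained illustration of that difference; the two helper bodies mirror the ones added to src/client/testing/testController/common/utils.ts (minus the export keyword), while the sample chunk strings and the console.log harness are invented for illustration and are not part of the patches.

// Sketch only: the helpers are copied from common/utils.ts; the chunk data is hypothetical.
function fixLogLines(content: string): string {
    const lines = content.split(/\r?\n/g);
    return `${lines.join('\r\n')}\r\n`;
}

function fixLogLinesNoTrailing(content: string): string {
    const lines = content.split(/\r?\n/g);
    return `${lines.join('\r\n')}`;
}

// A test process writes "line1\nline2\n", but the stream happens to deliver it in
// two chunks that split "line2" in half.
const chunks = ['line1\nli', 'ne2\n'];

// Old behavior: every chunk gains a trailing CRLF, so the chunk boundary becomes an
// extra line break and the output picks up a blank line at the end.
const oldOutput = chunks.map(fixLogLines).join('');
console.log(JSON.stringify(oldOutput)); // "line1\r\nli\r\nne2\r\n\r\n"

// New behavior: chunks are only CRLF-normalized, so concatenation reproduces the
// original two lines exactly as appendOutput / outputChannel.append receive them.
const newOutput = chunks.map(fixLogLinesNoTrailing).join('');
console.log(JSON.stringify(newOutput)); // "line1\r\nline2\r\n"

This is why the adapters above pass the no-trailing variant to runInstance?.appendOutput(out) for run output while still appending the same text to the "Python Test Log" channel during the deprecation window described by MESSAGE_ON_TESTING_OUTPUT_MOVE.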