This repository was archived by the owner on Apr 30, 2020. It is now read-only.

Parallelize the integration tests #62

Merged
merged 4 commits on May 30, 2018
1 change: 1 addition & 0 deletions .gitignore
@@ -6,6 +6,7 @@ __pycache__
/artifacts-*/
/build/
/dist/
/mockroots/
/.tox/
/.eggs/
.cache
5 changes: 3 additions & 2 deletions mock.cfg
@@ -1,7 +1,8 @@
include('/etc/mock/fedora-27-x86_64.cfg')
include('/etc/mock/fedora-28-x86_64.cfg')

config_opts['chroot_setup_cmd'] = 'install ansible dnf'
config_opts['use_host_resolv'] = True
config_opts['rpmbuild_networking'] = True
config_opts['use_nspawn'] = False
config_opts['root'] = 'fedora-27-x86_64-taskotron'
config_opts['root'] = 'taskotron-python-versions-master'
config_opts['plugin_conf']['root_cache_opts']['dir'] = "%(cache_topdir)s/taskotron-python-versions/root_cache/"
Member Author


this makes sure all our mock roots share one root cache
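
A minimal sketch of why that works, assuming mock's usual %-style interpolation of cache paths and hypothetical xdist worker ids: the pinned root_cache dir above never mentions %(root)s, so every per-worker chroot resolves to the same cache directory.

```python
# A minimal sketch (hypothetical worker ids and cache_topdir) of the shared
# root cache: because %(root)s is absent from the pinned dir, the expanded
# path is identical for every per-worker chroot.
CACHE_DIR = '%(cache_topdir)s/taskotron-python-versions/root_cache/'

def cache_dir_for(worker_id, cache_topdir='/var/cache/mock'):
    """Expand the cache dir the way mock's %-interpolation would."""
    root = 'taskotron-python-versions-{}'.format(worker_id)
    # 'root' is unused by CACHE_DIR, so the result does not depend on it.
    return CACHE_DIR % {'cache_topdir': cache_topdir, 'root': root}

assert cache_dir_for('gw0') == cache_dir_for('gw1') == \
    '/var/cache/mock/taskotron-python-versions/root_cache/'
```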

18 changes: 18 additions & 0 deletions test/integration/conftest.py
@@ -1,4 +1,22 @@
from xdist.scheduler import LoadScopeScheduling


def pytest_addoption(parser):
parser.addoption('--fake', action='store_true', default=False,
help='don\'t run the code, reuse the result from '
'last tests')


class FixtureScheduling(LoadScopeScheduling):
"""Split by [] value. This is very hackish and might blow up any time!
See https://github.com/pytest-dev/pytest-xdist/issues/18
"""
def _split_scope(self, nodeid):
if '[' in nodeid:
parameters = nodeid.rsplit('[')[-1].replace(']', '')
return parameters.split('-')[0]
return None


def pytest_xdist_make_scheduler(log, config):
return FixtureScheduling(config, log)
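
A minimal sketch of what this scheduler does with pytest-xdist node ids (the ids below are illustrative): every test parametrized on the same package fixture lands in one scope, and therefore on one worker, so each package's mock/Ansible run happens only once.

```python
# Stand-alone copy of the _split_scope logic above, run on illustrative ids.
def split_scope(nodeid):
    if '[' in nodeid:
        parameters = nodeid.rsplit('[')[-1].replace(']', '')
        return parameters.split('-')[0]
    return None

assert split_scope('test_integration.py::test_two_three_passed[eric]') == 'eric'
assert split_scope('test_integration.py::test_artifact_is_the_same[six-two_three]') == 'six'
assert split_scope('test_integration.py::test_not_parametrized') is None
```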
148 changes: 91 additions & 57 deletions test/integration/test_integration.py
@@ -1,6 +1,7 @@
from collections import namedtuple
import contextlib
import glob
import os
import pprint
import shutil
import subprocess
@@ -16,14 +17,27 @@


class MockEnv:
'''Use this to work with mock. Multiple instances are not safe.'''
'''Use this to work with mock. Multiple concurrent instances are safe.'''
mock = ['mock', '-r', './mock.cfg']

def __init__(self):
def __init__(self, worker_id):
self.worker_id = worker_id
self._run(['--init'], check=True)

@property
def root(self):
return 'taskotron-python-versions-{}'.format(self.worker_id)

@property
def rootdir(self):
return os.path.join(os.path.abspath('.'), 'mockroots', self.root)

def _run(self, what, **kwargs):
return subprocess.run(self.mock + what, **kwargs)
command = list(self.mock) # needs a copy not to change in place
command.append('--config-opts=root={}'.format(self.root))
command.append('--rootdir={}'.format(self.rootdir))
command.extend(what)
return subprocess.run(command, **kwargs)

def copy_in(self, files):
self._run(['--copyin'] + files + ['/'], check=True)
@@ -52,12 +66,12 @@ def copy_out(self, directory, target, *, clean_target=False):


@pytest.fixture(scope="session")
def mock(request):
def mock(worker_id, request):
'''Setup a mock we can run Ansible tasks in under root'''
if request.config.getoption('--fake'):
mockenv = FakeMockEnv()
mockenv = FakeMockEnv(worker_id)
else:
mockenv = MockEnv()
mockenv = MockEnv(worker_id)
files = ['taskotron_python_versions'] + glob.glob('*.py') + ['tests.yml']
mockenv.copy_in(files)
yield mockenv
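
For reference, the worker_id argument here is the fixture pytest-xdist provides (values like 'gw0', 'gw1', or 'master' for a non-distributed run). A rough sketch, with hypothetical paths, of the mock command each worker ends up composing via MockEnv._run:

```python
# Hypothetical worker id and repository path; flags match the diff above.
import os

worker_id = 'gw0'
root = 'taskotron-python-versions-{}'.format(worker_id)
rootdir = os.path.join(os.path.abspath('.'), 'mockroots', root)

command = ['mock', '-r', './mock.cfg',
           '--config-opts=root={}'.format(root),
           '--rootdir={}'.format(rootdir),
           '--init']
# Each worker builds and reuses its own chroot under ./mockroots/, while the
# shared root cache from mock.cfg keeps repeated --init runs cheap.
print(command)
```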
@@ -159,17 +173,19 @@ def results(request):
_nodejs = fixtures_factory('nodejs-semver-5.1.1-2.fc26')
nodejs = fixtures_factory('_nodejs')

_bucky = fixtures_factory('python-bucky-2.2.2-9.fc28')
bucky = fixtures_factory('_bucky')
_pycallgraph = fixtures_factory('python-pycallgraph-0.5.1-13.fc28')
pycallgraph = fixtures_factory('_pycallgraph')

_jsonrpc = fixtures_factory('jsonrpc-glib-3.27.4-1.fc28')
jsonrpc = fixtures_factory('_jsonrpc')


@pytest.mark.parametrize('results', ('eric', 'six', 'admesh', 'tracer',
'copr', 'epub', 'twine', 'yum',
'vdirsyncer', 'docutils', 'nodejs',
'bucky', 'jsonrpc'))
def parametrize(*fixtrues):
return pytest.mark.parametrize('results', fixtrues)


@parametrize('eric', 'six', 'admesh', 'tracer', 'copr', 'epub', 'twine', 'yum',
'vdirsyncer', 'docutils', 'nodejs', 'pycallgraph', 'jsonrpc')
def test_number_of_results(results, request):
# getting a fixture by name
# https://github.com/pytest-dev/pytest/issues/349#issuecomment-112203541
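
The remaining hunks all apply the same transformation, sketched below with stand-in fixtures: tests are parametrized on fixture names and resolve them lazily via request.getfixturevalue, which keeps the package name in the node id ([eric], [six], ...) for FixtureScheduling to group on.

```python
# A self-contained sketch of the pattern; 'eric' and 'six' are stand-ins,
# not the real session fixtures built by fixtures_factory.
import pytest

@pytest.fixture(scope='session')
def eric():
    return {'dist.python-versions': 'PASSED'}

@pytest.fixture(scope='session')
def six():
    return {'dist.python-versions': 'PASSED'}

def parametrize(*fixtures):
    return pytest.mark.parametrize('results', fixtures)

@parametrize('eric', 'six')
def test_overall_outcome(results, request):
    # 'results' is a fixture name; resolving it lazily keeps the package
    # name in the node id, which the scheduler uses to group tests.
    results = request.getfixturevalue(results)
    assert results['dist.python-versions'] == 'PASSED'
```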
@@ -179,19 +195,19 @@ def test_number_of_results(results, request):
assert len(results) == 8


@pytest.mark.parametrize('results', ('eric', 'six', 'admesh',
'copr', 'epub', 'twine',
'bucky'))
@parametrize('eric', 'six', 'admesh', 'copr', 'epub', 'twine', 'pycallgraph')
def test_two_three_passed(results, request):
results = request.getfixturevalue(results)
assert results['dist.python-versions.two_three'].outcome == 'PASSED'


def test_two_three_failed(tracer):
assert tracer['dist.python-versions.two_three'].outcome == 'FAILED'
@parametrize('tracer')
def test_two_three_failed(results, request):
results = request.getfixturevalue(results)
assert results['dist.python-versions.two_three'].outcome == 'FAILED'


@pytest.mark.parametrize('results', ('tracer', 'copr', 'admesh'))
@parametrize('tracer', 'copr', 'admesh')
def test_one_failed_result_is_total_failed(results, request):
results = request.getfixturevalue(results)
assert results['dist.python-versions'].outcome == 'FAILED'
@@ -207,8 +223,10 @@ def test_artifact_is_the_same(results, task, request):
results['dist.python-versions.' + task].artifact)


def test_artifact_contains_two_three_and_looks_as_expected(tracer):
result = tracer['dist.python-versions.two_three']
@parametrize('tracer')
def test_artifact_contains_two_three_and_looks_as_expected(results, request):
results = request.getfixturevalue(results)
result = results['dist.python-versions.two_three']
with open(result.artifact) as f:
artifact = f.read()

@@ -220,21 +238,23 @@ def test_artifact_contains_two_three_and_looks_as_expected(tracer):
''').strip().format(result.item) in artifact.strip()


@pytest.mark.parametrize('results', ('eric', 'epub', 'twine', 'vdirsyncer',
'bucky'))
@parametrize('eric', 'epub', 'twine', 'vdirsyncer', 'pycallgraph')
def test_naming_scheme_passed(results, request):
results = request.getfixturevalue(results)
assert results['dist.python-versions.naming_scheme'].outcome == 'PASSED'


@pytest.mark.parametrize('results', ('copr', 'six', 'admesh'))
@parametrize('copr', 'six', 'admesh')
def test_naming_scheme_failed(results, request):
results = request.getfixturevalue(results)
assert results['dist.python-versions.naming_scheme'].outcome == 'FAILED'


def test_artifact_contains_naming_scheme_and_looks_as_expected(copr):
result = copr['dist.python-versions.naming_scheme']
@parametrize('copr')
def test_artifact_contains_naming_scheme_and_looks_as_expected(results,
request):
results = request.getfixturevalue(results)
result = results['dist.python-versions.naming_scheme']
with open(result.artifact) as f:
artifact = f.read()

@@ -244,23 +264,25 @@ def test_artifact_contains_naming_scheme_and_looks_as_expected(copr):
""").strip().format(result.item) in artifact.strip()


@pytest.mark.parametrize('results', ('eric', 'twine', 'six'))
@parametrize('eric', 'twine', 'six')
def test_requires_naming_scheme_passed(results, request):
results = request.getfixturevalue(results)
task_result = results['dist.python-versions.requires_naming_scheme']
assert task_result.outcome == 'PASSED'


@pytest.mark.parametrize('results', ('admesh', 'copr'))
@parametrize('admesh', 'copr')
def test_requires_naming_scheme_failed(results, request):
results = request.getfixturevalue(results)
task_result = results['dist.python-versions.requires_naming_scheme']
assert task_result.outcome == 'FAILED'


@parametrize('tracer')
def test_artifact_contains_requires_naming_scheme_and_looks_as_expected(
tracer):
result = tracer['dist.python-versions.requires_naming_scheme']
results, request):
results = request.getfixturevalue(results)
result = results['dist.python-versions.requires_naming_scheme']
with open(result.artifact) as f:
artifact = f.read()

@@ -281,8 +303,10 @@ def test_artifact_contains_requires_naming_scheme_and_looks_as_expected(
""").strip() in artifact.strip()


def test_requires_naming_scheme_contains_python(yum):
result = yum['dist.python-versions.requires_naming_scheme']
@parametrize('yum')
def test_requires_naming_scheme_contains_python(results, request):
results = request.getfixturevalue(results)
result = results['dist.python-versions.requires_naming_scheme']
with open(result.artifact) as f:
artifact = f.read()

@@ -291,24 +315,26 @@ def test_requires_naming_scheme_contains_python(yum):
assert 'python (python2 is available)' in artifact.strip()


@pytest.mark.parametrize('results', ('eric', 'six', 'admesh', 'tracer',
'copr', 'epub', 'twine', 'bucky'))
@parametrize('eric', 'six', 'admesh', 'tracer',
'copr', 'epub', 'twine', 'pycallgraph')
def test_executables_passed(results, request):
results = request.getfixturevalue(results)
task_result = results['dist.python-versions.executables']
assert task_result.outcome == 'PASSED'


@pytest.mark.parametrize('results', ('docutils',))
@parametrize('docutils')
def test_executables_failed(results, request):
results = request.getfixturevalue(results)
task_result = results['dist.python-versions.executables']
assert task_result.outcome == 'FAILED'


@parametrize('docutils')
def test_artifact_contains_executables_and_looks_as_expected(
docutils):
result = docutils['dist.python-versions.executables']
results, request):
results = request.getfixturevalue(results)
result = results['dist.python-versions.executables']
with open(result.artifact) as f:
artifact = f.read()

@@ -337,24 +363,25 @@ def test_artifact_contains_executables_and_looks_as_expected(
""").strip() in artifact.strip()


@pytest.mark.parametrize('results', ('eric', 'six', 'admesh', 'copr',
'epub', 'twine', 'nodejs'))
@parametrize('eric', 'six', 'admesh', 'copr', 'epub', 'twine', 'nodejs')
def test_unvesioned_shebangs_passed(results, request):
results = request.getfixturevalue(results)
result = results['dist.python-versions.unversioned_shebangs']
assert result.outcome == 'PASSED'


@pytest.mark.parametrize('results', ('yum', 'tracer', 'bucky'))
@parametrize('yum', 'tracer', 'pycallgraph')
def test_unvesioned_shebangs_failed(results, request):
results = request.getfixturevalue(results)
result = results['dist.python-versions.unversioned_shebangs']
assert result.outcome == 'FAILED'


@parametrize('tracer')
def test_artifact_contains_unversioned_shebangs_and_looks_as_expected(
tracer):
result = tracer['dist.python-versions.unversioned_shebangs']
results, request):
results = request.getfixturevalue(results)
result = results['dist.python-versions.unversioned_shebangs']
with open(result.artifact) as f:
artifact = f.read()

@@ -371,16 +398,18 @@ def test_artifact_contains_unversioned_shebangs_and_looks_as_expected(
""").strip() in artifact.strip()


@pytest.mark.parametrize('results', ('bucky',))
@parametrize('pycallgraph')
def test_unvesioned_shebangs_mangled_failed(results, request):
results = request.getfixturevalue(results)
result = results['dist.python-versions.unversioned_shebangs']
assert result.outcome == 'FAILED'


@parametrize('pycallgraph')
def test_artifact_contains_mangled_unversioned_shebangs_and_looks_as_expected(
bucky):
result = bucky['dist.python-versions.unversioned_shebangs']
results, request):
results = request.getfixturevalue(results)
result = results['dist.python-versions.unversioned_shebangs']
with open(result.artifact) as f:
artifact = f.read()

@@ -405,30 +434,32 @@ def test_artifact_contains_mangled_unversioned_shebangs_and_looks_as_expected(
""").strip() in artifact.strip()


@pytest.mark.parametrize('results', ('eric', 'six', 'admesh', 'tracer',
'copr', 'epub', 'twine', 'docutils'))
@parametrize('eric', 'six', 'admesh', 'tracer',
'copr', 'epub', 'twine', 'docutils')
def test_py3_support_passed(results, request):
results = request.getfixturevalue(results)
task_result = results['dist.python-versions.py3_support']
assert task_result.outcome == 'PASSED'


@pytest.mark.parametrize('results', ('bucky',))
@parametrize('pycallgraph')
def test_py3_support_failed(results, request):
results = request.getfixturevalue(results)
task_result = results['dist.python-versions.py3_support']
assert task_result.outcome == 'FAILED'


@parametrize('pycallgraph')
def test_artifact_contains_py3_support_and_looks_as_expected(
bucky):
results, request):
"""Test that py3_support check fails if the package is mispackaged.

NOTE: The test will start to fail as soon as python-bucky
NOTE: The test will start to fail as soon as python-pycallgraph
gets ported to Python 3 and its Bugzilla gets closed.
See https://bugzilla.redhat.com/show_bug.cgi?id=1367012
See https://bugzilla.redhat.com/show_bug.cgi?id=1309383
"""
result = bucky['dist.python-versions.py3_support']
results = request.getfixturevalue(results)
result = results['dist.python-versions.py3_support']
with open(result.artifact) as f:
artifact = f.read()

@@ -440,27 +471,30 @@ def test_artifact_contains_py3_support_and_looks_as_expected(

Software MUST be packaged for Python 3 if upstream supports it.
See the following Bugzilla:
https://bugzilla.redhat.com/show_bug.cgi?id=1367012
https://bugzilla.redhat.com/show_bug.cgi?id=1309383
""").strip() in artifact.strip()


@pytest.mark.parametrize('results', ('eric', 'six', 'admesh', 'tracer',
'copr', 'epub', 'twine', 'docutils'))
@parametrize('eric', 'six', 'admesh', 'tracer',
'copr', 'epub', 'twine', 'docutils')
def test_python_usage_passed(results, request):
results = request.getfixturevalue(results)
task_result = results['dist.python-versions.python_usage']
assert task_result.outcome == 'PASSED'


@pytest.mark.parametrize('results', ('jsonrpc',))
@parametrize('jsonrpc')
def test_python_usage_failed(results, request):
results = request.getfixturevalue(results)
task_result = results['dist.python-versions.python_usage']
assert task_result.outcome == 'FAILED'


def test_artifact_contains_python_usage_and_looks_as_expected(jsonrpc):
result = jsonrpc['dist.python-versions.python_usage']
@parametrize('jsonrpc')
def test_artifact_contains_python_usage_and_looks_as_expected(results,
request):
results = request.getfixturevalue(results)
result = results['dist.python-versions.python_usage']
with open(result.artifact) as f:
artifact = f.read()
