[skip changelog] Optimize a bit integration tests #943

Merged
merged 4 commits on Sep 22, 2020
Changes from all commits
poetry.lock: 143 changes (129 additions, 14 deletions)

Some generated files are not rendered by default.

pyproject.toml: 5 changes (4 additions, 1 deletion)
@@ -6,7 +6,7 @@ authors = []

[tool.poetry.dependencies]
python = "^3.8"
pytest = "5.3.4"
pytest = "6.0.2"
simplejson = "3.17.0"
semver = "2.9.0"
pyserial = "3.4"
@@ -17,6 +17,9 @@ pytest-timeout = "1.3.4"
invoke = "1.4.1"
flake8 = "^3.8.3"
black = { version = "^19.10b0", allow-prereleases = true }
filelock = "^3.0.12"
pytest-xdist = "^2.1.0"
pytest_httpserver = "^0.3.5"

[tool.black]
line-length = 120
test/common.py: 10 changes (10 additions, 0 deletions)
@@ -14,6 +14,7 @@
# a commercial license, send an email to [email protected].
import os
import collections
import json


Board = collections.namedtuple("Board", "address fqbn package architecture id core")
@@ -25,3 +26,12 @@ def running_on_ci():
"""
val = os.getenv("APPVEYOR") or os.getenv("DRONE") or os.getenv("GITHUB_WORKFLOW")
return val is not None


def parse_json_traces(log_json_lines):
trace_entries = []
for entry in log_json_lines:
entry = json.loads(entry)
if entry.get("level") == "trace":
trace_entries.append(entry.get("msg"))
return trace_entries
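
For context, a minimal sketch of how a test could consume the new parse_json_traces helper. The command, the --log-level trace and --log-format json flags, and the assertion are illustrative assumptions, not part of this change:

from .common import parse_json_traces


def test_trace_logging_sketch(run_command):
    # Hypothetical test: ask the CLI for trace-level logs as JSON lines on
    # stdout (flag names assumed), then keep only the trace messages.
    result = run_command("core update-index --log-level trace --log-format json")
    assert result.ok
    traces = parse_json_traces(result.stdout.splitlines())
    assert traces, "expected at least one trace-level log entry"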
test/conftest.py: 50 changes (44 additions, 6 deletions)
@@ -16,13 +16,15 @@
import platform
import signal
import shutil
import time
from pathlib import Path

import pytest
import simplejson as json
from invoke import Local
from invoke.context import Context
import tempfile
from filelock import FileLock

from .common import Board

@@ -54,17 +56,32 @@ def data_dir(tmpdir_factory):
if platform.system() == "Windows":
with tempfile.TemporaryDirectory() as tmp:
yield tmp
# shutil.rmtree(tmp, ignore_errors=True)
else:
yield str(tmpdir_factory.mktemp("ArduinoTest"))
data = tmpdir_factory.mktemp("ArduinoTest")
yield str(data)
# shutil.rmtree(data, ignore_errors=True)


@pytest.fixture(scope="session")
def downloads_dir(tmpdir_factory):
def downloads_dir(tmpdir_factory, worker_id):
"""
To save time and bandwidth, all the tests will access
the same download cache folder.
"""
return str(tmpdir_factory.mktemp("ArduinoTest"))
download_dir = tmpdir_factory.mktemp("ArduinoTest", numbered=False)

# This folder should be created only once per session; if we're running
# tests in parallel using multiple processes, we need to make sure this
# fixture is executed only once, thus the use of the lockfile
if not worker_id == "master":
lock = Path(download_dir / "lock")
with FileLock(lock):
if not lock.is_file():
lock.touch()

yield str(download_dir)
# shutil.rmtree(download_dir, ignore_errors=True)
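
For background, the pytest-xdist documentation suggests a FileLock-based recipe for session-scoped setup that must run exactly once across worker processes. A rough sketch of that general pattern, with produce_data standing in for a hypothetical expensive one-time step:

import json

import pytest
from filelock import FileLock


def produce_data():
    # Placeholder for the expensive setup that should happen only once.
    return {"ready": True}


@pytest.fixture(scope="session")
def shared_data(tmp_path_factory, worker_id):
    if worker_id == "master":
        # Not running under pytest-xdist: no coordination needed.
        return produce_data()
    # getbasetemp() is per-worker; its parent is shared by all workers.
    shared_tmp = tmp_path_factory.getbasetemp().parent
    data_file = shared_tmp / "data.json"
    with FileLock(str(data_file) + ".lock"):
        if data_file.is_file():
            data = json.loads(data_file.read_text())
        else:
            data = produce_data()
            data_file.write_text(json.dumps(data))
    return data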


@pytest.fixture(scope="function")
@@ -74,7 +91,9 @@ def working_dir(tmpdir_factory):
will be created before running each test and deleted
at the end; this way all the tests work in isolation.
"""
return str(tmpdir_factory.mktemp("ArduinoTestWork"))
work_dir = tmpdir_factory.mktemp("ArduinoTestWork")
yield str(work_dir)
# shutil.rmtree(work_dir, ignore_errors=True)


@pytest.fixture(scope="function")
@@ -95,9 +114,12 @@ def run_command(pytestconfig, data_dir, downloads_dir, working_dir):
}
(Path(data_dir) / "packages").mkdir()

def _run(cmd_string, custom_working_dir=None):
def _run(cmd_string, custom_working_dir=None, custom_env=None):

if not custom_working_dir:
custom_working_dir = working_dir
if not custom_env:
custom_env = env
cli_full_line = '"{}" {}'.format(cli_path, cmd_string)
run_context = Context()
# It might happen that we need to change directories between drives on Windows,
@@ -109,7 +131,7 @@ def _run(cmd_string, custom_working_dir=None):
# It escapes spaces in the path using "\ " but it doesn't always work,
# wrapping the path in quotation marks is the safest approach
with run_context.prefix(f'{cd_command} "{custom_working_dir}"'):
return run_context.run(cli_full_line, echo=False, hide=True, warn=True, env=env)
return run_context.run(cli_full_line, echo=False, hide=True, warn=True, env=custom_env)

return _run

@@ -195,3 +217,19 @@ def copy_sketch(working_dir):
test_sketch_path = Path(working_dir) / "sketch_simple"
shutil.copytree(sketch_path, test_sketch_path)
yield str(test_sketch_path)


@pytest.fixture(scope="function")
def wait_for_board(run_command):
def _waiter(seconds=10):
# Waits up to the specified number of seconds for a board to become visible.
# This is necessary since it might happen that a board is not immediately
# available after a test upload and subsequent tests might consequently fail.
time_end = time.time() + seconds
while time.time() < time_end:
result = run_command("board list --format json")
ports = json.loads(result.stdout)
if any(p.get("boards") for p in ports):
break

return _waiter
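
To illustrate how the new pieces are meant to be combined, here is a hypothetical test using run_command's custom_env parameter together with wait_for_board; the FQBN, port, and environment override below are placeholders, not values taken from this change:

def test_upload_sketch_example(run_command, copy_sketch, wait_for_board):
    # Placeholder board details for this sketch.
    fqbn = "arduino:avr:uno"
    port = "/dev/ttyACM0"
    assert run_command(f"compile -b {fqbn} {copy_sketch}").ok
    assert run_command(f"upload -b {fqbn} -p {port} {copy_sketch}").ok
    # Give the board time to re-enumerate before the next command uses it.
    wait_for_board()

    # custom_env lets a single test override the environment prepared by the
    # run_command fixture (the key and value here are assumptions).
    result = run_command(
        "board list --format json",
        custom_env={"ARDUINO_SKETCHBOOK_DIR": "/tmp/alt-sketchbook"},
    )
    assert result.ok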
test/pytest.ini: 10 changes (8 additions, 2 deletions)
@@ -7,5 +7,11 @@ filterwarnings =
markers =
slow: marks tests as slow (deselect with '-m "not slow"')

# atm some tests depend on each other, better to exit at first failure (-x)
addopts = -x -s --verbose --tb=short
# -x to exit at first failure
# -s to disable per-test capture
# --verbose is what it says it is
# --tb=long sets the length of the traceback in case of failures
# -n=auto sets the number of parallel processes to use
# --dist=loadfile distributes the tests across the parallel processes, grouping them by file
# See https://pypi.org/project/pytest-xdist/#parallelization for more info on parallelization
addopts = -x -s --verbose --tb=long -n=auto --dist=loadfile
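
For reference, and assuming the suite lives under a test directory, the options above roughly correspond to this programmatic invocation, which can be handy when driving the suite from a script:

import pytest

# Rough equivalent of the addopts line; -n/--dist require pytest-xdist.
exit_code = pytest.main(["test", "-x", "-s", "--verbose", "--tb=long", "-n", "auto", "--dist", "loadfile"])
raise SystemExit(exit_code)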