Let's simulate the experience of debugging a pytest run. We've added a breaking test to the script below.
import time

import numpy as np
import pytest


def normalize(X):
    return (X - X.min()) / (X.max() - X.min())


def threshold(X, min_val=-1, max_val=1):
    result = np.where(X <= min_val, min_val, X)
    return np.where(result >= max_val, max_val, result)

@pytest.fixture(params=[(1, 1), (2, 2), (3, 3), (4, 4)], ids=lambda d: f"rows: {d[0]} cols: {d[1]}")
def random_numpy_array(request):
    # The fixture parameter is the shape of the generated array.
    return np.random.normal(size=request.param)

def test_equal_dict():
    a = {"a": 1, "b": "I"}
    b = {"a": 1, "b": "1"}
    assert a == b


@pytest.mark.parametrize("func", [normalize, threshold], ids=lambda d: d.__name__)
def test_shape_same(func, random_numpy_array):
    time.sleep(0.5)
    X_norm = func(random_numpy_array)
    assert random_numpy_array.shape == X_norm.shape


def test_min_max_normalise(random_numpy_array):
    X_norm = normalize(random_numpy_array)
    assert X_norm.min() == 0.0
    assert X_norm.max() == 1.0


@pytest.mark.parametrize("min_val", [-3, -2, -1], ids=lambda x: f"min_val:{x}")
@pytest.mark.parametrize("max_val", [3, 2, 1], ids=lambda x: f"max_val:{x}")
def test_min_max_threshold(random_numpy_array, min_val, max_val):
    X_norm = threshold(random_numpy_array, min_val, max_val)
    assert X_norm.min() >= min_val
    assert X_norm.max() <= max_val
If you run pytest now, you'll notice that it takes a while before the breaking test is revealed: the parametrized tests each sleep for half a second, and the detailed failure report only appears once the entire suite has finished.
pytest --verbose
We can fix this by adding the -x flag.
pytest -x --verbose
By adding this flag, you prevent pytest from running any further tests once it finds a breaking one. This makes it much easier to pinpoint the problem, especially since you can then re-run just the specific test you've identified.
# Run pytest, but only on the `test_equal_dict` test
# found in the test_normalise.py file.
pytest test_normalise.py::test_equal_dict
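You can also combine these tricks. As a sketch (assuming the tests live in test_normalise.py, as above), the command below stops at the first failure, prints verbose output, and only collects the breaking test.
# Combine -x and --verbose with a specific test id.
pytest -x --verbose test_normalise.py::test_equal_dict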
More Clarity
The output of pytest isn't bad, but it can be improved by installing the pytest-clarity plugin.
python -m pip install pytest-clarity
Once installed, it will improve the -vv output of failing tests in the terminal.
This should make it much easier to spot the issue.
pytest -vv
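With the diff that pytest-clarity prints, it should be much clearer that the two dictionaries disagree on the key "b": one stores the letter "I", the other the string "1". Assuming the test was meant to compare equal dictionaries, the fix is a one-character change:
def test_equal_dict():
    a = {"a": 1, "b": "I"}
    b = {"a": 1, "b": "I"}  # was "1", which is easy to confuse with "I"
    assert a == b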