pytest makes testing enjoyable. Here's how to use it effectively.
Getting Started
pip install pytest
Write a test:
# test_math.py
def add(a, b):
return a + b
def test_add():
assert add(2, 3) == 5
assert add(-1, 1) == 0
Run it:
pytest
pytest -v # Verbose
pytest test_math.py # Specific file
pytest test_math.py::test_add # Specific test
Test Discovery
pytest finds tests automatically:
- Files named test_*.py or *_test.py
- Functions named test_*
- Classes named Test*
tests/
├── test_users.py
├── test_orders.py
└── conftest.py
Assertions
Just use assert:
def test_strings():
assert "hello" in "hello world"
assert len([1, 2, 3]) == 3
assert {"a": 1} == {"a": 1}pytest rewrites assertions to show helpful diffs on failure.
Fixtures
Fixtures provide test data and setup:
import pytest
@pytest.fixture
def user():
return {"name": "Owen", "email": "owen@example.com"}
def test_user_has_name(user):
assert user["name"] == "Owen"
def test_user_has_email(user):
assert "@" in user["email"]The fixture runs fresh for each test.
Fixture Scope
@pytest.fixture(scope="function") # Default, runs per test
def func_fixture():
return create_something()
@pytest.fixture(scope="module") # Once per file
def module_fixture():
return expensive_setup()
@pytest.fixture(scope="session") # Once per test run
def session_fixture():
return very_expensive_setup()
Fixtures with Cleanup
@pytest.fixture
def database():
db = connect_database()
yield db # Test runs here
db.close() # Cleanup after test
conftest.py
Share fixtures across files:
# tests/conftest.py
import pytest
@pytest.fixture
def api_client():
return TestClient(app)
All tests can use api_client without importing it.
Parametrize
Run same test with different inputs:
import pytest
@pytest.mark.parametrize("input,expected", [
(2, 4),
(3, 9),
(4, 16),
(-2, 4),
])
def test_square(input, expected):
assert input ** 2 == expected
Multiple parameters:
@pytest.mark.parametrize("a,b,expected", [
(1, 2, 3),
(0, 0, 0),
(-1, 1, 0),
])
def test_add(a, b, expected):
assert add(a, b) == expected
Markers
Tag tests for selective running:
import pytest
@pytest.mark.slow
def test_big_computation():
...
@pytest.mark.integration
def test_database_connection():
...
Run or skip by marker:
pytest -m slow # Only slow tests
pytest -m "not slow" # Skip slow tests
pytest -m "integration" # Only integration testsBuilt-in Markers
@pytest.mark.skip(reason="Not implemented yet")
def test_future_feature():
...
@pytest.mark.skipif(sys.platform == "win32", reason="Unix only")
def test_unix_feature():
...
@pytest.mark.xfail(reason="Known bug")
def test_broken_feature():
...
Testing Exceptions
import pytest
def test_raises_error():
with pytest.raises(ValueError):
int("not a number")
def test_error_message():
with pytest.raises(ValueError) as exc_info:
int("not a number")
assert "invalid literal" in str(exc_info.value)Mocking
Use pytest-mock or unittest.mock:
def test_api_call(mocker):
mock_get = mocker.patch("requests.get")
mock_get.return_value.json.return_value = {"status": "ok"}
result = fetch_status()
assert result == "ok"
mock_get.assert_called_once()
Or with unittest.mock:
from unittest.mock import patch, Mock
def test_api_call():
with patch("mymodule.requests.get") as mock_get:
mock_get.return_value.json.return_value = {"status": "ok"}
result = fetch_status()
assert result == "ok"Temporary Files
def test_file_processing(tmp_path):
# tmp_path is a pytest fixture
test_file = tmp_path / "test.txt"
test_file.write_text("hello")
result = process_file(test_file)
assert result == "processed: hello"Capturing Output
def test_print_output(capsys):
print("hello")
captured = capsys.readouterr()
assert captured.out == "hello\n"Test Organization
class TestUser:
def test_create(self):
user = User("Owen")
assert user.name == "Owen"
def test_email_validation(self):
with pytest.raises(ValueError):
User("Owen", email="invalid")Coverage
pip install pytest-cov
pytest --cov=mypackage
pytest --cov=mypackage --cov-report=html
Useful Options
pytest -v # Verbose output
pytest -x # Stop on first failure
pytest --lf # Run last failed tests
pytest --ff # Run failed tests first
pytest -k "user" # Run tests matching "user"
pytest --pdb # Debug on failure
pytest -n auto # Parallel (needs pytest-xdist)
pytest.ini
Configure pytest:
[pytest]
testpaths = tests
python_files = test_*.py
python_functions = test_*
addopts = -v --strict-markers
markers =
slow: marks tests as slow
integration: integration tests
Or in pyproject.toml:
[tool.pytest.ini_options]
testpaths = ["tests"]
addopts = "-v"My Patterns
# tests/conftest.py
import pytest
@pytest.fixture
def sample_data():
return {"id": 1, "name": "Test"}
@pytest.fixture
def mock_api(mocker):
return mocker.patch("myapp.api.client")# tests/test_service.py
class TestUserService:
def test_creates_user(self, sample_data):
user = UserService.create(sample_data)
assert user.id == 1
def test_calls_api(self, mock_api):
UserService.sync()
mock_api.post.assert_called_once()
Keep tests focused, use fixtures for setup, parametrize for variations.