Pytest Cheatsheet
Python test organization, fixtures, parametrization, markers and plugin-driven workflows.
# ── Basic Test Structure ──
# Files must be named test_*.py or *_test.py
# Functions must be named test_*
# Classes must be named Test*
def test_addition():
    """Plain function test — pytest collects any test_* function, no class needed."""
    assert 1 + 1 == 2
def test_string_concat():
    """String concatenation sanity check — demonstrates a bare assert."""
    assert "hello" + " world" == "hello world"
# Test class — groups related tests
class TestMathOperations:
def test_multiply(self):
assert 2 * 3 == 6
def test_divide(self):
assert 6 / 2 == 3.0
def test_divide_by_zero(self):
with pytest.raises(ZeroDivisionError):
1 / 0# ── Installation & Running ──
pip install pytest pytest-mock pytest-cov
# Run all tests (auto-discovers)
pytest
pytest . # current directory
pytest tests/ # specific folder
pytest tests/test_users.py # specific file
pytest tests/test_users.py::test_create_user # specific test
pytest -k "addition" # by name pattern
pytest -k "not slow" # exclude pattern
pytest -m "smoke" # by marker
pytest -m "smoke and not slow" # marker combinations
pytest --co # collect (list) tests without running
pytest -v # verbose output
pytest -vv # extra verbose (show full diffs)
pytest -s # show print() output (no capture)
pytest -x # stop after first failure
pytest --maxfail=3 # stop after N failures
pytest -n auto # parallel with pytest-xdist
pytest --lf # run last-failed tests
pytest --ff # run last-failed first, then the rest
pytest --sw # stepwise: abort on first failure
pytest --tb=short # shorter tracebacks
pytest --tb=no # no tracebacks
pytest -q # quiet output

| Pattern | Example | Scope |
|---|---|---|
| test_*.py | test_users.py | Test module |
| *_test.py | users_test.py | Test module |
| test_*() | test_login() | Test function |
| Test* | TestUserAPI | Test class |
| test_*.py in __init__.py | tests/__init__.py | Package discovery |
| Flag | Description |
|---|---|
| -v / -vv | Verbose / extra verbose output |
| -s | Disable output capture (see prints) |
| -x | Stop after first failure |
| --lf | Last-failed mode |
| --ff | Failed-first mode |
| --co | Collect tests (dry run) |
| -n auto | Parallel execution (xdist) |
| -k "expr" | Filter by keyword expression |
| -m "marker" | Filter by marker |
| --tb=short | Short traceback format |
| -q | Quiet mode |
# ── Built-in & Custom Markers ──
import pytest
# Built-in markers
@pytest.mark.skip(reason="Not implemented yet")
def test_feature_not_ready():
pass
@pytest.mark.skipif(sys.version_info < (3, 10), reason="Needs 3.10+")
def test_python_310_feature():
pass
@pytest.mark.xfail(reason="Known bug #42")
def test_known_failure():
assert 1 == 2 # expected to fail
@pytest.mark.xfail(strict=True, reason="Must pass now")
def test_must_pass():
assert 1 == 1 # if this fails, test FAILS (not xfail)
@pytest.mark.parametrize("a,b,expected", [
(1, 1, 2),
(2, 3, 5),
(10, -5, 5),
])
def test_add(a, b, expected):
assert a + b == expected
# Custom marker (register in pytest.ini)
@pytest.mark.slow
def test_long_running():
time.sleep(10)
@pytest.mark.integration
def test_database_connection():
passtest_*.py or *_test.py are collected. No test runner class or main() is needed.pytest.ini or pyproject.toml to avoid warnings: [pytest.ini_options] markers = slow: slow-running tests.# ── Basic Fixtures ──
import pytest
@pytest.fixture
def sample_user():
    """Fixture: returns a canned user dict; pytest injects it by parameter name."""
    return {"id": 1, "name": "Alice", "email": "alice@example.com"}
def test_user_name(sample_user):
assert sample_user["name"] == "Alice"
def test_user_email(sample_user):
assert sample_user["email"] == "alice@example.com"
# ── Fixture with setup & teardown ──
@pytest.fixture
def db_session():
    """Yield-style fixture: code before the yield is setup, code after is teardown."""
    # Setup
    connection = create_connection()
    session = connection.start_session()
    yield session  # provide the fixture value
    # Teardown (runs after test, even on failure)
    session.close()
    connection.disconnect()
def test_insert(db_session):
db_session.insert({"id": 1})
result = db_session.query("SELECT * FROM users")
assert len(result) == 1# ── conftest.py: Shared Fixtures ──
# Placed in tests/ root — available to ALL test modules
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
@pytest.fixture(scope="session")
def engine():
"""One engine per test session."""
return create_engine("sqlite:///:memory:")
@pytest.fixture(scope="function")
def db_session(engine):
"""Fresh DB session for each test."""
connection = engine.connect()
transaction = connection.begin()
Session = sessionmaker(bind=connection)
session = Session()
yield session
session.close()
transaction.rollback()
connection.close()
@pytest.fixture
def authenticated_client(client, db_session):
"""A client with an authenticated user."""
user = create_user(db_session, email="test@example.com", password="pass")
token = create_access_token(user.id)
client.headers["Authorization"] = f"Bearer {token}"
return client
@pytest.fixture(autouse=True)
def reset_settings():
"""Auto-applied to ALL tests in the directory."""
original = settings.DEBUG
settings.DEBUG = False
yield
settings.DEBUG = original

# ── Parametrized & Dynamic Fixtures ──
import pytest
# Parametrized fixture — test runs multiple times
@pytest.fixture(params=["sqlite", "postgresql", "mysql"])
def db_engine(request):
return create_engine(DATABASE_URLS[request.param])
def test_query(db_engine):
# Runs 3 times, once for each db_engine param
result = db_engine.execute("SELECT 1")
assert result.scalar() == 1
# Fixture using another fixture (composition)
@pytest.fixture
def app_client(db_session):
app = create_app()
app.db = db_session
return app.test_client()
# Factory fixture (creates fresh instances)
@pytest.fixture
def make_user():
def _make(**kwargs):
defaults = {"name": "Test User", "email": "test@test.com"}
defaults.update(kwargs)
return User(**defaults)
return _make
def test_create_multiple_users(make_user):
u1 = make_user(name="Alice")
u2 = make_user(name="Bob", email="bob@test.com")
assert u1.name == "Alice"
assert u2.email == "bob@test.com"
# Fixture returning multiple values
@pytest.fixture
def user_and_token():
user = create_user()
token = generate_token(user)
yield user, token
delete_user(user)

| Scope | Lifetime | Use Case |
|---|---|---|
| function | Per test (default) | Database sessions, temp dirs |
| class | Per test class | Shared class-level setup |
| module | Per test module | Module-level DB connections |
| package | Per package | Package-level fixtures |
| session | Per test run | Global DB, expensive resources |
| Param | Description |
|---|---|
| scope | function/class/module/package/session |
| params | List of parametrized values |
| autouse=True | Auto-inject into all tests |
| name | Override fixture name |
| ids | Custom param IDs in output |
Use conftest.py to share fixtures across test modules. Pytest discovers conftest.py files at every directory level — fixtures are only available to tests in that directory and below.

Use factory fixtures when tests need several customized instances: def _factory(**kwargs): return Thing(**kwargs).

# ── @pytest.mark.parametrize ──
import pytest
# Single parameter
@pytest.mark.parametrize("input_val", [1, 2, 3, 10, 100])
def test_is_positive(input_val):
assert input_val > 0
# Multiple parameters
@pytest.mark.parametrize("a,b,expected", [
(1, 1, 2),
(2, 3, 5),
(0, 0, 0),
(-1, 1, 0),
(100, 200, 300),
])
def test_addition(a, b, expected):
assert a + b == expected
# Named test IDs
@pytest.mark.parametrize("status_code,should_raise", [
(200, False),
(404, True),
(500, True),
], ids=["success", "not_found", "server_error"])
def test_http_status(status_code, should_raise):
if should_raise:
with pytest.raises(HTTPError):
check_status(status_code)
else:
check_status(status_code)
# Stack multiple parametrize decorators
@pytest.mark.parametrize("x", [0, 1])
@pytest.mark.parametrize("y", [0, 1])
def test_cartesian(x, y):
# Runs 4 times: (0,0), (0,1), (1,0), (1,1)
assert isinstance(x, int) and isinstance(y, int)

# ── Advanced Parametrization ──
import pytest
from datetime import date
# Parametrize from external data (CSV, JSON, etc.)
def load_test_cases():
import json
with open("test_data.json") as f:
return json.load(f)
@pytest.mark.parametrize("case", load_test_cases(), ids=lambda c: c["name"])
def test_from_json(case):
assert calculate(case["input"]) == case["expected"]
# Parametrize with class-based tests
class TestUserValidation:
@pytest.mark.parametrize("email,is_valid", [
("user@example.com", True),
("invalid-email", False),
("@no-user.com", False),
("", False),
])
def test_email_validation(self, email, is_valid):
assert validate_email(email) == is_valid
@pytest.mark.parametrize("password,errors", [
("Pass123!", []),
("short", ["min_length"]),
("nodigits!", ["digit"]),
("NOLOWER123!", ["lowercase"]),
])
def test_password_strength(self, password, errors):
assert validate_password(password) == errors
# Indirect parametrization (params go through a fixture)
@pytest.fixture
def parsed_date(request):
return date.fromisoformat(request.param)
@pytest.mark.parametrize("parsed_date", ["2024-01-15", "2024-06-30"], indirect=True)
def test_year(parsed_date):
assert parsed_date.year == 2024

| Option | Description |
|---|---|
| argnames | Comma-separated param names |
| argvalues | List of tuples/lists of values |
| ids | Custom test IDs for each case |
| indirect | Route params through fixtures |
| scope | Override parametrization scope |
| Pattern | IDs Generated |
|---|---|
| Default | argvalues[i]-argvalues[j] |
| ids=["a","b"] | Custom named IDs |
| ids=lambda c: c["name"] | Dynamic from dict data |
| pytest_generate_tests | Custom hook for full control |
Stack multiple @pytest.mark.parametrize decorators to create a cartesian product. @parametrize("x", [0,1]) + @parametrize("y", [0,1]) = 4 test combinations.

Use the ids parameter to give meaningful names to parametrized test cases. Without it, pytest generates IDs from the values which can be hard to read for complex objects.

# ── unittest.mock & pytest-mock ──
import pytest
from unittest.mock import Mock, patch, MagicMock, call
# Simple mock
def test_simple_mock():
mock_db = Mock()
mock_db.insert.return_value = 1
mock_db.query.return_value = [{"id": 1, "name": "Alice"}]
assert mock_db.insert({"name": "Alice"}) == 1
mock_db.insert.assert_called_once_with({"name": "Alice"})
mock_db.query.assert_called_once()
# pytest-mock fixture (cleaner API)
def test_with_mocker(mocker):
mock_func = mocker.patch("myapp.utils.send_email")
mock_func.return_value = True
result = register_user("alice@example.com")
assert result is True
mock_func.assert_called_once_with(
to="alice@example.com",
subject="Welcome!"
)

# ── Patching & Advanced Mocking ──
import pytest
from unittest.mock import patch, PropertyMock, mock_open
# Patch a function (decorator form)
@patch("myapp.services.get_weather")
def test_weather_service(mock_get_weather):
mock_get_weather.return_value = {"temp": 72, "condition": "sunny"}
result = get_daily_forecast("NYC")
assert result["temp"] == 72
# Patch with context manager
def test_with_cm():
with patch("myapp.services.get_weather") as mock_w:
mock_w.return_value = {"temp": 50, "condition": "rainy"}
result = get_daily_forecast("London")
assert result["condition"] == "rainy"
# Patch multiple objects
@patch("myapp.services.send_sms")
@patch("myapp.services.send_email")
def test_notifications(mock_email, mock_sms):
notify_user(user_id=1, method="email")
mock_email.assert_called_once()
mock_sms.assert_not_called()
# Mock a property
@patch("myapp.models.User.is_admin", new_callable=PropertyMock)
def test_admin_access(mock_is_admin):
mock_is_admin.return_value = True
user = User(id=1)
assert user.is_admin is True
# Mock file operations
@patch("builtins.open", mock_open(read_data="file contents"))
def test_read_file():
data = read_config_file("config.yaml")
assert data == "file contents"
# Mock with side effects
def test_side_effect():
mock = Mock()
mock.query.side_effect = [
[{"id": 1}], # first call
[], # second call
RuntimeError("DB down"), # third call raises
]
assert len(mock.query("SELECT 1")) == 1
assert mock.query("SELECT 2") == []
with pytest.raises(RuntimeError):
mock.query("SELECT 3")

# ── Spies & Call Verification ──
import pytest
from unittest.mock import Mock, call, ANY
def test_call_args(mocker):
mock_repo = mocker.Mock()
save_user(mock_repo, name="Alice", age=30, role="admin")
# Verify call count
mock_repo.save.assert_called_once()
assert mock_repo.save.call_count == 1
# Verify exact args
mock_repo.save.assert_called_with(
name="Alice", age=30, role="admin"
)
# Verify partial args (ignore order)
mock_repo.save.assert_called_once_with(
name="Alice", age=30, role="admin"
)
# Use ANY to ignore specific args
mock_repo.save.assert_called_with(
name="Alice", age=ANY, role="admin"
)
# Access call history
first_call = mock_repo.save.call_args_list[0]
kwargs = first_call.kwargs
assert kwargs["name"] == "Alice"
# Spy: wrap real function, track calls
def test_spy(mocker):
real_send = send_email_notification
spy = mocker.spy(real_send.__module__, "send_email_notification")
process_notification(user_id=1, message="Hello")
spy.assert_called_once()
spy.assert_called_with(user_id=1, message="Hello")
# Stub: replace return value but keep real implementation for other methods
def test_stub(mocker):
mocker.patch("myapp.utils.uuid4", return_value="fixed-uuid-123")
user = create_user(name="Bob")
assert user.uuid == "fixed-uuid-123"

| Method | Description |
|---|---|
| assert_called() | Called at least once |
| assert_called_once() | Called exactly once |
| assert_not_called() | Never called |
| assert_called_with(*a, **kw) | Exact args match |
| assert_called_once_with(*a, **kw) | Exact args, once |
| assert_any_call(*a, **kw) | Any call matched |
| assert_has_calls(calls) | All calls in order |
| reset_mock() | Clear call history |
| Method | Description |
|---|---|
| mocker.patch() | Patch object/function |
| mocker.spy() | Spy on real object |
| mocker.stub() | Create empty stub |
| mocker.Mock() | Create new mock |
| mocker.MagicMock() | Magic mock (auto-attrs) |
| mocker.call() | Call object for assert_has_calls |
| mocker.ANY | Match any argument |
| mocker.PropertyMock | Mock a property |
Patch where a name is looked up, not where it is defined: @patch("myapp.services.email_client") patches the reference in services.py, not the original class.

# ── Pytest Assertions (no self.assert needed!) ──
import pytest
# Equality
assert result == expected
assert result != "wrong"
# Boolean
assert is_active is True
assert is_deleted is False
assert user is not None
# Numeric comparisons
assert score >= 90
assert price < 100.0
assert count == len(items)
# Collection membership
assert "alice" in users
assert "admin" not in user_roles
assert set(result) == {1, 2, 3}
# Exception testing
def test_raises():
with pytest.raises(ValueError):
int("not_a_number")
def test_raises_match():
with pytest.raises(ValueError, match="must be positive"):
calculate(-1)
def test_raises_exact_message():
with pytest.raises(PermissionError) as exc_info:
delete_user(user_id=1)
assert "access denied" in str(exc_info.value)
# Approximate comparisons (floats)
assert result == pytest.approx(3.14159, rel=1e-3)
assert 0.1 + 0.2 == pytest.approx(0.3)
assert result == pytest.approx(100.0, abs=0.01) # absolute tolerance
# Warnings
def test_deprecation():
with pytest.warns(DeprecationWarning):
old_function()

# ── Common Assertion Patterns ──
import pytest
# Dict/subset assertions
def test_dict_contains():
response = {"id": 1, "name": "Alice", "email": "a@b.com", "created": "..."}
assert response["name"] == "Alice"
assert {"id": 1, "name": "Alice"}.items() <= response.items()
# List assertions
def test_list_order():
results = sort_users(users)
assert results[0].name == "Alice"
assert results[-1].name == "Zack"
assert len(results) == 3
# Using pytest.approx for collections
def test_approx_list():
assert [0.1 + 0.2, 0.3 + 0.4] == pytest.approx([0.3, 0.7])
# Assert logs
def test_logging(caplog):
import logging
process_order(order_id=42)
assert "Order 42 processed" in caplog.text
assert any("error" in r.message.lower() for r in caplog.records)
# Snapshot-style with pytest-regtest or inline snapshot
def test_output_format(capsys):
print_user(user)
captured = capsys.readouterr()
assert captured.out == "Alice (alice@example.com)\n"

| Helper | Description |
|---|---|
| pytest.approx(x) | Approximate float comparison |
| pytest.raises(exc) | Assert exception is raised |
| pytest.warns(warn) | Assert warning is issued |
| pytest.deprecated_call() | Assert DeprecationWarning |
| caplog fixture | Capture log records |
| capsys fixture | Capture stdout/stderr |
| capfd fixture | Capture file descriptors |
| tmp_path fixture | Temporary directory (pathlib) |
| tmpdir fixture | Temporary directory (py.path) |
| Pattern | Use Case |
|---|---|
| assert a == b | Exact equality |
| assert a == pytest.approx(b) | Float comparison |
| assert a.items() <= b.items() | Dict subset |
| assert set(a) == set(b) | Order-independent list |
| assert a in b | Membership test |
| assert a is None | Identity checks |
| assert isinstance(a, B) | Type checking |
| assert len(a) == n | Collection size |
Use plain assert statements — no need for self.assertEqual() or self.assertTrue(). Pytest rewrites assert statements to provide rich error messages with context.

# ── Essential Pytest Plugins ──
pytest-cov>=5.0 # Coverage reporting
pytest-xdist>=3.5 # Parallel test execution
pytest-mock>=3.12 # Wrapper around unittest.mock
pytest-asyncio>=0.23 # Async test support
pytest-django>=4.7 # Django integration
pytest-flask>=1.3 # Flask integration
pytest-selenium>=4.1 # Selenium browser testing
pytest-benchmark>=4.0 # Performance benchmarking
pytest-timeout>=2.2 # Timeout for slow tests
pytest-randomly>=3.15 # Randomize test order
pytest-regressions>=2.5 # Data/file regression testing
pytest-factoryboy>=2.7 # Factory Boy integration
pytest-snapshot>=0.9 # Snapshot testing
pytest-html>=4.1 # HTML test reports
pytest-allure>=2.13 # Allure test reports
faker>=22.0 # Test data generation
factory-boy>=3.3 # Test object factories

# ── pytest-asyncio: Async Testing ──
import pytest
@pytest.mark.asyncio
async def test_fetch_user():
user = await fetch_user_from_api(user_id=1)
assert user.name == "Alice"
@pytest.fixture
async def async_db():
conn = await asyncpg.connect("postgres://localhost/test")
yield conn
await conn.close()
@pytest.mark.asyncio
async def test_async_query(async_db):
result = await async_db.fetchval("SELECT 1")
assert result == 1
# Session-scoped async fixture
@pytest.fixture(scope="session")
def event_loop():
loop = asyncio.new_event_loop()
yield loop
loop.close()

# ── factory-boy: Test Factories ──
import pytest
from factory import Factory, Faker, SubFactory, LazyAttribute
class UserFactory(Factory):
class Meta:
model = User
name = Faker("name")
email = Faker("email")
is_active = True
class PostFactory(Factory):
class Meta:
model = Post
title = Faker("sentence")
author = SubFactory(UserFactory)
body = LazyAttribute(lambda o: f"Content by {o.author.name}")
# Usage in tests
def test_user_creation(db_session):
user = UserFactory.build() # no DB save
user2 = UserFactory.create() # saves to DB (if Meta.sqlalchemy_session)
assert user.name # auto-generated by Faker
def test_post_with_author(db_session):
post = PostFactory.create()
assert post.author.name # auto-created user
assert post.title # auto-generated sentence

| Plugin | Purpose |
|---|---|
| pytest-cov | Code coverage with --cov |
| pytest-xdist | Parallel: pytest -n auto |
| pytest-mock | mocker fixture for mocking |
| pytest-asyncio | @pytest.mark.asyncio |
| pytest-django | Django DB/Client fixtures |
| pytest-timeout | @pytest.mark.timeout(30) |
| pytest-randomly | Detect order-dependent tests |
| pytest-html | HTML report: --html=report.html |
| Flag | Description |
|---|---|
| --cov=myapp | Measure coverage for myapp |
| --cov-report=html | Generate HTML report |
| --cov-report=term | Terminal report |
| --cov-report=xml | XML for CI (Cobertura) |
| --cov-fail-under=80 | Fail if below 80% |
| --cov-branch | Branch coverage (not just line) |
Use pytest-xdist for parallel execution: pytest -n auto detects CPU cores automatically. Combine with --dist loadfile to balance by file rather than by test.

# ── Coverage Commands ──
# Run with coverage
pytest --cov=myapp
pytest --cov=myapp --cov-report=html
pytest --cov=myapp --cov-report=term-missing
pytest --cov=myapp tests/ --cov-report=xml
# Multi-package coverage
pytest --cov=myapp.api --cov=myapp.services --cov=myapp.models
# Fail below threshold
pytest --cov=myapp --cov-fail-under=80
# Exclude files/patterns
pytest --cov=myapp --cov-report=term-missing \
--cov-config=.coveragerc
# Branch coverage (more accurate)
pytest --cov=myapp --cov-branch

# ── .coveragerc Configuration ──
[run]
source = myapp
branch = true
parallel = true
[report]
exclude_lines =
pragma: no cover
def __repr__
raise NotImplementedError
if TYPE_CHECKING:
if __name__ == .__main__.:
@abstractmethod
@overload
pass
[html]
directory = htmlcov
[xml]
output = coverage.xml
omit =
myapp/tests/*
myapp/migrations/*
myapp/__init__.py
myapp/conftest.py

# ── Coverage in pyproject.toml ──
[tool.coverage.run]
source = ["myapp"]
branch = true
parallel = true
omit = [
"myapp/tests/*",
"myapp/migrations/*",
"myapp/__init__.py",
]
[tool.coverage.report]
exclude_lines = [
"pragma: no cover",
"def __repr__",
"raise NotImplementedError",
"if TYPE_CHECKING:",
"if __name__ == .__main__.:",
"@abstractmethod",
"pass",
]
fail_under = 80
show_missing = true

| Strategy | Target | Description |
|---|---|---|
| Line | 80%+ | Each line executed at least once |
| Branch | 70%+ | Each if/else branch hit |
| Path | Hard | Every possible execution path |
| Mutation | Gold | Tests detect code changes |
| Pattern | Why |
|---|---|
| pragma: no cover | Intentionally uncovered |
| if TYPE_CHECKING: | Type hints only |
| @abstractmethod | No implementation |
| if __name__==... | Script entry point |
| migrations/* | Auto-generated code |
| raise NotImplementedError | Interface stubs |
Use --cov-report=term-missing to see which specific lines are not covered.

# ── pytest.ini (project root) ──
[pytest]
testpaths = tests
python_files = test_*.py *_test.py
python_classes = Test*
python_functions = test_*
addopts = -v --tb=short --strict-markers
markers =
slow: slow-running tests (deselect with '-m "not slow"')
integration: integration tests requiring external services
unit: fast unit tests
smoke: critical path smoke tests
filterwarnings =
error::DeprecationWarning
ignore::UserWarning:myapp
log_cli = true
log_cli_level = INFO
log_format = %(asctime)s [%(levelname)s] %(message)s
log_date_format = %Y-%m-%d %H:%M:%S

# ── pyproject.toml (recommended modern approach) ──
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
addopts = [
"-v",
"--tb=short",
"--strict-markers",
"--cov=myapp",
"--cov-report=term-missing",
]
markers = [
"slow: slow-running tests",
"integration: tests with external dependencies",
"unit: fast isolated unit tests",
"smoke: critical path tests",
]
filterwarnings = [
"error::DeprecationWarning",
"ignore::UserWarning:myapp",
]
asyncio_mode = "auto"

# ── conftest.py Hooks ──
import pytest
# Custom marker registration (alternative to ini)
def pytest_configure(config):
config.addinivalue_line(
"markers", "slow: marks tests as slow (deselect with -m 'not slow')"
)
# Dynamic test collection
def pytest_collect_file(parent, path):
if path.ext == ".yaml" and path.basename.startswith("test_"):
return YamlFile.from_parent(parent, fspath=path)
# Modify collected items
def pytest_collection_modifyitems(config, items):
# Auto-mark tests in 'slow/' directory
for item in items:
if "slow" in str(item.fspath):
item.add_marker(pytest.mark.slow)
# Auto-add estimated duration
if "integration" in item.keywords:
item.add_marker(pytest.mark.timeout(60))
# Session-scoped setup/teardown
def pytest_sessionstart(session):
print("\n🚀 Test session starting...")
def pytest_sessionfinish(session, exitstatus):
print("\n🏁 Test session finished!")
# Per-test hooks
def pytest_runtest_setup(item):
"""Called before each test runs."""
if "integration" in item.keywords and not item.config.getoption("--run-integration"):
pytest.skip("Skipping integration tests (use --run-integration)")

| Priority | File | Location |
|---|---|---|
| 1 (highest) | pytest.ini | Project root |
| 2 | pyproject.toml [tool.pytest] | Project root |
| 3 | tox.ini [pytest] | Project root |
| 4 | setup.cfg [tool:pytest] | Project root |
| Flag | Purpose |
|---|---|
| --strict-markers | Error on unknown markers |
| -x | Stop on first failure |
| -ra | Summary of all test results |
| --durations=10 | Show 10 slowest tests |
| -p no:cacheprovider | Disable caching |
| --cov=myapp | Always measure coverage |
Use --strict-markers in addopts to catch typos in marker names early. Pytest will error on any marker not registered in pytest.ini.

Prefer pyproject.toml over pytest.ini for new projects. It keeps all Python project configuration in one file alongside [tool.black], [tool.ruff], [tool.mypy], etc.