Add interface performance benchmark (#2948)
skilkis authored Jul 14, 2023
1 parent fb7a9c4 commit 1360e30
Showing 1 changed file with 28 additions and 1 deletion.
29 changes: 28 additions & 1 deletion tests/benchmarks/test_execute.py
@@ -1,13 +1,14 @@
 import datetime
 import random
 from datetime import date
-from typing import List
+from typing import List, Type, cast
 
 import pytest
 from asgiref.sync import async_to_sync
 from pytest_codspeed.plugin import BenchmarkFixture
 
 import strawberry
+from strawberry.scalars import ID
 
 
 @pytest.mark.benchmark
@@ -72,3 +73,29 @@ def patrons(self) -> List[Patron]:
     """
 
     benchmark(async_to_sync(schema.execute), query)
+
+
+@pytest.mark.parametrize("ntypes", [2**k for k in range(0, 13, 4)])
+def test_interface_performance(benchmark: BenchmarkFixture, ntypes: int):
+    @strawberry.interface
+    class Item:
+        id: ID
+
+    CONCRETE_TYPES: List[Type[Item]] = []
+    for i in range(ntypes):
+        CONCRETE_TYPES.append(strawberry.type(type(f"Item{i}", (Item,), {})))
+
+    @strawberry.type
+    class Query:
+        items: List[Item]
+
+    schema = strawberry.Schema(query=Query, types=CONCRETE_TYPES)
+    query = "query { items { id } }"
+
+    benchmark(
+        async_to_sync(schema.execute),
+        query,
+        root_value=Query(
+            items=[CONCRETE_TYPES[i % ntypes](id=cast(ID, i)) for i in range(1000)]
+        ),
+    )
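For reference, the parametrize expression expands to ntypes values of 1, 16, 256, and 4096, and the loop builds each concrete type dynamically with the three-argument form of the builtin type(). A minimal sketch of what one loop iteration produces, written out statically (Item0 stands in for the generated name):

# Static equivalent of strawberry.type(type("Item0", (Item,), {})):
# the three-argument type() call creates a subclass of the Item
# interface at runtime, and strawberry.type registers it as a
# concrete GraphQL object type; the id field is inherited from Item.
@strawberry.type
class Item0(Item):
    pass

Every parametrized run then executes the same 1000-item query, so any slowdown isolates the cost of resolving the concrete object type behind each interface value as the number of implementing types grows.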
