Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

ft: LoadTestShapes with custom user classes #2181

Merged
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
57 changes: 57 additions & 0 deletions examples/custom_shape/staging_user_classes.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
from locust import HttpUser, TaskSet, task, constant
from locust import LoadTestShape


class UserTasks(TaskSet):
    """Task set with a single task that GETs the root path of the target host."""

    @task
    def get_root(self):
        # Request "/" on the configured host; the HttpUser client records stats.
        self.client.get("/")


class WebsiteUserA(HttpUser):
    """First user type; selected by some stages of the load shape below."""

    # Constant 0.5 s wait between consecutive tasks.
    wait_time = constant(0.5)
    tasks = [UserTasks]

class WebsiteUserB(HttpUser):
    """Second user type; behaviorally identical to WebsiteUserA but dispatched
    as a distinct class so the shape can switch user populations per stage."""

    # Constant 0.5 s wait between consecutive tasks.
    wait_time = constant(0.5)
    tasks = [UserTasks]


class StagesShapeWithCustomUsers(LoadTestShape):
    """
    A simple load test shape class that runs different user counts, spawn rates
    and (optionally) user classes at different stages.

    Keyword arguments:

    stages -- A list of dicts, each representing a stage with the following keys:
        duration -- When this many seconds pass the test is advanced to the next stage
        users -- Total user count
        spawn_rate -- Number of users to start/stop per second
        user_classes -- Optional list of user classes to spawn during this stage;
                        when omitted, all user classes remain eligible
    """

    stages = [
        {"duration": 60, "users": 10, "spawn_rate": 10, "user_classes": [WebsiteUserA]},
        {"duration": 100, "users": 50, "spawn_rate": 10, "user_classes": [WebsiteUserB]},
        {"duration": 180, "users": 100, "spawn_rate": 10, "user_classes": [WebsiteUserA]},
        {"duration": 220, "users": 30, "spawn_rate": 10},
        {"duration": 230, "users": 10, "spawn_rate": 10},
        {"duration": 240, "users": 1, "spawn_rate": 1},
    ]

    def tick(self):
        """Return the (users, spawn_rate[, user_classes]) tuple for the current
        stage, or None once the last stage's duration has elapsed (stops the test).
        """
        run_time = self.get_run_time()

        # Stages are ordered by ascending cumulative duration, so the first
        # stage whose deadline has not yet passed is the active one.
        for stage in self.stages:
            if run_time < stage["duration"]:
                # Use an explicit key check instead of a bare except, which
                # would silently swallow unrelated errors (e.g. a typoed key).
                if "user_classes" in stage:
                    return (stage["users"], stage["spawn_rate"], stage["user_classes"])
                return (stage["users"], stage["spawn_rate"])

        return None
5 changes: 4 additions & 1 deletion locust/dispatch.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,13 +163,16 @@ def _dispatcher(self) -> Generator[Dict[str, Dict[str, int]], None, None]:

self._dispatch_in_progress = False

def new_dispatch(self, target_user_count: int, spawn_rate: float) -> None:
def new_dispatch(self, target_user_count: int, spawn_rate: float, user_classes: any) -> None:
cyberw marked this conversation as resolved.
Show resolved Hide resolved
"""
Initialize a new dispatch cycle.

:param target_user_count: The desired user count at the end of the dispatch cycle
:param spawn_rate: The spawn rate
"""
self._user_classes = user_classes
self._user_generator = self._user_gen()

self._target_user_count = target_user_count

self._spawn_rate = spawn_rate
Expand Down
34 changes: 22 additions & 12 deletions locust/runners.py
Original file line number Diff line number Diff line change
Expand Up @@ -330,7 +330,7 @@ def monitor_cpu_and_memory(self) -> NoReturn:
gevent.sleep(CPU_MONITOR_INTERVAL)

@abstractmethod
def start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None:
def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:list=None) -> None:
...

def start_shape(self) -> None:
Expand Down Expand Up @@ -364,7 +364,11 @@ def shape_worker(self) -> None:
elif self.shape_last_state == new_state:
gevent.sleep(1)
else:
user_count, spawn_rate = new_state
if len(new_state) == 2:
user_count, spawn_rate = new_state
user_classes = None
else:
user_count, spawn_rate, user_classes = new_state
logger.info("Shape test updating to %d users at %.2f spawn rate" % (user_count, spawn_rate))
# TODO: This `self.start()` call is blocking until the ramp-up is completed. This can leads
# to unexpected behaviours such as the one in the following example:
Expand All @@ -379,7 +383,7 @@ def shape_worker(self) -> None:
# We should probably use a `gevent.timeout` with a duration a little over
# `(user_count - prev_user_count) / spawn_rate` in order to limit the runtime
# of each load test shape stage.
self.start(user_count=user_count, spawn_rate=spawn_rate)
self.start(user_count=user_count, spawn_rate=spawn_rate, user_classes=user_classes)
self.shape_last_state = new_state

def stop(self) -> None:
Expand Down Expand Up @@ -463,7 +467,7 @@ def on_user_error(user_instance, exception, tb):

self.environment.events.user_error.add_listener(on_user_error)

def _start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None:
def _start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:list = None) -> None:
"""
Start running a load test

Expand All @@ -486,7 +490,10 @@ def _start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None
if wait and user_count - self.user_count > spawn_rate:
raise ValueError("wait is True but the amount of users to add is greater than the spawn rate")

for user_class in self.user_classes:
if user_classes is None:
user_classes = self.user_classes

for user_class in user_classes:
if self.environment.host:
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What happens if we combine a -H/--host parameter with shape-configured Users? I guess that will still work?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So normally it should. I'll try it out tomorrow :)

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@cyberw So I tried it with a few example locust files, and it seemed to work.

If you want to be sure, you can also try it out. :)

user_class.host = self.environment.host

Expand All @@ -500,7 +507,7 @@ def _start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None

logger.info("Ramping to %d users at a rate of %.2f per second" % (user_count, spawn_rate))

cast(UsersDispatcher, self._users_dispatcher).new_dispatch(user_count, spawn_rate)
cast(UsersDispatcher, self._users_dispatcher).new_dispatch(user_count, spawn_rate, user_classes)

try:
for dispatched_users in self._users_dispatcher:
Expand Down Expand Up @@ -542,7 +549,7 @@ def _start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None

self.environment.events.spawning_complete.fire(user_count=sum(self.target_user_classes_count.values()))

def start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None:
def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:list = None) -> None:
if spawn_rate > 100:
logger.warning(
"Your selected spawn rate is very high (>100), and this is known to sometimes cause issues. Do you really need to ramp up that fast?"
Expand All @@ -551,7 +558,7 @@ def start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None:
if self.spawning_greenlet:
# kill existing spawning_greenlet before we start a new one
self.spawning_greenlet.kill(block=True)
self.spawning_greenlet = self.greenlet.spawn(lambda: self._start(user_count, spawn_rate, wait=wait))
self.spawning_greenlet = self.greenlet.spawn(lambda: self._start(user_count, spawn_rate, wait=wait, user_classes=user_classes))
self.spawning_greenlet.link_exception(greenlet_exception_handler)

def stop(self) -> None:
Expand Down Expand Up @@ -729,7 +736,7 @@ def cpu_log_warning(self) -> bool:
warning_emitted = True
return warning_emitted

def start(self, user_count: int, spawn_rate: float, wait=False) -> None:
def start(self, user_count: int, spawn_rate: float, wait=False, user_classes:list = None) -> None:
self.spawning_completed = False

self.target_user_count = user_count
Expand All @@ -739,7 +746,10 @@ def start(self, user_count: int, spawn_rate: float, wait=False) -> None:
logger.warning("You can't start a distributed test before at least one worker processes has connected")
return

for user_class in self.user_classes:
if user_classes is None:
user_classes = self.user_classes

for user_class in user_classes:
if self.environment.host:
user_class.host = self.environment.host

Expand Down Expand Up @@ -771,7 +781,7 @@ def start(self, user_count: int, spawn_rate: float, wait=False) -> None:

self.update_state(STATE_SPAWNING)

self._users_dispatcher.new_dispatch(target_user_count=user_count, spawn_rate=spawn_rate)
self._users_dispatcher.new_dispatch(target_user_count=user_count, spawn_rate=spawn_rate, user_classes=user_classes)

try:
for dispatched_users in self._users_dispatcher:
Expand Down Expand Up @@ -1204,7 +1214,7 @@ def on_user_error(user_instance: User, exception: Exception, tb: TracebackType)

self.environment.events.user_error.add_listener(on_user_error)

def start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None:
def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:list = None) -> None:
raise NotImplementedError("use start_worker")

def start_worker(self, user_classes_count: Dict[str, int], **kwargs) -> None:
Expand Down
4 changes: 2 additions & 2 deletions locust/shape.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,13 +33,13 @@ def get_current_user_count(self):
"""
return self.runner.user_count

def tick(self) -> Optional[Tuple[int, float]]:
def tick(self) -> Optional[Tuple[int, float, any]]:
"""
Returns a tuple with two or three elements to control the running load test:

user_count -- Total user count
spawn_rate -- Number of users to start/stop per second when changing number of users

user_classes -- None, or a list of user classes to be spawned during this tick
samuelspagl marked this conversation as resolved.
Show resolved Hide resolved
If `None` is returned then the running load test will be stopped.

"""
Expand Down