From 0346a4eb01a73f899ac153314b240c4a0063d98c Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Wed, 31 Aug 2022 16:25:58 +0200 Subject: [PATCH 01/17] Add possibility to extend the stages with control over userclasses --- examples/custom_shape/staging_user_classes.py | 57 +++++++++++++++++++ locust/dispatch.py | 5 +- locust/runners.py | 34 +++++++---- locust/shape.py | 4 +- 4 files changed, 85 insertions(+), 15 deletions(-) create mode 100644 examples/custom_shape/staging_user_classes.py diff --git a/examples/custom_shape/staging_user_classes.py b/examples/custom_shape/staging_user_classes.py new file mode 100644 index 0000000000..285f4e5676 --- /dev/null +++ b/examples/custom_shape/staging_user_classes.py @@ -0,0 +1,57 @@ +from locust import HttpUser, TaskSet, task, constant +from locust import LoadTestShape + + +class UserTasks(TaskSet): + @task + def get_root(self): + self.client.get("/") + + +class WebsiteUserA(HttpUser): + wait_time = constant(0.5) + tasks = [UserTasks] + +class WebsiteUserB(HttpUser): + wait_time = constant(0.5) + tasks = [UserTasks] + + +class StagesShapeWithCustomUsers(LoadTestShape): + """ + A simply load test shape class that has different user and spawn_rate at + different stages. + + Keyword arguments: + + stages -- A list of dicts, each representing a stage with the following keys: + duration -- When this many seconds pass the test is advanced to the next stage + users -- Total user count + spawn_rate -- Number of users to start/stop per second + stop -- A boolean that can stop that test at a specific stage + + stop_at_end -- Can be set to stop once all stages have run. 
+ """ + + stages = [ + {"duration": 60, "users": 10, "spawn_rate": 10, "user_classes": [WebsiteUserA]}, + {"duration": 100, "users": 50, "spawn_rate": 10, "user_classes": [WebsiteUserB]}, + {"duration": 180, "users": 100, "spawn_rate": 10, "user_classes": [WebsiteUserA]}, + {"duration": 220, "users": 30, "spawn_rate": 10}, + {"duration": 230, "users": 10, "spawn_rate": 10}, + {"duration": 240, "users": 1, "spawn_rate": 1}, + ] + + def tick(self): + run_time = self.get_run_time() + + for stage in self.stages: + if run_time < stage["duration"]: + # Not the smartest solution, TODO: find something better + try: + tick_data = (stage["users"], stage["spawn_rate"], stage["user_classes"]) + except: + tick_data = (stage["users"], stage["spawn_rate"]) + return tick_data + + return None diff --git a/locust/dispatch.py b/locust/dispatch.py index f4abb7b566..eeea847a70 100644 --- a/locust/dispatch.py +++ b/locust/dispatch.py @@ -163,13 +163,16 @@ def _dispatcher(self) -> Generator[Dict[str, Dict[str, int]], None, None]: self._dispatch_in_progress = False - def new_dispatch(self, target_user_count: int, spawn_rate: float) -> None: + def new_dispatch(self, target_user_count: int, spawn_rate: float, user_classes: any) -> None: """ Initialize a new dispatch cycle. :param target_user_count: The desired user count at the end of the dispatch cycle :param spawn_rate: The spawn rate """ + self._user_classes = user_classes + self._user_generator = self._user_gen() + self._target_user_count = target_user_count self._spawn_rate = spawn_rate diff --git a/locust/runners.py b/locust/runners.py index 83560c8c74..7bed2b7f42 100644 --- a/locust/runners.py +++ b/locust/runners.py @@ -330,7 +330,7 @@ def monitor_cpu_and_memory(self) -> NoReturn: gevent.sleep(CPU_MONITOR_INTERVAL) @abstractmethod - def start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None: + def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:list=None) -> None: ... 
def start_shape(self) -> None: @@ -364,7 +364,11 @@ def shape_worker(self) -> None: elif self.shape_last_state == new_state: gevent.sleep(1) else: - user_count, spawn_rate = new_state + if len(new_state) == 2: + user_count, spawn_rate = new_state + user_classes = None + else: + user_count, spawn_rate, user_classes = new_state logger.info("Shape test updating to %d users at %.2f spawn rate" % (user_count, spawn_rate)) # TODO: This `self.start()` call is blocking until the ramp-up is completed. This can leads # to unexpected behaviours such as the one in the following example: @@ -379,7 +383,7 @@ def shape_worker(self) -> None: # We should probably use a `gevent.timeout` with a duration a little over # `(user_count - prev_user_count) / spawn_rate` in order to limit the runtime # of each load test shape stage. - self.start(user_count=user_count, spawn_rate=spawn_rate) + self.start(user_count=user_count, spawn_rate=spawn_rate, user_classes=user_classes) self.shape_last_state = new_state def stop(self) -> None: @@ -463,7 +467,7 @@ def on_user_error(user_instance, exception, tb): self.environment.events.user_error.add_listener(on_user_error) - def _start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None: + def _start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:list = None) -> None: """ Start running a load test @@ -486,7 +490,10 @@ def _start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None if wait and user_count - self.user_count > spawn_rate: raise ValueError("wait is True but the amount of users to add is greater than the spawn rate") - for user_class in self.user_classes: + if user_classes is None: + user_classes = self.user_classes + + for user_class in user_classes: if self.environment.host: user_class.host = self.environment.host @@ -500,7 +507,7 @@ def _start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None logger.info("Ramping to %d users at a rate of %.2f per second" % 
(user_count, spawn_rate)) - cast(UsersDispatcher, self._users_dispatcher).new_dispatch(user_count, spawn_rate) + cast(UsersDispatcher, self._users_dispatcher).new_dispatch(user_count, spawn_rate, user_classes) try: for dispatched_users in self._users_dispatcher: @@ -542,7 +549,7 @@ def _start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None self.environment.events.spawning_complete.fire(user_count=sum(self.target_user_classes_count.values())) - def start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None: + def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:list = None) -> None: if spawn_rate > 100: logger.warning( "Your selected spawn rate is very high (>100), and this is known to sometimes cause issues. Do you really need to ramp up that fast?" @@ -551,7 +558,7 @@ def start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None: if self.spawning_greenlet: # kill existing spawning_greenlet before we start a new one self.spawning_greenlet.kill(block=True) - self.spawning_greenlet = self.greenlet.spawn(lambda: self._start(user_count, spawn_rate, wait=wait)) + self.spawning_greenlet = self.greenlet.spawn(lambda: self._start(user_count, spawn_rate, wait=wait, user_classes=user_classes)) self.spawning_greenlet.link_exception(greenlet_exception_handler) def stop(self) -> None: @@ -729,7 +736,7 @@ def cpu_log_warning(self) -> bool: warning_emitted = True return warning_emitted - def start(self, user_count: int, spawn_rate: float, wait=False) -> None: + def start(self, user_count: int, spawn_rate: float, wait=False, user_classes:list = None) -> None: self.spawning_completed = False self.target_user_count = user_count @@ -739,7 +746,10 @@ def start(self, user_count: int, spawn_rate: float, wait=False) -> None: logger.warning("You can't start a distributed test before at least one worker processes has connected") return - for user_class in self.user_classes: + if user_classes is None: + 
user_classes = self.user_classes + + for user_class in user_classes: if self.environment.host: user_class.host = self.environment.host @@ -771,7 +781,7 @@ def start(self, user_count: int, spawn_rate: float, wait=False) -> None: self.update_state(STATE_SPAWNING) - self._users_dispatcher.new_dispatch(target_user_count=user_count, spawn_rate=spawn_rate) + self._users_dispatcher.new_dispatch(target_user_count=user_count, spawn_rate=spawn_rate, user_classes=user_classes) try: for dispatched_users in self._users_dispatcher: @@ -1204,7 +1214,7 @@ def on_user_error(user_instance: User, exception: Exception, tb: TracebackType) self.environment.events.user_error.add_listener(on_user_error) - def start(self, user_count: int, spawn_rate: float, wait: bool = False) -> None: + def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:list = None) -> None: raise NotImplementedError("use start_worker") def start_worker(self, user_classes_count: Dict[str, int], **kwargs) -> None: diff --git a/locust/shape.py b/locust/shape.py index 2561386da0..d35b64689e 100644 --- a/locust/shape.py +++ b/locust/shape.py @@ -33,13 +33,13 @@ def get_current_user_count(self): """ return self.runner.user_count - def tick(self) -> Optional[Tuple[int, float]]: + def tick(self) -> Optional[Tuple[int, float, any]]: """ Returns a tuple with 2 elements to control the running load test: user_count -- Total user count spawn_rate -- Number of users to start/stop per second when changing number of users - + user_classes -- None or a List of userclasses to be spawend in it tick If `None` is returned then the running load test will be stopped. 
""" From e569c87455f346cfa929b82654a9410e6abb5773 Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Thu, 1 Sep 2022 09:11:02 +0200 Subject: [PATCH 02/17] add better type hinting and docstring --- locust/dispatch.py | 3 ++- locust/runners.py | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/locust/dispatch.py b/locust/dispatch.py index eeea847a70..5bfd4ae25c 100644 --- a/locust/dispatch.py +++ b/locust/dispatch.py @@ -163,12 +163,13 @@ def _dispatcher(self) -> Generator[Dict[str, Dict[str, int]], None, None]: self._dispatch_in_progress = False - def new_dispatch(self, target_user_count: int, spawn_rate: float, user_classes: any) -> None: + def new_dispatch(self, target_user_count: int, spawn_rate: float, user_classes: Optional[List[User]]) -> None: """ Initialize a new dispatch cycle. :param target_user_count: The desired user count at the end of the dispatch cycle :param spawn_rate: The spawn rate + :param user_classes: The user classes to be used for the new dispatch """ self._user_classes = user_classes self._user_generator = self._user_gen() diff --git a/locust/runners.py b/locust/runners.py index 7bed2b7f42..c8a1c395ec 100644 --- a/locust/runners.py +++ b/locust/runners.py @@ -476,6 +476,7 @@ def _start(self, user_count: int, spawn_rate: float, wait: bool = False, user_cl :param wait: If True calls to this method will block until all users are spawned. If False (the default), a greenlet that spawns the users will be started and the call to this method will return immediately. 
+ :param user_classes: The user classes to be dispatched """ self.target_user_count = user_count From 90808344dc0cf099b3953bf3a18d076637088568 Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Thu, 1 Sep 2022 09:13:28 +0200 Subject: [PATCH 03/17] add better type hinting --- locust/runners.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/locust/runners.py b/locust/runners.py index c8a1c395ec..332d7d8954 100644 --- a/locust/runners.py +++ b/locust/runners.py @@ -330,7 +330,7 @@ def monitor_cpu_and_memory(self) -> NoReturn: gevent.sleep(CPU_MONITOR_INTERVAL) @abstractmethod - def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:list=None) -> None: + def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:Optional[List[User]]=None) -> None: ... def start_shape(self) -> None: @@ -550,7 +550,7 @@ def _start(self, user_count: int, spawn_rate: float, wait: bool = False, user_cl self.environment.events.spawning_complete.fire(user_count=sum(self.target_user_classes_count.values())) - def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:list = None) -> None: + def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes: Optional[List[User]] = None) -> None: if spawn_rate > 100: logger.warning( "Your selected spawn rate is very high (>100), and this is known to sometimes cause issues. Do you really need to ramp up that fast?" 
@@ -737,7 +737,7 @@ def cpu_log_warning(self) -> bool: warning_emitted = True return warning_emitted - def start(self, user_count: int, spawn_rate: float, wait=False, user_classes:list = None) -> None: + def start(self, user_count: int, spawn_rate: float, wait=False, user_classes: Optional[List[User]] = None) -> None: self.spawning_completed = False self.target_user_count = user_count @@ -1215,7 +1215,7 @@ def on_user_error(user_instance: User, exception: Exception, tb: TracebackType) self.environment.events.user_error.add_listener(on_user_error) - def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:list = None) -> None: + def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:Optional[List[User]] = None) -> None: raise NotImplementedError("use start_worker") def start_worker(self, user_classes_count: Dict[str, int], **kwargs) -> None: From c621217faa1686f4c0ac681fb626ce93c3ded1b9 Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Thu, 1 Sep 2022 09:40:53 +0200 Subject: [PATCH 04/17] add documentation entry --- docs/custom-load-shape.rst | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/docs/custom-load-shape.rst b/docs/custom-load-shape.rst index 696ecca56c..1c5570de29 100644 --- a/docs/custom-load-shape.rst +++ b/docs/custom-load-shape.rst @@ -40,3 +40,35 @@ This functionality is further demonstrated in the `examples on github `_. + + +Extend your shape with custom users +----------------------------------- + +Extending the return value of the ``tick()`` with the argument ``user_classes`` makes it possible to pick the users being created for a ``tick()`` specifically. + +.. 
code-block:: python + + class StagesShapeWithCustomUsers(LoadTestShape): + + stages = [ + {"duration": 10, "users": 10, "spawn_rate": 10, "user_classes": [UserA]}, + {"duration": 30, "users": 50, "spawn_rate": 10, "user_classes": [UserA, UserB]}, + {"duration": 60, "users": 100, "spawn_rate": 10, "user_classes": [UserB]}, + {"duration": 120, "users": 100, "spawn_rate": 10, "user_classes": [UserA,UserB]}, + + def tick(self): + run_time = self.get_run_time() + + for stage in self.stages: + if run_time < stage["duration"]: + try: + tick_data = (stage["users"], stage["spawn_rate"], stage["user_classes"]) + except: + tick_data = (stage["users"], stage["spawn_rate"]) + return tick_data + + return None + +This shape would create create in the first 10 seconds 10 User of ``UserA``. In the next twenty seconds 40 of type ``UserA / UserB`` and this continues until the stages end. +Using the extra argument makes it possible to create even more fine grained scenarios. \ No newline at end of file From a39b3a927b445b2d327466963192967efcb298f9 Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Thu, 1 Sep 2022 10:07:06 +0200 Subject: [PATCH 05/17] changes according to flake8, black, mypy --- examples/custom_shape/staging_user_classes.py | 3 ++- locust/runners.py | 4 ++-- locust/shape.py | 6 ++++-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/examples/custom_shape/staging_user_classes.py b/examples/custom_shape/staging_user_classes.py index 285f4e5676..12b381142b 100644 --- a/examples/custom_shape/staging_user_classes.py +++ b/examples/custom_shape/staging_user_classes.py @@ -12,6 +12,7 @@ class WebsiteUserA(HttpUser): wait_time = constant(0.5) tasks = [UserTasks] + class WebsiteUserB(HttpUser): wait_time = constant(0.5) tasks = [UserTasks] @@ -50,7 +51,7 @@ def tick(self): # Not the smartest solution, TODO: find something better try: tick_data = (stage["users"], stage["spawn_rate"], stage["user_classes"]) - except: + except KeyError: tick_data = (stage["users"], 
stage["spawn_rate"]) return tick_data diff --git a/locust/runners.py b/locust/runners.py index 332d7d8954..05d02c6b59 100644 --- a/locust/runners.py +++ b/locust/runners.py @@ -330,7 +330,7 @@ def monitor_cpu_and_memory(self) -> NoReturn: gevent.sleep(CPU_MONITOR_INTERVAL) @abstractmethod - def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:Optional[List[User]]=None) -> None: + def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes: Optional[List[User]] = None) -> None: ... def start_shape(self) -> None: @@ -1215,7 +1215,7 @@ def on_user_error(user_instance: User, exception: Exception, tb: TracebackType) self.environment.events.user_error.add_listener(on_user_error) - def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:Optional[List[User]] = None) -> None: + def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes: Optional[List[User]] = None) -> None: raise NotImplementedError("use start_worker") def start_worker(self, user_classes_count: Dict[str, int], **kwargs) -> None: diff --git a/locust/shape.py b/locust/shape.py index d35b64689e..97d7b2ef91 100644 --- a/locust/shape.py +++ b/locust/shape.py @@ -1,5 +1,7 @@ import time -from typing import Optional, Tuple +from typing import Optional, Tuple, List + +from . 
import User from .runners import Runner @@ -33,7 +35,7 @@ def get_current_user_count(self): """ return self.runner.user_count - def tick(self) -> Optional[Tuple[int, float, any]]: + def tick(self) -> Optional[Tuple[int, float], Tuple[int, float, Optional[List[User]]]]: """ Returns a tuple with 2 elements to control the running load test: From 7a3cdc5c1253e803526e2bf8d3964b514e47b7ef Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Mon, 5 Sep 2022 11:10:18 +0200 Subject: [PATCH 06/17] adding some tests --- locust/dispatch.py | 7 +- locust/shape.py | 2 +- locust/test/test_dispatch.py | 227 +++++++++++++++++++++++++++++++++++ 3 files changed, 232 insertions(+), 4 deletions(-) diff --git a/locust/dispatch.py b/locust/dispatch.py index 5bfd4ae25c..865029e482 100644 --- a/locust/dispatch.py +++ b/locust/dispatch.py @@ -163,7 +163,7 @@ def _dispatcher(self) -> Generator[Dict[str, Dict[str, int]], None, None]: self._dispatch_in_progress = False - def new_dispatch(self, target_user_count: int, spawn_rate: float, user_classes: Optional[List[User]]) -> None: + def new_dispatch(self, target_user_count: int, spawn_rate: float, user_classes: Optional[List] = None) -> None: """ Initialize a new dispatch cycle. 
@@ -171,8 +171,9 @@ def new_dispatch(self, target_user_count: int, spawn_rate: float, user_classes: :param spawn_rate: The spawn rate :param user_classes: The user classes to be used for the new dispatch """ - self._user_classes = user_classes - self._user_generator = self._user_gen() + if user_classes is not None: + self._user_classes = user_classes + self._user_generator = self._user_gen() self._target_user_count = target_user_count diff --git a/locust/shape.py b/locust/shape.py index 97d7b2ef91..7ed88c7b5e 100644 --- a/locust/shape.py +++ b/locust/shape.py @@ -35,7 +35,7 @@ def get_current_user_count(self): """ return self.runner.user_count - def tick(self) -> Optional[Tuple[int, float], Tuple[int, float, Optional[List[User]]]]: + def tick(self) -> Tuple[int, float] | Tuple[int, float, Optional[List[User]]] | None: """ Returns a tuple with 2 elements to control the running load test: diff --git a/locust/test/test_dispatch.py b/locust/test/test_dispatch.py index d885c57567..5790d3a4d5 100644 --- a/locust/test/test_dispatch.py +++ b/locust/test/test_dispatch.py @@ -12,6 +12,7 @@ class TestRampUpUsersFromZero(unittest.TestCase): + def test_ramp_up_users_to_3_workers_with_spawn_rate_of_0_5(self): """Final distribution should be {"User1": 3, "User2": 3, "User3": 3}""" @@ -3590,6 +3591,232 @@ class User5(User): user_class.fixed_count, ) +class TestRampUpDifferentUsers(unittest.TestCase): + def test_ramp_up_different_users_for_each_dispatch(self): + class User1(User): + weight = 1 + + class User2(User): + weight = 1 + + class User3(User): + weight = 1 + + worker_node1 = WorkerNode("1") + + sleep_time = 0.2 + + user_dispatcher = UsersDispatcher( + worker_nodes=[worker_node1], user_classes=[User1, User2, User3] + ) + + user_dispatcher.new_dispatch(target_user_count=3, spawn_rate=3) + self.assertDictEqual( + next(user_dispatcher), + { + "1": {"User1": 1, "User2": 1, "User3": 1} + } + ) + user_dispatcher.new_dispatch(target_user_count=4, spawn_rate=1, user_classes=[User1]) 
+ self.assertDictEqual(next(user_dispatcher), { + "1": {"User1": 2, "User2": 1, "User3": 1} + }) + + user_dispatcher.new_dispatch(target_user_count=5, spawn_rate=1, user_classes=[User2]) + self.assertDictEqual(next(user_dispatcher), { + "1": {"User1": 2, "User2": 2, "User3": 1} + }) + + user_dispatcher.new_dispatch(target_user_count=6, spawn_rate=1, user_classes=[User3]) + self.assertDictEqual(next(user_dispatcher), { + "1": {"User1": 2, "User2": 2, "User3": 2} + }) + + def test_ramp_up_only_one_kind_of_user(self): + class User1(User): + weight = 1 + + class User2(User): + weight = 1 + + class User3(User): + weight = 1 + + worker_node1 = WorkerNode("1") + + sleep_time = 0.2 + + user_dispatcher = UsersDispatcher( + worker_nodes=[worker_node1], user_classes=[User1, User2, User3] + ) + + user_dispatcher.new_dispatch(target_user_count=10, spawn_rate=10, user_classes=[User2]) + self.assertDictEqual( + next(user_dispatcher), + { + "1": {"User1": 0, "User2": 10, "User3": 0} + } + ) + + def test_ramp_up_first_half_user1_second_half_user2(self): + class User1(User): + weight = 1 + + class User2(User): + weight = 1 + + class User3(User): + weight = 1 + + worker_node1 = WorkerNode("1") + + sleep_time = 0.2 + + user_dispatcher = UsersDispatcher( + worker_nodes=[worker_node1], user_classes=[User1, User2, User3] + ) + + user_dispatcher.new_dispatch(target_user_count=10, spawn_rate=10, user_classes=[User2]) + self.assertDictEqual( + next(user_dispatcher), + { + "1": {"User1": 0, "User2": 10, "User3": 0} + } + ) + + user_dispatcher.new_dispatch(target_user_count=40, spawn_rate=30, user_classes=[User3]) + self.assertDictEqual( + next(user_dispatcher), + { + "1": {"User1": 0, "User2": 10, "User3": 30} + } + ) + + def test_ramp_up_first_one_user_then_all_classes(self): + class User1(User): + weight = 1 + + class User2(User): + weight = 1 + + class User3(User): + weight = 1 + + worker_node1 = WorkerNode("1") + + sleep_time = 0.2 + + user_dispatcher = UsersDispatcher( + 
worker_nodes=[worker_node1], user_classes=[User1, User2, User3] + ) + + user_dispatcher.new_dispatch(target_user_count=10, spawn_rate=10, user_classes=[User2]) + self.assertDictEqual( + next(user_dispatcher), + { + "1": {"User1": 0, "User2": 10, "User3": 0} + } + ) + + user_dispatcher.new_dispatch(target_user_count=40, spawn_rate=30, user_classes=[User1, User2, User3]) + self.assertDictEqual( + next(user_dispatcher), + { + "1": {"User1": 10, "User2": 20, "User3": 10} + } + ) + + + def test_ramp_up_different_users_each_dispatch_multiple_worker(self): + class User1(User): + weight = 1 + + class User2(User): + weight = 1 + + class User3(User): + weight = 1 + + worker_node1 = WorkerNode("1") + worker_node2 = WorkerNode("2") + worker_node3 = WorkerNode("3") + + sleep_time = 0.2 + + user_dispatcher = UsersDispatcher( + worker_nodes=[worker_node1, worker_node2, worker_node3], user_classes=[User1, User2, User3] + ) + + user_dispatcher.new_dispatch(target_user_count=9, spawn_rate=9) + self.assertDictEqual( + next(user_dispatcher), + { + "1": {"User1": 3, "User2": 0, "User3": 0}, + "2": {"User1": 0, "User2": 3, "User3": 0}, + "3": {"User1": 0, "User2": 0, "User3": 3}, + } + ) + + user_dispatcher.new_dispatch(target_user_count=12, spawn_rate=3, user_classes=[User3]) + self.assertDictEqual( + next(user_dispatcher), + { + "1": {"User1": 3, "User2": 0, "User3": 1}, + "2": {"User1": 0, "User2": 3, "User3": 1}, + "3": {"User1": 0, "User2": 0, "User3": 4}, + } + ) + + user_dispatcher.new_dispatch(target_user_count=15, spawn_rate=3, user_classes=[User2]) + self.assertDictEqual( + next(user_dispatcher), + { + "1": {"User1": 3, "User2": 1, "User3": 1}, + "2": {"User1": 0, "User2": 4, "User3": 1}, + "3": {"User1": 0, "User2": 1, "User3": 4}, + } + ) + + user_dispatcher.new_dispatch(target_user_count=18, spawn_rate=3, user_classes=[User1]) + self.assertDictEqual( + next(user_dispatcher), + { + "1": {"User1": 4, "User2": 1, "User3": 1}, + "2": {"User1": 1, "User2": 4, "User3": 1}, + "3": 
{"User1": 1, "User2": 1, "User3": 4}, + } + ) + + def test_ramp_up_one_user_class_multiple_worker(self): + class User1(User): + weight = 1 + + class User2(User): + weight = 1 + + class User3(User): + weight = 1 + + worker_node1 = WorkerNode("1") + worker_node2 = WorkerNode("2") + worker_node3 = WorkerNode("3") + + sleep_time = 0.2 + + user_dispatcher = UsersDispatcher( + worker_nodes=[worker_node1, worker_node2, worker_node3], user_classes=[User1, User2, User3] + ) + + user_dispatcher.new_dispatch(target_user_count=60, spawn_rate=60, user_classes=[User2]) + self.assertDictEqual( + next(user_dispatcher), + { + "1": {"User1": 0, "User2": 20, "User3": 0}, + "2": {"User1": 0, "User2": 20, "User3": 0}, + "3": {"User1": 0, "User2": 20, "User3": 0}, + } + ) + def _aggregate_dispatched_users(d: Dict[str, Dict[str, int]]) -> Dict[str, int]: user_classes = list(next(iter(d.values())).keys()) From 9367ca80c436e2d9fde8eba60fe2f3f3e0de1d58 Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Mon, 5 Sep 2022 17:06:08 +0200 Subject: [PATCH 07/17] Updating Tests --- locust/dispatch.py | 5 +- locust/test/test_dispatch.py | 289 +++++++++++++++++++++++++++++++++++ 2 files changed, 292 insertions(+), 2 deletions(-) diff --git a/locust/dispatch.py b/locust/dispatch.py index 865029e482..b4e28ec94d 100644 --- a/locust/dispatch.py +++ b/locust/dispatch.py @@ -56,6 +56,7 @@ def __init__(self, worker_nodes: List["WorkerNode"], user_classes: List[Type[Use """ self._worker_nodes = worker_nodes self._sort_workers() + self._original_user_classes = sorted(user_classes, key=attrgetter("__name__")) self._user_classes = sorted(user_classes, key=attrgetter("__name__")) assert len(user_classes) > 0 @@ -229,7 +230,7 @@ def _prepare_rebalance(self) -> None: # Reset users before recalculating since the current users is used to calculate how many # fixed users to add. 
self._users_on_workers = { - worker_node.id: {user_class.__name__: 0 for user_class in self._user_classes} + worker_node.id: {user_class.__name__: 0 for user_class in self._original_user_classes} for worker_node in self._worker_nodes } self._try_dispatch_fixed = True @@ -330,7 +331,7 @@ def _distribute_users( worker_gen = itertools.cycle(self._worker_nodes) users_on_workers = { - worker_node.id: {user_class.__name__: 0 for user_class in self._user_classes} + worker_node.id: {user_class.__name__: 0 for user_class in self._original_user_classes} for worker_node in self._worker_nodes } diff --git a/locust/test/test_dispatch.py b/locust/test/test_dispatch.py index 5790d3a4d5..074321babf 100644 --- a/locust/test/test_dispatch.py +++ b/locust/test/test_dispatch.py @@ -1010,6 +1010,49 @@ class User3(User): delta = time.perf_counter() - ts self.assertTrue(0 <= delta <= _TOLERANCE, delta) + # def test_ramp_down_users_on_workers_respecting_weight(self): + # class User1(User): + # weight = 1 + # + # class User2(User): + # weight = 1 + # + # class User3(User): + # weight = 1 + # + # user_classes = [User1, User2, User3] + # workers = [WorkerNode(str(i + 1)) for i in range(3)] + # + # user_dispatcher = UsersDispatcher(worker_nodes= workers, user_classes = user_classes) + # user_dispatcher.new_dispatch(target_user_count=7, spawn_rate=7) + # + # dispatched_users = next(user_dispatcher) + # self.assertDictEqual(dispatched_users, + # { + # "1": {"User1": 3, "User2": 0, "User3": 0}, + # "2": {"User1": 0, "User2": 2, "User3": 0}, + # "3": {"User1": 0, "User2": 0, "User3": 2} + # }) + # + # user_dispatcher.new_dispatch(target_user_count=16, spawn_rate=9) + # dispatched_users = next(user_dispatcher) + # self.assertDictEqual(dispatched_users, + # { + # "1": {"User1": 6, "User2": 0, "User3": 0}, + # "2": {"User1": 0, "User2": 5, "User3": 0}, + # "3": {"User1": 0, "User2": 0, "User3": 5} + # }) + # + # user_dispatcher.new_dispatch(target_user_count=3, spawn_rate=15) + # dispatched_users = 
next(user_dispatcher) + # self.assertDictEqual(dispatched_users, + # { + # "1": {"User1": 1, "User2": 0, "User3": 0}, + # "2": {"User1": 0, "User2": 1, "User3": 0}, + # "3": {"User1": 0, "User2": 0, "User3": 1} + # }) + # + def test_ramp_down_users_to_3_workers_with_spawn_rate_of_1(self): class User1(User): weight = 1 @@ -3817,6 +3860,252 @@ class User3(User): } ) + def test_ramp_down_custom_user_classes_respect_weighting(self): + class User1(User): + weight = 1 + + class User2(User): + weight = 1 + + class User3(User): + weight = 1 + + worker_nodes = [WorkerNode(str(i + 1)) for i in range(3)] + user_dispatcher = UsersDispatcher(worker_nodes=worker_nodes, user_classes=[User1, User2, User3]) + + user_dispatcher.new_dispatch(target_user_count=20, spawn_rate=20, user_classes=[User3]) + dispatched_users = next(user_dispatcher) + self.assertDictEqual(dispatched_users, + { + "1": {"User1": 0, "User2": 0, "User3": 7}, + "2": {"User1": 0, "User2": 0, "User3": 7}, + "3": {"User1": 0, "User2": 0, "User3": 6}, + }) + + user_dispatcher.new_dispatch(target_user_count=9, spawn_rate=20, user_classes=[User3]) + dispatched_users = next(user_dispatcher) + self.assertDictEqual(dispatched_users, + { + "1": {"User1": 0, "User2": 0, "User3": 3}, + "2": {"User1": 0, "User2": 0, "User3": 3}, + "3": {"User1": 0, "User2": 0, "User3": 3}, + }) + + user_dispatcher.new_dispatch(target_user_count=3, spawn_rate=20, user_classes=[User1, User2, User3]) + dispatched_users = next(user_dispatcher) + self.assertDictEqual(dispatched_users, + { + "1": {"User1": 0, "User2": 0, "User3": 1}, + "2": {"User1": 0, "User2": 0, "User3": 1}, + "3": {"User1": 0, "User2": 0, "User3": 1}, + }) + + user_dispatcher.new_dispatch(target_user_count=21, spawn_rate=21, user_classes=[User1, User2, User3]) + dispatched_users = next(user_dispatcher) + self.assertDictEqual(dispatched_users, + { + "1": {"User1": 0, "User2": 6, "User3": 1}, # 7 + "2": {"User1": 0, "User2": 0, "User3": 7}, # 7 + "3": {"User1": 6, "User2": 0, 
"User3": 1}, # 7 + }) + + user_dispatcher.new_dispatch(target_user_count=9, spawn_rate=20, user_classes=[User1, User2, User3]) + dispatched_users = next(user_dispatcher) + + # this is disrespecting the weighting + + self.assertDictEqual(dispatched_users, + { + "1": {"User1": 0, "User2": 2, "User3": 1}, + "2": {"User1": 0, "User2": 0, "User3": 3}, + "3": {"User1": 2, "User2": 0, "User3": 1}, + }) + + + + + def test_remove_worker_during_ramp_up_custom_classes(self): + class User1(User): + weight = 1 + + class User2(User): + weight = 1 + + class User3(User): + weight = 1 + + user_classes = [User1, User2, User3] + + worker_nodes = [WorkerNode(str(i + 1)) for i in range(3)] + + users_dispatcher = UsersDispatcher(worker_nodes=worker_nodes, user_classes=user_classes) + + sleep_time = 0.2 # Speed-up test + + users_dispatcher.new_dispatch(target_user_count=9, spawn_rate=3, user_classes=[User2]) + users_dispatcher._wait_between_dispatch = sleep_time + + # Dispatch iteration 1 + ts = time.perf_counter() + dispatched_users = next(users_dispatcher) + delta = time.perf_counter() - ts + self.assertTrue(0 <= delta <= _TOLERANCE, delta) + self.assertDictEqual(dispatched_users, + { + "1": {"User1": 0, "User2": 1, "User3": 0}, + "2": {"User1": 0, "User2": 1, "User3": 0}, + "3": {"User1": 0, "User2": 1, "User3": 0}, + }) + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {'User1': 0,"User2": 3, "User3": 0}) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 1) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[1].id), 1) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 1) + + # Dispatch iteration 2 + ts = time.perf_counter() + dispatched_users = next(users_dispatcher) + delta = time.perf_counter() - ts + self.assertTrue(sleep_time - _TOLERANCE <= delta <= sleep_time + _TOLERANCE, delta) + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {'User1': 0,"User2": 6, 
"User3": 0}) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 2) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[1].id), 2) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 2) + + self.assertFalse(users_dispatcher._rebalance) + + users_dispatcher.remove_worker(worker_nodes[1]) + + self.assertTrue(users_dispatcher._rebalance) + + # Re-balance + ts = time.perf_counter() + dispatched_users = next(users_dispatcher) + delta = time.perf_counter() - ts + self.assertTrue(0 <= delta <= _TOLERANCE, f"Expected re-balance dispatch to be instantaneous but got {delta}s") + self.assertDictEqual(dispatched_users, + { + "1":{"User1": 0, "User2": 3, "User3": 0}, + "3": {"User1": 0, "User2": 3, "User3": 0} + }) + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {'User1': 0,"User2": 6, "User3": 0}) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 3) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 3) + + self.assertFalse(users_dispatcher._rebalance) + + # Dispatch iteration 3 + ts = time.perf_counter() + dispatched_users = next(users_dispatcher) + delta = time.perf_counter() - ts + self.assertTrue(sleep_time - _TOLERANCE <= delta <= sleep_time + _TOLERANCE, delta) + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {'User1': 0,"User2": 9, "User3": 0}) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 5) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 4) + + + + # New dispatch + users_dispatcher.new_dispatch(16,7,[User3]) + dispatched_users = next(users_dispatcher) + self.assertDictEqual(dispatched_users, + { + "1": {"User1": 0, "User2": 5, "User3": 3}, + "3": {"User1": 0, "User2": 4, "User3": 4} + }) + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {'User1': 0,"User2": 9, "User3": 7}) + 
self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 8) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 8) + + + + def test_add_worker_during_ramp_up_custom_classes(self): + class User1(User): + weight = 1 + + class User2(User): + weight = 1 + + class User3(User): + weight = 1 + + user_classes = [User1, User2, User3] + + worker_nodes = [WorkerNode(str(i + 1)) for i in range(3)] + + users_dispatcher = UsersDispatcher(worker_nodes=[worker_nodes[0], worker_nodes[2]], user_classes=user_classes) + + sleep_time = 0.2 # Speed-up test + + users_dispatcher.new_dispatch(target_user_count=11, spawn_rate=3, user_classes=[User1]) + users_dispatcher._wait_between_dispatch = sleep_time + + # Dispatch iteration 1 + ts = time.perf_counter() + dispatched_users = next(users_dispatcher) + delta = time.perf_counter() - ts + self.assertTrue(0 <= delta <= _TOLERANCE, delta) + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {"User1": 3, "User2": 0, "User3": 0}) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 2) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 1) + + # Dispatch iteration 2 + ts = time.perf_counter() + dispatched_users = next(users_dispatcher) + delta = time.perf_counter() - ts + self.assertTrue(sleep_time - _TOLERANCE <= delta <= sleep_time + _TOLERANCE, delta) + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {"User1": 6, "User2": 0, "User3": 0}) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 3) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 3) + + self.assertFalse(users_dispatcher._rebalance) + + users_dispatcher.add_worker(worker_nodes[1]) + + self.assertTrue(users_dispatcher._rebalance) + + # Re-balance + ts = time.perf_counter() + dispatched_users = next(users_dispatcher) + delta = time.perf_counter() - ts + self.assertTrue(0 <= delta <= 
_TOLERANCE, f"Expected re-balance dispatch to be instantaneous but got {delta}s") + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {"User1": 6, "User2": 0, "User3": 0}) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 2) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[1].id), 2) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 2) + + self.assertFalse(users_dispatcher._rebalance) + + # Dispatch iteration 3 + ts = time.perf_counter() + dispatched_users = next(users_dispatcher) + delta = time.perf_counter() - ts + self.assertTrue(sleep_time - _TOLERANCE <= delta <= sleep_time + _TOLERANCE, delta) + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {"User1": 9, "User2": 0, "User3": 0}) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 3) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[1].id), 3) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 3) + + # Dispatch iteration 4 + ts = time.perf_counter() + dispatched_users = next(users_dispatcher) + delta = time.perf_counter() - ts + self.assertTrue(sleep_time - _TOLERANCE <= delta <= sleep_time + _TOLERANCE, delta) + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {"User1": 11, "User2": 0, "User3": 0}) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 4) + # without host-based balancing the following two values would be reversed + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[1].id), 4) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 3) + + #New Dispatch + users_dispatcher.new_dispatch(target_user_count=18, spawn_rate=7, user_classes=[User3]) + dispatched_users = next(users_dispatcher) + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {"User1": 11, "User2": 0, "User3": 7}) + 
self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 6) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[1].id), 6) + self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 6) + + + def _aggregate_dispatched_users(d: Dict[str, Dict[str, int]]) -> Dict[str, int]: user_classes = list(next(iter(d.values())).keys()) From 70b551b75bb6909910508944b10b3829759c5322 Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Mon, 5 Sep 2022 17:20:59 +0200 Subject: [PATCH 08/17] Small fix in new dispatch, not creating a new generator if all userclasses are included. --- locust/dispatch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/locust/dispatch.py b/locust/dispatch.py index b4e28ec94d..c5dc4f1cbc 100644 --- a/locust/dispatch.py +++ b/locust/dispatch.py @@ -172,7 +172,7 @@ def new_dispatch(self, target_user_count: int, spawn_rate: float, user_classes: :param spawn_rate: The spawn rate :param user_classes: The user classes to be used for the new dispatch """ - if user_classes is not None: + if user_classes is not None and user_classes != self._original_user_classes: self._user_classes = user_classes self._user_generator = self._user_gen() From c13124114c48178b7dedbe0dde02a0bb11b633c4 Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Tue, 6 Sep 2022 09:13:59 +0200 Subject: [PATCH 09/17] Updated files according to flake, blake and mypy --- locust/runners.py | 39 +++++--- locust/shape.py | 4 +- locust/test/test_dispatch.py | 180 +++++++++++++---------------------- 3 files changed, 95 insertions(+), 128 deletions(-) diff --git a/locust/runners.py b/locust/runners.py index 05d02c6b59..f0ad274c40 100644 --- a/locust/runners.py +++ b/locust/runners.py @@ -28,6 +28,7 @@ Type, Any, cast, + Union, ) from uuid import uuid4 @@ -64,7 +65,6 @@ logger = logging.getLogger(__name__) - STATE_INIT, STATE_SPAWNING, STATE_RUNNING, STATE_CLEANUP, STATE_STOPPING, STATE_STOPPED, STATE_MISSING = [ "ready", 
"spawning", @@ -84,7 +84,6 @@ CONNECT_TIMEOUT = 5 CONNECT_RETRY_COUNT = 60 - greenlet_exception_handler = greenlet_exception_logger(logger) @@ -119,7 +118,7 @@ def __init__(self, environment: "Environment") -> None: self.state = STATE_INIT self.spawning_greenlet: Optional[gevent.Greenlet] = None self.shape_greenlet: Optional[gevent.Greenlet] = None - self.shape_last_state: Optional[Tuple[int, float]] = None + self.shape_last_state: Tuple[int, float] | Tuple[int, float, Optional[List[Type[User]]]] | None = None self.current_cpu_usage: int = 0 self.cpu_warning_emitted: bool = False self.worker_cpu_warning_emitted: bool = False @@ -330,7 +329,9 @@ def monitor_cpu_and_memory(self) -> NoReturn: gevent.sleep(CPU_MONITOR_INTERVAL) @abstractmethod - def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes: Optional[List[User]] = None) -> None: + def start( + self, user_count: int, spawn_rate: float, wait: bool = False, user_classes: Optional[List[Type[User]]] = None + ) -> None: ... 
def start_shape(self) -> None: @@ -351,7 +352,9 @@ def start_shape(self) -> None: def shape_worker(self) -> None: logger.info("Shape worker starting") while self.state == STATE_INIT or self.state == STATE_SPAWNING or self.state == STATE_RUNNING: - new_state = self.environment.shape_class.tick() if self.environment.shape_class is not None else None + new_state: Tuple[int, float] | Tuple[int, float, Optional[List[Type[User]]]] | None = ( + self.environment.shape_class.tick() if self.environment.shape_class is not None else None + ) if new_state is None: logger.info("Shape test stopping") if self.environment.parsed_options and self.environment.parsed_options.headless: @@ -365,10 +368,10 @@ def shape_worker(self) -> None: gevent.sleep(1) else: if len(new_state) == 2: - user_count, spawn_rate = new_state + user_count, spawn_rate = new_state # type: ignore user_classes = None else: - user_count, spawn_rate, user_classes = new_state + user_count, spawn_rate, user_classes = new_state # type: ignore logger.info("Shape test updating to %d users at %.2f spawn rate" % (user_count, spawn_rate)) # TODO: This `self.start()` call is blocking until the ramp-up is completed. 
This can leads # to unexpected behaviours such as the one in the following example: @@ -467,7 +470,7 @@ def on_user_error(user_instance, exception, tb): self.environment.events.user_error.add_listener(on_user_error) - def _start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes:list = None) -> None: + def _start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes: list = None) -> None: """ Start running a load test @@ -550,7 +553,9 @@ def _start(self, user_count: int, spawn_rate: float, wait: bool = False, user_cl self.environment.events.spawning_complete.fire(user_count=sum(self.target_user_classes_count.values())) - def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes: Optional[List[User]] = None) -> None: + def start( + self, user_count: int, spawn_rate: float, wait: bool = False, user_classes: Optional[List[Type[User]]] = None + ) -> None: if spawn_rate > 100: logger.warning( "Your selected spawn rate is very high (>100), and this is known to sometimes cause issues. Do you really need to ramp up that fast?" 
@@ -559,7 +564,9 @@ def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_cla if self.spawning_greenlet: # kill existing spawning_greenlet before we start a new one self.spawning_greenlet.kill(block=True) - self.spawning_greenlet = self.greenlet.spawn(lambda: self._start(user_count, spawn_rate, wait=wait, user_classes=user_classes)) + self.spawning_greenlet = self.greenlet.spawn( + lambda: self._start(user_count, spawn_rate, wait=wait, user_classes=user_classes) + ) self.spawning_greenlet.link_exception(greenlet_exception_handler) def stop(self) -> None: @@ -737,7 +744,9 @@ def cpu_log_warning(self) -> bool: warning_emitted = True return warning_emitted - def start(self, user_count: int, spawn_rate: float, wait=False, user_classes: Optional[List[User]] = None) -> None: + def start( + self, user_count: int, spawn_rate: float, wait=False, user_classes: Optional[List[Type[User]]] = None + ) -> None: self.spawning_completed = False self.target_user_count = user_count @@ -782,7 +791,9 @@ def start(self, user_count: int, spawn_rate: float, wait=False, user_classes: Op self.update_state(STATE_SPAWNING) - self._users_dispatcher.new_dispatch(target_user_count=user_count, spawn_rate=spawn_rate, user_classes=user_classes) + self._users_dispatcher.new_dispatch( + target_user_count=user_count, spawn_rate=spawn_rate, user_classes=user_classes + ) try: for dispatched_users in self._users_dispatcher: @@ -1215,7 +1226,9 @@ def on_user_error(user_instance: User, exception: Exception, tb: TracebackType) self.environment.events.user_error.add_listener(on_user_error) - def start(self, user_count: int, spawn_rate: float, wait: bool = False, user_classes: Optional[List[User]] = None) -> None: + def start( + self, user_count: int, spawn_rate: float, wait: bool = False, user_classes: Optional[List[Type[User]]] = None + ) -> None: raise NotImplementedError("use start_worker") def start_worker(self, user_classes_count: Dict[str, int], **kwargs) -> None: diff --git 
a/locust/shape.py b/locust/shape.py index 7ed88c7b5e..5cd10145d6 100644 --- a/locust/shape.py +++ b/locust/shape.py @@ -1,5 +1,5 @@ import time -from typing import Optional, Tuple, List +from typing import Optional, Tuple, List, Type from . import User from .runners import Runner @@ -35,7 +35,7 @@ def get_current_user_count(self): """ return self.runner.user_count - def tick(self) -> Tuple[int, float] | Tuple[int, float, Optional[List[User]]] | None: + def tick(self) -> Tuple[int, float] | Tuple[int, float, Optional[List[Type[User]]]] | None: """ Returns a tuple with 2 elements to control the running load test: diff --git a/locust/test/test_dispatch.py b/locust/test/test_dispatch.py index 074321babf..88ef460f42 100644 --- a/locust/test/test_dispatch.py +++ b/locust/test/test_dispatch.py @@ -12,7 +12,6 @@ class TestRampUpUsersFromZero(unittest.TestCase): - def test_ramp_up_users_to_3_workers_with_spawn_rate_of_0_5(self): """Final distribution should be {"User1": 3, "User2": 3, "User3": 3}""" @@ -3634,6 +3633,7 @@ class User5(User): user_class.fixed_count, ) + class TestRampUpDifferentUsers(unittest.TestCase): def test_ramp_up_different_users_for_each_dispatch(self): class User1(User): @@ -3649,31 +3649,18 @@ class User3(User): sleep_time = 0.2 - user_dispatcher = UsersDispatcher( - worker_nodes=[worker_node1], user_classes=[User1, User2, User3] - ) + user_dispatcher = UsersDispatcher(worker_nodes=[worker_node1], user_classes=[User1, User2, User3]) user_dispatcher.new_dispatch(target_user_count=3, spawn_rate=3) - self.assertDictEqual( - next(user_dispatcher), - { - "1": {"User1": 1, "User2": 1, "User3": 1} - } - ) + self.assertDictEqual(next(user_dispatcher), {"1": {"User1": 1, "User2": 1, "User3": 1}}) user_dispatcher.new_dispatch(target_user_count=4, spawn_rate=1, user_classes=[User1]) - self.assertDictEqual(next(user_dispatcher), { - "1": {"User1": 2, "User2": 1, "User3": 1} - }) + self.assertDictEqual(next(user_dispatcher), {"1": {"User1": 2, "User2": 1, "User3": 
1}}) user_dispatcher.new_dispatch(target_user_count=5, spawn_rate=1, user_classes=[User2]) - self.assertDictEqual(next(user_dispatcher), { - "1": {"User1": 2, "User2": 2, "User3": 1} - }) + self.assertDictEqual(next(user_dispatcher), {"1": {"User1": 2, "User2": 2, "User3": 1}}) user_dispatcher.new_dispatch(target_user_count=6, spawn_rate=1, user_classes=[User3]) - self.assertDictEqual(next(user_dispatcher), { - "1": {"User1": 2, "User2": 2, "User3": 2} - }) + self.assertDictEqual(next(user_dispatcher), {"1": {"User1": 2, "User2": 2, "User3": 2}}) def test_ramp_up_only_one_kind_of_user(self): class User1(User): @@ -3689,17 +3676,10 @@ class User3(User): sleep_time = 0.2 - user_dispatcher = UsersDispatcher( - worker_nodes=[worker_node1], user_classes=[User1, User2, User3] - ) + user_dispatcher = UsersDispatcher(worker_nodes=[worker_node1], user_classes=[User1, User2, User3]) user_dispatcher.new_dispatch(target_user_count=10, spawn_rate=10, user_classes=[User2]) - self.assertDictEqual( - next(user_dispatcher), - { - "1": {"User1": 0, "User2": 10, "User3": 0} - } - ) + self.assertDictEqual(next(user_dispatcher), {"1": {"User1": 0, "User2": 10, "User3": 0}}) def test_ramp_up_first_half_user1_second_half_user2(self): class User1(User): @@ -3715,25 +3695,13 @@ class User3(User): sleep_time = 0.2 - user_dispatcher = UsersDispatcher( - worker_nodes=[worker_node1], user_classes=[User1, User2, User3] - ) + user_dispatcher = UsersDispatcher(worker_nodes=[worker_node1], user_classes=[User1, User2, User3]) user_dispatcher.new_dispatch(target_user_count=10, spawn_rate=10, user_classes=[User2]) - self.assertDictEqual( - next(user_dispatcher), - { - "1": {"User1": 0, "User2": 10, "User3": 0} - } - ) + self.assertDictEqual(next(user_dispatcher), {"1": {"User1": 0, "User2": 10, "User3": 0}}) user_dispatcher.new_dispatch(target_user_count=40, spawn_rate=30, user_classes=[User3]) - self.assertDictEqual( - next(user_dispatcher), - { - "1": {"User1": 0, "User2": 10, "User3": 30} - } - ) 
+ self.assertDictEqual(next(user_dispatcher), {"1": {"User1": 0, "User2": 10, "User3": 30}}) def test_ramp_up_first_one_user_then_all_classes(self): class User1(User): @@ -3749,26 +3717,13 @@ class User3(User): sleep_time = 0.2 - user_dispatcher = UsersDispatcher( - worker_nodes=[worker_node1], user_classes=[User1, User2, User3] - ) + user_dispatcher = UsersDispatcher(worker_nodes=[worker_node1], user_classes=[User1, User2, User3]) user_dispatcher.new_dispatch(target_user_count=10, spawn_rate=10, user_classes=[User2]) - self.assertDictEqual( - next(user_dispatcher), - { - "1": {"User1": 0, "User2": 10, "User3": 0} - } - ) + self.assertDictEqual(next(user_dispatcher), {"1": {"User1": 0, "User2": 10, "User3": 0}}) user_dispatcher.new_dispatch(target_user_count=40, spawn_rate=30, user_classes=[User1, User2, User3]) - self.assertDictEqual( - next(user_dispatcher), - { - "1": {"User1": 10, "User2": 20, "User3": 10} - } - ) - + self.assertDictEqual(next(user_dispatcher), {"1": {"User1": 10, "User2": 20, "User3": 10}}) def test_ramp_up_different_users_each_dispatch_multiple_worker(self): class User1(User): @@ -3797,7 +3752,7 @@ class User3(User): "1": {"User1": 3, "User2": 0, "User3": 0}, "2": {"User1": 0, "User2": 3, "User3": 0}, "3": {"User1": 0, "User2": 0, "User3": 3}, - } + }, ) user_dispatcher.new_dispatch(target_user_count=12, spawn_rate=3, user_classes=[User3]) @@ -3807,7 +3762,7 @@ class User3(User): "1": {"User1": 3, "User2": 0, "User3": 1}, "2": {"User1": 0, "User2": 3, "User3": 1}, "3": {"User1": 0, "User2": 0, "User3": 4}, - } + }, ) user_dispatcher.new_dispatch(target_user_count=15, spawn_rate=3, user_classes=[User2]) @@ -3817,7 +3772,7 @@ class User3(User): "1": {"User1": 3, "User2": 1, "User3": 1}, "2": {"User1": 0, "User2": 4, "User3": 1}, "3": {"User1": 0, "User2": 1, "User3": 4}, - } + }, ) user_dispatcher.new_dispatch(target_user_count=18, spawn_rate=3, user_classes=[User1]) @@ -3827,7 +3782,7 @@ class User3(User): "1": {"User1": 4, "User2": 1, 
"User3": 1}, "2": {"User1": 1, "User2": 4, "User3": 1}, "3": {"User1": 1, "User2": 1, "User3": 4}, - } + }, ) def test_ramp_up_one_user_class_multiple_worker(self): @@ -3857,7 +3812,7 @@ class User3(User): "1": {"User1": 0, "User2": 20, "User3": 0}, "2": {"User1": 0, "User2": 20, "User3": 0}, "3": {"User1": 0, "User2": 20, "User3": 0}, - } + }, ) def test_ramp_down_custom_user_classes_respect_weighting(self): @@ -3875,54 +3830,61 @@ class User3(User): user_dispatcher.new_dispatch(target_user_count=20, spawn_rate=20, user_classes=[User3]) dispatched_users = next(user_dispatcher) - self.assertDictEqual(dispatched_users, + self.assertDictEqual( + dispatched_users, { "1": {"User1": 0, "User2": 0, "User3": 7}, "2": {"User1": 0, "User2": 0, "User3": 7}, "3": {"User1": 0, "User2": 0, "User3": 6}, - }) + }, + ) user_dispatcher.new_dispatch(target_user_count=9, spawn_rate=20, user_classes=[User3]) dispatched_users = next(user_dispatcher) - self.assertDictEqual(dispatched_users, + self.assertDictEqual( + dispatched_users, { "1": {"User1": 0, "User2": 0, "User3": 3}, "2": {"User1": 0, "User2": 0, "User3": 3}, "3": {"User1": 0, "User2": 0, "User3": 3}, - }) + }, + ) user_dispatcher.new_dispatch(target_user_count=3, spawn_rate=20, user_classes=[User1, User2, User3]) dispatched_users = next(user_dispatcher) - self.assertDictEqual(dispatched_users, - { - "1": {"User1": 0, "User2": 0, "User3": 1}, - "2": {"User1": 0, "User2": 0, "User3": 1}, - "3": {"User1": 0, "User2": 0, "User3": 1}, - }) + self.assertDictEqual( + dispatched_users, + { + "1": {"User1": 0, "User2": 0, "User3": 1}, + "2": {"User1": 0, "User2": 0, "User3": 1}, + "3": {"User1": 0, "User2": 0, "User3": 1}, + }, + ) user_dispatcher.new_dispatch(target_user_count=21, spawn_rate=21, user_classes=[User1, User2, User3]) dispatched_users = next(user_dispatcher) - self.assertDictEqual(dispatched_users, + self.assertDictEqual( + dispatched_users, { - "1": {"User1": 0, "User2": 6, "User3": 1}, # 7 - "2": {"User1": 0, "User2": 
0, "User3": 7}, # 7 - "3": {"User1": 6, "User2": 0, "User3": 1}, # 7 - }) + "1": {"User1": 0, "User2": 6, "User3": 1}, # 7 + "2": {"User1": 0, "User2": 0, "User3": 7}, # 7 + "3": {"User1": 6, "User2": 0, "User3": 1}, # 7 + }, + ) user_dispatcher.new_dispatch(target_user_count=9, spawn_rate=20, user_classes=[User1, User2, User3]) dispatched_users = next(user_dispatcher) # this is disrespecting the weighting - self.assertDictEqual(dispatched_users, - { - "1": {"User1": 0, "User2": 2, "User3": 1}, - "2": {"User1": 0, "User2": 0, "User3": 3}, - "3": {"User1": 2, "User2": 0, "User3": 1}, - }) - - - + self.assertDictEqual( + dispatched_users, + { + "1": {"User1": 0, "User2": 2, "User3": 1}, + "2": {"User1": 0, "User2": 0, "User3": 3}, + "3": {"User1": 2, "User2": 0, "User3": 1}, + }, + ) def test_remove_worker_during_ramp_up_custom_classes(self): class User1(User): @@ -3950,13 +3912,15 @@ class User3(User): dispatched_users = next(users_dispatcher) delta = time.perf_counter() - ts self.assertTrue(0 <= delta <= _TOLERANCE, delta) - self.assertDictEqual(dispatched_users, + self.assertDictEqual( + dispatched_users, { "1": {"User1": 0, "User2": 1, "User3": 0}, "2": {"User1": 0, "User2": 1, "User3": 0}, "3": {"User1": 0, "User2": 1, "User3": 0}, - }) - self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {'User1': 0,"User2": 3, "User3": 0}) + }, + ) + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {"User1": 0, "User2": 3, "User3": 0}) self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 1) self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[1].id), 1) self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 1) @@ -3966,7 +3930,7 @@ class User3(User): dispatched_users = next(users_dispatcher) delta = time.perf_counter() - ts self.assertTrue(sleep_time - _TOLERANCE <= delta <= sleep_time + _TOLERANCE, delta) - self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), 
{'User1': 0,"User2": 6, "User3": 0}) + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {"User1": 0, "User2": 6, "User3": 0}) self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 2) self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[1].id), 2) self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 2) @@ -3982,12 +3946,10 @@ class User3(User): dispatched_users = next(users_dispatcher) delta = time.perf_counter() - ts self.assertTrue(0 <= delta <= _TOLERANCE, f"Expected re-balance dispatch to be instantaneous but got {delta}s") - self.assertDictEqual(dispatched_users, - { - "1":{"User1": 0, "User2": 3, "User3": 0}, - "3": {"User1": 0, "User2": 3, "User3": 0} - }) - self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {'User1': 0,"User2": 6, "User3": 0}) + self.assertDictEqual( + dispatched_users, {"1": {"User1": 0, "User2": 3, "User3": 0}, "3": {"User1": 0, "User2": 3, "User3": 0}} + ) + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {"User1": 0, "User2": 6, "User3": 0}) self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 3) self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 3) @@ -3998,26 +3960,20 @@ class User3(User): dispatched_users = next(users_dispatcher) delta = time.perf_counter() - ts self.assertTrue(sleep_time - _TOLERANCE <= delta <= sleep_time + _TOLERANCE, delta) - self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {'User1': 0,"User2": 9, "User3": 0}) + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {"User1": 0, "User2": 9, "User3": 0}) self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 5) self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 4) - - # New dispatch - users_dispatcher.new_dispatch(16,7,[User3]) + users_dispatcher.new_dispatch(16, 7, [User3]) dispatched_users = 
next(users_dispatcher) - self.assertDictEqual(dispatched_users, - { - "1": {"User1": 0, "User2": 5, "User3": 3}, - "3": {"User1": 0, "User2": 4, "User3": 4} - }) - self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {'User1': 0,"User2": 9, "User3": 7}) + self.assertDictEqual( + dispatched_users, {"1": {"User1": 0, "User2": 5, "User3": 3}, "3": {"User1": 0, "User2": 4, "User3": 4}} + ) + self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {"User1": 0, "User2": 9, "User3": 7}) self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[0].id), 8) self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 8) - - def test_add_worker_during_ramp_up_custom_classes(self): class User1(User): weight = 1 @@ -4096,7 +4052,7 @@ class User3(User): self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[1].id), 4) self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 3) - #New Dispatch + # New Dispatch users_dispatcher.new_dispatch(target_user_count=18, spawn_rate=7, user_classes=[User3]) dispatched_users = next(users_dispatcher) self.assertDictEqual(_aggregate_dispatched_users(dispatched_users), {"User1": 11, "User2": 0, "User3": 7}) @@ -4105,8 +4061,6 @@ class User3(User): self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 6) - - def _aggregate_dispatched_users(d: Dict[str, Dict[str, int]]) -> Dict[str, int]: user_classes = list(next(iter(d.values())).keys()) return {u: sum(d[u] for d in d.values()) for u in user_classes} From 713477c6dc75b9f22aef60f80047c05765883fa2 Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Tue, 6 Sep 2022 09:30:46 +0200 Subject: [PATCH 10/17] fix arguments of `new_dispatch` --- locust/dispatch.py | 2 +- locust/test/test_dispatch.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/locust/dispatch.py b/locust/dispatch.py index c5dc4f1cbc..cf0fadf023 100644 --- a/locust/dispatch.py +++ b/locust/dispatch.py @@ 
-172,7 +172,7 @@ def new_dispatch(self, target_user_count: int, spawn_rate: float, user_classes: :param spawn_rate: The spawn rate :param user_classes: The user classes to be used for the new dispatch """ - if user_classes is not None and user_classes != self._original_user_classes: + if user_classes is not None and self._user_classes != user_classes: self._user_classes = user_classes self._user_generator = self._user_gen() diff --git a/locust/test/test_dispatch.py b/locust/test/test_dispatch.py index 88ef460f42..b5d3b0bfa7 100644 --- a/locust/test/test_dispatch.py +++ b/locust/test/test_dispatch.py @@ -3863,6 +3863,7 @@ class User3(User): user_dispatcher.new_dispatch(target_user_count=21, spawn_rate=21, user_classes=[User1, User2, User3]) dispatched_users = next(user_dispatcher) + print(dispatched_users) self.assertDictEqual( dispatched_users, { From 389f68ae0f007a287d2ce57fcd725f6978e0377c Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Tue, 6 Sep 2022 13:03:33 +0200 Subject: [PATCH 11/17] add sorting of userclasses --- locust/dispatch.py | 4 ++-- locust/runners.py | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/locust/dispatch.py b/locust/dispatch.py index cf0fadf023..51594f1bec 100644 --- a/locust/dispatch.py +++ b/locust/dispatch.py @@ -172,8 +172,8 @@ def new_dispatch(self, target_user_count: int, spawn_rate: float, user_classes: :param spawn_rate: The spawn rate :param user_classes: The user classes to be used for the new dispatch """ - if user_classes is not None and self._user_classes != user_classes: - self._user_classes = user_classes + if user_classes is not None and self._user_classes != sorted(user_classes, key=attrgetter("__name__")): + self._user_classes = sorted(user_classes, key=attrgetter("__name__")) self._user_generator = self._user_gen() self._target_user_count = target_user_count diff --git a/locust/runners.py b/locust/runners.py index f0ad274c40..fe83f1f14a 100644 --- a/locust/runners.py +++ 
b/locust/runners.py @@ -494,10 +494,10 @@ def _start(self, user_count: int, spawn_rate: float, wait: bool = False, user_cl if wait and user_count - self.user_count > spawn_rate: raise ValueError("wait is True but the amount of users to add is greater than the spawn rate") - if user_classes is None: - user_classes = self.user_classes + #if user_classes is None: + # user_classes = self.user_classes - for user_class in user_classes: + for user_class in self.user_classes: if self.environment.host: user_class.host = self.environment.host @@ -756,10 +756,10 @@ def start( logger.warning("You can't start a distributed test before at least one worker processes has connected") return - if user_classes is None: - user_classes = self.user_classes + #if user_classes is None: + # user_classes = self.user_classes - for user_class in user_classes: + for user_class in self.user_classes: if self.environment.host: user_class.host = self.environment.host From fdabf8148dca98bab838cb36ec9549ca331e6812 Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Tue, 6 Sep 2022 16:25:45 +0200 Subject: [PATCH 12/17] typing according to python 3.7 --- locust/runners.py | 8 ++++---- locust/shape.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/locust/runners.py b/locust/runners.py index fe83f1f14a..97faab0c7d 100644 --- a/locust/runners.py +++ b/locust/runners.py @@ -118,7 +118,7 @@ def __init__(self, environment: "Environment") -> None: self.state = STATE_INIT self.spawning_greenlet: Optional[gevent.Greenlet] = None self.shape_greenlet: Optional[gevent.Greenlet] = None - self.shape_last_state: Tuple[int, float] | Tuple[int, float, Optional[List[Type[User]]]] | None = None + self.shape_last_state: Union[Tuple[int, float], Tuple[int, float, Optional[List[Type[User]]]], None] = None self.current_cpu_usage: int = 0 self.cpu_warning_emitted: bool = False self.worker_cpu_warning_emitted: bool = False @@ -352,7 +352,7 @@ def start_shape(self) -> None: def shape_worker(self) -> None: 
logger.info("Shape worker starting") while self.state == STATE_INIT or self.state == STATE_SPAWNING or self.state == STATE_RUNNING: - new_state: Tuple[int, float] | Tuple[int, float, Optional[List[Type[User]]]] | None = ( + new_state: Union[Tuple[int, float], Tuple[int, float, Optional[List[Type[User]]]], None] = ( self.environment.shape_class.tick() if self.environment.shape_class is not None else None ) if new_state is None: @@ -494,7 +494,7 @@ def _start(self, user_count: int, spawn_rate: float, wait: bool = False, user_cl if wait and user_count - self.user_count > spawn_rate: raise ValueError("wait is True but the amount of users to add is greater than the spawn rate") - #if user_classes is None: + # if user_classes is None: # user_classes = self.user_classes for user_class in self.user_classes: @@ -756,7 +756,7 @@ def start( logger.warning("You can't start a distributed test before at least one worker processes has connected") return - #if user_classes is None: + # if user_classes is None: # user_classes = self.user_classes for user_class in self.user_classes: diff --git a/locust/shape.py b/locust/shape.py index 5cd10145d6..c32351f12e 100644 --- a/locust/shape.py +++ b/locust/shape.py @@ -1,5 +1,5 @@ import time -from typing import Optional, Tuple, List, Type +from typing import Optional, Tuple, List, Type, Union from . 
import User
 from .runners import Runner
@@ -35,7 +35,7 @@ def get_current_user_count(self):
         """
         return self.runner.user_count
 
-    def tick(self) -> Tuple[int, float] | Tuple[int, float, Optional[List[Type[User]]]] | None:
+    def tick(self) -> Union[Tuple[int, float], Tuple[int, float, Optional[List[Type[User]]]], None]:
         """
         Returns a tuple with 2 elements to control the running load test:
 
From 2028b20839195ee2f63579b3f3ca80508f5edad7 Mon Sep 17 00:00:00 2001
From: Samuel Spagl
Date: Tue, 6 Sep 2022 23:12:30 +0200
Subject: [PATCH 13/17] deleting unnecessary comment in docs

---
 docs/custom-load-shape.rst | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/docs/custom-load-shape.rst b/docs/custom-load-shape.rst
index 1c5570de29..340bce8378 100644
--- a/docs/custom-load-shape.rst
+++ b/docs/custom-load-shape.rst
@@ -70,5 +70,4 @@ Extending the return value of the ``tick()`` with the argument ``user_classes``
 
         return None
 
-This shape would create create in the first 10 seconds 10 User of ``UserA``. In the next twenty seconds 40 of type ``UserA / UserB`` and this continues until the stages end.
-Using the extra argument makes it possible to create even more fine grained scenarios.
\ No newline at end of file
+This shape would create in the first 10 seconds 10 User of ``UserA``. In the next twenty seconds 40 of type ``UserA / UserB`` and this continues until the stages end.
\ No newline at end of file From f5822f4aa2e8eb6c9838b0e2f73f53c5c2968977 Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Tue, 6 Sep 2022 23:17:17 +0200 Subject: [PATCH 14/17] small changes in `runner.py` --- locust/runners.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/locust/runners.py b/locust/runners.py index 97faab0c7d..39ae2e88ab 100644 --- a/locust/runners.py +++ b/locust/runners.py @@ -479,7 +479,8 @@ def _start(self, user_count: int, spawn_rate: float, wait: bool = False, user_cl :param wait: If True calls to this method will block until all users are spawned. If False (the default), a greenlet that spawns the users will be started and the call to this method will return immediately. - :param user_classes: The user classes to be dispatched + :param user_classes: The user classes to be dispatched, None indicates to use the classes the dispatcher was + invoked with. """ self.target_user_count = user_count @@ -494,8 +495,6 @@ def _start(self, user_count: int, spawn_rate: float, wait: bool = False, user_cl if wait and user_count - self.user_count > spawn_rate: raise ValueError("wait is True but the amount of users to add is greater than the spawn rate") - # if user_classes is None: - # user_classes = self.user_classes for user_class in self.user_classes: if self.environment.host: @@ -756,9 +755,6 @@ def start( logger.warning("You can't start a distributed test before at least one worker processes has connected") return - # if user_classes is None: - # user_classes = self.user_classes - for user_class in self.user_classes: if self.environment.host: user_class.host = self.environment.host From d6715e9811408d5a10f372d9c3acc9e5adfaf57a Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Tue, 6 Sep 2022 23:18:01 +0200 Subject: [PATCH 15/17] fix grammar in `shape.py` --- locust/shape.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/locust/shape.py b/locust/shape.py index c32351f12e..ed61e311b2 100644 --- 
a/locust/shape.py +++ b/locust/shape.py @@ -41,7 +41,7 @@ def tick(self) -> Union[Tuple[int, float], Tuple[int, float, Optional[List[Type[ user_count -- Total user count spawn_rate -- Number of users to start/stop per second when changing number of users - user_classes -- None or a List of userclasses to be spawend in it tick + user_classes -- None or a List of userclasses to be spawned in it tick If `None` is returned then the running load test will be stopped. """ From 696ba58fa0a4fa76be7b100266cddb354cf57679 Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Wed, 7 Sep 2022 07:08:36 +0200 Subject: [PATCH 16/17] renamed `new_state` to `current_tick` --- locust/runners.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/locust/runners.py b/locust/runners.py index 39ae2e88ab..92a43a2a24 100644 --- a/locust/runners.py +++ b/locust/runners.py @@ -352,10 +352,10 @@ def start_shape(self) -> None: def shape_worker(self) -> None: logger.info("Shape worker starting") while self.state == STATE_INIT or self.state == STATE_SPAWNING or self.state == STATE_RUNNING: - new_state: Union[Tuple[int, float], Tuple[int, float, Optional[List[Type[User]]]], None] = ( + current_tick: Union[Tuple[int, float], Tuple[int, float, Optional[List[Type[User]]]], None] = ( self.environment.shape_class.tick() if self.environment.shape_class is not None else None ) - if new_state is None: + if current_tick is None: logger.info("Shape test stopping") if self.environment.parsed_options and self.environment.parsed_options.headless: self.quit() @@ -364,14 +364,14 @@ def shape_worker(self) -> None: self.shape_greenlet = None self.shape_last_state = None return - elif self.shape_last_state == new_state: + elif self.shape_last_state == current_tick: gevent.sleep(1) else: - if len(new_state) == 2: - user_count, spawn_rate = new_state # type: ignore + if len(current_tick) == 2: + user_count, spawn_rate = current_tick # type: ignore user_classes = None else: - user_count, 
spawn_rate, user_classes = new_state # type: ignore + user_count, spawn_rate, user_classes = current_tick # type: ignore logger.info("Shape test updating to %d users at %.2f spawn rate" % (user_count, spawn_rate)) # TODO: This `self.start()` call is blocking until the ramp-up is completed. This can leads # to unexpected behaviours such as the one in the following example: @@ -387,7 +387,7 @@ def shape_worker(self) -> None: # `(user_count - prev_user_count) / spawn_rate` in order to limit the runtime # of each load test shape stage. self.start(user_count=user_count, spawn_rate=spawn_rate, user_classes=user_classes) - self.shape_last_state = new_state + self.shape_last_state = current_tick def stop(self) -> None: """ @@ -495,7 +495,6 @@ def _start(self, user_count: int, spawn_rate: float, wait: bool = False, user_cl if wait and user_count - self.user_count > spawn_rate: raise ValueError("wait is True but the amount of users to add is greater than the spawn rate") - for user_class in self.user_classes: if self.environment.host: user_class.host = self.environment.host From 1c74db81a2c26a8336aebfa57be3f312d3fad859 Mon Sep 17 00:00:00 2001 From: Samuel Spagl Date: Wed, 7 Sep 2022 09:41:47 +0200 Subject: [PATCH 17/17] renamed `shape_last_state` to `shape_last_tick` --- locust/runners.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/locust/runners.py b/locust/runners.py index 92a43a2a24..a0f1cfce34 100644 --- a/locust/runners.py +++ b/locust/runners.py @@ -118,7 +118,7 @@ def __init__(self, environment: "Environment") -> None: self.state = STATE_INIT self.spawning_greenlet: Optional[gevent.Greenlet] = None self.shape_greenlet: Optional[gevent.Greenlet] = None - self.shape_last_state: Union[Tuple[int, float], Tuple[int, float, Optional[List[Type[User]]]], None] = None + self.shape_last_tick: Union[Tuple[int, float], Tuple[int, float, Optional[List[Type[User]]]], None] = None self.current_cpu_usage: int = 0 self.cpu_warning_emitted: bool = 
False self.worker_cpu_warning_emitted: bool = False @@ -362,9 +362,9 @@ def shape_worker(self) -> None: else: self.stop() self.shape_greenlet = None - self.shape_last_state = None + self.shape_last_tick = None return - elif self.shape_last_state == current_tick: + elif self.shape_last_tick == current_tick: gevent.sleep(1) else: if len(current_tick) == 2: @@ -387,7 +387,7 @@ def shape_worker(self) -> None: # `(user_count - prev_user_count) / spawn_rate` in order to limit the runtime # of each load test shape stage. self.start(user_count=user_count, spawn_rate=spawn_rate, user_classes=user_classes) - self.shape_last_state = current_tick + self.shape_last_tick = current_tick def stop(self) -> None: """ @@ -410,7 +410,7 @@ def stop(self) -> None: if self.shape_greenlet is not None: self.shape_greenlet.kill(block=True) self.shape_greenlet = None - self.shape_last_state = None + self.shape_last_tick = None self.stop_users(self.user_classes_count) @@ -889,7 +889,7 @@ def stop(self, send_stop_to_client: bool = True) -> None: ): self.shape_greenlet.kill(block=True) self.shape_greenlet = None - self.shape_last_state = None + self.shape_last_tick = None self._users_dispatcher = None