From f771226c972d15061f06a70384ede548fa18a99a Mon Sep 17 00:00:00 2001 From: freddyaboulton Date: Fri, 11 Jun 2021 16:49:49 -0400 Subject: [PATCH 01/26] Refactor dask tests --- evalml/tests/automl_tests/test_automl_dask.py | 102 +++++++------ evalml/tests/automl_tests/test_dask_engine.py | 139 ++++++++++-------- evalml/tests/conftest.py | 6 +- 3 files changed, 140 insertions(+), 107 deletions(-) diff --git a/evalml/tests/automl_tests/test_automl_dask.py b/evalml/tests/automl_tests/test_automl_dask.py index 74f49c213c..8f7e425149 100644 --- a/evalml/tests/automl_tests/test_automl_dask.py +++ b/evalml/tests/automl_tests/test_automl_dask.py @@ -1,8 +1,6 @@ -import unittest import numpy as np import pytest -from distributed import Client from evalml.automl import AutoMLSearch from evalml.automl.callbacks import raise_error_callback @@ -13,32 +11,34 @@ TestPipelineWithFitError, TestPipelineWithScoreError, ) +from dask.distributed import LocalCluster, Client -@pytest.mark.usefixtures("X_y_binary_cls") -class TestAutoMLSearchDask(unittest.TestCase): - @pytest.fixture(autouse=True) - def inject_fixtures(self, caplog): - """Gives the unittests access to the logger""" - self._caplog = caplog +@pytest.fixture +def sequential_engine(): + return SequentialEngine() - @classmethod - def setUpClass(cls) -> None: - cls.client = Client() - cls.parallel_engine = DaskEngine(cls.client) - cls.sequential_engine = SequentialEngine() - def test_automl(self): - """Comparing the results of parallel and sequential AutoML to each other.""" - X, y = self.X_y_binary +@pytest.fixture(scope='module') +def cluster(): + dask_cluster = LocalCluster(n_workers=1, threads_per_worker=2, dashboard_address=None) + yield dask_cluster + dask_cluster.close() + + +def test_automl(X_y_binary_cls, cluster, sequential_engine): + """Comparing the results of parallel and sequential AutoML to each other.""" + with Client(cluster) as client: + parallel_engine = DaskEngine(client) + X, y = X_y_binary_cls par_automl = AutoMLSearch( - X_train=X, y_train=y, problem_type="binary", engine=self.parallel_engine + X_train=X, y_train=y, problem_type="binary", engine=parallel_engine ) par_automl.search() parallel_rankings = par_automl.full_rankings seq_automl = AutoMLSearch( - X_train=X, y_train=y, problem_type="binary", engine=self.sequential_engine + X_train=X, y_train=y, problem_type="binary", engine=sequential_engine ) seq_automl.search() sequential_rankings = seq_automl.full_rankings @@ -60,15 +60,21 @@ def test_automl(self): np.array(par_results["percent_better_than_baseline"]), ) - def test_automl_max_iterations(self): - """Making sure that the max_iterations parameter limits the number of pipelines run.""" - X, y = self.X_y_binary + +def test_automl_max_iterations(X_y_binary_cls, cluster, sequential_engine): + """Making sure that the max_iterations parameter limits the number of pipelines run.""" + + X, y = X_y_binary_cls + with Client(cluster) as client: + parallel_engine = DaskEngine(client) + + max_iterations = 4 par_automl = AutoMLSearch( X_train=X, y_train=y, problem_type="binary", - engine=self.parallel_engine, + engine=parallel_engine, max_iterations=max_iterations, ) par_automl.search() @@ -78,7 +84,7 @@ def test_automl_max_iterations(self): X_train=X, y_train=y, problem_type="binary", - engine=self.sequential_engine, + engine=sequential_engine, max_iterations=max_iterations, ) seq_automl.search() @@ -86,44 +92,56 @@ def test_automl_max_iterations(self): assert len(sequential_rankings) == len(parallel_rankings) == max_iterations - def 
test_automl_train_dask_error_callback(self): - """Make sure the pipeline training error message makes its way back from the workers.""" - self._caplog.clear() - X, y = self.X_y_binary + +def test_automl_train_dask_error_callback(X_y_binary_cls, cluster, caplog): + """Make sure the pipeline training error message makes its way back from the workers.""" + caplog.clear() + with Client(cluster) as client: + parallel_engine = DaskEngine(client) + X, y = X_y_binary_cls + pipelines = [TestPipelineWithFitError({})] automl = AutoMLSearch( X_train=X, y_train=y, problem_type="binary", - engine=self.parallel_engine, + engine=parallel_engine, max_iterations=2, allowed_pipelines=pipelines, ) automl.train_pipelines(pipelines) - assert "Train error for PipelineWithError: Yikes" in self._caplog.text + assert "Train error for PipelineWithError: Yikes" in caplog.text + + +def test_automl_score_dask_error_callback(X_y_binary_cls, cluster, caplog): + """Make sure the pipeline scoring error message makes its way back from the workers.""" + caplog.clear() + with Client(cluster) as client: + parallel_engine = DaskEngine(client) - def test_automl_score_dask_error_callback(self): - """Make sure the pipeline scoring error message makes its way back from the workers.""" - self._caplog.clear() - X, y = self.X_y_binary + X, y = X_y_binary_cls pipelines = [TestPipelineWithScoreError({})] automl = AutoMLSearch( X_train=X, y_train=y, problem_type="binary", - engine=self.parallel_engine, + engine=parallel_engine, max_iterations=2, allowed_pipelines=pipelines, ) automl.score_pipelines( pipelines, X, y, objectives=["Log Loss Binary", "F1", "AUC"] ) - assert "Score error for PipelineWithError" in self._caplog.text + assert "Score error for PipelineWithError" in caplog.text + + +def test_automl_immediate_quit(X_y_binary_cls, cluster, caplog): + """Make sure the AutoMLSearch quits when error_callback is defined and does no further work.""" + caplog.clear() + X, y = X_y_binary_cls + with Client(cluster) as client: + parallel_engine = DaskEngine(client) - def test_automl_immediate_quit(self): - """Make sure the AutoMLSearch quits when error_callback is defined and does no further work.""" - self._caplog.clear() - X, y = self.X_y_binary pipelines = [ TestPipelineFast({}), TestPipelineWithFitError({}), @@ -133,7 +151,7 @@ def test_automl_immediate_quit(self): X_train=X, y_train=y, problem_type="binary", - engine=self.parallel_engine, + engine=parallel_engine, max_iterations=4, allowed_pipelines=pipelines, error_callback=raise_error_callback, @@ -155,7 +173,3 @@ def test_automl_immediate_quit(self): assert TestPipelineWithFitError.custom_name not in set( automl.full_rankings["pipeline_name"] ) - - @classmethod - def tearDownClass(cls) -> None: - cls.client.close() diff --git a/evalml/tests/automl_tests/test_dask_engine.py b/evalml/tests/automl_tests/test_dask_engine.py index 977e0c067d..80c3279ab2 100644 --- a/evalml/tests/automl_tests/test_dask_engine.py +++ b/evalml/tests/automl_tests/test_dask_engine.py @@ -1,10 +1,8 @@ -import unittest import numpy as np import pandas as pd import pytest import woodwork as ww -from distributed import Client from evalml.automl.engine.dask_engine import DaskComputation, DaskEngine from evalml.automl.engine.engine_base import ( @@ -21,28 +19,33 @@ TestSchemaCheckPipeline, automl_data, ) +from dask.distributed import LocalCluster, Client -@pytest.mark.usefixtures("X_y_binary_cls") -class TestDaskEngine(unittest.TestCase): - @classmethod - def setUpClass(cls) -> None: - cls.client = Client() 
+@pytest.fixture(scope='module') +def cluster(): + dask_cluster = LocalCluster(n_workers=1, threads_per_worker=1, dashboard_address=None) + yield dask_cluster + dask_cluster.close() - def test_init(self): - engine = DaskEngine(client=self.client) - assert engine.client == self.client + +def test_init(cluster): + with Client(cluster) as client: + engine = DaskEngine(client=client) + assert engine.client == client with pytest.raises( TypeError, match="Expected dask.distributed.Client, received" ): DaskEngine(client="Client") - def test_submit_training_job_single(self): - """Test that training a single pipeline using the parallel engine produces the - same results as simply running the train_pipeline function.""" - X, y = self.X_y_binary - engine = DaskEngine(client=self.client) + +def test_submit_training_job_single(X_y_binary_cls, cluster): + """Test that training a single pipeline using the parallel engine produces the + same results as simply running the train_pipeline function.""" + X, y = X_y_binary_cls + with Client(cluster) as client: + engine = DaskEngine(client=client) pipeline = BinaryClassificationPipeline( component_graph=["Logistic Regression Classifier"], parameters={"Logistic Regression Classifier": {"n_jobs": 1}}, @@ -68,10 +71,12 @@ def test_submit_training_job_single(self): dask_pipeline_fitted.predict(X), original_pipeline_fitted.predict(X) ) - def test_submit_training_jobs_multiple(self): - """Test that training multiple pipelines using the parallel engine produces the - same results as the sequential engine.""" - X, y = self.X_y_binary + +def test_submit_training_jobs_multiple(X_y_binary_cls, cluster): + """Test that training multiple pipelines using the parallel engine produces the + same results as the sequential engine.""" + X, y = X_y_binary_cls + with Client(cluster) as client: pipelines = [ BinaryClassificationPipeline( component_graph=["Logistic Regression Classifier"], @@ -98,7 +103,7 @@ def fit_pipelines(pipelines, engine): assert pipeline._is_fitted # Verify all pipelines are trained and fitted. 
- par_pipelines = fit_pipelines(pipelines, DaskEngine(client=self.client)) + par_pipelines = fit_pipelines(pipelines, DaskEngine(client=client)) for pipeline in par_pipelines: assert pipeline._is_fitted @@ -107,19 +112,22 @@ def fit_pipelines(pipelines, engine): for par_pipeline in par_pipelines: assert par_pipeline in seq_pipelines - def test_submit_evaluate_job_single(self): - """Test that evaluating a single pipeline using the parallel engine produces the - same results as simply running the evaluate_pipeline function.""" - X, y = self.X_y_binary - X.ww.init() - y = ww.init_series(y) + +def test_submit_evaluate_job_single(X_y_binary_cls, cluster): + """Test that evaluating a single pipeline using the parallel engine produces the + same results as simply running the evaluate_pipeline function.""" + X, y = X_y_binary_cls + X.ww.init() + y = ww.init_series(y) + + with Client(cluster) as client: pipeline = BinaryClassificationPipeline( component_graph=["Logistic Regression Classifier"], parameters={"Logistic Regression Classifier": {"n_jobs": 1}}, ) - engine = DaskEngine(client=self.client) + engine = DaskEngine(client=client) # Verify that engine evaluates a pipeline pipeline_future = engine.submit_evaluation_job( @@ -155,12 +163,15 @@ def test_submit_evaluate_job_single(self): == original_eval_results.get("logger").logs ) - def test_submit_evaluate_jobs_multiple(self): - """Test that evaluating multiple pipelines using the parallel engine produces the - same results as the sequential engine.""" - X, y = self.X_y_binary - X.ww.init() - y = ww.init_series(y) + +def test_submit_evaluate_jobs_multiple(X_y_binary_cls, cluster): + """Test that evaluating multiple pipelines using the parallel engine produces the + same results as the sequential engine.""" + X, y = X_y_binary_cls + X.ww.init() + y = ww.init_series(y) + + with Client(cluster) as client: pipelines = [ BinaryClassificationPipeline( @@ -182,7 +193,7 @@ def eval_pipelines(pipelines, engine): results = [f.get_result() for f in futures] return results - par_eval_results = eval_pipelines(pipelines, DaskEngine(client=self.client)) + par_eval_results = eval_pipelines(pipelines, DaskEngine(client=client)) par_dicts = [s.get("scores") for s in par_eval_results] par_scores = [s["cv_data"][0]["mean_cv_score"] for s in par_dicts] par_pipelines = [s.get("pipeline") for s in par_eval_results] @@ -205,18 +216,21 @@ def eval_pipelines(pipelines, engine): for par_pipeline in par_pipelines: assert par_pipeline in seq_pipelines - def test_submit_scoring_job_single(self): - """Test that scoring a single pipeline using the parallel engine produces the - same results as simply running the score_pipeline function.""" - X, y = self.X_y_binary - X.ww.init() - y = ww.init_series(y) + +def test_submit_scoring_job_single(X_y_binary_cls, cluster): + """Test that scoring a single pipeline using the parallel engine produces the + same results as simply running the score_pipeline function.""" + X, y = X_y_binary_cls + X.ww.init() + y = ww.init_series(y) + + with Client(cluster) as client: pipeline = BinaryClassificationPipeline( component_graph=["Logistic Regression Classifier"], parameters={"Logistic Regression Classifier": {"n_jobs": 1}}, ) - engine = DaskEngine(client=self.client) + engine = DaskEngine(client=client) objectives = [automl_data.objective] pipeline_future = engine.submit_training_job( @@ -238,12 +252,15 @@ def test_submit_scoring_job_single(self): assert not np.isnan(pipeline_score["Log Loss Binary"]) assert pipeline_score == 
original_pipeline_score - def test_submit_scoring_jobs_multiple(self): - """Test that scoring multiple pipelines using the parallel engine produces the - same results as the sequential engine.""" - X, y = self.X_y_binary - X.ww.init() - y = ww.init_series(y) + +def test_submit_scoring_jobs_multiple(X_y_binary_cls, cluster): + """Test that scoring multiple pipelines using the parallel engine produces the + same results as the sequential engine.""" + X, y = X_y_binary_cls + X.ww.init() + y = ww.init_series(y) + + with Client(cluster) as client: pipelines = [ BinaryClassificationPipeline( @@ -277,7 +294,7 @@ def score_pipelines(pipelines, engine): results = [f.get_result() for f in futures] return results - par_eval_results = score_pipelines(pipelines, DaskEngine(client=self.client)) + par_eval_results = score_pipelines(pipelines, DaskEngine(client=client)) par_scores = [s["Log Loss Binary"] for s in par_eval_results] seq_eval_results = score_pipelines(pipelines, SequentialEngine()) @@ -289,11 +306,14 @@ def score_pipelines(pipelines, engine): assert not any([np.isnan(s) for s in seq_scores]) np.testing.assert_allclose(par_scores, seq_scores, rtol=1e-10) - def test_cancel_job(self): - """Test that training a single pipeline using the parallel engine produces the - same results as simply running the train_pipeline function.""" - X, y = self.X_y_binary - engine = DaskEngine(client=self.client) + +def test_cancel_job(X_y_binary_cls, cluster): + """Test that training a single pipeline using the parallel engine produces the + same results as simply running the train_pipeline function.""" + X, y = X_y_binary_cls + + with Client(cluster) as client: + engine = DaskEngine(client=client) pipeline = TestPipelineSlow({"Logistic Regression Classifier": {"n_jobs": 1}}) # Verify that engine fits a pipeline @@ -303,9 +323,12 @@ def test_cancel_job(self): pipeline_future.cancel() assert pipeline_future.is_cancelled - def test_dask_sends_woodwork_schema(self): - X, y = self.X_y_binary - engine = DaskEngine(client=self.client) + +def test_dask_sends_woodwork_schema(X_y_binary_cls, cluster): + X, y = X_y_binary_cls + + with Client(cluster) as client: + engine = DaskEngine(client=client) X.ww.init( logical_types={0: "Categorical"}, semantic_tags={0: ["my cool feature"]} @@ -349,7 +372,3 @@ def test_dask_sends_woodwork_schema(self): future = engine.submit_evaluation_job(new_config, pipeline, X, y) future.get_result() - - @classmethod - def tearDownClass(cls) -> None: - cls.client.close() diff --git a/evalml/tests/conftest.py b/evalml/tests/conftest.py index 654af8aa4f..6bce86b58b 100644 --- a/evalml/tests/conftest.py +++ b/evalml/tests/conftest.py @@ -201,12 +201,12 @@ def X_y_binary(): return X, y -@pytest.fixture(scope="class") -def X_y_binary_cls(request): +@pytest.fixture +def X_y_binary_cls(): X, y = datasets.make_classification( n_samples=100, n_features=20, n_informative=2, n_redundant=2, random_state=0 ) - request.cls.X_y_binary = pd.DataFrame(X), pd.Series(y) + return pd.DataFrame(X), pd.Series(y) @pytest.fixture From 677f0c49d62e50c7ae604379417db2db761c2abf Mon Sep 17 00:00:00 2001 From: freddyaboulton Date: Fri, 11 Jun 2021 16:51:44 -0400 Subject: [PATCH 02/26] release notes --- docs/source/release_notes.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/source/release_notes.rst b/docs/source/release_notes.rst index 67640d9079..234ba0bb77 100644 --- a/docs/source/release_notes.rst +++ b/docs/source/release_notes.rst @@ -7,6 +7,7 @@ Release Notes * Documentation Changes * Testing Changes * Add 
``pytest-timeout``. All tests that run longer than 6 minutes will fail. :pr:`2374`
+        * Refactored dask tests :pr:`2377`


 .. warning::

From ef2955202a843060d8d575d1858723ff889f771e Mon Sep 17 00:00:00 2001
From: freddyaboulton
Date: Fri, 11 Jun 2021 17:05:26 -0400
Subject: [PATCH 03/26] Lint and latest deps

---
 evalml/tests/automl_tests/test_automl_dask.py | 10 +++++-----
 evalml/tests/automl_tests/test_dask_engine.py |  9 +++++----
 .../latest_dependency_versions.txt            |  2 +-
 3 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/evalml/tests/automl_tests/test_automl_dask.py b/evalml/tests/automl_tests/test_automl_dask.py
index 8f7e425149..96ba8e5071 100644
--- a/evalml/tests/automl_tests/test_automl_dask.py
+++ b/evalml/tests/automl_tests/test_automl_dask.py
@@ -1,6 +1,6 @@
-
 import numpy as np
 import pytest
+from dask.distributed import Client, LocalCluster

 from evalml.automl import AutoMLSearch
 from evalml.automl.callbacks import raise_error_callback
@@ -11,7 +11,6 @@
     TestPipelineWithFitError,
     TestPipelineWithScoreError,
 )
-from dask.distributed import LocalCluster, Client


 @pytest.fixture
@@ -19,9 +18,11 @@ def sequential_engine():
     return SequentialEngine()


-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
 def cluster():
-    dask_cluster = LocalCluster(n_workers=1, threads_per_worker=2, dashboard_address=None)
+    dask_cluster = LocalCluster(
+        n_workers=1, threads_per_worker=2, dashboard_address=None
+    )
     yield dask_cluster
     dask_cluster.close()

@@ -68,7 +69,6 @@ def test_automl_max_iterations(X_y_binary_cls, cluster, sequential_engine):
     with Client(cluster) as client:
         parallel_engine = DaskEngine(client)

-
     max_iterations = 4
     par_automl = AutoMLSearch(
         X_train=X,
diff --git a/evalml/tests/automl_tests/test_dask_engine.py b/evalml/tests/automl_tests/test_dask_engine.py
index 80c3279ab2..f8e68902f3 100644
--- a/evalml/tests/automl_tests/test_dask_engine.py
+++ b/evalml/tests/automl_tests/test_dask_engine.py
@@ -1,10 +1,8 @@
-
 import numpy as np
 import pandas as pd
 import pytest
 import woodwork as ww
+from dask.distributed import Client, LocalCluster

 from evalml.automl.engine.dask_engine import DaskComputation, DaskEngine
 from evalml.automl.engine.engine_base import (
@@ -19,12 +19,13 @@
     TestSchemaCheckPipeline,
     automl_data,
 )
-from dask.distributed import LocalCluster, Client


 @pytest.fixture(scope="module")
 def cluster():
-    dask_cluster = LocalCluster(n_workers=1, threads_per_worker=1, dashboard_address=None)
+    dask_cluster = LocalCluster(
+        n_workers=1, threads_per_worker=1, dashboard_address=None
+    )
     yield dask_cluster
     dask_cluster.close()

diff --git a/evalml/tests/dependency_update_check/latest_dependency_versions.txt b/evalml/tests/dependency_update_check/latest_dependency_versions.txt
index 1d120fb984..32efc39960 100644
--- a/evalml/tests/dependency_update_check/latest_dependency_versions.txt
+++ b/evalml/tests/dependency_update_check/latest_dependency_versions.txt
@@ -3,7 +3,7 @@ click==8.0.1
 cloudpickle==1.6.0
 colorama==0.4.4
 dask==2021.6.0
-featuretools==0.24.1
+featuretools==0.25.0
 graphviz==0.16
 imbalanced-learn==0.8.0
 ipywidgets==7.6.3