diff --git a/.env-example b/.env-example index 20d550e..f79f1b8 100644 --- a/.env-example +++ b/.env-example @@ -1,5 +1,7 @@ GH_TOKEN = " " SEARCH_QUERY = "repo:owner/repo is:open is:issue" +LABELS_TO_MEASURE = "waiting-for-review,waiting-for-manager" HIDE_TIME_TO_FIRST_RESPONSE = False HIDE_TIME_TO_CLOSE = False HIDE_TIME_TO_ANSWER = False +HIDE_LABEL_METRICS = False diff --git a/.pylintrc b/.pylintrc index 1a27885..96c0fce 100644 --- a/.pylintrc +++ b/.pylintrc @@ -3,4 +3,6 @@ disable= redefined-argument-from-local, too-many-arguments, too-few-public-methods, - duplicate-code, \ No newline at end of file + duplicate-code, + too-many-locals, + too-many-branches, \ No newline at end of file diff --git a/README.md b/README.md index b300e20..d1032fa 100644 --- a/README.md +++ b/README.md @@ -2,10 +2,16 @@ [![CodeQL](https://github.com/github/issue-metrics/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/github/issue-metrics/actions/workflows/codeql-analysis.yml) [![Docker Image CI](https://github.com/github/issue-metrics/actions/workflows/docker-image.yml/badge.svg)](https://github.com/github/issue-metrics/actions/workflows/docker-image.yml) [![Python package](https://github.com/github/issue-metrics/actions/workflows/python-package.yml/badge.svg)](https://github.com/github/issue-metrics/actions/workflows/python-package.yml) -This is a GitHub Action that searches for pull requests/issues/discussions in a repository and measures -the time to first response for each one. It then calculates the average time -to first response and writes the issues/pull requests/discussions with their metrics -to a Markdown file. The issues/pull requests/discussions to search for can be filtered by using a search query. +This is a GitHub Action that searches for pull requests/issues/discussions in a repository and measures and reports on +several metrics. The issues/pull requests/discussions to search for can be filtered by using a search query. + +The metrics that are measured are: +| Metric | Description | +|--------|-------------| +| Time to first response | The time between when an issue/pull request/discussion is created and when the first comment or review is made. | +| Time to close | The time between when an issue/pull request/discussion is created and when it is closed. | +| Time to answer | (Discussions only) The time between when a discussion is created and when it is answered. | +| Time in label | The time between when a specific label is applied to an issue/pull request/discussion and when it is removed. This requires the LABELS_TO_MEASURE env variable to be set. | This action was developed by the GitHub OSPO for our own use and developed in a way that we could open source it that it might be useful to you as well! If you want to know more about how we use it, reach out in an issue in this repository. @@ -37,9 +43,11 @@ Below are the allowed configuration options: |-----------------------|----------|---------|-------------| | `GH_TOKEN` | True | | The GitHub Token used to scan the repository. Must have read access to all repository you are interested in scanning. | | `SEARCH_QUERY` | True | | The query by which you can filter issues/prs which must contain a `repo:` entry or an `org:` entry. For discussions, include `type:discussions` in the query. | +| `LABELS_TO_MEASURE` | False | | A comma-separated list of labels for which to measure how long each label is applied. If not provided, no label durations will be measured. Not compatible with discussions at this time.
| | `HIDE_TIME_TO_FIRST_RESPONSE` | False | False | If set to true, the time to first response will not be displayed in the generated markdown file. | | `HIDE_TIME_TO_CLOSE` | False | False | If set to true, the time to close will not be displayed in the generated markdown file. | | `HIDE_TIME_TO_ANSWER` | False | False | If set to true, the time to answer a discussion will not be displayed in the generated markdown file. | +| `HIDE_LABEL_METRICS` | False | False | If set to true, the time in label metrics will not be displayed in the generated markdown file. | ### Example workflows @@ -197,6 +205,65 @@ jobs: assignees: ``` +## Measuring time spent in labels + +**Note**: The discussions API currently doesn't support the `LabeledEvent` so this action cannot measure the time spent in a label for discussions. + +Sometimes it is helpful to know how long an issue or pull request spent in a particular label. This action can be configured to measure the time spent in a label. This is different from only wanting to measure issues with a specific label. If that is what you want, see the section on [configuring your search query](https://github.com/github/issue-metrics/blob/main/README.md#search_query-issues-or-pull-requests-open-or-closed). + +Here is an example workflow that does this: + +```yaml +name: Monthly issue metrics +on: + workflow_dispatch: + +jobs: + build: + name: issue metrics + runs-on: ubuntu-latest + + steps: + + - name: Run issue-metrics tool + uses: github/issue-metrics@v2 + env: + GH_TOKEN: ${{ secrets.GH_TOKEN }} + LABELS_TO_MEASURE: 'waiting-for-manager-approval,waiting-for-security-review' + SEARCH_QUERY: 'repo:owner/repo is:issue created:2023-05-01..2023-05-31 -reason:"not planned"' + + - name: Create issue + uses: peter-evans/create-issue-from-file@v4 + with: + title: Monthly issue metrics report + content-filepath: ./issue_metrics.md + assignees: + +``` + +then the report will look like this: + +```markdown +# Issue Metrics + +| Metric | Value | +| --- | ---: | +| Average time to first response | 0:50:44.666667 | +| Average time to close | 6 days, 7:08:52 | +| Average time to answer | 1 day | +| Average time spent in waiting-for-manager-approval | 0:00:41 | +| Average time spent in waiting-for-security-review | 2 days, 4:25:03 | +| Number of items that remain open | 2 | +| Number of items closed | 1 | +| Total number of items created | 3 | + +| Title | URL | Time to first response | Time to close | Time to answer | Time spent in waiting-for-manager-approval | Time spent in waiting-for-security-review | +| --- | --- | --- | --- | --- | --- | --- | +| Pull Request Title 1 | https://github.com/user/repo/pulls/1 | 0:05:26 | None | None | None | None | +| Issue Title 2 | https://github.com/user/repo/issues/2 | 2:26:07 | None | None | 0:00:41 | 2 days, 4:25:03 | + +``` + ## Example issue_metrics.md output Here is the output with no hidden columns: @@ -234,7 +301,7 @@ Here is the output with all hidable columns hidden: | --- | --- | | Discussion Title 1 | https://github.com/user/repo/discussions/1 | | Pull Request Title 2 | https://github.com/user/repo/pulls/2 | -| Issue Title 3 | https://github.com/user/repo/issues/3 | 2:26:07 | +| Issue Title 3 | https://github.com/user/repo/issues/3 | ``` diff --git a/classes.py b/classes.py index 4892c7b..8550275 100644 --- a/classes.py +++ b/classes.py @@ -17,6 +17,7 @@ class IssueWithMetrics: time_to_close (timedelta, optional): The time it took to close the issue. 
time_to_answer (timedelta, optional): The time it took to answer the discussions in the issue. + label_metrics (dict, optional): A dictionary containing the label metrics """ @@ -27,9 +28,11 @@ def __init__( time_to_first_response=None, time_to_close=None, time_to_answer=None, + labels_metrics=None, ): self.title = title self.html_url = html_url self.time_to_first_response = time_to_first_response self.time_to_close = time_to_close self.time_to_answer = time_to_answer + self.label_metrics = labels_metrics diff --git a/issue_metrics.py b/issue_metrics.py index 789d6c6..ac83e8c 100644 --- a/issue_metrics.py +++ b/issue_metrics.py @@ -32,6 +32,7 @@ from classes import IssueWithMetrics from discussions import get_discussions from json_writer import write_to_json +from labels import get_average_time_in_labels, get_label_metrics from markdown_writer import write_to_markdown from time_to_answer import get_average_time_to_answer, measure_time_to_answer from time_to_close import get_average_time_to_close, measure_time_to_close @@ -102,6 +103,7 @@ def auth_to_github() -> github3.GitHub: def get_per_issue_metrics( issues: Union[List[dict], List[github3.issues.Issue]], # type: ignore discussions: bool = False, + labels: Union[List[str], None] = None, ) -> tuple[List, int, int]: """ Calculate the metrics for each issue/pr/discussion in a list provided. @@ -111,6 +113,7 @@ def get_per_issue_metrics( GitHub issues or discussions. discussions (bool, optional): Whether the issues are discussions or not. Defaults to False. + labels (List[str]): A list of labels to measure time spent in. Defaults to empty list. Returns: tuple[List[IssueWithMetrics], int, int]: A tuple containing a @@ -130,6 +133,7 @@ def get_per_issue_metrics( None, None, None, + None, ) issue_with_metrics.time_to_first_response = measure_time_to_first_response( None, issue @@ -147,10 +151,13 @@ def get_per_issue_metrics( None, None, None, + None, ) issue_with_metrics.time_to_first_response = measure_time_to_first_response( issue, None ) + if labels: + issue_with_metrics.label_metrics = get_label_metrics(issue, labels) if issue.state == "closed": # type: ignore issue_with_metrics.time_to_close = measure_time_to_close(issue, None) num_issues_closed += 1 @@ -240,13 +247,24 @@ def main(): (ie. repo:owner/repo) or an organization (ie. 
org:organization)" ) + # Determine if there are labels to measure + labels = os.environ.get("LABELS_TO_MEASURE") + if labels: + labels = labels.split(",") + else: + labels = [] + # Search for issues # If type:discussions is in the search_query, search for discussions using get_discussions() if "type:discussions" in search_query: + if labels: + raise ValueError( + "The search query for discussions cannot include labels to measure" + ) issues = get_discussions(token, search_query) if len(issues) <= 0: print("No discussions found") - write_to_markdown(None, None, None, None, None, None) + write_to_markdown(None, None, None, None, None, None, None) return else: if owner is None or repo_name is None: @@ -257,13 +275,14 @@ def main(): issues = search_issues(search_query, github_connection) if len(issues.items) <= 0: print("No issues found") - write_to_markdown(None, None, None, None, None, None) + write_to_markdown(None, None, None, None, None, None, None) return # Get all the metrics issues_with_metrics, num_issues_open, num_issues_closed = get_per_issue_metrics( issues, discussions="type:discussions" in search_query, + labels=labels, ) average_time_to_first_response = get_average_time_to_first_response( @@ -275,12 +294,17 @@ def main(): average_time_to_answer = get_average_time_to_answer(issues_with_metrics) + # Get the average time spent in each label and store it in a dictionary + # where the key is the label and the value is the average time + average_time_in_labels = get_average_time_in_labels(issues_with_metrics, labels) + # Write the results to json and a markdown file write_to_json( issues_with_metrics, average_time_to_first_response, average_time_to_close, average_time_to_answer, + average_time_in_labels, num_issues_open, num_issues_closed, ) @@ -289,8 +313,10 @@ def main(): average_time_to_first_response, average_time_to_close, average_time_to_answer, + average_time_in_labels, num_issues_open, num_issues_closed, + labels, ) diff --git a/json_writer.py b/json_writer.py index 53a8758..b540c9f 100644 --- a/json_writer.py +++ b/json_writer.py @@ -27,6 +27,7 @@ def write_to_json( average_time_to_first_response: Union[timedelta, None], average_time_to_close: Union[timedelta, None], average_time_to_answer: Union[timedelta, None], + average_time_in_labels: Union[dict, None], num_issues_opened: Union[int, None], num_issues_closed: Union[int, None], ) -> str: @@ -48,6 +49,9 @@ def write_to_json( "time_to_first_response": "3 days, 0:00:00", "time_to_close": "6 days, 0:00:00", "time_to_answer": "None", + "label_metrics": { + "bug": "1 day, 16:24:12" + } }, { "title": "Issue 2", @@ -55,6 +59,8 @@ def write_to_json( "time_to_first_response": "2 days, 0:00:00", "time_to_close": "4 days, 0:00:00", "time_to_answer": "1 day, 0:00:00", + "label_metrics": { + } }, ], } @@ -66,10 +72,15 @@ def write_to_json( return "" # Create a dictionary with the metrics + labels_metrics = {} + if average_time_in_labels: + for label, time in average_time_in_labels.items(): + labels_metrics[label] = str(time) metrics = { "average_time_to_first_response": str(average_time_to_first_response), "average_time_to_close": str(average_time_to_close), "average_time_to_answer": str(average_time_to_answer), + "average_time_in_labels": labels_metrics, "num_items_opened": num_issues_opened, "num_items_closed": num_issues_closed, "total_item_count": len(issues_with_metrics), } # Create a list of dictionaries with the issues and metrics issues = [] for issue in issues_with_metrics: +
formatted_label_metrics = {} + if issue.label_metrics: + for label, time in issue.label_metrics.items(): + formatted_label_metrics[label] = str(time) issues.append( { "title": issue.title, @@ -85,6 +100,7 @@ def write_to_json( "time_to_first_response": str(issue.time_to_first_response), "time_to_close": str(issue.time_to_close), "time_to_answer": str(issue.time_to_answer), + "label_metrics": formatted_label_metrics, } ) diff --git a/labels.py b/labels.py new file mode 100644 index 0000000..ad6fa7c --- /dev/null +++ b/labels.py @@ -0,0 +1,119 @@ +""" Functions for calculating time spent in labels. """ +from datetime import datetime, timedelta +from typing import List + +import github3 +import pytz + +from classes import IssueWithMetrics + + +def get_label_events( + issue: github3.issues.Issue, labels: List[str] # type: ignore +) -> List[github3.issues.event]: # type: ignore + """ + Get the label events for a given issue if the label is of interest. + + Args: + issue (github3.issues.Issue): A GitHub issue. + labels (List[str]): A list of labels of interest. + + Returns: + List[github3.issues.event]: A list of label events for the given issue. + """ + label_events = [] + for event in issue.issue.events(): + if event.event in ("labeled", "unlabeled") and event.label["name"] in labels: + label_events.append(event) + + return label_events + + +def get_label_metrics(issue: github3.issues.Issue, labels: List[str]) -> dict: # type: ignore + """ + Calculate the time spent with the given labels on a given issue. + + Args: + issue (github3.issues.Issue): A GitHub issue. + labels (List[str]): A list of labels to measure time spent in. + + Returns: + dict: A dictionary containing the time spent in each label or None. + """ + label_metrics = {} + label_events = get_label_events(issue, labels) + + for label in labels: + label_metrics[label] = None + + # If the event is one of the labels we're looking for, add the time to the dictionary + unlabeled = {} + labeled = {} + if not label_events: + return label_metrics + + # Calculate the time to add or subtract to the time spent in label based on the label events + for event in label_events: + if event.event == "labeled": + labeled[event.label["name"]] = True + if event.label["name"] in labels: + if label_metrics[event.label["name"]] is None: + label_metrics[event.label["name"]] = timedelta(0) + label_metrics[ + event.label["name"] + ] -= event.created_at - datetime.fromisoformat(issue.created_at) + elif event.event == "unlabeled": + unlabeled[event.label["name"]] = True + if event.label["name"] in labels: + if label_metrics[event.label["name"]] is None: + label_metrics[event.label["name"]] = timedelta(0) + label_metrics[ + event.label["name"] + ] += event.created_at - datetime.fromisoformat(issue.created_at) + + for label in labels: + # if the label is still on there, add the time from the last event to now + if label in labeled and label not in unlabeled: + # if the issue is closed, add the time from the issue creation to the closed_at time + if issue.state == "closed": + label_metrics[label] += datetime.fromisoformat( + issue.closed_at + ) - datetime.fromisoformat(issue.created_at) + else: + # if the issue is open, add the time from the issue creation to now + label_metrics[label] += datetime.now(pytz.utc) - datetime.fromisoformat( + issue.created_at + ) + + return label_metrics + + +def get_average_time_in_labels( + issues_with_metrics: List[IssueWithMetrics], + labels: List[str], +) -> dict[str, timedelta]: + """Calculate the average time spent in each 
label.""" + average_time_in_labels = {} + number_of_issues_in_labels = {} + for issue in issues_with_metrics: + if issue.label_metrics: + for label in issue.label_metrics: + if issue.label_metrics[label] is None: + continue + if label not in average_time_in_labels: + average_time_in_labels[label] = issue.label_metrics[label] + number_of_issues_in_labels[label] = 1 + else: + average_time_in_labels[label] += issue.label_metrics[label] + number_of_issues_in_labels[label] += 1 + + for label in average_time_in_labels: + average_time_in_labels[label] = ( + average_time_in_labels[label] / number_of_issues_in_labels[label] + ) + + for label in labels: + if label not in average_time_in_labels: + average_time_in_labels[label] = None + + return average_time_in_labels diff --git a/markdown_writer.py b/markdown_writer.py index b0eac1d..1f2a37c 100644 --- a/markdown_writer.py +++ b/markdown_writer.py @@ -31,12 +31,12 @@ from classes import IssueWithMetrics -def get_non_hidden_columns() -> List[str]: +def get_non_hidden_columns(labels) -> List[str]: """ Get a list of the columns that are not hidden. Args: - None + labels (List[str]): A list of the labels that are used in the issues. Returns: List[str]: A list of the columns that are not hidden. @@ -56,6 +56,11 @@ def get_non_hidden_columns() -> List[str]: if not hide_time_to_answer: columns.append("Time to answer") + hide_label_metrics = os.getenv("HIDE_LABEL_METRICS") + if not hide_label_metrics and labels: + for label in labels: + columns.append(f"Time spent in {label}") + return columns @@ -64,9 +69,10 @@ def write_to_markdown( average_time_to_first_response: Union[timedelta, None], average_time_to_close: Union[timedelta, None], average_time_to_answer: Union[timedelta, None], + average_time_in_labels: Union[dict, None], num_issues_opened: Union[int, None], num_issues_closed: Union[int, None], - file=None, + labels=None, ) -> None: """Write the issues with metrics to a markdown file. @@ -76,43 +82,41 @@ def write_to_markdown( response for the issues. average_time_to_close (datetime.timedelta): The average time to close for the issues. average_time_to_answer (datetime.timedelta): The average time to answer the discussions. + average_time_in_labels (dict): A dictionary containing the average time spent in each label. file (file object, optional): The file object to write to. If not provided, a file named "issue_metrics.md" will be created. num_issues_opened (int): The Number of items that remain opened. num_issues_closed (int): The number of issues that were closed. + labels (List[str]): A list of the labels that are used in the issues. Returns: None. 
""" - columns = get_non_hidden_columns() + columns = get_non_hidden_columns(labels) # If all the metrics are None, then there are no issues if not issues_with_metrics or len(issues_with_metrics) == 0: - with file or open("issue_metrics.md", "w", encoding="utf-8") as file: + with open("issue_metrics.md", "w", encoding="utf-8") as file: file.write("no issues found for the given search criteria\n\n") return # Sort the issues by time to first response - issues_with_metrics.sort(key=lambda x: x.time_to_first_response or timedelta.max) - with file or open("issue_metrics.md", "w", encoding="utf-8") as file: + with open("issue_metrics.md", "w", encoding="utf-8") as file: file.write("# Issue Metrics\n\n") # Write first table with overall metrics - file.write("| Metric | Value |\n") - file.write("| --- | ---: |\n") - if "Time to first response" in columns: - file.write( - f"| Average time to first response | {average_time_to_first_response} |\n" - ) - if "Time to close" in columns: - file.write(f"| Average time to close | {average_time_to_close} |\n") - if "Time to answer" in columns: - file.write(f"| Average time to answer | {average_time_to_answer} |\n") - file.write(f"| Number of items that remain open | {num_issues_opened} |\n") - file.write(f"| Number of items closed | {num_issues_closed} |\n") - file.write( - f"| Total number of items created | {len(issues_with_metrics)} |\n\n" + write_overall_metrics_table( + issues_with_metrics, + average_time_to_first_response, + average_time_to_close, + average_time_to_answer, + average_time_in_labels, + num_issues_opened, + num_issues_closed, + labels, + columns, + file, ) # Write second table with individual issue/pr/discussion metrics @@ -137,6 +141,44 @@ def write_to_markdown( file.write(f" {issue.time_to_close} |") if "Time to answer" in columns: file.write(f" {issue.time_to_answer} |") + if labels and issue.label_metrics: + for label in labels: + if f"Time spent in {label}" in columns: + file.write(f" {issue.label_metrics[label]} |") file.write("\n") print("Wrote issue metrics to issue_metrics.md") + + +def write_overall_metrics_table( + issues_with_metrics, + average_time_to_first_response, + average_time_to_close, + average_time_to_answer, + average_time_in_labels, + num_issues_opened, + num_issues_closed, + labels, + columns, + file, +): + """Write the overall metrics table to the markdown file.""" + file.write("| Metric | Value |\n") + file.write("| --- | ---: |\n") + if "Time to first response" in columns: + file.write( + f"| Average time to first response | {average_time_to_first_response} |\n" + ) + if "Time to close" in columns: + file.write(f"| Average time to close | {average_time_to_close} |\n") + if "Time to answer" in columns: + file.write(f"| Average time to answer | {average_time_to_answer} |\n") + if labels and average_time_in_labels: + for label in labels: + if f"Time spent in {label}" in columns and label in average_time_in_labels: + file.write( + f"| Average time spent in {label} | {average_time_in_labels[label]} |\n" + ) + file.write(f"| Number of items that remain open | {num_issues_opened} |\n") + file.write(f"| Number of items closed | {num_issues_closed} |\n") + file.write(f"| Total number of items created | {len(issues_with_metrics)} |\n\n") diff --git a/requirements.txt b/requirements.txt index afd6006..2fdb99a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,4 @@ github3.py==4.0.1 python-dotenv==1.0.0 +pytz==2023.3 +Requests==2.31.0 diff --git a/test_issue_metrics.py b/test_issue_metrics.py index 
6456f9a..ee50085 100644 --- a/test_issue_metrics.py +++ b/test_issue_metrics.py @@ -218,7 +218,7 @@ def test_main_no_issues_found( # Call main and check that it writes 'No issues found' issue_metrics.main() mock_write_to_markdown.assert_called_once_with( - None, None, None, None, None, None + None, None, None, None, None, None, None ) @@ -279,6 +279,7 @@ def test_get_per_issue_metrics(self): timedelta(days=1), None, None, + None, ), IssueWithMetrics( "Issue 2", @@ -286,6 +287,7 @@ def test_get_per_issue_metrics(self): timedelta(days=2), timedelta(days=3), None, + None, ), ] expected_num_issues_open = 1 @@ -310,5 +312,63 @@ def test_get_per_issue_metrics(self): ) +class TestDiscussionMetrics(unittest.TestCase): + """Test suite for the discussion_metrics function.""" + + def setUp(self): + # Mock a discussion dictionary + self.issue1 = { + "title": "Issue 1", + "url": "github.com/user/repo/issues/1", + "createdAt": "2023-01-01T00:00:00Z", + "comments": { + "nodes": [ + { + "createdAt": "2023-01-02T00:00:00Z", + } + ] + }, + "answerChosenAt": "2023-01-04T00:00:00Z", + "closedAt": "2023-01-05T00:00:00Z", + } + + self.issue2 = { + "title": "Issue 2", + "url": "github.com/user/repo/issues/2", + "createdAt": "2023-01-01T00:00:00Z", + "comments": {"nodes": [{"createdAt": "2023-01-03T00:00:00Z"}]}, + "answerChosenAt": "2023-01-05T00:00:00Z", + "closedAt": "2023-01-07T00:00:00Z", + } + + def test_get_per_issue_metrics_with_discussion(self): + """ + Test that the function correctly calculates + the metrics for a list of GitHub issues with discussions. + """ + + issues = [self.issue1, self.issue2] + metrics = get_per_issue_metrics(issues, discussions=True) + + # get_per_issue_metrics returns a tuple of + # (issues_with_metrics, num_issues_open, num_issues_closed) + self.assertEqual(len(metrics), 3) + + # Check that the metrics are correct, 0 issues open, 2 issues closed + self.assertEqual(metrics[1], 0) + self.assertEqual(metrics[2], 2) + + # Check that the issues_with_metrics has 2 issues in it + self.assertEqual(len(metrics[0]), 2) + + # Check that the issues_with_metrics has the correct metrics, + self.assertEqual(metrics[0][0].time_to_answer, timedelta(days=3)) + self.assertEqual(metrics[0][0].time_to_close, timedelta(days=4)) + self.assertEqual(metrics[0][0].time_to_first_response, timedelta(days=1)) + self.assertEqual(metrics[0][1].time_to_answer, timedelta(days=4)) + self.assertEqual(metrics[0][1].time_to_close, timedelta(days=6)) + self.assertEqual(metrics[0][1].time_to_first_response, timedelta(days=2)) + + if __name__ == "__main__": unittest.main() diff --git a/test_json_writer.py b/test_json_writer.py index 474883f..e2951b7 100644 --- a/test_json_writer.py +++ b/test_json_writer.py @@ -19,6 +19,9 @@ def test_write_to_json(self): time_to_first_response=timedelta(days=3), time_to_close=timedelta(days=6), time_to_answer=None, + labels_metrics={ + "bug": timedelta(days=1, hours=16, minutes=24, seconds=12) + }, ), IssueWithMetrics( title="Issue 2", @@ -26,6 +29,7 @@ def test_write_to_json(self): time_to_first_response=timedelta(days=2), time_to_close=timedelta(days=4), time_to_answer=timedelta(days=1), + labels_metrics={}, ), ] average_time_to_first_response = timedelta(days=2.5) @@ -38,6 +42,7 @@ def test_write_to_json(self): "average_time_to_first_response": "2 days, 12:00:00", "average_time_to_close": "5 days, 0:00:00", "average_time_to_answer": "1 day, 0:00:00", + "average_time_in_labels": {"bug": "1 day, 16:24:12"}, "num_items_opened": 2, "num_items_closed": 1, "total_item_count": 2, @@ 
-48,6 +53,7 @@ def test_write_to_json(self): "time_to_first_response": "3 days, 0:00:00", "time_to_close": "6 days, 0:00:00", "time_to_answer": "None", + "label_metrics": {"bug": "1 day, 16:24:12"}, }, { "title": "Issue 2", @@ -55,6 +61,7 @@ def test_write_to_json(self): "time_to_first_response": "2 days, 0:00:00", "time_to_close": "4 days, 0:00:00", "time_to_answer": "1 day, 0:00:00", + "label_metrics": {}, }, ], } @@ -62,12 +69,15 @@ def test_write_to_json(self): # Call the function and check the output self.assertEqual( write_to_json( - issues_with_metrics, - average_time_to_first_response, - average_time_to_close, - average_time_to_answer, - num_issues_opened, - num_issues_closed, + issues_with_metrics=issues_with_metrics, + average_time_to_first_response=average_time_to_first_response, + average_time_to_close=average_time_to_close, + average_time_to_answer=average_time_to_answer, + average_time_in_labels={ + "bug": timedelta(days=1, hours=16, minutes=24, seconds=12) + }, + num_issues_opened=num_issues_opened, + num_issues_closed=num_issues_closed, ), json.dumps(expected_output), ) diff --git a/test_labels.py b/test_labels.py new file mode 100644 index 0000000..5293683 --- /dev/null +++ b/test_labels.py @@ -0,0 +1,92 @@ +""" Unit tests for labels.py """ +import unittest +from datetime import datetime, timedelta +from unittest.mock import MagicMock + +import github3 +import pytz +from classes import IssueWithMetrics + +from labels import get_average_time_in_labels, get_label_events, get_label_metrics + + +class TestLabels(unittest.TestCase): + """Unit tests for labels.py""" + + def setUp(self): + self.issue = MagicMock() # type: ignore + self.issue.issue = MagicMock(spec=github3.issues.Issue) # type: ignore + self.issue.created_at = "2020-01-01T00:00:00Z" + self.issue.closed_at = "2021-01-05T00:00:00Z" + self.issue.state = "closed" + self.issue.issue.events.return_value = [ + MagicMock( + event="labeled", + label={"name": "bug"}, + created_at=datetime(2021, 1, 1, tzinfo=pytz.UTC), + ), + MagicMock( + event="labeled", + label={"name": "feature"}, + created_at=datetime(2021, 1, 2, tzinfo=pytz.UTC), + ), + MagicMock( + event="unlabeled", + label={"name": "bug"}, + created_at=datetime(2021, 1, 3, tzinfo=pytz.UTC), + ), + ] + + def test_get_label_events(self): + """Test get_label_events""" + labels = ["bug"] + events = get_label_events(self.issue, labels) + self.assertEqual(len(events), 2) + self.assertEqual(events[0].label["name"], "bug") + self.assertEqual(events[1].label["name"], "bug") + + def test_get_label_metrics_closed_issue(self): + """Test get_label_metrics using a closed issue""" + labels = ["bug", "feature"] + metrics = get_label_metrics(self.issue, labels) + self.assertEqual(metrics["bug"], timedelta(days=2)) + self.assertEqual(metrics["feature"], timedelta(days=3)) + + def test_get_label_metrics_open_issue(self): + """Test get_label_metrics using an open issue""" + self.issue.state = "open" + labels = ["bug", "feature"] + metrics = get_label_metrics(self.issue, labels) + self.assertEqual(metrics["bug"], timedelta(days=2)) + self.assertLess( + metrics["feature"], + datetime.now(pytz.utc) - datetime(2021, 1, 2, tzinfo=pytz.UTC), + ) + self.assertGreater( + metrics["feature"], + datetime.now(pytz.utc) - datetime(2021, 1, 4, tzinfo=pytz.UTC), + ) + + +class TestGetAverageTimeInLabels(unittest.TestCase): + """Unit tests for get_average_time_in_labels""" + + def setUp(self): + self.issues_with_metrics = MagicMock() + self.issues_with_metrics = [ + IssueWithMetrics( + "issue1", 
"url1", None, None, None, {"bug": timedelta(days=2)} + ), + ] + + def test_get_average_time_in_labels(self): + """Test get_average_time_in_labels""" + labels = ["bug", "feature"] + metrics = get_average_time_in_labels(self.issues_with_metrics, labels) + self.assertEqual(len(metrics), 2) + self.assertEqual(metrics["bug"], timedelta(days=2)) + self.assertIsNone(metrics.get("feature")) + + +if __name__ == "__main__": + unittest.main() diff --git a/test_markdown_writer.py b/test_markdown_writer.py index 75a03f5..3fcc19e 100644 --- a/test_markdown_writer.py +++ b/test_markdown_writer.py @@ -35,6 +35,7 @@ def test_write_to_markdown(self): timedelta(days=1), timedelta(days=2), timedelta(days=3), + {"bug": timedelta(days=1)}, ), IssueWithMetrics( "Issue 2", @@ -42,22 +43,26 @@ def test_write_to_markdown(self): timedelta(days=3), timedelta(days=4), timedelta(days=5), + {"bug": timedelta(days=2)}, ), ] average_time_to_first_response = timedelta(days=2) average_time_to_close = timedelta(days=3) average_time_to_answer = timedelta(days=4) + average_time_in_labels = {"bug": "1 day, 12:00:00"} num_issues_opened = 2 num_issues_closed = 1 # Call the function write_to_markdown( - issues_with_metrics, - average_time_to_first_response, - average_time_to_close, - average_time_to_answer, - num_issues_opened, - num_issues_closed, + issues_with_metrics=issues_with_metrics, + average_time_to_first_response=average_time_to_first_response, + average_time_to_close=average_time_to_close, + average_time_to_answer=average_time_to_answer, + average_time_in_labels=average_time_in_labels, + num_issues_opened=num_issues_opened, + num_issues_closed=num_issues_closed, + labels=["bug"], ) # Check that the function writes the correct markdown file @@ -70,15 +75,17 @@ def test_write_to_markdown(self): "| Average time to first response | 2 days, 0:00:00 |\n" "| Average time to close | 3 days, 0:00:00 |\n" "| Average time to answer | 4 days, 0:00:00 |\n" + "| Average time spent in bug | 1 day, 12:00:00 |\n" "| Number of items that remain open | 2 |\n" "| Number of items closed | 1 |\n" "| Total number of items created | 2 |\n\n" - "| Title | URL | Time to first response | Time to close | Time to answer |\n" - "| --- | --- | --- | --- | --- |\n" + "| Title | URL | Time to first response | Time to close |" + " Time to answer | Time spent in bug |\n" + "| --- | --- | --- | --- | --- | --- |\n" "| Issue 1 | https://github.com/user/repo/issues/1 | 1 day, 0:00:00 | " - "2 days, 0:00:00 | 3 days, 0:00:00 |\n" + "2 days, 0:00:00 | 3 days, 0:00:00 | 1 day, 0:00:00 |\n" "| Issue 2 | https://github.com/user/repo/issues/2 | 3 days, 0:00:00 | " - "4 days, 0:00:00 | 5 days, 0:00:00 |\n" + "4 days, 0:00:00 | 5 days, 0:00:00 | 2 days, 0:00:00 |\n" ) self.assertEqual(content, expected_content) os.remove("issue_metrics.md") @@ -87,7 +94,7 @@ def test_write_to_markdown_no_issues(self): """Test that write_to_markdown writes the correct markdown file when no issues are found.""" # Call the function with no issues with patch("builtins.open", mock_open()) as mock_open_file: - write_to_markdown(None, None, None, None, None, None) + write_to_markdown(None, None, None, None, None, None, None) # Check that the file was written correctly expected_output = "no issues found for the given search criteria\n\n" @@ -105,12 +112,14 @@ def setUp(self): os.environ["HIDE_TIME_TO_FIRST_RESPONSE"] = "True" os.environ["HIDE_TIME_TO_CLOSE"] = "True" os.environ["HIDE_TIME_TO_ANSWER"] = "True" + os.environ["HIDE_LABEL_METRICS"] = "True" def tearDown(self): # Unset the HIDE* 
environment variables os.environ.pop("HIDE_TIME_TO_FIRST_RESPONSE") os.environ.pop("HIDE_TIME_TO_CLOSE") os.environ.pop("HIDE_TIME_TO_ANSWER") + os.environ.pop("HIDE_LABEL_METRICS") def test_writes_markdown_file_with_non_hidden_columns_only(self): """ @@ -126,6 +135,9 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self): time_to_first_response=timedelta(minutes=10), time_to_close=timedelta(days=1), time_to_answer=timedelta(hours=2), + labels_metrics={ + "label1": timedelta(days=1), + }, ), IssueWithMetrics( title="Issue 2", @@ -133,22 +145,30 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self): time_to_first_response=timedelta(minutes=20), time_to_close=timedelta(days=2), time_to_answer=timedelta(hours=4), + labels_metrics={ + "label1": timedelta(days=1), + }, ), ] average_time_to_first_response = timedelta(minutes=15) average_time_to_close = timedelta(days=1.5) average_time_to_answer = timedelta(hours=3) + average_time_in_labels = { + "label1": timedelta(days=1), + } num_issues_opened = 2 num_issues_closed = 1 # Call the function write_to_markdown( - issues_with_metrics, - average_time_to_first_response, - average_time_to_close, - average_time_to_answer, - num_issues_opened, - num_issues_closed, + issues_with_metrics=issues_with_metrics, + average_time_to_first_response=average_time_to_first_response, + average_time_to_close=average_time_to_close, + average_time_to_answer=average_time_to_answer, + average_time_in_labels=average_time_in_labels, + num_issues_opened=num_issues_opened, + num_issues_closed=num_issues_closed, + labels=["label1"], ) # Check that the function writes the correct markdown file