From 35c25c0bb681edc89c89ee8a454a616d5d5cb840 Mon Sep 17 00:00:00 2001 From: Zack Koppert Date: Wed, 12 Jul 2023 16:40:56 -0700 Subject: [PATCH 01/11] feat: add time in labels Signed-off-by: Zack Koppert --- .env-example | 1 + .pylintrc | 3 +- README.md | 75 +++++++++++++++++++++++-- classes.py | 3 + issue_metrics.py | 119 +++++++++++++++++++++++++++++++++++++++- json_writer.py | 16 ++++++ markdown_writer.py | 83 +++++++++++++++++++++------- test_issue_metrics.py | 4 +- test_json_writer.py | 16 ++++-- test_markdown_writer.py | 28 +++++----- 10 files changed, 299 insertions(+), 49 deletions(-) diff --git a/.env-example b/.env-example index 20d550e..df3a8d4 100644 --- a/.env-example +++ b/.env-example @@ -3,3 +3,4 @@ SEARCH_QUERY = "repo:owner/repo is:open is:issue" HIDE_TIME_TO_FIRST_RESPONSE = False HIDE_TIME_TO_CLOSE = False HIDE_TIME_TO_ANSWER = False +HIDE_LABEL_METRICS = False \ No newline at end of file diff --git a/.pylintrc b/.pylintrc index 1a27885..adec0e1 100644 --- a/.pylintrc +++ b/.pylintrc @@ -3,4 +3,5 @@ disable= redefined-argument-from-local, too-many-arguments, too-few-public-methods, - duplicate-code, \ No newline at end of file + duplicate-code, + too-many-locals, \ No newline at end of file diff --git a/README.md b/README.md index b300e20..cca20c0 100644 --- a/README.md +++ b/README.md @@ -2,10 +2,16 @@ [![CodeQL](https://github.com/github/issue-metrics/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/github/issue-metrics/actions/workflows/codeql-analysis.yml) [![Docker Image CI](https://github.com/github/issue-metrics/actions/workflows/docker-image.yml/badge.svg)](https://github.com/github/issue-metrics/actions/workflows/docker-image.yml) [![Python package](https://github.com/github/issue-metrics/actions/workflows/python-package.yml/badge.svg)](https://github.com/github/issue-metrics/actions/workflows/python-package.yml) -This is a GitHub Action that searches for pull requests/issues/discussions in a repository and measures 
-the time to first response for each one. It then calculates the average time -to first response and writes the issues/pull requests/discussions with their metrics -to a Markdown file. The issues/pull requests/discussions to search for can be filtered by using a search query. +This is a GitHub Action that searches for pull requests/issues/discussions in a repository and measures and reports on +several metrics. The issues/pull requests/discussions to search for can be filtered by using a search query. + +The metrics that are measured are: +| Metric | Description | +|--------|-------------| +| Time to first response | The time between when an issue/pull request/discussion is created and when the first comment or review is made. | +| Time to close | The time between when an issue/pull request/discussion is created and when it is closed. | +| Time to answer | The time between when a discussion is created and when it is answered. | +| Time in label | The time between when a specific label is applied to an issue/pull request/discussion and when it is removed. This requires the LABELS_TO_MEASURE env variable to be set. | + +This action was developed by the GitHub OSPO for our own use and developed in a way that we could open source it that it might be useful to you as well! If you want to know more about how we use it, reach out in an issue in this repository. @@ -37,9 +43,11 @@ Below are the allowed configuration options: |-----------------------|----------|---------|-------------| | `GH_TOKEN` | True | | The GitHub Token used to scan the repository. Must have read access to all repository you are interested in scanning. | | `SEARCH_QUERY` | True | | The query by which you can filter issues/prs which must contain a `repo:` entry or an `org:` entry. For discussions, include `type:discussions` in the query. | +| `LABELS_TO_MEASURE` | False | | A comma separated list of labels to measure how much time the label is applied. 
If not provided, no label durations will be measured. Not compatible with discussions at this time. | | `HIDE_TIME_TO_FIRST_RESPONSE` | False | False | If set to true, the time to first response will not be displayed in the generated markdown file. | | `HIDE_TIME_TO_CLOSE` | False | False | If set to true, the time to close will not be displayed in the generated markdown file. | | `HIDE_TIME_TO_ANSWER` | False | False | If set to true, the time to answer a discussion will not be displayed in the generated markdown file. | +| `HIDE_LABEL_METRICS` | False | False | If set to true, the time in label metrics will not be displayed in the generated markdown file. | ### Example workflows @@ -197,6 +205,65 @@ jobs: assignees: ``` +## Measuring time spent in labels + +**Note**: The discussions API currently doesn't support the `LabeledEvent` so this action cannot measure the time spent in a label for discussions. + +Sometimes it is helpful to know how long an issue or pull request spent in a particular label. This action can be configured to measure the time spent in a label. This is different from only wanting to measure issues with a specific label. If that is what you want, see the section on [configuring your search query](https://github.com/github/issue-metrics/blob/main/README.md#search_query-issues-or-pull-requests-open-or-closed). 
+ +Here is an example workflow that does this: + +```yaml +name: Monthly issue metrics +on: + workflow_dispatch: + +jobs: + build: + name: issue metrics + runs-on: ubuntu-latest + + steps: + + - name: Run issue-metrics tool + uses: github/issue-metrics@v2 + env: + GH_TOKEN: ${{ secrets.GH_TOKEN }} + LABELS_TO_MEASURE: 'waiting-for-manager-approval, waiting-for-security-review' + SEARCH_QUERY: 'repo:owner/repo is:issue created:2023-05-01..2023-05-31 -reason:"not planned"' + + - name: Create issue + uses: peter-evans/create-issue-from-file@v4 + with: + title: Monthly issue metrics report + content-filepath: ./issue_metrics.md + assignees: + +``` + +then the report will look like this: + +```markdown +# Issue Metrics + +| Metric | Value | +| --- | ---: | +| Average time to first response | 0:50:44.666667 | +| Average time to close | 6 days, 7:08:52 | +| Average time to answer | 1 day | +| Average time in waiting-for-manager-approval | 0:00:41 | +| Average time in waiting-for-security-review | 2 days, 4:25:03 | +| Number of items that remain open | 2 | +| Number of items closed | 1 | +| Total number of items created | 3 | + +| Title | URL | Time to first response | Time to close | Time to answer | Time in waiting-for-manager-approval | Time in waiting-for-security-review | +| --- | --- | --- | --- | --- | --- | --- | +| Pull Request Title 1 | https://github.com/user/repo/pulls/1 | 0:05:26 | None | None | None | None | +| Issue Title 2 | https://github.com/user/repo/issues/2 | 2:26:07 | None | None | 0:00:41 | 2 days, 4:25:03 | + +``` + ## Example issue_metrics.md output Here is the output with no hidden columns: diff --git a/classes.py b/classes.py index 4892c7b..8550275 100644 --- a/classes.py +++ b/classes.py @@ -17,6 +17,7 @@ class IssueWithMetrics: time_to_close (timedelta, optional): The time it took to close the issue. time_to_answer (timedelta, optional): The time it took to answer the discussions in the issue. 
+ label_metrics (dict, optional): A dictionary containing the label metrics """ @@ -27,9 +28,11 @@ def __init__( time_to_first_response=None, time_to_close=None, time_to_answer=None, + labels_metrics=None, ): self.title = title self.html_url = html_url self.time_to_first_response = time_to_first_response self.time_to_close = time_to_close self.time_to_answer = time_to_answer + self.label_metrics = labels_metrics diff --git a/issue_metrics.py b/issue_metrics.py index 789d6c6..5126440 100644 --- a/issue_metrics.py +++ b/issue_metrics.py @@ -23,11 +23,13 @@ """ import os +from datetime import datetime, timedelta from os.path import dirname, join from typing import List, Union import github3 from dotenv import load_dotenv +import pytz from classes import IssueWithMetrics from discussions import get_discussions @@ -99,18 +101,89 @@ def auth_to_github() -> github3.GitHub: return github_connection # type: ignore +def get_label_events( + issue: github3.issues.Issue, labels: List[str] # type: ignore +) -> List[github3.issues.event]: # type: ignore + """ + Get the label events for a given issue if the label is of interest. + + Args: + issue (github3.issues.Issue): A GitHub issue. + labels (List[str]): A list of labels of interest. + + Returns: + List[github3.issues.event]: A list of label events for the given issue. + """ + label_events = [] + for event in issue.issue.events(): + if event.event in ("labeled", "unlabeled") and event.label["name"] in labels: + label_events.append(event) + + return label_events + + +def get_label_metrics(issue: github3.issues.Issue, labels: List[str]) -> dict: # type: ignore + """ + Calculate the time spent with the given labels on a given issue. + + Args: + issue (github3.issues.Issue): A GitHub issue. + labels (List[str]): A list of labels to measure time spent in. + + Returns: + dict: A dictionary containing the time spent in each label. 
+ """ + label_metrics = {} + label_events = get_label_events(issue, labels) + + for label in labels: + label_metrics[label] = timedelta(0) + + # If the event is one of the labels we're looking for, add the time to the dictionary + unlabeled = False + for event in label_events: + if event.event == "labeled": + if event.label["name"] in labels: + label_metrics[ + event.label["name"] + ] -= event.created_at - datetime.fromisoformat(issue.created_at) + elif event.event == "unlabeled": + unlabeled = True + if event.label["name"] in labels: + label_metrics[ + event.label["name"] + ] += event.created_at - datetime.fromisoformat(issue.created_at) + + if not unlabeled: + for label in labels: + # if the issue is closed, add the time from the issue creation to the closed_at time + if issue.state == "closed": + label_metrics[label] += datetime.fromisoformat( + issue.closed_at + ) - datetime.fromisoformat(issue.created_at) + else: + # if the issue is open, add the time from the issue creation to now + label_metrics[label] += datetime.now(pytz.utc) - datetime.fromisoformat( + issue.created_at + ) + + return label_metrics + + def get_per_issue_metrics( issues: Union[List[dict], List[github3.issues.Issue]], # type: ignore discussions: bool = False, + labels: Union[List[str], None] = None, ) -> tuple[List, int, int]: """ - Calculate the metrics for each issue/pr/discussion in a list provided. + Calculate the metrics for each issue/pr in a list provided. Args: issues (Union[List[dict], List[github3.issues.Issue]]): A list of GitHub issues or discussions. discussions (bool, optional): Whether the issues are discussions or not. Defaults to False. + labels (List[str]): A list of labels to measure time spent in. Defaults to empty list. 
Returns: tuple[List[IssueWithMetrics], int, int]: A tuple containing a @@ -130,6 +203,7 @@ def get_per_issue_metrics( None, None, None, + None, ) issue_with_metrics.time_to_first_response = measure_time_to_first_response( None, issue @@ -147,10 +221,13 @@ def get_per_issue_metrics( None, None, None, + None, ) issue_with_metrics.time_to_first_response = measure_time_to_first_response( issue, None ) + if labels: + issue_with_metrics.label_metrics = get_label_metrics(issue, labels) if issue.state == "closed": # type: ignore issue_with_metrics.time_to_close = measure_time_to_close(issue, None) num_issues_closed += 1 @@ -203,6 +280,27 @@ def get_organization(search_query: str) -> Union[str, None]: return organization +def get_average_time_in_labels( + issues_with_metrics: List[IssueWithMetrics], +) -> dict[str, timedelta]: + """Calculate the average time spent in each label.""" + average_time_in_labels = {} + for issue in issues_with_metrics: + if issue.label_metrics: + for label in issue.label_metrics: + if label not in average_time_in_labels: + average_time_in_labels[label] = issue.label_metrics[label] + else: + average_time_in_labels[label] += issue.label_metrics[label] + + for label in average_time_in_labels: + average_time_in_labels[label] = average_time_in_labels[label] / len( + issues_with_metrics + ) + + return average_time_in_labels + + def main(): """Run the issue-metrics script. @@ -240,13 +338,20 @@ def main(): (ie. repo:owner/repo) or an organization (ie. 
org:organization)" ) + # Determine if there are label to measure + labels = os.environ.get("LABELS_TO_MEASURE") + if labels: + labels = labels.split(",") + else: + labels = [] + # Search for issues # If type:discussions is in the search_query, search for discussions using get_discussions() if "type:discussions" in search_query: issues = get_discussions(token, search_query) if len(issues) <= 0: print("No discussions found") - write_to_markdown(None, None, None, None, None, None) + write_to_markdown(None, None, None, None, None, None, None) return else: if owner is None or repo_name is None: @@ -257,13 +362,14 @@ def main(): issues = search_issues(search_query, github_connection) if len(issues.items) <= 0: print("No issues found") - write_to_markdown(None, None, None, None, None, None) + write_to_markdown(None, None, None, None, None, None, None) return # Get all the metrics issues_with_metrics, num_issues_open, num_issues_closed = get_per_issue_metrics( issues, discussions="type:discussions" in search_query, + labels=labels, ) average_time_to_first_response = get_average_time_to_first_response( @@ -275,12 +381,17 @@ def main(): average_time_to_answer = get_average_time_to_answer(issues_with_metrics) + # Get the average time in label for each label and store it in a dictionary + # where the key is the label and the value is the average time + average_time_in_labels = get_average_time_in_labels(issues_with_metrics) + # Write the results to json and a markdown file write_to_json( issues_with_metrics, average_time_to_first_response, average_time_to_close, average_time_to_answer, + average_time_in_labels, num_issues_open, num_issues_closed, ) @@ -289,8 +400,10 @@ def main(): average_time_to_first_response, average_time_to_close, average_time_to_answer, + average_time_in_labels, num_issues_open, num_issues_closed, + labels, ) diff --git a/json_writer.py b/json_writer.py index 53a8758..b540c9f 100644 --- a/json_writer.py +++ b/json_writer.py @@ -27,6 +27,7 @@ def 
write_to_json( average_time_to_first_response: Union[timedelta, None], average_time_to_close: Union[timedelta, None], average_time_to_answer: Union[timedelta, None], + average_time_in_labels: Union[dict, None], num_issues_opened: Union[int, None], num_issues_closed: Union[int, None], ) -> str: @@ -48,6 +49,9 @@ def write_to_json( "time_to_first_response": "3 days, 0:00:00", "time_to_close": "6 days, 0:00:00", "time_to_answer": "None", + "label_metrics": { + "bug": "1 day, 16:24:12" + } }, { "title": "Issue 2", @@ -55,6 +59,8 @@ def write_to_json( "time_to_first_response": "2 days, 0:00:00", "time_to_close": "4 days, 0:00:00", "time_to_answer": "1 day, 0:00:00", + "label_metrics": { + } }, ], } @@ -66,10 +72,15 @@ def write_to_json( return "" # Create a dictionary with the metrics + labels_metrics = {} + if average_time_in_labels: + for label, time in average_time_in_labels.items(): + labels_metrics[label] = str(time) metrics = { "average_time_to_first_response": str(average_time_to_first_response), "average_time_to_close": str(average_time_to_close), "average_time_to_answer": str(average_time_to_answer), + "average_time_in_labels": labels_metrics, "num_items_opened": num_issues_opened, "num_items_closed": num_issues_closed, "total_item_count": len(issues_with_metrics), @@ -78,6 +89,10 @@ def write_to_json( # Create a list of dictionaries with the issues and metrics issues = [] for issue in issues_with_metrics: + formatted_label_metrics = {} + if issue.label_metrics: + for label, time in issue.label_metrics.items(): + formatted_label_metrics[label] = str(time) issues.append( { "title": issue.title, @@ -85,6 +100,7 @@ def write_to_json( "time_to_first_response": str(issue.time_to_first_response), "time_to_close": str(issue.time_to_close), "time_to_answer": str(issue.time_to_answer), + "label_metrics": formatted_label_metrics, } ) diff --git a/markdown_writer.py b/markdown_writer.py index b0eac1d..000b6aa 100644 --- a/markdown_writer.py +++ b/markdown_writer.py @@ 
-31,12 +31,12 @@ from classes import IssueWithMetrics -def get_non_hidden_columns() -> List[str]: +def get_non_hidden_columns(labels) -> List[str]: """ Get a list of the columns that are not hidden. Args: - None + labels (List[str]): A list of the labels that are used in the issues. Returns: List[str]: A list of the columns that are not hidden. @@ -56,6 +56,11 @@ def get_non_hidden_columns() -> List[str]: if not hide_time_to_answer: columns.append("Time to answer") + hide_label_metrics = os.getenv("HIDE_LABEL_METRICS") + if not hide_label_metrics and labels: + for label in labels: + columns.append(f"Time spent in {label}") + return columns @@ -64,9 +69,10 @@ def write_to_markdown( average_time_to_first_response: Union[timedelta, None], average_time_to_close: Union[timedelta, None], average_time_to_answer: Union[timedelta, None], + average_time_in_labels: Union[dict, None], num_issues_opened: Union[int, None], num_issues_closed: Union[int, None], - file=None, + labels=None, ) -> None: """Write the issues with metrics to a markdown file. @@ -76,43 +82,41 @@ def write_to_markdown( response for the issues. average_time_to_close (datetime.timedelta): The average time to close for the issues. average_time_to_answer (datetime.timedelta): The average time to answer the discussions. + average_time_in_labels (dict): A dictionary containing the average time spent in each label. file (file object, optional): The file object to write to. If not provided, a file named "issue_metrics.md" will be created. num_issues_opened (int): The Number of items that remain opened. num_issues_closed (int): The number of issues that were closed. + labels (List[str]): A list of the labels that are used in the issues. Returns: None. 
""" - columns = get_non_hidden_columns() + columns = get_non_hidden_columns(labels) # If all the metrics are None, then there are no issues if not issues_with_metrics or len(issues_with_metrics) == 0: - with file or open("issue_metrics.md", "w", encoding="utf-8") as file: + with open("issue_metrics.md", "w", encoding="utf-8") as file: file.write("no issues found for the given search criteria\n\n") return # Sort the issues by time to first response - issues_with_metrics.sort(key=lambda x: x.time_to_first_response or timedelta.max) - with file or open("issue_metrics.md", "w", encoding="utf-8") as file: + with open("issue_metrics.md", "w", encoding="utf-8") as file: file.write("# Issue Metrics\n\n") # Write first table with overall metrics - file.write("| Metric | Value |\n") - file.write("| --- | ---: |\n") - if "Time to first response" in columns: - file.write( - f"| Average time to first response | {average_time_to_first_response} |\n" - ) - if "Time to close" in columns: - file.write(f"| Average time to close | {average_time_to_close} |\n") - if "Time to answer" in columns: - file.write(f"| Average time to answer | {average_time_to_answer} |\n") - file.write(f"| Number of items that remain open | {num_issues_opened} |\n") - file.write(f"| Number of items closed | {num_issues_closed} |\n") - file.write( - f"| Total number of items created | {len(issues_with_metrics)} |\n\n" + write_overall_metrics_table( + issues_with_metrics, + average_time_to_first_response, + average_time_to_close, + average_time_to_answer, + average_time_in_labels, + num_issues_opened, + num_issues_closed, + labels, + columns, + file, ) # Write second table with individual issue/pr/discussion metrics @@ -137,6 +141,43 @@ def write_to_markdown( file.write(f" {issue.time_to_close} |") if "Time to answer" in columns: file.write(f" {issue.time_to_answer} |") + if labels and issue.label_metrics: + for label in labels: + file.write(f" {issue.label_metrics[label]} |") file.write("\n") print("Wrote 
issue metrics to issue_metrics.md") + + +def write_overall_metrics_table( + issues_with_metrics, + average_time_to_first_response, + average_time_to_close, + average_time_to_answer, + average_time_in_labels, + num_issues_opened, + num_issues_closed, + labels, + columns, + file, +): + """Write the overall metrics table to the markdown file.""" + file.write("| Metric | Value |\n") + file.write("| --- | ---: |\n") + if "Time to first response" in columns: + file.write( + f"| Average time to first response | {average_time_to_first_response} |\n" + ) + if "Time to close" in columns: + file.write(f"| Average time to close | {average_time_to_close} |\n") + if "Time to answer" in columns: + file.write(f"| Average time to answer | {average_time_to_answer} |\n") + if labels and average_time_in_labels: + for label in labels: + if f"Time spent in {label}" in columns and label in average_time_in_labels: + file.write( + f"| Average time spent in {label} | {average_time_in_labels[label]} |\n" + ) + file.write(f"| Number of items that remain open | {num_issues_opened} |\n") + file.write(f"| Number of items closed | {num_issues_closed} |\n") + file.write(f"| Total number of items created | {len(issues_with_metrics)} |\n\n") diff --git a/test_issue_metrics.py b/test_issue_metrics.py index 6456f9a..68baa8b 100644 --- a/test_issue_metrics.py +++ b/test_issue_metrics.py @@ -218,7 +218,7 @@ def test_main_no_issues_found( # Call main and check that it writes 'No issues found' issue_metrics.main() mock_write_to_markdown.assert_called_once_with( - None, None, None, None, None, None + None, None, None, None, None, None, None ) @@ -279,6 +279,7 @@ def test_get_per_issue_metrics(self): timedelta(days=1), None, None, + None, ), IssueWithMetrics( "Issue 2", @@ -286,6 +287,7 @@ def test_get_per_issue_metrics(self): timedelta(days=2), timedelta(days=3), None, + None, ), ] expected_num_issues_open = 1 diff --git a/test_json_writer.py b/test_json_writer.py index 474883f..6ca469d 100644 --- 
a/test_json_writer.py +++ b/test_json_writer.py @@ -38,6 +38,7 @@ def test_write_to_json(self): "average_time_to_first_response": "2 days, 12:00:00", "average_time_to_close": "5 days, 0:00:00", "average_time_to_answer": "1 day, 0:00:00", + "average_time_in_labels": {}, "num_items_opened": 2, "num_items_closed": 1, "total_item_count": 2, @@ -48,6 +49,7 @@ def test_write_to_json(self): "time_to_first_response": "3 days, 0:00:00", "time_to_close": "6 days, 0:00:00", "time_to_answer": "None", + "label_metrics": {}, }, { "title": "Issue 2", @@ -55,6 +57,7 @@ def test_write_to_json(self): "time_to_first_response": "2 days, 0:00:00", "time_to_close": "4 days, 0:00:00", "time_to_answer": "1 day, 0:00:00", + "label_metrics": {}, }, ], } @@ -62,12 +65,13 @@ def test_write_to_json(self): # Call the function and check the output self.assertEqual( write_to_json( - issues_with_metrics, - average_time_to_first_response, - average_time_to_close, - average_time_to_answer, - num_issues_opened, - num_issues_closed, + issues_with_metrics=issues_with_metrics, + average_time_to_first_response=average_time_to_first_response, + average_time_to_close=average_time_to_close, + average_time_to_answer=average_time_to_answer, + average_time_in_labels=None, + num_issues_opened=num_issues_opened, + num_issues_closed=num_issues_closed, ), json.dumps(expected_output), ) diff --git a/test_markdown_writer.py b/test_markdown_writer.py index 75a03f5..ddcee23 100644 --- a/test_markdown_writer.py +++ b/test_markdown_writer.py @@ -52,12 +52,13 @@ def test_write_to_markdown(self): # Call the function write_to_markdown( - issues_with_metrics, - average_time_to_first_response, - average_time_to_close, - average_time_to_answer, - num_issues_opened, - num_issues_closed, + issues_with_metrics=issues_with_metrics, + average_time_to_first_response=average_time_to_first_response, + average_time_to_close=average_time_to_close, + average_time_to_answer=average_time_to_answer, + average_time_in_labels=None, + 
num_issues_opened=num_issues_opened, + num_issues_closed=num_issues_closed, ) # Check that the function writes the correct markdown file @@ -87,7 +88,7 @@ def test_write_to_markdown_no_issues(self): """Test that write_to_markdown writes the correct markdown file when no issues are found.""" # Call the function with no issues with patch("builtins.open", mock_open()) as mock_open_file: - write_to_markdown(None, None, None, None, None, None) + write_to_markdown(None, None, None, None, None, None, None) # Check that the file was written correctly expected_output = "no issues found for the given search criteria\n\n" @@ -143,12 +144,13 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self): # Call the function write_to_markdown( - issues_with_metrics, - average_time_to_first_response, - average_time_to_close, - average_time_to_answer, - num_issues_opened, - num_issues_closed, + issues_with_metrics=issues_with_metrics, + average_time_to_first_response=average_time_to_first_response, + average_time_to_close=average_time_to_close, + average_time_to_answer=average_time_to_answer, + average_time_in_labels=None, + num_issues_opened=num_issues_opened, + num_issues_closed=num_issues_closed, ) # Check that the function writes the correct markdown file From 04564685b5307654335edff9efd6a51b7e08d40c Mon Sep 17 00:00:00 2001 From: Zack Koppert Date: Thu, 13 Jul 2023 10:45:47 -0700 Subject: [PATCH 02/11] update dependencies Signed-off-by: Zack Koppert --- requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/requirements.txt b/requirements.txt index afd6006..2fdb99a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,4 @@ github3.py==4.0.1 python-dotenv==1.0.0 +pytz==2023.3 +Requests==2.31.0 From 0dc757c4b9ea0506ffcfe903378c5849e5e30d8c Mon Sep 17 00:00:00 2001 From: Zack Koppert Date: Thu, 13 Jul 2023 13:19:38 -0700 Subject: [PATCH 03/11] improve label test coverage Signed-off-by: Zack Koppert --- issue_metrics.py | 73 
++------------------------------------- labels.py | 75 +++++++++++++++++++++++++++++++++++++++++ markdown_writer.py | 3 +- test_json_writer.py | 12 +++++-- test_labels.py | 71 ++++++++++++++++++++++++++++++++++++++ test_markdown_writer.py | 30 +++++++++++++---- 6 files changed, 183 insertions(+), 81 deletions(-) create mode 100644 labels.py create mode 100644 test_labels.py diff --git a/issue_metrics.py b/issue_metrics.py index 5126440..fa73081 100644 --- a/issue_metrics.py +++ b/issue_metrics.py @@ -23,17 +23,17 @@ """ import os -from datetime import datetime, timedelta +from datetime import timedelta from os.path import dirname, join from typing import List, Union import github3 from dotenv import load_dotenv -import pytz from classes import IssueWithMetrics from discussions import get_discussions from json_writer import write_to_json +from labels import get_label_metrics from markdown_writer import write_to_markdown from time_to_answer import get_average_time_to_answer, measure_time_to_answer from time_to_close import get_average_time_to_close, measure_time_to_close @@ -101,75 +101,6 @@ def auth_to_github() -> github3.GitHub: return github_connection # type: ignore -def get_label_events( - issue: github3.issues.Issue, labels: List[str] # type: ignore -) -> List[github3.issues.event]: # type: ignore - """ - Get the label events for a given issue if the label is of interest. - - Args: - issue (github3.issues.Issue): A GitHub issue. - labels (List[str]): A list of labels of interest. - - Returns: - List[github3.issues.event]: A list of label events for the given issue. - """ - label_events = [] - for event in issue.issue.events(): - if event.event in ("labeled", "unlabeled") and event.label["name"] in labels: - label_events.append(event) - - return label_events - - -def get_label_metrics(issue: github3.issues.Issue, labels: List[str]) -> dict: # type: ignore - """ - Calculate the time spent with the given labels on a given issue. 
- - Args: - issue (github3.issues.Issue): A GitHub issue. - labels (List[str]): A list of labels to measure time spent in. - - Returns: - dict: A dictionary containing the time spent in each label. - """ - label_metrics = {} - label_events = get_label_events(issue, labels) - - for label in labels: - label_metrics[label] = timedelta(0) - - # If the event is one of the labels we're looking for, add the time to the dictionary - unlabeled = False - for event in label_events: - if event.event == "labeled": - if event.label["name"] in labels: - label_metrics[ - event.label["name"] - ] -= event.created_at - datetime.fromisoformat(issue.created_at) - elif event.event == "unlabeled": - unlabeled = True - if event.label["name"] in labels: - label_metrics[ - event.label["name"] - ] += event.created_at - datetime.fromisoformat(issue.created_at) - - if not unlabeled: - for label in labels: - # if the issue is closed, add the time from the issue creation to the closed_at time - if issue.state == "closed": - label_metrics[label] += datetime.fromisoformat( - issue.closed_at - ) - datetime.fromisoformat(issue.created_at) - else: - # if the issue is open, add the time from the issue creation to now - label_metrics[label] += datetime.now(pytz.utc) - datetime.fromisoformat( - issue.created_at - ) - - return label_metrics - - def get_per_issue_metrics( issues: Union[List[dict], List[github3.issues.Issue]], # type: ignore discussions: bool = False, diff --git a/labels.py b/labels.py new file mode 100644 index 0000000..8273cab --- /dev/null +++ b/labels.py @@ -0,0 +1,75 @@ +""" Functions for calculating time spent in labels. """ +from datetime import datetime, timedelta +from typing import List + +import github3 +import pytz + + +def get_label_events( + issue: github3.issues.Issue, labels: List[str] # type: ignore +) -> List[github3.issues.event]: # type: ignore + """ + Get the label events for a given issue if the label is of interest. 
+ + Args: + issue (github3.issues.Issue): A GitHub issue. + labels (List[str]): A list of labels of interest. + + Returns: + List[github3.issues.event]: A list of label events for the given issue. + """ + label_events = [] + for event in issue.issue.events(): + if event.event in ("labeled", "unlabeled") and event.label["name"] in labels: + label_events.append(event) + + return label_events + + +def get_label_metrics(issue: github3.issues.Issue, labels: List[str]) -> dict: # type: ignore + """ + Calculate the time spent with the given labels on a given issue. + + Args: + issue (github3.issues.Issue): A GitHub issue. + labels (List[str]): A list of labels to measure time spent in. + + Returns: + dict: A dictionary containing the time spent in each label. + """ + label_metrics = {} + label_events = get_label_events(issue, labels) + + for label in labels: + label_metrics[label] = timedelta(0) + + # If the event is one of the labels we're looking for, add the time to the dictionary + unlabeled = {} + for event in label_events: + if event.event == "labeled": + if event.label["name"] in labels: + label_metrics[ + event.label["name"] + ] -= event.created_at - datetime.fromisoformat(issue.created_at) + elif event.event == "unlabeled": + unlabeled[event.label["name"]] = True + if event.label["name"] in labels: + label_metrics[ + event.label["name"] + ] += event.created_at - datetime.fromisoformat(issue.created_at) + + for label in labels: + if label not in unlabeled: + # if the issue is closed, add the time from the issue creation to the closed_at time + if issue.state == "closed": + label_metrics[label] += datetime.fromisoformat( + issue.closed_at + ) - datetime.fromisoformat(issue.created_at) + else: + # if the issue is open, add the time from the issue creation to now + label_metrics[label] += datetime.now(pytz.utc) - datetime.fromisoformat( + issue.created_at + ) + + return label_metrics diff --git a/markdown_writer.py b/markdown_writer.py index 000b6aa..1f2a37c 100644 
--- a/markdown_writer.py +++ b/markdown_writer.py @@ -143,7 +143,8 @@ def write_to_markdown( file.write(f" {issue.time_to_answer} |") if labels and issue.label_metrics: for label in labels: - file.write(f" {issue.label_metrics[label]} |") + if f"Time spent in {label}" in columns: + file.write(f" {issue.label_metrics[label]} |") file.write("\n") print("Wrote issue metrics to issue_metrics.md") diff --git a/test_json_writer.py b/test_json_writer.py index 6ca469d..e2951b7 100644 --- a/test_json_writer.py +++ b/test_json_writer.py @@ -19,6 +19,9 @@ def test_write_to_json(self): time_to_first_response=timedelta(days=3), time_to_close=timedelta(days=6), time_to_answer=None, + labels_metrics={ + "bug": timedelta(days=1, hours=16, minutes=24, seconds=12) + }, ), IssueWithMetrics( title="Issue 2", @@ -26,6 +29,7 @@ def test_write_to_json(self): time_to_first_response=timedelta(days=2), time_to_close=timedelta(days=4), time_to_answer=timedelta(days=1), + labels_metrics={}, ), ] average_time_to_first_response = timedelta(days=2.5) @@ -38,7 +42,7 @@ def test_write_to_json(self): "average_time_to_first_response": "2 days, 12:00:00", "average_time_to_close": "5 days, 0:00:00", "average_time_to_answer": "1 day, 0:00:00", - "average_time_in_labels": {}, + "average_time_in_labels": {"bug": "1 day, 16:24:12"}, "num_items_opened": 2, "num_items_closed": 1, "total_item_count": 2, @@ -49,7 +53,7 @@ def test_write_to_json(self): "time_to_first_response": "3 days, 0:00:00", "time_to_close": "6 days, 0:00:00", "time_to_answer": "None", - "label_metrics": {}, + "label_metrics": {"bug": "1 day, 16:24:12"}, }, { "title": "Issue 2", @@ -69,7 +73,9 @@ def test_write_to_json(self): average_time_to_first_response=average_time_to_first_response, average_time_to_close=average_time_to_close, average_time_to_answer=average_time_to_answer, - average_time_in_labels=None, + average_time_in_labels={ + "bug": timedelta(days=1, hours=16, minutes=24, seconds=12) + }, num_issues_opened=num_issues_opened, 
num_issues_closed=num_issues_closed, ), diff --git a/test_labels.py b/test_labels.py new file mode 100644 index 0000000..426ed84 --- /dev/null +++ b/test_labels.py @@ -0,0 +1,71 @@ +""" Unit tests for labels.py """ +import unittest +from datetime import datetime, timedelta +from unittest.mock import MagicMock + +import github3 +import pytz + +from labels import get_label_events, get_label_metrics + + +class TestLabels(unittest.TestCase): + """Unit tests for labels.py""" + + def setUp(self): + self.issue = MagicMock() # type: ignore + self.issue.issue = MagicMock(spec=github3.issues.Issue) # type: ignore + self.issue.created_at = "2020-01-01T00:00:00Z" + self.issue.closed_at = "2021-01-05T00:00:00Z" + self.issue.state = "closed" + self.issue.issue.events.return_value = [ + MagicMock( + event="labeled", + label={"name": "bug"}, + created_at=datetime(2021, 1, 1, tzinfo=pytz.UTC), + ), + MagicMock( + event="labeled", + label={"name": "feature"}, + created_at=datetime(2021, 1, 2, tzinfo=pytz.UTC), + ), + MagicMock( + event="unlabeled", + label={"name": "bug"}, + created_at=datetime(2021, 1, 3, tzinfo=pytz.UTC), + ), + ] + + def test_get_label_events(self): + """Test get_label_events""" + labels = ["bug"] + events = get_label_events(self.issue, labels) + self.assertEqual(len(events), 2) + self.assertEqual(events[0].label["name"], "bug") + self.assertEqual(events[1].label["name"], "bug") + + def test_get_label_metrics_closed_issue(self): + """Test get_label_metrics using a closed issue""" + labels = ["bug", "feature"] + metrics = get_label_metrics(self.issue, labels) + self.assertEqual(metrics["bug"], timedelta(days=2)) + self.assertEqual(metrics["feature"], timedelta(days=3)) + + def test_get_label_metrics_open_issue(self): + """Test get_label_metrics using an open issue""" + self.issue.state = "open" + labels = ["bug", "feature"] + metrics = get_label_metrics(self.issue, labels) + self.assertEqual(metrics["bug"], timedelta(days=2)) + self.assertLess( + 
metrics["feature"], + datetime.now(pytz.utc) - datetime(2021, 1, 2, tzinfo=pytz.UTC), + ) + self.assertGreater( + metrics["feature"], + datetime.now(pytz.utc) - datetime(2021, 1, 4, tzinfo=pytz.UTC), + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test_markdown_writer.py b/test_markdown_writer.py index ddcee23..3fcc19e 100644 --- a/test_markdown_writer.py +++ b/test_markdown_writer.py @@ -35,6 +35,7 @@ def test_write_to_markdown(self): timedelta(days=1), timedelta(days=2), timedelta(days=3), + {"bug": timedelta(days=1)}, ), IssueWithMetrics( "Issue 2", @@ -42,11 +43,13 @@ def test_write_to_markdown(self): timedelta(days=3), timedelta(days=4), timedelta(days=5), + {"bug": timedelta(days=2)}, ), ] average_time_to_first_response = timedelta(days=2) average_time_to_close = timedelta(days=3) average_time_to_answer = timedelta(days=4) + average_time_in_labels = {"bug": "1 day, 12:00:00"} num_issues_opened = 2 num_issues_closed = 1 @@ -56,9 +59,10 @@ def test_write_to_markdown(self): average_time_to_first_response=average_time_to_first_response, average_time_to_close=average_time_to_close, average_time_to_answer=average_time_to_answer, - average_time_in_labels=None, + average_time_in_labels=average_time_in_labels, num_issues_opened=num_issues_opened, num_issues_closed=num_issues_closed, + labels=["bug"], ) # Check that the function writes the correct markdown file @@ -71,15 +75,17 @@ def test_write_to_markdown(self): "| Average time to first response | 2 days, 0:00:00 |\n" "| Average time to close | 3 days, 0:00:00 |\n" "| Average time to answer | 4 days, 0:00:00 |\n" + "| Average time spent in bug | 1 day, 12:00:00 |\n" "| Number of items that remain open | 2 |\n" "| Number of items closed | 1 |\n" "| Total number of items created | 2 |\n\n" - "| Title | URL | Time to first response | Time to close | Time to answer |\n" - "| --- | --- | --- | --- | --- |\n" + "| Title | URL | Time to first response | Time to close |" + " Time to answer | Time spent in 
bug |\n" + "| --- | --- | --- | --- | --- | --- |\n" "| Issue 1 | https://github.com/user/repo/issues/1 | 1 day, 0:00:00 | " - "2 days, 0:00:00 | 3 days, 0:00:00 |\n" + "2 days, 0:00:00 | 3 days, 0:00:00 | 1 day, 0:00:00 |\n" "| Issue 2 | https://github.com/user/repo/issues/2 | 3 days, 0:00:00 | " - "4 days, 0:00:00 | 5 days, 0:00:00 |\n" + "4 days, 0:00:00 | 5 days, 0:00:00 | 2 days, 0:00:00 |\n" ) self.assertEqual(content, expected_content) os.remove("issue_metrics.md") @@ -106,12 +112,14 @@ def setUp(self): os.environ["HIDE_TIME_TO_FIRST_RESPONSE"] = "True" os.environ["HIDE_TIME_TO_CLOSE"] = "True" os.environ["HIDE_TIME_TO_ANSWER"] = "True" + os.environ["HIDE_LABEL_METRICS"] = "True" def tearDown(self): # Unset the HIDE* environment variables os.environ.pop("HIDE_TIME_TO_FIRST_RESPONSE") os.environ.pop("HIDE_TIME_TO_CLOSE") os.environ.pop("HIDE_TIME_TO_ANSWER") + os.environ.pop("HIDE_LABEL_METRICS") def test_writes_markdown_file_with_non_hidden_columns_only(self): """ @@ -127,6 +135,9 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self): time_to_first_response=timedelta(minutes=10), time_to_close=timedelta(days=1), time_to_answer=timedelta(hours=2), + labels_metrics={ + "label1": timedelta(days=1), + }, ), IssueWithMetrics( title="Issue 2", @@ -134,11 +145,17 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self): time_to_first_response=timedelta(minutes=20), time_to_close=timedelta(days=2), time_to_answer=timedelta(hours=4), + labels_metrics={ + "label1": timedelta(days=1), + }, ), ] average_time_to_first_response = timedelta(minutes=15) average_time_to_close = timedelta(days=1.5) average_time_to_answer = timedelta(hours=3) + average_time_in_labels = { + "label1": timedelta(days=1), + } num_issues_opened = 2 num_issues_closed = 1 @@ -148,9 +165,10 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self): average_time_to_first_response=average_time_to_first_response, average_time_to_close=average_time_to_close, 
average_time_to_answer=average_time_to_answer, - average_time_in_labels=None, + average_time_in_labels=average_time_in_labels, num_issues_opened=num_issues_opened, num_issues_closed=num_issues_closed, + labels=["label1"], ) # Check that the function writes the correct markdown file From 942811254011a21366391319710663e3c8e98d94 Mon Sep 17 00:00:00 2001 From: Zack Koppert Date: Thu, 13 Jul 2023 15:11:21 -0700 Subject: [PATCH 04/11] increase test coverage Signed-off-by: Zack Koppert --- test_issue_metrics.py | 58 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/test_issue_metrics.py b/test_issue_metrics.py index 68baa8b..ee50085 100644 --- a/test_issue_metrics.py +++ b/test_issue_metrics.py @@ -312,5 +312,63 @@ def test_get_per_issue_metrics(self): ) +class TestDiscussionMetrics(unittest.TestCase): + """Test suite for the discussion_metrics function.""" + + def setUp(self): + # Mock a discussion dictionary + self.issue1 = { + "title": "Issue 1", + "url": "github.com/user/repo/issues/1", + "createdAt": "2023-01-01T00:00:00Z", + "comments": { + "nodes": [ + { + "createdAt": "2023-01-02T00:00:00Z", + } + ] + }, + "answerChosenAt": "2023-01-04T00:00:00Z", + "closedAt": "2023-01-05T00:00:00Z", + } + + self.issue2 = { + "title": "Issue 2", + "url": "github.com/user/repo/issues/2", + "createdAt": "2023-01-01T00:00:00Z", + "comments": {"nodes": [{"createdAt": "2023-01-03T00:00:00Z"}]}, + "answerChosenAt": "2023-01-05T00:00:00Z", + "closedAt": "2023-01-07T00:00:00Z", + } + + def test_get_per_issue_metrics_with_discussion(self): + """ + Test that the function correctly calculates + the metrics for a list of GitHub issues with discussions. 
+ """ + + issues = [self.issue1, self.issue2] + metrics = get_per_issue_metrics(issues, discussions=True) + + # get_per_issue_metrics returns a tuple of + # (issues_with_metrics, num_issues_open, num_issues_closed) + self.assertEqual(len(metrics), 3) + + # Check that the metrics are correct, 0 issues open, 2 issues closed + self.assertEqual(metrics[1], 0) + self.assertEqual(metrics[2], 2) + + # Check that the issues_with_metrics has 2 issues in it + self.assertEqual(len(metrics[0]), 2) + + # Check that the issues_with_metrics has the correct metrics, + self.assertEqual(metrics[0][0].time_to_answer, timedelta(days=3)) + self.assertEqual(metrics[0][0].time_to_close, timedelta(days=4)) + self.assertEqual(metrics[0][0].time_to_first_response, timedelta(days=1)) + self.assertEqual(metrics[0][1].time_to_answer, timedelta(days=4)) + self.assertEqual(metrics[0][1].time_to_close, timedelta(days=6)) + self.assertEqual(metrics[0][1].time_to_first_response, timedelta(days=2)) + + if __name__ == "__main__": unittest.main() From 53e128bfb7b960cc9718c6c0e473c13d41cbf091 Mon Sep 17 00:00:00 2001 From: Zack Koppert Date: Thu, 13 Jul 2023 16:54:43 -0700 Subject: [PATCH 05/11] test and fix corner cases Signed-off-by: Zack Koppert --- .pylintrc | 3 ++- issue_metrics.py | 30 ++++++----------------------- labels.py | 50 +++++++++++++++++++++++++++++++++++++++++++++--- test_labels.py | 23 +++++++++++++++++++++- 4 files changed, 77 insertions(+), 29 deletions(-) diff --git a/.pylintrc b/.pylintrc index adec0e1..96c0fce 100644 --- a/.pylintrc +++ b/.pylintrc @@ -4,4 +4,5 @@ disable= too-many-arguments, too-few-public-methods, duplicate-code, - too-many-locals, \ No newline at end of file + too-many-locals, + too-many-branches, \ No newline at end of file diff --git a/issue_metrics.py b/issue_metrics.py index fa73081..acbfdf5 100644 --- a/issue_metrics.py +++ b/issue_metrics.py @@ -23,7 +23,6 @@ """ import os -from datetime import timedelta from os.path import dirname, join from typing 
import List, Union @@ -33,7 +32,7 @@ from classes import IssueWithMetrics from discussions import get_discussions from json_writer import write_to_json -from labels import get_label_metrics +from labels import get_average_time_in_labels, get_label_metrics from markdown_writer import write_to_markdown from time_to_answer import get_average_time_to_answer, measure_time_to_answer from time_to_close import get_average_time_to_close, measure_time_to_close @@ -211,27 +210,6 @@ def get_organization(search_query: str) -> Union[str, None]: return organization -def get_average_time_in_labels( - issues_with_metrics: List[IssueWithMetrics], -) -> dict[str, timedelta]: - """Calculate the average time spent in each label.""" - average_time_in_labels = {} - for issue in issues_with_metrics: - if issue.label_metrics: - for label in issue.label_metrics: - if label not in average_time_in_labels: - average_time_in_labels[label] = issue.label_metrics[label] - else: - average_time_in_labels[label] += issue.label_metrics[label] - - for label in average_time_in_labels: - average_time_in_labels[label] = average_time_in_labels[label] / len( - issues_with_metrics - ) - - return average_time_in_labels - - def main(): """Run the issue-metrics script. 
@@ -279,6 +257,10 @@ def main(): # Search for issues # If type:discussions is in the search_query, search for discussions using get_discussions() if "type:discussions" in search_query: + if labels: + raise ValueError( + "The search query for discussions cannot include labels to measure" + ) issues = get_discussions(token, search_query) if len(issues) <= 0: print("No discussions found") @@ -314,7 +296,7 @@ def main(): # Get the average time in label for each label and store it in a dictionary # where the key is the label and the value is the average time - average_time_in_labels = get_average_time_in_labels(issues_with_metrics) + average_time_in_labels = get_average_time_in_labels(issues_with_metrics, labels) # Write the results to json and a markdown file write_to_json( diff --git a/labels.py b/labels.py index 8273cab..ad6fa7c 100644 --- a/labels.py +++ b/labels.py @@ -5,6 +5,8 @@ import github3 import pytz +from classes import IssueWithMetrics + def get_label_events( issue: github3.issues.Issue, labels: List[str] # type: ignore @@ -36,31 +38,42 @@ def get_label_metrics(issue: github3.issues.Issue, labels: List[str]) -> dict: labels (List[str]): A list of labels to measure time spent in. Returns: - dict: A dictionary containing the time spent in each label. + dict: A dictionary containing the time spent in each label or None. 
""" label_metrics = {} label_events = get_label_events(issue, labels) for label in labels: - label_metrics[label] = timedelta(0) + label_metrics[label] = None # If the event is one of the labels we're looking for, add the time to the dictionary unlabeled = {} + labeled = {} + if not label_events: + return label_metrics + + # Calculate the time to add or subtract to the time spent in label based on the label events for event in label_events: if event.event == "labeled": + labeled[event.label["name"]] = True if event.label["name"] in labels: + if label_metrics[event.label["name"]] is None: + label_metrics[event.label["name"]] = timedelta(0) label_metrics[ event.label["name"] ] -= event.created_at - datetime.fromisoformat(issue.created_at) elif event.event == "unlabeled": unlabeled[event.label["name"]] = True if event.label["name"] in labels: + if label_metrics[event.label["name"]] is None: + label_metrics[event.label["name"]] = timedelta(0) label_metrics[ event.label["name"] ] += event.created_at - datetime.fromisoformat(issue.created_at) for label in labels: - if label not in unlabeled: + # if the label is still on there, add the time from the last event to now + if label in labeled and label not in unlabeled: # if the issue is closed, add the time from the issue creation to the closed_at time if issue.state == "closed": label_metrics[label] += datetime.fromisoformat( @@ -73,3 +86,34 @@ def get_label_metrics(issue: github3.issues.Issue, labels: List[str]) -> dict: ) return label_metrics + + +def get_average_time_in_labels( + issues_with_metrics: List[IssueWithMetrics], + labels: List[str], +) -> dict[str, timedelta]: + """Calculate the average time spent in each label.""" + average_time_in_labels = {} + number_of_issues_in_labels = {} + for issue in issues_with_metrics: + if issue.label_metrics: + for label in issue.label_metrics: + if issue.label_metrics[label] is None: + continue + if label not in average_time_in_labels: + average_time_in_labels[label] = 
issue.label_metrics[label] + number_of_issues_in_labels[label] = 1 + else: + average_time_in_labels[label] += issue.label_metrics[label] + number_of_issues_in_labels[label] += 1 + + for label in average_time_in_labels: + average_time_in_labels[label] = ( + average_time_in_labels[label] / number_of_issues_in_labels[label] + ) + + for label in labels: + if label not in average_time_in_labels: + average_time_in_labels[label] = None + + return average_time_in_labels diff --git a/test_labels.py b/test_labels.py index 426ed84..5293683 100644 --- a/test_labels.py +++ b/test_labels.py @@ -5,8 +5,9 @@ import github3 import pytz +from classes import IssueWithMetrics -from labels import get_label_events, get_label_metrics +from labels import get_average_time_in_labels, get_label_events, get_label_metrics class TestLabels(unittest.TestCase): @@ -67,5 +68,25 @@ def test_get_label_metrics_open_issue(self): ) +class TestGetAverageTimeInLabels(unittest.TestCase): + """Unit tests for get_average_time_in_labels""" + + def setUp(self): + self.issues_with_metrics = MagicMock() + self.issues_with_metrics = [ + IssueWithMetrics( + "issue1", "url1", None, None, None, {"bug": timedelta(days=2)} + ), + ] + + def test_get_average_time_in_labels(self): + """Test get_average_time_in_labels""" + labels = ["bug", "feature"] + metrics = get_average_time_in_labels(self.issues_with_metrics, labels) + self.assertEqual(len(metrics), 2) + self.assertEqual(metrics["bug"], timedelta(days=2)) + self.assertIsNone(metrics.get("feature")) + + if __name__ == "__main__": unittest.main() From 4d3584bc1a3362b9ec19d6008826eb5796879764 Mon Sep 17 00:00:00 2001 From: Zack Koppert Date: Fri, 14 Jul 2023 07:43:04 -0700 Subject: [PATCH 06/11] Fix comment --- issue_metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/issue_metrics.py b/issue_metrics.py index acbfdf5..ac83e8c 100644 --- a/issue_metrics.py +++ b/issue_metrics.py @@ -106,7 +106,7 @@ def get_per_issue_metrics( labels: 
Union[List[str], None] = None, ) -> tuple[List, int, int]: """ - Calculate the metrics for each issue/pr in a list provided. + Calculate the metrics for each issue/pr/discussion in a list provided. Args: issues (Union[List[dict], List[github3.issues.Issue]]): A list of From 41a50da63a9250278837b0d655cf19b444f56115 Mon Sep 17 00:00:00 2001 From: Zack Koppert Date: Fri, 14 Jul 2023 07:44:23 -0700 Subject: [PATCH 07/11] Clarify instructions --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index cca20c0..3f5ada7 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ The metrics that are measured are: |--------|-------------| | Time to first response | The time between when an issue/pull request/discussion is created and when the first comment or review is made. | | Time to close | The time between when an issue/pull request/discussion is created and when it is closed. | -| Time to answer | The time between when a discussion is created and when it is answered. | +| Time to answer | (Discussions only) The time between when a discussion is created and when it is answered. | | Time in label | The time between when a label has a specific label appplied to an issue/pull request/discussion and when it is removed. This requires the LABELS_TO_MEASURE env variable to be set. | This action was developed by the GitHub OSPO for our own use and developed in a way that we could open source it that it might be useful to you as well! If you want to know more about how we use it, reach out in an issue in this repository. 
From 6c35a8a86dd54af5ac2f286c919a5ec3c5ba151c Mon Sep 17 00:00:00 2001 From: Zack Koppert Date: Fri, 14 Jul 2023 07:47:04 -0700 Subject: [PATCH 08/11] fix labels env syntax --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3f5ada7..f22fb77 100644 --- a/README.md +++ b/README.md @@ -229,7 +229,7 @@ jobs: uses: github/issue-metrics@v2 env: GH_TOKEN: ${{ secrets.GH_TOKEN }} - LABELS_TO_MEASURE: 'waiting-for-manager-approval, waiting-for-security-review' + LABELS_TO_MEASURE: 'waiting-for-manager-approval,waiting-for-security-review' SEARCH_QUERY: 'repo:owner/repo is:issue created:2023-05-01..2023-05-31 -reason:"not planned"' - name: Create issue From 3d1f4b3c47f648d3815ca17f7b57c87ae585338d Mon Sep 17 00:00:00 2001 From: Zack Koppert Date: Fri, 14 Jul 2023 07:51:27 -0700 Subject: [PATCH 09/11] fix README sample report measuring labels --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index f22fb77..86a7038 100644 --- a/README.md +++ b/README.md @@ -251,13 +251,13 @@ then the report will look like this: | Average time to first response | 0:50:44.666667 | | Average time to close | 6 days, 7:08:52 | | Average time to answer | 1 day | -| Average time in waiting-for-manager-approval | 0:00:41 | -| Average time in waiting-for-security-review | 2 days, 4:25:03 | +| Average time spent in waiting-for-manager-approval | 0:00:41 | +| Average time spent in waiting-for-security-review | 2 days, 4:25:03 | | Number of items that remain open | 2 | | Number of items closed | 1 | | Total number of items created | 3 | -| Title | URL | Time to first response | Time to close | Time to answer | Time in waiting-for-manager-approval | Time in waiting-for-security-review | +| Title | URL | Time to first response | Time to close | Time to answer | Time spent in waiting-for-manager-approval | Time spent in waiting-for-security-review | | --- | --- | --- | --- | --- | --- | --- | | 
Pull Request Title 1 | https://github.com/user/repo/pulls/1 | 0:05:26 | None | None | None | None | | Issue Title 2 | https://github.com/user/repo/issues/2 | 2:26:07 | None | None | 0:00:41 | 2 days, 4:25:03 | From e8b599d6bfa9526a2207f98265a78fa60473c955 Mon Sep 17 00:00:00 2001 From: Zack Koppert Date: Fri, 14 Jul 2023 07:55:25 -0700 Subject: [PATCH 10/11] add LABELS_TO_MEASURE to example .env --- .env-example | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.env-example b/.env-example index df3a8d4..f79f1b8 100644 --- a/.env-example +++ b/.env-example @@ -1,6 +1,7 @@ GH_TOKEN = " " SEARCH_QUERY = "repo:owner/repo is:open is:issue" +LABELS_TO_MEASURE = "waiting-for-review,waiting-for-manager" HIDE_TIME_TO_FIRST_RESPONSE = False HIDE_TIME_TO_CLOSE = False HIDE_TIME_TO_ANSWER = False -HIDE_LABEL_METRICS = False \ No newline at end of file +HIDE_LABEL_METRICS = False From 3ecf55981b32e82a97c4b3684d3aa7b11b446d8f Mon Sep 17 00:00:00 2001 From: Zack Koppert Date: Fri, 14 Jul 2023 07:58:29 -0700 Subject: [PATCH 11/11] fix sample report with hidden columns --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 86a7038..d1032fa 100644 --- a/README.md +++ b/README.md @@ -301,7 +301,7 @@ Here is the output with all hidable columns hidden: | --- | --- | | Discussion Title 1 | https://github.com/user/repo/discussions/1 | | Pull Request Title 2 | https://github.com/user/repo/pulls/2 | -| Issue Title 3 | https://github.com/user/repo/issues/3 | 2:26:07 | +| Issue Title 3 | https://github.com/user/repo/issues/3 | ```