Merge pull request #239 from github/only-measure-whats-reported
fix: Performance fix to only measure statistics which are not hidden
zkoppert authored Apr 16, 2024
2 parents 160cdc1 + 2cdc7ea commit 8d98d18
Showing 4 changed files with 95 additions and 48 deletions.
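
In short, each statistic is now computed only when its section of the report is enabled, instead of always being measured and merely hidden at output time. A minimal sketch of the gating pattern, with a stand-in EnvVars whose flag names mirror the diff below (the real class lives in config.py, and the real measurements walk the GitHub API):

from dataclasses import dataclass

@dataclass
class EnvVars:
    # Flag names mirror the diff below; the real class in config.py has more fields.
    hide_time_to_first_response: bool = False
    hide_time_to_answer: bool = False
    hide_time_to_close: bool = False
    hide_label_metrics: bool = False
    enable_mentor_count: bool = False

def expensive_measurement(issue: dict) -> float:
    # Stand-in for a metric that walks comments or events via the API.
    return 0.0

def collect_metrics(issue: dict, env_vars: EnvVars) -> dict:
    metrics = {}
    # Only pay for a measurement when it will actually be reported.
    if env_vars.hide_time_to_first_response is False:
        metrics["time_to_first_response"] = expensive_measurement(issue)
    if env_vars.enable_mentor_count:
        metrics["mentor_activity"] = expensive_measurement(issue)
    return metrics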
22 changes: 22 additions & 0 deletions .github/scripts/env_vars_check.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Find all test_*.py files
+files=$(find . -name "test_*.py")
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+NC='\033[0m' # No Color
+
+# Loop through each file
+for file in $files; do
+    # Search for instances of get_env_vars() with no arguments
+    result=$(grep -n "get_env_vars()" "$file")
+
+    # If any instances are found, print the file name and line number
+    if [ -n "$result" ]; then
+        echo "Found in $file:"
+        echo "$result"
+        echo -e "${RED}ERROR: get_env_vars() should always set test=True in test*.py files.${NC}"
+        exit 1
+    fi
+done
+echo -e " ${GREEN}PASS:${NC} All test*.py files call get_env_vars() with test=True."
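
For example, the check would flag the first call below and accept the second (an illustrative snippet, not a file from this repository):

# somewhere in a test_example.py (illustrative)
env_vars = get_env_vars()           # flagged: would read the real environment
env_vars = get_env_vars(test=True)  # passes the check

Note that the grep is a literal string match, so it only catches zero-argument calls written exactly as get_env_vars().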
1 change: 1 addition & 0 deletions Makefile
@@ -1,6 +1,7 @@
 .PHONY: test
 test:
	pytest -v --cov=. --cov-config=.coveragerc --cov-fail-under=80 --cov-report term-missing
+	.github/scripts/env_vars_check.sh
 
 .PHONY: clean
 clean:
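With this hook in place, make test runs the unit tests and then the env-vars check, so a stray zero-argument get_env_vars() call fails the build locally with the ERROR message above instead of surfacing later in CI.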
102 changes: 58 additions & 44 deletions issue_metrics.py
@@ -26,7 +26,7 @@
 
 import github3
 from classes import IssueWithMetrics
-from config import get_env_vars
+from config import EnvVars, get_env_vars
 from discussions import get_discussions
 from json_writer import write_to_json
 from labels import get_label_metrics, get_stats_time_in_labels
@@ -122,6 +122,7 @@ def auth_to_github(
 
 def get_per_issue_metrics(
     issues: Union[List[dict], List[github3.search.IssueSearchResult]],  # type: ignore
+    env_vars: EnvVars,
     discussions: bool = False,
     labels: Union[List[str], None] = None,
     ignore_users: Union[List[str], None] = None,
@@ -138,6 +139,7 @@ def get_per_issue_metrics(
             Defaults to False.
         labels (List[str]): A list of labels to measure time spent in. Defaults to empty list.
         ignore_users (List[str]): A list of users to ignore when calculating metrics.
+        env_vars (EnvVars): The environment variables for the script.
 
     Returns:
         tuple[List[IssueWithMetrics], int, int]: A tuple containing a
@@ -161,24 +163,30 @@ def get_per_issue_metrics(
                 None,
                 None,
             )
-            issue_with_metrics.time_to_first_response = measure_time_to_first_response(
-                None, issue, ignore_users
-            )
-            issue_with_metrics.mentor_activity = count_comments_per_user(
-                None,
-                issue,
-                ignore_users,
-                None,
-                None,
-                max_comments_to_eval,
-                heavily_involved,
-            )
-            issue_with_metrics.time_to_answer = measure_time_to_answer(issue)
-            if issue["closedAt"]:
-                issue_with_metrics.time_to_close = measure_time_to_close(None, issue)
-                num_issues_closed += 1
-            else:
-                num_issues_open += 1
+            if env_vars.hide_time_to_first_response is False:
+                issue_with_metrics.time_to_first_response = (
+                    measure_time_to_first_response(None, issue, ignore_users)
+                )
+            if env_vars.enable_mentor_count:
+                issue_with_metrics.mentor_activity = count_comments_per_user(
+                    None,
+                    issue,
+                    ignore_users,
+                    None,
+                    None,
+                    max_comments_to_eval,
+                    heavily_involved,
+                )
+            if env_vars.hide_time_to_answer is False:
+                issue_with_metrics.time_to_answer = measure_time_to_answer(issue)
+            if env_vars.hide_time_to_close is False:
+                if issue["closedAt"]:
+                    issue_with_metrics.time_to_close = measure_time_to_close(
+                        None, issue
+                    )
+                    num_issues_closed += 1
+                else:
+                    num_issues_open += 1
         else:
             issue_with_metrics = IssueWithMetrics(
                 issue.title,  # type: ignore
Expand All @@ -196,32 +204,37 @@ def get_per_issue_metrics(
pull_request = issue.issue.pull_request() # type: ignore
ready_for_review_at = get_time_to_ready_for_review(issue, pull_request)

issue_with_metrics.time_to_first_response = measure_time_to_first_response(
issue, None, pull_request, ready_for_review_at, ignore_users
)
issue_with_metrics.mentor_activity = count_comments_per_user(
issue,
None,
pull_request,
ready_for_review_at,
ignore_users,
max_comments_to_eval,
heavily_involved,
)
if labels:
issue_with_metrics.label_metrics = get_label_metrics(issue, labels)
if issue.state == "closed": # type: ignore
if pull_request:
issue_with_metrics.time_to_close = measure_time_to_merge(
pull_request, ready_for_review_at
if env_vars.hide_time_to_first_response is False:
issue_with_metrics.time_to_first_response = (
measure_time_to_first_response(
issue, None, pull_request, ready_for_review_at, ignore_users
)
else:
issue_with_metrics.time_to_close = measure_time_to_close(
issue, None
)
num_issues_closed += 1
elif issue.state == "open": # type: ignore
num_issues_open += 1
)
if env_vars.enable_mentor_count:
issue_with_metrics.mentor_activity = count_comments_per_user(
issue,
None,
pull_request,
ready_for_review_at,
ignore_users,
max_comments_to_eval,
heavily_involved,
)
if labels and env_vars.hide_label_metrics is False:
issue_with_metrics.label_metrics = get_label_metrics(issue, labels)
if env_vars.hide_time_to_close is False:
if issue.state == "closed": # type: ignore
if pull_request:
issue_with_metrics.time_to_close = measure_time_to_merge(
pull_request, ready_for_review_at
)
else:
issue_with_metrics.time_to_close = measure_time_to_close(
issue, None
)
num_issues_closed += 1
elif issue.state == "open": # type: ignore
num_issues_open += 1
issues_with_metrics.append(issue_with_metrics)

return issues_with_metrics, num_issues_open, num_issues_closed
@@ -324,6 +337,7 @@ def main():
         ignore_users=ignore_users,
         max_comments_to_eval=max_comments_eval,
         heavily_involved=heavily_involved_cutoff,
+        env_vars=env_vars,
     )
 
     stats_time_to_first_response = get_stats_time_to_first_response(issues_with_metrics)
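
Both the discussions branch and the issue/PR branch now consult the same EnvVars flags, and main() threads env_vars through explicitly. The flags presumably originate as environment variables parsed in config.get_env_vars; a hedged sketch of that kind of parsing, with variable names that are assumptions rather than something this diff confirms:

import os

def env_flag(name: str, default: str = "false") -> bool:
    # GitHub Actions passes workflow inputs as strings, hence the comparison.
    return os.environ.get(name, default).strip().lower() == "true"

# Assumed variable names, for illustration only:
hide_time_to_close = env_flag("HIDE_TIME_TO_CLOSE")
enable_mentor_count = env_flag("ENABLE_MENTOR_COUNT")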
18 changes: 14 additions & 4 deletions test_issue_metrics.py
@@ -108,8 +108,8 @@ def test_get_env_vars(self):
         """Test that the function correctly retrieves the environment variables."""
 
         # Call the function and check the result
-        search_query = get_env_vars().search_query
-        gh_token = get_env_vars().gh_token
+        search_query = get_env_vars(test=True).search_query
+        gh_token = get_env_vars(test=True).gh_token
         gh_token_expected_result = "test_token"
         search_query_expected_result = "is:issue is:open repo:user/repo"
         self.assertEqual(gh_token, gh_token_expected_result)
@@ -238,6 +238,10 @@ def test_main_no_issues_found(
 
 class TestGetPerIssueMetrics(unittest.TestCase):
     """Test suite for the get_per_issue_metrics function."""
 
+    @patch.dict(
+        os.environ,
+        {"GH_TOKEN": "test_token", "SEARCH_QUERY": "is:issue is:open repo:user/repo"},
+    )
     def test_get_per_issue_metrics(self):
         """Test that the function correctly calculates the metrics for a list of GitHub issues."""
         # Create mock data
@@ -286,7 +290,7 @@ def test_get_per_issue_metrics(self):
             result_issues_with_metrics,
             result_num_issues_open,
             result_num_issues_closed,
-        ) = get_per_issue_metrics(issues)
+        ) = get_per_issue_metrics(issues, env_vars=get_env_vars(test=True))
         expected_issues_with_metrics = [
             IssueWithMetrics(
                 "Issue 1",
@@ -364,14 +368,20 @@ def setUp(self):
             "closedAt": "2023-01-07T00:00:00Z",
         }
 
+    @patch.dict(
+        os.environ,
+        {"GH_TOKEN": "test_token", "SEARCH_QUERY": "is:issue is:open repo:user/repo"},
+    )
     def test_get_per_issue_metrics_with_discussion(self):
         """
         Test that the function correctly calculates
         the metrics for a list of GitHub issues with discussions.
         """
 
         issues = [self.issue1, self.issue2]
-        metrics = get_per_issue_metrics(issues, discussions=True)
+        metrics = get_per_issue_metrics(
+            issues, discussions=True, env_vars=get_env_vars(test=True)
+        )
 
         # get_per_issue_metrics returns a tuple of
         # (issues_with_metrics, num_issues_open, num_issues_closed)
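
The new @patch.dict decorators supply the minimum environment that get_env_vars(test=True) still reads (GH_TOKEN and SEARCH_QUERY). The same pattern could extend to exercising the new gating; a hypothetical test, with an assumed flag variable name that this diff does not confirm:

@patch.dict(
    os.environ,
    {
        "GH_TOKEN": "test_token",
        "SEARCH_QUERY": "is:issue is:open repo:user/repo",
        "HIDE_TIME_TO_CLOSE": "true",  # assumed name, illustration only
    },
)
def test_hidden_metric_is_not_measured(self):
    """Hypothetical: a hidden statistic should be skipped, leaving None."""
    issues = [self.issue1, self.issue2]
    issues_with_metrics, _, _ = get_per_issue_metrics(
        issues, discussions=True, env_vars=get_env_vars(test=True)
    )
    self.assertIsNone(issues_with_metrics[0].time_to_close)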
