diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 098cd4c8c01..70899438082 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -91,7 +91,9 @@ jobs: user_name: 'github-actions[bot]' user_email: 'github-actions[bot]@users.noreply.github.com' - name: Comment about previewing documentation - if: ${{ github.event_name == 'pull_request' }} + if: | + github.event_name == 'pull_request' && + github.event.pull_request.head.repo.full_name == github.repository uses: actions/github-script@v6 with: script: | diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 6af6ec139d3..ab759966c93 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -89,7 +89,7 @@ jobs: init_cime - pytest -vvv --machine docker --no-fortran-run CIME/tests/test_unit* + pytest -vvv --cov=CIME --machine docker --no-fortran-run CIME/tests/test_unit* # Run system tests system-testing: @@ -145,7 +145,7 @@ jobs: conda activate base - pytest -vvv --machine docker --no-fortran-run --no-teardown CIME/tests/test_sys* + pytest -vvv --cov=CIME --machine docker --no-fortran-run --no-teardown CIME/tests/test_sys* - name: Create testing log archive if: ${{ failure() }} shell: bash diff --git a/.gitignore b/.gitignore index 58e9dd92b66..f6351cf8996 100644 --- a/.gitignore +++ b/.gitignore @@ -26,3 +26,4 @@ scripts/Tools/JENKINS* components libraries share +test_coverage/** diff --git a/CIME/SystemTests/system_tests_common.py b/CIME/SystemTests/system_tests_common.py index f15fbe959e0..a05616882df 100644 --- a/CIME/SystemTests/system_tests_common.py +++ b/CIME/SystemTests/system_tests_common.py @@ -26,6 +26,14 @@ from CIME.config import Config from CIME.provenance import save_test_time, get_test_success from CIME.locked_files import LOCKED_DIR, lock_file, is_locked +from CIME.baselines.performance import ( + get_latest_cpl_logs, + _perf_get_memory, + perf_compare_memory_baseline, + perf_compare_throughput_baseline, + 
perf_write_baseline, + load_coupler_customization, +) import CIME.build as build import glob, gzip, time, traceback, os @@ -493,7 +501,7 @@ def run_indv( self._case.case_st_archive(resubmit=True) def _coupler_log_indicates_run_complete(self): - newestcpllogfiles = self._get_latest_cpl_logs() + newestcpllogfiles = get_latest_cpl_logs(self._case) logger.debug("Latest Coupler log file(s) {}".format(newestcpllogfiles)) # Exception is raised if the file is not compressed allgood = len(newestcpllogfiles) @@ -598,43 +606,6 @@ def _st_archive_case_test(self): else: self._test_status.set_status(STARCHIVE_PHASE, TEST_FAIL_STATUS) - def _get_mem_usage(self, cpllog): - """ - Examine memory usage as recorded in the cpl log file and look for unexpected - increases. - """ - memlist = [] - meminfo = re.compile( - r".*model date =\s+(\w+).*memory =\s+(\d+\.?\d+).*highwater" - ) - if cpllog is not None and os.path.isfile(cpllog): - if ".gz" == cpllog[-3:]: - fopen = gzip.open - else: - fopen = open - with fopen(cpllog, "rb") as f: - for line in f: - m = meminfo.match(line.decode("utf-8")) - if m: - memlist.append((float(m.group(1)), float(m.group(2)))) - # Remove the last mem record, it's sometimes artificially high - if len(memlist) > 0: - memlist.pop() - return memlist - - def _get_throughput(self, cpllog): - """ - Examine memory usage as recorded in the cpl log file and look for unexpected - increases. - """ - if cpllog is not None and os.path.isfile(cpllog): - with gzip.open(cpllog, "rb") as f: - cpltext = f.read().decode("utf-8") - m = re.search(r"# simulated years / cmp-day =\s+(\d+\.\d+)\s", cpltext) - if m: - return float(m.group(1)) - return None - def _phase_modifying_call(self, phase, function): """ Ensures that unexpected exceptions from phases will result in a FAIL result @@ -661,47 +632,29 @@ def _check_for_memleak(self): Examine memory usage as recorded in the cpl log file and look for unexpected increases. 
""" + config = load_coupler_customization(self._case) + + # default to 0.1 + tolerance = self._case.get_value("TEST_MEMLEAK_TOLERANCE") or 0.1 + + expect(tolerance > 0.0, "Bad value for memleak tolerance in test") + with self._test_status: - latestcpllogs = self._get_latest_cpl_logs() - for cpllog in latestcpllogs: - memlist = self._get_mem_usage(cpllog) + try: + memleak, comment = config.perf_check_for_memory_leak( + self._case, tolerance + ) + except AttributeError: + memleak, comment = perf_check_for_memory_leak(self._case, tolerance) - if len(memlist) < 3: - self._test_status.set_status( - MEMLEAK_PHASE, - TEST_PASS_STATUS, - comments="insuffiencient data for memleak test", - ) - else: - finaldate = int(memlist[-1][0]) - originaldate = int( - memlist[1][0] - ) # skip first day mem record, it can be too low while initializing - finalmem = float(memlist[-1][1]) - originalmem = float(memlist[1][1]) - memdiff = -1 - if originalmem > 0: - memdiff = (finalmem - originalmem) / originalmem - tolerance = self._case.get_value("TEST_MEMLEAK_TOLERANCE") - if tolerance is None: - tolerance = 0.1 - expect(tolerance > 0.0, "Bad value for memleak tolerance in test") - if memdiff < 0: - self._test_status.set_status( - MEMLEAK_PHASE, - TEST_PASS_STATUS, - comments="data for memleak test is insuffiencient", - ) - elif memdiff < tolerance: - self._test_status.set_status(MEMLEAK_PHASE, TEST_PASS_STATUS) - else: - comment = "memleak detected, memory went from {:f} to {:f} in {:d} days".format( - originalmem, finalmem, finaldate - originaldate - ) - append_testlog(comment, self._orig_caseroot) - self._test_status.set_status( - MEMLEAK_PHASE, TEST_FAIL_STATUS, comments=comment - ) + if memleak: + append_testlog(comment, self._orig_caseroot) + + status = TEST_FAIL_STATUS + else: + status = TEST_PASS_STATUS + + self._test_status.set_status(MEMLEAK_PHASE, status, comments=comment) def compare_env_run(self, expected=None): """ @@ -728,121 +681,45 @@ def compare_env_run(self, 
expected=None): return False return True - def _get_latest_cpl_logs(self): + def _compare_memory(self): """ - find and return the latest cpl log file in the run directory + Compares current test memory usage to baseline. """ - coupler_log_path = self._case.get_value("RUNDIR") - cpllogs = glob.glob( - os.path.join(coupler_log_path, "{}*.log.*".format(self._cpllog)) - ) - lastcpllogs = [] - if cpllogs: - lastcpllogs.append(max(cpllogs, key=os.path.getctime)) - basename = os.path.basename(lastcpllogs[0]) - suffix = basename.split(".", 1)[1] - for log in cpllogs: - if log in lastcpllogs: - continue - - if log.endswith(suffix): - lastcpllogs.append(log) + with self._test_status: + below_tolerance, comment = perf_compare_memory_baseline(self._case) - return lastcpllogs + if below_tolerance is not None: + append_testlog(comment, self._orig_caseroot) - def _compare_memory(self): - with self._test_status: - # compare memory usage to baseline - baseline_name = self._case.get_value("BASECMP_CASE") - basecmp_dir = os.path.join( - self._case.get_value("BASELINE_ROOT"), baseline_name - ) - newestcpllogfiles = self._get_latest_cpl_logs() - if len(newestcpllogfiles) > 0: - memlist = self._get_mem_usage(newestcpllogfiles[0]) - for cpllog in newestcpllogfiles: - m = re.search(r"/({}.*.log).*.gz".format(self._cpllog), cpllog) - if m is not None: - baselog = os.path.join(basecmp_dir, m.group(1)) + ".gz" - if baselog is None or not os.path.isfile(baselog): - # for backward compatibility - baselog = os.path.join(basecmp_dir, self._cpllog + ".log") - if os.path.isfile(baselog) and len(memlist) > 3: - blmem = self._get_mem_usage(baselog) - blmem = 0 if blmem == [] else blmem[-1][1] - curmem = memlist[-1][1] - diff = 0.0 if blmem == 0 else (curmem - blmem) / blmem - tolerance = self._case.get_value("TEST_MEMLEAK_TOLERANCE") - if tolerance is None: - tolerance = 0.1 - if ( - diff < tolerance - and self._test_status.get_status(MEMCOMP_PHASE) is None - ): - 
self._test_status.set_status(MEMCOMP_PHASE, TEST_PASS_STATUS) - elif ( - self._test_status.get_status(MEMCOMP_PHASE) != TEST_FAIL_STATUS - ): - comment = "Error: Memory usage increase >{:d}% from baseline's {:f} to {:f}".format( - int(tolerance * 100), blmem, curmem - ) - self._test_status.set_status( - MEMCOMP_PHASE, TEST_FAIL_STATUS, comments=comment - ) - append_testlog(comment, self._orig_caseroot) + if ( + below_tolerance + and self._test_status.get_status(MEMCOMP_PHASE) is None + ): + self._test_status.set_status(MEMCOMP_PHASE, TEST_PASS_STATUS) + elif self._test_status.get_status(MEMCOMP_PHASE) != TEST_FAIL_STATUS: + self._test_status.set_status( + MEMCOMP_PHASE, TEST_FAIL_STATUS, comments=comment + ) def _compare_throughput(self): + """ + Compares current test throughput to baseline. + """ with self._test_status: - # compare memory usage to baseline - baseline_name = self._case.get_value("BASECMP_CASE") - basecmp_dir = os.path.join( - self._case.get_value("BASELINE_ROOT"), baseline_name - ) - newestcpllogfiles = self._get_latest_cpl_logs() - for cpllog in newestcpllogfiles: - m = re.search(r"/({}.*.log).*.gz".format(self._cpllog), cpllog) - if m is not None: - baselog = os.path.join(basecmp_dir, m.group(1)) + ".gz" - if baselog is None or not os.path.isfile(baselog): - # for backward compatibility - baselog = os.path.join(basecmp_dir, self._cpllog) - - if os.path.isfile(baselog): - # compare throughput to baseline - current = self._get_throughput(cpllog) - baseline = self._get_throughput(baselog) - # comparing ypd so bigger is better - if baseline is not None and current is not None: - diff = (baseline - current) / baseline - tolerance = self._case.get_value("TEST_TPUT_TOLERANCE") - if tolerance is None: - tolerance = 0.1 - expect( - tolerance > 0.0, - "Bad value for throughput tolerance in test", - ) - comment = "TPUTCOMP: Computation time changed by {:.2f}% relative to baseline".format( - diff * 100 - ) - append_testlog(comment, self._orig_caseroot) - if 
( - diff < tolerance - and self._test_status.get_status(THROUGHPUT_PHASE) is None - ): - self._test_status.set_status( - THROUGHPUT_PHASE, TEST_PASS_STATUS - ) - elif ( - self._test_status.get_status(THROUGHPUT_PHASE) - != TEST_FAIL_STATUS - ): - comment = "Error: TPUTCOMP: Computation time increase > {:d}% from baseline".format( - int(tolerance * 100) - ) - self._test_status.set_status( - THROUGHPUT_PHASE, TEST_FAIL_STATUS, comments=comment - ) - append_testlog(comment, self._orig_caseroot) + below_tolerance, comment = perf_compare_throughput_baseline(self._case) + + if below_tolerance is not None: + append_testlog(comment, self._orig_caseroot) + + if ( + below_tolerance + and self._test_status.get_status(THROUGHPUT_PHASE) is None + ): + self._test_status.set_status(THROUGHPUT_PHASE, TEST_PASS_STATUS) + elif self._test_status.get_status(THROUGHPUT_PHASE) != TEST_FAIL_STATUS: + self._test_status.set_status( + THROUGHPUT_PHASE, TEST_FAIL_STATUS, comments=comment + ) def _compare_baseline(self): """ @@ -884,18 +761,59 @@ def _generate_baseline(self): ) # copy latest cpl log to baseline # drop the date so that the name is generic - newestcpllogfiles = self._get_latest_cpl_logs() + newestcpllogfiles = get_latest_cpl_logs(self._case) with SharedArea(): + # TODO ever actually more than one cpl log? 
for cpllog in newestcpllogfiles: m = re.search(r"/({}.*.log).*.gz".format(self._cpllog), cpllog) + if m is not None: baselog = os.path.join(basegen_dir, m.group(1)) + ".gz" + safe_copy( cpllog, os.path.join(basegen_dir, baselog), preserve_meta=False, ) + perf_write_baseline(self._case, basegen_dir, cpllog) + + +def perf_check_for_memory_leak(case, tolerance): + leak = False + comment = "" + + latestcpllogs = get_latest_cpl_logs(case) + + for cpllog in latestcpllogs: + try: + memlist = _perf_get_memory(case, cpllog) + except RuntimeError: + return False, "insufficient data for memleak test" + + # last day - second day, skip first day, can be too low while initializing + elapsed_days = int(memlist[-1][0]) - int(memlist[1][0]) + + finalmem, originalmem = float(memlist[-1][1]), float(memlist[1][1]) + + memdiff = -1 if originalmem <= 0 else (finalmem - originalmem) / originalmem + + if memdiff < 0: + leak = False + comment = "data for memleak test is insufficient" + elif memdiff < tolerance: + leak = False + comment = "" + else: + leak = True + comment = ( + "memleak detected, memory went from {:f} to {:f} in {:d} days".format( + originalmem, finalmem, elapsed_days + ) + ) + + return leak, comment + class FakeTest(SystemTestsCommon): """ diff --git a/CIME/Tools/bless_test_results b/CIME/Tools/bless_test_results index d630aff69bd..cb6bd2f972a 100755 --- a/CIME/Tools/bless_test_results +++ b/CIME/Tools/bless_test_results @@ -8,20 +8,21 @@ blessing of diffs. You may need to load modules for cprnc to work. 
""" - from standard_script_setup import * from CIME.utils import expect from CIME.XML.machines import Machines from CIME.bless_test_results import bless_test_results -import argparse, sys, os +import argparse +import sys +import os +import logging _MACHINE = Machines() -############################################################################### + def parse_command_line(args, description): - ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} [-n] [-r ] [-b ] [-c ] [ ...] [--verbose] OR @@ -45,35 +46,18 @@ OR formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) - default_compiler = _MACHINE.get_default_compiler() - scratch_root = _MACHINE.get_value("CIME_OUTPUT_ROOT") - default_testroot = os.path.join(scratch_root) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument( - "-n", "--namelists-only", action="store_true", help="Only analyze namelists." - ) + create_bless_options(parser) - parser.add_argument( - "--hist-only", action="store_true", help="Only analyze history files." - ) + create_baseline_options(parser) - parser.add_argument( - "-b", - "--baseline-name", - help="Name of baselines to use. Default will use BASELINE_NAME_CMP first if possible, otherwise branch name.", - ) + create_test_options(parser) - parser.add_argument( - "--baseline-root", - help="Root of baselines. Default will use the BASELINE_ROOT from the case.", - ) + CIME.utils.setup_standard_logging_options(parser) parser.add_argument( "-c", "--compiler", - default=default_compiler, + default=_MACHINE.get_default_compiler(), help="Compiler of run you want to bless", ) @@ -85,36 +69,15 @@ OR "This option forces the bless to happen regardless.", ) - parser.add_argument( + mutual_execution = parser.add_mutually_exclusive_group() + + mutual_execution.add_argument( "--report-only", action="store_true", help="Only report what files will be overwritten and why. 
Caution is a good thing when updating baselines", ) - parser.add_argument( - "-r", - "--test-root", - default=default_testroot, - help="Path to test results that are being blessed", - ) - - parser.add_argument( - "--new-test-root", - help="If bless_test_results needs to create cases (for blessing namelists), use this root area", - ) - - parser.add_argument( - "--new-test-id", - help="If bless_test_results needs to create cases (for blessing namelists), use this test id", - ) - - parser.add_argument( - "-t", - "--test-id", - help="Limit processes to case dirs matching this test-id. Can be useful if mutiple runs dumped into the same dir.", - ) - - parser.add_argument( + mutual_execution.add_argument( "-f", "--force", action="store_true", @@ -135,73 +98,82 @@ OR args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - expect( - not (args.report_only and args.force), - "Makes no sense to use -r and -f simultaneously", + return vars(args) + + +def create_bless_options(parser): + bless_group = parser.add_argument_group("Bless options") + + mutual_bless_group = bless_group.add_mutually_exclusive_group(required=True) + + mutual_bless_group.add_argument( + "-n", "--namelists-only", action="store_true", help="Only analyze namelists." + ) + + mutual_bless_group.add_argument( + "--hist-only", action="store_true", help="Only analyze history files." ) - expect( - not (args.namelists_only and args.hist_only), - "Makes no sense to use --namelists-only and --hist-only simultaneously", + + mutual_bless_group.add_argument( + "--tput-only", action="store_true", help="Only analyze throughput." 
) - return ( - args.baseline_name, - args.baseline_root, - args.test_root, - args.compiler, - args.test_id, - args.namelists_only, - args.hist_only, - args.report_only, - args.force, - args.pes_file, - args.bless_tests, - args.no_skip_pass, - args.new_test_root, - args.new_test_id, + mutual_bless_group.add_argument( + "--mem-only", action="store_true", help="Only analyze memory." ) -############################################################################### -def _main_func(description): - ############################################################################### - ( - baseline_name, - baseline_root, - test_root, - compiler, - test_id, - namelists_only, - hist_only, - report_only, - force, - pes_file, - bless_tests, - no_skip_pass, - new_test_root, - new_test_id, - ) = parse_command_line(sys.argv, description) - - success = bless_test_results( - baseline_name, - baseline_root, - test_root, - compiler, - test_id=test_id, - namelists_only=namelists_only, - hist_only=hist_only, - report_only=report_only, - force=force, - pesfile=pes_file, - bless_tests=bless_tests, - no_skip_pass=no_skip_pass, - new_test_root=new_test_root, - new_test_id=new_test_id, +def create_baseline_options(parser): + baseline_group = parser.add_argument_group("Baseline options") + + baseline_group.add_argument( + "-b", + "--baseline-name", + help="Name of baselines to use. Default will use BASELINE_NAME_CMP first if possible, otherwise branch name.", + ) + + baseline_group.add_argument( + "--baseline-root", + help="Root of baselines. 
Default will use the BASELINE_ROOT from the case.", + ) + + +def create_test_options(parser): + default_testroot = _MACHINE.get_value("CIME_OUTPUT_ROOT") + + test_group = parser.add_argument_group("Test options") + + test_group.add_argument( + "-r", + "--test-root", + default=default_testroot, + help="Path to test results that are being blessed", + ) + + test_group.add_argument( + "--new-test-root", + help="If bless_test_results needs to create cases (for blessing namelists), use this root area", ) - sys.exit(0 if success else 1) + test_group.add_argument( + "--new-test-id", + help="If bless_test_results needs to create cases (for blessing namelists), use this test id", + ) + + test_group.add_argument( + "-t", + "--test-id", + help="Limit processes to case dirs matching this test-id. Can be useful if mutiple runs dumped into the same dir.", + ) + + +def _main_func(description): + kwargs = parse_command_line(sys.argv, description) + + success = bless_test_results(**kwargs) + + sys.exit(0 if success else 1) -############################################################################### if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/baselines/__init__.py b/CIME/baselines/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/CIME/baselines/performance.py b/CIME/baselines/performance.py new file mode 100644 index 00000000000..f8f1fda77a1 --- /dev/null +++ b/CIME/baselines/performance.py @@ -0,0 +1,550 @@ +import os +import glob +import re +import gzip +import logging +from CIME.config import Config +from CIME.utils import expect + +logger = logging.getLogger(__name__) + + +def perf_compare_throughput_baseline(case, baseline_dir=None): + """ + Compares model throughput. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline_dir : str + Overrides the baseline directory. + + Returns + ------- + below_tolerance : bool + Whether the comparison was below the tolerance. 
+ comment : str + Provides explanation from comparison. + """ + if baseline_dir is None: + baseline_dir = case.get_baseline_dir() + + config = load_coupler_customization(case) + + baseline_file = os.path.join(baseline_dir, "cpl-tput.log") + + try: + baseline = read_baseline_file(baseline_file) + except FileNotFoundError as e: + comment = f"Could not read baseline throughput file: {e!s}" + + logger.debug(comment) + + return None, comment + + tolerance = case.get_value("TEST_TPUT_TOLERANCE") + + if tolerance is None: + tolerance = 0.1 + + expect( + tolerance > 0.0, + "Bad value for throughput tolerance in test", + ) + + try: + below_tolerance, comment = config.perf_compare_throughput_baseline( + case, baseline, tolerance + ) + except AttributeError: + below_tolerance, comment = _perf_compare_throughput_baseline( + case, baseline, tolerance + ) + + return below_tolerance, comment + + +def perf_compare_memory_baseline(case, baseline_dir=None): + """ + Compares model highwater memory usage. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline_dir : str + Overrides the baseline directory. + + Returns + ------- + below_tolerance : bool + Whether the comparison was below the tolerance. + comment : str + Provides explanation from comparison. 
+ """ + if baseline_dir is None: + baseline_dir = case.get_baseline_dir() + + config = load_coupler_customization(case) + + baseline_file = os.path.join(baseline_dir, "cpl-mem.log") + + try: + baseline = read_baseline_file(baseline_file) + except FileNotFoundError as e: + comment = f"Could not read baseline memory usage: {e!s}" + + logger.debug(comment) + + return None, comment + + tolerance = case.get_value("TEST_MEMLEAK_TOLERANCE") + + if tolerance is None: + tolerance = 0.1 + + try: + below_tolerance, comments = config.perf_compare_memory_baseline( + case, baseline, tolerance + ) + except AttributeError: + below_tolerance, comments = _perf_compare_memory_baseline( + case, baseline, tolerance + ) + + return below_tolerance, comments + + +def perf_write_baseline(case, basegen_dir, throughput=True, memory=True): + """ + Writes the baseline performance files. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + basegen_dir : str + Path to baseline directory. + throughput : bool + If true, write throughput baseline. + memory : bool + If true, write memory baseline. + """ + config = load_coupler_customization(case) + + if throughput: + try: + tput = perf_get_throughput(case, config) + except RuntimeError as e: + logger.debug("Could not get throughput: {0!s}".format(e)) + else: + baseline_file = os.path.join(basegen_dir, "cpl-tput.log") + + write_baseline_file(baseline_file, tput) + + if memory: + try: + mem = perf_get_memory(case, config) + except RuntimeError as e: + logger.info("Could not get memory usage: {0!s}".format(e)) + else: + baseline_file = os.path.join(basegen_dir, "cpl-mem.log") + + write_baseline_file(baseline_file, mem) + + +def load_coupler_customization(case): + """ + Loads customizations from the coupler `cime_config` directory. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + CIME.config.Config + Runtime configuration. 
+ """ + comp_root_dir_cpl = case.get_value("COMP_ROOT_DIR_CPL") + + cpl_customize = os.path.join(comp_root_dir_cpl, "cime_config", "customize") + + return Config.load(cpl_customize) + + +def perf_get_throughput(case, config): + """ + Gets the model throughput. + + First attempts to use a coupler define method to retrieve the + models throughput. If this is not defined then the default + method of parsing the coupler log is used. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + str or None + Model throughput. + """ + try: + tput = config.perf_get_throughput(case) + except AttributeError: + tput = _perf_get_throughput(case) + + if tput is None: + raise RuntimeError("Could not get default throughput") from None + + tput = str(tput) + + return tput + + +def perf_get_memory(case, config): + """ + Gets the model memory usage. + + First attempts to use a coupler defined method to retrieve the + models memory usage. If this is not defined then the default + method of parsing the coupler log is used. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + str or None + Model memory usage. + """ + try: + mem = config.perf_get_memory(case) + except AttributeError: + mem = _perf_get_memory(case) + + if mem is None: + raise RuntimeError("Could not get default memory usage") from None + + mem = str(mem[-1][1]) + + return mem + + +def write_baseline_file(baseline_file, value): + """ + Writes value to `baseline_file`. + + Parameters + ---------- + baseline_file : str + Path to the baseline file. + value : str + Value to write. + """ + with open(baseline_file, "w") as fd: + fd.write(value) + + +def _perf_get_memory(case, cpllog=None): + """ + Default function to retrieve memory usage from the coupler log. + + If the usage is not available from the log then `None` is returned. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. 
+ cpllog : str + Overrides the default coupler log. + + Returns + ------- + str or None + Model memory usage or `None`. + + Raises + ------ + RuntimeError + If not enough samples were found. + """ + if cpllog is None: + cpllog = get_latest_cpl_logs(case) + else: + cpllog = [ + cpllog, + ] + + try: + memlist = get_cpl_mem_usage(cpllog[0]) + except (FileNotFoundError, IndexError): + memlist = None + + logger.debug("Could not parse memory usage from coupler log") + else: + if len(memlist) <= 3: + raise RuntimeError( + f"Found {len(memlist)} memory usage samples, need at least 4" + ) + + return memlist + + +def _perf_get_throughput(case): + """ + Default function to retrieve throughput from the coupler log. + + If the throughput is not available from the log then `None` is returned. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + str or None + Model throughput or `None`. + """ + cpllog = get_latest_cpl_logs(case) + + try: + tput = get_cpl_throughput(cpllog[0]) + except (FileNotFoundError, IndexError): + tput = None + + logger.debug("Could not parse throughput from coupler log") + + return tput + + +def get_latest_cpl_logs(case): + """ + find and return the latest cpl log file in the run directory + """ + coupler_log_path = case.get_value("RUNDIR") + + cpllog_name = "drv" if case.get_value("COMP_INTERFACE") == "nuopc" else "cpl" + + cpllogs = glob.glob(os.path.join(coupler_log_path, "{}*.log.*".format(cpllog_name))) + + lastcpllogs = [] + + if cpllogs: + lastcpllogs.append(max(cpllogs, key=os.path.getctime)) + + basename = os.path.basename(lastcpllogs[0]) + + suffix = basename.split(".", 1)[1] + + for log in cpllogs: + if log in lastcpllogs: + continue + + if log.endswith(suffix): + lastcpllogs.append(log) + + return lastcpllogs + + +def get_cpl_mem_usage(cpllog): + """ + Read memory usage from coupler log. + + Parameters + ---------- + cpllog : str + Path to the coupler log. 
+ + Returns + ------- + list + Memory usage (data, highwater) as recorded by the coupler or empty list. + """ + memlist = [] + + meminfo = re.compile(r".*model date =\s+(\w+).*memory =\s+(\d+\.?\d+).*highwater") + + if cpllog is not None and os.path.isfile(cpllog): + if ".gz" == cpllog[-3:]: + fopen = gzip.open + else: + fopen = open + + with fopen(cpllog, "rb") as f: + for line in f: + m = meminfo.match(line.decode("utf-8")) + + if m: + memlist.append((float(m.group(1)), float(m.group(2)))) + + # Remove the last mem record, it's sometimes artificially high + if len(memlist) > 0: + memlist.pop() + + return memlist + + +def get_cpl_throughput(cpllog): + """ + Reads throughput from coupler log. + + Parameters + ---------- + cpllog : str + Path to the coupler log. + + Returns + ------- + float or None + Throughput as recorded by the coupler or None + """ + if cpllog is not None and os.path.isfile(cpllog): + with gzip.open(cpllog, "rb") as f: + cpltext = f.read().decode("utf-8") + + m = re.search(r"# simulated years / cmp-day =\s+(\d+\.\d+)\s", cpltext) + + if m: + return float(m.group(1)) + return None + + +def read_baseline_file(baseline_file): + """ + Reads value from `baseline_file`. + + Strips comments and returns the raw content to be decoded. + + Parameters + ---------- + baseline_file : str + Path to the baseline file. + + Returns + ------- + str + Value stored in baseline file without comments. + """ + with open(baseline_file) as fd: + lines = [x.strip() for x in fd.readlines() if not x.startswith("#")] + + return "\n".join(lines) + + +def _perf_compare_throughput_baseline(case, baseline, tolerance): + """ + Default throughput baseline comparison. + + Compares the throughput from the coupler to the baseline value. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline : list + Lines contained in the baseline file. + tolerance : float + Allowed tolerance for comparison. 
+ + Returns + ------- + below_tolerance : bool + Whether the comparison was below the tolerance. + comment : str + Provides explanation from comparison. + """ + current = _perf_get_throughput(case) + + try: + # default baseline is stored as single float + baseline = float(baseline) + except ValueError: + comment = "Could not compare throughput to baseline, as baseline had no value." + + return None, comment + + # comparing ypd so bigger is better + diff = (baseline - current) / baseline + + below_tolerance = None + + if diff is not None: + below_tolerance = diff < tolerance + + if below_tolerance: + comment = "TPUTCOMP: Computation time changed by {:.2f}% relative to baseline".format( + diff * 100 + ) + else: + comment = "Error: TPUTCOMP: Computation time increase > {:d}% from baseline".format( + int(tolerance * 100) + ) + + return below_tolerance, comment + + +def _perf_compare_memory_baseline(case, baseline, tolerance): + """ + Default memory usage baseline comparison. + + Compares the highwater memory usage from the coupler to the baseline value. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline : list + Lines contained in the baseline file. + tolerance : float + Allowed tolerance for comparison. 
+ """ + try: + current = _perf_get_memory(case) + except RuntimeError as e: + return None, str(e) + else: + current = current[-1][1] + + try: + # default baseline is stored as single float + baseline = float(baseline) + except ValueError: + baseline = 0.0 + + try: + diff = (current - baseline) / baseline + except ZeroDivisionError: + diff = 0.0 + + # Should we check if tolerance is above 0 + below_tolerance = None + comment = "" + + if diff is not None: + below_tolerance = diff < tolerance + + if below_tolerance: + comment = "MEMCOMP: Memory usage highwater has changed by {:.2f}% relative to baseline".format( + diff * 100 + ) + else: + comment = "Error: Memory usage increase >{:d}% from baseline's {:f} to {:f}".format( + int(tolerance * 100), baseline, current + ) + + return below_tolerance, comment diff --git a/CIME/bless_test_results.py b/CIME/bless_test_results.py index 62637851cb4..0f09b91eb0b 100644 --- a/CIME/bless_test_results.py +++ b/CIME/bless_test_results.py @@ -11,10 +11,90 @@ from CIME.hist_utils import generate_baseline, compare_baseline from CIME.case import Case from CIME.test_utils import get_test_status_files +from CIME.baselines.performance import ( + perf_compare_throughput_baseline, + perf_compare_memory_baseline, + perf_write_baseline, +) import os, time logger = logging.getLogger(__name__) + +def bless_throughput( + case, + test_name, + baseline_root, + baseline_name, + report_only, + force, +): + success = True + reason = None + + baseline_dir = os.path.join( + baseline_root, baseline_name, case.get_value("CASEBASEID") + ) + + below_threshold, comment = perf_compare_throughput_baseline( + case, baseline_dir=baseline_dir + ) + + if below_threshold: + logger.info("Diff appears to have been already resolved.") + else: + logger.info(comment) + + if not report_only and ( + force or input("Update this diff (y/n)? 
").upper() in ["Y", "YES"] + ): + try: + perf_write_baseline(case, baseline_dir, memory=False) + except Exception as e: + success = False + + reason = f"Failed to write baseline throughput for {test_name!r}: {e!s}" + + return success, reason + + +def bless_memory( + case, + test_name, + baseline_root, + baseline_name, + report_only, + force, +): + success = True + reason = None + + baseline_dir = os.path.join( + baseline_root, baseline_name, case.get_value("CASEBASEID") + ) + + below_threshold, comment = perf_compare_memory_baseline( + case, baseline_dir=baseline_dir + ) + + if below_threshold: + logger.info("Diff appears to have been already resolved.") + else: + logger.info(comment) + + if not report_only and ( + force or input("Update this diff (y/n)? ").upper() in ["Y", "YES"] + ): + try: + perf_write_baseline(case, baseline_dir, throughput=False) + except Exception as e: + success = False + + reason = f"Failed to write baseline memory usage for test {test_name!r}: {e!s}" + + return success, reason + + ############################################################################### def bless_namelists( test_name, @@ -112,6 +192,8 @@ def bless_test_results( test_id=None, namelists_only=False, hist_only=False, + tput_only=False, + mem_only=False, report_only=False, force=False, pesfile=None, @@ -119,6 +201,7 @@ def bless_test_results( no_skip_pass=False, new_test_root=None, new_test_id=None, + **_, # Capture all for extra ): ############################################################################### test_status_files = get_test_status_files(test_root, compiler, test_id=test_id) @@ -172,9 +255,15 @@ def bless_test_results( if bless_tests in [[], None] or CIME.utils.match_any( test_name, bless_tests_counts ): - overall_result, phase = ts.get_overall_test_status( - ignore_namelists=True, ignore_memleak=True - ) + ts_kwargs = dict(ignore_namelists=True, ignore_memleak=True) + + if tput_only: + ts_kwargs["check_throughput"] = True + + if mem_only: + 
ts_kwargs["check_memory"] = True + + overall_result, phase = ts.get_overall_test_status(**ts_kwargs) # See if we need to bless namelist if not hist_only: @@ -219,14 +308,13 @@ def bless_test_results( hist_bless = False # Now, do the bless - if not nl_bless and not hist_bless: + if not nl_bless and not hist_bless and not tput_only and not mem_only: logger.info( "Nothing to bless for test: {}, overall status: {}".format( test_name, overall_result ) ) else: - logger.info( "###############################################################################" ) @@ -305,6 +393,32 @@ def bless_test_results( if not success: broken_blesses.append((test_name, reason)) + if tput_only: + success, reason = bless_throughput( + case, + test_name, + baseline_root_resolved, + baseline_name_resolved, + report_only, + force, + ) + + if not success: + broken_blesses.append((test_name, reason)) + + if mem_only: + success, reason = bless_memory( + case, + test_name, + baseline_root_resolved, + baseline_name_resolved, + report_only, + force, + ) + + if not success: + broken_blesses.append((test_name, reason)) + # Emit a warning if items in bless_tests did not match anything if bless_tests: for bless_test, bless_count in bless_tests_counts.items(): diff --git a/CIME/case/case.py b/CIME/case/case.py index 6de8bb2a217..2bf14540205 100644 --- a/CIME/case/case.py +++ b/CIME/case/case.py @@ -207,6 +207,13 @@ def __init__(self, case_root=None, read_only=True, record=False, non_local=False self.initialize_derived_attributes() + def get_baseline_dir(self): + baseline_root = self.get_value("BASELINE_ROOT") + + baseline_name = self.get_value("BASECMP_CASE") + + return os.path.join(baseline_root, baseline_name) + def check_if_comp_var(self, vid): for env_file in self._env_entryid_files: new_vid, new_comp, iscompvar = env_file.check_if_comp_var(vid) diff --git a/CIME/config.py b/CIME/config.py index 3cef6cc0530..d2306d354d0 100644 --- a/CIME/config.py +++ b/CIME/config.py @@ -9,19 +9,132 @@ logger = 
logging.getLogger(__name__) -class Config: +class ConfigBase: def __new__(cls): if not hasattr(cls, "_instance"): - cls._instance = super(Config, cls).__new__(cls) + cls._instance = super(ConfigBase, cls).__new__(cls) return cls._instance def __init__(self): - if getattr(self, "_loaded", False): - return - self._attribute_config = {} + @property + def loaded(self): + return getattr(self, "_loaded", False) + + @classmethod + def instance(cls): + """Access singleton. + + Explicit way to access singleton, same as calling constructor. + """ + return cls() + + @classmethod + def load(cls, customize_path): + obj = cls() + + logger.debug("Searching %r for files to load", customize_path) + + customize_files = glob.glob(f"{customize_path}/**/*.py", recursive=True) + + # filter out any tests + customize_files = [ + x for x in customize_files if "tests" not in x and "conftest" not in x + ] + + customize_module_spec = importlib.machinery.ModuleSpec("cime_customize", None) + + customize_module = importlib.util.module_from_spec(customize_module_spec) + + sys.modules["CIME.customize"] = customize_module + + for x in sorted(customize_files): + obj._load_file(x, customize_module) + + setattr(obj, "_loaded", True) + + return obj + + def _load_file(self, file_path, customize_module): + logger.debug("Loading file %r", file_path) + + raw_config = utils.import_from_file("raw_config", file_path) + + # filter user define variables and functions + user_defined = [x for x in dir(raw_config) if not x.endswith("__")] + + # set values on this object, will overwrite existing + for x in user_defined: + try: + value = getattr(raw_config, x) + except AttributeError: + # should never hit this + logger.fatal("Attribute %r missing on obejct", x) + + sys.exit(1) + else: + setattr(customize_module, x, value) + + self._set_attribute(x, value) + + def _set_attribute(self, name, value, desc=None): + if hasattr(self, name): + logger.debug("Overwriting %r attribute", name) + + logger.debug("Setting 
attribute %r with value %r", name, value) + + setattr(self, name, value) + + self._attribute_config[name] = { + "desc": desc, + "default": value, + } + + def print_rst_table(self): + max_variable = max([len(x) for x in self._attribute_config.keys()]) + max_default = max( + [len(str(x["default"])) for x in self._attribute_config.values()] + ) + max_type = max( + [len(type(x["default"]).__name__) for x in self._attribute_config.values()] + ) + max_desc = max([len(x["desc"]) for x in self._attribute_config.values()]) + + divider_row = ( + f"{'='*max_variable} {'='*max_default} {'='*max_type} {'='*max_desc}" + ) + + rows = [ + divider_row, + f"Variable{' '*(max_variable-8)} Default{' '*(max_default-7)} Type{' '*(max_type-4)} Description{' '*(max_desc-11)}", + divider_row, + ] + + for variable, value in sorted( + self._attribute_config.items(), key=lambda x: x[0] + ): + variable_fill = max_variable - len(variable) + default_fill = max_default - len(str(value["default"])) + type_fill = max_type - len(type(value["default"]).__name__) + + rows.append( + f"{variable}{' '*variable_fill} {value['default']}{' '*default_fill} {type(value['default']).__name__}{' '*type_fill} {value['desc']}" + ) + + rows.append(divider_row) + + print("\n".join(rows)) + + +class Config(ConfigBase): + def __init__(self): + super().__init__() + + if self.loaded: + return + self._set_attribute( "additional_archive_components", ("drv", "dart"), @@ -195,107 +308,3 @@ def __init__(self): "{srcroot}/libraries/mct", desc="Sets the path to the mct library.", ) - - @classmethod - def instance(cls): - """Access singleton. - - Explicit way to access singleton, same as calling constructor. 
- """ - return cls() - - @classmethod - def load(cls, customize_path): - obj = cls() - - logger.debug("Searching %r for files to load", customize_path) - - customize_files = glob.glob(f"{customize_path}/**/*.py", recursive=True) - - # filter out any tests - customize_files = [ - x for x in customize_files if "tests" not in x and "conftest" not in x - ] - - customize_module_spec = importlib.machinery.ModuleSpec("cime_customize", None) - - customize_module = importlib.util.module_from_spec(customize_module_spec) - - sys.modules["CIME.customize"] = customize_module - - for x in sorted(customize_files): - obj._load_file(x, customize_module) - - setattr(obj, "_loaded", True) - - return obj - - def _load_file(self, file_path, customize_module): - logger.debug("Loading file %r", file_path) - - raw_config = utils.import_from_file("raw_config", file_path) - - # filter user define variables and functions - user_defined = [x for x in dir(raw_config) if not x.endswith("__")] - - # set values on this object, will overwrite existing - for x in user_defined: - try: - value = getattr(raw_config, x) - except AttributeError: - # should never hit this - logger.fatal("Attribute %r missing on obejct", x) - - sys.exit(1) - else: - setattr(customize_module, x, value) - - self._set_attribute(x, value) - - def _set_attribute(self, name, value, desc=None): - if hasattr(self, name): - logger.debug("Overwriting %r attribute", name) - - logger.debug("Setting attribute %r with value %r", name, value) - - setattr(self, name, value) - - self._attribute_config[name] = { - "desc": desc, - "default": value, - } - - def print_rst_table(self): - max_variable = max([len(x) for x in self._attribute_config.keys()]) - max_default = max( - [len(str(x["default"])) for x in self._attribute_config.values()] - ) - max_type = max( - [len(type(x["default"]).__name__) for x in self._attribute_config.values()] - ) - max_desc = max([len(x["desc"]) for x in self._attribute_config.values()]) - - divider_row = ( - 
f"{'='*max_variable} {'='*max_default} {'='*max_type} {'='*max_desc}" - ) - - rows = [ - divider_row, - f"Variable{' '*(max_variable-8)} Default{' '*(max_default-7)} Type{' '*(max_type-4)} Description{' '*(max_desc-11)}", - divider_row, - ] - - for variable, value in sorted( - self._attribute_config.items(), key=lambda x: x[0] - ): - variable_fill = max_variable - len(variable) - default_fill = max_default - len(str(value["default"])) - type_fill = max_type - len(type(value["default"]).__name__) - - rows.append( - f"{variable}{' '*variable_fill} {value['default']}{' '*default_fill} {type(value['default']).__name__}{' '*type_fill} {value['desc']}" - ) - - rows.append(divider_row) - - print("\n".join(rows)) diff --git a/CIME/test_status.py b/CIME/test_status.py index 90714631eb8..5f306b7db0e 100644 --- a/CIME/test_status.py +++ b/CIME/test_status.py @@ -460,7 +460,7 @@ def _get_overall_status_based_on_phases( if rv == TEST_PASS_STATUS: rv = NAMELIST_FAIL_STATUS - elif phase == BASELINE_PHASE: + elif phase in [BASELINE_PHASE, THROUGHPUT_PHASE, MEMCOMP_PHASE]: if rv in [NAMELIST_FAIL_STATUS, TEST_PASS_STATUS]: phase_responsible_for_status = phase rv = TEST_DIFF_STATUS @@ -512,7 +512,9 @@ def get_overall_test_status( >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A TPUTCOMP') ('PASS', 'RUN') >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A TPUTCOMP', check_throughput=True) - ('FAIL', 'TPUTCOMP') + ('DIFF', 'TPUTCOMP') + >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A MEMCOMP', check_memory=True) + ('DIFF', 'MEMCOMP') >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPASS ERS.foo.A RUN\nFAIL ERS.foo.A NLCOMP') ('NLFAIL', 'RUN') >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPEND ERS.foo.A RUN\nFAIL ERS.foo.A NLCOMP') diff --git a/CIME/tests/test_unit_baselines_performance.py b/CIME/tests/test_unit_baselines_performance.py new file mode 100644 index 00000000000..c73c2c15bd3 --- /dev/null +++ b/CIME/tests/test_unit_baselines_performance.py @@ -0,0 +1,691 @@ 
+#!/usr/bin/env python3 + +import gzip +import tempfile +import unittest +from unittest import mock +from pathlib import Path + +from CIME.baselines import performance +from CIME.tests.test_unit_system_tests import CPLLOG + + +def create_mock_case(tempdir, get_latest_cpl_logs=None): + caseroot = Path(tempdir, "0", "caseroot") + + rundir = caseroot / "run" + + if get_latest_cpl_logs is not None: + get_latest_cpl_logs.return_value = (str(rundir / "cpl.log.gz"),) + + baseline_root = Path(tempdir, "baselines") + + baseline_root.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + + return case, caseroot, rundir, baseline_root + + +class TestUnitBaselinesPerformance(unittest.TestCase): + @mock.patch("CIME.baselines.performance._perf_get_memory") + def test_perf_get_memory_default_no_value(self, _perf_get_memory): + _perf_get_memory.return_value = None + + case = mock.MagicMock() + + config = mock.MagicMock() + + config.perf_get_memory.side_effect = AttributeError + + with self.assertRaises(RuntimeError): + performance.perf_get_memory(case, config) + + @mock.patch("CIME.baselines.performance._perf_get_memory") + def test_perf_get_memory_default(self, _perf_get_memory): + _perf_get_memory.return_value = [(1, 1000)] + + case = mock.MagicMock() + + config = mock.MagicMock() + + config.perf_get_memory.side_effect = AttributeError + + mem = performance.perf_get_memory(case, config) + + assert mem == "1000" + + def test_perf_get_memory(self): + case = mock.MagicMock() + + config = mock.MagicMock() + + config.perf_get_memory.return_value = "1000" + + mem = performance.perf_get_memory(case, config) + + assert mem == "1000" + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + def test_perf_get_throughput_default_no_value(self, _perf_get_throughput): + _perf_get_throughput.return_value = None + + case = mock.MagicMock() + + config = mock.MagicMock() + + config.perf_get_throughput.side_effect = AttributeError + + with self.assertRaises(RuntimeError): + 
performance.perf_get_throughput(case, config) + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + def test_perf_get_throughput_default(self, _perf_get_throughput): + _perf_get_throughput.return_value = 100 + + case = mock.MagicMock() + + config = mock.MagicMock() + + config.perf_get_throughput.side_effect = AttributeError + + tput = performance.perf_get_throughput(case, config) + + assert tput == "100" + + def test_perf_get_throughput(self): + case = mock.MagicMock() + + config = mock.MagicMock() + + config.perf_get_throughput.return_value = "100" + + tput = performance.perf_get_throughput(case, config) + + assert tput == "100" + + def test_get_cpl_throughput_no_file(self): + throughput = performance.get_cpl_throughput("/tmp/cpl.log") + + assert throughput is None + + def test_get_cpl_throughput(self): + with tempfile.TemporaryDirectory() as tempdir: + cpl_log_path = Path(tempdir, "cpl.log.gz") + + with gzip.open(cpl_log_path, "w") as fd: + fd.write(CPLLOG.encode("utf-8")) + + throughput = performance.get_cpl_throughput(str(cpl_log_path)) + + assert throughput == 719.635 + + def test_get_cpl_mem_usage_gz(self): + with tempfile.TemporaryDirectory() as tempdir: + cpl_log_path = Path(tempdir, "cpl.log.gz") + + with gzip.open(cpl_log_path, "w") as fd: + fd.write(CPLLOG.encode("utf-8")) + + mem_usage = performance.get_cpl_mem_usage(str(cpl_log_path)) + + assert mem_usage == [ + (10102.0, 1673.89), + (10103.0, 1673.89), + (10104.0, 1673.89), + (10105.0, 1673.89), + ] + + @mock.patch("CIME.baselines.performance.os.path.isfile") + def test_get_cpl_mem_usage(self, isfile): + isfile.return_value = True + + with mock.patch( + "builtins.open", mock.mock_open(read_data=CPLLOG.encode("utf-8")) + ) as mock_file: + mem_usage = performance.get_cpl_mem_usage("/tmp/cpl.log") + + assert mem_usage == [ + (10102.0, 1673.89), + (10103.0, 1673.89), + (10104.0, 1673.89), + (10105.0, 1673.89), + ] + + def test_read_baseline_file_multi_line(self): + with mock.patch( + 
"builtins.open", + mock.mock_open(read_data="#comment about data\n1000.0\n2000.0\n"), + ) as mock_file: + baseline = performance.read_baseline_file("/tmp/cpl-mem.log") + + mock_file.assert_called_with("/tmp/cpl-mem.log") + assert baseline == "1000.0\n2000.0" + + def test_read_baseline_file_content(self): + with mock.patch( + "builtins.open", mock.mock_open(read_data="1000.0") + ) as mock_file: + baseline = performance.read_baseline_file("/tmp/cpl-mem.log") + + mock_file.assert_called_with("/tmp/cpl-mem.log") + assert baseline == "1000.0" + + def test_read_baseline_file(self): + with mock.patch("builtins.open", mock.mock_open(read_data="")) as mock_file: + baseline = performance.read_baseline_file("/tmp/cpl-mem.log") + + mock_file.assert_called_with("/tmp/cpl-mem.log") + assert baseline == "" + + def test_write_baseline_file(self): + with mock.patch("builtins.open", mock.mock_open()) as mock_file: + performance.write_baseline_file("/tmp/cpl-tput.log", "1000") + + mock_file.assert_called_with("/tmp/cpl-tput.log", "w") + mock_file.return_value.write.assert_called_with("1000") + + @mock.patch("CIME.baselines.performance.get_cpl_throughput") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test__perf_get_throughput(self, get_latest_cpl_logs, get_cpl_throughput): + get_cpl_throughput.side_effect = FileNotFoundError() + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + tput = performance._perf_get_throughput(case) + + assert tput == None + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test__perf_get_memory_override(self, get_latest_cpl_logs, get_cpl_mem_usage): + get_cpl_mem_usage.side_effect = FileNotFoundError() + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + mem = performance._perf_get_memory(case, 
"/tmp/override") + + assert mem == None + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test__perf_get_memory(self, get_latest_cpl_logs, get_cpl_mem_usage): + get_cpl_mem_usage.side_effect = FileNotFoundError() + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + mem = performance._perf_get_memory(case) + + assert mem == None + + @mock.patch("CIME.baselines.performance.write_baseline_file") + @mock.patch("CIME.baselines.performance.perf_get_memory") + @mock.patch("CIME.baselines.performance.perf_get_throughput") + def test_write_baseline_skip( + self, perf_get_throughput, perf_get_memory, write_baseline_file + ): + perf_get_throughput.return_value = "100" + + perf_get_memory.return_value = "1000" + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir) + + performance.perf_write_baseline( + case, + baseline_root, + False, + False, + ) + + perf_get_throughput.assert_not_called() + perf_get_memory.assert_not_called() + write_baseline_file.assert_not_called() + + @mock.patch("CIME.baselines.performance.write_baseline_file") + @mock.patch("CIME.baselines.performance.perf_get_memory") + @mock.patch("CIME.baselines.performance.perf_get_throughput") + def test_write_baseline_runtimeerror( + self, perf_get_throughput, perf_get_memory, write_baseline_file + ): + perf_get_throughput.side_effect = RuntimeError + + perf_get_memory.side_effect = RuntimeError + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir) + + performance.perf_write_baseline(case, baseline_root) + + perf_get_throughput.assert_called() + perf_get_memory.assert_called() + write_baseline_file.assert_not_called() + + @mock.patch("CIME.baselines.performance.write_baseline_file") + @mock.patch("CIME.baselines.performance.perf_get_memory") + 
@mock.patch("CIME.baselines.performance.perf_get_throughput") + def test_perf_write_baseline( + self, perf_get_throughput, perf_get_memory, write_baseline_file + ): + perf_get_throughput.return_value = "100" + + perf_get_memory.return_value = "1000" + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir) + + performance.perf_write_baseline(case, baseline_root) + + perf_get_throughput.assert_called() + perf_get_memory.assert_called() + write_baseline_file.assert_any_call(str(baseline_root / "cpl-tput.log"), "100") + write_baseline_file.assert_any_call(str(baseline_root / "cpl-mem.log"), "1000") + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_throughput_baseline_no_baseline_file( + self, get_latest_cpl_logs, read_baseline_file, _perf_get_throughput + ): + read_baseline_file.side_effect = FileNotFoundError + + _perf_get_throughput.return_value = 504 + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_value.side_effect = ( + str(baseline_root), + "master/ERIO.ne30_g16_rx1.A.docker_gnu", + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_throughput_baseline( + case + ) + + assert below_tolerance is None + assert comment == "Could not read baseline throughput file: " + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_throughput_baseline_no_baseline( + self, get_latest_cpl_logs, read_baseline_file, _perf_get_throughput + ): + read_baseline_file.return_value = "" + + _perf_get_throughput.return_value = 504 + + with tempfile.TemporaryDirectory() as tempdir: + 
case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_throughput_baseline( + case + ) + + assert below_tolerance is None + assert ( + comment + == "Could not compare throughput to baseline, as basline had no value." + ) + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_throughput_baseline_no_tolerance( + self, get_latest_cpl_logs, read_baseline_file, _perf_get_throughput + ): + read_baseline_file.return_value = "500" + + _perf_get_throughput.return_value = 504 + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + None, + ) + + (below_tolerance, comment) = performance.perf_compare_throughput_baseline( + case + ) + + assert below_tolerance + assert ( + comment + == "TPUTCOMP: Computation time changed by -0.80% relative to baseline" + ) + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_throughput_baseline_above_threshold( + self, get_latest_cpl_logs, read_baseline_file, _perf_get_throughput + ): + read_baseline_file.return_value = "1000" + + _perf_get_throughput.return_value = 504 + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + 
case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_throughput_baseline( + case + ) + + assert not below_tolerance + assert ( + comment == "Error: TPUTCOMP: Computation time increase > 5% from baseline" + ) + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_throughput_baseline( + self, get_latest_cpl_logs, read_baseline_file, _perf_get_throughput + ): + read_baseline_file.return_value = "500" + + _perf_get_throughput.return_value = 504 + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_throughput_baseline( + case + ) + + assert below_tolerance + assert ( + comment + == "TPUTCOMP: Computation time changed by -0.80% relative to baseline" + ) + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline_no_baseline( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.return_value = "" + + get_cpl_mem_usage.return_value = [ + (1, 1000.0), + (2, 1001.0), + (3, 1002.0), + (4, 1003.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / 
"ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_memory_baseline(case) + + assert below_tolerance + assert ( + comment + == "MEMCOMP: Memory usage highwater has changed by 0.00% relative to baseline" + ) + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline_not_enough_samples( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.return_value = ["1000.0"] + + get_cpl_mem_usage.return_value = [ + (1, 1000.0), + (2, 1001.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_value.side_effect = ( + str(baseline_root), + "master/ERIO.ne30_g16_rx1.A.docker_gnu", + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_memory_baseline(case) + + assert below_tolerance is None + assert comment == "Found 2 memory usage samples, need atleast 4" + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline_no_baseline_file( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.side_effect = FileNotFoundError + + get_cpl_mem_usage.return_value = [ + (1, 1000.0), + (2, 1001.0), + (3, 1002.0), + (4, 1003.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_value.side_effect = ( + str(baseline_root), + "master/ERIO.ne30_g16_rx1.A.docker_gnu", + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = 
performance.perf_compare_memory_baseline(case) + + assert below_tolerance is None + assert comment == "Could not read baseline memory usage: " + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline_no_tolerance( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.return_value = "1000.0" + + get_cpl_mem_usage.return_value = [ + (1, 1000.0), + (2, 1001.0), + (3, 1002.0), + (4, 1003.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + None, + ) + + (below_tolerance, comment) = performance.perf_compare_memory_baseline(case) + + assert below_tolerance + assert ( + comment + == "MEMCOMP: Memory usage highwater has changed by 0.30% relative to baseline" + ) + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline_above_threshold( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.return_value = "1000.0" + + get_cpl_mem_usage.return_value = [ + (1, 2000.0), + (2, 2001.0), + (3, 2002.0), + (4, 2003.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_memory_baseline(case) + + 
assert not below_tolerance + assert ( + comment + == "Error: Memory usage increase >5% from baseline's 1000.000000 to 2003.000000" + ) + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.return_value = "1000.0" + + get_cpl_mem_usage.return_value = [ + (1, 1000.0), + (2, 1001.0), + (3, 1002.0), + (4, 1003.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_memory_baseline(case) + + assert below_tolerance + assert ( + comment + == "MEMCOMP: Memory usage highwater has changed by 0.30% relative to baseline" + ) + + def test_get_latest_cpl_logs_found_multiple(self): + with tempfile.TemporaryDirectory() as tempdir: + run_dir = Path(tempdir) / "run" + run_dir.mkdir(parents=True, exist_ok=False) + + cpl_log_path = run_dir / "cpl.log.gz" + cpl_log_path.touch() + + cpl_log_2_path = run_dir / "cpl-2023-01-01.log.gz" + cpl_log_2_path.touch() + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(run_dir), + "mct", + ) + + latest_cpl_logs = performance.get_latest_cpl_logs(case) + + assert len(latest_cpl_logs) == 2 + assert sorted(latest_cpl_logs) == sorted( + [str(cpl_log_path), str(cpl_log_2_path)] + ) + + def test_get_latest_cpl_logs_found_single(self): + with tempfile.TemporaryDirectory() as tempdir: + run_dir = Path(tempdir) / "run" + run_dir.mkdir(parents=True, exist_ok=False) + + cpl_log_path = run_dir / "cpl.log.gz" + cpl_log_path.touch() + + case = mock.MagicMock() + 
case.get_value.side_effect = ( + str(run_dir), + "mct", + ) + + latest_cpl_logs = performance.get_latest_cpl_logs(case) + + assert len(latest_cpl_logs) == 1 + assert latest_cpl_logs[0] == str(cpl_log_path) + + def test_get_latest_cpl_logs(self): + case = mock.MagicMock() + case.get_value.side_effect = ( + f"/tmp/run", + "mct", + ) + + latest_cpl_logs = performance.get_latest_cpl_logs(case) + + assert len(latest_cpl_logs) == 0 diff --git a/CIME/tests/test_unit_system_tests.py b/CIME/tests/test_unit_system_tests.py index 3bd091900e3..99486178f36 100644 --- a/CIME/tests/test_unit_system_tests.py +++ b/CIME/tests/test_unit_system_tests.py @@ -1,17 +1,522 @@ #!/usr/bin/env python3 import os +import tempfile +import gzip from re import A import unittest from unittest import mock from pathlib import Path +from CIME.config import Config from CIME.SystemTests.system_tests_common import SystemTestsCommon from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo from CIME.SystemTests.system_tests_compare_n import SystemTestsCompareN +CPLLOG = """ + tStamp_write: model date = 00010102 0 wall clock = 2023-09-19 19:39:42 avg dt = 0.33 dt = 0.33 + memory_write: model date = 00010102 0 memory = 1673.89 MB (highwater) 387.77 MB (usage) (pe= 0 comps= cpl ATM LND ICE OCN GLC ROF WAV IAC ESP) + tStamp_write: model date = 00010103 0 wall clock = 2023-09-19 19:39:42 avg dt = 0.33 dt = 0.33 + memory_write: model date = 00010103 0 memory = 1673.89 MB (highwater) 390.09 MB (usage) (pe= 0 comps= cpl ATM LND ICE OCN GLC ROF WAV IAC ESP) + tStamp_write: model date = 00010104 0 wall clock = 2023-09-19 19:39:42 avg dt = 0.33 dt = 0.33 + memory_write: model date = 00010104 0 memory = 1673.89 MB (highwater) 391.64 MB (usage) (pe= 0 comps= cpl ATM LND ICE OCN GLC ROF WAV IAC ESP) + tStamp_write: model date = 00010105 0 wall clock = 2023-09-19 19:39:43 avg dt = 0.33 dt = 0.33 + memory_write: model date = 00010105 0 memory = 1673.89 MB (highwater) 392.67 MB (usage) (pe= 0 comps= 
cpl ATM LND ICE OCN GLC ROF WAV IAC ESP) + tStamp_write: model date = 00010106 0 wall clock = 2023-09-19 19:39:43 avg dt = 0.33 dt = 0.33 + memory_write: model date = 00010106 0 memory = 1673.89 MB (highwater) 393.44 MB (usage) (pe= 0 comps= cpl ATM LND ICE OCN GLC ROF WAV IAC ESP) + +(seq_mct_drv): =============== SUCCESSFUL TERMINATION OF CPL7-e3sm =============== +(seq_mct_drv): =============== at YMD,TOD = 00010106 0 =============== +(seq_mct_drv): =============== # simulated days (this run) = 5.000 =============== +(seq_mct_drv): =============== compute time (hrs) = 0.000 =============== +(seq_mct_drv): =============== # simulated years / cmp-day = 719.635 =============== +(seq_mct_drv): =============== pes min memory highwater (MB) 851.957 =============== +(seq_mct_drv): =============== pes max memory highwater (MB) 1673.891 =============== +(seq_mct_drv): =============== pes min memory last usage (MB) 182.742 =============== +(seq_mct_drv): =============== pes max memory last usage (MB) 393.441 =============== +""" + + +def create_mock_case(tempdir, idx=None, cpllog_data=None): + if idx is None: + idx = 0 + + case = mock.MagicMock() + + caseroot = Path(tempdir, str(idx), "caseroot") + baseline_root = caseroot.parent / "baselines" + run_dir = caseroot / "run" + run_dir.mkdir(parents=True, exist_ok=False) + + if cpllog_data is not None: + cpllog = run_dir / "cpl.log.gz" + + with gzip.open(cpllog, "w") as fd: + fd.write(cpllog_data.encode("utf-8")) + + case.get_latest_cpl_log.return_value = str(cpllog) + + hist_file = run_dir / "cpl.hi.2023-01-01.nc" + hist_file.touch() + + case.get_env.return_value.get_latest_hist_files.return_value = [str(hist_file)] + + case.get_compset_components.return_value = [] + + return case, caseroot, baseline_root, run_dir + + +class TestUnitSystemTests(unittest.TestCase): + @mock.patch("CIME.SystemTests.system_tests_common.load_coupler_customization") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + 
@mock.patch("CIME.SystemTests.system_tests_common._perf_get_memory") + @mock.patch("CIME.SystemTests.system_tests_common.get_latest_cpl_logs") + def test_check_for_memleak_runtime_error( + self, + get_latest_cpl_logs, + _perf_get_memory, + append_testlog, + load_coupler_customization, + ): + load_coupler_customization.return_value.perf_check_for_memory_leak.side_effect = ( + AttributeError + ) + + _perf_get_memory.side_effect = RuntimeError + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + rundir = caseroot / "run" + rundir.mkdir(parents=True, exist_ok=False) + + cpllog = rundir / "cpl.log.gz" + + get_latest_cpl_logs.return_value = [ + str(cpllog), + ] + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + 0.01, + ) + + common = SystemTestsCommon(case) + + common._test_status = mock.MagicMock() + + common._check_for_memleak() + + common._test_status.set_status.assert_any_call( + "MEMLEAK", "PASS", comments="insufficient data for memleak test" + ) + + append_testlog.assert_not_called() + + @mock.patch("CIME.SystemTests.system_tests_common.load_coupler_customization") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + @mock.patch("CIME.SystemTests.system_tests_common._perf_get_memory") + @mock.patch("CIME.SystemTests.system_tests_common.get_latest_cpl_logs") + def test_check_for_memleak_not_enough_samples( + self, + get_latest_cpl_logs, + _perf_get_memory, + append_testlog, + load_coupler_customization, + ): + load_coupler_customization.return_value.perf_check_for_memory_leak.side_effect = ( + AttributeError + ) + + _perf_get_memory.return_value = [ + (1, 1000.0), + (2, 0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + rundir = caseroot / "run" + rundir.mkdir(parents=True, exist_ok=False) + 
+ cpllog = rundir / "cpl.log.gz" + + get_latest_cpl_logs.return_value = [ + str(cpllog), + ] + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + 0.01, + ) + + common = SystemTestsCommon(case) + + common._test_status = mock.MagicMock() + + common._check_for_memleak() + + common._test_status.set_status.assert_any_call( + "MEMLEAK", "PASS", comments="data for memleak test is insufficient" + ) + + append_testlog.assert_not_called() + + @mock.patch("CIME.SystemTests.system_tests_common.load_coupler_customization") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + @mock.patch("CIME.SystemTests.system_tests_common._perf_get_memory") + @mock.patch("CIME.SystemTests.system_tests_common.get_latest_cpl_logs") + def test_check_for_memleak_found( + self, + get_latest_cpl_logs, + _perf_get_memory, + append_testlog, + load_coupler_customization, + ): + load_coupler_customization.return_value.perf_check_for_memory_leak.side_effect = ( + AttributeError + ) + + _perf_get_memory.return_value = [ + (1, 1000.0), + (2, 2000.0), + (3, 3000.0), + (4, 3000.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + rundir = caseroot / "run" + rundir.mkdir(parents=True, exist_ok=False) + + cpllog = rundir / "cpl.log.gz" + + get_latest_cpl_logs.return_value = [ + str(cpllog), + ] + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + 0.01, + ) + + common = SystemTestsCommon(case) + + common._test_status = mock.MagicMock() + + common._check_for_memleak() + + expected_comment = "memleak detected, memory went from 2000.000000 to 3000.000000 in 2 days" + + common._test_status.set_status.assert_any_call( + "MEMLEAK", "FAIL", comments=expected_comment + ) + + append_testlog.assert_any_call(expected_comment, str(caseroot)) + + 
@mock.patch("CIME.SystemTests.system_tests_common.load_coupler_customization") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + @mock.patch("CIME.SystemTests.system_tests_common._perf_get_memory") + @mock.patch("CIME.SystemTests.system_tests_common.get_latest_cpl_logs") + def test_check_for_memleak( + self, + get_latest_cpl_logs, + _perf_get_memory, + append_testlog, + load_coupler_customization, + ): + load_coupler_customization.return_value.perf_check_for_memory_leak.side_effect = ( + AttributeError + ) + + _perf_get_memory.return_value = [ + (1, 3040.0), + (2, 3002.0), + (3, 3030.0), + (4, 3008.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + rundir = caseroot / "run" + rundir.mkdir(parents=True, exist_ok=False) + + cpllog = rundir / "cpl.log.gz" + + get_latest_cpl_logs.return_value = [ + str(cpllog), + ] + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + 0.01, + ) + + common = SystemTestsCommon(case) + + common._test_status = mock.MagicMock() + + common._check_for_memleak() + + common._test_status.set_status.assert_any_call( + "MEMLEAK", "PASS", comments="" + ) + + append_testlog.assert_not_called() + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_throughput_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_throughput(self, append_testlog, perf_compare_throughput_baseline): + perf_compare_throughput_baseline.return_value = ( + True, + "TPUTCOMP: Computation time changed by 2.00% relative to baseline", + ) + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(Path(tempdir) / "caseroot"), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + ) + + common = 
SystemTestsCommon(case) + + common._compare_throughput() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_any_call( + "TPUTCOMP: Computation time changed by 2.00% relative to baseline", + str(caseroot), + ) + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_throughput_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_throughput_error_diff( + self, append_testlog, perf_compare_throughput_baseline + ): + perf_compare_throughput_baseline.return_value = (None, "Error diff value") + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(Path(tempdir) / "caseroot"), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + ) + + common = SystemTestsCommon(case) + + common._compare_throughput() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_not_called() + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_throughput_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_throughput_fail( + self, append_testlog, perf_compare_throughput_baseline + ): + perf_compare_throughput_baseline.return_value = ( + False, + "Error: TPUTCOMP: Computation time increase > 5% from baseline", + ) + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(Path(tempdir) / "caseroot"), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + ) + + common = SystemTestsCommon(case) + + common._compare_throughput() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_any_call( + "Error: TPUTCOMP: Computation time increase > 5% from baseline", + 
str(caseroot), + ) + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_memory_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_memory(self, append_testlog, perf_compare_memory_baseline): + perf_compare_memory_baseline.return_value = ( + True, + "MEMCOMP: Memory usage highwater has changed by 2.00% relative to baseline", + ) + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + ) + + common = SystemTestsCommon(case) + + common._compare_memory() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_any_call( + "MEMCOMP: Memory usage highwater has changed by 2.00% relative to baseline", + str(caseroot), + ) + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_memory_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_memory_erorr_diff( + self, append_testlog, perf_compare_memory_baseline + ): + perf_compare_memory_baseline.return_value = (None, "Error diff value") + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + ) + + common = SystemTestsCommon(case) + + common._compare_memory() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_not_called() + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_memory_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_memory_erorr_fail( + self, append_testlog, perf_compare_memory_baseline + ): + perf_compare_memory_baseline.return_value = ( 
+ False, + "Error: Memory usage increase >5% from baseline's 1000.000000 to 1002.000000", + ) + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + ) + + common = SystemTestsCommon(case) + + common._compare_memory() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_any_call( + "Error: Memory usage increase >5% from baseline's 1000.000000 to 1002.000000", + str(caseroot), + ) + + def test_generate_baseline(self): + with tempfile.TemporaryDirectory() as tempdir: + case, caseroot, baseline_root, run_dir = create_mock_case( + tempdir, cpllog_data=CPLLOG + ) + + get_value_calls = [ + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + str(run_dir), + "case.std", + str(baseline_root), + "master/ERIO.ne30_g16_rx1.A.docker_gnu", + "ERIO.ne30_g16_rx1.A.docker_gnu.G.20230919_193255_z9hg2w", + "mct", + str(run_dir), + "ERIO", + "ERIO.ne30_g16_rx1.A.docker_gnu", + "master/ERIO.ne30_g16_rx1.A.docker_gnu", + str(baseline_root), + "master/ERIO.ne30_g16_rx1.A.docker_gnu", + str(run_dir), + "mct", + "/tmp/components/cpl", + str(run_dir), + "mct", + str(run_dir), + "mct", + ] + + if Config.instance().create_bless_log: + get_value_calls.insert(12, os.getcwd()) + + case.get_value.side_effect = get_value_calls + + common = SystemTestsCommon(case) + + common._generate_baseline() + + baseline_dir = baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + + assert (baseline_dir / "cpl.log.gz").exists() + assert (baseline_dir / "cpl-tput.log").exists() + assert (baseline_dir / "cpl-mem.log").exists() + assert (baseline_dir / "cpl.hi.2023-01-01.nc").exists() + + with open(baseline_dir / "cpl-tput.log") as fd: + lines = fd.readlines() + + assert len(lines) == 1 + assert lines[0] == "719.635" + + with open(baseline_dir / 
"cpl-mem.log") as fd: + lines = fd.readlines() + + assert len(lines) == 1 + assert lines[0] == "1673.89" -class TestCaseSubmit(unittest.TestCase): def test_kwargs(self): case = mock.MagicMock() diff --git a/CIME/utils.py b/CIME/utils.py index 1293f6d3171..1a32319c8d1 100644 --- a/CIME/utils.py +++ b/CIME/utils.py @@ -1625,20 +1625,25 @@ def find_files(rootdir, pattern): def setup_standard_logging_options(parser): + group = parser.add_argument_group("Logging options") + helpfile = os.path.join(os.getcwd(), os.path.basename("{}.log".format(sys.argv[0]))) - parser.add_argument( + + group.add_argument( "-d", "--debug", action="store_true", help="Print debug information (very verbose) to file {}".format(helpfile), ) - parser.add_argument( + + group.add_argument( "-v", "--verbose", action="store_true", help="Add additional context (time and file) to log messages", ) - parser.add_argument( + + group.add_argument( "-s", "--silent", action="store_true", diff --git a/doc/source/users_guide/testing.rst b/doc/source/users_guide/testing.rst index 8ea7c29467c..40868d2bbdd 100644 --- a/doc/source/users_guide/testing.rst +++ b/doc/source/users_guide/testing.rst @@ -371,29 +371,148 @@ Interpreting test output is pretty easy, looking at an example:: You can see that `create_test <../Tools_user/create_test.html>`_ informs the user of the case directory and of the progress and duration of the various test phases. -=================== -Managing baselines -=================== -.. _`Managing baselines`: +========= +Baselines +========= +.. _`Baselines`: -A big part of testing is managing your baselines (sometimes called gold results). We have provided -tools to help the user do this without having to repeat full runs of test cases with `create_test <../Tools_user/create_test.html>`_ . +A big part of testing is managing your baselines (sometimes called gold results). 
We have provided tools to help the user do this without having to repeat full runs of test cases with `create_test <../Tools_user/create_test.html>`_ . -bless_test_results: Takes a batch of cases of tests that have already been run and copy their -results to a baseline area. +------------------- +Creating a baseline +------------------- +.. _`Creating a baseline`: -compare_test_results: Takes a batch of cases of tests that have already been run and compare their -results to a baseline area. +A baseline can be generated by passing ``-g`` to `create_test <../Tools_user/create_test.html>`_. There are additional options to control generating baselines.:: -Take a batch of results for the jenkins user for the testid 'mytest' and copy the results to -baselines for 'master':: + ./scripts/create_test -b master -g SMS.ne30_f19_g16_rx1.A - ./bless_test_results -r /home/jenkins/e3sm/scratch/jenkins/ -t mytest -b master +-------------------- +Comparing a baseline +-------------------- +.. _`Comparing a baseline`: -Take a batch of results for the jenkins user for the testid 'mytest' and compare the results to -baselines for 'master':: +Comparing the output of a test to a baseline is achieved by passing ``-c`` to `create_test <../Tools_user/create_test.html>`_.:: + + ./scripts/create_test -b master -c SMS.ne30_f19_g16_rx1.A + +------------------ +Managing baselines +------------------ +.. _`Managing baselines`: - ./compare_test_results -r /home/jenkins/e3sm/scratch/jenkins/ -t mytest -b master +Once a baseline has been generated it can be managed using the `bless_test_results <../Tools_user/bless_test_results.html>`_ tool. The tool provides the ability to bless different features of the baseline. The currently supported features are namelist files, history files, and performance metrics. The performance metrics are separated into throughput and memory usage. 
+
+The following command can be used to compare a test to a baseline and bless an update to the history file.::
+
+    ./CIME/Tools/bless_test_results -b master --hist-only SMS.ne30_f19_g16_rx1.A
+
+The `compare_test_results <../Tools_user/compare_test_results.html>`_ tool can be used to quickly compare tests to baselines and report any ``diffs``.::
+
+    ./CIME/Tools/compare_test_results -b master SMS.ne30_f19_g16_rx1.A
+
+---------------------
+Performance baselines
+---------------------
+.. _`Performance baselines`:
+By default performance baselines are generated by parsing the coupler log and comparing the throughput in SYPD (Simulated Years Per Day) and the memory usage high water.
+
+This can be customized by creating a python module under ``$DRIVER_ROOT/cime_config/customize``. There are four hooks that can be used to customize the generation and comparison.
+
+- perf_get_throughput
+- perf_get_memory
+- perf_compare_throughput_baseline
+- perf_compare_memory_baseline
+
+..
+    TODO need to add api docs and link
+The following pseudo code is an example of this customization.::
+
+    # $DRIVER/cime_config/customize/perf_baseline.py
+
+    def perf_get_throughput(case):
+        """
+        Parameters
+        ----------
+        case : CIME.case.case.Case
+            Current case object.
+
+        Returns
+        -------
+        str
+            Storing throughput value.
+        """
+        current = analyze_throughput(...)
+
+        return json.dumps(current)
+
+    def perf_get_memory(case):
+        """
+        Parameters
+        ----------
+        case : CIME.case.case.Case
+            Current case object.
+
+        Returns
+        -------
+        str
+            Storing memory value.
+        """
+        current = analyze_memory(case)
+
+        return json.dumps(current)
+
+    def perf_compare_throughput_baseline(case, baseline, tolerance):
+        """
+        Parameters
+        ----------
+        case : CIME.case.case.Case
+            Current case object.
+        baseline : str
+            Baseline throughput value.
+        tolerance : float
+            Allowed difference tolerance.
+
+        Returns
+        -------
+        bool
+            Whether throughput diff is below tolerance.
+        str
+            Comments about the results.
+ """ + current = analyze_throughput(case) + + baseline = json.loads(baseline) + + diff, comments = generate_diff(...) + + return diff, comments + + def perf_compare_memory_baseline(case, baseline, tolerance): + """ + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline : str + Baseline memory value. + tolerance : float + Allowed difference tolerance. + + Returns + ------- + bool + Whether memory diff is below tolerance. + str + Comments about the results. + """ + current = analyze_memory(case) + + baseline = json.loads(baseline) + + diff, comments = generate_diff(...) + + return diff, comments ============= Adding tests diff --git a/setup.cfg b/setup.cfg index 772767f44b9..1c4058ebd85 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,7 +16,6 @@ console_scripts = [tool:pytest] junit_family=xunit2 -addopts = --cov=CIME --cov-report term-missing --cov-report html:test_coverage/html --cov-report xml:test_coverage/coverage.xml -s python_files = test_*.py testpaths = CIME/tests