diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 5ed818d..2fcaa1f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -11,68 +11,33 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v2 - - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: 3.8 # Specify the desired Python version - - - name: Set up Conda - uses: conda-incubator/setup-miniconda@v2 + uses: actions/checkout@v3 with: - channel: 'defaults' # Specify the Conda channel you want to use - python-version: 3.8 # Specify the Python version for the Conda environment - - - name: Set up Conda and initialize shell + path: WrfHydroForcing + + - name: Run testing container run: | - conda init bash - conda create -n esmpy_env - eval "$(conda shell.bash hook)" - source activate esmpy_env - shell: bash + chmod -R 0777 $GITHUB_WORKSPACE/WrfHydroForcing && \ + docker run -t --name test_container \ + -v $GITHUB_WORKSPACE/WrfHydroForcing:/home/docker/WrfHydroForcing \ + wrfhydro/wrf_hydro_forcing:latest - - name: Set up ESMF and wgrib2 - run: | - mkdir $HOME/.esmf-src \ - cd $HOME/.esmf-src \ - wget http://www.earthsystemmodeling.org/esmf_releases/public/ESMF_7_1_0r/esmf_7_1_0r_src.tar.gz \ - tar xfz esmf_7_1_0r_src.tar.gz \ - cd esmf \ - export ESMF_DIR=$PWD \ - export ESMF_COMM=mpich3 \ - make \ - make install - - cd $HOME/.esmf-src/esmf/src/addon/ESMPy - python setup.py build --ESMFMKFILE=$HOME/.esmf-src/esmf/DEFAULTINSTALLDIR/lib/libO/Linux.gfortran.64.mpich3.default/esmf.mk install + # - name: Copy test results from container + # if: ${{ always() }} + # run: docker cp test_container:/home/docker/test_out $GITHUB_WORKSPACE/test_report - cd $HOME - wget ftp://ftp.cpc.ncep.noaa.gov/wd51we/wgrib2/wgrib2.tgz - tar xfz wgrib2.tgz - rm wgrib2.tgz - cd grib2 - export CC=gcc - export FC=gfortran - make - make lib - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -r requirements.txt # If you have a requirements.txt file - conda install -c conda-forge esmpy - python setup.py install - - name: Run pytest - run: pytest + # - name: Run pytest + # run: | + # cd core/tests + # pytest - # Optional: Generate test coverage report - - name: Install coverage tool - run: pip install coverage + # # Optional: Generate test coverage report + # - name: Install coverage tool + # run: pip install coverage - - name: Run pytest with coverage - run: coverage run -m pytest + # - name: Run pytest with coverage + # run: coverage run -m pytest - - name: Generate coverage report - run: coverage html -i + # - name: Generate coverage report + # run: coverage html -i diff --git a/core/Regridding/regrid_base.py b/core/Regridding/regrid_base.py index c47803a..d7e2442 100644 --- a/core/Regridding/regrid_base.py +++ b/core/Regridding/regrid_base.py @@ -3,7 +3,12 @@ from abc import ABC, abstractmethod from contextlib import contextmanager -import ESMF +# ESMF was renamed to esmpy in v8.4.0 +try: + import esmpy as ESMF +except ModuleNotFoundError: + import ESMF + import numpy as np from core import err_handler diff --git a/core/bias_correction.py b/core/bias_correction.py index ae46c7a..7bf98f8 100755 --- a/core/bias_correction.py +++ b/core/bias_correction.py @@ -44,6 +44,7 @@ def run_bias_correction(input_forcings, config_options, geo_meta_wrf_hydro, mpi_ str(BiasCorrTempEnum.GFS.name) : ncar_temp_gfs_bias_correct, str(BiasCorrTempEnum.HRRR.name) : ncar_temp_hrrr_bias_correct } + 
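# The mapping above associates each BiasCorrTempEnum option name with its temperature
# bias-correction routine; the call that follows simply looks up the handler selected by
# input_forcings.t2dBiasCorrectOpt and dispatches to it.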
bias_correct_temperature[input_forcings.t2dBiasCorrectOpt](input_forcings, config_options, mpi_config, 0) err_handler.check_program_status(config_options, mpi_config) @@ -383,7 +384,7 @@ def ncar_sw_hrrr_bias_correct(input_forcings, geo_meta_wrf_hydro, config_options # the cosine of the sol_zen_ang is proportional to the solar intensity # (not accounting for humidity or cloud cover) sol_cos = np.sin(geo_meta_wrf_hydro.latitude_grid * d2r) * math.sin(decl) + np.cos(geo_meta_wrf_hydro.latitude_grid * d2r) * math.cos(decl) * np.cos(ha) - + # Check for any values outside of [-1,1] (this can happen due to floating point rounding) sol_cos[np.where(sol_cos < -1.0)] = -1.0 sol_cos[np.where(sol_cos > 1.0)] = 1.0 @@ -794,6 +795,7 @@ def ncar_wspd_gfs_bias_correct(input_forcings, config_options, mpi_config, force ugrd_idx = input_forcings.grib_vars.index('UGRD') vgrd_idx = input_forcings.grib_vars.index('VGRD') + OutputEnum = config_options.OutputEnum for ind, xvar in enumerate(OutputEnum): if xvar.name == input_forcings.input_map_output[ugrd_idx]: u_in = ind @@ -820,20 +822,19 @@ def ncar_wspd_gfs_bias_correct(input_forcings, config_options, mpi_config, force # TODO: cache the "other" value so we don't repeat this calculation unnecessarily bias_corrected = ugrid_out if force_num == ugrd_idx else vgrid_out - OutputEnum = config_options.OutputEnum for ind, xvar in enumerate(OutputEnum): if xvar.name == input_forcings.input_map_output[force_num]: outId = ind break wind_in = None try: - wind_in = input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :] + wind_in = input_forcings.final_forcings[outId, :, :] except NumpyExceptions as npe: config_options.errMsg = "Unable to extract incoming windspeed from forcing object for: " + \ input_forcings.productName + " (" + str(npe) + ")" err_handler.log_critical(config_options, mpi_config) err_handler.check_program_status(config_options, mpi_config) - + ind_valid = None try: ind_valid = np.where(wind_in != config_options.globalNdv) @@ -852,7 +853,7 @@ def ncar_wspd_gfs_bias_correct(input_forcings, config_options, mpi_config, force err_handler.check_program_status(config_options, mpi_config) try: - input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :] = wind_in[:, :] + input_forcings.final_forcings[outId, :, :] = wind_in[:, :] except NumpyExceptions as npe: config_options.errMsg = "Unable to place temporary windspeed array back into forcing object for: " + \ input_forcings.productName + " (" + str(npe) + ")" @@ -970,6 +971,7 @@ def cfsv2_nldas_nwm_bias_correct(input_forcings, config_options, mpi_config, for # Open the NetCDF file. try: id_nldas_param = Dataset(nldas_param_file, 'r') + id_nldas_param.set_auto_mask(False) # don't mask missing since it won't survive broadcast except OSError as err: config_options.errMsg = "Unable to open parameter file: " + nldas_param_file + " (" + str(err) + ")" err_handler.log_critical(config_options, mpi_config) @@ -1078,6 +1080,7 @@ def cfsv2_nldas_nwm_bias_correct(input_forcings, config_options, mpi_config, for nldas_param_1 = None nldas_param_2 = None nldas_zero_pcp = None + err_handler.check_program_status(config_options, mpi_config) # Scatter NLDAS parameters @@ -1298,7 +1301,7 @@ def cfsv2_nldas_nwm_bias_correct(input_forcings, config_options, mpi_config, for # regridding object. # 6.) Place the data into the final output arrays for further processing (downscaling). # 7.) Reset variables for memory efficiency and exit the routine. 
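# A minimal sketch of the index lookup this change set applies throughout
# bias_correction.py: the string key input_forcings.input_map_output[force_num] is resolved
# once against config_options.OutputEnum, and the resulting integer (outId) is then used to
# address coarse_input_forcings1/2 and final_forcings instead of indexing with the string.
# The names mirror those used in the surrounding routine.
#
#     outId = None
#     for ind, xvar in enumerate(config_options.OutputEnum):
#         if xvar.name == input_forcings.input_map_output[force_num]:
#             outId = ind
#             break
#     cfs_prev_tmp = input_forcings.coarse_input_forcings1[outId, y_local, x_local]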
- + np.seterr(all='ignore') if mpi_config.rank == 0: @@ -1314,12 +1317,15 @@ def cfsv2_nldas_nwm_bias_correct(input_forcings, config_options, mpi_config, for config_options.statusMsg = "Looping over local arrays to calculate bias corrections." err_handler.log_msg(config_options, mpi_config) # Process each of the pixel cells for this local processor on the CFS grid. + outId = None + for ind, xvar in enumerate(config_options.OutputEnum): + if xvar.name == input_forcings.input_map_output[force_num]: + outId = ind + break for x_local in range(0, input_forcings.nx_local): for y_local in range(0, input_forcings.ny_local): - cfs_prev_tmp = input_forcings.coarse_input_forcings1[input_forcings.input_map_output[force_num], - y_local, x_local] - cfs_next_tmp = input_forcings.coarse_input_forcings2[input_forcings.input_map_output[force_num], - y_local, x_local] + cfs_prev_tmp = input_forcings.coarse_input_forcings1[outId, y_local, x_local] + cfs_next_tmp = input_forcings.coarse_input_forcings2[outId, y_local, x_local] # Check for any missing parameter values. If any missing values exist, # set this flag to False. Further down, if it's False, we will simply @@ -1541,9 +1547,9 @@ def cfsv2_nldas_nwm_bias_correct(input_forcings, config_options, mpi_config, for err_handler.check_program_status(config_options, mpi_config) try: - input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :] = \ + input_forcings.final_forcings[outId, :, :] = \ input_forcings.esmf_field_out.data except NumpyExceptions as npe: config_options.errMsg = "Unable to extract ESMF field data for CFSv2: " + str(npe) err_handler.log_critical(config_options, mpi_config) - err_handler.check_program_status(config_options, mpi_config) + err_handler.check_program_status(config_options, mpi_config) \ No newline at end of file diff --git a/core/config.py b/core/config.py index 65e68b4..ac022a5 100755 --- a/core/config.py +++ b/core/config.py @@ -98,7 +98,7 @@ def __init__(self, config): self.customFcstFreq = None self.rqiMethod = None self.rqiThresh = 1.0 - self.globalNdv = -9999.0 + self.globalNdv = -999999.0 self.d_program_init = datetime.datetime.utcnow() self.errFlag = 0 self.nwmVersion = None @@ -140,7 +140,18 @@ def read_config(self): self.number_inputs = len(inputs) # Create Enums dynamically from the yaml files - self.forcingInputModYaml = config['YamlConfig']['forcingInputModYaml'] + try: + yaml_config = config['YamlConfig'] + except KeyError: + raise KeyError("Unable to locate YamlConfig in configuration file.") + err_handler.err_out_screen('Unable to locate YamlConfig in configuration file.') + + try: + self.forcingInputModYaml = yaml_config['forcingInputModYaml'] + except KeyError: + raise KeyError("Unable to locate forcingInputModYaml in configuration file.") + err_handler.err_out_screen('Unable to locate forcingInputModYaml in configuration file.') + forcing_yaml_stream = open(self.forcingInputModYaml) forcingConfig = yaml.safe_load(forcing_yaml_stream) dynamicForcing = {} @@ -148,15 +159,24 @@ def read_config(self): dynamicForcing[k] = k self.ForcingEnum = enum.Enum('ForcingEnum', dynamicForcing) - self.suppPrecipModYaml = config['YamlConfig']['suppPrecipModYaml'] + try: + self.suppPrecipModYaml = yaml_config['suppPrecipModYaml'] + except KeyError: + raise KeyError("Unable to locate suppPrecipModYaml in configuration file.") + err_handler.err_out_screen('Unable to locate suppPrecipModYaml in configuration file.') + supp_yaml_stream = open(self.suppPrecipModYaml) suppConfig = yaml.safe_load(supp_yaml_stream) 
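# As with the forcing YAML above, the option names read from the supplemental-precip YAML
# are collected into a dict keyed by themselves and converted into SuppForcingPcpEnum via
# enum.Enum in the lines that follow.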
dynamicSupp = {} for k in suppConfig.keys(): dynamicSupp[k] = k self.SuppForcingPcpEnum = enum.Enum('SuppForcingPcpEnum', dynamicSupp) + try: + self.outputVarAttrYaml = yaml_config['outputVarAttrYaml'] + except KeyError: + raise KeyError("Unable to locate outputVarAttrYaml in configuration file.") + err_handler.err_out_screen('Unable to locate outputVarAttrYaml in configuration file.') - self.outputVarAttrYaml = config['YamlConfig']['outputVarAttrYaml'] out_yaml_stream = open(self.outputVarAttrYaml) outConfig = yaml.safe_load(out_yaml_stream) dynamicOut = {} @@ -401,25 +421,25 @@ def read_config(self): # Putting a constraint here that CFSv2-NLDAS bias correction (NWM only) is chosen, it must be turned on # for ALL variables. if self.runCfsNldasBiasCorrect: - if set(self.precipBiasCorrectOpt) != {'CFS_V2'}: + if self.precipBiasCorrectOpt != ['CFS_V2']: err_handler.err_out_screen('CFSv2-NLDAS NWM bias correction must be activated for ' 'Precipitation under this configuration.') - if min(self.lwBiasCorrectOpt) != {'CFS_V2'}: + if self.lwBiasCorrectOpt != ['CFS_V2']: err_handler.err_out_screen('CFSv2-NLDAS NWM bias correction must be activated for ' 'long-wave radiation under this configuration.') - if min(self.swBiasCorrectOpt) != {'CFS_V2'}: + if self.swBiasCorrectOpt != ['CFS_V2']: err_handler.err_out_screen('CFSv2-NLDAS NWM bias correction must be activated for ' 'short-wave radiation under this configuration.') - if min(self.t2BiasCorrectOpt) != {'CFS_V2'}: + if self.t2BiasCorrectOpt != ['CFS_V2']: err_handler.err_out_screen('CFSv2-NLDAS NWM bias correction must be activated for ' 'surface temperature under this configuration.') - if min(self.windBiasCorrect) != {'CFS_V2'}: + if self.windBiasCorrect != ['CFS_V2']: err_handler.err_out_screen('CFSv2-NLDAS NWM bias correction must be activated for ' 'wind forcings under this configuration.') - if min(self.q2BiasCorrectOpt) != {'CFS_V2'}: + if self.q2BiasCorrectOpt != ['CFS_V2']: err_handler.err_out_screen('CFSv2-NLDAS NWM bias correction must be activated for ' 'specific humidity under this configuration.') - if min(self.psfcBiasCorrectOpt) != {'CFS_V2'}: + if self.psfcBiasCorrectOpt != ['CFS_V2']: err_handler.err_out_screen('CFSv2-NLDAS NWM bias correction must be activated for ' 'surface pressure under this configuration.') # Make sure we don't have any other forcings activated. This can only be ran for CFSv2. @@ -430,7 +450,7 @@ def read_config(self): # Read in the temperature downscaling options. # Create temporary array to hold flags of if we need input parameter files. - param_flag = np.empty([len(self.input_forcings)], np.int) + param_flag = np.empty([len(self.input_forcings)], int) param_flag[:] = 0 try: self.t2dDownscaleOpt = [input['Downscaling']['Temperature'] for input in inputs] @@ -944,7 +964,6 @@ def read_config(self): except KeyError: err_handler.err_out_screen('Unable to locate SuppPcpDirectories in SuppForcing section ' 'in the configuration file.') - # Loop through and ensure all supp pcp directories exist. Also strip out any whitespace # or new line characters. 
for dirTmp in range(0, len(self.supp_precip_dirs)): @@ -1059,7 +1078,7 @@ def read_config(self): for optTmp in self.input_forcings: if optTmp == 'CFS_V2': try: - self.cfsv2EnsMember = input['Ensembles']['cfsEnsNumber'] + self.cfsv2EnsMember = [input['Ensembles']['cfsEnsNumber'] for input in inputs if 'cfsEnsNumber' in input['Ensembles']][0] except KeyError: err_handler.err_out_screen('Unable to locate cfsEnsNumber under the Ensembles ' 'section of the configuration file') @@ -1072,4 +1091,4 @@ def use_data_at_current_time(self): hrs_since_start = self.current_output_date - self.current_fcst_cycle return hrs_since_start <= datetime.timedelta(hours = self.supp_pcp_max_hours) else: - return True + return True diff --git a/core/disaggregateMod.py b/core/disaggregateMod.py index f86d731..ad4281d 100755 --- a/core/disaggregateMod.py +++ b/core/disaggregateMod.py @@ -114,9 +114,9 @@ def ext_ana_disaggregate(input_forcings, supplemental_precip, config_options, mp ana_sum = np.array([],dtype=np.float32) target_data = np.array([],dtype=np.float32) - ana_all_zeros = np.array([],dtype=np.bool) - ana_no_zeros = np.array([],dtype=np.bool) - target_data_no_zeros = np.array([],dtype=np.bool) + ana_all_zeros = np.array([],dtype=bool) + ana_no_zeros = np.array([],dtype=bool) + target_data_no_zeros = np.array([],dtype=bool) if mpi_config.rank == 0: config_options.statusMsg = f"Performing hourly disaggregation of {supplemental_precip.file_in2}" err_handler.log_msg(config_options, mpi_config) diff --git a/core/downscale.py b/core/downscale.py index 8de6986..ad6abb4 100755 --- a/core/downscale.py +++ b/core/downscale.py @@ -764,15 +764,17 @@ def TOPO_RAD_ADJ_DRVR(GeoMetaWrfHydro,input_forcings,ConfigOptions,COSZEN,declin COSZEN[np.where(COSZEN < 1E-4)] = 1E-4 - corr_frac = np.empty([ny, nx], np.int) - # shadow_mask = np.empty([ny,nx],np.int) - diffuse_frac = np.empty([ny, nx], np.int) + corr_frac = np.empty([ny, nx], int) + + # shadow_mask = np.empty([ny,nx],int) + diffuse_frac = np.empty([ny, nx], int) corr_frac[:, :] = 0 diffuse_frac[:, :] = 0 # shadow_mask[:,:] = 0 indTmp = np.where((GeoMetaWrfHydro.slope[:,:] == 0.0) & (SWDOWN <= 10.0)) + corr_frac[indTmp] = 1 term1 = np.sin(xxlat) * np.cos(hrang2d) diff --git a/core/forecastMod.py b/core/forecastMod.py index d1f61bc..75cbe1b 100755 --- a/core/forecastMod.py +++ b/core/forecastMod.py @@ -123,7 +123,7 @@ def process_forecasts(ConfigOptions, wrfHydroGeoMeta, inputForcingMod, suppPcpMo show_message = True for outStep in range(1, ConfigOptions.num_output_steps + 1): # Reset out final grids to missing values. - OutputObj.output_local[:, :, :] = -9999.0 + OutputObj.output_local[:, :, :] = ConfigOptions.globalNdv ConfigOptions.current_output_step = outStep OutputObj.outDate = ConfigOptions.current_fcst_cycle + datetime.timedelta( diff --git a/core/geoMod.py b/core/geoMod.py index eda307c..722df13 100755 --- a/core/geoMod.py +++ b/core/geoMod.py @@ -1,6 +1,11 @@ import math -import ESMF +# ESMF was renamed to esmpy in v8.4.0 +try: + import esmpy as ESMF +except ModuleNotFoundError: + import ESMF + import numpy as np from netCDF4 import Dataset diff --git a/core/ioMod.py b/core/ioMod.py index c850e05..05ef921 100755 --- a/core/ioMod.py +++ b/core/ioMod.py @@ -26,7 +26,7 @@ def __init__(self,GeoMetaWrfHydro,ConfigOptions): self.output_local = None self.outPath = None self.outDate = None - self.out_ndv = -9999 + self.out_ndv = -999999 # Create local "slabs" to hold final output grids. These # will be collected during the output routine below. 
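# Note that out_ndv above now matches the new ConfigOptions.globalNdv value (-999999)
# introduced in config.py, keeping missing data written here consistent with the rest of
# the change set.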
@@ -163,7 +163,7 @@ def output_final_ldasin(self, ConfigOptions, geoMetaWrfHydro, MpiConfig): break try: - idOut.model_total_valid_times = float(ConfigOptions.actual_output_steps) + idOut.model_total_valid_times = np.int32(ConfigOptions.actual_output_steps) except: ConfigOptions.errMsg = "Unable to create total_valid_times global attribute in: " + self.outPath err_handler.log_critical(ConfigOptions, MpiConfig) @@ -172,7 +172,7 @@ def output_final_ldasin(self, ConfigOptions, geoMetaWrfHydro, MpiConfig): if ConfigOptions.spatial_meta is not None: # apply spatial_global_atts to output globals attrs for k, v in geoMetaWrfHydro.spatial_global_atts.items(): - if k not in ("Source_Software", "history", "processing_notes"): # don't add these + if k not in ("Source_Software", "history", "processing_notes", "version"): # don't add these idOut.setncattr(k, v) # Create variables. @@ -535,6 +535,11 @@ def open_grib2(GribFileIn,NetCdfFileOut,Wgrib2Cmd,ConfigOptions,MpiConfig, err = None exitcode = None + # remove temporary grib2.tbl file + g2path = os.path.join(ConfigOptions.scratch_dir, "grib2.tbl") + if os.path.isfile(g2path): + os.remove(g2path) + # Ensure file exists. if not os.path.isfile(NetCdfFileOut): ConfigOptions.errMsg = "Expected NetCDF file: " + NetCdfFileOut + \ diff --git a/core/parallel.py b/core/parallel.py index 910b578..f93ef8d 100755 --- a/core/parallel.py +++ b/core/parallel.py @@ -140,7 +140,7 @@ def scatter_array_scatterv_no_cache(self,geoMeta,src_array,ConfigOptions): data_type_flag = 1 if src_array.dtype == np.float64: data_type_flag = 2 - if src_array.dtype == np.bool: + if src_array.dtype == bool: data_type_flag = 3 # Broadcast the data_type_flag to other processors @@ -206,7 +206,7 @@ def scatter_array_scatterv_no_cache(self,geoMeta,src_array,ConfigOptions): recvbuf=np.empty([counts[self.rank]],np.float32) elif data_type_flag == 3: data_type = MPI.BOOL - recvbuf = np.empty([counts[self.rank]], np.bool) + recvbuf = np.empty([counts[self.rank]], bool) else: data_type = MPI.DOUBLE recvbuf = np.empty([counts[self.rank]], np.float64) diff --git a/core/regrid.py b/core/regrid.py index a71dafb..8cb1342 100755 --- a/core/regrid.py +++ b/core/regrid.py @@ -2,7 +2,11 @@ """ Regridding module file for regridding input forcing files. 
""" -import ESMF +# ESMF was renamed to esmpy in v8.4.0 +try: + import esmpy as ESMF +except ModuleNotFoundError: + import ESMF import os import sys import traceback @@ -83,7 +87,7 @@ def regrid_ak_ext_ana(input_forcings, config_options, wrf_hydro_geo_meta, mpi_co except: config_options.errMsg = f"Unable to open input NetCDF file: {input_forcings.file_in}" err_handler.log_critical(config_options, mpi_config) - + ds.set_auto_scale(True) ds.set_auto_mask(False) @@ -126,7 +130,7 @@ def regrid_ak_ext_ana(input_forcings, config_options, wrf_hydro_geo_meta, mpi_co np.float32) input_forcings.regridded_forcings2 = np.empty([len(config_options.OutputEnum), wrf_hydro_geo_meta.ny_local, wrf_hydro_geo_meta.nx_local], np.float32) - + for force_count, nc_var in enumerate(input_forcings.netcdf_var_names): var_tmp = None if mpi_config.rank == 0: @@ -139,7 +143,7 @@ def regrid_ak_ext_ana(input_forcings, config_options, wrf_hydro_geo_meta, mpi_co except (ValueError, KeyError, AttributeError) as err: config_options.errMsg = f"Unable to extract: {nc_var} from: {input_forcings.file_in2} ({str(err)})" err_handler.log_critical(config_options, mpi_config) - + err_handler.check_program_status(config_options, mpi_config) var_sub_tmp = mpi_config.scatter_array(input_forcings, var_tmp, config_options) err_handler.check_program_status(config_options, mpi_config) @@ -195,7 +199,7 @@ def _regrid_ak_ext_ana_pcp_stage4(supplemental_precip, config_options, wrf_hydro lat_var = "latitude" lon_var = "longitude" - + if supplemental_precip.fileType != NETCDF: # This file shouldn't exist.... but if it does (previously failed # execution of the program), remove it..... @@ -2080,9 +2084,13 @@ def regrid_mrms_hourly(supplemental_precip, config_options, wrf_hydro_geo_meta, # Set any pixel cells outside the input domain to the global missing value, and set negative precip values to 0 try: + if len(np.argwhere(supplemental_precip.esmf_field_out.data < 0)) > 0: + supplemental_precip.esmf_field_out.data[np.where(supplemental_precip.esmf_field_out.data < 0)] = config_options.globalNdv + # config_options.statusMsg = "WARNING: Found negative precipitation values in MRMS data, setting to missing_value" + # err_handler.log_warning(config_options, mpi_config) + supplemental_precip.esmf_field_out.data[np.where(supplemental_precip.regridded_mask == 0)] = \ config_options.globalNdv - supplemental_precip.esmf_field_out.data[np.where(supplemental_precip.esmf_field_out.data < 0)] = 0 except (ValueError, ArithmeticError) as npe: config_options.errMsg = "Unable to run mask search on MRMS supplemental precip: " + str(npe) @@ -2650,7 +2658,7 @@ def regrid_hourly_nbm(forcings_or_precip, config_options, wrf_hydro_geo_meta, mp config_options.statusMsg = "Exceeded max hours for NBM data, will not use NBM in final layering." err_handler.log_msg(config_options, mpi_config) return - + # If the expected file is missing, this means we are allowing missing files, simply # exit out of this routine as the regridded fields have already been set to NDV. 
if not os.path.exists(forcings_or_precip.file_in1): @@ -2761,7 +2769,7 @@ def regrid_hourly_nbm(forcings_or_precip, config_options, wrf_hydro_geo_meta, mp terrain_tmp = os.path.join(config_options.scratch_dir, 'nbm_terrain_temp.nc') cmd = f"$WGRIB2 {config_options.grid_meta} -netcdf {terrain_tmp}" - hgt_tmp = ioMod.open_grib2(config_options.grid_meta, terrain_tmp, cmd, config_options, + hgt_tmp = ioMod.open_grib2(config_options.grid_meta, terrain_tmp, cmd, config_options, mpi_config, 'DIST_surface') if mpi_config.rank == 0: var_tmp = hgt_tmp.variables['DIST_surface'][0, :, :] diff --git a/core/tests/config_data/DOMAIN_Default b/core/tests/config_data/DOMAIN_Default deleted file mode 120000 index 54150f2..0000000 --- a/core/tests/config_data/DOMAIN_Default +++ /dev/null @@ -1 +0,0 @@ -/glade/p/cisl/nwc/nwmv30_finals/Hawaii/DOMAIN_Default \ No newline at end of file diff --git a/core/tests/config_data/HIRESW.Hawaii b/core/tests/config_data/HIRESW.Hawaii deleted file mode 120000 index 14010bb..0000000 --- a/core/tests/config_data/HIRESW.Hawaii +++ /dev/null @@ -1 +0,0 @@ -/glade/scratch/zhangyx/ForcingEngine/Forcing_Inputs/HIRESW.Hawaii \ No newline at end of file diff --git a/core/tests/config_data/NAM.Hawaii b/core/tests/config_data/NAM.Hawaii deleted file mode 120000 index e6a9e17..0000000 --- a/core/tests/config_data/NAM.Hawaii +++ /dev/null @@ -1 +0,0 @@ -/glade/scratch/zhangyx/ForcingEngine/Forcing_Inputs/NAM.Hawaii \ No newline at end of file diff --git a/core/tests/config_data/Puerto_Rico b/core/tests/config_data/Puerto_Rico deleted file mode 120000 index e6744c6..0000000 --- a/core/tests/config_data/Puerto_Rico +++ /dev/null @@ -1 +0,0 @@ -/glade/p/cisl/nwc/nwm_forcings/NWM_v21_Params/Puerto_Rico \ No newline at end of file diff --git a/core/tests/fixtures.py b/core/tests/fixtures.py new file mode 100644 index 0000000..24bf497 --- /dev/null +++ b/core/tests/fixtures.py @@ -0,0 +1,133 @@ +import pytest + +from core.parallel import MpiConfig +from core.config import ConfigOptions +from core.geoMod import GeoMetaWrfHydro +from core.forcingInputMod import initDict + +import numpy as np + + +@pytest.fixture +def mpi_meta(config_options): + config_options.read_config() + + mpi_meta = MpiConfig() + mpi_meta.initialize_comm(config_options) + + return mpi_meta + +@pytest.fixture +def geo_meta(config_options, mpi_meta): + config_options.read_config() + + WrfHydroGeoMeta = GeoMetaWrfHydro() + WrfHydroGeoMeta.initialize_destination_geo(config_options, mpi_meta) + WrfHydroGeoMeta.initialize_geospatial_metadata(config_options, mpi_meta) + + return WrfHydroGeoMeta + +@pytest.fixture +def config_options(): + config_path = './yaml/configOptions_valid.yaml' + config_options = ConfigOptions(config_path) + config_options.read_config() + yield config_options + +@pytest.fixture +def config_options_cfs(): + config_path = './yaml/configOptions_cfs.yaml' + config_options = ConfigOptions(config_path) + config_options.read_config() + yield config_options + +@pytest.fixture +def config_options_downscale(): + config_path = './yaml/configOptions_downscale.yaml' + config_options = ConfigOptions(config_path) + config_options.read_config() + yield config_options + +@pytest.fixture +def unbiased_input_forcings(config_options,geo_meta): + """ + Prepare the mock input forcing grid + """ + config_options.read_config() + + inputForcingMod = initDict(config_options,geo_meta) + input_forcings = inputForcingMod[config_options.input_forcings[0]] + + # put a value of 1.5 at every gridpoint, for 10 mock variables + 
input_forcings.final_forcings = np.full((10,geo_meta.ny_local, geo_meta.nx_local),1.5) + input_forcings.fcst_hour2 = 12 + + return input_forcings + +def isWithinTolerance(val1, val2, tolerance=None): + """ + Make sure val1==val2, within the provided tolerance + """ + if tolerance is None: + tolerance = 0.0 + + diff = abs(val1 - val2) + return diff <= tolerance + + +def run_and_compare(funct, config_options, unbiased_input_forcings, mpi_meta, geo_meta=None, + compval=None, tolerance=None, equal=False, index=(0,0), var='T2D', isDownscaleFunct=False): + """ + Run the provided function, and sample the output to compare + against the expected value. Also ensure the overall original and modified grid sums are different. + :param funct: The function to call + :param config_options: + :param unbiased_input_forcings: + :param mpi_meta: + :param geo_meta: Can be None if not needed for function call + :param compval: Expected corrected value. Can be None to skip this test + :tolerance: Ensure that abs(compval-sampledval) <= tolerance. If None, the values must match exactly + :param equal: If true, the modified grid should be equivalent to the original grid + """ + #config_options.read_config() + config_options.current_output_date = config_options.b_date_proc + config_options.current_output_step = 60 + + unbiased_input_forcings.define_product() + final_forcings = np.copy(unbiased_input_forcings.final_forcings) + if isDownscaleFunct: + funct(unbiased_input_forcings, config_options, geo_meta, mpi_meta) + else: + varnum = unbiased_input_forcings.input_map_output.index(var) + if geo_meta: + funct(unbiased_input_forcings, geo_meta, config_options, mpi_meta, varnum) + else: + funct(unbiased_input_forcings, config_options, mpi_meta, varnum) + + if equal: + assert np.sum(final_forcings) == np.sum(unbiased_input_forcings.final_forcings) + else: + assert np.sum(final_forcings) != np.sum(unbiased_input_forcings.final_forcings) + + if compval is not None: + outId = getOutputId(config_options, unbiased_input_forcings, var) + idx = (outId, index[0], index[1]) + + pointval = unbiased_input_forcings.final_forcings[idx] + # TODO set acceptable tolerance + print("expected value: %s, got: %s" % (compval, pointval)) + assert isWithinTolerance(pointval, compval, tolerance) + +def getOutputId(config_options, input_forcings, varname): + OutputEnum = config_options.OutputEnum + varnum = input_forcings.input_map_output.index(varname) + outId = 0 + for ind, xvar in enumerate(OutputEnum): + if xvar.name == input_forcings.input_map_output[varnum]: + outId = ind + break + + return outId + + + diff --git a/core/tests/gdrive_download.py b/core/tests/gdrive_download.py new file mode 100644 index 0000000..5a4646a --- /dev/null +++ b/core/tests/gdrive_download.py @@ -0,0 +1,72 @@ +from argparse import ArgumentParser + +import requests +import os + + +def maybe_download_testdata(): + # See if we've downloaded the test data from + # google drive yet, and download it if not + if os.path.isdir("config_data"): + return + + id = "1qZbSpgUbi6Is5VlJzsvZ3R5w1WtA1uS3" + outfile = "config_data.tar.gz" + + download_file_from_google_drive(id, outfile) + os.system("tar xzf %s" % outfile) + os.remove(outfile) + + +def download_file_from_google_drive(id, destination): + print('downloading google drive file id ' + id + ' to ' + destination) + URL = "https://docs.google.com/uc?export=download" + + session = requests.Session() + + response = session.get(URL, params={'id': id, 'alt': 'media', 'confirm':'t'} + , stream=True) + token = 
get_confirm_token(response) + + if token: + params = {'id': id, 'confirm': token} + response = session.get(URL, params=params, stream=True) + + save_response_content(response, destination) + + +def get_confirm_token(response): + for key, value in response.cookies.items(): + if key.startswith('download_warning'): + return value + + return None + + +def save_response_content(response, destination): + CHUNK_SIZE = 32768 + + with open(destination, "wb") as f: + for chunk in response.iter_content(CHUNK_SIZE): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + + +def main(): + + parser = ArgumentParser() + parser.add_argument("--file_id", + dest="file_id", + help="Google drive file ID. Get from shareable link") + parser.add_argument("--dest_file", + dest="dest_file", + help="Full path including filename for downloaded file.") + + args = parser.parse_args() + file_id = args.file_id + dest_file = args.dest_file + + download_file_from_google_drive(file_id, dest_file) + +if __name__ == "__main__": + main() diff --git a/core/tests/run_tests.py b/core/tests/run_tests.py new file mode 100644 index 0000000..8be98ed --- /dev/null +++ b/core/tests/run_tests.py @@ -0,0 +1,65 @@ +from gdrive_download import maybe_download_testdata +import subprocess + +COLOR_GREEN = "\033[92m" +COLOR_NORM = "\033[0m" +COLOR_RED = "\033[91m" + + +def run(): + maybe_download_testdata() + + failed = 0 + + print ("Testing without mpirun...") + + cmd = "python -m pytest --with-mpi" + test_proc = subprocess.run(cmd, shell=True) + if test_proc.returncode != 0: + print("%s----Tests not using mpirun failed!!!%s" % (COLOR_RED, COLOR_NORM)) + failed += 1 + else: + print("%s---- Tests not using mpirun passed%s" % (COLOR_GREEN, COLOR_NORM)) + + print ("Testing with 1 MPI process...") + + cmd = "mpirun -n 1 python -m pytest --with-mpi" + test_proc = subprocess.run(cmd, shell=True) + if test_proc.returncode != 0: + print("%s---- MPI nprocs = 1 tests failed!!!%s" % (COLOR_RED, COLOR_NORM)) + failed += 1 + else: + print("%s---- MPI nprocs = 1 tests passed%s" % (COLOR_GREEN, COLOR_NORM)) + + print ("Testing with 4 MPI processes...") + + cmd = "mpirun -n 4 python -m pytest --with-mpi" + test_proc = subprocess.run(cmd, shell=True) + if test_proc.returncode != 0: + print("%s---- MPI nprocs = 4 tests failed!!!%s" % (COLOR_RED, COLOR_NORM)) + failed += 1 + else: + print("%s---- MPI nprocs = 4 tests passed%s" % (COLOR_GREEN, COLOR_NORM)) + + # print ("Testing with 8 MPI processes...") + + cmd = "mpirun -n 8 python -m pytest --with-mpi" + test_proc = subprocess.run(cmd, shell=True) + if test_proc.returncode != 0: + print("%s---- MPI nprocs = 8 tests failed!!!%s" % (COLOR_RED, COLOR_NORM)) + failed += 1 + else: + print("%s---- MPI nprocs = 8 tests passed%s" % (COLOR_GREEN, COLOR_NORM)) + + if failed > 0: + print("%s---- Total %s MPI configurations failed!!!%s" % (failed, COLOR_RED, COLOR_NORM)) + exit(-1) + + print("%s******** All tests passed ***********%s" % (COLOR_GREEN, COLOR_NORM)) + exit(0) + + + + +if __name__ == "__main__": + run() diff --git a/core/tests/test_bias_correction.py b/core/tests/test_bias_correction.py new file mode 100644 index 0000000..2530a7f --- /dev/null +++ b/core/tests/test_bias_correction.py @@ -0,0 +1,611 @@ +import pytest +import numpy as np +from datetime import datetime + +from core.parallel import MpiConfig +from core.config import ConfigOptions +from core.geoMod import GeoMetaWrfHydro +from core.forcingInputMod import * +from core.enumConfig import * + +from fixtures import * +import 
core.bias_correction as BC + + +def date(datestr): + return datetime.strptime(datestr, '%Y-%m-%d %H:%M:%S') + +var_list = { + 'T2D': 300, + 'Q2D': 50.0, + 'U2D': 2.5, + 'V2D': 1.5, + 'RAINRATE': 2.5, + 'SWDOWN': 400, + 'LWDOWN': 400, + 'PSFC': 90000 +} + +expected_values = { + 'no_bias_correct': [ + { 'index': (0,0), 'value': 1.5, 'tolerance': None} + ], + 'ncar_tbl_correction': [ + { 'index': (0,0), 'value': 2.5, 'tolerance': None, 'var': 'T2D', 'fcst_hour': 0}, + { 'index': (0,0), 'value': 2.5, 'tolerance': None, 'var': 'Q2D', 'fcst_hour': 0}, + { 'index': (0,0), 'value': 1.85, 'tolerance': None, 'var': 'U2D', 'fcst_hour': 0}, + { 'index': (0,0), 'value': 1.85, 'tolerance': None, 'var': 'V2D', 'fcst_hour': 0}, + { 'index': (0,0), 'value': 2.5, 'tolerance': None, 'var': 'T2D', 'fcst_hour': 6}, + { 'index': (0,0), 'value': 2.5, 'tolerance': None, 'var': 'Q2D', 'fcst_hour': 6}, + { 'index': (0,0), 'value': 1.6, 'tolerance': None, 'var': 'U2D', 'fcst_hour': 6}, + { 'index': (0,0), 'value': 1.6, 'tolerance': None, 'var': 'V2D', 'fcst_hour': 6}, + { 'index': (0,0), 'value': 2.5, 'tolerance': None, 'var': 'T2D', 'fcst_hour': 12}, + { 'index': (0,0), 'value': 2.5, 'tolerance': None, 'var': 'Q2D', 'fcst_hour': 12}, + { 'index': (0,0), 'value': 1.52, 'tolerance': None, 'var': 'U2D', 'fcst_hour': 12}, + { 'index': (0,0), 'value': 1.52, 'tolerance': None, 'var': 'V2D', 'fcst_hour': 12}, + { 'index': (0,0), 'value': 2.5, 'tolerance': None, 'var': 'T2D', 'fcst_hour': 18}, + { 'index': (0,0), 'value': 2.5, 'tolerance': None, 'var': 'Q2D', 'fcst_hour': 18}, + { 'index': (0,0), 'value': 1.45, 'tolerance': None, 'var': 'U2D', 'fcst_hour': 18}, + { 'index': (0,0), 'value': 1.45, 'tolerance': None, 'var': 'V2D', 'fcst_hour': 18} + ], + 'ncar_blanket_adjustment_lw': [ + { 'index': (0,0), 'value': 9.82194214668789, 'tolerance': None, 'date': date('2023-01-01 00:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 7.763851170382089, 'tolerance': None, 'date': date('2023-04-01 06:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 6.8580578533121095, 'tolerance': None, 'date': date('2023-07-01 12:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 8.91614882961791, 'tolerance': None, 'date': date('2023-10-01 18:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 7.479010060854788, 'tolerance': None, 'date': date('2023-01-01 00:00:00'), 'ana': 0}, + { 'index': (0,0), 'value': 4.6895996292499, 'tolerance': None, 'date': date('2023-04-01 06:00:00'), 'ana': 0}, + { 'index': (0,0), 'value': 6.160989939145213, 'tolerance': None, 'date': date('2023-07-01 12:00:00'), 'ana': 0}, + { 'index': (0,0), 'value': 8.950400370750101, 'tolerance': None, 'date': date('2023-10-01 18:00:00'), 'ana': 0} + ], + 'ncar_sw_hrrr_bias_correct': [ + { 'index': (0,0), 'value': 1.5826259963214397, 'tolerance': None, 'date': date('2023-01-01 00:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 1.4999999948202003, 'tolerance': None, 'date': date('2023-04-01 06:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 1.4999999948202003, 'tolerance': None, 'date': date('2023-07-01 12:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 1.540613193064928, 'tolerance': None, 'date': date('2023-10-01 18:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 1.4394609276205301, 'tolerance': None, 'date': date('2023-01-01 00:00:00'), 'ana': 0}, + { 'index': (0,0), 'value': 1.500000003795177, 'tolerance': None, 'date': date('2023-04-01 06:00:00'), 'ana': 0}, + { 'index': (0,0), 'value': 1.500000003795177, 'tolerance': None, 'date': date('2023-07-01 12:00:00'), 'ana': 0}, + { 
'index': (0,0), 'value': 1.4702432015910745, 'tolerance': None, 'date': date('2023-10-01 18:00:00'), 'ana': 0} + ], + 'ncar_temp_hrrr_bias_correct': [ + { 'index': (0,0), 'value': 1.462288117950048, 'tolerance': None, 'date': date('2023-01-01 00:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 1.49941014460614, 'tolerance': None, 'date': date('2023-04-01 06:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 1.5757118820499518, 'tolerance': None, 'date': date('2023-07-01 12:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 1.53858985539386, 'tolerance': None, 'date': date('2023-10-01 18:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 0.9518785484037021, 'tolerance': None, 'date': date('2023-01-01 00:00:00'), 'ana': 0}, + { 'index': (0,0), 'value': 0.8684798631054194, 'tolerance': None, 'date': date('2023-04-01 06:00:00'), 'ana': 0}, + { 'index': (0,0), 'value': 0.884121451596298, 'tolerance': None, 'date': date('2023-07-01 12:00:00'), 'ana': 0}, + { 'index': (0,0), 'value': 0.9675201368945807, 'tolerance': None, 'date': date('2023-10-01 18:00:00'), 'ana': 0} + ], + 'ncar_temp_gfs_bias_correct': [ + { 'index': (0,0), 'value': 2.6484931133084233, 'tolerance': None, 'date': date('2023-01-01 00:00:00')}, + { 'index': (0,0), 'value': 2.1467845464398003, 'tolerance': None, 'date': date('2023-04-01 06:00:00')}, + { 'index': (0,0), 'value': 0.23150688669157682, 'tolerance': None, 'date': date('2023-07-01 12:00:00')}, + { 'index': (0,0), 'value': 0.7332154535601993, 'tolerance': None, 'date': date('2023-10-01 18:00:00')} + ], + 'ncar_lwdown_gfs_bias_correct': [ + { 'index': (0,0), 'value': 10.897517774766143, 'tolerance': None, 'date': date('2023-01-01 00:00:00')}, + { 'index': (0,0), 'value': 12.813333511002988, 'tolerance': None, 'date': date('2023-04-01 06:00:00')}, + { 'index': (0,0), 'value': 11.902482225233857, 'tolerance': None, 'date': date('2023-07-01 12:00:00')}, + { 'index': (0,0), 'value': 9.986666488997013, 'tolerance': None, 'date': date('2023-10-01 18:00:00')} + ], + 'ncar_wspd_hrrr_bias_correct': [ + { 'index': (0,0), 'value': 1.7324061943600948, 'tolerance': None, 'date': date('2023-01-01 00:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 1.6027854243986395, 'tolerance': None, 'date': date('2023-04-01 06:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 1.592862924985717, 'tolerance': None, 'date': date('2023-07-01 12:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 1.7224836949471725, 'tolerance': None, 'date': date('2023-10-01 18:00:00'), 'ana': 1}, + { 'index': (0,0), 'value': 1.2131465997822362, 'tolerance': None, 'date': date('2023-01-01 00:00:00'), 'ana': 0}, + { 'index': (0,0), 'value': 1.080473211999704, 'tolerance': None, 'date': date('2023-04-01 06:00:00'), 'ana': 0}, + { 'index': (0,0), 'value': 1.1504572971498712, 'tolerance': None, 'date': date('2023-07-01 12:00:00'), 'ana': 0}, + { 'index': (0,0), 'value': 1.2831306849324031, 'tolerance': None, 'date': date('2023-10-01 18:00:00'), 'ana': 0} + ], + 'ncar_wspd_gfs_bias_correct': [ + { 'index': (0,0), 'value': 1.560235849440387, 'tolerance': None, 'date': date('2023-01-01 00:00:00')}, + { 'index': (0,0), 'value': 1.2559415578811088, 'tolerance': None, 'date': date('2023-04-01 06:00:00')}, + { 'index': (0,0), 'value': 1.1569214380849937, 'tolerance': None, 'date': date('2023-07-01 12:00:00')}, + { 'index': (0,0), 'value': 1.4612157296442718, 'tolerance': None, 'date': date('2023-10-01 18:00:00')} + ], + 'cfsv2_nldas_nwm_bias_correct': [ + { 'index': (0,0), 'value': 285, 'tolerance': None, 'var': 'T2D' }, + { 'index': (0,0), 
'value': 0.009524456432886543, 'tolerance': None, 'var': 'Q2D' }, + { 'index': (0,0), 'value': 3.562956632266148, 'tolerance': None, 'var': 'U2D' }, + { 'index': (0,0), 'value': 0.11510974533140078, 'tolerance': None, 'var': 'V2D' }, + { 'index': (0,0), 'value': 0.01622283237712793, 'tolerance': None, 'var': 'RAINRATE' }, + { 'index': (0,0), 'value': 0, 'tolerance': None, 'var': 'SWDOWN' }, + { 'index': (0,0), 'value': 272, 'tolerance': None, 'var': 'LWDOWN' }, + { 'index': (0,0), 'value': 49999 , 'tolerance': None, 'var': 'PSFC' } + ] +} + +####################################################### +# +# This test suite creates a mock 'forcing grid' filled +# with a known value, calls each bias_correction function, +# and samples a gridpoint to compare against the expected +# bias-corrected value for that function. +# +# The functions currently expect an exact (full precision) +# match, but a tolerance can be set for each test or +# globally in the run_and_compare() function +# +####################################################### + +@pytest.fixture +def unbiased_input_forcings_cfs(config_options_cfs,geo_meta,mpi_meta): + """ + Prepare the mock input forcing grid for CFS + """ + # Don't run for mpi processes > 0, since the + # CFS and NLDAS parameters are spatially dependent and we're not sure + # how we are splitting up the grid. + if mpi_meta.size > 1: + return + config_options_cfs.read_config() + + inputForcingMod = initDict(config_options_cfs,geo_meta) + input_forcings = inputForcingMod[config_options_cfs.input_forcings[0]] + # TODO Can we construct this path from the config file? Parts of the path are currently hard-coded + # im time_handling.py, however + input_forcings.file_in2 = "config_data/CFSv2/cfs.20190923/06/6hrly_grib_01/flxf2019100106.01.2019092306.grb2" + input_forcings.fcst_hour2 = 192 + input_forcings.fcst_date1 = datetime.strptime("2019100106", "%Y%m%d%H%M") + input_forcings.fcst_date2 = input_forcings.fcst_date1 + + input_forcings.regrid_inputs(config_options_cfs,geo_meta,mpi_meta) + + # put a value of 1.5 at every gridpoint, for 10 mock variables + input_forcings.final_forcings = np.full((10,geo_meta.ny_local, geo_meta.nx_local),1.5) + input_forcings.fcst_hour2 = 12 + + return input_forcings + +##### No bias correction tests ######### +@pytest.mark.mpi +def test_no_bias_correct(config_options, mpi_meta, unbiased_input_forcings): + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='no_bias_correct', equal=True) + + +##### NCAR table bias correction tests ######### +@pytest.mark.mpi +def test_ncar_tbl_correction_0z_temp(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 0 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_tbl_correction', var='T2D') + +@pytest.mark.mpi +def test_ncar_tbl_correction_0z_rh(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 0 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_tbl_correction', var='Q2D') + +@pytest.mark.mpi +def test_ncar_tbl_correction_0z_u(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 0 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_tbl_correction', var='U2D') + +@pytest.mark.mpi +def test_ncar_tbl_correction_0z_v(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 0 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, 
type='ncar_tbl_correction', var='V2D') + +@pytest.mark.mpi +def test_ncar_tbl_correction_6z_temp(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 6 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_tbl_correction', var='T2D') + +@pytest.mark.mpi +def test_ncar_tbl_correction_6z_rh(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 6 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_tbl_correction', var='Q2D') + +@pytest.mark.mpi +def test_ncar_tbl_correction_6z_u(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 6 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_tbl_correction', var='U2D') + +@pytest.mark.mpi +def test_ncar_tbl_correction_6z_v(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 6 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_tbl_correction', var='V2D') + +@pytest.mark.mpi +def test_ncar_tbl_correction_12z_temp(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 12 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_tbl_correction', var='T2D') + +@pytest.mark.mpi +def test_ncar_tbl_correction_12z_rh(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 12 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_tbl_correction', var='Q2D') + +@pytest.mark.mpi +def test_ncar_tbl_correction_12z_u(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 12 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_tbl_correction', var='U2D') + +@pytest.mark.mpi +def test_ncar_tbl_correction_12z_v(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 12 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_tbl_correction', var='V2D') + +@pytest.mark.mpi +def test_ncar_tbl_correction_18z_temp(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 18 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_tbl_correction', var='T2D') + +@pytest.mark.mpi +def test_ncar_tbl_correction_18z_rh(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 18 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_tbl_correction', var='Q2D') + +@pytest.mark.mpi +def test_ncar_tbl_correction_18z_u(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 18 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_tbl_correction', var='U2D') + +@pytest.mark.mpi +def test_ncar_tbl_correction_18z_v(config_options, mpi_meta, unbiased_input_forcings): + unbiased_input_forcings.fcst_hour2 = 18 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_tbl_correction', var='V2D') + +##### NCAR blanket LW Bias Correction tests ######### +@pytest.mark.mpi +def test_ncar_blanket_adjustment_lw_0z(config_options, mpi_meta, unbiased_input_forcings): + config_options.b_date_proc = date('2023-01-01 00:00:00') + config_options.ana_flag = 1 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_blanket_adjustment_lw', var='LWDOWN') + +@pytest.mark.mpi +def 
test_ncar_blanket_adjustment_lw_6z(config_options, mpi_meta, unbiased_input_forcings): + config_options.b_date_proc = date('2023-04-01 06:00:00') + config_options.ana_flag = 1 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_blanket_adjustment_lw', var='LWDOWN') + +@pytest.mark.mpi +def test_ncar_blanket_adjustment_lw_12z(config_options, mpi_meta, unbiased_input_forcings): + config_options.b_date_proc = date('2023-07-01 12:00:00') + config_options.ana_flag = 1 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_blanket_adjustment_lw', var='LWDOWN') + +@pytest.mark.mpi +def test_ncar_blanket_adjustment_lw_18z(config_options, mpi_meta, unbiased_input_forcings): + config_options.b_date_proc = date('2023-10-01 18:00:00') + config_options.ana_flag = 1 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_blanket_adjustment_lw', var='LWDOWN') + +@pytest.mark.mpi +def test_ncar_blanket_adjustment_lw_0z_noana(config_options, mpi_meta, unbiased_input_forcings): + config_options.b_date_proc = date('2023-01-01 00:00:00') + config_options.ana_flag = 0 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_blanket_adjustment_lw', var='LWDOWN') + +@pytest.mark.mpi +def test_ncar_blanket_adjustment_lw_6z_noana(config_options, mpi_meta, unbiased_input_forcings): + config_options.b_date_proc = date('2023-04-01 06:00:00') + config_options.ana_flag = 0 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_blanket_adjustment_lw', var='LWDOWN') + +@pytest.mark.mpi +def test_ncar_blanket_adjustment_lw_12z_noana(config_options, mpi_meta, unbiased_input_forcings): + config_options.b_date_proc = date('2023-07-01 12:00:00') + config_options.ana_flag = 0 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_blanket_adjustment_lw', var='LWDOWN') + +@pytest.mark.mpi +def test_ncar_blanket_adjustment_lw_18z_noana(config_options, mpi_meta, unbiased_input_forcings): + config_options.b_date_proc = date('2023-10-01 18:00:00') + config_options.ana_flag = 0 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_blanket_adjustment_lw', var='LWDOWN') + +##### NCAR HRRR SW bias correction tests ######### +@pytest.mark.mpi +def test_ncar_sw_hrrr_bias_correct_0z(config_options, geo_meta, mpi_meta, unbiased_input_forcings): + # This bias correction is affected by spatial position. Since the spatial point is different depending on how + # many processes we have, we skip this test for n_processes > 1 + if mpi_meta.size > 1: return + config_options.b_date_proc = date('2023-01-01 00:00:00') + config_options.ana_flag = 1 + run_unit_test(config_options, mpi_meta, unbiased_input_forcings, geo_meta=geo_meta, type='ncar_sw_hrrr_bias_correct', var='SWDOWN') + +@pytest.mark.mpi +def test_ncar_sw_hrrr_bias_correct_6z(config_options, geo_meta, mpi_meta, unbiased_input_forcings): + # This bias correction is affected by spatial position. 
+    # Since the spatial point is different depending on how
+    # many processes we have, we skip this test for n_processes > 1
+    if mpi_meta.size > 1: return
+    config_options.b_date_proc = date('2023-04-01 06:00:00')
+    config_options.ana_flag = 1
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, geo_meta=geo_meta, type='ncar_sw_hrrr_bias_correct', var='SWDOWN')
+
+@pytest.mark.mpi
+def test_ncar_sw_hrrr_bias_correct_12z(config_options, geo_meta, mpi_meta, unbiased_input_forcings):
+    # This bias correction is affected by spatial position. Since the spatial point is different depending on how
+    # many processes we have, we skip this test for n_processes > 1
+    if mpi_meta.size > 1: return
+    config_options.b_date_proc = date('2023-07-01 12:00:00')
+    config_options.ana_flag = 1
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, geo_meta=geo_meta, type='ncar_sw_hrrr_bias_correct', var='SWDOWN')
+
+@pytest.mark.mpi
+def test_ncar_sw_hrrr_bias_correct_18z(config_options, geo_meta, mpi_meta, unbiased_input_forcings):
+    # This bias correction is affected by spatial position. Since the spatial point is different depending on how
+    # many processes we have, we skip this test for n_processes > 1
+    if mpi_meta.size > 1: return
+    config_options.b_date_proc = date('2023-10-01 18:00:00')
+    config_options.ana_flag = 1
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, geo_meta=geo_meta, type='ncar_sw_hrrr_bias_correct', var='SWDOWN')
+
+@pytest.mark.mpi
+def test_ncar_sw_hrrr_bias_correct_0z_noana(config_options, geo_meta, mpi_meta, unbiased_input_forcings):
+    # This bias correction is affected by spatial position. Since the spatial point is different depending on how
+    # many processes we have, we skip this test for n_processes > 1
+    if mpi_meta.size > 1: return
+    config_options.b_date_proc = date('2023-01-01 00:00:00')
+    config_options.ana_flag = 0
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, geo_meta=geo_meta, type='ncar_sw_hrrr_bias_correct', var='SWDOWN')
+
+@pytest.mark.mpi
+def test_ncar_sw_hrrr_bias_correct_6z_noana(config_options, geo_meta, mpi_meta, unbiased_input_forcings):
+    # This bias correction is affected by spatial position. Since the spatial point is different depending on how
+    # many processes we have, we skip this test for n_processes > 1
+    if mpi_meta.size > 1: return
+    config_options.b_date_proc = date('2023-04-01 06:00:00')
+    config_options.ana_flag = 0
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, geo_meta=geo_meta, type='ncar_sw_hrrr_bias_correct', var='SWDOWN')
+
+@pytest.mark.mpi
+def test_ncar_sw_hrrr_bias_correct_12z_noana(config_options, geo_meta, mpi_meta, unbiased_input_forcings):
+    # This bias correction is affected by spatial position. Since the spatial point is different depending on how
+    # many processes we have, we skip this test for n_processes > 1
+    if mpi_meta.size > 1: return
+    config_options.b_date_proc = date('2023-07-01 12:00:00')
+    config_options.ana_flag = 0
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, geo_meta=geo_meta, type='ncar_sw_hrrr_bias_correct', var='SWDOWN')
+
+@pytest.mark.mpi
+def test_ncar_sw_hrrr_bias_correct_18z_noana(config_options, geo_meta, mpi_meta, unbiased_input_forcings):
+    # This bias correction is affected by spatial position. Since the spatial point is different depending on how
+    # many processes we have, we skip this test for n_processes > 1
+    if mpi_meta.size > 1: return
+    config_options.b_date_proc = date('2023-10-01 18:00:00')
+    config_options.ana_flag = 0
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, geo_meta=geo_meta, type='ncar_sw_hrrr_bias_correct', var='SWDOWN')
+
+##### NCAR HRRR temperature bias correction tests #########
+@pytest.mark.mpi
+def test_ncar_temp_hrrr_bias_correct_0z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-01-01 00:00:00')
+    config_options.ana_flag = 1
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_temp_hrrr_bias_correct', var='T2D')
+
+@pytest.mark.mpi
+def test_ncar_temp_hrrr_bias_correct_6z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-04-01 06:00:00')
+    config_options.ana_flag = 1
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_temp_hrrr_bias_correct', var='T2D')
+
+@pytest.mark.mpi
+def test_ncar_temp_hrrr_bias_correct_12z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-07-01 12:00:00')
+    config_options.ana_flag = 1
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_temp_hrrr_bias_correct', var='T2D')
+
+@pytest.mark.mpi
+def test_ncar_temp_hrrr_bias_correct_18z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-10-01 18:00:00')
+    config_options.ana_flag = 1
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_temp_hrrr_bias_correct', var='T2D')
+
+@pytest.mark.mpi
+def test_ncar_temp_hrrr_bias_correct_0z_noana(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-01-01 00:00:00')
+    config_options.ana_flag = 0
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_temp_hrrr_bias_correct', var='T2D')
+
+@pytest.mark.mpi
+def test_ncar_temp_hrrr_bias_correct_6z_noana(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-04-01 06:00:00')
+    config_options.ana_flag = 0
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_temp_hrrr_bias_correct', var='T2D')
+
+@pytest.mark.mpi
+def test_ncar_temp_hrrr_bias_correct_12z_noana(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-07-01 12:00:00')
+    config_options.ana_flag = 0
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_temp_hrrr_bias_correct', var='T2D')
+
+@pytest.mark.mpi
+def test_ncar_temp_hrrr_bias_correct_18z_noana(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-10-01 18:00:00')
+    config_options.ana_flag = 0
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_temp_hrrr_bias_correct', var='T2D')
+
+##### NCAR GFS temperature bias correction tests #########
+@pytest.mark.mpi
+def test_ncar_temp_gfs_bias_correct_0z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-01-01 00:00:00')
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_temp_gfs_bias_correct', var='T2D')
+
+@pytest.mark.mpi
+def test_ncar_temp_gfs_bias_correct_6z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-04-01 06:00:00')
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_temp_gfs_bias_correct', var='T2D')
+
+@pytest.mark.mpi
+def test_ncar_temp_gfs_bias_correct_12z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-07-01 12:00:00')
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_temp_gfs_bias_correct', var='T2D')
+
+@pytest.mark.mpi
+def test_ncar_temp_gfs_bias_correct_18z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-10-01 18:00:00')
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_temp_gfs_bias_correct', var='T2D')
+
+##### NCAR GFS LW bias correction tests #########
+@pytest.mark.mpi
+def test_ncar_lwdown_gfs_bias_correct_0z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-01-01 00:00:00')
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_lwdown_gfs_bias_correct', var='LWDOWN')
+
+@pytest.mark.mpi
+def test_ncar_lwdown_gfs_bias_correct_6z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-04-01 06:00:00')
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_lwdown_gfs_bias_correct', var='LWDOWN')
+
+@pytest.mark.mpi
+def test_ncar_lwdown_gfs_bias_correct_12z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-07-01 12:00:00')
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_lwdown_gfs_bias_correct', var='LWDOWN')
+
+@pytest.mark.mpi
+def test_ncar_lwdown_gfs_bias_correct_18z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-10-01 18:00:00')
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_lwdown_gfs_bias_correct', var='LWDOWN')
+
+##### NCAR HRRR wind speed bias correction tests #########
+@pytest.mark.mpi
+def test_ncar_wspd_hrrr_bias_correct_0z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-01-01 00:00:00')
+    config_options.ana_flag = 1
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_wspd_hrrr_bias_correct', var='U2D')
+
+@pytest.mark.mpi
+def test_ncar_wspd_hrrr_bias_correct_6z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-04-01 06:00:00')
+    config_options.ana_flag = 1
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_wspd_hrrr_bias_correct', var='U2D')
+
+@pytest.mark.mpi
+def test_ncar_wspd_hrrr_bias_correct_12z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-07-01 12:00:00')
+    config_options.ana_flag = 1
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_wspd_hrrr_bias_correct', var='U2D')
+
+@pytest.mark.mpi
+def test_ncar_wspd_hrrr_bias_correct_18z(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-10-01 18:00:00')
+    config_options.ana_flag = 1
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_wspd_hrrr_bias_correct', var='U2D')
+
+@pytest.mark.mpi
+def test_ncar_wspd_hrrr_bias_correct_0z_noana(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-01-01 00:00:00')
+    config_options.ana_flag = 0
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_wspd_hrrr_bias_correct', var='U2D')
+
+@pytest.mark.mpi
+def test_ncar_wspd_hrrr_bias_correct_6z_noana(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-04-01 06:00:00')
+    config_options.ana_flag = 0
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_wspd_hrrr_bias_correct', var='U2D')
+
+@pytest.mark.mpi
+def test_ncar_wspd_hrrr_bias_correct_12z_noana(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-07-01 12:00:00')
+    config_options.ana_flag = 0
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_wspd_hrrr_bias_correct', var='U2D')
+
+@pytest.mark.mpi
+def test_ncar_wspd_hrrr_bias_correct_18z_noana(config_options, mpi_meta, unbiased_input_forcings):
+    config_options.b_date_proc = date('2023-10-01 18:00:00')
+    config_options.ana_flag = 0
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_wspd_hrrr_bias_correct', var='U2D')
+
+##### NCAR GFS Wind Speed bias correction tests #########
+@pytest.mark.mpi
+def test_ncar_wspd_gfs_bias_correct_0z(config_options, unbiased_input_forcings, mpi_meta):
+    config_options.b_date_proc = date('2023-01-01 00:00:00')
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_wspd_gfs_bias_correct', var='U2D')
+
+@pytest.mark.mpi
+def test_ncar_wspd_gfs_bias_correct_6z(config_options, unbiased_input_forcings, mpi_meta):
+    config_options.b_date_proc = date('2023-04-01 06:00:00')
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_wspd_gfs_bias_correct', var='U2D')
+
+@pytest.mark.mpi
+def test_ncar_wspd_gfs_bias_correct_12z(config_options, unbiased_input_forcings, mpi_meta):
+    config_options.b_date_proc = date('2023-07-01 12:00:00')
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_wspd_gfs_bias_correct', var='U2D')
+
+@pytest.mark.mpi
+def test_ncar_wspd_gfs_bias_correct_18z(config_options, unbiased_input_forcings, mpi_meta):
+    config_options.b_date_proc = date('2023-10-01 18:00:00')
+    run_unit_test(config_options, mpi_meta, unbiased_input_forcings, type='ncar_wspd_gfs_bias_correct', var='U2D')
+
+##### CFS bias correction tests #########
+@pytest.mark.mpi
+def test_cfsv2_nldas_nwm_bias_correct(config_options_cfs, unbiased_input_forcings_cfs, mpi_meta,geo_meta):
+    # Don't run for mpi processes > 1, since the
+    # CFS and NLDAS parameters are spatially dependent and we're not sure
+    # how we are splitting up the grid.
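+    # (var_list maps each forcing variable name to the constant fill value used
+    # to build the uniform test grids below.)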
+    if mpi_meta.size > 1:
+        return
+    for var in var_list:
+        val = var_list[var]
+        unbiased_input_forcings_cfs.final_forcings = np.full((10,geo_meta.ny_local, geo_meta.nx_local),val)
+        unbiased_input_forcings_cfs.coarse_input_forcings1 = np.full((10,geo_meta.ny_local, geo_meta.nx_local),val)
+        unbiased_input_forcings_cfs.coarse_input_forcings2 = np.full((10,geo_meta.ny_local, geo_meta.nx_local),val)
+        run_unit_test(config_options_cfs, mpi_meta, unbiased_input_forcings_cfs, type='cfsv2_nldas_nwm_bias_correct', var=var)
+
+##### Tests for the top-level 'run_bias_correction()' function #######
+@pytest.mark.mpi
+def test_run_bias_correction1(unbiased_input_forcings, config_options, geo_meta, mpi_meta):
+    unbiased_input_forcings.t2dBiasCorrectOpt = BiasCorrTempEnum.HRRR.name
+
+    run_bias_correction(unbiased_input_forcings, config_options, geo_meta, mpi_meta, "ncar_temp_hrrr_bias_correct", varname='T2D')
+
+@pytest.mark.mpi
+def test_run_bias_correction2(unbiased_input_forcings, config_options, geo_meta, mpi_meta):
+    unbiased_input_forcings.windBiasCorrectOpt = BiasCorrWindEnum.HRRR.name
+
+    run_bias_correction(unbiased_input_forcings, config_options, geo_meta, mpi_meta, "ncar_wspd_hrrr_bias_correct", varname='U2D')
+
+@pytest.mark.mpi
+def test_run_bias_correction3(unbiased_input_forcings, config_options, geo_meta, mpi_meta):
+    unbiased_input_forcings.q2dBiasCorrectOpt = BiasCorrHumidEnum.CUSTOM.name
+
+    run_bias_correction(unbiased_input_forcings, config_options, geo_meta, mpi_meta, "ncar_tbl_correction", varname='Q2D')
+
+
+############ Helper functions ################
+
+def run_unit_test(config_options, mpi_meta, unbiased_input_forcings, geo_meta=None,
+                  type='ncar_sw_hrrr_bias_correct', var='SWDOWN', equal=False):
+    for spec in expected_values[type]:
+        if 'fcst_hour' in spec and spec['fcst_hour'] != unbiased_input_forcings.fcst_hour2:
+            continue
+        if 'var' in spec and spec['var'] != var:
+            continue
+        if 'ana' in spec and spec['ana'] != config_options.ana_flag:
+            continue
+        if 'date' in spec and spec['date'] != config_options.b_date_proc:
+            continue
+
+        funct = getattr(BC, type)
+
+        run_and_compare(funct, config_options, unbiased_input_forcings, mpi_meta, geo_meta=geo_meta,
+                        compval=spec['value'], tolerance=spec['tolerance'], index=spec['index'],
+                        var=var, equal=equal)
+
+def run_bias_correction(unbiased_input_forcings, config_options, geo_meta, mpi_meta, specname, varname="T2D"):
+    config_options.current_output_step = 60
+    config_options.current_output_date = config_options.b_date_proc
+
+    final_forcings = np.copy(unbiased_input_forcings.final_forcings)
+
+    BC.run_bias_correction(unbiased_input_forcings,config_options,geo_meta,mpi_meta)
+    assert np.sum(final_forcings) != np.sum(unbiased_input_forcings.final_forcings)
+    for spec in expected_values[specname]:
+        if 'fcst_hour' in spec and spec['fcst_hour'] != unbiased_input_forcings.fcst_hour2:
+            continue
+        if 'var' in spec and spec['var'] != varname:
+            continue
+        if 'ana' in spec and spec['ana'] != config_options.ana_flag:
+            continue
+        if 'date' in spec and spec['date'] != config_options.b_date_proc:
+            continue
+
+        outId = getOutputId(config_options,unbiased_input_forcings, varname)
+        index = (outId, spec['index'][0], spec['index'][1])
+        pointval = unbiased_input_forcings.final_forcings[index]
+        assert isWithinTolerance(pointval, spec['value'], spec['tolerance'])
+
+
diff --git a/core/tests/test_downscale.py b/core/tests/test_downscale.py
new file mode 100644
index 0000000..414d700
--- /dev/null
+++ b/core/tests/test_downscale.py
@@ -0,0 +1,159 @@
+import pytest
+import numpy as np
+from datetime import datetime
+
+from core.parallel import MpiConfig
+from core.config import ConfigOptions
+from core.geoMod import GeoMetaWrfHydro
+from core.forcingInputMod import *
+from core.enumConfig import *
+
+from fixtures import *
+import core.downscale as DS
+
+
+expected_values = {
+    'no_downscale': [
+        { 'index': (0,0), 'value': 1.5, 'tolerance': None}
+    ],
+    'simple_lapse': [
+        { 'index': (0,0), 'value': 2.1489999890327454, 'tolerance': None}
+    ],
+    'param_lapse': [
+        { 'index': (0,0), 'value': 3.0, 'tolerance': None}
+    ],
+    'pressure_down_classic': [
+        { 'index': (0,0), 'value': 4.9140393659641175, 'tolerance': None}
+    ],
+    'q2_down_classic': [
+        { 'index': (0,0), 'value': 0.9992289527938468, 'tolerance': None}
+    ],
+    'nwm_monthly_PRISM_downscale': [
+        { 'index': (0,0), 'value': 2.357142857142857, 'tolerance': None}
+    ],
+    'ncar_topo_adj': [
+        { 'index': (0,0), 'value': 1400.0, 'tolerance': None}
+    ]
+}
+
+
+############### Downscale function tests ##########################
+
+
+@pytest.mark.mpi
+def test_no_downscale(unbiased_input_forcings, config_options_downscale, geo_meta, mpi_meta):
+    run_unit_test(config_options_downscale, mpi_meta, unbiased_input_forcings, geo_meta, type='no_downscale', equal=True)
+
+@pytest.mark.mpi
+def test_simple_lapse(unbiased_input_forcings, config_options_downscale, geo_meta, mpi_meta):
+    unbiased_input_forcings.q2dDownscaleOpt = DownScaleHumidEnum.REGRID_TEMP_PRESS
+    unbiased_input_forcings.height = geo_meta.height + 100
+    run_unit_test(config_options_downscale, mpi_meta, unbiased_input_forcings, geo_meta, type='simple_lapse')
+
+@pytest.mark.mpi
+def test_param_lapse(unbiased_input_forcings, config_options_downscale, geo_meta, mpi_meta):
+    unbiased_input_forcings.q2dDownscaleOpt = DownScaleHumidEnum.REGRID_TEMP_PRESS
+    unbiased_input_forcings.height = geo_meta.height + 100
+    unbiased_input_forcings.paramDir = config_options_downscale.dScaleParamDirs[0]
+    run_unit_test(config_options_downscale, mpi_meta, unbiased_input_forcings, geo_meta, type='param_lapse')
+
+@pytest.mark.mpi
+def test_pressure_down_classic(unbiased_input_forcings, config_options_downscale, geo_meta, mpi_meta):
+    unbiased_input_forcings.height = geo_meta.height + 100
+    run_unit_test(config_options_downscale, mpi_meta, unbiased_input_forcings, geo_meta,
+                  type='pressure_down_classic',var='PSFC')
+
+@pytest.mark.mpi
+def test_q2_down_classic(unbiased_input_forcings, config_options_downscale, geo_meta, mpi_meta):
+    unbiased_input_forcings.t2dTmp = unbiased_input_forcings.t2dTmp * 0 + 273.15
+    unbiased_input_forcings.psfcTmp = unbiased_input_forcings.psfcTmp * 0 + 100000
+    run_unit_test(config_options_downscale, mpi_meta, unbiased_input_forcings, geo_meta,
+                  type='q2_down_classic',var='Q2D')
+
+@pytest.mark.mpi
+def test_nwm_monthly_PRISM_downscale(unbiased_input_forcings, config_options_downscale, geo_meta, mpi_meta):
+    config_options_downscale.current_output_date = datetime.strptime("20101001", "%Y%m%d")
+    config_options_downscale.prev_output_date = datetime.strptime("20100901", "%Y%m%d")
+    unbiased_input_forcings.paramDir = config_options_downscale.dScaleParamDirs[0]
+    run_unit_test(config_options_downscale, mpi_meta, unbiased_input_forcings, geo_meta,
+                  type='nwm_monthly_PRISM_downscale',var='RAINRATE')
+
+@pytest.mark.mpi
+def test_ncar_topo_adj(unbiased_input_forcings, config_options_downscale, geo_meta, mpi_meta):
+    config_options_downscale.current_output_date = datetime.strptime("2010122000", "%Y%m%d%H")
+    geo_meta.latitude_grid = geo_meta.latitude_grid + 20.0
+    unbiased_input_forcings.final_forcings = np.full((10,geo_meta.ny_local, geo_meta.nx_local),1500.0)
+    run_unit_test(config_options_downscale, mpi_meta, unbiased_input_forcings, geo_meta,
+                  type='ncar_topo_adj',var='SWDOWN')
+
+
+############### Helper function tests ##########################
+
+
+@pytest.mark.mpi
+def test_radconst(config_options_downscale):
+    config_options_downscale.current_output_date = datetime.strptime("20230427","%Y%m%d")
+    DECLIN, SOLCON = DS.radconst(config_options_downscale)
+
+    assert isWithinTolerance(DECLIN, 0.23942772252574912, None)
+    assert isWithinTolerance(SOLCON, 1350.9227993143058, None)
+
+    config_options_downscale.current_output_date = datetime.strptime("20231027","%Y%m%d")
+    DECLIN, SOLCON = DS.radconst(config_options_downscale)
+
+    assert isWithinTolerance(DECLIN, -0.24225978751208468, None)
+    assert isWithinTolerance(SOLCON, 1388.352238489423, None)
+
+
+@pytest.mark.mpi
+def test_calc_coszen(config_options_downscale, geo_meta, mpi_meta):
+    # Skip this test for nprocs > 1, since the latitudes will be
+    # different for each scattered array
+    if mpi_meta.size > 1 and mpi_meta.rank != 0:
+        return True
+
+    config_options_downscale.current_output_date = datetime.strptime("2023042706","%Y%m%d%H")
+    declin, solcon = DS.radconst(config_options_downscale)
+    coszen, hrang = DS.calc_coszen(config_options_downscale, declin, geo_meta)
+
+    assert isWithinTolerance(coszen[0][0], -0.24323696, 1e-7)
+    assert isWithinTolerance(hrang[0][0], -4.3573823, 1e-7)
+
+    config_options_downscale.current_output_date = datetime.strptime("2023102718","%Y%m%d%H")
+    declin, solcon = DS.radconst(config_options_downscale)
+    coszen, hrang = DS.calc_coszen(config_options_downscale, declin, geo_meta)
+
+    assert isWithinTolerance(coszen[0][0], 0.29333305, 1e-7)
+    assert isWithinTolerance(hrang[0][0], -1.1556603, 1e-7)
+
+
+@pytest.mark.mpi
+def test_rel_hum(config_options_downscale, unbiased_input_forcings, geo_meta):
+    unbiased_input_forcings.t2dTmp = np.full((geo_meta.ny_local, geo_meta.nx_local),300)
+    unbiased_input_forcings.psfcTmp = np.full((geo_meta.ny_local, geo_meta.nx_local),100000)
+    unbiased_input_forcings.final_forcings = np.full((10, geo_meta.ny_local, geo_meta.nx_local),0.015)
+
+    rh = DS.rel_hum(unbiased_input_forcings, config_options_downscale)
+
+    assert isWithinTolerance(rh[0][0], 68.33107966724083, None)
+
+
+@pytest.mark.mpi
+def test_mixhum_ptrh(config_options_downscale, unbiased_input_forcings, geo_meta):
+    unbiased_input_forcings.t2dTmp = np.full((geo_meta.ny_local, geo_meta.nx_local),300)
+    unbiased_input_forcings.psfcTmp = np.full((geo_meta.ny_local, geo_meta.nx_local),100000)
+    unbiased_input_forcings.final_forcings = np.full((10, geo_meta.ny_local, geo_meta.nx_local),0.015)
+
+    relHum = DS.rel_hum(unbiased_input_forcings, config_options_downscale)
+
+    rh = DS.mixhum_ptrh(unbiased_input_forcings, relHum, 2, config_options_downscale)
+
+    assert isWithinTolerance(rh[0][0], 9.039249311422024, None)
+
+
+def run_unit_test(config_options, mpi_meta, unbiased_input_forcings, geo_meta, type=None, equal=False, var='T2D'):
+    funct = getattr(DS, type)
+    for spec in expected_values[type]:
+        run_and_compare(funct, config_options, unbiased_input_forcings, mpi_meta, geo_meta=geo_meta, equal=equal,
+                        compval=spec['value'], tolerance=spec['tolerance'], index=spec['index'], var=var, isDownscaleFunct=True)
+
diff --git a/core/tests/test_geo_meta.py b/core/tests/test_geo_meta.py
new file mode 100644
index 0000000..b4adecf
--- /dev/null
+++ b/core/tests/test_geo_meta.py
@@ -0,0 +1,37 @@
+import pytest
+from netCDF4 import Dataset
+
+from core.parallel import MpiConfig
+from core.config import ConfigOptions
+from core.geoMod import GeoMetaWrfHydro
+
+from fixtures import *
+
+import numpy as np
+
+
+@pytest.mark.mpi
+def test_get_processor_bounds(geo_meta,mpi_meta):
+    geo_meta.get_processor_bounds()
+
+    assert geo_meta.x_lower_bound == 0
+    assert geo_meta.x_upper_bound == geo_meta.nx_global
+
+    assert abs((geo_meta.ny_global / mpi_meta.size) - geo_meta.ny_local) < mpi_meta.size
+    assert abs(geo_meta.y_lower_bound - (mpi_meta.rank * geo_meta.ny_local)) < mpi_meta.size
+    assert abs(geo_meta.y_upper_bound - ((mpi_meta.rank + 1) * geo_meta.ny_local)) < mpi_meta.size
+
+@pytest.mark.mpi
+def test_calc_slope(geo_meta,config_options):
+    config_options.read_config()
+
+    idTmp = Dataset(config_options.geogrid,'r')
+    (slopeOut,slp_azi) = geo_meta.calc_slope(idTmp,config_options)
+
+    assert slopeOut.shape == slp_azi.shape
+    assert slopeOut.size == slp_azi.size
+
+    # test the sums of these grids are as expected,
+    # within floating point rounding tolerance
+    assert abs(np.sum(slopeOut) - 1527.7666) < 0.001
+    assert abs(np.sum(slp_azi) - 58115.258) < 0.001
\ No newline at end of file
diff --git a/core/tests/test_input_forcings.py b/core/tests/test_input_forcings.py
index 2837218..8eb0aa2 100644
--- a/core/tests/test_input_forcings.py
+++ b/core/tests/test_input_forcings.py
@@ -2,6 +2,7 @@
 from core.config import ConfigOptions
 from core.forcingInputMod import input_forcings
+
 @pytest.fixture
 def config_options():
     config_path = './yaml/configOptions_valid.yaml'
diff --git a/core/tests/test_mpi_meta.py b/core/tests/test_mpi_meta.py
new file mode 100644
index 0000000..58123dc
--- /dev/null
+++ b/core/tests/test_mpi_meta.py
@@ -0,0 +1,62 @@
+import pytest
+from core.parallel import MpiConfig
+from core.config import ConfigOptions
+from core.geoMod import GeoMetaWrfHydro
+import numpy as np
+
+from fixtures import *
+
+
+
+@pytest.mark.mpi
+def test_create_mpi_config(config_options, mpi_meta):
+    config_options.read_config()
+
+@pytest.mark.mpi
+def test_broadcast_parameter(config_options, mpi_meta):
+
+    # int
+    value = 999 if mpi_meta.rank == 0 else None
+    assert (mpi_meta.rank == 0 and value == 999) or (mpi_meta.rank != 0 and value is None)
+    param = mpi_meta.broadcast_parameter(value, config_options, param_type=int)
+    assert param == 999
+
+    # float
+    value = 999.999 if mpi_meta.rank == 0 else None
+    assert (mpi_meta.rank == 0 and value == 999.999) or (mpi_meta.rank != 0 and value is None)
+    param = mpi_meta.broadcast_parameter(value, config_options, param_type=float)
+    assert param == 999.999
+
+    # bool
+    value = True if mpi_meta.rank == 0 else None
+    assert (mpi_meta.rank == 0 and value == True) or (mpi_meta.rank != 0 and value is None)
+    param = mpi_meta.broadcast_parameter(value, config_options, param_type=bool)
+    assert param == True
+
+
+@pytest.mark.mpi
+def test_scatter_array(config_options, mpi_meta, geo_meta):
+
+    target_data = None
+    if mpi_meta.rank == 0:
+        target_data = np.random.randn(geo_meta.ny_global,geo_meta.nx_global)
+
+    # get the sum of the full grid.
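+    # (Computed on rank 0 only and broadcast below, so every rank can compare its
+    # scattered sub-grid against the global sum.)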
+    value = np.sum(target_data) if mpi_meta.rank == 0 else None
+    value = mpi_meta.broadcast_parameter(value, config_options, param_type=float)
+
+    target_data = mpi_meta.scatter_array(geo_meta,target_data,config_options)
+
+    # None of the sub grids' sum should equal the sum of the full grid
+    # unless there's just a single process
+    assert np.sum(target_data) != value or mpi_meta.size == 1
+
+    # The x dimension size should be the same as the full grid
+    assert target_data.shape[1] == geo_meta.nx_global
+
+    # The length of the y dimension of the sub grid should equal (full grid.y / num_processes),
+    # accounting for rounding when scattering the array
+    assert abs((target_data.shape[0] * mpi_meta.size) - geo_meta.ny_global) < mpi_meta.size
+
+
+
\ No newline at end of file
diff --git a/core/tests/yaml/configOptions_cfs.yaml b/core/tests/yaml/configOptions_cfs.yaml
new file mode 100644
index 0000000..5b93ed9
--- /dev/null
+++ b/core/tests/yaml/configOptions_cfs.yaml
@@ -0,0 +1,69 @@
+Forecast:
+  AnAFlag: false
+  Frequency: 60
+  LookBack: 180
+  RefcstBDateProc: '201909230600'
+  RefcstEDateProc: '201910010600'
+  Shift: 0
+
+Geospatial:
+  GeogridIn: config_data/DOMAIN_Default/geo_em.d01.1km_Hawaii_NWMv3.0.nc
+  SpatialMetaIn: config_data/DOMAIN_Default/GEOGRID_LDASOUT_Spatial_Metadata_1km_Hawaii_NWMv3.0.nc
+
+Input:
+- BiasCorrection:
+    Humidity: CFS_V2
+    Longwave: CFS_V2
+    Precip: CFS_V2
+    Pressure: CFS_V2
+    Shortwave: CFS_V2
+    Temperature: CFS_V2
+    Wind: CFS_V2
+  Dir: config_data/CFSv2
+  Downscaling:
+    Humidity: REGRID_TEMP_PRESS
+    ParamDir: config_data/CFSv2
+    Precip: NONE
+    Pressure: ELEV
+    Shortwave: ELEV
+    Temperature: LAPSE_675
+  Forcing: CFS_V2
+  Horizon: 60
+  IgnoredBorderWidths: 0.0
+  Mandatory: true
+  Offset: 0
+  RegriddingOpt: ESMF_BILINEAR
+  TemporalInterp: NONE
+  Type: GRIB2
+  Ensembles:
+    cfsEnsNumber: 1
+
+Output:
+  CompressOutput: false
+  Dir: output/Hawaii_AnA_withMRMS_test
+  FloatOutput: SCALE_OFFSET
+  Frequency: 60
+  ScratchDir: output/Hawaii_AnA_withMRMS_test
+
+Regridding:
+  WeightsDir: null
+
+Retrospective:
+  Flag: false
+
+SuppForcing:
+- Pcp: MRMS
+  PcpDir: config_data/HIRESW.Hawaii
+  PcpInputOffsets: 0
+  PcpMandatory: false
+  PcpParamDir: config_data/Puerto_Rico
+  PcpTemporalInterp: 0
+  PcpType: GRIB2
+  RegridOptPcp: 1
+  RqiMethod: NONE
+  RqiThreshold: 1.0
+
+YamlConfig:
+  forcingInputModYaml: 'yaml/forcingInputMod.yaml'
+  outputVarAttrYaml: 'yaml/outputVarAttr.yaml'
+  suppPrecipModYaml: 'yaml/suppPrecipMod.yaml'
diff --git a/core/tests/yaml/configOptions_downscale.yaml b/core/tests/yaml/configOptions_downscale.yaml
new file mode 100644
index 0000000..0cc3688
--- /dev/null
+++ b/core/tests/yaml/configOptions_downscale.yaml
@@ -0,0 +1,67 @@
+Forecast:
+  AnAFlag: true
+  Frequency: 60
+  LookBack: 180
+  RefcstBDateProc: '202302261200'
+  RefcstEDateProc: '202302261800'
+  Shift: 0
+
+Geospatial:
+  GeogridIn: config_data/DOMAIN_Default/geo_em.d01.1km_Hawaii_NWMv3.0.nc
+  SpatialMetaIn: config_data/DOMAIN_Default/GEOGRID_LDASOUT_Spatial_Metadata_1km_Hawaii_NWMv3.0.nc
+
+Input:
+- BiasCorrection:
+    Humidity: NONE
+    Longwave: NONE
+    Precip: NONE
+    Pressure: NONE
+    Shortwave: NONE
+    Temperature: NONE
+    Wind: NONE
+  Dir: config_data/NAM.Hawaii
+  Downscaling:
+    Humidity: REGRID_TEMP_PRESS
+    ParamDir: config_data/params
+    Precip: NONE
+    Pressure: ELEV
+    Shortwave: ELEV
+    Temperature: LAPSE_675
+  Forcing: NAM_NEST_HI
+  Horizon: 60
+  IgnoredBorderWidths: 0.0
+  Mandatory: true
+  Offset: 0
+  RegriddingOpt: ESMF_BILINEAR
+  TemporalInterp: NONE
+  Type: GRIB2
+
+Output:
+  CompressOutput: false
+  Dir: output/Hawaii_AnA_withMRMS_test
+  FloatOutput: SCALE_OFFSET
+  Frequency: 60
+  ScratchDir: output/Hawaii_AnA_withMRMS_test
+
+Regridding:
+  WeightsDir: null
+
+Retrospective:
+  Flag: false
+
+SuppForcing:
+- Pcp: MRMS
+  PcpDir: config_data/HIRESW.Hawaii
+  PcpInputOffsets: 0
+  PcpMandatory: false
+  PcpParamDir: config_data/Puerto_Rico
+  PcpTemporalInterp: 0
+  PcpType: GRIB2
+  RegridOptPcp: 1
+  RqiMethod: NONE
+  RqiThreshold: 1.0
+
+YamlConfig:
+  forcingInputModYaml: 'yaml/forcingInputMod.yaml'
+  outputVarAttrYaml: 'yaml/outputVarAttr.yaml'
+  suppPrecipModYaml: 'yaml/suppPrecipMod.yaml'
diff --git a/core/time_handling.py b/core/time_handling.py
index 5c94122..52f314f 100755
--- a/core/time_handling.py
+++ b/core/time_handling.py
@@ -8,8 +8,8 @@
 import numpy as np
 from strenum import StrEnum
 
 from core import err_handler
-#from core.forcingInputMod import input_forcings
-#NETCDF = input_forcings.NETCDF
+from core.forcingInputMod import input_forcings
+NETCDF = input_forcings.NETCDF
 
 def calculate_lookback_window(config_options):
diff --git a/genForcing.py b/genForcing.py
index 1ac2fd9..cf5d4fd 100755
--- a/genForcing.py
+++ b/genForcing.py
@@ -1,8 +1,12 @@
 import argparse
 import os
 
-import ESMF
-
+# ESMF was renamed to esmpy in v8.4.0
+try:
+    import esmpy as ESMF
+except ModuleNotFoundError:
+    import ESMF
+
 from core import config
 from core import err_handler
 from core import forcingInputMod
diff --git a/requirements.txt b/requirements.txt
index 29fdbb1..ec5986a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,11 +7,13 @@ charset-normalizer
 cycler
 decorator
 entrypoints
+h5py
 ipykernel
 ipython
-netCDF4
+netcdf4
 numpy
 pytest
+pytest-mpi
 pyaml
 PyYAML
 requests
diff --git a/setup.py b/setup.py
index d7fd30d..fb1fef2 100755
--- a/setup.py
+++ b/setup.py
@@ -9,5 +9,5 @@
       author='Ishita Srivastava',
       author_email='ishitas@ucar.edu',
       description='',
-      install_requires=['netCDF4', 'numpy', 'mpi4py','ESMPy']
+      install_requires=['netCDF4', 'numpy', 'mpi4py','esmpy']
 )
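
Note on running the new test suite: with pytest-mpi added to requirements.txt and the new tests marked with @pytest.mark.mpi, they are presumably intended to be launched under an MPI runner using the plugin's --with-mpi option. The exact launcher and process count depend on the local MPI stack; a typical invocation might look like:

    mpirun -np 2 python -m pytest --with-mpi core/tests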