From a4231e1f7e5f744e08c10f22532a07a331c68d31 Mon Sep 17 00:00:00 2001
From: gsketefian <31046882+gsketefian@users.noreply.github.com>
Date: Mon, 26 Oct 2020 02:23:30 -0600
Subject: [PATCH] Port workflow to Orion (#309)

## DESCRIPTION OF CHANGES:
* Add stanzas for Orion where necessary.
* Add new module files for Orion.
* On Orion, both the slurm partition and the slurm QOS must be specified in the rocoto XML in order to allow wall times longer than 30 minutes (the partition must be specified explicitly because it defaults to "debug", which has a 30-minute limit). Thus, introduce modifications that make it easier to specify slurm partitions:
  * Remove the workflow variables QUEUE_DEFAULT_TAG, QUEUE_HPSS_TAG, and QUEUE_FCST_TAG, which are currently used to determine whether QUEUE_DEFAULT, QUEUE_HPSS, and QUEUE_FCST specify the names of queues/QOSs or of slurm partitions.
  * Add the workflow variables PARTITION_DEFAULT, PARTITION_HPSS, and PARTITION_FCST. These specify slurm partitions only, while the variables QUEUE_DEFAULT, QUEUE_HPSS, and QUEUE_FCST now specify queues/QOSs only.

IMPORTANT NOTE:
On Orion, in order to load the regional_workflow environment needed for generating an experiment, the user must first issue the following commands:
```
module use -a /apps/contrib/miniconda3-noaa-gsl/modulefiles
module load miniconda3
conda activate regional_workflow
```

## TESTS CONDUCTED:
Ran 11 WE2E tests on Orion, Hera, and Cheyenne.

Results on Orion:
```
community_ensemble_2mems   SUCCESS
DOT_OR_USCORE              SUCCESS
grid_GSD_HRRR_AK_50km      FAILURE - In the run_fcst task.
  * Error message:
    !!! (1) Error in subr radiation_aerosols: unrealistic surface pressure = 1 NaN
new_ESGgrid                SUCCESS
new_GFDLgrid               SUCCESS
regional_001               SUCCESS
regional_002               SUCCESS
suite_FV3_GFS_v15p2        SUCCESS
suite_FV3_GFS_v16beta      SUCCESS
suite_FV3_GSD_SAR          SUCCESS
suite_FV3_GSD_v0           SUCCESS
```

Results on Hera:
```
community_ensemble_2mems   SUCCESS
DOT_OR_USCORE              SUCCESS
grid_GSD_HRRR_AK_50km      SUCCESS
new_ESGgrid                SUCCESS
new_GFDLgrid               SUCCESS
regional_001               SUCCESS
regional_002               SUCCESS
suite_FV3_GFS_v15p2        SUCCESS
suite_FV3_GFS_v16beta      SUCCESS
suite_FV3_GSD_SAR          SUCCESS
suite_FV3_GSD_v0           SUCCESS
```

Results on Cheyenne:
```
community_ensemble_2mems   SUCCESS
DOT_OR_USCORE              SUCCESS
grid_GSD_HRRR_AK_50km      FAILURE - In the run_fcst task.
  * Error message:
    !!! (1) Error in subr radiation_aerosols: unrealistic surface pressure = 1 NaN
new_ESGgrid                SUCCESS
new_GFDLgrid               SUCCESS
regional_001               SUCCESS
regional_002               SUCCESS
suite_FV3_GFS_v15p2        SUCCESS
suite_FV3_GFS_v16beta      SUCCESS
suite_FV3_GSD_SAR          SUCCESS
suite_FV3_GSD_v0           SUCCESS
```

All tests succeed except grid_GSD_HRRR_AK_50km on Orion and Cheyenne. It is not clear why that test fails on Orion and Cheyenne but not on Hera; this seems to point to a bug in the forecast model. These two failures are not critical since this grid will soon be deprecated.

Also tested successfully on Jet by @JeffBeck-NOAA and on Odin and Stampede by @ywangwof.

## ISSUE:
This resolves Issue #152.
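## EXAMPLE CONFIGURATION:
For reference, here is an illustrative sketch (not part of this PR's diff) of how a user's `ush/config.sh` could pin the new partition/queue variables on Orion. The variable names come from `ush/config_defaults.sh` as modified here; the values shown are the machine defaults that `ush/setup.sh` applies when these variables are left empty:
```bash
# Hypothetical excerpt from a user's ush/config.sh for Orion. Leaving
# these variables empty (or unset) lets ush/setup.sh fill in these same
# machine-dependent defaults.
MACHINE="orion"
ACCOUNT="an_account"        # placeholder; use a real slurm account name
SCHED="slurm"
PARTITION_DEFAULT="orion"   # avoids the default "debug" partition and its 30-min wall-time limit
QUEUE_DEFAULT="batch"       # slurm QOS (not a partition)
PARTITION_HPSS="service"
QUEUE_HPSS="batch"
PARTITION_FCST="orion"
QUEUE_FCST="batch"
```
With non-empty PARTITION_* values, the generated rocoto XML carries both a queue (QOS) tag and a partition tag for the corresponding tasks (see the ush/templates/FV3LAM_wflow.xml changes below).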
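## EXAMPLE USAGE:
Putting the pieces together, the end-to-end sequence for generating and launching an experiment on Orion might look as follows. This is a sketch, not text from this PR: the module/conda commands come from the IMPORTANT NOTE above, `generate_FV3LAM_wflow.sh` and `launch_FV3LAM_wflow.sh` are scripts touched by this patch, and `EXPTDIR` stands in for whatever experiment directory your configuration produces:
```bash
# Load the regional_workflow environment (see IMPORTANT NOTE above).
module use -a /apps/contrib/miniconda3-noaa-gsl/modulefiles
module load miniconda3
conda activate regional_workflow

# Generate the experiment directory and rocoto XML from ush/config.sh.
cd ush
./generate_FV3LAM_wflow.sh

# Launch/advance the workflow from the experiment directory; on Orion
# this script now does "module load contrib rocoto" instead of a purge.
cd "$EXPTDIR"
./launch_FV3LAM_wflow.sh
```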
## CONTRIBUTORS: @JeffBeck-NOAA @ywangwof @christinaholtNOAA
---
 modulefiles/tasks/orion/get_extrn_ics   |   6 +
 modulefiles/tasks/orion/get_extrn_lbcs  |   6 +
 modulefiles/tasks/orion/make_grid.local |   5 +
 modulefiles/tasks/orion/make_ics.local  |   5 +
 modulefiles/tasks/orion/make_lbcs.local |   5 +
 modulefiles/tasks/orion/run_fcst.local  |   5 +
 scripts/exregional_make_grid.sh         | 219 ++++++++++-----------
 scripts/exregional_make_ics.sh          |   5 +
 scripts/exregional_make_lbcs.sh         |   5 +
 scripts/exregional_make_orog.sh         | 177 ++++++++---------
 scripts/exregional_make_sfc_climo.sh    |  72 +++----
 scripts/exregional_run_post.sh          | 101 +++++-----
 tests/run_experiments.sh                |   4 +-
 ush/config.community.sh                 |   4 -
 ush/config_defaults.sh                  |  67 ++++---
 ush/generate_FV3LAM_wflow.sh            |   7 +-
 ush/get_extrn_mdl_file_dir_info.sh      |  15 ++
 ush/launch_FV3LAM_wflow.sh              |   8 +-
 ush/load_modules_run_task.sh            |  30 +--
 ush/set_extrn_mdl_params.sh             |  72 ++++---
 ush/setup.sh                            | 243 +++++++++++++-----------
 ush/templates/FV3LAM_wflow.xml          |  22 ++-
 ush/valid_param_vals.sh                 |   2 +-
 ush/wrappers/run_post.sh                |   2 +-
 24 files changed, 602 insertions(+), 485 deletions(-)
 create mode 100644 modulefiles/tasks/orion/get_extrn_ics
 create mode 100644 modulefiles/tasks/orion/get_extrn_lbcs
 create mode 100644 modulefiles/tasks/orion/make_grid.local
 create mode 100644 modulefiles/tasks/orion/make_ics.local
 create mode 100644 modulefiles/tasks/orion/make_lbcs.local
 create mode 100644 modulefiles/tasks/orion/run_fcst.local

diff --git a/modulefiles/tasks/orion/get_extrn_ics b/modulefiles/tasks/orion/get_extrn_ics
new file mode 100644
index 0000000000..a9d5b44121
--- /dev/null
+++ b/modulefiles/tasks/orion/get_extrn_ics
@@ -0,0 +1,6 @@
+#%Module#####################################################
+## Module file for get_extrn_ics task.
+#############################################################
+
+module purge
+
diff --git a/modulefiles/tasks/orion/get_extrn_lbcs b/modulefiles/tasks/orion/get_extrn_lbcs
new file mode 100644
index 0000000000..09f37151af
--- /dev/null
+++ b/modulefiles/tasks/orion/get_extrn_lbcs
@@ -0,0 +1,6 @@
+#%Module#####################################################
+## Module file for get_extrn_lbcs task.
+#############################################################
+
+module purge
+
diff --git a/modulefiles/tasks/orion/make_grid.local b/modulefiles/tasks/orion/make_grid.local
new file mode 100644
index 0000000000..e7b20cb665
--- /dev/null
+++ b/modulefiles/tasks/orion/make_grid.local
@@ -0,0 +1,5 @@
+module use -a /apps/contrib/miniconda3-noaa-gsl/modulefiles
+module load miniconda3
+if [module-info mode load] {
+  system "conda activate regional_workflow"
+}
diff --git a/modulefiles/tasks/orion/make_ics.local b/modulefiles/tasks/orion/make_ics.local
new file mode 100644
index 0000000000..e7b20cb665
--- /dev/null
+++ b/modulefiles/tasks/orion/make_ics.local
@@ -0,0 +1,5 @@
+module use -a /apps/contrib/miniconda3-noaa-gsl/modulefiles
+module load miniconda3
+if [module-info mode load] {
+  system "conda activate regional_workflow"
+}
diff --git a/modulefiles/tasks/orion/make_lbcs.local b/modulefiles/tasks/orion/make_lbcs.local
new file mode 100644
index 0000000000..e7b20cb665
--- /dev/null
+++ b/modulefiles/tasks/orion/make_lbcs.local
@@ -0,0 +1,5 @@
+module use -a /apps/contrib/miniconda3-noaa-gsl/modulefiles
+module load miniconda3
+if [module-info mode load] {
+  system "conda activate regional_workflow"
+}
diff --git a/modulefiles/tasks/orion/run_fcst.local b/modulefiles/tasks/orion/run_fcst.local
new file mode 100644
index 0000000000..e7b20cb665
--- /dev/null
+++ b/modulefiles/tasks/orion/run_fcst.local
@@ -0,0 +1,5 @@
+module use -a /apps/contrib/miniconda3-noaa-gsl/modulefiles
+module load miniconda3
+if [module-info mode load] {
+  system "conda activate regional_workflow"
+}
diff --git a/scripts/exregional_make_grid.sh b/scripts/exregional_make_grid.sh
index 639bf3ab0d..a7921b5bbf 100755
--- a/scripts/exregional_make_grid.sh
+++ b/scripts/exregional_make_grid.sh
@@ -82,8 +82,6 @@ print_input_args valid_args
 #
 # The orography code runs with threads. On Cray, the code is optimized
 # for six threads. Do not change.
-# Note that OMP_NUM_THREADS and OMP_STACKSIZE only affect the threaded   <== I don't think this is true. Remove??
-# executions on Cray; they don't affect executions on theia.
 #
 #-----------------------------------------------------------------------
 #
@@ -92,88 +90,75 @@ export OMP_STACKSIZE=2048m
 #
 #-----------------------------------------------------------------------
 #
-# Load modules and set various computational parameters and directories.
-#
-# Note:
-# These module loads should all be moved to modulefiles. This has been
-# done for Hera but must still be done for other machines.
+# Set the machine-dependent run command. Also, set resource limits as
+# necessary.
 #
 #-----------------------------------------------------------------------
 #
 case $MACHINE in
+  "WCOSS_CRAY")
+    { save_shell_opts; set +x; } > /dev/null 2>&1
+    . $MODULESHOME/init/sh
+    module load PrgEnv-intel cfp-intel-sandybridge/1.1.0
+    module list
+    { restore_shell_opts; } > /dev/null 2>&1
+    export NODES=1
+    export APRUN="aprun -n 1 -N 1 -j 1 -d 1 -cc depth"
+    export KMP_AFFINITY=disabled
+    ulimit -s unlimited
+    ulimit -a
+    ;;
+
+  "WCOSS_DELL_P3")
+    { save_shell_opts; set +x; } > /dev/null 2>&1
+    module list
+    { restore_shell_opts; } > /dev/null 2>&1
+    export APRUN="mpirun"
+    ulimit -s unlimited
+    ;;
+
+  "HERA")
+    APRUN="time"
+    ;;
+
+  "ORION")
+    APRUN="time"
+    ;;
+
+  "JET")
+    APRUN="time"
+    ulimit -a
+    ;;
+
+  "ODIN")
+    export APRUN="srun -n 1"
+    ulimit -s unlimited
+    ulimit -a
+    ;;
+
+  "CHEYENNE")
+    APRUN="time"
+    ;;
+
+  "STAMPEDE")
+    export APRUN="time"
+    ulimit -s unlimited
+    ulimit -a
+    ;;
+
+  *)
+    print_err_msg_exit "\
+Run command has not been specified for this machine:
+  MACHINE = \"$MACHINE\"
+  APRUN = \"$APRUN\""
+    ;;
-"WCOSS_CRAY")
-#
-  { save_shell_opts; set +x; } > /dev/null 2>&1
-
-  . $MODULESHOME/init/sh
-  module load PrgEnv-intel cfp-intel-sandybridge/1.1.0
-  module list
-
-  { restore_shell_opts; } > /dev/null 2>&1
-
-  export NODES=1
-  export APRUN="aprun -n 1 -N 1 -j 1 -d 1 -cc depth"
-  export KMP_AFFINITY=disabled
-
-  ulimit -s unlimited
-  ulimit -a
-  ;;
-
-"WCOSS_DELL_P3")
-#
-  { save_shell_opts; set +x; } > /dev/null 2>&1
-
-  module list
-
-  { restore_shell_opts; } > /dev/null 2>&1
-
-  export APRUN="mpirun"
-
-  ulimit -s unlimited
-  ;;
-
-"HERA")
-#
-  APRUN="time"
-#
-#  ulimit -s unlimited
-#  ulimit -a
-  ;;
-#
-
-"JET")
-#
-  APRUN="time"
-  ulimit -a
-  ;;
-
-
-"ODIN")
-#
-  export APRUN="srun -n 1"
-
-  ulimit -s unlimited
-  ulimit -a
-  ;;
-
-"CHEYENNE")
-  APRUN="time"
-  ;;
-
-"STAMPEDE")
-#
-  export APRUN="time"
-
-  ulimit -s unlimited
-  ulimit -a
-  ;;
 esac
 #
 #-----------------------------------------------------------------------
 #
-# Create the (cycle-independent) subdirectories under the experiment
+# Create the (cycle-independent) subdirectories under the experiment
 # directory (EXPTDIR) that are needed by the various steps and substeps
 # in this script.
 #
@@ -219,12 +204,12 @@ mkdir_vrfy -p "$tmpdir"
 # size specified by the argument to the --halo flag does not extend be-
 # yond the boundaries of the parent grid (tile 6). In this case, since
 # the values passed to the --istart_nest, ..., and --jend_nest flags al-
-# ready include a halo (because these arguments are
+# ready include a halo (because these arguments are
 #
-# ${ISTART_OF_RGNL_DOM_WITH_WIDE_HALO_ON_T6SG},
-# ${IEND_OF_RGNL_DOM_WITH_WIDE_HALO_ON_T6SG},
+# ${ISTART_OF_RGNL_DOM_WITH_WIDE_HALO_ON_T6SG},
+# ${IEND_OF_RGNL_DOM_WITH_WIDE_HALO_ON_T6SG},
 # ${JSTART_OF_RGNL_DOM_WITH_WIDE_HALO_ON_T6SG}, and
-# ${JEND_OF_RGNL_DOM_WITH_WIDE_HALO_ON_T6SG},
+# ${JEND_OF_RGNL_DOM_WITH_WIDE_HALO_ON_T6SG},
 #
 # i.e. they include "WITH_WIDE_HALO_" in their names), it is reasonable
 # to pass as the argument to --halo a zero. However, make_hgrid re-
@@ -242,7 +227,7 @@ mkdir_vrfy -p "$tmpdir"
 # --nlon 2*${RES} \
 # --grid_name C${RES}_grid \
 # --do_schmidt --stretch_factor ${STRETCH_FAC} \
-# --target_lon ${LON_CTR}
+# --target_lon ${LON_CTR}
 # --target_lat ${LAT_CTR} \
 # --nest_grid --parent_tile 6 --refine_ratio ${GFDLgrid_REFINE_RATIO} \
 # --istart_nest ${ISTART_OF_RGNL_DOM_WITH_WIDE_HALO_ON_T6SG} \
@@ -347,7 +332,7 @@ if [ "${GRID_GEN_METHOD}" = "GFDLgrid" ]; then
     --halo 1 \
     --great_circle_algorithm || \
   print_err_msg_exit "\
-Call to executable (exec_fp) that generates grid files returned with
+Call to executable (exec_fp) that generates grid files returned with
 nonzero exit code.
   exec_fp = \"${exec_fp}\""
 #
@@ -365,15 +350,15 @@ elif [ "${GRID_GEN_METHOD}" = "ESGgrid" ]; then
   rgnl_grid_nml_fp="$tmpdir/${RGNL_GRID_NML_FN}"
 
   print_info_msg "$VERBOSE" "
-Creating namelist file (rgnl_grid_nml_fp) to be read in by the grid
+Creating namelist file (rgnl_grid_nml_fp) to be read in by the grid
 generation executable (exec_fp):
   rgnl_grid_nml_fp = \"${rgnl_grid_nml_fp}\"
   exec_fp = \"${exec_fp}\""
 #
-# Create a multiline variable that consists of a yaml-compliant string
-# specifying the values that the namelist variables need to be set to
-# (one namelist variable per line, plus a header and footer). Below,
-# this variable will be passed to a python script that will create the
+# Create a multiline variable that consists of a yaml-compliant string
+# specifying the values that the namelist variables need to be set to
+# (one namelist variable per line, plus a header and footer). Below,
+# this variable will be passed to a python script that will create the
 # namelist file.
 #
   settings="
@@ -391,8 +376,8 @@ generation executable (exec_fp):
 #
 ${USHDIR}/set_namelist.py -q -u "$settings" -o ${rgnl_grid_nml_fp} || \
   print_err_msg_exit "\
-Call to python script set_namelist.py to set the variables in the
-regional_esg_grid namelist file failed. Parameters passed to this script
+Call to python script set_namelist.py to set the variables in the
+regional_esg_grid namelist file failed. Parameters passed to this script
 are:
   Full path to output namelist file:
     rgnl_grid_nml_fp = \"${rgn_grid_nml_fp}\"
@@ -406,9 +391,9 @@ $settings"
     print_err_msg_exit "\
 Call to executable (exec_fp) that generates a ESGgrid-type regional grid
 returned with nonzero exit code:
-  exec_fp = \"${exec_fp}\""
+  exec_fp = \"${exec_fp}\""
 #
-# Set the name of the regional grid file generated by the above call.
+# Set the name of the regional grid file generated by the above call.
 # This must be the same name as in the regional_esg_grid code.
 #
   grid_fn="regional_grid.nc"
@@ -496,7 +481,7 @@ mv_vrfy "${grid_fp_orig}" "${grid_fp}"
 # be using (i.e. if RUN_TASK_MAKE_OROG or RUN_TASK_MAKE_SURF_CLIMO is set
 # to "FALSE", in which case RES_IN_FIXLAM_FILENAMES will not be set to a
 # null string), check that the grid resolution contained in the variable
-# CRES set above matches the resolution appearing in the names of the
+# CRES set above matches the resolution appearing in the names of the
 # preexisting orography and/or surface climatology files.
 #
 #-----------------------------------------------------------------------
 #
@@ -505,7 +490,7 @@ if [ ! -z "${RES_IN_FIXLAM_FILENAMES}" ]; then
   res="${CRES:1}"
   if [ "$res" -ne "${RES_IN_FIXLAM_FILENAMES}" ]; then
     print_err_msg_exit "\
-The resolution (res) calculated for the grid does not match the resolution
+The resolution (res) calculated for the grid does not match the resolution
 (RES_IN_FIXLAM_FILENAMES) appearing in the names of the orography and/or
 surface climatology files:
   res = \"$res\"
@@ -515,10 +500,10 @@ fi
 #
 #-----------------------------------------------------------------------
 #
-# Partially "shave" the halo from the grid file having a wide halo to
+# Partially "shave" the halo from the grid file having a wide halo to
 # generate two new grid files -- one with a 3-grid-wide halo and another
 # with a 4-cell-wide halo. These are needed as inputs by the forecast
-# model as well as by the code (chgres_cube) that generates the lateral
+# model as well as by the code (chgres_cube) that generates the lateral
 # boundary condition files. <== Are these also needed by make_sfc_climo???
 #
 #-----------------------------------------------------------------------
 #
@@ -529,27 +514,27 @@ exec_fn="shave"
 exec_fp="$EXECDIR/${exec_fn}"
 if [ ! -f "${exec_fp}" ]; then
   print_err_msg_exit "\
-The executable (exec_fp) for \"shaving\" down the halo in the grid file
+The executable (exec_fp) for \"shaving\" down the halo in the grid file
 does not exist:
   exec_fp = \"${exec_fp}\"
 Please ensure that you've built this executable."
 fi
 #
 # Set the full path to the "unshaved" grid file, i.e. the one with a wide
-# halo. This is the input grid file for generating both the grid file
+# halo. This is the input grid file for generating both the grid file
 # with a 3-cell-wide halo and the one with a 4-cell-wide halo.
 #
 unshaved_fp="${grid_fp}"
 #
-# We perform the work in tmpdir, so change location to that directory.
-# Once it is complete, we will move the resultant file from tmpdir to
+# We perform the work in tmpdir, so change location to that directory.
+# Once it is complete, we will move the resultant file from tmpdir to
 # GRID_DIR.
 #
 cd_vrfy "$tmpdir"
 #
 # Create an input namelist file for the shave executable to generate a
-# grid file with a 3-cell-wide halo from the one with a wide halo. Then
-# call the shave executable. Finally, move the resultant file to the
+# grid file with a 3-cell-wide halo from the one with a wide halo. Then
+# call the shave executable. Finally, move the resultant file to the
 # GRID_DIR directory.
 #
 print_info_msg "$VERBOSE" "
@@ -574,8 +559,8 @@ The namelist file (nml_fn) used in this call is in directory tmpdir:
 mv_vrfy ${shaved_fp} ${GRID_DIR}
 #
 # Create an input namelist file for the shave executable to generate a
-# grid file with a 4-cell-wide halo from the one with a wide halo. Then
-# call the shave executable. Finally, move the resultant file to the
+# grid file with a 4-cell-wide halo from the one with a wide halo. Then
+# call the shave executable. Finally, move the resultant file to the
 # GRID_DIR directory.
 #
 print_info_msg "$VERBOSE" "
@@ -647,7 +632,7 @@ halo failed."
 #
 #-----------------------------------------------------------------------
 #
-# Create symlinks in the FIXLAM directory to the grid and mosaic files
+# Create symlinks in the FIXLAM directory to the grid and mosaic files
 # generated above in the GRID_DIR directory.
 #
 #-----------------------------------------------------------------------
@@ -661,28 +646,28 @@ failed."
 #
 #-----------------------------------------------------------------------
 #
-# Call a function (set_FV3nml_sfc_climo_filenames) to set the values of
-# those variables in the forecast model's namelist file that specify the
-# paths to the surface climatology files. These files will either already
+# Call a function (set_FV3nml_sfc_climo_filenames) to set the values of
+# those variables in the forecast model's namelist file that specify the
+# paths to the surface climatology files. These files will either already
 # be avaialable in a user-specified directory (SFC_CLIMO_DIR) or will be
-# generated by the MAKE_SFC_CLIMO_TN task. They (or symlinks to them)
+# generated by the MAKE_SFC_CLIMO_TN task. They (or symlinks to them)
 # will be placed (or wll already exist) in the FIXLAM directory.
 #
-# Also, if running ensemble forecasts, call a function (set_FV3nml_stoch_params)
-# to create a new FV3 namelist file for each ensemble member that contains
-# a unique set of stochastic parameters (i.e. relative to the namelist
+# Also, if running ensemble forecasts, call a function (set_FV3nml_stoch_params)
+# to create a new FV3 namelist file for each ensemble member that contains
+# a unique set of stochastic parameters (i.e. relative to the namelist
 # files of the other members).
 #
-# Note that unless RUN_TASK_MAKE_GRID is set to "FALSE", the call to
-# set_FV3nml_sfc_climo_filenames has to be performed here instead of
-# earlier during experiment generation because the surface climatology
-# file names depend on the grid resolution variable CRES, and that may
+# Note that unless RUN_TASK_MAKE_GRID is set to "FALSE", the call to
+# set_FV3nml_sfc_climo_filenames has to be performed here instead of
+# earlier during experiment generation because the surface climatology
+# file names depend on the grid resolution variable CRES, and that may
 # not be available until the above steps in this script have been performed.
 #
-# Similarly, unless RUN_TASK_MAKE_GRID is set to "FALSE", the call to
-# set_FV3nml_stoch_params must be performed here because it uses the
+# Similarly, unless RUN_TASK_MAKE_GRID is set to "FALSE", the call to
+# set_FV3nml_stoch_params must be performed here because it uses the
 # namelist file generated by the call to set_FV3nml_sfc_climo_filenames
-# as a starting point (base) and modifies it to add the stochastic
+# as a starting point (base) and modifies it to add the stochastic
 # parameters. Thus, the changes made by set_FV3nml_sfc_climo_filenames
 # must already be in the base namelist file.
 #
@@ -699,7 +684,7 @@ for the various ensemble members failed."
 fi
 
 create_diag_table_files || print_err_msg_exit "\
-Call to function to create a diagnostics table file under each cycle
+Call to function to create a diagnostics table file under each cycle
 directory failed."
 #
 #-----------------------------------------------------------------------
diff --git a/scripts/exregional_make_ics.sh b/scripts/exregional_make_ics.sh
index 7463de5fb7..e684657b5d 100755
--- a/scripts/exregional_make_ics.sh
+++ b/scripts/exregional_make_ics.sh
@@ -93,6 +93,11 @@ case "$MACHINE" in
     APRUN="srun"
     ;;
 
+  "ORION")
+    ulimit -s unlimited
+    APRUN="srun"
+    ;;
+
   "JET")
     ulimit -s unlimited
     APRUN="srun"
diff --git a/scripts/exregional_make_lbcs.sh b/scripts/exregional_make_lbcs.sh
index 7967c68dd1..61dd0ebffd 100755
--- a/scripts/exregional_make_lbcs.sh
+++ b/scripts/exregional_make_lbcs.sh
@@ -93,6 +93,11 @@ case "$MACHINE" in
     APRUN="srun"
     ;;
 
+  "ORION")
+    ulimit -s unlimited
+    APRUN="srun"
+    ;;
+
   "JET")
     ulimit -s unlimited
     APRUN="srun"
diff --git a/scripts/exregional_make_orog.sh b/scripts/exregional_make_orog.sh
index 77d5b6c219..bc817088df 100755
--- a/scripts/exregional_make_orog.sh
+++ b/scripts/exregional_make_orog.sh
@@ -78,8 +78,6 @@ print_input_args valid_args
 #
 # The orography code runs with threads. On Cray, the code is optimized
 # for six threads. Do not change.
-# Note that OMP_NUM_THREADS and OMP_STACKSIZE only affect the threaded   <== I don't think this is true. Remove??
-# executions on Cray; they don't affect executions on theia.
 #
 #-----------------------------------------------------------------------
 #
@@ -98,61 +96,63 @@ export OMP_STACKSIZE=2048m
 #
 case $MACHINE in
-
-"WCOSS_CRAY")
-#
-  { save_shell_opts; set +x; } > /dev/null 2>&1
-
-  . $MODULESHOME/init/sh
-  module load PrgEnv-intel cfp-intel-sandybridge/1.1.0
-  module list
-
-  { restore_shell_opts; } > /dev/null 2>&1
-
-  export NODES=1
-  export APRUN="aprun -n 1 -N 1 -j 1 -d 1 -cc depth"
-  export KMP_AFFINITY=disabled
-
-  ulimit -s unlimited
-  ulimit -a
-  ;;
-
-"WCOSS_DELL_P3")
-  ulimit -s unlimited
-  ulimit -a
-  APRUN="mpirun"
-  ;;
-
-"HERA")
-  ulimit -s unlimited
-  ulimit -a
-  APRUN="time"
-  ;;
-
-
-"JET")
-  ulimit -s unlimited
-  ulimit -a
-  export APRUN="time"
-  ;;
-
-
-"ODIN")
-#
-  export APRUN="srun -n 1"
-
-  ulimit -s unlimited
-  ulimit -a
-  ;;
-
-
-"CHEYENNE")
-  APRUN="time"
-  ;;
-
-"STAMPEDE")
-  export APRUN="time"
-  ;;
+  "WCOSS_CRAY")
+    { save_shell_opts; set +x; } > /dev/null 2>&1
+    . $MODULESHOME/init/sh
+    module load PrgEnv-intel cfp-intel-sandybridge/1.1.0
+    module list
+    { restore_shell_opts; } > /dev/null 2>&1
+    export NODES=1
+    export APRUN="aprun -n 1 -N 1 -j 1 -d 1 -cc depth"
+    export KMP_AFFINITY=disabled
+    ulimit -s unlimited
+    ulimit -a
+    ;;
+
+  "WCOSS_DELL_P3")
+    ulimit -s unlimited
+    ulimit -a
+    APRUN="mpirun"
+    ;;
+
+  "HERA")
+    ulimit -s unlimited
+    ulimit -a
+    APRUN="time"
+    ;;
+
+  "ORION")
+    ulimit -s unlimited
+    ulimit -a
+    APRUN="time"
+    ;;
+
+  "JET")
+    ulimit -s unlimited
+    ulimit -a
+    export APRUN="time"
+    ;;
+
+  "ODIN")
+    export APRUN="srun -n 1"
+    ulimit -s unlimited
+    ulimit -a
+    ;;
+
+  "CHEYENNE")
+    APRUN="time"
+    ;;
+
+  "STAMPEDE")
+    export APRUN="time"
+    ;;
+
+  *)
+    print_err_msg_exit "\
+Run command has not been specified for this machine:
+  MACHINE = \"$MACHINE\"
+  APRUN = \"$APRUN\""
+    ;;
 esac
 #
@@ -197,7 +197,7 @@ fi
 #
 # Create a temporary (work) directory in which to generate the raw orography
 # file and change location to it.
-# 
+#
 tmp_dir="${raw_dir}/tmp"
 mkdir_vrfy -p "${tmp_dir}"
 cd_vrfy "${tmp_dir}"
@@ -235,7 +235,7 @@ grid_fp="${FIXLAM}/${grid_fn}"
 # them to a text file.
 #
 # Note that it doesn't matter what lonb and latb are set to below because
-# if we specify an input grid file to the executable read in (which is
+# if we specify an input grid file to the executable read in (which is
 # what we do below), then if lonb and latb are not set to the dimensions
 # of the grid specified in that file (divided by 2 since the grid file
 # specifies a "supergrid"), then lonb and latb effectively get reset to
@@ -269,7 +269,7 @@ cat "${input_redirect_fn}"
 #
 #-----------------------------------------------------------------------
 #
-# Call the executable to generate the raw orography file corresponding
+# Call the executable to generate the raw orography file corresponding
 # to tile 7 (the regional domain) only.
 #
 # The following will create an orography file named
@@ -287,21 +287,18 @@ cat "${input_redirect_fn}"
 print_info_msg "$VERBOSE" "
 Starting orography file generation..."
-
 case $MACHINE in
-
-"WCOSS_CRAY")
+  "WCOSS_CRAY")
 #
 # On WCOSS and WCOSS_C, use cfp to run multiple tiles simulatneously for
 # the orography. For now, we have only one tile in the regional case,
 # but in the future we will have more. First, create an input file for
 # cfp.
 #
-  ufs_utils_ushdir="${UFS_UTILS_DIR}/ush"
-  res="0"   # What should this be set to???
-
-  printf "%s\n" "\
+    ufs_utils_ushdir="${UFS_UTILS_DIR}/ush"
+    res="0"   # What should this be set to???
+    printf "%s\n" "\
 ${ufs_utils_ushdir}/${orog_gen_scr} \
 $res \
 ${TILE_RGNL} \
@@ -310,34 +307,28 @@ ${raw_dir} \
 ${UFS_UTILS_DIR} \
 ${TOPO_DIR} \
 ${tmp_dir}" \
-    >> ${tmp_dir}/orog.file1
-
-  aprun -j 1 -n 4 -N 4 -d 6 -cc depth cfp ${tmp_dir}/orog.file1
-  rm_vrfy ${tmp_dir}/orog.file1
-  ;;
-
-
-"WCOSS_DELL_P3")
-
-  ufs_utils_ushdir="${UFS_UTILS_DIR}/ush"
-  res="0"   # What should this be set to???
-
-  "${exec_fp}" < "${input_redirect_fn}" || \
-  print_err_msg_exit "\
+      >> ${tmp_dir}/orog.file1
+    aprun -j 1 -n 4 -N 4 -d 6 -cc depth cfp ${tmp_dir}/orog.file1
+    rm_vrfy ${tmp_dir}/orog.file1
+    ;;
+
+  "WCOSS_DELL_P3")
+    ufs_utils_ushdir="${UFS_UTILS_DIR}/ush"
+    res="0"   # What should this be set to???
+    "${exec_fp}" < "${input_redirect_fn}" || \
+    print_err_msg_exit "\
 Call to executable (exec_fp) that generates the raw orography file returned
 with nonzero exit code:
   exec_fp = \"${exec_fp}\""
-  ;;
+    ;;
 
-
-"CHEYENNE" | "HERA" | "JET" | "ODIN" | "STAMPEDE")
-  $APRUN "${exec_fp}" < "${input_redirect_fn}" || \
-  print_err_msg_exit "\
-Call to executable (exec_fp) that generates the raw orography file returned
+  "CHEYENNE" | "HERA" | "ORION" | "JET" | "ODIN" | "STAMPEDE")
+    $APRUN "${exec_fp}" < "${input_redirect_fn}" || \
+    print_err_msg_exit "\
+Call to executable (exec_fp) that generates the raw orography file returned
 with nonzero exit code:
   exec_fp = \"${exec_fp}\""
-  ;;
-
+    ;;
 esac
 #
@@ -365,22 +356,22 @@ mv_vrfy "${raw_orog_fp_orig}" "${raw_orog_fp}"
 #
 # Note that the orography filtering code assumes that the regional grid
 # is a GFDLgrid type of grid; it is not designed to handle ESGgrid type
-# regional grids. If the flag "regional" in the orography filtering
+# regional grids. If the flag "regional" in the orography filtering
 # namelist file is set to .TRUE. (which it always is will be here; see
-# below), then filtering code will first calculate a resolution (i.e.
+# below), then filtering code will first calculate a resolution (i.e.
 # number of grid points) value named res_regional for the assumed GFDLgrid
 # type regional grid using the formula
 #
 #   res_regional = res*stretch_fac*real(refine_ratio)
 #
 # Here res, stretch_fac, and refine_ratio are the values passed to the
-# code via the namelist. res and stretch_fac are assumed to be the
+# code via the namelist. res and stretch_fac are assumed to be the
 # resolution (in terms of number of grid points) and the stretch factor
 # of the (GFDLgrid type) regional grid's parent global cubed-sphere grid,
 # and refine_ratio is the ratio of the number of grid cells on the regional
 # grid to a single cell on tile 6 of the parent global grid. After
 # calculating res_regional, the code interpolates/extrapolates between/
-# beyond a set of (currently 7) resolution values for which the four
+# beyond a set of (currently 7) resolution values for which the four
 # filtering parameters (n_del2_weak, cd4, max_slope, peak_fac) are provided
 # (by GFDL) to obtain the corresponding values of these parameters at a
 # resolution of res_regional. These interpolated/extrapolated values are
diff --git a/scripts/exregional_make_sfc_climo.sh b/scripts/exregional_make_sfc_climo.sh
index b7709a615b..31f2c877f1 100755
--- a/scripts/exregional_make_sfc_climo.sh
+++ b/scripts/exregional_make_sfc_climo.sh
@@ -122,58 +122,60 @@ EOF
 #
 #-----------------------------------------------------------------------
 #
-# Set the run machine-dependent run command.
+# Set the machine-dependent run command.
 #
 #-----------------------------------------------------------------------
 #
 case $MACHINE in
-"WCOSS_CRAY")
-  APRUN=${APRUN:-"aprun -j 1 -n 6 -N 6"}
-  ;;
-
-"WCOSS_DELL_P3")
+  "WCOSS_CRAY")
+    APRUN=${APRUN:-"aprun -j 1 -n 6 -N 6"}
+    ;;
+  "WCOSS_DELL_P3")
 # Specify computational resources.
-  export NODES=2
-  export ntasks=48
-  export ptile=24
-  export threads=1
-  export MP_LABELIO=yes
-  export OMP_NUM_THREADS=$threads
+    export NODES=2
+    export ntasks=48
+    export ptile=24
+    export threads=1
+    export MP_LABELIO=yes
+    export OMP_NUM_THREADS=$threads
+    APRUN="mpirun"
+    ;;
 
-  APRUN="mpirun"
-  ;;
+  "HERA")
+    APRUN="srun"
+    ;;
 
-"HERA")
-  APRUN="srun"
-  ;;
+  "ORION")
+    APRUN="srun"
+    ;;
 
-"JET")
-  APRUN="srun"
-  ;;
+  "JET")
+    APRUN="srun"
+    ;;
 
-"CHEYENNE")
-  nprocs=$(( NNODES_MAKE_SFC_CLIMO*PPN_MAKE_SFC_CLIMO ))
-  APRUN="mpirun -np $nprocs"
-  ;;
+  "CHEYENNE")
+    nprocs=$(( NNODES_MAKE_SFC_CLIMO*PPN_MAKE_SFC_CLIMO ))
+    APRUN="mpirun -np $nprocs"
+    ;;
 
-"ODIN")
-  nprocs=$(( NNODES_MAKE_SFC_CLIMO*PPN_MAKE_SFC_CLIMO ))
-  APRUN="srun -n $nprocs"
-  ;;
+  "ODIN")
+    nprocs=$(( NNODES_MAKE_SFC_CLIMO*PPN_MAKE_SFC_CLIMO ))
+    APRUN="srun -n $nprocs"
+    ;;
 
-"STAMPEDE")
-  nprocs=$(( NNODES_MAKE_SFC_CLIMO*PPN_MAKE_SFC_CLIMO ))
-  APRUN="ibrun -np ${nprocs}"
-  ;;
+  "STAMPEDE")
+    nprocs=$(( NNODES_MAKE_SFC_CLIMO*PPN_MAKE_SFC_CLIMO ))
+    APRUN="ibrun -np ${nprocs}"
+    ;;
 
-*)
-  print_err_msg_exit "\
+  *)
+    print_err_msg_exit "\
 Run command has not been specified for this machine:
   MACHINE = \"$MACHINE\"
   APRUN = \"$APRUN\""
-  ;;
+    ;;
 esac
 #
diff --git a/scripts/exregional_run_post.sh b/scripts/exregional_run_post.sh
index 650126bf85..957c19c210 100755
--- a/scripts/exregional_run_post.sh
+++ b/scripts/exregional_run_post.sh
@@ -80,59 +80,67 @@ print_input_args valid_args
 #
 #-----------------------------------------------------------------------
 #
-print_info_msg "$VERBOSE" "
-Starting post-processing for fhr = $fhr hr..."
-
 case $MACHINE in
-"WCOSS_CRAY")
+  "WCOSS_CRAY")
 # Specify computational resources.
-  export NODES=2
-  export ntasks=48
-  export ptile=24
-  export threads=1
-  export MP_LABELIO=yes
-  export OMP_NUM_THREADS=$threads
+    export NODES=2
+    export ntasks=48
+    export ptile=24
+    export threads=1
+    export MP_LABELIO=yes
+    export OMP_NUM_THREADS=$threads
 
-  APRUN="aprun -j 1 -n${ntasks} -N${ptile} -d${threads} -cc depth"
-  ;;
+    APRUN="aprun -j 1 -n${ntasks} -N${ptile} -d${threads} -cc depth"
+    ;;
 
-"WCOSS_DELL_P3")
+  "WCOSS_DELL_P3")
 # Specify computational resources.
-  export NODES=2
-  export ntasks=48
-  export ptile=24
-  export threads=1
-  export MP_LABELIO=yes
-  export OMP_NUM_THREADS=$threads
-
-  APRUN="mpirun"
-  ;;
+    export NODES=2
+    export ntasks=48
+    export ptile=24
+    export threads=1
+    export MP_LABELIO=yes
+    export OMP_NUM_THREADS=$threads
+
+    APRUN="mpirun"
+    ;;
+
+  "HERA")
+    APRUN="srun"
+    ;;
 
-"HERA")
-  APRUN="srun"
-  ;;
+  "ORION")
+    APRUN="srun"
+    ;;
 
-"JET")
-  APRUN="srun"
-  ;;
+  "JET")
+    APRUN="srun"
+    ;;
 
-"ODIN")
-  APRUN="srun -n 1"
-  ;;
+  "ODIN")
+    APRUN="srun -n 1"
+    ;;
 
-"CHEYENNE")
-  module list
-  nprocs=$(( NNODES_RUN_POST*PPN_RUN_POST ))
-  APRUN="mpirun -np $nprocs"
-  ;;
+  "CHEYENNE")
+    module list
+    nprocs=$(( NNODES_RUN_POST*PPN_RUN_POST ))
+    APRUN="mpirun -np $nprocs"
+    ;;
 
-"STAMPEDE")
-  nprocs=$(( NNODES_RUN_POST*PPN_RUN_POST ))
-  APRUN="ibrun -n $nprocs"
-  ;;
+  "STAMPEDE")
+    nprocs=$(( NNODES_RUN_POST*PPN_RUN_POST ))
+    APRUN="ibrun -n $nprocs"
+    ;;
+
+  *)
+    print_err_msg_exit "\
+Run command has not been specified for this machine:
+  MACHINE = \"$MACHINE\"
+  APRUN = \"$APRUN\""
+    ;;
 esac
 #
@@ -148,7 +156,7 @@ if [ ${USE_CUSTOM_POST_CONFIG_FILE} = "TRUE" ]; then
   post_config_fp="${CUSTOM_POST_CONFIG_FP}"
   print_info_msg "
 ====================================================================
-Copying the user-defined post flat file specified by CUSTOM_POST_CONFIG_FP
+Copying the user-defined post flat file specified by CUSTOM_POST_CONFIG_FP
 to the post forecast hour directory (fhr_dir):
   CUSTOM_POST_CONFIG_FP = \"${CUSTOM_POST_CONFIG_FP}\"
   fhr_dir = \"${fhr_dir}\"
@@ -157,7 +165,7 @@ else
   post_config_fp="${EMC_POST_DIR}/parm/postxconfig-NT-fv3lam.txt"
   print_info_msg "
 ====================================================================
-Copying the default post flat file specified by post_config_fp to the post
+Copying the default post flat file specified by post_config_fp to the post
 forecast hour directory (fhr_dir):
   post_config_fp = \"${post_config_fp}\"
   fhr_dir = \"${fhr_dir}\"
@@ -169,7 +177,7 @@ cp_vrfy ${EXECDIR}/ncep_post .
 #
 #-----------------------------------------------------------------------
 #
-# Get the cycle date and hour (in formats of yyyymmdd and hh, respectively)
+# Get the cycle date and hour (in formats of yyyymmdd and hh, respectively)
 # from cdate.
 #
 #-----------------------------------------------------------------------
 #
@@ -214,6 +222,9 @@ EOF
 #
 #-----------------------------------------------------------------------
 #
+print_info_msg "$VERBOSE" "
+Starting post-processing for fhr = $fhr hr..."
+
 ${APRUN} ./ncep_post < itag || print_err_msg_exit "\
 Call to executable to run post for forecast hour $fhr returned with non-
 zero exit code."
@@ -221,14 +232,14 @@
 #-----------------------------------------------------------------------
 #
 # Move (and rename) the output files from the work directory to their
-# final location (postprd_dir). Then delete the work directory.
+# final location (postprd_dir). Then delete the work directory.
 #
 #-----------------------------------------------------------------------
 #
 
 #
 #-----------------------------------------------------------------------
 #
-# A separate ${post_fhr} forecast hour variable is required for the post
+# A separate ${post_fhr} forecast hour variable is required for the post
 # files, since they may or may not be three digits long, depending on the
 # length of the forecast.
 #
diff --git a/tests/run_experiments.sh b/tests/run_experiments.sh
index 9eabafd68f..c11afdd095 100755
--- a/tests/run_experiments.sh
+++ b/tests/run_experiments.sh
@@ -1,4 +1,4 @@
-#!/bin/bash -l
+#!/bin/bash
 
 #
 #-----------------------------------------------------------------------
@@ -706,6 +706,8 @@ PTMP=\"${PTMP}\""
     extrn_mdl_source_basedir="/mnt/lfs1/BMC/fim/Gerard.Ketefian/UFS_CAM/staged_extrn_mdl_files"
   elif [ "$MACHINE" = "CHEYENNE" ]; then
     extrn_mdl_source_basedir="/glade/p/ral/jntp/UFS_CAM/staged_extrn_mdl_files"
+  elif [ "$MACHINE" = "ORION" ]; then
+    extrn_mdl_source_basedir="/work/noaa/gsd-fv3-dev/gsketefia/UFS/staged_extrn_mdl_files"
   else
     print_err_msg_exit "\
 The base directory (extrn_mdl_source_basedir) in which the user-staged
diff --git a/ush/config.community.sh b/ush/config.community.sh
index 1b5b9018a1..e63716cf9c 100644
--- a/ush/config.community.sh
+++ b/ush/config.community.sh
@@ -2,10 +2,6 @@
 MACHINE="hera"
 ACCOUNT="an_account"
 EXPT_SUBDIR="test_community"
-QUEUE_DEFAULT="batch"
-QUEUE_HPSS="service"
-QUEUE_FCST="batch"
-
 VERBOSE="TRUE"
 
 RUN_ENVIR="community"
diff --git a/ush/config_defaults.sh b/ush/config_defaults.sh
index d9441b62f6..fa021b6dcf 100644
--- a/ush/config_defaults.sh
+++ b/ush/config_defaults.sh
@@ -51,36 +51,51 @@ RUN_ENVIR="nco"
 # order for the experiment generation script to set it depending on the
 # machine.
 #
+# PARTITION_DEFAULT:
+# If using the slurm job scheduler (i.e. if SCHED is set to "slurm"),
+# the default partition to which to submit workflow tasks. If a task
+# does not have a specific variable that specifies the partition to which
+# it will be submitted (e.g. PARTITION_HPSS, PARTITION_FCST; see below),
+# it will be submitted to the partition specified by this variable. If
+# this is not set or is set to an empty string, it will be (re)set to a
+# machine-dependent value. This is not used if SCHED is not set to
+# "slurm".
+#
 # QUEUE_DEFAULT:
-# The default queue to which workflow tasks are submitted. If a task
-# does not have a specific variable that specifies the queue to which it
-# will be submitted (e.g. QUEUE_HPSS, QUEUE_FCST; see below), it will be
-# submitted to the queue specified by this variable. If this is not set
-# or is set to an empty string, it will be (re)set to a machine-dependent
-# value.
+# The default queue or QOS (if using the slurm job scheduler, where QOS
+# is Quality of Service) to which workflow tasks are submitted. If a
+# task does not have a specific variable that specifies the queue to which
+# it will be submitted (e.g. QUEUE_HPSS, QUEUE_FCST; see below), it will
+# be submitted to the queue specified by this variable. If this is not
+# set or is set to an empty string, it will be (re)set to a machine-
+# dependent value.
 #
-# QUEUE_DEFAULT_TAG:
-# The rocoto xml tag to use for specifying the default queue. For most
-# platforms this should be "queue"
+# PARTITION_HPSS:
+# If using the slurm job scheduler (i.e. if SCHED is set to "slurm"),
+# the partition to which the tasks that get or create links to external
+# model files [which are needed to generate initial conditions (ICs) and
+# lateral boundary conditions (LBCs)] are submitted. If this is not set
+# or is set to an empty string, it will be (re)set to a machine-dependent
+# value. This is not used if SCHED is not set to "slurm".
 #
 # QUEUE_HPSS:
-# The queue to which the tasks that get or create links to external model
-# files [which are needed to generate initial conditions (ICs) and lateral
-# boundary conditions (LBCs)] are submitted. If this is not set or is
-# set to an empty string, it will be (re)set to a machine-dependent value.
+# The queue or QOS to which the tasks that get or create links to external
+# model files [which are needed to generate initial conditions (ICs) and
+# lateral boundary conditions (LBCs)] are submitted. If this is not set
+# or is set to an empty string, it will be (re)set to a machine-dependent
+# value.
 #
-# QUEUE_HPSS_TAG:
-# The rocoto xml tag to use for specifying the HPSS queue. For slurm-based
-# platforms this is typically "partition", for others it may be "queue"
+# PARTITION_FCST:
+# If using the slurm job scheduler (i.e. if SCHED is set to "slurm"),
+# the partition to which the task that runs forecasts is submitted. If
+# this is not set or set to an empty string, it will be (re)set to a
+# machine-dependent value. This is not used if SCHED is not set to
+# "slurm".
 #
 # QUEUE_FCST:
-# The queue to which the task that runs a forecast is submitted. If this
-# is not set or set to an empty string, it will be (re)set to a machine-
-# dependent value.
-#
-# QUEUE_FCST_TAG:
-# The rocoto xml tag to use for specifying the fcst queue. For most
-# platforms this should be "queue"
+# The queue or QOS to which the task that runs a forecast is submitted.
+# If this is not set or set to an empty string, it will be (re)set to a
+# machine-dependent value.
 #
 # mach_doc_end
 #
@@ -89,12 +104,12 @@ RUN_ENVIR="nco"
 MACHINE="BIG_COMPUTER"
 ACCOUNT="project_name"
 SCHED=""
+PARTITION_DEFAULT=""
 QUEUE_DEFAULT=""
-QUEUE_DEFAULT_TAG="queue"
+PARTITION_HPSS=""
 QUEUE_HPSS=""
-QUEUE_HPSS_TAG="partition"
+PARTITION_FCST=""
 QUEUE_FCST=""
-QUEUE_FCST_TAG="queue"
 #
 #-----------------------------------------------------------------------
 #
diff --git a/ush/generate_FV3LAM_wflow.sh b/ush/generate_FV3LAM_wflow.sh
index 7391b3456a..4104f4f3fb 100755
--- a/ush/generate_FV3LAM_wflow.sh
+++ b/ush/generate_FV3LAM_wflow.sh
@@ -165,12 +165,12 @@ settings="\
 #
   'account': $ACCOUNT
   'sched': $SCHED
+  'partition_default': ${PARTITION_DEFAULT}
   'queue_default': ${QUEUE_DEFAULT}
-  'queue_default_tag': ${QUEUE_DEFAULT_TAG}
+  'partition_hpss': ${PARTITION_HPSS}
   'queue_hpss': ${QUEUE_HPSS}
-  'queue_hpss_tag': ${QUEUE_HPSS_TAG}
+  'partition_fcst': ${PARTITION_FCST}
   'queue_fcst': ${QUEUE_FCST}
-  'queue_fcst_tag': ${QUEUE_FCST_TAG}
   'machine': ${MACHINE}
 #
 # Workflow task names.
@@ -201,7 +201,6 @@ settings="\
 #
   'ncores_run_fcst': ${PE_MEMBER01}
   'native_run_fcst': --cpus-per-task 4 --exclusive
-  'partition_run_fcst': sjet,vjet,kjet,xjet
 #
 # Number of logical processes per node for each task. If running without
 # threading, this is equal to the number of MPI processes per node.
diff --git a/ush/get_extrn_mdl_file_dir_info.sh b/ush/get_extrn_mdl_file_dir_info.sh
index 47c42cea6b..1933e3c007 100755
--- a/ush/get_extrn_mdl_file_dir_info.sh
+++ b/ush/get_extrn_mdl_file_dir_info.sh
@@ -560,6 +560,9 @@ bination of external model (extrn_mdl_name) and analysis or forecast
       "HERA")
         sysdir=""
         ;;
+      "ORION")
+        sysdir="$sysbasedir"
+        ;;
       "JET")
         sysdir=""
         ;;
@@ -594,6 +597,9 @@ has not been specified for this external model and machine combination:
       "HERA")
         sysdir="$sysbasedir/gfs.${yyyymmdd}/${hh}"
         ;;
+      "ORION")
+        sysdir="$sysbasedir"
+        ;;
       "JET")
         sysdir="$sysbasedir"
         ;;
@@ -628,6 +634,9 @@ has not been specified for this external model and machine combination:
       "HERA")
         sysdir="$sysbasedir"
         ;;
+      "ORION")
+        sysdir="$sysbasedir"
+        ;;
       "JET")
         sysdir="$sysbasedir/${yyyymmdd}${hh}/postprd"
         ;;
@@ -659,6 +668,9 @@ has not been specified for this external model and machine combination:
       "HERA")
         sysdir="$sysbasedir"
         ;;
+      "ORION")
+        sysdir="$sysbasedir"
+        ;;
       "JET")
         sysdir="$sysbasedir/${yyyymmdd}${hh}/postprd"
         ;;
@@ -689,6 +701,9 @@ has not been specified for this external model and machine combination:
       "HERA")
         sysdir="$sysbasedir"
         ;;
+      "ORION")
+        sysdir="$sysbasedir"
+        ;;
       "JET")
         sysdir="$sysbasedir"
         ;;
diff --git a/ush/launch_FV3LAM_wflow.sh b/ush/launch_FV3LAM_wflow.sh
index 8004e32e44..60910d812d 100755
--- a/ush/launch_FV3LAM_wflow.sh
+++ b/ush/launch_FV3LAM_wflow.sh
@@ -92,8 +92,12 @@ expt_name="${EXPT_SUBDIR}"
 #-----------------------------------------------------------------------
 #
 if [ "$MACHINE" != "CHEYENNE" ]; then
-  module purge
-  module load rocoto
+  if [ "$MACHINE" = "ORION" ]; then
+    module load contrib rocoto
+  else
+    module purge
+    module load rocoto
+  fi
 fi
 #
 #-----------------------------------------------------------------------
diff --git a/ush/load_modules_run_task.sh b/ush/load_modules_run_task.sh
index fb66658fd7..b469e5d775 100755
--- a/ush/load_modules_run_task.sh
+++ b/ush/load_modules_run_task.sh
@@ -114,6 +114,10 @@ case "$MACHINE" in
   "HERA")
     . /apps/lmod/lmod/init/sh
     ;;
+#
+  "ORION")
+    . /apps/lmod/lmod/init/sh
+    ;;
 #
   "JET")
     . /apps/lmod/lmod/init/sh
     ;;
@@ -271,23 +275,23 @@ Call to \"module use\" command failed."
 #
 if [ ${use_default_modulefile} -eq 0 ]; then
 
-  module use "${modules_dir}" || print_err_msg_exit "\
-  Call to \"module use\" command failed."
+  module use -a "${modules_dir}" || print_err_msg_exit "\
+Call to \"module use\" command failed."
 
-  module load ${modulefile_name} || print_err_msg_exit "\
-  Loading of module file (modulefile_name; in directory specified by mod-
-  ules_dir) for the specified task (task_name) failed:
-    task_name = \"${task_name}\"
-    modulefile_name = \"${modulefile_name}\"
-    modules_dir = \"${modules_dir}\""
+  module load "${modulefile_name}" || print_err_msg_exit "\
+Loading of module file (modulefile_name; in directory specified by mod-
+ules_dir) for the specified task (task_name) failed:
+  task_name = \"${task_name}\"
+  modulefile_name = \"${modulefile_name}\"
+  modules_dir = \"${modules_dir}\""
 
 else  # using default modulefile
 
-  module load ${default_modulefile_name} || print_err_msg_exit "\
-  Loading of default module file failed:
-    task_name = \"${task_name}\"
-    default_modulefile_name = \"${default_modulefile_name}\"
-    default_modules_dir = \"${default_modules_dir}\""
+  module load "${default_modulefile_name}" || print_err_msg_exit "\
+Loading of default module file failed:
+  task_name = \"${task_name}\"
+  default_modulefile_name = \"${default_modulefile_name}\"
+  default_modules_dir = \"${default_modules_dir}\""
 
 fi
diff --git a/ush/set_extrn_mdl_params.sh b/ush/set_extrn_mdl_params.sh
index 7a5b8104d2..e28dad99ce 100644
--- a/ush/set_extrn_mdl_params.sh
+++ b/ush/set_extrn_mdl_params.sh
@@ -1,9 +1,9 @@
 #
 #-----------------------------------------------------------------------
 #
-# This file defines and then calls a function that sets parameters rela-
-# ting to the external model used for initial conditions (ICs) and the
-# one used for lateral boundary conditions (LBCs).
+# This file defines and then calls a function that sets parameters
+# associated with the external model used for initial conditions (ICs)
+# and the one used for lateral boundary conditions (LBCs).
 #
 #-----------------------------------------------------------------------
 #
@@ -32,15 +32,14 @@ local func_name="${FUNCNAME[0]}"
 #-----------------------------------------------------------------------
 #
 # Set the system directory (i.e. location on disk, not on HPSS) in which
-# the files generated by the external model specified by EXTRN_MDL_-
-# NAME_ICS that are necessary for generating initial condition (IC)
-# and surface files for the FV3-LAM are stored (usually for a limited
-# time, e.g. for the GFS external model, 2 weeks on WCOSS and 2 days on
-# theia). If for a given cycle these files are available in this system
-# directory, they will be copied over to a subdirectory within the cy-
-# cle's run directory. If these files are not available in the system
-# directory, then we search for them elsewhere, e.g. in the mass store
-# (HPSS).
+# the files generated by the external model specified by EXTRN_MDL_NAME_ICS
+# that are necessary for generating initial condition (IC) and surface
+# files for the FV3SAR are stored (usually for a limited time, e.g. for
+# the GFS external model, 2 weeks on WCOSS and 2 days on hera). If for
+# a given cycle these files are available in this system directory, they
+# will be copied over to a subdirectory under the cycle directory. If
+# these files are not available in the system directory, then we search
+# for them elsewhere, e.g. in the mass store (HPSS).
 #
 #-----------------------------------------------------------------------
 #
@@ -63,6 +62,9 @@ else
     "HERA")
       EXTRN_MDL_SYSBASEDIR_ICS=""
       ;;
+    "ORION")
+      EXTRN_MDL_SYSBASEDIR_ICS=""
+      ;;
     "JET")
      EXTRN_MDL_SYSBASEDIR_ICS=""
      ;;
@@ -89,6 +91,9 @@ else
     "HERA")
      EXTRN_MDL_SYSBASEDIR_ICS="/scratch1/NCEPDEV/rstprod/com/gfs/prod"
      ;;
+    "ORION")
+      EXTRN_MDL_SYSBASEDIR_ICS=""
+      ;;
     "JET")
      EXTRN_MDL_SYSBASEDIR_ICS="/public/data/grids/gfs/nemsio"
      ;;
@@ -109,11 +114,12 @@ else
     "HERA")
      EXTRN_MDL_SYSBASEDIR_ICS="/scratch2/BMC/public/data/gsd/rap/full/wrfnat"
      ;;
+    "ORION")
+      EXTRN_MDL_SYSBASEDIR_ICS=""
+      ;;
     "JET")
      EXTRN_MDL_SYSBASEDIR_ICS="/misc/whome/rtrr/rap"
      ;;
-# This goes with the comment below for the if-statement (-z EXTRN_MDL_SYSBASEDIR_ICS).
-# Should not need this case.
     "CHEYENNE")
      EXTRN_MDL_SYSBASEDIR_ICS="dummy_value"
      ;;
@@ -125,6 +131,9 @@ else
     "HERA")
      EXTRN_MDL_SYSBASEDIR_ICS="/scratch2/BMC/public/data/gsd/hrrr/conus/wrfnat"
      ;;
+    "ORION")
+      EXTRN_MDL_SYSBASEDIR_ICS=""
+      ;;
     "JET")
      EXTRN_MDL_SYSBASEDIR_ICS="/misc/whome/rtrr/hrrr"
      ;;
@@ -153,9 +162,9 @@ fi
 #
 #-----------------------------------------------------------------------
 #
-# Set EXTRN_MDL_LBCS_OFFSET_HRS, which is the number of hours to
-# shift the starting time of the external model that provides lateral
-# boundary conditions.
+# Set EXTRN_MDL_LBCS_OFFSET_HRS, which is the number of hours to shift
+# the starting time of the external model that provides lateral boundary
+# conditions.
 #
 #-----------------------------------------------------------------------
 #
@@ -177,15 +186,14 @@ esac
 #-----------------------------------------------------------------------
 #
 # Set the system directory (i.e. location on disk, not on HPSS) in which
-# the files generated by the external model specified by EXTRN_MDL_-
-# NAME_LBCS that are necessary for generating lateral boundary condition
-# (LBC) files for the FV3-LAM are stored (usually for a limited time,
-# e.g. for the GFS external model, 2 weeks on WCOSS and 2 days on the-
-# ia). If for a given cycle these files are available in this system
-# directory, they will be copied over to a subdirectory within the cy-
-# cle's run directory. If these files are not available in the system
-# directory, then we search for them elsewhere, e.g. in the mass store
-# (HPSS).
+# the files generated by the external model specified by EXTRN_MDL_NAME_LBCS
+# that are necessary for generating lateral boundary condition (LBC) files
+# for the FV3SAR are stored (usually for a limited time, e.g. for the GFS
+# external model, 2 weeks on WCOSS and 2 days on hera). If for a given
+# cycle these files are available in this system directory, they will be
+# copied over to a subdirectory under the cycle directory. If these files
+# are not available in the system directory, then we search for them
+# elsewhere, e.g. in the mass store (HPSS).
 #
 #-----------------------------------------------------------------------
 #
@@ -208,6 +216,9 @@ else
     "HERA")
      EXTRN_MDL_SYSBASEDIR_LBCS=""
      ;;
+    "ORION")
+      EXTRN_MDL_SYSBASEDIR_LBCS=""
+      ;;
     "JET")
      EXTRN_MDL_SYSBASEDIR_LBCS=""
      ;;
@@ -234,6 +245,9 @@ else
     "HERA")
      EXTRN_MDL_SYSBASEDIR_LBCS="/scratch1/NCEPDEV/rstprod/com/gfs/prod"
      ;;
+    "ORION")
+      EXTRN_MDL_SYSBASEDIR_LBCS=""
+      ;;
     "JET")
      EXTRN_MDL_SYSBASEDIR_LBCS="/public/data/grids/gfs/nemsio"
      ;;
@@ -254,6 +268,9 @@ else
     "HERA")
      EXTRN_MDL_SYSBASEDIR_LBCS="/scratch2/BMC/public/data/gsd/rap/full/wrfnat"
      ;;
+    "ORION")
+      EXTRN_MDL_SYSBASEDIR_LBCS=""
+      ;;
     "JET")
      EXTRN_MDL_SYSBASEDIR_LBCS="/misc/whome/rtrr/rap"
      ;;
@@ -268,6 +285,9 @@ else
     "HERA")
      EXTRN_MDL_SYSBASEDIR_LBCS="/scratch2/BMC/public/data/gsd/hrrr/conus/wrfnat"
      ;;
+    "ORION")
+      EXTRN_MDL_SYSBASEDIR_LBCS=""
+      ;;
     "JET")
      EXTRN_MDL_SYSBASEDIR_LBCS="/misc/whome/rtrr/hrrr"
      ;;
diff --git a/ush/setup.sh b/ush/setup.sh
index c1970cba47..9d95344b6c 100755
--- a/ush/setup.sh
+++ b/ush/setup.sh
@@ -360,72 +360,86 @@ check_var_valid_value "MACHINE" "valid_vals_MACHINE"
 #-----------------------------------------------------------------------
 #
 case $MACHINE in
-#
-"WCOSS_CRAY")
-#
-  NCORES_PER_NODE="24"
-  SCHED="lsfcray"
-  QUEUE_DEFAULT=${QUEUE_DEFAULT:-"dev"}
-  QUEUE_HPSS=${QUEUE_HPSS:-"dev_transfer"}
-  QUEUE_HPSS_TAG="queue"   # lsfcray does not support "partition" tag
-  QUEUE_FCST=${QUEUE_FCST:-"dev"}
-  ;;
-#
-"WCOSS_DELL_P3")
-#
-  NCORES_PER_NODE=24
-  SCHED="lsf"
-  QUEUE_DEFAULT=${QUEUE_DEFAULT:-"dev"}
-  QUEUE_HPSS=${QUEUE_HPSS:-"dev_transfer"}
-  QUEUE_HPSS_TAG="queue"   # lsf does not support "partition" tag
-  QUEUE_FCST=${QUEUE_FCST:-"dev"}
-  ;;
-#
-"HERA")
-#
-  NCORES_PER_NODE=24
-  SCHED="${SCHED:-slurm}"
-  QUEUE_DEFAULT=${QUEUE_DEFAULT:-"batch"}
-  QUEUE_HPSS=${QUEUE_HPSS:-"service"}
-  QUEUE_FCST=${QUEUE_FCST:-"batch"}
-  ;;
-#
-"JET")
-#
-  NCORES_PER_NODE=24
-  SCHED="${SCHED:-slurm}"
-  QUEUE_DEFAULT=${QUEUE_DEFAULT:-"batch"}
-  QUEUE_HPSS=${QUEUE_HPSS:-"service"}
-  QUEUE_FCST=${QUEUE_FCST:-"batch"}
-  ;;
-#
-"ODIN")
-#
-  NCORES_PER_NODE=24
-  SCHED="${SCHED:-slurm}"
-  QUEUE_DEFAULT=${QUEUE_DEFAULT:-""}
-  QUEUE_HPSS=${QUEUE_HPSS:-""}
-  QUEUE_FCST=${QUEUE_FCST:-""}
-  ;;
-#
-"CHEYENNE")
-#
-  NCORES_PER_NODE=36
-  SCHED="${SCHED:-pbspro}"
-  QUEUE_DEFAULT=${QUEUE_DEFAULT:-"regular"}
-  QUEUE_HPSS=${QUEUE_HPSS:-"regular"}
-  QUEUE_HPSS_TAG="queue"   # pbspro does not support "partition" tag
-  QUEUE_FCST=${QUEUE_FCST:-"regular"}
-  ;;
-#
-"STAMPEDE")
-#
-  NCORES_PER_NODE=68
-  SCHED="slurm"
-  QUEUE_DEFAULT=${QUEUE_DEFAULT:-"normal"}
-  QUEUE_HPSS=${QUEUE_HPSS:-"development"}
-  QUEUE_FCST=${QUEUE_FCST:-"normal"}
-  ;;
+
+  "WCOSS_CRAY")
+    NCORES_PER_NODE="24"
+    SCHED="lsfcray"
+    QUEUE_DEFAULT=${QUEUE_DEFAULT:-"dev"}
+    QUEUE_HPSS=${QUEUE_HPSS:-"dev_transfer"}
+    QUEUE_FCST=${QUEUE_FCST:-"dev"}
+    ;;
+
+  "WCOSS_DELL_P3")
+    NCORES_PER_NODE=24
+    SCHED="lsf"
+    QUEUE_DEFAULT=${QUEUE_DEFAULT:-"dev"}
+    QUEUE_HPSS=${QUEUE_HPSS:-"dev_transfer"}
+    QUEUE_FCST=${QUEUE_FCST:-"dev"}
+    ;;
+
+  "HERA")
+    NCORES_PER_NODE=40
+    SCHED="${SCHED:-slurm}"
+    PARTITION_DEFAULT=${PARTITION_DEFAULT:-"hera"}
+    QUEUE_DEFAULT=${QUEUE_DEFAULT:-"batch"}
+    PARTITION_HPSS=${PARTITION_HPSS:-"service"}
+    QUEUE_HPSS=${QUEUE_HPSS:-"batch"}
+    PARTITION_FCST=${PARTITION_FCST:-"hera"}
+    QUEUE_FCST=${QUEUE_FCST:-"batch"}
+    ;;
+
+  "ORION")
+    NCORES_PER_NODE=40
+    SCHED="${SCHED:-slurm}"
+    PARTITION_DEFAULT=${PARTITION_DEFAULT:-"orion"}
+    QUEUE_DEFAULT=${QUEUE_DEFAULT:-"batch"}
+    PARTITION_HPSS=${PARTITION_HPSS:-"service"}
+    QUEUE_HPSS=${QUEUE_HPSS:-"batch"}
+    PARTITION_FCST=${PARTITION_FCST:-"orion"}
+    QUEUE_FCST=${QUEUE_FCST:-"batch"}
+    ;;
+
+  "JET")
+    NCORES_PER_NODE=24
+    SCHED="${SCHED:-slurm}"
+    PARTITION_DEFAULT=${PARTITION_DEFAULT:-"sjet,vjet,kjet,xjet"}
+    QUEUE_DEFAULT=${QUEUE_DEFAULT:-"batch"}
+    PARTITION_HPSS=${PARTITION_HPSS:-"service"}
+    QUEUE_HPSS=${QUEUE_HPSS:-"batch"}
+    PARTITION_FCST=${PARTITION_FCST:-"sjet,vjet,kjet,xjet"}
+    QUEUE_FCST=${QUEUE_FCST:-"batch"}
+    ;;
+
+  "ODIN")
+    NCORES_PER_NODE=24
+    SCHED="${SCHED:-slurm}"
+    PARTITION_DEFAULT=${PARTITION_DEFAULT:-"workq"}
+    QUEUE_DEFAULT=${QUEUE_DEFAULT:-"workq"}
+    PARTITION_HPSS=${PARTITION_HPSS:-"workq"}
+    QUEUE_HPSS=${QUEUE_HPSS:-"workq"}
+    PARTITION_FCST=${PARTITION_FCST:-"workq"}
+    QUEUE_FCST=${QUEUE_FCST:-"workq"}
+    ;;
+
+  "CHEYENNE")
+    NCORES_PER_NODE=36
+    SCHED="${SCHED:-pbspro}"
+    QUEUE_DEFAULT=${QUEUE_DEFAULT:-"regular"}
+    QUEUE_HPSS=${QUEUE_HPSS:-"regular"}
+    QUEUE_FCST=${QUEUE_FCST:-"regular"}
+    ;;
+
+  "STAMPEDE")
+    NCORES_PER_NODE=68
+    SCHED="slurm"
+    PARTITION_DEFAULT=${PARTITION_DEFAULT:-"normal"}
+    QUEUE_DEFAULT=${QUEUE_DEFAULT:-"normal"}
+    PARTITION_HPSS=${PARTITION_HPSS:-"normal"}
+    QUEUE_HPSS=${QUEUE_HPSS:-"normal"}
+    PARTITION_FCST=${PARTITION_FCST:-"normal"}
+    QUEUE_FCST=${QUEUE_FCST:-"normal"}
+    ;;
+
 esac
 #
 #-----------------------------------------------------------------------
@@ -652,57 +666,64 @@ TEMPLATE_DIR="$USHDIR/templates"
 case $MACHINE in
 
-"WCOSS_CRAY")
-  FIXgsm=${FIXgsm:-"/gpfs/hps3/emc/global/noscrub/emc.glopara/git/fv3gfs/fix/fix_am"}
-  TOPO_DIR=${TOPO_DIR:-"/gpfs/hps3/emc/global/noscrub/emc.glopara/git/fv3gfs/fix/fix_orog"}
-  SFC_CLIMO_INPUT_DIR=${SFC_CLIMO_INPUT_DIR:-"/gpfs/hps3/emc/global/noscrub/emc.glopara/git/fv3gfs/fix/fix_sfc_climo"}
-  ;;
-
-"WCOSS_DELL_P3")
-  FIXgsm=${FIXgsm:-"/gpfs/dell2/emc/modeling/noscrub/emc.glopara/git/fv3gfs/fix/fix_am"}
-  TOPO_DIR=${TOPO_DIR:-"/gpfs/dell2/emc/modeling/noscrub/emc.glopara/git/fv3gfs/fix/fix_orog"}
-  SFC_CLIMO_INPUT_DIR=${SFC_CLIMO_INPUT_DIR:-"/gpfs/dell2/emc/modeling/noscrub/emc.glopara/git/fv3gfs/fix/fix_sfc_climo"}
-  ;;
-
-"HERA")
-  FIXgsm=${FIXgsm:-"/scratch1/NCEPDEV/global/glopara/fix/fix_am"}
-  TOPO_DIR=${TOPO_DIR:-"/scratch1/NCEPDEV/global/glopara/fix/fix_orog"}
-  SFC_CLIMO_INPUT_DIR=${SFC_CLIMO_INPUT_DIR:-"/scratch1/NCEPDEV/da/George.Gayno/ufs_utils.git/climo_fields_netcdf"}
-  ;;
-
-"JET")
-  FIXgsm=${FIXgsm:-"/lfs4/HFIP/hfv3gfs/glopara/git/fv3gfs/fix/fix_am"}
-  TOPO_DIR=${TOPO_DIR:-"/lfs4/HFIP/hfv3gfs/glopara/git/fv3gfs/fix/fix_orog"}
-  SFC_CLIMO_INPUT_DIR=${SFC_CLIMO_INPUT_DIR:-"/lfs1/HFIP/hwrf-data/git/fv3gfs/fix/fix_sfc_climo"}
-  ;;
-
-"ODIN")
-  FIXgsm=${FIXgsm:-"/scratch/ywang/fix/theia_fix/fix_am"}
-  TOPO_DIR=${TOPO_DIR:-"/scratch/ywang/fix/theia_fix/fix_orog"}
-  SFC_CLIMO_INPUT_DIR=${SFC_CLIMO_INPUT_DIR:-"/scratch/ywang/fix/climo_fields_netcdf"}
-  ;;
-"CHEYENNE")
-  FIXgsm=${FIXgsm:-"/glade/p/ral/jntp/UFS_CAM/fix/fix_am"}
-  TOPO_DIR=${TOPO_DIR:-"/glade/p/ral/jntp/UFS_CAM/fix/fix_orog"}
-  SFC_CLIMO_INPUT_DIR=${SFC_CLIMO_INPUT_DIR:-"/glade/p/ral/jntp/UFS_CAM/fix/climo_fields_netcdf"}
-  ;;
-
-"STAMPEDE")
-  FIXgsm=${FIXgsm:-"/work/00315/tg455890/stampede2/regional_fv3/fix_am"}
-  TOPO_DIR=${TOPO_DIR:-"/work/00315/tg455890/stampede2/regional_fv3/fix_orog"}
-  SFC_CLIMO_INPUT_DIR=${SFC_CLIMO_INPUT_DIR:-"/work/00315/tg455890/stampede2/regional_fv3/climo_fields_netcdf"}
-  ;;
-
-*)
-  print_err_msg_exit "\
+  "WCOSS_CRAY")
+    FIXgsm=${FIXgsm:-"/gpfs/hps3/emc/global/noscrub/emc.glopara/git/fv3gfs/fix/fix_am"}
+    TOPO_DIR=${TOPO_DIR:-"/gpfs/hps3/emc/global/noscrub/emc.glopara/git/fv3gfs/fix/fix_orog"}
+    SFC_CLIMO_INPUT_DIR=${SFC_CLIMO_INPUT_DIR:-"/gpfs/hps3/emc/global/noscrub/emc.glopara/git/fv3gfs/fix/fix_sfc_climo"}
+    ;;
+
+  "WCOSS_DELL_P3")
+    FIXgsm=${FIXgsm:-"/gpfs/dell2/emc/modeling/noscrub/emc.glopara/git/fv3gfs/fix/fix_am"}
+    TOPO_DIR=${TOPO_DIR:-"/gpfs/dell2/emc/modeling/noscrub/emc.glopara/git/fv3gfs/fix/fix_orog"}
+    SFC_CLIMO_INPUT_DIR=${SFC_CLIMO_INPUT_DIR:-"/gpfs/dell2/emc/modeling/noscrub/emc.glopara/git/fv3gfs/fix/fix_sfc_climo"}
+    ;;
+
+  "HERA")
+    FIXgsm=${FIXgsm:-"/scratch1/NCEPDEV/global/glopara/fix/fix_am"}
+    TOPO_DIR=${TOPO_DIR:-"/scratch1/NCEPDEV/global/glopara/fix/fix_orog"}
+    SFC_CLIMO_INPUT_DIR=${SFC_CLIMO_INPUT_DIR:-"/scratch1/NCEPDEV/da/George.Gayno/ufs_utils.git/climo_fields_netcdf"}
+    ;;
+
+  "ORION")
+    FIXgsm=${FIXgsm:-"/work/noaa/fv3-cam/emc.campara/fix_fv3cam/fix_am"}
+    TOPO_DIR=${TOPO_DIR:-"/work/noaa/fv3-cam/emc.campara/fix_fv3cam/fix_orog"}
+    SFC_CLIMO_INPUT_DIR=${SFC_CLIMO_INPUT_DIR:-"/work/noaa/gsd-fv3-dev/gsketefia/UFS/climo_fields_netcdf"}
+    ;;
+
+  "JET")
+    FIXgsm=${FIXgsm:-"/lfs4/HFIP/hfv3gfs/glopara/git/fv3gfs/fix/fix_am"}
+    TOPO_DIR=${TOPO_DIR:-"/lfs4/HFIP/hfv3gfs/glopara/git/fv3gfs/fix/fix_orog"}
+    SFC_CLIMO_INPUT_DIR=${SFC_CLIMO_INPUT_DIR:-"/lfs1/HFIP/hwrf-data/git/fv3gfs/fix/fix_sfc_climo"}
+    ;;
+
+  "ODIN")
+    FIXgsm=${FIXgsm:-"/scratch/ywang/fix/theia_fix/fix_am"}
+    TOPO_DIR=${TOPO_DIR:-"/scratch/ywang/fix/theia_fix/fix_orog"}
+    SFC_CLIMO_INPUT_DIR=${SFC_CLIMO_INPUT_DIR:-"/scratch/ywang/fix/climo_fields_netcdf"}
+    ;;
+
+  "CHEYENNE")
+    FIXgsm=${FIXgsm:-"/glade/p/ral/jntp/UFS_CAM/fix/fix_am"}
+    TOPO_DIR=${TOPO_DIR:-"/glade/p/ral/jntp/UFS_CAM/fix/fix_orog"}
+    SFC_CLIMO_INPUT_DIR=${SFC_CLIMO_INPUT_DIR:-"/glade/p/ral/jntp/UFS_CAM/fix/climo_fields_netcdf"}
+    ;;
+
+  "STAMPEDE")
+    FIXgsm=${FIXgsm:-"/work/00315/tg455890/stampede2/regional_fv3/fix_am"}
+    TOPO_DIR=${TOPO_DIR:-"/work/00315/tg455890/stampede2/regional_fv3/fix_orog"}
+    SFC_CLIMO_INPUT_DIR=${SFC_CLIMO_INPUT_DIR:-"/work/00315/tg455890/stampede2/regional_fv3/climo_fields_netcdf"}
+    ;;
+
+  *)
+    print_err_msg_exit "\
 One or more fix file directories have not been specified for this machine:
   MACHINE = \"$MACHINE\"
   FIXgsm = \"${FIXgsm:-\"\"}
   TOPO_DIR = \"${TOPO_DIR:-\"\"}
   SFC_CLIMO_INPUT_DIR = \"${SFC_CLIMO_INPUT_DIR:-\"\"}
-  You can specify the missing location(s) in config.sh"
-  ;;
+    ;;
+
 esac
 #
 #-----------------------------------------------------------------------
diff --git a/ush/templates/FV3LAM_wflow.xml b/ush/templates/FV3LAM_wflow.xml
index 0ffbdd1039..f95c0ecd27 100644
--- a/ush/templates/FV3LAM_wflow.xml
+++ b/ush/templates/FV3LAM_wflow.xml
@@ -63,9 +63,22 @@
 tasks other than GET_EXTRN_ICS_TN, GET_EXTRN_LBCS_TN, and RUN_FCST_TN;
 the "HPSS" type is used for the GET_EXTRN_ICS_TN and GET_EXTRN_LBCS_TN
 tasks; and the "FCST" type is used for the RUN_FCST_TN task.
 -->
-<!ENTITY RSRV_DEFAULT "<account>&ACCOUNT;</account><{{ queue_default_tag }}>&QUEUE_DEFAULT;</{{ queue_default_tag }}>">
-<!ENTITY RSRV_HPSS "<account>&ACCOUNT;</account><{{ queue_hpss_tag }}>&QUEUE_HPSS;</{{ queue_hpss_tag }}>">
-<!ENTITY RSRV_FCST "<account>&ACCOUNT;</account><{{ queue_fcst_tag }}>&QUEUE_FCST;</{{ queue_fcst_tag }}>">
+
+{%- if (partition_default != "") %}
+<!ENTITY RSRV_DEFAULT "<account>&ACCOUNT;</account><queue>&QUEUE_DEFAULT;</queue><partition>{{ partition_default }}</partition>">
+{%- else %}
+<!ENTITY RSRV_DEFAULT "<account>&ACCOUNT;</account><queue>&QUEUE_DEFAULT;</queue>">
+{%- endif %}
+{%- if (partition_hpss != "") %}
+<!ENTITY RSRV_HPSS "<account>&ACCOUNT;</account><queue>&QUEUE_HPSS;</queue><partition>{{ partition_hpss }}</partition>">
+{%- else %}
+<!ENTITY RSRV_HPSS "<account>&ACCOUNT;</account><queue>&QUEUE_HPSS;</queue>">
+{%- endif %}
+{%- if (partition_fcst != "") %}
+<!ENTITY RSRV_FCST "<account>&ACCOUNT;</account><queue>&QUEUE_FCST;</queue><partition>{{ partition_fcst }}</partition>">
+{%- else %}
+<!ENTITY RSRV_FCST "<account>&ACCOUNT;</account><queue>&QUEUE_FCST;</queue>">
+{%- endif %}
 
 ]>
@@ -343,9 +356,6 @@ MODULES_RUN_TASK_FP script.
 {% if machine in ["JET", "HERA"] %}
        <cores>{{ ncores_run_fcst }}</cores>
        <native>{{ native_run_fcst }}</native>
-{% if machine == "JET" %}
-       <partition>{{ partition_run_fcst }}</partition>
-{% endif %}
 {% else %}
        <nodes>{{ nnodes_run_fcst }}:ppn={{ ppn_run_fcst }}</nodes>
        <nodesize>&NCORES_PER_NODE;</nodesize>
diff --git a/ush/valid_param_vals.sh b/ush/valid_param_vals.sh
index db91f05bdc..f6093a2dad 100644
--- a/ush/valid_param_vals.sh
+++ b/ush/valid_param_vals.sh
@@ -1,6 +1,6 @@
 valid_vals_RUN_ENVIR=("nco" "community")
 valid_vals_VERBOSE=("TRUE" "true" "YES" "yes" "FALSE" "false" "NO" "no")
-valid_vals_MACHINE=("WCOSS_CRAY" "WCOSS_DELL_P3" "THEIA" "HERA" "JET" "ODIN" "CHEYENNE" "STAMPEDE")
+valid_vals_MACHINE=("WCOSS_CRAY" "WCOSS_DELL_P3" "HERA" "ORION" "JET" "ODIN" "CHEYENNE" "STAMPEDE")
 valid_vals_SCHED=("slurm" "pbspro" "lsf" "lsfcray" "none")
 valid_vals_PREDEF_GRID_NAME=( \
 "EMC_CONUS_3km" \
diff --git a/ush/wrappers/run_post.sh b/ush/wrappers/run_post.sh
index 174cc62b55..1c8f3d4dfb 100755
--- a/ush/wrappers/run_post.sh
+++ b/ush/wrappers/run_post.sh
@@ -10,6 +10,6 @@ export ENSMEM_INDX=""
 num_fcst_hrs=${FCST_LEN_HRS}
 
 for (( i=0; i<=$((num_fcst_hrs)); i++ )); do
-  export fhr=`printf "%02i" ${i}`
+  export fhr=`printf "%03i" ${i}`
   ${JOBSDIR}/JREGIONAL_RUN_POST
 done