diff --git a/.github/actions/testing-setup/action.yml b/.github/actions/testing-setup/action.yml index 1ab9f4144b..c6fae4ad58 100644 --- a/.github/actions/testing-setup/action.yml +++ b/.github/actions/testing-setup/action.yml @@ -22,6 +22,7 @@ runs: shell: bash run: | echo "::group::Install linux packages" + sudo apt-get update sudo apt-get install netcdf-bin libnetcdf-dev libnetcdff-dev mpich libmpich-dev echo "::endgroup::" diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 4e6f73abd6..7dd1f3c703 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -23,5 +23,5 @@ jobs: - name: Create validation data run: make run.symmetric -k -s - - name: Regression test + - name: Regression test run: make test.regressions DO_REGRESSION_TESTS=true -k -s diff --git a/.gitignore b/.gitignore index ccaecbbead..25f7524d1c 100644 --- a/.gitignore +++ b/.gitignore @@ -4,13 +4,7 @@ html -# Build output -*.o -*.mod -MOM6 - - -# Autoconf +# Autoconf output aclocal.m4 autom4te.cache/ config.log diff --git a/.readthedocs.yml b/.readthedocs.yml index 8d42c8c5cd..f7ad4421b4 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,8 +1,9 @@ version: 2 # Extra formats -formats: - - pdf +# PDF generation is failing for now; disabled on 2020-12-02 +#formats: +# - pdf # Build documentation sphinx: diff --git a/ac/configure.ac b/ac/configure.ac index b069fdd56f..ad8ed83603 100644 --- a/ac/configure.ac +++ b/ac/configure.ac @@ -83,25 +83,56 @@ AX_FC_CHECK_MODULE([mpi], # netCDF configuration -# NOTE: `nf-config --flibs` combines library paths (-L) and libraries (-l), -# even though these ought to be separated in the invocation of `ld`. -# -# We use `sed` to strip the -l and pass the -L to LDFLAGS, and rely on autoconf -# to configure the -l flags. 
-AC_PROG_SED - -AC_PATH_PROG([NF_CONFIG], [nf-config]) -AS_IF([test -n "$NF_CONFIG"], - [CPPFLAGS="$CPPFLAGS $($NF_CONFIG --fflags)" - FCFLAGS="$FCFLAGS $($NF_CONFIG --fflags)" - LDFLAGS="$LDFLAGS $($NF_CONFIG --flibs | $SED -e 's/-l[[^ ]]*//g')"], - [AC_MSG_ERROR([Could not find nf-config.])]) - -AX_FC_CHECK_MODULE([netcdf], - [], [AC_MSG_ERROR([Could not find netcdf module.])]) -AX_FC_CHECK_LIB([netcdff], [nf_create], [netcdf], - [], [AC_MSG_ERROR([Could not link netcdff library.])] -) +# Search for the Fortran netCDF module, fallback to nf-config. +AX_FC_CHECK_MODULE([netcdf], [], [ + AS_UNSET([ax_fc_cv_mod_netcdf]) + AC_PATH_PROG([NF_CONFIG], [nf-config]) + AS_IF([test -n "$NF_CONFIG"], [ + AC_SUBST([FCFLAGS], ["$FCFLAGS -I$($NF_CONFIG --includedir)"]) + ], [AC_MSG_ERROR([Could not find nf-config.])] + ) + AX_FC_CHECK_MODULE([netcdf], [], [ + AC_MSG_ERROR([Could not find netcdf module.]) + ]) +]) + +# FMS may invoke netCDF C calls, so we link to libnetcdf. +AC_LANG_PUSH([C]) +AC_CHECK_LIB([netcdf], [nc_create], [], [ + AS_UNSET([ac_cv_lib_netcdf_nc_create]) + AC_PATH_PROG([NC_CONFIG], [nc-config]) + AS_IF([test -n "$NC_CONFIG"], [ + AC_SUBST([LDFLAGS], + ["$LDFLAGS -L$($NC_CONFIG --libdir)"] + ) + ], [AC_MSG_ERROR([Could not find nc-config.])] + ) + AC_CHECK_LIB([netcdf], [nc_create], [], [ + AC_MSG_ERROR([Could not find libnetcdf.]) + ]) +]) +AC_LANG_POP([C]) + +# NOTE: We test for nf_create, rather than nf90_create, because AX_FC_CHECK_LIB +# is currently not yet able to properly probe inside modules. +# Testing of the nf90_* functions will require a macro update. +# NOTE: nf-config does not have a --libdir flag, so we use --prefix and assume +# that libraries are in the $prefix/lib directory. 
+ +# Link to Fortran netCDF library, netcdff +AX_FC_CHECK_LIB([netcdff], [nf_create], [], [], [ + AS_UNSET([ax_fc_cv_lib_netcdff_nf_create]) + AC_PATH_PROG([NF_CONFIG], [nf-config]) + AS_IF([test -n "$NF_CONFIG"], [ + AC_SUBST([LDFLAGS], + ["$LDFLAGS -L$($NF_CONFIG --prefix)/lib"] + ) + ], [AC_MSG_ERROR([Could not find nf-config.])] + ) + AX_FC_CHECK_LIB([netcdff], [nf_create], [], [], [ + AC_MSG_ERROR([Could not find libnetcdff.]) + ]) +]) # Force 8-byte reals diff --git a/ac/deps/Makefile b/ac/deps/Makefile index 0ed4fd19a7..bba42a3b11 100644 --- a/ac/deps/Makefile +++ b/ac/deps/Makefile @@ -13,7 +13,7 @@ MKMF_COMMIT ?= master # FMS framework FMS_URL ?= https://github.com/NOAA-GFDL/FMS.git -FMS_COMMIT ?= 2019.01.03 +FMS_COMMIT ?= 2020.04 # List of source files to link this Makefile's dependencies to model Makefiles diff --git a/ac/deps/configure.fms.ac b/ac/deps/configure.fms.ac index 4ac86f6445..bf899126cc 100644 --- a/ac/deps/configure.fms.ac +++ b/ac/deps/configure.fms.ac @@ -15,6 +15,7 @@ AC_PROG_CC AX_MPI CC=$MPICC + # FMS configuration # Linux and macOS have a gettid system call, but it is not implemented in older @@ -74,25 +75,38 @@ AC_DEFINE([use_libMPI]) # netCDF configuration -# NOTE: `nf-config --flibs` combines library paths (-L) and libraries (-l), -# even though these ought to be separated in the invocation of `ld`. -# -# We use `sed` to strip the -l and pass the -L to LDFLAGS, and rely on autoconf -# to configure the -l flags. 
-AC_PROG_SED - -AC_PATH_PROG([NF_CONFIG], [nf-config]) -AS_IF([test -n "$NF_CONFIG"], - [CPPFLAGS="$CPPFLAGS $($NF_CONFIG --fflags)" - FCFLAGS="$FCFLAGS $($NF_CONFIG --fflags)" - LDFLAGS="$LDFLAGS $($NF_CONFIG --flibs | $SED -e 's/-l[[^ ]]*//g')"], - [AC_MSG_ERROR([Could not find nf-config.])]) - -AX_FC_CHECK_MODULE([netcdf], - [], [AC_MSG_ERROR([Could not find netcdf module.])]) -AX_FC_CHECK_LIB([netcdff], [nf_create], [netcdf], - [], [AC_MSG_ERROR([Could not link netcdff library.])] -) +# Check for netcdf.h header function declarations. +# If unavailable, then try to invoke nc-create. +AC_LANG_PUSH([C]) +AC_CHECK_HEADERS([netcdf.h], [], [ + AS_UNSET([ac_cv_header_netcdf_h]) + AC_PATH_PROG([NC_CONFIG], [nc-config]) + AS_IF([test -n "$NC_CONFIG"], [ + AC_SUBST([CPPFLAGS], ["$CPPFLAGS -I$($NC_CONFIG --includedir)"]) + ], + [AC_MSG_ERROR([Could not find nc-config.])] + ) + AC_CHECK_HEADERS([netcdf.h], [], [ + AC_MSG_ERROR([Could not find netcdf.h]) + ]) +]) +AC_LANG_POP([C]) + +# Search for the Fortran netCDF module, fallback to nf-config. +AX_FC_CHECK_MODULE([netcdf], [], [ + AS_UNSET([ax_fc_cv_mod_netcdf]) + AC_PATH_PROG([NF_CONFIG], [nf-config]) + AS_IF([test -n "$NF_CONFIG"], [ + AC_SUBST([FCFLAGS], ["$FCFLAGS -I$($NF_CONFIG --includedir)"]) + ], + [AC_MSG_ERROR([Could not find nf-config.])] + ) + AX_FC_CHECK_MODULE([netcdf], [], [ + AC_MSG_ERROR([Could not find netcdf module.]) + ]) +]) + +# FMS requires this macro to signal netCDF support. AC_DEFINE([use_netCDF]) diff --git a/ac/m4/ax_fc_check_lib.m4 b/ac/m4/ax_fc_check_lib.m4 index c0accab6cd..a7f848cd60 100644 --- a/ac/m4/ax_fc_check_lib.m4 +++ b/ac/m4/ax_fc_check_lib.m4 @@ -18,7 +18,7 @@ dnl library with different -L flags, or perhaps other ld configurations. dnl dnl Results are cached in the ax_fc_cv_lib_LIBRARY_FUNCTION variable. 
dnl -AC_DEFUN([AX_FC_CHECK_LIB],[dnl +AC_DEFUN([AX_FC_CHECK_LIB],[ AS_VAR_PUSHDEF([ax_fc_Lib], [ax_fc_cv_lib_$1_$2]) m4_ifval([$6], [ax_fc_lib_msg_LDFLAGS=" with $6"], @@ -29,14 +29,15 @@ AC_DEFUN([AX_FC_CHECK_LIB],[dnl LDFLAGS="$6 $LDFLAGS" ax_fc_check_lib_save_LIBS=$LIBS LIBS="-l$1 $7 $LIBS" - AS_IF([test -n $3], + AS_IF([test -n "$3"], [ax_fc_use_mod="use $3"], [ax_fc_use_mod=""]) - AC_LINK_IFELSE([ - AC_LANG_PROGRAM([], [dnl + AC_LINK_IFELSE([dnl +dnl Begin 7-column code block +AC_LANG_PROGRAM([], [dnl $ax_fc_use_mod - call $2]dnl - ) + call $2])dnl +dnl End code block ], [AS_VAR_SET([ax_fc_Lib], [yes])], [AS_VAR_SET([ax_fc_Lib], [no])] diff --git a/config_src/coupled_driver/MOM_surface_forcing_gfdl.F90 b/config_src/coupled_driver/MOM_surface_forcing_gfdl.F90 index 7075fb7c10..be960accd6 100644 --- a/config_src/coupled_driver/MOM_surface_forcing_gfdl.F90 +++ b/config_src/coupled_driver/MOM_surface_forcing_gfdl.F90 @@ -1664,7 +1664,8 @@ subroutine check_mask_val_consistency(val, mask, i, j, varname, G) real, intent(in) :: val !< value of flux/variable passed by IOB real, intent(in) :: mask !< value of ocean mask - integer, intent(in) :: i, j !< model grid cell indices + integer, intent(in) :: i !< model grid cell indices + integer, intent(in) :: j !< model grid cell indices character(len=*), intent(in) :: varname !< variable name type(ocean_grid_type), intent(in) :: G !< The ocean's grid structure ! 
Local variables diff --git a/config_src/external/GFDL_ocean_BGC/FMS_coupler_util.F90 b/config_src/external/GFDL_ocean_BGC/FMS_coupler_util.F90 index f3d63dd061..e50f2ccf0b 100644 --- a/config_src/external/GFDL_ocean_BGC/FMS_coupler_util.F90 +++ b/config_src/external/GFDL_ocean_BGC/FMS_coupler_util.F90 @@ -12,11 +12,15 @@ module FMS_coupler_util subroutine extract_coupler_values(BC_struc, BC_index, BC_element, array_out, ilb, jlb, & is, ie, js, je, conversion) real, dimension(ilb:,jlb:),intent(out) :: array_out !< The array being filled with the input values - integer, intent(in) :: ilb, jlb !< Lower bounds + integer, intent(in) :: ilb !< Lower bounds + integer, intent(in) :: jlb !< Lower bounds type(coupler_2d_bc_type), intent(in) :: BC_struc !< The type from which the data is being extracted integer, intent(in) :: BC_index !< The boundary condition number being extracted integer, intent(in) :: BC_element !< The element of the boundary condition being extracted - integer, optional, intent(in) :: is, ie, js, je !< The i- and j- limits of array_out to be filled + integer, optional, intent(in) :: is !< The i- limits of array_out to be filled + integer, optional, intent(in) :: ie !< The i- limits of array_out to be filled + integer, optional, intent(in) :: js !< The j- limits of array_out to be filled + integer, optional, intent(in) :: je !< The j- limits of array_out to be filled real, optional, intent(in) :: conversion !< A number that every element is multiplied by end subroutine extract_coupler_values @@ -24,11 +28,15 @@ end subroutine extract_coupler_values subroutine set_coupler_values(array_in, BC_struc, BC_index, BC_element, ilb, jlb,& is, ie, js, je, conversion) real, dimension(ilb:,jlb:), intent(in) :: array_in !< The array containing the values to load into the BC - integer, intent(in) :: ilb, jlb !< Lower bounds + integer, intent(in) :: ilb !< Lower bounds + integer, intent(in) :: jlb !< Lower bounds type(coupler_2d_bc_type), intent(inout) :: BC_struc !< 
The type into which the data is being loaded integer, intent(in) :: BC_index !< The boundary condition number being set integer, intent(in) :: BC_element !< The element of the boundary condition being set - integer, optional, intent(in) :: is, ie, js, je !< The i- and j- limits of array_out to be filled + integer, optional, intent(in) :: is !< The i- limits of array_out to be filled + integer, optional, intent(in) :: ie !< The i- limits of array_out to be filled + integer, optional, intent(in) :: js !< The j- limits of array_out to be filled + integer, optional, intent(in) :: je !< The j- limits of array_out to be filled real, optional, intent(in) :: conversion !< A number that every element is multiplied by end subroutine set_coupler_values diff --git a/config_src/mct_driver/mom_ocean_model_mct.F90 b/config_src/mct_driver/mom_ocean_model_mct.F90 index 2f94c9b7f9..5a04739971 100644 --- a/config_src/mct_driver/mom_ocean_model_mct.F90 +++ b/config_src/mct_driver/mom_ocean_model_mct.F90 @@ -56,7 +56,7 @@ module MOM_ocean_model_mct use coupler_types_mod, only : coupler_type_set_diags, coupler_type_send_data use mpp_domains_mod, only : domain2d, mpp_get_layout, mpp_get_global_domain use mpp_domains_mod, only : mpp_define_domains, mpp_get_compute_domain, mpp_get_data_domain -use fms_mod, only : stdout +use MOM_io, only : stdout use mpp_mod, only : mpp_chksum use MOM_EOS, only : gsw_sp_from_sr, gsw_pt_from_ct use MOM_wave_interface, only : wave_parameters_CS, MOM_wave_interface_init @@ -409,10 +409,6 @@ subroutine ocean_model_init(Ocean_sfc, OS, Time_init, Time_in, gas_fields_ocn, i call close_param_file(param_file) call diag_mediator_close_registration(OS%diag) - - if (is_root_pe()) & - write(*,'(/12x,a/)') '======== COMPLETED MOM INITIALIZATION ========' - call callTree_leave("ocean_model_init(") end subroutine ocean_model_init @@ -1053,20 +1049,18 @@ subroutine ocean_public_type_chksum(id, timestep, ocn) integer, intent(in) :: timestep !< The number of elapsed 
timesteps type(ocean_public_type), intent(in) :: ocn !< A structure containing various publicly !! visible ocean surface fields. - integer :: n, m, outunit - - outunit = stdout() - - write(outunit,*) "BEGIN CHECKSUM(ocean_type):: ", id, timestep - write(outunit,100) 'ocean%t_surf ',mpp_chksum(ocn%t_surf ) - write(outunit,100) 'ocean%s_surf ',mpp_chksum(ocn%s_surf ) - write(outunit,100) 'ocean%u_surf ',mpp_chksum(ocn%u_surf ) - write(outunit,100) 'ocean%v_surf ',mpp_chksum(ocn%v_surf ) - write(outunit,100) 'ocean%sea_lev ',mpp_chksum(ocn%sea_lev) - write(outunit,100) 'ocean%frazil ',mpp_chksum(ocn%frazil ) - write(outunit,100) 'ocean%melt_potential ',mpp_chksum(ocn%melt_potential) - - call coupler_type_write_chksums(ocn%fields, outunit, 'ocean%') + integer :: n, m + + write(stdout,*) "BEGIN CHECKSUM(ocean_type):: ", id, timestep + write(stdout,100) 'ocean%t_surf ',mpp_chksum(ocn%t_surf ) + write(stdout,100) 'ocean%s_surf ',mpp_chksum(ocn%s_surf ) + write(stdout,100) 'ocean%u_surf ',mpp_chksum(ocn%u_surf ) + write(stdout,100) 'ocean%v_surf ',mpp_chksum(ocn%v_surf ) + write(stdout,100) 'ocean%sea_lev ',mpp_chksum(ocn%sea_lev) + write(stdout,100) 'ocean%frazil ',mpp_chksum(ocn%frazil ) + write(stdout,100) 'ocean%melt_potential ',mpp_chksum(ocn%melt_potential) + + call coupler_type_write_chksums(ocn%fields, stdout, 'ocean%') 100 FORMAT(" CHECKSUM::",A20," = ",Z20) end subroutine ocean_public_type_chksum diff --git a/config_src/mct_driver/mom_surface_forcing_mct.F90 b/config_src/mct_driver/mom_surface_forcing_mct.F90 index 92b5d148bb..82105e040e 100644 --- a/config_src/mct_driver/mom_surface_forcing_mct.F90 +++ b/config_src/mct_driver/mom_surface_forcing_mct.F90 @@ -34,10 +34,10 @@ module MOM_surface_forcing_mct use coupler_types_mod, only : coupler_type_initialized, coupler_type_spawn use coupler_types_mod, only : coupler_type_copy_data use data_override_mod, only : data_override_init, data_override -use fms_mod, only : stdout use mpp_mod, only : mpp_chksum use 
time_interp_external_mod, only : init_external_field, time_interp_external use time_interp_external_mod, only : time_interp_external_init +use MOM_io, only: stdout implicit none ; private @@ -1361,37 +1361,35 @@ subroutine ice_ocn_bnd_type_chksum(id, timestep, iobt) !! ocean in a coupled model whose checksums are reported ! local variables - integer :: n,m, outunit - - outunit = stdout() - - write(outunit,*) "BEGIN CHECKSUM(ice_ocean_boundary_type):: ", id, timestep - write(outunit,100) 'iobt%u_flux ' , mpp_chksum( iobt%u_flux ) - write(outunit,100) 'iobt%v_flux ' , mpp_chksum( iobt%v_flux ) - write(outunit,100) 'iobt%t_flux ' , mpp_chksum( iobt%t_flux ) - write(outunit,100) 'iobt%q_flux ' , mpp_chksum( iobt%q_flux ) - write(outunit,100) 'iobt%salt_flux ' , mpp_chksum( iobt%salt_flux ) - write(outunit,100) 'iobt%seaice_melt_heat' , mpp_chksum( iobt%seaice_melt_heat) - write(outunit,100) 'iobt%seaice_melt ' , mpp_chksum( iobt%seaice_melt ) - write(outunit,100) 'iobt%lw_flux ' , mpp_chksum( iobt%lw_flux ) - write(outunit,100) 'iobt%sw_flux_vis_dir' , mpp_chksum( iobt%sw_flux_vis_dir) - write(outunit,100) 'iobt%sw_flux_vis_dif' , mpp_chksum( iobt%sw_flux_vis_dif) - write(outunit,100) 'iobt%sw_flux_nir_dir' , mpp_chksum( iobt%sw_flux_nir_dir) - write(outunit,100) 'iobt%sw_flux_nir_dif' , mpp_chksum( iobt%sw_flux_nir_dif) - write(outunit,100) 'iobt%lprec ' , mpp_chksum( iobt%lprec ) - write(outunit,100) 'iobt%fprec ' , mpp_chksum( iobt%fprec ) - write(outunit,100) 'iobt%runoff ' , mpp_chksum( iobt%runoff ) - write(outunit,100) 'iobt%calving ' , mpp_chksum( iobt%calving ) - write(outunit,100) 'iobt%p ' , mpp_chksum( iobt%p ) + integer :: n,m + + write(stdout,*) "BEGIN CHECKSUM(ice_ocean_boundary_type):: ", id, timestep + write(stdout,100) 'iobt%u_flux ' , mpp_chksum( iobt%u_flux ) + write(stdout,100) 'iobt%v_flux ' , mpp_chksum( iobt%v_flux ) + write(stdout,100) 'iobt%t_flux ' , mpp_chksum( iobt%t_flux ) + write(stdout,100) 'iobt%q_flux ' , mpp_chksum( iobt%q_flux ) + 
write(stdout,100) 'iobt%salt_flux ' , mpp_chksum( iobt%salt_flux ) + write(stdout,100) 'iobt%seaice_melt_heat' , mpp_chksum( iobt%seaice_melt_heat) + write(stdout,100) 'iobt%seaice_melt ' , mpp_chksum( iobt%seaice_melt ) + write(stdout,100) 'iobt%lw_flux ' , mpp_chksum( iobt%lw_flux ) + write(stdout,100) 'iobt%sw_flux_vis_dir' , mpp_chksum( iobt%sw_flux_vis_dir) + write(stdout,100) 'iobt%sw_flux_vis_dif' , mpp_chksum( iobt%sw_flux_vis_dif) + write(stdout,100) 'iobt%sw_flux_nir_dir' , mpp_chksum( iobt%sw_flux_nir_dir) + write(stdout,100) 'iobt%sw_flux_nir_dif' , mpp_chksum( iobt%sw_flux_nir_dif) + write(stdout,100) 'iobt%lprec ' , mpp_chksum( iobt%lprec ) + write(stdout,100) 'iobt%fprec ' , mpp_chksum( iobt%fprec ) + write(stdout,100) 'iobt%runoff ' , mpp_chksum( iobt%runoff ) + write(stdout,100) 'iobt%calving ' , mpp_chksum( iobt%calving ) + write(stdout,100) 'iobt%p ' , mpp_chksum( iobt%p ) if (associated(iobt%ustar_berg)) & - write(outunit,100) 'iobt%ustar_berg ' , mpp_chksum( iobt%ustar_berg ) + write(stdout,100) 'iobt%ustar_berg ' , mpp_chksum( iobt%ustar_berg ) if (associated(iobt%area_berg)) & - write(outunit,100) 'iobt%area_berg ' , mpp_chksum( iobt%area_berg ) + write(stdout,100) 'iobt%area_berg ' , mpp_chksum( iobt%area_berg ) if (associated(iobt%mass_berg)) & - write(outunit,100) 'iobt%mass_berg ' , mpp_chksum( iobt%mass_berg ) + write(stdout,100) 'iobt%mass_berg ' , mpp_chksum( iobt%mass_berg ) 100 FORMAT(" CHECKSUM::",A20," = ",Z20) - call coupler_type_write_chksums(iobt%fluxes, outunit, 'iobt%') + call coupler_type_write_chksums(iobt%fluxes, stdout, 'iobt%') end subroutine ice_ocn_bnd_type_chksum diff --git a/config_src/mct_driver/ocn_comp_mct.F90 b/config_src/mct_driver/ocn_comp_mct.F90 index 741ce832e8..1872fff335 100644 --- a/config_src/mct_driver/ocn_comp_mct.F90 +++ b/config_src/mct_driver/ocn_comp_mct.F90 @@ -42,6 +42,7 @@ module ocn_comp_mct use MOM_constants, only: CELSIUS_KELVIN_OFFSET use MOM_domains, only: AGRID, BGRID_NE, CGRID_NE, 
pass_vector use mpp_domains_mod, only: mpp_get_compute_domain +use MOM_io, only: stdout ! Previously inlined - now in separate modules use MOM_ocean_model_mct, only: ocean_public_type, ocean_state_type @@ -88,7 +89,6 @@ module ocn_comp_mct type(cpl_indices_type) :: ind !< Variable IDs logical :: sw_decomp !< Controls whether shortwave is decomposed into 4 components real :: c1, c2, c3, c4 !< Coeffs. used in the shortwave decomposition i/o - integer :: stdout !< standard output unit. (by default, points to ocn.log.* ) character(len=384) :: pointer_filename !< Name of the ascii file that contains the path !! and filename of the latest restart file. end type MCT_MOM_Data @@ -194,14 +194,14 @@ subroutine ocn_init_mct( EClock, cdata_o, x2o_o, o2x_o, NLFilename ) call shr_file_getLogUnit (shrlogunit) call shr_file_getLogLevel(shrloglev) - glb%stdout = shr_file_getUnit() ! get an unused unit number + stdout = shr_file_getUnit() ! get an unused unit number ! open the ocn_modelio.nml file and then open a log file associated with stdout ocn_modelio_name = 'ocn_modelio.nml' // trim(inst_suffix) - call shr_file_setIO(ocn_modelio_name,glb%stdout) + call shr_file_setIO(ocn_modelio_name,stdout) ! set the shr log io unit number - call shr_file_setLogUnit(glb%stdout) + call shr_file_setLogUnit(stdout) end if call set_calendar_type(NOLEAP) !TODO: confirm this @@ -218,23 +218,23 @@ subroutine ocn_init_mct( EClock, cdata_o, x2o_o, o2x_o, NLFilename ) ! Debugging clocks if (debug .and. 
is_root_pe()) then - write(glb%stdout,*) 'ocn_init_mct, current time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_init_mct, current time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, StartTime=time_var, rc=rc) call ESMF_TimeGet(time_var, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc) - write(glb%stdout,*) 'ocn_init_mct, start time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_init_mct, start time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, StopTime=time_var, rc=rc) call ESMF_TimeGet(time_var, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc) - write(glb%stdout,*) 'ocn_init_mct, stop time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_init_mct, stop time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, PrevTime=time_var, rc=rc) call ESMF_TimeGet(time_var, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc) - write(glb%stdout,*) 'ocn_init_mct, previous time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_init_mct, previous time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, TimeStep=ocn_cpl_interval, rc=rc) call ESMF_TimeIntervalGet(ocn_cpl_interval, yy=year, mm=month, d=day, s=seconds, sn=seconds_n, sd=seconds_d, rc=rc) - write(glb%stdout,*) 'ocn_init_mct, time step: y,m,d-',year,month,day,'s,sn,sd=',seconds,seconds_n,seconds_d + write(stdout,*) 'ocn_init_mct, time step: y,m,d-',year,month,day,'s,sn,sd=',seconds,seconds_n,seconds_d endif npes = num_pes() @@ -298,7 +298,7 @@ subroutine ocn_init_mct( EClock, cdata_o, x2o_o, o2x_o, NLFilename ) ! 
read name of restart file in the pointer file nu = shr_file_getUnit() restart_pointer_file = trim(glb%pointer_filename) - if (is_root_pe()) write(glb%stdout,*) 'Reading ocn pointer file: ',restart_pointer_file + if (is_root_pe()) write(stdout,*) 'Reading ocn pointer file: ',restart_pointer_file restartfile = ""; restartfiles = ""; open(nu, file=restart_pointer_file, form='formatted', status='unknown') do @@ -316,13 +316,13 @@ subroutine ocn_init_mct( EClock, cdata_o, x2o_o, o2x_o, NLFilename ) enddo close(nu) if (is_root_pe()) then - write(glb%stdout,*) 'Reading restart file(s): ',trim(restartfiles) + write(stdout,*) 'Reading restart file(s): ',trim(restartfiles) end if call shr_file_freeUnit(nu) call ocean_model_init(glb%ocn_public, glb%ocn_state, time0, time_start, input_restart_file=trim(restartfiles)) endif if (is_root_pe()) then - write(glb%stdout,'(/12x,a/)') '======== COMPLETED MOM INITIALIZATION ========' + write(stdout,'(/12x,a/)') '======== COMPLETED MOM INITIALIZATION ========' end if ! Initialize ocn_state%sfc_state out of sight @@ -383,7 +383,7 @@ subroutine ocn_init_mct( EClock, cdata_o, x2o_o, o2x_o, NLFilename ) ncouple_per_day = seconds_in_day / ocn_cpl_dt mom_cpl_dt = seconds_in_day / ncouple_per_day if (mom_cpl_dt /= ocn_cpl_dt) then - write(glb%stdout,*) 'ERROR mom_cpl_dt and ocn_cpl_dt must be identical' + write(stdout,*) 'ERROR mom_cpl_dt and ocn_cpl_dt must be identical' call exit(0) end if @@ -457,7 +457,7 @@ subroutine ocn_run_mct( EClock, cdata_o, x2o_o, o2x_o) if (is_root_pe()) then call shr_file_getLogUnit(shrlogunit) call shr_file_getLogLevel(shrloglev) - call shr_file_setLogUnit(glb%stdout) + call shr_file_setLogUnit(stdout) endif ! Query the beginning time of the current coupling interval @@ -484,7 +484,7 @@ subroutine ocn_run_mct( EClock, cdata_o, x2o_o, o2x_o) if (runtype /= "continue" .and. runtype /= "branch") then if (debug .and. is_root_pe()) then - write(glb%stdout,*) 'doubling first interval duration!' 
+ write(stdout,*) 'doubling first interval duration!' endif ! shift back the start time by one coupling interval (to align the start time with other components) @@ -500,19 +500,19 @@ subroutine ocn_run_mct( EClock, cdata_o, x2o_o, o2x_o) if (debug .and. is_root_pe()) then call ESMF_ClockGet(EClock, CurrTime=time_var, rc=rc) call ESMF_TimeGet(time_var, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc) - write(glb%stdout,*) 'ocn_run_mct, current time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_run_mct, current time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, StartTime=time_var, rc=rc) call ESMF_TimeGet(time_var, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc) - write(glb%stdout,*) 'ocn_run_mct, start time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_run_mct, start time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, StopTime=time_var, rc=rc) call ESMF_TimeGet(time_var, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc) - write(glb%stdout,*) 'ocn_run_mct, stop time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_run_mct, stop time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, PrevTime=time_var, rc=rc) call ESMF_TimeGet(time_var, yy=year, mm=month, dd=day, h=hour, m=minute, s=seconds, rc=rc) - write(glb%stdout,*) 'ocn_run_mct, previous time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds + write(stdout,*) 'ocn_run_mct, previous time: y,m,d-',year,month,day,'h,m,s=',hour,minute,seconds call ESMF_ClockGet(EClock, TimeStep=ocn_cpl_interval, rc=rc) call ESMF_TimeIntervalGet(ocn_cpl_interval, yy=year, mm=month, d=day, s=seconds, sn=seconds_n, sd=seconds_d, rc=rc) - write(glb%stdout,*) 'ocn_init_mct, time step: y,m,d-',year,month,day,'s,sn,sd=',seconds,seconds_n,seconds_d + write(stdout,*) 'ocn_init_mct, time step: 
y,m,d-',year,month,day,'s,sn,sd=',seconds,seconds_n,seconds_d endif ! set the cdata pointers: @@ -525,10 +525,10 @@ subroutine ocn_run_mct( EClock, cdata_o, x2o_o, o2x_o) !glb%sw_decomp = .false. !END TODO: if (glb%sw_decomp) then - call ocn_import(x2o_o%rattr, glb%ind, glb%grid, Ice_ocean_boundary, glb%ocn_public, glb%stdout, Eclock, & + call ocn_import(x2o_o%rattr, glb%ind, glb%grid, Ice_ocean_boundary, glb%ocn_public, stdout, Eclock, & c1=glb%c1, c2=glb%c2, c3=glb%c3, c4=glb%c4) else - call ocn_import(x2o_o%rattr, glb%ind, glb%grid, Ice_ocean_boundary, glb%ocn_public, glb%stdout, Eclock ) + call ocn_import(x2o_o%rattr, glb%ind, glb%grid, Ice_ocean_boundary, glb%ocn_public, stdout, Eclock ) end if ! Update internal ocean @@ -540,7 +540,7 @@ subroutine ocn_run_mct( EClock, cdata_o, x2o_o, o2x_o) !--- write out intermediate restart file when needed. ! Check alarms for flag to write restart at end of day write_restart_at_eod = seq_timemgr_RestartAlarmIsOn(EClock) - if (debug .and. is_root_pe()) write(glb%stdout,*) 'ocn_run_mct, write_restart_at_eod=', write_restart_at_eod + if (debug .and. is_root_pe()) write(stdout,*) 'ocn_run_mct, write_restart_at_eod=', write_restart_at_eod if (write_restart_at_eod) then ! 
case name @@ -575,7 +575,7 @@ subroutine ocn_run_mct( EClock, cdata_o, x2o_o, o2x_o) endif close(nu) - write(glb%stdout,*) 'ocn restart pointer file written: ',trim(restartname) + write(stdout,*) 'ocn restart pointer file written: ',trim(restartname) endif call shr_file_freeUnit(nu) @@ -761,7 +761,7 @@ end subroutine ocn_domain_mct else if (trim(starttype) == trim(seq_infodata_start_type_brnch)) then get_runtype = "branch" else - write(glb%stdout,*) 'ocn_comp_mct ERROR: unknown starttype' + write(stdout,*) 'ocn_comp_mct ERROR: unknown starttype' call exit(0) end if return diff --git a/config_src/nuopc_driver/mom_cap.F90 b/config_src/nuopc_driver/mom_cap.F90 index c2a2e98838..fc6bb5035e 100644 --- a/config_src/nuopc_driver/mom_cap.F90 +++ b/config_src/nuopc_driver/mom_cap.F90 @@ -353,7 +353,7 @@ subroutine InitializeP0(gcomp, importState, exportState, clock, rc) file=__FILE__)) & return if (isPresent .and. isSet) then - read(value, '(i)', iostat=iostat) scalar_field_count + read(value, *, iostat=iostat) scalar_field_count if (iostat /= 0) then call ESMF_LogSetError(ESMF_RC_ARG_BAD, & msg=subname//": ScalarFieldCount not an integer: "//trim(value), & @@ -376,7 +376,7 @@ subroutine InitializeP0(gcomp, importState, exportState, clock, rc) file=__FILE__)) & return if (isPresent .and. isSet) then - read(value, '(i)', iostat=iostat) scalar_field_idx_grid_nx + read(value, *, iostat=iostat) scalar_field_idx_grid_nx if (iostat /= 0) then call ESMF_LogSetError(ESMF_RC_ARG_BAD, & msg=subname//": ScalarFieldIdxGridNX not an integer: "//trim(value), & @@ -399,7 +399,7 @@ subroutine InitializeP0(gcomp, importState, exportState, clock, rc) file=__FILE__)) & return if (isPresent .and. 
isSet) then - read(value, '(i)', iostat=iostat) scalar_field_idx_grid_ny + read(value, *, iostat=iostat) scalar_field_idx_grid_ny if (iostat /= 0) then call ESMF_LogSetError(ESMF_RC_ARG_BAD, & msg=subname//": ScalarFieldIdxGridNY not an integer: "//trim(value), & @@ -1434,14 +1434,14 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) !--------------------------------- if (len_trim(scalar_field_name) > 0) then - call State_SetScalar(dble(nxg),scalar_field_idx_grid_nx, exportState, localPet, & + call State_SetScalar(real(nxg,ESMF_KIND_R8),scalar_field_idx_grid_nx, exportState, localPet, & scalar_field_name, scalar_field_count, rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & line=__LINE__, & file=__FILE__)) & return - call State_SetScalar(dble(nyg),scalar_field_idx_grid_ny, exportState, localPet, & + call State_SetScalar(real(nyg,ESMF_KIND_R8),scalar_field_idx_grid_ny, exportState, localPet, & scalar_field_name, scalar_field_count, rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & line=__LINE__, & diff --git a/docs/.gitignore b/docs/.gitignore index 497c10b69d..e8b6a0513b 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -4,8 +4,15 @@ doxygen.log APIs MOM6.tags details/tutorial + + # Ignore sphinx-build output _build api src xml + + +# Citation output +bib*.aux +citelist.doc* diff --git a/docs/Doxyfile_nortd b/docs/Doxyfile_nortd index d72b668cf2..ca18bf49ee 100644 --- a/docs/Doxyfile_nortd +++ b/docs/Doxyfile_nortd @@ -279,7 +279,7 @@ ALIASES += footnote{1}="\latexonly\footnote\{\1\}\endlatexonly\htmlonly\2\endxmlonly" +ALIASES += imagelatex{3}="\latexonly\begin{DoxyImage}\3{\1}\doxyfigcaption{\2}\end{DoxyImage}\endlatexonly\xmlonly\2\endxmlonly" # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". 
For example adding "class=itcl::class" diff --git a/docs/Doxyfile_nortd_latex b/docs/Doxyfile_nortd_latex index 5666939154..f779d0215d 100644 --- a/docs/Doxyfile_nortd_latex +++ b/docs/Doxyfile_nortd_latex @@ -279,7 +279,7 @@ ALIASES += footnote{1}="\latexonly\footnote\{\1\}\endlatexonly\htmlonly\2\endxmlonly" +ALIASES += imagelatex{3}="\latexonly\begin{DoxyImage}\3{\1}\doxyfigcaption{\2}\end{DoxyImage}\endlatexonly\xmlonly\2\endxmlonly" # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding "class=itcl::class" diff --git a/docs/Doxyfile_rtd b/docs/Doxyfile_rtd index e3e747eee1..561de8382d 100644 --- a/docs/Doxyfile_rtd +++ b/docs/Doxyfile_rtd @@ -256,7 +256,7 @@ ALIASES += footnote{1}="\latexonly\footnote\{\1\}\endlatexonly\htmlonly\2\endxmlonly" +ALIASES += imagelatex{3}="\latexonly\begin{DoxyImage}\3{\1}\doxyfigcaption{\2}\end{DoxyImage}\endlatexonly\xmlonly\2\endxmlonly" # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding "class=itcl::class" diff --git a/docs/Doxyfile_rtd_dox b/docs/Doxyfile_rtd_dox index 3318512f08..b9c7c3c0d3 100644 --- a/docs/Doxyfile_rtd_dox +++ b/docs/Doxyfile_rtd_dox @@ -256,7 +256,7 @@ ALIASES += footnote{1}="\latexonly\footnote\{\1\}\endlatexonly\htmlonly\2\endxmlonly" +ALIASES += imagelatex{3}="\latexonly\begin{DoxyImage}\3{\1}\doxyfigcaption{\2}\end{DoxyImage}\endlatexonly\xmlonly\2\endxmlonly" # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". 
For example adding "class=itcl::class" @@ -814,12 +814,24 @@ INPUT = ../src \ ../config_src/dynamic_symmetric \ ../config_src/external \ ../config_src/coupled_driver \ - ../src/core/MOM.F90 \ ../src/ALE/MOM_ALE.F90 \ + ../src/ALE/PCM_functions.F90 \ + ../src/core/MOM.F90 \ + ../src/core/MOM_density_integrals.F90 \ + ../src/diagnostics/MOM_wave_structure.F90 \ ../src/equation_of_state/MOM_EOS.F90 \ + ../src/framework/MOM_diag_mediator.F90 \ + ../src/framework/MOM_domains.F90 \ + ../src/framework/MOM_dyn_horgrid.F90 \ + ../src/framework/MOM_file_parser.F90 \ ../src/framework/MOM_unit_scaling.F90 \ ../src/ice_shelf/MOM_ice_shelf.F90 \ - ../src/parameterizations/lateral/MOM_MEKE.F90 + ../src/parameterizations/lateral/MOM_MEKE.F90 \ + ../src/parameterizations/vertical/MOM_bkgnd_mixing.F90 \ + ../src/parameterizations/vertical/MOM_energetic_PBL.F90 \ + ../src/parameterizations/vertical/MOM_set_viscosity.F90 \ + ../src/tracer/oil_tracer.F90 \ + ../src/user/user_initialization.F90 # Basic testing units below diff --git a/docs/README.md b/docs/README.md index ef9d36792a..b071ce5927 100644 --- a/docs/README.md +++ b/docs/README.md @@ -42,6 +42,9 @@ Doxygen in run in two modes. For the API manual, it is instructed to generate o If the software is installed, local copies of html and pdf instead of those found at RTD. The html and pdf versions produced by doxygen will not have any of the additional documentation from the sphinx portion of the pipeline. +RTD by default runs html(sphinx) pipeline and adds a bit of styling. The RTD can be instructed to also produce a PDF. However, the current pipeline exceeds 900 seconds +execution time and fails if RTD is instructed to generate the html and PDF in one session. By default, the PDF generation is currently turned off. + ## Starting structure We define `SRC` as the root of the source directory assuming the `$(SRC)` directory was created from downloading the tree from github using `git clone`. 
@@ -96,7 +99,7 @@ on the [MOM6 developer's wiki](https://github.com/NOAA-GFDL/MOM6/wiki/Doxygen). NOTE: Not all doxygen commands are supported through the sphinx documentation processor. Support can be added by adding an [issue](https://github.com/NOAA-GFDL/MOM6/issues) to the github repository. -For the API documentation, the tree will look like this: +For the API documentation, the resultant tree will look like this: ``` SRC/ docs/ @@ -107,7 +110,7 @@ SRC/ MOM6.tags ``` -The main driver for doxygen is a configuration file. The content linked to [MOM6 developer's wiki](https://github.com/NOAA-GFDL/MOM6/wiki/Doxygen) uses the `Doxyfile_nortd` configuration file. +The main driver for doxygen is a configuration file. The content on [MOM6 developer's wiki](https://github.com/NOAA-GFDL/MOM6/wiki/Doxygen) uses the `Doxyfile_nortd` configuration file. By default, the html directory is only available after processing the documentation. Please see [software operation](software-operation) on how to generate the pdf companion of the documentation. @@ -121,7 +124,7 @@ SRC/ MOM6.tags ``` -The documentation generated by Sphinx and RTD uses the `Doxyfile_rtd`. These options can be overridden at the command line. See software operation for more details. +The documentation generated by Sphinx and RTD uses the `Doxyfile_rtd`. These options can be overridden at the command line. See [software operation](software-operation) for more details. The `_build/doxygen_warn_rtd_log.txt` should be reviewed for warnings and errors. @@ -145,7 +148,7 @@ The `html_log.txt` should be reviewed for warnings and errors. ## Read the Docs -The RTD site can be configured to watch for updates on a github repository. A documentation update may be triggered when an update is pushed to the repository. The entire documentation process is run twice. The first run produces html. The second run produces a pdf. +The RTD site can be configured to watch for updates on a github repository. 
A documentation update may be triggered when an update is pushed to the repository. The entire documentation process is run twice. The first run produces html. The second run produces a pdf (if so instructed). NOTE: There is a rough execution time limit of about 900 seconds. Trying to do more than that will cause a "timeout" error. @@ -249,6 +252,7 @@ as RTD runs sphinx processing directly using `sphinx-build` and not the Makefile ##### PAPER The default value is empty (`PAPER=`). There are two options currently available in the Makefile (a4 or letter). +The `PAPER` argument has no impact on html rendered content. #### conf.py @@ -274,19 +278,6 @@ NOTE: This command does not have any impact if an existing binary is found. This option activates the NCAR configuration file: `ncar/Doxyfile_ncar_rtd` -### Local web server - -Python provides a way to quickly stand up a private web server for checking documentation. It requires knowledge of -the IP address if you are using a remote server, otherwise `localhost` should work. - -You can start the server on any port. Port 8080 is shown here as an example. -```bash -python3 -m http.server 8080 -``` - -After starting the server, you can browse to the known IP using `http://IP/` or if you are on the same -machine use `http://localhost/`. - # Software installation On a relatively bare system, you can install a fairly stable documentation pipeline. @@ -346,7 +337,9 @@ PDF generation requires the following packages ### doxygen -Download latest [source](https://www.doxygen.nl/download.html). Latest is `doxygen-1.8.20.src.tar.gz`. +You may choose to download the [source](https://www.doxygen.nl/download.html). + +Latest is `doxygen-1.8.20.src.tar.gz`. ```bash tar xzf doxygen-1.8.20.src.tar.gz @@ -358,15 +351,20 @@ make sudo make install ``` -Make install attempts to place the compiled version into /usr/local/bin. You can link to a -specific executable within the virtual environment. 
At this point we also recommend -renaming `doxygen` to `doxygen-1.8.20` within `/usr/local/bin`. +The makefile for doxygen attempts to install the compiled version into /usr/local/bin. +You can link to a specific executable within the virtual environment. At this point we +also recommend renaming `doxygen` to `doxygen-1.8.20` within `/usr/local/bin`. + +NOTE: The makefile for the documentation framework will attempt to compile a local doxygen +binary of version 1.8.13 if a binary cannot be found in the `$PATH`. #### Testing -Currently, the majority of testing has been done with the following versions: +A lot of manual testing has been completed using the following versions: * 1.8.13 +* 1.8.14 * 1.8.19 +* 1.8.20 ### Read the Docs @@ -383,56 +381,6 @@ See Sphinx run options below. We capture up to three logfiles for RTD. The mai Logfiles were renamed to `*.txt` to allow easier access and viewing from most websites. Most websites force download of `*.log` files. -### python3 virtual enviroment - -Setup a virtual environment for processing: - -```bash -python3 -m venv venv/mom6Doc -source venv/mom6Doc/bin/activate -# cd to the docs directory within the MOM6 repo -pip3 install -r requirements.txt -``` - -The `deactivate` command allows you to exit from the virtual environment. - -NOTE: RTD will not upgrade the sphinx module if `#egg=` is specified in the `requirements.txt` file. - -### debugging - -A useful commnad line tool for debugging sphinx and extensions is the python debugger. -Add the following line to stop to any portion of the python code to create a break -point. - -```python -import pdb; pdb.set_trace() -``` - -Run `make html` without redirection to a log file. - -For only processing .dox files, run -`make clean; make html DOXYGEN_CONF=Doxyfile_rtd_dox UPDATEHTMLEQS=Y` - -## Example execution - -The following example assumes a virtual environment as setup above using `mom6Doc`. -The same environment is possible using anaconda. 
- -``` -$ source venv/mom6Doc/bin/activate -(mom6Doc) $ cd docs -(mom6Doc) $ make clean -(mom6Doc) $ make html >& _build/html_log.txt -(mom6Doc) $ make latexpdf >& _build/latex_log.txt -``` - -The last command may appear to hang. On error, latex will request input from the keyboard. -Press `R` and enter. This will keep latex running to completion or stop after 100 errors -are reached. - -Once the documentation is built, you can use a web browser to look around in the `_build` -directory. - # Credits ## 2020 @@ -442,10 +390,10 @@ to process the MOM6 documentation. The versions are tagged and placed into the | Source | Modified | Version | Development | | ------ | -------- | ------- | ----------- | | [sphinx](https://github.com/sphinx-doc/sphinx) | [sphinx-3.2.1mom6.4](https://github.com/jr3cermak/sphinx) | B:3.2.1mom6.4 | B:dev | -| [sphinxcontrib-autodoc-doxygen](https://github.com/rmcgibbo/sphinxcontrib-autodoc_doxygen) | [sphinxcontrib-autodoc-doxygen](https://github.com/jr3cermak/sphinxcontrib-autodoc_doxygen) | T:0.7.12 | B:dev | +| [sphinxcontrib-autodoc-doxygen](https://github.com/rmcgibbo/sphinxcontrib-autodoc_doxygen) | [sphinxcontrib-autodoc-doxygen](https://github.com/jr3cermak/sphinxcontrib-autodoc_doxygen) | T:0.7.13 | B:dev | | [sphinx-fortran](https://github.com/VACUMM/sphinx-fortran) | [sphinx-fortran](https://github.com/jr3cermak/sphinx-fortran) | T:1.2.2 | B:dev | | [flint](https://github.com/marshallward/flint) | [flint](https://github.com/jr3cermak/flint) | T:0.0.1 | B:dev | -| [MOM6](https://github.com/NOAA-GFDL/MOM6) | [esmg-docs](https://github.com/ESMG/MOM6/tree/esmg-docs) | [esmg-docs](https://github.com/jr3cermak/MOM6/tree/esmg-docs) | B:[dev/rob](https://github.com/jr3cermak/MOM6/tree/dev-rob) | +| [MOM6](https://github.com/NOAA-GFDL/MOM6) | [esmg-docs](https://github.com/ESMG/MOM6/tree/esmg-docs) | [esmg-docs](https://github.com/ESMG/MOM6/tree/esmg-docs) | 
B:[esmg-test](https://github.com/jr3cermak/MOM6/tree/esmg-test) | T: tag B: branch diff --git a/docs/conf.py b/docs/conf.py index 8581f5ca25..43f09417fd 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -181,7 +181,7 @@ def latexPassthru(name, rawtext, text, lineno, inliner, options={}, content=[]): # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build', 'src', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ['_build', 'details', 'src', 'Thumbs.db', '.DS_Store'] # The reST default role (used for this markup: `text`) to use for all # documents. diff --git a/docs/details/general.md b/docs/details/general.md index 9933758ac5..c15a67786b 100644 --- a/docs/details/general.md +++ b/docs/details/general.md @@ -39,3 +39,71 @@ latex escaped math in equations. If a latex string for `\theta` is read in as tab (`\t`) and cause mahem with the processing. Reference: [string literals](https://docs.python.org/3/reference/lexical_analysis.html?#literals) + +# Debugging + +## python3 virtual enviroment + +Setup a virtual environment for processing: + +```bash +python3 -m venv venv/mom6Doc +source venv/mom6Doc/bin/activate +# cd to the docs directory within the MOM6 repo +pip3 install -r requirements.txt +``` + +The `deactivate` command allows you to exit from the virtual environment. + +NOTE: RTD will not upgrade the sphinx module if `#egg=` is specified in the `requirements.txt` file. + +### debugging + +A useful commnad line tool for debugging sphinx and extensions is the python debugger. +Add the following line to stop to any portion of the python code to create a break +point. + +```python +import pdb; pdb.set_trace() +``` + +Run `make html` without redirection to a log file. + +For only processing .dox files and some specific F90 files, edit and use the +`Doxyfile_rtd_dox` file. This limits the document processing to fewer files and +allows for rapid testing. 
+ +`make clean; make html DOXYGEN_CONF=Doxyfile_rtd_dox UPDATEHTMLEQS=Y` + +## Example execution + +The following example assumes a virtual environment as setup above using `mom6Doc`. +A similar environment is possible using anaconda. + +``` +$ source venv/mom6Doc/bin/activate +(mom6Doc) $ cd docs +(mom6Doc) $ make clean +(mom6Doc) $ make html >& _build/html_log.txt +(mom6Doc) $ make latexpdf >& _build/latex_log.txt +``` + +The last command may appear to hang. On error, latex will request input from the keyboard. +Press `R` and enter. This will keep latex running to completion or stop after 100 errors +are reached. + +Once the documentation is built, you can use a web browser to look around in the `_build` +directory. + +## Local web server + +Python provides a way to quickly stand up a private web server for checking documentation. It requires knowledge of +the IP address if you are using a remote server, otherwise `localhost` should work. + +You can start the server on any port. Port 8080 is shown here as an example. +```bash +python3 -m http.server 8080 +``` + +After starting the server, you can browse to the known IP using `http://IP:8080/` or if you are on the same +machine use `http://localhost:8080/`. diff --git a/docs/discrete_time.rst b/docs/discrete_time.rst new file mode 100644 index 0000000000..23a8720d6e --- /dev/null +++ b/docs/discrete_time.rst @@ -0,0 +1,12 @@ +Time Discretization +=================== + +.. toctree:: + :maxdepth: 2 + + api/generated/pages/Timestep_Overview + api/generated/pages/Barotropic_Momentum_Equations + api/generated/pages/Baroclinic_Momentum_Equations + api/generated/pages/Barotropic_Baroclinic_Coupling + api/generated/pages/Tracer_Timestep + api/generated/pages/ALE_Timestep diff --git a/docs/forcing.rst b/docs/forcing.rst new file mode 100644 index 0000000000..911f708b68 --- /dev/null +++ b/docs/forcing.rst @@ -0,0 +1,8 @@ +Forcing +======= + +.. 
toctree:: + :maxdepth: 2 + + api/generated/pages/Solar_Radiation + api/generated/pages/Tracer_Fluxes diff --git a/docs/grids.rst b/docs/grids.rst new file mode 100644 index 0000000000..98b35ba106 --- /dev/null +++ b/docs/grids.rst @@ -0,0 +1,11 @@ +Grids +===== + +We love grids. + +.. toctree:: + :maxdepth: 2 + + api/generated/pages/Global_Grids + api/generated/pages/Regional_Grids + api/generated/pages/Vertical_Grids diff --git a/docs/images/Newton_PPM.png b/docs/images/Newton_PPM.png new file mode 100644 index 0000000000..86749c98a1 Binary files /dev/null and b/docs/images/Newton_PPM.png differ diff --git a/docs/images/PPM_1d.png b/docs/images/PPM_1d.png new file mode 100644 index 0000000000..19d54ebbe7 Binary files /dev/null and b/docs/images/PPM_1d.png differ diff --git a/docs/images/bottom_drag.png b/docs/images/bottom_drag.png new file mode 100644 index 0000000000..84a91def77 Binary files /dev/null and b/docs/images/bottom_drag.png differ diff --git a/docs/images/bt_bc_thickness.png b/docs/images/bt_bc_thickness.png new file mode 100644 index 0000000000..12748483b6 Binary files /dev/null and b/docs/images/bt_bc_thickness.png differ diff --git a/docs/images/cell_3d.svg b/docs/images/cell_3d.svg new file mode 100644 index 0000000000..3e70cad799 --- /dev/null +++ b/docs/images/cell_3d.svg @@ -0,0 +1,314 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + T, S, h + e_i,j,k + e_i,j,k+1 + v + u + + diff --git a/docs/images/h_PPM.png b/docs/images/h_PPM.png new file mode 100644 index 0000000000..5e247ecaff Binary files /dev/null and b/docs/images/h_PPM.png differ diff --git a/docs/images/sbl1.svg b/docs/images/sbl1.svg new file mode 100644 index 0000000000..ef66a233c5 --- /dev/null +++ b/docs/images/sbl1.svg @@ -0,0 +1,376 @@ + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + z + x + + + + + + + + + + Left + Right + k = 1 + k = 2 + k = 3 + k = 4 + k = 5 + + 
Boundary layer depth + o + o + o + o + o + o + o + o + + diff --git a/docs/images/sbl2.svg b/docs/images/sbl2.svg new file mode 100644 index 0000000000..1ef25b5d13 --- /dev/null +++ b/docs/images/sbl2.svg @@ -0,0 +1,486 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + z + x + + + + + + + + + + Left + Right + k = 1 + k = 2 + k = 3 + k = 4 + k = 5 + + Boundary layer depth + o + o + o + o + o + o + o + o + + + + + + diff --git a/docs/images/timestep_MOM6.png b/docs/images/timestep_MOM6.png new file mode 100644 index 0000000000..4febb50365 Binary files /dev/null and b/docs/images/timestep_MOM6.png differ diff --git a/docs/images/timestep_ROMS.png b/docs/images/timestep_ROMS.png new file mode 100644 index 0000000000..134dbdc599 Binary files /dev/null and b/docs/images/timestep_ROMS.png differ diff --git a/docs/images/timesteps_4.png b/docs/images/timesteps_4.png new file mode 100644 index 0000000000..63e2700129 Binary files /dev/null and b/docs/images/timesteps_4.png differ diff --git a/docs/index.rst b/docs/index.rst index 5ffcf17fc1..53ded57f57 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -14,15 +14,20 @@ Contents: about equations discrete_space + discrete_time + tracers + grids parameterizations + other_physics working_with_MOM6 + forcing + parallel testing apiref - + zzbibliography Indices and tables ================== * :ref:`genindex` * :ref:`search` - diff --git a/docs/other_physics.rst b/docs/other_physics.rst new file mode 100644 index 0000000000..44cf6d4637 --- /dev/null +++ b/docs/other_physics.rst @@ -0,0 +1,8 @@ +Other Physics +============= + +.. toctree:: + :maxdepth: 2 + + api/generated/pages/Equation_of_State + api/generated/pages/Sea_Ice diff --git a/docs/parallel.rst b/docs/parallel.rst new file mode 100644 index 0000000000..284e02c0b9 --- /dev/null +++ b/docs/parallel.rst @@ -0,0 +1,8 @@ +Parallel Implementation +======================= + +.. 
toctree:: + :maxdepth: 2 + + api/generated/pages/Domain_Decomposition + api/generated/pages/Parallel_IO diff --git a/docs/requirements.txt b/docs/requirements.txt index 7c8a0afe4e..52fcf95bc0 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,5 @@ git+https://github.com/jr3cermak/sphinx.git@v3.2.1mom6.4 -git+https://github.com/jr3cermak/sphinxcontrib-autodoc_doxygen.git@0.7.12#egg=sphinxcontrib-autodoc_doxygen +git+https://github.com/jr3cermak/sphinxcontrib-autodoc_doxygen.git@0.7.13#egg=sphinxcontrib-autodoc_doxygen git+https://github.com/jr3cermak/sphinx-fortran.git@1.2.2#egg=sphinx-fortran git+https://github.com/jr3cermak/flint.git@0.0.1#egg=flint sphinx-rtd-theme diff --git a/docs/tracers.rst b/docs/tracers.rst new file mode 100644 index 0000000000..6190fe096d --- /dev/null +++ b/docs/tracers.rst @@ -0,0 +1,12 @@ +Tracers in MOM6 +================= + +.. toctree:: + :maxdepth: 1 + + api/generated/pages/Tracer_Advection.rst + api/generated/pages/Tracer_Transport_Equations.rst + api/generated/pages/Horizontal_Diffusion.rst + api/generated/pages/Vertical_Diffusion.rst + api/generated/pages/Passive_Tracers + diff --git a/docs/zotero.bib b/docs/zotero.bib index 29c1d43161..f0e1a3b44d 100644 --- a/docs/zotero.bib +++ b/docs/zotero.bib @@ -2355,7 +2355,7 @@ @article{russell1981 doi = {10.1175/1520-0450(1981)020<1483:ANFDSF>2.0.CO;2} } -@inbook{huynh1997, +@inproceedings{huynh1997, title = {Schemes and constraints for advection}, booktitle = {Fifteenth International Conference on Numerical Methods in Fluid Dynamics}, @@ -2482,12 +2482,11 @@ @inproceedings{millero1978 number = {28}, pages = {29--35}, year = {1978}, - url = - {https://www.researchgate.net/publication/292574579_Freezing_point_of_seawater} + url = {https://www.researchgate.net/publication/292574579_Freezing_point_of_seawater} } @article{roquet2015, - author = {Roquet, F. and G. Madec and T. J. McDougall, and Barker, P. M.}, + author = {Roquet, F. and G. 
Madec and T. J. McDougall and P. M. Barker}, year = {2015}, title = {Accurate polynomial expressions for the density and specific volume of seawater using the TEOS-10 standard}, diff --git a/src/ALE/MOM_remapping.F90 b/src/ALE/MOM_remapping.F90 index 71ba83f3ba..1b3c5884de 100644 --- a/src/ALE/MOM_remapping.F90 +++ b/src/ALE/MOM_remapping.F90 @@ -13,8 +13,7 @@ module MOM_remapping use PLM_functions, only : PLM_reconstruction, PLM_boundary_extrapolation use PPM_functions, only : PPM_reconstruction, PPM_boundary_extrapolation use PQM_functions, only : PQM_reconstruction, PQM_boundary_extrapolation_v1 - -use iso_fortran_env, only : stdout=>output_unit, stderr=>error_unit +use MOM_io, only : stdout, stderr implicit none ; private @@ -1636,7 +1635,7 @@ logical function remapping_unit_tests(verbose) h_neglect = hNeglect_dflt h_neglect_edge = hNeglect_dflt ; if (answers_2018) h_neglect_edge = 1.0e-10 - write(*,*) '==== MOM_remapping: remapping_unit_tests =================' + write(stdout,*) '==== MOM_remapping: remapping_unit_tests =================' remapping_unit_tests = .false. ! Normally return false thisTest = .false. @@ -1645,19 +1644,19 @@ logical function remapping_unit_tests(verbose) err=x0(i)-0.75*real(i-1) if (abs(err)>real(i-1)*epsilon(err)) thisTest = .true. enddo - if (thisTest) write(*,*) 'remapping_unit_tests: Failed buildGridFromH() 1' + if (thisTest) write(stdout,*) 'remapping_unit_tests: Failed buildGridFromH() 1' remapping_unit_tests = remapping_unit_tests .or. thisTest call buildGridFromH(n1, h1, x1) do i=1,n1+1 err=x1(i)-real(i-1) if (abs(err)>real(i-1)*epsilon(err)) thisTest = .true. enddo - if (thisTest) write(*,*) 'remapping_unit_tests: Failed buildGridFromH() 2' + if (thisTest) write(stdout,*) 'remapping_unit_tests: Failed buildGridFromH() 2' remapping_unit_tests = remapping_unit_tests .or. thisTest thisTest = .false. 
call initialize_remapping(CS, 'PPM_H4', answers_2018=answers_2018) - if (verbose) write(*,*) 'h0 (test data)' + if (verbose) write(stdout,*) 'h0 (test data)' if (verbose) call dumpGrid(n0,h0,x0,u0) call dzFromH1H2( n0, h0, n1, h1, dx1 ) @@ -1666,9 +1665,9 @@ logical function remapping_unit_tests(verbose) err=u1(i)-8.*(0.5*real(1+n1)-real(i)) if (abs(err)>real(n1-1)*epsilon(err)) thisTest = .true. enddo - if (verbose) write(*,*) 'h1 (by projection)' + if (verbose) write(stdout,*) 'h1 (by projection)' if (verbose) call dumpGrid(n1,h1,x1,u1) - if (thisTest) write(*,*) 'remapping_unit_tests: Failed remapping_core_w()' + if (thisTest) write(stdout,*) 'remapping_unit_tests: Failed remapping_core_w()' remapping_unit_tests = remapping_unit_tests .or. thisTest thisTest = .false. @@ -1690,7 +1689,7 @@ logical function remapping_unit_tests(verbose) err=u1(i)-8.*(0.5*real(1+n1)-real(i)) if (abs(err)>2.*epsilon(err)) thisTest = .true. enddo - if (thisTest) write(*,*) 'remapping_unit_tests: Failed remapByProjection()' + if (thisTest) write(stdout,*) 'remapping_unit_tests: Failed remapByProjection()' remapping_unit_tests = remapping_unit_tests .or. thisTest thisTest = .false. @@ -1698,14 +1697,14 @@ logical function remapping_unit_tests(verbose) call remapByDeltaZ( n0, h0, u0, ppoly0_E, ppoly0_coefs, & n1, x1-x0(1:n1+1), & INTEGRATION_PPM, u1, hn1, h_neglect ) - if (verbose) write(*,*) 'h1 (by delta)' + if (verbose) write(stdout,*) 'h1 (by delta)' if (verbose) call dumpGrid(n1,h1,x1,u1) hn1=hn1-h1 do i=1,n1 err=u1(i)-8.*(0.5*real(1+n1)-real(i)) if (abs(err)>2.*epsilon(err)) thisTest = .true. enddo - if (thisTest) write(*,*) 'remapping_unit_tests: Failed remapByDeltaZ() 1' + if (thisTest) write(stdout,*) 'remapping_unit_tests: Failed remapByDeltaZ() 1' remapping_unit_tests = remapping_unit_tests .or. thisTest thisTest = .false. 
@@ -1715,19 +1714,19 @@ logical function remapping_unit_tests(verbose) call remapByDeltaZ( n0, h0, u0, ppoly0_E, ppoly0_coefs, & n2, dx2, & INTEGRATION_PPM, u2, hn2, h_neglect ) - if (verbose) write(*,*) 'h2' + if (verbose) write(stdout,*) 'h2' if (verbose) call dumpGrid(n2,h2,x2,u2) - if (verbose) write(*,*) 'hn2' + if (verbose) write(stdout,*) 'hn2' if (verbose) call dumpGrid(n2,hn2,x2,u2) do i=1,n2 err=u2(i)-8./2.*(0.5*real(1+n2)-real(i)) if (abs(err)>2.*epsilon(err)) thisTest = .true. enddo - if (thisTest) write(*,*) 'remapping_unit_tests: Failed remapByDeltaZ() 2' + if (thisTest) write(stdout,*) 'remapping_unit_tests: Failed remapByDeltaZ() 2' remapping_unit_tests = remapping_unit_tests .or. thisTest - if (verbose) write(*,*) 'Via sub-cells' + if (verbose) write(stdout,*) 'Via sub-cells' thisTest = .false. call remap_via_sub_cells( n0, h0, u0, ppoly0_E, ppoly0_coefs, & n2, h2, INTEGRATION_PPM, .false., u2, err ) @@ -1737,7 +1736,7 @@ logical function remapping_unit_tests(verbose) err=u2(i)-8./2.*(0.5*real(1+n2)-real(i)) if (abs(err)>2.*epsilon(err)) thisTest = .true. enddo - if (thisTest) write(*,*) 'remapping_unit_tests: Failed remap_via_sub_cells() 2' + if (thisTest) write(stdout,*) 'remapping_unit_tests: Failed remap_via_sub_cells() 2' remapping_unit_tests = remapping_unit_tests .or. thisTest call remap_via_sub_cells( n0, h0, u0, ppoly0_E, ppoly0_coefs, & @@ -1748,9 +1747,9 @@ logical function remapping_unit_tests(verbose) 3, (/2.25,1.5,1./), INTEGRATION_PPM, .false., u2, err ) if (verbose) call dumpGrid(3,h2,x2,u2) - if (.not. remapping_unit_tests) write(*,*) 'Pass' + if (.not. 
remapping_unit_tests) write(stdout,*) 'Pass' - write(*,*) '===== MOM_remapping: new remapping_unit_tests ==================' + write(stdout,*) '===== MOM_remapping: new remapping_unit_tests ==================' deallocate(ppoly0_E, ppoly0_S, ppoly0_coefs) allocate(ppoly0_coefs(5,6)) @@ -1879,7 +1878,7 @@ logical function remapping_unit_tests(verbose) deallocate(ppoly0_E, ppoly0_S, ppoly0_coefs) - if (.not. remapping_unit_tests) write(*,*) 'Pass' + if (.not. remapping_unit_tests) write(stdout,*) 'Pass' end function remapping_unit_tests diff --git a/src/core/MOM_dynamics_split_RK2.F90 b/src/core/MOM_dynamics_split_RK2.F90 index 4659973a3a..c74970f097 100644 --- a/src/core/MOM_dynamics_split_RK2.F90 +++ b/src/core/MOM_dynamics_split_RK2.F90 @@ -1220,6 +1220,9 @@ subroutine initialize_dyn_split_RK2(u, v, h, uh, vh, eta, Time, G, GV, US, param Accel_diag%PFv => CS%PFv Accel_diag%CAu => CS%CAu Accel_diag%CAv => CS%CAv + Accel_diag%u_accel_bt => CS%u_accel_bt + Accel_diag%v_accel_bt => CS%v_accel_bt + ! Accel_diag%pbce => CS%pbce ! 
Accel_diag%u_accel_bt => CS%u_accel_bt ; Accel_diag%v_accel_bt => CS%v_accel_bt diff --git a/src/core/MOM_variables.F90 b/src/core/MOM_variables.F90 index c0bae377bd..2cfce980dc 100644 --- a/src/core/MOM_variables.F90 +++ b/src/core/MOM_variables.F90 @@ -172,7 +172,9 @@ module MOM_variables du_dt_visc => NULL(), &!< Zonal acceleration due to vertical viscosity [L T-2 ~> m s-2] dv_dt_visc => NULL(), &!< Meridional acceleration due to vertical viscosity [L T-2 ~> m s-2] du_dt_dia => NULL(), & !< Zonal acceleration due to diapycnal mixing [L T-2 ~> m s-2] - dv_dt_dia => NULL() !< Meridional acceleration due to diapycnal mixing [L T-2 ~> m s-2] + dv_dt_dia => NULL(), & !< Meridional acceleration due to diapycnal mixing [L T-2 ~> m s-2] + u_accel_bt => NULL(), &!< Pointer to the zonal barotropic-solver acceleration [L T-2 ~> m s-2] + v_accel_bt => NULL() !< Pointer to the meridional barotropic-solver acceleration [L T-2 ~> m s-2] real, pointer, dimension(:,:,:) :: du_other => NULL() !< Zonal velocity changes due to any other processes that are !! not due to any explicit accelerations [L T-1 ~> m s-1]. diff --git a/src/core/_Baroclinic_Momentum.dox b/src/core/_Baroclinic_Momentum.dox new file mode 100644 index 0000000000..b342f86bee --- /dev/null +++ b/src/core/_Baroclinic_Momentum.dox @@ -0,0 +1,37 @@ +/*! \page Baroclinic_Momentum_Equations Baroclinic Momentum Equations + +\section section_BC_momentum Baroclinic Momentum Equations + +The baroclinic momentum equations are the stacked shallow water equations: + +\f[ + \frac{\partial \vec{u}_k}{\partial t} + (f + \nabla_s \times \vec{u}_k) \hat{z} + \times \vec{u}_k = - \frac{\nabla_s p_k}{\rho} - \nabla_s (\phi_k + \frac{1}{2} || + \vec{u}_k ||^2 ) + \frac{\nabla \cdot \tilde{\tau}_k}{\rho} +\f] +\f[ + \frac{\partial h_k}{\partial t} + \nabla_s \cdot (\vec{u}h_k) = 0 +\f] + +The timestepping for these equations is a (quasi?) 
second-order Runge-Kutta step +for the inertial oscillations and a forward-backward Euler step for the pressure +(gravity) waves. Using the graphical notation from \cite shchepetkin2005, it looks +like: + +\image html timestep_MOM6.png "Graphical notation for timestepping schemes in which the black line represents the ideal solution and the red line shows the actual solution. Phase errors are represented by the grey shapes between the bars normal to the circle." +\image latex timestep_MOM6.png "Graphical notation for timestepping schemes in which the black line represents the ideal solution and the red line shows the actual solution. Phase errors are represented by the grey shapes between the bars normal to the circle." + +The timestep used in ROMS looks instead like: + +\image html timestep_ROMS.png "Graphical notation for the Adams-Bashforth technique used in the ROMS model." +\image latex timestep_ROMS.png "Graphical notation for the Adams-Bashforth technique used in the ROMS model." + +The ROMS timestepping has smaller phase errors, strong damping at high +frequency. The MOM6 use as a global climate model has made the phase +errors of lower priority. However, the phase errors may become more +problematic for future uses of MOM6. While the MOM6 use of the ALE +remapping makes an Adams-Bashforth scheme impractical, there may be a +better timestepping scheme out there for MOM6. Please let the MOM6 +developers know if you would like to work on this problem. + +*/ diff --git a/src/core/_Barotropic_Baroclinic_Coupling.dox b/src/core/_Barotropic_Baroclinic_Coupling.dox new file mode 100644 index 0000000000..f8bb366197 --- /dev/null +++ b/src/core/_Barotropic_Baroclinic_Coupling.dox @@ -0,0 +1,305 @@ +/*! \page Barotropic_Baroclinic_Coupling Barotropic-Baroclinic Coupling + +\brief Time-averaged accelerations + +The barotropic equations are timestepped with a timestep to resolve the surface +gravity waves. 
With care, the baroclinic timestep need only resolve the inertial
(\vec{u}_k h_k) \\ + \&= - \nabla \cdot \left[ \sum_{k=1}^N h_k \frac{\sum_{k=1}^N (\vec{u}_k h_k)} + {\sum_{k=1}^M k_k} \right] \equiv - \nabla \cdot H \mathbf{U} +\f] +where + +\f[ + \mathbf{U} \equiv \frac{\sum_{k=1}^N (\vec{u}_k h_k)} {\sum_{k=1}^M k_k} +\f] +\f[ + H \equiv \sum_{k=1}^N h_k +\f] +However, ALE models like MOM6 require positive-definite, nonlinear continuity +solvers (MOM6 uses \ref PPM). We need a different way to reconcile this +estimate of free surface height with the one coming from the barotropic equations. +After rejecting several other options, MOM6 is now using the scheme from +\cite hallberg2009. The barotropic update of \f$\eta\f$ is given by: + +\f[ + \frac{\eta^{n+1} - \eta^n}{\Delta t} + \nabla \cdot \left( \overline{UH} \right) = 0 +\f] + +Determine the \f$\Delta U\f$ such that: + +\f[ + \sum_{k=1}^N \mathbf{F} (\tilde{u}_k, h_k) = \left( \overline{UH} \right) +\f] +where +\f[ + \tilde{u}_k = u_k + \Delta U +\f] + +The layer timestep equation becomes: + +\f[ + h_k^{n+1} = h_k^n - \Delta t \nabla \cdot \mathbf{F} (\tilde{u}_k, h_k) +\f] + +This scheme has these properties: + +\li Total mass is conserved layer-wise. +\li Layer equations retain their flux form. +\li Total salt, heat, and other tracers are exactly conserved. +\li Free surface heights exactly agree. +\li Requires (very few) completely local iterations. +\li The velocity corrections are barotropic, and more likely to correspond to the +layers whose flow was deficient than in some older schemes. + +The solution is unique provided that: +\f[ + \frac{\partial}{\partial \tilde{u}_k} \mathbf{F}(\tilde{u}_k, h_k) > 0 +\f] +This is a reasonable requirement for any discretization of the continuity +equation. 
In the continuous limit, \f$\mathbf{F} (u,h) = uh\f$, so one +interpretation is: +\f[ + \frac{\partial}{\partial \tilde{u}_k} \mathbf{F}(\tilde{u}_k, h_k) = + h_{k,\mbox{Marginal}} +\f] +With the PPM continuity scheme: +\f[ + F_{i+1/2} = \frac{1}{\Delta t} \int_{x_{i+1/2} - u \Delta t}^{x_{i+1/2}} h_i^n + (x) dx +\f] +leads to: +\f[ + \frac{\partial F_{i+1/2}}{\partial u_{i+1/2}} = h_i^n ( x_{i+1/2} - u_{i+1/2} + \Delta t ) \equiv h_{k, \mbox{Marginal}} +\f] +\f$h_i(x) > 0\f$ is already required for positive definiteness. + +Newton's method iterations quickly give \f$\Delta U\f$: +\f[ + \Delta U^{m+1} = \Delta U^m + \frac{\left( \overline{UH} \right) - \sum_{k=1}^N + F(u_k + \Delta U^m, h_k)}{\sum_{k=1}^N h_{k,\mbox{Marginal}}} +\f] + +\subsection subsec_practical How practical is this iterative approach? + +The piecewise parabolic method continuity solver uses two steps: + +\li Set up the positive-definite subgridscale profiles, \f$h_{PPM}(x)\f$. + +\image html h_PPM.png "Piecewise parabolic reconstructions of \f$h(x)\f$." +\imagelatex{h_PPM.png,Piecewise parabolic reconstructions of $h(x)$.,\includegraphics[width=\textwidth\,height=\textheight/2\,keepaspectratio=true]} + +\li Integrating the profiles to determine \f$F\f$. Step 1 is typically +\f$\sim 3\f$ times as expensive as step 2. \f$F(u,h)\f$ is piecewise cubic in +\f$u\f$, but often nearly linear. Newton's method iterations converge quickly: + +\image html Newton_PPM.png "Newton's method iterations for finding \f$\\Delta U\f$." +\imagelatex{Newton_PPM.png,Newton's method iterations for finding $\Delta U$.,\includegraphics[width=\textwidth\,height=\textheight/2\,keepaspectratio=true]} + +In a global model the sea surface heights converge everywhere to a tolerance of +\f$10^{-6}\f$ m within five iterations. These five iterations add \f$\sim 1.6\f$ +times more CPU time to the PPM continuity solver and the continuity solver is just +12\% of the total model time. 
+ +\subsection bottom_drag A note on bottom drag + +Barotropic accelerations do not lead to barotropic flows after a timestep when +vertical viscosity is taken into account. + +\f[ + u_k^{n+1} = u_k^n + \Delta t A_k + \Delta t \frac{\tau_{k-1/2} - + \tau_{k+1/2}}{h_k} +\f] + +\f[ + \tau_{1/2} = \tau_{Wind} +\f] +\f[ + \tau_{k+1/2} = \nu_{k+1/2} \frac{u_k^{n+1} - u_{k+1}^{n+1}}{h_{k+1/2}} +\f] +\f[ + \tau_{N+1/2} = \nu_{N+1/2} \frac{2 u_N^{n+1}}{h_{N+1/2}} +\f] +\f[ + \gamma_k \equiv \frac{1}{\Delta t} \frac{\partial u_k^{n+1}} + {\partial \overline{A}} +\f] + +A tridiagonal equation for \f$\gamma_k\f$ results, going from 0 for thin layers +near the bottom to 1 far above the bottom. + +\f[ + \gamma_1 = 1 + \frac{1}{h_1} \left[ - \frac{\nu_{3/2} \Delta t}{h_{3/2}} + (\gamma_1 - \gamma_2) \right] +\f] +\f[ + \gamma_k = 1 + \frac{1}{h_k} \left[ \frac{\nu_{k-1/2} \Delta t}{h_{k-1/2}} + (\gamma_{k-1} - \gamma_k) - \frac{\nu_{k+1/2} \Delta t}{h_{k+1/2}} + (\gamma_k - \gamma_{k+1}) \right] +\f] +\f[ + \gamma_N = 1 + \frac{1}{h_N} \left[ \frac{\nu_{N-1/2} \Delta t}{h_{N-1/2}} + (\gamma_{N-1} - \gamma_N) - \frac{2\nu_{N+1/2} \Delta t}{h_{N+1/2}} + \gamma_N \right] +\f] + +In the continuous limit: + +\f[ + \gamma(z) = 1 + \Delta t \frac{d}{dz} \left( \nu \frac{d \gamma}{dz} \right) +\f] +with boundary conditions: +\f[ + \frac{d \gamma}{dz} (0) = 0 +\f] +\f[ + \gamma(-D) = 0 +\f] + +For deep water and constant \f$\nu\f$, we get: +\f[ + \gamma (z) = 1 - \exp \left( - \frac{z + D}{\sqrt{\nu \Delta t}} \right) +\f] + +\image html bottom_drag.png "The continuous solution for barotropic flow plus a no-slip condition at the bottom." +\image latex bottom_drag.png "The continuous solution for barotropic flow plus a no-slip condition at the bottom." + +We want a scheme in which the split model looks exactly like the unsplit +model would if we had taken all those short 3D timesteps. 
Rather than +applying a barotropic velocity, we use a barotropic acceleration and +allow the continuity solver to determine the transports consistent with a +no-slip bottom boundary layer and perhaps also a no-slip surface boundary +layer under an ice shelf. + +From above, the barotropic equation is: + +\f[ + \frac{\eta^{n+1} - \eta^n}{\Delta t} + \nabla \cdot \left( \overline{UH} \right) = 0 +\f] +We need to determine the \f$\Delta \overline{A}\f$ such that: +\f[ + \sum_{k=1}^N \mathbf{F} (\tilde{u}_k, h_k) = \left( \overline{UH} \right) +\f] +where +\f[ + \tilde{u}_k = u_k + \gamma_k \Delta \overline{A} \Delta t +\f] + +\section bt-bc_details Additional details about the split time stepping + +\li Transports are used as input and output to the barotropic solver. The continuity +solver is inverted to determine velocities. + +\f[ + \frac{\partial \eta}{\partial t} = \nabla \cdot \overline{U} + M +\f] +\f[ + \overline{U} (\overline{u}) = \frac{1}{\Delta t} \int_0^{\overline{u} \Delta t} + H(x) dx +\f] +\f[ + \overline{u}^n = \overline{U}^{-1} \left( \sum_{k=1}^N U_k^n \right) +\f] +\f[ + u_k^{n+1} = \tilde{u}_k^{n+1} + \Delta \overline{u} +\f] + +We need to find \f$\Delta \overline{u}\f$ such that: + +\f[ + \sum_{k=1}^N U_k \left( \tilde{u}_k^{n+1} + \Delta \overline{u} \right) = + \overline{U}^{n+1} +\f] + +\li Barotropic accelerations are treated as anomalies from the baroclinic state: + +\f[ + \frac{\partial \overline{u}}{\partial t} \&= - f \hat{k} \times (\overline{u} - + \overline{u}_{Cor}) - \nabla \overline{g} (\eta - \eta_{PF}) - \\ + \& \frac{c_D \left( ||u_{Bot}|| + ||u_{Shelf}|| \right)}{\sum_{k=1}^N h_k} + (\overline{u} - \overline{u}_{Drag}) + \frac{\sum_{k=1}^N h_k \frac{\partial + u_k}{\partial t}} {\sum_{k=1}^N h_k} +\f] + +\li Bottom drag (and under ice surface-drag) is treated implicitly. 
+\li The barotropic continuity solver uses flow-dependent thickness fits which +approximate the sum of the layer thickness transports, to accommodate wetting and +drying. An image of this is shown here: + +\image html bt_bc_thickness.png "The barotropic transports depend on the baroclinic flows and thicknesses." +\image latex bt_bc_thickness.png "The barotropic transports depend on the baroclinic flows and thicknesses." + +\section time-split_summary Summary of MOM6 split time stepping + +\li We use an efficient approach for handling fast modes via simplified 2-d +equations, while the 3-d baroclinic timestep is determined by baroclinic dynamics. + +\li The barotropic solver determines free surface height and time-averaged +depth-integrated transports. + +\li By using anomalies, the MOM6 split solver gives identical answers to an +equivalent unsplit scheme for short timesteps. + +\li This scheme has been demonstrated to work with wetting and drying, as well as +under ice-shelves. + +\li The linear barotropic solver allows MOM6 to automatically set a stable +barotropic timestep (e.g.\ to 98\% of maximum). + +*/ diff --git a/src/core/_Barotropic_Momentum.dox b/src/core/_Barotropic_Momentum.dox new file mode 100644 index 0000000000..39442263b0 --- /dev/null +++ b/src/core/_Barotropic_Momentum.dox @@ -0,0 +1,50 @@ +/*! \page Barotropic_Momentum_Equations Barotropic Momentum Equations + +\brief Barotropic Momentum Equations + +The barotropic equations are timestepped on a relatively short timestep compared to the +rest of the model. Since the timestep constraints for this are known, the barotropic +timestep is computed at runtime. 
+ +The 2-d linear momentum equations with integrated continuity are: + +\f[ + \frac{\partial \eta}{\partial t} + \nabla \cdot \left( ( D + \eta) \vec{u}_{BT} + h_k \right) = P - E +\f] +\f[ + \frac{\partial \vec{u}_{BT}}{\partial t} = - g \nabla \eta - f \hat{z} \times + \vec{u}_{BT} + \vec{F}_{BT} +\f] +where +\f[ + \vec{u}_{BT} \equiv \frac{1}{D + \eta} \int_{-D}^\eta \vec{u}dz +\f] + +and \f$\vec{F}_{BT}\f$ is the barotropic momentum forcing from baroclinic +processes. Note that explicit mass fluxes such as evaporation and +precipitation change the model volume explicitly. + +In the mode splitting between baroclinic and barotropic processes, it is important +to include the contribution of free surface waves on the internal interface +heights on the pressure gradient force, shown here as \f$g_{Eff}\f$: + +\f[ + \frac{\partial p}{\partial z} = -\rho g +\f] +\f[ + g_{Eff} = g + \frac{\partial}{\partial \eta} \left[ \frac{1}{D + \eta} + \int_{-D}^\eta p dz \right] +\f] + +The barotropic momentum equation then becomes: + +\f[ + \frac{\partial \vec{u}_{BT}}{\partial t} + f \hat{z} \times + \vec{u}_{BT} + \frac{1}{\rho_0} \nabla g_{Eff} \eta = \mbox{Residual} +\f] + +Without including the internal wave motion in the barotropic equations, one can +generate instabilities (\cite bleck1990, \cite hallberg1997a). + +*/ diff --git a/src/core/_Sea_ice.dox b/src/core/_Sea_ice.dox new file mode 100644 index 0000000000..bec05af17c --- /dev/null +++ b/src/core/_Sea_ice.dox @@ -0,0 +1,5 @@ +/*! \page Sea_Ice Sea Ice Considerations + +\section Frazil Ice Formation + +*/ diff --git a/src/core/_Solar_radiation.dox b/src/core/_Solar_radiation.dox new file mode 100644 index 0000000000..1103c93c3b --- /dev/null +++ b/src/core/_Solar_radiation.dox @@ -0,0 +1,7 @@ +/*! 
\page Solar_Radiation Solar Radiation + +\section Jerlov_WT Jerlov water type + +\section Chl_Absorb Absorption by Chlorophyll + +*/ diff --git a/src/core/_Timestep_Overview.dox b/src/core/_Timestep_Overview.dox new file mode 100644 index 0000000000..a75566303d --- /dev/null +++ b/src/core/_Timestep_Overview.dox @@ -0,0 +1,10 @@ +/*! \page Timestep_Overview Timestepping Overview + +In MOM6, it is common to have at least four different timesteps: the barotropic +timestep, the baroclinic (momentum dynamics) timestep, the tracer timestep, and the +remapping interval. There can also be a forcing timestep on which model coupling occurs. + +\image html timesteps_4.png "Graphic representation of the various timesteps used by MOM6." +\image latex timesteps_4.png "Graphic representation of the various timesteps used by MOM6." + +*/ diff --git a/src/diagnostics/MOM_diagnostics.F90 b/src/diagnostics/MOM_diagnostics.F90 index c3a992b806..e0dc3c95d4 100644 --- a/src/diagnostics/MOM_diagnostics.F90 +++ b/src/diagnostics/MOM_diagnostics.F90 @@ -99,6 +99,7 @@ module MOM_diagnostics KE => NULL(), & !< KE per unit mass [L2 T-2 ~> m2 s-2] dKE_dt => NULL(), & !< time derivative of the layer KE [H L2 T-3 ~> m3 s-3] PE_to_KE => NULL(), & !< potential energy to KE term [m3 s-3] + KE_BT => NULL(), & !< barotropic contribution to KE term [m3 s-3] KE_CorAdv => NULL(), & !< KE source from the combined Coriolis and !! advection terms [H L2 T-3 ~> m3 s-3]. !! The Coriolis source should be zero, but is not due to truncation @@ -110,14 +111,16 @@ module MOM_diagnostics KE_dia => NULL() !< KE source from diapycnal diffusion [H L2 T-3 ~> m3 s-3] !>@{ Diagnostic IDs - integer :: id_u = -1, id_v = -1, id_h = -1 + integer :: id_u = -1, id_v = -1, id_h = -1 + integer :: id_usq = -1, id_vsq = -1, id_uv = -1 integer :: id_e = -1, id_e_D = -1 integer :: id_du_dt = -1, id_dv_dt = -1 ! 
integer :: id_hf_du_dt = -1, id_hf_dv_dt = -1 integer :: id_hf_du_dt_2d = -1, id_hf_dv_dt_2d = -1 integer :: id_col_ht = -1, id_dh_dt = -1 integer :: id_KE = -1, id_dKEdt = -1 - integer :: id_PE_to_KE = -1, id_KE_Coradv = -1 + integer :: id_PE_to_KE = -1, id_KE_BT = -1 + integer :: id_KE_Coradv = -1 integer :: id_KE_adv = -1, id_KE_visc = -1 integer :: id_KE_horvisc = -1, id_KE_dia = -1 integer :: id_uh_Rlay = -1, id_vh_Rlay = -1 @@ -230,6 +233,10 @@ subroutine calculate_diagnostic_fields(u, v, h, uh, vh, tv, ADp, CDp, p_surf, & !! calculating interface heights [H ~> m or kg m-2]. ! Local variables + real, dimension(SZIB_(G),SZJ_(G),SZK_(G)) :: usq ! squared eastward velocity [L2 T-2 ~> m2 s-2] + real, dimension(SZI_(G),SZJB_(G),SZK_(G)) :: vsq ! squared northward velocity [L2 T-2 ~> m2 s-2] + real, dimension(SZI_(G),SZJ_(G),SZK_(G)) :: uv ! u x v at h-points [L2 T-2 ~> m2 s-2] + integer, dimension(2) :: EOSdom ! The i-computational domain for the equation of state integer i, j, k, is, ie, js, je, Isq, Ieq, Jsq, Jeq, nz, nkmb @@ -337,6 +344,28 @@ subroutine calculate_diagnostic_fields(u, v, h, uh, vh, tv, ADp, CDp, p_surf, & if (CS%id_h > 0) call post_data(CS%id_h, h, CS%diag) + if (CS%id_usq > 0) then + do k=1,nz ; do j=js,je ; do I=Isq,Ieq + usq(I,j,k) = u(I,j,k) * u(I,j,k) + enddo ; enddo ; enddo + call post_data(CS%id_usq, usq, CS%diag) + endif + + if (CS%id_vsq > 0) then + do k=1,nz ; do J=Jsq,Jeq ; do i=is,ie + vsq(i,J,k) = v(i,J,k) * v(i,J,k) + enddo ; enddo ; enddo + call post_data(CS%id_vsq, vsq, CS%diag) + endif + + if (CS%id_uv > 0) then + do k=1,nz ; do j=js,je ; do i=is,ie + uv(i,j,k) = (0.5*(u(I-1,j,k) + u(I,j,k))) * & + (0.5*(v(i,J-1,k) + v(i,J,k))) + enddo ; enddo ; enddo + call post_data(CS%id_uv, uv, CS%diag) + endif + if (associated(CS%e)) then call find_eta(h, tv, G, GV, US, CS%e, eta_bt) if (CS%id_e > 0) call post_data(CS%id_e, CS%e, CS%diag) @@ -994,9 +1023,9 @@ subroutine calculate_energy_diagnostics(u, v, h, uh, vh, ADp, CDp, G, GV, US, CS 
endif if (.not.G%symmetric) then - if (associated(CS%dKE_dt) .OR. associated(CS%PE_to_KE) .OR. associated(CS%KE_CorAdv) .OR. & - associated(CS%KE_adv) .OR. associated(CS%KE_visc) .OR. associated(CS%KE_horvisc).OR. & - associated(CS%KE_dia) ) then + if (associated(CS%dKE_dt) .OR. associated(CS%PE_to_KE) .OR. associated(CS%KE_BT) .OR. & + associated(CS%KE_CorAdv) .OR. associated(CS%KE_adv) .OR. associated(CS%KE_visc) .OR. & + associated(CS%KE_horvisc) .OR. associated(CS%KE_dia) ) then call create_group_pass(CS%pass_KE_uv, KE_u, KE_v, G%Domain, To_North+To_East) endif endif @@ -1040,6 +1069,24 @@ subroutine calculate_energy_diagnostics(u, v, h, uh, vh, ADp, CDp, G, GV, US, CS if (CS%id_PE_to_KE > 0) call post_data(CS%id_PE_to_KE, CS%PE_to_KE, CS%diag) endif + if (associated(CS%KE_BT)) then + do k=1,nz + do j=js,je ; do I=Isq,Ieq + KE_u(I,j) = uh(I,j,k) * G%dxCu(I,j) * ADp%u_accel_bt(I,j,k) + enddo ; enddo + do J=Jsq,Jeq ; do i=is,ie + KE_v(i,J) = vh(i,J,k) * G%dyCv(i,J) * ADp%v_accel_bt(i,J,k) + enddo ; enddo + if (.not.G%symmetric) & + call do_group_pass(CS%pass_KE_uv, G%domain) + do j=js,je ; do i=is,ie + CS%KE_BT(i,j,k) = 0.5 * G%IareaT(i,j) & + * (KE_u(I,j) + KE_u(I-1,j) + KE_v(i,J) + KE_v(i,J-1)) + enddo ; enddo + enddo + if (CS%id_KE_BT > 0) call post_data(CS%id_KE_BT, CS%KE_BT, CS%diag) + endif + if (associated(CS%KE_CorAdv)) then do k=1,nz do j=js,je ; do I=Isq,Ieq @@ -1519,6 +1566,7 @@ subroutine MOM_diagnostics_init(MIS, ADp, CDp, Time, G, GV, US, param_file, diag real :: wave_speed_tol ! The fractional tolerance for finding the wave speeds [nondim] logical :: better_speed_est ! If true, use a more robust estimate of the first ! mode wave speed as the starting point for iterations. + logical :: split ! 
True if using the barotropic-baroclinic split algorithm logical :: use_temperature, adiabatic logical :: default_2018_answers, remap_answers_2018 integer :: isd, ied, jsd, jed, IsdB, IedB, JsdB, JedB, nz, nkml, nkbl @@ -1568,6 +1616,7 @@ subroutine MOM_diagnostics_init(MIS, ADp, CDp, Time, G, GV, US, param_file, diag "If true, use the order of arithmetic and expressions that recover the "//& "answers from the end of 2018. Otherwise, use updated and more robust "//& "forms of the same expressions.", default=default_2018_answers) + call get_param(param_file, mdl, "SPLIT", split, default=.true., do_not_log=.true.) if (GV%Boussinesq) then thickness_units = "m" ; flux_units = "m3 s-1" ; convert_H = GV%H_to_m @@ -1641,6 +1690,13 @@ subroutine MOM_diagnostics_init(MIS, ADp, CDp, Time, G, GV, US, param_file, diag CS%id_v = register_diag_field('ocean_model', 'v', diag%axesCvL, Time, & 'Meridional velocity', 'm s-1', conversion=US%L_T_to_m_s, cmor_field_name='vo', & cmor_standard_name='sea_water_y_velocity', cmor_long_name='Sea Water Y Velocity') + CS%id_usq = register_diag_field('ocean_model', 'usq', diag%axesCuL, Time, & + 'Zonal velocity squared', 'm2 s-2', conversion=US%L_T_to_m_s**2) + CS%id_vsq = register_diag_field('ocean_model', 'vsq', diag%axesCvL, Time, & + 'Meridional velocity squared', 'm2 s-2', conversion=US%L_T_to_m_s**2) + CS%id_uv = register_diag_field('ocean_model', 'uv', diag%axesTL, Time, & + 'Product between zonal and meridional velocities at h-points', 'm2 s-2', & + conversion=US%L_T_to_m_s**2) CS%id_h = register_diag_field('ocean_model', 'h', diag%axesTL, Time, & 'Layer Thickness', thickness_units, v_extensive=.true., conversion=convert_H) @@ -1779,6 +1835,13 @@ subroutine MOM_diagnostics_init(MIS, ADp, CDp, Time, G, GV, US, param_file, diag 'm3 s-3', conversion=GV%H_to_m*(US%L_T_to_m_s**2)*US%s_to_T) if (CS%id_PE_to_KE>0) call safe_alloc_ptr(CS%PE_to_KE,isd,ied,jsd,jed,nz) + if (split) then + CS%id_KE_BT = register_diag_field('ocean_model', 'KE_BT', 
diag%axesTL, Time, & + 'Barotropic contribution to Kinetic Energy', & + 'm3 s-3', conversion=GV%H_to_m*(US%L_T_to_m_s**2)*US%s_to_T) + if (CS%id_KE_BT>0) call safe_alloc_ptr(CS%KE_BT,isd,ied,jsd,jed,nz) + endif + CS%id_KE_Coradv = register_diag_field('ocean_model', 'KE_Coradv', diag%axesTL, Time, & 'Kinetic Energy Source from Coriolis and Advection', & 'm3 s-3', conversion=GV%H_to_m*(US%L_T_to_m_s**2)*US%s_to_T) @@ -2193,9 +2256,9 @@ subroutine set_dependent_diagnostics(MIS, ADp, CDp, G, GV, CS) IsdB = G%IsdB ; IedB = G%IedB ; JsdB = G%JsdB ; JedB = G%JedB if (associated(CS%dKE_dt) .or. associated(CS%PE_to_KE) .or. & - associated(CS%KE_CorAdv) .or. associated(CS%KE_adv) .or. & - associated(CS%KE_visc) .or. associated(CS%KE_horvisc) .or. & - associated(CS%KE_dia)) & + associated(CS%KE_BT) .or. associated(CS%KE_CorAdv) .or. & + associated(CS%KE_adv) .or. associated(CS%KE_visc) .or. & + associated(CS%KE_horvisc) .or. associated(CS%KE_dia)) & call safe_alloc_ptr(CS%KE,isd,ied,jsd,jed,nz) if (associated(CS%dKE_dt)) then @@ -2247,6 +2310,7 @@ subroutine MOM_diagnostics_end(CS, ADp) if (associated(CS%KE)) deallocate(CS%KE) if (associated(CS%dKE_dt)) deallocate(CS%dKE_dt) if (associated(CS%PE_to_KE)) deallocate(CS%PE_to_KE) + if (associated(CS%KE_BT)) deallocate(CS%KE_BT) if (associated(CS%KE_Coradv)) deallocate(CS%KE_Coradv) if (associated(CS%KE_adv)) deallocate(CS%KE_adv) if (associated(CS%KE_visc)) deallocate(CS%KE_visc) diff --git a/src/diagnostics/MOM_sum_output.F90 b/src/diagnostics/MOM_sum_output.F90 index 7d11ac0608..03204e4322 100644 --- a/src/diagnostics/MOM_sum_output.F90 +++ b/src/diagnostics/MOM_sum_output.F90 @@ -12,7 +12,7 @@ module MOM_sum_output use MOM_forcing_type, only : forcing use MOM_grid, only : ocean_grid_type use MOM_interface_heights, only : find_eta -use MOM_io, only : create_file, fieldtype, flush_file, open_file, reopen_file +use MOM_io, only : create_file, fieldtype, flush_file, open_file, reopen_file, stdout use MOM_io, only : file_exists, 
slasher, vardesc, var_desc, write_field, get_filename_appendix use MOM_io, only : APPEND_FILE, ASCII_FILE, SINGLE_FILE, WRITEONLY_FILE use MOM_open_boundary, only : ocean_OBC_type, OBC_segment_type @@ -829,11 +829,11 @@ subroutine write_energy(u, v, h, tv, day, n, G, GV, US, CS, tracer_CSp, OBC, dt_ if (is_root_pe()) then if (CS%use_temperature) then - write(*,'(A," ",A,": En ",ES12.6, ", MaxCFL ", F8.5, ", Mass ", & + write(stdout,'(A," ",A,": En ",ES12.6, ", MaxCFL ", F8.5, ", Mass ", & & ES18.12, ", Salt ", F15.11,", Temp ", F15.11)') & trim(date_str), trim(n_str), En_mass, max_CFL(1), mass_tot, salin, temp else - write(*,'(A," ",A,": En ",ES12.6, ", MaxCFL ", F8.5, ", Mass ", & + write(stdout,'(A," ",A,": En ",ES12.6, ", MaxCFL ", F8.5, ", Mass ", & & ES18.12)') & trim(date_str), trim(n_str), En_mass, max_CFL(1), mass_tot endif @@ -855,39 +855,39 @@ subroutine write_energy(u, v, h, tv, day, n, G, GV, US, CS, tracer_CSp, OBC, dt_ endif if (CS%ntrunc > 0) then - write(*,'(A," Energy/Mass:",ES12.5," Truncations ",I0)') & + write(stdout,'(A," Energy/Mass:",ES12.5," Truncations ",I0)') & trim(date_str), En_mass, CS%ntrunc endif if (CS%write_stocks) then - write(*,'(" Total Energy: ",Z16.16,ES24.16)') toten, toten - write(*,'(" Total Mass: ",ES24.16,", Change: ",ES24.16," Error: ",ES12.5," (",ES8.1,")")') & + write(stdout,'(" Total Energy: ",Z16.16,ES24.16)') toten, toten + write(stdout,'(" Total Mass: ",ES24.16,", Change: ",ES24.16," Error: ",ES12.5," (",ES8.1,")")') & mass_tot, mass_chg, mass_anom, mass_anom/mass_tot if (CS%use_temperature) then if (Salt == 0.) 
then - write(*,'(" Total Salt: ",ES24.16,", Change: ",ES24.16," Error: ",ES12.5)') & + write(stdout,'(" Total Salt: ",ES24.16,", Change: ",ES24.16," Error: ",ES12.5)') & Salt*0.001, Salt_chg*0.001, Salt_anom*0.001 else - write(*,'(" Total Salt: ",ES24.16,", Change: ",ES24.16," Error: ",ES12.5," (",ES8.1,")")') & + write(stdout,'(" Total Salt: ",ES24.16,", Change: ",ES24.16," Error: ",ES12.5," (",ES8.1,")")') & Salt*0.001, Salt_chg*0.001, Salt_anom*0.001, Salt_anom/Salt endif if (Heat == 0.) then - write(*,'(" Total Heat: ",ES24.16,", Change: ",ES24.16," Error: ",ES12.5)') & + write(stdout,'(" Total Heat: ",ES24.16,", Change: ",ES24.16," Error: ",ES12.5)') & Heat, Heat_chg, Heat_anom else - write(*,'(" Total Heat: ",ES24.16,", Change: ",ES24.16," Error: ",ES12.5," (",ES8.1,")")') & + write(stdout,'(" Total Heat: ",ES24.16,", Change: ",ES24.16," Error: ",ES12.5," (",ES8.1,")")') & Heat, Heat_chg, Heat_anom, Heat_anom/Heat endif endif do m=1,nTr_stocks - write(*,'(" Total ",a,": ",ES24.16,X,a)') & + write(stdout,'(" Total ",a,": ",ES24.16,X,a)') & trim(Tr_names(m)), Tr_stocks(m), trim(Tr_units(m)) if (Tr_minmax_got(m)) then - write(*,'(64X,"Global Min:",ES24.16,X,"at: (", f7.2,","f7.2,","f8.2,")" )') & + write(stdout,'(64X,"Global Min:",ES24.16,X,"at: (", f7.2,","f7.2,","f8.2,")" )') & Tr_min(m),Tr_min_x(m),Tr_min_y(m),Tr_min_z(m) - write(*,'(64X,"Global Max:",ES24.16,X,"at: (", f7.2,","f7.2,","f8.2,")" )') & + write(stdout,'(64X,"Global Max:",ES24.16,X,"at: (", f7.2,","f7.2,","f8.2,")" )') & Tr_max(m),Tr_max_x(m),Tr_max_y(m),Tr_max_z(m) endif diff --git a/src/equation_of_state/_Equation_of_State.dox b/src/equation_of_state/_Equation_of_State.dox new file mode 100644 index 0000000000..2e18b49f54 --- /dev/null +++ b/src/equation_of_state/_Equation_of_State.dox @@ -0,0 +1,62 @@ +/*! \page Equation_of_State Equation of State + +Within MOM6, there is a wrapper for the equation of state, so that all calls look +the same from the rest of the model. 
The equation of state code has to calculate +not just in situ density, but also the compressibility and various derivatives of +the density. There is also code for computing specific volume and the +freezing temperature. + +\section Linear_EOS Linear Equation of State + +Compute the required quantities with uniform values for \f$\alpha = \frac{\partial +\rho}{\partial T}\f$ and \f$\beta = \frac{\partial \rho}{\partial S}\f$, (DRHO_DT, +DRHO_DS in MOM_input, also uses RHO_T0_S0). + +\section Wright_EOS Wright Equation of State + +Compute the required quantities using the equation of state from \cite wright1997. +This equation of state is in the form: +\f[ + \alpha(s, \theta, p) = A(s, \theta) + \frac{\lambda(s, \theta)}{P(s, \theta) + p} +\f] +where \f$A, \lambda\f$ and \f$P\f$ are functions only of \f$s\f$ and \f$\theta\f$ +and \f$\alpha = 1/ \rho\f$ is the specific volume. This form is useful for the +pressure gradient computation as discussed in \ref section_PG. + +\section NEMO_EOS NEMO Equation of State + +Compute the required quantities using the equation of state from \cite roquet2015. + +\section UNESCO_EOS UNESCO Equation of State + +Compute the required quantities using the equation of state from \cite jackett1995. + +\section TEOS-10_EOS TEOS-10 Equation of State + +Compute the required quantities using the equation of state from +[TEOS-10](http://www.teos-10.org/). + +\section TFREEZE Freezing Temperature of Sea Water + +There are three choices for computing the freezing point of sea water: + +\li Linear The freezing temperature is a linear function of the salinity and +pressure: +\f[ + T_{Fr} = (T_{Fr0} + a\,S) + b\,P +\f] +where \f$T_{Fr0},a,b\f$ are constants which can be set in MOM_input (TFREEZE_S0_P0, +DTFREEZE_DS, DTFREEZE_DP). 
+ +\li Millero The \cite millero1978 equation is used, but modified so that it is a function +of potential temperature rather than in situ temperature: +\f[ + T_{Fr} = S(a + (b \sqrt{\max(S,0.0)} + c\, S)) + d\,P +\f] +where \f$a,b, c, d\f$ are fixed contants. + +\li TEOS-10 The TEOS-10 package is used to compute the freezing conservative temperature +[degC] from absolute salinity [g/kg], and pressure [Pa]. This one must be used +if you are using the NEMO or TEOS-10 equation of state. + +*/ diff --git a/src/framework/MOM_diag_vkernels.F90 b/src/framework/MOM_diag_vkernels.F90 index b7c1130521..3d6e3e3f65 100644 --- a/src/framework/MOM_diag_vkernels.F90 +++ b/src/framework/MOM_diag_vkernels.F90 @@ -4,7 +4,7 @@ module MOM_diag_vkernels ! This file is part of MOM6. See LICENSE.md for the license. -use iso_fortran_env, only : stdout=>output_unit, stderr=>error_unit +use MOM_io, only : stdout, stderr implicit none ; private diff --git a/src/framework/MOM_document.F90 b/src/framework/MOM_document.F90 index 15d0839ee9..ff0934ac55 100644 --- a/src/framework/MOM_document.F90 +++ b/src/framework/MOM_document.F90 @@ -661,7 +661,7 @@ end function real_string !> Returns a character string of a comma-separated, compact formatted, reals !> e.g. "1., 2., 5*3., 5.E2", that give the list of values. function real_array_string(vals, sep) - character(len=1320) :: real_array_string !< The output string listing vals + character(len=:) ,allocatable :: real_array_string !< The output string listing vals real, intent(in) :: vals(:) !< The array of values to record character(len=*), & optional, intent(in) :: sep !< The separator between successive values, @@ -669,10 +669,10 @@ function real_array_string(vals, sep) ! Returns a character string of a comma-separated, compact formatted, reals ! e.g. "1., 2., 5*3., 5.E2" ! Local variables - integer :: j, n, b, ns + integer :: j, n, ns logical :: doWrite character(len=10) :: separator - n=1 ; doWrite=.true. 
; real_array_string='' ; b=1 + n=1 ; doWrite=.true. ; real_array_string='' if (present(sep)) then separator=sep ; ns=len(sep) else @@ -687,16 +687,15 @@ function real_array_string(vals, sep) endif endif if (doWrite) then - if (b>1) then ! Write separator if a number has already been written - write(real_array_string(b:),'(A)') separator - b=b+ns + if(len(real_array_string)>0) then ! Write separator if a number has already been written + real_array_string = real_array_string // separator(1:ns) endif if (n>1) then - write(real_array_string(b:),'(A,"*",A)') trim(int_string(n)),trim(real_string(vals(j))) + real_array_string = real_array_string // trim(int_string(n)) // "*" // trim(real_string(vals(j))) else - write(real_array_string(b:),'(A)') trim(real_string(vals(j))) + real_array_string = real_array_string // trim(real_string(vals(j))) endif - n=1 ; b=len_trim(real_array_string)+1 + n=1 endif enddo end function real_array_string diff --git a/src/framework/MOM_file_parser.F90 b/src/framework/MOM_file_parser.F90 index 9422385a29..3e7a2f9e84 100644 --- a/src/framework/MOM_file_parser.F90 +++ b/src/framework/MOM_file_parser.F90 @@ -1407,14 +1407,13 @@ subroutine log_param_real_array(CS, modulename, varname, value, desc, & logical, optional, intent(in) :: like_default !< If present and true, log this parameter as !! though it has the default value, even if there is no default. - character(len=1320) :: mesg + character(len=:), allocatable :: mesg character(len=240) :: myunits !write(mesg, '(" ",a," ",a,": ",ES19.12,99(",",ES19.12))') & !write(mesg, '(" ",a," ",a,": ",G,99(",",G))') & ! 
trim(modulename), trim(varname), value - write(mesg, '(" ",a," ",a,": ",a)') & - trim(modulename), trim(varname), trim(left_reals(value)) + mesg = " " // trim(modulename) // " " // trim(varname) // ": " // trim(left_reals(value)) if (is_root_pe()) then if (CS%log_open) write(CS%stdlog,'(a)') trim(mesg) if (CS%log_to_stdout) write(CS%stdout,'(a)') trim(mesg) diff --git a/src/framework/MOM_io.F90 b/src/framework/MOM_io.F90 index c516c96e86..d13dddc3c7 100644 --- a/src/framework/MOM_io.F90 +++ b/src/framework/MOM_io.F90 @@ -33,6 +33,7 @@ module MOM_io use mpp_io_mod, only : get_file_fields=>mpp_get_fields, get_file_times=>mpp_get_times use mpp_io_mod, only : io_infra_init=>mpp_io_init +use iso_fortran_env, only : stdout_iso=>output_unit, stderr_iso=>error_unit use netcdf implicit none ; private @@ -84,6 +85,9 @@ module MOM_io module procedure MOM_read_vector_2d end interface +integer, public :: stdout = stdout_iso !< standard output unit +integer, public :: stderr = stderr_iso !< standard output unit + contains !> Routine creates a new NetCDF file. It also sets up diff --git a/src/framework/MOM_random.F90 b/src/framework/MOM_random.F90 index 14800df9aa..161236572c 100644 --- a/src/framework/MOM_random.F90 +++ b/src/framework/MOM_random.F90 @@ -11,7 +11,7 @@ module MOM_random use MersenneTwister_mod, only : getRandomReal ! Generates a random number use MersenneTwister_mod, only : getRandomPositiveInt ! 
Generates a random positive integer -use iso_fortran_env, only : stdout=>output_unit, stderr=>error_unit +use MOM_io, only : stdout, stderr implicit none ; private diff --git a/src/framework/MOM_string_functions.F90 b/src/framework/MOM_string_functions.F90 index 1293499930..5c04a77b7d 100644 --- a/src/framework/MOM_string_functions.F90 +++ b/src/framework/MOM_string_functions.F90 @@ -142,13 +142,13 @@ function left_reals(r,sep) real, intent(in) :: r(:) !< The array of real variables to convert to a string character(len=*), optional, intent(in) :: sep !< The separator between !! successive values, by default it is ', '. - character(len=1320) :: left_reals !< The output string + character(len=:), allocatable :: left_reals !< The output string - integer :: j, n, b, ns + integer :: j, n, ns logical :: doWrite character(len=10) :: separator - n=1 ; doWrite=.true. ; left_reals='' ; b=1 + n=1 ; doWrite=.true. ; left_reals='' if (present(sep)) then separator=sep ; ns=len(sep) else @@ -163,16 +163,15 @@ function left_reals(r,sep) endif endif if (doWrite) then - if (b>1) then ! Write separator if a number has already been written - write(left_reals(b:),'(A)') separator - b=b+ns + if (len(left_reals)>0) then ! Write separator if a number has already been written + left_reals = left_reals // separator(1:ns) endif if (n>1) then - write(left_reals(b:),'(A,"*",A)') trim(left_int(n)),trim(left_real(r(j))) + left_reals = left_reals // trim(left_int(n)) // "*" // trim(left_real(r(j))) else - write(left_reals(b:),'(A)') trim(left_real(r(j))) + left_reals = left_reals // trim(left_real(r(j))) endif - n=1 ; b=len_trim(left_reals)+1 + n=1 endif enddo end function left_reals diff --git a/src/framework/_Domain_decomposition.dox b/src/framework/_Domain_decomposition.dox new file mode 100644 index 0000000000..21fe27ade3 --- /dev/null +++ b/src/framework/_Domain_decomposition.dox @@ -0,0 +1,29 @@ +/*! 
\page Domain_Decomposition Domain Decomposition + +\section section_domain_decomp Domain Decomposition + +MOM6 supports serial, OpenMP, and MPI computations, with the user +choosing between them at run time. All are accomplished +through domain decomposition in the horizontal. All of the +horizontal operations are explicit with a relatively small +footprint, so the tiling is a logical choice. Some goals in the +parallel design of MOM6 were: + +\li Don't hard-code the number of processes. +\li MPI and OpenMP share the same basic structure. +\li Don't break the serial optimizations. +\li Same result as serial code for any number of processes. +\li Portability - able to run on any (Unix) system. + +The whole horizontal MOM6 grid is shown in \ref section_Memory. +The computations are done over the cells inside the darker line; +the cells are numbered 1 to NIGLOBAL in the \f$x\f$-direction and +1 to NJGLOBAL in the \f$y\f$-direction. Those looking ahead to +running in parallel would be wise to include factors of two and three in +their choice of NIGLOBAL and NJGLOBAL when building new grids. MOM6 will run in +parallel with any values of these, but the computations +might not be load-balanced. + +\section section_wide_halos Wide Halos + +*/ diff --git a/src/framework/_Global_grids.dox b/src/framework/_Global_grids.dox new file mode 100644 index 0000000000..078edbc487 --- /dev/null +++ b/src/framework/_Global_grids.dox @@ -0,0 +1,9 @@ +/*! \page Global_Grids Global Orthogonal Grids + +\brief Global Orthogonal Grids + +\section Dipole Dipole Grids + +\section Tripole Tripole Grids + +*/ diff --git a/src/framework/_Horizontal_indexing.dox b/src/framework/_Horizontal_indexing.dox index 7e3b86f8e7..e68c38ac0f 100644 --- a/src/framework/_Horizontal_indexing.dox +++ b/src/framework/_Horizontal_indexing.dox @@ -7,7 +7,7 @@ MOM6 is written in Fortran90 and uses the `i,j,k` order of indexing. 
We often refer to the i-direction as the x- or zonal direction, and similarly to the j-direction as y- or meridional direction. The model can use curvilinear grids/coordinates in the horizontal and so these labels have loose meanings but convenient. -\section Staggering Loops and staggered variables +\section section_Staggering Loops and staggered variables Many variables are staggered horizontally with respect to each other. The dynamics and tracer equations are discretized on an Arakawa C grid. @@ -33,7 +33,7 @@ In contrast, when a loop is over u-points collocated variables - the expression \f$ u_{i+\frac{1}{2},j} ( h_{i,j} + h_{i+1,j} ) \f$ is `u(I,j) * ( h(i,j) + h(i+1,j)`. -\section Memory Declaration of variables +\section section_Memory Declaration of variables \image html Horizontal_NE_indexing_nonsym.png Non-symmetric mode: All arrays are declared with the same shape `(isd:ied,jsd:jed)`. \image latex Horizontal_NE_indexing_nonsym.png Non-symmetric mode: All arrays are declared with the same shape `(isd:ied,jsd:jed)`. diff --git a/src/framework/_Parallel_IO.dox b/src/framework/_Parallel_IO.dox new file mode 100644 index 0000000000..585448b6c2 --- /dev/null +++ b/src/framework/_Parallel_IO.dox @@ -0,0 +1,10 @@ +/*! \page Parallel_IO Parallel I/O + +\brief Parallel I/O + +The model can be told to write a different output file per process. This may or may +not save time, and is a bad idea on Lustre filesystems. If the model is writing +individual files per process, one can combine them using the mppnccombine program from +the [FRE-nctools package](https://github.com/NOAA-GFDL/FRE-NCtools). + +*/ diff --git a/src/framework/_Regional_grids.dox b/src/framework/_Regional_grids.dox new file mode 100644 index 0000000000..b99dbed942 --- /dev/null +++ b/src/framework/_Regional_grids.dox @@ -0,0 +1,9 @@ +/*! 
\page Regional_Grids Regional Orthogonal Grids + +\brief Regional Orthogonal Grids + +\section map_projections Map Projections + +\section OBC_segments Open Boundary Segments + +*/ diff --git a/src/framework/_Vertical_grids.dox b/src/framework/_Vertical_grids.dox new file mode 100644 index 0000000000..1231a5bd42 --- /dev/null +++ b/src/framework/_Vertical_grids.dox @@ -0,0 +1,13 @@ +/*! \page Vertical_Grids Vertical Grids + +\section vert_layer Layered + +\section vert_z_star Z-Star + +\section vert_sigma Sigma + +\section vert_rho Rho + +\section vert_hybrid Hybrid + +*/ diff --git a/src/initialization/MOM_grid_initialize.F90 b/src/initialization/MOM_grid_initialize.F90 index efee50db05..4526d9e9c7 100644 --- a/src/initialization/MOM_grid_initialize.F90 +++ b/src/initialization/MOM_grid_initialize.F90 @@ -14,7 +14,7 @@ module MOM_grid_initialize use MOM_error_handler, only : MOM_error, MOM_mesg, FATAL, is_root_pe use MOM_error_handler, only : callTree_enter, callTree_leave use MOM_file_parser, only : get_param, log_param, log_version, param_file_type -use MOM_io, only : MOM_read_data, read_data, slasher, file_exists +use MOM_io, only : MOM_read_data, read_data, slasher, file_exists, stdout use MOM_io, only : CORNER, NORTH_FACE, EAST_FACE use MOM_unit_scaling, only : unit_scale_type @@ -806,14 +806,14 @@ subroutine set_grid_metrics_mercator(G, param_file, US) y_q = find_root(Int_dj_dy, dy_dj, GP, jd, 0.0, -1.0*PI_2, PI_2, itt2) G%gridLatB(J) = y_q*180.0/PI ! if (is_root_pe()) & - ! write(*, '("J, y_q = ",I4,ES14.4," itts = ",I4)') j, y_q, itt2 + ! write(stdout, '("J, y_q = ",I4,ES14.4," itts = ",I4)') j, y_q, itt2 enddo do j=G%jsg,G%jeg jd = fnRef + (j - jRef) - 0.5 y_h = find_root(Int_dj_dy, dy_dj, GP, jd, 0.0, -1.0*PI_2, PI_2, itt1) G%gridLatT(j) = y_h*180.0/PI ! if (is_root_pe()) & - ! write(*, '("j, y_h = ",I4,ES14.4," itts = ",I4)') j, y_h, itt1 + ! 
write(stdout, '("j, y_h = ",I4,ES14.4," itts = ",I4)') j, y_h, itt1 enddo do J=JsdB+J_off,JedB+J_off jd = fnRef + (J - jRef) diff --git a/src/initialization/MOM_shared_initialization.F90 b/src/initialization/MOM_shared_initialization.F90 index 395cdcffd8..ec51a045cf 100644 --- a/src/initialization/MOM_shared_initialization.F90 +++ b/src/initialization/MOM_shared_initialization.F90 @@ -11,7 +11,7 @@ module MOM_shared_initialization use MOM_error_handler, only : MOM_mesg, MOM_error, FATAL, WARNING, is_root_pe use MOM_error_handler, only : callTree_enter, callTree_leave, callTree_waypoint use MOM_file_parser, only : get_param, log_param, param_file_type, log_version -use MOM_io, only : close_file, create_file, fieldtype, file_exists +use MOM_io, only : close_file, create_file, fieldtype, file_exists, stdout use MOM_io, only : MOM_read_data, MOM_read_vector, SINGLE_FILE, MULTIPLE use MOM_io, only : slasher, vardesc, write_field, var_desc use MOM_string_functions, only : uppercase @@ -282,7 +282,7 @@ subroutine apply_topography_edits_from_file(D, G, param_file, US) j = jg(n) - G%jsd_global + 2 if (i>=G%isc .and. i<=G%iec .and. j>=G%jsc .and. j<=G%jec) then if (new_depth(n)/=0.) then - write(*,'(a,3i5,f8.2,a,f8.2,2i4)') & + write(stdout,'(a,3i5,f8.2,a,f8.2,2i4)') & 'Ocean topography edit: ',n,ig(n),jg(n),D(i,j)/m_to_Z,'->',abs(new_depth(n)),i,j D(i,j) = abs(m_to_Z*new_depth(n)) ! Allows for height-file edits (i.e. converts negatives) else @@ -1008,11 +1008,11 @@ subroutine reset_face_lengths_list(G, param_file, US) G%dy_Cu(I,j) = G%mask2dCu(I,j) * m_to_L*min(L_to_m*G%dyCu(I,j), max(u_width(npt), 0.0)) if (j>=G%jsc .and. j<=G%jec .and. I>=G%isc .and. I<=G%iec) then ! 
Limit messages/checking to compute domain if ( G%mask2dCu(I,j) == 0.0 ) then - write(*,'(A,2F8.2,A,4F8.2,A)') "read_face_lengths_list : G%mask2dCu=0 at ",lat,lon," (",& + write(stdout,'(A,2F8.2,A,4F8.2,A)') "read_face_lengths_list : G%mask2dCu=0 at ",lat,lon," (",& u_lat(1,npt), u_lat(2,npt), u_lon(1,npt), u_lon(2,npt),") so grid metric is unmodified." else u_line_used(npt) = u_line_used(npt) + 1 - write(*,'(A,2F8.2,A,4F8.2,A5,F9.2,A1)') & + write(stdout,'(A,2F8.2,A,4F8.2,A5,F9.2,A1)') & "read_face_lengths_list : Modifying dy_Cu gridpoint at ",lat,lon," (",& u_lat(1,npt), u_lat(2,npt), u_lon(1,npt), u_lon(2,npt),") to ",L_to_m*G%dy_Cu(I,j),"m" endif @@ -1038,11 +1038,11 @@ subroutine reset_face_lengths_list(G, param_file, US) G%dx_Cv(i,J) = G%mask2dCv(i,J) * m_to_L*min(L_to_m*G%dxCv(i,J), max(v_width(npt), 0.0)) if (i>=G%isc .and. i<=G%iec .and. J>=G%jsc .and. J<=G%jec) then ! Limit messages/checking to compute domain if ( G%mask2dCv(i,J) == 0.0 ) then - write(*,'(A,2F8.2,A,4F8.2,A)') "read_face_lengths_list : G%mask2dCv=0 at ",lat,lon," (",& + write(stdout,'(A,2F8.2,A,4F8.2,A)') "read_face_lengths_list : G%mask2dCv=0 at ",lat,lon," (",& v_lat(1,npt), v_lat(2,npt), v_lon(1,npt), v_lon(2,npt),") so grid metric is unmodified." else v_line_used(npt) = v_line_used(npt) + 1 - write(*,'(A,2F8.2,A,4F8.2,A5,F9.2,A1)') & + write(stdout,'(A,2F8.2,A,4F8.2,A5,F9.2,A1)') & "read_face_lengths_list : Modifying dx_Cv gridpoint at ",lat,lon," (",& v_lat(1,npt), v_lat(2,npt), v_lon(1,npt), v_lon(2,npt),") to ",L_to_m*G%dx_Cv(I,j),"m" endif diff --git a/src/initialization/MOM_state_initialization.F90 b/src/initialization/MOM_state_initialization.F90 index b0dcb58609..f616f09e10 100644 --- a/src/initialization/MOM_state_initialization.F90 +++ b/src/initialization/MOM_state_initialization.F90 @@ -164,7 +164,6 @@ subroutine MOM_initialize_state(u, v, h, tv, Time, G, GV, US, PF, dirs, & ! by a large surface pressure, such as with an ice sheet. 
logical :: regrid_accelerate integer :: regrid_iterations -! logical :: Analytic_FV_PGF, obsol_test logical :: convert logical :: just_read ! If true, only read the parameters because this ! is a run from a restart file; this option @@ -1281,7 +1280,7 @@ subroutine initialize_velocity_from_file(u, v, G, GV, US, param_file, just_read_ intent(out) :: v !< The meridional velocity that is being initialized [L T-1 ~> m s-1] type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type type(param_file_type), intent(in) :: param_file !< A structure indicating the open file to - !! parse for modelparameter values. + !! parse for model parameter values. logical, optional, intent(in) :: just_read_params !< If present and true, this call will !! only read parameters without changing h. ! Local variables @@ -1322,7 +1321,7 @@ subroutine initialize_velocity_zero(u, v, G, GV, param_file, just_read_params) real, dimension(SZI_(G),SZJB_(G),SZK_(GV)), & intent(out) :: v !< The meridional velocity that is being initialized [L T-1 ~> m s-1] type(param_file_type), intent(in) :: param_file !< A structure indicating the open file to - !! parse for modelparameter values. + !! parse for model parameter values. logical, optional, intent(in) :: just_read_params !< If present and true, this call will !! only read parameters without changing h. ! Local variables @@ -1358,7 +1357,7 @@ subroutine initialize_velocity_uniform(u, v, G, GV, US, param_file, just_read_pa intent(out) :: v !< The meridional velocity that is being initialized [L T-1 ~> m s-1] type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type type(param_file_type), intent(in) :: param_file !< A structure indicating the open file to - !! parse for modelparameter values. + !! parse for model parameter values. logical, optional, intent(in) :: just_read_params !< If present and true, this call will !! only read parameters without changing h. ! 
Local variables @@ -1667,7 +1666,7 @@ subroutine initialize_temp_salt_linear(T, S, G, GV, param_file, just_read_params integer :: k real :: delta_S, delta_T - real :: S_top, T_top ! Reference salinity and temerature within surface layer + real :: S_top, T_top ! Reference salinity and temperature within surface layer real :: S_range, T_range ! Range of salinities and temperatures over the vertical real :: delta logical :: just_read ! If true, just read parameters but set nothing. @@ -1995,13 +1994,11 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, G, GV, US, PF, just_read_param ! Local variables character(len=200) :: filename !< The name of an input file containing temperature - !! and salinity in z-space; also used for ice shelf area. - character(len=200) :: tfilename !< The name of an input file containing only temperature - !! in z-space. - character(len=200) :: sfilename !< The name of an input file containing only salinity - !! in z-space. + !! and salinity in z-space; by default it is also used for ice shelf area. + character(len=200) :: tfilename !< The name of an input file containing temperature in z-space. + character(len=200) :: sfilename !< The name of an input file containing salinity in z-space. character(len=200) :: shelf_file !< The name of an input file used for ice shelf area. - character(len=200) :: inputdir !! The directory where NetCDF input filesare. + character(len=200) :: inputdir !! The directory where NetCDF input files are. character(len=200) :: mesg, area_varname, ice_shelf_file type(EOS_type), pointer :: eos => NULL() @@ -2065,6 +2062,10 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, G, GV, US, PF, just_read_param logical :: use_ice_shelf logical :: pre_gridded logical :: separate_mixed_layer ! If true, handle the mixed layers differently. + logical :: density_extrap_bug ! If true use an expression with a vertical indexing bug for + ! extrapolating the densities at the bottom of unstable profiles + ! 
from data when finding the initial interface locations in + ! layered mode from a dataset of T and S. character(len=10) :: remappingScheme real :: tempAvg, saltAvg integer :: nPoints, ans @@ -2096,26 +2097,26 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, G, GV, US, PF, just_read_param use_ice_shelf = present(frac_shelf_h) - call get_param(PF, mdl, "TEMP_SALT_Z_INIT_FILE",filename, & + call get_param(PF, mdl, "TEMP_SALT_Z_INIT_FILE", filename, & "The name of the z-space input file used to initialize "//& "temperatures (T) and salinities (S). If T and S are not "//& "in the same file, TEMP_Z_INIT_FILE and SALT_Z_INIT_FILE "//& - "must be set.",default="temp_salt_z.nc",do_not_log=just_read) - call get_param(PF, mdl, "TEMP_Z_INIT_FILE",tfilename, & + "must be set.", default="temp_salt_z.nc", do_not_log=just_read) + call get_param(PF, mdl, "TEMP_Z_INIT_FILE", tfilename, & "The name of the z-space input file used to initialize "//& - "temperatures, only.", default=trim(filename),do_not_log=just_read) - call get_param(PF, mdl, "SALT_Z_INIT_FILE",sfilename, & + "temperatures, only.", default=trim(filename), do_not_log=just_read) + call get_param(PF, mdl, "SALT_Z_INIT_FILE", sfilename, & "The name of the z-space input file used to initialize "//& - "temperatures, only.", default=trim(filename),do_not_log=just_read) + "salinities, only.", default=trim(filename), do_not_log=just_read) filename = trim(inputdir)//trim(filename) tfilename = trim(inputdir)//trim(tfilename) sfilename = trim(inputdir)//trim(sfilename) call get_param(PF, mdl, "Z_INIT_FILE_PTEMP_VAR", potemp_var, & "The name of the potential temperature variable in "//& - "TEMP_Z_INIT_FILE.", default="ptemp",do_not_log=just_read) + "TEMP_Z_INIT_FILE.", default="ptemp", do_not_log=just_read) call get_param(PF, mdl, "Z_INIT_FILE_SALT_VAR", salin_var, & "The name of the salinity variable in "//& - "SALT_Z_INIT_FILE.", default="salt",do_not_log=just_read) + "SALT_Z_INIT_FILE.", default="salt", 
do_not_log=just_read) call get_param(PF, mdl, "Z_INIT_HOMOGENIZE", homogenize, & "If True, then horizontally homogenize the interpolated "//& "initial conditions.", default=.false., do_not_log=just_read) @@ -2177,6 +2178,11 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, G, GV, US, PF, just_read_param "The mixed layer depth in the initial conditions when Z_INIT_SEPARATE_MIXED_LAYER "//& "is set to true.", default=Hmix_default, units="m", scale=US%m_to_Z, & do_not_log=(just_read .or. .not.separate_mixed_layer)) + call get_param(PF, mdl, "LAYER_Z_INIT_IC_EXTRAP_BUG", density_extrap_bug, & + "If true use an expression with a vertical indexing bug for extrapolating the "//& + "densities at the bottom of unstable profiles from data when finding the "//& + "initial interface locations in layered mode from a dataset of T and S.", & + default=.true., do_not_log=just_read) ! Reusing MINIMUM_DEPTH for the default mixed layer depth may be a strange choice, but ! it reproduces previous answers. 
endif @@ -2343,8 +2349,8 @@ subroutine MOM_temp_salt_initialize_from_Z(h, tv, G, GV, US, PF, just_read_param nkml = 0 ; if (separate_mixed_layer) nkml = GV%nkml - call find_interfaces(rho_z, z_in, kd, Rb, G%bathyT, zi, G, GV, US, & - nlevs, nkml, hml=Hmix_depth, eps_z=eps_z, eps_rho=eps_rho) + call find_interfaces(rho_z, z_in, kd, Rb, G%bathyT, zi, G, GV, US, nlevs, nkml, & + Hmix_depth, eps_z, eps_rho, density_extrap_bug) if (correct_thickness) then call adjustEtaToFitBathymetry(G, GV, US, zi, h) @@ -2430,14 +2436,14 @@ end subroutine MOM_temp_salt_initialize_from_Z !> Find interface positions corresponding to interpolated depths in a density profile subroutine find_interfaces(rho, zin, nk_data, Rb, depth, zi, G, GV, US, nlevs, nkml, hml, & - eps_z, eps_rho) + eps_z, eps_rho, density_extrap_bug) type(ocean_grid_type), intent(in) :: G !< The ocean's grid structure - type(verticalGrid_type), intent(in) :: GV !< The ocean's vertical grid structure + type(verticalGrid_type), intent(in) :: GV !< The ocean's vertical grid structure integer, intent(in) :: nk_data !< The number of levels in the input data real, dimension(SZI_(G),SZJ_(G),nk_data), & intent(in) :: rho !< Potential density in z-space [R ~> kg m-3] real, dimension(nk_data), intent(in) :: zin !< Input data levels [Z ~> m]. - real, dimension(SZK_(GV)+1), intent(in) :: Rb !< target interface densities [R ~> kg m-3] + real, dimension(SZK_(GV)+1), intent(in) :: Rb !< target interface densities [R ~> kg m-3] real, dimension(SZI_(G),SZJ_(G)), & intent(in) :: depth !< ocean depth [Z ~> m]. real, dimension(SZI_(G),SZJ_(G),SZK_(GV)+1), & @@ -2450,6 +2456,11 @@ subroutine find_interfaces(rho, zin, nk_data, Rb, depth, zi, G, GV, US, nlevs, n real, intent(in) :: hml !< mixed layer depth [Z ~> m]. real, intent(in) :: eps_z !< A negligibly small layer thickness [Z ~> m]. real, intent(in) :: eps_rho !< A negligibly small density difference [R ~> kg m-3]. 
+ logical, intent(in) :: density_extrap_bug !< If true use an expression with an + !! indexing bug for projecting the densities at + !! the bottom of unstable profiles from data when + !! finding the initial interface locations in + !! layered mode from a dataset of T and S. ! Local variables real, dimension(nk_data) :: rho_ ! A column of densities [R ~> kg m-3] @@ -2474,7 +2485,7 @@ subroutine find_interfaces(rho, zin, nk_data, Rb, depth, zi, G, GV, US, nlevs, n unstable=.true. work_down = .true. do while (unstable) - ! Modifiy the input profile until it no longer has densities that decrease with depth. + ! Modify the input profile until it no longer has densities that decrease with depth. unstable=.false. if (work_down) then do k=2,nlevs_data-1 ; if (rho_(k) - rho_(k-1) < 0.0 ) then @@ -2490,7 +2501,11 @@ subroutine find_interfaces(rho, zin, nk_data, Rb, depth, zi, G, GV, US, nlevs, n else do k=nlevs_data-1,2,-1 ; if (rho_(k+1) - rho_(k) < 0.0) then if (k == nlevs_data-1) then - rho_(k+1) = rho_(k-1) + eps_rho !### This should be rho_(k) + eps_rho + if (density_extrap_bug) then + rho_(k+1) = rho_(k-1) + eps_rho + else + rho_(k+1) = rho_(k) + eps_rho + endif else drhodz = (rho_(k+1)-rho_(k-1)) / (zin(k+1)-zin(k-1)) if (drhodz < 0.0) unstable=.true. 
@@ -2592,10 +2607,10 @@ subroutine MOM_state_init_tests(G, GV, US, tv) write(0,*) '' write(0,*) ' ==================================================================== ' write(0,*) '' - write(0,*) GV%H_to_m*h + write(0,*) GV%H_to_m*h(:) call cut_off_column_top(nk, tv, GV, US, GV%g_Earth, -e(nk+1), GV%Angstrom_Z, & T, T_t, T_b, S, S_t, S_b, 0.5*P_tot, h, remap_CS) - write(0,*) GV%H_to_m*h + write(0,*) GV%H_to_m*h(:) end subroutine MOM_state_init_tests diff --git a/src/parameterizations/lateral/MOM_MEKE.F90 b/src/parameterizations/lateral/MOM_MEKE.F90 index 562694a1e1..850f94cff2 100644 --- a/src/parameterizations/lateral/MOM_MEKE.F90 +++ b/src/parameterizations/lateral/MOM_MEKE.F90 @@ -912,7 +912,7 @@ subroutine MEKE_lengthScales_0d(CS, US, area, beta, depth, Rd_dx, SN, EKE, & ! Z type(MEKE_CS), pointer :: CS !< MEKE control structure. type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type real, intent(in) :: area !< Grid cell area [L2 ~> m2] - real, intent(in) :: beta !< Planetary beta = |grad F| [T-1 L-1 ~> s-1 m-1] + real, intent(in) :: beta !< Planetary beta = \f$ \nabla f\f$ [T-1 L-1 ~> s-1 m-1] real, intent(in) :: depth !< Ocean depth [Z ~> m] real, intent(in) :: Rd_dx !< Resolution Ld/dx [nondim]. real, intent(in) :: SN !< Eady growth rate [T-1 ~> s-1]. diff --git a/src/parameterizations/lateral/MOM_hor_visc.F90 b/src/parameterizations/lateral/MOM_hor_visc.F90 index 53667bf646..ed3ef7173e 100644 --- a/src/parameterizations/lateral/MOM_hor_visc.F90 +++ b/src/parameterizations/lateral/MOM_hor_visc.F90 @@ -193,6 +193,7 @@ module MOM_hor_visc integer :: id_sh_xy_q = -1, id_sh_xx_h = -1 integer :: id_FrictWork = -1, id_FrictWorkIntz = -1 integer :: id_FrictWork_GME = -1 + integer :: id_normstress = -1, id_shearstress = -1 !>@} @@ -307,8 +308,8 @@ subroutine horizontal_viscosity(u, v, h, diffu, diffv, MEKE, VarMix, G, GV, US, vort_xy_q, & ! vertical vorticity at corner points [T-1 ~> s-1] sh_xy_q, & ! 
horizontal shearing strain at corner points [T-1 ~> s-1] GME_coeff_q, & !< GME coeff. at q-points [L2 T-1 ~> m2 s-1] - max_diss_rate_q ! maximum possible energy dissipated by lateral friction [L2 T-3 ~> m2 s-3] - + max_diss_rate_q, & ! maximum possible energy dissipated by lateral friction [L2 T-3 ~> m2 s-3] + ShSt ! A diagnostic array of shear stress [T-1 ~> s-1]. real, dimension(SZIB_(G),SZJ_(G),SZK_(GV)+1) :: & KH_u_GME !< interface height diffusivities in u-columns [L2 T-1 ~> m2 s-1] real, dimension(SZI_(G),SZJB_(G),SZK_(GV)+1) :: & @@ -320,7 +321,8 @@ subroutine horizontal_viscosity(u, v, h, diffu, diffv, MEKE, VarMix, G, GV, US, FrictWork, & ! work done by MKE dissipation mechanisms [R L2 T-3 ~> W m-2] FrictWork_GME, & ! work done by GME [R L2 T-3 ~> W m-2] div_xx_h, & ! horizontal divergence [T-1 ~> s-1] - sh_xx_h ! horizontal tension (du/dx - dv/dy) including metric terms [T-1 ~> s-1] + sh_xx_h, & ! horizontal tension (du/dx - dv/dy) including metric terms [T-1 ~> s-1] + NoSt ! A diagnostic array of normal stress [T-1 ~> s-1]. 
real, dimension(SZI_(G),SZJ_(G),SZK_(GV)) :: & grid_Re_Kh, & !< Grid Reynolds number for Laplacian horizontal viscosity at h points [nondim] grid_Re_Ah, & !< Grid Reynolds number for Biharmonic horizontal viscosity at h points [nondim] @@ -494,7 +496,7 @@ subroutine horizontal_viscosity(u, v, h, diffu, diffv, MEKE, VarMix, G, GV, US, !$OMP diffu, diffv, max_diss_rate_h, max_diss_rate_q, & !$OMP Kh_h, Kh_q, Ah_h, Ah_q, FrictWork, FrictWork_GME, & !$OMP div_xx_h, sh_xx_h, vort_xy_q, sh_xy_q, GME_coeff_h, GME_coeff_q, & - !$OMP TD, KH_u_GME, KH_v_GME, grid_Re_Kh, grid_Re_Ah & + !$OMP TD, KH_u_GME, KH_v_GME, grid_Re_Kh, grid_Re_Ah, NoSt, ShSt & !$OMP ) & !$OMP private( & !$OMP i, j, k, n, & @@ -524,6 +526,7 @@ subroutine horizontal_viscosity(u, v, h, diffu, diffv, MEKE, VarMix, G, GV, US, dvdy(i,j) = CS%DX_dyT(i,j)*(G%IdxCv(i,J) * v(i,J,k) - & G%IdxCv(i,J-1) * v(i,J-1,k)) sh_xx(i,j) = dudx(i,j) - dvdy(i,j) + if (CS%id_normstress > 0) NoSt(i,j,k) = sh_xx(i,j) enddo ; enddo ! Components for the shearing strain @@ -671,10 +674,12 @@ subroutine horizontal_viscosity(u, v, h, diffu, diffv, MEKE, VarMix, G, GV, US, if (CS%no_slip) then do J=js-2,Jeq+1 ; do I=is-2,Ieq+1 sh_xy(I,J) = (2.0-G%mask2dBu(I,J)) * ( dvdx(I,J) + dudy(I,J) ) + if (CS%id_shearstress > 0) ShSt(I,J,k) = sh_xy(I,J) enddo ; enddo else do J=js-2,Jeq+1 ; do I=is-2,Ieq+1 sh_xy(I,J) = G%mask2dBu(I,J) * ( dvdx(I,J) + dudy(I,J) ) + if (CS%id_shearstress > 0) ShSt(I,J,k) = sh_xy(I,J) enddo ; enddo endif @@ -1338,6 +1343,8 @@ subroutine horizontal_viscosity(u, v, h, diffu, diffv, MEKE, VarMix, G, GV, US, enddo ! end of k loop ! Offer fields for diagnostic averaging. 
+ if (CS%id_normstress > 0) call post_data(CS%id_normstress, NoSt, CS%diag) + if (CS%id_shearstress > 0) call post_data(CS%id_shearstress, ShSt, CS%diag) if (CS%id_diffu>0) call post_data(CS%id_diffu, diffu, CS%diag) if (CS%id_diffv>0) call post_data(CS%id_diffv, diffv, CS%diag) if (CS%id_FrictWork>0) call post_data(CS%id_FrictWork, FrictWork, CS%diag) @@ -2075,6 +2082,10 @@ subroutine hor_visc_init(Time, G, GV, US, param_file, diag, CS, MEKE, ADp) endif endif ! Register fields for output from this module. + CS%id_normstress = register_diag_field('ocean_model', 'NoSt', diag%axesTL, Time, & + 'Normal Stress', 's-1', conversion=US%s_to_T) + CS%id_shearstress = register_diag_field('ocean_model', 'ShSt', diag%axesBL, Time, & + 'Shear Stress', 's-1', conversion=US%s_to_T) CS%id_diffu = register_diag_field('ocean_model', 'diffu', diag%axesCuL, Time, & 'Zonal Acceleration from Horizontal Viscosity', 'm s-2', conversion=US%L_T2_to_m_s2) CS%id_diffv = register_diag_field('ocean_model', 'diffv', diag%axesCvL, Time, & diff --git a/src/tracer/MOM_lateral_boundary_diffusion.F90 b/src/tracer/MOM_lateral_boundary_diffusion.F90 index f928c78274..44e105d1cf 100644 --- a/src/tracer/MOM_lateral_boundary_diffusion.F90 +++ b/src/tracer/MOM_lateral_boundary_diffusion.F90 @@ -6,24 +6,25 @@ module MOM_lateral_boundary_diffusion ! This file is part of MOM6. See LICENSE.md for the license. 
use MOM_cpu_clock, only : cpu_clock_id, cpu_clock_begin, cpu_clock_end -use MOM_cpu_clock, only : CLOCK_MODULE, CLOCK_ROUTINE -use MOM_domains, only : pass_var +use MOM_cpu_clock, only : CLOCK_MODULE +use MOM_checksums, only : hchksum +use MOM_domains, only : pass_var, sum_across_PEs use MOM_diag_mediator, only : diag_ctrl, time_type use MOM_diag_mediator, only : post_data, register_diag_field -use MOM_error_handler, only : MOM_error, FATAL, WARNING, MOM_mesg, is_root_pe +use MOM_diag_vkernels, only : reintegrate_column +use MOM_error_handler, only : MOM_error, FATAL, is_root_pe use MOM_file_parser, only : get_param, log_version, param_file_type -use MOM_file_parser, only : openParameterBlock, closeParameterBlock use MOM_grid, only : ocean_grid_type use MOM_remapping, only : remapping_CS, initialize_remapping -use MOM_remapping, only : extract_member_remapping_CS, build_reconstructions_1d -use MOM_remapping, only : average_value_ppoly, remappingSchemesDoc, remappingDefaultScheme +use MOM_remapping, only : extract_member_remapping_CS, remapping_core_h +use MOM_remapping, only : remappingSchemesDoc, remappingDefaultScheme use MOM_tracer_registry, only : tracer_registry_type, tracer_type use MOM_unit_scaling, only : unit_scale_type use MOM_verticalGrid, only : verticalGrid_type use MOM_CVMix_KPP, only : KPP_get_BLD, KPP_CS use MOM_energetic_PBL, only : energetic_PBL_get_MLD, energetic_PBL_CS use MOM_diabatic_driver, only : diabatic_CS, extract_diabatic_member -use iso_fortran_env, only : stdout=>output_unit, stderr=>error_unit +use MOM_io, only : stdout, stderr implicit none ; private @@ -36,45 +37,49 @@ module MOM_lateral_boundary_diffusion #include !> Sets parameters for lateral boundary mixing module. -type, public :: lateral_boundary_diffusion_CS ; private - integer :: method !< Determine which of the three methods calculate - !! and apply near boundary layer fluxes - !! 1. Along layer - !! 2. 
Bulk-layer approach (not recommended) - integer :: deg !< Degree of polynomial reconstruction - integer :: surface_boundary_scheme !< Which boundary layer scheme to use - !! 1. ePBL; 2. KPP - logical :: limiter !< Controls wether a flux limiter is applied. - !! Only valid when method = 2. - logical :: linear !< If True, apply a linear transition at the base/top of the boundary. - !! The flux will be fully applied at k=k_min and zero at k=k_max. - - type(remapping_CS) :: remap_CS !< Control structure to hold remapping configuration - type(KPP_CS), pointer :: KPP_CSp => NULL() !< KPP control structure needed to get BLD - type(energetic_PBL_CS), pointer :: energetic_PBL_CSp => NULL() !< ePBL control structure needed to get BLD - type(diag_ctrl), pointer :: diag => NULL() !< A structure that is used to - !! regulate the timing of diagnostic output. -end type lateral_boundary_diffusion_CS +type, public :: lbd_CS ; private + logical :: debug !< If true, write verbose checksums for debugging. + integer :: deg !< Degree of polynomial reconstruction. + integer :: surface_boundary_scheme !< Which boundary layer scheme to use + !! 1. ePBL; 2. KPP + logical :: limiter !< Controls whether a flux limiter is applied in the + !! native grid (default is true). + logical :: limiter_remap !< Controls whether a flux limiter is applied in the + !! remapped grid (default is false). + logical :: linear !< If True, apply a linear transition at the base/top of the boundary. + !! The flux will be fully applied at k=k_min and zero at k=k_max. + real :: H_subroundoff !< A thickness that is so small that it can be added to a thickness of + !! Angstrom or larger without changing it at the bit level [H ~> m or kg m-2]. + !! If Angstrom is 0 or exceedingly small, this is negligible compared to 1e-17 m. + type(remapping_CS) :: remap_CS !< Control structure to hold remapping configuration. + type(KPP_CS), pointer :: KPP_CSp => NULL() !< KPP control structure needed to get BLD. 
+ type(energetic_PBL_CS), pointer :: energetic_PBL_CSp => NULL() !< ePBL control structure needed to get BLD. + type(diag_ctrl), pointer :: diag => NULL() !< A structure that is used to + !! regulate the timing of diagnostic output. +end type lbd_CS ! This include declares and sets the variable "version". #include "version_variable.h" -character(len=40) :: mdl = "MOM_lateral_boundary_diffusion" !< Name of this module +character(len=40) :: mdl = "MOM_lateral_boundary_diffusion" !< Name of this module +integer :: id_clock_lbd !< CPU clock for lbd contains !> Initialization routine that reads runtime parameters and sets up pointers to other control structures that might be !! needed for lateral boundary diffusion. -logical function lateral_boundary_diffusion_init(Time, G, param_file, diag, diabatic_CSp, CS) +logical function lateral_boundary_diffusion_init(Time, G, GV, param_file, diag, diabatic_CSp, CS) type(time_type), target, intent(in) :: Time !< Time structure type(ocean_grid_type), intent(in) :: G !< Grid structure + type(verticalGrid_type), intent(in) :: GV !< ocean vertical grid structure type(param_file_type), intent(in) :: param_file !< Parameter file structure type(diag_ctrl), target, intent(inout) :: diag !< Diagnostics control structure type(diabatic_CS), pointer :: diabatic_CSp !< KPP control structure needed to get BLD - type(lateral_boundary_diffusion_CS), pointer :: CS !< Lateral boundary mixing control structure + type(lbd_CS), pointer :: CS !< Lateral boundary mixing control structure ! local variables - character(len=80) :: string ! Temporary strings - logical :: boundary_extrap + character(len=80) :: string ! Temporary strings + integer :: ke, nk ! Number of levels in the LBD and native grids, respectively + logical :: boundary_extrap ! 
controls if boundary extrapolation is used in the LBD code if (ASSOCIATED(CS)) then call MOM_error(FATAL, "lateral_boundary_diffusion_init called with associated control structure.") @@ -90,11 +95,11 @@ logical function lateral_boundary_diffusion_init(Time, G, param_file, diag, diab call get_param(param_file, mdl, "USE_LATERAL_BOUNDARY_DIFFUSION", lateral_boundary_diffusion_init, & "If true, enables the lateral boundary tracer's diffusion module.", & default=.false.) - if (.not. lateral_boundary_diffusion_init) return allocate(CS) CS%diag => diag + CS%H_subroundoff = GV%H_subroundoff call extract_diabatic_member(diabatic_CSp, KPP_CSp=CS%KPP_CSp) call extract_diabatic_member(diabatic_CSp, energetic_PBL_CSp=CS%energetic_PBL_CSp) @@ -104,18 +109,13 @@ logical function lateral_boundary_diffusion_init(Time, G, param_file, diag, diab endif ! Read all relevant parameters and write them to the model log. - call get_param(param_file, mdl, "LATERAL_BOUNDARY_METHOD", CS%method, & - "Determine how to apply boundary lateral diffusion of tracers: \n"//& - "1. Along layer approach \n"//& - "2. Bulk layer approach (this option is not recommended)", default=1) - if (CS%method == 2) then - call get_param(param_file, mdl, "APPLY_LIMITER", CS%limiter, & - "If True, apply a flux limiter in the LBD. This is only available \n"//& - "when LATERAL_BOUNDARY_METHOD=2.", default=.false.) - endif call get_param(param_file, mdl, "LBD_LINEAR_TRANSITION", CS%linear, & "If True, apply a linear transition at the base/top of the boundary. \n"//& "The flux will be fully applied at k=k_min and zero at k=k_max.", default=.false.) + call get_param(param_file, mdl, "APPLY_LIMITER", CS%limiter, & + "If True, apply a flux limiter in the native grid.", default=.true.) + call get_param(param_file, mdl, "APPLY_LIMITER_REMAP", CS%limiter_remap, & + "If True, apply a flux limiter in the remapped grid.", default=.false.) 
call get_param(param_file, mdl, "LBD_BOUNDARY_EXTRAP", boundary_extrap, & "Use boundary extrapolation in LBD code", & default=.false.) @@ -124,142 +124,139 @@ logical function lateral_boundary_diffusion_init(Time, G, param_file, diag, diab "for vertical remapping for all variables. "//& "It can be one of the following schemes: "//& trim(remappingSchemesDoc), default=remappingDefaultScheme) - call initialize_remapping( CS%remap_CS, string, boundary_extrapolation = boundary_extrap ) + call initialize_remapping( CS%remap_CS, string, boundary_extrapolation = boundary_extrap ,& + check_reconstruction = .false., check_remapping = .false., answers_2018 = .false.) call extract_member_remapping_CS(CS%remap_CS, degree=CS%deg) + call get_param(param_file, mdl, "LBD_DEBUG", CS%debug, & + "If true, write out verbose debugging data in the LBD module.", & + default=.false.) + + id_clock_lbd = cpu_clock_id('(Ocean LBD)', grain=CLOCK_MODULE) end function lateral_boundary_diffusion_init !> Driver routine for calculating lateral diffusive fluxes near the top and bottom boundaries. -!! Two different methods are available: -!! Method 1: lower order representation, calculate fluxes from bulk layer integrated quantities. -!! Method 2: more straight forward, diffusion is applied layer by layer using only information -!! from neighboring cells. +!! Diffusion is applied using only information from neighboring cells, as follows: +!! 1) remap tracer to a z* grid (LBD grid) +!! 2) calculate diffusive tracer fluxes (F) in the LBD grid using a layer by layer approach +!! 3) remap fluxes to the native grid +!! 
4) update tracer by adding the divergence of F subroutine lateral_boundary_diffusion(G, GV, US, h, Coef_x, Coef_y, dt, Reg, CS) - type(ocean_grid_type), intent(inout) :: G !< Grid type - type(verticalGrid_type), intent(in) :: GV !< ocean vertical grid structure - type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type + type(ocean_grid_type), intent(inout) :: G !< Grid type + type(verticalGrid_type), intent(in) :: GV !< ocean vertical grid structure + type(unit_scale_type), intent(in) :: US !< A dimensional unit scaling type real, dimension(SZI_(G),SZJ_(G),SZK_(GV)), & intent(in) :: h !< Layer thickness [H ~> m or kg m-2] real, dimension(SZIB_(G),SZJ_(G)), intent(in) :: Coef_x !< dt * Kh * dy / dx at u-points [L2 ~> m2] real, dimension(SZI_(G),SZJB_(G)), intent(in) :: Coef_y !< dt * Kh * dx / dy at v-points [L2 ~> m2] real, intent(in) :: dt !< Tracer time step * I_numitts - !! (I_numitts in tracer_hordiff) + !! (I_numitts in tracer_hordiff) [T ~> s] type(tracer_registry_type), pointer :: Reg !< Tracer registry - type(lateral_boundary_diffusion_CS), intent(in) :: CS !< Control structure for this module + type(lbd_CS), pointer :: CS !< Control structure for this module ! Local variables - real, dimension(SZI_(G),SZJ_(G)) :: hbl !< bnd. 
layer depth [H ~> m or kg m-2] - real, dimension(SZI_(G),SZJ_(G),SZK_(GV),CS%deg+1) :: ppoly0_coefs !< Coefficients of polynomial - real, dimension(SZI_(G),SZJ_(G),SZK_(GV),2) :: ppoly0_E !< Edge values from reconstructions - real, dimension(SZK_(GV),CS%deg+1) :: ppoly_S !< Slopes from reconstruction (placeholder) - real, dimension(SZIB_(G),SZJ_(G),SZK_(GV)) :: uFlx !< Zonal flux of tracer [conc m^3] - real, dimension(SZIB_(G),SZJ_(G)) :: uFLx_bulk !< Total calculated bulk-layer u-flux for the tracer - real, dimension(SZI_(G),SZJB_(G),SZK_(GV)) :: vFlx !< Meridional flux of tracer [conc m^3] - real, dimension(SZI_(G),SZJB_(G)) :: vFlx_bulk !< Total calculated bulk-layer v-flux for the tracer - real, dimension(SZIB_(G),SZJ_(G)) :: uwork_2d !< Layer summed u-flux transport - real, dimension(SZI_(G),SZJB_(G)) :: vwork_2d !< Layer summed v-flux transport - real, dimension(SZI_(G),SZJ_(G),SZK_(GV)) :: tendency !< tendency array for diagnostics - real, dimension(SZI_(G),SZJ_(G)) :: tendency_2d !< depth integrated content tendency for diagnostics - type(tracer_type), pointer :: Tracer => NULL() !< Pointer to the current tracer - integer :: remap_method !< Reconstruction method - integer :: i,j,k,m !< indices to loop over + real, dimension(SZI_(G),SZJ_(G)) :: hbl !< Boundary layer depth [H ~> m or kg m-2] + real, dimension(SZIB_(G),SZJ_(G),SZK_(GV)) :: uFlx !< Zonal flux of tracer [conc H L2 ~> conc kg or conc m^3] + real, dimension(SZI_(G),SZJB_(G),SZK_(GV)) :: vFlx !< Meridional flux of tracer + !! [conc H L2 ~> conc kg or conc m^3] + real, dimension(SZIB_(G),SZJ_(G)) :: uwork_2d !< Layer summed u-flux transport + !! [conc H L2 ~> conc kg or conc m^3] + real, dimension(SZI_(G),SZJB_(G)) :: vwork_2d !< Layer summed v-flux transport + !! 
[conc H L2 ~> conc kg or conc m^3] + real, dimension(SZI_(G),SZJ_(G),SZK_(GV)) :: tendency !< tendency array for diagnostic [conc T-1 ~> conc s-1] + real, dimension(SZI_(G),SZJ_(G)) :: tendency_2d !< depth integrated content tendency for diagn + type(tracer_type), pointer :: tracer => NULL() !< Pointer to the current tracer + real, dimension(SZK_(GV)) :: tracer_1d !< 1d-array used to remap tracer change to native grid + real, dimension(SZI_(G),SZJ_(G),SZK_(GV)) :: tracer_old !< local copy of the initial tracer concentration, + !! only used to compute tendencies. + real, dimension(SZI_(G),SZJ_(G)) :: tracer_int !< integrated tracer before LBD is applied + !! [conc H L2 ~> conc m3 or conc kg] + real, dimension(SZI_(G),SZJ_(G)) :: tracer_end !< integrated tracer after LBD is applied. + !! [conc H L2 ~> conc m3 or conc kg] + integer :: i, j, k, m !< indices to loop over real :: Idt !< inverse of the time step [s-1] + real :: tmp1, tmp2 !< temporary variables + call cpu_clock_begin(id_clock_lbd) Idt = 1./dt - hbl(:,:) = 0. if (ASSOCIATED(CS%KPP_CSp)) call KPP_get_BLD(CS%KPP_CSp, hbl, G, US, m_to_BLD_units=GV%m_to_H) - if (ASSOCIATED(CS%energetic_PBL_CSp)) & - call energetic_PBL_get_MLD(CS%energetic_PBL_CSp, hbl, G, US, m_to_MLD_units=GV%m_to_H) - + if (ASSOCIATED(CS%energetic_PBL_CSp)) call energetic_PBL_get_MLD(CS%energetic_PBL_CSp, hbl, G, US, & + m_to_MLD_units=GV%m_to_H) call pass_var(hbl,G%Domain) do m = 1,Reg%ntr + ! current tracer tracer => Reg%tr(m) + if (CS%debug) then + call hchksum(tracer%t, "before LBD "//tracer%name,G%HI) + endif + ! for diagnostics - if (tracer%id_lbdxy_conc > 0 .or. tracer%id_lbdxy_cont > 0 .or. tracer%id_lbdxy_cont_2d > 0) then + if (tracer%id_lbdxy_conc > 0 .or. tracer%id_lbdxy_cont > 0 .or. tracer%id_lbdxy_cont_2d > 0 .or. CS%debug) then tendency(:,:,:) = 0.0 + tracer_old(:,:,:) = tracer%t(:,:,:) endif - ! 
Interpolate state to interface - do j=G%jsc-1,G%jec+1 ; do i=G%isc-1,G%iec+1 - call build_reconstructions_1d( CS%remap_CS, GV%ke, h(i,j,:), tracer%t(i,j,:), ppoly0_coefs(i,j,:,:), & - ppoly0_E(i,j,:,:), ppoly_S, remap_method, GV%H_subroundoff, GV%H_subroundoff) - enddo ; enddo - - ! Diffusive fluxes in the i-direction + ! Diffusive fluxes in the i- and j-direction uFlx(:,:,:) = 0. vFlx(:,:,:) = 0. - uFlx_bulk(:,:) = 0. - vFlx_bulk(:,:) = 0. - - ! Method #1 (layer by layer) - if (CS%method == 1) then - do j=G%jsc,G%jec - do i=G%isc-1,G%iec - if (G%mask2dCu(I,j)>0.) then - call fluxes_layer_method(SURFACE, GV%ke, CS%deg, h(I,j,:), h(I+1,j,:), hbl(I,j), hbl(I+1,j), & - G%areaT(I,j), G%areaT(I+1,j), tracer%t(I,j,:), tracer%t(I+1,j,:), ppoly0_coefs(I,j,:,:), & - ppoly0_coefs(I+1,j,:,:), ppoly0_E(I,j,:,:), ppoly0_E(I+1,j,:,:), remap_method, Coef_x(I,j), & - uFlx(I,j,:), CS%linear) - endif - enddo - enddo - do J=G%jsc-1,G%jec - do i=G%isc,G%iec - if (G%mask2dCv(i,J)>0.) then - call fluxes_layer_method(SURFACE, GV%ke, CS%deg, h(i,J,:), h(i,J+1,:), hbl(i,J), hbl(i,J+1), & - G%areaT(i,J), G%areaT(i,J+1), tracer%t(i,J,:), tracer%t(i,J+1,:), ppoly0_coefs(i,J,:,:), & - ppoly0_coefs(i,J+1,:,:), ppoly0_E(i,J,:,:), ppoly0_E(i,J+1,:,:), remap_method, Coef_y(i,J), & - vFlx(i,J,:), CS%linear) - endif - enddo - enddo - ! Method #2 (bulk approach) - elseif (CS%method == 2) then - do j=G%jsc,G%jec - do i=G%isc-1,G%iec - if (G%mask2dCu(I,j)>0.) then - call fluxes_bulk_method(SURFACE, GV%ke, CS%deg, h(I,j,:), h(I+1,j,:), hbl(I,j), hbl(I+1,j), & - G%areaT(I,j), G%areaT(I+1,j), tracer%t(I,j,:), tracer%t(I+1,j,:), & - ppoly0_coefs(I,j,:,:), ppoly0_coefs(I+1,j,:,:), ppoly0_E(I,j,:,:), & - ppoly0_E(I+1,j,:,:), remap_method, Coef_x(I,j), uFlx_bulk(I,j), uFlx(I,j,:), CS%limiter, & - CS%linear) - endif - enddo + ! LBD layer by layer + do j=G%jsc,G%jec + do i=G%isc-1,G%iec + if (G%mask2dCu(I,j)>0.) 
then + call fluxes_layer_method(SURFACE, G%ke, hbl(I,j), hbl(I+1,j), & + h(I,j,:), h(I+1,j,:), tracer%t(I,j,:), tracer%t(I+1,j,:), & + Coef_x(I,j), uFlx(I,j,:), G%areaT(I,j), G%areaT(I+1,j), CS) + endif enddo - do J=G%jsc-1,G%jec - do i=G%isc,G%iec - if (G%mask2dCv(i,J)>0.) then - call fluxes_bulk_method(SURFACE, GV%ke, CS%deg, h(i,J,:), h(i,J+1,:), hbl(i,J), hbl(i,J+1), & - G%areaT(i,J), G%areaT(i,J+1), tracer%t(i,J,:), tracer%t(i,J+1,:), & - ppoly0_coefs(i,J,:,:), ppoly0_coefs(i,J+1,:,:), ppoly0_E(i,J,:,:), & - ppoly0_E(i,J+1,:,:), remap_method, Coef_y(i,J), vFlx_bulk(i,J), vFlx(i,J,:), CS%limiter, & - CS%linear) - endif - enddo + enddo + do J=G%jsc-1,G%jec + do i=G%isc,G%iec + if (G%mask2dCv(i,J)>0.) then + call fluxes_layer_method(SURFACE, GV%ke, hbl(i,J), hbl(i,J+1), & + h(i,J,:), h(i,J+1,:), tracer%t(i,J,:), tracer%t(i,J+1,:), & + Coef_y(i,J), vFlx(i,J,:), G%areaT(i,J), G%areaT(i,J+1), CS) + endif enddo - ! Post tracer bulk diags - if (tracer%id_lbd_bulk_dfx>0) call post_data(tracer%id_lbd_bulk_dfx, uFlx_bulk*Idt, CS%diag) - if (tracer%id_lbd_bulk_dfy>0) call post_data(tracer%id_lbd_bulk_dfy, vFlx_bulk*Idt, CS%diag) - endif + enddo ! Update the tracer fluxes do k=1,GV%ke ; do j=G%jsc,G%jec ; do i=G%isc,G%iec if (G%mask2dT(i,j)>0.) then tracer%t(i,j,k) = tracer%t(i,j,k) + (( (uFlx(I-1,j,k)-uFlx(I,j,k)) ) + ( (vFlx(i,J-1,k)-vFlx(i,J,k) ) ))* & - (G%IareaT(i,j)/( h(i,j,k) + GV%H_subroundoff)) - + G%IareaT(i,j) / ( h(i,j,k) + GV%H_subroundoff ) if (tracer%id_lbdxy_conc > 0 .or. tracer%id_lbdxy_cont > 0 .or. tracer%id_lbdxy_cont_2d > 0 ) then - tendency(i,j,k) = (( (uFlx(I-1,j,k)-uFlx(I,j,k)) ) + ( (vFlx(i,J-1,k)-vFlx(i,J,k) ) )) * G%IareaT(i,j) * Idt + !### Probably this needs to be multiplied by (h(i,j,k) + GV%H_subroundoff) for consistency + ! the way it is used later in this routine. 
+ tendency(i,j,k) = (tracer%t(i,j,k)-tracer_old(i,j,k)) * Idt endif - endif enddo ; enddo ; enddo + if (CS%debug) then + call hchksum(tracer%t, "after LBD "//tracer%name,G%HI) + tracer_int(:,:) = 0.0; tracer_end(:,:) = 0.0 + ! tracer (native grid) before and after LBD + do j=G%jsc,G%jec ; do i=G%isc,G%iec + do k=1,GV%ke + tracer_int(i,j) = tracer_int(i,j) + tracer_old(i,j,k) * & + (h(i,j,k)*(G%mask2dT(i,j)*G%areaT(i,j))) + tracer_end(i,j) = tracer_end(i,j) + tracer%t(i,j,k) * & + (h(i,j,k)*(G%mask2dT(i,j)*G%areaT(i,j))) + enddo + enddo; enddo + + tmp1 = SUM(tracer_int) + tmp2 = SUM(tracer_end) + call sum_across_PEs(tmp1) + call sum_across_PEs(tmp2) + if (is_root_pe()) write(*,*)'Total '//tracer%name//' before/after LBD:', tmp1, tmp2 + endif + ! Post the tracer diagnostics - if (tracer%id_lbd_dfx>0) call post_data(tracer%id_lbd_dfx, uFlx*Idt, CS%diag) - if (tracer%id_lbd_dfy>0) call post_data(tracer%id_lbd_dfy, vFlx*Idt, CS%diag) + if (tracer%id_lbd_dfx>0) call post_data(tracer%id_lbd_dfx, uFlx(:,:,:)*Idt, CS%diag) + if (tracer%id_lbd_dfy>0) call post_data(tracer%id_lbd_dfy, vFlx(:,:,:)*Idt, CS%diag) if (tracer%id_lbd_dfx_2d>0) then uwork_2d(:,:) = 0. do k=1,GV%ke ; do j=G%jsc,G%jec ; do I=G%isc-1,G%iec @@ -277,6 +274,7 @@ subroutine lateral_boundary_diffusion(G, GV, US, h, Coef_x, Coef_y, dt, Reg, CS) endif ! post tendency of tracer content + !### This seems to be dimensionally inconsistent with the calculation of tendency above. if (tracer%id_lbdxy_cont > 0) then call post_data(tracer%id_lbdxy_cont, tendency, CS%diag) endif @@ -295,68 +293,19 @@ subroutine lateral_boundary_diffusion(G, GV, US, h, Coef_x, Coef_y, dt, Reg, CS) ! post tendency of tracer concentration; this step must be ! done after posting tracer content tendency, since we alter ! the tendency array and its units. + !### This seems to be dimensionally inconsistent with the calculation of tendency above. 
if (tracer%id_lbdxy_conc > 0) then do k=1,GV%ke ; do j=G%jsc,G%jec ; do i=G%isc,G%iec - tendency(i,j,k) = tendency(i,j,k) / ( h(i,j,k) + GV%H_subroundoff ) + tendency(i,j,k) = tendency(i,j,k) / ( h(i,j,k) + CS%H_subroundoff ) enddo ; enddo ; enddo call post_data(tracer%id_lbdxy_conc, tendency, CS%diag) endif enddo -end subroutine lateral_boundary_diffusion - -!< Calculate bulk layer value of a scalar quantity as the thickness weighted average -real function bulk_average(boundary, nk, deg, h, hBLT, phi, ppoly0_E, ppoly0_coefs, method, k_top, zeta_top, k_bot, & - zeta_bot) - integer :: boundary !< SURFACE or BOTTOM [nondim] - integer :: nk !< Number of layers [nondim] - integer :: deg !< Degree of polynomial [nondim] - real, dimension(nk) :: h !< Layer thicknesses [H ~> m or kg m-2] - real :: hBLT !< Depth of the boundary layer [H ~> m or kg m-2] - real, dimension(nk) :: phi !< Scalar quantity - real, dimension(nk,2) :: ppoly0_E !< Edge value of polynomial - real, dimension(nk,deg+1) :: ppoly0_coefs!< Coefficients of polynomial - integer :: method !< Remapping scheme to use - - integer :: k_top !< Index of the first layer within the boundary - real :: zeta_top !< Fraction of the layer encompassed by the bottom boundary layer - !! (0 if none, 1. if all). For the surface, this is always 0. because - !! integration starts at the surface [nondim] - integer :: k_bot !< Index of the last layer within the boundary - real :: zeta_bot !< Fraction of the layer encompassed by the surface boundary layer - !! (0 if none, 1. if all). For the bottom boundary layer, this is always 1. - !! because integration starts at the bottom [nondim] - ! Local variables - real :: htot !< Running sum of the thicknesses (top to bottom) - integer :: k !< k indice - - - htot = 0. - bulk_average = 0. - if (hblt == 0.) 
return - if (boundary == SURFACE) then - htot = (h(k_bot) * zeta_bot) - bulk_average = average_value_ppoly( nk, phi, ppoly0_E, ppoly0_coefs, method, k_bot, 0., zeta_bot) * htot - do k = k_bot-1,1,-1 - bulk_average = bulk_average + phi(k)*h(k) - htot = htot + h(k) - enddo - elseif (boundary == BOTTOM) then - htot = (h(k_top) * zeta_top) - ! (note 1-zeta_top because zeta_top is the fraction of the layer) - bulk_average = average_value_ppoly( nk, phi, ppoly0_E, ppoly0_coefs, method, k_top, (1.-zeta_top), 1.) * htot - do k = k_top+1,nk - bulk_average = bulk_average + phi(k)*h(k) - htot = htot + h(k) - enddo - else - call MOM_error(FATAL, "bulk_average: a valid boundary type must be provided.") - endif + call cpu_clock_end(id_clock_lbd) - bulk_average = bulk_average / hBLT - -end function bulk_average +end subroutine lateral_boundary_diffusion !> Calculate the harmonic mean of two quantities !! See \ref section_harmonic_mean. @@ -370,6 +319,190 @@ real function harmonic_mean(h1,h2) endif end function harmonic_mean +!> Returns the location of the minimum value in a 1D array +!! between indices s and e. +integer function find_minimum(x, s, e) + integer, intent(in) :: s !< start index + integer, intent(in) :: e !< end index + real, dimension(e), intent(in) :: x !< 1D array to be checked + + ! local variables + real :: minimum + integer :: location + integer :: i + + minimum = x(s) ! assume the first is the min + location = s ! record its position + do i = s+1, e ! start with next elements + if (x(i) < minimum) then ! if x(i) less than the min? + minimum = x(i) ! Yes, a new minimum found + location = i ! record its position + end if + enddo + find_minimum = location ! return the position +end function find_minimum + +!> Swaps the values of its two formal arguments. +subroutine swap(a, b) + real, intent(inout) :: a !< First value to be swaped + real, intent(inout) :: b !< Second value to be swaped + + ! 
local variables + real :: tmp + + tmp = a + a = b + b = tmp +end subroutine swap + +!> Receives a 1D array x and sorts it into ascending order. +subroutine sort(x, n) + real, dimension(n), intent(inout) :: x !< 1D array to be sorted + integer, intent(in ) :: n !< # of pts in the array + + ! local variables + integer :: i, location + + do i = 1, n-1 + location = find_minimum(x, i, n) ! find min from this to last + call swap(x(i), x(location)) ! swap this and the minimum + enddo +end subroutine sort + +!> Returns the unique values in a 1D array. +subroutine unique(val, n, val_unique, val_max) + integer, intent(in ) :: n !< # of pts in the array. + real, dimension(n), intent(in ) :: val !< 1D array to be checked. + real, dimension(:), allocatable, intent(inout) :: val_unique !< Returned 1D array with unique values. + real, optional, intent(in ) :: val_max !< sets the maximum value in val_unique to + !! this value. + ! local variables + real, dimension(n) :: tmp + integer :: i, j, ii + real :: min_val, max_val + logical :: limit + + limit = .false. + if (present(val_max)) then + limit = .true. + if (val_max > MAXVAL(val)) then + if (is_root_pe()) write(*,*)'val_max, MAXVAL(val)',val_max, MAXVAL(val) + call MOM_error(FATAL,"Houston, we've had a problem in unique (val_max cannot be > MAXVAL(val))") + endif + endif + + tmp(:) = 0. + min_val = MINVAL(val)-1 + max_val = MAXVAL(val) + i = 0 + do while (min_valmin_val) + tmp(i) = min_val + enddo + ii = i + if (limit) then + do j=1,ii + if (tmp(j) <= val_max) i = j + enddo + endif + allocate(val_unique(i), source=tmp(1:i)) +end subroutine unique + + +!> Given layer thicknesses (and corresponding interfaces) and BLDs in two adjacent columns, +!! return a set of 1-d layer thicknesses whose interfaces cover all interfaces in the left +!! and right columns plus the two BLDs. This can be used to accurately remap tracer tendencies +!! in both columns. 
+subroutine merge_interfaces(nk, h_L, h_R, hbl_L, hbl_R, H_subroundoff, h) + integer, intent(in ) :: nk !< Number of layers [nondim] + real, dimension(nk), intent(in ) :: h_L !< Layer thicknesses in the left column [H ~> m or kg m-2] + real, dimension(nk), intent(in ) :: h_R !< Layer thicknesses in the right column [H ~> m or kg m-2] + real, intent(in ) :: hbl_L !< Thickness of the boundary layer in the left column + !! [H ~> m or kg m-2] + real, intent(in ) :: hbl_R !< Thickness of the boundary layer in the right column + !! [H ~> m or kg m-2] + real, intent(in ) :: H_subroundoff !< GV%H_subroundoff [H ~> m or kg m-2] + real, dimension(:), allocatable, intent(inout) :: h !< Combined thicknesses [H ~> m or kg m-2] + + ! Local variables + integer :: n !< Number of layers in eta_all + real, dimension(nk+1) :: eta_L, eta_R!< Interfaces in the left and right coloumns + real, dimension(:), allocatable :: eta_all !< Combined interfaces in the left/right columns + hbl_L and hbl_R + real, dimension(:), allocatable :: eta_unique !< Combined interfaces (eta_L, eta_R), possibly hbl_L and hbl_R + real :: min_depth !< Minimum depth + real :: max_depth !< Maximum depth + real :: max_bld !< Deepest BLD + integer :: k, kk, nk1 !< loop indices (k and kk) and array size (nk1) + + n = (2*nk)+3 + allocate(eta_all(n)) + ! compute and merge interfaces + eta_L(:) = 0.0; eta_R(:) = 0.0; eta_all(:) = 0.0 + kk = 0 + do k=2,nk+1 + eta_L(k) = eta_L(k-1) + h_L(k-1) + eta_R(k) = eta_R(k-1) + h_R(k-1) + kk = kk + 2 + eta_all(kk) = eta_L(k) + eta_all(kk+1) = eta_R(k) + enddo + + ! add hbl_L and hbl_R into eta_all + eta_all(kk+2) = hbl_L + eta_all(kk+3) = hbl_R + + ! find maximum depth + min_depth = MIN(MAXVAL(eta_L), MAXVAL(eta_R)) + max_bld = MAX(hbl_L, hbl_R) + max_depth = MIN(min_depth, max_bld) + + ! sort eta_all + call sort(eta_all, n) + ! 
remove duplicates from eta_all and sets maximum depth + call unique(eta_all, n, eta_unique, max_depth) + + nk1 = SIZE(eta_unique) + allocate(h(nk1-1)) + do k=1,nk1-1 + h(k) = (eta_unique(k+1) - eta_unique(k)) + H_subroundoff + enddo +end subroutine merge_interfaces + +!> Calculates the maximum flux that can leave a cell and uses that to apply a +!! limiter to F_layer. +subroutine flux_limiter(F_layer, area_L, area_R, phi_L, phi_R, h_L, h_R) + real, intent(inout) :: F_layer !< Tracer flux to be checked [H L2 conc ~> m3 conc] + real, intent(in) :: area_L !< Area of left cell [L2 ~> m2] + real, intent(in) :: area_R !< Area of right cell [L2 ~> m2] + real, intent(in) :: h_L !< Thickness of left cell [H ~> m or kg m-2] + real, intent(in) :: h_R !< Thickness of right cell [H ~> m or kg m-2] + real, intent(in) :: phi_L !< Tracer concentration in the left cell [conc] + real, intent(in) :: phi_R !< Tracer concentration in the right cell [conc] + + ! local variables + real :: F_max !< maximum flux allowed + ! limit the flux to 0.2 of the tracer *gradient* + ! Why 0.2? + ! t=0 t=inf + ! 0 .2 + ! 0 1 0 .2.2.2 + ! 0 .2 + ! + F_max = -0.2 * ((area_R*(phi_R*h_R))-(area_L*(phi_L*h_L))) + + if ( SIGN(1.,F_layer) == SIGN(1., F_max)) then + ! Apply flux limiter calculated above + if (F_max >= 0.) then + F_layer = MIN(F_layer,F_max) + else + F_layer = MAX(F_layer,F_max) + endif + else + F_layer = 0.0 + endif +end subroutine flux_limiter + !> Find the k-index range corresponding to the layers that are within the boundary-layer region subroutine boundary_k_range(boundary, nk, h, hbl, k_top, zeta_top, k_bot, zeta_bot) integer, intent(in ) :: boundary !< SURFACE or BOTTOM [nondim] @@ -435,372 +568,175 @@ subroutine boundary_k_range(boundary, nk, h, hbl, k_top, zeta_top, k_bot, zeta_b end subroutine boundary_k_range - !> Calculate the lateral boundary diffusive fluxes using the layer by layer method. -!! 
See \ref section_method1 -subroutine fluxes_layer_method(boundary, nk, deg, h_L, h_R, hbl_L, hbl_R, area_L, area_R, phi_L, phi_R, & - ppoly0_coefs_L, ppoly0_coefs_R, ppoly0_E_L, ppoly0_E_R, method, khtr_u, & - F_layer, linear_decay) - - integer, intent(in ) :: boundary !< Which boundary layer SURFACE or BOTTOM [nondim] - integer, intent(in ) :: nk !< Number of layers [nondim] - integer, intent(in ) :: deg !< order of the polynomial reconstruction [nondim] - real, dimension(nk), intent(in ) :: h_L !< Layer thickness (left) [H ~> m or kg m-2] - real, dimension(nk), intent(in ) :: h_R !< Layer thickness (right) [H ~> m or kg m-2] - real, intent(in ) :: hbl_L !< Thickness of the boundary boundary - !! layer (left) [H ~> m or kg m-2] - real, intent(in ) :: hbl_R !< Thickness of the boundary boundary - !! layer (right) [H ~> m or kg m-2] - real, intent(in ) :: area_L !< Area of the horizontal grid (left) [L2 ~> m2] - real, intent(in ) :: area_R !< Area of the horizontal grid (right) [L2 ~> m2] - real, dimension(nk), intent(in ) :: phi_L !< Tracer values (left) [conc] - real, dimension(nk), intent(in ) :: phi_R !< Tracer values (right) [conc] - real, dimension(nk,deg+1), intent(in ) :: ppoly0_coefs_L !< Tracer reconstruction (left) [conc] - real, dimension(nk,deg+1), intent(in ) :: ppoly0_coefs_R !< Tracer reconstruction (right) [conc] - real, dimension(nk,2), intent(in ) :: ppoly0_E_L !< Polynomial edge values (left) [nondim] - real, dimension(nk,2), intent(in ) :: ppoly0_E_R !< Polynomial edge values (right) [nondim] - integer, intent(in ) :: method !< Method of polynomial integration [nondim] - real, intent(in ) :: khtr_u !< Horizontal diffusivities times delta t - !! at a velocity point [L2 ~> m2] - real, dimension(nk), intent( out) :: F_layer !< Layerwise diffusive flux at U- or V-point - !! [H L2 conc ~> m3 conc] - logical, optional, intent(in ) :: linear_decay !< If True, apply a linear transition at the base of - !! the boundary layer +!! 
See \ref section_method +subroutine fluxes_layer_method(boundary, ke, hbl_L, hbl_R, h_L, h_R, phi_L, phi_R, & + khtr_u, F_layer, area_L, area_R, CS) + + integer, intent(in ) :: boundary !< Which boundary layer SURFACE or BOTTOM [nondim] + integer, intent(in ) :: ke !< Number of layers in the native grid [nondim] + real, intent(in ) :: hbl_L !< Thickness of the boundary boundary + !! layer (left) [H ~> m or kg m-2] + real, intent(in ) :: hbl_R !< Thickness of the boundary boundary + !! layer (right) [H ~> m or kg m-2] + real, dimension(ke), intent(in ) :: h_L !< Thicknesses in the native grid (left) [H ~> m or kg m-2] + real, dimension(ke), intent(in ) :: h_R !< Thicknesses in the native grid (right) [H ~> m or kg m-2] + real, dimension(ke), intent(in ) :: phi_L !< Tracer values in the native grid (left) [conc] + real, dimension(ke), intent(in ) :: phi_R !< Tracer values in the native grid (right) [conc] + real, intent(in ) :: khtr_u !< Horizontal diffusivities times delta t + !! at a velocity point [L2 ~> m2] + real, dimension(ke), intent( out) :: F_layer !< Layerwise diffusive flux at U- or V-point in the native + !! grid [H L2 conc ~> m3 conc] + real, intent(in ) :: area_L !< Area of the horizontal grid (left) [L2 ~> m2] + real, intent(in ) :: area_R !< Area of the horizontal grid (right) [L2 ~> m2] + type(lbd_CS), pointer :: CS !< Lateral diffusion control structure + !! the boundary layer ! Local variables - real, dimension(nk) :: h_means !< Calculate the layer-wise harmonic means [H ~> m or kg m-2] - real :: khtr_avg !< Thickness-weighted diffusivity at the u-point [m^2 s^-1] - !! This is just to remind developers that khtr_avg should be - !! computed once khtr is 3D. - real :: heff !< Harmonic mean of layer thicknesses [H ~> m or kg m-2] - real :: inv_heff !< Inverse of the harmonic mean of layer thicknesses - !! [H-1 ~> m-1 or m2 kg-1] - real :: phi_L_avg, phi_R_avg !< Bulk, thickness-weighted tracer averages (left and right column) - !! 
[conc m^-3 ] - real :: htot !< Total column thickness [H ~> m or kg m-2] - real :: heff_tot !< Total effective column thickness in the transition layer [m] - integer :: k, k_bot_min, k_top_max !< k-indices, min and max for bottom and top, respectively - integer :: k_bot_max, k_top_min !< k-indices, max and min for bottom and top, respectively - integer :: k_bot_diff, k_top_diff !< different between left and right k-indices for bottom and top, respectively - integer :: k_top_L, k_bot_L !< k-indices left - integer :: k_top_R, k_bot_R !< k-indices right - real :: zeta_top_L, zeta_top_R !< distance from the top of a layer to the boundary - !! layer depth [nondim] - real :: zeta_bot_L, zeta_bot_R !< distance from the bottom of a layer to the boundary - !!layer depth [nondim] - real :: h_work_L, h_work_R !< dummy variables - real :: hbl_min !< minimum BLD (left and right) [m] - real :: wgt !< weight to be used in the linear transition to the interior [nondim] - real :: a !< coefficient to be used in the linear transition to the interior [nondim] - logical :: linear !< True if apply a linear transition + real, dimension(:), allocatable :: dz_top !< The LBD z grid to be created [L ~ m] + real, dimension(:), allocatable :: phi_L_z !< Tracer values in the ztop grid (left) [conc] + real, dimension(:), allocatable :: phi_R_z !< Tracer values in the ztop grid (right) [conc] + real, dimension(:), allocatable :: F_layer_z !< Diffusive flux at U- or V-points in the ztop grid + !! [H L2 conc ~> m3 conc] + real, dimension(ke) :: h_vel !< Thicknesses at u- and v-points in the native grid + !! The harmonic mean is used to avoid zero values [H ~> m or kg m-2] + real :: khtr_avg !< Thickness-weighted diffusivity at the u-point [m^2 s^-1] + !! This is just to remind developers that khtr_avg should be + !! computed once khtr is 3D. 
+ real :: htot !< Total column thickness [H ~> m or kg m-2] + integer :: k, k_bot_min, k_top_max !< k-indices, min and max for bottom and top, respectively + integer :: k_bot_max, k_top_min !< k-indices, max and min for bottom and top, respectively + integer :: k_bot_diff, k_top_diff !< different between left and right k-indices for bottom and top, respectively + integer :: k_top_L, k_bot_L !< k-indices left native grid + integer :: k_top_R, k_bot_R !< k-indices right native grid + real :: zeta_top_L, zeta_top_R !< distance from the top of a layer to the boundary + !! layer depth in the native grid [nondim] + real :: zeta_bot_L, zeta_bot_R !< distance from the bottom of a layer to the boundary + !!layer depth in the native grid [nondim] + real :: hbl_min !< minimum BLD (left and right) [m] + real :: wgt !< weight to be used in the linear transition to the interior [nondim] + real :: a !< coefficient to be used in the linear transition to the interior [nondim] + real :: tmp1, tmp2 !< dummy variables + real :: htot_max !< depth below which no fluxes should be applied + integer :: nk !< number of layers in the LBD grid F_layer(:) = 0.0 if (hbl_L == 0. .or. hbl_R == 0.) then return endif - linear = .false. - if (PRESENT(linear_decay)) then - linear = linear_decay - endif + ! Define vertical grid, dz_top + call merge_interfaces(ke, h_L(:), h_R(:), hbl_L, hbl_R, CS%H_subroundoff, dz_top) + nk = SIZE(dz_top) - ! Calculate vertical indices containing the boundary layer - call boundary_k_range(boundary, nk, h_L, hbl_L, k_top_L, zeta_top_L, k_bot_L, zeta_bot_L) - call boundary_k_range(boundary, nk, h_R, hbl_R, k_top_R, zeta_top_R, k_bot_R, zeta_bot_R) + ! allocate arrays + allocate(phi_L_z(nk)); phi_L_z(:) = 0.0 + allocate(phi_R_z(nk)); phi_R_z(:) = 0.0 + allocate(F_layer_z(nk)); F_layer_z(:) = 0.0 + + ! 
remap tracer to dz_top + call remapping_core_h(CS%remap_cs, ke, h_L(:), phi_L(:), nk, dz_top(:), phi_L_z(:)) + call remapping_core_h(CS%remap_cs, ke, h_R(:), phi_R(:), nk, dz_top(:), phi_R_z(:)) + + ! Calculate vertical indices containing the boundary layer in dz_top + call boundary_k_range(boundary, nk, dz_top, hbl_L, k_top_L, zeta_top_L, k_bot_L, zeta_bot_L) + call boundary_k_range(boundary, nk, dz_top, hbl_R, k_top_R, zeta_top_R, k_bot_R, zeta_bot_R) if (boundary == SURFACE) then k_bot_min = MIN(k_bot_L, k_bot_R) k_bot_max = MAX(k_bot_L, k_bot_R) k_bot_diff = (k_bot_max - k_bot_min) - ! make sure left and right k indices span same range - if (k_bot_min .ne. k_bot_L) then - k_bot_L = k_bot_min - zeta_bot_L = 1.0 - endif - if (k_bot_min .ne. k_bot_R) then - k_bot_R= k_bot_min - zeta_bot_R = 1.0 - endif - - h_work_L = (h_L(k_bot_L) * zeta_bot_L) - h_work_R = (h_R(k_bot_R) * zeta_bot_R) - - phi_L_avg = average_value_ppoly( nk, phi_L, ppoly0_E_L, ppoly0_coefs_L, method, k_bot_L, 0., zeta_bot_L) - phi_R_avg = average_value_ppoly( nk, phi_R, ppoly0_E_R, ppoly0_coefs_R, method, k_bot_R, 0., zeta_bot_R) - heff = harmonic_mean(h_work_L, h_work_R) ! tracer flux where the minimum BLD intersets layer - ! GMM, khtr_avg should be computed once khtr is 3D - if ((linear) .and. (k_bot_diff .gt. 1)) then + if ((CS%linear) .and. (k_bot_diff .gt. 1)) then ! apply linear decay at the base of hbl do k = k_bot_min,1,-1 - heff = harmonic_mean(h_L(k), h_R(k)) - F_layer(k) = -(heff * khtr_u) * (phi_R(k) - phi_L(k)) + F_layer_z(k) = -(dz_top(k) * khtr_u) * (phi_R_z(k) - phi_L_z(k)) + if (CS%limiter_remap) call flux_limiter(F_layer_z(k), area_L, area_R, phi_L_z(k), & + phi_R_z(k), dz_top(k), dz_top(k)) enddo - ! heff_total - heff_tot = 0.0 + htot = 0.0 do k = k_bot_min+1,k_bot_max, 1 - heff_tot = heff_tot + harmonic_mean(h_L(k), h_R(k)) + htot = htot + dz_top(k) enddo - a = -1.0/heff_tot - heff_tot = 0.0 + a = -1.0/htot + htot = 0. 
do k = k_bot_min+1,k_bot_max, 1 - heff = harmonic_mean(h_L(k), h_R(k)) - wgt = (a*(heff_tot + (heff * 0.5))) + 1.0 - F_layer(k) = -(heff * khtr_u) * (phi_R(k) - phi_L(k)) * wgt - heff_tot = heff_tot + heff + wgt = (a*(htot + (dz_top(k) * 0.5))) + 1.0 + F_layer_z(k) = -(dz_top(k) * khtr_u) * (phi_R_z(k) - phi_L_z(k)) * wgt + htot = htot + dz_top(k) + if (CS%limiter_remap) call flux_limiter(F_layer_z(k), area_L, area_R, phi_L_z(k), & + phi_R_z(k), dz_top(k), dz_top(k)) enddo else - F_layer(k_bot_min) = -(heff * khtr_u) * (phi_R_avg - phi_L_avg) - do k = k_bot_min-1,1,-1 - heff = harmonic_mean(h_L(k), h_R(k)) - F_layer(k) = -(heff * khtr_u) * (phi_R(k) - phi_L(k)) + do k = k_bot_min,1,-1 + F_layer_z(k) = -(dz_top(k) * khtr_u) * (phi_R_z(k) - phi_L_z(k)) + if (CS%limiter_remap) call flux_limiter(F_layer_z(k), area_L, area_R, phi_L_z(k), & + phi_R_z(k), dz_top(k), dz_top(k)) enddo endif endif - if (boundary == BOTTOM) then - ! TODO: GMM add option to apply linear decay - k_top_max = MAX(k_top_L, k_top_R) - ! make sure left and right k indices span same range - if (k_top_max .ne. k_top_L) then - k_top_L = k_top_max - zeta_top_L = 1.0 - endif - if (k_top_max .ne. k_top_R) then - k_top_R= k_top_max - zeta_top_R = 1.0 - endif - - h_work_L = (h_L(k_top_L) * zeta_top_L) - h_work_R = (h_R(k_top_R) * zeta_top_R) - - phi_L_avg = average_value_ppoly( nk, phi_L, ppoly0_E_L, ppoly0_coefs_L, method, k_top_L, 1.0-zeta_top_L, 1.0) - phi_R_avg = average_value_ppoly( nk, phi_R, ppoly0_E_R, ppoly0_coefs_R, method, k_top_R, 1.0-zeta_top_R, 1.0) - heff = harmonic_mean(h_work_L, h_work_R) - - ! tracer flux where the minimum BLD intersets layer - F_layer(k_top_max) = (-heff * khtr_u) * (phi_R_avg - phi_L_avg) - - do k = k_top_max+1,nk - heff = harmonic_mean(h_L(k), h_R(k)) - F_layer(k) = -(heff * khtr_u) * (phi_R(k) - phi_L(k)) - enddo - endif - -end subroutine fluxes_layer_method - -!> Apply the lateral boundary diffusive fluxes calculated from a 'bulk model' -!! 
See \ref section_method2 -subroutine fluxes_bulk_method(boundary, nk, deg, h_L, h_R, hbl_L, hbl_R, area_L, area_R, phi_L, phi_R, ppoly0_coefs_L, & - ppoly0_coefs_R, ppoly0_E_L, ppoly0_E_R, method, khtr_u, F_bulk, F_layer, F_limit, & - linear_decay) - - integer, intent(in ) :: boundary !< Which boundary layer SURFACE or BOTTOM [nondim] - integer, intent(in ) :: nk !< Number of layers [nondim] - integer, intent(in ) :: deg !< order of the polynomial reconstruction [nondim] - real, dimension(nk), intent(in ) :: h_L !< Layer thickness (left) [H ~> m or kg m-2] - real, dimension(nk), intent(in ) :: h_R !< Layer thickness (right) [H ~> m or kg m-2] - real, intent(in ) :: hbl_L !< Thickness of the boundary boundary - !! layer (left) [H ~> m or kg m-2] - real, intent(in ) :: hbl_R !< Thickness of the boundary boundary - !! layer (left) [H ~> m or kg m-2] - real, intent(in ) :: area_L !< Area of the horizontal grid (left) [L2 ~> m2] - real, intent(in ) :: area_R !< Area of the horizontal grid (right) [L2 ~> m2] - real, dimension(nk), intent(in ) :: phi_L !< Tracer values (left) [conc] - real, dimension(nk), intent(in ) :: phi_R !< Tracer values (right) [conc] - real, dimension(nk,deg+1), intent(in ) :: ppoly0_coefs_L !< Tracer reconstruction (left) [conc] - real, dimension(nk,deg+1), intent(in ) :: ppoly0_coefs_R !< Tracer reconstruction (right) [conc] - real, dimension(nk,2), intent(in ) :: ppoly0_E_L !< Polynomial edge values (left) [nondim] - real, dimension(nk,2), intent(in ) :: ppoly0_E_R !< Polynomial edge values (right) [nondim] - integer, intent(in ) :: method !< Method of polynomial integration [nondim] - real, intent(in ) :: khtr_u !< Horizontal diffusivities times delta t - !! at a velocity point [L2 ~> m2] - real, intent( out) :: F_bulk !< The bulk mixed layer lateral flux - !! [H L2 conc ~> m3 conc] - real, dimension(nk), intent( out) :: F_layer !< Layerwise diffusive flux at U- or V-point - !! 
[H L2 conc ~> m3 conc] - logical, optional, intent(in ) :: F_limit !< If True, apply a limiter - logical, optional, intent(in ) :: linear_decay !< If True, apply a linear transition at the base of - !! the boundary layer +! TODO, boundary == BOTTOM +! if (boundary == BOTTOM) then +! ! TODO: GMM add option to apply linear decay +! k_top_max = MAX(k_top_L, k_top_R) +! ! make sure left and right k indices span same range +! if (k_top_max .ne. k_top_L) then +! k_top_L = k_top_max +! zeta_top_L = 1.0 +! endif +! if (k_top_max .ne. k_top_R) then +! k_top_R= k_top_max +! zeta_top_R = 1.0 +! endif +! +! ! tracer flux where the minimum BLD intersets layer +! F_layer(k_top_max) = (-heff * khtr_u) * (phi_R_avg - phi_L_avg) +! +! do k = k_top_max+1,nk +! F_layer_z(k) = -(heff * khtr_u) * (phi_R_z(k) - phi_L_z(k)) +! enddo +! endif + + ! thicknesses at velocity points + do k = 1,ke + h_vel(k) = harmonic_mean(h_L(k), h_R(k)) + enddo - ! Local variables - real, dimension(nk) :: h_means !< Calculate the layer-wise harmonic means [H ~> m or kg m-2] - real :: khtr_avg !< Thickness-weighted diffusivity at the u-point [m^2 s^-1] - !! This is just to remind developers that khtr_avg should be - !! computed once khtr is 3D. - real :: heff !< Harmonic mean of layer thicknesses [H ~> m or kg m-2] - real :: heff_tot !< Total effective column thickness in the transition layer [m] - real :: inv_heff !< Inverse of the harmonic mean of layer thicknesses - !! [H-1 ~> m-1 or m2 kg-1] - real :: phi_L_avg, phi_R_avg !< Bulk, thickness-weighted tracer averages (left and right column) - !! [conc m^-3 ] - real :: htot ! Total column thickness [H ~> m or kg m-2] - integer :: k, k_min, k_max !< k-indices, min and max for top and bottom, respectively - integer :: k_diff !< difference between k_max and k_min - integer :: k_top_L, k_bot_L !< k-indices left - integer :: k_top_R, k_bot_R !< k-indices right - real :: zeta_top_L, zeta_top_R !< distance from the top of a layer to the - !! 
boundary layer [nondim] - real :: zeta_bot_L, zeta_bot_R !< distance from the bottom of a layer to the - !! boundary layer [nondim] - real :: h_work_L, h_work_R !< dummy variables - real :: F_max !< The maximum amount of flux that can leave a - !! cell [m^3 conc] - logical :: limiter !< True if flux limiter should be applied - logical :: linear !< True if apply a linear transition - real :: hfrac !< Layer fraction wrt sum of all layers [nondim] - real :: dphi !< tracer gradient [conc m^-3] - real :: wgt !< weight to be used in the linear transition to the - !! interior [nondim] - real :: a !< coefficient to be used in the linear transition to the - !! interior [nondim] - - F_bulk = 0. - F_layer(:) = 0. - if (hbl_L == 0. .or. hbl_R == 0.) then - return - endif + ! remap flux to h_vel (native grid) + call reintegrate_column(nk, dz_top(:), F_layer_z(:), ke, h_vel(:), 0.0, F_layer(:)) - limiter = .false. - if (PRESENT(F_limit)) then - limiter = F_limit - endif - linear = .false. - if (PRESENT(linear_decay)) then - linear = linear_decay + ! used to avoid fluxes below hbl + if (CS%linear) then + htot_max = MAX(hbl_L, hbl_R) + else + htot_max = MIN(hbl_L, hbl_R) endif - ! Calculate vertical indices containing the boundary layer - call boundary_k_range(boundary, nk, h_L, hbl_L, k_top_L, zeta_top_L, k_bot_L, zeta_bot_L) - call boundary_k_range(boundary, nk, h_R, hbl_R, k_top_R, zeta_top_R, k_bot_R, zeta_bot_R) - - ! Calculate bulk averages of various quantities - phi_L_avg = bulk_average(boundary, nk, deg, h_L, hbl_L, phi_L, ppoly0_E_L, ppoly0_coefs_L, method, k_top_L, & - zeta_top_L, k_bot_L, zeta_bot_L) - phi_R_avg = bulk_average(boundary, nk, deg, h_R, hbl_R, phi_R, ppoly0_E_R, ppoly0_coefs_R, method, k_top_R, & - zeta_top_R, k_bot_R, zeta_bot_R) - ! Calculate the 'bulk' diffusive flux from the bulk averaged quantities - ! GMM, khtr_avg should be computed once khtr is 3D - heff = harmonic_mean(hbl_L, hbl_R) - F_bulk = -(khtr_u * heff) * (phi_R_avg - phi_L_avg) - ! 
Calculate the layerwise sum of the vertical effective thickness. This is different than the heff calculated - ! above, but is used as a way to decompose the fluxes onto the individual layers - h_means(:) = 0. - if (boundary == SURFACE) then - k_min = MIN(k_bot_L, k_bot_R) - k_max = MAX(k_bot_L, k_bot_R) - k_diff = (k_max - k_min) - if ((linear) .and. (k_diff .gt. 1)) then - do k=1,k_min - h_means(k) = harmonic_mean(h_L(k),h_R(k)) - enddo - ! heff_total - heff_tot = 0.0 - do k = k_min+1,k_max, 1 - heff_tot = heff_tot + harmonic_mean(h_L(k), h_R(k)) - enddo - - a = -1.0/heff_tot - heff_tot = 0.0 - ! fluxes will decay linearly at base of hbl - do k = k_min+1,k_max, 1 - heff = harmonic_mean(h_L(k), h_R(k)) - wgt = (a*(heff_tot + (heff * 0.5))) + 1.0 - h_means(k) = harmonic_mean(h_L(k), h_R(k)) * wgt - heff_tot = heff_tot + heff - enddo - else - ! left hand side - if (k_bot_L == k_min) then - h_work_L = h_L(k_min) * zeta_bot_L - else - h_work_L = h_L(k_min) - endif - - ! right hand side - if (k_bot_R == k_min) then - h_work_R = h_R(k_min) * zeta_bot_R - else - h_work_R = h_R(k_min) - endif - - h_means(k_min) = harmonic_mean(h_work_L,h_work_R) - - do k=1,k_min-1 - h_means(k) = harmonic_mean(h_L(k),h_R(k)) - enddo - endif - - elseif (boundary == BOTTOM) then - !TODO, GMM linear decay is not implemented here - k_max = MAX(k_top_L, k_top_R) - ! left hand side - if (k_top_L == k_max) then - h_work_L = h_L(k_max) * zeta_top_L - else - h_work_L = h_L(k_max) + tmp1 = 0.0; tmp2 = 0.0 + do k = 1,ke + ! apply flux_limiter + if (CS%limiter .and. F_layer(k) /= 0.) then + call flux_limiter(F_layer(k), area_L, area_R, phi_L(k), phi_R(k), h_L(k), h_R(k)) endif - ! right hand side - if (k_top_R == k_max) then - h_work_R = h_R(k_max) * zeta_top_R - else - h_work_R = h_R(k_max) + ! if tracer point is below htot_max, set flux to zero + if (MAX(tmp1+(h_L(k)*0.5), tmp2+(h_R(k)*0.5)) > htot_max) then + F_layer(k) = 0. 
endif - h_means(k_max) = harmonic_mean(h_work_L,h_work_R) + tmp1 = tmp1 + h_L(k) + tmp2 = tmp2 + h_R(k) + enddo - do k=nk,k_max+1,-1 - h_means(k) = harmonic_mean(h_L(k),h_R(k)) - enddo - endif + ! deallocated arrays + deallocate(dz_top) + deallocate(phi_L_z) + deallocate(phi_R_z) + deallocate(F_layer_z) - if ( SUM(h_means) == 0. .or. F_bulk == 0.) then - return - ! Decompose the bulk flux onto the individual layers - else - ! Initialize remaining thickness - inv_heff = 1./SUM(h_means) - do k=1,nk - if ((h_means(k) > 0.) .and. (phi_L(k) /= phi_R(k))) then - hfrac = h_means(k)*inv_heff - F_layer(k) = F_bulk * hfrac - - if (limiter) then - ! limit the flux to 0.2 of the tracer *gradient* - ! Why 0.2? - ! t=0 t=inf - ! 0 .2 - ! 0 1 0 .2.2.2 - ! 0 .2 - ! - F_max = -0.2 * ((area_R*(phi_R(k)*h_R(k)))-(area_L*(phi_L(k)*h_R(k)))) - - ! check if bulk flux (or F_layer) and F_max have same direction - if ( SIGN(1.,F_bulk) == SIGN(1., F_max)) then - ! Apply flux limiter calculated above - if (F_max >= 0.) then - F_layer(k) = MIN(F_layer(k),F_max) - else - F_layer(k) = MAX(F_layer(k),F_max) - endif - else - ! do not apply a flux on this layer - F_layer(k) = 0. - endif - else - dphi = -(phi_R(k) - phi_L(k)) - if (.not. SIGN(1.,F_bulk) == SIGN(1., dphi)) then - ! upgradient, do not apply a flux on this layer - F_layer(k) = 0. - endif - endif ! limited - endif - enddo - endif - -end subroutine fluxes_bulk_method +end subroutine fluxes_layer_method !> Unit tests for near-boundary horizontal mixing logical function near_boundary_unit_tests( verbose ) @@ -808,32 +744,33 @@ logical function near_boundary_unit_tests( verbose ) ! Local variables integer, parameter :: nk = 2 ! Number of layers - integer, parameter :: deg = 1 ! Degree of reconstruction (linear here) - integer, parameter :: method = 1 ! Method used for integrating polynomials + real, dimension(nk+1) :: eta1 ! Updated interfaces with one extra value [m] + real, dimension(:), allocatable :: h1 ! 
Upates layer thicknesses [m] real, dimension(nk) :: phi_L, phi_R ! Tracer values (left and right column) [ nondim m^-3 ] - real, dimension(nk) :: phi_L_avg, phi_R_avg ! Bulk, thickness-weighted tracer averages (left and right column) - real, dimension(nk,deg+1) :: phi_pp_L, phi_pp_R ! Coefficients for the linear pseudo-reconstructions - ! [ nondim m^-3 ] - - real, dimension(nk,2) :: ppoly0_E_L, ppoly0_E_R! Polynomial edge values (left and right) [concentration] real, dimension(nk) :: h_L, h_R ! Layer thickness (left and right) [m] real :: khtr_u ! Horizontal diffusivities at U-point [m^2 s^-1] real :: hbl_L, hbl_R ! Depth of the boundary layer (left and right) [m] - real :: F_bulk ! Total diffusive flux across the U point [nondim s^-1] real, dimension(nk) :: F_layer ! Diffusive flux within each layer at U-point [nondim s^-1] - real :: h_u, hblt_u ! Thickness at the u-point [m] - real :: khtr_avg ! Thickness-weighted diffusivity at the u-point [m^2 s^-1] - real :: heff ! Harmonic mean of layer thicknesses [m] - real :: inv_heff ! Inverse of the harmonic mean of layer thicknesses [m^[-1] character(len=120) :: test_name ! Title of the unit test integer :: k_top ! Index of cell containing top of boundary real :: zeta_top ! Nondimension position integer :: k_bot ! Index of cell containing bottom of boundary real :: zeta_bot ! Nondimension position - real :: area_L,area_R ! Area of grid cell [m^2] - area_L = 1.; area_R = 1. ! Set to unity for all unit tests + type(lbd_CS), pointer :: CS + + allocate(CS) + ! fill required fields in CS + CS%linear=.false. + call initialize_remapping( CS%remap_CS, 'PLM', boundary_extrapolation = .true. ,& + check_reconstruction = .true., check_remapping = .true.) + call extract_member_remapping_CS(CS%remap_CS, degree=CS%deg) + CS%H_subroundoff = 1.0E-20 + CS%debug=.false. + CS%limiter=.false. + CS%limiter_remap=.false. near_boundary_unit_tests = .false. + write(stdout,*) '==== MOM_lateral_boundary_diffusion =======================' ! 
Unit tests for boundary_k_range test_name = 'Surface boundary spans the entire top cell' @@ -890,215 +827,153 @@ logical function near_boundary_unit_tests( verbose ) near_boundary_unit_tests = near_boundary_unit_tests .or. & test_boundary_k_range(k_top, zeta_top, k_bot, zeta_bot, 2, 0.25, 2, 0., test_name, verbose) - ! All cases in this section have hbl which are equal to the column thicknesses - test_name = 'Equal hbl and same layer thicknesses (gradient from right to left)' - hbl_L = 10; hbl_R = 10 - h_L = (/5.,5./) ; h_R = (/5.,5./) - phi_L = (/0.,0./) ; phi_R = (/1.,1./) - phi_pp_L(1,1) = 0.; phi_pp_L(1,2) = 0. - phi_pp_L(2,1) = 0.; phi_pp_L(2,2) = 0. - phi_pp_R(1,1) = 1.; phi_pp_R(1,2) = 0. - phi_pp_R(2,1) = 1.; phi_pp_R(2,2) = 0. - ppoly0_E_L(1,1) = 0.; ppoly0_E_L(1,2) = 0. - ppoly0_E_L(2,1) = 0.; ppoly0_E_L(2,2) = 0. - ppoly0_E_R(1,1) = 1.; ppoly0_E_R(1,2) = 1. - ppoly0_E_R(2,1) = 1.; ppoly0_E_R(2,2) = 1. - khtr_u = 1. - ! Without limiter - call fluxes_bulk_method(SURFACE, nk, deg, h_L, h_R, hbl_L, hbl_R, area_L, area_R, phi_L, phi_R, phi_pp_L, phi_pp_R, & - ppoly0_E_L, ppoly0_E_R, method, khtr_u, F_bulk, F_layer) + if (.not. near_boundary_unit_tests) write(stdout,*) 'Passed boundary_k_range' + + ! unit tests for sorting array and finding unique values + test_name = 'Sorting array' + eta1 = (/1., 0., 0.1/) + call sort(eta1, nk+1) near_boundary_unit_tests = near_boundary_unit_tests .or. & - test_layer_fluxes( verbose, nk, test_name, F_layer, (/-5.0,-5.0/) ) + test_layer_fluxes( verbose, nk+1, test_name, eta1, (/0., 0.1, 1./) ) - ! same as above, but with limiter - call fluxes_bulk_method(SURFACE, nk, deg, h_L, h_R, hbl_L, hbl_R, area_L, area_R, phi_L, phi_R, phi_pp_L, phi_pp_R, & - ppoly0_E_L, ppoly0_E_R, method, khtr_u, F_bulk, F_layer, .true.) + test_name = 'Unique values' + call unique((/0., 1., 1., 2./), nk+2, h1) near_boundary_unit_tests = near_boundary_unit_tests .or. 
& - test_layer_fluxes( verbose, nk, test_name, F_layer, (/-1.0,-1.0/) ) + test_layer_fluxes( verbose, nk+1, test_name, h1, (/0., 1., 2./) ) + deallocate(h1) - test_name = 'Equal hbl and same layer thicknesses (gradient from left to right)' - hbl_L = 10.; hbl_R = 10. - h_L = (/5.,5./) ; h_R = (/5.,5./) - phi_L = (/1.,1./) ; phi_R = (/0.,0./) - phi_pp_L(1,1) = 1.; phi_pp_L(1,2) = 0. - phi_pp_L(2,1) = 1.; phi_pp_L(2,2) = 0. - phi_pp_R(1,1) = 0.; phi_pp_R(1,2) = 0. - phi_pp_R(2,1) = 0.; phi_pp_R(2,2) = 0. - ppoly0_E_L(1,1) = 1.; ppoly0_E_L(1,2) = 1. - ppoly0_E_L(2,1) = 1.; ppoly0_E_L(2,2) = 1. - ppoly0_E_R(1,1) = 0.; ppoly0_E_R(1,2) = 0. - ppoly0_E_R(2,1) = 0.; ppoly0_E_R(2,2) = 0. - khtr_u = 1. - call fluxes_bulk_method(SURFACE, nk, deg, h_L, h_R, hbl_L, hbl_R, area_L, area_R, phi_L, phi_R, phi_pp_L, phi_pp_R,& - ppoly0_E_L, ppoly0_E_R, method, khtr_u, F_bulk, F_layer) + test_name = 'Unique values with maximum depth' + call unique((/0., 1., 1., 2., 3./), nk+3, h1, 2.) near_boundary_unit_tests = near_boundary_unit_tests .or. & - test_layer_fluxes( verbose, nk, test_name, F_layer, (/5.0,5.0/) ) + test_layer_fluxes( verbose, nk+1, test_name, h1, (/0., 1., 2./) ) + deallocate(h1) - test_name = 'Equal hbl and same layer thicknesses (no gradient)' - hbl_L = 10; hbl_R = 10 - h_L = (/5.,5./) ; h_R = (/5.,5./) - phi_L = (/1.,1./) ; phi_R = (/1.,1./) - phi_pp_L(1,1) = 1.; phi_pp_L(1,2) = 0. - phi_pp_L(2,1) = 1.; phi_pp_L(2,2) = 0. - phi_pp_R(1,1) = 1.; phi_pp_R(1,2) = 0. - phi_pp_R(2,1) = 1.; phi_pp_R(2,2) = 0. - ppoly0_E_L(1,1) = 1.; ppoly0_E_L(1,2) = 1. - ppoly0_E_L(2,1) = 1.; ppoly0_E_L(2,2) = 1. - ppoly0_E_R(1,1) = 1.; ppoly0_E_R(1,2) = 1. - ppoly0_E_R(2,1) = 1.; ppoly0_E_R(2,2) = 1. - khtr_u = 1. - call fluxes_bulk_method(SURFACE, nk, deg, h_L, h_R, hbl_L, hbl_R, area_L, area_R, phi_L, phi_R, phi_pp_L, phi_pp_R,& - ppoly0_E_L, ppoly0_E_R, method, khtr_u, F_bulk, F_layer) + if (.not. near_boundary_unit_tests) write(stdout,*) 'Passed sort and unique' + + ! 
unit tests for merge_interfaces + test_name = 'h_L = h_R and BLD_L = BLD_R' + call merge_interfaces(nk, (/1., 2./), (/1., 2./), 1.5, 1.5, CS%H_subroundoff, h1) near_boundary_unit_tests = near_boundary_unit_tests .or. & - test_layer_fluxes( verbose, nk, test_name, F_layer, (/0.0,0.0/) ) + test_layer_fluxes( verbose, nk, test_name, h1, (/1., 0.5/) ) + deallocate(h1) - test_name = 'Equal hbl and different layer thicknesses (gradient right to left)' - hbl_L = 16.; hbl_R = 16. - h_L = (/10.,6./) ; h_R = (/6.,10./) - phi_L = (/0.,0./) ; phi_R = (/1.,1./) - phi_pp_L(1,1) = 0.; phi_pp_L(1,2) = 0. - phi_pp_L(2,1) = 0.; phi_pp_L(2,2) = 0. - phi_pp_R(1,1) = 1.; phi_pp_R(1,2) = 0. - phi_pp_R(2,1) = 1.; phi_pp_R(2,2) = 0. - ppoly0_E_L(1,1) = 0.; ppoly0_E_L(1,2) = 0. - ppoly0_E_L(2,1) = 0.; ppoly0_E_L(2,2) = 0. - ppoly0_E_R(1,1) = 1.; ppoly0_E_R(1,2) = 1. - ppoly0_E_R(2,1) = 1.; ppoly0_E_R(2,2) = 1. - khtr_u = 1. - call fluxes_bulk_method(SURFACE, nk, deg, h_L, h_R, hbl_L, hbl_R, area_L, area_R, phi_L, phi_R, phi_pp_L, phi_pp_R,& - ppoly0_E_L, ppoly0_E_R, method, khtr_u, F_bulk, F_layer) + test_name = 'h_L = h_R and BLD_L /= BLD_R' + call merge_interfaces(nk, (/1., 2./), (/1., 2./), 0.5, 1.5, CS%H_subroundoff, h1) near_boundary_unit_tests = near_boundary_unit_tests .or. & - test_layer_fluxes( verbose, nk, test_name, F_layer, (/-8.0,-8.0/) ) - - test_name = 'Equal hbl and same layer thicknesses (diagonal tracer values)' - hbl_L = 10.; hbl_R = 10. - h_L = (/5.,5./) ; h_R = (/5.,5./) - phi_L = (/1.,0./) ; phi_R = (/0.,1./) - phi_pp_L(1,1) = 1.; phi_pp_L(1,2) = 0. - phi_pp_L(2,1) = 0.; phi_pp_L(2,2) = 0. - phi_pp_R(1,1) = 0.; phi_pp_R(1,2) = 0. - phi_pp_R(2,1) = 1.; phi_pp_R(2,2) = 0. - ppoly0_E_L(1,1) = 1.; ppoly0_E_L(1,2) = 1. - ppoly0_E_L(2,1) = 0.; ppoly0_E_L(2,2) = 0. - ppoly0_E_R(1,1) = 0.; ppoly0_E_R(1,2) = 0. - ppoly0_E_R(2,1) = 1.; ppoly0_E_R(2,2) = 1. - khtr_u = 1. 
- call fluxes_bulk_method(SURFACE, nk, deg, h_L, h_R, hbl_L, hbl_R, area_L, area_R, phi_L, phi_R, phi_pp_L, phi_pp_R,& - ppoly0_E_L, ppoly0_E_R, method, khtr_u, F_bulk, F_layer) + test_layer_fluxes( verbose, nk+1, test_name, h1, (/0.5, 0.5, 0.5/) ) + deallocate(h1) + + test_name = 'h_L /= h_R and BLD_L = BLD_R' + call merge_interfaces(nk, (/1., 3./), (/2., 2./), 1.5, 1.5, CS%H_subroundoff, h1) near_boundary_unit_tests = near_boundary_unit_tests .or. & - test_layer_fluxes( verbose, nk, test_name, F_layer, (/0.0,0.0/) ) + test_layer_fluxes( verbose, nk, test_name, h1, (/1., 0.5/) ) + deallocate(h1) - test_name = 'Different hbl and different layer thicknesses (gradient from right to left)' - hbl_L = 12; hbl_R = 20 - h_L = (/6.,6./) ; h_R = (/10.,10./) - phi_L = (/0.,0./) ; phi_R = (/1.,1./) - phi_pp_L(1,1) = 0.; phi_pp_L(1,2) = 0. - phi_pp_L(2,1) = 0.; phi_pp_L(2,2) = 0. - phi_pp_R(1,1) = 1.; phi_pp_R(1,2) = 0. - phi_pp_R(2,1) = 1.; phi_pp_R(2,2) = 0. - ppoly0_E_L(1,1) = 0.; ppoly0_E_L(1,2) = 0. - ppoly0_E_L(2,1) = 0.; ppoly0_E_L(2,2) = 0. - ppoly0_E_R(1,1) = 1.; ppoly0_E_R(1,2) = 1. - ppoly0_E_R(2,1) = 1.; ppoly0_E_R(2,2) = 1. - khtr_u = 1. - call fluxes_bulk_method(SURFACE, nk, deg, h_L, h_R, hbl_L, hbl_R, area_L, area_R, phi_L, phi_R, phi_pp_L, phi_pp_R,& - ppoly0_E_L, ppoly0_E_R, method, khtr_u, F_bulk, F_layer) + test_name = 'h_L /= h_R and BLD_L /= BLD_R' + call merge_interfaces(nk, (/1., 3./), (/2., 2./), 0.5, 1.5, CS%H_subroundoff, h1) near_boundary_unit_tests = near_boundary_unit_tests .or. & - test_layer_fluxes( verbose, nk, test_name, F_layer, (/-7.5,-7.5/) ) + test_layer_fluxes( verbose, nk+1, test_name, h1, (/0.5, 0.5, 0.5/) ) + deallocate(h1) - ! 
Cases where hbl < column thickness (polynomial coefficients specified for pseudo-linear reconstruction) + test_name = 'Left deeper than right, h_L /= h_R and BLD_L /= BLD_R' + call merge_interfaces(nk, (/2., 3./), (/2., 2./), 1.0, 2.0, CS%H_subroundoff, h1) + near_boundary_unit_tests = near_boundary_unit_tests .or. & + test_layer_fluxes( verbose, nk, test_name, h1, (/1., 1./) ) + deallocate(h1) - test_name = 'hbl < column thickness, hbl same, constant concentration each column' - hbl_L = 2; hbl_R = 2 - h_L = (/1.,2./) ; h_R = (/1.,2./) + test_name = 'Left has zero thickness, h_L /= h_R and BLD_L = BLD_R' + call merge_interfaces(nk, (/4., 0./), (/2., 2./), 2.0, 2.0, CS%H_subroundoff, h1) + near_boundary_unit_tests = near_boundary_unit_tests .or. & + test_layer_fluxes( verbose, nk-1, test_name, h1, (/2./) ) + deallocate(h1) + + test_name = 'Left has zero thickness, h_L /= h_R and BLD_L /= BLD_R' + call merge_interfaces(nk, (/4., 0./), (/2., 2./), 1.0, 2.0, CS%H_subroundoff, h1) + near_boundary_unit_tests = near_boundary_unit_tests .or. & + test_layer_fluxes( verbose, nk, test_name, h1, (/1., 1./) ) + deallocate(h1) + + test_name = 'Right has zero thickness, h_L /= h_R and BLD_L = BLD_R' + call merge_interfaces(nk, (/2., 2./), (/0., 4./), 2.0, 2.0, CS%H_subroundoff, h1) + near_boundary_unit_tests = near_boundary_unit_tests .or. & + test_layer_fluxes( verbose, nk-1, test_name, h1, (/2./) ) + deallocate(h1) + + test_name = 'Right has zero thickness, h_L /= h_R and BLD_L /= BLD_R' + call merge_interfaces(nk, (/2., 2./), (/0., 4./), 1.0, 2.0, CS%H_subroundoff, h1) + near_boundary_unit_tests = near_boundary_unit_tests .or. & + test_layer_fluxes( verbose, nk, test_name, h1, (/1., 1./) ) + deallocate(h1) + + test_name = 'Right deeper than left, h_L /= h_R and BLD_L = BLD_R' + call merge_interfaces(nk+1, (/2., 2., 0./), (/2., 2., 1./), 4., 4., CS%H_subroundoff, h1) + near_boundary_unit_tests = near_boundary_unit_tests .or. 
& + test_layer_fluxes( verbose, nk, test_name, h1, (/2., 2./) ) + deallocate(h1) + + test_name = 'Right and left small values at bottom, h_L /= h_R and BLD_L = BLD_R' + call merge_interfaces(nk+2, (/2., 2., 1., 1./), (/1., 1., .5, .5/), 3., 3., CS%H_subroundoff, h1) + near_boundary_unit_tests = near_boundary_unit_tests .or. & + test_layer_fluxes( verbose, nk+2, test_name, h1, (/1., 1., .5, .5/) ) + deallocate(h1) + + if (.not. near_boundary_unit_tests) write(stdout,*) 'Passed merge interfaces' + + ! All cases in this section have hbl which are equal to the column thicknesses + test_name = 'Equal hbl and same layer thicknesses (gradient from right to left)' + hbl_L = 2.; hbl_R = 2. + h_L = (/2.,2./) ; h_R = (/2.,2./) phi_L = (/0.,0./) ; phi_R = (/1.,1./) - phi_pp_L(1,1) = 0.; phi_pp_L(1,2) = 0. - phi_pp_L(2,1) = 0.; phi_pp_L(2,2) = 0. - phi_pp_R(1,1) = 1.; phi_pp_R(1,2) = 0. - phi_pp_R(2,1) = 1.; phi_pp_R(2,2) = 0. - ppoly0_E_L(1,1) = 0.; ppoly0_E_L(1,2) = 0. - ppoly0_E_L(2,1) = 0.; ppoly0_E_L(2,2) = 0. - ppoly0_E_R(1,1) = 1.; ppoly0_E_R(1,2) = 1. - ppoly0_E_R(2,1) = 1.; ppoly0_E_R(2,2) = 1. khtr_u = 1. - call fluxes_bulk_method(SURFACE, nk, deg, h_L, h_R, hbl_L, hbl_R, area_L, area_R, phi_L, phi_R, phi_pp_L, phi_pp_R,& - ppoly0_E_L, ppoly0_E_R, method, khtr_u, F_bulk, F_layer) + call fluxes_layer_method(SURFACE, nk, hbl_L, hbl_R, h_L, h_R, phi_L, phi_R, & + khtr_u, F_layer, 1., 1., CS) near_boundary_unit_tests = near_boundary_unit_tests .or. & - test_layer_fluxes( verbose, nk, test_name, F_layer, (/-1.,-1./) ) + test_layer_fluxes( verbose, nk, test_name, F_layer, (/-2.0,0.0/) ) - test_name = 'hbl < column thickness, hbl same, linear profile right' - hbl_L = 2; hbl_R = 2 - h_L = (/1.,2./) ; h_R = (/1.,2./) - phi_L = (/0.,0./) ; phi_R = (/0.5,2./) - phi_pp_L(1,1) = 0.; phi_pp_L(1,2) = 0. - phi_pp_L(2,1) = 0.; phi_pp_L(2,2) = 0. - phi_pp_R(1,1) = 0.; phi_pp_R(1,2) = 1. - phi_pp_R(2,1) = 1.; phi_pp_R(2,2) = 2. - khtr_u = 1. - ppoly0_E_L(1,1) = 0.; ppoly0_E_L(1,2) = 0. 
- ppoly0_E_L(2,1) = 0.; ppoly0_E_L(2,2) = 0. - ppoly0_E_R(1,1) = 0.; ppoly0_E_R(1,2) = 1. - ppoly0_E_R(2,1) = 1.; ppoly0_E_R(2,2) = 3. - call fluxes_bulk_method(SURFACE, nk, deg, h_L, h_R, hbl_L, hbl_R, area_L, area_R, phi_L, phi_R, phi_pp_L, phi_pp_R,& - ppoly0_E_L, ppoly0_E_R, method, khtr_u, F_bulk, F_layer) + test_name = 'Equal hbl and same layer thicknesses (gradient from left to right)' + hbl_L = 2.; hbl_R = 2. + h_L = (/2.,2./) ; h_R = (/2.,2./) + phi_L = (/2.,1./) ; phi_R = (/1.,1./) + khtr_u = 0.5 + call fluxes_layer_method(SURFACE, nk, hbl_L, hbl_R, h_L, h_R, phi_L, phi_R, & + khtr_u, F_layer, 1., 1., CS) near_boundary_unit_tests = near_boundary_unit_tests .or. & - test_layer_fluxes( verbose, nk, test_name, F_layer, (/-1.,-1./) ) + test_layer_fluxes( verbose, nk, test_name, F_layer, (/1.0,0.0/) ) test_name = 'hbl < column thickness, hbl same, linear profile right, khtr=2' hbl_L = 2; hbl_R = 2 h_L = (/1.,2./) ; h_R = (/1.,2./) phi_L = (/0.,0./) ; phi_R = (/0.5,2./) - phi_pp_L(1,1) = 0.; phi_pp_L(1,2) = 0. - phi_pp_L(2,1) = 0.; phi_pp_L(2,2) = 0. - phi_pp_R(1,1) = 0.; phi_pp_R(1,2) = 1. - phi_pp_R(2,1) = 1.; phi_pp_R(2,2) = 2. khtr_u = 2. - ppoly0_E_L(1,1) = 0.; ppoly0_E_L(1,2) = 0. - ppoly0_E_L(2,1) = 0.; ppoly0_E_L(2,2) = 0. - ppoly0_E_R(1,1) = 0.; ppoly0_E_R(1,2) = 1. - ppoly0_E_R(2,1) = 1.; ppoly0_E_R(2,2) = 3. - call fluxes_layer_method(SURFACE, nk, deg, h_L, h_R, hbl_L, hbl_R, area_L, area_R, phi_L, phi_R, phi_pp_L, & - phi_pp_R, ppoly0_E_L, ppoly0_E_R, method, khtr_u, F_layer) + call fluxes_layer_method(SURFACE, nk, hbl_L, hbl_R, h_L, h_R, phi_L, phi_R, & + khtr_u, F_layer, 1., 1., CS) near_boundary_unit_tests = near_boundary_unit_tests .or. & - test_layer_fluxes( verbose, nk, test_name, F_layer, (/-1.,-3./) ) + test_layer_fluxes( verbose, nk, test_name, F_layer, (/-1.0,-4.0/) ) - ! 
unit tests for layer by layer method - test_name = 'Different hbl and different column thicknesses (gradient from right to left)' + test_name = 'Different hbl and different column thicknesses (zero gradient)' hbl_L = 12; hbl_R = 20 h_L = (/6.,6./) ; h_R = (/10.,10./) - phi_L = (/0.,0./) ; phi_R = (/1.,1./) - phi_pp_L(1,1) = 0.; phi_pp_L(1,2) = 0. - phi_pp_L(2,1) = 0.; phi_pp_L(2,2) = 0. - phi_pp_R(1,1) = 1.; phi_pp_R(1,2) = 0. - phi_pp_R(2,1) = 1.; phi_pp_R(2,2) = 0. - ppoly0_E_L(1,1) = 0.; ppoly0_E_L(1,2) = 0. - ppoly0_E_L(2,1) = 0.; ppoly0_E_L(2,2) = 0. - ppoly0_E_R(1,1) = 1.; ppoly0_E_R(1,2) = 1. - ppoly0_E_R(2,1) = 1.; ppoly0_E_R(2,2) = 1. + phi_L = (/1.,1./) ; phi_R = (/1.,1./) khtr_u = 1. - call fluxes_layer_method(SURFACE, nk, deg, h_L, h_R, hbl_L, hbl_R, area_L, area_R, phi_L, phi_R, phi_pp_L, & - phi_pp_R, ppoly0_E_L, ppoly0_E_R, method, khtr_u, F_layer) + call fluxes_layer_method(SURFACE, nk, hbl_L, hbl_R, h_L, h_R, phi_L, phi_R, & + khtr_u, F_layer, 1., 1., CS) near_boundary_unit_tests = near_boundary_unit_tests .or. & - test_layer_fluxes( verbose, nk, test_name, F_layer, (/-7.5,-7.5/) ) - - test_name = 'Different hbl and different column thicknesses (linear profile right)' - - hbl_L = 15; hbl_R = 6 - h_L = (/10.,10./) ; h_R = (/12.,10./) - phi_L = (/0.,0./) ; phi_R = (/1.,3./) - phi_pp_L(1,1) = 0.; phi_pp_L(1,2) = 0. - phi_pp_L(2,1) = 0.; phi_pp_L(2,2) = 0. - phi_pp_R(1,1) = 0.; phi_pp_R(1,2) = 2. - phi_pp_R(2,1) = 2.; phi_pp_R(2,2) = 2. - ppoly0_E_L(1,1) = 0.; ppoly0_E_L(1,2) = 0. - ppoly0_E_L(2,1) = 0.; ppoly0_E_L(2,2) = 0. - ppoly0_E_R(1,1) = 0.; ppoly0_E_R(1,2) = 2. - ppoly0_E_R(2,1) = 2.; ppoly0_E_R(2,2) = 4. + test_layer_fluxes( verbose, nk, test_name, F_layer, (/0.,0./) ) + + test_name = 'Different hbl and different column thicknesses (gradient from left to right)' + + hbl_L = 15; hbl_R = 10. + h_L = (/10.,5./) ; h_R = (/10.,0./) + phi_L = (/1.,1./) ; phi_R = (/0.,0./) khtr_u = 1. 
- call fluxes_layer_method(SURFACE, nk, deg, h_L, h_R, hbl_L, hbl_R, area_L, area_R, phi_L, phi_R, phi_pp_L, & - phi_pp_R, ppoly0_E_L, ppoly0_E_R, method, khtr_u, F_layer) + call fluxes_layer_method(SURFACE, nk, hbl_L, hbl_R, h_L, h_R, phi_L, phi_R, & + khtr_u, F_layer, 1., 1., CS) + near_boundary_unit_tests = near_boundary_unit_tests .or. & - test_layer_fluxes( verbose, nk, test_name, F_layer, (/-3.75,0.0/) ) + test_layer_fluxes( verbose, nk, test_name, F_layer, (/10.,0.0/) ) + + if (.not. near_boundary_unit_tests) write(stdout,*) 'Passed fluxes_layer_method' + end function near_boundary_unit_tests !> Returns true if output of near-boundary unit tests does not match correct computed values @@ -1111,16 +986,15 @@ logical function test_layer_fluxes(verbose, nk, test_name, F_calc, F_ans) real, dimension(nk), intent(in) :: F_ans !< Fluxes of the unitless tracer calculated by hand [s^-1] ! Local variables integer :: k - integer, parameter :: stdunit = stdout test_layer_fluxes = .false. do k=1,nk if ( F_calc(k) /= F_ans(k) ) then test_layer_fluxes = .true. - write(stdunit,*) "MOM_lateral_boundary_diffusion, UNIT TEST FAILED: ", test_name - write(stdunit,10) k, F_calc(k), F_ans(k) + write(stdout,*) "MOM_lateral_boundary_diffusion, UNIT TEST FAILED: ", test_name + write(stdout,10) k, F_calc(k), F_ans(k) elseif (verbose) then - write(stdunit,10) k, F_calc(k), F_ans(k) + write(stdout,10) k, F_calc(k), F_ans(k) endif enddo @@ -1141,19 +1015,17 @@ logical function test_boundary_k_range(k_top, zeta_top, k_bot, zeta_bot, k_top_a character(len=80) :: test_name !< Name of the unit test logical :: verbose !< If true always print output - integer, parameter :: stdunit = stdout - test_boundary_k_range = k_top .ne. k_top_ans test_boundary_k_range = test_boundary_k_range .or. (zeta_top .ne. zeta_top_ans) test_boundary_k_range = test_boundary_k_range .or. (k_bot .ne. k_bot_ans) test_boundary_k_range = test_boundary_k_range .or. (zeta_bot .ne. 
zeta_bot_ans) - if (test_boundary_k_range) write(stdunit,*) "UNIT TEST FAILED: ", test_name + if (test_boundary_k_range) write(stdout,*) "UNIT TEST FAILED: ", test_name if (test_boundary_k_range .or. verbose) then - write(stdunit,20) "k_top", k_top, "k_top_ans", k_top_ans - write(stdunit,20) "k_bot", k_bot, "k_bot_ans", k_bot_ans - write(stdunit,30) "zeta_top", zeta_top, "zeta_top_ans", zeta_top_ans - write(stdunit,30) "zeta_bot", zeta_bot, "zeta_bot_ans", zeta_bot_ans + write(stdout,20) "k_top", k_top, "k_top_ans", k_top_ans + write(stdout,20) "k_bot", k_bot, "k_bot_ans", k_bot_ans + write(stdout,30) "zeta_top", zeta_top, "zeta_top_ans", zeta_top_ans + write(stdout,30) "zeta_bot", zeta_bot, "zeta_bot_ans", zeta_bot_ans endif 20 format(A,"=",i3,X,A,"=",i3) @@ -1169,24 +1041,28 @@ end function test_boundary_k_range !! The LBD framework accounts for the effects of diabatic mesoscale fluxes !! within surface and bottom boundary layers. Unlike the equivalent adiabatic !! fluxes, which is applied along neutral density surfaces, LBD is purely -!! horizontal. +!! horizontal. To assure that diffusive fluxes are strictly horizontal +!! regardless of the vertical coordinate system, this method relies on +!! regridding/remapping techniques. !! -!! The bottom boundary layer fluxes remain to be implemented, although most +!! The bottom boundary layer fluxes remain to be implemented, although some !! of the steps needed to do so have already been added and tested. !! -!! Boundary lateral diffusion can be applied using one of the three methods: +!! Boundary lateral diffusion is applied as follows: !! -!! * [Method #1: Along layer](@ref section_method2) (default); -!! * [Method #2: Bulk layer](@ref section_method1); +!! 1) remap tracer to a z* grid (LBD grid) +!! 2) calculate diffusive tracer fluxes (F) in the LBD grid using a layer by layer approach (@ref section_method) +!! 3) remap fluxes to the native grid +!! 4) update tracer by adding the divergence of F !! -!! 
A brief summary of these methods is provided below. +!! \subsection section_method Along layer approach !! -!! \subsection section_method1 Along layer approach (Method #1) +!! Here diffusion is applied layer by layer using only information from neighboring cells. !! -!! This is the recommended and more straight forward method where diffusion is -!! applied layer by layer using only information from neighboring cells. +!! Step #1: define vertical grid using interfaces and surface boundary layers from left and right +!! columns (see merge_interfaces). !! -!! Step #1: compute vertical indices containing boundary layer (boundary_k_range). +!! Step #2: compute vertical indices containing boundary layer (boundary_k_range). !! For the TOP boundary layer, these are: !! !! k_top, k_bot, zeta_top, zeta_bot @@ -1195,9 +1071,7 @@ end function test_boundary_k_range !! !! \f[ F_{k} = -KHTR \times h_{eff}(k) \times (\phi_R(k) - \phi_L(k)), \f] !! where h_eff is the [harmonic mean](@ref section_harmonic_mean) of the layer thickness -!! in the left and right columns. This method does not require a limiter since KHTR -!! is already limted based on a diffusive CFL condition prior to the call of this -!! module. +!! in the left and right columns. !! !! Step #3: option to linearly decay the flux from k_bot_min to k_bot_max: !! @@ -1206,44 +1080,9 @@ end function test_boundary_k_range !! layer depth (k_bot_min) and the lower interface of the layer containing the !! maximum layer depth (k_bot_max). !! -!! \subsection section_method2 Bulk layer approach (Method #2) -!! -!! Apply the lateral boundary diffusive fluxes calculated from a 'bulk model'.This -!! is a lower order representation (Kraus-Turner like approach) which assumes that -!! eddies are acting along well mixed layers (i.e., eddies do not know care about -!! vertical tracer gradients within the boundary layer). -!! -!! Step #1: compute vertical indices containing boundary layer (boundary_k_range). -!! 
For the TOP boundary layer, these are: -!! -!! k_top, k_bot, zeta_top, zeta_bot -!! -!! Step #2: compute bulk averages (thickness weighted) tracer averages (phi_L and phi_R), -!! then calculate the bulk diffusive flux (F_{bulk}): -!! -!! \f[ F_{bulk} = -KHTR \times h_{eff} \times (\phi_R - \phi_L), \f] -!! where h_eff is the [harmonic mean](@ref section_harmonic_mean) of the boundary layer depth -!! in the left and right columns (\f[ HBL_L \f] and \f[ HBL_R \f], respectively). -!! -!! Step #3: decompose F_bulk onto individual layers: -!! -!! \f[ F_{layer}(k) = F_{bulk} \times h_{frac}(k) , \f] -!! -!! where h_{frac} is -!! -!! \f[ h_{frac}(k) = h_u(k) \times \frac{1}{\sum(h_u)}. \f] -!! -!! h_u is the [harmonic mean](@ref section_harmonic_mean) of thicknesses at each layer. -!! Special care (layer reconstruction) must be taken at k_min = min(k_botL, k_bot_R). -!! -!! Step #4: option to linearly decay the flux from k_bot_min to k_bot_max: -!! -!! If LBD_LINEAR_TRANSITION = True and k_bot_diff > 1, the diffusive flux will decay -!! linearly between the top interface of the layer containing the minimum boundary -!! layer depth (k_bot_min) and the lower interface of the layer containing the -!! maximum layer depth (k_bot_max). -!! -!! Step #5: limit the tracer flux so that 1) only down-gradient fluxes are applied, +!! Step #4: remap the fluxes back to the native grid. This is done at velocity points, whose vertical grid +!! is determined using [harmonic mean](@ref section_harmonic_mean). To assure monotonicity, +!! tracer fluxes are limited so that 1) only down-gradient fluxes are applied, !! and 2) the flux cannot be larger than F_max, which is defined using the tracer !! gradient: !! 
diff --git a/src/tracer/MOM_neutral_diffusion.F90 b/src/tracer/MOM_neutral_diffusion.F90 index 67a2519f78..50ce18eb57 100644 --- a/src/tracer/MOM_neutral_diffusion.F90 +++ b/src/tracer/MOM_neutral_diffusion.F90 @@ -28,8 +28,7 @@ module MOM_neutral_diffusion use MOM_energetic_PBL, only : energetic_PBL_get_MLD, energetic_PBL_CS use MOM_diabatic_driver, only : diabatic_CS, extract_diabatic_member use MOM_lateral_boundary_diffusion, only : boundary_k_range, SURFACE, BOTTOM - -use iso_fortran_env, only : stdout=>output_unit, stderr=>error_unit +use MOM_io, only : stdout, stderr implicit none ; private @@ -229,9 +228,6 @@ logical function neutral_diffusion_init(Time, G, GV, US, param_file, diag, EOS, default = .true.) endif - ! Store a rescaling factor for use in diagnostic messages. - CS%R_to_kg_m3 = US%R_to_kg_m3 - if (CS%interior_only) then call extract_diabatic_member(diabatic_CSp, KPP_CSp=CS%KPP_CSp) call extract_diabatic_member(diabatic_CSp, energetic_PBL_CSp=CS%energetic_PBL_CSp) @@ -239,6 +235,8 @@ logical function neutral_diffusion_init(Time, G, GV, US, param_file, diag, EOS, call MOM_error(FATAL,"NDIFF_INTERIOR_ONLY is true, but no valid boundary layer scheme was found") endif endif + ! Store a rescaling factor for use in diagnostic messages. + CS%R_to_kg_m3 = US%R_to_kg_m3 ! call get_param(param_file, mdl, "KHTR", CS%KhTr, & ! "The background along-isopycnal tracer diffusivity.", & @@ -317,11 +315,10 @@ subroutine neutral_diffusion_calc_coeffs(G, GV, US, h, T, S, CS, p_surf) ! Check if hbl needs to be extracted if (CS%interior_only) then - hbl(:,:) = 0. 
if (ASSOCIATED(CS%KPP_CSp)) call KPP_get_BLD(CS%KPP_CSp, hbl, G, US, m_to_BLD_units=GV%m_to_H) - if (ASSOCIATED(CS%energetic_PBL_CSp)) & - call energetic_PBL_get_MLD(CS%energetic_PBL_CSp, hbl, G, US, m_to_MLD_units=GV%m_to_H) - call pass_var(hbl, G%Domain) + if (ASSOCIATED(CS%energetic_PBL_CSp)) call energetic_PBL_get_MLD(CS%energetic_PBL_CSp, hbl, G, US, & + m_to_MLD_units=GV%m_to_H) + call pass_var(hbl,G%Domain) ! get k-indices and zeta do j=G%jsc-1, G%jec+1 ; do i=G%isc-1,G%iec+1 call boundary_k_range(SURFACE, GV%ke, h(i,j,:), hbl(i,j), k_top(i,j), zeta_top(i,j), k_bot(i,j), zeta_bot(i,j)) @@ -436,7 +433,7 @@ subroutine neutral_diffusion_calc_coeffs(G, GV, US, h, T, S, CS, p_surf) if (CS%interior_only) then if (.not. CS%stable_cell(i,j,k_bot(i,j))) zeta_bot(i,j) = -1. ! set values in the surface and bottom boundary layer to false. - do k = 1, k_bot(i,j)-1 + do k = 1, k_bot(i,j) CS%stable_cell(i,j,k) = .false. enddo endif @@ -482,7 +479,7 @@ subroutine neutral_diffusion_calc_coeffs(G, GV, US, h, T, S, CS, p_surf) call find_neutral_surface_positions_continuous(GV%ke, & CS%Pint(i,j,:), CS%Tint(i,j,:), CS%Sint(i,j,:), CS%dRdT(i,j,:), CS%dRdS(i,j,:), & CS%Pint(i,j+1,:), CS%Tint(i,j+1,:), CS%Sint(i,j+1,:), CS%dRdT(i,j+1,:), CS%dRdS(i,j+1,:), & - CS%vPoL(i,J,:), CS%vPoR(i,J,:), CS%vKoL(i,J,:), CS%vKoR(i,J,:), CS%vhEff(i,J,:), & + CS%vPoL(i,J,:), CS%vPoR(i,J,:), CS%vKoL(i,J,:), CS%vKoR(i,J,:), CS%vhEff(i,J,:), & k_bot(i,J), k_bot(i,J+1), zeta_bot(i,J), zeta_bot(i,J+1)) else call find_neutral_surface_positions_discontinuous(CS, GV%ke, & @@ -543,11 +540,14 @@ subroutine neutral_diffusion(G, GV, h, Coef_x, Coef_y, dt, Reg, US, CS) real, dimension(SZIB_(G),SZJ_(G),CS%nsurf-1) :: uFlx ! Zonal flux of tracer [H conc ~> m conc or conc kg m-2] real, dimension(SZI_(G),SZJB_(G),CS%nsurf-1) :: vFlx ! Meridional flux of tracer ! [H conc ~> m conc or conc kg m-2] - real, dimension(SZI_(G),SZJ_(G),SZK_(GV)) :: tendency ! 
tendency array for diagn + real, dimension(SZI_(G),SZJ_(G),SZK_(GV)) :: tendency ! tendency array for diagnostics + ! [H conc T-1 ~> m conc s-1 or kg m-2 conc s-1] real, dimension(SZI_(G),SZJ_(G)) :: tendency_2d ! depth integrated content tendency for diagn + ! [H conc T-1 ~> m conc s-1 or kg m-2 conc s-1] real, dimension(SZIB_(G),SZJ_(G)) :: trans_x_2d ! depth integrated diffusive tracer x-transport diagn real, dimension(SZI_(G),SZJB_(G)) :: trans_y_2d ! depth integrated diffusive tracer y-transport diagn - real, dimension(SZK_(GV)) :: dTracer ! change in tracer concentration due to ndiffusion + real, dimension(SZK_(GV)) :: dTracer ! change in tracer concentration due to ndiffusion + ! [H L2 conc ~> m3 conc or kg conc] type(tracer_type), pointer :: Tracer => NULL() ! Pointer to the current tracer @@ -660,7 +660,7 @@ subroutine neutral_diffusion(G, GV, h, Coef_x, Coef_y, dt, Reg, US, CS) call post_data(tracer%id_dfy_2d, trans_y_2d(:,:), CS%diag) endif - ! post tendency of tracer content + ! 
post tendency of layer-integrated tracer content if (tracer%id_dfxy_cont > 0) then call post_data(tracer%id_dfxy_cont, tendency(:,:,:), CS%diag) endif diff --git a/src/tracer/MOM_tracer_hor_diff.F90 b/src/tracer/MOM_tracer_hor_diff.F90 index 9a3a76ed29..eb59dcc74f 100644 --- a/src/tracer/MOM_tracer_hor_diff.F90 +++ b/src/tracer/MOM_tracer_hor_diff.F90 @@ -23,7 +23,7 @@ module MOM_tracer_hor_diff use MOM_neutral_diffusion, only : neutral_diffusion_init, neutral_diffusion_end use MOM_neutral_diffusion, only : neutral_diffusion_CS use MOM_neutral_diffusion, only : neutral_diffusion_calc_coeffs, neutral_diffusion -use MOM_lateral_boundary_diffusion, only : lateral_boundary_diffusion_CS, lateral_boundary_diffusion_init +use MOM_lateral_boundary_diffusion, only : lbd_CS, lateral_boundary_diffusion_init use MOM_lateral_boundary_diffusion, only : lateral_boundary_diffusion use MOM_tracer_registry, only : tracer_registry_type, tracer_type, MOM_tracer_chksum use MOM_unit_scaling, only : unit_scale_type @@ -64,7 +64,7 @@ module MOM_tracer_hor_diff logical :: recalc_neutral_surf !< If true, recalculate the neutral surfaces if CFL has been !! exceeded type(neutral_diffusion_CS), pointer :: neutral_diffusion_CSp => NULL() !< Control structure for neutral diffusion. - type(lateral_boundary_diffusion_CS), pointer :: lateral_boundary_diffusion_CSp => NULL() !< Control structure for + type(lbd_CS), pointer :: lateral_boundary_diffusion_CSp => NULL() !< Control structure for !! lateral boundary mixing. type(diag_ctrl), pointer :: diag => NULL() !< A structure that is used to !! regulate the timing of diagnostic output. @@ -1512,7 +1512,7 @@ subroutine tracer_hor_diff_init(Time, G, GV, US, param_file, diag, EOS, diabatic diabatic_CSp, CS%neutral_diffusion_CSp ) if (CS%use_neutral_diffusion .and. 
CS%Diffuse_ML_interior) call MOM_error(FATAL, "MOM_tracer_hor_diff: "// & "USE_NEUTRAL_DIFFUSION and DIFFUSE_ML_TO_INTERIOR are mutually exclusive!") - CS%use_lateral_boundary_diffusion = lateral_boundary_diffusion_init(Time, G, param_file, diag, diabatic_CSp, & + CS%use_lateral_boundary_diffusion = lateral_boundary_diffusion_init(Time, G, GV, param_file, diag, diabatic_CSp, & CS%lateral_boundary_diffusion_CSp) if (CS%use_lateral_boundary_diffusion .and. CS%Diffuse_ML_interior) call MOM_error(FATAL, "MOM_tracer_hor_diff: "// & "USE_LATERAL_BOUNDARY_DIFFUSION and DIFFUSE_ML_TO_INTERIOR are mutually exclusive!") diff --git a/src/tracer/MOM_tracer_registry.F90 b/src/tracer/MOM_tracer_registry.F90 index e3936ecfed..cfbdc6ecb0 100644 --- a/src/tracer/MOM_tracer_registry.F90 +++ b/src/tracer/MOM_tracer_registry.F90 @@ -58,25 +58,26 @@ module MOM_tracer_registry real, dimension(:,:,:), pointer :: df_y => NULL() !< diagnostic array for y-diffusive tracer flux !! [conc H L2 T-1 ~> conc m3 s-1 or conc kg s-1] real, dimension(:,:,:), pointer :: lbd_dfx => NULL() !< diagnostic array for x-diffusive tracer flux - !! [conc H m2 s-1 ~> conc m3 s-1 or conc kg s-1] + !! [conc H L2 T-1 ~> conc m3 s-1 or conc kg s-1] real, dimension(:,:,:), pointer :: lbd_dfy => NULL() !< diagnostic array for y-diffusive tracer flux - !! [conc H m2 s-1 ~> conc m3 s-1 or conc kg s-1] + !! [conc H L2 T-1 ~> conc m3 s-1 or conc kg s-1] real, dimension(:,:), pointer :: lbd_dfx_2d => NULL() !< diagnostic array for x-diffusive tracer flux - !! [conc H m2 s-1 ~> conc m3 s-1 or conc kg s-1] + !! [conc H L2 T-1 ~> conc m3 s-1 or conc kg s-1] real, dimension(:,:), pointer :: lbd_dfy_2d => NULL() !< diagnostic array for y-diffusive tracer flux - !! [conc H m2 s-1 ~> conc m3 s-1 or conc kg s-1] + !! [conc H L2 T-1 ~> conc m3 s-1 or conc kg s-1] + !### These two arrays may be allocated but are never used. real, dimension(:,:), pointer :: lbd_bulk_df_x => NULL() !< diagnostic array for x-diffusive tracer flux - !! 
[conc H m2 s-1 ~> conc m3 s-1 or conc kg s-1] + !! [conc H L2 T-1 ~> conc m3 s-1 or conc kg s-1] real, dimension(:,:), pointer :: lbd_bulk_df_y => NULL() !< diagnostic array for y-diffusive tracer flux - !! [conc H m2 s-1 ~> conc m3 s-1 or conc kg s-1] + !! [conc H L2 T-1 ~> conc m3 s-1 or conc kg s-1] real, dimension(:,:), pointer :: df2d_x => NULL() !< diagnostic vertical sum x-diffusive flux !! [conc H L2 T-1 ~> conc m3 s-1 or conc kg s-1] real, dimension(:,:), pointer :: df2d_y => NULL() !< diagnostic vertical sum y-diffusive flux !! [conc H L2 T-1 ~> conc m3 s-1 or conc kg s-1] ! real, dimension(:,:), pointer :: df2d_conc_x => NULL() !< diagnostic vertical sum x-diffusive content flux -! !! [conc H m2 s-1 ~> conc m3 s-1 or conc kg s-1] +! !! [conc H L2 T-1 ~> conc m3 s-1 or conc kg s-1] ! real, dimension(:,:), pointer :: df2d_conc_y => NULL() !< diagnostic vertical sum y-diffusive content flux -! !! [conc H m2 s-1 ~> conc m3 s-1 or conc kg s-1] +! !! [conc H L2 T-1 ~> conc m3 s-1 or conc kg s-1] real, dimension(:,:,:), pointer :: advection_xy => NULL() !< convergence of lateral advective tracer fluxes !! [conc H T-1 ~> conc m s-1 or conc kg m-2 s-1] @@ -88,6 +89,7 @@ module MOM_tracer_registry !! timestep used for diagnostics [conc] real, dimension(:,:,:), pointer :: Trxh_prev => NULL() !< layer integrated tracer concentration array !! at a previous timestep used for diagnostics + !! [conc H ~> conc m or conc kg m-2] character(len=32) :: name !< tracer name used for diagnostics and error messages character(len=64) :: units !< Physical dimensions of the tracer concentration diff --git a/src/tracer/_Advection.dox b/src/tracer/_Advection.dox index ebbd040d33..4e5585fb63 100644 --- a/src/tracer/_Advection.dox +++ b/src/tracer/_Advection.dox @@ -1,7 +1,5 @@ /*! 
\page Tracer_Advection Tracer Advection -\brief Tracer transport schemes - MOM6 implements a generalised tracer advection scheme, which is a combination of the modified flux advection scheme \cite easter1993 with reconstructed tracer distributions. The tracer distributions may be diff --git a/src/tracer/_Discrete_tracer.dox b/src/tracer/_Discrete_tracer.dox new file mode 100644 index 0000000000..60aa2a0a44 --- /dev/null +++ b/src/tracer/_Discrete_tracer.dox @@ -0,0 +1,5 @@ +/*! \page Discrete_Tracer Discrete Tracer Transport Equations + +\brief Discrete Tracer Transport Equations + +*/ diff --git a/src/tracer/_Passive_tracer.dox b/src/tracer/_Passive_tracer.dox new file mode 100644 index 0000000000..a997eb168f --- /dev/null +++ b/src/tracer/_Passive_tracer.dox @@ -0,0 +1,9 @@ +/*! \page Passive_Tracers Passive and Other User-defined Tracers + +\section Passive_tracers Passive Tracers + +\section Generic_tracers Generic Tracers + +\section User_tracers User-defined Tracers + +*/ diff --git a/src/tracer/_Tracer_Transport.dox b/src/tracer/_Tracer_Transport.dox new file mode 100644 index 0000000000..e62f05dfb7 --- /dev/null +++ b/src/tracer/_Tracer_Transport.dox @@ -0,0 +1,124 @@ +/*! \page Tracer_Transport_Equations Tracer Transport Equations + +\image html PPM_1d.png "The 1-D finite volume advection of tracers. The reddish fluid will be in the cell at the end of the timestep." +\image latex PPM_1d.png "The 1-D finite volume advection of tracers. The reddish fluid will be in the cell at the end of the timestep." + +Given a piecewise polynomial description of the tracer concentration, the new tracer cell +concentration is the average of the fluid that will be in the cell after a timestep. 
+
+\f{eqnarray}
+  \int_{x_{i-1/2}}^{x_{i+1/2}} A_i^{n+1} (x) dx =
+  \int_{x_{i-1/2 - u \Delta t}}^{x_{i+1/2-u\Delta t}} A_i^{n} (x) dx &= \mbox{} \\
+  \int_{x_{i-1/2}}^{x_{i+1/2}} A_i^{n} (x) dx -
+  \int_{x_{i+1/2 - u \Delta t}}^{x_{i+1/2}} A_i^{n} (x) dx &+
+  \int_{x_{i-1/2 - u \Delta t}}^{x_{i-1/2}} A_i^{n} (x) dx
+\f}
+
+Fluxes are found by analytically integrating the profile over the distance that is
+swept past the face within a timestep.
+
+\f[
+  a_i^n = \frac{1}{\Delta x} \int_{x_{i-1/2}}^{x_{i+1/2}} A_i^n(x) dx
+\f]
+\f[
+  a_i^{n+1} = a_i^n - \frac{\Delta t}{\Delta x} (F_{i+1/2} - F_{i-1/2})
+\f]
+\f[
+  F_{i+1/2} = \frac{1}{\Delta t} \int_{x_{i+1/2 - u \Delta t}}^{x_{i+1/2}} A_i^n(x) dx
+\f]
+\f[
+  F_{i-1/2} = \frac{1}{\Delta t} \int_{x_{i-1/2 - u \Delta t}}^{x_{i-1/2}} A_i^n(x) dx
+\f]
+
+With piecewise constant profiles, this approach gives first-order upwind advection.
+Higher order polynomials (e.g., parabolas) can give higher order accuracy.
+
+\section Multidimensional_Tracer_Advection Multidimensional Tracer Advection
+
+Using "Easter's Pseudo-compressibility" (\cite easter1993), we start with these
+basic equations for a tracer \f$\psi\f$:
+
+\anchor ht-equation
+\f[
+  \frac{\partial h}{\partial t} + \vec{\nabla} \cdot (\vec{u}h) = 0 \equiv
+  \frac{\partial h}{\partial t} + \vec{\nabla} \cdot (\vec{U})
+\f]
+
+\f[
+  \frac{\partial}{\partial t} (h \psi) + \vec{\nabla} \cdot (\vec{U}\psi) = 0
+\f]
+
+\f[
+  \frac{\partial \psi}{\partial t} + \vec{u} \cdot \vec{\nabla} \psi = 0
+\f]
+
+We discretize the first of these equations in space:
+
+\f[
+  \frac{\partial h}{\partial t} = \frac{1}{\Delta x} \left(U_{i-\frac{1}{2},j} -
+  U_{i+\frac{1}{2},j} \right) + \frac{1}{\Delta y} \left(V_{i, j-\frac{1}{2}} -
+  V_{i,j+\frac{1}{2}} \right)
+\f]
+
+Using our monotonic one-dimensional flux:
+
+\f[
+  F_{i+\frac{1}{2},j} (\psi) = U_{i+\frac{1}{2},j} \psi_{i+\frac{1}{2},j}
+\f]
+
+we come up with an estimate based only on an update in the \f$x\f$ direction:
+
+\f[
+  \tilde{h}_{i,j} \tilde{\psi}_{i,j} = h^n_{i,j} \psi_{i,j} + \frac{\Delta
+  t}{\Delta x} \left( F_{i-\frac{1}{2},j} (\psi^n) - F_{i+\frac{1}{2},j} (\psi^n)
+  \right)
+\f]
+
+\f[
+  \tilde{h}_{i,j} = h^n_{i,j} + \frac{\Delta
+  t}{\Delta x} \left( U_{i-\frac{1}{2},j} - U_{i+\frac{1}{2},j} \right)
+\f]
+
+\f[
+  \tilde{\psi}_{i,j} = \frac{\tilde{h}_{i,j} \tilde{\psi}_{i,j}}{\tilde{h}_{i,j}}
+\f]
+
+Next, we update in the \f$y\f$ direction:
+
+\f[
+  h^{n+1}_{i,j} \psi^{n+1}_{i,j} = \tilde{h}_{i,j} \tilde{\psi}_{i,j} + \frac{\Delta
+  t}{\Delta y} \left( G_{i,j-\frac{1}{2}} (\tilde{\psi}) - G_{i,j+\frac{1}{2}}
+  (\tilde{\psi}) \right)
+\f]
+
+\f[
+  h^{n+1}_{i,j} = \tilde{h}_{i,j} + \frac{\Delta
+  t}{\Delta y} \left( V_{i,j-\frac{1}{2}} - V_{i,j+\frac{1}{2}} \right)
+\f]
+
+\f[
+  \psi^{n+1}_{i,j} = \frac{h^{n+1}_{i,j} \psi^{n+1}_{i,j}}{h^{n+1}_{i,j}}
+\f]
+
+\li This method ensures monotonicity. Strang splitting can reduce directional
+splitting error. See \cite easter1993, \cite durran2010 (section 5.9.4), and
+\cite russell1981 .
+
+\li Flux-form pseudo-compressibility advection is based on accumulated mass (or
+volume) fluxes, not velocities.
+
+\li Additional pseudo-compressibility passes can be added to accommodate
+transports exceeding cell masses. Extra passes of tracer advection are used in
+MOM6 in the small fraction of cells where this is needed.
+
+\li Explicit layered dynamics time-steps are limited by Doppler-shifted internal
+gravity wave speeds or inertial oscillations.
+Flow speeds in most of the ocean volume are much smaller than the peak
+internal wave speeds so that the advective time-steps can be longer.
+
+\li Advective mass fluxes in MOM6 are often accumulated over multiple dynamic
+steps. The goal is that as we go to higher resolution, this tracer advection will
+remain stable at relatively long time-steps, allowing for the inclusion of many
+biogeochemical tracers without adding an undue burden in computational cost.
+ +*/ diff --git a/src/tracer/_Tracer_fluxes.dox b/src/tracer/_Tracer_fluxes.dox new file mode 100644 index 0000000000..fe315a56e5 --- /dev/null +++ b/src/tracer/_Tracer_fluxes.dox @@ -0,0 +1,9 @@ +/*! \page Tracer_Fluxes Tracer Fluxes + +\section section_Tracer_Fluxes Tracer Fluxes + +\section section_River_Runoff River Runoff + +\section section_Ice_Runoff Ice Runoff + +*/ diff --git a/src/tracer/_Tracer_timestep.dox b/src/tracer/_Tracer_timestep.dox new file mode 100644 index 0000000000..991578669c --- /dev/null +++ b/src/tracer/_Tracer_timestep.dox @@ -0,0 +1,31 @@ +/*! \page Tracer_Timestep Tracer Timestep + +\brief Overview of Tracer Timestep + +The MOM6 code handles advection and lateral diffusion of all tracers. For +potential temperature and salinity, it also timesteps the thermodynamics +and vertical mixing (column physics). Since evaporation and precipitation +are handled as volume changes, the layer thicknesses need to be updated: + +\f[ + \frac{\partial h_k}{\partial t} = (P - E)_k +\f] + +The full tracer equation for tracer \f$\theta\f$ is: + +\f[ + \frac{\partial}{\partial t} (h_k\theta_k) + \nabla_s \cdot + (\vec{u}h_k \theta_k) = Q_k^\theta h_k + \frac{1}{h_k} \Delta \left( + \kappa \frac{\partial \theta}{\partial z} \right) + \frac{1}{h_k} + \nabla_s (h_k K \nabla_s \theta) +\f] + +Here, the advection is on the left hand side of the equation while the +right hand side contains thermodynamic processes, vertical diffusion, and +horizontal diffusion. There is more than one choice for vertical diffusion; +these will be described elsewhere. Also, the lateral diffusion is handled +in novel ways so as to avoid introduction of new extrema and to avoid +instabilities associated with rotated mixing tensors. The lateral diffusion +is described in \ref Horizontal_Diffusion. 
+ +*/ diff --git a/src/tracer/_Vertical_diffusion.dox b/src/tracer/_Vertical_diffusion.dox new file mode 100644 index 0000000000..14a23bb042 --- /dev/null +++ b/src/tracer/_Vertical_diffusion.dox @@ -0,0 +1,9 @@ +/*! \page Vertical_Diffusion Vertical Diffusion + +\brief Vertical diffusion of tracers + +The MOM6 tracer registry takes care of the tracer advection as well as horizontal +diffusion, but it is up to each individual tracer package to define its own vertical +diffusion. + +*/ diff --git a/src/user/basin_builder.F90 b/src/user/basin_builder.F90 index 61b65e0e9c..c9cdbfa392 100644 --- a/src/user/basin_builder.F90 +++ b/src/user/basin_builder.F90 @@ -94,6 +94,18 @@ subroutine basin_builder_topography(D, G, param_file, max_depth) lat = G%geoLatT(i,j) D(i,j) = min( D(i,j), NS_scurve_ridge(lon, lat, pars(1), pars(2), pars(3), pars(4), pars(5)) ) enddo ; enddo + elseif (trim(lowercase(funcs)) == 'angled_coast') then + call get_param(param_file, mdl, pname2, pars(1:4), & + "ANGLED_COAST parameters: longitude intersection with Equator, "//& + "latitude intersection with Prime Meridian, footprint radius, shelf depth.", & + units="degrees_E,degrees_N,degrees,m", & + fail_if_missing=.true.) + pars(4) = pars(4) / max_depth + do j=G%jsc,G%jec ; do i=G%isc,G%iec + lon = G%geoLonT(i,j) + lat = G%geoLatT(i,j) + D(i,j) = min( D(i,j), angled_coast(lon, lat, pars(1), pars(2), pars(3), pars(4)) ) + enddo ; enddo elseif (trim(lowercase(funcs)) == 'ew_coast') then call get_param(param_file, mdl, pname2, pars(1:5), & "EW_COAST parameters: latitude, starting longitude, "//& @@ -211,6 +223,21 @@ real function dist_line_fixed_y(x, y, x0, x1, y0) dist_line_fixed_y = dist_line_fixed_x(y, x, y0, x0, x1) end function dist_line_fixed_y +!> An "angled coast profile". 
+real function angled_coast(lon, lat, lon_eq, lat_mer, dr, sh) + real, intent(in) :: lon !< Longitude [degrees_E] + real, intent(in) :: lat !< Latitude [degrees_N] + real, intent(in) :: lon_eq !< Longitude intersection with Equator [degrees_E] + real, intent(in) :: lat_mer !< Latitude intersection with Prime Meridian [degrees_N] + real, intent(in) :: dr !< "Radius" of coast profile [degrees] + real, intent(in) :: sh !< depth of shelf as fraction of full depth [nondim] + real :: r + + r = 1/sqrt( lat_mer*lat_mer + lon_eq*lon_eq ) + r = r * ( lat_mer*lon + lon_eq*lat - lon_eq*lat_mer) + angled_coast = cstprof(r, 0., dr, 0.125, 0.125, 0.5, sh) +end function angled_coast + !> A "coast profile" applied in an N-S line from lonC,lat0 to lonC,lat1. real function NS_coast(lon, lat, lonC, lat0, lat1, dlon, sh) real, intent(in) :: lon !< Longitude [degrees_E]