From d4531ca374fe70ca772d59a5bc5e0dd4d7c368d6 Mon Sep 17 00:00:00 2001 From: Robert Hallberg Date: Fri, 5 Mar 2021 19:38:49 -0500 Subject: [PATCH] Duplicated infra/FMS1 into infra/FMS2 Duplicated all of the modules in infra/FMS1 into new copies in infra/FMS2, in preparation for merging in the FMS2 i/o changes to the new infra/FMS2 directory. All answers are bitwise identical, regardless of which config_src/infra directory is used. --- config_src/infra/FMS2/MOM_coms_infra.F90 | 455 ++++ config_src/infra/FMS2/MOM_constants.F90 | 14 + .../infra/FMS2/MOM_couplertype_infra.F90 | 247 +++ config_src/infra/FMS2/MOM_cpu_clock_infra.F90 | 93 + .../infra/FMS2/MOM_data_override_infra.F90 | 105 + .../infra/FMS2/MOM_diag_manager_infra.F90 | 423 ++++ config_src/infra/FMS2/MOM_domain_infra.F90 | 1941 +++++++++++++++++ .../infra/FMS2/MOM_ensemble_manager_infra.F90 | 95 + config_src/infra/FMS2/MOM_error_infra.F90 | 42 + config_src/infra/FMS2/MOM_interp_infra.F90 | 251 +++ config_src/infra/FMS2/MOM_io_infra.F90 | 801 +++++++ config_src/infra/FMS2/MOM_time_manager.F90 | 54 + 12 files changed, 4521 insertions(+) create mode 100644 config_src/infra/FMS2/MOM_coms_infra.F90 create mode 100644 config_src/infra/FMS2/MOM_constants.F90 create mode 100644 config_src/infra/FMS2/MOM_couplertype_infra.F90 create mode 100644 config_src/infra/FMS2/MOM_cpu_clock_infra.F90 create mode 100644 config_src/infra/FMS2/MOM_data_override_infra.F90 create mode 100644 config_src/infra/FMS2/MOM_diag_manager_infra.F90 create mode 100644 config_src/infra/FMS2/MOM_domain_infra.F90 create mode 100644 config_src/infra/FMS2/MOM_ensemble_manager_infra.F90 create mode 100644 config_src/infra/FMS2/MOM_error_infra.F90 create mode 100644 config_src/infra/FMS2/MOM_interp_infra.F90 create mode 100644 config_src/infra/FMS2/MOM_io_infra.F90 create mode 100644 config_src/infra/FMS2/MOM_time_manager.F90 diff --git a/config_src/infra/FMS2/MOM_coms_infra.F90 b/config_src/infra/FMS2/MOM_coms_infra.F90 new file mode 100644 index 0000000000..555b4df119 --- /dev/null +++ b/config_src/infra/FMS2/MOM_coms_infra.F90 @@ -0,0 +1,455 @@ +!> Thin interfaces to non-domain-oriented mpp communication subroutines +module MOM_coms_infra + +! This file is part of MOM6. See LICENSE.md for the license. + +use iso_fortran_env, only : int32, int64 + +use mpp_mod, only : mpp_pe, mpp_root_pe, mpp_npes, mpp_set_root_pe +use mpp_mod, only : mpp_set_current_pelist, mpp_get_current_pelist +use mpp_mod, only : mpp_broadcast, mpp_sync, mpp_sync_self, mpp_chksum +use mpp_mod, only : mpp_sum, mpp_max, mpp_min +use memutils_mod, only : print_memuse_stats +use fms_mod, only : fms_end, fms_init + +implicit none ; private + +public :: PE_here, root_PE, num_PEs, set_rootPE, Set_PElist, Get_PElist +public :: broadcast, sum_across_PEs, min_across_PEs, max_across_PEs +public :: field_chksum, MOM_infra_init, MOM_infra_end + +! This module provides interfaces to the non-domain-oriented communication +! subroutines. + +!> Communicate an array, string or scalar from one PE to others +interface broadcast + module procedure broadcast_char, broadcast_int32_0D, broadcast_int64_0D, broadcast_int1D + module procedure broadcast_real0D, broadcast_real1D, broadcast_real2D +end interface broadcast + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. 
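+!!
+!! A minimal usage sketch (the variable names here are illustrative, not part of this API):
+!!   chksum = field_chksum(temp_array)
+!!   if (PE_here() == root_PE()) write(*,*) "temp chksum: ", chksum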
+interface field_chksum
+  module procedure field_chksum_real_0d
+  module procedure field_chksum_real_1d
+  module procedure field_chksum_real_2d
+  module procedure field_chksum_real_3d
+  module procedure field_chksum_real_4d
+end interface field_chksum
+
+!> Find the sum of field across PEs, and update PEs with the sums.
+interface sum_across_PEs
+  module procedure sum_across_PEs_int4_0d
+  module procedure sum_across_PEs_int4_1d
+  module procedure sum_across_PEs_int8_0d
+  module procedure sum_across_PEs_int8_1d
+  module procedure sum_across_PEs_int8_2d
+  module procedure sum_across_PEs_real_0d
+  module procedure sum_across_PEs_real_1d
+  module procedure sum_across_PEs_real_2d
+end interface sum_across_PEs
+
+!> Find the maximum value of field across PEs, and update PEs with the values.
+interface max_across_PEs
+  module procedure max_across_PEs_int_0d
+  module procedure max_across_PEs_real_0d
+  module procedure max_across_PEs_real_1d
+end interface max_across_PEs
+
+!> Find the minimum value of field across PEs, and update PEs with the values.
+interface min_across_PEs
+  module procedure min_across_PEs_int_0d
+  module procedure min_across_PEs_real_0d
+  module procedure min_across_PEs_real_1d
+end interface min_across_PEs
+
+contains
+
+!> Return the ID of the PE for the current process.
+function PE_here() result(pe)
+  integer :: pe  !< PE ID of the current process
+  pe = mpp_pe()
+end function PE_here
+
+!> Return the ID of the root PE for the PE list of the current process.
+function root_PE() result(pe)
+  integer :: pe  !< root PE ID
+  pe = mpp_root_pe()
+end function root_PE
+
+!> Return the number of PEs for the current PE list.
+function num_PEs() result(npes)
+  integer :: npes  !< Number of PEs
+  npes = mpp_npes()
+end function num_PEs
+
+!> Designate a PE as the root PE.
+subroutine set_rootPE(pe)
+  integer, intent(in) :: pe  !< ID of the PE to be assigned as root
+  call mpp_set_root_pe(pe)
+end subroutine set_rootPE
+
+!> Set the current PE list.  If no list is provided, then the current PE list
+!! is set to the list of all available PEs on the communicator.  Setting the
+!! list will trigger a rank synchronization unless the `no_sync` flag is set.
+subroutine Set_PEList(pelist, no_sync)
+  integer, optional, intent(in) :: pelist(:) !< List of PEs to set for communication
+  logical, optional, intent(in) :: no_sync   !< Do not sync after list update.
+  call mpp_set_current_pelist(pelist, no_sync)
+end subroutine Set_PEList
+
+!> Retrieve the current PE list and any metadata if requested.
+subroutine Get_PEList(pelist, name, commID)
+  integer, intent(out) :: pelist(:) !< List of PE IDs of the current PE list
+  character(len=*), optional, intent(out) :: name !< Name of PE list
+  integer, optional, intent(out) :: commID !< Communicator ID of PE list
+
+  call mpp_get_current_pelist(pelist, name, commID)
+end subroutine Get_PEList
+
+!> Communicate a 1-D array of character strings from one PE to others
+subroutine broadcast_char(dat, length, from_PE, PElist, blocking)
+  character(len=*), intent(inout) :: dat(:) !< The data to communicate and destination
+  integer, intent(in) :: length !< The length of each string
+  integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE
+  integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the
+                                             !! active PE set as previously set via Set_PElist.
+  logical, optional, intent(in) :: blocking !< If true, barriers are added around the call
+
+  integer :: src_PE   ! The processor that is sending the data
+  logical :: do_block ! If true add synchronizing barriers
+
+  do_block = .false. ; if (present(blocking)) do_block = blocking
+  if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif
+
+  if (do_block) call mpp_sync(PElist)
+  call mpp_broadcast(dat, length, src_PE, PElist)
+  if (do_block) call mpp_sync_self(PElist)
+
+end subroutine broadcast_char
+
+!> Communicate a 64-bit integer from one PE to others
+subroutine broadcast_int64_0D(dat, from_PE, PElist, blocking)
+  integer(kind=int64), intent(inout) :: dat !< The data to communicate and destination
+  integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE
+  integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the
+                                             !! active PE set as previously set via Set_PElist.
+  logical, optional, intent(in) :: blocking !< If true, barriers are added around the call
+
+  integer :: src_PE   ! The processor that is sending the data
+  logical :: do_block ! If true add synchronizing barriers
+
+  do_block = .false. ; if (present(blocking)) do_block = blocking
+  if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif
+
+  if (do_block) call mpp_sync(PElist)
+  call mpp_broadcast(dat, src_PE, PElist)
+  if (do_block) call mpp_sync_self(PElist)
+
+end subroutine broadcast_int64_0D
+
+
+!> Communicate a 32-bit integer from one PE to others
+subroutine broadcast_int32_0D(dat, from_PE, PElist, blocking)
+  integer(kind=int32), intent(inout) :: dat !< The data to communicate and destination
+  integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE
+  integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the
+                                             !! active PE set as previously set via Set_PElist.
+  logical, optional, intent(in) :: blocking !< If true, barriers are added around the call
+
+  integer :: src_PE   ! The processor that is sending the data
+  logical :: do_block ! If true add synchronizing barriers
+
+  do_block = .false. ; if (present(blocking)) do_block = blocking
+  if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif
+
+  if (do_block) call mpp_sync(PElist)
+  call mpp_broadcast(dat, src_PE, PElist)
+  if (do_block) call mpp_sync_self(PElist)
+
+end subroutine broadcast_int32_0D
+
+!> Communicate a 1-D array of integers from one PE to others
+subroutine broadcast_int1D(dat, length, from_PE, PElist, blocking)
+  integer, dimension(:), intent(inout) :: dat !< The data to communicate and destination
+  integer, intent(in) :: length !< The number of data elements
+  integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE
+  integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the
+                                             !! active PE set as previously set via Set_PElist.
+  logical, optional, intent(in) :: blocking !< If true, barriers are added around the call
+
+  integer :: src_PE   ! The processor that is sending the data
+  logical :: do_block ! If true add synchronizing barriers
+
+  do_block = .false. 
; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, length, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_int1D + +!> Communicate a real number from one PE to others +subroutine broadcast_real0D(dat, from_PE, PElist, blocking) + real, intent(inout) :: dat !< The data to communicate and destination + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. ; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_real0D + +!> Communicate a 1-D array of reals from one PE to others +subroutine broadcast_real1D(dat, length, from_PE, PElist, blocking) + real, dimension(:), intent(inout) :: dat !< The data to communicate and destination + integer, intent(in) :: length !< The number of data elements + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. ; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, length, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_real1D + +!> Communicate a 2-D array of reals from one PE to others +subroutine broadcast_real2D(dat, length, from_PE, PElist, blocking) + real, dimension(:,:), intent(inout) :: dat !< The data to communicate and destination + integer, intent(in) :: length !< The total number of data elements + integer, optional, intent(in) :: from_PE !< The source PE, by default the root PE + integer, optional, intent(in) :: PElist(:) !< The list of participating PEs, by default the + !! active PE set as previously set via Set_PElist. + logical, optional, intent(in) :: blocking !< If true, barriers are added around the call + + integer :: src_PE ! The processor that is sending the data + logical :: do_block ! If true add synchronizing barriers + + do_block = .false. ; if (present(blocking)) do_block = blocking + if (present(from_PE)) then ; src_PE = from_PE ; else ; src_PE = root_PE() ; endif + + if (do_block) call mpp_sync(PElist) + call mpp_broadcast(dat, length, src_PE, PElist) + if (do_block) call mpp_sync_self(PElist) + +end subroutine broadcast_real2D + +! field_chksum wrappers + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. 
+function field_chksum_real_0d(field, pelist, mask_val) result(chksum) + real, intent(in) :: field !< Input scalar + integer, optional, intent(in) :: pelist(:) !< PE list of ranks to checksum + real, optional, intent(in) :: mask_val !< FMS mask value + integer(kind=int64) :: chksum !< checksum of array + + chksum = mpp_chksum(field, pelist, mask_val) +end function field_chksum_real_0d + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. +function field_chksum_real_1d(field, pelist, mask_val) result(chksum) + real, dimension(:), intent(in) :: field !< Input array + integer, optional, intent(in) :: pelist(:) !< PE list of ranks to checksum + real, optional, intent(in) :: mask_val !< FMS mask value + integer(kind=int64) :: chksum !< checksum of array + + chksum = mpp_chksum(field, pelist, mask_val) +end function field_chksum_real_1d + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. +function field_chksum_real_2d(field, pelist, mask_val) result(chksum) + real, dimension(:,:), intent(in) :: field !< Unrotated input field + integer, optional, intent(in) :: pelist(:) !< PE list of ranks to checksum + real, optional, intent(in) :: mask_val !< FMS mask value + integer(kind=int64) :: chksum !< checksum of array + + chksum = mpp_chksum(field, pelist, mask_val) +end function field_chksum_real_2d + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. +function field_chksum_real_3d(field, pelist, mask_val) result(chksum) + real, dimension(:,:,:), intent(in) :: field !< Unrotated input field + integer, optional, intent(in) :: pelist(:) !< PE list of ranks to checksum + real, optional, intent(in) :: mask_val !< FMS mask value + integer(kind=int64) :: chksum !< checksum of array + + chksum = mpp_chksum(field, pelist, mask_val) +end function field_chksum_real_3d + +!> Compute a checksum for a field distributed over a PE list. If no PE list is +!! provided, then the current active PE list is used. +function field_chksum_real_4d(field, pelist, mask_val) result(chksum) + real, dimension(:,:,:,:), intent(in) :: field !< Unrotated input field + integer, optional, intent(in) :: pelist(:) !< PE list of ranks to checksum + real, optional, intent(in) :: mask_val !< FMS mask value + integer(kind=int64) :: chksum !< checksum of array + + chksum = mpp_chksum(field, pelist, mask_val) +end function field_chksum_real_4d + +! sum_across_PEs wrappers + +!> Find the sum of field across PEs, and return this sum in field. +subroutine sum_across_PEs_int4_0d(field, pelist) + integer(kind=int32), intent(inout) :: field !< Value on this PE, and the sum across PEs upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, pelist) +end subroutine sum_across_PEs_int4_0d + +!> Find the sum of the values in corresponding positions of field across PEs, and return these sums in field. +subroutine sum_across_PEs_int4_1d(field, length, pelist) + integer(kind=int32), dimension(:), intent(inout) :: field !< The values to add, the sums upon return + integer, intent(in) :: length !< Number of elements in field to add + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_sum(field, length, pelist) +end subroutine sum_across_PEs_int4_1d + +!> Find the sum of field across PEs, and return this sum in field. 
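+!!
+!! For example, a global budget might be completed with a sketch like
+!!   call sum_across_PEs(total_salt)
+!! where total_salt (an illustrative name) holds this PE's partial sum on entry
+!! and the global sum on return.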
+subroutine sum_across_PEs_int8_0d(field, pelist)
+  integer(kind=int64), intent(inout) :: field !< Value on this PE, and the sum across PEs upon return
+  integer, optional, intent(in) :: pelist(:) !< List of PEs to work with
+
+  call mpp_sum(field, pelist)
+end subroutine sum_across_PEs_int8_0d
+
+!> Find the sum of the values in corresponding positions of field across PEs, and return these sums in field.
+subroutine sum_across_PEs_int8_1d(field, length, pelist)
+  integer(kind=int64), dimension(:), intent(inout) :: field !< The values to add, the sums upon return
+  integer, intent(in) :: length !< Number of elements in field to add
+  integer, optional, intent(in) :: pelist(:) !< List of PEs to work with
+
+  call mpp_sum(field, length, pelist)
+end subroutine sum_across_PEs_int8_1d
+
+!> Find the sum of the values in corresponding positions of field across PEs, and return these sums in field.
+subroutine sum_across_PEs_int8_2d(field, length, pelist)
+  integer(kind=int64), &
+    dimension(:,:), intent(inout) :: field !< The values to add, the sums upon return
+  integer, intent(in) :: length !< The total number of positions to sum, usually
+                                !! the product of the array sizes.
+  integer, optional, intent(in) :: pelist(:) !< List of PEs to work with
+
+  call mpp_sum(field, length, pelist)
+end subroutine sum_across_PEs_int8_2d
+
+!> Find the sum of field across PEs, and return this sum in field.
+subroutine sum_across_PEs_real_0d(field, pelist)
+  real, intent(inout) :: field !< Value on this PE, and the sum across PEs upon return
+  integer, optional, intent(in) :: pelist(:) !< List of PEs to work with
+
+  call mpp_sum(field, pelist)
+end subroutine sum_across_PEs_real_0d
+
+!> Find the sum of the values in corresponding positions of field across PEs, and return these sums in field.
+subroutine sum_across_PEs_real_1d(field, length, pelist)
+  real, dimension(:), intent(inout) :: field !< The values to add, the sums upon return
+  integer, intent(in) :: length !< Number of elements in field to add
+  integer, optional, intent(in) :: pelist(:) !< List of PEs to work with
+
+  call mpp_sum(field, length, pelist)
+end subroutine sum_across_PEs_real_1d
+
+!> Find the sum of the values in corresponding positions of field across PEs, and return these sums in field.
+subroutine sum_across_PEs_real_2d(field, length, pelist)
+  real, dimension(:,:), intent(inout) :: field !< The values to add, the sums upon return
+  integer, intent(in) :: length !< The total number of positions to sum, usually
+                                !! the product of the array sizes.
+  integer, optional, intent(in) :: pelist(:) !< List of PEs to work with
+
+  call mpp_sum(field, length, pelist)
+end subroutine sum_across_PEs_real_2d
+
+! max_across_PEs wrappers
+
+!> Find the maximum value of field across PEs, and store this maximum in field.
+subroutine max_across_PEs_int_0d(field, pelist)
+  integer, intent(inout) :: field !< The values to compare, the maximum upon return
+  integer, optional, intent(in) :: pelist(:) !< List of PEs to work with
+
+  call mpp_max(field, pelist)
+end subroutine max_across_PEs_int_0d
+
+!> Find the maximum value of field across PEs, and store this maximum in field.
+subroutine max_across_PEs_real_0d(field, pelist)
+  real, intent(inout) :: field !< The values to compare, the maximum upon return
+  integer, optional, intent(in) :: pelist(:) !< List of PEs to work with
+
+  call mpp_max(field, pelist)
+end subroutine max_across_PEs_real_0d
+
+!> Find the maximum values in each position of field across PEs, and store these maxima in field. 
+subroutine max_across_PEs_real_1d(field, length, pelist) + real, dimension(:), intent(inout) :: field !< The list of values being compared, with the + !! maxima in each position upon return + integer, intent(in) :: length !< Number of elements in field to compare + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_max(field, length, pelist) +end subroutine max_across_PEs_real_1d + +! min_across_PEs wrappers + +!> Find the minimum value of field across PEs, and store this minimum in field. +subroutine min_across_PEs_int_0d(field, pelist) + integer, intent(inout) :: field !< The values to compare, the minimum upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_min(field, pelist) +end subroutine min_across_PEs_int_0d + +!> Find the minimum value of field across PEs, and store this minimum in field. +subroutine min_across_PEs_real_0d(field, pelist) + real, intent(inout) :: field !< The values to compare, the minimum upon return + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + call mpp_min(field, pelist) +end subroutine min_across_PEs_real_0d + +!> Find the minimum values in each position of field across PEs, and store these minima in field. +subroutine min_across_PEs_real_1d(field, length, pelist) + real, dimension(:), intent(inout) :: field !< The list of values being compared, with the + !! minima in each position upon return + integer, intent(in) :: length !< Number of elements in field to compare + integer, optional, intent(in) :: pelist(:) !< List of PEs to work with + + call mpp_min(field, length, pelist) +end subroutine min_across_PEs_real_1d + +!> Initialize the model framework, including PE communication over a designated communicator. +!! If no communicator ID is provided, the framework's default communicator is used. +subroutine MOM_infra_init(localcomm) + integer, optional, intent(in) :: localcomm !< Communicator ID to initialize + call fms_init(localcomm) +end subroutine + +!> This subroutine carries out all of the calls required to close out the infrastructure cleanly. +!! This should only be called in ocean-only runs, as the coupler takes care of this in coupled runs. +subroutine MOM_infra_end + call print_memuse_stats( 'Memory HiWaterMark', always=.TRUE. ) + call fms_end() +end subroutine MOM_infra_end + +end module MOM_coms_infra diff --git a/config_src/infra/FMS2/MOM_constants.F90 b/config_src/infra/FMS2/MOM_constants.F90 new file mode 100644 index 0000000000..2db177e08c --- /dev/null +++ b/config_src/infra/FMS2/MOM_constants.F90 @@ -0,0 +1,14 @@ +!> Provides a few physical constants +module MOM_constants + +! This file is part of MOM6. See LICENSE.md for the license. + +use constants_mod, only : HLV, HLF + +implicit none ; private + +!> The constant offset for converting temperatures in Kelvin to Celsius +real, public, parameter :: CELSIUS_KELVIN_OFFSET = 273.15 +public :: HLV, HLF + +end module MOM_constants diff --git a/config_src/infra/FMS2/MOM_couplertype_infra.F90 b/config_src/infra/FMS2/MOM_couplertype_infra.F90 new file mode 100644 index 0000000000..fd947691ca --- /dev/null +++ b/config_src/infra/FMS2/MOM_couplertype_infra.F90 @@ -0,0 +1,247 @@ +!> This module wraps the FMS coupler types module +module MOM_couplertype_infra + +! This file is part of MOM6. See LICENSE.md for the license. 
+
+use coupler_types_mod, only : coupler_type_spawn, coupler_type_initialized, coupler_type_destructor
+use coupler_types_mod, only : coupler_type_set_diags, coupler_type_send_data
+use coupler_types_mod, only : coupler_type_write_chksums
+use coupler_types_mod, only : coupler_type_copy_data, coupler_type_increment_data
+use coupler_types_mod, only : coupler_type_extract_data, coupler_type_set_data
+use coupler_types_mod, only : ind_flux, ind_alpha, ind_csurf
+use coupler_types_mod, only : coupler_1d_bc_type, coupler_2d_bc_type
+use atmos_ocean_fluxes_mod, only : aof_set_coupler_flux
+use MOM_time_manager, only : time_type
+
+implicit none ; private
+
+public :: CT_spawn, CT_initialized, CT_destructor
+public :: CT_set_diags, CT_send_data, CT_write_chksums
+public :: CT_set_data, CT_increment_data
+public :: CT_copy_data, CT_extract_data
+public :: atmos_ocn_coupler_flux
+public :: ind_flux, ind_alpha, ind_csurf
+public :: coupler_1d_bc_type, coupler_2d_bc_type
+
+!> This is the interface to spawn one coupler_bc_type into another.
+interface CT_spawn
+  module procedure CT_spawn_1d_2d, CT_spawn_2d_2d
+end interface CT_spawn
+
+!> This function interface indicates whether a coupler_bc_type has been initialized.
+interface CT_initialized
+  module procedure CT_initialized_1d, CT_initialized_2d
+end interface CT_initialized
+
+!> This is the interface to deallocate any data associated with a coupler_bc_type.
+interface CT_destructor
+  module procedure CT_destructor_1d, CT_destructor_2d
+end interface CT_destructor
+
+contains
+
+!> This function sets many of the parameters for calculating an atmosphere-ocean tracer flux
+!! and returns an integer index for that flux.
+function atmos_ocn_coupler_flux(name, flux_type, implementation, param, mol_wt, &
+                                ice_restart_file, ocean_restart_file, units, caller, verbosity) &
+                  result (coupler_index)
+
+  character(len=*), intent(in) :: name !< A name to use for the flux
+  character(len=*), intent(in) :: flux_type !< A string describing the type of this flux,
+                                            !! perhaps 'air_sea_gas_flux'.
+  character(len=*), intent(in) :: implementation !< A name describing the specific
+                                                 !! implementation of this flux, such as 'ocmip2'.
+  real, dimension(:), optional, intent(in) :: param !< An array of parameters used for the fluxes
+  real, optional, intent(in) :: mol_wt !< The molecular weight of this tracer
+  character(len=*), optional, intent(in) :: ice_restart_file !< A sea-ice restart file to use with this flux.
+  character(len=*), optional, intent(in) :: ocean_restart_file !< An ocean restart file to use with this flux.
+  character(len=*), optional, intent(in) :: units !< The units of the flux
+  character(len=*), optional, intent(in) :: caller !< The name of the calling routine
+  integer, optional, intent(in) :: verbosity !< A 0-9 integer indicating a level of verbosity.
+  integer :: coupler_index !< The resulting integer handle to use for this flux in subsequent calls.
+
+  coupler_index = aof_set_coupler_flux(name, flux_type, implementation, &
+                    param=param, mol_wt=mol_wt, ice_restart_file=ice_restart_file, &
+                    ocean_restart_file=ocean_restart_file, &
+                    units=units, caller=caller, verbosity=verbosity)
+
+end function atmos_ocn_coupler_flux
+
+!> Generate a 2-D coupler type using a 1-D coupler type as a template. 
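+!!
+!! A usage sketch, assuming the usual MOM6 index extents (isd, isc, iec, ied, etc.; the
+!! flux structure names are illustrative):
+!!   call CT_spawn(fluxes_1d, fluxes_2d, (/isd, isc, iec, ied/), (/jsd, jsc, jec, jed/))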
+subroutine CT_spawn_1d_2d(var_in, var, idim, jdim, suffix, as_needed)
+  type(coupler_1d_bc_type), intent(in) :: var_in !< structure from which to copy information
+  type(coupler_2d_bc_type), intent(inout) :: var !< structure into which to copy information
+  integer, dimension(4), intent(in) :: idim !< The data and computational domain extents of
+                                            !! the first dimension in a non-decreasing list
+  integer, dimension(4), intent(in) :: jdim !< The data and computational domain extents of
+                                            !! the second dimension in a non-decreasing list
+  character(len=*), optional, intent(in) :: suffix !< optional suffix to make the name identifier unique
+  logical, optional, intent(in) :: as_needed !< Only do the spawn if the target type (var)
+                                             !! is not set and the parent type (var_in) is set.
+
+  call coupler_type_spawn(var_in, var, idim, jdim, suffix=suffix, as_needed=as_needed)
+
+end subroutine CT_spawn_1d_2d
+
+!> Generate one 2-D coupler type using another 2-D coupler type as a template.
+subroutine CT_spawn_2d_2d(var_in, var, idim, jdim, suffix, as_needed)
+  type(coupler_2d_bc_type), intent(in) :: var_in !< structure from which to copy information
+  type(coupler_2d_bc_type), intent(inout) :: var !< structure into which to copy information
+  integer, dimension(4), intent(in) :: idim !< The data and computational domain extents of
+                                            !! the first dimension in a non-decreasing list
+  integer, dimension(4), intent(in) :: jdim !< The data and computational domain extents of
+                                            !! the second dimension in a non-decreasing list
+  character(len=*), optional, intent(in) :: suffix !< optional suffix to make the name identifier unique
+  logical, optional, intent(in) :: as_needed !< Only do the spawn if the target type (var)
+                                             !! is not set and the parent type (var_in) is set.
+
+  call coupler_type_spawn(var_in, var, idim, jdim, suffix=suffix, as_needed=as_needed)
+
+end subroutine CT_spawn_2d_2d
+
+!> Copy all elements of the data in one coupler_2d_bc_type into another. Both must have the same array sizes.
+subroutine CT_copy_data(var_in, var, halo_size, bc_index, field_index, &
+                        exclude_flux_type, only_flux_type, pass_through_ice)
+  type(coupler_2d_bc_type), intent(in) :: var_in !< BC_type structure with the data to copy
+  type(coupler_2d_bc_type), intent(inout) :: var !< The recipient BC_type structure
+  integer, optional, intent(in) :: halo_size !< The extent of the halo to copy; 0 by default
+  integer, optional, intent(in) :: bc_index !< The index of the boundary condition
+                                            !! that is being copied
+  integer, optional, intent(in) :: field_index !< The index of the field in the
+                                               !! boundary condition that is being copied
+  character(len=*), optional, intent(in) :: exclude_flux_type !< A string describing which types of fluxes
+                                                              !! to exclude from this copy.
+  character(len=*), optional, intent(in) :: only_flux_type !< A string describing which types of fluxes
+                                                           !! to include from this copy.
+  logical, optional, intent(in) :: pass_through_ice !< If true, only copy BCs whose
+                                                    !! value of pass_through_ice matches this
+
+  call coupler_type_copy_data(var_in, var, halo_size, bc_index, field_index, &
+                              exclude_flux_type, only_flux_type, pass_through_ice)
+end subroutine CT_copy_data
+
+!> Increment data in all elements of one coupler_2d_bc_type with the data from another. Both
+!! must have the same array sizes. 
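+!!
+!! An accumulation sketch (the structure names and weight are illustrative):
+!!   call CT_increment_data(fluxes_step, fluxes_sum, scale_factor=wt)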
+subroutine CT_increment_data(var_in, var, halo_size, scale_factor, scale_prev)
+  type(coupler_2d_bc_type), intent(in) :: var_in !< coupler_type structure with the data to add to the other type
+  type(coupler_2d_bc_type), intent(inout) :: var !< The coupler_type structure whose fields are being incremented
+  integer, optional, intent(in) :: halo_size !< The extent of the halo to increment; 0 by default
+  real, optional, intent(in) :: scale_factor !< A scaling factor for the data that is being added
+  real, optional, intent(in) :: scale_prev !< A scaling factor for the data that is already here
+
+  call coupler_type_increment_data(var_in, var, halo_size=halo_size, scale_factor=scale_factor, &
+                                   scale_prev=scale_prev)
+
+end subroutine CT_increment_data
+
+!> Extract a 2-d field from a coupler_2d_bc_type into a two-dimensional array.
+subroutine CT_extract_data(var_in, bc_index, field_index, array_out, &
+                           scale_factor, halo_size, idim, jdim)
+  type(coupler_2d_bc_type), intent(in) :: var_in !< BC_type structure with the data to extract
+  integer, intent(in) :: bc_index !< The index of the boundary condition
+                                  !! that is being copied
+  integer, intent(in) :: field_index !< The index of the field in the boundary
+                                     !! condition that is being copied, or the
+                                     !! surface flux by default.
+  real, dimension(1:,1:), intent(out) :: array_out !< The recipient array for the field; its size
+                                                   !! must match the size of the data being copied
+                                                   !! unless idim and jdim are supplied.
+  real, optional, intent(in) :: scale_factor !< A scaling factor for the data that is being extracted
+  integer, optional, intent(in) :: halo_size !< The extent of the halo to copy; 0 by default
+  integer, dimension(4), optional, intent(in) :: idim !< The data and computational domain extents of
+                                                      !! the first dimension of the output array
+                                                      !! in a non-decreasing list
+  integer, dimension(4), optional, intent(in) :: jdim !< The data and computational domain extents of
+                                                      !! the second dimension of the output array
+                                                      !! in a non-decreasing list
+  call coupler_type_extract_data(var_in, bc_index, field_index, array_out, scale_factor, halo_size, idim, jdim)
+
+end subroutine CT_extract_data
+
+!> Set a single 2-d field in a coupler_2d_bc_type from a two-dimensional array.
+subroutine CT_set_data(array_in, bc_index, field_index, var, &
+                       scale_factor, halo_size, idim, jdim)
+  real, dimension(1:,1:), intent(in) :: array_in !< The source array for the field; its size
+                                                 !! must match the size of the data being copied
+                                                 !! unless idim and jdim are supplied.
+  integer, intent(in) :: bc_index !< The index of the boundary condition
+                                  !! that is being copied
+  integer, intent(in) :: field_index !< The index of the field in the
+                                     !! boundary condition that is being set. The
+                                     !! surface concentration is set by default.
+  type(coupler_2d_bc_type), intent(inout) :: var !< BC_type structure with the data to set
+  real, optional, intent(in) :: scale_factor !< A scaling factor for the data that is being set
+  integer, optional, intent(in) :: halo_size !< The extent of the halo to copy; 0 by default
+  integer, dimension(4), optional, intent(in) :: idim !< The data and computational domain extents of
+                                                      !! the first dimension of the output array
+                                                      !! in a non-decreasing list
+  integer, dimension(4), optional, intent(in) :: jdim !< The data and computational domain extents of
+                                                      !! the second dimension of the output array
+                                                      !! in a non-decreasing list
+
+  call coupler_type_set_data(array_in, bc_index, field_index, var, scale_factor, halo_size, idim, jdim)
+
+end subroutine CT_set_data
+
+!> Register the diagnostics of a coupler_2d_bc_type
+subroutine CT_set_diags(var, diag_name, axes, time)
+  type(coupler_2d_bc_type), intent(inout) :: var !< BC_type structure for which to register diagnostics
+  character(len=*), intent(in) :: diag_name !< name for the diagnostic file, or blank to avoid registering the fields
+  integer, dimension(:), intent(in) :: axes !< array of axes identifiers for diagnostic variable registration
+  type(time_type), intent(in) :: time !< model time variable for registering diagnostic field
+
+  call coupler_type_set_diags(var, diag_name, axes, time)
+
+end subroutine CT_set_diags
+
+!> Write out all diagnostics of elements of a coupler_2d_bc_type
+subroutine CT_send_data(var, Time)
+  type(coupler_2d_bc_type), intent(in) :: var !< BC_type structure with the diagnostics to write
+  type(time_type), intent(in) :: time !< The current model time
+
+  call coupler_type_send_data(var, Time)
+end subroutine CT_send_data
+
+!> Write out checksums for the elements of a coupler_2d_bc_type
+subroutine CT_write_chksums(var, outunit, name_lead)
+  type(coupler_2d_bc_type), intent(in) :: var !< BC_type structure whose checksums are to be written
+  integer, intent(in) :: outunit !< The index of an open output file
+  character(len=*), optional, intent(in) :: name_lead !< An optional prefix for the variable names
+
+  call coupler_type_write_chksums(var, outunit, name_lead)
+
+end subroutine CT_write_chksums
+
+!> Indicate whether a coupler_1d_bc_type has been initialized.
+logical function CT_initialized_1d(var)
+  type(coupler_1d_bc_type), intent(in) :: var !< BC_type structure to be queried
+
+  CT_initialized_1d = coupler_type_initialized(var)
+end function CT_initialized_1d
+
+!> Indicate whether a coupler_2d_bc_type has been initialized.
+logical function CT_initialized_2d(var)
+  type(coupler_2d_bc_type), intent(in) :: var !< BC_type structure to be queried
+
+  CT_initialized_2d = coupler_type_initialized(var)
+end function CT_initialized_2d
+
+!> Deallocate all data associated with a coupler_1d_bc_type
+subroutine CT_destructor_1d(var)
+  type(coupler_1d_bc_type), intent(inout) :: var !< BC_type structure to be deconstructed
+
+  call coupler_type_destructor(var)
+
+end subroutine CT_destructor_1d
+
+!> Deallocate all data associated with a coupler_2d_bc_type
+subroutine CT_destructor_2d(var)
+  type(coupler_2d_bc_type), intent(inout) :: var !< BC_type structure to be deconstructed
+
+  call coupler_type_destructor(var)
+
+end subroutine CT_destructor_2d
+
+end module MOM_couplertype_infra
diff --git a/config_src/infra/FMS2/MOM_cpu_clock_infra.F90 b/config_src/infra/FMS2/MOM_cpu_clock_infra.F90
new file mode 100644
index 0000000000..47d7bbedaa
--- /dev/null
+++ b/config_src/infra/FMS2/MOM_cpu_clock_infra.F90
@@ -0,0 +1,93 @@
+!> Wraps the MPP cpu clock functions
+!!
+!! The functions and constants should be accessed via mom_cpu_clock
+module MOM_cpu_clock_infra
+
+! This file is part of MOM6. See LICENSE.md for the license.
+
+! These interfaces and constants from MPP/FMS will not be directly exposed outside of this module
+use fms_mod, only : clock_flag_default
+use mpp_mod, only : mpp_clock_begin
+use mpp_mod, only : mpp_clock_end, mpp_clock_id
+use mpp_mod, only : MPP_CLOCK_COMPONENT => CLOCK_COMPONENT
+use mpp_mod, only : MPP_CLOCK_SUBCOMPONENT => CLOCK_SUBCOMPONENT
+use mpp_mod, only : MPP_CLOCK_MODULE_DRIVER => CLOCK_MODULE_DRIVER
+use mpp_mod, only : MPP_CLOCK_MODULE => CLOCK_MODULE
+use mpp_mod, only : MPP_CLOCK_ROUTINE => CLOCK_ROUTINE
+use mpp_mod, only : MPP_CLOCK_LOOP => CLOCK_LOOP
+use mpp_mod, only : MPP_CLOCK_INFRA => CLOCK_INFRA
+
+implicit none ; private
+
+! Public entities
+public :: cpu_clock_id, cpu_clock_begin, cpu_clock_end
+public :: CLOCK_COMPONENT, CLOCK_SUBCOMPONENT, CLOCK_MODULE_DRIVER, CLOCK_MODULE
+public :: CLOCK_ROUTINE, CLOCK_LOOP, CLOCK_INFRA
+
+!> A granularity value to be passed to cpu_clock_id() to indicate the clock is for a
+!! component, e.g. the entire MOM6 model
+integer, parameter :: CLOCK_COMPONENT = MPP_CLOCK_COMPONENT
+
+!> A granularity value to be passed to cpu_clock_id() to indicate the clock is for a
+!! sub-component, e.g. dynamics or thermodynamics
+integer, parameter :: CLOCK_SUBCOMPONENT = MPP_CLOCK_SUBCOMPONENT
+
+!> A granularity value to be passed to cpu_clock_id() to indicate the clock is for a
+!! module driver, e.g. a routine that calls multiple other routines
+integer, parameter :: CLOCK_MODULE_DRIVER = MPP_CLOCK_MODULE_DRIVER
+
+!> A granularity value to be passed to cpu_clock_id() to indicate the clock is for a
+!! module, e.g. the main entry routine for a module
+integer, parameter :: CLOCK_MODULE = MPP_CLOCK_MODULE
+
+!> A granularity value to be passed to cpu_clock_id() to indicate the clock is for a
+!! subroutine or function
+integer, parameter :: CLOCK_ROUTINE = MPP_CLOCK_ROUTINE
+
+!> A granularity value to be passed to cpu_clock_id() to indicate the clock is for a
+!! section within a routine, e.g. around a loop
+integer, parameter :: CLOCK_LOOP = MPP_CLOCK_LOOP
+
+!> A granularity value to be passed to cpu_clock_id() to indicate the clock is for an
+!! infrastructure operation, e.g. a halo update
+integer, parameter :: CLOCK_INFRA = MPP_CLOCK_INFRA
+
+contains
+
+!> Turns on clock with handle "id"
+subroutine cpu_clock_begin(id)
+  integer, intent(in) :: id !< Handle for clock
+
+  call mpp_clock_begin(id)
+
+end subroutine cpu_clock_begin
+
+!> Turns off clock with handle "id"
+subroutine cpu_clock_end(id)
+  integer, intent(in) :: id !< Handle for clock
+
+  call mpp_clock_end(id)
+
+end subroutine cpu_clock_end
+
+!> Returns the integer handle for a named CPU clock.
+integer function cpu_clock_id( name, synchro_flag, grain )
+  character(len=*), intent(in) :: name !< The unique name of the CPU clock
+  integer, optional, intent(in) :: synchro_flag !< An integer flag that controls whether the PEs
+                                   !! are synchronized before the cpu clocks start counting.
+                                   !! Synchronization occurs before the start of a clock if this
+                                   !! is odd, while additional (expensive) statistics can be set
+                                   !! for other values. If absent, the default is taken from the
+                                   !! settings for FMS.
+  integer, optional, intent(in) :: grain !< The timing granularity for this clock, usually set to
+                                         !! the values of CLOCK_COMPONENT, CLOCK_ROUTINE, CLOCK_LOOP, etc. 
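+                                         !!
+                                         !! A registration sketch (the clock name and handle are illustrative):
+                                         !!   id_clock_dyn = cpu_clock_id('(Ocean dynamics)', grain=CLOCK_MODULE_DRIVER)
+                                         !! followed by paired calls to cpu_clock_begin(id_clock_dyn) and
+                                         !! cpu_clock_end(id_clock_dyn) around the timed section.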
+
+  if (present(synchro_flag)) then
+    cpu_clock_id = mpp_clock_id(name, flags=synchro_flag, grain=grain)
+  else
+    cpu_clock_id = mpp_clock_id(name, flags=clock_flag_default, grain=grain)
+  endif
+
+end function cpu_clock_id
+
+end module MOM_cpu_clock_infra
diff --git a/config_src/infra/FMS2/MOM_data_override_infra.F90 b/config_src/infra/FMS2/MOM_data_override_infra.F90
new file mode 100644
index 0000000000..1484f0c128
--- /dev/null
+++ b/config_src/infra/FMS2/MOM_data_override_infra.F90
@@ -0,0 +1,105 @@
+!> These interfaces allow for ocean or sea-ice variables to be replaced with data.
+module MOM_data_override_infra
+
+! This file is part of MOM6. See LICENSE.md for the license.
+
+use MOM_domain_infra,  only : MOM_domain_type, domain2d
+use MOM_domain_infra,  only : get_simple_array_i_ind, get_simple_array_j_ind
+use MOM_time_manager,  only : time_type
+use data_override_mod, only : data_override_init
+use data_override_mod, only : data_override
+use data_override_mod, only : data_override_unset_domains
+
+implicit none ; private
+
+public :: impose_data_init, impose_data, impose_data_unset_domains
+
+!> Potentially override the values of a field in the model with values from a dataset.
+interface impose_data
+  module procedure data_override_MD, data_override_2d
+end interface
+
+contains
+
+!> Initialize the data override capability and set the domains for the ocean and ice components.
+!! There should be a call to impose_data_init before impose_data is called.
+subroutine impose_data_init(MOM_domain_in, Ocean_domain_in, Ice_domain_in)
+  type (MOM_domain_type), intent(in), optional :: MOM_domain_in !< A MOM domain to use for the ocean component
+  type (domain2d), intent(in), optional :: Ocean_domain_in !< An FMS domain2d to use for the ocean component
+  type (domain2d), intent(in), optional :: Ice_domain_in !< An FMS domain2d to use for the sea-ice component
+
+  if (present(MOM_domain_in)) then
+    call data_override_init(Ocean_domain_in=MOM_domain_in%mpp_domain, Ice_domain_in=Ice_domain_in)
+  else
+    call data_override_init(Ocean_domain_in=Ocean_domain_in, Ice_domain_in=Ice_domain_in)
+  endif
+end subroutine impose_data_init
+
+
+!> Potentially override a 2-d field on a MOM6 domain with values from a dataset.
+subroutine data_override_MD(domain, fieldname, data_2D, time, scale, override, is_ice)
+  type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information
+  character(len=*), intent(in) :: fieldname !< Name of the field to override
+  real, dimension(:,:), intent(inout) :: data_2D !< Data that may be modified by this call.
+  type(time_type), intent(in) :: time !< The model time, and the time for the data
+  real, optional, intent(in) :: scale !< A scaling factor that an overridden field is
+                                      !! multiplied by before it is returned. However,
+                                      !! if there is no override, there is no rescaling.
+  logical, optional, intent(out) :: override !< True if the field has been overridden successfully
+  logical, optional, intent(in) :: is_ice !< If present and true, use the ice domain.
+
+  logical :: overridden, is_ocean
+  integer :: i, j, is, ie, js, je
+
+  overridden = .false.
+  is_ocean = .true. ; if (present(is_ice)) is_ocean = .not.is_ice
+  if (is_ocean) then
+    call data_override('OCN', fieldname, data_2D, time, override=overridden)
+  else
+    call data_override('ICE', fieldname, data_2D, time, override=overridden)
+  endif
+
+  if (overridden .and. present(scale)) then ; if (scale /= 1.0) then
+    ! Rescale data in the computational domain if the data override has occurred.
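+    ! The get_simple_array_*_ind helpers infer the computational-domain index ranges
+    ! from the extents of data_2D, so any halo points are excluded from the rescaling.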
+    call get_simple_array_i_ind(domain, size(data_2D,1), is, ie)
+    call get_simple_array_j_ind(domain, size(data_2D,2), js, je)
+    do j=js,je ; do i=is,ie
+      data_2D(i,j) = scale*data_2D(i,j)
+    enddo ; enddo
+  endif ; endif
+
+  if (present(override)) override = overridden
+
+end subroutine data_override_MD
+
+
+!> Potentially override a 2-d field with values from a dataset.
+subroutine data_override_2d(gridname, fieldname, data_2D, time, override)
+  character(len=3), intent(in) :: gridname !< String identifying the model component; in MOM6
+                                           !! and SIS this may be either 'OCN' or 'ICE'
+  character(len=*), intent(in) :: fieldname !< Name of the field to override
+  real, dimension(:,:), intent(inout) :: data_2D !< Data that may be modified by this call
+  type(time_type), intent(in) :: time !< The model time, and the time for the data
+  logical, optional, intent(out) :: override !< True if the field has been overridden successfully
+
+  call data_override(gridname, fieldname, data_2D, time, override)
+
+end subroutine data_override_2d
+
+!> Unset domains that had previously been set for use by data_override.
+subroutine impose_data_unset_domains(unset_Ocean, unset_Ice, must_be_set)
+  logical, intent(in), optional :: unset_Ocean !< If present and true, unset the ocean domain for overrides
+  logical, intent(in), optional :: unset_Ice !< If present and true, unset the sea-ice domain for overrides
+  logical, intent(in), optional :: must_be_set !< If present and true, it is a fatal error to unset
+                                               !! a domain that is not set.
+
+  call data_override_unset_domains(unset_Ocean=unset_Ocean, unset_Ice=unset_Ice, &
+                                   must_be_set=must_be_set)
+end subroutine impose_data_unset_domains
+
+end module MOM_data_override_infra
+
+!> \namespace MOM_data_override_infra
+!!
+!! The routines here wrap routines from the FMS module data_override_mod, which potentially replace
+!! model values with values read from a data file.
diff --git a/config_src/infra/FMS2/MOM_diag_manager_infra.F90 b/config_src/infra/FMS2/MOM_diag_manager_infra.F90
new file mode 100644
index 0000000000..18c80cf24c
--- /dev/null
+++ b/config_src/infra/FMS2/MOM_diag_manager_infra.F90
@@ -0,0 +1,423 @@
+!> A wrapper for the FMS diag_manager routines. This module should be the
+!! only MOM6 module which imports the FMS shared infrastructure for
+!! diagnostics. Pass-through interfaces are documented
+!! here and renamed in order to clearly identify these APIs as being
+!! consistent with the FMS infrastructure (any future updates to
+!! those APIs would be applied here).
+module MOM_diag_manager_infra
+
+! This file is part of MOM6. See LICENSE.md for the license.
+
+use diag_axis_mod,    only : fms_axis_init=>diag_axis_init
+use diag_axis_mod,    only : fms_get_diag_axis_name => get_diag_axis_name
+use diag_axis_mod,    only : EAST, NORTH
+use diag_data_mod,    only : null_axis_id
+use diag_manager_mod, only : fms_diag_manager_init => diag_manager_init
+use diag_manager_mod, only : fms_diag_manager_end => diag_manager_end
+use diag_manager_mod, only : send_data_fms => send_data
+use diag_manager_mod, only : fms_diag_field_add_attribute => diag_field_add_attribute
+use diag_manager_mod, only : DIAG_FIELD_NOT_FOUND
+use diag_manager_mod, only : register_diag_field_fms => register_diag_field
+use diag_manager_mod, only : register_static_field_fms => register_static_field
+use diag_manager_mod, only : get_diag_field_id_fms => get_diag_field_id
+use MOM_time_manager, only : time_type
+use MOM_domain_infra, only : MOM_domain_type
+use MOM_error_infra,  only : MOM_error => MOM_err, FATAL, WARNING
+
+implicit none ; private
+
+!> Register a field to be available for diagnostic output
+interface register_diag_field_infra
+  module procedure register_diag_field_infra_scalar
+  module procedure register_diag_field_infra_array
+end interface register_diag_field_infra
+
+!> Transmit data for diagnostic output
+interface send_data_infra
+  module procedure send_data_infra_0d, send_data_infra_1d
+  module procedure send_data_infra_2d, send_data_infra_3d
+#ifdef OVERLOAD_R8
+  module procedure send_data_infra_2d_r8, send_data_infra_3d_r8
+#endif
+end interface send_data_infra
+
+!> Add an attribute to a diagnostic field
+interface MOM_diag_field_add_attribute
+  module procedure MOM_diag_field_add_attribute_scalar_r
+  module procedure MOM_diag_field_add_attribute_scalar_i
+  module procedure MOM_diag_field_add_attribute_scalar_c
+  module procedure MOM_diag_field_add_attribute_r1d
+  module procedure MOM_diag_field_add_attribute_i1d
+end interface MOM_diag_field_add_attribute
+
+
+! Public interfaces
+public MOM_diag_axis_init
+public get_MOM_diag_axis_name
+public MOM_diag_manager_init
+public MOM_diag_manager_end
+public send_data_infra
+public MOM_diag_field_add_attribute
+public register_diag_field_infra
+public register_static_field_infra
+public get_MOM_diag_field_id
+! Public data
+public null_axis_id
+public DIAG_FIELD_NOT_FOUND
+public EAST, NORTH
+
+
+contains
+
+!> Initialize a diagnostic axis
+integer function MOM_diag_axis_init(name, data, units, cart_name, long_name, MOM_domain, position, &
+                                    & direction, edges, set_name, coarsen, null_axis)
+  character(len=*),   intent(in) :: name !< The name of this axis
+  real, dimension(:), intent(in) :: data !< The array of coordinate values
+  character(len=*),   intent(in) :: units !< The units for the axis data
+  character(len=*),   intent(in) :: cart_name !< Cartesian axis ("X", "Y", "Z", "T", or "N" for none)
+  character(len=*), &
+            optional, intent(in) :: long_name !< The long name of this axis
+  type(MOM_domain_type), &
+            optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition
+  integer,  optional, intent(in) :: position !< This indicates the relative position of this
+                                             !! axis. The default is CENTER, but EAST and NORTH
+                                             !! are common options.
+  integer,  optional, intent(in) :: direction !< This indicates the direction along which this
+                                              !! axis increases: 1 for upward, -1 for downward, or
+                                              !! 0 for non-vertical axes (the default)
+  integer,  optional, intent(in) :: edges !< The axis_id of the complementary axis that
+                                          !! describes the edges of this axis
+  character(len=*), &
+            optional, intent(in) :: set_name !< A name to use for this set of axes.
+  integer,  optional, intent(in) :: coarsen !< An optional degree of coarsening for the grid, 1
+                                            !! by default.
+  logical,  optional, intent(in) :: null_axis !< If present and true, return the special null axis
+                                              !! id for use with scalars.
+
+  integer :: coarsening ! The degree of grid coarsening
+
+  if (present(null_axis)) then ; if (null_axis) then
+    ! Return the special null axis id for scalars
+    MOM_diag_axis_init = null_axis_id
+    return
+  endif ; endif
+
+  if (present(MOM_domain)) then
+    coarsening = 1 ; if (present(coarsen)) coarsening = coarsen
+    if (coarsening == 1) then
+      MOM_diag_axis_init = fms_axis_init(name, data, units, cart_name, long_name=long_name, &
+              direction=direction, set_name=set_name, edges=edges, &
+              domain2=MOM_domain%mpp_domain, domain_position=position)
+    elseif (coarsening == 2) then
+      MOM_diag_axis_init = fms_axis_init(name, data, units, cart_name, long_name=long_name, &
+              direction=direction, set_name=set_name, edges=edges, &
+              domain2=MOM_domain%mpp_domain_d2, domain_position=position)
+    else
+      call MOM_error(FATAL, "diag_axis_init called with an invalid value of coarsen.")
+    endif
+  else
+    if (present(coarsen)) then ; if (coarsen /= 1) then
+      call MOM_error(FATAL, "diag_axis_init does not support grid coarsening without a MOM_domain.")
+    endif ; endif
+    MOM_diag_axis_init = fms_axis_init(name, data, units, cart_name, long_name=long_name, &
+                                       direction=direction, set_name=set_name, edges=edges)
+  endif
+
+end function MOM_diag_axis_init
+
+!> Returns the short name of the axis
+subroutine get_MOM_diag_axis_name(id, name)
+  integer,          intent(in)  :: id !< The axis numeric id
+  character(len=*), intent(out) :: name !< The short name of the axis
+
+  call fms_get_diag_axis_name(id, name)
+
+end subroutine get_MOM_diag_axis_name
+
+!> Return a unique numeric ID for a module/field name combination.
+integer function get_MOM_diag_field_id(module_name, field_name)
+  character(len=*), intent(in) :: module_name !< A module name string to query.
+  character(len=*), intent(in) :: field_name !< A field name string to query.
+
+
+  get_MOM_diag_field_id = -1
+  get_MOM_diag_field_id = get_diag_field_id_fms(module_name, field_name)
+
+end function get_MOM_diag_field_id
+
+!> Initializes the diagnostic manager
+subroutine MOM_diag_manager_init(diag_model_subset, time_init, err_msg)
+  integer, optional, intent(in) :: diag_model_subset !< An optional diagnostic subset
+  integer, dimension(6), optional, intent(in) :: time_init !< An optional reference time for diagnostics.
+                                                           !! The default uses the value contained in the
+                                                           !! diag_table. Format is Y-M-D-H-M-S
+  character(len=*), optional, intent(out) :: err_msg !< Error message.
+  call FMS_diag_manager_init(diag_model_subset, time_init, err_msg)
+
+end subroutine MOM_diag_manager_init
+
+!> Close the diagnostic manager
+subroutine MOM_diag_manager_end(time)
+  type(time_type), intent(in) :: time !< Model time at call to close.
+
+  call FMS_diag_manager_end(time)
+
+end subroutine MOM_diag_manager_end
+
+!> Register a MOM diagnostic field for scalars
+integer function register_diag_field_infra_scalar(module_name, field_name, init_time, &
+             long_name, units, missing_value, range, standard_name, do_not_log, &
+             err_msg, area, volume)
+  character(len=*), intent(in) :: module_name !< The name of the associated module
+  character(len=*), intent(in) :: field_name !< The name of the field
+  type(time_type),  optional, intent(in) :: init_time !< The registration time
+  character(len=*), optional, intent(in) :: long_name !< A long name for the field
+  character(len=*), optional, intent(in) :: units !< Field units
+  character(len=*), optional, intent(in) :: standard_name !< A standard name for the field
+  real, optional, intent(in) :: missing_value !< Missing value attribute
+  real, dimension(2), optional, intent(in) :: range !< A valid range of the field
+  logical, optional, intent(in) :: do_not_log !< if TRUE, field information is not logged
+  character(len=*), optional, intent(out):: err_msg !< An error message to return
+  integer, optional, intent(in) :: area !< Diagnostic ID of the field containing the area attribute
+  integer, optional, intent(in) :: volume !< Diagnostic ID of the field containing the volume attribute
+
+  register_diag_field_infra_scalar = register_diag_field_fms(module_name, field_name, init_time, &
+       long_name, units, missing_value, range, standard_name, do_not_log, err_msg, area, volume)
+
+end function register_diag_field_infra_scalar
+
+!> Register a MOM diagnostic field for arrays
+integer function register_diag_field_infra_array(module_name, field_name, axes, init_time, &
+             long_name, units, missing_value, range, mask_variant, standard_name, verbose, &
+             do_not_log, err_msg, interp_method, tile_count, area, volume)
+  character(len=*), intent(in) :: module_name !< The name of the associated module
+  character(len=*), intent(in) :: field_name !< The name of the field
+  integer, dimension(:), intent(in) :: axes !< Diagnostic IDs of axis attributes for the field
+  type(time_type),  optional, intent(in) :: init_time !< The registration time
+  character(len=*), optional, intent(in) :: long_name !< A long name for the field
+  character(len=*), optional, intent(in) :: units !< Units of the field
+  real, optional, intent(in) :: missing_value !< Missing value attribute
+  real, dimension(2), optional, intent(in) :: range !< A valid range of the field
+  logical, optional, intent(in) :: mask_variant !< If true, the field mask is varying in time
+  character(len=*), optional, intent(in) :: standard_name !< A standard name for the field
+  logical, optional, intent(in) :: verbose !< If true, provide additional log information
+  logical, optional, intent(in) :: do_not_log !< if TRUE, field information is not logged
+  character(len=*), optional, intent(in) :: interp_method !< If 'none' indicates the field should
+                                                          !! not be interpolated as a scalar
+  integer, optional, intent(in) :: tile_count !< The tile number for the current PE
+  character(len=*), optional, intent(out):: err_msg !< An error message to return
+  integer, optional, intent(in) :: area !< Diagnostic ID of the field containing the area attribute
+  integer, optional, intent(in) :: volume !< Diagnostic ID of the field containing the volume attribute
+
+  register_diag_field_infra_array = register_diag_field_fms(module_name, field_name, axes, init_time, &
+       long_name, units, missing_value, range, mask_variant, standard_name, verbose, do_not_log, &
+       err_msg, interp_method, tile_count, area, volume)
+
+end function register_diag_field_infra_array
+
+
+!> Register a static MOM diagnostic field
+integer function register_static_field_infra(module_name, field_name, axes, long_name, units, &
+             missing_value, range, mask_variant, standard_name, do_not_log, interp_method, &
+             tile_count, area, volume)
+  character(len=*), intent(in) :: module_name !< The name of the associated module
+  character(len=*), intent(in) :: field_name !< The name of the field
+  integer, dimension(:), intent(in) :: axes !< Diagnostic IDs of axis attributes for the field
+  character(len=*), optional, intent(in) :: long_name !< A long name for the field
+  character(len=*), optional, intent(in) :: units !< Units of the field
+  real, optional, intent(in) :: missing_value !< Missing value attribute
+  real, dimension(2), optional, intent(in) :: range !< A valid range of the field
+  logical, optional, intent(in) :: mask_variant !< If true, the field mask is varying in time
+  character(len=*), optional, intent(in) :: standard_name !< A standard name for the field
+  logical, optional, intent(in) :: do_not_log !< if TRUE, field information is not logged
+  character(len=*), optional, intent(in) :: interp_method !< If 'none' indicates the field should
+                                                          !! not be interpolated as a scalar
+  integer, optional, intent(in) :: tile_count !< The tile number for the current PE
+  integer, optional, intent(in) :: area !< Diagnostic ID of the field containing the area attribute
+  integer, optional, intent(in) :: volume !< Diagnostic ID of the field containing the volume attribute
+
+  register_static_field_infra = register_static_field_fms(module_name, field_name, axes, long_name, units, &
+       missing_value, range, mask_variant, standard_name, dynamic=.false., do_not_log=do_not_log, &
+       interp_method=interp_method, tile_count=tile_count, area=area, volume=volume)
+end function register_static_field_infra
+
+!> Returns true if the argument data are successfully passed to a diagnostic manager
+!! with the indicated unique reference id, false otherwise.
+logical function send_data_infra_0d(diag_field_id, field, time, err_msg)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  real, intent(in) :: field !< The value being recorded
+  TYPE(time_type), optional, intent(in) :: time !< The time for the current record
+  CHARACTER(len=*), optional, intent(out) :: err_msg !< An optional error message
+
+  send_data_infra_0d = send_data_fms(diag_field_id, field, time, err_msg)
+end function send_data_infra_0d
+
+!> Returns true if the argument data are successfully passed to a diagnostic manager
+!! with the indicated unique reference id, false otherwise.
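+!!
+!! A posting sketch (the handle and field names here are illustrative, not part of this API):
+!!   used = send_data_infra(id_sst, sst, time=Time)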
+logical function send_data_infra_1d(diag_field_id, field, is_in, ie_in, time, mask, rmask, weight, err_msg) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + real, dimension(:), intent(in) :: field !< A 1-d array of values being recorded + integer, optional, intent(in) :: is_in !< The starting index for the data being recorded + integer, optional, intent(in) :: ie_in !< The end index for the data being recorded + type(time_type), optional, intent(in) :: time !< The time for the current record + logical, dimension(:), optional, intent(in) :: mask !< An optional rank 1 logical mask + real, dimension(:), optional, intent(in) :: rmask !< An optional rank 1 mask array + real, optional, intent(in) :: weight !< A scalar weight factor to apply to the current + !! record if there is averaging in time + character(len=*), optional, intent(out) :: err_msg !< A log indicating the status of the post upon + !! returning to the calling routine + + send_data_infra_1d = send_data_fms(diag_field_id, field, time, is_in, mask, rmask, ie_in, weight, err_msg) + +end function send_data_infra_1d + +!> Returns true if the argument data are successfully passed to a diagnostic manager +!! with the indicated unique reference id, false otherwise. +logical function send_data_infra_2d(diag_field_id, field, is_in, ie_in, js_in, je_in, & + time, mask, rmask, weight, err_msg) + integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field + real, dimension(:,:), intent(in) :: field !< A 2-d array of values being recorded + integer, optional, intent(in) :: is_in !< The starting i-index for the data being recorded + integer, optional, intent(in) :: ie_in !< The end i-index for the data being recorded + integer, optional, intent(in) :: js_in !< The starting j-index for the data being recorded + integer, optional, intent(in) :: je_in !< The end j-index for the data being recorded + type(time_type), optional, intent(in) :: time !< The time for the current record + logical, dimension(:,:), optional, intent(in) :: mask !< An optional 2-d logical mask + real, dimension(:,:), optional, intent(in) :: rmask !< An optional 2-d mask array + real, optional, intent(in) :: weight !< A scalar weight factor to apply to the current + !! record if there is averaging in time + character(len=*), optional, intent(out) :: err_msg !< A log indicating the status of the post upon + !! returning to the calling routine + + send_data_infra_2d = send_data_fms(diag_field_id, field, time, is_in, js_in, mask, & + rmask, ie_in, je_in, weight, err_msg) + +end function send_data_infra_2d + +!> Returns true if the argument data are successfully passed to a diagnostic manager +!! with the indicated unique reference id, false otherwise. 
+logical function send_data_infra_3d(diag_field_id, field, is_in, ie_in, js_in, je_in, ks_in, ke_in, &
+                                    time, mask, rmask, weight, err_msg)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  real, dimension(:,:,:), intent(in) :: field !< A 3-d array of floating point values being recorded
+  integer, optional, intent(in) :: is_in !< The starting i-index for the data being recorded
+  integer, optional, intent(in) :: ie_in !< The end i-index for the data being recorded
+  integer, optional, intent(in) :: js_in !< The starting j-index for the data being recorded
+  integer, optional, intent(in) :: je_in !< The end j-index for the data being recorded
+  integer, optional, intent(in) :: ks_in !< The starting k-index for the data being recorded
+  integer, optional, intent(in) :: ke_in !< The end k-index for the data being recorded
+  type(time_type), optional, intent(in) :: time !< The time for the current record
+  logical, dimension(:,:,:), optional, intent(in) :: mask !< An optional 3-d logical mask
+  real, dimension(:,:,:), optional, intent(in) :: rmask !< An optional 3-d mask array
+  real, optional, intent(in) :: weight !< A scalar weight factor to apply to the current
+                                       !! record if there is averaging in time
+  character(len=*), optional, intent(out) :: err_msg !< A log indicating the status of the post upon
+                                                     !! returning to the calling routine
+
+  send_data_infra_3d = send_data_fms(diag_field_id, field, time, is_in, js_in, ks_in, mask, &
+                                     rmask, ie_in, je_in, ke_in, weight, err_msg)
+
+end function send_data_infra_3d
+
+
+#ifdef OVERLOAD_R8
+!> Returns true if the argument data are successfully passed to a diagnostic manager
+!! with the indicated unique reference id, false otherwise.
+logical function send_data_infra_2d_r8(diag_field_id, field, is_in, ie_in, js_in, je_in, &
+                                       time, mask, rmask, weight, err_msg)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  real(kind=8), dimension(:,:), intent(in) :: field !< A 2-d array of values being recorded
+  integer, optional, intent(in) :: is_in !< The starting i-index for the data being recorded
+  integer, optional, intent(in) :: ie_in !< The end i-index for the data being recorded
+  integer, optional, intent(in) :: js_in !< The starting j-index for the data being recorded
+  integer, optional, intent(in) :: je_in !< The end j-index for the data being recorded
+  type(time_type), optional, intent(in) :: time !< The time for the current record
+  logical, dimension(:,:), optional, intent(in) :: mask !< An optional 2-d logical mask
+  real, dimension(:,:), optional, intent(in) :: rmask !< An optional 2-d mask array
+  real, optional, intent(in) :: weight !< A scalar weight factor to apply to the current
+                                       !! record if there is averaging in time
+  character(len=*), optional, intent(out) :: err_msg !< A log indicating the status of the post upon
+                                                     !! returning to the calling routine
+
+  send_data_infra_2d_r8 = send_data_fms(diag_field_id, field, time, is_in, js_in, mask, &
+                                        rmask, ie_in, je_in, weight, err_msg)
+
+end function send_data_infra_2d_r8
+
+!> Returns true if the argument data are successfully passed to a diagnostic manager
+!! with the indicated unique reference id, false otherwise.
+logical function send_data_infra_3d_r8(diag_field_id, field, is_in, ie_in, js_in, je_in, ks_in, ke_in, &
+                                       time, mask, rmask, weight, err_msg)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  real(kind=8), dimension(:,:,:), intent(in) :: field !< A 3-d array of floating point values being recorded
+  integer, optional, intent(in) :: is_in !< The starting i-index for the data being recorded
+  integer, optional, intent(in) :: ie_in !< The end i-index for the data being recorded
+  integer, optional, intent(in) :: js_in !< The starting j-index for the data being recorded
+  integer, optional, intent(in) :: je_in !< The end j-index for the data being recorded
+  integer, optional, intent(in) :: ks_in !< The starting k-index for the data being recorded
+  integer, optional, intent(in) :: ke_in !< The end k-index for the data being recorded
+  type(time_type), optional, intent(in) :: time !< The time for the current record
+  logical, dimension(:,:,:), optional, intent(in) :: mask !< An optional 3-d logical mask
+  real, dimension(:,:,:), optional, intent(in) :: rmask !< An optional 3-d mask array
+  real, optional, intent(in) :: weight !< A scalar weight factor to apply to the current
+                                       !! record if there is averaging in time
+  character(len=*), optional, intent(out) :: err_msg !< A log indicating the status of the post upon
+                                                     !! returning to the calling routine
+
+  send_data_infra_3d_r8 = send_data_fms(diag_field_id, field, time, is_in, js_in, ks_in, mask, rmask, &
+                                        ie_in, je_in, ke_in, weight, err_msg)
+
+end function send_data_infra_3d_r8
+#endif
+
+!> Add a real scalar attribute to a diagnostic field
+subroutine MOM_diag_field_add_attribute_scalar_r(diag_field_id, att_name, att_value)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  character(len=*), intent(in) :: att_name !< The name of the attribute
+  real, intent(in) :: att_value !< A real scalar value
+
+  call FMS_diag_field_add_attribute(diag_field_id, att_name, att_value)
+
+end subroutine MOM_diag_field_add_attribute_scalar_r
+
+!> Add an integer attribute to a diagnostic field
+subroutine MOM_diag_field_add_attribute_scalar_i(diag_field_id, att_name, att_value)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  character(len=*), intent(in) :: att_name !< The name of the attribute
+  integer, intent(in) :: att_value !< An integer scalar value
+
+  call FMS_diag_field_add_attribute(diag_field_id, att_name, att_value)
+
+end subroutine MOM_diag_field_add_attribute_scalar_i
+
+!> Add a character string attribute to a diagnostic field
+subroutine MOM_diag_field_add_attribute_scalar_c(diag_field_id, att_name, att_value)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  character(len=*), intent(in) :: att_name !< The name of the attribute
+  character(len=*), intent(in) :: att_value !< A character string value
+
+  call FMS_diag_field_add_attribute(diag_field_id, att_name, att_value)
+
+end subroutine MOM_diag_field_add_attribute_scalar_c
+
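+! The commented-out lines below sketch how a caller might attach metadata to a
+! registered diagnostic through these wrappers; the id_sst handle is hypothetical.
+!
+!   call MOM_diag_field_add_attribute_scalar_c(id_sst, 'cell_methods', 'area: mean')
+!   call MOM_diag_field_add_attribute_scalar_r(id_sst, 'valid_min', -2.0)
+!
+!> Add a 1-d array of real attributes to a diagnostic field
+subroutine MOM_diag_field_add_attribute_r1d(diag_field_id, att_name, att_value)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  character(len=*), intent(in) :: att_name !< The name of the attribute
+  real, dimension(:), intent(in) :: att_value !< An array of real values
+
+  call FMS_diag_field_add_attribute(diag_field_id, att_name, att_value)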
+
+end subroutine MOM_diag_field_add_attribute_r1d
+
+!> Add a 1-d array of integer attributes to a diagnostic field
+subroutine MOM_diag_field_add_attribute_i1d(diag_field_id, att_name, att_value)
+  integer, intent(in) :: diag_field_id !< The diagnostic manager identifier for this field
+  character(len=*), intent(in) :: att_name !< The name of the attribute
+  integer, dimension(:), intent(in) :: att_value !< An array of integer values
+
+  call FMS_diag_field_add_attribute(diag_field_id, att_name, att_value)
+
+end subroutine MOM_diag_field_add_attribute_i1d
+
+end module MOM_diag_manager_infra
diff --git a/config_src/infra/FMS2/MOM_domain_infra.F90 b/config_src/infra/FMS2/MOM_domain_infra.F90
new file mode 100644
index 0000000000..7c3424ca15
--- /dev/null
+++ b/config_src/infra/FMS2/MOM_domain_infra.F90
@@ -0,0 +1,1941 @@
+!> Describes the decomposed MOM domain and has routines for communications across PEs
+module MOM_domain_infra
+
+! This file is part of MOM6. See LICENSE.md for the license.
+
+use MOM_coms_infra, only : PE_here, root_PE, num_PEs
+use MOM_cpu_clock_infra, only : cpu_clock_begin, cpu_clock_end
+use MOM_error_infra, only : MOM_error=>MOM_err, NOTE, WARNING, FATAL
+
+use mpp_domains_mod, only : domain2D, domain1D
+use mpp_domains_mod, only : mpp_define_io_domain, mpp_define_domains, mpp_deallocate_domain
+use mpp_domains_mod, only : mpp_get_domain_components, mpp_get_domain_extents
+use mpp_domains_mod, only : mpp_get_compute_domain, mpp_get_data_domain, mpp_get_global_domain
+use mpp_domains_mod, only : mpp_get_boundary, mpp_update_domains
+use mpp_domains_mod, only : mpp_start_update_domains, mpp_complete_update_domains
+use mpp_domains_mod, only : mpp_create_group_update, mpp_do_group_update
+use mpp_domains_mod, only : mpp_reset_group_update_field, mpp_group_update_initialized
+use mpp_domains_mod, only : mpp_start_group_update, mpp_complete_group_update
+use mpp_domains_mod, only : mpp_compute_block_extent
+use mpp_domains_mod, only : mpp_broadcast_domain, mpp_redistribute, mpp_global_field
+use mpp_domains_mod, only : AGRID, BGRID_NE, CGRID_NE, SCALAR_PAIR, BITWISE_EXACT_SUM
+use mpp_domains_mod, only : CYCLIC_GLOBAL_DOMAIN, FOLD_NORTH_EDGE
+use mpp_domains_mod, only : To_East => WUPDATE, To_West => EUPDATE, Omit_Corners => EDGEUPDATE
+use mpp_domains_mod, only : To_North => SUPDATE, To_South => NUPDATE
+use mpp_domains_mod, only : CENTER, CORNER, NORTH_FACE => NORTH, EAST_FACE => EAST
+use fms_io_mod, only : file_exist, parse_mask_table
+use fms_affinity_mod, only : fms_affinity_init, fms_affinity_set, fms_affinity_get
+
+! This subroutine is not in MOM6/src but may be required by legacy drivers
+use mpp_domains_mod, only : global_field_sum => mpp_global_sum
+
+! The `group_pass_type` fields are never accessed, so we keep it as an FMS type
+use mpp_domains_mod, only : group_pass_type => mpp_group_update_type
+
+implicit none ; private
+
+! These types are inherited from mpp, but are treated as opaque here.
+public :: domain2D, domain1D, group_pass_type
+! These interfaces are actually implemented or have explicit interfaces in this file.
+public :: create_MOM_domain, clone_MOM_domain, get_domain_components, get_domain_extent
+public :: deallocate_MOM_domain, get_global_shape, compute_block_extent
+public :: pass_var, pass_vector, fill_symmetric_edges, rescale_comp_data
+public :: pass_var_start, pass_var_complete, pass_vector_start, pass_vector_complete
+public :: create_group_pass, do_group_pass, start_group_pass, complete_group_pass
+public :: redistribute_array, broadcast_domain, global_field
+public :: get_simple_array_i_ind, get_simple_array_j_ind
+public :: MOM_thread_affinity_set, set_MOM_thread_affinity
+! These are encoding constant parameters.
+public :: To_East, To_West, To_North, To_South, To_All, Omit_Corners
+public :: AGRID, BGRID_NE, CGRID_NE, SCALAR_PAIR
+public :: CORNER, CENTER, NORTH_FACE, EAST_FACE
+! These are no longer used by MOM6 because the reproducing sum works so well, but they are
+! still referenced by some of the non-GFDL couplers.
+public :: global_field_sum, BITWISE_EXACT_SUM
+
+!> Do a halo update on an array
+interface pass_var
+  module procedure pass_var_3d, pass_var_2d
+end interface pass_var
+
+!> Do a halo update on a pair of arrays representing the two components of a vector
+interface pass_vector
+  module procedure pass_vector_3d, pass_vector_2d
+end interface pass_vector
+
+!> Initiate a non-blocking halo update on an array
+interface pass_var_start
+  module procedure pass_var_start_3d, pass_var_start_2d
+end interface pass_var_start
+
+!> Complete a non-blocking halo update on an array
+interface pass_var_complete
+  module procedure pass_var_complete_3d, pass_var_complete_2d
+end interface pass_var_complete
+
+!> Initiate a halo update on a pair of arrays representing the two components of a vector
+interface pass_vector_start
+  module procedure pass_vector_start_3d, pass_vector_start_2d
+end interface pass_vector_start
+
+!> Complete a halo update on a pair of arrays representing the two components of a vector
+interface pass_vector_complete
+  module procedure pass_vector_complete_3d, pass_vector_complete_2d
+end interface pass_vector_complete
+
+!> Set up a group of halo updates
+interface create_group_pass
+  module procedure create_var_group_pass_2d
+  module procedure create_var_group_pass_3d
+  module procedure create_vector_group_pass_2d
+  module procedure create_vector_group_pass_3d
+end interface create_group_pass
+
+!> Do a set of halo updates that fill in the values at the duplicated edges
+!! of a staggered symmetric memory domain
+interface fill_symmetric_edges
+  module procedure fill_vector_symmetric_edges_2d !, fill_vector_symmetric_edges_3d
+!  module procedure fill_scalar_symmetric_edges_2d, fill_scalar_symmetric_edges_3d
+end interface fill_symmetric_edges
+
+!> Rescale the values of an array in its computational domain by a constant factor
+interface rescale_comp_data
+  module procedure rescale_comp_data_4d, rescale_comp_data_3d, rescale_comp_data_2d
+end interface rescale_comp_data
+
+!> Pass an array from one MOM domain to another
+interface redistribute_array
+  module procedure redistribute_array_3d, redistribute_array_2d
+end interface redistribute_array
+
+!> Copy one MOM_domain_type into another
+interface clone_MOM_domain
+  module procedure clone_MD_to_MD, clone_MD_to_d2D
+end interface clone_MOM_domain
+
+!> Extract the 1-d domain components from a MOM_domain or domain2d
+interface get_domain_components
+  module procedure get_domain_components_MD, get_domain_components_d2D
+end interface get_domain_components
+
+!> Returns the index ranges that have been stored in a MOM_domain_type
+interface get_domain_extent
+  module procedure get_domain_extent_MD, get_domain_extent_d2D
+end interface get_domain_extent
+
+
+!> The MOM_domain_type contains information about the domain decomposition.
+type, public :: MOM_domain_type
+  character(len=64) :: name !< The name of this domain
+  type(domain2D), pointer :: mpp_domain => NULL() !< The FMS domain with halos
+                                !! on this processor, centered at h points.
+  type(domain2D), pointer :: mpp_domain_d2 => NULL() !< A coarse FMS domain with halos
+                                !! on this processor, centered at h points.
+  integer :: niglobal !< The total horizontal i-domain size.
+  integer :: njglobal !< The total horizontal j-domain size.
+  integer :: nihalo !< The i-halo size in memory.
+  integer :: njhalo !< The j-halo size in memory.
+  logical :: symmetric !< True if symmetric memory is used with this domain.
+  logical :: nonblocking_updates !< If true, non-blocking halo updates are
+                                 !! allowed. The default is .false. (for now).
+  logical :: thin_halo_updates !< If true, optional arguments may be used to
+                               !! specify the width of the halos that are
+                               !! updated with each call.
+  integer :: layout(2) !< This domain's processor layout. This is
+                       !! saved to enable the construction of related
+                       !! new domains with different resolutions or
+                       !! other properties.
+  integer :: io_layout(2) !< The IO-layout used with this domain.
+  integer :: X_FLAGS !< Flag that specifies the properties of the
+                     !! domain in the i-direction in a define_domain call.
+  integer :: Y_FLAGS !< Flag that specifies the properties of the
+                     !! domain in the j-direction in a define_domain call.
+  logical, pointer :: maskmap(:,:) => NULL() !< A pointer to an array indicating
+                     !! which logical processors are actually used for
+                     !! the ocean code. The other logical processors
+                     !! would contain only land points and are not
+                     !! assigned to actual processors. This need not be
+                     !! assigned if all logical processors are used.
+end type MOM_domain_type
+
+integer, parameter :: To_All = To_East + To_West + To_North + To_South !< A flag for passing in all directions
+
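+! The commented-out lines below are an illustrative sketch (not part of this
+! module) of how the pass_var and pass_vector interfaces declared above might
+! be called; the names G, h, u, and v are hypothetical.
+!
+!   call pass_var(h, G%Domain)                       ! Blocking tracer-point halo update
+!   call pass_vector(u, v, G%Domain, direction=To_All, stagger=CGRID_NE)
+!
+contains
+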
+!> pass_var_3d does a halo update for a three-dimensional array.
+subroutine pass_var_3d(array, MOM_dom, sideflag, complete, position, halo, &
+                       clock)
+  real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points
+                                            !! exchanged.
+  type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain
+                                            !! needed to determine where data should be
+                                            !! sent.
+  integer, optional, intent(in) :: sideflag !< An optional integer indicating which
+      !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST,
+      !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east,
+      !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted.
+  logical, optional, intent(in) :: complete !< An optional argument indicating whether the
+                                            !! halo updates should be completed before
+                                            !! progress resumes. Omitting complete is the
+                                            !! same as setting complete to .true.
+  integer, optional, intent(in) :: position !< An optional argument indicating the position.
+                                            !! This is CENTER by default and is often CORNER,
+                                            !! but could also be EAST_FACE or NORTH_FACE.
+  integer, optional, intent(in) :: halo !< The size of the halo to update - the full
+                                        !! halo by default.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                                         !! started then stopped to time this routine.
+
+  integer :: dirflag
+  logical :: block_til_complete
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif
+
+  dirflag = To_All ! 60
+  if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif
+  block_til_complete = .true.
+  if (present(complete)) block_til_complete = complete
+
+  if (present(halo) .and. MOM_dom%thin_halo_updates) then
+    call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, &
+                            complete=block_til_complete, position=position, &
+                            whalo=halo, ehalo=halo, shalo=halo, nhalo=halo)
+  else
+    call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, &
+                            complete=block_til_complete, position=position)
+  endif
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif
+
+end subroutine pass_var_3d
+
+!> pass_var_2d does a halo update for a two-dimensional array.
+subroutine pass_var_2d(array, MOM_dom, sideflag, complete, position, halo, inner_halo, clock)
+  real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points
+                                            !! exchanged.
+  type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain
+                                            !! needed to determine where data should be sent.
+  integer, optional, intent(in) :: sideflag !< An optional integer indicating which
+      !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST,
+      !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east,
+      !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted.
+  logical, optional, intent(in) :: complete !< An optional argument indicating whether the
+                                            !! halo updates should be completed before
+                                            !! progress resumes. Omitting complete is the
+                                            !! same as setting complete to .true.
+  integer, optional, intent(in) :: position !< An optional argument indicating the position.
+                                            !! This is CENTER by default and is often CORNER,
+                                            !! but could also be EAST_FACE or NORTH_FACE.
+  integer, optional, intent(in) :: halo !< The size of the halo to update - the full halo
+                                        !! by default.
+  integer, optional, intent(in) :: inner_halo !< The size of an inner halo to avoid updating,
+                                              !! or 0 to avoid updating symmetric memory
+                                              !! computational domain points. Setting this >=0
+                                              !! also enforces that complete=.true.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                                         !! started then stopped to time this routine.
+
+  ! Local variables
+  real, allocatable, dimension(:,:) :: tmp
+  integer :: pos, i_halo, j_halo
+  integer :: isc, iec, jsc, jec, isd, ied, jsd, jed, IscB, IecB, JscB, JecB
+  integer :: inner, i, j, isfw, iefw, isfe, iefe, jsfs, jefs, jsfn, jefn
+  integer :: dirflag
+  logical :: block_til_complete
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif
+
+  dirflag = To_All ! 60
+  if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif
+  block_til_complete = .true. ; if (present(complete)) block_til_complete = complete
+  pos = CENTER ; if (present(position)) pos = position
+
+  if (present(inner_halo)) then ; if (inner_halo >= 0) then
+    ! Store the original values.
+    allocate(tmp(size(array,1), size(array,2)))
+    tmp(:,:) = array(:,:)
+    block_til_complete = .true.
+  endif ; endif
+
+  if (present(halo) .and. MOM_dom%thin_halo_updates) then
+    call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, &
+                            complete=block_til_complete, position=position, &
+                            whalo=halo, ehalo=halo, shalo=halo, nhalo=halo)
+  else
+    call mpp_update_domains(array, MOM_dom%mpp_domain, flags=dirflag, &
+                            complete=block_til_complete, position=position)
+  endif
+
+  if (present(inner_halo)) then ; if (inner_halo >= 0) then
+    call mpp_get_compute_domain(MOM_dom%mpp_domain, isc, iec, jsc, jec)
+    call mpp_get_data_domain(MOM_dom%mpp_domain, isd, ied, jsd, jed)
+    ! Convert to local indices for arrays starting at 1.
+    isc = isc - (isd-1) ; iec = iec - (isd-1) ; ied = ied - (isd-1) ; isd = 1
+    jsc = jsc - (jsd-1) ; jec = jec - (jsd-1) ; jed = jed - (jsd-1) ; jsd = 1
+    i_halo = min(inner_halo, isc-1) ; j_halo = min(inner_halo, jsc-1)
+
+    ! Figure out the array index extents of the eastern, western, northern and southern regions to copy.
+    if (pos == CENTER) then
+      if (size(array,1) == ied) then
+        isfw = isc - i_halo ; iefw = isc ; isfe = iec ; iefe = iec + i_halo
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for CENTER array.") ; endif
+      if (size(array,2) == jed) then
+        jsfs = jsc - j_halo ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for CENTER array.") ; endif
+    elseif (pos == CORNER) then
+      if (size(array,1) == ied) then
+        isfw = max(isc - (i_halo+1), 1) ; iefw = isc ; isfe = iec ; iefe = iec + i_halo
+      elseif (size(array,1) == ied+1) then
+        isfw = isc - i_halo ; iefw = isc+1 ; isfe = iec+1 ; iefe = min(iec + 1 + i_halo, ied+1)
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for CORNER array.") ; endif
+      if (size(array,2) == jed) then
+        jsfs = max(jsc - (j_halo+1), 1) ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo
+      elseif (size(array,2) == jed+1) then
+        jsfs = jsc - j_halo ; jefs = jsc+1 ; jsfn = jec+1 ; jefn = min(jec + 1 + j_halo, jed+1)
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for CORNER array.") ; endif
+    elseif (pos == NORTH_FACE) then
+      if (size(array,1) == ied) then
+        isfw = isc - i_halo ; iefw = isc ; isfe = iec ; iefe = iec + i_halo
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for NORTH_FACE array.") ; endif
+      if (size(array,2) == jed) then
+        jsfs = max(jsc - (j_halo+1), 1) ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo
+      elseif (size(array,2) == jed+1) then
+        jsfs = jsc - j_halo ; jefs = jsc+1 ; jsfn = jec+1 ; jefn = min(jec + 1 + j_halo, jed+1)
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for NORTH_FACE array.") ; endif
+    elseif (pos == EAST_FACE) then
+      if (size(array,1) == ied) then
+        isfw = max(isc - (i_halo+1), 1) ; iefw = isc ; isfe = iec ; iefe = iec + i_halo
+      elseif (size(array,1) == ied+1) then
+        isfw = isc - i_halo ; iefw = isc+1 ; isfe = iec+1 ; iefe = min(iec + 1 + i_halo, ied+1)
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong i-size for EAST_FACE array.") ; endif
+      if (size(array,2) == jed) then
+        jsfs = jsc - j_halo ; jefs = jsc ; jsfn = jec ; jefn = jec + j_halo
+      else ; call MOM_error(FATAL, "pass_var_2d: wrong j-size for EAST_FACE array.") ; endif
+    else
+      call MOM_error(FATAL, "pass_var_2d: Unrecognized position")
+    endif
+
+    ! Copy back the stored inner halo points
+    do j=jsfs,jefn ; do i=isfw,iefw ; array(i,j) = tmp(i,j) ; enddo ; enddo
+    do j=jsfs,jefn ; do i=isfe,iefe ; array(i,j) = tmp(i,j) ; enddo ; enddo
+    do j=jsfs,jefs ; do i=isfw,iefe ; array(i,j) = tmp(i,j) ; enddo ; enddo
+    do j=jsfn,jefn ; do i=isfw,iefe ; array(i,j) = tmp(i,j) ; enddo ; enddo
+
+    deallocate(tmp)
+  endif ; endif
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif
+
+end subroutine pass_var_2d
+
+!> pass_var_start_2d starts a halo update for a two-dimensional array.
+function pass_var_start_2d(array, MOM_dom, sideflag, position, complete, halo, &
+                           clock)
+  real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points
+                                            !! exchanged.
+  type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain
+                                            !! needed to determine where data should be
+                                            !! sent.
+  integer, optional, intent(in) :: sideflag !< An optional integer indicating which
+      !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST,
+      !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east,
+      !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted.
+  integer, optional, intent(in) :: position !< An optional argument indicating the position.
+                                            !! This is CENTER by default and is often CORNER,
+                                            !! but could also be EAST_FACE or NORTH_FACE.
+  logical, optional, intent(in) :: complete !< An optional argument indicating whether the
+                                            !! halo updates should be completed before
+                                            !! progress resumes. Omitting complete is the
+                                            !! same as setting complete to .true.
+  integer, optional, intent(in) :: halo !< The size of the halo to update - the full
+                                        !! halo by default.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                                         !! started then stopped to time this routine.
+  integer :: pass_var_start_2d !< The integer index for this update.
+
+  integer :: dirflag
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif
+
+  dirflag = To_All ! 60
+  if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif
+
+  if (present(halo) .and. MOM_dom%thin_halo_updates) then
+    pass_var_start_2d = mpp_start_update_domains(array, MOM_dom%mpp_domain, &
+                            flags=dirflag, position=position, &
+                            whalo=halo, ehalo=halo, shalo=halo, nhalo=halo)
+  else
+    pass_var_start_2d = mpp_start_update_domains(array, MOM_dom%mpp_domain, &
+                            flags=dirflag, position=position)
+  endif
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif
+
+end function pass_var_start_2d
+
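+! The commented-out lines below sketch the intended pairing of the non-blocking
+! start/complete interfaces when MOM_dom%nonblocking_updates is true; the names
+! G, h, and id_h are hypothetical.
+!
+!   id_h = pass_var_start(h, G%Domain)
+!   ...  ! Do other work that does not touch the halos of h.
+!   call pass_var_complete(id_h, h, G%Domain)
+!
+!> pass_var_start_3d starts a halo update for a three-dimensional array.
+function pass_var_start_3d(array, MOM_dom, sideflag, position, complete, halo, &
+                           clock)
+  real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points
+                                            !! exchanged.
+  type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain
+                                            !! needed to determine where data should be
+                                            !! sent.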
+ integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before + !! progress resumes. Omitting complete is the + !! same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + integer :: pass_var_start_3d !< The integer index for this update. + + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + pass_var_start_3d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & + flags=dirflag, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + pass_var_start_3d = mpp_start_update_domains(array, MOM_dom%mpp_domain, & + flags=dirflag, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end function pass_var_start_3d + +!> pass_var_complete_2d completes a halo update for a two-dimensional array. +subroutine pass_var_complete_2d(id_update, array, MOM_dom, sideflag, position, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has + !! been returned from a previous call to + !! pass_var_start. + real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (present(halo) .and. 
MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_var_complete_2d + +!> pass_var_complete_3d completes a halo update for a three-dimensional array. +subroutine pass_var_complete_3d(id_update, array, MOM_dom, sideflag, position, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has + !! been returned from a previous call to + !! pass_var_start. + real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. + integer, optional, intent(in) :: position !< An optional argument indicating the position. + !! This is CENTER by default and is often CORNER, + !! but could also be EAST_FACE or NORTH_FACE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + dirflag = To_All ! 60 + if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, array, MOM_dom%mpp_domain, & + flags=dirflag, position=position) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_var_complete_3d + +!> pass_vector_2d does a halo update for a pair of two-dimensional arrays +!! representing the components of a two-dimensional horizontal vector. +subroutine pass_vector_2d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! 
scalars discretized at the typical vector component locations. For example, TO_EAST sends
+      !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL
+      !! is the default if omitted.
+  integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID,
+      !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are
+      !! discretized. Omitting stagger is the same as setting it to CGRID_NE.
+  logical, optional, intent(in) :: complete !< An optional argument indicating whether the
+      !! halo updates should be completed before progress resumes.
+      !! Omitting complete is the same as setting complete to .true.
+  integer, optional, intent(in) :: halo !< The size of the halo to update - the full
+                                        !! halo by default.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                                         !! started then stopped to time this routine.
+
+  ! Local variables
+  integer :: stagger_local
+  integer :: dirflag
+  logical :: block_til_complete
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif
+
+  stagger_local = CGRID_NE ! Default value for type of grid
+  if (present(stagger)) stagger_local = stagger
+
+  dirflag = To_All ! 60
+  if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif
+  block_til_complete = .true.
+  if (present(complete)) block_til_complete = complete
+
+  if (present(halo) .and. MOM_dom%thin_halo_updates) then
+    call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, &
+                            gridtype=stagger_local, complete = block_til_complete, &
+                            whalo=halo, ehalo=halo, shalo=halo, nhalo=halo)
+  else
+    call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, &
+                            gridtype=stagger_local, complete = block_til_complete)
+  endif
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif
+
+end subroutine pass_vector_2d
+
+!> fill_vector_symmetric_edges_2d does an unusual set of halo updates that only
+!! fill in the values at the edge of a pair of symmetric memory two-dimensional
+!! arrays representing the components of a two-dimensional horizontal vector.
+!! If symmetric memory is not being used, this subroutine does nothing except to
+!! possibly turn optional cpu clocks on or off.
+subroutine fill_vector_symmetric_edges_2d(u_cmpt, v_cmpt, MOM_dom, stagger, scalar, &
+                                          clock)
+  real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector
+                                            !! pair which is having its halos points
+                                            !! exchanged.
+  real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the
+                                            !! vector pair which is having its halos points
+                                            !! exchanged.
+  type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain
+                                            !! needed to determine where data should be
+                                            !! sent.
+  integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID,
+      !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are
+      !! discretized. Omitting stagger is the same as setting it to CGRID_NE.
+  logical, optional, intent(in) :: scalar !< An optional argument indicating whether the
+      !! two arrays are a pair of non-directional scalars rather than vector components.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                                         !! started then stopped to time this routine.
+
+  ! 
Local variables + integer :: stagger_local + integer :: dirflag + integer :: i, j, isc, iec, jsc, jec, isd, ied, jsd, jed, IscB, IecB, JscB, JecB + real, allocatable, dimension(:) :: sbuff_x, sbuff_y, wbuff_x, wbuff_y + logical :: block_til_complete + + if (.not. MOM_dom%symmetric) then + return + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + if (.not.(stagger_local == CGRID_NE .or. stagger_local == BGRID_NE)) return + + call mpp_get_compute_domain(MOM_dom%mpp_domain, isc, iec, jsc, jec) + call mpp_get_data_domain(MOM_dom%mpp_domain, isd, ied, jsd, jed) + + ! Adjust isc, etc., to account for the fact that the input arrays indices all + ! start at 1 (and are effectively on a SW grid!). + isc = isc - (isd-1) ; iec = iec - (isd-1) + jsc = jsc - (jsd-1) ; jec = jec - (jsd-1) + IscB = isc ; IecB = iec+1 ; JscB = jsc ; JecB = jec+1 + + dirflag = To_All ! 60 + if (present(scalar)) then ; if (scalar) dirflag = To_All+SCALAR_PAIR ; endif + + if (stagger_local == CGRID_NE) then + allocate(wbuff_x(jsc:jec)) ; allocate(sbuff_y(isc:iec)) + wbuff_x(:) = 0.0 ; sbuff_y(:) = 0.0 + call mpp_get_boundary(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + wbufferx=wbuff_x, sbuffery=sbuff_y, & + gridtype=CGRID_NE) + do i=isc,iec + v_cmpt(i,JscB) = sbuff_y(i) + enddo + do j=jsc,jec + u_cmpt(IscB,j) = wbuff_x(j) + enddo + deallocate(wbuff_x) ; deallocate(sbuff_y) + elseif (stagger_local == BGRID_NE) then + allocate(wbuff_x(JscB:JecB)) ; allocate(sbuff_x(IscB:IecB)) + allocate(wbuff_y(JscB:JecB)) ; allocate(sbuff_y(IscB:IecB)) + wbuff_x(:) = 0.0 ; wbuff_y(:) = 0.0 ; sbuff_x(:) = 0.0 ; sbuff_y(:) = 0.0 + call mpp_get_boundary(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + wbufferx=wbuff_x, sbufferx=sbuff_x, & + wbuffery=wbuff_y, sbuffery=sbuff_y, & + gridtype=BGRID_NE) + do I=IscB,IecB + u_cmpt(I,JscB) = sbuff_x(I) ; v_cmpt(I,JscB) = sbuff_y(I) + enddo + do J=JscB,JecB + u_cmpt(IscB,J) = wbuff_x(J) ; v_cmpt(IscB,J) = wbuff_y(J) + enddo + deallocate(wbuff_x) ; deallocate(sbuff_x) + deallocate(wbuff_y) ; deallocate(sbuff_y) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine fill_vector_symmetric_edges_2d + +!> pass_vector_3d does a halo update for a pair of three-dimensional arrays +!! representing the components of a three-dimensional horizontal vector. +subroutine pass_vector_3d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. 
+ integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before progress resumes. + !! Omitting complete is the same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + + ! Local variables + integer :: stagger_local + integer :: dirflag + logical :: block_til_complete + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + block_til_complete = .true. + if (present(complete)) block_til_complete = complete + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + gridtype=stagger_local, complete = block_til_complete, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_update_domains(u_cmpt, v_cmpt, MOM_dom%mpp_domain, flags=dirflag, & + gridtype=stagger_local, complete = block_til_complete) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_vector_3d + +!> pass_vector_start_2d starts a halo update for a pair of two-dimensional arrays +!! representing the components of a two-dimensional horizontal vector. +function pass_vector_start_2d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before progress resumes. + !! Omitting complete is the same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. 
+ integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + integer :: pass_vector_start_2d !< The integer index for this + !! update. + + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + pass_vector_start_2d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + pass_vector_start_2d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end function pass_vector_start_2d + +!> pass_vector_start_3d starts a halo update for a pair of three-dimensional arrays +!! representing the components of a three-dimensional horizontal vector. +function pass_vector_start_3d(u_cmpt, v_cmpt, MOM_dom, direction, stagger, complete, halo, & + clock) + real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + logical, optional, intent(in) :: complete !< An optional argument indicating whether the + !! halo updates should be completed before progress resumes. + !! Omitting complete is the same as setting complete to .true. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + integer :: pass_vector_start_3d !< The integer index for this + !! update. + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. 
MOM_dom%thin_halo_updates) then + pass_vector_start_3d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + pass_vector_start_3d = mpp_start_update_domains(u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end function pass_vector_start_3d + +!> pass_vector_complete_2d completes a halo update for a pair of two-dimensional arrays +!! representing the components of a two-dimensional horizontal vector. +subroutine pass_vector_complete_2d(id_update, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has been + !! returned from a previous call to + !! pass_var_start. + real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_vector_complete_2d + +!> pass_vector_complete_3d completes a halo update for a pair of three-dimensional +!! arrays representing the components of a three-dimensional horizontal vector. 
+subroutine pass_vector_complete_3d(id_update, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, & + clock) + integer, intent(in) :: id_update !< The integer id of this update which has been + !! returned from a previous call to + !! pass_var_start. + real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector + !! pair which is having its halos points + !! exchanged. + real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the + !! vector pair which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: direction !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional + !! scalars discretized at the typical vector component locations. For example, TO_EAST sends + !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL + !! is the default if omitted. + integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID, + !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are + !! discretized. Omitting stagger is the same as setting it to CGRID_NE. + integer, optional, intent(in) :: halo !< The size of the halo to update - the full + !! halo by default. + integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be + !! started then stopped to time this routine. + ! Local variables + integer :: stagger_local + integer :: dirflag + + if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif + + stagger_local = CGRID_NE ! Default value for type of grid + if (present(stagger)) stagger_local = stagger + + dirflag = To_All ! 60 + if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif + + if (present(halo) .and. MOM_dom%thin_halo_updates) then + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local, & + whalo=halo, ehalo=halo, shalo=halo, nhalo=halo) + else + call mpp_complete_update_domains(id_update, u_cmpt, v_cmpt, & + MOM_dom%mpp_domain, flags=dirflag, gridtype=stagger_local) + endif + + if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif + +end subroutine pass_vector_complete_3d + +!> create_var_group_pass_2d sets up a group of two-dimensional array halo updates. +subroutine create_var_group_pass_2d(group, array, MOM_dom, sideflag, position, & + halo, clock) + type(group_pass_type), intent(inout) :: group !< The data type that store information for + !! group update. This data will be used in + !! do_group_pass. + real, dimension(:,:), intent(inout) :: array !< The array which is having its halos points + !! exchanged. + type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain + !! needed to determine where data should be + !! sent. + integer, optional, intent(in) :: sideflag !< An optional integer indicating which + !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST, + !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east, + !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted. 
+  integer, optional, intent(in) :: position !< An optional argument indicating the position.
+                                            !! This is CENTER by default and is often CORNER,
+                                            !! but could also be EAST_FACE or NORTH_FACE.
+  integer, optional, intent(in) :: halo !< The size of the halo to update - the full
+                                        !! halo by default.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                                         !! started then stopped to time this routine.
+  ! Local variables
+  integer :: dirflag
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif
+
+  dirflag = To_All ! 60
+  if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif
+
+  if (mpp_group_update_initialized(group)) then
+    call mpp_reset_group_update_field(group, array)
+  elseif (present(halo) .and. MOM_dom%thin_halo_updates) then
+    call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, &
+                                 position=position, whalo=halo, ehalo=halo, &
+                                 shalo=halo, nhalo=halo)
+  else
+    call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, &
+                                 position=position)
+  endif
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif
+
+end subroutine create_var_group_pass_2d
+
+!> create_var_group_pass_3d sets up a group of three-dimensional array halo updates.
+subroutine create_var_group_pass_3d(group, array, MOM_dom, sideflag, position, halo, &
+                                    clock)
+  type(group_pass_type), intent(inout) :: group !< The data type that stores information for
+                                                !! group update. This data will be used in
+                                                !! do_group_pass.
+  real, dimension(:,:,:), intent(inout) :: array !< The array which is having its halo points
+                                                 !! exchanged.
+  type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain
+                                                  !! needed to determine where data should be
+                                                  !! sent.
+  integer, optional, intent(in) :: sideflag !< An optional integer indicating which
+      !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST,
+      !! TO_NORTH, and TO_SOUTH. For example, TO_EAST sends the data to the processor to the east,
+      !! so the halos on the western side are filled. TO_ALL is the default if sideflag is omitted.
+  integer, optional, intent(in) :: position !< An optional argument indicating the position.
+                                            !! This is CENTER by default and is often CORNER,
+                                            !! but could also be EAST_FACE or NORTH_FACE.
+  integer, optional, intent(in) :: halo !< The size of the halo to update - the full
+                                        !! halo by default.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                                         !! started then stopped to time this routine.
+  ! Local variables
+  integer :: dirflag
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif
+
+  dirflag = To_All ! 60
+  if (present(sideflag)) then ; if (sideflag > 0) dirflag = sideflag ; endif
+
+  if (mpp_group_update_initialized(group)) then
+    call mpp_reset_group_update_field(group, array)
+  elseif (present(halo) .and. MOM_dom%thin_halo_updates) then
+    call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, &
+                                 position=position, whalo=halo, ehalo=halo, &
+                                 shalo=halo, nhalo=halo)
+  else
+    call mpp_create_group_update(group, array, MOM_dom%mpp_domain, flags=dirflag, &
+                                 position=position)
+  endif
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif
+
+end subroutine create_var_group_pass_3d
+
+!> create_vector_group_pass_2d sets up a group of two-dimensional vector halo updates.
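+!!
+!! A hypothetical usage sketch (the generic interface names create_vector_group_pass and
+!! do_group_pass are assumed): several fields can be registered in one group so that all
+!! of their halos are exchanged in a single communication step:
+!!
+!!   type(group_pass_type) :: pass_uv
+!!   call create_vector_group_pass(pass_uv, u, v, domain)
+!!   call do_group_pass(pass_uv, domain)  ! or start_group_pass / complete_group_pass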
+subroutine create_vector_group_pass_2d(group, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, &
+                                       clock)
+  type(group_pass_type), intent(inout) :: group !< The data type that stores information for
+                                                !! group update. This data will be used in
+                                                !! do_group_pass.
+  real, dimension(:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector
+                                                !! pair which is having its halo points
+                                                !! exchanged.
+  real, dimension(:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the
+                                                !! vector pair which is having its halo points
+                                                !! exchanged.
+
+  type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain
+                                                  !! needed to determine where data should be
+                                                  !! sent.
+  integer, optional, intent(in) :: direction !< An optional integer indicating which
+      !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST,
+      !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional
+      !! scalars discretized at the typical vector component locations. For example, TO_EAST sends
+      !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL
+      !! is the default if omitted.
+  integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID,
+      !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are
+      !! discretized. Omitting stagger is the same as setting it to CGRID_NE.
+  integer, optional, intent(in) :: halo !< The size of the halo to update - the full
+                                        !! halo by default.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                                         !! started then stopped to time this routine.
+  ! Local variables
+  integer :: stagger_local
+  integer :: dirflag
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif
+
+  stagger_local = CGRID_NE ! Default value for type of grid
+  if (present(stagger)) stagger_local = stagger
+
+  dirflag = To_All ! 60
+  if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif
+
+  if (mpp_group_update_initialized(group)) then
+    call mpp_reset_group_update_field(group, u_cmpt, v_cmpt)
+  elseif (present(halo) .and. MOM_dom%thin_halo_updates) then
+    call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, &
+               flags=dirflag, gridtype=stagger_local, whalo=halo, ehalo=halo, &
+               shalo=halo, nhalo=halo)
+  else
+    call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, &
+               flags=dirflag, gridtype=stagger_local)
+  endif
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif
+
+end subroutine create_vector_group_pass_2d
+
+!> create_vector_group_pass_3d sets up a group of three-dimensional vector halo updates.
+subroutine create_vector_group_pass_3d(group, u_cmpt, v_cmpt, MOM_dom, direction, stagger, halo, &
+                                       clock)
+  type(group_pass_type), intent(inout) :: group !< The data type that stores information for
+                                                !! group update. This data will be used in
+                                                !! do_group_pass.
+  real, dimension(:,:,:), intent(inout) :: u_cmpt !< The nominal zonal (u) component of the vector
+                                                  !! pair which is having its halo points
+                                                  !! exchanged.
+  real, dimension(:,:,:), intent(inout) :: v_cmpt !< The nominal meridional (v) component of the
+                                                  !! vector pair which is having its halo points
+                                                  !! exchanged.
+
+  type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain
+                                                  !! needed to determine where data should be
+                                                  !! sent.
+  integer, optional, intent(in) :: direction !< An optional integer indicating which
+      !! directions the data should be sent. It is TO_ALL or the sum of any of TO_EAST, TO_WEST,
+      !! TO_NORTH, and TO_SOUTH, possibly plus SCALAR_PAIR if these are paired non-directional
+      !! scalars discretized at the typical vector component locations. For example, TO_EAST sends
+      !! the data to the processor to the east, so the halos on the western side are filled. TO_ALL
+      !! is the default if omitted.
+  integer, optional, intent(in) :: stagger !< An optional flag, which may be one of A_GRID,
+      !! BGRID_NE, or CGRID_NE, indicating where the two components of the vector are
+      !! discretized. Omitting stagger is the same as setting it to CGRID_NE.
+  integer, optional, intent(in) :: halo !< The size of the halo to update - the full
+                                        !! halo by default.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                                         !! started then stopped to time this routine.
+
+  ! Local variables
+  integer :: stagger_local
+  integer :: dirflag
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif
+
+  stagger_local = CGRID_NE ! Default value for type of grid
+  if (present(stagger)) stagger_local = stagger
+
+  dirflag = To_All ! 60
+  if (present(direction)) then ; if (direction > 0) dirflag = direction ; endif
+
+  if (mpp_group_update_initialized(group)) then
+    call mpp_reset_group_update_field(group, u_cmpt, v_cmpt)
+  elseif (present(halo) .and. MOM_dom%thin_halo_updates) then
+    call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, &
+               flags=dirflag, gridtype=stagger_local, whalo=halo, ehalo=halo, &
+               shalo=halo, nhalo=halo)
+  else
+    call mpp_create_group_update(group, u_cmpt, v_cmpt, MOM_dom%mpp_domain, &
+               flags=dirflag, gridtype=stagger_local)
+  endif
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif
+
+end subroutine create_vector_group_pass_3d
+
+!> do_group_pass carries out a group halo update.
+subroutine do_group_pass(group, MOM_dom, clock)
+  type(group_pass_type), intent(inout) :: group !< The data type that stores information for
+                                                !! group update. This data will be used in
+                                                !! do_group_pass.
+  type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain
+                                                  !! needed to determine where data should be
+                                                  !! sent.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                                         !! started then stopped to time this routine.
+  real :: d_type  ! A dummy variable whose value is unused, but whose type sets
+                  ! the type of the fields updated by this group pass
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif
+
+  call mpp_do_group_update(group, MOM_dom%mpp_domain, d_type)
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif
+
+end subroutine do_group_pass
+
+!> start_group_pass starts a group halo update.
+subroutine start_group_pass(group, MOM_dom, clock)
+  type(group_pass_type), intent(inout) :: group !< The data type that stores information for
+                                                !! group update. This data will be used in
+                                                !! start_group_pass.
+  type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain
+                                                  !! needed to determine where data should be
+                                                  !! sent.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                                         !! started then stopped to time this routine.
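+
+  ! A call to start_group_pass should be matched by a later call to complete_group_pass
+  ! on the same group before the halo points of any of the group's fields are used.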
+
+  real :: d_type  ! A dummy variable whose value is unused, but whose type sets
+                  ! the type of the fields updated by this group pass
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif
+
+  call mpp_start_group_update(group, MOM_dom%mpp_domain, d_type)
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif
+
+end subroutine start_group_pass
+
+!> complete_group_pass completes a group halo update.
+subroutine complete_group_pass(group, MOM_dom, clock)
+  type(group_pass_type), intent(inout) :: group !< The data type that stores information for
+                                                !! group update. This data will be used in
+                                                !! complete_group_pass.
+  type(MOM_domain_type), intent(inout) :: MOM_dom !< The MOM_domain_type containing the mpp_domain
+                                                  !! needed to determine where data should be
+                                                  !! sent.
+  integer, optional, intent(in) :: clock !< The handle for a cpu time clock that should be
+                                         !! started then stopped to time this routine.
+  real :: d_type  ! A dummy variable whose value is unused, but whose type sets
+                  ! the type of the fields updated by this group pass
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_begin(clock) ; endif
+
+  call mpp_complete_group_update(group, MOM_dom%mpp_domain, d_type)
+
+  if (present(clock)) then ; if (clock>0) call cpu_clock_end(clock) ; endif
+
+end subroutine complete_group_pass
+
+
+!> Pass a 2-D array from one MOM domain to another
+subroutine redistribute_array_2d(Domain1, array1, Domain2, array2, complete)
+  type(domain2d), &
+                 intent(in) :: Domain1 !< The MOM domain from which to extract information.
+  real, dimension(:,:), intent(in) :: array1 !< The array from which to extract information.
+  type(domain2d), &
+                 intent(in) :: Domain2 !< The MOM domain receiving information.
+  real, dimension(:,:), intent(out) :: array2 !< The array receiving information.
+  logical, optional, intent(in) :: complete !< If true, finish communication before proceeding.
+
+  ! Local variables
+  logical :: do_complete
+
+  do_complete = .true. ; if (present(complete)) do_complete = complete
+
+  call mpp_redistribute(Domain1, array1, Domain2, array2, do_complete)
+
+end subroutine redistribute_array_2d
+
+!> Pass a 3-D array from one MOM domain to another
+subroutine redistribute_array_3d(Domain1, array1, Domain2, array2, complete)
+  type(domain2d), &
+                 intent(in) :: Domain1 !< The MOM domain from which to extract information.
+  real, dimension(:,:,:), intent(in) :: array1 !< The array from which to extract information.
+  type(domain2d), &
+                 intent(in) :: Domain2 !< The MOM domain receiving information.
+  real, dimension(:,:,:), intent(out) :: array2 !< The array receiving information.
+  logical, optional, intent(in) :: complete !< If true, finish communication before proceeding.
+
+  ! Local variables
+  logical :: do_complete
+
+  do_complete = .true. ; if (present(complete)) do_complete = complete
+
+  call mpp_redistribute(Domain1, array1, Domain2, array2, do_complete)
+
+end subroutine redistribute_array_3d
+
+
+!> Rescale the values of a 4-D array in its computational domain by a constant factor
+subroutine rescale_comp_data_4d(domain, array, scale)
+  type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information
+  real, dimension(:,:,:,:), intent(inout) :: array !< The array which is having the data in its
+                                                   !! computational domain rescaled
+  real, intent(in) :: scale !< A scaling factor by which to multiply the
+                            !! values in the computational domain of array
+  integer :: is, ie, js, je
+
+  if (scale == 1.0) return
+
+  call get_simple_array_i_ind(domain, size(array,1), is, ie)
+  call get_simple_array_j_ind(domain, size(array,2), js, je)
+  array(is:ie,js:je,:,:) = scale*array(is:ie,js:je,:,:)
+
+end subroutine rescale_comp_data_4d
+
+!> Rescale the values of a 3-D array in its computational domain by a constant factor
+subroutine rescale_comp_data_3d(domain, array, scale)
+  type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information
+  real, dimension(:,:,:), intent(inout) :: array !< The array which is having the data in its
+                                                 !! computational domain rescaled
+  real, intent(in) :: scale !< A scaling factor by which to multiply the
+                            !! values in the computational domain of array
+  integer :: is, ie, js, je
+
+  if (scale == 1.0) return
+
+  call get_simple_array_i_ind(domain, size(array,1), is, ie)
+  call get_simple_array_j_ind(domain, size(array,2), js, je)
+  array(is:ie,js:je,:) = scale*array(is:ie,js:je,:)
+
+end subroutine rescale_comp_data_3d
+
+!> Rescale the values of a 2-D array in its computational domain by a constant factor
+subroutine rescale_comp_data_2d(domain, array, scale)
+  type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information
+  real, dimension(:,:), intent(inout) :: array !< The array which is having the data in its
+                                               !! computational domain rescaled
+  real, intent(in) :: scale !< A scaling factor by which to multiply the
+                            !! values in the computational domain of array
+  integer :: is, ie, js, je
+
+  if (scale == 1.0) return
+
+  call get_simple_array_i_ind(domain, size(array,1), is, ie)
+  call get_simple_array_j_ind(domain, size(array,2), js, je)
+  array(is:ie,js:je) = scale*array(is:ie,js:je)
+
+end subroutine rescale_comp_data_2d
+
+!> create_MOM_domain creates and initializes a MOM_domain_type variable, based on the information
+!! provided in its arguments.
+subroutine create_MOM_domain(MOM_dom, n_global, n_halo, reentrant, tripolar_N, layout, io_layout, &
+                             domain_name, mask_table, symmetric, thin_halos, nonblocking)
+  type(MOM_domain_type), pointer :: MOM_dom !< A pointer to the MOM_domain_type being defined here.
+  integer, dimension(2), intent(in) :: n_global !< The number of points on the global grid in
+                                                !! the i- and j-directions
+  integer, dimension(2), intent(in) :: n_halo !< The number of halo points on each processor
+  logical, dimension(2), intent(in) :: reentrant !< If true, the grid is periodic in the i- and j-directions
+  logical, intent(in) :: tripolar_N !< If true, the grid uses northern tripolar connectivity
+  integer, dimension(2), intent(in) :: layout !< The layout of logical PEs in the i- and j-directions.
+  integer, dimension(2), optional, intent(in) :: io_layout !< The layout for parallel input and output.
+  character(len=*), optional, intent(in) :: domain_name !< A name for this domain, "MOM" if missing.
+  character(len=*), optional, intent(in) :: mask_table !< The full relative or absolute path to the mask table.
+  logical, optional, intent(in) :: symmetric !< If present, this specifies whether this domain
+                                             !! uses symmetric memory; the default is true.
+  logical, optional, intent(in) :: thin_halos !< If present, this specifies whether to permit the use of
+                                              !! thin halo updates; the default is true.
+  logical, optional, intent(in) :: nonblocking !< If present, this specifies whether to permit the use of
+                                               !! nonblocking halo updates; the default is false.
+
+  ! Local variables
+  integer, dimension(4) :: global_indices ! The lower and upper global i- and j-index bounds
+  integer :: X_FLAGS ! A combination of integers encoding the x-direction grid connectivity.
+  integer :: Y_FLAGS ! A combination of integers encoding the y-direction grid connectivity.
+  integer :: xhalo_d2, yhalo_d2
+  character(len=200) :: mesg ! A string for use in error messages
+  logical :: mask_table_exists ! Mask_table is present and the file it points to exists
+
+  if (.not.associated(MOM_dom)) then
+    allocate(MOM_dom)
+    allocate(MOM_dom%mpp_domain)
+    allocate(MOM_dom%mpp_domain_d2)
+  endif
+
+  MOM_dom%name = "MOM" ; if (present(domain_name)) MOM_dom%name = trim(domain_name)
+
+  X_FLAGS = 0 ; Y_FLAGS = 0
+  if (reentrant(1)) X_FLAGS = CYCLIC_GLOBAL_DOMAIN
+  if (reentrant(2)) Y_FLAGS = CYCLIC_GLOBAL_DOMAIN
+  if (tripolar_N) then
+    Y_FLAGS = FOLD_NORTH_EDGE
+    if (reentrant(2)) call MOM_error(FATAL,"MOM_domains: "// &
+                "TRIPOLAR_N and REENTRANT_Y may not be used together.")
+  endif
+
+  ! Apply the documented defaults when the optional arguments are absent.
+  MOM_dom%nonblocking_updates = .false.
+  if (present(nonblocking)) MOM_dom%nonblocking_updates = nonblocking
+  MOM_dom%thin_halo_updates = .true.
+  if (present(thin_halos)) MOM_dom%thin_halo_updates = thin_halos
+  MOM_dom%symmetric = .true. ; if (present(symmetric)) MOM_dom%symmetric = symmetric
+  MOM_dom%niglobal = n_global(1) ; MOM_dom%njglobal = n_global(2)
+  MOM_dom%nihalo = n_halo(1) ; MOM_dom%njhalo = n_halo(2)
+
+  ! Save the extra data for creating other domains of different resolution that overlay this domain.
+  MOM_dom%X_FLAGS = X_FLAGS
+  MOM_dom%Y_FLAGS = Y_FLAGS
+  MOM_dom%layout(:) = layout(:)
+
+  ! Set up the io_layout, with error handling.
+  MOM_dom%io_layout(:) = (/ 1, 1 /)
+  if (present(io_layout)) then
+    if (io_layout(1) == 0) then
+      MOM_dom%io_layout(1) = layout(1)
+    elseif (io_layout(1) > 1) then
+      MOM_dom%io_layout(1) = io_layout(1)
+      if (modulo(layout(1), io_layout(1)) /= 0) then
+        write(mesg,'("MOM_domains_init: The i-direction I/O-layout, IO_LAYOUT(1)=",i4, &
+            &", does not evenly divide the i-direction layout, NIPROC=",i4,".")') io_layout(1), layout(1)
+        call MOM_error(FATAL, mesg)
+      endif
+    endif
+
+    if (io_layout(2) == 0) then
+      MOM_dom%io_layout(2) = layout(2)
+    elseif (io_layout(2) > 1) then
+      MOM_dom%io_layout(2) = io_layout(2)
+      if (modulo(layout(2), io_layout(2)) /= 0) then
+        write(mesg,'("MOM_domains_init: The j-direction I/O-layout, IO_LAYOUT(2)=",i4, &
+            &", does not evenly divide the j-direction layout, NJPROC=",i4,".")') io_layout(2), layout(2)
+        call MOM_error(FATAL, mesg)
+      endif
+    endif
+  endif
+
+  if (present(mask_table)) then
+    mask_table_exists = file_exist(mask_table)
+    if (mask_table_exists) then
+      allocate(MOM_dom%maskmap(layout(1), layout(2)))
+      call parse_mask_table(mask_table, MOM_dom%maskmap, MOM_dom%name)
+    endif
+  else
+    mask_table_exists = .false.
+  endif
+
+  call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain)
+
+  ! For the downsampled domain a halo of 1 (or 0?) would suffice, since no wide-stencil
+  ! computations are done on it.  But that does not work, because the downsampled field would
+  ! not have the correct size to pass the checks, e.g., we get
+  ! error: downsample_diag_indices_get: peculiar size 28 in i-direction\ndoes not match one of 24 25 26 27
+  ! call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain_d2, halo_size=(MOM_dom%nihalo/2), coarsen=2)
+  call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain_d2, coarsen=2)
+
+end subroutine create_MOM_domain
+
+!> deallocate_MOM_domain deallocates memory associated with a pointer to a MOM_domain_type
+!!
and potentially all of its contents
+subroutine deallocate_MOM_domain(MOM_domain, cursory)
+  type(MOM_domain_type), pointer :: MOM_domain !< A pointer to the MOM_domain_type being deallocated
+  logical, optional, intent(in) :: cursory !< If true, do not deallocate fields associated
+                                           !! with the underlying infrastructure
+  logical :: invasive ! If true, deallocate fields associated with the underlying infrastructure
+
+  invasive = .true. ; if (present(cursory)) invasive = .not.cursory
+
+  if (associated(MOM_domain)) then
+    if (associated(MOM_domain%mpp_domain)) then
+      if (invasive) call mpp_deallocate_domain(MOM_domain%mpp_domain)
+      deallocate(MOM_domain%mpp_domain)
+    endif
+    if (associated(MOM_domain%mpp_domain_d2)) then
+      if (invasive) call mpp_deallocate_domain(MOM_domain%mpp_domain_d2)
+      deallocate(MOM_domain%mpp_domain_d2)
+    endif
+    if (associated(MOM_domain%maskmap)) deallocate(MOM_domain%maskmap)
+    deallocate(MOM_domain)
+  endif
+
+end subroutine deallocate_MOM_domain
+
+!> MOM_thread_affinity_set returns true if the number of OpenMP threads has been set to a value greater than 1.
+function MOM_thread_affinity_set()
+  ! Local variables
+  !$ integer :: ocean_nthreads       ! Number of OpenMP threads
+  !$ integer :: omp_get_num_threads  ! An OpenMP function that returns the number of threads
+  logical :: MOM_thread_affinity_set
+
+  MOM_thread_affinity_set = .false.
+  !$ call fms_affinity_init()
+  !$OMP PARALLEL
+  !$OMP MASTER
+  !$ ocean_nthreads = omp_get_num_threads()
+  !$OMP END MASTER
+  !$OMP END PARALLEL
+  !$ MOM_thread_affinity_set = (ocean_nthreads > 1 )
+end function MOM_thread_affinity_set
+
+!> set_MOM_thread_affinity sets the number of OpenMP threads to use with the ocean.
+subroutine set_MOM_thread_affinity(ocean_nthreads, ocean_hyper_thread)
+  integer, intent(in) :: ocean_nthreads !< Number of OpenMP threads to use for the ocean model
+  logical, intent(in) :: ocean_hyper_thread !< If true, use hyper threading
+
+  ! Local variables
+  !$ integer :: omp_get_thread_num, omp_get_num_threads !< These are the results of OpenMP functions
+
+  !$ call fms_affinity_init() ! fms_affinity_init can be safely called more than once.
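+  ! Lines with the "!$" sentinel are compiled only when OpenMP is enabled, so this
+  ! routine does nothing in a serial build.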
+  !$ call fms_affinity_set('OCEAN', ocean_hyper_thread, ocean_nthreads)
+  !$ call omp_set_num_threads(ocean_nthreads)
+  !$OMP PARALLEL
+  !$ write(6,*) "MOM_domains_mod OMPthreading ", fms_affinity_get(), omp_get_thread_num(), omp_get_num_threads()
+  !$ flush(6)
+  !$OMP END PARALLEL
+end subroutine set_MOM_thread_affinity
+
+!> This subroutine retrieves the 1-d domains that make up the 2d-domain in a MOM_domain
+subroutine get_domain_components_MD(MOM_dom, x_domain, y_domain)
+  type(MOM_domain_type), intent(in) :: MOM_dom !< The MOM_domain whose contents are being extracted
+  type(domain1D), optional, intent(inout) :: x_domain !< The 1-d logical x-domain
+  type(domain1D), optional, intent(inout) :: y_domain !< The 1-d logical y-domain
+
+  call mpp_get_domain_components(MOM_dom%mpp_domain, x_domain, y_domain)
+end subroutine get_domain_components_MD
+
+!> This subroutine retrieves the 1-d domains that make up a 2d-domain
+subroutine get_domain_components_d2D(domain, x_domain, y_domain)
+  type(domain2D), intent(in) :: domain !< The 2D domain whose contents are being extracted
+  type(domain1D), optional, intent(inout) :: x_domain !< The 1-d logical x-domain
+  type(domain1D), optional, intent(inout) :: y_domain !< The 1-d logical y-domain
+
+  call mpp_get_domain_components(domain, x_domain, y_domain)
+end subroutine get_domain_components_d2D
+
+!> clone_MD_to_MD copies one MOM_domain_type into another, while allowing
+!! some properties of the new type to differ from the original one.
+subroutine clone_MD_to_MD(MD_in, MOM_dom, min_halo, halo_size, symmetric, domain_name, &
+                          turns, refine, extra_halo)
+  type(MOM_domain_type), intent(in) :: MD_in !< An existing MOM_domain
+  type(MOM_domain_type), pointer :: MOM_dom !< A pointer to a MOM_domain that will be
+                                  !! allocated if it is unassociated, and will have data
+                                  !! copied from MD_in
+  integer, dimension(2), &
+               optional, intent(inout) :: min_halo !< If present, this sets the
+                                  !! minimum halo size for this domain in the i- and j-
+                                  !! directions, and returns the actual halo size used.
+  integer, optional, intent(in) :: halo_size !< If present, this sets the halo
+                                  !! size for the domain in the i- and j-directions.
+                                  !! min_halo and halo_size can not both be present.
+  logical, optional, intent(in) :: symmetric !< If present, this specifies
+                                  !! whether the new domain is symmetric, regardless of
+                                  !! whether the macro SYMMETRIC_MEMORY_ is defined.
+  character(len=*), &
+               optional, intent(in) :: domain_name !< A name for the new domain, copied
+                                  !! from MD_in if missing.
+  integer, optional, intent(in) :: turns !< Number of quarter turns
+  integer, optional, intent(in) :: refine !< A factor by which to enhance the grid resolution.
+  integer, optional, intent(in) :: extra_halo !< An extra number of points in the halos
+                                  !! compared with MD_in
+
+  integer :: global_indices(4)
+  logical :: mask_table_exists
+  integer, dimension(:), allocatable :: exni ! The extents of the grid for each i-row of the layout.
+                                             ! The sum of exni must equal MOM_dom%niglobal.
+  integer, dimension(:), allocatable :: exnj ! The extents of the grid for each j-row of the layout.
+                                             ! The sum of exnj must equal MOM_dom%njglobal.
+  integer :: qturns ! The number of quarter turns, restricted to the range of 0 to 3.
+  integer :: i, j, nl1, nl2
+
+  qturns = 0
+  if (present(turns)) qturns = modulo(turns, 4)
+
+  if (.not.associated(MOM_dom)) then
+    allocate(MOM_dom)
+    allocate(MOM_dom%mpp_domain)
+    allocate(MOM_dom%mpp_domain_d2)
+  endif
+
+  ! Save the extra data for creating other domains of different resolution that overlay this domain
+  MOM_dom%symmetric = MD_in%symmetric
+  MOM_dom%nonblocking_updates = MD_in%nonblocking_updates
+  MOM_dom%thin_halo_updates = MD_in%thin_halo_updates
+
+  if (modulo(qturns, 2) /= 0) then
+    MOM_dom%niglobal = MD_in%njglobal ; MOM_dom%njglobal = MD_in%niglobal
+    MOM_dom%nihalo = MD_in%njhalo ; MOM_dom%njhalo = MD_in%nihalo
+    call get_layout_extents(MD_in, exnj, exni)
+
+    MOM_dom%X_FLAGS = MD_in%Y_FLAGS ; MOM_dom%Y_FLAGS = MD_in%X_FLAGS
+    MOM_dom%layout(:) = MD_in%layout(2:1:-1)
+    MOM_dom%io_layout(:) = MD_in%io_layout(2:1:-1)
+  else
+    MOM_dom%niglobal = MD_in%niglobal ; MOM_dom%njglobal = MD_in%njglobal
+    MOM_dom%nihalo = MD_in%nihalo ; MOM_dom%njhalo = MD_in%njhalo
+    call get_layout_extents(MD_in, exni, exnj)
+
+    MOM_dom%X_FLAGS = MD_in%X_FLAGS ; MOM_dom%Y_FLAGS = MD_in%Y_FLAGS
+    MOM_dom%layout(:) = MD_in%layout(:)
+    MOM_dom%io_layout(:) = MD_in%io_layout(:)
+  endif
+
+  ! Ensure that the points per processor are the same on the source and destination grids.
+  select case (qturns)
+    case (1) ; call invert(exni)
+    case (2) ; call invert(exni) ; call invert(exnj)
+    case (3) ; call invert(exnj)
+  end select
+
+  if (associated(MD_in%maskmap)) then
+    mask_table_exists = .true.
+    allocate(MOM_dom%maskmap(MOM_dom%layout(1), MOM_dom%layout(2)))
+
+    nl1 = MOM_dom%layout(1) ; nl2 = MOM_dom%layout(2)
+    select case (qturns)
+      case (0)
+        do j=1,nl2 ; do i=1,nl1
+          MOM_dom%maskmap(i,j) = MD_in%maskmap(i, j)
+        enddo ; enddo
+      case (1)
+        do j=1,nl2 ; do i=1,nl1
+          MOM_dom%maskmap(i,j) = MD_in%maskmap(j, nl1+1-i)
+        enddo ; enddo
+      case (2)
+        do j=1,nl2 ; do i=1,nl1
+          MOM_dom%maskmap(i,j) = MD_in%maskmap(nl1+1-i, nl2+1-j)
+        enddo ; enddo
+      case (3)
+        do j=1,nl2 ; do i=1,nl1
+          MOM_dom%maskmap(i,j) = MD_in%maskmap(nl2+1-j, i)
+        enddo ; enddo
+    end select
+  else
+    mask_table_exists = .false.
+  endif
+
+  ! Optionally enhance the grid resolution.
+  if (present(refine)) then ; if (refine > 1) then
+    MOM_dom%niglobal = refine*MOM_dom%niglobal ; MOM_dom%njglobal = refine*MOM_dom%njglobal
+    MOM_dom%nihalo = refine*MOM_dom%nihalo ; MOM_dom%njhalo = refine*MOM_dom%njhalo
+    do i=1,MOM_dom%layout(1) ; exni(i) = refine*exni(i) ; enddo
+    do j=1,MOM_dom%layout(2) ; exnj(j) = refine*exnj(j) ; enddo
+  endif ; endif
+
+  ! Optionally add extra halo points.
+  if (present(extra_halo)) then ; if (extra_halo > 0) then
+    MOM_dom%nihalo = MOM_dom%nihalo + extra_halo ; MOM_dom%njhalo = MOM_dom%njhalo + extra_halo
+  endif ; endif
+
+  if (present(halo_size) .and. present(min_halo)) call MOM_error(FATAL, &
+    "clone_MOM_domain can not have both halo_size and min_halo present.")
+
+  if (present(min_halo)) then
+    MOM_dom%nihalo = max(MOM_dom%nihalo, min_halo(1))
+    min_halo(1) = MOM_dom%nihalo
+    MOM_dom%njhalo = max(MOM_dom%njhalo, min_halo(2))
+    min_halo(2) = MOM_dom%njhalo
+  endif
+
+  if (present(halo_size)) then
+    MOM_dom%nihalo = halo_size ; MOM_dom%njhalo = halo_size
+  endif
+
+  if (present(symmetric)) then ; MOM_dom%symmetric = symmetric ; endif
+
+  if (present(domain_name)) then
+    MOM_dom%name = trim(domain_name)
+  else
+    MOM_dom%name = MD_in%name
+  endif
+
+  call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain, xextent=exni, yextent=exnj)
+
+  call clone_MD_to_d2D(MOM_dom, MOM_dom%mpp_domain_d2, domain_name=MOM_dom%name, coarsen=2)
+
+end subroutine clone_MD_to_MD
+
+
+!> clone_MD_to_d2D uses information from a MOM_domain_type to create a new
+!! domain2d type, while allowing some properties of the new type to differ from
+!! the original one.
+subroutine clone_MD_to_d2D(MD_in, mpp_domain, min_halo, halo_size, symmetric, &
+                           domain_name, turns, xextent, yextent, coarsen)
+  type(MOM_domain_type), intent(in) :: MD_in !< An existing MOM_domain to be cloned
+  type(domain2d), intent(inout) :: mpp_domain !< The new mpp_domain to be set up
+  integer, dimension(2), &
+               optional, intent(inout) :: min_halo !< If present, this sets the
+                                  !! minimum halo size for this domain in the i- and j-
+                                  !! directions, and returns the actual halo size used.
+  integer, optional, intent(in) :: halo_size !< If present, this sets the halo
+                                  !! size for the domain in the i- and j-directions.
+                                  !! min_halo and halo_size can not both be present.
+  logical, optional, intent(in) :: symmetric !< If present, this specifies
+                                  !! whether the new domain is symmetric, regardless of
+                                  !! whether the macro SYMMETRIC_MEMORY_ is defined or
+                                  !! whether MD_in is symmetric.
+  character(len=*), &
+               optional, intent(in) :: domain_name !< A name for the new domain, copied
+                                  !! from MD_in (with a "c" suffix if coarsened) if missing.
+  integer, optional, intent(in) :: turns !< Number of quarter turns - not implemented here.
+  integer, optional, intent(in) :: coarsen !< A factor by which to coarsen this grid.
+                                  !! The default of 1 is for no coarsening.
+  integer, dimension(:), optional, intent(in) :: xextent !< The number of grid points in the
+                                  !! tracer computational domain for division of the x-layout.
+  integer, dimension(:), optional, intent(in) :: yextent !< The number of grid points in the
+                                  !! tracer computational domain for division of the y-layout.
+
+  integer :: global_indices(4)
+  integer :: nihalo, njhalo
+  logical :: symmetric_dom, do_coarsen
+  character(len=64) :: dom_name
+
+  if (present(turns)) &
+    call MOM_error(FATAL, "Rotation not supported for MOM_domain to domain2d")
+
+  if (present(halo_size) .and. present(min_halo)) call MOM_error(FATAL, &
+    "clone_MOM_domain can not have both halo_size and min_halo present.")
+
+  do_coarsen = .false. ; if (present(coarsen)) then ; do_coarsen = (coarsen > 1) ; endif
+
+  nihalo = MD_in%nihalo ; njhalo = MD_in%njhalo
+  if (do_coarsen) then
+    nihalo = int(MD_in%nihalo / coarsen) ; njhalo = int(MD_in%njhalo / coarsen)
+  endif
+
+  if (present(min_halo)) then
+    nihalo = max(nihalo, min_halo(1))
+    njhalo = max(njhalo, min_halo(2))
+    min_halo(1) = nihalo ; min_halo(2) = njhalo
+  endif
+  if (present(halo_size)) then
+    nihalo = halo_size ; njhalo = halo_size
+  endif
+
+  symmetric_dom = MD_in%symmetric
+  if (present(symmetric)) then ; symmetric_dom = symmetric ; endif
+
+  dom_name = MD_in%name
+  if (do_coarsen) dom_name = trim(MD_in%name)//"c"
+  if (present(domain_name)) dom_name = trim(domain_name)
+
+  global_indices(1:4) = (/ 1, MD_in%niglobal, 1, MD_in%njglobal /)
+  if (do_coarsen) then
+    global_indices(1:4) = (/ 1, (MD_in%niglobal/coarsen), 1, (MD_in%njglobal/coarsen) /)
+  endif
+
+  if (associated(MD_in%maskmap)) then
+    call mpp_define_domains( global_indices, MD_in%layout, mpp_domain, &
+                xflags=MD_in%X_FLAGS, yflags=MD_in%Y_FLAGS, xhalo=nihalo, yhalo=njhalo, &
+                xextent=xextent, yextent=yextent, symmetry=symmetric_dom, name=dom_name, &
+                maskmap=MD_in%maskmap )
+  else
+    call mpp_define_domains( global_indices, MD_in%layout, mpp_domain, &
+                xflags=MD_in%X_FLAGS, yflags=MD_in%Y_FLAGS, xhalo=nihalo, yhalo=njhalo, &
+                symmetry=symmetric_dom, xextent=xextent, yextent=yextent, name=dom_name)
+  endif
+
+  if ((MD_in%io_layout(1) + MD_in%io_layout(2) > 0) .and.
& + (MD_in%layout(1)*MD_in%layout(2) > 1)) then + call mpp_define_io_domain(mpp_domain, MD_in%io_layout) + endif + +end subroutine clone_MD_to_d2D + +!> Returns the index ranges that have been stored in a MOM_domain_type +subroutine get_domain_extent_MD(Domain, isc, iec, jsc, jec, isd, ied, jsd, jed, & + isg, ieg, jsg, jeg, idg_offset, jdg_offset, & + symmetric, local_indexing, index_offset, coarsen) + type(MOM_domain_type), & + intent(in) :: Domain !< The MOM domain from which to extract information + integer, intent(out) :: isc !< The start i-index of the computational domain + integer, intent(out) :: iec !< The end i-index of the computational domain + integer, intent(out) :: jsc !< The start j-index of the computational domain + integer, intent(out) :: jec !< The end j-index of the computational domain + integer, intent(out) :: isd !< The start i-index of the data domain + integer, intent(out) :: ied !< The end i-index of the data domain + integer, intent(out) :: jsd !< The start j-index of the data domain + integer, intent(out) :: jed !< The end j-index of the data domain + integer, optional, intent(out) :: isg !< The start i-index of the global domain + integer, optional, intent(out) :: ieg !< The end i-index of the global domain + integer, optional, intent(out) :: jsg !< The start j-index of the global domain + integer, optional, intent(out) :: jeg !< The end j-index of the global domain + integer, optional, intent(out) :: idg_offset !< The offset between the corresponding global and + !! data i-index spaces. + integer, optional, intent(out) :: jdg_offset !< The offset between the corresponding global and + !! data j-index spaces. + logical, optional, intent(out) :: symmetric !< True if symmetric memory is used. + logical, optional, intent(in) :: local_indexing !< If true, local tracer array indices start at 1, + !! as in most MOM6 code. The default is true. + integer, optional, intent(in) :: index_offset !< A fixed additional offset to all indices. This + !! can be useful for some types of debugging with + !! dynamic memory allocation. The default is 0. + integer, optional, intent(in) :: coarsen !< A factor by which the grid is coarsened. + !! The default is 1, for no coarsening. + + ! Local variables + integer :: isg_, ieg_, jsg_, jeg_ + integer :: ind_off, idg_off, jdg_off, coarsen_lev + logical :: local + + local = .true. ; if (present(local_indexing)) local = local_indexing + ind_off = 0 ; if (present(index_offset)) ind_off = index_offset + + coarsen_lev = 1 ; if (present(coarsen)) coarsen_lev = coarsen + + if (coarsen_lev == 1) then + call mpp_get_compute_domain(Domain%mpp_domain, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain%mpp_domain, isd, ied, jsd, jed) + call mpp_get_global_domain(Domain%mpp_domain, isg_, ieg_, jsg_, jeg_) + elseif (coarsen_lev == 2) then + if (.not.associated(Domain%mpp_domain_d2)) call MOM_error(FATAL, & + "get_domain_extent called with coarsen=2, but Domain%mpp_domain_d2 is not associated.") + call mpp_get_compute_domain(Domain%mpp_domain_d2, isc, iec, jsc, jec) + call mpp_get_data_domain(Domain%mpp_domain_d2, isd, ied, jsd, jed) + call mpp_get_global_domain(Domain%mpp_domain_d2, isg_, ieg_, jsg_, jeg_) + else + call MOM_error(FATAL, "get_domain_extent called with an unsupported level of coarsening.") + endif + + if (local) then + ! This code institutes the MOM convention that local array indices start at 1. 
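+    ! For example, with a 4-point halo, a PE whose compute domain spans global
+    ! i-indices 17:32 gets isd=13 and ied=36 from mpp; the shifts below give
+    ! isd=1, isc=5, iec=20, ied=24 and idg_off=12, so global_i = local_i + idg_off.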
+
+    idg_off = isd - 1 ; jdg_off = jsd - 1
+    isc = isc - isd + 1 ; iec = iec - isd + 1 ; jsc = jsc - jsd + 1 ; jec = jec - jsd + 1
+    ied = ied - isd + 1 ; jed = jed - jsd + 1
+    isd = 1 ; jsd = 1
+  else
+    idg_off = 0 ; jdg_off = 0
+  endif
+  if (ind_off /= 0) then
+    idg_off = idg_off + ind_off ; jdg_off = jdg_off + ind_off
+    isc = isc + ind_off ; iec = iec + ind_off
+    jsc = jsc + ind_off ; jec = jec + ind_off
+    isd = isd + ind_off ; ied = ied + ind_off
+    jsd = jsd + ind_off ; jed = jed + ind_off
+  endif
+  if (present(isg)) isg = isg_
+  if (present(ieg)) ieg = ieg_
+  if (present(jsg)) jsg = jsg_
+  if (present(jeg)) jeg = jeg_
+  if (present(idg_offset)) idg_offset = idg_off
+  if (present(jdg_offset)) jdg_offset = jdg_off
+  if (present(symmetric)) symmetric = Domain%symmetric
+
+end subroutine get_domain_extent_MD
+
+!> Returns the index ranges that have been stored in a domain2D type
+subroutine get_domain_extent_d2D(Domain, isc, iec, jsc, jec, isd, ied, jsd, jed)
+  type(domain2d), intent(in) :: Domain !< The MOM domain from which to extract information
+  integer, intent(out) :: isc !< The start i-index of the computational domain
+  integer, intent(out) :: iec !< The end i-index of the computational domain
+  integer, intent(out) :: jsc !< The start j-index of the computational domain
+  integer, intent(out) :: jec !< The end j-index of the computational domain
+  integer, optional, intent(out) :: isd !< The start i-index of the data domain
+  integer, optional, intent(out) :: ied !< The end i-index of the data domain
+  integer, optional, intent(out) :: jsd !< The start j-index of the data domain
+  integer, optional, intent(out) :: jed !< The end j-index of the data domain
+
+  ! Local variables
+  integer :: isd_, ied_, jsd_, jed_, jsg_, jeg_, isg_, ieg_
+
+  call mpp_get_compute_domain(Domain, isc, iec, jsc, jec)
+  call mpp_get_data_domain(Domain, isd_, ied_, jsd_, jed_)
+
+  if (present(isd)) isd = isd_
+  if (present(ied)) ied = ied_
+  if (present(jsd)) jsd = jsd_
+  if (present(jed)) jed = jed_
+
+end subroutine get_domain_extent_d2D
+
+!> Return the (potentially symmetric) computational domain i-bounds for an array
+!! passed without index specifications (i.e. indices start at 1) based on an array size.
+subroutine get_simple_array_i_ind(domain, size, is, ie, symmetric)
+  type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information
+  integer, intent(in) :: size !< The i-array size
+  integer, intent(out) :: is !< The computational domain starting i-index.
+  integer, intent(out) :: ie !< The computational domain ending i-index.
+  logical, optional, intent(in) :: symmetric !< If present, indicates whether symmetric sizes
+                                             !! can be considered.
+  ! Local variables
+  logical :: sym
+  character(len=120) :: mesg, mesg2
+  integer :: isc, iec, jsc, jec, isd, ied, jsd, jed
+
+  call mpp_get_compute_domain(Domain%mpp_domain, isc, iec, jsc, jec)
+  call mpp_get_data_domain(Domain%mpp_domain, isd, ied, jsd, jed)
+
+  isc = isc-isd+1 ; iec = iec-isd+1 ; ied = ied-isd+1 ; isd = 1
+  sym = Domain%symmetric ; if (present(symmetric)) sym = symmetric
+
+  if (size == ied) then ; is = isc ; ie = iec
+  elseif (size == 1+iec-isc) then ; is = 1 ; ie = size
+  elseif (sym .and. (size == 1+ied)) then ; is = isc ; ie = iec+1
+  elseif (sym .and. (size == 2+iec-isc)) then ; is = 1 ; ie = size+1
+  else
+    write(mesg,'("Unrecognized size ", i6, " in call to get_simple_array_i_ind.")') size
+    ! With symmetric sizes allowed there are four valid sizes; otherwise only two.
+    if (sym) then
+      write(mesg2,'("Valid sizes are : ", 4i7)') ied, 1+iec-isc, 1+ied, 2+iec-isc
+    else
+      write(mesg2,'("Valid sizes are : ", 2i7)') ied, 1+iec-isc
+    endif
+    call MOM_error(FATAL, trim(mesg)//trim(mesg2))
+  endif
+
+end subroutine get_simple_array_i_ind
+
+
+!> Return the (potentially symmetric) computational domain j-bounds for an array
+!! passed without index specifications (i.e. indices start at 1) based on an array size.
+subroutine get_simple_array_j_ind(domain, size, js, je, symmetric)
+  type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information
+  integer, intent(in) :: size !< The j-array size
+  integer, intent(out) :: js !< The computational domain starting j-index.
+  integer, intent(out) :: je !< The computational domain ending j-index.
+  logical, optional, intent(in) :: symmetric !< If present, indicates whether symmetric sizes
+                                             !! can be considered.
+  ! Local variables
+  logical :: sym
+  character(len=120) :: mesg, mesg2
+  integer :: isc, iec, jsc, jec, isd, ied, jsd, jed
+
+  call mpp_get_compute_domain(Domain%mpp_domain, isc, iec, jsc, jec)
+  call mpp_get_data_domain(Domain%mpp_domain, isd, ied, jsd, jed)
+
+  jsc = jsc-jsd+1 ; jec = jec-jsd+1 ; jed = jed-jsd+1 ; jsd = 1
+  sym = Domain%symmetric ; if (present(symmetric)) sym = symmetric
+
+  if (size == jed) then ; js = jsc ; je = jec
+  elseif (size == 1+jec-jsc) then ; js = 1 ; je = size
+  elseif (sym .and. (size == 1+jed)) then ; js = jsc ; je = jec+1
+  elseif (sym .and. (size == 2+jec-jsc)) then ; js = 1 ; je = size+1
+  else
+    write(mesg,'("Unrecognized size ", i6, " in call to get_simple_array_j_ind.")') size
+    ! With symmetric sizes allowed there are four valid sizes; otherwise only two.
+    if (sym) then
+      write(mesg2,'("Valid sizes are : ", 4i7)') jed, 1+jec-jsc, 1+jed, 2+jec-jsc
+    else
+      write(mesg2,'("Valid sizes are : ", 2i7)') jed, 1+jec-jsc
+    endif
+    call MOM_error(FATAL, trim(mesg)//trim(mesg2))
+  endif
+
+end subroutine get_simple_array_j_ind
+
+!> Invert the contents of a 1-d array
+subroutine invert(array)
+  integer, dimension(:), intent(inout) :: array !< The 1-d array to invert
+  integer :: i, ni, swap
+  ni = size(array)
+  ! Swap over only half the array; iterating over all of ni would swap every
+  ! pair twice and leave the array unchanged.
+  do i=1,ni/2
+    swap = array(i)
+    array(i) = array(ni+1-i)
+    array(ni+1-i) = swap
+  enddo
+end subroutine invert
+
+!> Returns the global shape of h-point arrays
+subroutine get_global_shape(domain, niglobal, njglobal)
+  type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information
+  integer, intent(out) :: niglobal !< i-index global size of h-point arrays
+  integer, intent(out) :: njglobal !< j-index global size of h-point arrays
+
+  niglobal = domain%niglobal
+  njglobal = domain%njglobal
+end subroutine get_global_shape
+
+!> Get the array ranges in one dimension for the divisions of a global index space
+subroutine compute_block_extent(isg, ieg, ndivs, ibegin, iend)
+  integer, intent(in) :: isg !< The starting index of the global index space
+  integer, intent(in) :: ieg !< The ending index of the global index space
+  integer, intent(in) :: ndivs !< The number of divisions
+  integer, dimension(:), intent(out) :: ibegin !< The starting index of each division
+  integer, dimension(:), intent(out) :: iend !< The ending index of each division
+
+  call mpp_compute_block_extent(isg, ieg, ndivs, ibegin, iend)
+end subroutine compute_block_extent
+
+!> Broadcast a 2-d domain from the root PE to the other PEs
+subroutine broadcast_domain(domain)
+  type(domain2d), intent(inout) :: domain !< The domain2d type that will be shared across PEs.
+
+  call mpp_broadcast_domain(domain)
+end subroutine broadcast_domain
+
+!> Gather the whole of a decomposed 2-d array into a global array on every PE.
+subroutine global_field(domain, local, global)
+  type(domain2d), intent(inout) :: domain !< The domain2d type that describes the decomposition
+  real, dimension(:,:), intent(in) :: local !< The portion of the array on the local PE
+  real, dimension(:,:), intent(out) :: global !< The whole global array
+
+  call mpp_global_field(domain, local, global)
+end subroutine global_field
+
+!> Returns arrays of the i- and j- sizes of the h-point computational domains for each
+!! element of the grid layout.  Any input values in the extent arrays are discarded, so
+!! they are effectively intent out despite their declared intent of inout.
+subroutine get_layout_extents(Domain, extent_i, extent_j)
+  type(MOM_domain_type), intent(in) :: domain !< MOM domain from which to extract information
+  integer, dimension(:), allocatable, intent(inout) :: extent_i !< The number of points in the
+                                                !! i-direction in each i-row of the layout
+  integer, dimension(:), allocatable, intent(inout) :: extent_j !< The number of points in the
+                                                !! j-direction in each j-row of the layout
+
+  if (allocated(extent_i)) deallocate(extent_i)
+  if (allocated(extent_j)) deallocate(extent_j)
+  allocate(extent_i(domain%layout(1))) ; extent_i(:) = 0
+  allocate(extent_j(domain%layout(2))) ; extent_j(:) = 0
+  call mpp_get_domain_extents(domain%mpp_domain, extent_i, extent_j)
+end subroutine get_layout_extents
+
+end module MOM_domain_infra
diff --git a/config_src/infra/FMS2/MOM_ensemble_manager_infra.F90 b/config_src/infra/FMS2/MOM_ensemble_manager_infra.F90
new file mode 100644
index 0000000000..66bbb86e2f
--- /dev/null
+++ b/config_src/infra/FMS2/MOM_ensemble_manager_infra.F90
@@ -0,0 +1,95 @@
+!> A simple (very thin) wrapper for managing ensemble member layout information
+module MOM_ensemble_manager_infra
+
+! This file is part of MOM6. See LICENSE.md for the license.
+
+use ensemble_manager_mod, only : FMS_ensemble_manager_init => ensemble_manager_init
+use ensemble_manager_mod, only : FMS_ensemble_pelist_setup => ensemble_pelist_setup
+use ensemble_manager_mod, only : FMS_get_ensemble_id => get_ensemble_id
+use ensemble_manager_mod, only : FMS_get_ensemble_size => get_ensemble_size
+use ensemble_manager_mod, only : FMS_get_ensemble_pelist => get_ensemble_pelist
+use ensemble_manager_mod, only : FMS_get_ensemble_filter_pelist => get_ensemble_filter_pelist
+
+implicit none ; private
+
+public :: ensemble_manager_init, ensemble_pelist_setup
+public :: get_ensemble_id, get_ensemble_size
+public :: get_ensemble_pelist, get_ensemble_filter_pelist
+
+contains
+
+!> Initializes the ensemble manager which divides available resources
+!! in order to concurrently execute an ensemble of model realizations.
+subroutine ensemble_manager_init()
+
+  call FMS_ensemble_manager_init()
+
+end subroutine ensemble_manager_init
+
+!> Create a list of processing elements (PEs) across components
+!! associated with the current ensemble member.
+subroutine ensemble_pelist_setup(concurrent, atmos_npes, ocean_npes, land_npes, ice_npes, &
+                                 Atm_pelist, Ocean_pelist, Land_pelist, Ice_pelist)
+  logical, intent(in) :: concurrent !< A logical flag; if true, the fast PEs are
+                                    !! run concurrently with the slow ocean PEs
+                                    !! within the coupler.
+  integer, intent(in) :: atmos_npes !< The number of atmospheric (fast) PEs
+  integer, intent(in) :: ocean_npes !< The number of ocean (slow) PEs
+  integer, intent(in) :: land_npes !< The number of land PEs (fast)
+  integer, intent(in) :: ice_npes !< The number of ice (fast) PEs
+  integer, dimension(:), intent(inout) :: Atm_pelist !< A list of Atm PEs
+  integer, dimension(:), intent(inout) :: Ocean_pelist !< A list of Ocean PEs
+  integer, dimension(:), intent(inout) :: Land_pelist !< A list of Land PEs
+  integer, dimension(:), intent(inout) :: Ice_pelist !< A list of Ice PEs
+
+
+  call FMS_ensemble_pelist_setup(concurrent, atmos_npes, ocean_npes, land_npes, ice_npes, &
+                                 Atm_pelist, Ocean_pelist, Land_pelist, Ice_pelist)
+
+end subroutine ensemble_pelist_setup
+
+!> Returns the numeric id for the current ensemble member
+function get_ensemble_id()
+  integer :: get_ensemble_id
+
+  get_ensemble_id = FMS_get_ensemble_id()
+
+end function get_ensemble_id
+
+!> Returns ensemble information as follows,
+!! index (1) :: ensemble size
+!! index (2) :: Number of PEs per ensemble member
+!! index (3) :: Number of ocean PEs per ensemble member
+!! index (4) :: Number of atmos PEs per ensemble member
+!! index (5) :: Number of land PEs per ensemble member
+!! index (6) :: Number of ice PEs per ensemble member
+function get_ensemble_size()
+  integer, dimension(6) :: get_ensemble_size
+
+  get_ensemble_size = FMS_get_ensemble_size()
+
+end function get_ensemble_size
+
+!> Returns the list of PEs associated with all ensemble members.
+!! Results are stored in the argument array, which must be large
+!! enough to contain the list.  If the optional name argument is present,
+!! the returned processor list is for a particular component (atmos, ocean, land, ice).
+subroutine get_ensemble_pelist(pelist, name)
+  integer, intent(inout) :: pelist(:,:) !< A processor list for all ensemble members
+  character(len=*), optional, intent(in) :: name !< An optional component name (atmos, ocean, land, ice)
+
+  call FMS_get_ensemble_pelist(pelist, name)
+
+end subroutine get_ensemble_pelist
+
+!> Returns the list of PEs associated with the named ensemble filter application.
+!! Valid component names include ('atmos', 'ocean', 'land', and 'ice')
+subroutine get_ensemble_filter_pelist(pelist, name)
+  integer, intent(inout) :: pelist(:) !< A processor list for the ensemble filter
+  character(len=*), intent(in) :: name !< The component name (atmos, ocean, land, ice)
+
+  call FMS_get_ensemble_filter_pelist(pelist, name)
+
+end subroutine get_ensemble_filter_pelist
+
+end module MOM_ensemble_manager_infra
diff --git a/config_src/infra/FMS2/MOM_error_infra.F90 b/config_src/infra/FMS2/MOM_error_infra.F90
new file mode 100644
index 0000000000..e5a8b8dc68
--- /dev/null
+++ b/config_src/infra/FMS2/MOM_error_infra.F90
@@ -0,0 +1,42 @@
+!> Routines for error handling and I/O management
+module MOM_error_infra
+
+! This file is part of MOM6. See LICENSE.md for the license.
+
+use mpp_mod, only : mpp_error, mpp_pe, mpp_root_pe, mpp_stdlog=>stdlog, mpp_stdout=>stdout
+use mpp_mod, only : NOTE, WARNING, FATAL
+
+implicit none ; private
+
+public :: MOM_err, is_root_pe, stdlog, stdout
+!> Integer parameters encoding the severity of an error message
+public :: NOTE, WARNING, FATAL
+
+contains
+
+!> MOM_err writes an error message, and may cause the run to stop depending on the
+!! severity of the error.
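+!!
+!! For example (an illustrative sketch), a call like
+!!   call MOM_err(FATAL, "MOM_domains: missing required parameter.")
+!! logs the message and brings the run to an immediate stop, while severity NOTE
+!! merely writes the message out.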
+subroutine MOM_err(severity, message)
+  integer, intent(in) :: severity !< The severity level of this error
+  character(len=*), intent(in) :: message !< A message to write out
+
+  call mpp_error(severity, message)
+end subroutine MOM_err
+
+!> stdout returns the standard Fortran unit number for output
+integer function stdout()
+  stdout = mpp_stdout()
+end function stdout
+
+!> stdlog returns the standard Fortran unit number to use to log messages
+integer function stdlog()
+  stdlog = mpp_stdlog()
+end function stdlog
+
+!> is_root_pe returns .true. if the current PE is the root PE.
+logical function is_root_pe()
+  is_root_pe = .false.
+  if (mpp_pe() == mpp_root_pe()) is_root_pe = .true.
+end function is_root_pe
+
+end module MOM_error_infra
diff --git a/config_src/infra/FMS2/MOM_interp_infra.F90 b/config_src/infra/FMS2/MOM_interp_infra.F90
new file mode 100644
index 0000000000..ca5b2b8516
--- /dev/null
+++ b/config_src/infra/FMS2/MOM_interp_infra.F90
@@ -0,0 +1,251 @@
+!> This module wraps the FMS temporal and spatial interpolation routines
+module MOM_interp_infra

+! This file is part of MOM6. See LICENSE.md for the license.
+
+use MOM_domain_infra, only : MOM_domain_type, domain2d
+use MOM_io_infra, only : axistype
+use MOM_time_manager, only : time_type
+use horiz_interp_mod, only : horiz_interp_new, horiz_interp, horiz_interp_init, horiz_interp_type
+use time_interp_external_mod, only : time_interp_external
+use time_interp_external_mod, only : init_external_field, time_interp_external_init
+use time_interp_external_mod, only : get_external_field_size
+use time_interp_external_mod, only : get_external_field_axes, get_external_field_missing
+
+implicit none ; private
+
+public :: horiz_interp_type, horiz_interp_init
+public :: time_interp_extern, init_extern_field, time_interp_external_init
+public :: get_external_field_info
+public :: run_horiz_interp, build_horiz_interp_weights
+
+!> Read a field based on model time, and rotate to the model domain.
+interface time_interp_extern
+  module procedure time_interp_extern_0d
+  module procedure time_interp_extern_2d
+  module procedure time_interp_extern_3d
+end interface time_interp_extern
+
+!> perform horizontal interpolation of a field
+interface run_horiz_interp
+  module procedure horiz_interp_from_weights_field2d
+  module procedure horiz_interp_from_weights_field3d
+end interface
+
+!> build weights for horizontal interpolation of a field
+interface build_horiz_interp_weights
+  module procedure build_horiz_interp_weights_2d_to_2d
+end interface build_horiz_interp_weights
+
+contains
+
+!> perform horizontal interpolation of a 2d field using pre-computed weights;
+!! source and destination coordinates are 2d
+subroutine horiz_interp_from_weights_field2d(Interp, data_in, data_out, verbose, mask_in, mask_out, &
+                                             missing_value, missing_permit, err_msg)
+
+  type(horiz_interp_type), intent(in) :: Interp !< type containing interpolation options and weights
+  real, dimension(:,:), intent(in) :: data_in !< input data
+  real, dimension(:,:), intent(out) :: data_out !< output data
+  integer, optional, intent(in) :: verbose !< verbosity level
+  real, dimension(:,:), optional, intent(in) :: mask_in !< mask for input data
+  real, dimension(:,:), optional, intent(out) :: mask_out !< mask for output data
+  real, optional, intent(in) :: missing_value !< A value indicating missing data
+  integer, optional, intent(in) :: missing_permit !< number of allowed points with
+                                                  !!
missing value for interpolation (0-3) + character(len=*), optional, intent(out) :: err_msg !< error message + + call horiz_interp(Interp, data_in, data_out, verbose, & + mask_in, mask_out, missing_value, missing_permit, & + err_msg, new_missing_handle=.true. ) + +end subroutine horiz_interp_from_weights_field2d + + +!> perform horizontal interpolation of a 3d field using pre-computed weights +!! source and destination coordinates are 2d +subroutine horiz_interp_from_weights_field3d(Interp, data_in, data_out, verbose, mask_in, mask_out, & + missing_value, missing_permit, err_msg) + + type(horiz_interp_type), intent(in) :: Interp !< type containing interpolation options and weights + real, dimension(:,:,:), intent(in) :: data_in !< input data + real, dimension(:,:,:), intent(out) :: data_out !< output data + integer, optional, intent(in) :: verbose !< verbosity level + real, dimension(:,:,:), optional, intent(in) :: mask_in !< mask for input data + real, dimension(:,:,:), optional, intent(out) :: mask_out !< mask for output data + real, optional, intent(in) :: missing_value !< A value indicating missing data + integer, optional, intent(in) :: missing_permit !< number of allowed points with + !! missing value for interpolation (0-3) + character(len=*), optional, intent(out) :: err_msg !< error message + + call horiz_interp(Interp, data_in, data_out, verbose, mask_in, mask_out, & + missing_value, missing_permit, err_msg) + +end subroutine horiz_interp_from_weights_field3d + + +!> build horizontal interpolation weights from source grid defined by 2d lon/lat to destination grid +!! defined by 2d lon/lat +subroutine build_horiz_interp_weights_2d_to_2d(Interp, lon_in, lat_in, lon_out, lat_out, & + verbose, interp_method, num_nbrs, max_dist, & + src_modulo, mask_in, mask_out, & + is_latlon_in, is_latlon_out) + + type(horiz_interp_type), intent(inout) :: Interp !< type containing interpolation options and weights + real, dimension(:,:), intent(in) :: lon_in !< input longitude 2d + real, dimension(:,:), intent(in) :: lat_in !< input latitude 2d + real, dimension(:,:), intent(in) :: lon_out !< output longitude 2d + real, dimension(:,:), intent(in) :: lat_out !< output latitude 2d + integer, optional, intent(in) :: verbose !< verbosity level + character(len=*), optional, intent(in) :: interp_method !< interpolation method + integer, optional, intent(in) :: num_nbrs !< number of nearest neighbors + real, optional, intent(in) :: max_dist !< maximum region of influence + logical, optional, intent(in) :: src_modulo !< periodicity of E-W boundary + real, dimension(:,:), optional, intent(in) :: mask_in !< mask for input data + real, dimension(:,:), optional, intent(inout) :: mask_out !< mask for output data + logical, optional, intent(in) :: is_latlon_in !< input grid is regular lat/lon grid + logical, optional, intent(in) :: is_latlon_out !< output grid is regular lat/lon grid + + call horiz_interp_new(Interp, lon_in, lat_in, lon_out, lat_out, & + verbose, interp_method, num_nbrs, max_dist, & + src_modulo, mask_in, mask_out, & + is_latlon_in, is_latlon_out) + +end subroutine build_horiz_interp_weights_2d_to_2d + + +!> get size of an external field from field index +function get_extern_field_size(index) + + integer, intent(in) :: index !< field index + integer :: get_extern_field_size(4) !< field size + + get_extern_field_size = get_external_field_size(index) + +end function get_extern_field_size + + +!> get axes of an external field from field index +function get_extern_field_axes(index) + + integer, 
intent(in) :: index !< field index
+ type(axistype), dimension(4) :: get_extern_field_axes !< field axes
+
+ get_extern_field_axes = get_external_field_axes(index)
+
+end function get_extern_field_axes
+
+
+!> get missing value of an external field from field index
+function get_extern_field_missing(index)
+
+ integer, intent(in) :: index !< field index
+ real :: get_extern_field_missing !< field missing value
+
+ get_extern_field_missing = get_external_field_missing(index)
+
+end function get_extern_field_missing
+
+
+!> Get information about the external fields.
+subroutine get_external_field_info(field_id, size, axes, missing)
+ integer, intent(in) :: field_id !< The integer index of the external
+ !! field returned from a previous
+ !! call to init_external_field()
+ integer, dimension(4), optional, intent(inout) :: size !< Dimension sizes for the input data
+ type(axistype), dimension(4), optional, intent(inout) :: axes !< Axis types for the input data
+ real, optional, intent(inout) :: missing !< Missing value for the input data
+
+ if (present(size)) then
+ size(1:4) = get_extern_field_size(field_id)
+ endif
+
+ if (present(axes)) then
+ axes(1:4) = get_extern_field_axes(field_id)
+ endif
+
+ if (present(missing)) then
+ missing = get_extern_field_missing(field_id)
+ endif
+
+end subroutine get_external_field_info
+
+
+!> Read a scalar field based on model time.
+subroutine time_interp_extern_0d(field_id, time, data_in, verbose)
+ integer, intent(in) :: field_id !< The integer index of the external field returned
+ !! from a previous call to init_external_field()
+ type(time_type), intent(in) :: time !< The target time for the data
+ real, intent(inout) :: data_in !< The interpolated value
+ logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging
+
+ call time_interp_external(field_id, time, data_in, verbose=verbose)
+end subroutine time_interp_extern_0d
+
+
+!> Read a 2d field from an external file based on model time, potentially including horizontal
+!! interpolation and rotation of the data
+subroutine time_interp_extern_2d(field_id, time, data_in, interp, verbose, horz_interp, mask_out)
+ integer, intent(in) :: field_id !< The integer index of the external field returned
+ !! from a previous call to init_external_field()
+ type(time_type), intent(in) :: time !< The target time for the data
+ real, dimension(:,:), intent(inout) :: data_in !< The array in which to store the interpolated values
+ integer, optional, intent(in) :: interp !< A flag indicating the temporal interpolation method
+ logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging
+ type(horiz_interp_type), &
+ optional, intent(in) :: horz_interp !< A structure to control horizontal interpolation
+ logical, dimension(:,:), &
+ optional, intent(out) :: mask_out !< An array that is true where there is valid data
+
+ call time_interp_external(field_id, time, data_in, interp=interp, verbose=verbose, &
+ horz_interp=horz_interp, mask_out=mask_out)
+end subroutine time_interp_extern_2d
+
+
+!> Read a 3d field based on model time, and rotate to the model grid
+subroutine time_interp_extern_3d(field_id, time, data_in, interp, verbose, horz_interp, mask_out)
+ integer, intent(in) :: field_id !< The integer index of the external field returned
+ !! 
from a previous call to init_external_field()
+ type(time_type), intent(in) :: time !< The target time for the data
+ real, dimension(:,:,:), intent(inout) :: data_in !< The array in which to store the interpolated values
+ integer, optional, intent(in) :: interp !< A flag indicating the temporal interpolation method
+ logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging
+ type(horiz_interp_type), &
+ optional, intent(in) :: horz_interp !< A structure to control horizontal interpolation
+ logical, dimension(:,:,:), &
+ optional, intent(out) :: mask_out !< An array that is true where there is valid data
+
+ call time_interp_external(field_id, time, data_in, interp=interp, verbose=verbose, &
+ horz_interp=horz_interp, mask_out=mask_out)
+end subroutine time_interp_extern_3d
+
+
+!> initialize an external field
+integer function init_extern_field(file, fieldname, MOM_domain, domain, verbose, &
+ threading, ierr, ignore_axis_atts )
+
+ character(len=*), intent(in) :: file !< The name of the file to read
+ character(len=*), intent(in) :: fieldname !< The name of the field in the file
+ integer, optional, intent(in) :: threading !< A flag specifying whether the root PE reads
+ !! the data and broadcasts it (SINGLE_FILE) or all
+ !! processors read (MULTIPLE, the default).
+ logical, optional, intent(in) :: verbose !< If true, write verbose output for debugging
+ type(domain2d), optional, intent(in) :: domain !< A domain2d type that describes the decomposition
+ type(MOM_domain_type), &
+ optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition
+ integer, optional, intent(out) :: ierr !< Returns a non-zero error code in case of failure
+ logical, optional, intent(in) :: ignore_axis_atts !< If present and true, do not issue a
+ !! fatal error if the axis Cartesian attribute is
+ !! not set to a recognized value.
+
+ if (present(MOM_Domain)) then
+ init_extern_field = init_external_field(file, fieldname, domain=MOM_domain%mpp_domain, &
+ verbose=verbose, threading=threading, ierr=ierr, ignore_axis_atts=ignore_axis_atts)
+ else
+ init_extern_field = init_external_field(file, fieldname, domain=domain, &
+ verbose=verbose, threading=threading, ierr=ierr, ignore_axis_atts=ignore_axis_atts)
+ endif
+
+end function init_extern_field
+
+end module MOM_interp_infra
diff --git a/config_src/infra/FMS2/MOM_io_infra.F90 b/config_src/infra/FMS2/MOM_io_infra.F90
new file mode 100644
index 0000000000..3ea201235a
--- /dev/null
+++ b/config_src/infra/FMS2/MOM_io_infra.F90
@@ -0,0 +1,801 @@
+!> This module contains a thin interface to mpp and fms I/O code
+module MOM_io_infra
+
+! This file is part of MOM6. See LICENSE.md for the license.
+
+use MOM_domain_infra, only : MOM_domain_type, rescale_comp_data, AGRID, BGRID_NE, CGRID_NE
+use MOM_domain_infra, only : domain2d, domain1d, CENTER, CORNER, NORTH_FACE, EAST_FACE
+use MOM_error_infra, only : MOM_error=>MOM_err, NOTE, FATAL, WARNING
+
+use fms_mod, only : write_version_number, open_namelist_file, check_nml_error
+use fms_io_mod, only : file_exist, field_exist, field_size, read_data
+use fms_io_mod, only : fms_io_exit, get_filename_appendix
+use mpp_io_mod, only : mpp_open, mpp_close, mpp_flush
+use mpp_io_mod, only : mpp_write_meta, mpp_write
+use mpp_io_mod, only : mpp_get_atts, mpp_attribute_exist
+use mpp_io_mod, only : mpp_get_axes, axistype, mpp_get_axis_data
+use mpp_io_mod, only : mpp_get_fields, fieldtype
+use mpp_io_mod, only : mpp_get_info, mpp_get_times
+use mpp_io_mod, only : mpp_io_init
+! 
These are encoding constants.
+use mpp_io_mod, only : APPEND_FILE=>MPP_APPEND, WRITEONLY_FILE=>MPP_WRONLY
+use mpp_io_mod, only : OVERWRITE_FILE=>MPP_OVERWR, READONLY_FILE=>MPP_RDONLY
+use mpp_io_mod, only : NETCDF_FILE=>MPP_NETCDF, ASCII_FILE=>MPP_ASCII
+use mpp_io_mod, only : MULTIPLE=>MPP_MULTI, SINGLE_FILE=>MPP_SINGLE
+use iso_fortran_env, only : int64
+
+implicit none ; private
+
+! These interfaces are actually implemented or have explicit interfaces in this file.
+public :: open_file, open_ASCII_file, file_is_open, close_file, flush_file, file_exists
+public :: get_file_info, get_file_fields, get_file_times, get_filename_suffix
+public :: MOM_read_data, MOM_read_vector, write_metadata, write_field
+public :: field_exists, get_field_atts, get_field_size, get_axis_data, read_field_chksum
+public :: io_infra_init, io_infra_end, MOM_namelist_file, check_namelist_error, write_version
+! These types are inherited from underlying infrastructure code, to act as containers for
+! information about fields and axes, respectively, and are opaque to this module.
+public :: fieldtype, axistype
+! These are encoding constant parameters.
+public :: ASCII_FILE, NETCDF_FILE, SINGLE_FILE, MULTIPLE
+public :: APPEND_FILE, READONLY_FILE, OVERWRITE_FILE, WRITEONLY_FILE
+public :: CENTER, CORNER, NORTH_FACE, EAST_FACE
+
+!> Indicate whether a file exists, perhaps with domain decomposition
+interface file_exists
+ module procedure FMS_file_exists
+ module procedure MOM_file_exists
+end interface
+
+!> Open a file (or fileset) for parallel or single-file I/O.
+interface open_file
+ module procedure open_file_type, open_file_unit
+end interface open_file
+
+!> Read a data field from a file
+interface MOM_read_data
+ module procedure MOM_read_data_4d
+ module procedure MOM_read_data_3d
+ module procedure MOM_read_data_2d, MOM_read_data_2d_region
+ module procedure MOM_read_data_1d, MOM_read_data_1d_int
+ module procedure MOM_read_data_0d, MOM_read_data_0d_int
+end interface
+
+!> Write a registered field to an output file
+interface write_field
+ module procedure write_field_4d
+ module procedure write_field_3d
+ module procedure write_field_2d
+ module procedure write_field_1d
+ module procedure write_field_0d
+ module procedure MOM_write_axis
+end interface write_field
+
+!> Read a pair of data fields representing the two components of a vector from a file
+interface MOM_read_vector
+ module procedure MOM_read_vector_3d
+ module procedure MOM_read_vector_2d
+end interface MOM_read_vector
+
+!> Write metadata about a variable or axis to a file and store it for later reuse
+interface write_metadata
+ module procedure write_metadata_axis, write_metadata_field
+end interface write_metadata
+
+!> Close a file (or fileset). If the file handle does not point to an open file,
+!! close_file simply returns without doing anything.
+interface close_file
+ module procedure close_file_type, close_file_unit
+end interface close_file
+
+!> Ensure that the output stream associated with a file handle is fully sent to disk
+interface flush_file
+ module procedure flush_file_type, flush_file_unit
+end interface flush_file
+
+!> Type for holding a handle to an open file and related information
+type, public :: file_type ; private
+ integer :: unit = -1 !< The framework identifier or netCDF unit number of an output file
+ character(len=:), allocatable :: filename !< The path to this file, if it is open
+ logical :: open_to_read = .false. !< If true, this file or fileset can be read
+ logical :: open_to_write = .false. 
!< If true, this file or fileset can be written to +end type file_type + +contains + +!> Reads the checksum value for a field that was recorded in a file, along with a flag indicating +!! whether the file contained a valid checksum for this field. +subroutine read_field_chksum(field, chksum, valid_chksum) + type(fieldtype), intent(in) :: field !< The field whose checksum attribute is to be read. + integer(kind=int64), intent(out) :: chksum !< The checksum for the field. + logical, intent(out) :: valid_chksum !< If true, chksum has been successfully read. + ! Local variables + integer(kind=int64), dimension(3) :: checksum_file + + checksum_file(:) = -1 + valid_chksum = mpp_attribute_exist(field, "checksum") + if (valid_chksum) then + call get_field_atts(field, checksum=checksum_file) + chksum = checksum_file(1) + else + chksum = -1 + endif +end subroutine read_field_chksum + +!> Returns true if the named file or its domain-decomposed variant exists. +logical function MOM_file_exists(filename, MOM_Domain) + character(len=*), intent(in) :: filename !< The name of the file being inquired about + type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + +! This function uses the fms_io function file_exist to determine whether +! a named file (or its decomposed variant) exists. + + MOM_file_exists = file_exist(filename, MOM_Domain%mpp_domain) + +end function MOM_file_exists + +!> Returns true if the named file or its domain-decomposed variant exists. +logical function FMS_file_exists(filename, domain, no_domain) + character(len=*), intent(in) :: filename !< The name of the file being inquired about + type(domain2d), optional, intent(in) :: domain !< The mpp domain2d that describes the decomposition + logical, optional, intent(in) :: no_domain !< This file does not use domain decomposition +! This function uses the fms_io function file_exist to determine whether +! a named file (or its decomposed variant) exists. + + FMS_file_exists = file_exist(filename, domain, no_domain) + +end function FMS_file_exists + +!> indicates whether an I/O handle is attached to an open file +logical function file_is_open(IO_handle) + type(file_type), intent(in) :: IO_handle !< Handle to a file to inquire about + + file_is_open = (IO_handle%unit >= 0) +end function file_is_open + +!> closes a file (or fileset). If the file handle does not point to an open file, +!! close_file_type simply returns without doing anything. +subroutine close_file_type(IO_handle) + type(file_type), intent(inout) :: IO_handle !< The I/O handle for the file to be closed + + call mpp_close(IO_handle%unit) + if (allocated(IO_handle%filename)) deallocate(IO_handle%filename) + IO_handle%open_to_read = .false. ; IO_handle%open_to_write = .false. +end subroutine close_file_type + +!> closes a file. If the unit does not point to an open file, +!! close_file_unit simply returns without doing anything. +subroutine close_file_unit(unit) + integer, intent(inout) :: unit !< The I/O unit for the file to be closed + + call mpp_close(unit) +end subroutine close_file_unit + +!> Ensure that the output stream associated with a file handle is fully sent to disk. +subroutine flush_file_type(file) + type(file_type), intent(in) :: file !< The I/O handle for the file to flush + + call mpp_flush(file%unit) +end subroutine flush_file_type + +!> Ensure that the output stream associated with a unit is fully sent to disk. 
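+!! This variant acts on a bare Fortran I/O unit number rather than a file_type handle.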
+subroutine flush_file_unit(unit) + integer, intent(in) :: unit !< The I/O unit for the file to flush + + call mpp_flush(unit) +end subroutine flush_file_unit + +!> Initialize the underlying I/O infrastructure +subroutine io_infra_init(maxunits) + integer, optional, intent(in) :: maxunits !< An optional maximum number of file + !! unit numbers that can be used. + call mpp_io_init(maxunit=maxunits) +end subroutine io_infra_init + +!> Gracefully close out and terminate the underlying I/O infrastructure +subroutine io_infra_end() + call fms_io_exit() +end subroutine io_infra_end + +!> Open a single namelist file that is potentially readable by all PEs. +function MOM_namelist_file(file) result(unit) + character(len=*), optional, intent(in) :: file !< The file to open, by default "input.nml". + integer :: unit !< The opened unit number of the namelist file + unit = open_namelist_file(file) +end function MOM_namelist_file + +!> Checks the iostat argument that is returned after reading a namelist variable and writes a +!! message if there is an error. +subroutine check_namelist_error(IOstat, nml_name) + integer, intent(in) :: IOstat !< An I/O status field from a namelist read call + character(len=*), intent(in) :: nml_name !< The name of the namelist + integer :: ierr + ierr = check_nml_error(IOstat, nml_name) +end subroutine check_namelist_error + +!> Write a file version number to the log file or other output file +subroutine write_version(version, tag, unit) + character(len=*), intent(in) :: version !< A string that contains the routine name and version + character(len=*), optional, intent(in) :: tag !< A tag name to add to the message + integer, optional, intent(in) :: unit !< An alternate unit number for output + + call write_version_number(version, tag, unit) +end subroutine write_version + +!> open_file opens a file for parallel or single-file I/O. +subroutine open_file_unit(unit, filename, action, form, threading, fileset, nohdrs, domain, MOM_domain) + integer, intent(out) :: unit !< The I/O unit for the opened file + character(len=*), intent(in) :: filename !< The name of the file being opened + integer, optional, intent(in) :: action !< A flag indicating whether the file can be read + !! or written to and how to handle existing files. + integer, optional, intent(in) :: form !< A flag indicating the format of a new file. The + !! default is ASCII_FILE, but NETCDF_FILE is also common. + integer, optional, intent(in) :: threading !< A flag indicating whether one (SINGLE_FILE) + !! or multiple PEs (MULTIPLE) participate in I/O. + !! With the default, the root PE does I/O. + integer, optional, intent(in) :: fileset !< A flag indicating whether multiple PEs doing I/O due + !! to threading=MULTIPLE write to the same file (SINGLE_FILE) + !! or to one file per PE (MULTIPLE, the default). + logical, optional, intent(in) :: nohdrs !< If nohdrs is .TRUE., headers are not written to + !! ASCII files. The default is .false. 
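+ ! Note: supply at most one of the two optional domain arguments below; as the
+ ! body shows, MOM_Domain takes precedence over domain if both are present.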
+ type(domain2d), optional, intent(in) :: domain !< A domain2d type that describes the decomposition
+ type(MOM_domain_type), optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition
+
+ if (present(MOM_Domain)) then
+ call mpp_open(unit, filename, action=action, form=form, threading=threading, fileset=fileset, &
+ nohdrs=nohdrs, domain=MOM_Domain%mpp_domain)
+ else
+ call mpp_open(unit, filename, action=action, form=form, threading=threading, fileset=fileset, &
+ nohdrs=nohdrs, domain=domain)
+ endif
+end subroutine open_file_unit
+
+!> open_file opens a file for parallel or single-file I/O.
+subroutine open_file_type(IO_handle, filename, action, MOM_domain, threading, fileset)
+ type(file_type), intent(inout) :: IO_handle !< The handle for the opened file
+ character(len=*), intent(in) :: filename !< The path name of the file being opened
+ integer, optional, intent(in) :: action !< A flag indicating whether the file can be read
+ !! or written to and how to handle existing files.
+ !! The default is WRITEONLY_FILE.
+ type(MOM_domain_type), &
+ optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition
+ integer, optional, intent(in) :: threading !< A flag indicating whether one (SINGLE_FILE)
+ !! or multiple PEs (MULTIPLE) participate in I/O.
+ !! With the default, the root PE does I/O.
+ integer, optional, intent(in) :: fileset !< A flag indicating whether multiple PEs doing I/O due
+ !! to threading=MULTIPLE write to the same file (SINGLE_FILE)
+ !! or to one file per PE (MULTIPLE, the default).
+
+ if (present(MOM_Domain)) then
+ call mpp_open(IO_handle%unit, filename, action=action, form=NETCDF_FILE, threading=threading, &
+ fileset=fileset, domain=MOM_Domain%mpp_domain)
+ else
+ call mpp_open(IO_handle%unit, filename, action=action, form=NETCDF_FILE, threading=threading, &
+ fileset=fileset)
+ endif
+ IO_handle%filename = trim(filename)
+ if (present(action)) then
+ if (action == READONLY_FILE) then
+ IO_handle%open_to_read = .true. ; IO_handle%open_to_write = .false.
+ else
+ IO_handle%open_to_read = .false. ; IO_handle%open_to_write = .true.
+ endif
+ else
+ IO_handle%open_to_read = .false. ; IO_handle%open_to_write = .true.
+ endif
+
+end subroutine open_file_type
+
+!> open_file opens an ASCII file for parallel or single-file I/O using Fortran read and write calls.
+subroutine open_ASCII_file(unit, file, action, threading, fileset)
+ integer, intent(out) :: unit !< The I/O unit for the opened file
+ character(len=*), intent(in) :: file !< The name of the file being opened
+ integer, optional, intent(in) :: action !< A flag indicating whether the file can be read
+ !! or written to and how to handle existing files.
+ integer, optional, intent(in) :: threading !< A flag indicating whether one (SINGLE_FILE)
+ !! or multiple PEs (MULTIPLE) participate in I/O.
+ !! With the default, the root PE does I/O.
+ integer, optional, intent(in) :: fileset !< A flag indicating whether multiple PEs doing I/O due
+ !! to threading=MULTIPLE write to the same file (SINGLE_FILE)
+ !! or to one file per PE (MULTIPLE, the default).
+
+ call mpp_open(unit, file, action=action, form=ASCII_FILE, threading=threading, fileset=fileset, &
+ nohdrs=.true.)
+
+end subroutine open_ASCII_file
+
+
+!> Provide a string to append to filenames, to differentiate ensemble members, for example. 
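+!! (In FMS this suffix is typically an ensemble-member label that was registered
+!! elsewhere via the fms_io filename-appendix mechanism; it may be blank.)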
+subroutine get_filename_suffix(suffix)
+ character(len=*), intent(out) :: suffix !< A string to append to filenames
+
+ call get_filename_appendix(suffix)
+end subroutine get_filename_suffix
+
+
+!> Get information about the number of dimensions, variables, global attributes and time levels
+!! in the file associated with an open file unit
+subroutine get_file_info(IO_handle, ndim, nvar, natt, ntime)
+ type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for I/O
+ integer, optional, intent(out) :: ndim !< The number of dimensions in the file
+ integer, optional, intent(out) :: nvar !< The number of variables in the file
+ integer, optional, intent(out) :: natt !< The number of global attributes in the file
+ integer, optional, intent(out) :: ntime !< The number of time levels in the file
+
+ ! Local variables
+ integer :: ndims, nvars, natts, ntimes
+
+ call mpp_get_info(IO_handle%unit, ndims, nvars, natts, ntimes)
+
+ if (present(ndim)) ndim = ndims
+ if (present(nvar)) nvar = nvars
+ if (present(natt)) natt = natts
+ if (present(ntime)) ntime = ntimes
+
+end subroutine get_file_info
+
+
+!> Get the times of records from a file
+ !### Modify this to also convert to time_type, using information about the dimensions?
+subroutine get_file_times(IO_handle, time_values, ntime)
+ type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for I/O
+ real, allocatable, dimension(:), intent(inout) :: time_values !< The real times for the records in the file.
+ integer, optional, intent(out) :: ntime !< The number of time levels in the file
+
+ integer :: ntimes
+
+ if (allocated(time_values)) deallocate(time_values)
+ call get_file_info(IO_handle, ntime=ntimes)
+ if (present(ntime)) ntime = ntimes
+ if (ntimes > 0) then
+ allocate(time_values(ntimes))
+ call mpp_get_times(IO_handle%unit, time_values)
+ endif
+end subroutine get_file_times
+
+!> Set up the field information (e.g., names and metadata) for all of the variables in a file. The
+!! argument fields must be allocated with a size that matches the number of variables in the file.
+subroutine get_file_fields(IO_handle, fields)
+ type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for I/O
+ type(fieldtype), dimension(:), intent(inout) :: fields !< Field-type descriptions of all of
+ !! the variables in a file.
+ call mpp_get_fields(IO_handle%unit, fields)
+end subroutine get_file_fields
+
+!> Extract information from a field type, as stored or as found in a file
+subroutine get_field_atts(field, name, units, longname, checksum)
+ type(fieldtype), intent(in) :: field !< The field to extract information from
+ character(len=*), optional, intent(out) :: name !< The variable name
+ character(len=*), optional, intent(out) :: units !< The units of the variable
+ character(len=*), optional, intent(out) :: longname !< The long name of the variable
+ integer(kind=int64), dimension(:), &
+ optional, intent(out) :: checksum !< The checksums of the variable in a file
+ call mpp_get_atts(field, name=name, units=units, longname=longname, checksum=checksum)
+end subroutine get_field_atts
+
+!> Field_exists returns true if the field indicated by field_name is present in the
+!! file file_name. If file_name does not exist, it returns false. 
+function field_exists(filename, field_name, domain, no_domain, MOM_domain)
+ character(len=*), intent(in) :: filename !< The name of the file being inquired about
+ character(len=*), intent(in) :: field_name !< The name of the field being sought
+ type(domain2d), target, optional, intent(in) :: domain !< A domain2d type that describes the decomposition
+ logical, optional, intent(in) :: no_domain !< This file does not use domain decomposition
+ type(MOM_domain_type), optional, intent(in) :: MOM_Domain !< A MOM_Domain that describes the decomposition
+ logical :: field_exists !< True if filename exists and field_name is in filename
+
+ if (present(MOM_domain)) then
+ field_exists = field_exist(filename, field_name, domain=MOM_domain%mpp_domain, no_domain=no_domain)
+ else
+ field_exists = field_exist(filename, field_name, domain=domain, no_domain=no_domain)
+ endif
+
+end function field_exists
+
+!> Given filename and fieldname, this subroutine returns the size of the field in the file
+subroutine get_field_size(filename, fieldname, sizes, field_found, no_domain)
+ character(len=*), intent(in) :: filename !< The name of the file to read
+ character(len=*), intent(in) :: fieldname !< The name of the variable whose sizes are returned
+ integer, dimension(:), intent(inout) :: sizes !< The sizes of the variable in each dimension
+ logical, optional, intent(out) :: field_found !< This indicates whether the field was found in
+ !! the input file. Without this argument, there
+ !! is a fatal error if the field is not found.
+ logical, optional, intent(in) :: no_domain !< If present and true, do not check for file
+ !! names with an appended tile number
+
+ call field_size(filename, fieldname, sizes, field_found=field_found, no_domain=no_domain)
+
+end subroutine get_field_size
+
+!> Extracts and returns the axis data stored in an axistype.
+subroutine get_axis_data( axis, dat )
+ type(axistype), intent(in) :: axis !< An axis type
+ real, dimension(:), intent(out) :: dat !< The data in the axis variable
+
+ call mpp_get_axis_data( axis, dat )
+end subroutine get_axis_data
+
+!> This routine uses the fms_io subroutine read_data to read a scalar named
+!! "fieldname" from a single or domain-decomposed file "filename".
+subroutine MOM_read_data_0d(filename, fieldname, data, timelevel, scale, MOM_Domain)
+ character(len=*), intent(in) :: filename !< The name of the file to read
+ character(len=*), intent(in) :: fieldname !< The variable name of the data in the file
+ real, intent(inout) :: data !< The scalar into which the data should be read
+ integer, optional, intent(in) :: timelevel !< The time level in the file to read
+ real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied
+ !! by before it is returned.
+ type(MOM_domain_type), &
+ optional, intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition
+
+ if (present(MOM_Domain)) then
+ call read_data(filename, fieldname, data, MOM_Domain%mpp_domain, timelevel=timelevel)
+ else
+ call read_data(filename, fieldname, data, timelevel=timelevel, no_domain=.true.)
+ endif
+
+ if (present(scale)) then ; if (scale /= 1.0) then
+ data = scale*data
+ endif ; endif
+
+end subroutine MOM_read_data_0d
+
+!> This routine uses the fms_io subroutine read_data to read a 1-D data field named
+!! "fieldname" from a single or domain-decomposed file "filename". 
+subroutine MOM_read_data_1d(filename, fieldname, data, timelevel, scale, MOM_Domain)
+ character(len=*), intent(in) :: filename !< The name of the file to read
+ character(len=*), intent(in) :: fieldname !< The variable name of the data in the file
+ real, dimension(:), intent(inout) :: data !< The 1-dimensional array into which the data
+ !! should be read
+ integer, optional, intent(in) :: timelevel !< The time level in the file to read
+ real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied
+ !! by before it is returned.
+ type(MOM_domain_type), &
+ optional, intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition
+
+ if (present(MOM_Domain)) then
+ call read_data(filename, fieldname, data, MOM_Domain%mpp_domain, timelevel=timelevel)
+ else
+ call read_data(filename, fieldname, data, timelevel=timelevel, no_domain=.true.)
+ endif
+
+ if (present(scale)) then ; if (scale /= 1.0) then
+ data(:) = scale*data(:)
+ endif ; endif
+
+end subroutine MOM_read_data_1d
+
+!> This routine uses the fms_io subroutine read_data to read a distributed
+!! 2-D data field named "fieldname" from file "filename". Valid values for
+!! "position" include CORNER, CENTER, EAST_FACE and NORTH_FACE.
+subroutine MOM_read_data_2d(filename, fieldname, data, MOM_Domain, &
+ timelevel, position, scale)
+ character(len=*), intent(in) :: filename !< The name of the file to read
+ character(len=*), intent(in) :: fieldname !< The variable name of the data in the file
+ real, dimension(:,:), intent(inout) :: data !< The 2-dimensional array into which the data
+ !! should be read
+ type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition
+ integer, optional, intent(in) :: timelevel !< The time level in the file to read
+ integer, optional, intent(in) :: position !< A flag indicating where this data is located
+ real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied
+ !! by before it is returned.
+
+ call read_data(filename, fieldname, data, MOM_Domain%mpp_domain, &
+ timelevel=timelevel, position=position)
+
+ if (present(scale)) then ; if (scale /= 1.0) then
+ call rescale_comp_data(MOM_Domain, data, scale)
+ endif ; endif
+
+end subroutine MOM_read_data_2d
+
+!> This routine uses the fms_io subroutine read_data to read a region from a distributed or
+!! global 2-D data field named "fieldname" from file "filename".
+subroutine MOM_read_data_2d_region(filename, fieldname, data, start, nread, MOM_domain, &
+ no_domain, scale)
+ character(len=*), intent(in) :: filename !< The name of the file to read
+ character(len=*), intent(in) :: fieldname !< The variable name of the data in the file
+ real, dimension(:,:), intent(inout) :: data !< The 2-dimensional array into which the data
+ !! should be read
+ integer, dimension(:), intent(in) :: start !< The starting index to read in each of 4
+ !! dimensions. For this 2-d read, the 3rd
+ !! and 4th values are always 1.
+ integer, dimension(:), intent(in) :: nread !< The number of points to read in each of 4
+ !! dimensions. For this 2-d read, the 3rd
+ !! and 4th values are always 1.
+ type(MOM_domain_type), &
+ optional, intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition
+ logical, optional, intent(in) :: no_domain !< If present and true, this variable does not
+ !! use domain decomposition.
+ real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied
+ !! by before it is returned. 
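+
+ ! An illustrative (hypothetical) call, reading an ni-by-nj window whose first
+ ! point is at global indices (is,js) of a 2-d variable with two degenerate
+ ! trailing file dimensions, would use:
+ !   start = (/ is, js, 1, 1 /) ; nread = (/ ni, nj, 1, 1 /)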
+ + if (present(MOM_Domain)) then + call read_data(filename, fieldname, data, start, nread, domain=MOM_Domain%mpp_domain, & + no_domain=no_domain) + else + call read_data(filename, fieldname, data, start, nread, no_domain=no_domain) + endif + + if (present(scale)) then ; if (scale /= 1.0) then + if (present(MOM_Domain)) then + call rescale_comp_data(MOM_Domain, data, scale) + else + ! Dangerously rescale the whole array + data(:,:) = scale*data(:,:) + endif + endif ; endif + +end subroutine MOM_read_data_2d_region + +!> This routine uses the fms_io subroutine read_data to read a distributed +!! 3-D data field named "fieldname" from file "filename". Valid values for +!! "position" include CORNER, CENTER, EAST_FACE and NORTH_FACE. +subroutine MOM_read_data_3d(filename, fieldname, data, MOM_Domain, & + timelevel, position, scale) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + real, dimension(:,:,:), intent(inout) :: data !< The 3-dimensional array into which the data + !! should be read + type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + integer, optional, intent(in) :: timelevel !< The time level in the file to read + integer, optional, intent(in) :: position !< A flag indicating where this data is located + real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied + !! by before it is returned. + + call read_data(filename, fieldname, data, MOM_Domain%mpp_domain, & + timelevel=timelevel, position=position) + + if (present(scale)) then ; if (scale /= 1.0) then + call rescale_comp_data(MOM_Domain, data, scale) + endif ; endif + +end subroutine MOM_read_data_3d + +!> This routine uses the fms_io subroutine read_data to read a distributed +!! 4-D data field named "fieldname" from file "filename". Valid values for +!! "position" include CORNER, CENTER, EAST_FACE and NORTH_FACE. +subroutine MOM_read_data_4d(filename, fieldname, data, MOM_Domain, & + timelevel, position, scale) + character(len=*), intent(in) :: filename !< The name of the file to read + character(len=*), intent(in) :: fieldname !< The variable name of the data in the file + real, dimension(:,:,:,:), intent(inout) :: data !< The 4-dimensional array into which the data + !! should be read + type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition + integer, optional, intent(in) :: timelevel !< The time level in the file to read + integer, optional, intent(in) :: position !< A flag indicating where this data is located + real, optional, intent(in) :: scale !< A scaling factor that the field is multiplied + !! by before it is returned. + + call read_data(filename, fieldname, data, MOM_Domain%mpp_domain, & + timelevel=timelevel, position=position) + + if (present(scale)) then ; if (scale /= 1.0) then + call rescale_comp_data(MOM_Domain, data, scale) + endif ; endif + +end subroutine MOM_read_data_4d + +!> This routine uses the fms_io subroutine read_data to read a scalar integer +!! data field named "fieldname" from file "filename". 
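+!! Unlike the real variants above, the integer variants take no scale argument and
+!! always read without domain decomposition.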
+subroutine MOM_read_data_0d_int(filename, fieldname, data, timelevel)
+ character(len=*), intent(in) :: filename !< The name of the file to read
+ character(len=*), intent(in) :: fieldname !< The variable name of the data in the file
+ integer, intent(inout) :: data !< The scalar integer into which the data should be read
+ integer, optional, intent(in) :: timelevel !< The time level in the file to read
+
+ call read_data(filename, fieldname, data, timelevel=timelevel, no_domain=.true.)
+
+end subroutine MOM_read_data_0d_int
+
+!> This routine uses the fms_io subroutine read_data to read a 1-D integer
+!! data field named "fieldname" from file "filename".
+subroutine MOM_read_data_1d_int(filename, fieldname, data, timelevel)
+ character(len=*), intent(in) :: filename !< The name of the file to read
+ character(len=*), intent(in) :: fieldname !< The variable name of the data in the file
+ integer, dimension(:), intent(inout) :: data !< The 1-dimensional integer array into which
+ !! the data should be read
+ integer, optional, intent(in) :: timelevel !< The time level in the file to read
+
+ call read_data(filename, fieldname, data, timelevel=timelevel, no_domain=.true.)
+
+end subroutine MOM_read_data_1d_int
+
+
+!> This routine uses the fms_io subroutine read_data to read a pair of distributed
+!! 2-D data fields with names given by "[uv]_fieldname" from file "filename". Valid values for
+!! "stagger" include CGRID_NE, BGRID_NE, and AGRID.
+subroutine MOM_read_vector_2d(filename, u_fieldname, v_fieldname, u_data, v_data, MOM_Domain, &
+ timelevel, stagger, scalar_pair, scale)
+ character(len=*), intent(in) :: filename !< The name of the file to read
+ character(len=*), intent(in) :: u_fieldname !< The variable name of the u data in the file
+ character(len=*), intent(in) :: v_fieldname !< The variable name of the v data in the file
+ real, dimension(:,:), intent(inout) :: u_data !< The 2 dimensional array into which the
+ !! u-component of the data should be read
+ real, dimension(:,:), intent(inout) :: v_data !< The 2 dimensional array into which the
+ !! v-component of the data should be read
+ type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition
+ integer, optional, intent(in) :: timelevel !< The time level in the file to read
+ integer, optional, intent(in) :: stagger !< A flag indicating where this vector is discretized
+ logical, optional, intent(in) :: scalar_pair !< If true, a pair of scalars are to be read
+ real, optional, intent(in) :: scale !< A scaling factor that the fields are multiplied
+ !! by before they are returned.
+ integer :: u_pos, v_pos
+
+ u_pos = EAST_FACE ; v_pos = NORTH_FACE
+ if (present(stagger)) then
+ if (stagger == CGRID_NE) then ; u_pos = EAST_FACE ; v_pos = NORTH_FACE
+ elseif (stagger == BGRID_NE) then ; u_pos = CORNER ; v_pos = CORNER
+ elseif (stagger == AGRID) then ; u_pos = CENTER ; v_pos = CENTER ; endif
+ endif
+
+ call read_data(filename, u_fieldname, u_data, MOM_Domain%mpp_domain, &
+ timelevel=timelevel, position=u_pos)
+ call read_data(filename, v_fieldname, v_data, MOM_Domain%mpp_domain, &
+ timelevel=timelevel, position=v_pos)
+
+ if (present(scale)) then ; if (scale /= 1.0) then
+ call rescale_comp_data(MOM_Domain, u_data, scale)
+ call rescale_comp_data(MOM_Domain, v_data, scale)
+ endif ; endif
+
+end subroutine MOM_read_vector_2d
+
+!> This routine uses the fms_io subroutine read_data to read a pair of distributed
+!! 3-D data fields with names given by "[uv]_fieldname" from file "filename". Valid values for
+!! "stagger" include CGRID_NE, BGRID_NE, and AGRID.
+subroutine MOM_read_vector_3d(filename, u_fieldname, v_fieldname, u_data, v_data, MOM_Domain, &
+ timelevel, stagger, scalar_pair, scale)
+ character(len=*), intent(in) :: filename !< The name of the file to read
+ character(len=*), intent(in) :: u_fieldname !< The variable name of the u data in the file
+ character(len=*), intent(in) :: v_fieldname !< The variable name of the v data in the file
+ real, dimension(:,:,:), intent(inout) :: u_data !< The 3 dimensional array into which the
+ !! u-component of the data should be read
+ real, dimension(:,:,:), intent(inout) :: v_data !< The 3 dimensional array into which the
+ !! v-component of the data should be read
+ type(MOM_domain_type), intent(in) :: MOM_Domain !< The MOM_Domain that describes the decomposition
+ integer, optional, intent(in) :: timelevel !< The time level in the file to read
+ integer, optional, intent(in) :: stagger !< A flag indicating where this vector is discretized
+ logical, optional, intent(in) :: scalar_pair !< If true, a pair of scalars are to be read
+ real, optional, intent(in) :: scale !< A scaling factor that the fields are multiplied
+ !! by before they are returned.
+
+ integer :: u_pos, v_pos
+
+ u_pos = EAST_FACE ; v_pos = NORTH_FACE
+ if (present(stagger)) then
+ if (stagger == CGRID_NE) then ; u_pos = EAST_FACE ; v_pos = NORTH_FACE
+ elseif (stagger == BGRID_NE) then ; u_pos = CORNER ; v_pos = CORNER
+ elseif (stagger == AGRID) then ; u_pos = CENTER ; v_pos = CENTER ; endif
+ endif
+
+ call read_data(filename, u_fieldname, u_data, MOM_Domain%mpp_domain, &
+ timelevel=timelevel, position=u_pos)
+ call read_data(filename, v_fieldname, v_data, MOM_Domain%mpp_domain, &
+ timelevel=timelevel, position=v_pos)
+
+ if (present(scale)) then ; if (scale /= 1.0) then
+ call rescale_comp_data(MOM_Domain, u_data, scale)
+ call rescale_comp_data(MOM_Domain, v_data, scale)
+ endif ; endif
+
+end subroutine MOM_read_vector_3d
+
+
+!> Write a 4d field to an output file.
+subroutine write_field_4d(IO_handle, field_md, MOM_domain, field, tstamp, tile_count, fill_value)
+ type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing
+ type(fieldtype), intent(in) :: field_md !< Field type with metadata
+ type(MOM_domain_type), intent(in) :: MOM_domain !< The MOM_Domain that describes the decomposition
+ real, dimension(:,:,:,:), intent(inout) :: field !< Field to write
+ real, optional, intent(in) :: tstamp !< Model timestamp
+ integer, optional, intent(in) :: tile_count !< PEs per tile (default: 1)
+ real, optional, intent(in) :: fill_value !< Missing data fill value
+
+ call mpp_write(IO_handle%unit, field_md, MOM_domain%mpp_domain, field, tstamp=tstamp, &
+ tile_count=tile_count, default_data=fill_value)
+end subroutine write_field_4d
+
+!> Write a 3d field to an output file. 
+subroutine write_field_3d(IO_handle, field_md, MOM_domain, field, tstamp, tile_count, fill_value) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(fieldtype), intent(in) :: field_md !< Field type with metadata + type(MOM_domain_type), intent(in) :: MOM_domain !< The MOM_Domain that describes the decomposition + real, dimension(:,:,:), intent(inout) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model timestamp + integer, optional, intent(in) :: tile_count !< PEs per tile (default: 1) + real, optional, intent(in) :: fill_value !< Missing data fill value + + call mpp_write(IO_handle%unit, field_md, MOM_domain%mpp_domain, field, tstamp=tstamp, & + tile_count=tile_count, default_data=fill_value) +end subroutine write_field_3d + +!> Write a 2d field to an output file. +subroutine write_field_2d(IO_handle, field_md, MOM_domain, field, tstamp, tile_count, fill_value) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(fieldtype), intent(in) :: field_md !< Field type with metadata + type(MOM_domain_type), intent(in) :: MOM_domain !< The MOM_Domain that describes the decomposition + real, dimension(:,:), intent(inout) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model timestamp + integer, optional, intent(in) :: tile_count !< PEs per tile (default: 1) + real, optional, intent(in) :: fill_value !< Missing data fill value + + call mpp_write(IO_handle%unit, field_md, MOM_domain%mpp_domain, field, tstamp=tstamp, & + tile_count=tile_count, default_data=fill_value) +end subroutine write_field_2d + +!> Write a 1d field to an output file. +subroutine write_field_1d(IO_handle, field_md, field, tstamp) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(fieldtype), intent(in) :: field_md !< Field type with metadata + real, dimension(:), intent(in) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model timestamp + + call mpp_write(IO_handle%unit, field_md, field, tstamp=tstamp) +end subroutine write_field_1d + +!> Write a 0d field to an output file. +subroutine write_field_0d(IO_handle, field_md, field, tstamp) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(fieldtype), intent(in) :: field_md !< Field type with metadata + real, intent(in) :: field !< Field to write + real, optional, intent(in) :: tstamp !< Model timestamp + + call mpp_write(IO_handle%unit, field_md, field, tstamp=tstamp) +end subroutine write_field_0d + +!> Write the data for an axis +subroutine MOM_write_axis(IO_handle, axis) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(axistype), intent(in) :: axis !< An axis type variable with information to write + + call mpp_write(IO_handle%unit, axis) + +end subroutine MOM_write_axis + +!> Store information about an axis in a previously defined axistype and write this +!! information to the file indicated by unit. +subroutine write_metadata_axis(IO_handle, axis, name, units, longname, cartesian, sense, domain, data, calendar) + type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing + type(axistype), intent(inout) :: axis !< The axistype where this information is stored. 
+ character(len=*), intent(in) :: name !< The name in the file of this axis
+ character(len=*), intent(in) :: units !< The units of this axis
+ character(len=*), intent(in) :: longname !< The long description of this axis
+ character(len=*), optional, intent(in) :: cartesian !< A variable indicating which direction
+ !! this axis corresponds with. Valid values
+ !! include 'X', 'Y', 'Z', 'T', and 'N' for none.
+ integer, optional, intent(in) :: sense !< This is 1 for axes whose values increase upward, or
+ !! -1 if they increase downward.
+ type(domain1D), optional, intent(in) :: domain !< The domain decomposition for this axis
+ real, dimension(:), optional, intent(in) :: data !< The coordinate values of the points on this axis
+ character(len=*), optional, intent(in) :: calendar !< The name of the calendar used with a time axis
+
+ call mpp_write_meta(IO_handle%unit, axis, name, units, longname, cartesian=cartesian, sense=sense, &
+ domain=domain, data=data, calendar=calendar)
+end subroutine write_metadata_axis
+
+!> Store information about an output variable in a previously defined fieldtype and write this
+!! information to the file indicated by unit.
+subroutine write_metadata_field(IO_handle, field, axes, name, units, longname, &
+ min, max, fill, scale, add, pack, standard_name, checksum)
+ type(file_type), intent(in) :: IO_handle !< Handle for a file that is open for writing
+ type(fieldtype), intent(inout) :: field !< The fieldtype where this information is stored
+ type(axistype), dimension(:), intent(in) :: axes !< Handles for the axes used for this variable
+ character(len=*), intent(in) :: name !< The name in the file of this variable
+ character(len=*), intent(in) :: units !< The units of this variable
+ character(len=*), intent(in) :: longname !< The long description of this variable
+ real, optional, intent(in) :: min !< The minimum valid value for this variable
+ real, optional, intent(in) :: max !< The maximum valid value for this variable
+ real, optional, intent(in) :: fill !< Missing data fill value
+ real, optional, intent(in) :: scale !< A multiplicative factor by which to scale
+ !! the variable before output
+ real, optional, intent(in) :: add !< An offset to add to the variable before output
+ integer, optional, intent(in) :: pack !< A precision reduction factor with which the
+ !! variable is written. The default, 1, implies
+ !! no reduction, but 2 is not uncommon.
+ character(len=*), optional, intent(in) :: standard_name !< The standard (e.g., CMOR) name for this variable
+ integer(kind=int64), dimension(:), &
+ optional, intent(in) :: checksum !< Checksum values that can be used to verify reads.
+
+ call mpp_write_meta(IO_handle%unit, field, axes, name, units, longname, min=min, max=max, &
+ fill=fill, scale=scale, add=add, pack=pack, standard_name=standard_name, checksum=checksum)
+
+end subroutine write_metadata_field
+
+end module MOM_io_infra
diff --git a/config_src/infra/FMS2/MOM_time_manager.F90 b/config_src/infra/FMS2/MOM_time_manager.F90
new file mode 100644
index 0000000000..5f3279b713
--- /dev/null
+++ b/config_src/infra/FMS2/MOM_time_manager.F90
@@ -0,0 +1,54 @@
+!> Wraps the FMS time manager functions
+module MOM_time_manager
+
+! This file is part of MOM6. See LICENSE.md for the license. 
+
+use time_manager_mod, only : time_type, get_time, set_time
+use time_manager_mod, only : time_type_to_real, real_to_time_type
+use time_manager_mod, only : operator(+), operator(-), operator(*), operator(/)
+use time_manager_mod, only : operator(>), operator(<), operator(>=), operator(<=)
+use time_manager_mod, only : operator(==), operator(/=), operator(//)
+use time_manager_mod, only : set_ticks_per_second, get_ticks_per_second
+use time_manager_mod, only : get_date, set_date, increment_date
+use time_manager_mod, only : days_in_month, month_name
+use time_manager_mod, only : set_calendar_type, get_calendar_type
+use time_manager_mod, only : JULIAN, NOLEAP, THIRTY_DAY_MONTHS, GREGORIAN
+use time_manager_mod, only : NO_CALENDAR
+
+implicit none ; private
+
+public :: time_type, get_time, set_time
+public :: time_type_to_real, real_to_time_type, real_to_time
+public :: set_ticks_per_second, get_ticks_per_second
+public :: operator(+), operator(-), operator(*), operator(/)
+public :: operator(>), operator(<), operator(>=), operator(<=)
+public :: operator(==), operator(/=), operator(//)
+public :: get_date, set_date, increment_date, month_name, days_in_month
+public :: JULIAN, NOLEAP, THIRTY_DAY_MONTHS, GREGORIAN, NO_CALENDAR
+public :: set_calendar_type, get_calendar_type
+
+contains
+
+!> Returns a time_type version of a real time in seconds, using an alternative implementation of the
+!! FMS function real_to_time_type that is accurate over a larger range of input values. With 32-bit
+!! signed integers, this version should work over the entire valid range (2^31 days or ~5.8835
+!! million years) of time_types, whereas the standard version in the FMS time_manager stops working
+!! for conversions of times greater than 2^31 seconds, or ~68.1 years.
+type(time_type) function real_to_time(x, err_msg)
+! type(time_type) :: real_to_time !< The output time as a time_type
+ real, intent(in) :: x !< The input time in real seconds.
+ character(len=*), optional, intent(out) :: err_msg !< An optional returned error message.
+
+ ! Local variables
+ integer :: seconds, days, ticks
+ real :: real_subsecond_remainder
+
+ days = floor(x/86400.)
+ seconds = floor(x - 86400.*days)
+ real_subsecond_remainder = x - (days*86400. + seconds)
+ ticks = nint(real_subsecond_remainder * get_ticks_per_second())
+
+ real_to_time = set_time(seconds=seconds, days=days, ticks=ticks, err_msg=err_msg)
+end function real_to_time
+
+end module MOM_time_manager
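+
+! A hand-worked example of the decomposition above (illustrative values, not from a
+! test): for x = 4.0e9 seconds, which exceeds the 2^31-second limit of the standard
+! FMS conversion,
+!   days    = floor(4.0e9 / 86400.)       = 46296
+!   seconds = floor(4.0e9 - 86400.*46296) = 25600
+!   ticks   = 0
+! and set_time(seconds=25600, days=46296) recovers 46296*86400 + 25600 = 4.0e9
+! seconds exactly, so real_to_time stays accurate where real_to_time_type does not.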