diff --git a/CMakeLists.txt b/CMakeLists.txt index 888a622d9..6287f6850 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,6 +26,7 @@ endif() option(OPENMP "Enable OpenMP threading" OFF) option(32BIT "Enable single precision (r4) arithmetic in FV3 dycore" ON) option(DEBUG "Enable compiler definition -DDEBUG" OFF) +option(MOVING_NEST "Enable compiler definition -DMOVING_NEST" OFF) option(MULTI_GASES "Enable compiler definition -DMULTI_GASES" OFF) option(USE_GFSL63 "Enable compiler definition -DUSE_GFSL63" OFF) option(GFS_PHYS "Enable compiler definition -DGFS_PHYS" OFF) @@ -56,6 +57,15 @@ if(NOT FMS_FOUND) add_library(fms ALIAS FMS::fms_${kind}) endif() +list(APPEND moving_srcs + moving_nest/bounding_box.F90 + moving_nest/fv_tracker.F90 + moving_nest/fv_moving_nest.F90 + moving_nest/fv_moving_nest_main.F90 + moving_nest/fv_moving_nest_physics.F90 + moving_nest/fv_moving_nest_types.F90 + moving_nest/fv_moving_nest_utils.F90) + list(APPEND model_srcs model/a2b_edge.F90 model/multi_gases.F90 @@ -109,6 +119,7 @@ list(APPEND driver_srcs driver/fvGFS/atmosphere.F90) list(APPEND fv3_srcs ${model_srcs} + ${moving_srcs} ${tools_srcs}) list(APPEND fv3_defs SPMD @@ -138,6 +149,10 @@ if(use_WRTCOMP) ${driver_srcs}) endif() +if(MOVING_NEST) + list(APPEND fv3_defs MOVING_NEST) +endif() + if(MULTI_GASES) list(APPEND fv3_defs MULTI_GASES) endif() diff --git a/driver/fvGFS/atmosphere.F90 b/driver/fvGFS/atmosphere.F90 index e83bc6199..6243df9d0 100644 --- a/driver/fvGFS/atmosphere.F90 +++ b/driver/fvGFS/atmosphere.F90 @@ -160,18 +160,21 @@ module atmosphere_mod input_nml_file, mpp_root_pe, & mpp_npes, mpp_pe, mpp_chksum, & mpp_get_current_pelist, & - mpp_set_current_pelist, mpp_sync + mpp_set_current_pelist, & + mpp_sync, mpp_sync_self, mpp_send, mpp_recv use mpp_parameter_mod, only: EUPDATE, WUPDATE, SUPDATE, NUPDATE -use mpp_domains_mod, only: domain2d, mpp_update_domains +use mpp_domains_mod, only: CENTER, CORNER, NORTH, EAST, WEST, SOUTH +use mpp_domains_mod, only: domain2d, mpp_update_domains, mpp_global_field +use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain, mpp_get_global_domain use xgrid_mod, only: grid_box_type use field_manager_mod, only: MODEL_ATMOS use tracer_manager_mod, only: get_tracer_index, get_number_tracers, & NO_TRACER, get_tracer_names use DYCORE_typedefs, only: DYCORE_data_type #ifdef GFS_TYPES -use GFS_typedefs, only: IPD_data_type => GFS_data_type, kind_phys +use GFS_typedefs, only: IPD_data_type => GFS_data_type, IPD_control_type => GFS_control_type, kind_phys #else -use IPD_typedefs, only: IPD_data_type, kind_phys => IPD_kind_phys +use IPD_typedefs, only: IPD_data_type, IPD_control_type, kind_phys => IPD_kind_phys #endif use fv_iau_mod, only: IAU_external_data_type #ifdef MULTI_GASES @@ -187,11 +190,17 @@ module atmosphere_mod use fv_fill_mod, only: fill_gfs use fv_dynamics_mod, only: fv_dynamics use fv_nesting_mod, only: twoway_nesting +use boundary_mod, only: fill_nested_grid use fv_diagnostics_mod, only: fv_diag_init, fv_diag, fv_time, prt_maxmin, prt_height +#ifdef MOVING_NEST +use fv_tracker_mod, only: fv_diag_tracker, allocate_tracker +use fv_tracker_mod, only: fv_tracker_init, fv_tracker_center, fv_tracker_post_move +use fv_moving_nest_types_mod, only: Moving_nest +#endif use fv_nggps_diags_mod, only: fv_nggps_diag_init, fv_nggps_diag, fv_nggps_tavg use fv_restart_mod, only: fv_restart, fv_write_restart use fv_timing_mod, only: timing_on, timing_off -use fv_mp_mod, only: is_master +use fv_mp_mod, only: is_master, tile_fine use fv_sg_mod, only: 
fv_subgrid_z use fv_update_phys_mod, only: fv_update_phys use fv_io_mod, only: fv_io_register_nudge_restart @@ -199,8 +208,6 @@ module atmosphere_mod use fv_regional_mod, only: start_regional_restart, read_new_bc_data, & a_step, p_step, current_time_in_seconds use fv_grid_utils_mod, only: g_sum - -use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain use coarse_graining_mod, only: coarse_graining_init use coarse_grained_diagnostics_mod, only: fv_coarse_diag_init, fv_coarse_diag use coarse_grained_restart_files_mod, only: fv_coarse_restart_init @@ -231,6 +238,9 @@ module atmosphere_mod !--- physics/radiation data exchange routines public :: atmos_phys_driver_statein +!--- coupling data exchange routines +public :: atmosphere_fill_nest_cpl + !----------------------------------------------------------------------- ! version number of this module ! Include variable "version" to be written to log file. @@ -239,7 +249,7 @@ module atmosphere_mod !---- private data ---- type (time_type) :: Time_step_atmos - public Atm, mygrid + public Atm, mygrid, p_split, dt_atmos ! Share over to moving nest functions. !These are convenience variables for local use only, and are set to values in Atm% real :: dt_atmos @@ -250,6 +260,8 @@ module atmosphere_mod integer :: nq ! number of transported tracers integer :: sec, seconds, days integer :: id_dynam, id_fv_diag, id_subgridz + integer :: id_fv_tracker + logical :: cold_start = .false. ! used in initial condition integer, dimension(:), allocatable :: id_tracerdt_dyn @@ -341,6 +353,12 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Grid_box, area) call fv_control_init( Atm, dt_atmos, mygrid, grids_on_this_pe, p_split ) ! allocates Atm components; sets mygrid + ! TODO move this higher into atmos_model.F90 for better modularization +#ifdef MOVING_NEST + call fv_tracker_init(size(Atm)) + if (mygrid .eq. 2) call allocate_tracker(mygrid, Atm(mygrid)%bd%isc, Atm(mygrid)%bd%iec, Atm(mygrid)%bd%jsc, Atm(mygrid)%bd%jec) +#endif + Atm(mygrid)%Time_init = Time_init if(Atm(mygrid)%flagstruct%warm_start) then @@ -465,6 +483,9 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Grid_box, area) id_subgridz = mpp_clock_id ('FV subgrid_z',flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) id_fv_diag = mpp_clock_id ('FV Diag', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) +#ifdef MOVING_NEST + id_fv_tracker= mpp_clock_id ('FV tracker', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) +#endif call timing_off('ATMOS_INIT') ! Do CCPP fast physics initialization before call to adiabatic_init (since this calls fv_dynamics) @@ -522,8 +543,8 @@ subroutine atmosphere_init (Time_init, Time, Time_step, Grid_box, area) if (ierr/=0) then cdata%errmsg = ' atmosphere_dynamics: error in ccpp_physics_init for group fast_physics: ' // trim(cdata%errmsg) call mpp_error (FATAL, cdata%errmsg) - end if - end if + endif + endif ! --- initiate the start for a restarted regional forecast if ( Atm(mygrid)%gridstruct%regional .and. 
Atm(mygrid)%flagstruct%warm_start ) then @@ -696,7 +717,7 @@ subroutine atmosphere_dynamics ( Time ) call timing_off('TWOWAY_UPDATE') endif - end do !p_split + enddo !p_split call mpp_clock_end (id_dynam) !----------------------------------------------------- @@ -744,18 +765,18 @@ subroutine atmosphere_dynamics ( Time ) if (Atm(1)%idiag%id_u_dt_sg > 0) then used = send_data(Atm(1)%idiag%id_u_dt_sg, u_dt(isc:iec,jsc:jec,:), fv_time) - end if + endif if (Atm(1)%idiag%id_v_dt_sg > 0) then used = send_data(Atm(1)%idiag%id_v_dt_sg, v_dt(isc:iec,jsc:jec,:), fv_time) - end if + endif if (Atm(1)%idiag%id_t_dt_sg > 0) then t_dt(:,:,:) = rdt*(Atm(1)%pt(isc:iec,jsc:jec,:) - t_dt(:,:,:)) used = send_data(Atm(1)%idiag%id_t_dt_sg, t_dt, fv_time) - end if + endif if (Atm(1)%idiag%id_qv_dt_sg > 0) then qv_dt(:,:,:) = rdt*(Atm(1)%q(isc:iec,jsc:jec,:,sphum) - qv_dt(:,:,:)) used = send_data(Atm(1)%idiag%id_qv_dt_sg, qv_dt, fv_time) - end if + endif ! zero out t_dt for use as an accumulator t_dt = 0. @@ -784,8 +805,8 @@ subroutine atmosphere_end (Time, Grid_box, restart_endfcst) if (ierr/=0) then cdata%errmsg = ' atmosphere_dynamics: error in ccpp_physics_finalize for group fast_physics: ' // trim(cdata%errmsg) call mpp_error (FATAL, cdata%errmsg) - end if - end if + endif + endif ! initialize domains for writing global physics data if ( Atm(mygrid)%flagstruct%nudge ) call fv_nwp_nudge_end @@ -839,7 +860,7 @@ subroutine atmosphere_resolution (i_size, j_size, global) else i_size = npx - 1 j_size = npy - 1 - end if + endif end subroutine atmosphere_resolution @@ -882,7 +903,7 @@ subroutine atmosphere_grid_ctr (lon, lat) lon(i-isc+1,j-jsc+1) = Atm(mygrid)%gridstruct%agrid_64(i,j,1) lat(i-isc+1,j-jsc+1) = Atm(mygrid)%gridstruct%agrid_64(i,j,2) enddo - end do + enddo end subroutine atmosphere_grid_ctr @@ -890,7 +911,7 @@ end subroutine atmosphere_grid_ctr !>@brief The subroutine 'atmosphere_grid_bdry' is an API to returns the !! longitude and latitude finite volume edges (grid box) for the current MPI-rank. subroutine atmosphere_grid_bdry (blon, blat, global) - real, intent(out) :: blon(:,:), blat(:,:) !< Unit: radian + real(kind=kind_phys), intent(out) :: blon(:,:), blat(:,:) !< Unit: radian logical, intent(in), optional :: global ! Local data: integer i,j @@ -905,7 +926,7 @@ subroutine atmosphere_grid_bdry (blon, blat, global) blon(i-isc+1,j-jsc+1) = Atm(mygrid)%gridstruct%grid(i,j,1) blat(i-isc+1,j-jsc+1) = Atm(mygrid)%gridstruct%grid(i,j,2) enddo - end do + enddo end subroutine atmosphere_grid_bdry @@ -932,16 +953,20 @@ end subroutine get_nth_domain_info !! the "domain2d" variable associated with the coupling grid and the !! decomposition for the current cubed-sphere tile. !>@detail Coupling is done using the mass/temperature grid with no halos. - subroutine atmosphere_domain ( fv_domain, layout, regional, nested, ngrids_atmos, mygrid_atmos, pelist ) + subroutine atmosphere_domain ( fv_domain, layout, regional, nested, & + moving_nest_parent, is_moving_nest, & + ngrids_atmos, mygrid_atmos, pelist ) type(domain2d), intent(out) :: fv_domain integer, intent(out) :: layout(2) logical, intent(out) :: regional logical, intent(out) :: nested + logical, intent(out) :: moving_nest_parent + logical, intent(out) :: is_moving_nest integer, intent(out) :: ngrids_atmos integer, intent(out) :: mygrid_atmos integer, pointer, intent(out) :: pelist(:) -! returns the domain2d variable associated with the coupling grid -! 
note: coupling is done using the mass/temperature grid with no halos + + integer :: n fv_domain = Atm(mygrid)%domain_for_coupler layout(1:2) = Atm(mygrid)%layout(1:2) @@ -952,6 +977,31 @@ subroutine atmosphere_domain ( fv_domain, layout, regional, nested, ngrids_atmos call set_atmosphere_pelist() pelist => Atm(mygrid)%pelist + moving_nest_parent = .false. + is_moving_nest = .false. + +#ifdef MOVING_NEST + ! Currently, the moving nesting configuration only supports one parent (global + ! or regional) with one moving nest. + ! This will need to be revisited when multiple and telescoping moving nests are enabled. + + ! Set is_moving_nest to true if this is a moving nest + is_moving_nest = Moving_nest(mygrid)%mn_flag%is_moving_nest + ! Set parent_of_moving_nest to true if it has a moving nest child + !do n=1,ngrids + ! print '("[INFO] WDR atmosphere_domain npe=",I0," mygrid=",I0," n=",I0," is_moving_nest=",L1)', mpp_pe(), mygrid, n, Moving_nest(n)%mn_flag%is_moving_nest + !enddo + + do n=2,ngrids + if ( mygrid == Atm(n)%parent_grid%grid_number .and. & + Moving_nest(n)%mn_flag%is_moving_nest ) then + moving_nest_parent = .true. + endif + enddo + !print '("[INFO] WDR atmosphere_domain npe=",I0," moving_nest_parent=",L1," is_moving_nest=",L1)', mpp_pe(), moving_nest_parent, is_moving_nest + +#endif + end subroutine atmosphere_domain @@ -1733,6 +1783,29 @@ subroutine atmosphere_state_update (Time, IPD_Data, IAU_Data, Atm_block, flip_vc call mpp_clock_end(id_fv_diag) endif +#ifdef MOVING_NEST + !---- FV internal vortex tracker ----- + if ( Moving_nest(mygrid)%mn_flag%is_moving_nest ) then + if ( Moving_nest(mygrid)%mn_flag%vortex_tracker .eq. 2 .or. & + Moving_nest(mygrid)%mn_flag%vortex_tracker .eq. 6 .or. & + Moving_nest(mygrid)%mn_flag%vortex_tracker .eq. 7 ) then + + fv_time = Time_next + call get_time (fv_time, seconds, days) + call get_time (Time_step_atmos, sec) + if (mod(seconds,Moving_nest(mygrid)%mn_flag%ntrack*sec) .eq. 0) then + call mpp_clock_begin(id_fv_tracker) + call timing_on('FV_TRACKER') + call fv_diag_tracker(Atm(mygrid:mygrid), zvir, fv_time) + call fv_tracker_center(Atm(mygrid), mygrid, fv_time) + call timing_off('FV_TRACKER') + call mpp_clock_end(id_fv_tracker) + endif + + endif + endif +#endif + end subroutine atmosphere_state_update @@ -2326,4 +2399,144 @@ subroutine atmos_phys_qdt_diag(q, phys_diag, nq, dt, begin) end subroutine atmos_phys_qdt_diag +!>@brief The subroutine 'atmosphere_fill_grid_cpl' is to downscale/pass the +!! coupling variables (e.g., sea surface temperature) received by the parent grid +!! down into the nested grid(s). +!>@details First the coupling field(s) is retreived from the IPD_data structure, +!! and then loops through nested grids and call the fill_nest_grid_cpl to actually +!! communicate and fill the nested grid(s) for coupling variables from its parent. +!! After that the updated coupling field(s) is put back to the IPD_data structure. +!! Note: Currently, only sea surface temperature is passed down into the nest(s). + subroutine atmosphere_fill_nest_cpl(Atm_block, IPD_control, IPD_data) + type(block_control_type), intent(in) :: Atm_block !< Physics block layout + type(IPD_control_type), intent(in) :: IPD_control !< Physics metadata + type(IPD_data_type), intent(inout) :: IPD_data(:) !< Physics variable data + + integer :: nb, blen, ix, i, j, n + character*255 :: message + + ! Deal with tsfco (sea surface temperature) + if (IPD_control%cplocn2atm) then + ! 
Extract the coupling field + do nb = 1,Atm_block%nblks + blen = Atm_block%blksz(nb) + do ix = 1, blen + i = Atm_block%index(nb)%ii(ix) + j = Atm_block%index(nb)%jj(ix) + Atm(mygrid)%parent2nest_2d(i,j) = IPD_Data(nb)%Sfcprop%tsfco(ix) + enddo + enddo + ! Loop through and fill all nested grids + do n=2,ngrids + if (n==mygrid .or. mygrid==Atm(n)%parent_grid%grid_number) then + call fill_nested_grid_cpl(n, n==mygrid) + endif + enddo + ! Update the nested grids + if (Atm(mygrid)%neststruct%nested) then + do nb = 1,Atm_block%nblks + blen = Atm_block%blksz(nb) + do ix = 1, blen + i = Atm_block%index(nb)%ii(ix) + j = Atm_block%index(nb)%jj(ix) + if (IPD_data(nb)%Sfcprop%oceanfrac(ix) > 0.) then + IPD_data(nb)%Sfcprop%tsfco(ix) = Atm(mygrid)%parent2nest_2d(i,j) + endif + enddo + enddo + endif + endif + + ! Deal with zorlwav (sea surface roughness length) + if (IPD_control%cplwav2atm) then + ! Extract the coupling field + do nb = 1,Atm_block%nblks + blen = Atm_block%blksz(nb) + do ix = 1, blen + i = Atm_block%index(nb)%ii(ix) + j = Atm_block%index(nb)%jj(ix) + Atm(mygrid)%parent2nest_2d(i,j) = IPD_Data(nb)%Sfcprop%zorlwav(ix) + enddo + enddo + ! Loop through and fill all nested grids + do n=2,ngrids + if (n==mygrid .or. mygrid==Atm(n)%parent_grid%grid_number) then + call fill_nested_grid_cpl(n, n==mygrid) + endif + enddo + ! Update the nested grids + if (Atm(mygrid)%neststruct%nested) then + do nb = 1,Atm_block%nblks + blen = Atm_block%blksz(nb) + do ix = 1, blen + i = Atm_block%index(nb)%ii(ix) + j = Atm_block%index(nb)%jj(ix) + if (IPD_data(nb)%Sfcprop%oceanfrac(ix) > 0.) then + IPD_data(nb)%Sfcprop%zorlwav(ix) = Atm(mygrid)%parent2nest_2d(i,j) + ! IPD_data(nb)%Sfcprop%zorlw(ix) = Atm(mygrid)%parent2nest_2d(i,j) + endif + enddo + enddo + endif + endif + + end subroutine atmosphere_fill_nest_cpl + +!>@brief The subroutine 'fill_nested_grid_cpl' fills the nested grid for +!! coupling variables from its parent grid +!>@details Fill parent2nest_2d on the nested grid with values from its parent. + subroutine fill_nested_grid_cpl(this_grid, proc_in) + integer, intent(in) :: this_grid + logical, intent(in), optional :: proc_in + + real, allocatable :: g_dat(:,:,:) + integer :: p, sending_proc + integer :: isd_p, ied_p, jsd_p, jed_p + integer :: isg, ieg, jsg, jeg + integer :: isc, iec, jsc, jec + logical :: process + character*255 :: message + + process = .true. + if (present(proc_in)) then + process = proc_in + else + process = .true. + endif + + call mpp_get_global_domain(Atm(this_grid)%parent_grid%domain, isg, ieg, jsg, jeg) + call mpp_get_data_domain(Atm(this_grid)%parent_grid%domain, isd_p, ied_p, jsd_p, jed_p) + call mpp_get_compute_domain(Atm(this_grid)%domain, isc, iec, jsc, jec) + allocate( g_dat(isg:ieg, jsg:jeg, 1) ) + + call timing_on('COMM_TOTAL') + sending_proc = Atm(this_grid)%parent_grid%pelist(1) + & + ( Atm(this_grid)%neststruct%parent_tile-tile_fine(Atm(this_grid)%parent_grid%grid_number)+ & + Atm(this_grid)%parent_grid%flagstruct%ntiles-1 )*Atm(this_grid)%parent_grid%npes_per_tile + !if (Atm(this_grid)%neststruct%parent_proc .and. 
Atm(this_grid)%neststruct%parent_tile == Atm(this_grid)%parent_grid%global_tile) then + if (Atm(this_grid)%neststruct%parent_tile == Atm(this_grid)%parent_grid%global_tile) then + call mpp_global_field(Atm(this_grid)%parent_grid%domain, & + Atm(this_grid)%parent_grid%parent2nest_2d(isd_p:ied_p,jsd_p:jed_p), & + g_dat(isg:,jsg:,1), position=CENTER) + if (mpp_pe() == sending_proc) then + do p=1,size(Atm(this_grid)%pelist) + call mpp_send(g_dat, size(g_dat), Atm(this_grid)%pelist(p)) + enddo + endif + endif + if (any(Atm(this_grid)%pelist == mpp_pe())) then + call mpp_recv(g_dat, size(g_dat), sending_proc) + endif + call timing_off('COMM_TOTAL') + if (process) then + call fill_nested_grid(Atm(this_grid)%parent2nest_2d, g_dat(isg:,jsg:,1), & + Atm(this_grid)%neststruct%ind_h, Atm(this_grid)%neststruct%wt_h, & + 0, 0, isg, ieg, jsg, jeg, Atm(this_grid)%bd) + endif + + call mpp_sync_self + deallocate(g_dat) + + end subroutine fill_nested_grid_cpl + end module atmosphere_mod diff --git a/model/fv_arrays.F90 b/model/fv_arrays.F90 index 27f44c04b..b89dcefa1 100644 --- a/model/fv_arrays.F90 +++ b/model/fv_arrays.F90 @@ -29,6 +29,8 @@ module fv_arrays_mod use horiz_interp_type_mod, only: horiz_interp_type use mpp_mod, only: mpp_broadcast use platform_mod, only: r8_kind + + public integer, public, parameter :: R_GRID = r8_kind @@ -1193,6 +1195,7 @@ module fv_arrays_mod ,is_west_uvw ,ie_west_uvw ,js_west_uvw ,je_west_uvw end type fv_regional_bc_bounds_type + type fv_atmos_type logical :: allocated = .false. @@ -1251,6 +1254,9 @@ module fv_arrays_mod real, _ALLOCATABLE :: peln(:,:,:) _NULL !< ln(pe) real, _ALLOCATABLE :: pkz (:,:,:) _NULL !< finite-volume mean pk +! For downscaling/remapping a 2d variable from parent to its nest + real, _ALLOCATABLE :: parent2nest_2d(:,:) _NULL !< 2d arrary for downscaling a variable from parent to its nest + ! For phys coupling: real, _ALLOCATABLE :: u_srf(:,:) _NULL !< Surface u-wind real, _ALLOCATABLE :: v_srf(:,:) _NULL !< Surface v-wind @@ -1475,6 +1481,8 @@ subroutine allocate_fv_atmos_type(Atm, isd_in, ied_in, jsd_in, jed_in, is_in, ie allocate ( Atm%peln(is:ie,npz+1,js:je) ) allocate ( Atm%pkz(is:ie,js:je,npz) ) + allocate ( Atm%parent2nest_2d(isd:ied,jsd:jed) ) + allocate ( Atm%u_srf(is:ie,js:je) ) allocate ( Atm%v_srf(is:ie,js:je) ) @@ -1864,6 +1872,8 @@ subroutine deallocate_fv_atmos_type(Atm) deallocate ( Atm%inline_mp%pres ) deallocate ( Atm%inline_mp%preg ) + deallocate ( Atm%parent2nest_2d ) + deallocate ( Atm%u_srf ) deallocate ( Atm%v_srf ) if( Atm%flagstruct%fv_land ) deallocate ( Atm%sgh ) @@ -2172,12 +2182,14 @@ subroutine deallocate_fv_nest_BC_type_3d(BC) type(fv_nest_BC_type_3d) :: BC - if (.not. BC%allocated) return + !if (.not. BC%allocated) return + if (allocated(BC%north_t1)) then ! Added WDR deallocate(BC%north_t1) deallocate(BC%south_t1) deallocate(BC%west_t1) deallocate(BC%east_t1) + endif ! 
Added WDR if (allocated(BC%north_t0)) then deallocate(BC%north_t0) @@ -2190,5 +2202,4 @@ subroutine deallocate_fv_nest_BC_type_3d(BC) end subroutine deallocate_fv_nest_BC_type_3d - end module fv_arrays_mod diff --git a/model/fv_control.F90 b/model/fv_control.F90 index b58558119..5b24ccf56 100644 --- a/model/fv_control.F90 +++ b/model/fv_control.F90 @@ -161,6 +161,11 @@ module fv_control_mod use molecular_diffusion_mod, only: molecular_diffusion_init, & read_namelist_molecular_diffusion_nml +#ifdef MOVING_NEST + use fv_moving_nest_types_mod, only: fv_moving_nest_init, deallocate_fv_moving_nests + use fv_tracker_mod, only: deallocate_tracker +#endif + implicit none private @@ -528,18 +533,21 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split, Atm(n)%neststruct%joffset = nest_joffsets(n) Atm(n)%neststruct%parent_tile = tile_coarse(n) Atm(n)%neststruct%refinement = nest_refine(n) - else - Atm(n)%neststruct%ioffset = -999 Atm(n)%neststruct%joffset = -999 Atm(n)%neststruct%parent_tile = -1 Atm(n)%neststruct%refinement = -1 - endif - enddo +#ifdef MOVING_NEST + ! This has to be called on the input.nml namelist for all PEs + ! input_nest02.nml does not have any of the moving nest parameters + ! Later call to read_input_nml changes which namelist is used + call fv_moving_nest_init(Atm) +#endif + if (pecounter /= npes) then if (mpp_pe() == 0) then print*, 'npes = ', npes, ', grid_pes = ', grid_pes(1:ngrids) @@ -711,6 +719,7 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split, !reset to universal pelist call mpp_set_current_pelist( global_pelist ) !Except for npes_nest_tile all arrays should be just the nests and should NOT include the top level + call mpp_define_nest_domains(global_nest_domain, Atm(this_grid)%domain, & ngrids-1, nest_level=nest_level(2:ngrids) , & istart_coarse=nest_ioffsets(2:ngrids), jstart_coarse=nest_joffsets(2:ngrids), & @@ -1000,6 +1009,7 @@ subroutine read_namelist_fv_nest_nml end subroutine read_namelist_fv_nest_nml + subroutine read_namelist_fv_grid_nml integer :: f_unit, ios, ierr @@ -1323,6 +1333,10 @@ subroutine fv_end(Atm, this_grid, restart_endfcst) call deallocate_coarse_restart_type(Atm(n)%coarse_graining%restart) end do +#ifdef MOVING_NEST + call deallocate_fv_moving_nests(ngrids) + call deallocate_tracker(ngrids) +#endif end subroutine fv_end !------------------------------------------------------------------------------- diff --git a/model/fv_grid_utils.F90 b/model/fv_grid_utils.F90 index 3ca58f88b..962a0d649 100644 --- a/model/fv_grid_utils.F90 +++ b/model/fv_grid_utils.F90 @@ -900,6 +900,11 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) Atm%gridstruct%dy6 = Atm%gridstruct%dy6_64 endif +#ifndef MOVING_NEST +! WDR Need to use these arrays again if moving the nest +! So don't deallocate them. +! TODO clean them up at end of model run for completeness + !--- deallocate the higher-order gridstruct arrays !rab deallocate ( Atm%gridstruct%grid_64 ) !rab deallocate ( Atm%gridstruct%agrid_64 ) @@ -913,6 +918,10 @@ subroutine grid_utils_init(Atm, npx, npy, npz, non_ortho, grid_type, c2l_order) deallocate ( Atm%gridstruct%dyc_64 ) deallocate ( Atm%gridstruct%cosa_64 ) deallocate ( Atm%gridstruct%sina_64 ) + +! 
WDR TODO double checking on this +#endif + if ( Atm%flagstruct%molecular_diffusion ) then deallocate ( Atm%gridstruct%area_u_64 ) deallocate ( Atm%gridstruct%area_v_64 ) @@ -1766,6 +1775,9 @@ subroutine latlon2xyz(p, e, id) real (f_p):: q(2) real (f_p):: e1, e2, e3 + logical, save :: first_time = .true. + integer, save :: id_latlon + do n=1,2 q(n) = p(n) enddo diff --git a/model/fv_nesting.F90 b/model/fv_nesting.F90 index c41dd3e9c..0cdca0b2b 100644 --- a/model/fv_nesting.F90 +++ b/model/fv_nesting.F90 @@ -143,9 +143,92 @@ module fv_nesting_mod real, dimension(:,:,:), allocatable, target :: dum_West, dum_East, dum_North, dum_South private -public :: twoway_nesting, setup_nested_grid_BCs, set_physics_BCs +public :: twoway_nesting, setup_nested_grid_BCs, set_physics_BCs, dealloc_nested_buffers contains + +!>@brief The subroutine 'dealloc_nested_buffers' deallocates the BC buffers +!! so that they can be resized after nest motion. +!! Ramstrom/HRD Moving Nest upgrade +subroutine dealloc_nested_buffers(Atm) + type(fv_atmos_type), intent(in) :: Atm + + integer :: n, ncnst + !logical :: dummy = .false. + logical :: debug_log = .false. + + integer :: this_pe + + this_pe = mpp_pe() + + if (debug_log) print '("[INFO] WDR NBC deallocating buffers fv_nesting.F90 npe=",I0)', this_pe + + call deallocate_fv_nest_BC_type(u_buf) + call deallocate_fv_nest_BC_type(v_buf) + call deallocate_fv_nest_BC_type(uc_buf) + call deallocate_fv_nest_BC_type(vc_buf) + + call deallocate_fv_nest_BC_type(delp_buf) + call deallocate_fv_nest_BC_type(delz_buf) + + call deallocate_fv_nest_BC_type(pt_buf) + call deallocate_fv_nest_BC_type(w_buf) + call deallocate_fv_nest_BC_type(divg_buf) + + call deallocate_fv_nest_BC_type(pe_u_buf) + call deallocate_fv_nest_BC_type(pe_v_buf) + call deallocate_fv_nest_BC_type(pe_b_buf) + + ncnst = size(q_buf) + do n=1,ncnst + call deallocate_fv_nest_BC_type(q_buf(n)) + end do + + ! TODO remove the allocation steps + + ! Reallocate based on the new Atm structure + !ns = Atm%neststruct%nsponge + + !! The code for using the BC buffers will allocate when needed in boundary.F90::nested_grid_BC_recv() + + !allocate_fv_nest_BC_type_3D_Atm(BC,Atm,ns,istag,jstag,dummy) + + ! Rely on the previously set values for istag, jstag + +! call allocate_fv_nest_BC_type(Atm%neststruct%delp_BC,Atm,ns,0,0,dummy) +! call allocate_fv_nest_BC_type(Atm%neststruct%u_BC,Atm,ns,0,1,dummy) +! call allocate_fv_nest_BC_type(Atm%neststruct%v_BC,Atm,ns,1,0,dummy) +! call allocate_fv_nest_BC_type(Atm%neststruct%uc_BC,Atm,ns,1,0,dummy) +! call allocate_fv_nest_BC_type(Atm%neststruct%vc_BC,Atm,ns,0,1,dummy) +! call allocate_fv_nest_BC_type(Atm%neststruct%divg_BC,Atm,ns,1,1,dummy) + +! if (ncnst > 0) then +! allocate(Atm%neststruct%q_BC(ncnst)) +! do n=1,ncnst +! call allocate_fv_nest_BC_type(Atm%neststruct%q_BC(n),Atm,ns,0,0,dummy) +! enddo +! 
endif + + !call allocate_fv_nest_BC_type_3D_Atm(u_buf, Atm, ns, u_buf%istag, u_buf%jstag, dummy) + !call allocate_fv_nest_BC_type_3D_Atm(v_buf, Atm, ns, v_buf%istag, v_buf%jstag, dummy) + !call allocate_fv_nest_BC_type_3D_Atm(uc_buf, Atm, ns, uc_buf%istag, uc_buf%jstag, dummy) + !call allocate_fv_nest_BC_type_3D_Atm(vc_buf, Atm, ns, vc_buf%istag, vc_buf%jstag, dummy) + + !call allocate_fv_nest_BC_type_3D_Atm(delp_buf, Atm, ns, 0, 0, dummy) + !call allocate_fv_nest_BC_type_3D_Atm(delz_buf, Atm, ns, delz_buf%istag, delz_buf%jstag, dummy) + + !call allocate_fv_nest_BC_type_3D_Atm(pt_buf, Atm, ns, pt_buf%istag, pt_buf%jstag, dummy) + !call allocate_fv_nest_BC_type_3D_Atm(pkz_buf, Atm, ns, pkz_buf%istag, pkz_buf%jstag, dummy) + !call allocate_fv_nest_BC_type_3D_Atm(w_buf, Atm, ns, w_buf%istag, w_buf%jstag, dummy) + !call allocate_fv_nest_BC_type_3D_Atm(divg_buf, Atm, ns, divg_buf%istag, divg_buf%jstag, dummy) + + !do n=1,ncnst + ! call allocate_fv_nest_BC_type_3D_Atm(q_buf(n), Atm, ns, q_buf(n)%istag, q_buf(n)%jstag, dummy) + !end do + +end subroutine dealloc_nested_buffers + + !>@brief The subroutine 'setup_nested_grid_BCs' fetches data from the coarse grid !! to set up the nested-grid boundary conditions. subroutine setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & diff --git a/moving_nest/bounding_box.F90 b/moving_nest/bounding_box.F90 new file mode 100644 index 000000000..cd93a8308 --- /dev/null +++ b/moving_nest/bounding_box.F90 @@ -0,0 +1,175 @@ +!*********************************************************************** +!* GNU Lesser General Public License +!* +!* This file is part of the FV3 dynamical core. +!* +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. +!* +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with the FV3 dynamical core. +!* If not, see . +!*********************************************************************** + + +!*********************************************************************** +!> @file +!! @brief Provides subroutines for grid bounding boxes for moving nest +!! @author W. Ramstrom, AOML/HRD 07/28/2021 +!! @email William.Ramstrom@noaa.gov +!=======================================================================! + + +module bounding_box_mod + use mpp_domains_mod, only : mpp_get_C2F_index, nest_domain_type + use mpp_mod, only : mpp_pe + use fv_arrays_mod, only : R_GRID + +#ifdef GFS_TYPES + use GFS_typedefs, only : kind_phys +#else + use IPD_typedefs, only : kind_phys => IPD_kind_phys +#endif + + ! Simple aggregation of the start and end indices of a 2D grid + ! 
Makes argument lists clearer to read + type bbox + integer :: is, ie, js, je + end type bbox + + interface fill_bbox + module procedure fill_bbox_r4_2d + module procedure fill_bbox_r4_3d + module procedure fill_bbox_r4_4d + module procedure fill_bbox_r8_2d + module procedure fill_bbox_r8_3d + module procedure fill_bbox_r8_4d + end interface fill_bbox + +contains + + subroutine fill_bbox_r4_2d(out_bbox, in_grid) + type(bbox), intent(out) :: out_bbox + real*4, allocatable, intent(in) :: in_grid(:,:) + + out_bbox%is = lbound(in_grid, 1) + out_bbox%ie = ubound(in_grid, 1) + out_bbox%js = lbound(in_grid, 2) + out_bbox%je = ubound(in_grid, 2) + end subroutine fill_bbox_r4_2d + + + subroutine fill_bbox_r4_3d(out_bbox, in_grid) + type(bbox), intent(out) :: out_bbox + real*4, allocatable, intent(in) :: in_grid(:,:,:) + + out_bbox%is = lbound(in_grid, 1) + out_bbox%ie = ubound(in_grid, 1) + out_bbox%js = lbound(in_grid, 2) + out_bbox%je = ubound(in_grid, 2) + end subroutine fill_bbox_r4_3d + + subroutine fill_bbox_r4_4d(out_bbox, in_grid) + type(bbox), intent(out) :: out_bbox + real*4, allocatable, intent(in) :: in_grid(:,:,:,:) + + out_bbox%is = lbound(in_grid, 1) + out_bbox%ie = ubound(in_grid, 1) + out_bbox%js = lbound(in_grid, 2) + out_bbox%je = ubound(in_grid, 2) + end subroutine fill_bbox_r4_4d + + + subroutine fill_bbox_r8_2d(out_bbox, in_grid) + type(bbox), intent(out) :: out_bbox + real*8, allocatable, intent(in) :: in_grid(:,:) + + out_bbox%is = lbound(in_grid, 1) + out_bbox%ie = ubound(in_grid, 1) + out_bbox%js = lbound(in_grid, 2) + out_bbox%je = ubound(in_grid, 2) + end subroutine fill_bbox_r8_2d + + subroutine fill_bbox_r8_3d(out_bbox, in_grid) + type(bbox), intent(out) :: out_bbox + real*8, allocatable, intent(in) :: in_grid(:,:,:) + + out_bbox%is = lbound(in_grid, 1) + out_bbox%ie = ubound(in_grid, 1) + out_bbox%js = lbound(in_grid, 2) + out_bbox%je = ubound(in_grid, 2) + end subroutine fill_bbox_r8_3d + + + subroutine fill_bbox_r8_4d(out_bbox, in_grid) + type(bbox), intent(out) :: out_bbox + real*8, allocatable, intent(in) :: in_grid(:,:,:,:) + + out_bbox%is = lbound(in_grid, 1) + out_bbox%ie = ubound(in_grid, 1) + out_bbox%js = lbound(in_grid, 2) + out_bbox%je = ubound(in_grid, 2) + end subroutine fill_bbox_r8_4d + + subroutine show_bbox(tag, in_bbox, lats, lons) + character(len=*) :: tag + type(bbox), intent(out) :: in_bbox + real(kind=kind_phys), allocatable, intent(in) :: lats(:,:), lons(:,:) + + integer :: x,y + real(kind=R_GRID) :: pi = 4 * atan(1.0d0) + real :: pi180 + real :: rad2deg, deg2rad + + pi180 = pi / 180.0 + deg2rad = pi / 180.0 + rad2deg = 1.0 / pi180 + + x = in_bbox%is + y = in_bbox%js + !print '("[INFO] WDR show_bbox ",A8," lats(",I0,",",I0,")=",F10.5," lons(",I0,",",I0,")="F10.5)', tag, x, y, lats(x,y)*rad2deg, x, y, lons(x,y)*rad2deg + x = in_bbox%ie + y = in_bbox%js + !print '("[INFO] WDR show_bbox ",A8," lats(",I0,",",I0,")=",F10.5," lons(",I0,",",I0,")="F10.5)', tag, x, y, lats(x,y)*rad2deg, x, y, lons(x,y)*rad2deg + x = in_bbox%is + y = in_bbox%je + !print '("[INFO] WDR show_bbox ",A8," lats(",I0,",",I0,")=",F10.5," lons(",I0,",",I0,")="F10.5)', tag, x, y, lats(x,y)*rad2deg, x, y, lons(x,y)*rad2deg + x = in_bbox%ie + y = in_bbox%je + !print '("[INFO] WDR show_bbox ",A8," lats(",I0,",",I0,")=",F10.5," lons(",I0,",",I0,")="F10.5)', tag, x, y, lats(x,y)*rad2deg, x, y, lons(x,y)*rad2deg + + end subroutine show_bbox + + + !>@brief This subroutine returns the nest grid indices that correspond to the input nest domain, direction, and position + !>@details 
Simplifies the call signature with the bbox type rather than 4 separate integers + subroutine bbox_get_C2F_index(nest_domain, bbox_fine, bbox_coarse, direction, position) + implicit none + type(nest_domain_type), intent(in) :: nest_domain + type(bbox), intent(out) :: bbox_fine, bbox_coarse + integer, intent(in) :: direction, position + + integer :: this_pe + integer :: nest_level = 1 ! WDR TODO allow to vary + this_pe = mpp_pe() + + !print '("[INFO] WDR enter bbox_get_C2F_index npe=",I0)', this_pe + + call mpp_get_C2F_index(nest_domain, bbox_fine%is, bbox_fine%ie, bbox_fine%js, bbox_fine%je, & + bbox_coarse%is, bbox_coarse%ie, bbox_coarse%js, bbox_coarse%je, direction, nest_level, position=position) + + !print '("[INFO] WDR bbox_get_C2F_index npe=",I0," dir=",I0," pos=",I0," fine (",I0,"-",I0,",",I0,"-",I0,")")', this_pe, direction, position, bbox_fine%is, bbox_fine%ie, bbox_fine%js, bbox_fine%je + + !print '("[INFO] WDR bbox_get_C2F_index npe=",I0," dir=",I0," pos=",I0," coarse (",I0,"-",I0,",",I0,"-",I0,")")', this_pe, direction, position, bbox_coarse%is, bbox_coarse%ie, bbox_coarse%js, bbox_coarse%je + + end subroutine bbox_get_C2F_index + +end module bounding_box_mod diff --git a/moving_nest/fv_moving_nest.F90 b/moving_nest/fv_moving_nest.F90 new file mode 100644 index 000000000..8fc645b6d --- /dev/null +++ b/moving_nest/fv_moving_nest.F90 @@ -0,0 +1,2740 @@ +!*********************************************************************** +!* GNU Lesser General Public License +!* +!* This file is part of the FV3 dynamical core. +!* +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. +!* +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with the FV3 dynamical core. +!* If not, see . +!*********************************************************************** + + +!*********************************************************************** +!> @file +!! @brief Provides Moving Nest functionality in FV3 dynamic core +!! @author W. Ramstrom, AOML/HRD 01/15/2021 +!! @email William.Ramstrom@noaa.gov +!=======================================================================! + + +!=======================================================================! +! +! Notes +! +!------------------------------------------------------------------------ +! Moving Nest Subroutine Naming Convention +!----------------------------------------------------------------------- +! +! mn_meta_* subroutines perform moving nest operations for FV3 metadata. +! These routines will run only once per nest move. +! +! mn_var_* subroutines perform moving nest operations for an individual FV3 variable. +! These routines will run many times per nest move. +! +! mn_prog_* subroutines perform moving nest operations for the list of prognostic fields. +! These routines will run only once per nest move. +! +! mn_phys_* subroutines perform moving nest operations for the list of physics fields. +! These routines will run only once per nest move. +! +! =======================================================================! 
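The naming convention above corresponds to a fixed per-move call sequence, which the "Step" comments later in this module number explicitly. The sketch below is illustrative only: the driver name mn_move_nest_sketch and its argument plumbing are assumptions added here for clarity (the actual sequencing presumably lives in fv_moving_nest_main.F90), and the later steps (static-data reads, mn_var_shift_data, diagnostics output) are omitted. It assumes the use associations of fv_moving_nest_mod defined just below.

  ! Illustrative driver for a single nest move (sketch only; the name and the way
  ! the arguments are gathered are assumptions, not part of this PR).
  subroutine mn_move_nest_sketch(Atm, n, child_grid_num, is_fine_pe, npz,               &
                                 nest_domain, domain_fine, domain_coarse, pelist,       &
                                 delta_i_c, delta_j_c, extra_halo,                      &
                                 istart_coarse, iend_coarse, jstart_coarse, jend_coarse, &
                                 istart_fine, iend_fine, jstart_fine, jend_fine)
    type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:)   !< Array of atmospheric data
    integer, intent(in)                   :: n, child_grid_num, npz     !< Grid indices; number of levels
    logical, intent(in)                   :: is_fine_pe                 !< Is this a nest PE?
    type(nest_domain_type), intent(inout) :: nest_domain                !< Nest domain structure
    type(domain2d), intent(inout)         :: domain_fine, domain_coarse !< Fine and coarse 2D domains
    integer, allocatable, intent(in)      :: pelist(:)                  !< List of involved PEs
    integer, intent(in)                   :: delta_i_c, delta_j_c       !< Nest move in coarse-grid cells
    integer, intent(in)                   :: extra_halo                 !< Extra halo points
    integer, intent(inout) :: istart_coarse, iend_coarse, jstart_coarse, jend_coarse !< Coarse-grid footprint
    integer, intent(in)    :: istart_fine, iend_fine, jstart_fine, jend_fine         !< Fine-grid bounds

    ! Step 1.9: copy halo-less prognostic fields (delz) into haloed temporaries
    call mn_prog_fill_temp_variables(Atm, n, child_grid_num, is_fine_pe, npz)

    ! Step 2: parent and nest PEs cooperate to (re)fill the nest-edge halos
    call mn_prog_fill_nest_halos_from_parent(Atm, n, child_grid_num, is_fine_pe, nest_domain, npz)

    ! Step 3: shift the nest footprint on the parent by (delta_i_c, delta_j_c) coarse cells
    call mn_meta_move_nest(delta_i_c, delta_j_c, pelist, is_fine_pe, extra_halo,        &
                           nest_domain, domain_fine, domain_coarse,                     &
                           istart_coarse, iend_coarse, jstart_coarse, jend_coarse,      &
                           istart_fine, iend_fine, jstart_fine, jend_fine)

    ! Step 4: nest PEs refresh the halos shared between neighbouring nest PEs
    if (is_fine_pe) call mn_prog_fill_intern_nest_halos(Atm(n), domain_fine, is_fine_pe)

    ! ... Steps 5-8 (static/lat-lon reads, mn_var_shift_data, netCDF diagnostics) go here ...

    ! Counterpart of Step 1.9: copy the shifted temporaries back into Atm(n)%delz
    call mn_prog_apply_temp_variables(Atm, n, child_grid_num, is_fine_pe, npz)
  end subroutine mn_move_nest_sketch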
+ +#define REMAP 1 + +module fv_moving_nest_mod +#ifdef MOVING_NEST + + use block_control_mod, only : block_control_type + use fms_mod, only : mpp_clock_id, mpp_clock_begin, mpp_clock_end, CLOCK_ROUTINE, clock_flag_default + use mpp_mod, only : mpp_pe, mpp_sync, mpp_sync_self, mpp_send, mpp_error, NOTE, FATAL + use mpp_domains_mod, only : mpp_update_domains, mpp_get_data_domain, mpp_get_global_domain + use mpp_domains_mod, only : mpp_define_nest_domains, mpp_shift_nest_domains, nest_domain_type, domain2d + use mpp_domains_mod, only : mpp_get_C2F_index, mpp_update_nest_fine + use mpp_domains_mod, only : mpp_get_F2C_index, mpp_update_nest_coarse + use mpp_domains_mod, only : NORTH, SOUTH, EAST, WEST, CORNER, CENTER + use mpp_domains_mod, only : NUPDATE, SUPDATE, EUPDATE, WUPDATE, DGRID_NE + +#ifdef GFS_TYPES + use GFS_typedefs, only: IPD_data_type => GFS_data_type, & + IPD_control_type => GFS_control_type, kind_phys +#else + use IPD_typedefs, only: IPD_data_type, IPD_control_type, kind_phys => IPD_kind_phys +#endif + use GFS_init, only: GFS_grid_populate + + use boundary_mod, only: update_coarse_grid, update_coarse_grid_mpp + use bounding_box_mod, only: bbox, bbox_get_C2F_index, fill_bbox, show_bbox + use constants_mod, only: cp_air, omega, rdgas, grav, rvgas, kappa, pstd_mks, hlv + use field_manager_mod, only: MODEL_ATMOS + use fms_io_mod, only: read_data, write_data, get_global_att_value, fms_io_init, fms_io_exit + use fv_arrays_mod, only: fv_atmos_type, fv_nest_type, fv_grid_type, R_GRID + use fv_arrays_mod, only: allocate_fv_nest_bc_type, deallocate_fv_nest_bc_type + use fv_grid_tools_mod, only: init_grid + use fv_grid_utils_mod, only: grid_utils_init, ptop_min, dist2side_latlon + use fv_mapz_mod, only: Lagrangian_to_Eulerian, moist_cv, compute_total_energy + use fv_moving_nest_utils_mod, only: check_array, check_local_array, show_atm, show_atm_grids, show_nest_grid, show_tile_geo, grid_equal + use fv_nesting_mod, only: dealloc_nested_buffers + use fv_nwp_nudge_mod, only: do_adiabatic_init + use init_hydro_mod, only: p_var + use tracer_manager_mod, only: get_tracer_index, get_tracer_names + use fv_moving_nest_types_mod, only: fv_moving_nest_prog_type, fv_moving_nest_physics_type, Moving_nest + use fv_moving_nest_utils_mod, only: alloc_halo_buffer, load_nest_latlons_from_nc, grid_geometry, output_grid_to_nc, find_nest_alignment + use fv_moving_nest_utils_mod, only: fill_nest_from_buffer, fill_nest_from_buffer_cell_center, fill_nest_from_buffer_nearest_neighbor + use fv_moving_nest_utils_mod, only: fill_nest_halos_from_parent, fill_grid_from_supergrid, fill_weight_grid + use fv_moving_nest_utils_mod, only: alloc_read_data + + implicit none + +#ifdef NO_QUAD_PRECISION + ! 64-bit precision (kind=8) + integer, parameter:: f_p = selected_real_kind(15) +#else + ! Higher precision (kind=16) for grid geometrical factors: + integer, parameter:: f_p = selected_real_kind(20) +#endif + +#ifdef OVERLOAD_R4 + real, parameter:: real_snan=x'FFBFFFFF' +#else + real, parameter:: real_snan=x'FFF7FFFFFFFFFFFF' +#endif + + logical :: debug_log = .false. + +#include + + !! 
Step 2 + interface mn_var_fill_intern_nest_halos + module procedure mn_var_fill_intern_nest_halos_r4_2d + module procedure mn_var_fill_intern_nest_halos_r4_3d + module procedure mn_var_fill_intern_nest_halos_r4_4d + + module procedure mn_var_fill_intern_nest_halos_r8_2d + module procedure mn_var_fill_intern_nest_halos_r8_3d + module procedure mn_var_fill_intern_nest_halos_r8_4d + + module procedure mn_var_fill_intern_nest_halos_wind + end interface mn_var_fill_intern_nest_halos + + + !! Step 6 + interface mn_var_shift_data + module procedure mn_var_shift_data_r4_2d + module procedure mn_var_shift_data_r4_3d + module procedure mn_var_shift_data_r4_4d + + module procedure mn_var_shift_data_r8_2d + module procedure mn_var_shift_data_r8_3d + module procedure mn_var_shift_data_r8_4d + end interface mn_var_shift_data + + !! Step 8 + interface mn_var_dump_to_netcdf + module procedure mn_var_dump_2d_to_netcdf + module procedure mn_var_dump_3d_to_netcdf + end interface mn_var_dump_to_netcdf + + interface mn_static_read_hires + module procedure mn_static_read_hires_r4 + module procedure mn_static_read_hires_r8 + end interface mn_static_read_hires + +contains + + !!===================================================================================== + !! Step 1.9 -- Allocate and fill the temporary variable(s) + !! This is to manage variables that are not allocated with a halo + !! on the Atm structure + !!===================================================================================== + + !>@brief The subroutine 'mn_prog_fill_temp_variables' fills the temporary variable for delz + !>@details The delz variable does not have haloes so we need a temporary variable to move it. + subroutine mn_prog_fill_temp_variables(Atm, n, child_grid_num, is_fine_pe, npz) + type(fv_atmos_type), allocatable, target, intent(in) :: Atm(:) !< Array of atmospheric data + integer, intent(in) :: n, child_grid_num !< This level and nest level + logical, intent(in) :: is_fine_pe !< Is this the nest PE? + integer, intent(in) :: npz !< Number of vertical levels + + integer :: isd, ied, jsd, jed + integer :: is, ie, js, je + integer :: this_pe + type(fv_moving_nest_prog_type), pointer :: mn_prog + + mn_prog => Moving_nest(n)%mn_prog + + this_pe = mpp_pe() + + if (debug_log) print '("[INFO] WDR start mn_prog_fill_temp_variables. npe=",I0," n=",I0)', this_pe, n + + isd = Atm(n)%bd%isd + ied = Atm(n)%bd%ied + jsd = Atm(n)%bd%jsd + jed = Atm(n)%bd%jed + + if (debug_log) print '("[INFO] WDR mn_prog_fill_temp_variables. npe=",I0," isd=",I0," ied=",I0," jsd=",I0," jed=",I0)', this_pe, isd, ied, jsd, jed + + is = Atm(n)%bd%is + ie = Atm(n)%bd%ie + js = Atm(n)%bd%js + je = Atm(n)%bd%je + + if (debug_log) print '("[INFO] WDR mn_prog_fill_temp_variables. npe=",I0," is=",I0," ie=",I0," js=",I0," je=",I0)', this_pe, is, ie, js, je + + ! Reset this to a dummy value, to help flag if the halos don't get updated later. + mn_prog%delz = +99999.9 + mn_prog%delz(is:ie, js:je, 1:npz) = Atm(n)%delz(is:ie, js:je, 1:npz) + + if (debug_log) print '("[INFO] WDR Z mn_prog_fill_temp_variables. npe=",I0," npz=",I0," ",I0," ",I0)', this_pe, npz, lbound(Atm(n)%delz,3), ubound(Atm(n)%delz,3) + if (debug_log) print '("[INFO] WDR end mn_prog_fill_temp_variables. npe=",I0," n=",I0)', this_pe, n + + end subroutine mn_prog_fill_temp_variables + + !>@brief The subroutine 'mn_prog_apply_temp_variables' fills the Atm%delz value from the temporary variable after nest move + !>@details The delz variable does not have haloes so we need a temporary variable to move it. 
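To make the role of the temporary concrete, the sketch below shows the allocation contrast that forces this copy-out/copy-back pattern: Atm%delz carries no halo, while the halo-fill and shift machinery needs storage spanning the data domain. The subroutine name is hypothetical and the data-domain bounds of the temporary are an assumption (the actual allocation of mn_prog%delz lives in fv_moving_nest_types.F90). mn_prog_fill_temp_variables above and mn_prog_apply_temp_variables below are the two halves of this pattern for delz.

  ! Sketch (illustration only): sizing a haloed temporary for a halo-less field.
  subroutine mn_prog_alloc_temp_sketch(Atm, n, npz, delz_tmp)
    type(fv_atmos_type), intent(in)    :: Atm(:)            !< Array of atmospheric data
    integer, intent(in)                :: n, npz             !< Grid index; number of levels
    real, allocatable, intent(out)     :: delz_tmp(:,:,:)    !< Haloed temporary for delz

    ! Atm(n)%delz is not allocated with a halo, so the temporary is allocated over
    ! the data domain (isd:ied, jsd:jed) to give the halo-fill and shift routines
    ! somewhere to write.
    allocate( delz_tmp(Atm(n)%bd%isd:Atm(n)%bd%ied, Atm(n)%bd%jsd:Atm(n)%bd%jed, 1:npz) )
    delz_tmp = +99999.9   ! dummy value; flags halo points that never get updated
    delz_tmp(Atm(n)%bd%is:Atm(n)%bd%ie, Atm(n)%bd%js:Atm(n)%bd%je, 1:npz) = &
         Atm(n)%delz(Atm(n)%bd%is:Atm(n)%bd%ie, Atm(n)%bd%js:Atm(n)%bd%je, 1:npz)
  end subroutine mn_prog_alloc_temp_sketch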
+ subroutine mn_prog_apply_temp_variables(Atm, n, child_grid_num, is_fine_pe, npz) + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Array of atmospheric data + integer, intent(in) :: n, child_grid_num !< This level and nest level + logical, intent(in) :: is_fine_pe !< Is this the nest PE? + integer, intent(in) :: npz !< Number of vertical levels + + integer :: is, ie, js, je + integer :: this_pe + integer :: i,j,k + integer :: bad_values, good_values + type(fv_moving_nest_prog_type), pointer :: mn_prog + + mn_prog => Moving_nest(n)%mn_prog + + this_pe = mpp_pe() + + if (debug_log) print '("[INFO] WDR start mn_prog_apply_temp_variables. npe=",I0," n=",I0)', this_pe, n + + ! Check if the variables were filled in properly. + + if (debug_log) then + good_values = 0 + bad_values = 0 + + if (is_fine_pe) then + do i = Atm(n)%bd%isd, Atm(n)%bd%ied + do j = Atm(n)%bd%jsd, Atm(n)%bd%jed + do k = 1, npz + if (mn_prog%delz(i,j,k) .gt. 20000.0) then + print '("[WARN] WDR BAD NEST mn_prog%delz value. npe=",I0," mn_prog%delz(",I0,",",I0,",",I0,")=",F12.3)', this_pe, i, j, k, mn_prog%delz(i,j,k) + bad_values = bad_values + 1 + else + good_values = good_values + 1 + endif + enddo + enddo + enddo + else + do i = Atm(n)%bd%is, Atm(n)%bd%ie + do j = Atm(n)%bd%js, Atm(n)%bd%je + do k = 1, npz + if (mn_prog%delz(i,j,k) .gt. 20000.0) then + print '("[WARN] WDR BAD GLOBAL mn_prog%delz value. npe=",I0," mn_prog%delz(",I0,",",I0,",",I0,")=",F12.3)', this_pe, i, j, k, mn_prog%delz(i,j,k) + bad_values = bad_values + 1 + else + good_values = good_values + 1 + endif + enddo + enddo + enddo + endif + + i = Atm(n)%bd%is + j = Atm(n)%bd%js + k = npz + + print '("[WARN] WDR Surface mn_prog%delz value. npe=",I0," mn_prog%delz(",I0,",",I0,",",I0,")=",F18.3)', this_pe, i, j, k, mn_prog%delz(i,j,k) + + print '("INFO] WDR mn_prog%delz values. npe=",I0," good_values=",I0," bad_values=",I0)', this_pe, good_values, bad_values + endif + + if (is_fine_pe) then + is = Atm(n)%bd%is + ie = Atm(n)%bd%ie + js = Atm(n)%bd%js + je = Atm(n)%bd%je + + if (debug_log) print '("[INFO] WDR mn_prog_apply_temp_variables. npe=",I0," is=",I0," ie=",I0," js=",I0," je=",I0)', this_pe, is, ie, js, je + + Atm(n)%delz(is:ie, js:je, 1:npz) = mn_prog%delz(is:ie, js:je, 1:npz) + endif + + if (debug_log) print '("[INFO] WDR end mn_prog_apply_temp_variables. npe=",I0," n=",I0)', this_pe, n + + end subroutine mn_prog_apply_temp_variables + + + !!===================================================================================== + !! Step 2 -- Fill the nest edge halos from parent grid before nest motion + !! OR Refill the nest edge halos from parent grid after nest motion + !! Parent and nest PEs need to execute these subroutines + !!===================================================================================== + + !>@brief The subroutine 'mn_prog_fill_nest_halos_from_parent' fills the nest edge halos from the parent + !>@details Parent and nest PEs must run this subroutine. It transfers data and interpolates onto fine nest. + subroutine mn_prog_fill_nest_halos_from_parent(Atm, n, child_grid_num, is_fine_pe, nest_domain, nz) + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Array of atmospheric data + integer, intent(in) :: n, child_grid_num !< This level and nest level + logical, intent(in) :: is_fine_pe !< Is this the nest PE? 
+ type(nest_domain_type), intent(inout) :: nest_domain !< Domain structure for nest + integer, intent(in) :: nz !< Number of vertical levels + + integer :: position, position_u, position_v + integer :: interp_type, interp_type_u, interp_type_v + integer :: x_refine, y_refine + type(fv_moving_nest_prog_type), pointer :: mn_prog + + mn_prog => Moving_nest(n)%mn_prog + + ! TODO Rename this from interp_type to stagger_type + interp_type = 1 ! cell-centered A-grid + interp_type_u = 4 ! D-grid + interp_type_v = 4 ! D-grid + + position = CENTER + position_u = NORTH + position_v = EAST + + x_refine = Atm(child_grid_num)%neststruct%refinement + y_refine = x_refine + + ! Fill centered-grid variables + call fill_nest_halos_from_parent("q_con", Atm(n)%q_con, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + call fill_nest_halos_from_parent("pt", Atm(n)%pt, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + call fill_nest_halos_from_parent("w", Atm(n)%w, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + !call fill_nest_halos_from_parent("omga", Atm(n)%omga, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + ! Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + call fill_nest_halos_from_parent("delp", Atm(n)%delp, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + call fill_nest_halos_from_parent("delz", mn_prog%delz, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + call fill_nest_halos_from_parent("q", Atm(n)%q, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + ! Move the A-grid winds. TODO consider recomputing them from D grid instead + call fill_nest_halos_from_parent("ua", Atm(n)%ua, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + call fill_nest_halos_from_parent("va", Atm(n)%va, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + ! Fill staggered D-grid variables + call fill_nest_halos_from_parent("u", Atm(n)%u, interp_type_u, Atm(child_grid_num)%neststruct%wt_u, & + Atm(child_grid_num)%neststruct%ind_u, x_refine, y_refine, is_fine_pe, nest_domain, position_u, nz) + call fill_nest_halos_from_parent("v", Atm(n)%v, interp_type_v, Atm(child_grid_num)%neststruct%wt_v, & + Atm(child_grid_num)%neststruct%ind_v, x_refine, y_refine, is_fine_pe, nest_domain, position_v, nz) + + end subroutine mn_prog_fill_nest_halos_from_parent + + !!============================================================================ + !! Step 3 -- Redefine the nest domain to new location + !! This calls mpp_shift_nest_domains. + !! 
-- Similar to med_nest_configure() from HWRF + !!============================================================================ + + !>@brief The subroutine 'mn_meta_move_nest' resets the metadata for the nest + !>@details Parent and nest PEs run this subroutine. + subroutine mn_meta_move_nest(delta_i_c, delta_j_c, pelist, is_fine_pe, extra_halo, nest_domain, domain_fine, domain_coarse, & + istart_coarse, iend_coarse, jstart_coarse, jend_coarse, istart_fine, iend_fine, jstart_fine, jend_fine) + + implicit none + + integer, intent(in) :: delta_i_c, delta_j_c !< Coarse grid delta i,j for nest move + integer, allocatable, intent(in) :: pelist(:) !< List of involved PEs + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + integer, intent(in) :: extra_halo !< Extra halo points (not fully implemented) + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + type(domain2d), intent(inout) :: domain_coarse, domain_fine !< Coarse and fine domain structures + integer, intent(inout) :: istart_coarse, iend_coarse, jstart_coarse, jend_coarse !< Bounds of coarse grid + integer, intent(in) :: istart_fine, iend_fine, jstart_fine, jend_fine !< Bounds of fine grid + + ! Local variables + integer :: num_nest + integer :: this_pe + + integer :: delta_i_coarse(1), delta_j_coarse(1) + + this_pe = mpp_pe() + + if (debug_log) print '("[INFO] WDR start mn_meta_move_nest. npe=",I0)', this_pe + + ! Initial implementation only supports single moving nest. Update this later. + ! mpp_shift_nest_domains has a call signature to support multiple moving nests, though has not been tested for correctness. + delta_i_coarse(1) = delta_i_c + delta_j_coarse(1) = delta_j_c + + !!=========================================================== + !! + !! Relocate where the nest is aligned on the parent + !! + !!=========================================================== + + if (debug_log) print '("[INFO] WDR NRD0. npe=",I0," ",I0," ",I0," ",I0," ",I0," num_nest=",I0," delta_i_c=",I0," delta_j_c=",I0)', this_pe, istart_coarse, iend_coarse, jstart_coarse, jend_coarse, num_nest, delta_i_c, delta_j_c + + istart_coarse = istart_coarse + delta_i_c + iend_coarse = iend_coarse + delta_i_c + + jstart_coarse = jstart_coarse + delta_j_c + jend_coarse = jend_coarse + delta_j_c + + ! The fine nest will maintain the same indices + + num_nest = nest_domain%num_nest + + if (debug_log) print '("[INFO] WDR NRD1 about to call mpp_shift_nest_domains. npe=",I0," ",I0," ",I0," ",I0," ",I0," num_nest=",I0," delta_i_c=",I0," delta_j_c=",I0)', this_pe, istart_coarse, iend_coarse, jstart_coarse, jend_coarse, num_nest, delta_i_c, delta_j_c + + + ! WDR TODO Verify whether rerunning this will cause (small) memory leaks. + if (is_fine_pe) then + call mpp_shift_nest_domains(nest_domain, domain_fine, delta_i_coarse, delta_j_coarse, extra_halo) + else + call mpp_shift_nest_domains(nest_domain, domain_coarse, delta_i_coarse, delta_j_coarse, extra_halo) + endif + + if (debug_log) print '("[INFO] WDR NRD2 after call to mpp_define_nest_domains. npe=",I0)', this_pe + + end subroutine mn_meta_move_nest + + + !================================================================================ + !! Step 4 -- Updates the internal nest tile halos + !================================================================================ + + !>@brief The subroutine 'mn_prog_fill_intern_nest_halos' fill internal nest halos for prognostic variables + !>@details Only nest PEs call this subroutine. 
+ subroutine mn_prog_fill_intern_nest_halos(Atm, domain_fine, is_fine_pe) + type(fv_atmos_type), target, intent(inout) :: Atm !< Single instance of atmospheric data + type(domain2d), intent(inout) :: domain_fine !< Domain structure for nest + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + + integer :: this_pe + type(fv_moving_nest_prog_type), pointer :: mn_prog + + mn_prog => Moving_nest(2)%mn_prog ! TODO allow nest number to vary + this_pe = mpp_pe() + + call mn_var_fill_intern_nest_halos(Atm%q_con, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(Atm%pt, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(Atm%w, domain_fine, is_fine_pe) + !call mn_var_fill_intern_nest_halos(Atm%omga, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(Atm%delp, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_prog%delz, domain_fine, is_fine_pe) + + call mn_var_fill_intern_nest_halos(Atm%ua, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(Atm%va, domain_fine, is_fine_pe) + + if (debug_log) then + call check_array(Atm%u, this_pe, "Atm%u", -300.0, 300.0) + call check_array(Atm%v, this_pe, "Atm%v", -300.0, 300.0) + endif + + ! The vector form of the subroutine takes care of the staggering of the wind variables internally. + call mn_var_fill_intern_nest_halos(Atm%u, Atm%v, domain_fine, is_fine_pe) + + call mn_var_fill_intern_nest_halos(Atm%q, domain_fine, is_fine_pe) + + end subroutine mn_prog_fill_intern_nest_halos + + + !================================================================================ + ! + ! Step 4 -- Per variable fill internal nest halos + ! + !================================================================================ + + !>@brief The subroutine 'mn_var_fill_intern_nest_halos_r4_2d' fills internal nest halos + !>@details This version of the subroutine is for 2D arrays of single precision reals. + subroutine mn_var_fill_intern_nest_halos_r4_2d(data_var, domain_fine, is_fine_pe) + real*4, allocatable, intent(inout) :: data_var(:,:) !< Model variable data + type(domain2d), intent(inout) :: domain_fine !< Nest domain structure + logical, intent(in) :: is_fine_pe !< Is this the nest PE? + + integer :: this_pe + this_pe = mpp_pe() + + if (is_fine_pe) then + if (debug_log) print '("[INFO] WDR INH2 before call to mpp_update_domains. npe=",I0)', this_pe + ! mpp_update_domains fills the halo region of the fine grids for the interior of the nest. + ! The fine nest boundary with the coarse grid remains unchanged. + ! seems that this only performs communication between fine nest PEs + ! Just transfers halo data between tiles of same resolution -- doesn't perform any interpolation! + call mpp_update_domains(data_var, domain_fine, flags=NUPDATE + EUPDATE + SUPDATE + WUPDATE) + if (debug_log) print '("[INFO] WDR INH2 after call to mpp_update_domains. npe=",I0)', this_pe + endif + + end subroutine mn_var_fill_intern_nest_halos_r4_2d + + !>@brief The subroutine 'mn_var_fill_intern_nest_halos_r8_2d' fills internal nest halos + !>@details This version of the subroutine is for 2D arrays of double precision reals. + subroutine mn_var_fill_intern_nest_halos_r8_2d(data_var, domain_fine, is_fine_pe) + real*8, allocatable, intent(inout) :: data_var(:,:) !< Double precision model variable + type(domain2d), intent(inout) :: domain_fine !< Nest domain structure + logical, intent(in) :: is_fine_pe !< Is this a nest PE? 
+ + integer :: this_pe + this_pe = mpp_pe() + + if (is_fine_pe) then + if (debug_log) print '("[INFO] WDR INH2p before call to mpp_update_domains. npe=",I0)', this_pe + call mpp_update_domains(data_var, domain_fine, flags=NUPDATE + EUPDATE + SUPDATE + WUPDATE) + if (debug_log) print '("[INFO] WDR INH2p after call to mpp_update_domains. npe=",I0)', this_pe + endif + + end subroutine mn_var_fill_intern_nest_halos_r8_2d + + !>@brief The subroutine 'mn_var_fill_intern_nest_halos_r4_3d' fills internal nest halos + !>@details This version of the subroutine is for 3D arrays of single precision reals. + subroutine mn_var_fill_intern_nest_halos_r4_3d(data_var, domain_fine, is_fine_pe) + real*4, allocatable, intent(inout) :: data_var(:,:,:) !< Single precision model variable + type(domain2d), intent(inout) :: domain_fine !< Nest domain structure + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + + integer :: this_pe + this_pe = mpp_pe() + + if (is_fine_pe) then + if (debug_log) print '("[INFO] WDR INH3 before call to mpp_update_domains. npe=",I0)', this_pe + call mpp_update_domains(data_var, domain_fine, flags=NUPDATE + EUPDATE + SUPDATE + WUPDATE) + if (debug_log) print '("[INFO] WDR INH3 after call to mpp_update_domains. npe=",I0)', this_pe + endif + + end subroutine mn_var_fill_intern_nest_halos_r4_3d + + !>@brief The subroutine 'mn_var_fill_intern_nest_halos_r8_3d' fills internal nest halos + !>@details This version of the subroutine is for 3D arrays of double precision reals. + subroutine mn_var_fill_intern_nest_halos_r8_3d(data_var, domain_fine, is_fine_pe) + real*8, allocatable, intent(inout) :: data_var(:,:,:) !< Double precision model variable + type(domain2d), intent(inout) :: domain_fine !< Nest domain structure + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + + integer :: this_pe + this_pe = mpp_pe() + + if (is_fine_pe) then + if (debug_log) print '("[INFO] WDR INH3p before call to mpp_update_domains. npe=",I0)', this_pe + call mpp_update_domains(data_var, domain_fine, flags=NUPDATE + EUPDATE + SUPDATE + WUPDATE) + if (debug_log) print '("[INFO] WDR INH3p after call to mpp_update_domains. npe=",I0)', this_pe + endif + + end subroutine mn_var_fill_intern_nest_halos_r8_3d + + !>@brief The subroutine 'mn_var_fill_intern_nest_halos_wind' fills internal nest halos for u and v wind + !>@details This version of the subroutine is for 3D arrays of single precision reals for each wind component + subroutine mn_var_fill_intern_nest_halos_wind(u_var, v_var, domain_fine, is_fine_pe) + real, allocatable, intent(inout) :: u_var(:,:,:) !< Staggered u wind + real, allocatable, intent(inout) :: v_var(:,:,:) !< Staggered v wind + type(domain2d), intent(inout) :: domain_fine !< Nest domain structure + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + + integer :: this_pe + this_pe = mpp_pe() + + if (is_fine_pe) then + if (debug_log) print '("[INFO] WDR INH3W before call to mpp_update_domains. npe=",I0)', this_pe + call mpp_update_domains(u_var, v_var, domain_fine, flags=NUPDATE + EUPDATE + SUPDATE + WUPDATE, gridtype=DGRID_NE) + if (debug_log) print '("[INFO] WDR INH3W after call to mpp_update_domains. npe=",I0)', this_pe + endif + + end subroutine mn_var_fill_intern_nest_halos_wind + + + !>@brief The subroutine 'mn_var_fill_intern_nest_halos_r4_4d' fills internal nest halos + !>@details This version of the subroutine is for 4D arrays of single precision reals. 
+ subroutine mn_var_fill_intern_nest_halos_r4_4d(data_var, domain_fine, is_fine_pe) + real*4, allocatable, intent(inout) :: data_var(:,:,:,:) !< Single prevision variable + type(domain2d), intent(inout) :: domain_fine !< Nest domain structure + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + + integer :: this_pe + this_pe = mpp_pe() + + if (is_fine_pe) then + if (debug_log) print '("[INFO] WDR INH4 before call to mpp_update_domains. npe=",I0)', this_pe + call mpp_update_domains(data_var, domain_fine, flags=NUPDATE + EUPDATE + SUPDATE + WUPDATE) + if (debug_log) print '("[INFO] WDR INH4 after call to mpp_update_domains. npe=",I0)', this_pe + endif + + end subroutine mn_var_fill_intern_nest_halos_r4_4d + + !>@brief The subroutine 'mn_var_fill_intern_nest_halos_r8_4d' fills internal nest halos + !>@details This version of the subroutine is for 4D arrays of double precision reals. + subroutine mn_var_fill_intern_nest_halos_r8_4d(data_var, domain_fine, is_fine_pe) + real*8, allocatable, intent(inout) :: data_var(:,:,:,:) !< Double precision variable + type(domain2d), intent(inout) :: domain_fine !< Nest domain structure + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + + integer :: this_pe + this_pe = mpp_pe() + + if (is_fine_pe) then + if (debug_log) print '("[INFO] WDR INH4 before call to mpp_update_domains. npe=",I0)', this_pe + call mpp_update_domains(data_var, domain_fine, flags=NUPDATE + EUPDATE + SUPDATE + WUPDATE) + if (debug_log) print '("[INFO] WDR INH4 after call to mpp_update_domains. npe=",I0)', this_pe + endif + + end subroutine mn_var_fill_intern_nest_halos_r8_4d + + + !!============================================================================ + !! Step 5.1 -- Load the latlon data from NetCDF + !! update parent_geo, tile_geo*, p_grid*, n_grid* + !!============================================================================ + + !>@brief The subroutine 'mn_latlon_load_parent' loads parent latlon data from netCDF + !>@details Updates parent_geo, tile_geo*, p_grid*, n_grid* + subroutine mn_latlon_load_parent(surface_dir, Atm, n, parent_tile, delta_i_c, delta_j_c, child_grid_num, parent_geo, tile_geo, tile_geo_u, tile_geo_v, fp_super_tile_geo, p_grid, n_grid, p_grid_u, n_grid_u, p_grid_v, n_grid_v) + character(len=*), intent(in) :: surface_dir !< Directory for static files + type(fv_atmos_type), allocatable, intent(in) :: Atm(:) !< Atm data array + integer, intent(in) :: n, parent_tile, child_grid_num !< Grid numbers + integer, intent(in) :: delta_i_c, delta_j_c !< Nest motion in delta i,j + type(grid_geometry), intent(inout) :: parent_geo, tile_geo, tile_geo_u, tile_geo_v !< Tile geometries + type(grid_geometry), intent(in) :: fp_super_tile_geo !< Parent grid at high-resolution geometry + real(kind=R_GRID), allocatable, intent(out) :: p_grid(:,:,:), n_grid(:,:,:) !< A-stagger lat/lon grids + real(kind=R_GRID), allocatable, intent(out) :: p_grid_u(:,:,:), n_grid_u(:,:,:) !< u-wind staggered lat/lon grids + real(kind=R_GRID), allocatable, intent(out) :: p_grid_v(:,:,:), n_grid_v(:,:,:) !< v-wind staggered lat/lon grids + + character(len=256) :: grid_filename + logical, save :: first_nest_move = .true. 
+ integer, save :: p_istart_fine, p_iend_fine, p_jstart_fine, p_jend_fine + integer :: x, y, fp_i, fp_j + integer :: position, position_u, position_v + integer :: x_refine, y_refine + integer :: nest_x, nest_y, parent_x, parent_y + integer :: this_pe + + this_pe = mpp_pe() + + position = CENTER + position_u = NORTH + position_v = EAST + + x_refine = Atm(child_grid_num)%neststruct%refinement + y_refine = x_refine + + ! Setup parent_geo with the values for the parent tile + ! Note that lat/lon are stored in the model in RADIANS + ! Only the netCDF files use degrees + + if (first_nest_move) then + if (debug_log) print '("[INFO] WDR mn_latlon_load_parent READING static coarse file on npe=",I0)', this_pe + + call mn_static_filename(surface_dir, parent_tile, 'grid', 1, grid_filename) + call load_nest_latlons_from_nc(grid_filename, Atm(1)%npx, Atm(1)%npy, 1, & + parent_geo, p_istart_fine, p_iend_fine, p_jstart_fine, p_jend_fine) + + first_nest_move = .false. + endif + + parent_geo%nxp = Atm(1)%npx + parent_geo%nyp = Atm(1)%npy + + parent_geo%nx = Atm(1)%npx - 1 + parent_geo%ny = Atm(1)%npy - 1 + + if (debug_log) then + call show_tile_geo(parent_geo, this_pe, "parent_geo") + call show_atm_grids(Atm, n) + endif + + !=========================================================== + ! Begin tile_geo per PE. + !=========================================================== + + !------------------------ + ! Grid Definitions + !------------------------ + ! + ! tile_geo - lat/lons on A-grid (cell centers) for nest, on data domain (includes halo) for each PE + ! parent_geo - lat/lons of supergrid for parent + ! n_grid - lat/lons of cell centers for nest + ! p_grid - lat/lons of cell centers for parent + ! + ! gridstruct%agrid - cell centers for each PE + ! gridstruct%grid - cell corners for each PE + + ! Allocate tile_geo just for this PE, copied from Atm(n)%gridstruct%agrid + tile_geo%nx = ubound(Atm(n)%gridstruct%agrid, 1) - lbound(Atm(n)%gridstruct%agrid, 1) + tile_geo%ny = ubound(Atm(n)%gridstruct%agrid, 2) - lbound(Atm(n)%gridstruct%agrid, 2) + tile_geo%nxp = tile_geo%nx + 1 + tile_geo%nyp = tile_geo%ny + 1 + + allocate(tile_geo%lons(lbound(Atm(n)%gridstruct%agrid, 1):ubound(Atm(n)%gridstruct%agrid, 1), lbound(Atm(n)%gridstruct%agrid, 2):ubound(Atm(n)%gridstruct%agrid, 2))) + allocate(tile_geo%lats(lbound(Atm(n)%gridstruct%agrid, 1):ubound(Atm(n)%gridstruct%agrid, 1), lbound(Atm(n)%gridstruct%agrid, 2):ubound(Atm(n)%gridstruct%agrid, 2))) + + tile_geo%lats = -999.9 + tile_geo%lons = -999.9 + + do x = lbound(Atm(n)%gridstruct%agrid, 1), ubound(Atm(n)%gridstruct%agrid, 1) + do y = lbound(Atm(n)%gridstruct%agrid, 2), ubound(Atm(n)%gridstruct%agrid, 2) + tile_geo%lons(x,y) = Atm(n)%gridstruct%agrid(x,y,1) + tile_geo%lats(x,y) = Atm(n)%gridstruct%agrid(x,y,2) + enddo + enddo + + if (debug_log) call show_tile_geo(tile_geo, this_pe, "tile_geo") + call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + + if (parent_x .eq. -999) then + print '("[ERROR] WDR mn_latlon_load_parent on npe=",I0," parent and nest grids are not aligned!")', this_pe + call mpp_error(FATAL, "mn_latlon_load_parent parent and nest grids are not aligned.") + endif + + ! Allocate tile_geo_u just for this PE, copied from Atm(n)%gridstruct%grid + ! grid is 1 larger than agrid + ! 
u(npx, npy+1) + tile_geo_u%nx = ubound(Atm(n)%gridstruct%agrid, 1) - lbound(Atm(n)%gridstruct%agrid, 1) + tile_geo_u%ny = ubound(Atm(n)%gridstruct%grid, 2) - lbound(Atm(n)%gridstruct%grid, 2) + tile_geo_u%nxp = tile_geo_u%nx + 1 + tile_geo_u%nyp = tile_geo_u%ny + 1 + + allocate(tile_geo_u%lons(lbound(Atm(n)%gridstruct%agrid, 1):ubound(Atm(n)%gridstruct%agrid, 1), lbound(Atm(n)%gridstruct%grid, 2):ubound(Atm(n)%gridstruct%grid, 2))) + allocate(tile_geo_u%lats(lbound(Atm(n)%gridstruct%agrid, 1):ubound(Atm(n)%gridstruct%agrid, 1), lbound(Atm(n)%gridstruct%grid, 2):ubound(Atm(n)%gridstruct%grid, 2))) + + tile_geo_u%lons = -999.9 + tile_geo_u%lats = -999.9 + + do x = lbound(tile_geo_u%lats, 1), ubound(tile_geo_u%lats, 1) + do y = lbound(tile_geo_u%lats, 2), ubound(tile_geo_u%lats, 2) + fp_i = (x - nest_x) * 2 + parent_x - 1 + fp_j = (y - nest_y) * 2 + parent_y + + !print '("[INFO] WDR mn_latlon_load_parent on npe=",I0," fp_i=",I0," fp_j=",I0,4I6)', this_pe, fp_i, fp_j, nest_x, nest_y, parent_x, parent_y + + tile_geo_u%lons(x,y) = fp_super_tile_geo%lons(fp_i, fp_j) + tile_geo_u%lats(x,y) = fp_super_tile_geo%lats(fp_i, fp_j) + enddo + enddo + + if (debug_log) call show_tile_geo(tile_geo_u, this_pe, "tile_geo_u") + + ! Allocate tile_geo_v just for this PE, copied from Atm(n)%gridstruct%grid + ! grid is 1 larger than agrid + ! u(npx, npy+1) + tile_geo_v%nx = ubound(Atm(n)%gridstruct%grid, 1) - lbound(Atm(n)%gridstruct%grid, 1) + tile_geo_v%ny = ubound(Atm(n)%gridstruct%agrid, 2) - lbound(Atm(n)%gridstruct%agrid, 2) + tile_geo_v%nxp = tile_geo_v%nx + 1 + tile_geo_v%nyp = tile_geo_v%ny + 1 + + allocate(tile_geo_v%lons(lbound(Atm(n)%gridstruct%grid, 1):ubound(Atm(n)%gridstruct%grid, 1), lbound(Atm(n)%gridstruct%agrid, 2):ubound(Atm(n)%gridstruct%agrid, 2))) + allocate(tile_geo_v%lats(lbound(Atm(n)%gridstruct%grid, 1):ubound(Atm(n)%gridstruct%grid, 1), lbound(Atm(n)%gridstruct%agrid, 2):ubound(Atm(n)%gridstruct%agrid, 2))) + + tile_geo_v%lons = -999.9 + tile_geo_v%lats = -999.9 + + do x = lbound(tile_geo_v%lats, 1), ubound(tile_geo_v%lats, 1) + do y = lbound(tile_geo_v%lats, 2), ubound(tile_geo_v%lats, 2) + fp_i = (x - nest_x) * 2 + parent_x + fp_j = (y - nest_y) * 2 + parent_y - 1 + + tile_geo_v%lons(x,y) = fp_super_tile_geo%lons(fp_i, fp_j) + tile_geo_v%lats(x,y) = fp_super_tile_geo%lats(fp_i, fp_j) + enddo + enddo + + if (debug_log) call show_tile_geo(tile_geo_v, this_pe, "tile_geo_v") + + !=========================================================== + ! End tile_geo per PE. + !=========================================================== + + allocate(p_grid(1:parent_geo%nxp, 1:parent_geo%nyp,2)) + allocate(n_grid(Atm(child_grid_num)%bd%isd:Atm(child_grid_num)%bd%ied, Atm(child_grid_num)%bd%jsd:Atm(child_grid_num)%bd%jed, 2)) + n_grid = real_snan + + allocate(p_grid_u(1:parent_geo%nxp, 1:parent_geo%nyp+1,2)) + allocate(n_grid_u(Atm(child_grid_num)%bd%isd:Atm(child_grid_num)%bd%ied, Atm(child_grid_num)%bd%jsd:Atm(child_grid_num)%bd%jed+1, 2)) + n_grid_u = real_snan + + allocate(p_grid_v(1:parent_geo%nxp+1, 1:parent_geo%nyp,2)) + allocate(n_grid_v(Atm(child_grid_num)%bd%isd:Atm(child_grid_num)%bd%ied+1, Atm(child_grid_num)%bd%jsd:Atm(child_grid_num)%bd%jed, 2)) + n_grid_v = real_snan + + ! TODO - propagate tile_geo information back to Atm structure + ! TODO - deallocate tile_geo lat/lons + ! 
TODO - ensure the allocation of tile_geo lat/lons is only performed once - outside the loop
+
+    if (debug_log) print '("[INFO] WDR MV_NST2 run step 2 atmosphere.F90 npe=",I0, " tile_geo: nxp=",I0," nyp=",I0," nx=",I0," ny=", I0)', this_pe, tile_geo%nxp, tile_geo%nyp, tile_geo%nx, tile_geo%ny
+    if (debug_log) print *, "[INFO] WDR MV_NST2 run step 2 atmosphere.F90 shape(tile_geo%lats)=", shape(tile_geo%lats)
+    if (debug_log) print '("[INFO] WDR MV_NST2 bounds1 (tile_geo%lats)=",I0,"-",I0)', lbound(tile_geo%lats,1), ubound(tile_geo%lats,1)
+    if (debug_log) print '("[INFO] WDR MV_NST2 bounds2 (tile_geo%lats)=",I0,"-",I0)', lbound(tile_geo%lats,2), ubound(tile_geo%lats,2)
+
+    call move_nest_geo(tile_geo, tile_geo_u, tile_geo_v, fp_super_tile_geo, delta_i_c, delta_j_c, x_refine, y_refine)
+
+    call assign_n_p_grids(parent_geo, tile_geo, p_grid, n_grid, position)
+    call assign_n_p_grids(parent_geo, tile_geo_u, p_grid_u, n_grid_u, position_u)
+    call assign_n_p_grids(parent_geo, tile_geo_v, p_grid_v, n_grid_v, position_v)
+
+  end subroutine mn_latlon_load_parent
+
+  !>@brief The subroutine 'mn_static_filename' generates the full pathname for a static file for each run
+  !>@details Constructs the full pathname for a variable and refinement level and tests whether it exists
+  subroutine mn_static_filename(surface_dir, tile_num, tag, refine, grid_filename)
+    character(len=*), intent(in)  :: surface_dir   !< Directory
+    character(len=*), intent(in)  :: tag           !< Variable name
+    integer, intent(in)           :: tile_num      !< Tile number
+    integer, intent(in)           :: refine        !< Nest refinement
+    character(len=*), intent(out) :: grid_filename !< Output pathname to netCDF file
+
+    character(len=256) :: refine_str, parent_str
+    character(len=1)   :: divider
+    logical            :: file_exists
+
+    write(parent_str, '(I0)') tile_num
+
+    if (refine .eq. 1 .and. (tag .eq. 'grid' .or. tag .eq. 'oro_data')) then
+      ! For 1x files in the INPUT directory; follow the symbolic link
+      grid_filename = trim(trim(surface_dir) // '/' // trim(tag) // '.tile' // trim(parent_str) // '.nc')
+    else
+      if (refine .eq. 1) then
+        grid_filename = trim(trim(surface_dir) // '/' // trim(tag) // '.tile' // trim(parent_str) // '.nc')
+      else
+        write(refine_str, '(I0,A1)') refine, 'x'
+        grid_filename = trim(trim(surface_dir) // '/' // trim(tag) // '.tile' // trim(parent_str) // '.' // trim(refine_str) // '.nc')
+      endif
+    endif
+
+    grid_filename = trim(grid_filename)
+
+    inquire(FILE=grid_filename, EXIST=file_exists)
+    if (.not. file_exists) then
+      print '("[ERROR] WDR mn_static_filename DOES NOT EXIST npe=",I0," exists=",L1," ",A256)', mpp_pe(), file_exists, grid_filename
+    endif
+
+  end subroutine mn_static_filename
+
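+  ! For reference, a sketch of the pathnames produced by the logic above; the directory,
+  ! tile number, and refinement shown here are illustrative values only, not anything
+  ! required by this patch:
+  !
+  !   call mn_static_filename('INPUT', 6, 'oro_data', 1, fname)
+  !     -> fname = 'INPUT/oro_data.tile6.nc'      (1x data; follows the symbolic link)
+  !   call mn_static_filename('INPUT', 6, 'oro_data', 3, fname)
+  !     -> fname = 'INPUT/oro_data.tile6.3x.nc'   (3x refined static data)
+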
+  !>@brief The subroutine 'mn_latlon_read_hires_parent' reads in static data from a netCDF file
+  subroutine mn_latlon_read_hires_parent(npx, npy, refine, fp_super_tile_geo, surface_dir, parent_tile)
+    integer, intent(in) :: npx, npy, refine !< Number of points in x,y, and refinement
+    type(grid_geometry), intent(inout) :: fp_super_tile_geo !< Geometry of supergrid for parent tile at high resolution
+    character(len=*), intent(in) :: surface_dir !< Surface directory to read netCDF file from
+    integer, intent(in) :: parent_tile !< Parent tile number
+
+    integer :: fp_super_istart_fine, fp_super_jstart_fine, fp_super_iend_fine, fp_super_jend_fine
+    character(len=256) :: grid_filename
+
+    call mn_static_filename(surface_dir, parent_tile, 'grid', refine, grid_filename)
+
+    call load_nest_latlons_from_nc(trim(grid_filename), npx, npy, refine, fp_super_tile_geo, &
+        fp_super_istart_fine, fp_super_iend_fine, fp_super_jstart_fine, fp_super_jend_fine)
+
+  end subroutine mn_latlon_read_hires_parent
+
+  !>@brief The subroutine 'mn_orog_read_hires_parent' loads parent orography data from netCDF
+  !>@details Gathers a number of terrain-related variables from the netCDF file
+  subroutine mn_orog_read_hires_parent(npx, npy, refine, surface_dir, filtered_terrain, orog_grid, orog_std_grid, ls_mask_grid, land_frac_grid, parent_tile)
+    integer, intent(in) :: npx, npy, refine !< Number of points in x,y, and refinement
+    character(len=*), intent(in) :: surface_dir !< Surface directory to read netCDF file from
+    logical, intent(in) :: filtered_terrain !< Whether to use filtered terrain
+    real, allocatable, intent(out) :: orog_grid(:,:) !< Output orography grid
+    real, allocatable, intent(out) :: orog_std_grid(:,:) !< Output orography standard deviation grid
+    real, allocatable, intent(out) :: ls_mask_grid(:,:) !< Output land sea mask grid
+    real, allocatable, intent(out) :: land_frac_grid(:,:) !< Output land fraction grid
+    integer, intent(in) :: parent_tile !< Parent tile number
+
+    integer :: nx_cubic, nx, ny, fp_nx, fp_ny, mid_nx, mid_ny
+    integer :: fp_istart_fine, fp_iend_fine, fp_jstart_fine, fp_jend_fine
+    character(len=512) :: nc_filename
+    character(len=16) :: orog_var_name
+    integer :: this_pe
+
+    this_pe = mpp_pe()
+
+    nx_cubic = npx - 1
+    nx = npx - 1
+    ny = npy - 1
+
+    fp_istart_fine = 0
+    fp_iend_fine = nx * refine
+    fp_jstart_fine = 0
+    fp_jend_fine = ny * refine
+
+    fp_nx = fp_iend_fine - fp_istart_fine
+    fp_ny = fp_jend_fine - fp_jstart_fine
+
+    mid_nx = (fp_iend_fine - fp_istart_fine) / 2
+    mid_ny = (fp_jend_fine - fp_jstart_fine) / 2
+
+    call mn_static_filename(surface_dir, parent_tile, 'oro_data', refine, nc_filename)
+
+    if (filtered_terrain) then
+      orog_var_name = 'orog_filt'
+    else
+      orog_var_name = 'orog_raw'
+    endif
+
+    if (debug_log) print '("[INFO] WDR NCREAD LOFC mn_orog_read_hires_parent npe=",I0,I4,I4,I4,I4," ",A12," ",A128)', this_pe, fp_nx, fp_ny, mid_nx, mid_ny, orog_var_name, nc_filename
+
+    call alloc_read_data(nc_filename, orog_var_name, fp_nx, fp_ny, orog_grid)
+    !call check_array(orog_grid, this_pe, "parent coarse" // orog_var_name, -1000.0, 5000.0)
+    call alloc_read_data(nc_filename, 'slmsk', fp_nx, fp_ny, ls_mask_grid)
+    !call check_array(ls_mask_grid, this_pe, 'slmsk', 0.0, 3.0)
+
+    call alloc_read_data(nc_filename, 'stddev', fp_nx, fp_ny, orog_std_grid) ! 
TODO validate if this is needed + call alloc_read_data(nc_filename, 'land_frac', fp_nx, fp_ny, land_frac_grid) ! TODO validate if this is needed + + end subroutine mn_orog_read_hires_parent + + !>@brief The subroutine 'mn_static_read_hires_r4' loads high resolution data from netCDF + !>@details Gathers a single variable from the netCDF file + subroutine mn_static_read_hires_r4(npx, npy, refine, surface_dir, file_prefix, var_name, data_grid, parent_tile, time) + integer, intent(in) :: npx, npy, refine !< Number of x,y points and nest refinement + character(len=*), intent(in) :: surface_dir, file_prefix !< Surface directory and file tag + character(len=*), intent(in) :: var_name !< Variable name in netCDF file + real*4, allocatable, intent(out) :: data_grid(:,:) !< Output data grid + integer, intent(in) :: parent_tile !< Parent tile number + integer, intent(in), optional :: time !< Optional month number for time-varying parameters + + character(len=256) :: res_str, parent_str + character(len=16) :: halo + character(len=512) :: nc_filename + integer :: nx_cubic, nx, ny, fp_nx, fp_ny + integer :: fp_istart_fine, fp_iend_fine, fp_jstart_fine, fp_jend_fine + integer :: this_pe + + this_pe = mpp_pe() + + nx_cubic = npx - 1 + nx = npx - 1 + ny = npy - 1 + + fp_istart_fine = 0 + fp_iend_fine = nx * refine + fp_jstart_fine = 0 + fp_jend_fine = ny * refine + + fp_nx = fp_iend_fine - fp_istart_fine + fp_ny = fp_jend_fine - fp_jstart_fine + + if (debug_log) print '("[INFO] WDR NCREAD LOFC mn_static_read_hires npe=",I0,I4,I4," ",A128," ",A128)', this_pe, fp_nx, fp_ny, var_name, nc_filename + + call mn_static_filename(surface_dir, parent_tile, file_prefix, refine, nc_filename) + + if (present(time)) then + call alloc_read_data(nc_filename, var_name, fp_nx, fp_ny, data_grid, time) + else + call alloc_read_data(nc_filename, var_name, fp_nx, fp_ny, data_grid) + endif + + end subroutine mn_static_read_hires_r4 + + !>@brief The subroutine 'mn_static_read_hires_r8' loads high resolution data from netCDF + !>@details Gathers a single variable from the netCDF file + subroutine mn_static_read_hires_r8(npx, npy, refine, surface_dir, file_prefix, var_name, data_grid, parent_tile) + integer, intent(in) :: npx, npy, refine !< Number of x,y points and nest refinement + character(len=*), intent(in) :: surface_dir, file_prefix !< Surface directory and file tag + character(len=*), intent(in) :: var_name !< Variable name in netCDF file + real*8, allocatable, intent(out) :: data_grid(:,:) !< Output data grid + integer, intent(in) :: parent_tile !< Parent tile number + + character(len=256) :: res_str, parent_str + character(len=16) :: halo + character(len=512) :: nc_filename + + integer :: nx_cubic, nx, ny, fp_nx, fp_ny + integer :: fp_istart_fine, fp_iend_fine, fp_jstart_fine, fp_jend_fine + integer :: this_pe + + this_pe = mpp_pe() + + nx_cubic = npx - 1 + nx = npx - 1 + ny = npy - 1 + + fp_istart_fine = 0 + fp_iend_fine = nx * refine + fp_jstart_fine = 0 + fp_jend_fine = ny * refine + + fp_nx = fp_iend_fine - fp_istart_fine + fp_ny = fp_jend_fine - fp_jstart_fine + + if (debug_log) print '("[INFO] WDR NCREAD LOFC mn_static_read_hires npe=",I0,I4,I4," ",A128," ",A128)', this_pe, fp_nx, fp_ny, var_name, nc_filename + + call mn_static_filename(surface_dir, parent_tile, file_prefix, refine, nc_filename) + + call alloc_read_data(nc_filename, var_name, fp_nx, fp_ny, data_grid) + + end subroutine mn_static_read_hires_r8 + + + !!============================================================================ + !! 
Step 5.2 -- Recalculate nest halo weights
+  !!============================================================================
+
+  !>@brief The subroutine 'mn_meta_recalc' recalculates nest halo weights
+  subroutine mn_meta_recalc( delta_i_c, delta_j_c, x_refine, y_refine, tile_geo, parent_geo, fp_super_tile_geo, &
+      is_fine_pe, nest_domain, position, p_grid, n_grid, wt, istart_coarse, jstart_coarse)
+    integer, intent(in) :: delta_i_c, delta_j_c !< Nest motion in delta i,j
+    integer, intent(in) :: x_refine, y_refine !< Nest refinement
+    type(grid_geometry), intent(inout) :: tile_geo, parent_geo, fp_super_tile_geo !< Tile geometries
+    logical, intent(in) :: is_fine_pe !< Is this a nest PE?
+    type(nest_domain_type), intent(in) :: nest_domain !< Nest domain structure
+    real(kind=R_GRID), allocatable, intent(inout) :: p_grid(:,:,:) !< Parent lat/lon grid
+    real(kind=R_GRID), allocatable, intent(inout) :: n_grid(:,:,:) !< Nest lat/lon grid
+    real, allocatable, intent(inout) :: wt(:,:,:) !< Interpolation weights
+    integer, intent(inout) :: position !< Stagger
+    integer, intent(in) :: istart_coarse, jstart_coarse !< Initial nest offsets
+
+    type(bbox) :: wt_fine, wt_coarse
+    integer :: this_pe
+
+    this_pe = mpp_pe()
+
+    ! Update the coarse and fine indices after shifting the nest
+    if (is_fine_pe) then
+
+      if (debug_log) print '("[INFO] WDR NRD4 is_fine_pe=TRUE about to call bbox_get_C2F_index. npe=",I0, " position=",I0)', this_pe, position
+
+      !!===========================================================
+      !!
+      !!  Recalculate halo weights
+      !!
+      !!===========================================================
+
+      call bbox_get_C2F_index(nest_domain, wt_fine, wt_coarse, EAST, position)
+      call calc_nest_halo_weights(wt_fine, wt_coarse, p_grid, n_grid, wt, istart_coarse, jstart_coarse, x_refine, y_refine)
+
+      call bbox_get_C2F_index(nest_domain, wt_fine, wt_coarse, WEST, position)
+      call calc_nest_halo_weights(wt_fine, wt_coarse, p_grid, n_grid, wt, istart_coarse, jstart_coarse, x_refine, y_refine)
+
+      call bbox_get_C2F_index(nest_domain, wt_fine, wt_coarse, NORTH, position)
+      call calc_nest_halo_weights(wt_fine, wt_coarse, p_grid, n_grid, wt, istart_coarse, jstart_coarse, x_refine, y_refine)
+
+      call bbox_get_C2F_index(nest_domain, wt_fine, wt_coarse, SOUTH, position)
+      call calc_nest_halo_weights(wt_fine, wt_coarse, p_grid, n_grid, wt, istart_coarse, jstart_coarse, x_refine, y_refine)
+
+    endif
+
+  end subroutine mn_meta_recalc
+
+
+  !!============================================================================
+  !! Step 5.3 -- Adjust index by delta_i_c, delta_j_c
+  !!============================================================================
+
+  !>@brief The subroutine 'mn_shift_index' adjusts the index array for a nest move
+  !>@details Fast routine to increment indices by the delta in i,j direction
+  subroutine mn_shift_index(delta_i_c, delta_j_c, ind)
+    integer, intent(in) :: delta_i_c, delta_j_c !< Nest move deltas in i,j
+    integer, allocatable, intent(inout) :: ind(:,:,:) !< Nest to parent index
+
+    ! Shift the index by the delta of this nest move.
+    ! TODO -- validate that we are not moving off the edge of the parent grid.
+    integer :: i, j
+
+    do i = lbound(ind,1), ubound(ind,1)
+      do j = lbound(ind,2), ubound(ind,2)
+        ind(i,j,1) = ind(i,j,1) + delta_i_c
+        ind(i,j,2) = ind(i,j,2) + delta_j_c
+      enddo
+    enddo
+
+  end subroutine mn_shift_index
+
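+  !>@brief Illustrative sketch only; not called anywhere in this patch.
+  !>@details Shows the whole-array equivalent of the loops in 'mn_shift_index' above.
+  !> The subroutine name is a placeholder chosen for illustration.  For example, a
+  !> one-cell nest move with delta_i_c = 1 increments the parent i-index of every
+  !> fine-grid point by one while leaving the j-index unchanged.
+  subroutine mn_shift_index_sketch(delta_i_c, delta_j_c, ind)
+    integer, intent(in) :: delta_i_c, delta_j_c !< Nest move deltas in i,j
+    integer, allocatable, intent(inout) :: ind(:,:,:) !< Nest to parent index
+
+    ! Same update as mn_shift_index: add the move to the i component (ind(:,:,1))
+    ! and the j component (ind(:,:,2)) of every point's parent index.
+    ind(:,:,1) = ind(:,:,1) + delta_i_c
+    ind(:,:,2) = ind(:,:,2) + delta_j_c
+
+  end subroutine mn_shift_index_sketch
+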
+ !================================================================================ + + !!============================================================================ + !! Step 6 Shift the data on each nest PE + !! -- similar to med_nest_move in HWRF + !!============================================================================ + + !>@brief The subroutine 'mn_prog_shift_data' shifts the data on each nest PE + !>@details Iterates through the prognostic variables + subroutine mn_prog_shift_data(Atm, n, child_grid_num, wt_h, wt_u, wt_v, & + delta_i_c, delta_j_c, x_refine, y_refine, & + is_fine_pe, nest_domain, nz) + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Atm data array + integer, intent(in) :: n, child_grid_num !< Grid numbers + real, allocatable, intent(in) :: wt_h(:,:,:), wt_u(:,:,:), wt_v(:,:,:) !< Interpolation weights + integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< Delta i,j, nest refinement + logical, intent(in) :: is_fine_pe !< Is this is a nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + integer, intent(in) :: nz !< Number of vertical levels + + ! Constants for mpp calls + integer :: interp_type = 1 ! cell-centered A-grid + integer :: interp_type_u = 4 ! D-grid + integer :: interp_type_v = 4 ! D-grid + integer :: position = CENTER ! CENTER, NORTH, EAST + integer :: position_u = NORTH + integer :: position_v = EAST + + type(fv_moving_nest_prog_type), pointer :: mn_prog + + mn_prog => Moving_nest(n)%mn_prog + + call mn_var_shift_data(Atm(n)%q_con, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + call mn_var_shift_data(Atm(n)%pt, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + call mn_var_shift_data(Atm(n)%w, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + !call mn_var_shift_data(Atm(n)%omga, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + ! delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + call mn_var_shift_data(Atm(n)%delp, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + !call mn_var_shift_data(Atm(n)%delz, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + ! 
delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + call mn_var_shift_data(mn_prog%delz, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + call mn_var_shift_data(Atm(n)%ua, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + call mn_var_shift_data(Atm(n)%va, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + call mn_var_shift_data(Atm(n)%q, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + !if (debug_log) print '("[INFO] WDR MV_NST6 show wt_u run step 6 atmosphere.F90 npe=",I0," n=",I0)', this_pe, n + !call check_array(Atm(n)%neststruct%wt_u, this_pe, "Atm(n)%neststruct%wt_u", 0.0, 1.0) + !call check_array(wt_u, this_pe, "wt_u", 0.0, 1.0) + !if (debug_log) print '("[INFO] WDR MV_NST6 stagger run step 6 atmosphere.F90 npe=",I0," n=",I0)', this_pe, n + + call mn_var_shift_data(Atm(n)%u, interp_type_u, wt_u, Atm(child_grid_num)%neststruct%ind_u, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position_u, nz) + + call mn_var_shift_data(Atm(n)%v, interp_type_v, wt_v, Atm(child_grid_num)%neststruct%ind_v, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position_v, nz) + + end subroutine mn_prog_shift_data + + + !!============================================================================ + !! Step 6 - per variable + !!============================================================================ + + !>@brief The subroutine 'mn_prog_shift_data_r4_2d' shifts the data for a variable on each nest PE + !>@details For single variable + subroutine mn_var_shift_data_r4_2d(data_var, interp_type, wt, ind, delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + real*4, allocatable, intent(inout) :: data_var(:,:) !< Data variable + integer, intent(in) :: interp_type !< Interpolation stagger type + real, allocatable, intent(in) :: wt(:,:,:) !< Interpolation weight array + integer, allocatable, intent(in) :: ind(:,:,:) !< Fine to coarse index array + integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< delta i,j for nest move. Nest refinement. + logical, intent(in) :: is_fine_pe !< Is nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + integer, intent(in) :: position !< Grid offset + + real*4, dimension(:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + logical :: parent_proc, child_proc + type(bbox) :: north_fine, north_coarse ! step 4 + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: my_stat + character(256) :: my_errmsg + integer :: is, ie, js, je + integer :: this_pe + + integer :: nest_level = 1 ! WDR TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + if (debug_log) print '("[INFO] WDR NRD5. 
npe=",I0)', this_pe + + if (debug_log) print '("[INFO] show_nest_domain npe=",I0," nest_domain%tile_fine=",I0," %tile_coarse=",I0)', this_pe, nest_domain%tile_fine, nest_domain%tile_coarse + + if (debug_log) print '("[INFO] show_nest_domain npe=",I0," nest_domain%istart_fine=",I0," %iend_fine=",I0)', this_pe, nest_domain%istart_fine, nest_domain%iend_fine + if (debug_log) print '("[INFO] show_nest_domain npe=",I0," nest_domain%jstart_fine=",I0," %jend_fine=",I0)', this_pe, nest_domain%jstart_fine, nest_domain%jend_fine + + if (debug_log) print '("[INFO] show_nest_domain npe=",I0," nest_domain%istart_coarse=",I0," %iend_coarse=",I0)', this_pe, nest_domain%istart_coarse, nest_domain%iend_coarse + if (debug_log) print '("[INFO] show_nest_domain npe=",I0," nest_domain%jstart_coarse=",I0," %jend_coarse=",I0)', this_pe, nest_domain%jstart_coarse, nest_domain%jend_coarse + + if (debug_log) print '("[INFO] data_var npe=",I0," data_var(",I0,"-",I0,",",I0,"-",I0,")")', this_pe, lbound(data_var, 1), ubound(data_var, 1), lbound(data_var, 2), ubound(data_var, 2) + + if (debug_log) print '("[INFO] wt npe=",I0," wt(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', this_pe, lbound(wt, 1), ubound(wt, 1), lbound(wt, 2), ubound(wt, 2), lbound(wt, 3), ubound(wt, 3) + + !==================================================== + if (debug_log) print '("[INFO] WDR ALL1. npe=",I0," position=",I0)', this_pe, position + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position) + + if (debug_log) print '("[INFO] WDR allocate_halo_buffers DONE. npe=",I0)', this_pe + + !==================================================== + + if (debug_log) print '("[INFO] WDR NRF0.d mn_var_shift_data npe=",I0," data_var(",I0,",",I0,")")', this_pe, size(data_var,1), size(data_var,2) + if (debug_log) print '("[INFO] WDR NRF1 mn_var_shift_data start. npe=",I0)', this_pe + + ! Passes data from coarse grid to fine grid's halo buffers; requires nest_domain to be intent(inout) + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (debug_log) print '("[INFO] WDR NRF2 mn_var_shift_data start. npe=",I0)', this_pe + + if (is_fine_pe) then + if (debug_log) print '("[INFO] WDR NRF3 mn_var_shift_data start. npe=",I0)', this_pe + + !!=========================================================== + !! + !! Shift grids internal to each nest PE + !! + !!=========================================================== + + if ( delta_i_c .ne. 0 ) then + if (debug_log) print '("[INFO] WDR NREX mn_var_shift_data start. npe=",I0)', this_pe + data_var = eoshift(data_var, x_refine * delta_i_c, DIM=1) + endif + + if (delta_j_c .ne. 0) then + if (debug_log) print '("[INFO] WDR NREY mn_var_shift_data start. npe=",I0)', this_pe + data_var = eoshift(data_var, y_refine * delta_j_c, DIM=2) + endif + + !!=========================================================== + !! + !! Apply halo data + !! + !!=========================================================== + + if (debug_log) print '("[INFO] WDR NRFI mn_var_shift_data start. 
npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, NORTH, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF N mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, SOUTH, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF S mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, EAST, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF E mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, WEST, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF W mn_var_shift_data start. npe=",I0)', this_pe + + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine mn_var_shift_data_r4_2d + + !>@brief The subroutine 'mn_prog_shift_data_r8_2d' shifts the data for a variable on each nest PE + !>@details For one double precision 2D variable + subroutine mn_var_shift_data_r8_2d(data_var, interp_type, wt, ind, delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + + real*8, allocatable, intent(inout) :: data_var(:,:) !< Data variable + integer, intent(in) :: interp_type !< Interpolation stagger type + real, allocatable, intent(in) :: wt(:,:,:) !< Interpolation weight array + integer, allocatable, intent(in) :: ind(:,:,:) !< Fine to coarse index array + integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< delta i,j for nest move. Nest refinement. + logical, intent(in) :: is_fine_pe !< Is nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + integer, intent(in) :: position !< Grid offset + + real*8, dimension(:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + logical :: parent_proc, child_proc + type(bbox) :: north_fine, north_coarse ! step 4 + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: my_stat + character(256) :: my_errmsg + integer :: is, ie, js, je + integer :: this_pe + + integer :: nest_level = 1 ! WDR TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + if (debug_log) print '("[INFO] WDR NRD5. npe=",I0)', this_pe + + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position) + + ! Passes data from coarse grid to fine grid's halo buffers; requires nest_domain to be intent(inout) + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Shift grids internal to each nest PE + !! + !!=========================================================== + + if ( delta_i_c .ne. 0 ) then + data_var = eoshift(data_var, x_refine * delta_i_c, DIM=1) + endif + + if (delta_j_c .ne. 
0) then + data_var = eoshift(data_var, y_refine * delta_j_c, DIM=2) + endif + + !!=========================================================== + !! + !! Apply halo data + !! + !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, WEST, x_refine, y_refine, wt, ind) + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine mn_var_shift_data_r8_2d + + !>@brief The subroutine 'mn_prog_shift_data_r4_3d' shifts the data for a variable on each nest PE + !>@details For one single precision 3D variable + subroutine mn_var_shift_data_r4_3d(data_var, interp_type, wt, ind, delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + real*4, allocatable, intent(inout) :: data_var(:,:,:) !< Data variable + integer, intent(in) :: interp_type !< Interpolation stagger type + real, allocatable, intent(in) :: wt(:,:,:) !< Interpolation weight array + integer, allocatable, intent(in) :: ind(:,:,:) !< Fine to coarse index array + integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< delta i,j for nest move. Nest refinement. + logical, intent(in) :: is_fine_pe !< Is nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + integer, intent(in) :: position, nz !< Grid offset, number of vertical levels + + real*4, dimension(:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + logical :: parent_proc, child_proc + type(bbox) :: north_fine, north_coarse ! step 4 + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: my_stat + character(256) :: my_errmsg + integer :: is, ie, js, je + integer :: this_pe + + integer :: nest_level = 1 ! WDR TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz) + + + !==================================================== + ! Passes data from coarse grid to fine grid's halo buffers; requires nest_domain to be intent(inout) + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Shift grids internal to each nest PE + !! + !!=========================================================== + + if ( delta_i_c .ne. 0 ) then + data_var = eoshift(data_var, x_refine * delta_i_c, DIM=1) + endif + + if (delta_j_c .ne. 0) then + data_var = eoshift(data_var, y_refine * delta_j_c, DIM=2) + endif + + !!=========================================================== + !! + !! 
Apply halo data + !! + !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine mn_var_shift_data_r4_3d + + + !>@brief The subroutine 'mn_prog_shift_data_r8_3d' shifts the data for a variable on each nest PE + !>@details For one double precision 3D variable + subroutine mn_var_shift_data_r8_3d(data_var, interp_type, wt, ind, delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + real*8, allocatable, intent(inout) :: data_var(:,:,:) !< Data variable + integer, intent(in) :: interp_type !< Interpolation stagger type + real, allocatable, intent(in) :: wt(:,:,:) !< Interpolation weight array + integer, allocatable, intent(in) :: ind(:,:,:) !< Fine to coarse index array + integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< delta i,j for nest move. Nest refinement. + logical, intent(in) :: is_fine_pe !< Is nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + integer, intent(in) :: position, nz !< Grid offset, number vertical levels + + real*8, dimension(:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + logical :: parent_proc, child_proc + type(bbox) :: north_fine, north_coarse ! step 4 + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: my_stat + character(256) :: my_errmsg + integer :: is, ie, js, je + integer :: this_pe + + integer :: nest_level = 1 ! WDR TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz) + + !==================================================== + ! Passes data from coarse grid to fine grid's halo buffers; requires nest_domain to be intent(inout) + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + !!=========================================================== + !! + !! Shift grids internal to each nest PE + !! + !!=========================================================== + + if ( delta_i_c .ne. 0 ) then + data_var = eoshift(data_var, x_refine * delta_i_c, DIM=1) + endif + + if (delta_j_c .ne. 0) then + data_var = eoshift(data_var, y_refine * delta_j_c, DIM=2) + endif + + !!=========================================================== + !! + !! Apply halo data + !! 
+ !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine mn_var_shift_data_r8_3d + + + !>@brief The subroutine 'mn_prog_shift_data_r4_4d' shifts the data for a variable on each nest PE + !>@details For one single precision 4D variable + subroutine mn_var_shift_data_r4_4d(data_var, interp_type, wt, ind, delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + real*4, allocatable, intent(inout) :: data_var(:,:,:,:) !< Data variable + integer, intent(in) :: interp_type !< Interpolation stagger type + real, allocatable, intent(in) :: wt(:,:,:) !< Interpolation weight array + integer, allocatable, intent(in) :: ind(:,:,:) !< Fine to coarse index array + integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< delta i,j for nest move. Nest refinement. + logical, intent(in) :: is_fine_pe !< Is nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + integer, intent(in) :: position, nz !< Grid offset, number of vertical levels + + real*4, dimension(:,:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + logical :: parent_proc, child_proc + type(bbox) :: north_fine, north_coarse ! step 4 + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: my_stat + character(256) :: my_errmsg + integer :: n4d + integer :: this_pe + integer :: is, ie, js, je + integer :: nest_level = 1 ! WDR TODO allow to vary + + this_pe = mpp_pe() + + n4d = ubound(data_var, 4) + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz, n4d) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz, n4d) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz, n4d) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz, n4d) + + !==================================================== + + ! Passes data from coarse grid to fine grid's halo + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + !!=========================================================== + !! + !! Shift grids internal to each nest PE + !! + !!=========================================================== + + if ( delta_i_c .ne. 0 ) then + data_var = eoshift(data_var, x_refine * delta_i_c, DIM=1) + endif + + if (delta_j_c .ne. 0) then + data_var = eoshift(data_var, y_refine * delta_j_c, DIM=2) + endif + + !!=========================================================== + !! + !! Apply halo data + !! 
+ !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine mn_var_shift_data_r4_4d + + + !>@brief The subroutine 'mn_prog_shift_data_r8_4d' shifts the data for a variable on each nest PE + !>@details For one double precision 4D variable + subroutine mn_var_shift_data_r8_4d(data_var, interp_type, wt, ind, delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + real*8, allocatable, intent(inout) :: data_var(:,:,:,:) !< Data variable + integer, intent(in) :: interp_type !< Interpolation stagger type + real, allocatable, intent(in) :: wt(:,:,:) !< Interpolation weight array + integer, allocatable, intent(in) :: ind(:,:,:) !< Fine to coarse index array + integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< delta i,j for nest move. Nest refinement. + logical, intent(in) :: is_fine_pe !< Is nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + integer, intent(in) :: position, nz !< Grid offset, number of vertical levels + + real*8, dimension(:,:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + logical :: parent_proc, child_proc + type(bbox) :: north_fine, north_coarse ! step 4 + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: my_stat + character(256) :: my_errmsg + integer :: n4d + integer :: this_pe + integer :: is, ie, js, je + integer :: nest_level = 1 ! WDR TODO allow to vary + + this_pe = mpp_pe() + + n4d = ubound(data_var, 4) + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz, n4d) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz, n4d) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz, n4d) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz, n4d) + + !==================================================== + ! Passes data from coarse grid to fine grid's halo + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + !!=========================================================== + !! + !! Shift grids internal to each nest PE + !! + !!=========================================================== + + if ( delta_i_c .ne. 0 ) then + data_var = eoshift(data_var, x_refine * delta_i_c, DIM=1) + endif + + if (delta_j_c .ne. 0) then + data_var = eoshift(data_var, y_refine * delta_j_c, DIM=2) + endif + + !!=========================================================== + !! + !! Apply halo data + !! 
+ !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine mn_var_shift_data_r8_4d + + + !================================================================================ + ! + ! Step 7 -- Gridstruct resetting and reallocation of static buffers + ! init_grid() also updates the wt arrays + !================================================================================ + + !>@brief The subroutine 'mn_meta_reset_gridstruct' resets navigation data and reallocates needed data in the gridstruct after nest move + !>@details This routine is computationally demanding and is a target for later optimization. + subroutine mn_meta_reset_gridstruct(Atm, n, child_grid_num, nest_domain, fp_super_tile_geo, x_refine, y_refine, is_fine_pe, wt_h, wt_u, wt_v, a_step, dt_atmos) + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Atm data array + integer, intent(in) :: n, child_grid_num !< This level and nest level + type(nest_domain_type), intent(in) :: nest_domain !< Nest domain structure + type(grid_geometry), intent(in) :: fp_super_tile_geo !< Parent high-resolution geometry + integer, intent(in) :: x_refine, y_refine !< Nest refinement + logical, intent(in) :: is_fine_pe !< Is nest PE? + real, allocatable, intent(in) :: wt_h(:,:,:), wt_u(:,:,:), wt_v(:,:,:) !< Interpolation weights + integer, intent(in) :: a_step !< Which timestep + real, intent(in) :: dt_atmos !< Timestep duration in seconds + + integer :: isg, ieg, jsg, jeg + integer :: ng, pp, nn, parent_tile, refinement, ioffset, joffset + integer :: this_pe, gid + integer :: tile_coarse(2) + integer :: half_x, half_y + + real(kind=R_GRID) :: pi = 4 * atan(1.0d0) + real :: rad2deg, half_lat, half_lon + + ! Coriolis parameter variables + real :: alpha = 0. + real, pointer, dimension(:,:,:) :: grid, agrid + real, pointer, dimension(:,:) :: fC, f0 + integer :: isd, ied, jsd, jed + integer :: i, j + + logical, save :: first_time = .true. + integer, save :: id_reset1, id_reset2, id_reset3, id_reset4, id_reset5, id_reset6, id_reset7 + + logical :: use_timers = .false. ! Set this to true to generate performance profiling information in out.* file + + if (first_time .and. 
use_timers) then
+      id_reset1 = mpp_clock_id ('MN 7 Reset 1', flags = clock_flag_default, grain=CLOCK_ROUTINE )
+      id_reset2 = mpp_clock_id ('MN 7 Reset 2', flags = clock_flag_default, grain=CLOCK_ROUTINE )
+      id_reset3 = mpp_clock_id ('MN 7 Reset 3', flags = clock_flag_default, grain=CLOCK_ROUTINE )
+      id_reset4 = mpp_clock_id ('MN 7 Reset 4', flags = clock_flag_default, grain=CLOCK_ROUTINE )
+      id_reset5 = mpp_clock_id ('MN 7 Reset 5', flags = clock_flag_default, grain=CLOCK_ROUTINE )
+      id_reset6 = mpp_clock_id ('MN 7 Reset 6', flags = clock_flag_default, grain=CLOCK_ROUTINE )
+      id_reset7 = mpp_clock_id ('MN 7 Reset 7', flags = clock_flag_default, grain=CLOCK_ROUTINE )
+    endif
+
+    rad2deg = 180.0 / pi
+
+    this_pe = mpp_pe()
+    gid = this_pe
+
+    parent_tile = Atm(child_grid_num)%neststruct%parent_tile
+    ioffset = Atm(child_grid_num)%neststruct%ioffset
+    joffset = Atm(child_grid_num)%neststruct%joffset
+
+    ! Log the bounds of this PE's grid after nest motion.  TODO replace step 4 with timestep
+    if (is_fine_pe .and. debug_log) then
+      call show_nest_grid(Atm(n), this_pe, 4)
+    endif
+
+    ! Reset the gridstruct values for the nest
+    if (is_fine_pe) then
+      ! Fill in values from high resolution, full panel, supergrid
+      if (use_timers) call mpp_clock_begin (id_reset1)
+
+      call fill_grid_from_supergrid(Atm(n)%gridstruct%grid, CORNER, fp_super_tile_geo, ioffset, joffset, &
+          x_refine, y_refine)
+      call fill_grid_from_supergrid(Atm(n)%gridstruct%agrid, CENTER, fp_super_tile_geo, ioffset, joffset, &
+          x_refine, y_refine)
+      call fill_grid_from_supergrid(Atm(n)%gridstruct%grid_64, CORNER, fp_super_tile_geo, &
+          ioffset, joffset, x_refine, y_refine)
+      call fill_grid_from_supergrid(Atm(n)%gridstruct%agrid_64, CENTER, fp_super_tile_geo, &
+          ioffset, joffset, x_refine, y_refine)
+
+      ! What's the status of Atm(n)%grid_global?
+      if (debug_log) print '("[INFO] WDR Atm(1) GLOBAL npe=",I0," grid_global(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', this_pe, &
+          lbound(Atm(1)%grid_global,1), ubound(Atm(1)%grid_global,1), &
+          lbound(Atm(1)%grid_global,2), ubound(Atm(1)%grid_global,2), &
+          lbound(Atm(1)%grid_global,3), ubound(Atm(1)%grid_global,3), &
+          lbound(Atm(1)%grid_global,4), ubound(Atm(1)%grid_global,4)
+
+      if (debug_log) print '("[INFO] WDR Atm(n) GLOBAL npe=",I0," grid_global(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', this_pe, &
+          lbound(Atm(n)%grid_global,1), ubound(Atm(n)%grid_global,1), &
+          lbound(Atm(n)%grid_global,2), ubound(Atm(n)%grid_global,2), &
+          lbound(Atm(n)%grid_global,3), ubound(Atm(n)%grid_global,3), &
+          lbound(Atm(n)%grid_global,4), ubound(Atm(n)%grid_global,4)
+
+      ! Reset the coriolis parameters, using code from external_ic.F90::get_external_ic()
+
+      isd = Atm(n)%bd%isd
+      ied = Atm(n)%bd%ied
+      jsd = Atm(n)%bd%jsd
+      jed = Atm(n)%bd%jed
+
+      grid => Atm(n)%gridstruct%grid
+      agrid => Atm(n)%gridstruct%agrid
+      fC => Atm(n)%gridstruct%fC
+      f0 => Atm(n)%gridstruct%f0
+
+      ! * Initialize coriolis param:
+
+      do j=jsd,jed+1
+        do i=isd,ied+1
+          fC(i,j) = 2.*omega*( -1.*cos(grid(i,j,1))*cos(grid(i,j,2))*sin(alpha) + &
+                               sin(grid(i,j,2))*cos(alpha) )
+        enddo
+      enddo
+
+      do j=jsd,jed
+        do i=isd,ied
+          f0(i,j) = 2.*omega*( -1.*cos(agrid(i,j,1))*cos(agrid(i,j,2))*sin(alpha) + &
+                               sin(agrid(i,j,2))*cos(alpha) )
+        enddo
+      enddo
+
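+      ! Note (illustrative): with the rotation angle alpha = 0, as set above, these
+      ! expressions reduce to the familiar Coriolis parameter f = 2*omega*sin(lat);
+      ! e.g. taking omega ~ 7.292e-5 s^-1 gives f ~ 1.03e-4 s^-1 at 45 degrees latitude.
+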
+      !! Let this get reset in init_grid()/setup_aligned_nest()
+      !call fill_grid_from_supergrid(Atm(n)%grid_global, CORNER, fp_super_tile_geo, &
+      !    ioffset, joffset, x_refine, y_refine)
+
+      if (use_timers) call mpp_clock_end (id_reset1)
+      if (use_timers) call mpp_clock_begin (id_reset2)
+
+      ! TODO should these get reset by init_grid instead??
+      call fill_weight_grid(Atm(n)%neststruct%wt_h, wt_h)
+      call fill_weight_grid(Atm(n)%neststruct%wt_u, wt_u)
+      call fill_weight_grid(Atm(n)%neststruct%wt_v, wt_v)
+      ! WDR TODO -- Seems like this is not used anywhere, other than being allocated, filled, deallocated
+      !call fill_weight_grid(Atm(n)%neststruct%wt_b, wt_b)
+
+      if (use_timers) call mpp_clock_end (id_reset2)
+
+    endif
+
+    if (debug_log) print '("[INFO] WDR INIT_GRID AP1 fv_moving_nest.F90 npe=",I0," n=",I0)', this_pe, n
+
+    if (use_timers) call mpp_clock_begin (id_reset3)
+
+    ! TODO Write clearer comments on what is happening here.
+
+    ! This code runs several communication steps:
+    !  1. As npe=0, it gets the global_grid domain setup
+    !  2. sends the global_grid to the other parent PEs
+    !  3. global_grid is received in the call to setup_aligned_nest() in fv_grid_tools.F90::init_grid()
+    ! Other communication is contained fully within setup_aligned_nest().
+
+    ! Sends around data from the parent grids, and recomputes the update indices
+    ! This code copied from fv_control.F90
+    ! Need to SEND grid_global to any child grids; this is received in setup_aligned_nest in fv_grid_tools
+    ! if (Atm(pp)%neststruct%nested) then
+
+    ! TODO phrase this more carefully to choose the parent master PE grid if we are operating in a nested setup.
+    ! Unlike in fv_control.F90, this will be running on Atm(1) when it's on pe=0, so we don't need to navigate to parent_grid.
+
+    first_time = .false.
+
+    ! Seems like we do not need to resend this -- setup_aligned_nest now saves the parent tile information during model initialization,
+    ! which happens before we enter the moving nest code.
+    if (this_pe .eq. 0 .and. first_time) then
+
+      ! This is the Atm index for the nest values.
+      pp = child_grid_num
+
+      if (debug_log) print '("[INFO] WDR INIT_GRID AP2 fv_moving_nest.F90 npe=",I0," n=",I0," pp=",I0)', this_pe, n, pp
+
+      refinement = x_refine
+      ng = Atm(n)%ng
+
+      call mpp_get_global_domain( Atm(n)%domain, isg, ieg, jsg, jeg)
+
+      !if (debug_log) print '("[INFO] WDR INIT_GRID AP3.1 fv_moving_nest.F90 npe=",I0," gid=",I0," associated(parent_grid)=",L1)', this_pe, gid, associated(Atm(pp)%parent_grid)
+      if (debug_log) print '("[INFO] WDR INIT_GRID AP3.1 fv_moving_nest.F90 npe=",I0," gid=",I0," parent_tile=",I0)', this_pe, gid, parent_tile
+      if (debug_log) print '("[INFO] WDR INIT_GRID AP3.2 fv_moving_nest.F90 npe=",I0," gid=",I0," size(pelist)=",I0)', this_pe, gid, size(Atm(pp)%pelist)
+      if (debug_log) print '("[INFO] WDR INIT_GRID AP3.3 fv_moving_nest.F90 npe=",I0," gid=",I0," pelist1=",I0)', this_pe, gid, Atm(pp)%pelist(1)
+      !FIXME: Should replace this by generating the global grid (or at least one face thereof) on the
+      !       nested PEs instead of sending it around.
+ !if (gid == Atm(pp)%parent_grid%pelist(1)) then + if (debug_log) print '("[INFO] WDR INIT_GRID XFER AP4 fv_moving_nest.F90 npe=",I0," send to pe=",I0," size=",I0)', this_pe, Atm(pp)%pelist(1), size(Atm(n)%grid_global(isg-ng:ieg+1+ng,jsg-ng:jeg+1+ng,1:2,parent_tile)) + + call mpp_send(Atm(n)%grid_global(isg-ng:ieg+1+ng,jsg-ng:jeg+1+ng,1:2,parent_tile), & + size(Atm(n)%grid_global(isg-ng:ieg+1+ng,jsg-ng:jeg+1+ng,1:2,parent_tile)), & + Atm(pp)%pelist(1)) !send to p_ind in setup_aligned_nest + if (debug_log) print '("[INFO] WDR INIT_GRID AP5 fv_moving_nest.F90 npe=",I0)', this_pe + call mpp_sync_self() + if (debug_log) print '("[INFO] WDR INIT_GRID AP6 fv_moving_nest.F90 npe=",I0)', this_pe + !endif + endif + + if (debug_log) print '("[INFO] WDR INIT_GRID AP9 fv_moving_nest.F90 npe=",I0)', this_pe + + !if (ngrids > 1) call setup_update_regions ! Originally from fv_control.F90 + call mn_setup_update_regions(Atm, n, nest_domain) + + if (use_timers) call mpp_clock_end (id_reset3) + if (use_timers) call mpp_clock_begin (id_reset4) + + if (Atm(n)%neststruct%nested) then + if (debug_log) print '("[INFO] WDR INIT_GRID setup_aligned_nestA fv_moving_nest.F90 npe=",I0," n=",I0)', this_pe, n + + ! New code from fv_control.F90 + ! call init_grid(Atm(this_grid), Atm(this_grid)%flagstruct%grid_name, Atm(this_grid)%flagstruct%grid_file, & + ! Atm(this_grid)%flagstruct%npx, Atm(this_grid)%flagstruct%npy, Atm(this_grid)%flagstruct%npz, Atm(this_grid)%flagstruct%ndims, Atm(this_grid)%flagstruct%ntiles, Atm(this_grid)%ng, tile_coarse) + + ! Atm(n)%neststruct%parent_tile = tile_coarse(n) + + ! Old Code + !call init_grid(Atm(n), Atm(n)%flagstruct%grid_name, Atm(n)%flagstruct%grid_file, & + ! Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, Atm(n)%flagstruct%ndims, Atm(n)%flagstruct%ntiles, Atm(n)%ng) + + !tile_coarse(1) = Atm(n)%neststruct%parent_tile + tile_coarse(1) = parent_tile + tile_coarse(2) = parent_tile + + call init_grid(Atm(n), Atm(n)%flagstruct%grid_name, Atm(n)%flagstruct%grid_file, & + Atm(n)%flagstruct%npx, Atm(n)%flagstruct%npy, Atm(n)%flagstruct%npz, & + Atm(n)%flagstruct%ndims, Atm(n)%flagstruct%ntiles, Atm(n)%ng, tile_coarse) + if (debug_log) print '("[INFO] WDR INIT_GRID setup_aligned_nestB fv_moving_nest.F90 npe=",I0)', this_pe + endif + + if (use_timers) call mpp_clock_end (id_reset4) + if (use_timers) call mpp_clock_begin (id_reset5) + + ! Reset the gridstruct values for the nest + if (is_fine_pe) then + if (debug_log) print '("[INFO] WDR INIT_GRID AA fv_moving_nest.F90 npe=",I0)', this_pe + if (debug_log) print '("[INFO] WDR INIT_GRID BB fv_moving_nest.F90 npe=",I0)', this_pe + + call grid_utils_init(Atm(n), Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, & + Atm(n)%flagstruct%non_ortho, Atm(n)%flagstruct%grid_type, Atm(n)%flagstruct%c2l_ord) + + if (debug_log) print '("[INFO] WDR INIT_GRID CC fv_moving_nest.F90 npe=",I0)', this_pe + endif + + if (use_timers) call mpp_clock_end (id_reset5) + if (use_timers) call mpp_clock_begin (id_reset6) + + if (debug_log) print '("[INFO] WDR NEST_DOMAIN ZZ fv_moving_nest.F90 npe=",I0)', this_pe + + if (debug_log) print '("[INFO] WDR REINIT1 CT fv_moving_nest.F90. npe=",I0," twowaynest=",L1" Atm(1)%neststruct%parent_tile=",I0)', & + this_pe, Atm(1)%neststruct%twowaynest, Atm(1)%neststruct%parent_tile + + if (debug_log) print '("[INFO] WDR REINIT2 CT fv_moving_nest.F90. npe=",I0," twowaynest=",L1," Atm(2)%neststruct%parent_tile=",I0," n=",I0)', & + this_pe, Atm(2)%neststruct%twowaynest, Atm(2)%neststruct%parent_tile, n + + !call mpp_sync(full_pelist) ! 
Used to make debugging easier. Can be removed. + + ! Needs to run for parent and nest Atm(2) + ! Nest PEs update ind_update_h -- this now seems obsolete + ! Parent tile PEs update isu, ieu, jsu, jeu + ! Global tiles that are not parent have no changes + if (debug_log) print '("[INFO] WDR REINIT CV fv_moving_nest.F90. npe=",I0, " n=",I0)', this_pe, n + + ! WDR This is now accomplished with the earlier call to setup_update_regions() + !call reinit_parent_indices(Atm(2)) + !!call reinit_parent_indices(Atm(n)) + !if (debug_log) print '("[INFO] WDR REINIT CW fv_moving_nest.F90. npe=",I0)', this_pe + + do nn = 1, size(Atm) + if (debug_log) call show_atm("3", Atm(nn), nn, this_pe) + enddo + + + ! Output the center lat/lon of the nest + ! only the PE that holds the center point will output this information to the logfile + ! lat = agrid(:,:,2) and lon = agrid(:,:,1), in radians + if (is_fine_pe) then + half_x = Atm(child_grid_num)%npx / 2 + half_y = Atm(child_grid_num)%npy / 2 + + if (half_x .ge. Atm(child_grid_num)%bd%is .and. half_x .le. Atm(child_grid_num)%bd%ie .and. half_y .ge. Atm(child_grid_num)%bd%js .and. half_y .le. Atm(child_grid_num)%bd%je) then + + half_lat = Atm(child_grid_num)%gridstruct%agrid(half_x, half_y,2) * rad2deg + half_lon = Atm(child_grid_num)%gridstruct%agrid(half_x, half_y,1) * rad2deg + if (half_lon .gt. 180.0) half_lon = half_lon - 360.0 + + print '("[INFO] fv_moving_nest.F90 NEST MOVED to npe=",I0," x=",I0," y=",I0," lat=",F6.2," lon=",F7.2," a_step=",I8," fcst_hr=",F12.3)', this_pe, \ + half_x, half_y, half_lat, half_lon, a_step, a_step * dt_atmos / 3600.0 + endif + + endif + + ! Reallocate the halo buffers in the neststruct, as some are now the wrong size + ! Optimization would be to only deallocate the edges that have changed. + + ! TODO Write comments on the t0 and t1 buffers + if (use_timers) call mpp_clock_end (id_reset6) + if (use_timers) call mpp_clock_begin (id_reset7) + + if (is_fine_pe) then + !call reallocate_BC_buffers(Atm(child_grid_num)) + call reallocate_BC_buffers(Atm(1)) + if (debug_log) print '("[INFO] WDR INIT_GRID DD fv_moving_nest.F90 npe=",I0)', this_pe + + ! Reallocate buffers that are declared in fv_nesting.F90 + call dealloc_nested_buffers(Atm(1)) + + if (debug_log) print '("[INFO] WDR INIT_GRID EE fv_moving_nest.F90 npe=",I0)', this_pe + + ! Set both to true so the call to setup_nested_grid_BCs() (at the beginning of fv_dynamics()) will reset t0 buffers + ! They will be returned to false by setup_nested_grid_BCs() + + if (debug_log) print '("[INFO] WDR RESET_BCs first_step=.true. fv_moving_nest.F90 npe=",I0)', this_pe + Atm(n)%neststruct%first_step = .true. + !Atm(n)%flagstruct%make_nh= .true. + + !! Fill in the BC time1 buffers + !call setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & + ! u, v, w, pt, delp, delz, q, uc, vc, pkz, & + ! neststruct%nested, flagstruct%inline_q, flagstruct%make_nh, ng, & + ! gridstruct, flagstruct, neststruct, & + ! neststruct%nest_timestep, neststruct%tracer_nest_timestep, & + ! domain, bd, nwat) + + ! Transfer the BC time1 buffers to time0 + + !call set_NH_BCs_t0(neststruct) + !call set_BCs_t0(ncnst, flagstruct%hydrostatic, neststruct) + + endif + if (use_timers) call mpp_clock_end (id_reset7) + + end subroutine mn_meta_reset_gridstruct + + + ! Copied and adapted from fv_control.F90::setup_update_regions(); where it is an internal subroutine + ! 
Modifications only to pass necessary variables as arguments + + !>@brief The subroutine 'mn_setup_update_regions' performs some of the tasks of fv_control.F90::setup_update_regions() for nest motion + !>@details This routine only updates indices, so is computationally efficient + subroutine mn_setup_update_regions(Atm, this_grid, nest_domain) + type(fv_atmos_type), allocatable, intent(INOUT) :: Atm(:) !< Array of atmospheric data + integer, intent(IN) :: this_grid !< Parent or child grid number + type(nest_domain_type), intent(in) :: nest_domain !< Nest domain structure + + integer :: isu, ieu, jsu, jeu ! update regions + integer :: isc, jsc, iec, jec + integer :: upoff + integer :: ngrids, n, nn + integer :: isu_stag, isv_stag, jsu_stag, jsv_stag + integer :: ieu_stag, iev_stag, jeu_stag, jev_stag + integer :: this_pe + + this_pe = mpp_pe() + + ! Need to get the following variables from nest_domain + ! tile_coarse() + ! icount_coarse() + ! from mpp_define_nest_domains.inc: iend_coarse(n) = istart_coarse(n) + icount_coarse(n) - 1 + ! rearrange to: iend_coarse(n) - istart_coarse(n) + 1 = icount_coarse(n) + ! jcount_coarse() + ! nest_ioffsets() + ! in fv_control.F90. pass nest_ioffsets as istart_coarse + ! nest_joffsets() + + isc = Atm(this_grid)%bd%isc + jsc = Atm(this_grid)%bd%jsc + iec = Atm(this_grid)%bd%iec + jec = Atm(this_grid)%bd%jec + + upoff = Atm(this_grid)%neststruct%upoff + + ngrids = size(Atm) + + if (debug_log) print '("[INFO] WDR SUR fv_moving_nest.F90. npe=",I0," ngrids=",I0," nest_domain%tile_coarse(",I0,"-",I0,")")', this_pe, ngrids, lbound(nest_domain%tile_coarse), ubound(nest_domain%tile_coarse) + + if (debug_log) print '("[INFO] WDR tile_coarse fv_moving_nest.F90 npe=",I0," tile_coarse(",I0,"-",I0") ngrids=",I0," tile_coarse(1)=",I0)', this_pe, & + lbound(nest_domain%tile_coarse,1), ubound(nest_domain%tile_coarse,1), ngrids, nest_domain%tile_coarse(1) + + if (debug_log) print '("[INFO] WDR tile_coarse fv_moving_nest.F90 npe=",I0," istart_coarse(",I0,"-",I0")")', this_pe, & + lbound(nest_domain%istart_coarse,1), ubound(nest_domain%istart_coarse,1) + + do n=2,ngrids + nn = n - 1 ! WDR TODO revise this to handle multiple nests. This adjusts to match fv_control.F90 where these + ! arrays are passed in to mpp_define_nest_domains with bounds (2:ngrids) + + ! Updated code from new fv_control.F90 November 8. 2021 Ramstrom + + if (nest_domain%tile_coarse(nn) == Atm(this_grid)%global_tile) then + + !isu = nest_ioffsets(n) + isu = nest_domain%istart_coarse(nn) + !ieu = isu + icount_coarse(n) - 1 + ieu = isu + (nest_domain%iend_coarse(nn) - nest_domain%istart_coarse(nn) + 1) - 1 + + !jsu = nest_joffsets(n) + jsu = nest_domain%jstart_coarse(nn) + !jeu = jsu + jcount_coarse(n) - 1 + jeu = jsu + (nest_domain%jend_coarse(nn) - nest_domain%jstart_coarse(nn) + 1) - 1 + +!!! Begin new + isu_stag = isu + jsu_stag = jsu + ieu_stag = ieu + jeu_stag = jeu+1 + + isv_stag = isu + jsv_stag = jsu + iev_stag = ieu+1 + jev_stag = jeu +!!! End new + + + !update offset adjustment + isu = isu + upoff + ieu = ieu - upoff + jsu = jsu + upoff + jeu = jeu - upoff + +!!! Begin new + isu_stag = isu_stag + upoff + ieu_stag = ieu_stag - upoff + jsu_stag = jsu_stag + upoff + jeu_stag = jeu_stag - upoff + + isv_stag = isv_stag + upoff + iev_stag = iev_stag - upoff + jsv_stag = jsv_stag + upoff + jev_stag = jev_stag - upoff + + ! Absolute boundary for the staggered point update region on the parent. + ! This is used in remap_uv to control the update of the last staggered point + ! 
when the update region coincides with a PE domain, to avoid cross-restart reproducibility issues
+
+ Atm(n)%neststruct%jeu_stag_boundary = jeu_stag
+ Atm(n)%neststruct%iev_stag_boundary = iev_stag
+
+ if (isu > iec .or. ieu < isc .or. &
+ jsu > jec .or. jeu < jsc ) then
+ isu = -999 ; jsu = -999 ; ieu = -1000 ; jeu = -1000
+ else
+ isu = max(isu,isc) ; jsu = max(jsu,jsc)
+ ieu = min(ieu,iec) ; jeu = min(jeu,jec)
+ endif
+
+ ! Update region for staggered quantities, to avoid cross-restart reproducibility issues when the PE domain boundary
+ ! coincides with the nest. Basically, write the staggered update on compute domains.
+
+ if (isu_stag > iec .or. ieu_stag < isc .or. &
+ jsu_stag > jec .or. jeu_stag < jsc ) then
+ isu_stag = -999 ; jsu_stag = -999 ; ieu_stag = -1000 ; jeu_stag = -1000
+ else
+ isu_stag = max(isu_stag,isc) ; jsu_stag = max(jsu_stag,jsc)
+ ieu_stag = min(ieu_stag,iec) ; jeu_stag = min(jeu_stag,jec)
+ endif
+
+ if (isv_stag > iec .or. iev_stag < isc .or. &
+ jsv_stag > jec .or. jev_stag < jsc ) then
+ isv_stag = -999 ; jsv_stag = -999 ; iev_stag = -1000 ; jev_stag = -1000
+ else
+ isv_stag = max(isv_stag,isc) ; jsv_stag = max(jsv_stag,jsc)
+ iev_stag = min(iev_stag,iec) ; jev_stag = min(jev_stag,jec)
+ endif
+!!! End new
+
+ if (isu > iec .or. ieu < isc .or. &
+ jsu > jec .or. jeu < jsc ) then
+ isu = -999 ; jsu = -999 ; ieu = -1000 ; jeu = -1000
+ else
+ isu = max(isu,isc) ; jsu = max(jsu,jsc)
+ ieu = min(ieu,iec) ; jeu = min(jeu,jec)
+ endif
+
+ ! lump indices
+ isu=max(isu, isu_stag, isv_stag)
+ jsu=max(jsu, jsu_stag, jsv_stag)
+ jeu_stag=max(jeu, jeu_stag)
+ jev_stag=max(jeu, jev_stag)
+ ieu_stag=max(ieu ,ieu_stag)
+ iev_stag=max(ieu ,iev_stag)
+
+ Atm(n)%neststruct%isu = isu
+ Atm(n)%neststruct%ieu = ieu_stag
+ Atm(n)%neststruct%jsu = jsu
+ Atm(n)%neststruct%jeu = jev_stag
+
+ Atm(n)%neststruct%jeu_stag = jeu_stag
+ Atm(n)%neststruct%iev_stag = iev_stag
+ endif
+ enddo
+
+ end subroutine mn_setup_update_regions
+
+
+ !==================================================================================================
+ !
+ ! Recalculation Section -- Buffers that have to change size after nest motion
+ !
+ !==================================================================================================
+
+ !>@brief The subroutine 'reallocate_BC_buffers' reallocates boundary condition buffers - some need to change size after a nest move.
+ !>@details One might expect these to be reallocated in boundary.F90 nested_grid_BC_recv() when needed, but they are not, so this routine rebuilds them explicitly.
+ subroutine reallocate_BC_buffers(Atm)
+ type(fv_atmos_type), intent(inout) :: Atm !< Single instance of atmospheric data
+
+ integer :: n, ns
+ logical :: dummy = .false. !
same as grids_on_this_pe(n) + + call deallocate_fv_nest_BC_type(Atm%neststruct%delp_BC) + call deallocate_fv_nest_BC_type(Atm%neststruct%u_BC) + call deallocate_fv_nest_BC_type(Atm%neststruct%v_BC) + call deallocate_fv_nest_BC_type(Atm%neststruct%uc_BC) + call deallocate_fv_nest_BC_type(Atm%neststruct%vc_BC) + call deallocate_fv_nest_BC_type(Atm%neststruct%divg_BC) + + if (allocated(Atm%neststruct%q_BC)) then + do n=1,size(Atm%neststruct%q_BC) + call deallocate_fv_nest_BC_type(Atm%neststruct%q_BC(n)) + enddo + endif + +#ifndef SW_DYNAMICS + call deallocate_fv_nest_BC_type(Atm%neststruct%pt_BC) +#ifdef USE_COND + call deallocate_fv_nest_BC_type(Atm%neststruct%q_con_BC) +#ifdef MOIST_CAPPA + call deallocate_fv_nest_BC_type(Atm%neststruct%cappa_BC) +#endif +#endif + if (.not.Atm%flagstruct%hydrostatic) then + call deallocate_fv_nest_BC_type(Atm%neststruct%w_BC) + call deallocate_fv_nest_BC_type(Atm%neststruct%delz_BC) + endif +#endif + + ! Reallocate the buffers + + ns = Atm%neststruct%nsponge + + call allocate_fv_nest_BC_type(Atm%neststruct%delp_BC,Atm,ns,0,0,dummy) + call allocate_fv_nest_BC_type(Atm%neststruct%u_BC,Atm,ns,0,1,dummy) + call allocate_fv_nest_BC_type(Atm%neststruct%v_BC,Atm,ns,1,0,dummy) + call allocate_fv_nest_BC_type(Atm%neststruct%uc_BC,Atm,ns,1,0,dummy) + call allocate_fv_nest_BC_type(Atm%neststruct%vc_BC,Atm,ns,0,1,dummy) + call allocate_fv_nest_BC_type(Atm%neststruct%divg_BC,Atm,ns,1,1,dummy) + + ! if (ncnst > 0) then + ! allocate(Atm%neststruct%q_BC(ncnst)) + ! do n=1,ncnst + ! call allocate_fv_nest_BC_type(Atm%neststruct%q_BC(n),Atm,ns,0,0,dummy) + ! enddo + ! endif + + if (allocated(Atm%neststruct%q_BC)) then + do n=1,size(Atm%neststruct%q_BC) + call allocate_fv_nest_BC_type(Atm%neststruct%q_BC(n),Atm,ns,0,0,dummy) + enddo + endif + +#ifndef SW_DYNAMICS + call allocate_fv_nest_BC_type(Atm%neststruct%pt_BC,Atm,ns,0,0,dummy) +#ifdef USE_COND + call allocate_fv_nest_BC_type(Atm%neststruct%q_con_BC,Atm,ns,0,0,dummy) +#ifdef MOIST_CAPPA + call allocate_fv_nest_BC_type(Atm%neststruct%cappa_BC,Atm,ns,0,0,dummy) +#endif +#endif + if (.not.Atm%flagstruct%hydrostatic) then + call allocate_fv_nest_BC_type(Atm%neststruct%w_BC,Atm,ns,0,0,dummy) + call allocate_fv_nest_BC_type(Atm%neststruct%delz_BC,Atm,ns,0,0,dummy) + endif +#endif + + end subroutine reallocate_BC_buffers + + + !!============================================================================ + !! Step 8 -- Moving Nest Output to NetCDF + !!============================================================================ + + !>@brief The subroutine 'mn_prog_dump_to_netcdf' dumps selected prognostic variables to netCDF file. + !>@details Can be modified to output more of the prognostic variables if wanted. Certain 3D variables were commented out for performance. + subroutine mn_prog_dump_to_netcdf(Atm, time_val, file_prefix, is_fine_pe, domain_coarse, domain_fine, nz) + type(fv_atmos_type), intent(in) :: Atm !< Single instance of atmospheric data + integer, intent(in) :: time_val !< Timestep number + character(len=*), intent(in) :: file_prefix !< Filename prefix + logical, intent(in) :: is_fine_pe !< Is nest PE? 
+ type(domain2d), intent(in) :: domain_coarse, domain_fine !< Domain structures + integer, intent(in) :: nz !< Number of vertical levels + + integer :: n_moist + character(len=16) :: out_var_name + integer :: position = CENTER + !integer :: position_u = NORTH + !integer :: position_v = EAST + + call mn_var_dump_to_netcdf(Atm%pt , is_fine_pe, domain_coarse, domain_fine, position, nz, & + time_val, Atm%global_tile, file_prefix, "tempK") + call mn_var_dump_to_netcdf(Atm%pt(:,:,64) , is_fine_pe, domain_coarse, domain_fine, position, nz, & + time_val, Atm%global_tile, file_prefix, "T64") + !call mn_var_dump_to_netcdf(Atm%delp , is_fine_pe, domain_coarse, domain_fine, position, nz, & + ! time_val, Atm%global_tile, file_prefix, "DELP") + call mn_var_dump_to_netcdf(Atm%delz , is_fine_pe, domain_coarse, domain_fine, position, nz, & + time_val, Atm%global_tile, file_prefix, "DELZ") + call mn_var_dump_to_netcdf(Atm%q_con, is_fine_pe, domain_coarse, domain_fine, position, nz, & + time_val, Atm%global_tile, file_prefix, "qcon") + + !call mn_var_dump_to_netcdf(Atm%w , is_fine_pe, domain_coarse, domain_fine, position, nz, & + ! time_val, Atm%global_tile, file_prefix, "WWND") + !call mn_var_dump_to_netcdf(Atm%ua , is_fine_pe, domain_coarse, domain_fine, position, nz, & + ! time_val, Atm%global_tile, file_prefix, "UA") + !call mn_var_dump_to_netcdf(Atm%va , is_fine_pe, domain_coarse, domain_fine, position, nz, & + ! time_val, Atm%global_tile, file_prefix, "VA") + + call mn_var_dump_to_netcdf(Atm%ps , is_fine_pe, domain_coarse, domain_fine, position, 1 , & + time_val, Atm%global_tile, file_prefix, "PS") + + !! TODO figure out what to do with ze0; different bounds - only compute domain + + !! TODO Wind worked fine when in its own file. Can it merge in with the regular file?? + !!call mn_var_dump_to_netcdf(Atm%u, is_fine_pe, domain_coarse, domain_fine, position_u, nz, & + !! time_val, Atm%global_tile, "wxvarU", "UWND") + !!call mn_var_dump_to_netcdf(Atm%v, is_fine_pe, domain_coarse, domain_fine, position_v, nz, & + !! time_val, Atm%global_tile, "wxvarU", "VWND") + + ! Latitude and longitude in radians + call mn_var_dump_to_netcdf( Atm%gridstruct%agrid(:,:,2), is_fine_pe, domain_coarse, domain_fine, position, nz, & + time_val, Atm%global_tile, file_prefix, "latrad") + call mn_var_dump_to_netcdf( Atm%gridstruct%agrid(:,:,1), is_fine_pe, domain_coarse, domain_fine, position, nz, & + time_val, Atm%global_tile, file_prefix, "lonrad") + + !do n_moist = lbound(Atm%q, 4), ubound(Atm%q, 4) + ! call get_tracer_names(MODEL_ATMOS, n_moist, out_var_name) + ! call mn_var_dump_to_netcdf( Atm%q(:,:,:,n_moist), is_fine_pe, domain_coarse, domain_fine, position, nz, & + ! time_val, Atm%global_tile, file_prefix, trim(out_var_name)) + !enddo + + end subroutine mn_prog_dump_to_netcdf + + + !! Step 8 -- Moving Nest Output Individual Variables + + !>@brief The subroutine 'mn_var_dump_3d_to_netcdf' dumps a 3D single precision variable to netCDF file. + subroutine mn_var_dump_3d_to_netcdf( data_var, is_fine_pe, domain_coarse, domain_fine, position, nz, time_step, this_tile, file_prefix, var_name) + real, intent(in) :: data_var(:,:,:) !< Single precision model variable + logical, intent(in) :: is_fine_pe !< Is nest PE? 
+ type(domain2d), intent(in) :: domain_coarse, domain_fine !< Domain structures
+ integer, intent(in) :: position, nz, time_step, this_tile !< Stagger, number of vertical levels, timestep, tile number
+ character(len=*) :: file_prefix, var_name !< Filename prefix, and netCDF variable name
+
+ integer :: isd_coarse, ied_coarse, jsd_coarse, jed_coarse
+ integer :: isd_fine, ied_fine, jsd_fine, jed_fine
+ integer :: this_pe
+ character(len=64) :: prefix_fine, prefix_coarse
+
+ this_pe = mpp_pe()
+
+ prefix_fine = trim(file_prefix) // "_fine"
+ prefix_coarse = trim(file_prefix) // "_coarse"
+
+ !!===========================================================
+ !!
+ !! Output the grid data from both nest grids and parent grids to netCDF
+ !!
+ !!===========================================================
+
+ if (is_fine_pe) then
+ call mpp_get_data_domain(domain_fine, isd_fine, ied_fine, jsd_fine, jed_fine, position=position)
+
+ if (debug_log) print '("[INFO] WDR NRF FG mn_var_dump_to_netcdf start. npe=",I0," size of x=(",I0,",",I0,",",I0,")")', this_pe, size(data_var,1), size(data_var,2), size(data_var,3)
+ if (debug_log) print '("[INFO] WDR NRF FG mn_var_dump_to_netcdf start. npe=",I0," Data domain i=",I0,"-",I0," j=",I0,"-",I0," (",I0,",",I0,")")', &
+ this_pe, isd_fine, ied_fine, jsd_fine, jed_fine, ied_fine - isd_fine + 1, jed_fine - jsd_fine + 1
+
+ call output_grid_to_nc("GH", isd_fine, ied_fine, jsd_fine, jed_fine, nz, data_var, prefix_fine, var_name, time_step, domain_fine, position)
+
+ else
+ if (this_tile == 6) then
+ !call mpp_get_compute_domain(domain_coarse, isc_coarse, iec_coarse, jsc_coarse, jec_coarse, position=position)
+ call mpp_get_data_domain(domain_coarse, isd_coarse, ied_coarse, jsd_coarse, jed_coarse, position=position)
+ !call mpp_get_memory_domain(domain_coarse, ism_coarse, iem_coarse, jsm_coarse, jem_coarse, position=position)
+
+ if (debug_log) print '("[INFO] WDR NRF CG mn_var_dump_to_netcdf start. npe=",I0," size of x=(",I0,",",I0,",",I0,")")', this_pe, size(data_var,1), size(data_var,2), size(data_var,3)
+ if (debug_log) print '("[INFO] WDR NRF CG mn_var_dump_to_netcdf start. npe=",I0," Data domain i=",I0,"-",I0," j=",I0,"-",I0," (",I0,",",I0,")")', &
+ this_pe, isd_coarse, ied_coarse, jsd_coarse, jed_coarse, ied_coarse - isd_coarse + 1, jed_coarse - jsd_coarse + 1
+ !if (debug_log) print '("[INFO] WDR NRF CG mn_var_dump_to_netcdf start. npe=",I0," Compute domain i=",I0,"-",I0," j=",I0,"-",I0," (",I0,",",I0,")")', &
+ ! this_pe, isc_coarse, iec_coarse, jsc_coarse, jec_coarse, iec_coarse - isc_coarse + 1, jec_coarse - jsc_coarse + 1
+ !if (debug_log) print '("[INFO] WDR NRF CG mn_var_dump_to_netcdf start. npe=",I0," Memory domain i=",I0,"-",I0," j=",I0,"-",I0," (",I0,",",I0,")")', &
+ ! this_pe, ism_coarse, iem_coarse, jsm_coarse, jem_coarse, iem_coarse - ism_coarse + 1, jem_coarse - jsm_coarse + 1
+
+ call output_grid_to_nc("GH", isd_coarse, ied_coarse, jsd_coarse, jed_coarse, nz, data_var, prefix_coarse, var_name, time_step, domain_coarse, position)
+
+ endif
+ endif
+
+ end subroutine mn_var_dump_3d_to_netcdf
+
+ !>@brief The subroutine 'mn_var_dump_2d_to_netcdf' dumps a 2D single precision variable to netCDF file.
+ subroutine mn_var_dump_2d_to_netcdf( data_var, is_fine_pe, domain_coarse, domain_fine, position, nz, time_step, this_tile, file_prefix, var_name)
+ implicit none
+ real, intent(in) :: data_var(:,:) !< Data variable
+ logical, intent(in) :: is_fine_pe !< Is nest PE?
+ type(domain2d), intent(in) :: domain_coarse, domain_fine !< Domain structures + integer, intent(in) :: position, nz, time_step, this_tile !< Stagger, number vertical levels, timestep, tile number + character(len=*) :: file_prefix, var_name !< Filename prefix, and netCDF variable name + + integer :: isc_coarse, iec_coarse, jsc_coarse, jec_coarse + integer :: isd_coarse, ied_coarse, jsd_coarse, jed_coarse + integer :: isd_fine, ied_fine, jsd_fine, jed_fine + integer :: isc_fine, iec_fine, jsc_fine, jec_fine + + integer :: ism_coarse, iem_coarse, jsm_coarse, jem_coarse + integer :: ism_fine, iem_fine, jsm_fine, jem_fine + + integer :: this_pe + + character(len=64) :: prefix_fine, prefix_coarse + + this_pe = mpp_pe() + + prefix_fine = trim(file_prefix) // "_fine" + prefix_coarse = trim(file_prefix) // "_coarse" + + !!=========================================================== + !! + !! Output the grid data from both nest grids and parent grids to netCDF + !! + !!=========================================================== + + if (is_fine_pe) then + ! Maybe don't need to call mpp_get_compute_domain here? + !call mpp_get_compute_domain(domain_fine, isc_fine, iec_fine, jsc_fine, jec_fine, position=position) + call mpp_get_data_domain(domain_fine, isd_fine, ied_fine, jsd_fine, jed_fine, position=position) + !call mpp_get_memory_domain(domain_fine, ism_fine, iem_fine, jsm_fine, jem_fine, position=position) + + if (debug_log) print '("[INFO] WDR NRF FG mn_var_dump_to_netcdf start. npe=",I0," size of x=(",I0,",",I0,")")', this_pe, size(data_var,1), size(data_var,2) + if (debug_log) print '("[INFO] WDR NRF FG mn_var_dump_to_netcdf start. npe=",I0," Data domain i=",I0,"-",I0," j=",I0,"-",I0," (",I0,",",I0,")")', & + this_pe, isd_fine, ied_fine, jsd_fine, jed_fine, ied_fine - isd_fine + 1, jed_fine - jsd_fine + 1 + !if (debug_log) print '("[INFO] WDR NRF FG mn_var_dump_to_netcdf start. npe=",I0," Compute domain i=",I0,"-",I0," j=",I0,"-",I0," (",I0,",",I0,")")', & + ! this_pe, isc_fine, iec_fine, jsc_fine, jec_fine, iec_fine - isc_fine + 1, jec_fine - jsc_fine + 1 + !if (debug_log) print '("[INFO] WDR NRF FG mn_var_dump_to_netcdf start. npe=",I0," Memory domain i=",I0,"-",I0," j=",I0,"-",I0," (",I0,",",I0,")")', & + ! this_pe, ism_fine, iem_fine, jsm_fine, jem_fine, iem_fine - ism_fine + 1, jem_fine - jsm_fine + 1 + + call output_grid_to_nc("GH", isd_fine, ied_fine, jsd_fine, jed_fine, nz, data_var, prefix_fine, var_name, time_step, domain_fine, position) + else + + if (this_tile == 6) then + !call mpp_get_compute_domain(domain_coarse, isc_coarse, iec_coarse, jsc_coarse, jec_coarse, position=position) + call mpp_get_data_domain(domain_coarse, isd_coarse, ied_coarse, jsd_coarse, jed_coarse, position=position) + !call mpp_get_memory_domain(domain_coarse, ism_coarse, iem_coarse, jsm_coarse, jem_coarse, position=position) + + if (debug_log) print '("[INFO] WDR NRF CG mn_var_dump_to_netcdf start. npe=",I0," size of x=(",I0,",",I0,")")', this_pe, size(data_var,1), size(data_var,2) + if (debug_log) print '("[INFO] WDR NRF CG mn_var_dump_to_netcdf start. npe=",I0," Data domain i=",I0,"-",I0," j=",I0,"-",I0," (",I0,",",I0,")")', & + this_pe, isd_coarse, ied_coarse, jsd_coarse, jed_coarse, ied_coarse - isd_coarse + 1, jed_coarse - jsd_coarse + 1 + !if (debug_log) print '("[INFO] WDR NRF CG mn_var_dump_to_netcdf start. npe=",I0," Compute domain i=",I0,"-",I0," j=",I0,"-",I0," (",I0,",",I0,")")', & + ! 
this_pe, isc_coarse, iec_coarse, jsc_coarse, jec_coarse, iec_coarse - isc_coarse + 1, jec_coarse - jsc_coarse + 1 + !if (debug_log) print '("[INFO] WDR NRF CG mn_var_dump_to_netcdf start. npe=",I0," Memory domain i=",I0,"-",I0," j=",I0,"-",I0," (",I0,",",I0,")")', & + ! this_pe, ism_coarse, iem_coarse, jsm_coarse, jem_coarse, iem_coarse - ism_coarse + 1, jem_coarse - jsm_coarse + 1 + + call output_grid_to_nc("GH", isd_coarse, ied_coarse, jsd_coarse, jed_coarse, nz, data_var, prefix_coarse, var_name, time_step, domain_coarse, position) + + endif + endif + + end subroutine mn_var_dump_2d_to_netcdf + + + !!========================================================================================= + !! Step 9 -- Perform vertical remapping on nest(s) and recalculate auxiliary pressures + !! Should help stabilize the fields before dynamics runs + !!========================================================================================= + + !>@brief The subroutine 'recalc_aux_pressures' updates auxiliary pressures after a nest move. + subroutine recalc_aux_pressures(Atm) + type(fv_atmos_type), intent(inout) :: Atm !< Single Atm structure + + ! Update the auxiliary pressure variables + ! In nest moving code, we moved delp and delz; this will update ps, pk, pe, peln, and pkz + ! Note this routine makes hydrostatic calculations (but has non-hydrostatic branches) + ! Perhaps not appropriate for a non-hydrostatic run. + ! May need to find or write a non-hydrostatic version of this routine + + ! TODO determine if this is the correct way to recalculate the auxiliary pressure variables + + call p_var(Atm%npz, Atm%bd%is, Atm%bd%ie, Atm%bd%js, Atm%bd%je, Atm%ptop, ptop_min, & + Atm%delp, Atm%delz, & + Atm%pt, Atm%ps, & + Atm%pe, Atm%peln, & + Atm%pk, Atm%pkz, kappa, & + Atm%q, Atm%ng, Atm%flagstruct%ncnst, Atm%gridstruct%area_64, 0., & + .false., .false., & !mountain argument not used + Atm%flagstruct%moist_phys, Atm%flagstruct%hydrostatic, & + Atm%flagstruct%nwat, Atm%domain, .false.) + + end subroutine recalc_aux_pressures + + + !================================================================================================== + ! + ! Utility Section -- After Step 9 + ! + !================================================================================================== + + !>@brief The subroutine 'init_ijk_mem' was copied from dyn_core.F90 to avoid circular dependencies + subroutine init_ijk_mem(i1, i2, j1, j2, km, array, var) + integer, intent(in):: i1, i2, j1, j2, km + real, intent(inout):: array(i1:i2,j1:j2,km) + real, intent(in):: var + integer:: i, j, k + + !$OMP parallel do default(none) shared(i1,i2,j1,j2,km,array,var) + do k=1,km + do j=j1,j2 + do i=i1,i2 + array(i,j,k) = var + enddo + enddo + enddo + + end subroutine init_ijk_mem + + !>@brief The function 'almost_equal' tests whether real values are within a tolerance of one another. + function almost_equal(a, b) + logical :: almost_equal + real, intent(in):: a,b + + real :: tolerance = 0.00001 + + if ( abs(a - b) < tolerance ) then + almost_equal = .true. + else + almost_equal = .false. 
+ endif + end function almost_equal + + + + !>@brief The subroutine 'move_nest_geo' shifts tile_geo values using the data from fp_super_tile_geo + subroutine move_nest_geo(tile_geo, tile_geo_u, tile_geo_v, fp_super_tile_geo, delta_i_c, delta_j_c, x_refine, y_refine) + implicit none + type(grid_geometry), intent(inout) :: tile_geo !< A-grid tile geometry + type(grid_geometry), intent(inout) :: tile_geo_u !< u-wind tile geometry + type(grid_geometry), intent(inout) :: tile_geo_v !< v-wind tile geometry + type(grid_geometry), intent(in) :: fp_super_tile_geo !< Parent high-resolution supergrid tile geometry + integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< delta i,j for nest move. Nest refinement. + + integer :: nest_x, nest_y, parent_x, parent_y + + type(bbox) :: tile_bbox, fp_tile_bbox, tile_bbox_u, tile_bbox_v + integer :: i, j, fp_i, fp_j + + ! tile_geo is cell-centered, at nest refinement + ! fp_super_tile_geo is a supergrid, at nest refinement + + call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + + call fill_bbox(tile_bbox, tile_geo%lats) + call fill_bbox(tile_bbox_u, tile_geo_u%lats) + call fill_bbox(tile_bbox_v, tile_geo_v%lats) + call fill_bbox(fp_tile_bbox, fp_super_tile_geo%lats) + + ! Calculate new parent alignment -- supergrid at the refine ratio + ! delta_{i,j}_c are at the coarse center grid resolution + parent_x = parent_x + delta_i_c * 2 * x_refine + parent_y = parent_y + delta_j_c * 2 * y_refine + + ! Brute force repopulation of full tile_geo grids. + ! Optimization would be to use EOSHIFT and bring in just leading edge + do i = tile_bbox%is, tile_bbox%ie + do j = tile_bbox%js, tile_bbox%je + fp_i = (i - nest_x) * 2 + parent_x + fp_j = (j - nest_y) * 2 + parent_y + + if (fp_i < fp_tile_bbox%is .or. fp_i > fp_tile_bbox%ie) then + if (debug_log) print '("[ERROR] WDR move_nest_geo invalid fp_i=",I0," is=",I0," ie=",I0)', fp_i, fp_tile_bbox%is, fp_tile_bbox%ie + stop ! replace with a fatal error + endif + if (fp_j < fp_tile_bbox%js .or. fp_j > fp_tile_bbox%je) then + if (debug_log) print '("[ERROR] WDR move_nest_geo invalid fp_j=",I0," js=",I0," je=",I0)', fp_j, fp_tile_bbox%js, fp_tile_bbox%je + stop ! replace with a fatal error + endif + + tile_geo%lats(i,j) = fp_super_tile_geo%lats(fp_i, fp_j) + tile_geo%lons(i,j) = fp_super_tile_geo%lons(fp_i, fp_j) + enddo + enddo + + do i = tile_bbox_u%is, tile_bbox_u%ie + do j = tile_bbox_u%js, tile_bbox_u%je + fp_i = (i - nest_x) * 2 + parent_x + fp_j = (j - nest_y) * 2 + parent_y - 1 + + if (fp_i < fp_tile_bbox%is .or. fp_i > fp_tile_bbox%ie) then + if (debug_log) print '("[ERROR] WDR move_nest_geo invalid fp_i=",I0," is=",I0," ie=",I0)', fp_i, fp_tile_bbox%is, fp_tile_bbox%ie + stop ! replace with a fatal error + endif + if (fp_j < fp_tile_bbox%js .or. fp_j > fp_tile_bbox%je) then + if (debug_log) print '("[ERROR] WDR move_nest_geo invalid fp_j=",I0," js=",I0," je=",I0)', fp_j, fp_tile_bbox%js, fp_tile_bbox%je + stop ! replace with a fatal error + endif + + tile_geo_u%lats(i,j) = fp_super_tile_geo%lats(fp_i, fp_j) + tile_geo_u%lons(i,j) = fp_super_tile_geo%lons(fp_i, fp_j) + enddo + enddo + + do i = tile_bbox_v%is, tile_bbox_v%ie + do j = tile_bbox_v%js, tile_bbox_v%je + fp_i = (i - nest_x) * 2 + parent_x - 1 + fp_j = (j - nest_y) * 2 + parent_y + + if (fp_i < fp_tile_bbox%is .or. fp_i > fp_tile_bbox%ie) then + if (debug_log) print '("[ERROR] WDR move_nest_geo invalid fp_i=",I0," is=",I0," ie=",I0)', fp_i, fp_tile_bbox%is, fp_tile_bbox%ie + stop ! 
replace with a fatal error + endif + if (fp_j < fp_tile_bbox%js .or. fp_j > fp_tile_bbox%je) then + if (debug_log) print '("[ERROR] WDR move_nest_geo invalid fp_j=",I0," js=",I0," je=",I0)', fp_j, fp_tile_bbox%js, fp_tile_bbox%je + stop ! replace with a fatal error + endif + + tile_geo_v%lats(i,j) = fp_super_tile_geo%lats(fp_i, fp_j) + tile_geo_v%lons(i,j) = fp_super_tile_geo%lons(fp_i, fp_j) + enddo + enddo + + ! Validate at the end + call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + + end subroutine move_nest_geo + + !>@brief The subroutine 'assign_n_p_grids' sets values for parent and nest grid arrays from the grid_geometry structures. + subroutine assign_n_p_grids(parent_geo, tile_geo, p_grid, n_grid, position) + type(grid_geometry), intent(in) :: parent_geo, tile_geo !< Parent geometry, nest geometry + real(kind=R_GRID), allocatable, intent(inout) :: p_grid(:,:,:) !< Parent grid + real(kind=R_GRID), allocatable, intent(inout) :: n_grid(:,:,:) !< Nest grid + integer, intent(in) :: position !< Grid offset + + integer :: i,j + + if (position == CENTER) then + do j = lbound(tile_geo%lats,2), ubound(tile_geo%lats,2) + do i = lbound(tile_geo%lats,1), ubound(tile_geo%lats,1) + ! centered grid version + n_grid(i, j, 1) = tile_geo%lons(i, j) + n_grid(i, j, 2) = tile_geo%lats(i, j) + !if (debug_log) print '("[INFO] WDR populate ngrid npe=",I0, I4,I4, F12.4, F12.4)', this_pe, i, j, n_grid(i,j,1), n_grid(i,j,2) + enddo + enddo + + do j = 1, parent_geo%ny + do i = 1, parent_geo%nx + ! centered grid version + p_grid(i, j, 1) = parent_geo%lons(2*i, 2*j) + p_grid(i, j, 2) = parent_geo%lats(2*i, 2*j) + enddo + enddo + + ! u(npx, npy+1) + elseif (position == NORTH) then ! u wind on D-stagger + do j = lbound(tile_geo%lats,2), ubound(tile_geo%lats,2) + do i = lbound(tile_geo%lats,1), ubound(tile_geo%lats,1) + ! centered grid version + n_grid(i, j, 1) = tile_geo%lons(i, j) + n_grid(i, j, 2) = tile_geo%lats(i, j) + !if (debug_log) print '("[INFO] WDR populate ngrid_u npe=",I0, I4,I4, F12.4, F12.4)', this_pe, i, j, n_grid(i,j,1), n_grid(i,j,2) + enddo + enddo + + do j = 1, parent_geo%ny + do i = 1, parent_geo%nx + ! centered grid version + p_grid(i, j, 1) = parent_geo%lons(2*i, 2*j-1) + p_grid(i, j, 2) = parent_geo%lats(2*i, 2*j-1) + enddo + enddo + + ! v(npx+1, npy) + elseif (position == EAST) then ! v wind on D-stagger + do j = lbound(tile_geo%lats,2), ubound(tile_geo%lats,2) + do i = lbound(tile_geo%lats,1), ubound(tile_geo%lats,1) + ! centered grid version + n_grid(i, j, 1) = tile_geo%lons(i, j) + n_grid(i, j, 2) = tile_geo%lats(i, j) + !if (debug_log) print '("[INFO] WDR populate ngrid_v npe=",I0, I4,I4, F12.4, F12.4)', this_pe, i, j, n_grid(i,j,1), n_grid(i,j,2) + enddo + enddo + + do j = 1, parent_geo%ny + do i = 1, parent_geo%nx + ! 
centered grid version + p_grid(i, j, 1) = parent_geo%lons(2*i-1, 2*j) + p_grid(i, j, 2) = parent_geo%lats(2*i-1, 2*j) + enddo + enddo + + endif + + end subroutine assign_n_p_grids + + + !>@brief The subroutine 'calc_nest_halo_weights' calculates the interpolation weights + !>@details Computationally demanding; target for optimization after nest moves + subroutine calc_nest_halo_weights(bbox_fine, bbox_coarse, p_grid, n_grid, wt, istart_coarse, jstart_coarse, x_refine, y_refine) + implicit none + + type(bbox), intent(in) :: bbox_coarse, bbox_fine !< Bounding boxes of parent and nest + real(kind=R_GRID), allocatable, intent(in) :: p_grid(:,:,:), n_grid(:,:,:) !< Latlon rids of parent and nest in radians + real, allocatable, intent(inout) :: wt(:,:,:) !< Interpolation weight array + integer, intent(in) :: istart_coarse, jstart_coarse, x_refine, y_refine !< Offsets and nest refinements + + integer :: i,j, ic, jc + real :: dist1, dist2, dist3, dist4, sum + logical :: verbose = .false. + !logical :: verbose = .true. + + integer :: this_pe + + real(kind=R_GRID) :: pi = 4 * atan(1.0d0) + real :: pi180 + real :: rad2deg, deg2rad + + pi180 = pi / 180.0 + deg2rad = pi / 180.0 + rad2deg = 1.0 / pi180 + + this_pe = mpp_pe() + + if ( bbox_coarse%is == 0 .and. bbox_coarse%ie == -1 ) then + ! Skip this one + if (debug_log) print '("[INFO] WDR skip calc weights npe=",I0)', this_pe + + else + if (debug_log) print '("[INFO] WDR run calc weights npe=",I0)', this_pe + + ! Calculate the bounding parent grid points for the nest grid point + ! Rely on the nest being aligned + ! code is from $CUBE/tools/fv_grid_tools.F90 + ! + + do j = bbox_fine%js, bbox_fine%je + ! F90 integer division truncates + jc = jstart_coarse + (j + y_refine/2 + 1) / y_refine + do i = bbox_fine%is, bbox_fine%ie + ic = istart_coarse + (i + x_refine/2 + 1) / x_refine + + if (verbose) then + if (debug_log) print '("[INFO] WDR MAP npe=",I0," istart_coarse, jstart_coarse, ic,if,jc,jf",I3,I3," ",I3,I3,I3,I3)', this_pe, istart_coarse, jstart_coarse,ic,i,jc,j + + if (debug_log) print '("[INFO] WDR LATLON npe=",I0," p_grid(",I3,I3,")",F8.2,F8.2, F8.2)', this_pe, ic, jc, rad2deg*p_grid(ic,jc,1)-360.0 , rad2deg*p_grid(ic,jc,2), rad2deg*p_grid(ic,jc,1) + if (debug_log) print '("[INFO] WDR LATLON npe=",I0," nest n_grid(",I3,I3,") ",F8.2,F8.2, F8.2)', this_pe, i, j, rad2deg*n_grid(i,j,1)-360.0, rad2deg*n_grid(i,j,2), rad2deg*n_grid(i,j,1) + + if (debug_log) print '("[INFO] WDR LOC npe=",I0," -------------------")', this_pe + if (debug_log) print '("[INFO] WDR LOC npe=",I0," A p_grid(",I3,I3,")",F12.6,F12.6, F12.6)', this_pe, ic, jc, rad2deg*p_grid(ic,jc,1)-360.0, rad2deg*p_grid(ic,jc,2), rad2deg*p_grid(ic,jc,1) + if (debug_log) print '("[INFO] WDR LOC npe=",I0," B p_grid(",I3,I3,")",F12.6,F12.6, F12.6)', this_pe, ic, jc+1, rad2deg*p_grid(ic,jc+1,1)-360.0, rad2deg*p_grid(ic,jc+1,2), rad2deg*p_grid(ic,jc+1,1) + if (debug_log) print '("[INFO] WDR LOC npe=",I0," C p_grid(",I3,I3,")",F12.6,F12.6, F12.6)', this_pe, ic+1, jc+1, rad2deg*p_grid(ic+1,jc+1,1)-360.0, rad2deg*p_grid(ic+1,jc+1,2), rad2deg*p_grid(ic+1,jc+1,1) + if (debug_log) print '("[INFO] WDR LOC npe=",I0," D p_grid(",I3,I3,")",F12.6,F12.6, F12.6)', this_pe, ic+1, jc, rad2deg*p_grid(ic+1,jc,1)-360.0, rad2deg*p_grid(ic+1,jc,2), rad2deg*p_grid(ic+1,jc,1) + if (debug_log) print '("[INFO] WDR LOC npe=",I0," nest n_grid(",I3,I3,") ",F12.6,F12.6, F12.6)', this_pe, i, j, rad2deg*n_grid(i,j,1)-360.0, rad2deg*n_grid(i,j,2), rad2deg*n_grid(i,j,1) + endif + + ! 
dist2side_latlon takes points in longitude-latitude coordinates. + dist1 = dist2side_latlon(p_grid(ic,jc,:), p_grid(ic,jc+1,:), n_grid(i,j,:)) + + if (verbose) then + if (debug_log) print '("[INFO] WDR LATLON npe=",I0," dist1=",F9.4," p_grid(",I3,I3,")=",F9.4,F9.4," p_grid(",I3,I3,")=",F9.4,F9.4," n_grid(",I3,I3,")=",F9.4,F9.4)', this_pe, dist1, ic, jc, rad2deg*p_grid(ic,jc,1)-360.0, rad2deg*p_grid(ic,jc,2), ic, jc+1, rad2deg*p_grid(ic,jc+1,1)-360.0, rad2deg*p_grid(ic,jc+1,2), i, j, rad2deg*n_grid(i,j,1)-360.0, rad2deg*n_grid(i,j,2) + endif + dist2 = dist2side_latlon(p_grid(ic,jc+1,:), p_grid(ic+1,jc+1,:), n_grid(i,j,:)) + dist3 = dist2side_latlon(p_grid(ic+1,jc+1,:), p_grid(ic+1,jc,:), n_grid(i,j,:)) + dist4 = dist2side_latlon(p_grid(ic,jc,:), p_grid(ic+1,jc,:), n_grid(i,j,:)) + + !if (debug_log) print '("[INFO] WDR LATLON npe=",I0," dists at (",I3,I3,"): dist: ",F12.4, F12.4, F12.4, F12.4)', this_pe, i, j, dist1*RADIUS, dist2*RADIUS, dist3*RADIUS, dist4*RADIUS + if (verbose) then + if (debug_log) print '("[INFO] WDR LATLON npe=",I0," dists at (",I3,I3,"): dist: ",F12.4, F12.4, F12.4, F12.4)', this_pe, i, j, dist1, dist2, dist3, dist4 + endif + + wt(i,j,1)=dist2*dist3 ! ic, jc weight + wt(i,j,2)=dist3*dist4 ! ic, jc+1 weight + wt(i,j,3)=dist4*dist1 ! ic+1, jc+1 weight + wt(i,j,4)=dist1*dist2 ! ic+1, jc weight + + sum=wt(i,j,1)+wt(i,j,2)+wt(i,j,3)+wt(i,j,4) + wt(i,j,:)=wt(i,j,:)/sum + + if (verbose) then + if (debug_log) print '("[INFO] WDR LATLON npe=",I0," sum (",I3,I3,"): ",F12.2," wt: ",F12.6, F12.6, F12.6, F12.6)', this_pe, i, j, sum, wt(i,j,1), wt(i,j,2), wt(i,j,3), wt(i,j,4) + endif + + enddo + enddo + endif + + if (debug_log) print '("[INFO] WDR DONE calc weights npe=",I0)', this_pe + + end subroutine calc_nest_halo_weights + +#endif ! MOVING_NEST + +end module fv_moving_nest_mod + diff --git a/moving_nest/fv_moving_nest_main.F90 b/moving_nest/fv_moving_nest_main.F90 new file mode 100644 index 000000000..c8691db18 --- /dev/null +++ b/moving_nest/fv_moving_nest_main.F90 @@ -0,0 +1,1199 @@ +!*********************************************************************** +!* GNU Lesser General Public License +!* +!* This file is part of the FV3 dynamical core. +!* +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. +!* +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANYWARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with the FV3 dynamical core. +!* If not, see . +!*********************************************************************** + +!*********************************************************************** +!> @file +!! @brief Provides top-level interface for moving nest functionality +!! @author W. Ramstrom, AOML/HRD 05/27/2021 +!! @email William.Ramstrom@noaa.gov +! =======================================================================! + +module fv_moving_nest_main_mod +#ifdef MOVING_NEST + +#include + + !----------------- + ! 
FMS modules: + !----------------- + use block_control_mod, only: block_control_type + use constants_mod, only: cp_air, rdgas, grav, rvgas, kappa, pstd_mks + use time_manager_mod, only: time_type, get_time, get_date, set_time, operator(+), & + operator(-), operator(/), time_type_to_real + use fms_mod, only: file_exist, open_namelist_file, & + close_file, error_mesg, FATAL, & + check_nml_error, stdlog, & + write_version_number, & + mpp_clock_id, mpp_clock_begin, & + mpp_clock_end, CLOCK_SUBCOMPONENT, & + clock_flag_default + use mpp_mod, only: mpp_error, stdout, FATAL, WARNING, NOTE, & + input_nml_file, mpp_root_pe, & + mpp_npes, mpp_pe, mpp_chksum, & + mpp_get_current_pelist, & + mpp_set_current_pelist, mpp_sync + use mpp_parameter_mod, only: EUPDATE, WUPDATE, SUPDATE, NUPDATE + use mpp_domains_mod, only: domain2d, mpp_update_domains + use xgrid_mod, only: grid_box_type + use field_manager_mod, only: MODEL_ATMOS + use tracer_manager_mod, only: get_tracer_index, get_number_tracers, & + NO_TRACER, get_tracer_names + use DYCORE_typedefs, only: DYCORE_data_type +#ifdef GFS_TYPES + use GFS_typedefs, only: IPD_data_type => GFS_data_type, & + IPD_control_type => GFS_control_type, kind_phys +#else + use IPD_typedefs, only: IPD_data_type, IPD_control_type, kind_phys => IPD_kind_phys +#endif + + use fv_iau_mod, only: IAU_external_data_type +#ifdef MULTI_GASES + use multi_gases_mod, only: virq, virq_max, num_gas, ri, cpi +#endif + + !----------------- + ! FV core modules: + !----------------- + use atmosphere_mod, only: Atm, mygrid, p_split, dt_atmos + use fv_arrays_mod, only: fv_atmos_type, R_GRID, fv_grid_bounds_type, phys_diag_type + use fv_moving_nest_types_mod, only: allocate_fv_moving_nest_prog_type, allocate_fv_moving_nest_physics_type + use fv_moving_nest_types_mod, only: Moving_nest + use fv_diagnostics_mod, only: fv_diag_init, fv_diag_reinit, fv_diag, fv_time, prt_maxmin, prt_height + use fv_restart_mod, only: fv_restart, fv_write_restart + use fv_timing_mod, only: timing_on, timing_off + use fv_mp_mod, only: is_master + use fv_regional_mod, only: start_regional_restart, read_new_bc_data, a_step, p_step, current_time_in_seconds + + !----------------------------------------- + ! External routines + !----------------------------------------- + use fms_io_mod, only: fms_io_exit + use mpp_domains_mod, only: NORTH, NORTH_EAST, EAST, SOUTH_EAST, CORNER, CENTER + use mpp_domains_mod, only: nest_domain_type + use mpp_mod, only: mpp_sync, mpp_exit + use mpp_domains_mod, only: mpp_get_global_domain + use mpp_mod, only: mpp_send, mpp_sync_self, mpp_broadcast + + use fv_mp_mod, only: global_nest_domain + + use tracer_manager_mod, only: get_tracer_names + use field_manager_mod, only: MODEL_ATMOS + use fv_io_mod, only: fv_io_exit + !!use fv_restart_mod, only: d2c_setup + + !------------------------------------ + ! Moving Nest Routines + !------------------------------------ + + ! Prognostic variable routines + use fv_moving_nest_mod, only: mn_prog_fill_intern_nest_halos, mn_prog_fill_nest_halos_from_parent, & + mn_prog_dump_to_netcdf, mn_prog_shift_data + ! Physics variable routines + use fv_moving_nest_physics_mod, only: mn_phys_fill_intern_nest_halos, mn_phys_fill_nest_halos_from_parent, & + mn_phys_dump_to_netcdf, mn_phys_shift_data, mn_phys_reset_sfc_props, move_nsst + + ! Metadata routines + use fv_moving_nest_mod, only: mn_meta_move_nest, mn_meta_recalc, mn_meta_reset_gridstruct, mn_shift_index + + ! 
Temporary variable routines (delz) + use fv_moving_nest_mod, only: mn_prog_fill_temp_variables, mn_prog_apply_temp_variables + use fv_moving_nest_physics_mod, only: mn_phys_fill_temp_variables, mn_phys_apply_temp_variables + + ! Load static datasets + use fv_moving_nest_mod, only: mn_latlon_read_hires_parent, mn_latlon_load_parent + use fv_moving_nest_mod, only: mn_orog_read_hires_parent, mn_static_read_hires + use fv_moving_nest_utils_mod, only: load_nest_latlons_from_nc, compare_terrain, set_smooth_nest_terrain, set_blended_terrain + + use fv_moving_nest_physics_mod, only: mn_reset_phys_latlon, mn_surface_grids + + ! Grid reset routines + use fv_moving_nest_mod, only: grid_geometry, assign_n_p_grids, move_nest_geo + use fv_moving_nest_utils_mod, only: fill_grid_from_supergrid, fill_weight_grid + + ! Physics moving logical variables + use fv_moving_nest_physics_mod, only: move_physics, move_nsst + + ! Recalculation routines + use fv_moving_nest_mod, only: reallocate_BC_buffers, recalc_aux_pressures + + ! Logging and debugging information + use fv_moving_nest_mod, only: check_array + use fv_moving_nest_utils_mod, only: show_atm, show_atm_grids, show_tile_geo, show_nest_grid, show_gridstruct, grid_equal + use fv_moving_nest_utils_mod, only: validate_hires_parent + + use fv_tracker_mod, only: Tracker, allocate_tracker + + implicit none + + !----------------------------------------------------------------------- + ! version number of this module + ! Include variable "version" to be written to log file. +#include + character(len=20) :: mod_name = 'fvGFS/fv_moving_nest_main_mod' + +#ifdef OVERLOAD_R4 + real, parameter:: real_snan=x'FFBFFFFF' +#else + real, parameter:: real_snan=x'FFF7FFFFFFFFFFFF' +#endif + + ! Enable these for more debugging outputs + logical :: debug_log = .false. ! Produces logging to out.* file + logical :: tsvar_out = .false. ! Produces netCDF outputs; be careful to not exceed file number limits set in namelist + + ! --- Clock ids for moving_nest performance metering + integer :: id_movnest1, id_movnest1_9, id_movnest2, id_movnest3, id_movnest4, id_movnest5 + integer :: id_movnest6, id_movnest7_0, id_movnest7_1, id_movnest7_2, id_movnest7_3, id_movnest8, id_movnest9 + integer :: id_movnestTot + logical :: use_timers = .False. ! Set this to true for detailed performance profiling. False only profiles total moving nest time. + integer, save :: output_step = 0 + +contains + + !>@brief The subroutine 'update_moving_nest' decides whether the nest should be moved, and if so, performs the move. + !>@details This subroutine evaluates the automatic storm tracker (or prescribed motion configuration), then decides + !! if the nest should be moved. If it should be moved, it calls fv_moving_nest_exec() to perform the nest move. + subroutine update_moving_nest(Atm_block, IPD_control, IPD_data, time_step) + type(block_control_type), intent(in) :: Atm_block !< Physics block layout + type(IPD_control_type), intent(in) :: IPD_control !< Physics metadata + type(IPD_data_type), intent(inout) :: IPD_data(:) !< Physics variable data + type(time_type), intent(in) :: time_step !< Current timestep + + logical :: do_move + integer :: delta_i_c, delta_j_c + integer :: parent_grid_num, child_grid_num, nest_num + integer, allocatable :: global_pelist(:) + integer :: n + integer :: this_pe + + this_pe = mpp_pe() + + do_move = .false. + + ! dt_atmos was initialized in atmosphere.F90::atmosphere_init() + + n = mygrid ! Public variable from atmosphere.F90 + + ! 
Hard-coded for now - these will need to be looked up on each PE when multiple and telescoped nests are enabled. + parent_grid_num = 1 + child_grid_num = 2 + nest_num = 1 + + call eval_move_nest(Atm, a_step, parent_grid_num, child_grid_num, do_move, delta_i_c, delta_j_c, dt_atmos) + + allocate(global_pelist(Atm(parent_grid_num)%npes_this_grid+Atm(child_grid_num)%npes_this_grid)) + global_pelist=(/Atm(parent_grid_num)%pelist, Atm(child_grid_num)%pelist/) + + call mpp_set_current_pelist(global_pelist) + call mpp_broadcast( delta_i_c, Atm(child_grid_num)%pelist(1), global_pelist ) + call mpp_broadcast( delta_j_c, Atm(child_grid_num)%pelist(1), global_pelist ) + call mpp_broadcast( do_move, Atm(child_grid_num)%pelist(1), global_pelist ) + call mpp_set_current_pelist(Atm(n)%pelist) + + if (do_move) then + call fv_moving_nest_exec(Atm, Atm_block, IPD_control, IPD_data, delta_i_c, delta_j_c, n, nest_num, parent_grid_num, child_grid_num, dt_atmos) + endif + + end subroutine update_moving_nest + + !>@brief The subroutine 'dump_moving_nest' outputs native grid format data to netCDF files + !>@details This subroutine exports model variables using FMS IO to netCDF files if tsvar_out is set to .True. + subroutine dump_moving_nest(Atm_block, IPD_control, IPD_data, time_step) + type(block_control_type), intent(in) :: Atm_block !< Physics block layout + type(IPD_control_type), intent(in) :: IPD_control !< Physics metadata + type(IPD_data_type), intent(in) :: IPD_data(:) !< Physics variable data + type(time_type), intent(in) :: time_step !< Current timestep + + type(domain2d), pointer :: domain_coarse, domain_fine + logical :: is_fine_pe + integer :: parent_grid_num, child_grid_num, nz, this_pe, n + + this_pe = mpp_pe() + n = mygrid + + parent_grid_num = 1 + child_grid_num = 2 + + domain_fine => Atm(child_grid_num)%domain + domain_coarse => Atm(parent_grid_num)%domain + is_fine_pe = Atm(n)%neststruct%nested .and. ANY(Atm(n)%pelist(:) == this_pe) + nz = Atm(n)%npz + + if (debug_log) print '("[INFO] WDR ptbounds 3 atmosphere.F90 npe=",I0," pt(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', this_pe, lbound(Atm(n)%pt,1), ubound(Atm(n)%pt,1), lbound(Atm(n)%pt,2), ubound(Atm(n)%pt,2), lbound(Atm(n)%pt,3), ubound(Atm(n)%pt,3) + + ! Enable this to dump debug netCDF files. Make sure to enable fms_io_exit() in fv_control.F90 so that files are written and closed. + !if (mod(a_step, 20) .eq. 0 ) then + ! if (tsvar_out) call mn_prog_dump_to_netcdf(Atm(n), a_step, "tsavar", is_fine_pe, domain_coarse, domain_fine, nz) + ! if (tsvar_out) call mn_phys_dump_to_netcdf(Atm(n), Atm_block, IPD_control, IPD_data, a_step, "tsavar", is_fine_pe, domain_coarse, domain_fine, nz) + !endif + + end subroutine dump_moving_nest + + !>@brief The subroutine 'fv_moving_nest_init_clocks' intializes performance profiling timers of sections of the moving nest code. + !>@details Starts timers for subcomponents of moving nest code to determine performance. mpp routines group them into separate + !! sections for parent and nest PEs. + subroutine fv_moving_nest_init_clocks() + + ! 
--- initialize clocks for moving_nest + if (use_timers) then + id_movnest1 = mpp_clock_id ('MN Part 1 Init', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest1_9 = mpp_clock_id ('MN Part 1.9 Copy delz', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest2 = mpp_clock_id ('MN Part 2 Fill Halos from Parent', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest3 = mpp_clock_id ('MN Part 3 Meta Move Nest', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest4 = mpp_clock_id ('MN Part 4 Fill Intern Nest Halos', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest5 = mpp_clock_id ('MN Part 5 Recalc Weights', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest6 = mpp_clock_id ('MN Part 6 EOSHIFT', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + + id_movnest7_0 = mpp_clock_id ('MN Part 7.0 Recalc gridstruct', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest7_1 = mpp_clock_id ('MN Part 7.1 Refill halos from Parent', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest7_2 = mpp_clock_id ('MN Part 7.2 Refill Intern Nest Halos', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest7_3 = mpp_clock_id ('MN Part 7.3 Fill delz', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + + id_movnest8 = mpp_clock_id ('MN Part 8 Dump to netCDF', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest9 = mpp_clock_id ('MN Part 9 Aux Pressure', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + endif + + id_movnestTot = mpp_clock_id ('Moving Nest Total', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + end subroutine fv_moving_nest_init_clocks + + !>@brief The subroutine 'eval_move_nest' determines whether the nest should be moved and in which direction. + !>@details This subroutine can execute prescribed motion or automated storm tracking based on namelist options. + subroutine eval_move_nest(Atm, a_step, parent_grid_num, child_grid_num, do_move, delta_i_c, delta_j_c, dt_atmos) + type(fv_atmos_type), intent(inout) :: Atm(:) !< Input atmospheric data + integer, intent(in) :: a_step !< Timestep + integer, intent(in) :: parent_grid_num, child_grid_num !< Grid numbers of parent and child + logical, intent(out) :: do_move !< Logical for whether to move nest + integer, intent(out) :: delta_i_c, delta_j_c !< Each can be -1, 0, or +1 + real, intent(in) :: dt_atmos !< only needed for the simple version of this subroutine + + integer :: n + integer :: cx, cy + real :: xdiff, ydiff + integer :: nest_i_c, nest_j_c + integer :: nis, nie, njs, nje + integer :: this_pe + character*255 :: message + + ! On the tropical channel configuration, tile 6 numbering starts at 0,0 off the coast of Spain + ! delta_i_c = +1 is westward + ! delta_i_c = -1 is eastward + ! + ! delta_j_c = +1 is southward + ! delta_j_c = -1 is northward + + this_pe = mpp_pe() + n = mygrid ! Public variable from atmosphere.F90 + do_move = .false. + delta_i_c = 0 + delta_j_c = 0 + + if ( Moving_nest(n)%mn_flag%vortex_tracker .eq. 0 .or. Atm(n)%grid_number .eq. 1) then + ! No need to move + do_move = .false. + delta_i_c = 0 + delta_j_c = 0 + else if ( Moving_nest(n)%mn_flag%vortex_tracker .eq. 1 ) then + ! Prescribed move according to ntrack, move_cd_x and move_cd_y + ! Move every ntrack of dt_atmos time step + if ( mod(a_step,Moving_nest(n)%mn_flag%ntrack) .eq. 0) then + do_move = .true. 
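+ ! Worked example (illustrative values only): with ntrack = 4, dt_atmos = 90 s, and
+ ! (move_cd_x, move_cd_y) = (1, 0), the nest shifts one parent grid cell in +i every
+ ! 4 * 90 s = 360 s of model time; on this tile orientation +i is westward (see the
+ ! sign convention noted at the top of this routine).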
+ delta_i_c = Moving_nest(n)%mn_flag%move_cd_x + delta_j_c = Moving_nest(n)%mn_flag%move_cd_y + endif + else if ( Moving_nest(n)%mn_flag%vortex_tracker .eq. 2 .or. & + Moving_nest(n)%mn_flag%vortex_tracker .eq. 6 .or. & + Moving_nest(n)%mn_flag%vortex_tracker .eq. 7 ) then + ! Automatic moving following the internal storm tracker + if ( mod(a_step,Moving_nest(n)%mn_flag%ntrack) .eq. 0) then + if(Tracker(n)%tracker_gave_up) then + call mpp_error(NOTE,'Not moving: tracker decided the storm dissapated') + return + endif + if(.not.Tracker(n)%tracker_havefix) then + call mpp_error(NOTE,'Not moving: tracker did not find a storm') + return + endif + ! Calcuate domain center indexes + cx=(Atm(n)%npx-1)/2+1 + cy=(Atm(n)%npy-1)/2+1 + ! Calculate distance in parent grid index space between storm + ! center and domain center + ! Consider using xydiff as integers in the future? + xdiff=(Tracker(n)%tracker_ifix-real(cx))/Atm(n)%neststruct%refinement + ydiff=(Tracker(n)%tracker_jfix-real(cy))/Atm(n)%neststruct%refinement + if(xdiff .ge. 1.0) then + Moving_nest(n)%mn_flag%move_cd_x=1 + else if(xdiff .le. -1.0) then + Moving_nest(n)%mn_flag%move_cd_x=-1 + else + Moving_nest(n)%mn_flag%move_cd_x=0 + endif + if(ydiff .ge. 1.0) then + Moving_nest(n)%mn_flag%move_cd_y=1 + else if(ydiff .le. -1.0) then + Moving_nest(n)%mn_flag%move_cd_y=-1 + else + Moving_nest(n)%mn_flag%move_cd_y=0 + endif + if(abs(Moving_nest(n)%mn_flag%move_cd_x)>0 .or. abs(Moving_nest(n)%mn_flag%move_cd_y)>0) then + call mpp_error(NOTE,'Moving: tracker center shifted from nest center') + do_move = .true. + delta_i_c = Moving_nest(n)%mn_flag%move_cd_x + delta_j_c = Moving_nest(n)%mn_flag%move_cd_y + else + call mpp_error(NOTE,'Not moving: tracker center is near nest center') + do_move = .false. + delta_i_c = 0 + delta_j_c = 0 + endif + endif + else + write(message,*) 'Wrong vortex_tracker option: ', Moving_nest(n)%mn_flag%vortex_tracker + call mpp_error(FATAL,message) + endif + + ! Override to prevent move on first timestep + if (a_step .eq. 0) then + do_move = .false. + delta_i_c = 0 + delta_j_c = 0 + endif + + ! Check whether or not the nest move is permitted + if (n==child_grid_num) then + ! Figure out the bounds of the cube face + + ! x parent bounds: 1 to Atm(parent_grid_num)%flagstruct%npx + ! y parent bounds: 1 to Atm(parent_grid_num)%flagstruct%npy + + ! Figure out the bounds of the nest + + ! x nest bounds: 1 to Atm(child_grid_num)%flagstruct%npx + ! y nest bounds: 1 to Atm(child_grid_num)%flagstruct%npy + + ! Nest refinement: Atm(child_grid_num)%neststruct%refinement + ! Nest starting cell in x direction: Atm(child_grid_num)%neststruct%ioffset + ! Nest starting cell in y direction: Atm(child_grid_num)%neststruct%joffset + + nest_i_c = ( Atm(child_grid_num)%flagstruct%npx - 1 ) / Atm(child_grid_num)%neststruct%refinement + nest_j_c = ( Atm(child_grid_num)%flagstruct%npy - 1 ) / Atm(child_grid_num)%neststruct%refinement + + nis = Atm(child_grid_num)%neststruct%ioffset + delta_i_c + nie = Atm(child_grid_num)%neststruct%ioffset + nest_i_c + delta_i_c + + njs = Atm(child_grid_num)%neststruct%joffset + delta_j_c + nje = Atm(child_grid_num)%neststruct%joffset + nest_j_c + delta_j_c + + ! Will the nest motion push the nest over one of the edges? + ! Handle each direction individually, so that nest could slide along edge + + ! Causes a crash if we use .le. 1 + if (nis .le. Moving_nest(child_grid_num)%mn_flag%corral_x) then + delta_i_c = 0 + ! block_moves = .true. + write(message,*) 'eval_move_nest motion in x direction blocked. 
small nis: ', nis + call mpp_error(WARNING,message) + endif + if (njs .le. Moving_nest(child_grid_num)%mn_flag%corral_y) then + delta_j_c = 0 + ! block_moves = .true. + write(message,*) 'eval_move_nest motion in y direction blocked. small njs: ', njs + call mpp_error(WARNING,message) + endif + + if (nie .ge. Atm(parent_grid_num)%flagstruct%npx - Moving_nest(child_grid_num)%mn_flag%corral_x) then + delta_i_c = 0 + ! block_moves = .true. + write(message,*) 'eval_move_nest motion in x direction blocked. large nie: ', nie + call mpp_error(WARNING,message) + endif + if (nje .ge. Atm(parent_grid_num)%flagstruct%npy - Moving_nest(child_grid_num)%mn_flag%corral_y) then + delta_j_c = 0 + ! block_moves = .true. + write(message,*) 'eval_move_nest motion in y direction blocked. large nje: ', nje + call mpp_error(WARNING,message) + endif + + if (delta_i_c .eq. 0 .and. delta_j_c .eq. 0) then + do_move = .false. + endif + + endif + + write(message, *) 'eval_move_nest: move_cd_x=', delta_i_c, 'move_cd_y=', delta_j_c, 'do_move=', do_move + call mpp_error(NOTE,message) + + end subroutine eval_move_nest + + !>@brief The subroutine 'fv_moving_nest_exec' performs the nest move - most work occurs on nest PEs but some on parent PEs. + !>@details This subroutine shifts the prognostic and physics/surface variables. + !! It also updates metadata and interpolation weights. + subroutine fv_moving_nest_exec(Atm, Atm_block, IPD_control, IPD_data, delta_i_c, delta_j_c, n, nest_num, parent_grid_num, child_grid_num, dt_atmos) + implicit none + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Atmospheric variables + type(block_control_type), intent(in) :: Atm_block !< Physics block + type(IPD_control_type), intent(in) :: IPD_control !< Physics metadata + type(IPD_data_type), intent(inout) :: IPD_data(:) !< Physics variable data + integer, intent(in) :: delta_i_c, delta_j_c !< Nest motion increments + integer, intent(in) :: n, nest_num !< Nest indices + integer, intent(in) :: parent_grid_num, child_grid_num !< Grid numbers + real, intent(in) :: dt_atmos !< Timestep in seconds + + !---- Moving Nest local variables ----- + integer :: this_pe + integer, pointer :: ioffset, joffset + real, pointer, dimension(:,:,:) :: grid, agrid + type(domain2d), pointer :: domain_coarse, domain_fine + real(kind=R_GRID), pointer, dimension(:,:,:,:) :: grid_global + + ! Constants for mpp calls + integer :: position = CENTER + integer :: position_u = NORTH + integer :: position_v = EAST + logical :: do_move = .True. + integer :: x_refine, y_refine ! Currently equal, but allows for future flexibility + logical :: is_fine_pe + + ! TODO read halo size from the namelist instead to allow nest refinement > 3 + integer :: ehalo = 3 + integer :: whalo = 3 + integer :: nhalo = 3 + integer :: shalo = 3 + integer :: extra_halo = 0 ! Extra halo for moving nest routines + + integer :: istart_fine, iend_fine, jstart_fine, jend_fine + integer :: istart_coarse, iend_coarse, jstart_coarse, jend_coarse + integer :: nx, ny, nz, nx_cubic, ny_cubic + integer :: p_istart_fine, p_iend_fine, p_jstart_fine, p_jend_fine + + ! Parent tile data, saved between timesteps + logical, save :: first_nest_move = .true. 
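+ ! Index bookkeeping used throughout the move (illustrative note; example values assumed):
+ ! a nest cell (i,j) addresses the refined full-panel static grids at
+ !   i_idx = (ioffset-1)*x_refine + i,   j_idx = (joffset-1)*y_refine + j
+ ! e.g. with x_refine = 3, ioffset = 10 and nest compute point i = 1,
+ !   i_idx = (10-1)*3 + 1 = 28
+ ! so shifting ioffset by one parent cell slides this window by x_refine fine cells.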
+ type(grid_geometry), save :: parent_geo + type(grid_geometry), save :: fp_super_tile_geo + type(mn_surface_grids), save :: mn_static + + type(grid_geometry) :: tile_geo, tile_geo_u, tile_geo_v + real(kind=R_GRID), allocatable :: p_grid(:,:,:), n_grid(:,:,:) + real(kind=R_GRID), allocatable :: p_grid_u(:,:,:), n_grid_u(:,:,:) + real(kind=R_GRID), allocatable :: p_grid_v(:,:,:), n_grid_v(:,:,:) + real, allocatable :: wt_h(:,:,:) + real, allocatable :: wt_u(:,:,:) + real, allocatable :: wt_v(:,:,:) + !real :: ua(isd:ied,jsd:jed) + !real :: va(isd:ied,jsd:jed) + + logical :: filtered_terrain = .True. ! TODO set this from namelist + integer :: i, j, x, y, z, p, nn, n_moist + integer :: parent_tile + logical :: found_nest_domain = .false. + + ! Variables to enable debugging use of mpp_sync + logical :: debug_sync = .false. + integer, allocatable :: full_pelist(:) + integer :: pp, p1, p2 + + ! Variables for parent side of setup_aligned_nest() + integer :: isg, ieg, jsg, jeg, gid + integer :: isc_p, iec_p, jsc_p, jec_p + integer :: upoff, jind + integer :: ng, refinement + integer :: npx, npy, npz, ncnst, pnats + integer :: isc, iec, jsc, jec + integer :: isd, ied, jsd, jed + integer :: nq ! number of transported tracers + integer :: is, ie, js, je, k ! For recalculation of omga + integer, save :: output_step = 0 + integer, allocatable :: pelist(:) + character(len=16) :: errstring + logical :: is_moving_nest !! TODO Refine this per Atm(n) structure to allow some static and some moving nests in same run + integer :: year, month, day, hour, minute, second + real(kind=R_GRID) :: pi = 4 * atan(1.0d0) + real :: rad2deg + + rad2deg = 180.0 / pi + + gid = mpp_pe() + this_pe = mpp_pe() + + allocate(pelist(mpp_npes())) + call mpp_get_current_pelist(pelist) + + ! Get month to use for reading static datasets + call get_date(Atm(n)%Time_init, year, month, day, hour, minute, second) + + ! mygrid and n are the same in atmosphere.F90 + npx = Atm(n)%npx + npy = Atm(n)%npy + npz = Atm(n)%npz + ncnst = Atm(n)%ncnst + pnats = Atm(n)%flagstruct%pnats + + isc = Atm(n)%bd%isc + iec = Atm(n)%bd%iec + jsc = Atm(n)%bd%jsc + jec = Atm(n)%bd%jec + + isd = isc - Atm(n)%bd%ng + ied = iec + Atm(n)%bd%ng + jsd = jsc - Atm(n)%bd%ng + jed = jec + Atm(n)%bd%ng + + is = Atm(n)%bd%is + ie = Atm(n)%bd%ie + js = Atm(n)%bd%js + je = Atm(n)%bd%je + + nq = ncnst-pnats + + is_fine_pe = Atm(n)%neststruct%nested .and. ANY(Atm(n)%pelist(:) == this_pe) + + + if (first_nest_move) then + if (debug_log) print '("[INFO] WDR Start Clocks npe=",I0," n=",I0)', this_pe, n + call fv_moving_nest_init_clocks() + + ! If NSST is turned off, do not move the NSST variables. + ! Namelist switches are confusing; this should be the correct way to distinguish, not using nst_anl + if (IPD_Control%nstf_name(1) == 0) then + move_nsst=.false. + else + move_nsst=.true. + endif + + ! This will only allocate the mn_prog and mn_phys for the active Atm(n), not all of them + ! The others can safely remain unallocated. + if (debug_log) print '("[INFO] WDR call allocate_fv_moving_nest_prog npe=",I0," n=",I0)', this_pe, n + call allocate_fv_moving_nest_prog_type(isd, ied, jsd, jed, npz, Moving_nest(n)%mn_prog) + call allocate_fv_moving_nest_physics_type(isd, ied, jsd, jed, npz, move_physics, move_nsst, & + IPD_Control%lsoil, IPD_Control%nmtvr, IPD_Control%levs, IPD_Control%ntot2d, IPD_Control%ntot3d, & + Moving_nest(n)%mn_phys) + + endif + + !================================================================================================== + ! + ! 
Begin moving nest code + ! W. Ramstrom - AOML/HRD/CIMAS 01/15/2021 + ! + !================================================================================================== + + !!================================================================ + !! Step 1 -- Initialization + !!================================================================ + + if (debug_log) print '("WDR_NEST_HALO_RECV,",I0,"===STEP 1====")', this_pe + if (debug_log) print '("[INFO] WDR MV_NST1 run step 1 fv_moving_nest_main.F90 npe=",I0)', this_pe + + domain_fine => Atm(child_grid_num)%domain + parent_tile = Atm(child_grid_num)%neststruct%parent_tile + domain_coarse => Atm(parent_grid_num)%domain + is_moving_nest = Moving_nest(child_grid_num)%mn_flag%is_moving_nest + nz = Atm(n)%npz + + if (debug_log) then + if (is_fine_pe) then + print '("[INFO] WDR move_nest FINE. npe=",I0, " ", I2.2," do_move=",L1," delta_i_c=",I0," delta_j_c=",I0)', this_pe, n, do_move, delta_i_c, delta_j_c + else + print '("[INFO] WDR move_nest COARSE. npe=",I0, " ", I2.2)', this_pe, n + endif + + do nn = 1, size(Atm) + call show_atm("1", Atm(nn), nn, this_pe) + enddo + print '("[INFO] WDR diag Atm DONE npe=",I0," Atm(",I0,")")', this_pe, n + endif + + if (is_moving_nest .and. do_move) then + call mpp_clock_begin (id_movnestTot) + if (use_timers) call mpp_clock_begin (id_movnest1) + + !!================================================================ + !! Step 1.1 -- Show the nest grids + !!================================================================ + + if (debug_log .and. this_pe .eq. 0) then + !call show_nest_grid(Atm(n), this_pe, 0) + print '("[INFO] WDR BD init fv_moving_nest_main.F90 npe=",I0," is=",I0," ie=",I0," js=",I0," je=",I0)', this_pe, Atm(n)%bd%is, Atm(n)%bd%ie, Atm(n)%bd%js, Atm(n)%bd%je + print '("[INFO] WDR BD init fv_moving_nest_main.F90 npe=",I0," isd=",I0," ied=",I0," jsd=",I0," jed=",I0)', this_pe, Atm(n)%bd%isd, Atm(n)%bd%ied, Atm(n)%bd%jsd, Atm(n)%bd%jed + print '("[INFO] WDR BD init fv_moving_nest_main.F90 npe=",I0," isc=",I0," iec=",I0," jsc=",I0," jec=",I0)', this_pe, Atm(n)%bd%isc, Atm(n)%bd%iec, Atm(n)%bd%jsc, Atm(n)%bd%jec + endif + + !!================================================================ + !! Step 1.2 -- Configure local variables + !!================================================================ + + x_refine = Atm(child_grid_num)%neststruct%refinement + y_refine = x_refine + ioffset => Atm(child_grid_num)%neststruct%ioffset + joffset => Atm(child_grid_num)%neststruct%joffset + + if (debug_log) print '("[INFO] WDR MV_NST0 fv_moving_nest_main.F90 processing Atm(n) npe=",I0," n=",I0," ioffset=",I0," joffset=",I0)', this_pe, n, ioffset, joffset + + istart_fine = global_nest_domain%istart_fine(nest_num) + iend_fine = global_nest_domain%iend_fine(nest_num) + jstart_fine = global_nest_domain%jstart_fine(nest_num) + jend_fine = global_nest_domain%jend_fine(nest_num) + + istart_coarse = global_nest_domain%istart_coarse(nest_num) + iend_coarse = global_nest_domain%iend_coarse(nest_num) + jstart_coarse = global_nest_domain%jstart_coarse(nest_num) + jend_coarse = global_nest_domain%jend_coarse(nest_num) + + ! Allocate the local weight arrays. 
TODO OPTIMIZE change to use the ones from the gridstruct + if (is_fine_pe) then + allocate(wt_h(Atm(child_grid_num)%bd%isd:Atm(child_grid_num)%bd%ied, Atm(child_grid_num)%bd%jsd:Atm(child_grid_num)%bd%jed, 4)) + wt_h = real_snan + + allocate(wt_u(Atm(child_grid_num)%bd%isd:Atm(child_grid_num)%bd%ied, Atm(child_grid_num)%bd%jsd:Atm(child_grid_num)%bd%jed+1, 4)) + wt_u = real_snan + + allocate(wt_v(Atm(child_grid_num)%bd%isd:Atm(child_grid_num)%bd%ied+1, Atm(child_grid_num)%bd%jsd:Atm(child_grid_num)%bd%jed, 4)) + wt_v = real_snan + else + allocate(wt_h(1,1,4)) + wt_h = 0.0 + + allocate(wt_u(1,1,4)) + wt_u = 0.0 + + allocate(wt_v(1,1,4)) + wt_v = 0.0 + endif + + ! This full list of PEs is used for the mpp_sync for debugging. Can later be removed. + p1 = size(Atm(1)%pelist) ! Parent PEs + p2 = size(Atm(2)%pelist) ! Nest PEs + + allocate(full_pelist(p1 + p2)) + do pp=1,p1 + full_pelist(pp) = Atm(1)%pelist(pp) + enddo + do pp=1,p2 + full_pelist(p1+pp) = Atm(2)%pelist(pp) + enddo + + !!============================================================================ + !! Step 1.3 -- Dump the prognostic variables before we do the nest motion. + !!============================================================================ + + if (debug_log) print '("[INFO] WDR MV_NST0 run step 0 fv_moving_nest_main.F90 npe=",I0)', this_pe + output_step = output_step + 1 + + !!============================================================================ + !! Step 1.4 -- Read in the full panel grid definition + !!============================================================================ + + if (debug_log) then + print '("[INFO] WDR check grid_global fv_moving_nest_main.F90 npe=",I0," n=",I0)', this_pe, 1 + call check_array(Atm(1)%grid_global, this_pe, "grid_global", -2.0*3.1415926536, 2.0*3.1415926536) + print '("[INFO] WDR check grid_global fv_moving_nest_main.F90 npe=",I0," n=",I0)', this_pe, 2 + call check_array(Atm(2)%grid_global, this_pe, "grid_global", -2.0*3.1415926536, 2.0*3.1415926536) + endif + + if (is_fine_pe) then + + nx_cubic = Atm(1)%npx - 1 + ny_cubic = Atm(1)%npy - 1 + + nx = Atm(n)%npx - 1 + ny = Atm(n)%npy - 1 + + grid => Atm(n)%gridstruct%grid + agrid => Atm(n)%gridstruct%agrid + + if (debug_log) print '("[INFO] WDR MV_NST0 fv_moving_nest_main.F90 processing Atm(n) npe=",I0," nx_cubic=",I0," ny_cubic=",I0," nx=",I0," ny=",I0)', this_pe, nx_cubic, ny_cubic, nx ,ny + + ! Read in static lat/lon data for parent at nest resolution; returns fp_ full panel variables + ! Also read in other static variables from the orography and surface files + + if (first_nest_move) then + if (debug_log) print '("[INFO] WDR mn_latlon_read_hires_parent READING static fine file on npe=",I0)', this_pe + + call mn_latlon_read_hires_parent(Atm(1)%npx, Atm(1)%npy, x_refine, fp_super_tile_geo, & + Moving_nest(child_grid_num)%mn_flag%surface_dir, parent_tile) + + call mn_orog_read_hires_parent(Atm(1)%npx, Atm(1)%npy, x_refine, & + Moving_nest(child_grid_num)%mn_flag%surface_dir, filtered_terrain, & + mn_static%orog_grid, mn_static%orog_std_grid, mn_static%ls_mask_grid, mn_static%land_frac_grid, parent_tile) + + ! If terrain_smoother method 1 is chosen, we need the parent coarse terrain + if (Moving_nest(n)%mn_flag%terrain_smoother .eq. 
1) then + if (filtered_terrain) then + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, 1, Moving_nest(child_grid_num)%mn_flag%surface_dir, "oro_data", "orog_filt", mn_static%parent_orog_grid, parent_tile) + else + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, 1, Moving_nest(child_grid_num)%mn_flag%surface_dir, "oro_data", "orog_raw", mn_static%parent_orog_grid, parent_tile) + endif + endif + + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "substrate_temperature", "substrate_temperature", mn_static%deep_soil_temp_grid, parent_tile) + ! set any -999s to +4C + call mn_replace_low_values(mn_static%deep_soil_temp_grid, -100.0, 277.0) + + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "soil_type", "soil_type", mn_static%soil_type_grid, parent_tile) + ! To match initialization behavior, set any -999s to 0 in soil_type + call mn_replace_low_values(mn_static%soil_type_grid, -100.0, 0.0) + + + !! TODO investigate reading high-resolution veg_frac and veg_greenness + !call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "", mn_static%veg_frac_grid) + + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "vegetation_type", "vegetation_type", mn_static%veg_type_grid, parent_tile) + ! To match initialization behavior, set any -999s to 0 in veg_type + call mn_replace_low_values(mn_static%veg_type_grid, -100.0, 0.0) + + + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "slope_type", "slope_type", mn_static%slope_type_grid, parent_tile) + ! To match initialization behavior, set any -999s to 0 in slope_type + call mn_replace_low_values(mn_static%slope_type_grid, -100.0, 0.0) + + + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "maximum_snow_albedo", "maximum_snow_albedo", mn_static%max_snow_alb_grid, parent_tile) + ! Set any -999s to 0.5 + call mn_replace_low_values(mn_static%max_snow_alb_grid, -100.0, 0.5) + + ! Albedo fraction -- read and calculate + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "facsf", "facsf", mn_static%facsf_grid, parent_tile) + + allocate(mn_static%facwf_grid(lbound(mn_static%facsf_grid,1):ubound(mn_static%facsf_grid,1),lbound(mn_static%facsf_grid,2):ubound(mn_static%facsf_grid,2))) + + ! For land points, set facwf = 1.0 - facsf + ! To match initialization behavior, set any -999s to 0 + do i=lbound(mn_static%facsf_grid,1),ubound(mn_static%facsf_grid,1) + do j=lbound(mn_static%facsf_grid,2),ubound(mn_static%facsf_grid,2) + if (mn_static%facsf_grid(i,j) .lt. -100) then + mn_static%facsf_grid(i,j) = 0 + mn_static%facwf_grid(i,j) = 0 + else + mn_static%facwf_grid(i,j) = 1.0 - mn_static%facsf_grid(i,j) + endif + enddo + enddo + + ! Additional albedo variables + ! black sky = strong cosz -- direct sunlight + ! white sky = weak cosz -- diffuse light + + ! alvsf = visible strong cosz = visible_black_sky_albedo + ! alvwf = visible weak cosz = visible_white_sky_albedo + ! alnsf = near IR strong cosz = near_IR_black_sky_albedo + ! 
alnwf = near IR weak cosz = near_IR_white_sky_albedo + + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "snowfree_albedo", "visible_black_sky_albedo", mn_static%alvsf_grid, parent_tile, time=month) + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "snowfree_albedo", "visible_white_sky_albedo", mn_static%alvwf_grid, parent_tile, time=month) + + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "snowfree_albedo", "near_IR_black_sky_albedo", mn_static%alnsf_grid, parent_tile, time=month) + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "snowfree_albedo", "near_IR_white_sky_albedo", mn_static%alnwf_grid, parent_tile, time=month) + + ! Set the -999s to small value of 0.06, matching initialization code in chgres + + call mn_replace_low_values(mn_static%alvsf_grid, -100.0, 0.06) + call mn_replace_low_values(mn_static%alvwf_grid, -100.0, 0.06) + call mn_replace_low_values(mn_static%alnsf_grid, -100.0, 0.06) + call mn_replace_low_values(mn_static%alnwf_grid, -100.0, 0.06) + + endif + + ! Validation/logging calls that can be disabled + if (debug_log) then + call show_tile_geo(fp_super_tile_geo, this_pe, "fp_super_tile_geo") + call show_gridstruct(Atm(n)%gridstruct, this_pe) + !call validate_hires_parent(fp_super_tile_geo, Atm(n)%gridstruct%grid, Atm(n)%gridstruct%agrid, x_refine, y_refine, ioffset, joffset) + endif + endif + + if (first_nest_move) first_nest_move = .false. + + if (use_timers) call mpp_clock_end (id_movnest1) + if (use_timers) call mpp_clock_begin (id_movnest1_9) + + !!===================================================================================== + !! Step 1.9 -- Allocate and fill the temporary variable(s) + !!===================================================================================== + + call mn_prog_fill_temp_variables(Atm, n, child_grid_num, is_fine_pe, npz) + call mn_phys_fill_temp_variables(Atm, Atm_block, IPD_control, IPD_data, n, child_grid_num, is_fine_pe, npz) + + if (use_timers) call mpp_clock_end (id_movnest1_9) + if (use_timers) call mpp_clock_begin (id_movnest2) + + !!============================================================================ + !! Step 2 -- Fill in the halos from the coarse grids + !!============================================================================ + if (debug_log) print '("WDR_NEST_HALO_RECV,",I0,"===STEP 2====")', this_pe + if (debug_log) print '("[INFO] WDR MV_NST2 run step 2 fv_moving_nest_main.F90 npe=",I0)', this_pe + + ! The halos seem to be empty at least on the first model timestep. + ! These calls need to be executed by the parent and nest PEs in order to do the communication + ! This is before any nest motion has occurred + + call mn_prog_fill_nest_halos_from_parent(Atm, n, child_grid_num, is_fine_pe, global_nest_domain, nz) + call mn_phys_fill_nest_halos_from_parent(Atm, IPD_control, IPD_data, mn_static, n, child_grid_num, is_fine_pe, global_nest_domain, nz) + + if (use_timers) call mpp_clock_end (id_movnest2) + if (use_timers) call mpp_clock_begin (id_movnest3) + + !!============================================================================ + !! Step 3 -- Redefine the nest domain to new location + !! This calls mpp_define_nest_domains. Following the code in fv_control.F90, only should + !! be executed on the nest PEs. Operates only on indices. + !! 
-- Similar to med_nest_configure() from HWRF
+ !!============================================================================
+
+ if (debug_log) print '("WDR_NEST_HALO_RECV,",I0,"===STEP 3====")', this_pe
+ if (debug_log) print '("[INFO] WDR MV_NST3 run step 3 fv_moving_nest_main.F90 npe=",I0)', this_pe
+
+ if (debug_log) print '("[INFO] WDR MV_NST3 run step 3 fv_moving_nest_main.F90 processing Atm(n) npe=",I0," n=",I0," ioffset=",I0," joffset=",I0)', this_pe, n, ioffset, joffset
+
+ call mn_meta_move_nest(delta_i_c, delta_j_c, pelist, is_fine_pe, extra_halo, &
+ global_nest_domain, domain_fine, domain_coarse, &
+ istart_coarse, iend_coarse, jstart_coarse, jend_coarse, &
+ istart_fine, iend_fine, jstart_fine, jend_fine)
+
+ ! This code updates the values in neststruct; ioffset/joffset are pointers: ioffset => Atm(child_grid_num)%neststruct%ioffset
+ ioffset = ioffset + delta_i_c
+ joffset = joffset + delta_j_c
+
+ if (debug_sync) call mpp_sync(full_pelist) ! Used to make debugging easier. Can be removed.
+
+ if (use_timers) call mpp_clock_end (id_movnest3)
+ if (use_timers) call mpp_clock_begin (id_movnest4)
+
+ !!============================================================================
+ !! Step 4 -- Fill the internal nest halos for the prognostic variables,
+ !! then physics variables
+ !! Only acts on the nest PEs
+ !! -- similar to med_nest_initial
+ !!============================================================================
+
+ if (debug_log) print '("WDR_NEST_HALO_RECV,",I0,"===STEP 4====")', this_pe
+ if (debug_log) print '("[INFO] WDR MV_NST4 run step 4 fv_moving_nest_main.F90 npe=",I0)', this_pe
+
+ ! TODO should/can this run before the mn_meta_move_nest?
+ if (is_fine_pe) then
+ call mn_prog_fill_intern_nest_halos(Atm(n), domain_fine, is_fine_pe)
+ call mn_phys_fill_intern_nest_halos(Moving_nest(n), IPD_control, IPD_data, domain_fine, is_fine_pe)
+ else
+ if (debug_log) print '("[INFO] WDR MV_NST4 skip step 4 fv_moving_nest_main.F90 npe=",I0)', this_pe
+ endif
+
+ if (debug_sync) call mpp_sync(full_pelist) ! Used to make debugging easier. Can be removed.
+
+ if (use_timers) call mpp_clock_end (id_movnest4)
+ if (use_timers) call mpp_clock_begin (id_movnest5)
+
+ !!============================================================================
+ !! Step 5 -- Recalculate nest halo weights (for fine PEs only) and indices
+ !! -- Similar to med_nest_weights
+ !!============================================================================
+
+ if (debug_log) print '("WDR_NEST_HALO_RECV,",I0,"===STEP 5====")', this_pe
+ if (debug_log) print '("[INFO] WDR MV_NST5 run step 5 fv_moving_nest_main.F90 npe=",I0)', this_pe
+
+ if (is_fine_pe) then
+ !!============================================================================
+ !! Step 5.1 -- Fill the p_grid* and n_grid* variables
+ !!============================================================================
+ if (debug_log) print '("[INFO] WDR MV_NST5 run step 5 fv_moving_nest_main.F90 npe=",I0, " tile_geo%lats allocated:",L1)', this_pe, allocated(tile_geo%lats)
+ if (debug_log) print '("[INFO] WDR MV_NST5 run step 5 fv_moving_nest_main.F90 npe=",I0, " parent_geo%lats allocated:",L1)', this_pe, allocated(parent_geo%lats)
+
+ ! parent_geo is only loaded first time; afterwards it is reused.
+ !
This is the coarse resolution data for the parent + call mn_latlon_load_parent(Moving_nest(child_grid_num)%mn_flag%surface_dir, Atm, n, parent_tile, & + delta_i_c, delta_j_c, child_grid_num, & + parent_geo, tile_geo, tile_geo_u, tile_geo_v, fp_super_tile_geo, & + p_grid, n_grid, p_grid_u, n_grid_u, p_grid_v, n_grid_v) + + ! tile_geo holds the center lat/lons for the entire nest (all PEs). + call mn_reset_phys_latlon(Atm, n, tile_geo, fp_super_tile_geo, Atm_block, IPD_control, IPD_data) + + !!============================================================================ + !! Step 5.2 -- Fill the wt* variables for each stagger + !!============================================================================ + + call mn_meta_recalc( delta_i_c, delta_j_c, x_refine, y_refine, tile_geo, parent_geo, fp_super_tile_geo, & + is_fine_pe, global_nest_domain, position, p_grid, n_grid, wt_h, istart_coarse, jstart_coarse) + + call mn_meta_recalc( delta_i_c, delta_j_c, x_refine, y_refine, tile_geo_u, parent_geo, fp_super_tile_geo, & + is_fine_pe, global_nest_domain, position_u, p_grid_u, n_grid_u, wt_u, istart_coarse, jstart_coarse) + + call mn_meta_recalc( delta_i_c, delta_j_c, x_refine, y_refine, tile_geo_v, parent_geo, fp_super_tile_geo, & + is_fine_pe, global_nest_domain, position_v, p_grid_v, n_grid_v, wt_v, istart_coarse, jstart_coarse) + + endif + + !!============================================================================ + !! Step 5.3 -- Adjust the indices by the values of delta_i_c, delta_j_c + !!============================================================================ + + call mn_shift_index(delta_i_c, delta_j_c, Atm(child_grid_num)%neststruct%ind_h) + call mn_shift_index(delta_i_c, delta_j_c, Atm(child_grid_num)%neststruct%ind_u) + call mn_shift_index(delta_i_c, delta_j_c, Atm(child_grid_num)%neststruct%ind_v) + call mn_shift_index(delta_i_c, delta_j_c, Atm(child_grid_num)%neststruct%ind_b) + + if (debug_sync) call mpp_sync(full_pelist) ! Used to make debugging easier. Can be removed. + + if (use_timers) call mpp_clock_end (id_movnest5) + if (use_timers) call mpp_clock_begin (id_movnest6) + + !!============================================================================ + !! Step 6 Shift the data on each nest PE + !! -- similar to med_nest_move in HWRF + !!============================================================================ + + if (debug_log) print '("WDR_NEST_HALO_RECV,",I0,"===STEP 6====")', this_pe + if (debug_log) print '("[INFO] WDR MV_NST6 run step 6 fv_moving_nest_main.F90 npe=",I0," n=",I0)', this_pe, n + + call mn_prog_shift_data(Atm, n, child_grid_num, wt_h, wt_u, wt_v, & + delta_i_c, delta_j_c, x_refine, y_refine, & + is_fine_pe, global_nest_domain, nz) + + call mn_phys_shift_data(Atm, IPD_control, IPD_data, n, child_grid_num, wt_h, wt_u, wt_v, & + delta_i_c, delta_j_c, x_refine, y_refine, & + is_fine_pe, global_nest_domain, nz) + + if (debug_log) print '("[INFO] WDR MV_NST6 complete step 6 fv_moving_nest_main.F90 npe=",I0)', this_pe + + if (debug_sync) call mpp_sync(full_pelist) ! Used to make debugging easier. Can be removed. + + if (use_timers) call mpp_clock_end (id_movnest6) + if (use_timers) call mpp_clock_begin (id_movnest7_0) + + !!===================================================================================== + !! Step 7 -- Reset the grid definition data and buffer sizes and weights after the nest motion + !! 
Mostly needed when dynamics is executed
+ !!=====================================================================================
+
+ if (debug_log) print '("WDR_NEST_HALO_RECV,",I0,"===STEP 7====")', this_pe
+ if (debug_log) print '("[INFO] WDR MV_NST7 run step 7 fv_moving_nest_main.F90 npe=",I0," n=",I0)', this_pe, n
+
+ call mn_meta_reset_gridstruct(Atm, n, child_grid_num, global_nest_domain, fp_super_tile_geo, x_refine, y_refine, is_fine_pe, wt_h, wt_u, wt_v, a_step, dt_atmos)
+
+ if (use_timers) call mpp_clock_end (id_movnest7_0)
+ if (use_timers) call mpp_clock_begin (id_movnest7_1)
+
+ !!=====================================================================================
+ !! Step 7.01 -- Reset the orography data that was read from the hires static file
+ !!
+ !!=====================================================================================
+
+ if (is_fine_pe) then
+ ! phis is allocated in fv_arrays.F90 as: allocate ( Atm%phis(isd:ied ,jsd:jed ) )
+ ! 0 -- all high-resolution data, 1 - static nest smoothing algorithm, 5 - 5 point smoother, 9 - 9 point smoother
+ ! Defaults to 1 - static nest smoothing algorithm; this seems to produce the most stable solutions
+ !print '("[INFO] WDR Moving Nest terrain_smoother=",I0," High-resolution terrain. npe=",I0)', Atm(n)%neststruct%terrain_smoother, this_pe
+
+ select case(Moving_nest(n)%mn_flag%terrain_smoother)
+ case (0)
+ ! High-resolution terrain for entire nest
+ if (debug_log) print '("[INFO] WDR Moving Nest terrain_smoother=0 High-resolution terrain. npe=",I0)', this_pe
+ Atm(n)%phis(isd:ied, jsd:jed) = mn_static%orog_grid((ioffset-1)*x_refine+isd:(ioffset-1)*x_refine+ied, (joffset-1)*y_refine+jsd:(joffset-1)*y_refine+jed) * grav
+ case (1)
+ ! Static nest smoothing algorithm - interpolation of coarse terrain in halo zone and 5 point blending zone of coarse and fine data
+ if (debug_log) print '("[INFO] WDR Moving Nest terrain_smoother=1 Blending5 algorithm. npe=",I0)', this_pe
+ call set_blended_terrain(Atm(n), mn_static%parent_orog_grid, mn_static%orog_grid, x_refine, Atm(n)%bd%ng, 5, a_step)
+ case (2)
+ ! Static nest smoothing algorithm - interpolation of coarse terrain in halo zone and 10 point blending zone of coarse and fine data
+ if (debug_log) print '("[INFO] WDR Moving Nest terrain_smoother=2 Blending10 algorithm. npe=",I0)', this_pe
+ call set_blended_terrain(Atm(n), mn_static%parent_orog_grid, mn_static%orog_grid, x_refine, Atm(n)%bd%ng, 10, a_step)
+ case (5)
+ ! 5 pt smoother. blend zone of 5 to match static nest
+ if (debug_log) print '("[INFO] WDR Moving Nest terrain_smoother=5 5-point smoother. npe=",I0)', this_pe
+ call set_smooth_nest_terrain(Atm(n), mn_static%orog_grid, x_refine, 5, Atm(n)%bd%ng, 5)
+ case (9)
+ ! 9 pt smoother. blend zone of 5 to match static nest
+ if (debug_log) print '("[INFO] WDR Moving Nest terrain_smoother=9 9-point smoother. npe=",I0)', this_pe
+ call set_smooth_nest_terrain(Atm(n), mn_static%orog_grid, x_refine, 9, Atm(n)%bd%ng, 5)
+ case default
+ write (errstring, "(I0)") Moving_nest(n)%mn_flag%terrain_smoother
+ call mpp_error(FATAL,'Invalid terrain_smoother in fv_moving_nest_main '//errstring)
+ end select
+
+ ! Reinitialize diagnostics -- zsurf, which is Atm%phis / grav
+ call fv_diag_reinit(Atm(n:n))
+
+ ! sgh and oro were only fully allocated if fv_land is True
+ ! if false, oro is (1,1), and sgh is not allocated
+ if ( Atm(n)%flagstruct%fv_land ) then
+ if (debug_log) print '("[INFO] WDR shift orography data fv_land TRUE npe=",I0)', this_pe
+ !
oro and sgh are allocated only for the compute domain -- they do not have halos + + !fv_arrays.F90 oro() !< land fraction (1: all land; 0: all water) + !real, _ALLOCATABLE :: oro(:,:) _NULL !< land fraction (1: all land; 0: all water) + Atm(n)%oro(isc:iec, jsc:jec) = mn_static%land_frac_grid((ioffset-1)*x_refine+isc:(ioffset-1)*x_refine+iec, (joffset-1)*y_refine+jsc:(joffset-1)*y_refine+jec) + + !real, _ALLOCATABLE :: sgh(:,:) _NULL !< Terrain standard deviation + Atm(n)%sgh(isc:iec, jsc:jec) = mn_static%orog_std_grid((ioffset-1)*x_refine+isc:(ioffset-1)*x_refine+iec, (joffset-1)*y_refine+jsc:(joffset-1)*y_refine+jec) + else + if (debug_log) print '("[INFO] WDR shift orography data fv_land FALSE npe=",I0)', this_pe + endif + + call mn_phys_reset_sfc_props(Atm, n, mn_static, Atm_block, IPD_data, ioffset, joffset, x_refine) + endif + + !!===================================================================================== + !! Step 7.1 Refill the nest edge halos from parent grid after nest motion + !! Parent and nest PEs need to execute these subroutines + !!===================================================================================== + + ! Refill the halos around the edge of the nest from the parent + call mn_prog_fill_nest_halos_from_parent(Atm, n, child_grid_num, is_fine_pe, global_nest_domain, nz) + call mn_phys_fill_nest_halos_from_parent(Atm, IPD_control, IPD_data, mn_static, n, child_grid_num, is_fine_pe, global_nest_domain, nz) + + if (use_timers) call mpp_clock_end (id_movnest7_1) + + if (is_fine_pe) then + if (use_timers) call mpp_clock_begin (id_movnest7_2) + + ! Refill the internal halos after nest motion + call mn_prog_fill_intern_nest_halos(Atm(n), domain_fine, is_fine_pe) + call mn_phys_fill_intern_nest_halos(Moving_nest(n), IPD_control, IPD_data, domain_fine, is_fine_pe) + + if (use_timers) call mpp_clock_end (id_movnest7_2) + endif + + if (debug_sync) call mpp_sync(full_pelist) ! Used to make debugging easier. Can be removed. + + !!===================================================================================== + !! Step 7.3 -- Apply the temporary variable to the prognostics and physics structures + !!===================================================================================== + if (use_timers) call mpp_clock_begin (id_movnest7_3) + + call mn_prog_apply_temp_variables(Atm, n, child_grid_num, is_fine_pe, npz) + call mn_phys_apply_temp_variables(Atm, Atm_block, IPD_control, IPD_data, n, child_grid_num, is_fine_pe, npz) + + if (use_timers) call mpp_clock_end (id_movnest7_3) + if (use_timers) call mpp_clock_begin (id_movnest8) + + !!============================================================================ + !! Step 8 -- Dump to netCDF + !!============================================================================ + + if (debug_log) print '("WDR_NEST_HALO_RECV,",I0,"===STEP 8====")', this_pe + if (debug_log) print '("[INFO] WDR MV_NST8 run step 8 fv_moving_nest_main.F90 npe=",I0)', this_pe + + if (is_fine_pe) then + do i=isc,iec + do j=jsc,jec + ! WDR EMIS PATCH - Force to positive at all locations matching the landmask + !if (Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 1 .and. Moving_nest(n)%mn_phys%emis_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%emis_lnd(i,j) = 0.5 + !if (Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 2 .and. Moving_nest(n)%mn_phys%emis_ice(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%emis_ice(i,j) = 0.5 + !if (Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 0 .and. Moving_nest(n)%mn_phys%emis_wat(i,j) .lt. 
0.0) Moving_nest(n)%mn_phys%emis_wat(i,j) = 0.5
+ !if (Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 1 .and. Moving_nest(n)%mn_phys%albdirvis_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%albdirvis_lnd(i,j) = 0.5
+ !if (Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 1 .and. Moving_nest(n)%mn_phys%albdirnir_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%albdirnir_lnd(i,j) = 0.5
+ !if (Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 1 .and. Moving_nest(n)%mn_phys%albdifvis_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%albdifvis_lnd(i,j) = 0.5
+ !if (Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 1 .and. Moving_nest(n)%mn_phys%albdifnir_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%albdifnir_lnd(i,j) = 0.5
+
+ ! WDR EMIS PATCH - Force to positive at all locations.
+ if (Moving_nest(n)%mn_phys%emis_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%emis_lnd(i,j) = 0.5
+ if (Moving_nest(n)%mn_phys%emis_ice(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%emis_ice(i,j) = 0.5
+ if (Moving_nest(n)%mn_phys%emis_wat(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%emis_wat(i,j) = 0.5
+ if (Moving_nest(n)%mn_phys%albdirvis_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%albdirvis_lnd(i,j) = 0.5
+ if (Moving_nest(n)%mn_phys%albdirnir_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%albdirnir_lnd(i,j) = 0.5
+ if (Moving_nest(n)%mn_phys%albdifvis_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%albdifvis_lnd(i,j) = 0.5
+ if (Moving_nest(n)%mn_phys%albdifnir_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%albdifnir_lnd(i,j) = 0.5
+
+ !if (Moving_nest(n)%mn_phys%semis(i,j) .lt. 0.0) then
+ ! print '("[INFO] WDR SEMIS fv_moving_nest_main.F90 npe=",I0," semis(",I0,",",I0,")=",F15.5)', this_pe, i, j, Moving_nest(n)%mn_phys%semis(i,j)
+ !endif
+ !if (Moving_nest(n)%mn_phys%semisbase(i,j) .lt. 0.0) then
+ ! print '("[INFO] WDR SEMISBASE fv_moving_nest_main.F90 npe=",I0," semisbase(",I0,",",I0,")=",F15.5)', this_pe, i, j, Moving_nest(n)%mn_phys%semisbase(i,j)
+ !endif
+
+ if ( Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 1 .and. Moving_nest(n)%mn_phys%emis_lnd(i,j) .lt. 0.0) then
+ print '("[INFO] WDR SEMISLND fv_moving_nest_main.F90 npe=",I0," emis_lnd(",I0,",",I0,")=",F15.5)', this_pe, i, j, Moving_nest(n)%mn_phys%emis_lnd(i,j)
+ endif
+ if ( Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 2 .and. Moving_nest(n)%mn_phys%emis_ice(i,j) .lt. 0.0) then
+ print '("[INFO] WDR SEMISLND fv_moving_nest_main.F90 npe=",I0," emis_ice(",I0,",",I0,")=",F15.5)', this_pe, i, j, Moving_nest(n)%mn_phys%emis_ice(i,j)
+ endif
+ if ( Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 0 .and. Moving_nest(n)%mn_phys%emis_wat(i,j) .lt. 0.0) then
+ print '("[INFO] WDR SEMISLND fv_moving_nest_main.F90 npe=",I0," emis_wat(",I0,",",I0,")=",F15.5)', this_pe, i, j, Moving_nest(n)%mn_phys%emis_wat(i,j)
+ endif
+ if ( Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 1 .and. Moving_nest(n)%mn_phys%albdirvis_lnd(i,j) .lt. 0.0) then
+ print '("[INFO] WDR ALBLND fv_moving_nest_main.F90 npe=",I0," albdirvis_lnd(",I0,",",I0,")=",F15.5)', this_pe, i, j, Moving_nest(n)%mn_phys%albdirvis_lnd(i,j)
+ endif
+ enddo
+ enddo
+ endif
+
+ output_step = output_step + 1
+
+ if (debug_sync) call mpp_sync(full_pelist) ! Used to make debugging easier. Can be removed.
+
+ if (use_timers) call mpp_clock_end (id_movnest8)
+ if (use_timers) call mpp_clock_begin (id_movnest9)
+
+ !!=========================================================================================
+ !! Step 9 -- Recalculate auxiliary pressures
+ !! Should help stabilize the fields before dynamics runs
+ !! TODO Consider whether vertical remapping, recalculation of omega, interpolation of winds
+ !!
to A or C grids, and/or divergence recalculation are needed here.
+ !!=========================================================================================
+
+ if (is_fine_pe) then
+ if (debug_log) print '("[INFO] WDR MV_NST L2E before recalc auxiliary pressures fv_moving_nest_main.F90 npe=",I0)', this_pe
+ call recalc_aux_pressures(Atm(n))
+ if (debug_log) print '("[INFO] WDR MV_NST L2E after recalc auxiliary pressures fv_moving_nest_main.F90 npe=",I0)', this_pe
+ endif
+
+ if (debug_log) print '("[INFO] WDR PTVAL fv_dynamics.F90 npe=",I0," AfterNestMove ================================================")', this_pe
+ output_step = output_step + 1
+ endif
+
+ if (use_timers) call mpp_clock_end (id_movnest9)
+ call mpp_clock_end (id_movnestTot)
+
+ if (debug_sync) call mpp_sync(full_pelist) ! Used to make debugging easier. Can be removed.
+
+ !! Uncomment to exit and force file IO after single nest move, without dynamics
+ ! call fms_io_exit() !! Force the output of the buffered NC files
+ ! if (debug_log) print '("[INFO] WDR calling mpp_exit after moving nest fv_moving_nest_main.F90 npe=",I0)', this_pe
+ ! call mpp_exit()
+ ! if (debug_log) print '("[INFO] WDR calling STOP after moving nest fv_moving_nest_main.F90 npe=",I0)', this_pe
+ ! stop
+ !! else
+ !! if (debug_log) print '("[INFO] WDR move_nest not nested PE npe=",I0)', this_pe
+ !! endif
+
+ !call compare_terrain("phis", Atm(n)%phis, 1, Atm(n)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, global_nest_domain)
+
+ if (debug_log) call show_nest_grid(Atm(n), this_pe, 99)
+
+ end subroutine fv_moving_nest_exec
+
+ !>@brief The subroutine 'mn_replace_low_values' replaces low values with a default value.
+ subroutine mn_replace_low_values(data_grid, low_value, new_value)
+ real, _ALLOCATABLE, intent(inout) :: data_grid(:,:) !< 2D grid of data
+ real, intent(in) :: low_value !< Low value to check for; e.g. negative or fill value
+ real, intent(in) :: new_value !< Value to replace low value with
+
+ integer :: i, j
+
+ do i=lbound(data_grid,1),ubound(data_grid,1)
+ do j=lbound(data_grid,2),ubound(data_grid,2)
+ if (data_grid(i,j) .le. low_value) data_grid(i,j) = new_value
+ enddo
+ enddo
+ end subroutine mn_replace_low_values
+
+#endif ! MOVING_NEST
+
+end module fv_moving_nest_main_mod
+
diff --git a/moving_nest/fv_moving_nest_physics.F90 b/moving_nest/fv_moving_nest_physics.F90
new file mode 100644
index 000000000..074bb3c16
--- /dev/null
+++ b/moving_nest/fv_moving_nest_physics.F90
@@ -0,0 +1,1737 @@
+!***********************************************************************
+!* GNU Lesser General Public License
+!*
+!* This file is part of the FV3 dynamical core.
+!*
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
+!*
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
+!*
+!* You should have received a copy of the GNU Lesser General Public
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
+!***********************************************************************
+
+
+!***********************************************************************
+!> @file
+!!
@brief Provides Moving Nest functionality for physics and surface variables +!! @author W. Ramstrom. Collaboration with Bin Liu and Chunxi Zhang, EMC +!! @email William.Ramstrom@noaa.gov +! =======================================================================! + + +! =======================================================================! +! +! Notes +! +!------------------------------------------------------------------------ +! Moving Nest Subroutine Naming Convention +!----------------------------------------------------------------------- +! +! mn_meta_* subroutines perform moving nest operations for FV3 metadata. +! These routines will run only once per nest move. +! +! mn_var_* subroutines perform moving nest operations for an individual FV3 variable. +! These routines will run many times per nest move. +! +! mn_prog_* subroutines perform moving nest operations for the list of prognostic fields. +! These routines will run only once per nest move. +! +! mn_phys_* subroutines perform moving nest operations for the list of physics fields. +! These routines will run only once per nest move. +! +! =======================================================================! + +module fv_moving_nest_physics_mod +#ifdef MOVING_NEST + + use block_control_mod, only: block_control_type + use fms_mod, only: mpp_clock_id, mpp_clock_begin, mpp_clock_end, CLOCK_ROUTINE, clock_flag_default + use mpp_mod, only: mpp_pe, mpp_sync, mpp_sync_self, mpp_send, mpp_error, NOTE, FATAL + use mpp_domains_mod, only: mpp_update_domains, mpp_get_data_domain, mpp_get_global_domain + use mpp_domains_mod, only: mpp_define_nest_domains, mpp_shift_nest_domains, nest_domain_type, domain2d + use mpp_domains_mod, only: mpp_get_C2F_index, mpp_update_nest_fine + use mpp_domains_mod, only: mpp_get_F2C_index, mpp_update_nest_coarse + use mpp_domains_mod, only: NORTH, SOUTH, EAST, WEST, CORNER, CENTER + use mpp_domains_mod, only: NUPDATE, SUPDATE, EUPDATE, WUPDATE, DGRID_NE + +#ifdef GFS_TYPES + use GFS_typedefs, only: IPD_data_type => GFS_data_type, & + IPD_control_type => GFS_control_type, kind_phys +#else + use IPD_typedefs, only: IPD_data_type, IPD_control_type, kind_phys => IPD_kind_phys +#endif + use GFS_init, only: GFS_grid_populate + + use boundary_mod, only: update_coarse_grid, update_coarse_grid_mpp + use bounding_box_mod, only: bbox, bbox_get_C2F_index, fill_bbox, show_bbox + use constants_mod, only: cp_air, rdgas, grav, rvgas, kappa, pstd_mks, hlv + use field_manager_mod, only: MODEL_ATMOS + use fms_io_mod, only: read_data, write_data, get_global_att_value, fms_io_init, fms_io_exit + use fv_arrays_mod, only: fv_atmos_type, fv_nest_type, fv_grid_type, R_GRID + use fv_moving_nest_types_mod, only: fv_moving_nest_prog_type, fv_moving_nest_physics_type, mn_surface_grids, fv_moving_nest_type + use fv_arrays_mod, only: allocate_fv_nest_bc_type, deallocate_fv_nest_bc_type + use fv_grid_tools_mod, only: init_grid + use fv_grid_utils_mod, only: grid_utils_init, ptop_min, dist2side_latlon + use fv_mapz_mod, only: Lagrangian_to_Eulerian, moist_cv, compute_total_energy + use fv_moving_nest_utils_mod, only: check_array, check_local_array, show_atm, show_atm_grids, show_nest_grid, show_tile_geo, grid_equal + use fv_nesting_mod, only: dealloc_nested_buffers + use fv_nwp_nudge_mod, only: do_adiabatic_init + use init_hydro_mod, only: p_var + use tracer_manager_mod, only: get_tracer_index, get_tracer_names + use fv_moving_nest_utils_mod, only: alloc_halo_buffer, load_nest_latlons_from_nc, grid_geometry, output_grid_to_nc, 
find_nest_alignment + use fv_moving_nest_utils_mod, only: fill_nest_from_buffer, fill_nest_from_buffer_cell_center, fill_nest_from_buffer_nearest_neighbor + use fv_moving_nest_utils_mod, only: fill_nest_halos_from_parent, fill_grid_from_supergrid, fill_weight_grid + use fv_moving_nest_utils_mod, only: alloc_read_data + use fv_moving_nest_utils_mod, only: fill_nest_from_buffer_cell_center_masked + use fv_moving_nest_utils_mod, only: fill_nest_halos_from_parent_masked + + use fv_moving_nest_mod, only: mn_var_fill_intern_nest_halos, mn_var_dump_to_netcdf, mn_var_shift_data + use fv_moving_nest_types_mod, only: Moving_nest + implicit none + +#ifdef NO_QUAD_PRECISION + ! 64-bit precision (kind=8) + integer, parameter:: f_p = selected_real_kind(15) +#else + ! Higher precision (kind=16) for grid geometrical factors: + integer, parameter:: f_p = selected_real_kind(20) +#endif + +#ifdef OVERLOAD_R4 + real, parameter:: real_snan=x'FFBFFFFF' +#else + real, parameter:: real_snan=x'FFF7FFFFFFFFFFFF' +#endif + + logical :: debug_log = .false. + logical :: move_physics = .true. ! Always true, unless developer sets move_physics to .False. here for debugging. + logical :: move_nsst = .true. ! Value is reset in fv_moving_nest_main.F90 from namelist options + + ! Persistent variables to enable debug printing after range warnings. + type (fv_atmos_type), pointer :: save_Atm_n + type (block_control_type), pointer :: save_Atm_block + type(IPD_control_type), pointer :: save_IPD_Control + type(IPD_data_type), pointer :: save_IPD_Data(:) + +#include + +contains + + !>@brief The subroutine 'mn_phys_reset_sfc_props' sets the static surface parameters from the high-resolution input file data + !>@details This subroutine relies on earlier code reading the data from files into the mn_static data structure + !! This subroutine does not yet handle ice points or frac_grid - fractional landfrac/oceanfrac values + subroutine mn_phys_reset_sfc_props(Atm, n, mn_static, Atm_block, IPD_data, ioffset, joffset, refine) + type(fv_atmos_type), intent(inout),allocatable :: Atm(:) !< Array of atmospheric data + integer, intent(in) :: n !< Current grid number + type(mn_surface_grids), intent(in) :: mn_static !< Static surface data + type(block_control_type), intent(in) :: Atm_block !< Physics block layout + type(IPD_data_type), intent(inout) :: IPD_data(:) !< Physics variable data + integer, intent(in) :: ioffset, joffset !< Current nest offset in i,j direction + integer, intent(in) :: refine !< Nest refinement ratio + + ! For iterating through physics/surface vector data + integer :: nb, blen, ix, i_pe, j_pe, i_idx, j_idx + real(kind=kind_phys) :: phys_oro + + ! Setup local land sea mask grid for masked interpolations + do i_pe = Atm(n)%bd%isd, Atm(n)%bd%ied + do j_pe = Atm(n)%bd%jsd, Atm(n)%bd%jed + i_idx = (ioffset-1)*refine + i_pe + j_idx = (joffset-1)*refine + j_pe + + Moving_nest(n)%mn_phys%slmsk(i_pe, j_pe) = mn_static%ls_mask_grid(i_idx, j_idx) + enddo + enddo + + ! Reset the variables from the fix_sfc files + do nb = 1,Atm_block%nblks + blen = Atm_block%blksz(nb) + do ix = 1, blen + i_pe = Atm_block%index(nb)%ii(ix) + j_pe = Atm_block%index(nb)%jj(ix) + + i_idx = (ioffset-1)*refine + i_pe + j_idx = (joffset-1)*refine + j_pe + + ! Reset the land sea mask from the hires parent data + IPD_data(nb)%Sfcprop%slmsk(ix) = mn_static%ls_mask_grid(i_idx, j_idx) + + ! IFD values are 0 for land, and 1 for oceans/lakes -- reverse of the land sea mask + ! Land Sea Mask has values of 0 for oceans/lakes, 1 for land, 2 for sea ice + ! 
TODO figure out what ifd should be for sea ice + if (mn_static%ls_mask_grid(i_idx, j_idx) .eq. 1 ) then + if (move_nsst) IPD_data(nb)%Sfcprop%ifd(ix) = 0 ! Land + IPD_data(nb)%Sfcprop%oceanfrac(ix) = 0 ! Land -- TODO permit fractions + IPD_data(nb)%Sfcprop%landfrac(ix) = 1 ! Land -- TODO permit fractions + else + if (move_nsst) IPD_data(nb)%Sfcprop%ifd(ix) = 1 ! Ocean + IPD_data(nb)%Sfcprop%oceanfrac(ix) = 1 ! Ocean -- TODO permit fractions + IPD_data(nb)%Sfcprop%landfrac(ix) = 0 ! Ocean -- TODO permit fractions + endif + + IPD_data(nb)%Sfcprop%tg3(ix) = mn_static%deep_soil_temp_grid(i_idx, j_idx) + + ! Follow logic from FV3/io/FV3GFS_io.F90 line 1187 + ! TODO this will need to be more complicated if we support frac_grid + !if (nint(mn_static%soil_type_grid(i_idx, j_idx)) == 14 .or. int(mn_static%soil_type_grid(i_idx, j_idx)+0.5) <= 0) then + !if (nint(mn_static%soil_type_grid(i_idx, j_idx)) == 14 .or. + + !if ( (mn_static%ls_mask_grid(i_idx, j_idx) .eq. 1 .and. nint(mn_static%land_frac_grid(i_idx, j_idx)) == 0) .or. & + ! mn_static%soil_type_grid(i_idx, j_idx) < 0.5) then + if (mn_static%ls_mask_grid(i_idx, j_idx) .eq. 1 .and. nint(mn_static%land_frac_grid(i_idx, j_idx)) == 0 ) then + ! Water soil type == lake, etc. -- override the other variables and make this water + print '("WDR mn_phys_reset_sfc_props LAKE SOIL npe=",I0," x,y=",I0,",",I0," lat=",F10.3," lon=",F10.3)', mpp_pe(), i_idx, j_idx, IPD_data(nb)%Grid%xlat_d(ix), IPD_data(nb)%Grid%xlon_d(ix)-360.0 + + if (move_nsst) IPD_data(nb)%Sfcprop%ifd(ix) = 1 ! Ocean + IPD_data(nb)%Sfcprop%oceanfrac(ix) = 1 ! Ocean -- TODO permit fractions + IPD_data(nb)%Sfcprop%landfrac(ix) = 0 ! Ocean -- TODO permit fractions + + IPD_data(nb)%Sfcprop%stype(ix) = 0 + IPD_data(nb)%Sfcprop%slmsk(ix) = 0 + else + IPD_data(nb)%Sfcprop%stype(ix) = nint(mn_static%soil_type_grid(i_idx, j_idx)) + endif + + !IPD_data(nb)%Sfcprop%vfrac(ix) = mn_static%veg_frac_grid(i_idx, j_idx) + IPD_data(nb)%Sfcprop%vtype(ix) = nint(mn_static%veg_type_grid(i_idx, j_idx)) + IPD_data(nb)%Sfcprop%slope(ix) = nint(mn_static%slope_type_grid(i_idx, j_idx)) + IPD_data(nb)%Sfcprop%snoalb(ix) = mn_static%max_snow_alb_grid(i_idx, j_idx) + + IPD_data(nb)%Sfcprop%facsf(ix) = mn_static%facsf_grid(i_idx, j_idx) + IPD_data(nb)%Sfcprop%facwf(ix) = mn_static%facwf_grid(i_idx, j_idx) + + IPD_data(nb)%Sfcprop%alvsf(ix) = mn_static%alvsf_grid(i_idx, j_idx) + IPD_data(nb)%Sfcprop%alvwf(ix) = mn_static%alvwf_grid(i_idx, j_idx) + IPD_data(nb)%Sfcprop%alnsf(ix) = mn_static%alnsf_grid(i_idx, j_idx) + IPD_data(nb)%Sfcprop%alnwf(ix) = mn_static%alnwf_grid(i_idx, j_idx) + + ! Reset the orography in the physics arrays, using the smoothed values from above + phys_oro = Atm(n)%phis(i_pe, j_pe) / grav + IPD_data(nb)%Sfcprop%oro(ix) = phys_oro + IPD_data(nb)%Sfcprop%oro_uf(ix) = phys_oro + + enddo + enddo + + end subroutine mn_phys_reset_sfc_props + + !>@brief The subroutine 'mn_phys_reset_phys_latlon' sets the lat/lons from the high-resolution input file data + !>@details This subroutine sets lat/lons of the moved nest, then recalculates all the derived quantities (dx,dy,etc.) 
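+ !! A sketch of the supergrid relationship assumed below (illustrative, 1-based indices):
+ !! A-grid cell (i,j) of the full panel is centred at supergrid point (2*i, 2*j), so its
+ !! cell area is the sum of the four surrounding supergrid areas,
+ !!   area(i,j) = a(2*i-1,2*j-1) + a(2*i-1,2*j) + a(2*i,2*j-1) + a(2*i,2*j),
+ !! which the fp_i/fp_j offsets from find_nest_alignment reproduce for the shifted nest
+ !! before GFS_grid_populate rebuilds the physics grid from the new lats/lons/areas.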
+ subroutine mn_reset_phys_latlon(Atm, n, tile_geo, fp_super_tile_geo, Atm_block, IPD_control, IPD_data) + type(fv_atmos_type), intent(in) :: Atm(:) !< Array of atmospheric data + integer, intent(in) :: n !< Current grid number + type(grid_geometry), intent(in) :: tile_geo !< Bounds of this grid + type(grid_geometry), intent(in) :: fp_super_tile_geo !< Bounds of high-resolution parent grid + type(block_control_type), intent(in) :: Atm_block !< Physics block layout + type(IPD_control_type), intent(in) :: IPD_control !< Physics metadata + type(IPD_data_type), intent(inout) :: IPD_data(:) !< Physics variable data + + integer :: isc, jsc, iec, jec + integer :: x, y, fp_i, fp_j + integer :: nest_x, nest_y, parent_x, parent_y + integer :: this_pe + + real(kind=kind_phys), allocatable :: lats(:,:), lons(:,:), area(:,:) + + this_pe = mpp_pe() + + isc = Atm(n)%bd%isc + jsc = Atm(n)%bd%jsc + iec = Atm(n)%bd%iec + jec = Atm(n)%bd%jec + + allocate(lats(isc:iec, jsc:jec)) + allocate(lons(isc:iec, jsc:jec)) + allocate(area(isc:iec, jsc:jec)) + + ! This is going to be slow -- replace with better way to calculate index offsets, or pass them from earlier calculations + ! TODO optimization here + call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + !print '("WDR mn_reset_phys_latlon AB npe=",I0)', this_pe + + do x = isc, iec + do y = jsc, jec + fp_i = (x - nest_x) * 2 + parent_x + fp_j = (y - nest_y) * 2 + parent_y + + lons(x,y) = fp_super_tile_geo%lons(fp_i, fp_j) + lats(x,y) = fp_super_tile_geo%lats(fp_i, fp_j) + + ! Need to add the areas from 4 squares, because the netCDF file has areas calculated for the supergrid cells + ! We need the area of the whole center of the cell. + ! Example dimensions for C288_grid.tile6.nc + ! longitude -- x(577,577) + ! latitude -- y(577,577) + ! area -- x(576,576) + + ! Extracting lat/lon/area from Supergrid + ! + ! 1,1----2,1----3,1 + ! | | | + ! | a1,1 | a2,1 | + ! | | | + ! 1,2----2,2----3,2 + ! | | | + ! | a1,2 | a2,2 | + ! | | | + ! 1,3----2,3----3,3 + ! + ! The model A-grid cell 1,1 is centered at supergrid location 2,2 + ! The area of the A-grid cell is the sum of the 4 supergrid areas A = a(1,1) + a(1,2) + a(2,1) + a(2,2) + + area(x,y) = fp_super_tile_geo%area(fp_i - 1, fp_j - 1) + fp_super_tile_geo%area(fp_i - 1, fp_j) + & + fp_super_tile_geo%area(fp_i, fp_j - 1) + fp_super_tile_geo%area(fp_i, fp_j) ! TODO make sure these offsets are correct. + enddo + enddo + + call GFS_grid_populate(IPD_data%Grid, lons, lats, area) + + deallocate(lats) + deallocate(lons) + deallocate(area) + + end subroutine mn_reset_phys_latlon + + !>@brief The subroutine 'dump_surface_physics' outputs surface physics data for a given point and its neighbors to stdout + !>@details This subroutine is appropriate to be called for debugging when range warnings are detected, in tools/fv_diagnostics.F90. + subroutine dump_surface_physics(i_out, j_out, k_out) + integer, intent(in) :: i_out, j_out, k_out !< i,j,k values of point to output + + integer :: nb, blen, ix, i, j, k, kk + integer :: this_pe + + this_pe = mpp_pe() + + if (associated(save_Atm_block)) then + print '("WDR dump_surface_physics npe=",I0)', this_pe + else + print '("WDR dump_surface_physics RANGE RETURN npe=",I0)', this_pe + return + end if + + k = k_out + + do nb = 1,save_Atm_block%nblks + blen = save_Atm_block%blksz(nb) + do ix = 1, blen + ! Get the indices only once, before iterating through vertical levels or number of variables + ! 
Was there a different efficiency from having the k loop outside? + i = save_Atm_block%index(nb)%ii(ix) + j = save_Atm_block%index(nb)%jj(ix) + + if (i .ge. i_out-2 .and. i .le. i_out+2 .and. j .ge. j_out-2 .and. j .le. j_out+2) then + + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") slmsk=",F8.5, " lakefrac=",F10.5, " lakedepth=",F14.5, " landfrac=",F10.5, " oro=",F10.5, " oro_uf=",F10.5)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%slmsk(ix), save_IPD_Data(nb)%Sfcprop%lakefrac(ix), save_IPD_Data(nb)%Sfcprop%lakedepth(ix), save_IPD_Data(nb)%Sfcprop%landfrac(ix), save_IPD_Data(nb)%Sfcprop%oro(ix), save_IPD_Data(nb)%Sfcprop%oro_uf(ix) + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") oro=",F10.5, " oro_uf=",F10.5, " phis/g=",F10.5, " slope=",I0)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%oro(ix), save_IPD_Data(nb)%Sfcprop%oro_uf(ix), save_Atm_n%phis(i,j)/grav, save_IPD_Data(nb)%Sfcprop%slope(ix) + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") emis_lnd=",F10.4," emis_ice=",F10.4," emis_wat=",F10.4," hflx=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%emis_lnd(ix), save_IPD_Data(nb)%Sfcprop%emis_ice(ix), save_IPD_Data(nb)%Sfcprop%emis_wat(ix), save_IPD_Data(nb)%Sfcprop%hflx(ix) + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") albdirvis_lnd=",F10.4," albdirnir_lnd=",F10.4," albdifvis_lnd=",F10.4," albdifnir_lnd=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%albdirvis_lnd(ix), save_IPD_Data(nb)%Sfcprop%albdirnir_lnd(ix), save_IPD_Data(nb)%Sfcprop%albdifvis_lnd(ix), save_IPD_Data(nb)%Sfcprop%albdifnir_lnd(ix) + !print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") albdirvis_ice=",F10.4," albdirnir_ice=",F10.4," albdifvis_ice=",F10.4," albdifnir_ice=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%albdirvis_ice(ix), save_IPD_Data(nb)%Sfcprop%albdirnir_ice(ix), save_IPD_Data(nb)%Sfcprop%albdifvis_ice(ix), save_IPD_Data(nb)%Sfcprop%albdifnir_ice(ix) + if (associated(save_IPD_Data(nb)%Sfcprop%qss)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") qss=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%qss(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%evap)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") evap=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%evap(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%sncovr)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") sncovr",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%sncovr(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%sncovr_ice)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") sncovr_ice",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%sncovr_ice(ix) + endif + if (associated(save_IPD_Data(nb)%Intdiag%total_albedo)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") Intdiag%total_albedo=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Intdiag%total_albedo(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%ifd)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") ifd=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%ifd(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%semisbase)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") semisbase=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%semisbase(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%sfalb_lnd)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") sfalb_lnd=",F10.4)', this_pe, i, j, 
save_IPD_Data(nb)%Sfcprop%sfalb_lnd(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%sfalb_ice)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") sfalb_ice=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%sfalb_ice(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%sfalb_lnd_bck)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") sfalb_lnd_bck=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%sfalb_lnd_bck(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%emis_lnd)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") emis_lnd=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%emis_lnd(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%emis_ice)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") emis_ice=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%emis_ice(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%emis_wat)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") emis_wat=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%emis_wat(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%tvxy)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") veg temp tvxy=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%tvxy(ix) + endif + + if (associated(save_IPD_Data(nb)%Sfcprop%tgxy)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") ground temp tgxy=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%tgxy(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%tg3)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") deep soil temp tg3=",F10.4," slmsk=",F8.3)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%tg3(ix), save_IPD_Data(nb)%Sfcprop%slmsk(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%alboldxy)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") alboldxy=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%alboldxy(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%shdmin)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") shdmin=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%shdmin(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%shdmax)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") shdmax=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%shdmax(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%stype)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") stype=",I0)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%stype(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%vtype)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") vtype=",I0)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%vtype(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%stype_save)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") stype_save=",I0)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%stype_save(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%vtype_save)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") vtype_save=",I0)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%vtype_save(ix) + endif + do kk = 1, save_IPD_Control%nmtvr + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") hprime(",I0,")=",F10.4)', this_pe, i, j, kk, save_IPD_Data(nb)%Sfcprop%hprime(ix,kk) + enddo + if (associated(save_IPD_Data(nb)%Sfcprop%snowd)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") snowd=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%snowd(ix) + endif + 
if (associated(save_IPD_Data(nb)%Sfcprop%weasd)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") weasd=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%weasd(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%ffmm)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") ffmm=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%ffmm(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%ffhh)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") ffhh=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%ffhh(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%f10m)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") f10m=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%f10m(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%uustar)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") uustar=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%uustar(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%z0base)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") z0base=",F18.6)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%z0base(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%zorl)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") zorl=",F18.6," ",E15.6)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%zorl(ix), save_IPD_Data(nb)%Sfcprop%zorl(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%zorlw)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") zorlw=",F18.6," ",E15.6)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%zorlw(ix), save_IPD_Data(nb)%Sfcprop%zorlw(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%zorll)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") zorll=",F18.6," ",E15.6)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%zorll(ix), save_IPD_Data(nb)%Sfcprop%zorll(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%zorli)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") zorli=",F15.6," ",E15.6)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%zorli(ix), save_IPD_Data(nb)%Sfcprop%zorli(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%zorlwav)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") zorlwav=",F15.6," ",E15.6)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%zorlwav(ix), save_IPD_Data(nb)%Sfcprop%zorlwav(ix) + endif + if (associated(save_IPD_Data(nb)%Coupling%tsfc_radtime)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") Coupling%tsfc_radtime=",F15.6)', this_pe, i, j, save_IPD_Data(nb)%Coupling%tsfc_radtime(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%canopy)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") canopy=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%canopy(ix) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%vfrac)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") vfrac=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%vfrac(ix) + endif + if (associated(save_IPD_Data(nb)%Radtend%sfalb)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") Radtend%sfalb=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Radtend%sfalb(ix) + endif + if (associated(save_IPD_Data(nb)%Radtend%semis)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") Radtend%semis=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Radtend%semis(ix) + endif + if (associated(save_IPD_Data(nb)%Radtend%sfcfsw)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") 
Radtend%sfcfsw upfxc=",F10.4," upfx0=",F10.4," dnfxc=",F10.4," dnfx0=",F10.4)', & + this_pe, i, j, save_IPD_Data(nb)%Radtend%sfcfsw(ix)%upfxc, save_IPD_Data(nb)%Radtend%sfcfsw(ix)%upfx0, save_IPD_Data(nb)%Radtend%sfcfsw(ix)%dnfxc, save_IPD_Data(nb)%Radtend%sfcfsw(ix)%dnfx0 + endif + if (associated(save_IPD_Data(nb)%Radtend%sfcflw)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") Radtend%sfcflw upfxc=",F10.4," upfx0=",F10.4," dnfxc=",F10.4," dnfx0=",F10.4)', & + this_pe, i, j, save_IPD_Data(nb)%Radtend%sfcflw(ix)%upfxc, save_IPD_Data(nb)%Radtend%sfcflw(ix)%upfx0, save_IPD_Data(nb)%Radtend%sfcflw(ix)%dnfxc, save_IPD_Data(nb)%Radtend%sfcflw(ix)%dnfx0 + endif + if (associated(save_IPD_Data(nb)%Radtend%coszen)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") Radtend%coszen=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Radtend%coszen(ix) + endif + if (associated(save_IPD_Data(nb)%Radtend%coszdg)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") Radtend%coszdg=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Radtend%coszdg(ix) + endif + + !if (associated(save_IPD_Data(nb)%Sfcprop%semisbase)) then + ! print '("[INFO] WDR RANGEP AA this_pe= ",I0)', this_pe + ! !if (associated (save_IPD_Data(nb)%Sfcprop%sfalb_lnd)) then + ! print '("[INFO] WDR RANGEP AB this_pe= ",I0)', this_pe + ! !if (associated(save_IPD_Data(nb)%Sfcprop%sfalb_ice)) then + ! print '("[INFO] WDR RANGEP AC this_pe= ",I0)', this_pe + ! if (associated(save_IPD_Data(nb)%Sfcprop%sfalb_lnd_bck)) then + ! !print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") semisbase=",F10.4," sfalb_lnd=",F10.4," sfalb_ice=",F10.4," sfalb_lnd_bck=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%semisbase(ix), save_IPD_Data(nb)%Sfcprop%sfalb_lnd(ix), save_IPD_Data(nb)%Sfcprop%sfalb_ice(ix), save_IPD_Data(nb)%Sfcprop%sfalb_lnd_bck(ix) + ! print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") semisbase=",F10.4," sfalb_lnd_bck=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%semisbase(ix), save_IPD_Data(nb)%Sfcprop%sfalb_lnd_bck(ix) + ! endif + ! !endif + ! 
!endif + !endif + + if (associated(save_IPD_Data(nb)%Sfcprop%alvsf)) then + !print '("[INFO] WDR RANGEP BA this_pe= ",I0)', this_pe + if (associated(save_IPD_Data(nb)%Sfcprop%alnsf)) then + !print '("[INFO] WDR RANGEP BB this_pe= ",I0)', this_pe + if (associated(save_IPD_Data(nb)%Sfcprop%alvwf)) then + !print '("[INFO] WDR RANGEP BC this_pe= ",I0)', this_pe + if (associated(save_IPD_Data(nb)%Sfcprop%alnwf)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") alvsf=",F10.4," alnsf=",F10.4," alvwf=",F10.4," alnwf=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%alvsf(ix), save_IPD_Data(nb)%Sfcprop%alnsf(ix), save_IPD_Data(nb)%Sfcprop%alvwf(ix), save_IPD_Data(nb)%Sfcprop%alnwf(ix) + endif + endif + endif + endif + + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") sncovr=",F10.4," snoalb=",F10.4," facsf=",F10.4," facwf=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%sncovr(ix), save_IPD_Data(nb)%Sfcprop%snoalb(ix), save_IPD_Data(nb)%Sfcprop%facsf(ix), save_IPD_Data(nb)%Sfcprop%facwf(ix) + + if (associated(save_IPD_Data(nb)%Sfcprop%t2m)) then + if (associated(save_IPD_Data(nb)%Sfcprop%th2m)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") t2m=",F10.4," th2m=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%t2m(ix), save_IPD_Data(nb)%Sfcprop%th2m(ix) + else + print '("[INFO] WDR RANGEP CB this_pe= ",I0)', this_pe + endif + else + print '("[INFO] WDR RANGEP CA this_pe= ",I0)', this_pe + endif + + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") tsfc=",F10.4," tsfco=",F10.4," tsfcl=",F10.4," tisfc=",F10.4," stc1=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%tsfc(ix), save_IPD_Data(nb)%Sfcprop%tsfco(ix), save_IPD_Data(nb)%Sfcprop%tsfcl(ix), save_IPD_Data(nb)%Sfcprop%tisfc(ix), save_IPD_Data(nb)%Sfcprop%stc(ix,1) + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") psurf=",F10.4," t2m=",F10.4," th2m=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%IntDiag%psurf(ix), save_IPD_Data(nb)%Sfcprop%t2m(ix), save_IPD_Data(nb)%Sfcprop%th2m(ix) + + if (associated(save_IPD_Data(nb)%Sfcprop%slc)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") soil moist slc1=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%slc(ix,1) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%smc)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") tot soil moist smc1=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%smc(ix,1) + endif + if (associated(save_IPD_Data(nb)%Sfcprop%stc)) then + print '("[INFO] WDR RANGE this_pe= ",I0," i,j=(",I0,",",I0,") soil temp stc1=",F10.4)', this_pe, i, j, save_IPD_Data(nb)%Sfcprop%stc(ix,1) + endif + + endif + enddo + enddo + end subroutine dump_surface_physics + + !>@brief The subroutine 'mn_phys_fill_temp_variables' extracts 1D physics data into a 2D array for nest motion + !>@details This subroutine fills in the mn_phys structure on the Atm object with 2D arrays of physics/surface variables. + !! Note that ice variables are not yet handled. 
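+ !! A minimal sketch of the block-to-grid copy pattern this routine implements (all names are the ones
+ !! already used below; only tsfc is shown, and the same pattern is repeated for each surface/physics field):
+ !!   do nb = 1, Atm_block%nblks
+ !!     do ix = 1, Atm_block%blksz(nb)
+ !!       i = Atm_block%index(nb)%ii(ix)   ! A-grid i,j indices of this blocked point
+ !!       j = Atm_block%index(nb)%jj(ix)
+ !!       mn_phys%tsfc(i,j) = IPD_Data(nb)%Sfcprop%tsfc(ix)
+ !!     enddo
+ !!   enddo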
+ subroutine mn_phys_fill_temp_variables(Atm, Atm_block, IPD_Control, IPD_Data, n, child_grid_num, is_fine_pe, npz) + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Array of atmospheric data + type (block_control_type), target, intent(in) :: Atm_block !< Physics block layout + type(IPD_control_type), target, intent(in) :: IPD_Control !< Physics metadata + type(IPD_data_type), target, intent(inout) :: IPD_Data(:) !< Physics variable data + integer, intent(in) :: n, child_grid_num !< Current grid number, child grid number + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + integer, intent(in) :: npz !< Number of vertical levels + + integer :: isd, ied, jsd, jed + integer :: is, ie, js, je + integer :: this_pe + + integer :: nb, blen, i, j, k, ix, nv + type(fv_moving_nest_physics_type), pointer :: mn_phys + + this_pe = mpp_pe() + + save_Atm_n => Atm(n) + save_Atm_block => Atm_block + save_IPD_Control => IPD_Control + save_IPD_Data => IPD_Data + + if (debug_log) print '("[INFO] WDR start mn_phys_fill_temp_variables. npe=",I0," n=",I0)', this_pe, n + + isd = Atm(n)%bd%isd + ied = Atm(n)%bd%ied + jsd = Atm(n)%bd%jsd + jed = Atm(n)%bd%jed + + !if (is_fine_pe) call dump_surface_physics(isd+8, jsd+8, npz-1) + + if (debug_log) print '("[INFO] WDR mn_phys_fill_temp_variables. npe=",I0," isd=",I0," ied=",I0," jsd=",I0," jed=",I0)', this_pe, isd, ied, jsd, jed + + is = Atm(n)%bd%is + ie = Atm(n)%bd%ie + js = Atm(n)%bd%js + je = Atm(n)%bd%je + + if (debug_log) print '("[INFO] WDR mn_phys_fill_temp_variables. npe=",I0," is=",I0," ie=",I0," js=",I0," je=",I0)', this_pe, is, ie, js, je + + mn_phys => Moving_nest(n)%mn_phys + + mn_phys%ts(is:ie, js:je) = Atm(n)%ts(is:ie, js:je) + + do nb = 1,Atm_block%nblks + blen = Atm_block%blksz(nb) + do ix = 1, blen + ! 
Get the indices only once, before iterating through vertical levels or number of variables + i = Atm_block%index(nb)%ii(ix) + j = Atm_block%index(nb)%jj(ix) + + if (move_physics) then + do k = 1, IPD_Control%lsoil + mn_phys%smc(i,j,k) = IPD_Data(nb)%Sfcprop%smc(ix,k) + mn_phys%stc(i,j,k) = IPD_Data(nb)%Sfcprop%stc(ix,k) + mn_phys%slc(i,j,k) = IPD_Data(nb)%Sfcprop%slc(ix,k) + enddo + + mn_phys%emis_lnd(i,j) = IPD_Data(nb)%Sfcprop%emis_lnd(ix) + mn_phys%emis_ice(i,j) = IPD_Data(nb)%Sfcprop%emis_ice(ix) + mn_phys%emis_wat(i,j) = IPD_Data(nb)%Sfcprop%emis_wat(ix) + + !mn_phys%sfalb_lnd(i,j) = IPD_Data(nb)%Sfcprop%sfalb_lnd(ix) + !mn_phys%sfalb_lnd_bck(i,j) = IPD_Data(nb)%Sfcprop%sfalb_lnd_bck(ix) + !mn_phys%semis(i,j) = IPD_Data(nb)%Radtend%semis(ix) + !mn_phys%semisbase(i,j) = IPD_Data(nb)%Sfcprop%semisbase(ix) + !mn_phys%sfalb(i,j) = IPD_Data(nb)%Radtend%sfalb(ix) + + mn_phys%albdirvis_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdirvis_lnd(ix) + mn_phys%albdirnir_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdirnir_lnd(ix) + mn_phys%albdifvis_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdifvis_lnd(ix) + mn_phys%albdifnir_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdifnir_lnd(ix) + + mn_phys%u10m(i,j) = IPD_Data(nb)%IntDiag%u10m(ix) + mn_phys%v10m(i,j) = IPD_Data(nb)%IntDiag%v10m(ix) + mn_phys%tprcp(i,j) = IPD_Data(nb)%Sfcprop%tprcp(ix) + + do k = 1, IPD_Control%nmtvr + mn_phys%hprime(i,j,k) = IPD_Data(nb)%Sfcprop%hprime(ix,k) + enddo + + mn_phys%lakefrac(i,j) = IPD_Data(nb)%Sfcprop%lakefrac(ix) + mn_phys%lakedepth(i,j) = IPD_Data(nb)%Sfcprop%lakedepth(ix) + + mn_phys%canopy(i,j) = IPD_Data(nb)%Sfcprop%canopy(ix) + mn_phys%vegfrac(i,j)= IPD_Data(nb)%Sfcprop%vfrac(ix) + mn_phys%uustar(i,j) = IPD_Data(nb)%Sfcprop%uustar(ix) + mn_phys%shdmin(i,j) = IPD_Data(nb)%Sfcprop%shdmin(ix) + mn_phys%shdmax(i,j) = IPD_Data(nb)%Sfcprop%shdmax(ix) + mn_phys%zorl(i,j) = IPD_Data(nb)%Sfcprop%zorl(ix) + mn_phys%zorll(i,j) = IPD_Data(nb)%Sfcprop%zorll(ix) + mn_phys%zorlwav(i,j)= IPD_Data(nb)%Sfcprop%zorlwav(ix) + mn_phys%zorlw(i,j) = IPD_Data(nb)%Sfcprop%zorlw(ix) + mn_phys%tsfco(i,j) = IPD_Data(nb)%Sfcprop%tsfco(ix) + mn_phys%tsfcl(i,j) = IPD_Data(nb)%Sfcprop%tsfcl(ix) + mn_phys%tsfc(i,j) = IPD_Data(nb)%Sfcprop%tsfc(ix) + + mn_phys%albdirvis_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdirvis_lnd(ix) + mn_phys%albdirnir_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdirnir_lnd(ix) + mn_phys%albdifvis_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdifvis_lnd(ix) + mn_phys%albdifnir_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdifnir_lnd(ix) + + do nv = 1, IPD_Control%ntot2d + mn_phys%phy_f2d(i,j,nv) = IPD_Data(nb)%Tbd%phy_f2d(ix, nv) + enddo + + do k = 1, IPD_Control%levs + do nv = 1, IPD_Control%ntot3d + mn_phys%phy_f3d(i,j,k,nv) = IPD_Data(nb)%Tbd%phy_f3d(ix, k, nv) + enddo + enddo + + ! 
Cloud prop data has x,y dimensions
+          mn_phys%cv(i,j)  = IPD_Data(nb)%Cldprop%cv(ix)
+          mn_phys%cvt(i,j) = IPD_Data(nb)%Cldprop%cvt(ix)
+          mn_phys%cvb(i,j) = IPD_Data(nb)%Cldprop%cvb(ix)
+        endif
+
+        if (move_nsst) then
+          mn_phys%tref(i,j)    = IPD_Data(nb)%Sfcprop%tref(ix)
+          mn_phys%z_c(i,j)     = IPD_Data(nb)%Sfcprop%z_c(ix)
+          mn_phys%c_0(i,j)     = IPD_Data(nb)%Sfcprop%c_0(ix)
+          mn_phys%c_d(i,j)     = IPD_Data(nb)%Sfcprop%c_d(ix)
+          mn_phys%w_0(i,j)     = IPD_Data(nb)%Sfcprop%w_0(ix)
+          mn_phys%w_d(i,j)     = IPD_Data(nb)%Sfcprop%w_d(ix)
+          mn_phys%xt(i,j)      = IPD_Data(nb)%Sfcprop%xt(ix)
+          mn_phys%xs(i,j)      = IPD_Data(nb)%Sfcprop%xs(ix)
+          mn_phys%xu(i,j)      = IPD_Data(nb)%Sfcprop%xu(ix)
+          mn_phys%xv(i,j)      = IPD_Data(nb)%Sfcprop%xv(ix)
+          mn_phys%xz(i,j)      = IPD_Data(nb)%Sfcprop%xz(ix)
+          mn_phys%zm(i,j)      = IPD_Data(nb)%Sfcprop%zm(ix)
+          mn_phys%xtts(i,j)    = IPD_Data(nb)%Sfcprop%xtts(ix)
+          mn_phys%xzts(i,j)    = IPD_Data(nb)%Sfcprop%xzts(ix)
+          mn_phys%d_conv(i,j)  = IPD_Data(nb)%Sfcprop%d_conv(ix)
+          mn_phys%dt_cool(i,j) = IPD_Data(nb)%Sfcprop%dt_cool(ix)
+          mn_phys%qrain(i,j)   = IPD_Data(nb)%Sfcprop%qrain(ix)
+        endif
+      enddo
+    enddo
+
+    if (debug_log) print '("[INFO] WDR end mn_phys_fill_temp_variables. npe=",I0," n=",I0)', this_pe, n
+
+  end subroutine mn_phys_fill_temp_variables
+
+  !>@brief The subroutine 'mn_phys_apply_temp_variables' copies moved 2D data back into 1D physics arrays for nest motion
+  !>@details This subroutine fills the 1D physics arrays from the mn_phys structure of the Moving_nest object.
+  !! Note that ice variables are not yet handled.
+  subroutine mn_phys_apply_temp_variables(Atm, Atm_block, IPD_Control, IPD_Data, n, child_grid_num, is_fine_pe, npz)
+    type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Array of atmospheric data
+    type (block_control_type), intent(in) :: Atm_block !< Physics block layout
+    type(IPD_control_type), intent(in) :: IPD_Control !< Physics metadata
+    type(IPD_data_type), intent(inout) :: IPD_Data(:) !< Physics variable data
+    integer, intent(in) :: n, child_grid_num !< Current grid number, child grid number
+    logical, intent(in) :: is_fine_pe !< Is this a nest PE?
+    integer, intent(in) :: npz !< Number of vertical levels
+
+    integer :: is, ie, js, je
+    integer :: this_pe
+    integer :: nb, blen, i, j, k, ix, nv
+    integer :: bad_values, good_values
+    type(fv_moving_nest_physics_type), pointer :: mn_phys
+
+    this_pe = mpp_pe()
+    mn_phys => Moving_nest(n)%mn_phys
+
+    if (debug_log) print '("[INFO] WDR start mn_phys_apply_temp_variables. npe=",I0," n=",I0)', this_pe, n
+
+    ! Check if the variables were filled in properly.
+
+    if (debug_log) then
+      good_values = 0
+      bad_values = 0
+
+      if (is_fine_pe) then
+        do i = Atm(n)%bd%isd, Atm(n)%bd%ied
+          do j = Atm(n)%bd%jsd, Atm(n)%bd%jed
+            if (mn_phys%ts(i,j) .gt. 20000.0) then
+              print '("[WARN] WDR BAD NEST ts value. npe=",I0," ts(",I0,",",I0,")=",F12.3)', this_pe, i, j, mn_phys%ts(i,j)
+              bad_values = bad_values + 1
+            else
+              good_values = good_values + 1
+            endif
+          enddo
+        enddo
+      else
+        do i = Atm(n)%bd%is, Atm(n)%bd%ie
+          do j = Atm(n)%bd%js, Atm(n)%bd%je
+            if (mn_phys%ts(i,j) .gt. 20000.0) then
+              print '("[WARN] WDR BAD GLOBAL ts value. npe=",I0," ts(",I0,",",I0,")=",F12.3)', this_pe, i, j, mn_phys%ts(i,j)
+              bad_values = bad_values + 1
+            else
+              good_values = good_values + 1
+            endif
+          enddo
+        enddo
+      endif
+
+      i = Atm(n)%bd%is
+      j = Atm(n)%bd%js
+
+      print '("[WARN] WDR Surface ts value. npe=",I0," ts(",I0,",",I0,")=",F18.3)', this_pe, i, j, mn_phys%ts(i,j)
+
+      print '("[INFO] WDR ts values. 
npe=",I0," good_values=",I0," bad_values=",I0)', this_pe, good_values, bad_values + endif + + ! Needed to fill the local grids for parent and nest PEs in order to transmit/interpolate data from parent to nest + ! But only the nest PE's have changed the values with nest motion, so they are the only ones that need to update the original arrays + if (is_fine_pe) then + is = Atm(n)%bd%is + ie = Atm(n)%bd%ie + js = Atm(n)%bd%js + je = Atm(n)%bd%je + + if (debug_log) print '("[INFO] WDR mn_phys_apply_temp_variables. npe=",I0," is=",I0," ie=",I0," js=",I0," je=",I0)', this_pe, is, ie, js, je + + ! SST directly in Atm structure + Atm(n)%ts(is:ie, js:je) = mn_phys%ts(is:ie, js:je) + + do nb = 1,Atm_block%nblks + blen = Atm_block%blksz(nb) + do ix = 1, blen + i = Atm_block%index(nb)%ii(ix) + j = Atm_block%index(nb)%jj(ix) + + if (move_physics) then + ! Surface properties + do k = 1, IPD_Control%lsoil + IPD_Data(nb)%Sfcprop%smc(ix,k) = mn_phys%smc(i,j,k) + IPD_Data(nb)%Sfcprop%stc(ix,k) = mn_phys%stc(i,j,k) + IPD_Data(nb)%Sfcprop%slc(ix,k) = mn_phys%slc(i,j,k) + enddo + + ! WDR EMIS PATCH - Force to positive at all locations. + if (mn_phys%emis_lnd(i,j) .ge. 0.0) then + IPD_Data(nb)%Sfcprop%emis_lnd(ix) = mn_phys%emis_lnd(i,j) + else + IPD_Data(nb)%Sfcprop%emis_lnd(ix) = 0.5 + endif + if (mn_phys%emis_ice(i,j) .ge. 0.0) then + IPD_Data(nb)%Sfcprop%emis_ice(ix) = mn_phys%emis_ice(i,j) + else + IPD_Data(nb)%Sfcprop%emis_ice(ix) = 0.5 + endif + if (mn_phys%emis_wat(i,j) .ge. 0.0) then + IPD_Data(nb)%Sfcprop%emis_wat(ix) = mn_phys%emis_wat(i,j) + else + IPD_Data(nb)%Sfcprop%emis_wat(ix) = 0.5 + endif + + !IPD_Data(nb)%Sfcprop%sfalb_lnd(ix) = mn_phys%sfalb_lnd(i,j) + !IPD_Data(nb)%Sfcprop%sfalb_lnd_bck(ix) = mn_phys%sfalb_lnd_bck(i,j) + !IPD_Data(nb)%Radtend%semis(ix) = mn_phys%semis(i,j) + !IPD_Data(nb)%Sfcprop%semisbase(ix) = mn_phys%semisbase(i,j) + !IPD_Data(nb)%Radtend%sfalb(ix) = mn_phys%sfalb(i,j) + + IPD_Data(nb)%IntDiag%u10m(ix) = mn_phys%u10m(i,j) + IPD_Data(nb)%IntDiag%v10m(ix) = mn_phys%v10m(i,j) + IPD_Data(nb)%Sfcprop%tprcp(ix) = mn_phys%tprcp(i,j) + + do k = 1, IPD_Control%nmtvr + IPD_Data(nb)%Sfcprop%hprime(ix,k) = mn_phys%hprime(i,j,k) + enddo + + IPD_Data(nb)%Sfcprop%lakefrac(ix) = mn_phys%lakefrac(i,j) + IPD_Data(nb)%Sfcprop%lakedepth(ix) = mn_phys%lakedepth(i,j) + + IPD_Data(nb)%Sfcprop%canopy(ix) = mn_phys%canopy(i,j) + IPD_Data(nb)%Sfcprop%vfrac(ix) = mn_phys%vegfrac(i,j) + IPD_Data(nb)%Sfcprop%uustar(ix) = mn_phys%uustar(i,j) + IPD_Data(nb)%Sfcprop%shdmin(ix) = mn_phys%shdmin(i,j) + IPD_Data(nb)%Sfcprop%shdmax(ix) = mn_phys%shdmax(i,j) + + ! Set roughness lengths to physically reasonable values if they have fill value (possible at coastline) + ! sea/land mask array (sea:0,land:1,sea-ice:2) + if (nint(IPD_data(nb)%Sfcprop%slmsk(ix)) .eq. 1 .and. mn_phys%zorll(i,j) .gt. 1e6) then + IPD_Data(nb)%Sfcprop%zorll(ix) = 82.0 ! + else + IPD_Data(nb)%Sfcprop%zorll(ix) = mn_phys%zorll(i,j) + endif + + if (nint(IPD_data(nb)%Sfcprop%slmsk(ix)) .eq. 0 .and. mn_phys%zorlw(i,j) .gt. 1e6) then + IPD_Data(nb)%Sfcprop%zorlw(ix) = 83.0 ! + else + IPD_Data(nb)%Sfcprop%zorlw(ix) = mn_phys%zorlw(i,j) + endif + + if (nint(IPD_data(nb)%Sfcprop%slmsk(ix)) .eq. 0 .and. mn_phys%zorlwav(i,j) .gt. 1e6) then + IPD_Data(nb)%Sfcprop%zorlwav(ix) = 84.0 ! + else + IPD_Data(nb)%Sfcprop%zorlwav(ix) = mn_phys%zorlwav(i,j) + endif + + if (mn_phys%zorl(i,j) .gt. 
1e6) then + IPD_Data(nb)%Sfcprop%zorl(ix) = 85.0 + else + IPD_Data(nb)%Sfcprop%zorl(ix) = mn_phys%zorl(i,j) + endif + + IPD_Data(nb)%Sfcprop%tsfco(ix) = mn_phys%tsfco(i,j) + IPD_Data(nb)%Sfcprop%tsfcl(ix) = mn_phys%tsfcl(i,j) + IPD_Data(nb)%Sfcprop%tsfc(ix) = mn_phys%tsfc(i,j) + + ! Set albedo values to physically reasonable values if they have negative fill values. + if (mn_phys%albdirvis_lnd (i,j) .ge. 0.0) then + IPD_Data(nb)%Sfcprop%albdirvis_lnd (ix) = mn_phys%albdirvis_lnd (i,j) + else + IPD_Data(nb)%Sfcprop%albdirvis_lnd (ix) = 0.5 + endif + + if (mn_phys%albdirnir_lnd (i,j) .ge. 0.0) then + IPD_Data(nb)%Sfcprop%albdirnir_lnd (ix) = mn_phys%albdirnir_lnd (i,j) + else + IPD_Data(nb)%Sfcprop%albdirnir_lnd (ix) = 0.5 + endif + + if (mn_phys%albdifvis_lnd (i,j) .ge. 0.0) then + IPD_Data(nb)%Sfcprop%albdifvis_lnd (ix) = mn_phys%albdifvis_lnd (i,j) + else + IPD_Data(nb)%Sfcprop%albdifvis_lnd (ix) = 0.5 + endif + + if (mn_phys%albdifnir_lnd (i,j) .ge. 0.0) then + IPD_Data(nb)%Sfcprop%albdifnir_lnd (ix) = mn_phys%albdifnir_lnd (i,j) + else + IPD_Data(nb)%Sfcprop%albdifnir_lnd (ix) = 0.5 + endif + + ! Cloud properties + IPD_Data(nb)%Cldprop%cv(ix) = mn_phys%cv(i,j) + IPD_Data(nb)%Cldprop%cvt(ix) = mn_phys%cvt(i,j) + IPD_Data(nb)%Cldprop%cvb(ix) = mn_phys%cvb(i,j) + + do nv = 1, IPD_Control%ntot2d + IPD_Data(nb)%Tbd%phy_f2d(ix, nv) = mn_phys%phy_f2d(i,j,nv) + enddo + + do k = 1, IPD_Control%levs + do nv = 1, IPD_Control%ntot3d + IPD_Data(nb)%Tbd%phy_f3d(ix, k, nv) = mn_phys%phy_f3d(i,j,k,nv) + enddo + enddo + endif + + if (move_nsst) then + IPD_Data(nb)%Sfcprop%tref(ix) = mn_phys%tref(i,j) + IPD_Data(nb)%Sfcprop%z_c(ix) = mn_phys%z_c(i,j) + IPD_Data(nb)%Sfcprop%c_0(ix) = mn_phys%c_0(i,j) + IPD_Data(nb)%Sfcprop%c_d(ix) = mn_phys%c_d(i,j) + IPD_Data(nb)%Sfcprop%w_0(ix) = mn_phys%w_0(i,j) + IPD_Data(nb)%Sfcprop%w_d(ix) = mn_phys%w_d(i,j) + IPD_Data(nb)%Sfcprop%xt(ix) = mn_phys%xt(i,j) + IPD_Data(nb)%Sfcprop%xs(ix) = mn_phys%xs(i,j) + IPD_Data(nb)%Sfcprop%xu(ix) = mn_phys%xu(i,j) + IPD_Data(nb)%Sfcprop%xv(ix) = mn_phys%xv(i,j) + IPD_Data(nb)%Sfcprop%xz(ix) = mn_phys%xz(i,j) + IPD_Data(nb)%Sfcprop%zm(ix) = mn_phys%zm(i,j) + IPD_Data(nb)%Sfcprop%xtts(ix) = mn_phys%xtts(i,j) + IPD_Data(nb)%Sfcprop%xzts(ix) = mn_phys%xzts(i,j) + IPD_Data(nb)%Sfcprop%d_conv(ix) = mn_phys%d_conv(i,j) + IPD_Data(nb)%Sfcprop%dt_cool(ix) = mn_phys%dt_cool(i,j) + IPD_Data(nb)%Sfcprop%qrain(ix) = mn_phys%qrain(i,j) + endif + + ! Check if stype and vtype are properly set for land points. Set to reasonable values if they have fill values. + if ( (int(IPD_data(nb)%Sfcprop%slmsk(ix)) .eq. 1) ) then + + if (IPD_data(nb)%Sfcprop%vtype(ix) .lt. 0.5) then + print '("[INFO] WDR FIXPHYS resetting vtype from 0. npe=",I0," i,j=",I0,",",I0," lat=",F10.3," lon=",F10.3)', this_pe, i,j, IPD_data(nb)%Grid%xlat_d(ix), IPD_data(nb)%Grid%xlon_d(ix)-360.0 + IPD_data(nb)%Sfcprop%vtype(ix) = 7 ! Force to grassland + endif + + if (IPD_data(nb)%Sfcprop%stype(ix) .lt. 0.5) then + print '("[INFO] WDR FIXPHYS resetting stype from 0. npe=",I0," i,j=",I0,",",I0," lat=",F10.3," lon=",F10.3)', this_pe, i,j, IPD_data(nb)%Grid%xlat_d(ix), IPD_data(nb)%Grid%xlon_d(ix)-360.0 + IPD_data(nb)%Sfcprop%stype(ix) = 3 ! Force to sandy loam + endif + + if (IPD_data(nb)%Sfcprop%vtype_save(ix) .lt. 0.5) then + IPD_data(nb)%Sfcprop%vtype_save(ix) = 7 ! Force to grassland + endif + if (IPD_data(nb)%Sfcprop%stype_save(ix) .lt. 0.5) then + IPD_data(nb)%Sfcprop%stype_save(ix) = 3 ! 
Force to sandy loam + endif + + endif + enddo + enddo + endif + + if (debug_log) print '("[INFO] WDR end mn_phys_apply_temp_variables. npe=",I0," n=",I0)', this_pe, n + + end subroutine mn_phys_apply_temp_variables + + + !>@brief The subroutine 'mn_physfill_nest_halos_from_parent' transfers data from the coarse grid to the nest edge + !>@details This subroutine must run on parent and nest PEs to complete the data transfers + subroutine mn_phys_fill_nest_halos_from_parent(Atm, IPD_Control, IPD_Data, mn_static, n, child_grid_num, is_fine_pe, nest_domain, nz) + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Array of atmospheric data + type(IPD_control_type), intent(in) :: IPD_Control !< Physics metadata + type(IPD_data_type), intent(inout) :: IPD_Data(:) !< Physics variable data + type(mn_surface_grids), intent(in) :: mn_static !< Static data + integer, intent(in) :: n, child_grid_num !< Current grid number, child grid number + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain for FMS + integer, intent(in) :: nz !< Number of vertical levels + + integer :: position, position_u, position_v + integer :: interp_type, interp_type_u, interp_type_v, interp_type_lmask + integer :: x_refine, y_refine + type(fv_moving_nest_physics_type), pointer :: mn_phys + + interp_type = 1 ! cell-centered A-grid + interp_type_u = 4 ! D-grid + interp_type_v = 4 ! D-grid + interp_type_lmask = 7 ! land mask, cell-centered A-grid + + position = CENTER + position_u = NORTH + position_v = EAST + + x_refine = Atm(child_grid_num)%neststruct%refinement + y_refine = x_refine + + mn_phys => Moving_nest(n)%mn_phys + + ! Fill centered-grid variables + + call fill_nest_halos_from_parent("ts", mn_phys%ts, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + + if (move_physics) then + call fill_nest_halos_from_parent("smc", mn_phys%smc, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, IPD_Control%lsoil) + call fill_nest_halos_from_parent("stc", mn_phys%stc, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, IPD_Control%lsoil) + call fill_nest_halos_from_parent("slc", mn_phys%slc, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, IPD_Control%lsoil) + + call fill_nest_halos_from_parent("phy_f2d", mn_phys%phy_f2d, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, IPD_Control%ntot2d) + + call fill_nest_halos_from_parent("phy_f3d", mn_phys%phy_f3d, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, IPD_Control%levs) + + !! Surface variables + + !call fill_nest_halos_from_parent("sfalb_lnd", mn_phys%sfalb_lnd, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + ! Atm(child_grid_num)%neststruct%ind_h, & + ! x_refine, y_refine, & + ! is_fine_pe, nest_domain, position) + + ! 
sea/land mask array (sea:0,land:1,sea-ice:2) + + call fill_nest_halos_from_parent_masked("emis_lnd", mn_phys%emis_lnd, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 1, 0.5D0) + + call fill_nest_halos_from_parent_masked("emis_ice", mn_phys%emis_ice, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 2, 0.5D0) + + call fill_nest_halos_from_parent_masked("emis_wat", mn_phys%emis_wat, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 0, 0.5D0) + + !call fill_nest_halos_from_parent("sfalb_lnd_bck", mn_phys%sfalb_lnd_bck, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + ! Atm(child_grid_num)%neststruct%ind_h, & + ! x_refine, y_refine, & + ! is_fine_pe, nest_domain, position) + + + !call fill_nest_halos_from_parent("semis", mn_phys%semis, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + ! Atm(child_grid_num)%neststruct%ind_h, & + ! x_refine, y_refine, & + ! is_fine_pe, nest_domain, position) + !call fill_nest_halos_from_parent("semisbase", mn_phys%semisbase, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + ! Atm(child_grid_num)%neststruct%ind_h, & + ! x_refine, y_refine, & + ! is_fine_pe, nest_domain, position) + !call fill_nest_halos_from_parent("sfalb", mn_phys%sfalb, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + ! Atm(child_grid_num)%neststruct%ind_h, & + ! x_refine, y_refine, & + ! is_fine_pe, nest_domain, position) + + + call fill_nest_halos_from_parent("u10m", mn_phys%u10m, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("v10m", mn_phys%v10m, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("tprcp", mn_phys%tprcp, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + + call fill_nest_halos_from_parent("hprime", mn_phys%hprime, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, IPD_Control%nmtvr) + + call fill_nest_halos_from_parent("lakefrac", mn_phys%lakefrac, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("lakedepth", mn_phys%lakedepth, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + + call fill_nest_halos_from_parent("canopy", mn_phys%canopy, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("vegfrac", mn_phys%vegfrac, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("uustar", 
mn_phys%uustar, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("shdmin", mn_phys%shdmin, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("shdmax", mn_phys%shdmax, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("zorl", mn_phys%zorl, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + + call fill_nest_halos_from_parent_masked("zorll", mn_phys%zorll, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 1, 86.0D0) + call fill_nest_halos_from_parent_masked("zorlwav", mn_phys%zorlwav, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 0, 77.0D0) + call fill_nest_halos_from_parent_masked("zorlw", mn_phys%zorlw, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 0, 78.0D0) + + call fill_nest_halos_from_parent("tsfco", mn_phys%tsfco, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("tsfcl", mn_phys%tsfcl, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("tsfc", mn_phys%tsfc, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + + call fill_nest_halos_from_parent_masked("albdirvis_lnd", mn_phys%albdirvis_lnd, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 1, 0.5D0) + call fill_nest_halos_from_parent_masked("albdirnir_lnd", mn_phys%albdirnir_lnd, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 1, 0.5D0) + call fill_nest_halos_from_parent_masked("albdifvis_lnd", mn_phys%albdifvis_lnd, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 1, 0.5D0) + call fill_nest_halos_from_parent_masked("albdifnir_lnd", mn_phys%albdifnir_lnd, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 1, 0.5D0) + + + + call fill_nest_halos_from_parent("cv", mn_phys%cv, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("cvt", 
mn_phys%cvt, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("cvb", mn_phys%cvb, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + endif + + if (move_nsst) then + + call fill_nest_halos_from_parent("tref", mn_phys%tref, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("z_c", mn_phys%z_c, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("c_0", mn_phys%c_0, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("c_d", mn_phys%c_d, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("w_0", mn_phys%w_0, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("w_d", mn_phys%w_d, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("xt", mn_phys%xt, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("xs", mn_phys%xs, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("xu", mn_phys%xu, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("xv", mn_phys%xv, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("xz", mn_phys%xz, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("zm", mn_phys%zm, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("xtts", mn_phys%xtts, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("xzts", mn_phys%xzts, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("d_conv", mn_phys%d_conv, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, 
position)
+      call fill_nest_halos_from_parent("dt_cool", mn_phys%dt_cool, interp_type, Atm(child_grid_num)%neststruct%wt_h, &
+          Atm(child_grid_num)%neststruct%ind_h, &
+          x_refine, y_refine, &
+          is_fine_pe, nest_domain, position)
+      call fill_nest_halos_from_parent("qrain", mn_phys%qrain, interp_type, Atm(child_grid_num)%neststruct%wt_h, &
+          Atm(child_grid_num)%neststruct%ind_h, &
+          x_refine, y_refine, &
+          is_fine_pe, nest_domain, position)
+
+    endif
+
+  end subroutine mn_phys_fill_nest_halos_from_parent
+
+  !>@brief The subroutine 'mn_phys_fill_intern_nest_halos' fills the internal nest halos for the physics variables
+  !>@details This subroutine is only called for the nest PEs.
+  subroutine mn_phys_fill_intern_nest_halos(moving_nest, IPD_Control, IPD_Data, domain_fine, is_fine_pe)
+    type(fv_moving_nest_type), target, intent(inout) :: moving_nest !< Single instance of moving nest data
+    type(IPD_control_type), intent(in) :: IPD_Control !< Physics metadata
+    type(IPD_data_type), intent(inout) :: IPD_Data(:) !< Physics variable data
+    type(domain2d), intent(inout) :: domain_fine !< Domain structure for this nest
+    logical, intent(in) :: is_fine_pe !< Is this a nest PE? Should always be .true. here; the argument is redundant.
+
+    type(fv_moving_nest_physics_type), pointer :: mn_phys
+
+    mn_phys => moving_nest%mn_phys
+
+    call mn_var_fill_intern_nest_halos(mn_phys%ts, domain_fine, is_fine_pe) !! Skin Temp/SST
+    if (move_physics) then
+      call mn_var_fill_intern_nest_halos(mn_phys%smc, domain_fine, is_fine_pe)
+      call mn_var_fill_intern_nest_halos(mn_phys%stc, domain_fine, is_fine_pe)
+      call mn_var_fill_intern_nest_halos(mn_phys%slc, domain_fine, is_fine_pe)
+
+      call mn_var_fill_intern_nest_halos(mn_phys%phy_f2d, domain_fine, is_fine_pe)
+      call mn_var_fill_intern_nest_halos(mn_phys%phy_f3d, domain_fine, is_fine_pe)
+
+      call mn_var_fill_intern_nest_halos(mn_phys%emis_lnd, domain_fine, is_fine_pe)
+      call mn_var_fill_intern_nest_halos(mn_phys%emis_ice, domain_fine, is_fine_pe)
+      call mn_var_fill_intern_nest_halos(mn_phys%emis_wat, domain_fine, is_fine_pe)
+
+      !call mn_var_fill_intern_nest_halos(mn_phys%sfalb_lnd, domain_fine, is_fine_pe)
+      !call mn_var_fill_intern_nest_halos(mn_phys%sfalb_lnd_bck, domain_fine, is_fine_pe)
+      !call mn_var_fill_intern_nest_halos(mn_phys%semis, domain_fine, is_fine_pe)
+      !call mn_var_fill_intern_nest_halos(mn_phys%semisbase, domain_fine, is_fine_pe)
+      !call mn_var_fill_intern_nest_halos(mn_phys%sfalb, domain_fine, is_fine_pe)
+
+      call mn_var_fill_intern_nest_halos(mn_phys%u10m, domain_fine, is_fine_pe)
+      call mn_var_fill_intern_nest_halos(mn_phys%v10m, domain_fine, is_fine_pe)
+      call mn_var_fill_intern_nest_halos(mn_phys%tprcp, domain_fine, is_fine_pe)
+
+      call mn_var_fill_intern_nest_halos(mn_phys%hprime, domain_fine, is_fine_pe)
+
+      call mn_var_fill_intern_nest_halos(mn_phys%lakefrac, domain_fine, is_fine_pe)
+      call mn_var_fill_intern_nest_halos(mn_phys%lakedepth, domain_fine, is_fine_pe)
+
+      call mn_var_fill_intern_nest_halos(mn_phys%canopy, domain_fine, is_fine_pe)
+      call mn_var_fill_intern_nest_halos(mn_phys%vegfrac, domain_fine, is_fine_pe)
+      call mn_var_fill_intern_nest_halos(mn_phys%uustar, domain_fine, is_fine_pe)
+      call mn_var_fill_intern_nest_halos(mn_phys%shdmin, domain_fine, is_fine_pe)
+      call mn_var_fill_intern_nest_halos(mn_phys%shdmax, domain_fine, is_fine_pe)
+      call mn_var_fill_intern_nest_halos(mn_phys%zorl, domain_fine, is_fine_pe)
+      call mn_var_fill_intern_nest_halos(mn_phys%zorll, domain_fine, is_fine_pe)
+      call mn_var_fill_intern_nest_halos(mn_phys%zorlwav, domain_fine, 
is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%zorlw, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%tsfco, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%tsfcl, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%tsfc, domain_fine, is_fine_pe) + + call mn_var_fill_intern_nest_halos(mn_phys%albdirvis_lnd, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%albdirnir_lnd, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%albdifvis_lnd, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%albdifnir_lnd, domain_fine, is_fine_pe) + + call mn_var_fill_intern_nest_halos(mn_phys%cv, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%cvt, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%cvb, domain_fine, is_fine_pe) + endif + + if (move_nsst) then + call mn_var_fill_intern_nest_halos(mn_phys%tref, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%z_c, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%c_0, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%c_d, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%w_0, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%w_d, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%xt, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%xs, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%xu, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%xv, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%xz, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%zm, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%xtts, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%xzts, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%d_conv, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%dt_cool, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%qrain, domain_fine, is_fine_pe) + endif + + end subroutine mn_phys_fill_intern_nest_halos + + !>@brief The subroutine 'mn_phys_shift_data' shifts the variable in the nest, including interpolating at the leading edge + !>@details This subroutine is called for the nest and parent PEs. + subroutine mn_phys_shift_data(Atm, IPD_Control, IPD_Data, n, child_grid_num, wt_h, wt_u, wt_v, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, nz) + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Array of atmospheric data + type(IPD_control_type), intent(in) :: IPD_Control !< Physics metadata + type(IPD_data_type), intent(inout) :: IPD_Data(:) !< Physics variable data + integer, intent(in) :: n, child_grid_num !< Current grid number, child grid number + real, allocatable, intent(in) :: wt_h(:,:,:), wt_u(:,:,:), wt_v(:,:,:) !< Interpolation weights + integer, intent(in) :: delta_i_c, delta_j_c !< Nest motion in i,j direction + integer, intent(in) :: x_refine, y_refine !< Nest refinement + logical, intent(in) :: is_fine_pe !< Is this the nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + integer, intent(in) :: nz !< Number of vertical levels + + ! Constants for mpp calls + integer :: interp_type = 1 ! cell-centered A-grid + integer :: interp_type_u = 4 ! D-grid + integer :: interp_type_v = 4 ! 
D-grid + integer :: position = CENTER + integer :: position_u = NORTH + integer :: position_v = EAST + type(fv_moving_nest_physics_type), pointer :: mn_phys + + mn_phys => Moving_nest(n)%mn_phys + + !! Skin temp/SST + call mn_var_shift_data(mn_phys%ts, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + + if (move_physics) then + !! Soil variables + call mn_var_shift_data(mn_phys%smc, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, IPD_Control%lsoil) + call mn_var_shift_data(mn_phys%stc, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, IPD_Control%lsoil) + call mn_var_shift_data(mn_phys%slc, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, IPD_Control%lsoil) + + !! Physics arrays + call mn_var_shift_data(mn_phys%phy_f2d, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, IPD_control%ntot2d) + + call mn_var_shift_data(mn_phys%phy_f3d, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, IPD_Control%levs) + + ! Surface variables + + call mn_var_shift_data(mn_phys%emis_lnd, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%emis_ice, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%emis_wat, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + + + !call mn_var_shift_data(mn_phys%sfalb_lnd, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + ! delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + !call mn_var_shift_data(mn_phys%sfalb_lnd_bck, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + ! delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + !call mn_var_shift_data(mn_phys%semis, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + ! delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + !call mn_var_shift_data(mn_phys%semisbase, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + ! delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + !call mn_var_shift_data(mn_phys%sfalb, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + ! 
delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + + call mn_var_shift_data(mn_phys%u10m, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%v10m, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%tprcp, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%hprime, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, IPD_Control%nmtvr) + call mn_var_shift_data(mn_phys%lakefrac, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%lakedepth, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%canopy, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%vegfrac, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%uustar, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%shdmin, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%shdmax, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%zorl, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%zorll, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%zorlwav, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%zorlw, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%tsfco, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%tsfcl, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%tsfc, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%albdirvis_lnd, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%albdirnir_lnd, 
interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%albdifvis_lnd, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%albdifnir_lnd, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%cv, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%cvt, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%cvb, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + endif + + if (move_nsst) then + call mn_var_shift_data(mn_phys%tref, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%z_c, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%c_0, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%c_d, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%w_0, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%w_d, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%xt, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%xs, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%xu, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%xv, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%xz, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%zm, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%xtts, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%xzts, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call 
mn_var_shift_data(mn_phys%d_conv, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%dt_cool, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%qrain, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + endif + + end subroutine mn_phys_shift_data + + !>@brief The subroutine 'mn_phys_dump_to_netcdf' dumps physics variables to debugging netCDF files + !>@details This subroutine is called for the nest and parent PEs. + subroutine mn_phys_dump_to_netcdf(Atm, Atm_block, IPD_Control, IPD_Data, time_val, file_prefix, is_fine_pe, domain_coarse, domain_fine, nz) + type(fv_atmos_type), intent(in) :: Atm !< Single instance of atmospheric data + type (block_control_type), intent(in) :: Atm_block !< Physics block layout + type(IPD_control_type), intent(in) :: IPD_Control !< Physics metadata + type(IPD_data_type), intent(in) :: IPD_Data(:) !< Physics variable data + integer, intent(in) :: time_val !< Timestep number for filename + character(len=*), intent(in) :: file_prefix !< Prefix for output netCDF filenames + logical, intent(in) :: is_fine_pe !< Is this the nest PE? + type(domain2d), intent(in) :: domain_coarse, domain_fine !< Domain structures for parent and nest + integer, intent(in) :: nz !< Number of vertical levels + + integer :: is, ie, js, je + integer :: nb, blen, i, j, k, ix, nv + integer :: this_pe + + integer :: n_moist + character(len=16) :: out_var_name, phys_var_name + integer :: position = CENTER + + ! Coerce the double precision variables from physics into single precision for debugging netCDF output + ! Does not affect values used in calculations. + ! TODO do we want to dump these as double precision?? + real, allocatable :: smc_pr_local (:,:,:) !< soil moisture content + real, allocatable :: stc_pr_local (:,:,:) !< soil temperature + real, allocatable :: slc_pr_local (:,:,:) !< soil liquid water content + real, allocatable, dimension(:,:) :: sealand_pr_local, deep_soil_t_pr_local, soil_type_pr_local, veg_type_pr_local, slope_type_pr_local, max_snow_alb_pr_local + real, allocatable, dimension(:,:) :: tsfco_pr_local, tsfcl_pr_local, tsfc_pr_local, vegfrac_pr_local + real, allocatable, dimension(:,:) :: tref_pr_local, c_0_pr_local, xt_pr_local, xu_pr_local, xv_pr_local, ifd_pr_local + real, allocatable, dimension(:,:) :: facsf_pr_local, facwf_pr_local + real, allocatable, dimension(:,:) :: alvsf_pr_local, alvwf_pr_local, alnsf_pr_local, alnwf_pr_local + real, allocatable, dimension(:,:) :: zorl_pr_local, zorll_pr_local, zorlw_pr_local, zorli_pr_local + real, allocatable :: phy_f2d_pr_local (:,:,:) + real, allocatable :: phy_f3d_pr_local (:,:,:,:) + real, allocatable :: lakefrac_pr_local (:,:) !< lake fraction + real, allocatable :: landfrac_pr_local (:,:) !< land fraction + real, allocatable :: emis_lnd_pr_local (:,:) !< emissivity land + + this_pe = mpp_pe() + + ! Skin temp/SST + call mn_var_dump_to_netcdf(Atm%ts, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "SSTK") + ! Terrain height == phis / grav + call mn_var_dump_to_netcdf(Atm%phis / grav, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "orog") + + ! sgh and oro were only fully allocated if fv_land is True + ! 
if false, oro is (1,1), and sgh is not allocated + if ( Atm%flagstruct%fv_land ) then + ! land frac -- called oro in fv_array.F90 + call mn_var_dump_to_netcdf(Atm%oro, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "LFRAC") + ! terrain standard deviation -- called sgh in fv_array.F90 + call mn_var_dump_to_netcdf(Atm%sgh, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "STDDEV") + endif + + is = Atm%bd%is + ie = Atm%bd%ie + js = Atm%bd%js + je = Atm%bd%je + + if (debug_log) print '("[INFO] WDR mn_phys_dump_to_netcdf. npe=",I0," is=",I0," ie=",I0," js=",I0," je=",I0)', this_pe, is, ie, js, je + + ! Just allocate compute domain size here for outputs; the nest moving code also has halos added, but we don't need them here. + if (move_physics) then + allocate ( smc_pr_local(is:ie, js:je, IPD_Control%lsoil) ) + allocate ( stc_pr_local(is:ie, js:je, IPD_Control%lsoil) ) + allocate ( slc_pr_local(is:ie, js:je, IPD_Control%lsoil) ) + allocate ( sealand_pr_local(is:ie, js:je) ) + allocate ( lakefrac_pr_local(is:ie, js:je) ) + allocate ( landfrac_pr_local(is:ie, js:je) ) + allocate ( emis_lnd_pr_local(is:ie, js:je) ) + allocate ( phy_f2d_pr_local(is:ie, js:je, IPD_Control%ntot2d) ) + allocate ( phy_f3d_pr_local(is:ie, js:je, IPD_Control%levs, IPD_Control%ntot3d) ) + allocate ( tsfco_pr_local(is:ie, js:je) ) + allocate ( tsfcl_pr_local(is:ie, js:je) ) + allocate ( tsfc_pr_local(is:ie, js:je) ) + allocate ( vegfrac_pr_local(is:ie, js:je) ) + allocate ( alvsf_pr_local(is:ie, js:je) ) + allocate ( alvwf_pr_local(is:ie, js:je) ) + allocate ( alnsf_pr_local(is:ie, js:je) ) + allocate ( alnwf_pr_local(is:ie, js:je) ) + allocate ( deep_soil_t_pr_local(is:ie, js:je) ) + allocate ( soil_type_pr_local(is:ie, js:je) ) + !allocate ( veg_frac_pr_local(is:ie, js:je) ) + allocate ( veg_type_pr_local(is:ie, js:je) ) + allocate ( slope_type_pr_local(is:ie, js:je) ) + allocate ( max_snow_alb_pr_local(is:ie, js:je) ) + allocate ( facsf_pr_local(is:ie, js:je) ) + allocate ( facwf_pr_local(is:ie, js:je) ) + allocate ( zorl_pr_local(is:ie, js:je) ) + allocate ( zorll_pr_local(is:ie, js:je) ) + allocate ( zorlw_pr_local(is:ie, js:je) ) + allocate ( zorli_pr_local(is:ie, js:je) ) + endif + + if (move_nsst) then + allocate ( tref_pr_local(is:ie, js:je) ) + allocate ( c_0_pr_local(is:ie, js:je) ) + allocate ( xt_pr_local(is:ie, js:je) ) + allocate ( xu_pr_local(is:ie, js:je) ) + allocate ( xv_pr_local(is:ie, js:je) ) + allocate ( ifd_pr_local(is:ie, js:je) ) + endif + + if (move_physics) then + smc_pr_local = +99999.9 + stc_pr_local = +99999.9 + slc_pr_local = +99999.9 + sealand_pr_local = +99999.9 + lakefrac_pr_local = +99999.9 + landfrac_pr_local = +99999.9 + emis_lnd_pr_local = +99999.9 + phy_f2d_pr_local = +99999.9 + phy_f3d_pr_local = +99999.9 + tsfco_pr_local = +99999.9 + tsfcl_pr_local = +99999.9 + tsfc_pr_local = +99999.9 + vegfrac_pr_local = +99999.9 + alvsf_pr_local = +99999.9 + alvwf_pr_local = +99999.9 + alnsf_pr_local = +99999.9 + alnwf_pr_local = +99999.9 + endif + if (move_nsst) then + tref_pr_local = +99999.9 + c_0_pr_local = +99999.9 + xt_pr_local = +99999.9 + xu_pr_local = +99999.9 + xv_pr_local = +99999.9 + ifd_pr_local = +99999.9 + endif + + do nb = 1,Atm_block%nblks + blen = Atm_block%blksz(nb) + do ix = 1, blen + i = Atm_block%index(nb)%ii(ix) + j = Atm_block%index(nb)%jj(ix) + + if (move_physics) then + do k = 1, IPD_Control%lsoil + ! 
Use real() to lower the precision + smc_pr_local(i,j,k) = real(IPD_Data(nb)%Sfcprop%smc(ix,k)) + stc_pr_local(i,j,k) = real(IPD_Data(nb)%Sfcprop%stc(ix,k)) + slc_pr_local(i,j,k) = real(IPD_Data(nb)%Sfcprop%slc(ix,k)) + enddo + + sealand_pr_local(i,j) = real(IPD_Data(nb)%Sfcprop%slmsk(ix)) + lakefrac_pr_local(i,j) = real(IPD_Data(nb)%Sfcprop%lakefrac(ix)) + landfrac_pr_local(i,j) = real(IPD_Data(nb)%Sfcprop%landfrac(ix)) + emis_lnd_pr_local(i,j) = real(IPD_Data(nb)%Sfcprop%emis_lnd(ix)) + deep_soil_t_pr_local(i, j) = IPD_data(nb)%Sfcprop%tg3(ix) + soil_type_pr_local(i, j) = IPD_data(nb)%Sfcprop%stype(ix) + !veg_frac_pr_local(i, j) = IPD_data(nb)%Sfcprop%vfrac(ix) + veg_type_pr_local(i, j) = IPD_data(nb)%Sfcprop%vtype(ix) + slope_type_pr_local(i, j) = IPD_data(nb)%Sfcprop%slope(ix) + facsf_pr_local(i, j) = IPD_data(nb)%Sfcprop%facsf(ix) + facwf_pr_local(i, j) = IPD_data(nb)%Sfcprop%facwf(ix) + zorl_pr_local(i, j) = IPD_data(nb)%Sfcprop%zorl(ix) + zorlw_pr_local(i, j) = IPD_data(nb)%Sfcprop%zorlw(ix) + zorll_pr_local(i, j) = IPD_data(nb)%Sfcprop%zorll(ix) + zorli_pr_local(i, j) = IPD_data(nb)%Sfcprop%zorli(ix) + max_snow_alb_pr_local(i, j) = IPD_data(nb)%Sfcprop%snoalb(ix) + tsfco_pr_local(i, j) = IPD_data(nb)%Sfcprop%tsfco(ix) + tsfcl_pr_local(i, j) = IPD_data(nb)%Sfcprop%tsfcl(ix) + tsfc_pr_local(i, j) = IPD_data(nb)%Sfcprop%tsfc(ix) + vegfrac_pr_local(i, j) = IPD_data(nb)%Sfcprop%vfrac(ix) + alvsf_pr_local(i, j) = IPD_data(nb)%Sfcprop%alvsf(ix) + alvwf_pr_local(i, j) = IPD_data(nb)%Sfcprop%alvwf(ix) + alnsf_pr_local(i, j) = IPD_data(nb)%Sfcprop%alnsf(ix) + alnwf_pr_local(i, j) = IPD_data(nb)%Sfcprop%alnwf(ix) + + do nv = 1, IPD_Control%ntot2d + ! Use real() to lower the precision + phy_f2d_pr_local(i,j,nv) = real(IPD_Data(nb)%Tbd%phy_f2d(ix, nv)) + enddo + + do k = 1, IPD_Control%levs + do nv = 1, IPD_Control%ntot3d + ! 
Use real() to lower the precision + phy_f3d_pr_local(i,j,k,nv) = real(IPD_Data(nb)%Tbd%phy_f3d(ix, k, nv)) + enddo + enddo + endif + + if (move_nsst) then + tref_pr_local(i,j) = IPD_data(nb)%Sfcprop%tref(ix) + c_0_pr_local(i,j) = IPD_data(nb)%Sfcprop%c_0(ix) + xt_pr_local(i,j) = IPD_data(nb)%Sfcprop%xt(ix) + xu_pr_local(i,j) = IPD_data(nb)%Sfcprop%xu(ix) + xv_pr_local(i,j) = IPD_data(nb)%Sfcprop%xv(ix) + ifd_pr_local(i,j) = IPD_data(nb)%Sfcprop%ifd(ix) + endif + enddo + enddo + + if (move_physics) then + call mn_var_dump_to_netcdf(stc_pr_local, is_fine_pe, domain_coarse, domain_fine, position, IPD_Control%lsoil, time_val, Atm%global_tile, file_prefix, "SOILT") + call mn_var_dump_to_netcdf(smc_pr_local, is_fine_pe, domain_coarse, domain_fine, position, IPD_Control%lsoil, time_val, Atm%global_tile, file_prefix, "SOILM") + call mn_var_dump_to_netcdf(slc_pr_local, is_fine_pe, domain_coarse, domain_fine, position, IPD_Control%lsoil, time_val, Atm%global_tile, file_prefix, "SOILL") + call mn_var_dump_to_netcdf(sealand_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "LMASK") + call mn_var_dump_to_netcdf(lakefrac_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "LAKEFRAC") + call mn_var_dump_to_netcdf(landfrac_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "LANDFRAC") + call mn_var_dump_to_netcdf(emis_lnd_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "EMISLAND") + call mn_var_dump_to_netcdf(deep_soil_t_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "DEEPSOIL") + call mn_var_dump_to_netcdf(soil_type_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "SOILTP") + !call mn_var_dump_to_netcdf(veg_frac_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "VEGFRAC") + call mn_var_dump_to_netcdf(veg_type_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "VEGTYPE") + call mn_var_dump_to_netcdf(slope_type_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "SLOPE") + call mn_var_dump_to_netcdf(max_snow_alb_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "SNOWALB") + call mn_var_dump_to_netcdf(tsfco_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "TSFCO") + call mn_var_dump_to_netcdf(tsfcl_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "TSFCL") + call mn_var_dump_to_netcdf(tsfc_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "TSFC") + call mn_var_dump_to_netcdf(vegfrac_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "VEGFRAC") + call mn_var_dump_to_netcdf(alvsf_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "ALVSF") + call mn_var_dump_to_netcdf(alvwf_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "ALVWF") + call mn_var_dump_to_netcdf(alnsf_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "ALNSF") + 
call mn_var_dump_to_netcdf(alnwf_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "ALNWF") + call mn_var_dump_to_netcdf(facsf_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "FACSF") + call mn_var_dump_to_netcdf(facwf_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "FACWF") + call mn_var_dump_to_netcdf(zorl_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "ZORL") + call mn_var_dump_to_netcdf(zorlw_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "ZORLW") + call mn_var_dump_to_netcdf(zorll_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "ZORLL") + call mn_var_dump_to_netcdf(zorli_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "ZORLI") + + do nv = 1, IPD_Control%ntot2d + write (phys_var_name, "(A4,I0.3)") 'PH2D', nv + call mn_var_dump_to_netcdf(phy_f2d_pr_local(:,:,nv), is_fine_pe, domain_coarse, domain_fine, position, 1, & + time_val, Atm%global_tile, file_prefix, phys_var_name) + enddo + + do nv = 1, IPD_Control%ntot3d + write (phys_var_name, "(A4,I0.3)") 'PH3D', nv + call mn_var_dump_to_netcdf(phy_f3d_pr_local(:,:,:,nv), is_fine_pe, domain_coarse, domain_fine, position, IPD_Control%levs, & + time_val, Atm%global_tile, file_prefix, phys_var_name) + enddo + endif + + if (move_nsst) then + call mn_var_dump_to_netcdf(tref_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "TREF") + call mn_var_dump_to_netcdf(c_0_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "C_0") + call mn_var_dump_to_netcdf(xt_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "XT") + call mn_var_dump_to_netcdf(xu_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "XU") + call mn_var_dump_to_netcdf(xv_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "XV") + call mn_var_dump_to_netcdf(ifd_pr_local, is_fine_pe, domain_coarse, domain_fine, position, 1, time_val, Atm%global_tile, file_prefix, "IFD") + endif + + if (move_physics) then + deallocate(smc_pr_local) + deallocate(stc_pr_local) + deallocate(slc_pr_local) + deallocate(lakefrac_pr_local) + deallocate(landfrac_pr_local) + deallocate(emis_lnd_pr_local) + deallocate(sealand_pr_local, deep_soil_t_pr_local, soil_type_pr_local, veg_type_pr_local, max_snow_alb_pr_local) + deallocate(tsfco_pr_local, tsfcl_pr_local, tsfc_pr_local, vegfrac_pr_local) + deallocate(alvsf_pr_local, alvwf_pr_local, alnsf_pr_local, alnwf_pr_local) + deallocate(facsf_pr_local, facwf_pr_local) + deallocate(zorl_pr_local, zorlw_pr_local, zorll_pr_local, zorli_pr_local) + deallocate(phy_f2d_pr_local) + deallocate(phy_f3d_pr_local) + endif + + if (move_nsst) deallocate(tref_pr_local, c_0_pr_local, xt_pr_local, xu_pr_local, xv_pr_local, ifd_pr_local) + + if (debug_log) print '("[INFO] WDR end mn_phys_dump_to_netcdf npe=",I0)', this_pe + + end subroutine mn_phys_dump_to_netcdf + +#endif + +end module fv_moving_nest_physics_mod diff --git a/moving_nest/fv_moving_nest_types.F90 b/moving_nest/fv_moving_nest_types.F90 new file mode 100644 index 000000000..843b666ae
--- /dev/null +++ b/moving_nest/fv_moving_nest_types.F90 @@ -0,0 +1,621 @@ +!*********************************************************************** +!* GNU Lesser General Public License +!* +!* This file is part of the FV3 dynamical core. +!* +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. +!* +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>. +!*********************************************************************** + +!*********************************************************************** +!> @file +!! @brief Provides data structures for moving nest functionality +!! @author W. Ramstrom, AOML/HRD 03/24/2022 +!! @email William.Ramstrom@noaa.gov +! =======================================================================! + +module fv_moving_nest_types_mod + +#ifdef MOVING_NEST +#include <fms_platform.h> + +#ifdef GFS_TYPES + use GFS_typedefs, only: kind_phys +#else + use IPD_typedefs, only: kind_phys => IPD_kind_phys +#endif + + use fms_mod, only: check_nml_error + use fv_arrays_mod, only: fv_atmos_type + use fv_mp_mod, only: MAX_NNEST + use mpp_mod, only: input_nml_file, mpp_pe + + implicit none + + type fv_moving_nest_flag_type + ! Moving Nest Namelist Variables + logical :: is_moving_nest = .false. + character(len=120) :: surface_dir = "INPUT/moving_nest" + integer :: terrain_smoother = 1 + integer :: vortex_tracker = 0 + integer :: ntrack = 1 + integer :: corral_x = 5 + integer :: corral_y = 5 + + integer :: outatcf_lun = 600 + + ! Moving nest related variables + integer :: move_cd_x = 0 + integer :: move_cd_y = 0 + logical :: do_move = .false. + end type fv_moving_nest_flag_type + + ! Encapsulates the grid definition data, such as read from the netCDF files + type grid_geometry + integer :: nx, ny, nxp, nyp + + real(kind=kind_phys), allocatable :: lats(:,:) + real(kind=kind_phys), allocatable :: lons(:,:) + + !real, allocatable :: dx(:,:) + !real, allocatable :: dy(:,:) + real(kind=kind_phys), allocatable :: area(:,:) + end type grid_geometry + + type fv_moving_nest_prog_type + real, _ALLOCATABLE :: delz(:,:,:) _NULL !< layer thickness (meters) + end type fv_moving_nest_prog_type + + ! TODO deallocate these at end of model run. They are only allocated once, at first nest move, inside mn_static_read_hires(). + ! Note these are only 32 bits for now; matching the precision of the input netCDF files + ! though the model generally handles physics variables with 64 bit precision + type mn_surface_grids + real, allocatable :: orog_grid(:,:) _NULL ! orography -- raw or filtered depending on namelist option, in meters + real, allocatable :: orog_std_grid(:,:) _NULL ! terrain standard deviation for gravity wave drag, in meters (?) + real, allocatable :: ls_mask_grid(:,:) _NULL ! land sea mask -- 0 for ocean/lakes, 1 for land. Perhaps 2 for sea ice. + real, allocatable :: land_frac_grid(:,:) _NULL ! Continuous land fraction - 0.0 ocean, 0.5 half of each, 1.0 all land + + real, allocatable :: parent_orog_grid(:,:) _NULL !
parent orography -- only used for terrain_smoother=1. + ! raw or filtered depending on namelist option,in meters + + ! Soil variables + real, allocatable :: deep_soil_temp_grid(:,:) _NULL ! deep soil temperature at 5m, in degrees K + real, allocatable :: soil_type_grid(:,:) _NULL ! STATSGO soil type + + ! Vegetation variables + real, allocatable :: veg_frac_grid(:,:) _NULL ! vegetation fraction + real, allocatable :: veg_type_grid(:,:) _NULL ! IGBP vegetation type + real, allocatable :: veg_greenness_grid(:,:) _NULL ! NESDIS vegetation greenness; netCDF file has monthly values + + ! Orography variables + real, allocatable :: slope_type_grid(:,:) _NULL ! legacy 1 degree GFS slope type + + ! Albedo variables + real, allocatable :: max_snow_alb_grid(:,:) _NULL ! max snow albedo + real, allocatable :: facsf_grid(:,:) _NULL ! fractional coverage with strong cosz dependency + real, allocatable :: facwf_grid(:,:) _NULL ! fractional coverage with weak cosz dependency + + ! Snow free albedo + ! strong cosz angle dependence = black sky + ! weak cosz angle dependence = white sky + ! From the chgres code in static_data.F90, we see the linkage of variable names: + ! type(esmf_field), public :: alvsf_target_grid !< visible black sky albedo + ! type(esmf_field), public :: alvwf_target_grid !< visible white sky albedo + ! type(esmf_field), public :: alnsf_target_grid !< near ir black sky albedo + ! type(esmf_field), public :: alnwf_target_grid !< near ir white sky albedo + + real, allocatable :: alvsf_grid(:,:) _NULL ! Visible black sky albedo; netCDF file has monthly values + real, allocatable :: alvwf_grid(:,:) _NULL ! Visible white sky albedo; netCDF file has monthly values + real, allocatable :: alnsf_grid(:,:) _NULL ! Near IR black sky albedo; netCDF file has monthly values + real, allocatable :: alnwf_grid(:,:) _NULL ! Near IR white sky albedo; netCDF file has monthly values + + end type mn_surface_grids + + type fv_moving_nest_physics_type + real, _ALLOCATABLE :: ts(:,:) _NULL !< 2D skin temperature/SST + real, _ALLOCATABLE :: slmsk(:,:) _NULL !< land sea mask -- 0 for ocean/lakes, 1, for land. Perhaps 2 for sea ice. + real (kind=kind_phys), _ALLOCATABLE :: smc (:,:,:) _NULL !< soil moisture content + real (kind=kind_phys), _ALLOCATABLE :: stc (:,:,:) _NULL !< soil temperature + real (kind=kind_phys), _ALLOCATABLE :: slc (:,:,:) _NULL !< soil liquid water content + + real (kind=kind_phys), _ALLOCATABLE :: u10m (:,:) _NULL !< 10m u wind (a-grid?) + real (kind=kind_phys), _ALLOCATABLE :: v10m (:,:) _NULL !< 10m v wind (a-grid?) + real (kind=kind_phys), _ALLOCATABLE :: hprime (:,:,:) _NULL !< orographic metrics (maybe standard deviation?) + + real (kind=kind_phys), _ALLOCATABLE :: tprcp (:,:) _NULL !< total (of all precip types) precipitation rate + + real (kind=kind_phys), _ALLOCATABLE :: zorl (:,:) _NULL !< roughness length + real (kind=kind_phys), _ALLOCATABLE :: zorll (:,:) _NULL !< land roughness length + !real (kind=kind_phys), _ALLOCATABLE :: zorli (:,:) _NULL !< ice surface roughness length ! TODO do we need this? 
+ real (kind=kind_phys), _ALLOCATABLE :: zorlw (:,:) _NULL !< wave surface roughness length + real (kind=kind_phys), _ALLOCATABLE :: zorlwav (:,:) _NULL !< wave surface roughness in cm derived from wave model + + real (kind=kind_phys), _ALLOCATABLE :: sfalb_lnd(:,:) _NULL !< surface albedo over land for LSM + real (kind=kind_phys), _ALLOCATABLE :: emis_lnd(:,:) _NULL !< surface emissivity over land for LSM + real (kind=kind_phys), _ALLOCATABLE :: emis_ice(:,:) _NULL !< surface emissivity over ice for LSM + real (kind=kind_phys), _ALLOCATABLE :: emis_wat(:,:) _NULL !< surface emissivity over water for LSM + real (kind=kind_phys), _ALLOCATABLE :: sfalb_lnd_bck(:,:) _NULL !< snow-free albedo over land + + !real (kind=kind_phys), _ALLOCATABLE :: semis(:,:) _NULL !< surface lw emissivity in fraction + !real (kind=kind_phys), _ALLOCATABLE :: semisbase(:,:) _NULL !< background surface emissivity + !real (kind=kind_phys), _ALLOCATABLE :: sfalb(:,:) _NULL !< mean surface diffused sw albedo + + real (kind=kind_phys), _ALLOCATABLE :: alvsf(:,:) _NULL !< visible black sky albedo + real (kind=kind_phys), _ALLOCATABLE :: alvwf(:,:) _NULL !< visible white sky albedo + real (kind=kind_phys), _ALLOCATABLE :: alnsf(:,:) _NULL !< near IR black sky albedo + real (kind=kind_phys), _ALLOCATABLE :: alnwf(:,:) _NULL !< near IR white sky albedo + + real (kind=kind_phys), _ALLOCATABLE :: albdirvis_lnd(:,:) _NULL !< + real (kind=kind_phys), _ALLOCATABLE :: albdirnir_lnd(:,:) _NULL !< + real (kind=kind_phys), _ALLOCATABLE :: albdifvis_lnd(:,:) _NULL !< + real (kind=kind_phys), _ALLOCATABLE :: albdifnir_lnd(:,:) _NULL !< + + real (kind=kind_phys), _ALLOCATABLE :: facsf(:,:) _NULL !< fractional coverage for strong zenith angle albedo + real (kind=kind_phys), _ALLOCATABLE :: facwf(:,:) _NULL !< fractional coverage for strong zenith angle albedo + + real (kind=kind_phys), _ALLOCATABLE :: lakefrac (:,:) _NULL !< lake fraction [0:1] + real (kind=kind_phys), _ALLOCATABLE :: lakedepth (:,:) _NULL !< lake depth [ m ] + + real (kind=kind_phys), _ALLOCATABLE :: canopy (:,:) _NULL !< canopy water content + real (kind=kind_phys), _ALLOCATABLE :: vegfrac (:,:) _NULL !< vegetation fraction + real (kind=kind_phys), _ALLOCATABLE :: uustar (:,:) _NULL !< u* wind in similarity theory + real (kind=kind_phys), _ALLOCATABLE :: shdmin (:,:) _NULL !< min fractional coverage of green vegetation + real (kind=kind_phys), _ALLOCATABLE :: shdmax (:,:) _NULL !< max fractional coverage of green vegetation + real (kind=kind_phys), _ALLOCATABLE :: tsfco (:,:) _NULL !< surface temperature ocean + real (kind=kind_phys), _ALLOCATABLE :: tsfcl (:,:) _NULL !< surface temperature land + real (kind=kind_phys), _ALLOCATABLE :: tsfc (:,:) _NULL !< surface temperature + !real (kind=kind_phys), _ALLOCATABLE :: tsfc_radtime (:,:) _NULL !< surface temperature on radiative timestep + + real (kind=kind_phys), _ALLOCATABLE :: cv (:,:) _NULL !< fraction of convective cloud + real (kind=kind_phys), _ALLOCATABLE :: cvt (:,:) _NULL !< convective cloud top pressure + real (kind=kind_phys), _ALLOCATABLE :: cvb (:,:) _NULL !< convective cloud bottom pressure + + real (kind=kind_phys), _ALLOCATABLE :: phy_f2d (:,:,:) _NULL !< 2D physics variables + real (kind=kind_phys), _ALLOCATABLE :: phy_f3d(:,:,:,:) _NULL !< 3D physics variables + + ! 
NSST Variables + + real (kind=kind_phys), _ALLOCATABLE :: tref (:,:) _NULL !< reference temperature for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: z_c (:,:) _NULL !< coefficient for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: c_0 (:,:) _NULL !< coefficient for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: c_d (:,:) _NULL !< coefficient for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: w_0 (:,:) _NULL !< coefficient for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: w_d (:,:) _NULL !< coefficient for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: xt (:,:) _NULL !< heat content for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: xs (:,:) _NULL !< salinity for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: xu (:,:) _NULL !< u current constant for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: xv (:,:) _NULL !< v current constant for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: xz (:,:) _NULL !< DTL thickness for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: zm (:,:) _NULL !< MXL for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: xtts (:,:) _NULL !< d(xt)/d(ts) for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: xzts (:,:) _NULL !< d(xz)/d(ts) for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: d_conv (:,:) _NULL !< think of free convection layer for NSSTM + ! real (kind=kind_phys), _ALLOCATABLE :: ifd (:,:) _NULL !< index to start DTM run for NSSTM ! TODO Probably can't interpolate an index. + ! IFD values are 0 for land, and 1 for oceans/lakes -- reverse of the land sea mask + ! Land Sea Mask has values of 0 for oceans/lakes, 1 for land, 2 for sea ice + real (kind=kind_phys), _ALLOCATABLE :: dt_cool (:,:) _NULL !< sub-layer cooling amount for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: qrain (:,:) _NULL !< sensible heat flux due to rainfall for NSSTM + + end type fv_moving_nest_physics_type + + type fv_moving_nest_type + type(fv_moving_nest_flag_type) :: mn_flag ! Mostly namelist variables + type(mn_surface_grids) :: mn_static + type(fv_moving_nest_prog_type) :: mn_prog + type(fv_moving_nest_physics_type) :: mn_phys + + type(grid_geometry) :: parent_geo + type(grid_geometry) :: fp_super_tile_geo + end type fv_moving_nest_type + + ! Moving Nest Namelist Variables + logical, dimension(MAX_NNEST) :: is_moving_nest = .False. + character(len=120) :: surface_dir = "INPUT/moving_nest" + integer, dimension(MAX_NNEST) :: terrain_smoother = 1 ! 0 -- all high-resolution data, 1 - static nest smoothing algorithm with blending zone of 5 points, 2 - blending zone of 10 points, 5 - 5 point smoother, 9 - 9 point smoother + integer, dimension(MAX_NNEST) :: vortex_tracker = 0 ! 0 - not a moving nest, tracker not needed + ! 1 - prescribed nest moving + ! 2 - following child domain center + ! 3 - tracking Min MSLP + ! 6 - simplified version of GFDL tracker, adopted from HWRF's internal vortex tracker. + ! 7 - nearly the full storm tracking algorithm from GFDL vortex tracker. The only part that is missing is the part that gives up when the storm dissipates, which is left out intentionally. Adopted from HWRF's internal vortex tracker. + integer, dimension(MAX_NNEST) :: ntrack = 1 ! number of dt_atmos steps to call the vortex tracker, tracker time step = ntrack*dt_atmos + integer, dimension(MAX_NNEST) :: move_cd_x = 0 ! the number of parent domain grid cells to move in i direction + integer, dimension(MAX_NNEST) :: move_cd_y = 0 ! the number of parent domain grid cells to move in j direction + ! used to control prescribed nest moving, when vortex_tracker=1 + ! 
the move happens every ntrack*dt_atmos seconds + ! positive is to move in increasing i and j direction, and + ! negative is to move in decreasing i and j direction. + ! 0 means no move. The limitation is to move only 1 grid cell at each move. + integer, dimension(MAX_NNEST) :: corral_x = 5 ! Minimum parent gridpoints on each side of nest in i direction + integer, dimension(MAX_NNEST) :: corral_y = 5 ! Minimum parent gridpoints on each side of nest in j direction + + integer, dimension(MAX_NNEST) :: outatcf_lun = 600 ! base fortran unit number to write out the partial atcfunix file from the internal tracker + + type(fv_moving_nest_type), _ALLOCATABLE, target :: Moving_nest(:) + +contains + + subroutine fv_moving_nest_init(Atm) + type(fv_atmos_type), allocatable, intent(in) :: Atm(:) + + integer :: n, ngrids + + ! Allocate the array of fv_moving_nest_type structures of the proper length + allocate(Moving_nest(size(Atm))) + + ! Configure namelist variables + + ngrids = size(Atm) + + ! Read in namelist + + call read_namelist_moving_nest_nml + + do n=1,ngrids + if (Atm(n)%neststruct%nested) then + Moving_nest(n)%mn_flag%is_moving_nest = is_moving_nest(n) + Moving_nest(n)%mn_flag%surface_dir = trim(surface_dir) + Moving_nest(n)%mn_flag%terrain_smoother = terrain_smoother(n) + Moving_nest(n)%mn_flag%vortex_tracker = vortex_tracker(n) + Moving_nest(n)%mn_flag%ntrack = ntrack(n) + Moving_nest(n)%mn_flag%move_cd_x = move_cd_x(n) + Moving_nest(n)%mn_flag%move_cd_y = move_cd_y(n) + Moving_nest(n)%mn_flag%corral_x = corral_x(n) + Moving_nest(n)%mn_flag%corral_y = corral_y(n) + Moving_nest(n)%mn_flag%outatcf_lun = outatcf_lun(n) + else + Moving_nest(n)%mn_flag%is_moving_nest = .false. + Moving_nest(n)%mn_flag%vortex_tracker = 0 + Moving_nest(n)%mn_flag%ntrack = 1 + Moving_nest(n)%mn_flag%move_cd_x = 0 + Moving_nest(n)%mn_flag%move_cd_y = 0 + Moving_nest(n)%mn_flag%corral_x = 5 + Moving_nest(n)%mn_flag%corral_y = 5 + Moving_nest(n)%mn_flag%outatcf_lun = 600 + endif + enddo + end subroutine fv_moving_nest_init + + subroutine read_namelist_moving_nest_nml + integer :: f_unit, ios, ierr + namelist /fv_moving_nest_nml/ surface_dir, is_moving_nest, terrain_smoother, & + vortex_tracker, ntrack, move_cd_x, move_cd_y, corral_x, corral_y, outatcf_lun + +#ifdef INTERNAL_FILE_NML + read (input_nml_file,fv_moving_nest_nml,iostat=ios) + ierr = check_nml_error(ios,'fv_moving_nest_nml') +#else + f_unit=open_namelist_file() + rewind (f_unit) + read (f_unit,fv_moving_nest_nml,iostat=ios) + ierr = check_nml_error(ios,'fv_moving_nest_nml') + call close_file(f_unit) +#endif + + end subroutine read_namelist_moving_nest_nml + + subroutine deallocate_fv_moving_nests(n) + integer, intent(in) :: n + + integer :: i + + do i=1,n + call deallocate_fv_moving_nest(i) + enddo + deallocate(Moving_nest) + end subroutine deallocate_fv_moving_nests + + subroutine deallocate_fv_moving_nest(n) + integer, intent(in) :: n + + call deallocate_fv_moving_nest_prog_type(Moving_nest(n)%mn_prog) + call deallocate_fv_moving_nest_physics_type(Moving_nest(n)%mn_phys) + + end subroutine deallocate_fv_moving_nest + + + subroutine allocate_fv_moving_nest_prog_type(isd, ied, jsd, jed, npz, mn_prog) + integer, intent(in) :: isd, ied, jsd, jed, npz + type(fv_moving_nest_prog_type), intent(inout) :: mn_prog + + allocate ( mn_prog%delz(isd:ied, jsd:jed, 1:npz) ) + mn_prog%delz = +99999.9 + + end subroutine allocate_fv_moving_nest_prog_type + + subroutine deallocate_fv_moving_nest_prog_type(mn_prog) + type(fv_moving_nest_prog_type), intent(inout) :: mn_prog + 
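      ! For reference, the fv_moving_nest_nml namelist read above by read_namelist_moving_nest_nml
      ! is supplied through input.nml; a minimal sketch for a parent (grid 1) plus one moving nest
      ! (grid 2) is shown below. Values other than the documented defaults are illustrative only.
      !   &fv_moving_nest_nml
      !     surface_dir      = "INPUT/moving_nest"
      !     is_moving_nest   = .false., .true.
      !     terrain_smoother = 1, 1
      !     vortex_tracker   = 0, 7
      !     ntrack           = 1, 6
      !     corral_x         = 5, 5
      !     corral_y         = 5, 5
      !     outatcf_lun      = 600, 600
      !   /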
+ if (allocated(mn_prog%delz)) deallocate(mn_prog%delz) + + end subroutine deallocate_fv_moving_nest_prog_type + + subroutine allocate_fv_moving_nest_physics_type(isd, ied, jsd, jed, npz, move_physics, move_nsst, lsoil, nmtvr, levs, ntot2d, ntot3d, mn_phys) + integer, intent(in) :: isd, ied, jsd, jed, npz + logical, intent(in) :: move_physics, move_nsst + integer, intent(in) :: lsoil, nmtvr, levs, ntot2d, ntot3d ! From IPD_Control + type(fv_moving_nest_physics_type), intent(inout) :: mn_phys + + ! The local/temporary variables need to be allocated to the larger data (compute + halos) domain so that the nest motion code has halos to use + allocate ( mn_phys%ts(isd:ied, jsd:jed) ) + + if (move_physics) then + allocate ( mn_phys%slmsk(isd:ied, jsd:jed) ) + allocate ( mn_phys%smc(isd:ied, jsd:jed, lsoil) ) + allocate ( mn_phys%stc(isd:ied, jsd:jed, lsoil) ) + allocate ( mn_phys%slc(isd:ied, jsd:jed, lsoil) ) + + allocate ( mn_phys%sfalb_lnd(isd:ied, jsd:jed) ) + allocate ( mn_phys%emis_lnd(isd:ied, jsd:jed) ) + allocate ( mn_phys%emis_ice(isd:ied, jsd:jed) ) + allocate ( mn_phys%emis_wat(isd:ied, jsd:jed) ) + allocate ( mn_phys%sfalb_lnd_bck(isd:ied, jsd:jed) ) + + !allocate ( mn_phys%semis(isd:ied, jsd:jed) ) + !allocate ( mn_phys%semisbase(isd:ied, jsd:jed) ) + !allocate ( mn_phys%sfalb(isd:ied, jsd:jed) ) + + allocate ( mn_phys%u10m(isd:ied, jsd:jed) ) + allocate ( mn_phys%v10m(isd:ied, jsd:jed) ) + allocate ( mn_phys%tprcp(isd:ied, jsd:jed) ) + + allocate ( mn_phys%hprime(isd:ied, jsd:jed, nmtvr) ) + + allocate ( mn_phys%zorl(isd:ied, jsd:jed) ) + allocate ( mn_phys%zorll(isd:ied, jsd:jed) ) + allocate ( mn_phys%zorlwav(isd:ied, jsd:jed) ) + allocate ( mn_phys%zorlw(isd:ied, jsd:jed) ) + + allocate ( mn_phys%alvsf(isd:ied, jsd:jed) ) + allocate ( mn_phys%alvwf(isd:ied, jsd:jed) ) + allocate ( mn_phys%alnsf(isd:ied, jsd:jed) ) + allocate ( mn_phys%alnwf(isd:ied, jsd:jed) ) + + allocate ( mn_phys%facsf(isd:ied, jsd:jed) ) + allocate ( mn_phys%facwf(isd:ied, jsd:jed) ) + + allocate ( mn_phys%lakefrac(isd:ied, jsd:jed) ) + allocate ( mn_phys%lakedepth(isd:ied, jsd:jed) ) + + allocate ( mn_phys%canopy(isd:ied, jsd:jed) ) + allocate ( mn_phys%vegfrac(isd:ied, jsd:jed) ) + allocate ( mn_phys%uustar(isd:ied, jsd:jed) ) + allocate ( mn_phys%shdmin(isd:ied, jsd:jed) ) + allocate ( mn_phys%shdmax(isd:ied, jsd:jed) ) + allocate ( mn_phys%tsfco(isd:ied, jsd:jed) ) + allocate ( mn_phys%tsfcl(isd:ied, jsd:jed) ) + allocate ( mn_phys%tsfc(isd:ied, jsd:jed) ) + !allocate ( mn_phys%tsfc_radtime(isd:ied, jsd:jed) ) + + + allocate ( mn_phys%albdirvis_lnd (isd:ied, jsd:jed) ) + allocate ( mn_phys%albdirnir_lnd (isd:ied, jsd:jed) ) + allocate ( mn_phys%albdifvis_lnd (isd:ied, jsd:jed) ) + allocate ( mn_phys%albdifnir_lnd (isd:ied, jsd:jed) ) + + allocate ( mn_phys%cv(isd:ied, jsd:jed) ) + allocate ( mn_phys%cvt(isd:ied, jsd:jed) ) + allocate ( mn_phys%cvb(isd:ied, jsd:jed) ) + + allocate ( mn_phys%phy_f2d(isd:ied, jsd:jed, ntot2d) ) + allocate ( mn_phys%phy_f3d(isd:ied, jsd:jed, levs, ntot3d) ) + end if + + if (move_nsst) then + allocate ( mn_phys%tref(isd:ied, jsd:jed) ) + allocate ( mn_phys%z_c(isd:ied, jsd:jed) ) + allocate ( mn_phys%c_0(isd:ied, jsd:jed) ) + allocate ( mn_phys%c_d(isd:ied, jsd:jed) ) + allocate ( mn_phys%w_0(isd:ied, jsd:jed) ) + allocate ( mn_phys%w_d(isd:ied, jsd:jed) ) + allocate ( mn_phys%xt(isd:ied, jsd:jed) ) + allocate ( mn_phys%xs(isd:ied, jsd:jed) ) + allocate ( mn_phys%xu(isd:ied, jsd:jed) ) + allocate ( mn_phys%xv(isd:ied, jsd:jed) ) + allocate ( mn_phys%xz(isd:ied, jsd:jed) ) + 
allocate ( mn_phys%zm(isd:ied, jsd:jed) ) + allocate ( mn_phys%xtts(isd:ied, jsd:jed) ) + allocate ( mn_phys%xzts(isd:ied, jsd:jed) ) + allocate ( mn_phys%d_conv(isd:ied, jsd:jed) ) + !allocate ( mn_phys%ifd(isd:ied, jsd:jed) ) + allocate ( mn_phys%dt_cool(isd:ied, jsd:jed) ) + allocate ( mn_phys%qrain(isd:ied, jsd:jed) ) + end if + + mn_phys%ts = +99999.9 + if (move_physics) then + mn_phys%slmsk = +99999.9 + mn_phys%smc = +99999.9 + mn_phys%stc = +99999.9 + mn_phys%slc = +99999.9 + + + mn_phys%sfalb_lnd = +99999.9 + mn_phys%emis_lnd = +99999.9 + mn_phys%emis_ice = +99999.9 + mn_phys%emis_wat = +99999.9 + mn_phys%sfalb_lnd_bck = +99999.9 + + !mn_phys%semis = +99999.9 + !mn_phys%semisbase = +99999.9 + !mn_phys%sfalb = +99999.9 + + mn_phys%u10m = +99999.9 + mn_phys%v10m = +99999.9 + mn_phys%tprcp = +99999.9 + + mn_phys%hprime = +99999.9 + + mn_phys%zorl = +99999.9 + mn_phys%zorll = +99999.9 + mn_phys%zorlwav = +99999.9 + mn_phys%zorlw = +99999.9 + + mn_phys%alvsf = +99999.9 + mn_phys%alvwf = +99999.9 + mn_phys%alnsf = +99999.9 + mn_phys%alnwf = +99999.9 + + mn_phys%facsf = +99999.9 + mn_phys%facwf = +99999.9 + + mn_phys%lakefrac = +99999.9 + mn_phys%lakedepth = +99999.9 + + mn_phys%canopy = +99999.9 + mn_phys%vegfrac = +99999.9 + mn_phys%uustar = +99999.9 + mn_phys%shdmin = +99999.9 + mn_phys%shdmax = +99999.9 + mn_phys%tsfco = +99999.9 + mn_phys%tsfcl = +99999.9 + mn_phys%tsfc = +99999.9 + !mn_phys%tsfc_radtime = +99999.9 + + mn_phys%albdirvis_lnd = +99999.9 + mn_phys%albdirnir_lnd = +99999.9 + mn_phys%albdifvis_lnd = +99999.9 + mn_phys%albdifnir_lnd = +99999.9 + + mn_phys%cv = +99999.9 + mn_phys%cvt = +99999.9 + mn_phys%cvb = +99999.9 + + mn_phys%phy_f2d = +99999.9 + mn_phys%phy_f3d = +99999.9 + end if + + if (move_nsst) then + mn_phys%tref = +99999.9 + mn_phys%z_c = +99999.9 + mn_phys%c_0 = +99999.9 + mn_phys%c_d = +99999.9 + mn_phys%w_0 = +99999.9 + mn_phys%w_d = +99999.9 + mn_phys%xt = +99999.9 + mn_phys%xs = +99999.9 + mn_phys%xu = +99999.9 + mn_phys%xv = +99999.9 + mn_phys%xz = +99999.9 + mn_phys%zm = +99999.9 + mn_phys%xtts = +99999.9 + mn_phys%xzts = +99999.9 + mn_phys%d_conv = +99999.9 + !mn_phys%ifd = +99999.9 + mn_phys%dt_cool = +99999.9 + mn_phys%qrain = +99999.9 + end if + + end subroutine allocate_fv_moving_nest_physics_type + + + subroutine deallocate_fv_moving_nest_physics_type(mn_phys) + type(fv_moving_nest_physics_type), intent(inout) :: mn_phys + + if (allocated(mn_phys%ts)) then + deallocate ( mn_phys%ts ) + else + ! If ts was not allocated, then none of this structure was allocated. + return + end if + + ! 
if move_phys + if (allocated(mn_phys%smc)) then + deallocate( mn_phys%slmsk ) + deallocate( mn_phys%smc ) + deallocate( mn_phys%stc ) + deallocate( mn_phys%slc ) + + deallocate( mn_phys%sfalb_lnd ) + deallocate( mn_phys%emis_lnd ) + deallocate( mn_phys%emis_ice ) + deallocate( mn_phys%emis_wat ) + deallocate( mn_phys%sfalb_lnd_bck ) + + !deallocate( mn_phys%semis ) + !deallocate( mn_phys%semisbase ) + !deallocate( mn_phys%sfalb ) + + deallocate( mn_phys%u10m ) + deallocate( mn_phys%v10m ) + deallocate( mn_phys%tprcp ) + + deallocate( mn_phys%hprime ) + + deallocate( mn_phys%zorl ) + deallocate( mn_phys%zorll ) + deallocate( mn_phys%zorlwav ) + deallocate( mn_phys%zorlw ) + + deallocate( mn_phys%alvsf ) + deallocate( mn_phys%alvwf ) + deallocate( mn_phys%alnsf ) + deallocate( mn_phys%alnwf ) + + deallocate( mn_phys%facsf ) + deallocate( mn_phys%facwf ) + + deallocate( mn_phys%lakefrac ) + deallocate( mn_phys%lakedepth ) + + deallocate( mn_phys%canopy ) + deallocate( mn_phys%vegfrac ) + deallocate( mn_phys%uustar ) + deallocate( mn_phys%shdmin ) + deallocate( mn_phys%shdmax ) + deallocate( mn_phys%tsfco ) + deallocate( mn_phys%tsfcl ) + deallocate( mn_phys%tsfc ) + !deallocate( mn_phys%tsfc_radtime ) + + deallocate( mn_phys%albdirvis_lnd ) + deallocate( mn_phys%albdirnir_lnd ) + deallocate( mn_phys%albdifvis_lnd ) + deallocate( mn_phys%albdifnir_lnd ) + + deallocate( mn_phys%cv ) + deallocate( mn_phys%cvt ) + deallocate( mn_phys%cvb ) + + deallocate( mn_phys%phy_f2d ) + deallocate( mn_phys%phy_f3d ) + end if + + ! if move_nsst + if (allocated( mn_phys%tref )) then + deallocate( mn_phys%tref ) + deallocate( mn_phys%z_c ) + deallocate( mn_phys%c_0 ) + deallocate( mn_phys%c_d ) + deallocate( mn_phys%w_0 ) + deallocate( mn_phys%w_d ) + deallocate( mn_phys%xt ) + deallocate( mn_phys%xs ) + deallocate( mn_phys%xu ) + deallocate( mn_phys%xv ) + deallocate( mn_phys%xz ) + deallocate( mn_phys%zm ) + deallocate( mn_phys%xtts ) + deallocate( mn_phys%xzts ) + deallocate( mn_phys%d_conv ) + !deallocate( mn_phys%ifd ) + deallocate( mn_phys%dt_cool ) + deallocate( mn_phys%qrain ) + end if + + end subroutine deallocate_fv_moving_nest_physics_type + +#endif +end module fv_moving_nest_types_mod diff --git a/moving_nest/fv_moving_nest_utils.F90 b/moving_nest/fv_moving_nest_utils.F90 new file mode 100644 index 000000000..d751214cc --- /dev/null +++ b/moving_nest/fv_moving_nest_utils.F90 @@ -0,0 +1,3858 @@ +!*********************************************************************** +!* GNU Lesser General Public License +!* +!* This file is part of the FV3 dynamical core. +!* +!* The FV3 dynamical core is free software: you can redistribute it +!* and/or modify it under the terms of the +!* GNU Lesser General Public License as published by the +!* Free Software Foundation, either version 3 of the License, or +!* (at your option) any later version. +!* +!* The FV3 dynamical core is distributed in the hope that it will be +!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty +!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +!* See the GNU General Public License for more details. +!* +!* You should have received a copy of the GNU Lesser General Public +!* License along with the FV3 dynamical core. +!* If not, see <http://www.gnu.org/licenses/>. +!*********************************************************************** + + +!*********************************************************************** +!> @file +!! @brief Provides subroutines to enable moving nest functionality in FV3 dynamical core. +!! @author W.
Ramstrom, AOML/HRD 01/15/2021 +!! @email William.Ramstrom@noaa.gov +! =======================================================================! + +module fv_moving_nest_utils_mod + +#ifdef MOVING_NEST + + use mpp_mod, only: FATAL, WARNING, MPP_DEBUG, NOTE, MPP_CLOCK_SYNC,MPP_CLOCK_DETAILED + use mpp_mod, only: mpp_pe, mpp_npes, mpp_root_pe, mpp_error, mpp_set_warn_level + use mpp_mod, only: mpp_declare_pelist, mpp_set_current_pelist, mpp_sync, mpp_sync_self + use mpp_mod, only: mpp_clock_begin, mpp_clock_end, mpp_clock_id + use mpp_mod, only: mpp_init, mpp_exit, mpp_chksum, stdout, stderr + use mpp_mod, only: input_nml_file + use mpp_mod, only: mpp_get_current_pelist, mpp_broadcast + use mpp_domains_mod, only: GLOBAL_DATA_DOMAIN, BITWISE_EXACT_SUM, BGRID_NE, CGRID_NE, DGRID_NE, AGRID + use mpp_parameter_mod, only: AGRID_PARAM=>AGRID,CGRID_NE_PARAM=>CGRID_NE,SCALAR_PAIR + use mpp_domains_mod, only: FOLD_SOUTH_EDGE, FOLD_NORTH_EDGE, FOLD_WEST_EDGE, FOLD_EAST_EDGE + use mpp_domains_mod, only: MPP_DOMAIN_TIME, CYCLIC_GLOBAL_DOMAIN, NUPDATE,EUPDATE, XUPDATE, YUPDATE, SCALAR_PAIR + use mpp_domains_mod, only: domain1D, domain2D, DomainCommunicator2D, BITWISE_EFP_SUM + use mpp_domains_mod, only: mpp_get_compute_domain, mpp_get_data_domain, mpp_domains_set_stack_size + use mpp_domains_mod, only: mpp_global_field, mpp_global_sum, mpp_global_max, mpp_global_min + use mpp_domains_mod, only: mpp_domains_init, mpp_domains_exit, mpp_broadcast_domain + use mpp_domains_mod, only: mpp_update_domains, mpp_check_field, mpp_redistribute, mpp_get_memory_domain + use mpp_domains_mod, only: mpp_define_layout, mpp_define_domains, mpp_modify_domain + use mpp_domains_mod, only: mpp_define_io_domain + use mpp_domains_mod, only: mpp_get_neighbor_pe, mpp_define_mosaic, mpp_nullify_domain_list + use mpp_domains_mod, only: NORTH, NORTH_EAST, EAST, SOUTH_EAST, CORNER, CENTER + use mpp_domains_mod, only: SOUTH, SOUTH_WEST, WEST, NORTH_WEST, mpp_define_mosaic_pelist + use mpp_domains_mod, only: mpp_get_global_domain, ZERO, NINETY, MINUS_NINETY + use mpp_domains_mod, only: mpp_get_boundary, mpp_start_update_domains, mpp_complete_update_domains + use mpp_domains_mod, only: mpp_define_nest_domains, nest_domain_type + use mpp_domains_mod, only: mpp_get_C2F_index, mpp_update_nest_fine + use mpp_domains_mod, only: mpp_get_F2C_index, mpp_update_nest_coarse + use mpp_domains_mod, only: mpp_get_domain_shift, EDGEUPDATE, mpp_deallocate_domain + use mpp_domains_mod, only: mpp_group_update_type, mpp_create_group_update + use mpp_domains_mod, only: mpp_do_group_update, mpp_clear_group_update + use mpp_domains_mod, only: mpp_start_group_update, mpp_complete_group_update + use mpp_domains_mod, only: WUPDATE, SUPDATE, mpp_get_compute_domains, NONSYMEDGEUPDATE + use mpp_domains_mod, only: domainUG, mpp_define_unstruct_domain, mpp_get_UG_domain_tile_id + use mpp_domains_mod, only: mpp_get_UG_compute_domain, mpp_pass_SG_to_UG, mpp_pass_UG_to_SG + use mpp_domains_mod, only: mpp_get_ug_global_domain, mpp_global_field_ug + use mpp_memutils_mod, only: mpp_memuse_begin, mpp_memuse_end + +#ifdef GFS_TYPES + use GFS_typedefs, only: kind_phys +#else + use IPD_typedefs, only: kind_phys => IPD_kind_phys +#endif + + use constants_mod, only: grav + + ! 
Added WDR + use boundary_mod, only: update_coarse_grid, update_coarse_grid_mpp + use bounding_box_mod, only: bbox, bbox_get_C2F_index, fill_bbox, show_bbox + use fms_io_mod, only: read_data, write_data, get_global_att_value, fms_io_init, fms_io_exit + use fv_arrays_mod, only: R_GRID + use fv_arrays_mod, only: fv_grid_type, fv_nest_type, fv_atmos_type + use fv_surf_map_mod, only: FV3_zs_filter + use fv_moving_nest_types_mod, only: grid_geometry + + implicit none + +#ifdef NO_QUAD_PRECISION + ! 64-bit precision (kind=8) + integer, parameter:: f_p = selected_real_kind(15) +#else + ! Higher precision (kind=16) for grid geometrical factors: + integer, parameter:: f_p = selected_real_kind(20) +#endif + + integer, parameter :: UWIND = 1 + integer, parameter :: VWIND = 2 + + logical :: debug_log = .false. + + +#include + + + interface alloc_read_data +#ifdef OVERLOAD_R8 + module procedure alloc_read_data_r4_2d +#endif + module procedure alloc_read_data_r8_2d + end interface alloc_read_data + + interface fill_nest_halos_from_parent + module procedure fill_nest_halos_from_parent_r4_2d + module procedure fill_nest_halos_from_parent_r4_3d + module procedure fill_nest_halos_from_parent_r4_4d + + module procedure fill_nest_halos_from_parent_r8_2d + module procedure fill_nest_halos_from_parent_r8_3d + module procedure fill_nest_halos_from_parent_r8_4d + end interface fill_nest_halos_from_parent + + interface alloc_halo_buffer + module procedure alloc_halo_buffer_r4_2d + module procedure alloc_halo_buffer_r4_3d + module procedure alloc_halo_buffer_r4_4d + + module procedure alloc_halo_buffer_r8_2d + module procedure alloc_halo_buffer_r8_3d + module procedure alloc_halo_buffer_r8_4d + end interface alloc_halo_buffer + + interface fill_nest_from_buffer + module procedure fill_nest_from_buffer_r4_2d + module procedure fill_nest_from_buffer_r4_3d + module procedure fill_nest_from_buffer_r4_4d + + module procedure fill_nest_from_buffer_r8_2d + module procedure fill_nest_from_buffer_r8_3d + module procedure fill_nest_from_buffer_r8_4d + end interface fill_nest_from_buffer + + interface fill_nest_from_buffer_cell_center + module procedure fill_nest_from_buffer_cell_center_r4_2d + module procedure fill_nest_from_buffer_cell_center_r4_3d + module procedure fill_nest_from_buffer_cell_center_r4_4d + + module procedure fill_nest_from_buffer_cell_center_r8_2d + module procedure fill_nest_from_buffer_cell_center_r8_3d + module procedure fill_nest_from_buffer_cell_center_r8_4d + end interface fill_nest_from_buffer_cell_center + + interface output_grid_to_nc + module procedure output_grid_to_nc_2d + module procedure output_grid_to_nc_3d + end interface output_grid_to_nc + + interface fill_grid_from_supergrid + module procedure fill_grid_from_supergrid_r4_3d + module procedure fill_grid_from_supergrid_r8_3d + module procedure fill_grid_from_supergrid_r8_4d + end interface fill_grid_from_supergrid + + interface check_array + module procedure check_array_r4_2d + module procedure check_array_r4_3d + module procedure check_array_r4_4d + + module procedure check_array_r8_2d + module procedure check_array_r8_3d + module procedure check_array_r8_4d + end interface check_array + + interface check_local_array + module procedure check_local_array_r4_2d + module procedure check_local_array_r4_3d + module procedure check_local_array_r8_2d + module procedure check_local_array_r8_3d + end interface check_local_array + +contains + + ! GEMPAK 5-point smoother + !SM5S Smooth scalar grid using a 5-point smoother + ! 
SM5S ( S ) = .5 * S (i,j) + .125 * ( S (i+1,j) + S (i,j+1) + + ! S (i-1,j) + S (i,j-1) ) + ! GEMPAK 9-point smoother + !SM9S Smooth scalar grid using a 9-point smoother + ! SM5S ( S ) = .25 * S (i,j) + .125 * ( S (i+1,j) + S (i,j+1) + + ! S (i-1,j) + S (i,j-1) ) + ! + .0625 * ( S (i+1,j+1) + + ! S (i+1,j-1) + + ! S (i-1,j+1) + + ! S (i-1,j-1) ) + + + subroutine smooth_5_point(data_var, i, j, val) + real, allocatable, intent(in) :: data_var(:,:) + integer :: i,j + real, intent(out) :: val + + ! Stay in bounds of the array + if ( (i-1) .ge. lbound(data_var,1) .and. i .le. ubound(data_var,1) .and. (j-1) .ge. lbound(data_var,2) .and. j .le. ubound(data_var,2) ) then + val = .5 * data_var(i,j) + .125 * ( data_var(i+1,j) + data_var(i,j+1) + data_var(i-1,j) + data_var(i,j-1) ) + else + ! Don't smooth if at the edge. Could do partial smoothing here also, but don't expect moving nest to reach the edge. + val = data_var(i,j) + endif + + end subroutine smooth_5_point + + + subroutine smooth_9_point(data_var, i, j, val) + real, allocatable, intent(in) :: data_var(:,:) + integer :: i,j + real, intent(out) :: val + + ! Stay in bounds of the array + if ( (i-1) .ge. lbound(data_var,1) .and. i .le. ubound(data_var,1) .and. (j-1) .ge. lbound(data_var,2) .and. j .le. ubound(data_var,2) ) then + val = .25 * data_var(i,j) + .125 * ( data_var(i+1,j) + data_var(i,j+1) + data_var(i-1,j) + data_var(i,j-1) ) & + + .0625 * ( data_var(i+1,j+1) + data_var(i+1,j-1) + data_var(i-1,j+1) + data_var(i-1,j-1) ) + else + ! Don't smooth if at the edge. Could do partial smoothing here also, but don't expect moving nest to reach the edge. + val = data_var(i,j) + endif + + end subroutine smooth_9_point + + ! blend_size is 5 for static nests. We may increase it for moving nests. + ! This is only called for fine PEs. + ! Blends a few points into the nest. Calls zs filtering if enabled in namelist. + subroutine set_blended_terrain(Atm, parent_orog_grid, nest_orog_grid, refine, halo_size, blend_size, a_step) + type(fv_atmos_type), intent(inout), target :: Atm + real, allocatable, intent(in) :: parent_orog_grid(:,:) ! Coarse grid orography + real, allocatable, intent(in) :: nest_orog_grid(:,:) ! orography for the full panel of the parent, at high-resolution + integer, intent(in) :: refine, halo_size, blend_size, a_step + + integer :: i, j, ic, jc + integer :: ioffset, joffset + integer :: npx, npy, isd, ied, jsd, jed + real :: smoothed_orog, hires_orog, blend_wt, blend_orog + + real, pointer, dimension(:,:,:) :: wt + integer, pointer, dimension(:,:,:) :: ind + integer :: this_pe + + this_pe = mpp_pe() + + npx = Atm%npx + npy = Atm%npy + + isd = Atm%bd%isc - halo_size + ied = Atm%bd%iec + halo_size + jsd = Atm%bd%jsc - halo_size + jed = Atm%bd%jec + halo_size + + ioffset = Atm%neststruct%ioffset + joffset = Atm%neststruct%joffset + + wt => Atm%neststruct%wt_h + ind => Atm%neststruct%ind_h + + do j=jsd, jed + do i=isd, ied + ic = ind(i,j,1) + jc = ind(i,j,2) + + smoothed_orog = & + wt(i,j,1)*parent_orog_grid(ic, jc ) + & + wt(i,j,2)*parent_orog_grid(ic, jc+1) + & + wt(i,j,3)*parent_orog_grid(ic+1,jc+1) + & + wt(i,j,4)*parent_orog_grid(ic+1,jc ) + + hires_orog = nest_orog_grid((ioffset-1)*refine+i, (joffset-1)*refine+j) + + ! From tools/external_ic.F90 + if (blend_size .eq. 10) then + blend_wt = max(0.,min(1.,real(10 - min(i,j,npx-i,npy-j,10))/10. )) + else + blend_wt = max(0.,min(1.,real(5 - min(i,j,npx-i,npy-j,5))/5. 
)) + end if + + !blend_wt = max(0.,min(1.,real(blend_size - min(i,j,npx-i,npy-j,blend_size))/real(blend_size) )) + blend_orog = (1.-blend_wt)*hires_orog + blend_wt*smoothed_orog + + + + Atm%phis(i,j) = blend_orog * grav + + !if (this_pe .ge. 96) then + ! print '("[INFO] WDR BLEND npe=",I0," a_step=",I0," i,j=",I0,",",I0," smoothed_orog=",F10.5," hires_orog=",F10.5," blend_wt=",F6.4," blend_orog=",F10.5)', this_pe, a_step, i, j, smoothed_orog, hires_orog, blend_wt, blend_orog + !endif + + enddo + enddo + + + ! From tools/fv_surf_map.F90::surfdrv() + !print '("[INFO] WDR BLEND npe=",I0," full_zs_filter=",L1," blend_size=",I0)', this_pe, Atm%flagstruct%full_zs_filter, blend_size + if ( Atm%flagstruct%full_zs_filter ) then + !if(is_master()) then + ! write(*,*) 'Applying terrain filters. zero_ocean is', zero_ocean + !endif + !call FV3_zs_filter (bd, isd, ied, jsd, jed, npx, npy, npx_global, & + ! stretch_fac, bounded_domain, domain, area, dxa, dya, dx, dy, dxc, dyc, grid, & + ! agrid, sin_sg, phis, oro_g) + + call FV3_zs_filter (Atm%bd, isd, ied, jsd, jed, Atm%npx, Atm%npy, Atm%neststruct%npx_global, & + Atm%flagstruct%stretch_fac, Atm%gridstruct%bounded_domain, Atm%domain, & + Atm%gridstruct%area_64, Atm%gridstruct%dxa, Atm%gridstruct%dya, & + Atm%gridstruct%dx, Atm%gridstruct%dy, & + Atm%gridstruct%dxc, Atm%gridstruct%dyc, & + Atm%gridstruct%grid_64, & + Atm%gridstruct%agrid_64, Atm%gridstruct%sin_sg, Atm%phis, parent_orog_grid) + + call mpp_update_domains(Atm%phis, Atm%domain) + endif ! end terrain filter + + end subroutine set_blended_terrain + + subroutine set_smooth_nest_terrain(Atm, fp_orog, refine, num_points, halo_size, blend_size) + type(fv_atmos_type), intent(inout) :: Atm + real, allocatable, intent(in) :: fp_orog(:,:) ! orography for the full panel of the parent, at high-resolution + integer, intent(in) :: refine, num_points, halo_size, blend_size + + integer :: i,j + integer :: ioffset, joffset + integer :: npx, npy, isd, ied, jsd, jed + integer :: smooth_i_lo, smooth_i_hi, smooth_j_lo, smooth_j_hi + real :: smoothed_orog + character(len=16) :: errstring + + npx = Atm%npx + npy = Atm%npy + + isd = Atm%bd%isc - halo_size + ied = Atm%bd%iec + halo_size + jsd = Atm%bd%jsc - halo_size + jed = Atm%bd%jec + halo_size + + ioffset = Atm%neststruct%ioffset + joffset = Atm%neststruct%joffset + + smooth_i_lo = 1 + blend_size + smooth_i_hi = npx - blend_size - halo_size + + smooth_j_lo = 1 + blend_size + smooth_j_hi = npy - blend_size - halo_size + + !Atm(n)%phis(isd:ied, jsd:jed) = mn_static%orog_grid((ioffset-1)*x_refine+isd:(ioffset-1)*x_refine+ied, (joffset-1)*y_refine+jsd:(joffset-1)*y_refine+jed) * grav + + select case(num_points) + case (5) + + do j=jsd, jed + do i=isd, ied + if (i .lt. smooth_i_lo .or. i .gt. smooth_i_hi .or. j .lt. smooth_j_lo .or. j .gt. smooth_j_hi) then + call smooth_5_point(fp_orog, (ioffset-1)*refine + i, (joffset-1)*refine + j, smoothed_orog) + Atm%phis(i,j) = smoothed_orog * grav + else + Atm%phis(i,j) = fp_orog((ioffset-1)*refine + i, (joffset-1)*refine + j) * grav + endif + enddo + enddo + + case (9) + + do j=jsd, jed + do i=isd, ied + if (i .lt. smooth_i_lo .or. i .gt. smooth_i_hi .or. j .lt. smooth_j_lo .or. j .gt. 
smooth_j_hi) then + call smooth_9_point(fp_orog, (ioffset-1)*refine + i, (joffset-1)*refine + j, smoothed_orog) + Atm%phis(i,j) = smoothed_orog * grav + else + Atm%phis(i,j) = fp_orog((ioffset-1)*refine + i, (joffset-1)*refine + j) * grav + endif + enddo + enddo + + case default + write (errstring, "(I0)") num_points + call mpp_error(FATAL,'Invalid terrain_smoother in set_smooth_nest_terrain '//errstring) + end select + + end subroutine set_smooth_nest_terrain + + ! Compare terrain for parent and nest cells - for debugging + + subroutine compare_terrain(var_name, data_var, interp_type, ind, x_refine, y_refine, is_fine_pe, nest_domain) + character(len=*), intent(in) :: var_name + real, allocatable, intent(in) :: data_var(:,:) + integer, intent(in) :: interp_type + integer, allocatable, intent(in) :: ind(:,:,:) + integer, intent(in) :: x_refine, y_refine + logical, intent(in) :: is_fine_pe + type(nest_domain_type), intent(inout) :: nest_domain + + integer :: position = CENTER + + real, dimension(:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: this_pe + integer :: nest_level = 1 ! WDR TODO allow to vary + + this_pe = mpp_pe() + + ! Get the parent terrain through halo mechanism + !print '("[INFO] WDR compare_terrain AA. npe=",I0)', this_pe + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position) + !print '("[INFO] WDR compare_terrain BB. npe=",I0)', this_pe + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position) + !print '("[INFO] WDR compare_terrain CC. npe=",I0)', this_pe + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position) + !print '("[INFO] WDR compare_terrain DD. npe=",I0)', this_pe + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position) + !print '("[INFO] WDR compare_terrain EE. npe=",I0)', this_pe + + if (debug_log) print '("[INFO] WDR NRF0.d mn_var_shift_data npe=",I0," data_var(",I0,",",I0,")")', this_pe, size(data_var,1), size(data_var,2) + + ! Passes data from coarse grid to fine grid's halo + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + !print '("[INFO] WDR compare_terrain FF. npe=",I0)', this_pe + + ! Figure out alignment of parent and child data and compare + ! At most one of the buffers will have any data in it from the parent + + if (is_fine_pe) then + call compare_buffer(north_coarse, north_fine, ind, nbuffer, data_var) + !print '("[INFO] WDR compare_terrain GG. npe=",I0)', this_pe + call compare_buffer(south_coarse, south_fine, ind, sbuffer, data_var) + !print '("[INFO] WDR compare_terrain HH. npe=",I0)', this_pe + call compare_buffer(east_coarse, east_fine, ind, ebuffer, data_var) + !print '("[INFO] WDR compare_terrain II. npe=",I0)', this_pe + call compare_buffer(west_coarse, west_fine, ind, wbuffer, data_var) + !print '("[INFO] WDR compare_terrain JJ. npe=",I0)', this_pe + endif + + print '("[INFO] WDR compare_terrain ZZ. 
npe=",I0)', this_pe + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine compare_terrain + + + subroutine compare_buffer(bbox_coarse, bbox_fine, ind, buffer, fine_var) + type(bbox), intent(in) :: bbox_coarse, bbox_fine + integer, allocatable, intent(in) :: ind(:,:,:) + real, allocatable, intent(in) :: buffer(:,:) + real, allocatable, intent(in) :: fine_var(:,:) + + + integer :: i, j, ic, jc + integer :: this_pe + + this_pe = mpp_pe() + + if ( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. bbox_coarse%js ) then + !debug_log = .true. + + !if (debug_log) print '("[INFO] WDR BUFR print large buffer. npe=",I0," buffer(is_c, js_c)=",F12.5," buffer(ie_c-1, je_c-1)=",F12.5)', this_pe, buffer(bbox_coarse%is, bbox_coarse%js), buffer(bbox_coarse%ie-1, bbox_coarse%je-1) + + if (debug_log) print '("[INFO] WDR BOUNDS i npe=",I0," is_f=",I0," ie_f=",I0," is_c=",I0," ie_c=",I0)', this_pe, bbox_fine%is, bbox_fine%ie, bbox_coarse%is, bbox_coarse%ie + if (debug_log) print '("[INFO] WDR BOUNDS j npe=",I0," js_f=",I0," je_f=",I0," js_c=",I0," je_c=",I0)', this_pe, bbox_fine%js, bbox_fine%je, bbox_coarse%js, bbox_coarse%je + + if (debug_log) print '("[INFO] WDR BOUNDS fine_var npe=",I0," fine_var(",I0,"-",I0,",",I0,"-",I0,")")', this_pe, lbound(fine_var,1), ubound(fine_var,1), lbound(fine_var,2), ubound(fine_var,2) + if (debug_log) print '("[INFO] WDR BOUNDS buffer npe=",I0," buffer(",I0,"-",I0,",",I0,"-",I0,")")', this_pe, lbound(buffer,1), ubound(buffer,1), lbound(buffer,2), ubound(buffer,2) + + do i=bbox_fine%is, bbox_fine%ie + do j=bbox_fine%js, bbox_fine%je + + ic = ind(i,j,1) + jc = ind(i,j,2) + + !print '("[INFO] WDR BOUNDS_ITER npe=",I0," i=",I0," j=",I0," ic=",I0," jc=",I0)', this_pe, i, j, ic, jc + !print '("[INFO] WDR BOUNDS_FINE npe=",I0," i=",I0," j=",I0," fine_var=",F12.5)', this_pe, i, j, fine_var(i,j) + !print '("[INFO] WDR BOUNDS_BUFFER1 npe=",I0," ic=",I0," jc=",I0," buffer=",F12.5)', this_pe, ic, jc, buffer(ic,jc) + !print '("[INFO] WDR BOUNDS_BUFFER2 npe=",I0," ic=",I0," jc=",I0," buffer=",F12.5)', this_pe, ic, jc+1, buffer(ic,jc+1) + !print '("[INFO] WDR BOUNDS_BUFFER3 npe=",I0," ic=",I0," jc=",I0," buffer=",F12.5)', this_pe, ic+1, jc+1, buffer(ic+1,jc+1) + !print '("[INFO] WDR BOUNDS_BUFFER4 npe=",I0," ic=",I0," jc=",I0," buffer=",F12.5)', this_pe, ic+1, jc, buffer(ic+1,jc) + + if ( (fine_var(i,j) .gt. 0.01) .or. & + (buffer(ic,jc) .gt. 0.01) .or. & + (buffer(ic,jc+1) .gt. 0.01) .or. & + (buffer(ic+1,jc+1) .gt. 0.01) .or. & + (buffer(ic+1,jc) .gt. 0.01)) then + print '("[INFO] WDR COMP_TERR npe=",I0," i=",I0," j=",I0," ic=",I0," jc=",I0,F10.3," ",F10.3," ",F10.3," ",F10.3," ",F10.3)', this_pe, i, j, ic, jc, fine_var(i,j), buffer(ic, jc ), buffer(ic, jc+1), buffer(ic+1,jc+1), buffer(ic+1,jc ) + endif + + !wt(i,j,1)*buffer(ic, jc ) + & + !wt(i,j,2)*buffer(ic, jc+1) + & + !wt(i,j,3)*buffer(ic+1,jc+1) + & + !wt(i,j,4)*buffer(ic+1,jc ) + + enddo + enddo + !print '("[INFO] WDR BOUNDS_DONE npe=",I0," i=",I0," j=",I0)', this_pe, i, j + + debug_log = .false. + !else + ! print '("[INFO] WDR NIL BUFR. npe=",I0)', this_pe + endif + end subroutine compare_buffer + + !================================================================================================== + ! + ! Fill Nest Halos from Parent + ! 
+ !================================================================================================== + + subroutine fill_nest_halos_from_parent_r4_2d(var_name, data_var, interp_type, wt, ind, x_refine, y_refine, is_fine_pe, nest_domain, position) + character(len=*), intent(in) :: var_name + real*4, allocatable, intent(inout) :: data_var(:,:) + integer, intent(in) :: interp_type + real, allocatable, intent(in) :: wt(:,:,:) + integer, allocatable, intent(in) :: ind(:,:,:) + integer, intent(in) :: x_refine, y_refine + logical, intent(in) :: is_fine_pe + type(nest_domain_type), intent(inout) :: nest_domain + integer, intent(in) :: position + + real*4, dimension(:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: this_pe + integer :: nest_level = 1 ! WDR TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + if (debug_log) then + + print '("[INFO] WDR Start fill_nest_halos_from_parent2D. npe=",I0," var_name=",A16)', this_pe, var_name + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%tile_fine=",I0," %tile_coarse=",I0)', this_pe, nest_domain%tile_fine, nest_domain%tile_coarse + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%istart_fine=",I0," %iend_fine=",I0)', this_pe, nest_domain%istart_fine, nest_domain%iend_fine + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%jstart_fine=",I0," %jend_fine=",I0)', this_pe, nest_domain%jstart_fine, nest_domain%jend_fine + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%istart_coarse=",I0," %iend_coarse=",I0)', this_pe, nest_domain%istart_coarse, nest_domain%iend_coarse + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%jstart_coarse=",I0," %jend_coarse=",I0)', this_pe, nest_domain%jstart_coarse, nest_domain%jend_coarse + print '("[INFO] data_var npe=",I0," var_name=",A16," data_var(",I0,"-",I0,",",I0,"-",I0,")")', & + this_pe, var_name, lbound(data_var, 1), ubound(data_var, 1), lbound(data_var, 2), ubound(data_var, 2) + print '("[INFO] wt npe=",I0," var_name=",A16," wt(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', & + this_pe, var_name, lbound(wt, 1), ubound(wt, 1), lbound(wt, 2), ubound(wt, 2), lbound(wt, 3), ubound(wt, 3) + endif + + !==================================================== + + if (debug_log) print '("[INFO] WDR ALL1. npe=",I0," position=",I0)', this_pe, position + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position) + + if (debug_log) print '("[INFO] WDR NRF0.d mn_var_shift_data npe=",I0," data_var(",I0,",",I0,")")', this_pe, size(data_var,1), size(data_var,2) + + ! Passes data from coarse grid to fine grid's halo + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Apply halo data + !! 
+ !!=========================================================== + + if (debug_log) print '("[INFO] WDR NRFI mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, NORTH, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF N mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, SOUTH, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF S mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, EAST, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF E mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, WEST, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF W mn_var_shift_data start. npe=",I0)', this_pe + + endif + + if (debug_log) print '("[INFO] WDR End fill_nest_halos_from_parent2D. npe=",I0," var_name=",A16)', this_pe, var_name + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine fill_nest_halos_from_parent_r4_2d + + + subroutine fill_nest_halos_from_parent_r8_2d(var_name, data_var, interp_type, wt, ind, x_refine, y_refine, is_fine_pe, nest_domain, position) + character(len=*), intent(in) :: var_name + real*8, allocatable, intent(inout) :: data_var(:,:) + integer, intent(in) :: interp_type + real, allocatable, intent(in) :: wt(:,:,:) ! TODO should this also be real*8? + integer, allocatable, intent(in) :: ind(:,:,:) + integer, intent(in) :: x_refine, y_refine + logical, intent(in) :: is_fine_pe + type(nest_domain_type), intent(inout) :: nest_domain + integer, intent(in) :: position + + + real*8, dimension(:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: this_pe + integer :: nest_level = 1 ! WDR TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + if (debug_log) then + + print '("[INFO] WDR Start fill_nest_halos_from_parent2D_kindphys. 
npe=",I0," var_name=",A16)', this_pe, var_name + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%tile_fine=",I0," %tile_coarse=",I0)', this_pe, nest_domain%tile_fine, nest_domain%tile_coarse + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%istart_fine=",I0," %iend_fine=",I0)', this_pe, nest_domain%istart_fine, nest_domain%iend_fine + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%jstart_fine=",I0," %jend_fine=",I0)', this_pe, nest_domain%jstart_fine, nest_domain%jend_fine + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%istart_coarse=",I0," %iend_coarse=",I0)', this_pe, nest_domain%istart_coarse, nest_domain%iend_coarse + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%jstart_coarse=",I0," %jend_coarse=",I0)', this_pe, nest_domain%jstart_coarse, nest_domain%jend_coarse + print '("[INFO] data_var npe=",I0," var_name=",A16," data_var(",I0,"-",I0,",",I0,"-",I0,")")', & + this_pe, var_name, lbound(data_var, 1), ubound(data_var, 1), lbound(data_var, 2), ubound(data_var, 2) + print '("[INFO] wt npe=",I0," var_name=",A16," wt(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', & + this_pe, var_name, lbound(wt, 1), ubound(wt, 1), lbound(wt, 2), ubound(wt, 2), lbound(wt, 3), ubound(wt, 3) + endif + + !==================================================== + + if (debug_log) print '("[INFO] WDR ALL1. npe=",I0," position=",I0)', this_pe, position + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position) + + if (debug_log) print '("[INFO] WDR NRF0.d mn_var_shift_data npe=",I0," data_var(",I0,",",I0,")")', this_pe, size(data_var,1), size(data_var,2) + + ! Passes data from coarse grid to fine grid's halo + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Apply halo data + !! + !!=========================================================== + + if (debug_log) print '("[INFO] WDR NRFI mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, NORTH, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF N mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, SOUTH, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF S mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, EAST, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF E mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, WEST, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF W mn_var_shift_data start. npe=",I0)', this_pe + + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + if (debug_log) print '("[INFO] WDR End fill_nest_halos_from_parent2D_kindphys. 
npe=",I0," var_name=",A16)', this_pe, var_name + + end subroutine fill_nest_halos_from_parent_r8_2d + + + subroutine fill_nest_halos_from_parent_masked(var_name, data_var, interp_type, wt, ind, x_refine, y_refine, is_fine_pe, nest_domain, position, mask_var, mask_val, default_val) + character(len=*), intent(in) :: var_name + real*8, allocatable, intent(inout) :: data_var(:,:) + integer, intent(in) :: interp_type + real, allocatable, intent(in) :: wt(:,:,:) ! TODO should this also be real*8? + integer, allocatable, intent(in) :: ind(:,:,:) + integer, intent(in) :: x_refine, y_refine + logical, intent(in) :: is_fine_pe + type(nest_domain_type), intent(inout) :: nest_domain + integer, intent(in) :: position + real*4, allocatable, intent(in) :: mask_var(:,:) + integer, intent(in) :: mask_val + real*8, intent(in) :: default_val + + real*8, dimension(:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: this_pe + integer :: nest_level = 1 ! WDR TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + if (debug_log) then + + print '("[INFO] WDR Start fill_nest_halos_from_parent2D_kindphys. npe=",I0," var_name=",A16)', this_pe, var_name + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%tile_fine=",I0," %tile_coarse=",I0)', this_pe, nest_domain%tile_fine, nest_domain%tile_coarse + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%istart_fine=",I0," %iend_fine=",I0)', this_pe, nest_domain%istart_fine, nest_domain%iend_fine + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%jstart_fine=",I0," %jend_fine=",I0)', this_pe, nest_domain%jstart_fine, nest_domain%jend_fine + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%istart_coarse=",I0," %iend_coarse=",I0)', this_pe, nest_domain%istart_coarse, nest_domain%iend_coarse + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%jstart_coarse=",I0," %jend_coarse=",I0)', this_pe, nest_domain%jstart_coarse, nest_domain%jend_coarse + print '("[INFO] data_var npe=",I0," var_name=",A16," data_var(",I0,"-",I0,",",I0,"-",I0,")")', & + this_pe, var_name, lbound(data_var, 1), ubound(data_var, 1), lbound(data_var, 2), ubound(data_var, 2) + print '("[INFO] wt npe=",I0," var_name=",A16," wt(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', & + this_pe, var_name, lbound(wt, 1), ubound(wt, 1), lbound(wt, 2), ubound(wt, 2), lbound(wt, 3), ubound(wt, 3) + endif + + !==================================================== + + if (debug_log) print '("[INFO] WDR ALL1. npe=",I0," position=",I0)', this_pe, position + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position) + + if (debug_log) print '("[INFO] WDR NRF0.d mn_var_shift_data npe=",I0," data_var(",I0,",",I0,")")', this_pe, size(data_var,1), size(data_var,2) + + ! 
Passes data from coarse grid to fine grid's halo + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Apply halo data + !! + !!=========================================================== + + if (debug_log) print '("[INFO] WDR NRFI mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer_masked(interp_type, data_var, nbuffer, north_fine, north_coarse, NORTH, x_refine, y_refine, wt, ind, mask_var, mask_val, default_val) + if (debug_log) print '("[INFO] WDR NRF N mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer_masked(interp_type, data_var, sbuffer, south_fine, south_coarse, SOUTH, x_refine, y_refine, wt, ind, mask_var, mask_val, default_val) + if (debug_log) print '("[INFO] WDR NRF S mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer_masked(interp_type, data_var, ebuffer, east_fine, east_coarse, EAST, x_refine, y_refine, wt, ind, mask_var, mask_val, default_val) + if (debug_log) print '("[INFO] WDR NRF E mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer_masked(interp_type, data_var, wbuffer, west_fine, west_coarse, WEST, x_refine, y_refine, wt, ind, mask_var, mask_val, default_val) + if (debug_log) print '("[INFO] WDR NRF W mn_var_shift_data start. npe=",I0)', this_pe + + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + if (debug_log) print '("[INFO] WDR End fill_nest_halos_from_parent2D_kindphys. npe=",I0," var_name=",A16)', this_pe, var_name + + end subroutine fill_nest_halos_from_parent_masked + + + subroutine fill_nest_halos_from_parent_r4_3d(var_name, data_var, interp_type, wt, ind, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + character(len=*), intent(in) :: var_name + real*4, allocatable, intent(inout) :: data_var(:,:,:) + integer, intent(in) :: interp_type + real, allocatable, intent(in) :: wt(:,:,:) + integer, allocatable, intent(in) :: ind(:,:,:) + integer, intent(in) :: x_refine, y_refine + logical, intent(in) :: is_fine_pe + type(nest_domain_type), intent(inout) :: nest_domain + integer, intent(in) :: position, nz + + real*4, dimension(:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: this_pe + integer :: nest_level = 1 ! WDR TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + if (debug_log) then + + print '("[INFO] WDR Start fill_nest_halos_from_parent3D. 
npe=",I0," var_name=",A16)', this_pe, var_name + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%tile_fine=",I0," %tile_coarse=",I0)', this_pe, nest_domain%tile_fine, nest_domain%tile_coarse + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%istart_fine=",I0," %iend_fine=",I0)', this_pe, nest_domain%istart_fine, nest_domain%iend_fine + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%jstart_fine=",I0," %jend_fine=",I0)', this_pe, nest_domain%jstart_fine, nest_domain%jend_fine + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%istart_coarse=",I0," %iend_coarse=",I0)', this_pe, nest_domain%istart_coarse, nest_domain%iend_coarse + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%jstart_coarse=",I0," %jend_coarse=",I0)', this_pe, nest_domain%jstart_coarse, nest_domain%jend_coarse + print '("[INFO] data_var npe=",I0," var_name=",A16," data_var(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', & + this_pe, var_name, lbound(data_var, 1), ubound(data_var, 1), lbound(data_var, 2), ubound(data_var, 2), lbound(data_var, 3), ubound(data_var, 3) + print '("[INFO] wt npe=",I0," var_name=",A16," wt(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', & + this_pe, var_name, lbound(wt, 1), ubound(wt, 1), lbound(wt, 2), ubound(wt, 2), lbound(wt, 3), ubound(wt, 3) + endif + + !==================================================== + + if (debug_log) print '("[INFO] WDR ALL1. npe=",I0," position=",I0," nz=",I0)', this_pe, position, nz + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz) + + if (debug_log) print '("[INFO] WDR NRF0.d mn_var_shift_data npe=",I0," data_var(",I0,",",I0,",",I0,")")', this_pe, size(data_var,1), size(data_var,2), size(data_var,3) + + ! Passes data from coarse grid to fine grid's halo + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Apply halo data + !! + !!=========================================================== + + if (debug_log) print '("[INFO] WDR NRFI mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF N mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF S mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF E mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF W mn_var_shift_data start. npe=",I0)', this_pe + + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + if (debug_log) print '("[INFO] WDR End fill_nest_halos_from_parent3D. 
npe=",I0," var_name=",A16)', this_pe, var_name + + end subroutine fill_nest_halos_from_parent_r4_3d + + + subroutine fill_nest_halos_from_parent_r8_3d(var_name, data_var, interp_type, wt, ind, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + character(len=*), intent(in) :: var_name + real*8, allocatable, intent(inout) :: data_var(:,:,:) + integer, intent(in) :: interp_type + real, allocatable, intent(in) :: wt(:,:,:) ! TODO should this be real*8? + integer, allocatable, intent(in) :: ind(:,:,:) + integer, intent(in) :: x_refine, y_refine + logical, intent(in) :: is_fine_pe + type(nest_domain_type), intent(inout) :: nest_domain + integer, intent(in) :: position, nz + + real*8, dimension(:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: this_pe + integer :: nest_level = 1 ! WDR TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + if (debug_log) then + + print '("[INFO] WDR Start fill_nest_halos_from_parent3D. npe=",I0," var_name=",A16)', this_pe, var_name + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%tile_fine=",I0," %tile_coarse=",I0)', this_pe, nest_domain%tile_fine, nest_domain%tile_coarse + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%istart_fine=",I0," %iend_fine=",I0)', this_pe, nest_domain%istart_fine, nest_domain%iend_fine + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%jstart_fine=",I0," %jend_fine=",I0)', this_pe, nest_domain%jstart_fine, nest_domain%jend_fine + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%istart_coarse=",I0," %iend_coarse=",I0)', this_pe, nest_domain%istart_coarse, nest_domain%iend_coarse + print '("[INFO] fill_nest_halos npe=",I0," nest_domain%jstart_coarse=",I0," %jend_coarse=",I0)', this_pe, nest_domain%jstart_coarse, nest_domain%jend_coarse + print '("[INFO] data_var npe=",I0," var_name=",A16," data_var(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', & + this_pe, var_name, lbound(data_var, 1), ubound(data_var, 1), lbound(data_var, 2), ubound(data_var, 2), lbound(data_var, 3), ubound(data_var, 3) + print '("[INFO] wt npe=",I0," var_name=",A16," wt(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', & + this_pe, var_name, lbound(wt, 1), ubound(wt, 1), lbound(wt, 2), ubound(wt, 2), lbound(wt, 3), ubound(wt, 3) + endif + + !==================================================== + + if (debug_log) print '("[INFO] WDR ALL1. npe=",I0," position=",I0," nz=",I0)', this_pe, position, nz + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz) + + if (debug_log) print '("[INFO] WDR NRF0.d mn_var_shift_data npe=",I0," data_var(",I0,",",I0,",",I0,")")', this_pe, size(data_var,1), size(data_var,2), size(data_var,3) + + ! 
Passes data from coarse grid to fine grid's halo + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Apply halo data + !! + !!=========================================================== + + if (debug_log) print '("[INFO] WDR NRFI mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF N mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF S mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF E mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF W mn_var_shift_data start. npe=",I0)', this_pe + + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + if (debug_log) print '("[INFO] WDR End fill_nest_halos_from_parent3D. npe=",I0," var_name=",A16)', this_pe, var_name + + end subroutine fill_nest_halos_from_parent_r8_3d + + + subroutine fill_nest_halos_from_parent_r4_4d(var_name, data_var, interp_type, wt, ind, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + character(len=*), intent(in) :: var_name + real*4, allocatable, intent(inout) :: data_var(:,:,:,:) + integer, intent(in) :: interp_type + real, allocatable, intent(in) :: wt(:,:,:) + integer, allocatable, intent(in) :: ind(:,:,:) + integer, intent(in) :: x_refine, y_refine + logical, intent(in) :: is_fine_pe + type(nest_domain_type), intent(inout) :: nest_domain + integer, intent(in) :: position, nz + + real*4, dimension(:,:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: n4d, this_pe + integer :: nest_level = 1 ! WDR TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + if (debug_log) print '("[INFO] WDR Start fill_nest_halos_from_parent4D. 
npe=",I0," var_name=",A16)', this_pe, var_name + + n4d = ubound(data_var, 4) + + if (debug_log) print '("[INFO] fill_nest_halos npe=",I0," nest_domain%tile_fine=",I0," %tile_coarse=",I0)', this_pe, nest_domain%tile_fine, nest_domain%tile_coarse + + if (debug_log) print '("[INFO] fill_nest_halos npe=",I0," nest_domain%istart_fine=",I0," %iend_fine=",I0)', this_pe, nest_domain%istart_fine, nest_domain%iend_fine + if (debug_log) print '("[INFO] fill_nest_halos npe=",I0," nest_domain%jstart_fine=",I0," %jend_fine=",I0)', this_pe, nest_domain%jstart_fine, nest_domain%jend_fine + + if (debug_log) print '("[INFO] fill_nest_halos npe=",I0," nest_domain%istart_coarse=",I0," %iend_coarse=",I0)', this_pe, nest_domain%istart_coarse, nest_domain%iend_coarse + if (debug_log) print '("[INFO] fill_nest_halos npe=",I0," nest_domain%jstart_coarse=",I0," %jend_coarse=",I0)', this_pe, nest_domain%jstart_coarse, nest_domain%jend_coarse + + if (debug_log) print '("[INFO] data_var 4D npe=",I0," var_name=",A16," data_var(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', this_pe, var_name, lbound(data_var, 1), ubound(data_var, 1), lbound(data_var, 2), ubound(data_var, 2), lbound(data_var, 3), ubound(data_var, 3), lbound(data_var, 4), ubound(data_var, 4) + + if (debug_log) print '("[INFO] wt npe=",I0," var_name=",A16," wt(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', this_pe, var_name, lbound(wt, 1), ubound(wt, 1), lbound(wt, 2), ubound(wt, 2), lbound(wt, 3), ubound(wt, 3) + + !==================================================== + + if (debug_log) print '("[INFO] WDR ALL1. npe=",I0," position=",I0," nz=",I0)', this_pe, position, nz + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz, n4d) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz, n4d) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz, n4d) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz, n4d) + + if (debug_log) print '("[INFO] WDR allocate_halo_buffers DONE. npe=",I0)', this_pe + + !==================================================== + + if (debug_log) print '("[INFO] WDR NRF0.d mn_var_shift_data npe=",I0," data_var(",I0,",",I0,",",I0,")")', this_pe, size(data_var,1), size(data_var,2), size(data_var,3) + + ! Passes data from coarse grid to fine grid's halo + ! Coarse parent PEs send data from data_var + ! Fine halo PEs receive data into one or more of the halo buffers + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (debug_log) print '("[INFO] WDR NRF2 mn_var_shift_data start. npe=",I0)', this_pe + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Apply halo data + !! + !!=========================================================== + + if (debug_log) print '("[INFO] WDR NRFI mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF N mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF S mn_var_shift_data start. 
npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF E mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF W mn_var_shift_data start. npe=",I0)', this_pe + + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + if (debug_log) print '("[INFO] WDR End fill_nest_halos_from_parent4D. npe=",I0," var_name=",A16)', this_pe, var_name + + end subroutine fill_nest_halos_from_parent_r4_4d + + + subroutine fill_nest_halos_from_parent_r8_4d(var_name, data_var, interp_type, wt, ind, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + character(len=*), intent(in) :: var_name + real*8, allocatable, intent(inout) :: data_var(:,:,:,:) + integer, intent(in) :: interp_type + real, allocatable, intent(in) :: wt(:,:,:) ! TODO should this be real*8? + integer, allocatable, intent(in) :: ind(:,:,:) + integer, intent(in) :: x_refine, y_refine + logical, intent(in) :: is_fine_pe + type(nest_domain_type), intent(inout) :: nest_domain + integer, intent(in) :: position, nz + + real*8, dimension(:,:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: n4d, this_pe + integer :: nest_level = 1 ! WDR TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + if (debug_log) print '("[INFO] WDR Start fill_nest_halos_from_parent4D_kindphys. npe=",I0," var_name=",A16)', this_pe, var_name + + n4d = ubound(data_var, 4) + + if (debug_log) print '("[INFO] fill_nest_halos npe=",I0," nest_domain%tile_fine=",I0," %tile_coarse=",I0)', this_pe, nest_domain%tile_fine, nest_domain%tile_coarse + + if (debug_log) print '("[INFO] fill_nest_halos npe=",I0," nest_domain%istart_fine=",I0," %iend_fine=",I0)', this_pe, nest_domain%istart_fine, nest_domain%iend_fine + if (debug_log) print '("[INFO] fill_nest_halos npe=",I0," nest_domain%jstart_fine=",I0," %jend_fine=",I0)', this_pe, nest_domain%jstart_fine, nest_domain%jend_fine + + if (debug_log) print '("[INFO] fill_nest_halos npe=",I0," nest_domain%istart_coarse=",I0," %iend_coarse=",I0)', this_pe, nest_domain%istart_coarse, nest_domain%iend_coarse + if (debug_log) print '("[INFO] fill_nest_halos npe=",I0," nest_domain%jstart_coarse=",I0," %jend_coarse=",I0)', this_pe, nest_domain%jstart_coarse, nest_domain%jend_coarse + + if (debug_log) print '("[INFO] data_var 4D npe=",I0," var_name=",A16," data_var(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', this_pe, var_name, lbound(data_var, 1), ubound(data_var, 1), lbound(data_var, 2), ubound(data_var, 2), lbound(data_var, 3), ubound(data_var, 3), lbound(data_var, 4), ubound(data_var, 4) + + + if (debug_log) print '("[INFO] wt npe=",I0," var_name=",A16," wt(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', this_pe, var_name, lbound(wt, 1), ubound(wt, 1), lbound(wt, 2), ubound(wt, 2), lbound(wt, 3), ubound(wt, 3) + + + !==================================================== + + if (debug_log) print '("[INFO] WDR ALL1. 
npe=",I0," position=",I0," nz=",I0)', this_pe, position, nz + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz, n4d) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz, n4d) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz, n4d) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz, n4d) + + if (debug_log) print '("[INFO] WDR allocate_halo_buffers DONE. npe=",I0)', this_pe + + !==================================================== + + if (debug_log) print '("[INFO] WDR NRF0.d mn_var_shift_data npe=",I0," data_var(",I0,",",I0,",",I0,")")', this_pe, size(data_var,1), size(data_var,2), size(data_var,3) + + ! Passes data from coarse grid to fine grid's halo + ! Coarse parent PEs send data from data_var + ! Fine halo PEs receive data into one or more of the halo buffers + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (debug_log) print '("[INFO] WDR NRF2 mn_var_shift_data start. npe=",I0)', this_pe + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Apply halo data + !! + !!=========================================================== + + if (debug_log) print '("[INFO] WDR NRFI mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF N mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF S mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF E mn_var_shift_data start. npe=",I0)', this_pe + + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + if (debug_log) print '("[INFO] WDR NRF W mn_var_shift_data start. npe=",I0)', this_pe + + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + if (debug_log) print '("[INFO] WDR End fill_nest_halos_from_parent4D_kindphys. npe=",I0," var_name=",A16)', this_pe, var_name + + end subroutine fill_nest_halos_from_parent_r8_4d + + + !================================================================================================== + ! + ! Allocate halo buffers + ! 
+ !================================================================================================== + + subroutine alloc_halo_buffer_r8_2d(buffer, bbox_fine, bbox_coarse, nest_domain, direction, position) + real*8, dimension(:,:), allocatable, intent(out) :: buffer + type(bbox), intent(out) :: bbox_fine, bbox_coarse + type(nest_domain_type), intent(in) :: nest_domain + integer, intent(in) :: direction, position + + integer :: my_stat + character(256) :: my_errmsg + integer :: this_pe + + this_pe = mpp_pe() + + call bbox_get_C2F_index(nest_domain, bbox_fine, bbox_coarse, direction, position) + if (debug_log) print '("[INFO] WDR FNHC npe=",I0," direction=",I0," bbox_coarse(",I0,"-",I0,",",I0,"-",I0,")")', this_pe, direction, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je + if (debug_log) print '("[INFO] WDR FNHF npe=",I0," direction=",I0," bbox_fine(",I0,"-",I0,",",I0,"-",I0,")")', this_pe, direction, bbox_fine.is, bbox_fine.ie, bbox_fine.js, bbox_fine.je + + if( bbox_coarse.ie .GE. bbox_coarse.is .AND. bbox_coarse.je .GE. bbox_coarse.js ) then + if (debug_log) print '("[INFO] WDR BUFR Allocating large buffer. npe=",I0," i=",I0,"-",I0," j=",I0,"-",I0)', this_pe, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je + allocate(buffer(bbox_coarse.is:bbox_coarse.ie, bbox_coarse.js:bbox_coarse.je), stat=my_stat, errmsg=my_errmsg) + if (my_stat .ne. 0) print '("[ERROR] WDR NBFR error allocating buffer. npe=",I0,I0,A80)', this_pe, my_stat, my_errmsg + + else + ! The buffer must have some storage allocated, whether it's a useful buffer or just a dummy. + if (debug_log) print '("[INFO] WDR NBFR only allocating single entry buffer. npe=",I0," direction=",I0," i=",I0,"-",I0," j=",I0,"-",I0)', this_pe, direction, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je + allocate(buffer(1,1)) + endif + + buffer = 0 + + end subroutine alloc_halo_buffer_r8_2d + + + subroutine alloc_halo_buffer_r4_2d(buffer, bbox_fine, bbox_coarse, nest_domain, direction, position) + real*4, dimension(:,:), allocatable, intent(out) :: buffer + type(bbox), intent(out) :: bbox_fine, bbox_coarse + type(nest_domain_type), intent(in) :: nest_domain + integer, intent(in) :: direction, position + + integer :: my_stat + character(256) :: my_errmsg + integer :: this_pe + + this_pe = mpp_pe() + + call bbox_get_C2F_index(nest_domain, bbox_fine, bbox_coarse, direction, position) + if (debug_log) print '("[INFO] WDR FNHC npe=",I0," direction=",I0," bbox_coarse(",I0,"-",I0,",",I0,"-",I0,")")', this_pe, direction, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je + if (debug_log) print '("[INFO] WDR FNHF npe=",I0," direction=",I0," bbox_fine(",I0,"-",I0,",",I0,"-",I0,")")', this_pe, direction, bbox_fine.is, bbox_fine.ie, bbox_fine.js, bbox_fine.je + + if( bbox_coarse.ie .GE. bbox_coarse.is .AND. bbox_coarse.je .GE. bbox_coarse.js ) then + if (debug_log) print '("[INFO] WDR BUFR Allocating large buffer. npe=",I0," i=",I0,"-",I0," j=",I0,"-",I0)', this_pe, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je + allocate(buffer(bbox_coarse.is:bbox_coarse.ie, bbox_coarse.js:bbox_coarse.je), stat=my_stat, errmsg=my_errmsg) + if (my_stat .ne. 0) print '("[ERROR] WDR NBFR error allocating buffer. npe=",I0,I0,A80)', this_pe, my_stat, my_errmsg + + else + ! The buffer must have some storage allocated, whether it's a useful buffer or just a dummy. + if (debug_log) print '("[INFO] WDR NBFR only allocating single entry buffer. 
npe=",I0," direction=",I0," i=",I0,"-",I0," j=",I0,"-",I0)', this_pe, direction, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je + allocate(buffer(1,1)) + endif + + buffer = 0 + + end subroutine alloc_halo_buffer_r4_2d + + + subroutine alloc_halo_buffer_r4_3d(buffer, bbox_fine, bbox_coarse, nest_domain, direction, position, nz) + real*4, dimension(:,:,:), allocatable, intent(out) :: buffer + type(bbox), intent(out) :: bbox_fine, bbox_coarse + type(nest_domain_type), intent(in) :: nest_domain + integer, intent(in) :: direction, position, nz + + integer :: my_stat + character(256) :: my_errmsg + integer :: this_pe + + this_pe = mpp_pe() + + call bbox_get_C2F_index(nest_domain, bbox_fine, bbox_coarse, direction, position) + if (debug_log) print '("[INFO] WDR FNHC npe=",I0," direction=",I0," bbox_coarse(",I0,"-",I0,",",I0,"-",I0,")")', this_pe, direction, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je + if (debug_log) print '("[INFO] WDR FNHF npe=",I0," direction=",I0," bbox_fine(",I0,"-",I0,",",I0,"-",I0,")")', this_pe, direction, bbox_fine.is, bbox_fine.ie, bbox_fine.js, bbox_fine.je + + if( bbox_coarse.ie .GE. bbox_coarse.is .AND. bbox_coarse.je .GE. bbox_coarse.js ) then + if (debug_log) print '("[INFO] WDR BUFR Allocating large buffer. npe=",I0," i=",I0,"-",I0," j=",I0,"-",I0," k=",I0,"-",I0)', this_pe, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je, 1, nz + allocate(buffer(bbox_coarse.is:bbox_coarse.ie, bbox_coarse.js:bbox_coarse.je,1:nz), stat=my_stat, errmsg=my_errmsg) + if (my_stat .ne. 0) print '("[ERROR] WDR NBFR error allocating buffer. npe=",I0,I0,A80)', this_pe, my_stat, my_errmsg + + else + ! The buffer must have some storage allocated, whether it's a useful buffer or just a dummy. + if (debug_log) print '("[INFO] WDR NBFR only allocating single entry buffer. npe=",I0," direction=",I0," i=",I0,"-",I0," j=",I0,"-",I0)', this_pe, direction, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je + allocate(buffer(1,1,1)) + endif + + buffer = 0 + + end subroutine alloc_halo_buffer_r4_3d + + + subroutine alloc_halo_buffer_r8_3d(buffer, bbox_fine, bbox_coarse, nest_domain, direction, position, nz) + real*8, dimension(:,:,:), allocatable, intent(out) :: buffer + type(bbox), intent(out) :: bbox_fine, bbox_coarse + type(nest_domain_type), intent(in) :: nest_domain + integer, intent(in) :: direction, position, nz + + integer :: my_stat + character(256) :: my_errmsg + integer :: this_pe + + this_pe = mpp_pe() + + call bbox_get_C2F_index(nest_domain, bbox_fine, bbox_coarse, direction, position) + if (debug_log) print '("[INFO] WDR FNHC npe=",I0," direction=",I0," bbox_coarse(",I0,"-",I0,",",I0,"-",I0,")")', this_pe, direction, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je + if (debug_log) print '("[INFO] WDR FNHF npe=",I0," direction=",I0," bbox_fine(",I0,"-",I0,",",I0,"-",I0,")")', this_pe, direction, bbox_fine.is, bbox_fine.ie, bbox_fine.js, bbox_fine.je + + if( bbox_coarse.ie .GE. bbox_coarse.is .AND. bbox_coarse.je .GE. bbox_coarse.js ) then + if (debug_log) print '("[INFO] WDR BUFR Allocating large buffer. npe=",I0," i=",I0,"-",I0," j=",I0,"-",I0," k=",I0,"-",I0)', this_pe, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je, 1, nz + allocate(buffer(bbox_coarse.is:bbox_coarse.ie, bbox_coarse.js:bbox_coarse.je,1:nz), stat=my_stat, errmsg=my_errmsg) + if (my_stat .ne. 0) print '("[ERROR] WDR NBFR error allocating buffer. npe=",I0,I0,A80)', this_pe, my_stat, my_errmsg + + else + ! 
The buffer must have some storage allocated, whether it's a useful buffer or just a dummy. + if (debug_log) print '("[INFO] WDR NBFR only allocating single entry buffer. npe=",I0," direction=",I0," i=",I0,"-",I0," j=",I0,"-",I0)', this_pe, direction, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je + allocate(buffer(1,1,1)) + endif + + buffer = 0 + + end subroutine alloc_halo_buffer_r8_3d + + + subroutine alloc_halo_buffer_r4_4d(buffer, bbox_fine, bbox_coarse, nest_domain, direction, position, nz, n4d) + real*4, dimension(:,:,:,:), allocatable, intent(out) :: buffer + type(bbox), intent(out) :: bbox_fine, bbox_coarse + type(nest_domain_type), intent(in) :: nest_domain + integer, intent(in) :: direction, position, nz, n4d + + integer :: my_stat + character(256) :: my_errmsg + integer :: this_pe + + this_pe = mpp_pe() + + call bbox_get_C2F_index(nest_domain, bbox_fine, bbox_coarse, direction, position) + if (debug_log) print '("[INFO] WDR FNHC4 npe=",I0," direction=",I0," bbox_coarse(",I0,"-",I0,",",I0,"-",I0,")")', this_pe, direction, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je + if (debug_log) print '("[INFO] WDR FNHF4 npe=",I0," direction=",I0," bbox_fine(",I0,"-",I0,",",I0,"-",I0,")")', this_pe, direction, bbox_fine.is, bbox_fine.ie, bbox_fine.js, bbox_fine.je + + if( bbox_coarse.ie .GE. bbox_coarse.is .AND. bbox_coarse.je .GE. bbox_coarse.js ) then + if (debug_log) print '("[INFO] WDR BUFR4 Allocating large buffer. npe=",I0," i=",I0,"-",I0," j=",I0,"-",I0," k=",I0," n4d=",I0)', this_pe, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je, nz, n4d + allocate(buffer(bbox_coarse.is:bbox_coarse.ie, bbox_coarse.js:bbox_coarse.je, 1:nz, 1:n4d), stat=my_stat, errmsg=my_errmsg) + if (my_stat .ne. 0) print '("[ERROR] WDR NBFR4 error allocating buffer. npe=",I0,I0,A80)', this_pe, my_stat, my_errmsg + + else + ! The buffer must have some storage allocated, whether it's a useful buffer or just a dummy. + if (debug_log) print '("[INFO] WDR NBFR4 only allocating single entry buffer. npe=",I0," direction=",I0," i=",I0,"-",I0," j=",I0,"-",I0)', this_pe, direction, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je + allocate(buffer(1,1,1,1)) + endif + + buffer = 0 + + end subroutine alloc_halo_buffer_r4_4d + + + subroutine alloc_halo_buffer_r8_4d(buffer, bbox_fine, bbox_coarse, nest_domain, direction, position, nz, n4d) + real*8, dimension(:,:,:,:), allocatable, intent(out) :: buffer + type(bbox), intent(out) :: bbox_fine, bbox_coarse + type(nest_domain_type), intent(in) :: nest_domain + integer, intent(in) :: direction, position, nz, n4d + + integer :: my_stat + character(256) :: my_errmsg + integer :: this_pe + + this_pe = mpp_pe() + + call bbox_get_C2F_index(nest_domain, bbox_fine, bbox_coarse, direction, position) + if (debug_log) print '("[INFO] WDR FNHC4 npe=",I0," direction=",I0," bbox_coarse(",I0,"-",I0,",",I0,"-",I0,")")', this_pe, direction, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je + if (debug_log) print '("[INFO] WDR FNHF4 npe=",I0," direction=",I0," bbox_fine(",I0,"-",I0,",",I0,"-",I0,")")', this_pe, direction, bbox_fine.is, bbox_fine.ie, bbox_fine.js, bbox_fine.je + + if( bbox_coarse.ie .GE. bbox_coarse.is .AND. bbox_coarse.je .GE. bbox_coarse.js ) then + if (debug_log) print '("[INFO] WDR BUFR4 Allocating large buffer. 
npe=",I0," i=",I0,"-",I0," j=",I0,"-",I0," k=",I0," n4d=",I0)', this_pe, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je, nz, n4d + allocate(buffer(bbox_coarse.is:bbox_coarse.ie, bbox_coarse.js:bbox_coarse.je, 1:nz, 1:n4d), stat=my_stat, errmsg=my_errmsg) + if (my_stat .ne. 0) print '("[ERROR] WDR NBFR4 error allocating buffer. npe=",I0,I0,A80)', this_pe, my_stat, my_errmsg + + else + ! The buffer must have some storage allocated, whether it's a useful buffer or just a dummy. + if (debug_log) print '("[INFO] WDR NBFR4 only allocating single entry buffer. npe=",I0," direction=",I0," i=",I0,"-",I0," j=",I0,"-",I0)', this_pe, direction, bbox_coarse.is, bbox_coarse.ie, bbox_coarse.js, bbox_coarse.je + allocate(buffer(1,1,1,1)) + endif + + buffer = 0 + + end subroutine alloc_halo_buffer_r8_4d + + + !================================================================================================== + ! + ! Load static data from netCDF files + ! + !================================================================================================== + + ! Load the full panel nest latlons from netCDF file + ! character(*), parameter :: nc_filename = '/scratch2/NAGAPE/aoml-hafs1/William.Ramstrom/static_grids/C384_grid.tile6.nc' + ! Read in the lat/lon in degrees, convert to radians + + subroutine load_nest_latlons_from_nc(nc_filename, nxp, nyp, refine, & + fp_tile_geo, & + fp_istart_fine, fp_iend_fine, fp_jstart_fine, fp_jend_fine) + implicit none + + character(*), intent(in) :: nc_filename + integer, intent(in) :: nxp, nyp, refine + type(grid_geometry), intent(out) :: fp_tile_geo + integer, intent(out) :: fp_istart_fine, fp_iend_fine, fp_jstart_fine, fp_jend_fine + + !======================================================================================== + ! + ! Determine which tile this PE is operating on + ! Load the lat/lon data from netCDF file + ! If fine nest, also determine the parent tile + ! load the lat/lon data from that tile + ! This code will only operate for nest motion within a single tile + ! + !======================================================================================== + + ! read lat/lon for this tile + ! lat is y from grid file + ! lon is x from grid file + + integer :: nx, ny + + integer :: nn + integer :: super_nxp, super_nyp, mid_nx, mid_ny + integer :: super_nx, super_ny + type(grid_geometry) :: temp_tile_geo + ! Full panel nest data + integer :: i, j, fi, fj + integer :: this_pe + + real(kind=kind_phys) :: pi = 4d0 * atan(1.0d0) + !real(kind=kind_phys) :: pi180, rad2deg, deg2rad + real(kind=kind_phys) :: deg2rad + + !pi180 = pi / 180.0d0 + deg2rad = pi / 180.0d0 + !rad2deg = 1.0d0 / pi180 + + this_pe = mpp_pe() + + if (debug_log) print '("[INFO] WDR NCREAD LLFE load_nest_latlons_from_nc fp interp_single_nest start, nread npe=",I0," nxp=",I0," nyp=",I0," refine=",I0)', this_pe, nxp, nyp, refine + + nx = nxp - 1 + ny = nyp - 1 + + ! Global tiles don't have a halo in lat/lon data + ! Nests have a halo in the lat/lon data + !start = 1 + !nread = 1 + + ! single fine nest + ! 
full panel variables + !fp_istart_fine = 12 + !fp_iend_fine = 269 + !fp_jstart_fine = 12 + !fp_jend_fine = 269 + !super_nx = 2*(fp_iend_fine - fp_istart_fine + 1) + ( ehalo + whalo ) + !super_ny = 2*(fp_jend_fine - fp_jstart_fine + 1) + ( nhalo + shalo ) + + fp_istart_fine = 1 + fp_iend_fine = nx * refine + fp_jstart_fine = 1 + fp_jend_fine = ny * refine + super_nx = 2*(fp_iend_fine - fp_istart_fine + 1) + super_ny = 2*(fp_jend_fine - fp_jstart_fine + 1) + + super_nxp = super_nx + 1 + super_nyp = super_ny + 1 + + mid_nx = (fp_iend_fine - fp_istart_fine) + mid_ny = (fp_jend_fine - fp_jstart_fine) + + if (debug_log) print '("[INFO] WDR LLFB load_nest_latlons_from_nc allocate fp fine temp_tile_geo%lats npe=",I0," dims: ",I4,":",I4,I4,":",I4,I4)', this_pe, 1, super_nxp, 1, super_nyp + + if (debug_log) print '("[INFO] WDR NCREAD LLFC load_nest_latlons_from_nc fp interp_single_nest. npe=",I0,I4,I4,I4,I4," ",A128)', this_pe, super_nxp, super_nyp, mid_nx,mid_ny, nc_filename + + call alloc_read_data(nc_filename, 'x', super_nxp, super_nyp, fp_tile_geo%lons) + call alloc_read_data(nc_filename, 'y', super_nxp, super_nyp, fp_tile_geo%lats) + call alloc_read_data(nc_filename, 'area', super_nx, super_ny, fp_tile_geo%area) + + ! double dx(nyp, nx) + !call alloc_read_data(nc_filename, 'dx', super_nx, super_nyp, fp_tile_geo%dx) + ! double dy(ny, nxp) + !call alloc_read_data(nc_filename, 'dy', super_nxp, super_ny, fp_tile_geo%dy) + ! double area(ny, nx) + !call alloc_read_data(nc_filename, 'area', super_nx, super_ny, fp_tile_geo%area) + + if (debug_log) print '("[INFO] WDR NCREAD LLFE load_nest_latlons_from_nc fp interp_single_nest start, nread npe=",I0)', this_pe + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + !! + !! Setup the lat/lons of the actual nest, read from the larger array + !! +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + !super_nxp = 2*(iend_fine - istart_fine + 1) + 2 * ( ehalo + whalo ) + 1 + !super_nyp = 2*(jend_fine - jstart_fine + 1) + 2 * ( nhalo + shalo ) + 1 + !mid_nx = (iend_fine - istart_fine) + !mid_ny = (jend_fine - jstart_fine) + + ! end reading in nest + + if (debug_log) print '("[INFO] WDR CTR load_nest_latlons_from_nc center lat/lon. npe=",I0, " ", I0," ", I0)', this_pe, mid_nx, mid_ny + if (debug_log) print '("[INFO] WDR CTR load_nest_latlons_from_nc center lat/lon. npe=",I0, " ", I0," ", I0)', this_pe, size(fp_tile_geo%lats, 1), size(fp_tile_geo%lats, 2) + if (debug_log) print '("[INFO] WDR CTR load_nest_latlons_from_nc DEGS center lat/lon. npe=",I0,F8.2,F8.2," ",A128)', this_pe, fp_tile_geo%lats(mid_nx,mid_ny), fp_tile_geo%lons(mid_nx,mid_ny), nc_filename + + fp_tile_geo%lats = fp_tile_geo%lats * deg2rad + fp_tile_geo%lons = fp_tile_geo%lons * deg2rad + + if (debug_log) print '("[INFO] WDR CTR load_nest_latlons_from_nc RADS center lat/lon. npe=",I0,F8.2,F8.2," ",A128)', this_pe, fp_tile_geo%lats(mid_nx,mid_ny), fp_tile_geo%lons(mid_nx,mid_ny), nc_filename + + end subroutine load_nest_latlons_from_nc + +#ifdef OVERLOAD_R8 + subroutine alloc_read_data_r4_2d(nc_filename, var_name, x_size, y_size, data_array, time) + character(len=*), intent(in) :: nc_filename, var_name + integer, intent(in) :: x_size, y_size + real*4, allocatable, intent(inout) :: data_array(:,:) + integer, intent(in),optional :: time + + integer :: start(4), nread(4) + integer :: this_pe + + ! Allocate data_array to match the expected data size, then read in the data + ! 
This subroutine consolidates the allocation and reading of data to ensure consistency of data sizing and simplify code + ! Could later extend this function to determine data size based on netCDF file metadata + + this_pe = mpp_pe() + + allocate(data_array(x_size, y_size)) + data_array = -9999.9 + + if (debug_log) print '("[INFO] WDR alloc_read_data allocate npe=",I0," ",A16," dims: ",I4,":",I4,I4,":",I4,I4)', this_pe, var_name, 1, x_size, 1, y_size + + start = 1 + nread = 1 + + start(1) = 1 + start(2) = 1 + nread(1) = x_size + nread(2) = y_size + + if (present(time)) then + start(3) = time + nread(3) = 1 + endif + + if (debug_log) print '("[INFO] WDR NCREAD NCRA alloc_read_data. npe=",I0," ",A96," ", A16)', this_pe, trim(nc_filename), var_name + if (debug_log) print '("[INFO] WDR NCREAD NCRB alloc_read_data, nread npe=",I0, " ", A16,I4,I4,I4,I4)', this_pe, var_name, start(1), start(2), nread(1), nread(2) + + call read_data(nc_filename, var_name, data_array, start, nread, no_domain=.TRUE.) ! r4_2d + + if (debug_log) print '("[INFO] WDR NCREAD NCRC alloc_read_data, nread npe=",I0, " ", A16,I4,I4,I4,I4)', this_pe, var_name, start(1), start(2), nread(1), nread(2) + + end subroutine alloc_read_data_r4_2d +#endif + + subroutine alloc_read_data_r8_2d(nc_filename, var_name, x_size, y_size, data_array) + character(len=*), intent(in) :: nc_filename, var_name + integer, intent(in) :: x_size, y_size + real*8, allocatable, intent(inout) :: data_array(:,:) + + integer :: start(4), nread(4) + integer :: this_pe + + ! Allocate data_array to match the expected data size, then read in the data + ! This subroutine consolidates the allocation and reading of data to ensure consistency of data sizing and simplify code + ! Could later extend this function to determine data size based on netCDF file metadata + + this_pe = mpp_pe() + + allocate(data_array(x_size, y_size)) + data_array = -9999.9 + + if (debug_log) print '("[INFO] WDR alloc_read_data allocate npe=",I0," ",A16," dims: ",I4,":",I4,I4,":",I4,I4)', this_pe, var_name, 1, x_size, 1, y_size + + start = 1 + nread = 1 + + start(1) = 1 + start(2) = 1 + nread(1) = x_size + nread(2) = y_size + + if (debug_log) print '("[INFO] WDR NCREAD NCRA alloc_read_data. npe=",I0," ",A96," ", A16)', this_pe, trim(nc_filename), var_name + if (debug_log) print '("[INFO] WDR NCREAD NCRB alloc_read_data, nread npe=",I0, " ", A16,I4,I4,I4,I4)', this_pe, var_name, start(1), start(2), nread(1), nread(2) + + call read_data(nc_filename, var_name, data_array, start, nread, no_domain=.TRUE.) ! r8_2d + + if (debug_log) print '("[INFO] WDR NCREAD NCRC alloc_read_data, nread npe=",I0, " ", A16,I4,I4,I4,I4)', this_pe, var_name, start(1), start(2), nread(1), nread(2) + + end subroutine alloc_read_data_r8_2d + + + ! nest_geo and parent_geo can be centered or supergrids. + ! Assumes and validates that nest_geo is smaller, and inside parent_geo + subroutine find_nest_alignment(nest_geo, parent_geo, nest_x, nest_y, parent_x, parent_y) + implicit none + type(grid_geometry), intent(in) :: nest_geo, parent_geo + integer, intent(out) :: nest_x, nest_y, parent_x, parent_y + + type(bbox) :: nest_bbox, parent_bbox + integer :: x,y + logical :: found + + real(kind=R_GRID) :: pi = 4 * atan(1.0d0) + real :: rad2deg + integer :: this_pe + + this_pe = mpp_pe() + + rad2deg = 180.0 / pi + + found = .false. 
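+ ! Scan the parent lat/lon arrays for the point that coincides with the first
+ ! (is,js) point of the nest grid, matching to within 1.0e-4 radians and also
+ ! accepting longitudes that differ by 2*pi to handle wraparound.  On success
+ ! the matching indices are returned in (nest_x, nest_y) and (parent_x, parent_y);
+ ! otherwise all four retain the -999 sentinel values set just below.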
+    parent_x = -999
+    parent_y = -999
+    nest_x = -999
+    nest_y = -999
+
+    if (debug_log) print '("[INFO] WDR start find_nest_alignment")'
+
+    call fill_bbox(nest_bbox, nest_geo%lats)
+    call show_bbox('nest', nest_bbox, nest_geo%lats, nest_geo%lons)
+    call fill_bbox(parent_bbox, parent_geo%lats)
+    call show_bbox('parent', parent_bbox, parent_geo%lats, parent_geo%lons)
+
+    !parent_bbox%is = lbound(parent_geo%lats, 1)
+    !parent_bbox%ie = ubound(parent_geo%lats, 1)
+    !parent_bbox%js = lbound(parent_geo%lats, 2)
+    !parent_bbox%je = ubound(parent_geo%lats, 2)
+
+    do x = parent_bbox%is, parent_bbox%ie
+      do y = parent_bbox%js, parent_bbox%je
+
+        if (abs(parent_geo%lats(x,y) - nest_geo%lats(nest_bbox%is, nest_bbox%js)) .lt. 0.0001) then
+          if (abs(parent_geo%lons(x,y) - nest_geo%lons(nest_bbox%is, nest_bbox%js)) .lt. 0.0001) then
+            found = .true.
+
+            parent_x = x
+            parent_y = y
+            nest_x = nest_bbox%is
+            nest_y = nest_bbox%js
+
+            if (debug_log) print '("[INFO] WDR find_nest_alignment parent(",I0,",",I0,") nest(",I0,",",I0,")")', x, y, nest_bbox%is, nest_bbox%js
+            if (debug_log) print '("[INFO] WDR find_nest_alignment ",F10.5, F10.5)', parent_geo%lats(x,y)*rad2deg, parent_geo%lons(x,y)*rad2deg
+          endif
+
+          if ( abs(abs(parent_geo%lons(x,y) - nest_geo%lons(nest_bbox%is, nest_bbox%js)) - 2*pi) .lt. 0.0001) then
+            found = .true.
+            if (debug_log) print '("[INFO] WDR find_nest_alignment nest WRAP MATCH FOUND npe=",I0,F10.5, F10.5)', this_pe, nest_geo%lats(nest_bbox%is, nest_bbox%js)*rad2deg, nest_geo%lons(nest_bbox%is, nest_bbox%js)*rad2deg
+            if (debug_log) print '("[INFO] WDR find_nest_alignment WRAP MATCH ",F10.5, F10.5)', parent_geo%lats(x,y)*rad2deg, parent_geo%lons(x,y)*rad2deg
+
+            parent_x = x
+            parent_y = y
+            nest_x = nest_bbox%is
+            nest_y = nest_bbox%js
+
+            if (debug_log) print '("[INFO] WDR find_nest_alignment parent(",I0,",",I0,") nest(",I0,",",I0,")")', x, y, nest_bbox%is, nest_bbox%js
+            if (debug_log) print '("[INFO] WDR find_nest_alignment ",F10.5, F10.5)', parent_geo%lats(x,y)*rad2deg, parent_geo%lons(x,y)*rad2deg
+          endif
+        endif
+      enddo
+    enddo
+
+    if (found) then
+      if (debug_log) print '("[INFO] WDR find_nest_alignment MATCH FOUND",F10.5, F10.5)', nest_geo%lats(nest_bbox%is, nest_bbox%js)*rad2deg, nest_geo%lons(nest_bbox%is, nest_bbox%js)*rad2deg
+    endif
+
+    if (.not. found .and. debug_log) then
+      print '("[INFO] WDR find_nest_alignment nest NO MATCH FOUND npe=",I0,F10.5, F10.5)', this_pe, nest_geo%lats(nest_bbox%is, nest_bbox%js)*rad2deg, nest_geo%lons(nest_bbox%is, nest_bbox%js)*rad2deg
+      print '("[INFO] WDR find_nest_alignment nest NO MATCH FOUND npe=",I0,F10.5, F10.5)', this_pe, nest_geo%lats(nest_bbox%is, nest_bbox%je)*rad2deg, nest_geo%lons(nest_bbox%is, nest_bbox%je)*rad2deg
+      print '("[INFO] WDR find_nest_alignment nest NO MATCH FOUND npe=",I0,F10.5, F10.5)', this_pe, nest_geo%lats(nest_bbox%ie, nest_bbox%je)*rad2deg, nest_geo%lons(nest_bbox%ie, nest_bbox%je)*rad2deg
+      print '("[INFO] WDR find_nest_alignment nest NO MATCH FOUND npe=",I0,F10.5, F10.5)', this_pe, nest_geo%lats(nest_bbox%ie, nest_bbox%js)*rad2deg, nest_geo%lons(nest_bbox%ie, nest_bbox%js)*rad2deg
+
+      do x = parent_bbox%is, parent_bbox%ie
+        do y = parent_bbox%js, parent_bbox%je
+          print '("[INFO] WDR find_nest_alignment parent NO MATCH FOUND npe=",I0," ",I0," ",I0," ",F10.5, F10.5)', this_pe, x, y, parent_geo%lats(x,y)*rad2deg, parent_geo%lons(x,y)*rad2deg
+        enddo
+      enddo
+    endif
+
+  end subroutine find_nest_alignment
+
+
+  !==================================================================================================
+  ! 
+ ! NetCDF Function Section + ! + !================================================================================================== + + subroutine output_grid_to_nc_3d(flag, istart, iend, jstart, jend, k, grid, file_str, var_name, time_step, dom, position) + implicit none + + character(len=*), intent(in) :: flag + integer, intent(in) :: istart, iend, jstart, jend, k + real, dimension(:,:,:), intent(in) :: grid + + character(len=*), intent(in) :: file_str, var_name + integer, intent(in) :: time_step + type(domain2d), intent(in) :: dom + integer, intent(in) :: position + + integer :: this_pe + character(len=256) :: filename + + this_pe = mpp_pe() + + if (debug_log) print '("[INFO] WDR output_grid_3d_to_nc calling write_data. ",A8," npe=",I0, " i=",I0,"-",I0, " j=",I0,"-",I0," grid(",I0,",",I0,",",I0,")")', & + flag, this_pe, istart, iend, jstart, jend, size(grid,1), size(grid,2), size(grid,3) + + write (filename, "(A,A1,I0.3,A)") trim(file_str), "_", time_step, ".nc" + + ! Resolves to: + !subroutine write_data_3d_new(filename, fieldname, data, domain, no_domain, scalar_or_1d, & + ! position, tile_count, data_default) + !character(len=*), intent(in) :: filename, fieldname + !real, dimension(:,:,:), intent(in) :: data + !type(domain2d), optional, intent(in), target :: domain + !real, optional, intent(in) :: data_default + !logical, optional, intent(in) :: no_domain + !logical, optional, intent(in) :: scalar_or_1d + !integer, optional, intent(in) :: position, tile_count + + call write_data(filename, var_name, grid, dom, position=position) ! r4_3d + + end subroutine output_grid_to_nc_3d + + + subroutine output_grid_to_nc_2d(flag, istart, iend, jstart, jend, k, grid, file_str, var_name, time_step, dom, position) + implicit none + + character(len=*), intent(in) :: flag + integer, intent(in) :: istart, iend, jstart, jend, k + real, dimension(:,:), intent(in) :: grid + + character(len=*), intent(in) :: file_str, var_name + integer, intent(in) :: time_step + type(domain2d), intent(in) :: dom + integer, intent(in) :: position + + integer :: this_pe + character(len=256) :: filename + + this_pe = mpp_pe() + + if (debug_log) print '("[INFO] WDR output_grid_2d_to_nc calling write_data. ",A8," npe=",I0, " i=",I0,"-",I0, " j=",I0,"-",I0," grid(",I0,")")', & + flag, this_pe, istart, iend, jstart, jend, size(grid,1), size(grid,2) + + write (filename, "(A,A1,I0.3,A)") trim(file_str), "_", time_step, ".nc" + + call write_data(filename, var_name, grid, dom, position=position) ! r4_2d + + end subroutine output_grid_to_nc_2d + + + + !================================================================================================== + ! + ! Fill Section + ! + !================================================================================================== + + subroutine fill_grid_from_supergrid_r4_3d(in_grid, stagger_type, fp_super_tile_geo, ioffset, joffset, x_refine, y_refine) + implicit none + real*4, allocatable, intent(inout) :: in_grid(:,:,:) + integer, intent(in) :: stagger_type ! CENTER, CORNER + type(grid_geometry), intent(in) :: fp_super_tile_geo + integer, intent(in) :: ioffset, joffset, x_refine, y_refine + + integer :: nest_x, nest_y, parent_x, parent_y + + type(bbox) :: tile_bbox, fp_tile_bbox + integer :: i, j, fp_i, fp_j + + ! tile_geo is cell-centered, at nest refinement + ! fp_super_tile_geo is a supergrid, at nest refinement + + !call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + + ! There are a few different offsets operating here: + ! 1. 
ioffset,joffset is how far the start of the (centered/corner?) grid is from the start of the parent grid + ! i.e. the index of the parent center cell (not supergrid!) where the nest compute domain begins + ! 2. nest_x, nest_y are the initial indices of this tile of the nest (the patch running on the PE) + ! 2. parent_x, parent_y are the initial indices of this tile of the parent supergrid (the patch running on the PE) + ! 3. parent_x = ((ioffset -1) * x_refine + nest_x) * 2 + ! + + call fill_bbox(tile_bbox, in_grid) + call fill_bbox(fp_tile_bbox, fp_super_tile_geo%lats) + + ! Calculate new parent alignment -- supergrid at the refine ratio + nest_x = tile_bbox%is + nest_y = tile_bbox%js + + parent_x = ((ioffset - 1) * x_refine + nest_x) * 2 + parent_y = ((joffset - 1) * y_refine + nest_y) * 2 + + do i = tile_bbox%is, tile_bbox%ie + do j = tile_bbox%js, tile_bbox%je + if (stagger_type == CENTER) then + fp_i = (i - nest_x) * 2 + parent_x + fp_j = (j - nest_y) * 2 + parent_y + elseif (stagger_type == CORNER) then + fp_i = (i - nest_x) * 2 + parent_x - 1 + fp_j = (j - nest_y) * 2 + parent_y - 1 + endif + + ! Make sure we don't run off the edge of the parent supergrid + if (fp_i < fp_tile_bbox%is .or. fp_i > fp_tile_bbox%ie) then + print '("[ERROR] WDR move_nest_geo invalid fp_i=",I0," is=",I0," ie=",I0)', fp_i, fp_tile_bbox%is, fp_tile_bbox%ie + stop ! TODO replace with a fatal error + endif + if (fp_j < fp_tile_bbox%js .or. fp_j > fp_tile_bbox%je) then + print '("[ERROR] WDR move_nest_geo invalid fp_j=",I0," js=",I0," je=",I0)', fp_j, fp_tile_bbox%js, fp_tile_bbox%je + stop ! TODO replace with a fatal error + endif + + in_grid(i,j,2) = fp_super_tile_geo%lats(fp_i, fp_j) + in_grid(i,j,1) = fp_super_tile_geo%lons(fp_i, fp_j) + enddo + enddo + + ! Validate at the end + !call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + + end subroutine fill_grid_from_supergrid_r4_3d + + + subroutine fill_grid_from_supergrid_r8_3d(in_grid, stagger_type, fp_super_tile_geo, ioffset, joffset, x_refine, y_refine) + implicit none + real*8, allocatable, intent(inout) :: in_grid(:,:,:) + integer, intent(in) :: stagger_type ! CENTER, CORNER + type(grid_geometry), intent(in) :: fp_super_tile_geo + integer, intent(in) :: ioffset, joffset, x_refine, y_refine + + integer :: nest_x, nest_y, parent_x, parent_y + + type(bbox) :: tile_bbox, fp_tile_bbox + integer :: i, j, fp_i, fp_j + + ! tile_geo is cell-centered, at nest refinement + ! fp_super_tile_geo is a supergrid, at nest refinement + + !call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + + ! There are a few different offsets operating here: + ! 1. ioffset,joffset is how far the start of the (centered/corner?) grid is from the start of the parent grid + ! i.e. the index of the parent center cell (not supergrid!) where the nest compute domain begins + ! 2. nest_x, nest_y are the initial indices of this tile of the nest (the patch running on the PE) + ! 2. parent_x, parent_y are the initial indices of this tile of the parent supergrid (the patch running on the PE) + ! 3. parent_x = ((ioffset -1) * x_refine + nest_x) * 2 + ! + + call fill_bbox(tile_bbox, in_grid) + call fill_bbox(fp_tile_bbox, fp_super_tile_geo%lats) + + ! 
Calculate new parent alignment -- supergrid at the refine ratio + nest_x = tile_bbox%is + nest_y = tile_bbox%js + + parent_x = ((ioffset - 1) * x_refine + nest_x) * 2 + parent_y = ((joffset - 1) * y_refine + nest_y) * 2 + + do i = tile_bbox%is, tile_bbox%ie + do j = tile_bbox%js, tile_bbox%je + if (stagger_type == CENTER) then + fp_i = (i - nest_x) * 2 + parent_x + fp_j = (j - nest_y) * 2 + parent_y + elseif (stagger_type == CORNER) then + fp_i = (i - nest_x) * 2 + parent_x - 1 + fp_j = (j - nest_y) * 2 + parent_y - 1 + endif + + ! Make sure we don't run off the edge of the parent supergrid + if (fp_i < fp_tile_bbox%is .or. fp_i > fp_tile_bbox%ie) then + print '("[ERROR] WDR move_nest_geo invalid fp_i=",I0," is=",I0," ie=",I0)', fp_i, fp_tile_bbox%is, fp_tile_bbox%ie + stop ! TODO replace with a fatal error + endif + if (fp_j < fp_tile_bbox%js .or. fp_j > fp_tile_bbox%je) then + print '("[ERROR] WDR move_nest_geo invalid fp_j=",I0," js=",I0," je=",I0)', fp_j, fp_tile_bbox%js, fp_tile_bbox%je + stop ! TODO replace with a fatal error + endif + + in_grid(i,j,2) = fp_super_tile_geo%lats(fp_i, fp_j) + in_grid(i,j,1) = fp_super_tile_geo%lons(fp_i, fp_j) + enddo + enddo + + ! Validate at the end + !call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + + end subroutine fill_grid_from_supergrid_r8_3d + + + subroutine fill_grid_from_supergrid_r8_4d(in_grid, stagger_type, fp_super_tile_geo, ioffset, joffset, x_refine, y_refine) + implicit none + real*8, allocatable, intent(inout) :: in_grid(:,:,:,:) + integer, intent(in) :: stagger_type ! CENTER, CORNER + type(grid_geometry), intent(in) :: fp_super_tile_geo + integer, intent(in) :: ioffset, joffset, x_refine, y_refine + + integer :: nest_x, nest_y, parent_x, parent_y + + type(bbox) :: tile_bbox, fp_tile_bbox + integer :: i, j, fp_i, fp_j + + ! tile_geo is cell-centered, at nest refinement + ! fp_super_tile_geo is a supergrid, at nest refinement + + !call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + + ! There are a few different offsets operating here: + ! 1. ioffset,joffset is how far the start of the (centered/corner?) grid is from the start of the parent grid + ! i.e. the index of the parent center cell (not supergrid!) where the nest compute domain begins + ! 2. nest_x, nest_y are the initial indices of this tile of the nest (the patch running on the PE) + ! 2. parent_x, parent_y are the initial indices of this tile of the parent supergrid (the patch running on the PE) + ! 3. parent_x = ((ioffset -1) * x_refine + nest_x) * 2 + ! + + call fill_bbox(tile_bbox, in_grid) + call fill_bbox(fp_tile_bbox, fp_super_tile_geo%lats) + + ! Calculate new parent alignment -- supergrid at the refine ratio + nest_x = tile_bbox%is + nest_y = tile_bbox%js + + parent_x = ((ioffset - 1) * x_refine + nest_x) * 2 + parent_y = ((joffset - 1) * y_refine + nest_y) * 2 + + do i = tile_bbox%is, tile_bbox%ie + do j = tile_bbox%js, tile_bbox%je + if (stagger_type == CENTER) then + fp_i = (i - nest_x) * 2 + parent_x + fp_j = (j - nest_y) * 2 + parent_y + elseif (stagger_type == CORNER) then + fp_i = (i - nest_x) * 2 + parent_x - 1 + fp_j = (j - nest_y) * 2 + parent_y - 1 + endif + + ! Make sure we don't run off the edge of the parent supergrid + if (fp_i < fp_tile_bbox%is .or. fp_i > fp_tile_bbox%ie) then + print '("[ERROR] WDR move_nest_geo invalid fp_i=",I0," is=",I0," ie=",I0)', fp_i, fp_tile_bbox%is, fp_tile_bbox%ie + stop ! 
TODO replace with a fatal error + endif + if (fp_j < fp_tile_bbox%js .or. fp_j > fp_tile_bbox%je) then + print '("[ERROR] WDR move_nest_geo invalid fp_j=",I0," js=",I0," je=",I0)', fp_j, fp_tile_bbox%js, fp_tile_bbox%je + stop ! TODO replace with a fatal error + endif + + in_grid(i,j,2,1) = fp_super_tile_geo%lats(fp_i, fp_j) + in_grid(i,j,1,1) = fp_super_tile_geo%lons(fp_i, fp_j) + enddo + enddo + + ! Validate at the end + !call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + + end subroutine fill_grid_from_supergrid_r8_4d + + + !>@brief This subroutine fills the nest halo data from the coarse grid data by downscaling. + !>@details Applicable to any interpolation type + + subroutine fill_nest_from_buffer_r4_2d(interp_type, x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + implicit none + + integer, intent(in) :: interp_type + real*4, allocatable, intent(inout) :: x(:,:) + real*4, allocatable, intent(in) :: buffer(:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + integer :: this_pe + this_pe = mpp_pe() + + ! Output the interpolation type + select case (interp_type) + case (1) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= cell centered")', this_pe, interp_type + call fill_nest_from_buffer_cell_center("A", x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + ! case (3) + ! if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= C grid staggered")', this_pe, interp_type + case (4) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= D grid staggered")', this_pe, interp_type + call fill_nest_from_buffer_cell_center("D", x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + case (9) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= nearest neighbor cell centered")', this_pe, interp_type + !call fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, dir, wt) + call mpp_error(FATAL, '2D fill_nest_from_buffer_nearest_neighbor not yet implemented.') + case default + if (debug_log) print '("[ERROR] WDR FNB this_tile. npe=",I0," UNDEFINED interp_type=",I0)', this_pe, interp_type + call mpp_error(FATAL, 'interp_single_nest got invalid value for interp_type from namelist.') + end select + + end subroutine fill_nest_from_buffer_r4_2d + + + !>@brief This subroutine fills the nest halo data from the coarse grid data by downscaling. + !>@details Applicable to any interpolation type + + subroutine fill_nest_from_buffer_r8_2d(interp_type, x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + implicit none + + integer, intent(in) :: interp_type + real*8, allocatable, intent(inout) :: x(:,:) + real*8, allocatable, intent(in) :: buffer(:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + integer :: this_pe + this_pe = mpp_pe() + + ! Output the interpolation type + select case (interp_type) + case (1) + if (debug_log) print '("[INFO] WDR FNB this_tile. 
npe=",I0," interp_type=",I0,"= cell centered")', this_pe, interp_type + call fill_nest_from_buffer_cell_center("A", x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + ! case (3) + ! if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= C grid staggered")', this_pe, interp_type + case (4) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= D grid staggered")', this_pe, interp_type + call fill_nest_from_buffer_cell_center("D", x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + case (9) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= nearest neighbor cell centered")', this_pe, interp_type + !call fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, dir, wt) + call mpp_error(FATAL, '2D fill_nest_from_buffer_nearest_neighbor not yet implemented.') + case default + if (debug_log) print '("[ERROR] WDR FNB this_tile. npe=",I0," UNDEFINED interp_type=",I0)', this_pe, interp_type + call mpp_error(FATAL, 'interp_single_nest got invalid value for interp_type from namelist.') + end select + + end subroutine fill_nest_from_buffer_r8_2d + + + subroutine fill_nest_from_buffer_masked(interp_type, x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind, mask_var, mask_val, default_val) + implicit none + + integer, intent(in) :: interp_type + real*8, allocatable, intent(inout) :: x(:,:) + real*8, allocatable, intent(in) :: buffer(:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + real, allocatable, intent(in) :: mask_var(:,:) + integer, intent(in) :: mask_val + real*8, intent(in) :: default_val + + integer :: this_pe + this_pe = mpp_pe() + + ! Output the interpolation type + select case (interp_type) + case (1) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= cell centered")', this_pe, interp_type + call fill_nest_from_buffer_cell_center("A", x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + ! case (3) + ! if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= C grid staggered")', this_pe, interp_type + case (4) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= D grid staggered")', this_pe, interp_type + call fill_nest_from_buffer_cell_center("D", x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + case (7) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= MASKED")', this_pe, interp_type + call fill_nest_from_buffer_cell_center_masked("A", x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind, mask_var, mask_val, default_val) + case (9) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= nearest neighbor cell centered")', this_pe, interp_type + !call fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, dir, wt) + call mpp_error(FATAL, '2D fill_nest_from_buffer_nearest_neighbor not yet implemented.') + case default + if (debug_log) print '("[ERROR] WDR FNB this_tile. 
npe=",I0," UNDEFINED interp_type=",I0)', this_pe, interp_type + call mpp_error(FATAL, 'interp_single_nest got invalid value for interp_type from namelist.') + end select + + end subroutine fill_nest_from_buffer_masked + + + + subroutine fill_nest_from_buffer_r4_3d(interp_type, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + + integer, intent(in) :: interp_type + real*4, allocatable, intent(inout) :: x(:,:,:) + real*4, allocatable, intent(in) :: buffer(:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + integer :: this_pe + this_pe = mpp_pe() + + ! Output the interpolation type + select case (interp_type) + case (1) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= cell centered")', this_pe, interp_type + call fill_nest_from_buffer_cell_center("A", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + ! case (3) + ! if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= C grid staggered")', this_pe, interp_type + case (4) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= D grid staggered")', this_pe, interp_type + call fill_nest_from_buffer_cell_center("D", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + case (9) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= nearest neighbor cell centered")', this_pe, interp_type + !call fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, nz, dir, wt) + call mpp_error(FATAL, 'fill_nest_from_buffer_nearest_neighbor is not yet implemented.') + case default + if (debug_log) print '("[ERROR] WDR FNB this_tile. npe=",I0," UNDEFINED interp_type=",I0)', this_pe, interp_type + call mpp_error(FATAL, 'interp_single_nest got invalid value for interp_type from namelist.') + end select + + end subroutine fill_nest_from_buffer_r4_3d + + + subroutine fill_nest_from_buffer_r8_3d(interp_type, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + + integer, intent(in) :: interp_type + real*8, allocatable, intent(inout) :: x(:,:,:) + real*8, allocatable, intent(in) :: buffer(:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + integer :: this_pe + this_pe = mpp_pe() + + ! Output the interpolation type + select case (interp_type) + case (1) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= cell centered")', this_pe, interp_type + call fill_nest_from_buffer_cell_center("A", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + ! case (3) + ! if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= C grid staggered")', this_pe, interp_type + case (4) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= D grid staggered")', this_pe, interp_type + call fill_nest_from_buffer_cell_center("D", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + case (9) + if (debug_log) print '("[INFO] WDR FNB this_tile. 
npe=",I0," interp_type=",I0,"= nearest neighbor cell centered")', this_pe, interp_type + call mpp_error(FATAL, 'nearest_neighbor is not yet implemented for fv_moving_nest_utils.F90::fill_nest_from_buffer_3D_kindphys') + !call fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, nz, dir, wt) + case default + if (debug_log) print '("[ERROR] WDR FNB this_tile. npe=",I0," UNDEFINED interp_type=",I0)', this_pe, interp_type + call mpp_error(FATAL, 'interp_single_nest got invalid value for interp_type from namelist.') + end select + + end subroutine fill_nest_from_buffer_r8_3d + + + !>@brief This subroutine fills the nest halo data from the coarse grid data by downscaling. + !>@details Applicable to any interpolation type + + subroutine fill_nest_from_buffer_r4_4d(interp_type, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + + integer, intent(in) :: interp_type + real*4, allocatable, intent(inout) :: x(:,:,:,:) + real*4, allocatable, intent(in) :: buffer(:,:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + integer :: this_pe + this_pe = mpp_pe() + + ! Output the interpolation type + select case (interp_type) + case (1) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= cell centered")', this_pe, interp_type + call fill_nest_from_buffer_cell_center("A", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + ! case (3) + ! if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= C grid staggered")', this_pe, interp_type + case (4) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= D grid staggered")', this_pe, interp_type + call fill_nest_from_buffer_cell_center("D", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + case (9) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= nearest neighbor cell centered")', this_pe, interp_type + !call fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, nz, dir, wt) + call mpp_error(FATAL, '4D fill_nest_from_buffer_nearest_neighbor not yet implemented.') + case default + if (debug_log) print '("[ERROR] WDR FNB this_tile. npe=",I0," UNDEFINED interp_type=",I0)', this_pe, interp_type + call mpp_error(FATAL, 'interp_single_nest got invalid value for interp_type from namelist.') + end select + + end subroutine fill_nest_from_buffer_r4_4d + + + subroutine fill_nest_from_buffer_r8_4d(interp_type, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + + integer, intent(in) :: interp_type + real*8, allocatable, intent(inout) :: x(:,:,:,:) + real*8, allocatable, intent(in) :: buffer(:,:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + integer :: this_pe + this_pe = mpp_pe() + + ! Output the interpolation type + select case (interp_type) + case (1) + if (debug_log) print '("[INFO] WDR FNB this_tile. 
npe=",I0," interp_type=",I0,"= cell centered")', this_pe, interp_type + call fill_nest_from_buffer_cell_center("A", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + ! case (3) + ! if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= C grid staggered")', this_pe, interp_type + case (4) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= D grid staggered")', this_pe, interp_type + call fill_nest_from_buffer_cell_center("D", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + case (9) + if (debug_log) print '("[INFO] WDR FNB this_tile. npe=",I0," interp_type=",I0,"= nearest neighbor cell centered")', this_pe, interp_type + !call fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, nz, dir, wt) + call mpp_error(FATAL, '4D fill_nest_from_buffer_nearest_neighbor not yet implemented.') + case default + if (debug_log) print '("[ERROR] WDR FNB this_tile. npe=",I0," UNDEFINED interp_type=",I0)', this_pe, interp_type + call mpp_error(FATAL, 'interp_single_nest got invalid value for interp_type from namelist.') + end select + + end subroutine fill_nest_from_buffer_r8_4d + + + !>@brief This subroutine fills the nest halo data from the coarse grid data by downscaling. It can accommodate all grid staggers, using the stagger variable. [The routine needs to be renamed since "_from_cell_center" has become incorrect.) + !>@details Applicable to any interpolation type + + subroutine fill_nest_from_buffer_cell_center_r4_2d(stagger, x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + implicit none + character ( len = 1 ), intent(in) :: stagger + real*4, allocatable, intent(inout) :: x(:,:) + real*4, allocatable, intent(in) :: buffer(:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + character(len=8) :: dir_str + integer :: i, j, k, ic, jc + integer :: focus_i = 1 + integer :: focus_j = 1 + integer :: this_pe + + this_pe = mpp_pe() + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + if (debug_log) print '("[INFO] WDR FNFBCC start if (debug_log) print ",A1," ",A8," buffer. npe=",I0," buffer(",I0,"-",I0,",",I0,"-",I0,")")', stagger, dir_str, this_pe, lbound(buffer,1), ubound(buffer,1), lbound(buffer,2), ubound(buffer,2) + + if (debug_log) print '("[INFO] WDR FNFBCCX start print ",A1," ",A8," x. npe=",I0," x(",I0,"-",I0,",",I0,"-",I0,")")', stagger, dir_str, this_pe, lbound(x,1), ubound(x,1), lbound(x,2), ubound(x,2) + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. bbox_coarse%js ) then + + if (debug_log) print '("[INFO] WDR BUFR print ",A8," large buffer. 
npe=",I0," buffer(is_c, js_c)=",F12.5," buffer(ie_c-1, je_c-1)=",F12.5)', dir_str, this_pe, buffer(bbox_coarse%is, bbox_coarse%js), buffer(bbox_coarse%ie-1, bbox_coarse%je-1) + + if (debug_log) print '("[INFO] WDR ",A8," BOUNDS i npe=",I0," is_f=",I0," ie_f=",I0," is_c=",I0," ie_c=",I0)', dir_str, this_pe, bbox_fine%is, bbox_fine%ie, bbox_coarse%is, bbox_coarse%ie + if (debug_log) print '("[INFO] WDR ",A8," BOUNDS j npe=",I0," js_f=",I0," je_f=",I0," js_c=",I0," je_c=",I0)', dir_str, this_pe, bbox_fine%js, bbox_fine%je, bbox_coarse%js, bbox_coarse%je + + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + !if (stagger == "A") then + !else if (stagger == "C") then + !else if (stagger == "D") then + !endif + + ic = ind(i,j,1) + jc = ind(i,j,2) + + x(i,j) = & + wt(i,j,1)*buffer(ic, jc ) + & + wt(i,j,2)*buffer(ic, jc+1) + & + wt(i,j,3)*buffer(ic+1,jc+1) + & + wt(i,j,4)*buffer(ic+1,jc ) + + !call check_array(buffer, this_pe, "buffer"//dir_str, -300.0, 300.0) + !call check_array(wt, this_pe, "wt"//dir_str, 0.0, 1.0) + if (debug_log) print '("[INFO] WDR FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,") ic,jc=(",I0,",",I0,"): wt:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, ic, jc, wt(i,j,1), wt(i,j,2), wt(i,j,3), wt(i,j,4) + enddo + enddo + else + if (debug_log) print '("[INFO] WDR NIL BUFR print ",A8," buffer. npe=",I0)', dir_str, this_pe + endif + + if (debug_log) print '("[INFO] WDR FILLNEST DONE print ",A8," buffer. npe=",I0)', dir_str, this_pe + + end subroutine fill_nest_from_buffer_cell_center_r4_2d + + + subroutine fill_nest_from_buffer_cell_center_r8_2d(stagger, x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + implicit none + character ( len = 1 ), intent(in) :: stagger + real*8, allocatable, intent(inout) :: x(:,:) + real*8, allocatable, intent(in) :: buffer(:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + character(len=8) :: dir_str + integer :: i, j, k, ic, jc + integer :: focus_i = 1 + integer :: focus_j = 1 + integer :: this_pe + + this_pe = mpp_pe() + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + if (debug_log) print '("[INFO] WDR FNFBCC start if (debug_log) print ",A1," ",A8," buffer. npe=",I0," buffer(",I0,"-",I0,",",I0,"-",I0,")")', stagger, dir_str, this_pe, lbound(buffer,1), ubound(buffer,1), lbound(buffer,2), ubound(buffer,2) + + if (debug_log) print '("[INFO] WDR FNFBCCX start print ",A1," ",A8," x. npe=",I0," x(",I0,"-",I0,",",I0,"-",I0,")")', stagger, dir_str, this_pe, lbound(x,1), ubound(x,1), lbound(x,2), ubound(x,2) + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. bbox_coarse%js ) then + + if (debug_log) print '("[INFO] WDR BUFR print ",A8," large buffer. 
npe=",I0," buffer(is_c, js_c)=",F12.5," buffer(ie_c-1, je_c-1)=",F12.5)', dir_str, this_pe, buffer(bbox_coarse%is, bbox_coarse%js), buffer(bbox_coarse%ie-1, bbox_coarse%je-1) + + if (debug_log) print '("[INFO] WDR ",A8," BOUNDS i npe=",I0," is_f=",I0," ie_f=",I0," is_c=",I0," ie_c=",I0)', dir_str, this_pe, bbox_fine%is, bbox_fine%ie, bbox_coarse%is, bbox_coarse%ie + if (debug_log) print '("[INFO] WDR ",A8," BOUNDS j npe=",I0," js_f=",I0," je_f=",I0," js_c=",I0," je_c=",I0)', dir_str, this_pe, bbox_fine%js, bbox_fine%je, bbox_coarse%js, bbox_coarse%je + + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + !if (stagger == "A") then + !else if (stagger == "C") then + !else if (stagger == "D") then + !endif + + ic = ind(i,j,1) + jc = ind(i,j,2) + + x(i,j) = & + wt(i,j,1)*buffer(ic, jc ) + & + wt(i,j,2)*buffer(ic, jc+1) + & + wt(i,j,3)*buffer(ic+1,jc+1) + & + wt(i,j,4)*buffer(ic+1,jc ) + + !call check_array(buffer, this_pe, "buffer"//dir_str, -300.0, 300.0) + !call check_array(wt, this_pe, "wt"//dir_str, 0.0, 1.0) + if (debug_log) print '("[INFO] WDR FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,") ic,jc=(",I0,",",I0,"): wt:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, ic, jc, wt(i,j,1), wt(i,j,2), wt(i,j,3), wt(i,j,4) + enddo + enddo + else + if (debug_log) print '("[INFO] WDR NIL BUFR print ",A8," buffer. npe=",I0)', dir_str, this_pe + endif + + if (debug_log) print '("[INFO] WDR FILLNEST DONE print ",A8," buffer. npe=",I0)', dir_str, this_pe + + end subroutine fill_nest_from_buffer_cell_center_r8_2d + + + subroutine fill_nest_from_buffer_cell_center_masked(stagger, x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind, mask_var, mask_val, default_val) + implicit none + character ( len = 1 ), intent(in) :: stagger + real*8, allocatable, intent(inout) :: x(:,:) + real*8, allocatable, intent(in) :: buffer(:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + real, allocatable, intent(in) :: mask_var(:,:) + integer, intent(in) :: mask_val + real*8, intent(in) :: default_val + + character(len=8) :: dir_str + integer :: i, j, k, ic, jc + integer :: focus_i = 1 + integer :: focus_j = 1 + integer :: this_pe + real :: tw + + this_pe = mpp_pe() + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + if (debug_log) print '("[INFO] WDR FNFBCC start if (debug_log) print ",A1," ",A8," buffer. npe=",I0," buffer(",I0,"-",I0,",",I0,"-",I0,")")', stagger, dir_str, this_pe, lbound(buffer,1), ubound(buffer,1), lbound(buffer,2), ubound(buffer,2) + + if (debug_log) print '("[INFO] WDR FNFBCCX start print ",A1," ",A8," x. npe=",I0," x(",I0,"-",I0,",",I0,"-",I0,")")', stagger, dir_str, this_pe, lbound(x,1), ubound(x,1), lbound(x,2), ubound(x,2) + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. bbox_coarse%js ) then + + if (debug_log) print '("[INFO] WDR BUFR print ",A8," large buffer. 
npe=",I0," buffer(is_c, js_c)=",F12.5," buffer(ie_c-1, je_c-1)=",F12.5)', dir_str, this_pe, buffer(bbox_coarse%is, bbox_coarse%js), buffer(bbox_coarse%ie-1, bbox_coarse%je-1) + + if (debug_log) print '("[INFO] WDR ",A8," BOUNDS i npe=",I0," is_f=",I0," ie_f=",I0," is_c=",I0," ie_c=",I0)', dir_str, this_pe, bbox_fine%is, bbox_fine%ie, bbox_coarse%is, bbox_coarse%ie + if (debug_log) print '("[INFO] WDR ",A8," BOUNDS j npe=",I0," js_f=",I0," je_f=",I0," js_c=",I0," je_c=",I0)', dir_str, this_pe, bbox_fine%js, bbox_fine%je, bbox_coarse%js, bbox_coarse%je + + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + + ic = ind(i,j,1) + jc = ind(i,j,2) + + !x(i,j) = & + ! wt(i,j,1)*buffer(ic, jc ) + & + ! wt(i,j,2)*buffer(ic, jc+1) + & + ! wt(i,j,3)*buffer(ic+1,jc+1) + & + ! wt(i,j,4)*buffer(ic+1,jc ) + + ! Land type + !if (mask_var(i,j) .eq. mask_val) then + x(i,j) = 0.0 + tw = 0.0 + if (buffer(ic,jc) .gt. -1.0) x(i,j) = x(i,j) + wt(i,j,1)*buffer(ic, jc ) + if (buffer(ic,jc+1) .gt. -1.0) x(i,j) = x(i,j) + wt(i,j,1)*buffer(ic, jc+1) + if (buffer(ic+1,jc+1) .gt. -1.0) x(i,j) = x(i,j) + wt(i,j,1)*buffer(ic+1,jc+1) + if (buffer(ic+1,jc) .gt. -1.0) x(i,j) = x(i,j) + wt(i,j,1)*buffer(ic+1,jc ) + + if (buffer(ic,jc) .gt. -1.0) tw = tw + wt(i,j,1) + if (buffer(ic,jc+1) .gt. -1.0) tw = tw + wt(i,j,1) + if (buffer(ic+1,jc+1) .gt. -1.0) tw = tw + wt(i,j,1) + if (buffer(ic+1,jc) .gt. -1.0) tw = tw + wt(i,j,1) + + if (tw .gt. 0.0) then + x(i,j) = x(i,j) / tw + else + x(i,j) = default_val + endif + + + if (x(i,j) .lt. 0.0) print '("[WARN] WDR MASK npe=",I0," i,j=",I5,I5," x()=",F15.5," tw=",F10.5)', this_pe, i, j, x(i,j), tw + + !else + ! x(i,j) = & + ! wt(i,j,1)*buffer(ic, jc ) + & + ! wt(i,j,2)*buffer(ic, jc+1) + & + ! wt(i,j,3)*buffer(ic+1,jc+1) + & + ! wt(i,j,4)*buffer(ic+1,jc ) + !endif + + !call check_array(buffer, this_pe, "buffer"//dir_str, -300.0, 300.0) + !call check_array(wt, this_pe, "wt"//dir_str, 0.0, 1.0) + if (debug_log) print '("[INFO] WDR FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,") ic,jc=(",I0,",",I0,"): wt:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, ic, jc, wt(i,j,1), wt(i,j,2), wt(i,j,3), wt(i,j,4) + enddo + enddo + else + if (debug_log) print '("[INFO] WDR NIL BUFR print ",A8," buffer. npe=",I0)', dir_str, this_pe + endif + + if (debug_log) print '("[INFO] WDR FILLNEST DONE print ",A8," buffer. npe=",I0)', dir_str, this_pe + + end subroutine fill_nest_from_buffer_cell_center_masked + + + subroutine fill_nest_from_buffer_cell_center_r4_3d(stagger, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + character ( len = 1 ), intent(in) :: stagger + real*4, allocatable, intent(inout) :: x(:,:,:) + real*4, allocatable, intent(in) :: buffer(:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + character(len=8) :: dir_str + integer :: i, j, k, ic, jc + integer :: focus_i = 1 + integer :: focus_j = 1 + integer :: this_pe + + this_pe = mpp_pe() + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + if (debug_log) print '("[INFO] WDR FNFBCC start if (debug_log) print ",A1," ",A8," buffer. 
npe=",I0," buffer(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', stagger, dir_str, this_pe, lbound(buffer,1), ubound(buffer,1), lbound(buffer,2), ubound(buffer,2), lbound(buffer,3), ubound(buffer,3) + + if (debug_log) print '("[INFO] WDR FNFBCCX start print ",A1," ",A8," x. npe=",I0," x(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', stagger, dir_str, this_pe, lbound(x,1), ubound(x,1), lbound(x,2), ubound(x,2), lbound(x,3), ubound(x,3) + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. bbox_coarse%js ) then + + if (debug_log) print '("[INFO] WDR BUFR print ",A8," large buffer. npe=",I0," buffer(is_c, js_c, nz)=",F12.5," buffer(ie_c-1, je_c-1, nz)=",F12.5)', dir_str, this_pe, buffer(bbox_coarse%is, bbox_coarse%js, nz), buffer(bbox_coarse%ie-1, bbox_coarse%je-1, nz) + + if (debug_log) print '("[INFO] WDR ",A8," BOUNDS i npe=",I0," is_f=",I0," ie_f=",I0," is_c=",I0," ie_c=",I0)', dir_str, this_pe, bbox_fine%is, bbox_fine%ie, bbox_coarse%is, bbox_coarse%ie + if (debug_log) print '("[INFO] WDR ",A8," BOUNDS j npe=",I0," js_f=",I0," je_f=",I0," js_c=",I0," je_c=",I0)', dir_str, this_pe, bbox_fine%js, bbox_fine%je, bbox_coarse%js, bbox_coarse%je + + do k=1,nz + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + !if (stagger == "A") then + !else if (stagger == "C") then + !else if (stagger == "D") then + !endif + + ic = ind(i,j,1) + jc = ind(i,j,2) + + x(i,j,k) = & + wt(i,j,1)*buffer(ic, jc, k) + & + wt(i,j,2)*buffer(ic, jc+1,k) + & + wt(i,j,3)*buffer(ic+1,jc+1,k) + & + wt(i,j,4)*buffer(ic+1,jc, k) + + !call check_array(buffer, this_pe, "buffer"//dir_str, -300.0, 300.0) + !call check_array(wt, this_pe, "wt"//dir_str, 0.0, 1.0) + if (debug_log) print '("[INFO] WDR FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,",",I0,") ic,jc=(",I0,",",I0,"): wt:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, k, ic, jc, wt(i,j,1), wt(i,j,2), wt(i,j,3), wt(i,j,4) + if (debug_log) print '("[INFO] WDR FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,",",I0,") : buffer:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, k, buffer(ic,jc,k), buffer(ic,jc+1,k), buffer(ic+1,jc+1,k), buffer(ic+1,jc,k) + if (debug_log) print '("[INFO] WDR after FILL nest from ",A8," buffer. npe=",I0," x(",I0,",",I0,",",I0,")=",F12.5)', dir_str, this_pe, i, j, k, x(i,j,k) + if (debug_log) print '("[INFO] WDR FILLNEST from ",A8," buffer. npe=",I0," i,j=(",I0,",",I0,") ic,jc=(",I0,",",I0") x=",F12.5)', dir_str, this_pe, i, j, ic, jc, x(i,j,k) + !! Debugging printing + !if ( ( i == focus_i ) .and. ( j == focus_j ) ) then + ! if (debug_log) print '("[INFO] WDR FOCUS FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,",",I0,") ic,jc=(",I0,",",I0,"): wt:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, k, ic, jc, wt(i,j,1), wt(i,j,2), wt(i,j,3), wt(i,j,4) + ! if (debug_log) print '("[INFO] WDR FOCUS FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,",",I0,") : buffer:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, k, buffer(ic,jc,k), buffer(ic,jc+1,k), buffer(ic+1,jc+1,k), buffer(ic+1,jc,k) + ! if (debug_log) print '("[INFO] WDR FOCUS after FILL nest from ",A8," buffer. npe=",I0," x(",I0,",",I0,",",I0,")=",F12.5)', dir_str, this_pe, i, j, k, x(i,j,k) + ! if (debug_log) print '("[INFO] WDR FOCUS FILLNEST from ",A8," buffer. npe=",I0," i,j=(",I0,",",I0,") ic,jc=(",I0,",",I0") x=",F12.5)', dir_str, this_pe, i, j, ic, jc, x(i,j,k) + !endif + + enddo + enddo + enddo + else + if (debug_log) print '("[INFO] WDR NIL BUFR print ",A8," buffer. 
npe=",I0)', dir_str, this_pe + !if (debug_log) print '("[INFO WDR NIL BUFR ",A8," BOUNDS i npe=",I0,"is_f=",I0," ie_f=",I0,"is_c=",I0," ie_c=",I0)', dir_str, this_pe, is_f, ie_f, is_c, ie_c + !if (debug_log) print '("[INFO WDR NIL BUFR ",A8," BOUNDS j npe=",I0,"js_f=",I0," je_f=",I0,"js_c=",I0," je_c=",I0)', dir_str, this_pe, js_f, je_f, js_c, je_c + + endif + + if (debug_log) print '("[INFO] WDR FILLNEST DONE print ",A8," buffer. npe=",I0)', dir_str, this_pe + + end subroutine fill_nest_from_buffer_cell_center_r4_3d + + subroutine fill_nest_from_buffer_cell_center_r8_3d(stagger, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + character ( len = 1 ), intent(in) :: stagger + real*8, allocatable, intent(inout) :: x(:,:,:) + real*8, allocatable, intent(in) :: buffer(:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + character(len=8) :: dir_str + integer :: i, j, k, ic, jc + integer :: focus_i = 1 + integer :: focus_j = 1 + integer :: this_pe + + this_pe = mpp_pe() + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + if (debug_log) print '("[INFO] WDR FNFBCC start if (debug_log) print ",A1," ",A8," buffer. npe=",I0," buffer(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', stagger, dir_str, this_pe, lbound(buffer,1), ubound(buffer,1), lbound(buffer,2), ubound(buffer,2), lbound(buffer,3), ubound(buffer,3) + + if (debug_log) print '("[INFO] WDR FNFBCCX start print ",A1," ",A8," x. npe=",I0," x(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', stagger, dir_str, this_pe, lbound(x,1), ubound(x,1), lbound(x,2), ubound(x,2), lbound(x,3), ubound(x,3) + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. bbox_coarse%js ) then + + if (debug_log) print '("[INFO] WDR BUFR print ",A8," large buffer. 
npe=",I0," buffer(is_c, js_c, nz)=",F12.5," buffer(ie_c-1, je_c-1, nz)=",F12.5)', dir_str, this_pe, buffer(bbox_coarse%is, bbox_coarse%js, nz), buffer(bbox_coarse%ie-1, bbox_coarse%je-1, nz) + + if (debug_log) print '("[INFO] WDR ",A8," BOUNDS i npe=",I0," is_f=",I0," ie_f=",I0," is_c=",I0," ie_c=",I0)', dir_str, this_pe, bbox_fine%is, bbox_fine%ie, bbox_coarse%is, bbox_coarse%ie + if (debug_log) print '("[INFO] WDR ",A8," BOUNDS j npe=",I0," js_f=",I0," je_f=",I0," js_c=",I0," je_c=",I0)', dir_str, this_pe, bbox_fine%js, bbox_fine%je, bbox_coarse%js, bbox_coarse%je + + do k=1,nz + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + !if (stagger == "A") then + !else if (stagger == "C") then + !else if (stagger == "D") then + !endif + + ic = ind(i,j,1) + jc = ind(i,j,2) + + x(i,j,k) = & + wt(i,j,1)*buffer(ic, jc, k) + & + wt(i,j,2)*buffer(ic, jc+1,k) + & + wt(i,j,3)*buffer(ic+1,jc+1,k) + & + wt(i,j,4)*buffer(ic+1,jc, k) + + !call check_array(buffer, this_pe, "buffer"//dir_str, -300.0, 300.0) + !call check_array(wt, this_pe, "wt"//dir_str, 0.0, 1.0) + if (debug_log) print '("[INFO] WDR FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,",",I0,") ic,jc=(",I0,",",I0,"): wt:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, k, ic, jc, wt(i,j,1), wt(i,j,2), wt(i,j,3), wt(i,j,4) + if (debug_log) print '("[INFO] WDR FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,",",I0,") : buffer:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, k, buffer(ic,jc,k), buffer(ic,jc+1,k), buffer(ic+1,jc+1,k), buffer(ic+1,jc,k) + if (debug_log) print '("[INFO] WDR after FILL nest from ",A8," buffer. npe=",I0," x(",I0,",",I0,",",I0,")=",F12.5)', dir_str, this_pe, i, j, k, x(i,j,k) + if (debug_log) print '("[INFO] WDR FILLNEST from ",A8," buffer. npe=",I0," i,j=(",I0,",",I0,") ic,jc=(",I0,",",I0") x=",F12.5)', dir_str, this_pe, i, j, ic, jc, x(i,j,k) + !! Debugging printing + !if ( ( i == focus_i ) .and. ( j == focus_j ) ) then + ! if (debug_log) print '("[INFO] WDR FOCUS FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,",",I0,") ic,jc=(",I0,",",I0,"): wt:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, k, ic, jc, wt(i,j,1), wt(i,j,2), wt(i,j,3), wt(i,j,4) + ! if (debug_log) print '("[INFO] WDR FOCUS FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,",",I0,") : buffer:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, k, buffer(ic,jc,k), buffer(ic,jc+1,k), buffer(ic+1,jc+1,k), buffer(ic+1,jc,k) + ! if (debug_log) print '("[INFO] WDR FOCUS after FILL nest from ",A8," buffer. npe=",I0," x(",I0,",",I0,",",I0,")=",F12.5)', dir_str, this_pe, i, j, k, x(i,j,k) + ! if (debug_log) print '("[INFO] WDR FOCUS FILLNEST from ",A8," buffer. npe=",I0," i,j=(",I0,",",I0,") ic,jc=(",I0,",",I0") x=",F12.5)', dir_str, this_pe, i, j, ic, jc, x(i,j,k) + !endif + + enddo + enddo + enddo + else + if (debug_log) print '("[INFO] WDR NIL BUFR print ",A8," buffer. npe=",I0)', dir_str, this_pe + !if (debug_log) print '("[INFO WDR NIL BUFR ",A8," BOUNDS i npe=",I0,"is_f=",I0," ie_f=",I0,"is_c=",I0," ie_c=",I0)', dir_str, this_pe, is_f, ie_f, is_c, ie_c + !if (debug_log) print '("[INFO WDR NIL BUFR ",A8," BOUNDS j npe=",I0,"js_f=",I0," je_f=",I0,"js_c=",I0," je_c=",I0)', dir_str, this_pe, js_f, je_f, js_c, je_c + + endif + + if (debug_log) print '("[INFO] WDR FILLNEST DONE print ",A8," buffer. 
npe=",I0)', dir_str, this_pe + + end subroutine fill_nest_from_buffer_cell_center_r8_3d + + + subroutine fill_nest_from_buffer_cell_center_r4_4d(stagger, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + character ( len = 1 ), intent(in) :: stagger + real*4, allocatable, intent(inout) :: x(:,:,:,:) + real*4, allocatable, intent(in) :: buffer(:,:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + character(len=8) :: dir_str + integer :: i, j, k, v, ic, jc + integer :: focus_i = 1 + integer :: focus_j = 1 + integer :: this_pe + + this_pe = mpp_pe() + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + if (debug_log) print '("[INFO] WDR FNFBCC4D start print ",A1," ",A8," buffer. npe=",I0," buffer(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', stagger, dir_str, this_pe, lbound(buffer,1), ubound(buffer,1), lbound(buffer,2), ubound(buffer,2), lbound(buffer,3), ubound(buffer,3), lbound(buffer,4), ubound(buffer,4) + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. bbox_coarse%js ) then + + if (debug_log) print '("[INFO] WDR BUFR print ",A8," large buffer. npe=",I0," buffer(is_c, js_c, nz, 1)=",F12.5," buffer(ie_c-1, je_c-1, nz, 1)=",F12.5)', dir_str, this_pe, buffer(bbox_coarse%is, bbox_coarse%js, nz, 1), buffer(bbox_coarse%ie-1, bbox_coarse%je-1, nz, 1) + + if (debug_log) print '("[INFO] WDR ",A8," BOUNDS i npe=",I0,"is_f=",I0," ie_f=",I0," is_c=",I0," ie_c=",I0)', dir_str, this_pe, bbox_fine%is, bbox_fine%ie, bbox_coarse%is, bbox_coarse%ie + if (debug_log) print '("[INFO] WDR ",A8," BOUNDS j npe=",I0,"js_f=",I0," je_f=",I0," js_c=",I0," je_c=",I0)', dir_str, this_pe, bbox_fine%js, bbox_fine%je, bbox_coarse%js, bbox_coarse%je + + do v=1,ubound(buffer,4) + do k=1,nz + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + ic = ind(i,j,1) + jc = ind(i,j,2) + + !if (debug_log) print '("[INFO] WDR fill_nest from ",A8," buffer. npe=",I0," i,j=(",I0,",",I0,") ic,jc=(",I0,",",I0")")', dir_str, this_pe, i, j, ic, jc + + !if (debug_log) print '("[INFO] WDR before FILL nest from ",A8," buffer. npe=",I0," x(",I0,",",I0,",",I0,")=",F12.5)', dir_str, this_pe, i, j, k, x(i,j,k,v) + + ! Fill in with weighted interpolation + ! x(i,j,k) = & + ! wt(i,j,1)*buffer(ic, jc, k) + & + ! wt(i,j,2)*buffer(ic, jc+1,k) + & + ! wt(i,j,3)*buffer(ic+1,jc+1,k) + & + ! wt(i,j,4)*buffer(ic+1,jc, k) + + ! wt(iw,jw,1)=dist2*dist3 ! ic, jc weight + ! wt(iw,jw,2)=dist3*dist4 ! ic, jc+2 weight + ! wt(iw,jw,3)=dist4*dist1 ! ic+2, jc+2 weight + ! wt(iw,jw,4)=dist1*dist2 ! 
ic+2, jc weight + + x(i,j,k,v) = & + wt(i,j,1)*buffer(ic, jc, k, v) + & + wt(i,j,2)*buffer(ic, jc+1,k, v) + & + wt(i,j,3)*buffer(ic+1,jc+1,k, v) + & + wt(i,j,4)*buffer(ic+1,jc, k, v) + + !call check_array(buffer, this_pe, "buffer"//dir_str, -300.0, 300.0) + !call check_array(wt, this_pe, "wt"//dir_str, 0.0, 1.0) + + !if (debug_log) print '("[INFO] WDR FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,",",I0,",",I0,") ic,jc=(",I0,",",I0,"): wt:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, k, v, ic, jc, wt(i,j,1), wt(i,j,2), wt(i,j,3), wt(i,j,4) + !if (debug_log) print '("[INFO] WDR FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,",",I0,",",I0,") : buffer:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, k, v, buffer(ic,jc,k,v), buffer(ic,jc+1,k,v), buffer(ic+1,jc+1,k,v), buffer(ic+1,jc,k,v) + + !if (debug_log) print '("[INFO] WDR after FILL nest from ",A8," buffer. npe=",I0," x(",I0,",",I0,",",I0,",",I0,")=",F12.5)', dir_str, this_pe, i, j, k, v,x(i,j,k,v) + + !if (debug_log) print '("[INFO] WDR FILLNEST4D from ",A8," buffer. npe=",I0," i,j=(",I0,",",I0,") ic,jc=(",I0,",",I0") x=",F12.5)', dir_str, this_pe, i, j, ic, jc, x(i,j,k,v) + + enddo + enddo + enddo + enddo + else + if (debug_log) print '("[INFO] WDR NIL BUFR print ",A8," buffer. npe=",I0)', dir_str, this_pe + endif + + if (debug_log) print '("[INFO] WDR FILLNEST4D DONE print ",A8," buffer. npe=",I0)', dir_str, this_pe + + end subroutine fill_nest_from_buffer_cell_center_r4_4d + + + subroutine fill_nest_from_buffer_cell_center_r8_4d(stagger, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + character ( len = 1 ), intent(in) :: stagger + real*8, allocatable, intent(inout) :: x(:,:,:,:) + real*8, allocatable, intent(in) :: buffer(:,:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + character(len=8) :: dir_str + integer :: i, j, k, v, ic, jc + integer :: focus_i = 1 + integer :: focus_j = 1 + integer :: this_pe + + this_pe = mpp_pe() + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + if (debug_log) print '("[INFO] WDR FNFBCC4D start print ",A1," ",A8," buffer. npe=",I0," buffer(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', stagger, dir_str, this_pe, lbound(buffer,1), ubound(buffer,1), lbound(buffer,2), ubound(buffer,2), lbound(buffer,3), ubound(buffer,3), lbound(buffer,4), ubound(buffer,4) + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. bbox_coarse%js ) then + + if (debug_log) print '("[INFO] WDR BUFR print ",A8," large buffer. 
npe=",I0," buffer(is_c, js_c, nz, 1)=",F12.5," buffer(ie_c-1, je_c-1, nz, 1)=",F12.5)', dir_str, this_pe, buffer(bbox_coarse%is, bbox_coarse%js, nz, 1), buffer(bbox_coarse%ie-1, bbox_coarse%je-1, nz, 1) + + if (debug_log) print '("[INFO] WDR ",A8," BOUNDS i npe=",I0,"is_f=",I0," ie_f=",I0," is_c=",I0," ie_c=",I0)', dir_str, this_pe, bbox_fine%is, bbox_fine%ie, bbox_coarse%is, bbox_coarse%ie + if (debug_log) print '("[INFO] WDR ",A8," BOUNDS j npe=",I0,"js_f=",I0," je_f=",I0," js_c=",I0," je_c=",I0)', dir_str, this_pe, bbox_fine%js, bbox_fine%je, bbox_coarse%js, bbox_coarse%je + + do v=1,ubound(buffer,4) + do k=1,nz + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + ic = ind(i,j,1) + jc = ind(i,j,2) + + !if (debug_log) print '("[INFO] WDR fill_nest from ",A8," buffer. npe=",I0," i,j=(",I0,",",I0,") ic,jc=(",I0,",",I0")")', dir_str, this_pe, i, j, ic, jc + + !if (debug_log) print '("[INFO] WDR before FILL nest from ",A8," buffer. npe=",I0," x(",I0,",",I0,",",I0,")=",F12.5)', dir_str, this_pe, i, j, k, x(i,j,k,v) + + ! Fill in with weighted interpolation + ! x(i,j,k) = & + ! wt(i,j,1)*buffer(ic, jc, k) + & + ! wt(i,j,2)*buffer(ic, jc+1,k) + & + ! wt(i,j,3)*buffer(ic+1,jc+1,k) + & + ! wt(i,j,4)*buffer(ic+1,jc, k) + + ! wt(iw,jw,1)=dist2*dist3 ! ic, jc weight + ! wt(iw,jw,2)=dist3*dist4 ! ic, jc+2 weight + ! wt(iw,jw,3)=dist4*dist1 ! ic+2, jc+2 weight + ! wt(iw,jw,4)=dist1*dist2 ! ic+2, jc weight + + x(i,j,k,v) = & + wt(i,j,1)*buffer(ic, jc, k, v) + & + wt(i,j,2)*buffer(ic, jc+1,k, v) + & + wt(i,j,3)*buffer(ic+1,jc+1,k, v) + & + wt(i,j,4)*buffer(ic+1,jc, k, v) + + !call check_array(buffer, this_pe, "buffer"//dir_str, -300.0, 300.0) + !call check_array(wt, this_pe, "wt"//dir_str, 0.0, 1.0) + + !if (debug_log) print '("[INFO] WDR FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,",",I0,",",I0,") ic,jc=(",I0,",",I0,"): wt:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, k, v, ic, jc, wt(i,j,1), wt(i,j,2), wt(i,j,3), wt(i,j,4) + !if (debug_log) print '("[INFO] WDR FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,",",I0,",",I0,") : buffer:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, k, v, buffer(ic,jc,k,v), buffer(ic,jc+1,k,v), buffer(ic+1,jc+1,k,v), buffer(ic+1,jc,k,v) + + !if (debug_log) print '("[INFO] WDR after FILL nest from ",A8," buffer. npe=",I0," x(",I0,",",I0,",",I0,",",I0,")=",F12.5)', dir_str, this_pe, i, j, k, v,x(i,j,k,v) + + !if (debug_log) print '("[INFO] WDR FILLNEST4D from ",A8," buffer. npe=",I0," i,j=(",I0,",",I0,") ic,jc=(",I0,",",I0") x=",F12.5)', dir_str, this_pe, i, j, ic, jc, x(i,j,k,v) + + enddo + enddo + enddo + enddo + else + if (debug_log) print '("[INFO] WDR NIL BUFR print ",A8," buffer. npe=",I0)', dir_str, this_pe + endif + + if (debug_log) print '("[INFO] WDR FILLNEST4D DONE print ",A8," buffer. npe=",I0)', dir_str, this_pe + + end subroutine fill_nest_from_buffer_cell_center_r8_4d + + + subroutine fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, nz, dir, wt) + implicit none + + real, allocatable, intent(inout) :: x(:,:,:) + real, allocatable, intent(in) :: buffer(:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: dir + real, allocatable, intent(in) :: wt(:,:,:) ! 
The final dimension is always 4 + integer, intent(in) :: nz + + character(len=8) :: dir_str + integer :: i, j, k, ic, jc + integer :: nearest_idx + + integer :: this_pe + + this_pe = mpp_pe() + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. bbox_coarse%js ) then + if (debug_log) print '("[INFO] WDR BUFR print ",A8," large buffer. npe=",I0," buffer(is_c, js_c, nz)=",F12.5," buffer(ie_c-1, je_c-1, nz)=",F12.5)', dir_str, this_pe, buffer(bbox_coarse%is, bbox_coarse%js, nz), buffer(bbox_coarse%ie-1, bbox_coarse%je-1, nz) + + if (debug_log) print '("[INFO WDR ",A8," BOUNDS i npe=",I0,"is_f=",I0," ie_f=",I0,"is_c=",I0," ie_c=",I0)', dir_str, this_pe, bbox_fine%is, bbox_fine%ie, bbox_coarse%is, bbox_coarse%ie + if (debug_log) print '("[INFO WDR ",A8," BOUNDS j npe=",I0,"js_f=",I0," je_f=",I0,"js_c=",I0," je_c=",I0)', dir_str, this_pe, bbox_fine%js, bbox_fine%je, bbox_coarse%js, bbox_coarse%je + + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + + ! ic = (ie_c - is_c) / (ie_f - is_c) + ic = bbox_coarse%is + 1 + jc = bbox_coarse%js + 1 + + do k=1,nz + + if (debug_log) print '("[INFO] WDR before FILL nest from ",A8," buffer. npe=",I0," x(",I0,",",I0,",",I0,")=",F12.5)', dir_str, this_pe, i, j, k, x(i,j,k) + + ! Pick the maximum weight of the 4 + ! If two are tied for the max weight, use whichever one maxloc returns first + ! TODO Might need a more deterministic algorithm here for reproducibility; e.g. take the lowest index, etc. + nearest_idx = maxloc(wt(i, j, :), 1) + if (debug_log) print '("[INFO] WDR Nearest Neighbor algorithm index ",I0," buffer. npe=",I0)', nearest_idx, this_pe + + !! Fill in with weighted interpolation + !x(i,j,k) = & + ! wt(i,j,1)*buffer(ic, jc, k) + & + ! wt(i,j,2)*buffer(ic, jc+1,k) + & + ! wt(i,j,3)*buffer(ic+1,jc+1,k) + & + ! wt(i,j,4)*buffer(ic+1,jc, k) + + select case (nearest_idx) + case (1) + x(i,j,k) = buffer(ic, jc, k) + case (2) + x(i,j,k) = buffer(ic, jc+1,k) + case (3) + x(i,j,k) = buffer(ic+1,jc+1,k) + case (4) + x(i,j,k) = buffer(ic+1,jc, k) + case default + ! Fill in with first value and warn + x(i,j,k) = buffer(ic, jc, k) + if (debug_log) print '("[WARN] WDR Nearest Neighbor algorithm mismatch index ",I0," buffer. npe=",I0," x(",I0,",",I0,",",I0,")=",F12.5)', nearest_idx, this_pe, i, j, k, x(i,j,k) + end select + + if (debug_log) print '("[INFO] WDR FILL WEIGHTS ",A8," npe=",I0," (",I0,",",I0,",",I0,") : wt:",F12.5,F12.5,F12.5,F12.5)', dir_str, this_pe, i, j, k, wt(i,j,1), wt(i,j,2), wt(i,j,3), wt(i,j,4) + + if (debug_log) print '("[INFO] WDR after FILL nest from ",A8," buffer. npe=",I0," x(",I0,",",I0,",",I0,")=",F12.5)', dir_str, this_pe, i, j, k, x(i,j,k) + enddo + enddo + enddo + else + if (debug_log) print '("[INFO] WDR NIL BUFR print ",A8," buffer. npe=",I0)', dir_str, this_pe + endif + + end subroutine fill_nest_from_buffer_nearest_neighbor + + + subroutine fill_weight_grid(atm_wt, new_wt) + real, allocatable, intent(inout) :: atm_wt(:,:,:) + real, allocatable, intent(in) :: new_wt(:,:,:) + + integer :: x,y,z,n + integer :: this_pe + + this_pe = mpp_pe() + + do n=1,3 + if (lbound(atm_wt, n) .ne. 
lbound(new_wt, n)) then + print '("[ERROR] WDR fill_weight_grid lbound mismatch fv_moving_nest.F90 npe=",I0," n=",I0, I0, I0)', this_pe, n, lbound(atm_wt, n), lbound(new_wt, n) + stop + endif + if (ubound(atm_wt, n) .ne. ubound(new_wt, n)) then + print '("[ERROR] WDR fill_weight_grid ubound mismatch fv_moving_nest.F90 npe=",I0," n=",I0, I0, I0)', this_pe, n, ubound(atm_wt, n), ubound(new_wt, n) + stop + endif + enddo + + if (debug_log) print '("[INFO] WDR running fill_weight_grid fv_moving_nest.F90 npe=",I0)', this_pe + do x = lbound(atm_wt,1),ubound(atm_wt,1) + do y = lbound(atm_wt,2),ubound(atm_wt,2) + do z = 1,4 + atm_wt(x,y,z) = new_wt(x,y,z) + enddo + enddo + enddo + + end subroutine fill_weight_grid + + + !================================================================================================== + ! + ! Array Checking Section + ! + !================================================================================================== + + subroutine check_array_r4_2d(array, this_pe, var_name, min_range, max_range) + real*4, intent(in), allocatable :: array(:,:) + integer, intent(in) :: this_pe + character(len=*), intent(in) :: var_name + real, intent(in) :: min_range, max_range + + integer :: i,j + integer :: num_invalid + integer :: num_valid + real :: eps = 0.0001 + real :: invalid_last + + invalid_last = 0.0 + + if (allocated(array)) then + + print '("[INFO] WDR 2Darray allocated npe=",I0," ",A32,"(",I4,":",I4,",",I4,":",I4,")")', this_pe, var_name, lbound(array,1), ubound(array,1), lbound(array,2), ubound(array,2) + + num_invalid = 0 + num_valid = 0 + + do i = lbound(array,1), ubound(array,1) + do j =lbound(array,2), ubound(array,2) + if (array(i,j) < min_range - eps) then + num_invalid = num_invalid + 1 + invalid_last = array(i,j) + elseif (array(i,j) > max_range + eps) then + num_invalid = num_invalid + 1 + invalid_last = array(i,j) + else + num_valid = num_valid + 1 + endif + enddo + enddo + + if (num_invalid > 0 ) then + print '("[ERROR] WDR 2Darray invalid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0," last invalid=",E12.5)', this_pe, var_name, num_invalid, num_valid, invalid_last + else + print '("[INFO] WDR 2Darray all valid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + endif + + else + print '("[INFO] WDR 2Darray not allocated npe=",I0," ",A32)', this_pe, var_name + endif + + end subroutine check_array_r4_2d + + + subroutine check_array_r8_2d(array, this_pe, var_name, min_range, max_range) + real*8, intent(in), allocatable :: array(:,:) + integer, intent(in) :: this_pe + character(len=*), intent(in) :: var_name + real(kind=R_GRID), intent(in) :: min_range, max_range + + integer :: i,j + integer :: num_invalid + integer :: num_valid + real :: eps = 0.0001 + real :: invalid_last + + invalid_last = 0.0 + + if (allocated(array)) then + + print '("[INFO] WDR 2D64array allocated npe=",I0," ",A32,"(",I4,":",I4,",",I4,":",I4,")")', this_pe, var_name, lbound(array,1), ubound(array,1), lbound(array,2), ubound(array,2) + + num_invalid = 0 + num_valid = 0 + + do i = lbound(array,1), ubound(array,1) + do j =lbound(array,2), ubound(array,2) + if (array(i,j) < min_range - eps) then + num_invalid = num_invalid + 1 + invalid_last = array(i,j) + elseif (array(i,j) > max_range + eps) then + num_invalid = num_invalid + 1 + invalid_last = array(i,j) + else + num_valid = num_valid + 1 + endif + enddo + enddo + + if (num_invalid > 0 ) then + print '("[ERROR] WDR 2D64array invalid entries npe=",I0," ",A32," 
num_invalid=",I0," num_valid=",I0," last invalid=",E12.5)', this_pe, var_name, num_invalid, num_valid, invalid_last + else + print '("[INFO] WDR 2D64array all valid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + endif + + else + print '("[INFO] WDR 2D64array not allocated npe=",I0," ",A32)', this_pe, var_name + endif + + end subroutine check_array_r8_2d + + + subroutine check_local_array_r4_2d(array, this_pe, var_name, min_range, max_range) + real*4, intent(in) :: array(:,:) + integer, intent(in) :: this_pe + character(len=*), intent(in) :: var_name + real, intent(in) :: min_range, max_range + + integer :: i,j + integer :: num_invalid + integer :: num_valid + real :: eps = 0.0001 + + print '("[INFO] WDR 2DLarray allocated npe=",I0," ",A32,"(",I4,":",I4,",",I4,":",I4,")")', this_pe, var_name, lbound(array,1), ubound(array,1), lbound(array,2), ubound(array,2) + + num_invalid = 0 + num_valid = 0 + + do i = lbound(array,1), ubound(array,1) + do j =lbound(array,2), ubound(array,2) + if (array(i,j) < min_range - eps) then + num_invalid = num_invalid + 1 + elseif (array(i,j) > max_range + eps) then + num_invalid = num_invalid + 1 + else + num_valid = num_valid + 1 + endif + enddo + enddo + + if (num_invalid > 0 ) then + print '("[ERROR] WDR 2DLarray invalid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + else + print '("[INFO] WDR 2DLarray all valid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + endif + + end subroutine check_local_array_r4_2d + + subroutine check_local_array_r8_2d(array, this_pe, var_name, min_range, max_range) + real*8, intent(in) :: array(:,:) + integer, intent(in) :: this_pe + character(len=*), intent(in) :: var_name + real, intent(in) :: min_range, max_range + + integer :: i,j + integer :: num_invalid + integer :: num_valid + real :: eps = 0.0001 + + print '("[INFO] WDR 2DLarray allocated npe=",I0," ",A32,"(",I4,":",I4,",",I4,":",I4,")")', this_pe, var_name, lbound(array,1), ubound(array,1), lbound(array,2), ubound(array,2) + + num_invalid = 0 + num_valid = 0 + + do i = lbound(array,1), ubound(array,1) + do j =lbound(array,2), ubound(array,2) + if (array(i,j) < min_range - eps) then + num_invalid = num_invalid + 1 + elseif (array(i,j) > max_range + eps) then + num_invalid = num_invalid + 1 + else + num_valid = num_valid + 1 + endif + enddo + enddo + + if (num_invalid > 0 ) then + print '("[ERROR] WDR 2DLarray invalid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + else + print '("[INFO] WDR 2DLarray all valid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + endif + + end subroutine check_local_array_r8_2d + + subroutine check_array_r4_3d(array, this_pe, var_name, min_range, max_range) + real*4, intent(in), allocatable :: array(:,:,:) + integer, intent(in) :: this_pe + character(len=*), intent(in) :: var_name + real, intent(in) :: min_range, max_range + + integer :: i,j,k + integer :: num_invalid + integer :: num_valid + real :: eps = 0.0001 + + if (allocated(array)) then + + print '("[INFO] WDR 3Darray allocated npe=",I0," ",A32,"(",I4,":",I4,",",I4,":",I4,",",I4,":",I4,")")', this_pe, var_name, lbound(array,1), ubound(array,1), lbound(array,2), ubound(array,2), lbound(array,3), ubound(array,3) + + num_invalid = 0 + num_valid = 0 + + do i = lbound(array,1), ubound(array,1) + do j 
=lbound(array,2), ubound(array,2) + do k =lbound(array,3), ubound(array,3) + if (isnan(array(i,j,k))) then + num_invalid = num_invalid + 1 + elseif (array(i,j,k) < min_range - eps) then + num_invalid = num_invalid + 1 + elseif (array(i,j,k) > max_range + eps) then + num_invalid = num_invalid + 1 + else + num_valid = num_valid + 1 + endif + enddo + enddo + enddo + + if (num_invalid > 0 ) then + print '("[ERROR] WDR 3Darray invalid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + else + print '("[INFO] WDR 3Darray all valid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + endif + + else + print '("[INFO] WDR 3Darray not allocated npe=",I0," ",A32)', this_pe, var_name + endif + + end subroutine check_array_r4_3d + + subroutine check_array_r8_3d(array, this_pe, var_name, min_range, max_range) + real*8, intent(in), allocatable :: array(:,:,:) + integer, intent(in) :: this_pe + character(len=*), intent(in) :: var_name + real, intent(in) :: min_range, max_range + + integer :: i,j,k + integer :: num_invalid + integer :: num_valid + real :: eps = 0.0001 + + if (allocated(array)) then + + print '("[INFO] WDR 3Darray allocated npe=",I0," ",A32,"(",I4,":",I4,",",I4,":",I4,",",I4,":",I4,")")', this_pe, var_name, lbound(array,1), ubound(array,1), lbound(array,2), ubound(array,2), lbound(array,3), ubound(array,3) + + num_invalid = 0 + num_valid = 0 + + do i = lbound(array,1), ubound(array,1) + do j =lbound(array,2), ubound(array,2) + do k =lbound(array,3), ubound(array,3) + if (isnan(array(i,j,k))) then + num_invalid = num_invalid + 1 + elseif (array(i,j,k) < min_range - eps) then + num_invalid = num_invalid + 1 + elseif (array(i,j,k) > max_range + eps) then + num_invalid = num_invalid + 1 + else + num_valid = num_valid + 1 + endif + enddo + enddo + enddo + + if (num_invalid > 0 ) then + print '("[ERROR] WDR 3Darray invalid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + else + print '("[INFO] WDR 3Darray all valid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + endif + + else + print '("[INFO] WDR 3Darray not allocated npe=",I0," ",A32)', this_pe, var_name + endif + + end subroutine check_array_r8_3d + + subroutine check_local_array_r4_3d(array, this_pe, var_name, min_range, max_range) + real*4, intent(in) :: array(:,:,:) + integer, intent(in) :: this_pe + character(len=*), intent(in) :: var_name + real, intent(in) :: min_range, max_range + + integer :: i,j,k + integer :: num_invalid + integer :: num_valid + real :: eps = 0.0001 + + print '("[INFO] WDR 3DLarray bounds npe=",I0," ",A32,"(",I4,":",I4,",",I4,":",I4,",",I4,":",I4,")")', this_pe, var_name, lbound(array,1), ubound(array,1), lbound(array,2), ubound(array,2), lbound(array,3), ubound(array,3) + + num_invalid = 0 + num_valid = 0 + + do i = lbound(array,1), ubound(array,1) + do j =lbound(array,2), ubound(array,2) + do k =lbound(array,3), ubound(array,3) + if (isnan(array(i,j,k))) then + num_invalid = num_invalid + 1 + elseif (array(i,j,k) < min_range - eps) then + num_invalid = num_invalid + 1 + elseif (array(i,j,k) > max_range + eps) then + num_invalid = num_invalid + 1 + else + num_valid = num_valid + 1 + endif + enddo + enddo + enddo + + if (num_invalid > 0 ) then + print '("[ERROR] WDR 3DLarray invalid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + 
else + print '("[INFO] WDR 3DLarray all valid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + endif + + end subroutine check_local_array_r4_3d + + subroutine check_local_array_r8_3d(array, this_pe, var_name, min_range, max_range) + real*8, intent(in) :: array(:,:,:) + integer, intent(in) :: this_pe + character(len=*), intent(in) :: var_name + real, intent(in) :: min_range, max_range + + integer :: i,j,k + integer :: num_invalid + integer :: num_valid + real :: eps = 0.0001 + + print '("[INFO] WDR 3DLarray bounds npe=",I0," ",A32,"(",I4,":",I4,",",I4,":",I4,",",I4,":",I4,")")', this_pe, var_name, lbound(array,1), ubound(array,1), lbound(array,2), ubound(array,2), lbound(array,3), ubound(array,3) + + num_invalid = 0 + num_valid = 0 + + do i = lbound(array,1), ubound(array,1) + do j =lbound(array,2), ubound(array,2) + do k =lbound(array,3), ubound(array,3) + if (isnan(array(i,j,k))) then + num_invalid = num_invalid + 1 + elseif (array(i,j,k) < min_range - eps) then + num_invalid = num_invalid + 1 + elseif (array(i,j,k) > max_range + eps) then + num_invalid = num_invalid + 1 + else + num_valid = num_valid + 1 + endif + enddo + enddo + enddo + + if (num_invalid > 0 ) then + print '("[ERROR] WDR 3DLarray invalid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + else + print '("[INFO] WDR 3DLarray all valid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + endif + + end subroutine check_local_array_r8_3d + + + subroutine check_array_r4_4d(array, this_pe, var_name, min_range, max_range) + real*4, intent(in), allocatable :: array(:,:,:,:) + integer, intent(in) :: this_pe + character(len=*), intent(in) :: var_name + real, intent(in) :: min_range, max_range + + integer :: i,j,k,v + integer :: num_invalid + integer :: num_valid + real :: eps = 0.0001 + + if (allocated(array)) then + + print '("[INFO] WDR 4Darray allocated npe=",I0," ",A32,"(",I4,":",I4,",",I4,":",I4,",",I4,":",I4,",",I4,":",I4,")")', this_pe, var_name, lbound(array,1), ubound(array,1), lbound(array,2), ubound(array,2), lbound(array,3), ubound(array,3), lbound(array,4), ubound(array,4) + + num_invalid = 0 + num_valid = 0 + + do i = lbound(array,1), ubound(array,1) + do j =lbound(array,2), ubound(array,2) + do k =lbound(array,3), ubound(array,3) + do v =lbound(array,4), ubound(array,4) + if (isnan(array(i,j,k,v))) then + num_invalid = num_invalid + 1 + elseif (array(i,j,k,v) < min_range - eps) then + num_invalid = num_invalid + 1 + elseif (array(i,j,k,v) > max_range + eps) then + num_invalid = num_invalid + 1 + else + num_valid = num_valid + 1 + endif + enddo + enddo + enddo + enddo + + if (num_invalid > 0 ) then + print '("[ERROR] WDR 4Darray invalid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + else + print '("[INFO] WDR 4Darray all valid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + endif + else + print '("[INFO] WDR 4Darray not allocated npe=",I0," ",A32)', this_pe, var_name + endif + end subroutine check_array_r4_4d + + + subroutine check_array_r8_4d(array, this_pe, var_name, min_range, max_range) + real*8, intent(in), allocatable :: array(:,:,:,:) + integer, intent(in) :: this_pe + character(len=*), intent(in) :: var_name + real, intent(in) :: min_range, max_range + + integer :: i,j,k,v + integer :: num_invalid + integer :: num_valid + 
real :: eps = 0.0001 + + if (allocated(array)) then + + print '("[INFO] WDR 4Darray allocated npe=",I0," ",A32,"(",I4,":",I4,",",I4,":",I4,",",I4,":",I4,",",I4,":",I4,")")', this_pe, var_name, lbound(array,1), ubound(array,1), lbound(array,2), ubound(array,2), lbound(array,3), ubound(array,3), lbound(array,4), ubound(array,4) + + num_invalid = 0 + num_valid = 0 + + do i = lbound(array,1), ubound(array,1) + do j =lbound(array,2), ubound(array,2) + do k =lbound(array,3), ubound(array,3) + do v =lbound(array,4), ubound(array,4) + if (isnan(array(i,j,k,v))) then + num_invalid = num_invalid + 1 + elseif (array(i,j,k,v) < min_range - eps) then + num_invalid = num_invalid + 1 + elseif (array(i,j,k,v) > max_range + eps) then + num_invalid = num_invalid + 1 + else + num_valid = num_valid + 1 + endif + enddo + enddo + enddo + enddo + + if (num_invalid > 0 ) then + print '("[ERROR] WDR 4Darray invalid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + else + print '("[INFO] WDR 4Darray all valid entries npe=",I0," ",A32," num_invalid=",I0," num_valid=",I0)', this_pe, var_name, num_invalid, num_valid + endif + else + print '("[INFO] WDR 4Darray not allocated npe=",I0," ",A32)', this_pe, var_name + endif + end subroutine check_array_r8_4d + + + !================================================================================================== + ! + ! Debugging Function Section + ! + !================================================================================================== + + subroutine grid_equal(grid1, grid2, tag, this_pe, is_equal) + real, allocatable, intent(in) :: grid1(:,:,:) + real, allocatable, intent(in) :: grid2(:,:,:) + character(len=*), intent(in) :: tag + integer, intent(in) :: this_pe + logical, intent(out) :: is_equal + + integer :: x,y,z + + real :: pi = 4 * atan(1.0d0) + real :: rad2deg + + rad2deg = 180.0 / pi + + is_equal = .true. + + do x=1,3 + if (lbound(grid1,x) /= lbound(grid2,x)) then + print '("[ERROR] WDR grid_equal ",A16," npe=",I0," lbound mismatch ",I0, I0,I0)', tag, x, lbound(grid1,x), lbound(grid2,x) + is_equal = .false. + endif + if (ubound(grid1,x) /= ubound(grid2,x)) then + print '("[ERROR] WDR grid_equal ",A16," npe=",I0," ubound mismatch ",I0, I0,I0)', tag, x, ubound(grid1,x), ubound(grid2,x) + is_equal = .false. + endif + enddo + + if (is_equal) then + do x=lbound(grid1,1), ubound(grid1,1) + do y=lbound(grid1,2), ubound(grid1,2) + do z=lbound(grid1,3), ubound(grid1,3) + if ( abs(grid1(x,y,z) - grid2(x,y,z)) > 0.0001 ) then + print '("[ERROR] WDR grid_equal ",A16," npe=",I0," DEG value mismatch at (",I0,",",I0,",",I0,") ",F15.11, " ",F15.11, " ",F15.11)', tag, this_pe, x, y, z, grid1(x,y,z)*rad2deg, grid2(x,y,z)*rad2deg, grid1(x,y,z)*rad2deg - grid2(x,y,z)*rad2deg + + print '("[ERROR] WDR grid_equal ",A16," npe=",I0," RAD value mismatch at (",I0,",",I0,",",I0,") ",F15.11, " ",F15.11, " ",F15.11)', tag, this_pe, x, y, z, grid1(x,y,z), grid2(x,y,z), grid1(x,y,z) - grid2(x,y,z) + is_equal = .false. 
+ else + print '("[INFO] WDR grid_equal ",A16," npe=",I0," DEG value match at (",I0,",",I0,",",I0,") ",F15.11, " ",F15.11, " ",F15.11)', tag, this_pe, x, y, z, grid1(x,y,z)*rad2deg, grid2(x,y,z)*rad2deg, grid1(x,y,z)*rad2deg - grid2(x,y,z)*rad2deg + + print '("[INFO] WDR grid_equal ",A16," npe=",I0," RAD value match at (",I0,",",I0,",",I0,") ",F15.11, " ",F15.11, " ",F15.11)', tag, this_pe, x, y, z, grid1(x,y,z), grid2(x,y,z), grid1(x,y,z) - grid2(x,y,z) + endif + enddo + enddo + enddo + endif + + if (is_equal) then + print '("[INFO] WDR grid_equal ",A16," npe=",I0," MATCH.")', tag, this_pe + else + print '("[ERROR] WDR grid_equal ",A16," npe=",I0," MISMATCH.")', tag, this_pe + endif + + end subroutine grid_equal + + + subroutine show_atm_grids(Atm, n) + type(fv_atmos_type), allocatable, intent(in) :: Atm(:) + integer, intent(in) :: n + + integer :: x,y + real(kind=R_GRID) :: pi = 4 * atan(1.0d0) + real :: pi180 + real :: rad2deg, deg2rad + + pi180 = pi / 180.0 + deg2rad = pi / 180.0 + rad2deg = 1.0 / pi180 + + print *, "[INFO] WDR MV_NST2 shape(Atm(1)%grid_global)=", shape(Atm(1)%grid_global) + print '("[INFO] WDR MV_NST2 bounds1 (Atm(1)%grid_global)=",I0,"-",I0)', lbound(Atm(1)%grid_global,1), ubound(Atm(1)%grid_global,1) + print '("[INFO] WDR MV_NST2 bounds2 (Atm(1)%grid_global)=",I0,"-",I0)', lbound(Atm(1)%grid_global,2), ubound(Atm(1)%grid_global,2) + print '("[INFO] WDR MV_NST2 bounds3 (Atm(1)%grid_global)=",I0,"-",I0)', lbound(Atm(1)%grid_global,3), ubound(Atm(1)%grid_global,3) + print '("[INFO] WDR MV_NST2 bounds4 (Atm(1)%grid_global)=",I0,"-",I0)', lbound(Atm(1)%grid_global,4), ubound(Atm(1)%grid_global,4) + + print *, "[INFO] WDR MV_NST2 shape(Atm(n)%grid_global)=", shape(Atm(n)%grid_global) + print '("[INFO] WDR MV_NST2 bounds1 (Atm(n)%grid_global)=",I0,"-",I0)', lbound(Atm(n)%grid_global,1), ubound(Atm(n)%grid_global,1) + print '("[INFO] WDR MV_NST2 bounds2 (Atm(n)%grid_global)=",I0,"-",I0)', lbound(Atm(n)%grid_global,2), ubound(Atm(n)%grid_global,2) + print '("[INFO] WDR MV_NST2 bounds3 (Atm(n)%grid_global)=",I0,"-",I0)', lbound(Atm(n)%grid_global,3), ubound(Atm(n)%grid_global,3) + print '("[INFO] WDR MV_NST2 bounds4 (Atm(n)%grid_global)=",I0,"-",I0)', lbound(Atm(n)%grid_global,4), ubound(Atm(n)%grid_global,4) + + print *, "[INFO] WDR MV_NST2 shape(Atm(n)%gridstruct%grid)=", shape(Atm(n)%gridstruct%grid) + print '("[INFO] WDR MV_NST2 bounds1 (Atm(n)%gridstruct%grid)=",I0,"-",I0)', lbound(Atm(n)%gridstruct%grid,1), ubound(Atm(n)%gridstruct%grid,1) + print '("[INFO] WDR MV_NST2 bounds2 (Atm(n)%gridstruct%grid)=",I0,"-",I0)', lbound(Atm(n)%gridstruct%grid,2), ubound(Atm(n)%gridstruct%grid,2) + print '("[INFO] WDR MV_NST2 bounds3 (Atm(n)%gridstruct%grid)=",I0,"-",I0)', lbound(Atm(n)%gridstruct%grid,3), ubound(Atm(n)%gridstruct%grid,3) + + print *, "[INFO] WDR MV_NST2 shape(Atm(n)%gridstruct%agrid)=", shape(Atm(n)%gridstruct%agrid) + print '("[INFO] WDR MV_NST2 bounds1 (Atm(n)%gridstruct%agrid)=",I0,"-",I0)', lbound(Atm(n)%gridstruct%agrid,1), ubound(Atm(n)%gridstruct%agrid,1) + print '("[INFO] WDR MV_NST2 bounds2 (Atm(n)%gridstruct%agrid)=",I0,"-",I0)', lbound(Atm(n)%gridstruct%agrid,2), ubound(Atm(n)%gridstruct%agrid,2) + print '("[INFO] WDR MV_NST2 bounds3 (Atm(n)%gridstruct%agrid)=",I0,"-",I0)', lbound(Atm(n)%gridstruct%agrid,3), ubound(Atm(n)%gridstruct%agrid,3) + + x = lbound(Atm(n)%gridstruct%agrid,1) + y = lbound(Atm(n)%gridstruct%agrid,2) + print '("[INFO] WDR GRD_SHOa atmosphere.F90 Atm(n)%agrid(",I0,",",I0,")=",F10.5, F10.5)', x, y, 
Atm(n)%gridstruct%agrid(x,y,2)*rad2deg, Atm(n)%gridstruct%agrid(x,y,1)*rad2deg + + x = ubound(Atm(n)%gridstruct%agrid,1) + y = ubound(Atm(n)%gridstruct%agrid,2) + print '("[INFO] WDR GRD_SHOb atmosphere.F90 Atm(n)%agrid(",I0,",",I0,")=",F10.5, F10.5)', x, y, Atm(n)%gridstruct%agrid(x,y,2)*rad2deg, Atm(n)%gridstruct%agrid(x,y,1)*rad2deg + + print *, "[INFO] WDR MV_NST2 shape(Atm(n)%gridstruct%grid_64)=", shape(Atm(n)%gridstruct%grid_64) + print '("[INFO] WDR MV_NST2 bounds1 (Atm(n)%gridstruct%grid_64)=",I0,"-",I0)', lbound(Atm(n)%gridstruct%grid_64,1), ubound(Atm(n)%gridstruct%grid_64,1) + print '("[INFO] WDR MV_NST2 bounds2 (Atm(n)%gridstruct%grid_64)=",I0,"-",I0)', lbound(Atm(n)%gridstruct%grid_64,2), ubound(Atm(n)%gridstruct%grid_64,2) + print '("[INFO] WDR MV_NST2 bounds3 (Atm(n)%gridstruct%grid_64)=",I0,"-",I0)', lbound(Atm(n)%gridstruct%grid_64,3), ubound(Atm(n)%gridstruct%grid_64,3) + + print *, "[INFO] WDR MV_NST2 shape(Atm(n)%gridstruct%agrid_64)=", shape(Atm(n)%gridstruct%agrid_64) + print '("[INFO] WDR MV_NST2 bounds1 (Atm(n)%gridstruct%agrid_64)=",I0,"-",I0)', lbound(Atm(n)%gridstruct%agrid_64,1), ubound(Atm(n)%gridstruct%agrid_64,1) + print '("[INFO] WDR MV_NST2 bounds2 (Atm(n)%gridstruct%agrid_64)=",I0,"-",I0)', lbound(Atm(n)%gridstruct%agrid_64,2), ubound(Atm(n)%gridstruct%agrid_64,2) + print '("[INFO] WDR MV_NST2 bounds3 (Atm(n)%gridstruct%agrid_64)=",I0,"-",I0)', lbound(Atm(n)%gridstruct%agrid_64,3), ubound(Atm(n)%gridstruct%agrid_64,3) + + end subroutine show_atm_grids + + + subroutine show_tile_geo(tile_geo, this_pe, var_name) + type(grid_geometry) :: tile_geo + integer, intent(in) :: this_pe + character(len=*), intent(in) :: var_name + + print '("[INFO] WDR 2Darray npe=",I0," ",A32, "nx=", I0," ny=", I0," nxp=",I0," nyp=",I0)', this_pe, var_name, tile_geo%nx, tile_geo%ny, tile_geo%nxp, tile_geo%nyp + + call check_array(tile_geo%lats, this_pe, var_name // "%lats", -90.0D0, 90.0D0) + call check_array(tile_geo%lons, this_pe, var_name // "%lons", -360.0D0, 360.0D0) + !call check_array(tile_geo%dx, this_pe, var_name // "%dx", 0.0, 1.0e9) + !call check_array(tile_geo%dy, this_pe, var_name // "%dy", 0.0, 1.0e9) + call check_array(tile_geo%area, this_pe, var_name // "%area", 0.0D0, 1.0D9) + + end subroutine show_tile_geo + + + subroutine show_atm_array4(tag, array, array_name, atm_n, this_pe) + character(len=*), intent(in) :: tag + real(kind=R_GRID), allocatable, dimension(:,:,:,:), intent(in) :: array + character(len=*), intent(in) :: array_name + integer, intent(in) :: atm_n, this_pe + + if (allocated(array)) then + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") neststruct%",A12,"(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', tag, this_pe, atm_n, trim(array_name), lbound(array, 1), ubound(array, 1), lbound(array, 2), ubound(array, 2), lbound(array, 3), ubound(array, 3), lbound(array, 4), ubound(array, 4) + + else + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") neststruct%",A12," is not allocated.")', tag, this_pe, trim(array_name) + endif + + end subroutine show_atm_array4 + + + subroutine show_atm_neststruct(tag, neststruct, atm_n, this_pe) + character(len=*), intent(in) :: tag + type(fv_nest_type), intent(in) :: neststruct + integer, intent(in) :: atm_n, this_pe + + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") neststruct%parent_tile=",I0," %refinement=",I0)', tag, this_pe, atm_n, neststruct%parent_tile, neststruct%refinement + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") neststruct%nested=",L1," %ioffset=",I0," 
%joffset=",I0)', tag, this_pe, atm_n, neststruct%nested, neststruct%ioffset, neststruct%joffset + + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") neststruct%nested=",L1," %isu=",I0," %ieu=",I0," %jsu=",I0," %jeu=",I0)', tag, this_pe, atm_n, neststruct%nested, neststruct%isu, neststruct%ieu, neststruct%jsu, neststruct%jeu + + ! WDR ind_update_h seems to have been removed in recent version of the dycore + ! if (allocated(neststruct%ind_update_h)) then + ! print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") neststruct%ind_update_h(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', tag, this_pe, atm_n, & + ! lbound(neststruct%ind_update_h,1), ubound(neststruct%ind_update_h,1), & + ! lbound(neststruct%ind_update_h,2), ubound(neststruct%ind_update_h,2), & + ! lbound(neststruct%ind_update_h,3), ubound(neststruct%ind_update_h,3) + ! + ! if (ubound(neststruct%ind_update_h,1) > lbound(neststruct%ind_update_h,1)) then + ! print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") val neststruct%ind_update_h(",I0,",",I0,",",I0,")=",I0)', tag, this_pe, atm_n, & + ! lbound(neststruct%ind_update_h,1), lbound(neststruct%ind_update_h,2), lbound(neststruct%ind_update_h,3), & + ! neststruct%ind_update_h(lbound(neststruct%ind_update_h,1), lbound(neststruct%ind_update_h,2), lbound(neststruct%ind_update_h,3)) + ! print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") val neststruct%ind_update_h(",I0,",",I0,",",I0,")=",I0)', tag, this_pe, atm_n, & + ! lbound(neststruct%ind_update_h,1), lbound(neststruct%ind_update_h,2), ubound(neststruct%ind_update_h,3), & + ! neststruct%ind_update_h(lbound(neststruct%ind_update_h,1), lbound(neststruct%ind_update_h,2), ubound(neststruct%ind_update_h,3)) + ! + ! print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") val neststruct%ind_update_h(",I0,",",I0,",",I0,")=",I0)', tag, this_pe, atm_n, & + ! lbound(neststruct%ind_update_h,1)+4, lbound(neststruct%ind_update_h,2)+4, lbound(neststruct%ind_update_h,3), & + ! neststruct%ind_update_h(lbound(neststruct%ind_update_h,1)+4, lbound(neststruct%ind_update_h,2)+4, lbound(neststruct%ind_update_h,3)) + ! print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") val neststruct%ind_update_h(",I0,",",I0,",",I0,")=",I0)', tag, this_pe, atm_n, & + ! lbound(neststruct%ind_update_h,1)+4, lbound(neststruct%ind_update_h,2)+4, ubound(neststruct%ind_update_h,3), & + ! neststruct%ind_update_h(lbound(neststruct%ind_update_h,1)+4, lbound(neststruct%ind_update_h,2)+4, ubound(neststruct%ind_update_h,3)) + ! + ! + ! endif + ! else + ! print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") neststruct%ind_update_h is not allocated.")', tag, this_pe, atm_n + ! endif + + ! WDR nest_domain_all appears to be obsolete in new dycore + !if (allocated(neststruct%nest_domain_all)) then + ! print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") neststruct%nest_domain_all(",I0,"-",I0,")")', tag, this_pe, atm_n, lbound(neststruct%nest_domain_all), ubound(neststruct%nest_domain_all) + !else + ! print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") neststruct%nest_domain_all is not allocated.")', tag, this_pe, atm_n + !endif + + ! 
WDR nest_domain has moved to fv_mp_mod.F90 as a global + !print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") neststruct%nest_domain%tile_fine=",I0," %tile_coarse=",I0)', tag, this_pe, atm_n, neststruct%nest_domain%tile_fine, neststruct%nest_domain%tile_coarse + + !print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") neststruct%nest_domain%istart_fine=",I0," %iend_fine=",I0)', tag, this_pe, atm_n, neststruct%nest_domain%istart_fine, neststruct%nest_domain%iend_fine + !print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") neststruct%nest_domain%jstart_fine=",I0," %jend_fine=",I0)', tag, this_pe, atm_n, neststruct%nest_domain%jstart_fine, neststruct%nest_domain%jend_fine + + !print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") neststruct%nest_domain%istart_coarse=",I0," %iend_coarse=",I0)', tag, this_pe, atm_n, neststruct%nest_domain%istart_coarse, neststruct%nest_domain%iend_coarse + !print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") neststruct%nest_domain%jstart_coarse=",I0," %jend_coarse=",I0)', tag, this_pe, atm_n, neststruct%nest_domain%jstart_coarse, neststruct%nest_domain%jend_coarse + + end subroutine show_atm_neststruct + + + subroutine show_atm_gridstruct(tag, gridstruct, atm_n, this_pe) + character(len=*), intent(in) :: tag + type(fv_grid_type), intent(in) :: gridstruct + integer, intent(in) :: atm_n, this_pe + + ! nested is a pointer. + if (associated(gridstruct%nested)) then + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") gridstruct%nested=",L1)', tag, this_pe, atm_n, gridstruct%nested + else + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") gridstruct%nested is not set.")', tag, this_pe, atm_n + endif + + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") gridstruct%cubed_sphere=",L1)', tag, this_pe, atm_n, gridstruct%cubed_sphere + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") gridstruct%have_north_pole=",L1)', tag, this_pe, atm_n, gridstruct%have_north_pole + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") gridstruct%have_south_pole=",L1)', tag, this_pe, atm_n, gridstruct%have_south_pole + if (allocated(gridstruct%agrid)) then + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") gridstruct%agrid(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', tag, this_pe, atm_n, lbound(gridstruct%agrid, 1), ubound(gridstruct%agrid, 1), lbound(gridstruct%agrid, 2), ubound(gridstruct%agrid, 2), lbound(gridstruct%agrid, 3), ubound(gridstruct%agrid, 3) + else + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") gridstruct%agrid is not allocated.")', tag, this_pe, atm_n + endif + + if (allocated(gridstruct%grid)) then + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") gridstruct%grid(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', tag, this_pe, atm_n, lbound(gridstruct%grid, 1), ubound(gridstruct%grid, 1), lbound(gridstruct%grid, 2), ubound(gridstruct%grid, 2), lbound(gridstruct%grid, 3), ubound(gridstruct%grid, 3) + else + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") gridstruct%grid is not allocated.")', tag, this_pe, atm_n + endif + + end subroutine show_atm_gridstruct + + + subroutine show_atm(tag, Atm, atm_n, this_pe) + implicit none + character(len=*), intent(in) :: tag + type(fv_atmos_type), intent(in) :: Atm + integer, intent(in) :: atm_n, this_pe + + integer is, ie, i + + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,")===============================================================")', tag, this_pe, atm_n + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") allocated=",L1," dummy=",L1)', tag, this_pe, atm_n, 
Atm%allocated, Atm%dummy + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") grid_number=",I0," ncnst=",I0," ng=",I0)', tag, this_pe, atm_n, Atm%grid_number, Atm%ncnst, Atm%ng + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") npx=",I0," npy=",I0," npz=",I0)', tag, this_pe, atm_n, Atm%npx, Atm%npy, Atm%npz + + if (allocated(Atm%pelist)) then + is = lbound(Atm%pelist, 1) + ie = ubound(Atm%pelist, 1) + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") pelist(",I0,"-",I0,")=",I0,"...",I0)', tag, this_pe, atm_n, is, ie, Atm%pelist(is), Atm%pelist(ie) + !do i = is, ie + ! print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") pelist(",I0,")=",I0)', tag, this_pe, atm_n, i, Atm%pelist(i) + !enddo + else + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") pelist is not allocated.")', tag, this_pe, atm_n + endif + + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") bd%(is-ie)=",I0,"-",I0,") (js-je)=",I0,"-",I0,")" )', tag, this_pe, atm_n, Atm%bd%is, Atm%bd%ie, Atm%bd%js, Atm%bd%je + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") bd%(isd-ied)=",I0,"-",I0,") (jsd-jed)=",I0,"-",I0,")" )', tag, this_pe, atm_n, Atm%bd%isd, Atm%bd%ied, Atm%bd%jsd, Atm%bd%jed + print '("[INFO] show_atm ",A8," npe=",I0," Atm(",I0,") bd%(isc-iec)=",I0,"-",I0,") (jsc-jec)=",I0,"-",I0,")" )', tag, this_pe, atm_n, Atm%bd%isc, Atm%bd%iec, Atm%bd%jsc, Atm%bd%jec + + call show_atm_neststruct(tag, Atm%neststruct, atm_n, this_pe) + call show_atm_gridstruct(tag, Atm%gridstruct, atm_n, this_pe) + call show_atm_array4(tag, Atm%grid_global, "grid_global", atm_n, this_pe) + + end subroutine show_atm + + + subroutine show_gridstruct(gridstruct, this_pe) + type(fv_grid_type), intent(in) :: gridstruct + integer, intent(in) :: this_pe + + !real(kind=R_GRID) :: pi = 4 * atan(1.0d0) + real :: pi = 4 * atan(1.0d0) + + call check_array(gridstruct%grid, this_pe, "SG gridstruct%grid", -2.0*pi, 2.0*pi) + call check_array(gridstruct%agrid, this_pe, "SG gridstruct%agrid", -2.0*pi, 2.0*pi) + + call check_array(gridstruct%area, this_pe, "SG gridstruct%area", 0.0, 1.0e12) + call check_array(gridstruct%area_c, this_pe, "SG gridstruct%area_c", 0.0, 1.0e12) + + call check_array(gridstruct%rarea, this_pe, "SG gridstruct%rarea", 0.0, 1.0e12) + call check_array(gridstruct%rarea_c, this_pe, "SG gridstruct%rarea_c", 0.0, 1.0e12) + + call check_array(gridstruct%sina, this_pe, "SG gridstruct%sina", -1.0, 1.0) + call check_array(gridstruct%cosa, this_pe, "SG gridstruct%cosa", -1.0, 1.0) + + call check_array(gridstruct%dx, this_pe, "SG gridstruct%dx", 0.0, 1.0e12) + call check_array(gridstruct%dy, this_pe, "SG gridstruct%dy", 0.0, 1.0e12) + + call check_array(gridstruct%dxc, this_pe, "SG gridstruct%dxc", 0.0, 1.0e12) + call check_array(gridstruct%dyc, this_pe, "SG gridstruct%dyc", 0.0, 1.0e12) + + call check_array(gridstruct%dxc_64, this_pe, "SG gridstruct%dxc_64", 0D0, 1.0D12) + call check_array(gridstruct%dyc_64, this_pe, "SG gridstruct%dyc_64", 0D0, 1.0D12) + + end subroutine show_gridstruct + + + subroutine show_nest_grid(Atm, this_pe, step_num) + type(fv_atmos_type), intent(inout) :: Atm + integer, intent(in) :: this_pe, step_num + + integer :: x,y + integer :: nhalo = 3 !! 
TODO get value from namelist + real :: crn_lat(4), crn_lon(4) + real(kind=R_GRID) :: pi = 4 * atan(1.0d0) + real :: pi180 + real :: rad2deg, deg2rad + + pi180 = pi / 180.0 + deg2rad = pi / 180.0 + rad2deg = 1.0 / pi180 + + print '("WDR NEST GRID bd, ",I0,",",I0," is,js=(",I0,":",I0,",",I0,":",I0,")" )', & + this_pe, step_num, Atm%bd%is, Atm%bd%ie, Atm%bd%js, Atm%bd%je + + print '("WDR NEST GRID bd, ",I0,",",I0," isd,jsd=(",I0,":",I0,",",I0,":",I0,")" )', & + this_pe, step_num, Atm%bd%isd, Atm%bd%ied, Atm%bd%jsd, Atm%bd%jed + + !do x = lbound(Atm%gridstruct%grid,1), ubound(Atm%gridstruct%grid,1) + ! do y = lbound(Atm%gridstruct%grid,2), ubound(Atm%gridstruct%grid,2) + ! print '("WDR NEST_GRID, ",I0,",",I0,",",I0,",",I0,",",F10.5,",",F10.5)', this_pe, step_num, x, y, & + ! Atm%gridstruct%grid(x,y,2) * rad2deg, Atm%gridstruct%grid(x,y,1) * rad2deg - 360.0 + ! enddo + !enddo + + ! Log the bounds of this PE's grid + + x = lbound(Atm%gridstruct%grid, 1) + y = lbound(Atm%gridstruct%grid, 2) + crn_lon(1) = Atm%gridstruct%grid(x,y,1) + crn_lat(1) = Atm%gridstruct%grid(x,y,2) + + x = ubound(Atm%gridstruct%grid, 1) + y = lbound(Atm%gridstruct%grid, 2) + crn_lon(2) = Atm%gridstruct%grid(x,y,1) + crn_lat(2) = Atm%gridstruct%grid(x,y,2) + + x = ubound(Atm%gridstruct%grid, 1) + y = ubound(Atm%gridstruct%grid, 2) + crn_lon(3) = Atm%gridstruct%grid(x,y,1) + crn_lat(3) = Atm%gridstruct%grid(x,y,2) + + x = lbound(Atm%gridstruct%grid, 1) + y = ubound(Atm%gridstruct%grid, 2) + crn_lon(4) = Atm%gridstruct%grid(x,y,1) + crn_lat(4) = Atm%gridstruct%grid(x,y,2) + + crn_lon(:) = crn_lon(:) * rad2deg + crn_lat(:) = crn_lat(:) * rad2deg + + do x=1,4 + if (crn_lon(x) .gt. 180.0) then + crn_lon(x) = crn_lon(x) - 360.0 + endif + enddo + + print '("PLOT",I0,"_data_corners,",I4.4 ,",",F10.5,",",F10.5,",",F10.5,",",F10.5,",",F10.5,",",F10.5,",",F10.5,",",F10.5)', & + step_num, this_pe, crn_lat(1), crn_lon(1), crn_lat(2), crn_lon(2), crn_lat(3), crn_lon(3), crn_lat(4), crn_lon(4) + + ! Assume that nhalo is the same as all the other halo values + x = lbound(Atm%gridstruct%grid, 1) + nhalo + y = lbound(Atm%gridstruct%grid, 2) + nhalo + crn_lon(1) = Atm%gridstruct%grid(x,y,1) + crn_lat(1) = Atm%gridstruct%grid(x,y,2) + + x = ubound(Atm%gridstruct%grid, 1) - nhalo + y = lbound(Atm%gridstruct%grid, 2) + nhalo + crn_lon(2) = Atm%gridstruct%grid(x,y,1) + crn_lat(2) = Atm%gridstruct%grid(x,y,2) + + x = ubound(Atm%gridstruct%grid, 1) - nhalo + y = ubound(Atm%gridstruct%grid, 2) - nhalo + crn_lon(3) = Atm%gridstruct%grid(x,y,1) + crn_lat(3) = Atm%gridstruct%grid(x,y,2) + + x = lbound(Atm%gridstruct%grid, 1) + nhalo + y = ubound(Atm%gridstruct%grid, 2) - nhalo + crn_lon(4) = Atm%gridstruct%grid(x,y,1) + crn_lat(4) = Atm%gridstruct%grid(x,y,2) + + crn_lon(:) = crn_lon(:) * rad2deg + crn_lat(:) = crn_lat(:) * rad2deg + + do x=1,4 + if (crn_lon(x) .gt. 
180.0) then + crn_lon(x) = crn_lon(x) - 360.0 + endif + enddo + + print '("PLOT",I0,"_compute_corners,",I4.4 ,",",F10.5,",",F10.5,",",F10.5,",",F10.5,",",F10.5,",",F10.5,",",F10.5,",",F10.5)', & + step_num, this_pe, crn_lat(1), crn_lon(1), crn_lat(2), crn_lon(2), crn_lat(3), crn_lon(3), crn_lat(4), crn_lon(4) + + end subroutine show_nest_grid + + + subroutine validate_hires_parent(fp_super_tile_geo, grid, agrid, x_refine, y_refine, ioffset, joffset) + type(grid_geometry), intent(in) :: fp_super_tile_geo + real, allocatable, intent(in), dimension(:,:,:) :: grid, agrid + integer, intent(in) :: x_refine, y_refine, ioffset, joffset + + real, allocatable :: local_grid(:,:,:), local_agrid(:,:,:) + real(kind=R_GRID), allocatable :: local_agrid64(:,:,:) + logical :: is_equal + integer :: x, y, z, this_pe, stagger + real(kind=R_GRID) :: pi = 4 * atan(1.0d0) + real :: rad2deg + + rad2deg = 180.0 / pi + this_pe = mpp_pe() + + !! Begin test creating of grid and agrid aligned with initial nest + !! This is for testing/validation, and will not be needed in operations + + ! Allocate grid/agrid to proper size/bounds + + allocate(local_grid(lbound(grid,1) : ubound(grid,1), & + lbound(grid,2) : ubound(grid,2), & + lbound(grid,3) : ubound(grid,3))) + + allocate(local_agrid(lbound(agrid,1) : ubound(agrid,1), & + lbound(agrid,2) : ubound(agrid,2), & + lbound(agrid,3) : ubound(agrid,3))) + + allocate(local_agrid64(lbound(agrid,1) : ubound(agrid,1), & + lbound(agrid,2) : ubound(agrid,2), & + lbound(agrid,3) : ubound(agrid,3))) + + ! Fill in values from high resolution, full panel, supergrid + + stagger = CORNER + call fill_grid_from_supergrid(local_grid, stagger, fp_super_tile_geo, ioffset, joffset, & + x_refine, y_refine) + stagger = CENTER + call fill_grid_from_supergrid(local_agrid, stagger, fp_super_tile_geo, ioffset, joffset, & + x_refine, y_refine) + stagger = CENTER + call fill_grid_from_supergrid(local_agrid64, stagger, fp_super_tile_geo, ioffset, joffset, & + x_refine, y_refine) + + ! Verify that values are equivalent to the unmodified values in gridstruct + + call grid_equal(local_grid, grid, "GRID", this_pe, is_equal) + call grid_equal(local_agrid, agrid, "AGRID", this_pe, is_equal) + + do x = lbound(grid,1), lbound(grid,1)+4 + do y = lbound(grid,2), lbound(grid,2)+4 + do z = lbound(grid,3), ubound(grid,3) + print '("[INFO] WDR grid_comp ",A16," npe=",I0," DEG value at (",I0,",",I0,",",I0,") ",F15.11, " ",F15.11, " ",F15.11)', "GRID", this_pe, x, y, z, local_grid(x,y,z)*rad2deg, grid(x,y,z)*rad2deg, local_grid(x,y,z)*rad2deg - grid(x,y,z)*rad2deg + print '("[INFO] WDR grid_comp ",A16," npe=",I0," RAD value at (",I0,",",I0,",",I0,") ",F15.11, " ",F15.11, " ",F15.11)', "GRID", this_pe, x, y, z, local_grid(x,y,z), grid(x,y,z), local_grid(x,y,z) - grid(x,y,z) + enddo + enddo + enddo + + do x = lbound(agrid,1), lbound(agrid,1)+4 + do y = lbound(agrid,2), lbound(agrid,2)+4 + do z = lbound(agrid,3), ubound(agrid,3) + print '("[INFO] WDR agrid_comp ",A16," npe=",I0," DEG value at (",I0,",",I0,",",I0,") ",F15.11, " ",F15.11, " ",F15.11)', "AGRID", this_pe, x, y, z, local_agrid(x,y,z)*rad2deg, agrid(x,y,z)*rad2deg, local_agrid(x,y,z)*rad2deg - agrid(x,y,z)*rad2deg + print '("[INFO] WDR agrid_comp ",A16," npe=",I0," RAD value at (",I0,",",I0,",",I0,") ",F15.11, " ",F15.11, " ",F15.11)', "AGRID", this_pe, x, y, z, local_agrid(x,y,z), agrid(x,y,z), local_agrid(x,y,z) - agrid(x,y,z) + enddo + enddo + enddo + + ! 
Validate at the end
+ !call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y)
+
+ end subroutine validate_hires_parent
+
+#endif ! MOVING_NEST
+
+end module fv_moving_nest_utils_mod
diff --git a/moving_nest/fv_tracker.F90 b/moving_nest/fv_tracker.F90
new file mode 100644
index 000000000..f00c9da57
--- /dev/null
+++ b/moving_nest/fv_tracker.F90
@@ -0,0 +1,1837 @@
+!***********************************************************************
+!* GNU Lesser General Public License
+!*
+!* This file is part of the FV3 dynamical core.
+!*
+!* The FV3 dynamical core is free software: you can redistribute it
+!* and/or modify it under the terms of the
+!* GNU Lesser General Public License as published by the
+!* Free Software Foundation, either version 3 of the License, or
+!* (at your option) any later version.
+!*
+!* The FV3 dynamical core is distributed in the hope that it will be
+!* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+!* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+!* See the GNU General Public License for more details.
+!*
+!* You should have received a copy of the GNU Lesser General Public
+!* License along with the FV3 dynamical core.
+!* If not, see <http://www.gnu.org/licenses/>.
+!***********************************************************************
+
+!>@brief The module 'fv_tracker' contains the internal GFDL/NCEP vortex tracker
+!adapted from the HWRF internal vortex tracker, mainly based on the GFDL vortex
+!tracker.
+
+module fv_tracker_mod
+
+#ifdef MOVING_NEST
+#include <fms_platform.h>
+
+ use constants_mod, only: pi=>pi_8, rad_to_deg, deg_to_rad
+ use time_manager_mod, only: time_type, get_time, set_time, operator(+), &
+ operator(-), operator(/), time_type_to_real
+ use mpp_mod, only: mpp_error, stdout, FATAL, WARNING, NOTE, &
+ mpp_root_pe, mpp_npes, mpp_pe, mpp_chksum, &
+ mpp_get_current_pelist, &
+ mpp_set_current_pelist, mpp_sync
+ use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain
+ use fv_arrays_mod, only: fv_atmos_type, R_GRID
+ use fv_diagnostics_mod, only: fv_diag_init, fv_diag, fv_time, prt_maxmin, prt_height
+ use fv_diagnostics_mod, only: interpolate_vertical, interpolate_z, get_vorticity, &
+ get_height_field, get_pressure_given_height, &
+ get_height_given_pressure, cs3_interpolator
+ use fv_mp_mod, only: is_master, &
+ mp_reduce_sum, mp_reduce_max, mp_reduce_min, &
+ mp_reduce_minval, mp_reduce_maxval, &
+ mp_reduce_minloc, mp_reduce_maxloc
+
+ use fv_moving_nest_types_mod, only: Moving_nest
+
+ implicit none
+ private
+ public :: fv_tracker_init, fv_tracker_center, fv_tracker_post_move
+ public :: fv_diag_tracker, allocate_tracker, deallocate_tracker
+ public :: Tracker
+
+ integer, parameter :: maxtp=11 ! number of tracker parameters
+
+ real, parameter :: invE=0.36787944117 ! 1/e
+ real, parameter :: searchrad_6=250.0 ! km - ignore data more than this far from domain center
+ real, parameter :: searchrad_7=200.0 ! km - ignore data more than this far from domain center
+ real, parameter :: uverrmax=225.0 ! For use in get_uv_guess
+ real, parameter :: ecircum=40030.2 ! Earth's circumference (km) using erad=6371.e3
+ real, parameter :: rads_vmag=120.0 ! max search radius for wind minimum
+ real, parameter :: err_reg_init=300.0 ! max err at initial time (km)
+ real, parameter :: err_reg_max=225.0 ! max err at other times (km)
+
+ real, parameter :: errpmax=485.0 ! max stddev of track parameters
+ real, parameter :: errpgro=1.25 ! stddev multiplier
+
+ real, parameter :: max_wind_search_radius=searchrad_7 ! 
max radius for vmax search + real, parameter :: min_mlsp_search_radius=searchrad_7 ! max radius for pmin search + + real, parameter :: km2nmi=0.539957, kn2mps=0.514444, mps2kn=1./kn2mps + + + type fv_tracker_type + ! For internal vortex tracker + real, _ALLOCATABLE :: vort850(:,:) _NULL !< relative vorticity at 850 mb + real, _ALLOCATABLE :: spd850(:,:) _NULL !< wind speed at 850 mb + real, _ALLOCATABLE :: u850(:,:) _NULL !< ua at 850 mb + real, _ALLOCATABLE :: v850(:,:) _NULL !< va at 850 mb + real, _ALLOCATABLE :: z850(:,:) _NULL !< geopotential height at 850 mb + real, _ALLOCATABLE :: vort700(:,:) _NULL !< relative vorticity at 700 mb + real, _ALLOCATABLE :: spd700(:,:) _NULL !< wind speed at 700 mb + real, _ALLOCATABLE :: u700(:,:) _NULL !< ua at 700 mb + real, _ALLOCATABLE :: v700(:,:) _NULL !< va at 700 mb + real, _ALLOCATABLE :: z700(:,:) _NULL !< geopotential height at 700 mb + real, _ALLOCATABLE :: vort10m(:,:) _NULL !< relative vorticity at 10-m + real, _ALLOCATABLE :: spd10m(:,:) _NULL !< wind speed at 10-m + real, _ALLOCATABLE :: u10m(:,:) _NULL !< ua at 10-m + real, _ALLOCATABLE :: v10m(:,:) _NULL !< va at 10-m + real, _ALLOCATABLE :: slp(:,:) _NULL !< sea level pressure + + ! For inline NCEP tracker + real, _ALLOCATABLE :: distsq(:,:) _NULL !< Square of distance from nest center + real, _ALLOCATABLE :: tracker_distsq(:,:) _NULL !< Square of distance from tracker fix location + real, _ALLOCATABLE :: tracker_angle(:,:) _NULL !< Angle to storm center (East=0, North=pi/2, etc.) + real, _ALLOCATABLE :: tracker_fixes(:,:) _NULL !< Tracker fix information for debugging + + logical :: track_have_guess = .false. !< Is a first guess available? + real :: track_guess_lat !< First guess latitude + real :: track_guess_lon !< First guess longitude + real :: tracker_edge_dist !< Distance from storm center to domain edge + + real :: track_stderr_m1 = -99.9 !< Standard deviation of tracker centers one hour ago + real :: track_stderr_m2 = -99.9 !< Standard deviation of tracker centers two hours ago + real :: track_stderr_m3 = -99.9 !< Standard deviation of tracker centers three hours ago + + integer :: track_last_hour=0 !< Last completed forecast hour + + real :: tracker_fixlon = -999.0 !< Storm fix longitude according to inline NCEP tracker + real :: tracker_fixlat = -999.0 !< Storm fix latitude according to inline NCEP tracker + integer :: tracker_ifix = -99 !< Storm fix i location + integer :: tracker_jfix = -99 !< Storm fix j location + + real :: tracker_rmw = -99. !< Storm RMW according to inline NCEP tracker + real :: tracker_pmin = -99999. !< Storm min MSLP according to inline NCEP tracker + real :: tracker_vmax =-99. !< Storm max 10m wind according to inline NCEP tracker + + logical :: tracker_havefix = .false. !< True = storm fix locations are valid + logical :: tracker_gave_up = .false. !< True = inline tracker gave up on tracking the storm + end type fv_tracker_type + + type(fv_tracker_type), _ALLOCATABLE, target :: Tracker(:) + integer :: n = 2 ! TODO allow to vary for multiple nests + +contains + + subroutine fv_tracker_init(length) + ! Initialize tracker variables in the Atm structure. + implicit none + integer, intent(in) :: length + + integer :: i + + call mpp_error(NOTE, 'fv_tracker_init') + + allocate(Tracker(length)) + + do i=1,length + Tracker(i)%track_stderr_m1=-99.9 + Tracker(i)%track_stderr_m2=-99.9 + Tracker(i)%track_stderr_m3=-99.9 + ! Tracker(i)%track_n_old=0 + ! Tracker(i)%track_old_lon=0 + ! Tracker(i)%track_old_lat=0 + ! 
Tracker(i)%track_old_ntsd=0 + + Tracker(i)%tracker_angle=0 + Tracker(i)%tracker_fixlon=-999.0 + Tracker(i)%tracker_fixlat=-999.0 + Tracker(i)%tracker_ifix=-99 + Tracker(i)%tracker_jfix=-99 + Tracker(i)%tracker_havefix=.false. + Tracker(i)%tracker_gave_up=.false. + Tracker(i)%tracker_pmin=-99999. + Tracker(i)%tracker_vmax=-99. + Tracker(i)%tracker_rmw=-99. + + Tracker(i)%track_have_guess=.false. + Tracker(i)%track_guess_lat=-999.0 + Tracker(i)%track_guess_lon=-999.0 + enddo + + end subroutine fv_tracker_init + + subroutine allocate_tracker(i, is, ie, js, je) + integer, intent(in) :: i, is, ie, js, je + ! Allocate internal vortex tracker arrays + + allocate ( Tracker(i)%vort850(is:ie,js:je) ) + allocate ( Tracker(i)%spd850(is:ie,js:je) ) + allocate ( Tracker(i)%u850(is:ie,js:je) ) + allocate ( Tracker(i)%v850(is:ie,js:je) ) + allocate ( Tracker(i)%z850(is:ie,js:je) ) + allocate ( Tracker(i)%vort700(is:ie,js:je) ) + allocate ( Tracker(i)%spd700(is:ie,js:je) ) + allocate ( Tracker(i)%u700(is:ie,js:je) ) + allocate ( Tracker(i)%v700(is:ie,js:je) ) + allocate ( Tracker(i)%z700(is:ie,js:je) ) + allocate ( Tracker(i)%vort10m(is:ie,js:je) ) + allocate ( Tracker(i)%spd10m(is:ie,js:je) ) + allocate ( Tracker(i)%u10m(is:ie,js:je) ) + allocate ( Tracker(i)%v10m(is:ie,js:je) ) + allocate ( Tracker(i)%slp(is:ie,js:je) ) + + allocate ( Tracker(i)%distsq(is:ie,js:je) ) + allocate ( Tracker(i)%tracker_distsq(is:ie,js:je) ) + allocate ( Tracker(i)%tracker_angle(is:ie,js:je) ) + allocate ( Tracker(i)%tracker_fixes(is:ie,js:je) ) + end subroutine allocate_tracker + + subroutine deallocate_tracker(n) + integer, intent(in) :: n + + integer :: i + + ! Deallocate internal vortex tracker arrays + do i=1,n + if (allocated(Tracker(i)%vort850)) then + deallocate ( Tracker(i)%vort850 ) + deallocate ( Tracker(i)%spd850 ) + deallocate ( Tracker(i)%u850 ) + deallocate ( Tracker(i)%v850 ) + deallocate ( Tracker(i)%z850 ) + deallocate ( Tracker(i)%vort700 ) + deallocate ( Tracker(i)%spd700 ) + deallocate ( Tracker(i)%u700 ) + deallocate ( Tracker(i)%v700 ) + deallocate ( Tracker(i)%z700 ) + deallocate ( Tracker(i)%vort10m ) + deallocate ( Tracker(i)%spd10m ) + deallocate ( Tracker(i)%u10m ) + deallocate ( Tracker(i)%v10m ) + deallocate ( Tracker(i)%slp ) + endif + enddo + deallocate(Tracker) + + end subroutine deallocate_tracker + + subroutine fv_tracker_center(Atm, n, Time) + ! Top-level entry to the internal GFDL/NCEP vortex tracker. Finds the center of + ! the storm in the specified Atm and updates the Atm variables. + ! Will do nothing and return immediately if + ! tracker%tracker_gave_up=.true. + implicit none + type(fv_atmos_type), intent(inout) :: Atm + integer, intent(in) :: n + type(time_type), intent(in) :: Time + + integer :: ids,ide,jds,jde,kds,kde + integer :: ims,ime,jms,jme,kms,kme + integer :: ips,ipe,jps,jpe,kps,kpe + + call mpp_error(NOTE, 'fv_tracker_center') + call get_ijk_from_domain(Atm, & + ids, ide, jds, jde, kds, kde, & + ims, ime, jms, jme, kms, kme, & + ips, ipe, jps, jpe, kps, kpe ) + + call ntc_impl(Atm, Tracker(n), Time, & + ids, ide, jds, jde, kds, kde, & + ims, ime, jms, jme, kms, kme, & + ips, ipe, jps, jpe, kps, kpe ) + + end subroutine fv_tracker_center + + subroutine fv_diag_tracker(Atm, zvir, Time) + + type(fv_atmos_type), intent(inout) :: Atm(:) + type(time_type), intent(in) :: Time + real, intent(in):: zvir + + integer :: isc, iec, jsc, jec, n, ntileMe + integer :: isd, ied, jsd, jed, npz, itrac + integer :: ngc + integer :: nt = 2 ! 
TODO adjust to nest number for multiple nests + + real, allocatable :: a2(:,:),a3(:,:,:),a4(:,:,:), wk(:,:,:), wz(:,:,:) + real :: height(2) + real :: ptop + integer, parameter:: nplev_tracker=2 + real:: plevs(nplev_tracker), pout(nplev_tracker) + integer:: idg(nplev_tracker), id1(nplev_tracker) + + integer i,j,k, yr, mon, dd, hr, mn, days, seconds, nq, theta_d + character(len=128) :: tname + + height(1) = 5.E3 ! for computing 5-km "pressure" + height(2) = 0. ! for sea-level pressure + + pout(1) = 700 * 1.e2 + plevs(1) = log( pout(1) ) + pout(2) = 850 * 1.e2 + plevs(2) = log( pout(2) ) + + ntileMe = size(Atm(:)) + n = 1 + isc = Atm(n)%bd%isc; iec = Atm(n)%bd%iec + jsc = Atm(n)%bd%jsc; jec = Atm(n)%bd%jec + ngc = Atm(n)%ng + npz = Atm(n)%npz + ptop = Atm(n)%ak(1) + nq = size (Atm(n)%q,4) + + isd = Atm(n)%bd%isd; ied = Atm(n)%bd%ied + jsd = Atm(n)%bd%jsd; jed = Atm(n)%bd%jed + + fv_time = Time + + if (.not. allocated(a2)) allocate ( a2(isc:iec,jsc:jec) ) + if (.not. allocated(wk)) allocate ( wk(isc:iec,jsc:jec,npz) ) + if (.not. allocated(a3)) allocate ( a3(isc:iec,jsc:jec,nplev_tracker) ) + if (.not. allocated(wz)) allocate ( wz(isc:iec,jsc:jec,npz+1) ) + + ! do n = 1, ntileMe + n = 1 + call get_height_field(isc, iec, jsc, jec, ngc, npz, Atm(n)%flagstruct%hydrostatic, Atm(n)%delz, & + wz, Atm(n)%pt, Atm(n)%q, Atm(n)%peln, zvir) + + call get_pressure_given_height(isc, iec, jsc, jec, ngc, npz, wz, 1, height(2), & + Atm(n)%pt(:,:,npz), Atm(n)%peln, a2, 1.) + ! sea level pressure in Pa + Tracker(nt)%slp=a2(:,:) + call prt_maxmin('slp', Tracker(nt)%slp, isc, iec, jsc, jec, 0, 1, 1.) + + idg(:) = 1 + call get_height_given_pressure(isc, iec, jsc, jec, npz, wz, nplev_tracker, idg, plevs, Atm(n)%peln, a3) + Tracker(nt)%z700=a3(isc:iec,jsc:jec,1) + Tracker(nt)%z850=a3(isc:iec,jsc:jec,2) + call prt_maxmin('z700', Tracker(nt)%z700, isc, iec, jsc, jec, 0, 1, 1.) + call prt_maxmin('z850', Tracker(nt)%z850, isc, iec, jsc, jec, 0, 1, 1.) + + call cs3_interpolator(isc,iec,jsc,jec,npz, Atm(n)%ua(isc:iec,jsc:jec,:), nplev_tracker, & + pout(1:nplev_tracker), wz, Atm(n)%pe(isc:iec,1:npz+1,jsc:jec), idg, a3, -1) + Tracker(nt)%u700=a3(isc:iec,jsc:jec,1) + Tracker(nt)%u850=a3(isc:iec,jsc:jec,2) + call prt_maxmin('u700', Tracker(nt)%u700, isc, iec, jsc, jec, 0, 1, 1.) + call prt_maxmin('u850', Tracker(nt)%u850, isc, iec, jsc, jec, 0, 1, 1.) + + call cs3_interpolator(isc,iec,jsc,jec,npz, Atm(n)%va(isc:iec,jsc:jec,:), nplev_tracker, & + pout(1:nplev_tracker), wz, Atm(n)%pe(isc:iec,1:npz+1,jsc:jec), idg, a3, -1) + Tracker(nt)%v700=a3(isc:iec,jsc:jec,1) + Tracker(nt)%v850=a3(isc:iec,jsc:jec,2) + call prt_maxmin('v700', Tracker(nt)%v700, isc, iec, jsc, jec, 0, 1, 1.) + call prt_maxmin('v850', Tracker(nt)%v850, isc, iec, jsc, jec, 0, 1, 1.) + + call interpolate_z(isc, iec, jsc, jec, npz, 10., wz, Atm(n)%ua(isc:iec,jsc:jec,:), a2) + Tracker(nt)%u10m=a2(isc:iec,jsc:jec) + call interpolate_z(isc, iec, jsc, jec, npz, 10., wz, Atm(n)%va(isc:iec,jsc:jec,:), a2) + Tracker(nt)%v10m=a2(isc:iec,jsc:jec) + call prt_maxmin('u10m', Tracker(nt)%u10m, isc, iec, jsc, jec, 0, 1, 1.) + call prt_maxmin('v10m', Tracker(nt)%v10m, isc, iec, jsc, jec, 0, 1, 1.) 
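(Editor's note: a minimal illustrative sketch, not part of the original change. It shows how the 10 m wind components interpolated just above are combined into the scalar wind speed the tracker uses as one of its parameters; the actual loop appears a few lines further down in fv_diag_tracker. The conversion to knots via the module constant mps2kn, and the locals spd_ms and spd_kn, are hypothetical and shown only for illustration.)

    ! Sketch only: wind-speed magnitude from the interpolated 10 m components.
    ! spd_ms and spd_kn are hypothetical locals; mps2kn is the module constant defined above.
    do j = jsc, jec
      do i = isc, iec
        spd_ms = sqrt(Tracker(nt)%u10m(i,j)**2 + Tracker(nt)%v10m(i,j)**2)  ! wind speed in m/s
        spd_kn = spd_ms * mps2kn                                            ! the same speed in knots
      enddo
    enddo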
+ + call get_vorticity(isc, iec, jsc, jec, isd, ied, jsd, jed, npz, Atm(n)%u, Atm(n)%v, wk, & + Atm(n)%gridstruct%dx, Atm(n)%gridstruct%dy, Atm(n)%gridstruct%rarea) + call interpolate_vertical(isc, iec, jsc, jec, npz, & + 700.e2, Atm(n)%peln, wk, a2) + Tracker(nt)%vort700=a2(:,:) + call interpolate_vertical(isc, iec, jsc, jec, npz, & + 850.e2, Atm(n)%peln, wk, a2) + Tracker(nt)%vort850=a2(:,:) + call interpolate_z(isc, iec, jsc, jec, npz, 10., wz, wk, a2) + Tracker(nt)%vort10m=a2(:,:) + call prt_maxmin('vort700', Tracker(nt)%vort700, isc, iec, jsc, jec, 0, 1, 1.) + call prt_maxmin('vort850', Tracker(nt)%vort850, isc, iec, jsc, jec, 0, 1, 1.) + call prt_maxmin('vort10m', Tracker(nt)%vort10m, isc, iec, jsc, jec, 0, 1, 1.) + + do j=jsc,jec + do i=isc,iec + Tracker(nt)%spd700(i,j)=sqrt(Tracker(nt)%u700(i,j)**2 + Tracker(nt)%v700(i,j)**2) + Tracker(nt)%spd850(i,j)=sqrt(Tracker(nt)%u850(i,j)**2 + Tracker(nt)%v850(i,j)**2) + Tracker(nt)%spd10m(i,j)=sqrt(Tracker(nt)%u10m(i,j)**2 + Tracker(nt)%v10m(i,j)**2) + enddo + enddo + ! enddo ! end ntileMe do-loop + + if (allocated(a2)) deallocate(a2) + if (allocated(wk)) deallocate(wk) + if (allocated(a3)) deallocate(a3) + if (allocated(wz)) deallocate(wz) + + end subroutine fv_diag_tracker + + subroutine ntc_impl(Atm,tracker,Time, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + ! This is the main entry point to the tracker. It is most similar + ! to the function "tracker" in the GFDL/NCEP vortex tracker. + + implicit none + type(fv_atmos_type), intent(inout) :: Atm + type(fv_tracker_type), intent(inout) :: tracker + type(time_type), intent(in) :: Time + integer, intent(in) :: ids,ide,jds,jde,kds,kde + integer, intent(in) :: ims,ime,jms,jme,kms,kme + integer, intent(in) :: ips,ipe,jps,jpe,kps,kpe + + real :: dxdymean, sumdxa, sumdya + integer :: i, j, iweights, ip + + integer :: iguess, jguess ! first guess location + real :: latguess, longuess ! same, but in lat & lon + + integer :: iuvguess, juvguess ! "second guess" location using everything except wind maxima + real :: srsq + integer :: ifinal, jfinal + real :: latfinal, lonfinal + integer :: ierr + integer :: icen(maxtp), jcen(maxtp) ! center locations for each parameter + real :: loncen(maxtp), latcen(maxtp) ! lat, lon locations in degrees + logical :: calcparm(maxtp) ! do we have a valid center location for this parameter? + real :: max_wind, min_pres ! for ATCF output + real :: rcen(maxtp) ! center value (max wind, min mslp, etc.) + character*255 :: message + logical :: north_hemi ! true = northern hemisphere + logical :: have_guess ! first guess is available + real :: guessdist, guessdeg ! first guess distance to nearest point on grid + real :: latnear, lonnear ! nearest point in grid to first guess + + ! icen,jcen: Same meaning as clon, clat in tracker, but uses i and + ! j indexes of the center instead of lat/lon. Tracker comment: + ! Holds the coordinates for the center positions for + ! all storms at all times for all parameters. + ! (max_#_storms, max_fcst_times, max_#_parms). + ! For the third position (max_#_parms), here they are: + ! 1: Relative vorticity at 850 mb + ! 2: Relative vorticity at 700 mb + ! 3: Vector wind magnitude at 850 mb + ! 4: NOT CURRENTLY USED + ! 5: Vector wind magnitude at 700 mb + ! 6: NOT CURRENTLY USED + ! 7: Geopotential height at 850 mb + ! 8: Geopotential height at 700 mb + ! 9: Mean Sea Level Pressure + ! 10: Vector wind magnitude at 10 m + ! 11: Relative vorticity at 10 m + + call mpp_error(NOTE, 'ntc_impl') + + ! 
Initialize center information to invalid values for all centers: + icen=-99 + jcen=-99 + latcen=9e9 + loncen=9e9 + rcen=9e9 + calcparm=.false. + if(Moving_nest(2)%mn_flag%vortex_tracker==6) then ! TODO pick correct Moving_nest structure + srsq=searchrad_6*searchrad_6*1e6 + else + srsq=searchrad_7*searchrad_7*1e6 + endif + + ! Estimate the domain wide mean grid spacing in km + sumdxa=0.0 + sumdya=0.0 + do j=jps,min(jde-1,jpe) + do i=ips,min(ide-1,ipe) + sumdxa=sumdxa+Atm%gridstruct%dxa(i,j) + sumdya=sumdya+Atm%gridstruct%dya(i,j) + enddo + enddo + call mp_reduce_sum(sumdxa) + call mp_reduce_sum(sumdya) + dxdymean=0.5*(sumdxa + sumdya)/((ide-ids) * (jde-jds)) / 1000.0 + + ! Get the square of the approximate distance to the domain center + ! at all points: + call get_distsq(Atm, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + + ! Get the first guess from the prior nest motion timestep: + have_guess=tracker%track_have_guess + if(have_guess) then + ! We have a first guess center. We have to translate it to gridpoint space. + longuess=tracker%track_guess_lon + latguess=tracker%track_guess_lat + call get_nearest_lonlat(Atm,iguess,jguess,ierr,longuess,latguess, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe, & + lonnear, latnear) + if(ierr==0) then + call calcdist(longuess,latguess, lonnear,latnear, guessdist,guessdeg) + if(guessdist > Atm%neststruct%refinement*dxdymean) then +108 format('WARNING: guess lon=',F0.3,',lat=',F0.3, & + ' too far (',F0.3,'km) from nearest point lon=',F0.3,',lat=',F0.3, & + '. Will use domain center as first guess.') + write(message,108) tracker%track_guess_lon,tracker%track_guess_lat, & + guessdist,lonnear,latnear + call mpp_error(NOTE, message) + have_guess=.false. ! indicate that the first guess is unusable + else + latguess=latnear + longuess=lonnear + endif + else + have_guess=.false. ! indicate that the first guess is unusable. +109 format('WARNING: guess lon=',F0.3,',lat=',F0.3, & + ' does not exist in this domain. Will use domain center as first guess.') + write(message,109) tracker%track_guess_lon,tracker%track_guess_lat + call mpp_error(NOTE, message) + endif + endif + + ! If we could not get the first guess from the prior nest motion + ! timestep, then use the default first guess: the domain center. + if(Moving_nest(2)%mn_flag%vortex_tracker==6 .or. .not.have_guess) then + ! vt=6: hard coded first-guess center is domain center: + ! vt=7: first guess comes from prior timestep + ! Initial first guess is domain center. + ! Backup first guess is domain center if first guess is unusable. + iguess=(ide-ids)/2+ids + jguess=(jde-jds)/2+jds + if(Moving_nest(2)%mn_flag%vortex_tracker==7) then + call mpp_error(NOTE, 'Using domain center as first guess since no valid first guess is available.') + endif + call get_lonlat(Atm,iguess,jguess,longuess,latguess,ierr, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe) + if(ierr/=0) then + call mpp_error(FATAL, "ERROR: center of domain is not inside the domain") + endif + have_guess=.true. + endif + + if(.not.have_guess) then + call mpp_error(FATAL, "INTERNAL ERROR: No first guess is available (should never happen).") + endif + + north_hemi = latguess>0.0 + + ! 
Find the centers of all fields except the wind minima: + call find_center(Atm,tracker%vort850,srsq, & + icen(1),jcen(1),rcen(1),calcparm(1),loncen(1),latcen(1),dxdymean,'zeta', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, north_hemi=north_hemi) + call find_center(Atm,tracker%vort700,srsq, & + icen(2),jcen(2),rcen(2),calcparm(2),loncen(2),latcen(2),dxdymean,'zeta', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, north_hemi=north_hemi) + call find_center(Atm,tracker%z850,srsq, & + icen(7),jcen(7),rcen(7),calcparm(7),loncen(7),latcen(7),dxdymean,'hgt', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + call find_center(Atm,tracker%z700,srsq, & + icen(8),jcen(8),rcen(8),calcparm(8),loncen(8),latcen(8),dxdymean,'hgt', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + call find_center(Atm,tracker%slp,srsq, & + icen(9),jcen(9),rcen(9),calcparm(9),loncen(9),latcen(9),dxdymean,'slp', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + call find_center(Atm,tracker%vort10m,srsq, & + icen(11),jcen(11),rcen(11),calcparm(11),loncen(11),latcen(11),dxdymean,'zeta', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, north_hemi=north_hemi) + + ! Get a guess center location for the wind minimum searches: + call get_uv_guess(Atm,icen,jcen,loncen,latcen,calcparm, & + iguess,jguess,longuess,latguess,iuvguess,juvguess, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + + ! Find wind minima. Requires a first guess center: + windmin: if(Moving_nest(2)%mn_flag%vortex_tracker==6) then + call find_center(Atm,tracker%spd850,srsq, & + icen(3),jcen(3),rcen(3),calcparm(3),loncen(3),latcen(3),dxdymean,'wind', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, & + iuvguess=iuvguess, juvguess=juvguess) + call find_center(Atm,tracker%spd700,srsq, & + icen(5),jcen(5),rcen(5),calcparm(5),loncen(5),latcen(5),dxdymean,'wind', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, & + iuvguess=iuvguess, juvguess=juvguess) + call find_center(Atm,tracker%spd10m,srsq, & + icen(10),jcen(10),rcen(10),calcparm(10),loncen(10),latcen(10),dxdymean,'wind', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, & + iuvguess=iuvguess, juvguess=juvguess) + else + call get_uv_center(Atm,tracker%spd850, & + icen(3),jcen(3),rcen(3),calcparm(3),loncen(3),latcen(3),dxdymean,'wind', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, & + iuvguess=iuvguess, juvguess=juvguess) + call get_uv_center(Atm,tracker%spd700, & + icen(5),jcen(5),rcen(5),calcparm(5),loncen(5),latcen(5),dxdymean,'wind', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, & + iuvguess=iuvguess, juvguess=juvguess) + call get_uv_center(Atm,tracker%spd10m, & + icen(10),jcen(10),rcen(10),calcparm(10),loncen(10),latcen(10),dxdymean,'wind', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, & + iuvguess=iuvguess, juvguess=juvguess) + endif windmin + + ! 
Get a final guess center location: + call fixcenter(Atm,icen,jcen,calcparm,loncen,latcen, & + iguess,jguess,longuess,latguess, & + ifinal,jfinal,lonfinal,latfinal, & + north_hemi, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe) + + tracker%tracker_fixes=0 + do ip=1,maxtp + if(calcparm(ip)) then + if(icen(ip)>=ips .and. icen(ip)<=ipe & + .and. jcen(ip)>=jps .and. jcen(ip)<=jpe) then + tracker%tracker_fixes(icen(ip),jcen(ip))=ip + endif + endif + enddo + + if(iguess>=ips .and. iguess<=ipe .and. jguess>=jps .and. jguess<=jpe) then + tracker%tracker_fixes(iguess,jguess)=-1 + endif + + if(iuvguess>=ips .and. iuvguess<=ipe .and. juvguess>=jps .and. juvguess<=jpe) then + tracker%tracker_fixes(iuvguess,juvguess)=-2 + endif + + if(ifinal>=ips .and. ifinal<=ipe .and. jfinal>=jps .and. jfinal<=jpe) then + tracker%tracker_fixes(ifinal,jfinal)=-3 + endif + + call get_tracker_distsq(Atm, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + + call get_wind_pres_intensity(Atm, & + tracker%tracker_pmin,tracker%tracker_vmax,tracker%tracker_rmw, & + max_wind_search_radius, min_mlsp_search_radius, & + lonfinal,latfinal, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + +205 format('tracker fixlon=',F8.3, ' fixlat=',F8.3, & + ' ifix=',I6,' jfix=',I6, & + ' pmin=',F12.3,' vmax=',F8.3,' rmw=',F8.3) + write(message,205) tracker%tracker_fixlon, tracker%tracker_fixlat, & + tracker%tracker_ifix, tracker%tracker_jfix, & + tracker%tracker_pmin, tracker%tracker_vmax, tracker%tracker_rmw + call mpp_error(NOTE, message) + + if(is_master()) then + call output_partial_atcfunix(Atm,Time, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + endif + end subroutine ntc_impl + + subroutine get_ijk_from_domain(Atm, & + ids, ide, jds, jde, kds, kde, & + ims, ime, jms, jme, kms, kme, & + ips, ipe, jps, jpe, kps, kpe ) + + implicit none + type(fv_atmos_type), intent(in) :: Atm + + integer, intent(out) :: ids,ide,jds,jde,kds,kde + integer, intent(out) :: ims,ime,jms,jme,kms,kme + integer, intent(out) :: ips,ipe,jps,jpe,kps,kpe + + ids = 1 + ide = Atm%npx + jds = 1 + jde = Atm%npy + kds = 1 + kde = Atm%npz + call mpp_get_data_domain(Atm%domain, ims, ime, jms, jme) + kms = 1 + kme = Atm%npz + call mpp_get_compute_domain(Atm%domain, ips, ipe, jps, jpe) + kps = 1 + kpe = Atm%npz + end subroutine get_ijk_from_domain + + subroutine get_nearest_lonlat(Atm,iloc,jloc,ierr,lon,lat, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe, & + lonnear, latnear) + ! Finds the nearest point in the domain to the specified lon,lat + ! location. + implicit none + type(fv_atmos_type), intent(inout) :: Atm + integer, intent(in) :: ids,ide,jds,jde,kds,kde + integer, intent(in) :: ims,ime,jms,jme,kms,kme + integer, intent(in) :: ips,ipe,jps,jpe,kps,kpe + integer, intent(out) :: iloc,jloc,ierr + real, intent(in) :: lon,lat + real :: dx,dy,d,dmin, zdummy, latmin,lonmin + integer :: i,j,imin,jmin + real, intent(out), optional :: latnear, lonnear + + zdummy=42 + dmin=9e9 + imin=-99 + jmin=-99 + latmin=9e9 + lonmin=9e9 + ierr=0 + do j=jps,min(jde-1,jpe) + do i=ips,min(ide-1,ipe) + dy=abs(lat-Atm%gridstruct%agrid(i,j,2)*rad_to_deg) + dx=abs(mod(3600.+180.+(lon-Atm%gridstruct%agrid(i,j,1)*rad_to_deg),360.)-180.) 
+ d=dx*dx+dy*dy + if(dlocalextreme) then + localextreme=windsq + locali=i + localj=j + endif + endif + enddo + enddo + if(localextreme>0) localextreme=sqrt(localextreme) + + globalextreme=localextreme + globali=locali + globalj=localj + call mp_reduce_maxval(globalextreme,globali,globalj) + + call get_lonlat(Atm,globali,globalj,globallon,globallat,ierr, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + its,ite, jts,jte, kts,kte) + if(ierr/=0) then + call mpp_error(WARNING, "WARNING: Unable to find location of wind maximum.") + rmw=-99 + else + call calcdist(clon,clat,globallon,globallat,rmw,degrees) + end if + + ! Get the guess location for the next time: + max_wind=globalextreme + if(globali<0 .or. globalj<0) then + call mpp_error(WARNING, "WARNING: No wind values found that were greater than -9*10^9.") + min_mslp=-999 + endif + + end subroutine get_wind_pres_intensity + + subroutine fixcenter(Atm,icen,jcen,calcparm,loncen,latcen, & + iguess,jguess,longuess,latguess, & + ifinal,jfinal,lonfinal,latfinal, & + north_hemi, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe) + ! This is the same as "fixcenter" in gettrk_main. Original comment: + ! + ! ABSTRACT: This subroutine loops through the different parameters + ! for the input storm number (ist) and calculates the + ! center position of the storm by taking an average of + ! the center positions obtained for those parameters. + ! First we check to see which parameters are within a + ! max error range (errmax), and we discard those that are + ! not within that range. Of the remaining parms, we get + ! a mean position, and then we re-calculate the position + ! by giving more weight to those estimates that are closer + ! to this mean first-guess position estimate. + + ! Arguments: Input: + ! grid - the grid being processed + ! icen,jcen - arrays of center gridpoint locations + ! calcperm - array of center validity flags (true = center is valid) + ! loncen,latcen - center geographic locations + ! iguess,jguess - first guess gridpoint location + ! longuess,latguess - first guess geographic location + + ! Arguments: Output: + ! ifinal,jfinal - final center gridpoint location + ! lonfinal,latfinal - final center geographic location + + ! Arguments: Optional input: + ! north_hemi - true = northern hemisphere, false=south + + implicit none + integer, intent(in) :: & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe + type(fv_atmos_type), intent(inout) :: Atm + integer, intent(in) :: icen(maxtp), jcen(maxtp) + real, intent(in) :: loncen(maxtp), latcen(maxtp) + logical, intent(inout) :: calcparm(maxtp) + + integer, intent(in) :: iguess,jguess + real, intent(in) :: latguess,longuess + + integer, intent(inout) :: ifinal,jfinal + real, intent(inout) :: lonfinal,latfinal + + logical, intent(in), optional :: north_hemi + + character*255 :: message + real :: errdist(maxtp),avgerr,errmax,errinit,xavg_stderr + real :: dist,degrees, total + real :: minutes,hours,trkerr_avg,dist_from_mean(maxtp),wsum + integer :: ip,itot4next,iclose,count,ifound,ierr + integer(kind=8) :: isum,jsum + real :: irsum,jrsum,errtmp,devia,wtpos + real :: xmn_dist_from_mean, stderr_close + logical use4next(maxtp) + + ! Determine forecast hour: + hours=time_type_to_real(Atm%Time-Atm%Time_Init)/3600. + + ! Decide maximum values for distance and std. 
dev.: + if(hours<0.5) then + errmax=err_reg_init + errinit=err_reg_init + else + errmax=err_reg_max + errinit=err_reg_max + endif + + if(hours>4.) then + xavg_stderr = ( Tracker(n)%track_stderr_m1 + & + Tracker(n)%track_stderr_m2 + Tracker(n)%track_stderr_m3 ) / 3.0 + elseif(hours>3.) then + xavg_stderr = ( Tracker(n)%track_stderr_m1 + Tracker(n)%track_stderr_m2 ) / 2.0 + elseif(hours>2.) then + xavg_stderr = Tracker(n)%track_stderr_m1 + endif + + if(hours>2.) then + errtmp = 3.0*xavg_stderr*errpgro + errmax = max(errtmp,errinit) + errtmp = errpmax + errmax = min(errmax,errtmp) + endif + + ! Initialize loop variables: + errdist=0.0 + use4next=.false. + trkerr_avg=0 + itot4next=0 + iclose=0 + isum=0 + jsum=0 + ifound=0 + + do ip=1,maxtp + if(ip==4 .or. ip==6) then + calcparm(ip)=.false. + cycle + elseif(calcparm(ip)) then + ifound=ifound+1 + call calcdist(longuess,latguess,loncen(ip),latcen(ip),dist,degrees) + errdist(ip)=dist + if(dist<=errpmax) then + if(ip==3 .or. ip==5 .or. ip==10) then + use4next(ip)=.false. + else + use4next(ip)=.true. + trkerr_avg=trkerr_avg+dist + itot4next=itot4next+1 + endif + endif + if(dist<=errmax) then + iclose=iclose+1 + isum=isum+icen(ip) + jsum=jsum+jcen(ip) + else + calcparm(ip)=.false. + endif + endif + enddo + + if(ifound<=0) then + call mpp_error(NOTE, 'The tracker could not find the centers for any parameters. & + Thus, a center position could not be obtained for this storm.') + ! Use domain center as storm location + Tracker(n)%tracker_ifix=(ide-ids)/2+ids + Tracker(n)%tracker_jfix=(jde-jds)/2+jds + Tracker(n)%tracker_havefix=.false. + Tracker(n)%tracker_gave_up=.true. + Tracker(n)%tracker_fixlon=-999.0 + Tracker(n)%tracker_fixlat=-999.0 + return + endif + + if(iclose<=0) then +200 format('No storms are within errmax=',F0.1,'km of the parameters') + write(message,200) errmax + call mpp_error(NOTE, message) + ! Use domain center as storm location + Tracker(n)%tracker_ifix=(ide-ids)/2+ids + Tracker(n)%tracker_jfix=(jde-jds)/2+jds + Tracker(n)%tracker_havefix=.false. + Tracker(n)%tracker_gave_up=.true. + Tracker(n)%tracker_fixlon=-999.0 + Tracker(n)%tracker_fixlat=-999.0 + return + endif + + ifinal=real(isum)/real(iclose) + jfinal=real(jsum)/real(iclose) + +504 format(' calculated ifinal, jfinal: ifinal=',I0,' jfinal=',I0,' isum=',I0,' jsum=',I0,' iclose=',I0) + !write(0,504) ifinal,jfinal,isum,jsum,iclose + + call get_lonlat(Atm,ifinal,jfinal,lonfinal,latfinal,ierr, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe) + if(ierr/=0) then + call mpp_error(NOTE, 'Gave up on finding the storm location due to error in get_lonlat (1).') + ! Use domain center as storm location + Tracker(n)%tracker_ifix=(ide-ids)/2+ids + Tracker(n)%tracker_jfix=(jde-jds)/2+jds + Tracker(n)%tracker_havefix=.false. + Tracker(n)%tracker_gave_up=.true. + Tracker(n)%tracker_fixlon=-999.0 + Tracker(n)%tracker_fixlat=-999.0 + return + endif + + count=0 + dist_from_mean=0.0 + total=0.0 + do ip=1,maxtp + if(calcparm(ip)) then + call calcdist(lonfinal,latfinal,loncen(ip),latcen(ip),dist,degrees) + dist_from_mean(ip)=dist + total=total+dist + count=count+1 + endif + enddo + xmn_dist_from_mean=total/real(count) + + do ip=1,maxtp + if(calcparm(ip)) then + total=total+(xmn_dist_from_mean-dist_from_mean(ip))**2 + endif + enddo + if(count<2) then + stderr_close=0.0 + else + stderr_close=max(1.0,sqrt(1./(count-1) * total)) + endif + + if(calcparm(1) .or. calcparm(2) .or. calcparm(7) .or. & + calcparm(8) .or. calcparm(9) .or. 
calcparm(11)) then + continue + else + ! Message copied straight from tracker: + call mpp_error(NOTE, 'In fixcenter, STOPPING PROCESSING for this storm. The reason is that') + call mpp_error(NOTE, 'none of the fix locations for parms z850, z700, zeta 850, zeta 700') + call mpp_error(NOTE, 'MSLP or sfc zeta were within a reasonable distance of the guess location.') + ! Use domain center as storm location + Tracker(n)%tracker_ifix=(ide-ids)/2+ids + Tracker(n)%tracker_jfix=(jde-jds)/2+jds + Tracker(n)%tracker_havefix=.false. + Tracker(n)%tracker_gave_up=.true. + Tracker(n)%tracker_fixlon=-999.0 + Tracker(n)%tracker_fixlat=-999.0 + return + endif + + ! Recalculate the final center location using weights + if(stderr_close<5.0) then + ! Old code forced a minimum of 5.0 stddev + stderr_close=5.0 + endif + irsum=0 + jrsum=0 + wsum=0 + do ip=1,maxtp + if(calcparm(ip)) then + devia=max(1.0,dist_from_mean(ip)/stderr_close) + wtpos=exp(-devia/3.) + irsum=icen(ip)*wtpos+irsum + jrsum=jcen(ip)*wtpos+jrsum + wsum=wtpos+wsum +1100 format(' Adding parm: devia=',F0.3,' wtpos=',F0.3,' irsum=',F0.3,' jrsum=',F0.3,' wsum=',F0.3) + !write(0,1100) devia,wtpos,irsum,jrsum,wsum + endif + enddo + ifinal=nint(real(irsum)/real(wsum)) + jfinal=nint(real(jrsum)/real(wsum)) + call get_lonlat(Atm,ifinal,jfinal,lonfinal,latfinal,ierr, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe) + if(ierr/=0) then + call mpp_error(NOTE, 'Gave up on finding the storm location due to error in get_lonlat (2).') + ! Use domain center as storm location + Tracker(n)%tracker_ifix=(ide-ids)/2+ids + Tracker(n)%tracker_jfix=(jde-jds)/2+jds + Tracker(n)%tracker_havefix=.false. + Tracker(n)%tracker_gave_up=.true. + Tracker(n)%tracker_fixlon=-999.0 + Tracker(n)%tracker_fixlat=-999.0 + return + endif + + ! Store the lat/lon location: + Tracker(n)%tracker_fixlon=lonfinal + Tracker(n)%tracker_fixlat=latfinal + Tracker(n)%tracker_ifix=ifinal + Tracker(n)%tracker_jfix=jfinal + Tracker(n)%tracker_havefix=.true. + + if(nint(hours) > Tracker(n)%track_last_hour ) then + ! It is time to recalculate the std. dev. of the track: + count=0 + dist_from_mean=0.0 + total=0.0 + do ip=1,maxtp + if(calcparm(ip)) then + call calcdist(lonfinal,latfinal,loncen(ip),latcen(ip),dist,degrees) + dist_from_mean(ip)=dist + total=total+dist + count=count+1 + endif + enddo + xmn_dist_from_mean=total/real(count) + + do ip=1,maxtp + if(calcparm(ip)) then + total=total+(xmn_dist_from_mean-dist_from_mean(ip))**2 + endif + enddo + if(count<2) then + stderr_close=0.0 + else + stderr_close=max(1.0,sqrt(1./(count-1) * total)) + endif + + Tracker(n)%track_stderr_m3=Tracker(n)%track_stderr_m2 + Tracker(n)%track_stderr_m2=Tracker(n)%track_stderr_m1 + Tracker(n)%track_stderr_m1=stderr_close + Tracker(n)%track_last_hour=nint(hours) + endif + + return + + end subroutine fixcenter + + subroutine get_uv_guess(Atm,icen,jcen,loncen,latcen,calcparm, & + iguess,jguess,longuess,latguess,iout,jout, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + its,ite,jts,jte,kts,kte) + ! This is a rewrite of the gettrk_main.f get_uv_guess. Original comment: + ! ABSTRACT: The purpose of this subroutine is to get a modified + ! first guess lat/lon position before searching for the + ! minimum in the wind field. The reason for doing this is + ! to better refine the guess and avoid picking up a wind + ! minimum far away from the center. So, use the + ! first guess position (and give it strong weighting), and + !
then also use the fix positions for the current time + ! (give the vorticity centers stronger weighting as well), + ! and then take the average of these positions. + + ! Arguments: Input: + ! grid - grid being searched + ! icen,jcen - tracker parameter center gridpoints + ! loncen,latcen - tracker parameter centers' geographic locations + ! calcparm - is each center valid? + ! iguess, jguess - first guess gridpoint location + ! longuess,latguess - first guess geographic location + + ! Arguments: Output: + ! iout,jout - uv guess center location + + implicit none + type(fv_atmos_type), intent(inout) :: Atm + integer, intent(in) :: ids,ide,jds,jde,kds,kde + integer, intent(in) :: ims,ime,jms,jme,kms,kme + integer, intent(in) :: its,ite,jts,jte,kts,kte + + integer, intent(in) :: icen(maxtp), jcen(maxtp) + real, intent(in) :: loncen(maxtp), latcen(maxtp) + logical, intent(in) :: calcparm(maxtp) + + integer, intent(in) :: iguess,jguess + real, intent(in) :: latguess,longuess + + integer, intent(inout) :: iout,jout + real :: degrees,dist + integer :: ip,ict + integer(kind=8) :: isum,jsum + + ict=2 + isum=2*iguess + jsum=2*jguess + + ! Get a guess storm center location for searching for the wind centers: + do ip=1,maxtp + if ((ip > 2 .and. ip < 7) .or. ip == 10) then + cycle ! because 3-6 are for 850 & 700 u & v and 10 is + ! for surface wind magnitude. + elseif(calcparm(ip)) then + call calcdist (longuess,latguess,loncen(ip),latcen(ip),dist,degrees) + if(distrcen .and. Tracker(n)%distsq(i,j)\c NOTE: The latitude arguments passed to the + ! B / \ subr are the actual lat vals, but in + ! \ the calculation we use 90-lat. + ! a \ . + ! \pt. NOTE: You may get strange results if you: + ! C (1) use positive values for SH lats AND + ! you try computing distances across the + ! equator, or (2) use lon values of 0 to + ! -180 for WH lons AND you try computing + ! distances across the 180E meridian. + ! + ! NOTE: In the diagram above, (a) is the angle between pt. B and + ! pt. C (with pt. x as the vertex), and (A) is the difference in + ! longitude (in degrees, absolute value) between pt. B and pt. C. + ! + ! !!! NOTE !!! -- THE PARAMETER ecircum IS DEFINED (AS OF THE + ! ORIGINAL WRITING OF THIS SYSTEM) IN KM, NOT M, SO BE AWARE THAT + ! THE DISTANCE RETURNED FROM THIS SUBROUTINE IS ALSO IN KM. + ! + implicit none + + real, intent(inout) :: degrees + real, intent(out) :: xdist + real, intent(in) :: rlonb,rlatb,rlonc,rlatc + real, parameter :: dtr = 0.0174532925199433 + real :: distlatb,distlatc,pole,difflon,cosanga,circ_fract + ! + if (rlatb < 0.0 .or. rlatc < 0.0) then + pole = -90. + else + pole = 90. + endif + ! + distlatb = (pole - rlatb) * dtr + distlatc = (pole - rlatc) * dtr + difflon = abs( (rlonb - rlonc)*dtr ) + ! + cosanga = ( cos(distlatb) * cos(distlatc) + & + sin(distlatb) * sin(distlatc) * cos(difflon)) + + ! This next check of cosanga is needed since I have had ACOS crash + ! when calculating the distance between 2 identical points (should + ! = 0), but the input for ACOS was just slightly over 1 + ! (e.g., 1.00000000007), due to (I'm guessing) rounding errors. + + if (cosanga > 1.0) then + cosanga = 1.0 + endif + + degrees = acos(cosanga) / dtr + circ_fract = degrees / 360. + xdist = circ_fract * ecircum + ! + ! NOTE: whether this subroutine returns the value of the distance + ! in km or m depends on the scale of the parameter ecircum. + ! At the original writing of this subroutine (7/97), ecircum + ! was given in km. + ! 
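As a quick, self-contained sanity check of the law-of-cosines arithmetic documented above (not part of the patch): the program below reproduces the same steps for two invented points; the Earth-circumference constant here is an assumption (2*pi*6371 km) and need not match the module's exact ecircum value, but, as the comment above stresses, because it is in km the returned distance is in km.

program calcdist_demo
  implicit none
  real, parameter :: dtr = 0.0174532925199433
  real, parameter :: ecircum = 2.0*3.14159265*6371.0   ! Earth circumference in km (assumed value)
  real :: xdist, degrees
  call demo_calcdist(-80.0, 25.0, -75.0, 27.0, xdist, degrees)   ! two arbitrary points (lon, lat in degrees)
  print '(A,F8.2,A,F7.3,A)', 'distance = ', xdist, ' km over ', degrees, ' degrees of arc'
contains
  subroutine demo_calcdist(rlonb, rlatb, rlonc, rlatc, xdist, degrees)
    real, intent(in)  :: rlonb, rlatb, rlonc, rlatc
    real, intent(out) :: xdist, degrees
    real :: pole, distlatb, distlatc, difflon, cosanga
    pole = merge(-90., 90., rlatb < 0.0 .or. rlatc < 0.0)   ! measure colatitude from the nearer pole
    distlatb = (pole - rlatb) * dtr
    distlatc = (pole - rlatc) * dtr
    difflon  = abs((rlonb - rlonc) * dtr)
    cosanga  = cos(distlatb)*cos(distlatc) + sin(distlatb)*sin(distlatc)*cos(difflon)
    cosanga  = min(cosanga, 1.0)            ! guard ACOS against roundoff just above 1
    degrees  = acos(cosanga) / dtr          ! arc between the two points, in degrees
    xdist    = (degrees/360.) * ecircum     ! km, because ecircum above is in km
  end subroutine demo_calcdist
end program calcdist_demo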
+ return + end subroutine calcdist + + subroutine get_lonlat(Atm,iguess,jguess,longuess,latguess,ierr, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe) + ! Returns the latitude (latguess) and longitude (longuess) of the + ! specified location (iguess,jguess) in the specified grid. + implicit none + integer, intent(in) :: & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe + integer, intent(out) :: ierr + type(fv_atmos_type), intent(inout) :: Atm + integer, intent(in) :: iguess,jguess + real, intent(inout) :: longuess,latguess + real :: weight,zjunk + integer :: itemp,jtemp + + ierr=0 + zjunk=1 + if(iguess>=ips .and. iguess<=ipe .and. jguess>=jps .and. jguess<=jpe) then + weight=1 + longuess=Atm%gridstruct%agrid(iguess,jguess,1)*rad_to_deg + latguess=Atm%gridstruct%agrid(iguess,jguess,2)*rad_to_deg + itemp=iguess + jtemp=jguess + else + weight=0 + longuess=-999.9 + latguess=-999.9 + itemp=-99 + jtemp=-99 + endif + + call mp_reduce_maxloc(weight,latguess,longuess,zjunk,itemp,jtemp) + + if(itemp==-99 .and. jtemp==-99) then + ierr=95 + endif + end subroutine get_lonlat + + subroutine clean_lon_lat(xlon1,ylat1) + real, intent(inout) :: xlon1,ylat1 + ! This modifies a (lat,lon) pair so that the longitude fits + ! between [-180,180] and the latitude between [-90,90], taking + ! into account spherical geometry. + ! NOTE: inputs and outputs are in degrees + xlon1=(mod(xlon1+3600.+180.,360.)-180.) + ylat1=(mod(ylat1+3600.+180.,360.)-180.) + if(ylat1>90.) then + ylat1=180.-ylat1 + xlon1=mod(xlon1+360.,360.)-180. + elseif(ylat1<-90.) then + ylat1=-180. - ylat1 + xlon1=mod(xlon1+360.,360.)-180. + endif + end subroutine clean_lon_lat + + !---------------------------------------------------------------------------------- + ! These two simple routines return an N, S, E or W for the + ! hemisphere of a latitude or longitude. + character(1) function get_lat_ns(lat) + ! This could be written simply as merge('N','S',lat>=0) if F95 allowed + implicit none + real :: lat + if(lat>=0) then + get_lat_ns='N' + else + get_lat_ns='S' + endif + end function get_lat_ns + character(1) function get_lon_ew(lon) + ! This could be written simply as merge('E','W',lon>=0) if F95 allowed + implicit none + real :: lon + if(lon>=0) then + get_lon_ew='E' + else + get_lon_ew='W' + endif + end function get_lon_ew + + subroutine fv_tracker_post_move(Atm) + ! This updates the tracker i/j fix location and square of the + ! distance to the tracker center after a nest move. + type(fv_atmos_type), intent(inout) :: Atm + integer :: ierr, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe + + ! Get the grid bounds: + CALL get_ijk_from_domain(Atm, & + ids, ide, jds, jde, kds, kde, & + ims, ime, jms, jme, kms, kme, & + ips, ipe, jps, jpe, kps, kpe ) + + ! Get the i/j center location from the fix location: + ierr=0 + call get_nearest_lonlat(Atm,Tracker(n)%tracker_ifix,Tracker(n)%tracker_jfix, & + ierr,Tracker(n)%tracker_fixlon,Tracker(n)%tracker_fixlat, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + + ! Get the square of the approximate distance to the tracker center + ! at all points: + if(ierr==0) & + call get_tracker_distsq(Atm, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + end subroutine fv_tracker_post_move + +#ifdef DEBUG + subroutine check_validity(cparm, v, i, j) + ! 
[KA] Checks value of a tracking parameter for validity + character*(*), intent(in) :: cparm + real, intent(in) :: v + integer, intent(in) :: i, j + real :: min_v, max_v + integer :: this_pe + + min_v = -9e9 + max_v = 9e9 + this_pe = mpp_pe() + + !< set validity range + select case (trim(cparm)) + case ("zeta") + !< low-level vorticity + min_v = -1e-2 + max_v = 1e-2 + case ("hgt") + !< low-level geopotential height + min_v = 1e2 + max_v = 1e4 + case ("slp") + !< sea-level pressure + min_v = 0.85e5 + max_v = 1.10e5 + case ("wind") + !< low-level wind + min_v = 1e-3 + max_v = 2e2 + case default + !< Unrecognized parameter; must be invalid + write(0,"(A,A8)") "[KA] inval track variable:",trim(cparm) + return + end select + + !< check value for validity + if (v < min_v .OR. v > max_v) then + !< report bad value, its name, its indices, the containing pe + write(0,"(A,A8,A,E8.1,A,I3,A,2I3)") & + "[KA] inval track val:",trim(cparm)," val:",v," pe:",this_pe," i,j:",i,j + endif + + end subroutine check_validity + +#endif !< DEBUG + +#endif !< MOVING_NEST + +end module fv_tracker_mod diff --git a/tools/fv_diagnostics.F90 b/tools/fv_diagnostics.F90 index 7c6d89420..205c4ae8a 100644 --- a/tools/fv_diagnostics.F90 +++ b/tools/fv_diagnostics.F90 @@ -197,6 +197,10 @@ module fv_diagnostics_mod public :: max_vv, get_vorticity, max_uh public :: max_vorticity, max_vorticity_hy1, bunkers_vector, helicity_relative_CAPS public :: cs3_interpolator, get_height_given_pressure +#ifdef MOVING_NEST + public :: interpolate_z, get_pressure_given_height + public :: fv_diag_reinit +#endif integer, parameter :: MAX_PLEVS = 31 #ifdef FEWER_PLEVS @@ -229,6 +233,40 @@ module fv_diagnostics_mod contains +#ifdef MOVING_NEST + ! For reinitializing zsurf after moving nest advances -- Ramstrom, HRD/AOML + subroutine fv_diag_reinit(Atm) + type(fv_atmos_type), intent(inout), target :: Atm(:) + !integer, intent(out) :: axes(4) + !type(time_type), intent(in) :: Time + !integer, intent(in) :: npx, npy, npz + !real, intent(in):: p_ref + + integer :: i, j, n + integer :: isc, iec, jsc, jec + + n=1 ! Hardcoded to 1 because we pass in only the Atm that defines the nest grid - e.g. Atm(2:2). + isc = Atm(n)%bd%isc; iec = Atm(n)%bd%iec + jsc = Atm(n)%bd%jsc; jec = Atm(n)%bd%jec + + !print '("[INFO] WDR fv_diag_reinit npe=",I0," i=",I0,"-",I0," j=",I0,"-",I0)', this_pe, isc, iec, jsc, jec + + ginv = 1./GRAV + + do j=jsc,jec + do i=isc,iec + zsurf(i,j) = ginv * Atm(n)%phis(i,j) + enddo + enddo + + ! Appears to update the zsurf data at each time this subroutine is called, not just at init. +!#ifndef DYNAMICS_ZS +! if (id_zsurf > 0) used = send_data(id_zsurf, zsurf, Time) +!#endif + + end subroutine fv_diag_reinit +#endif + subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) type(fv_atmos_type), intent(inout), target :: Atm(:) integer, intent(out) :: axes(4) @@ -267,6 +305,7 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) real, allocatable :: dx(:,:), dy(:,:) call write_version_number ( 'FV_DIAGNOSTICS_MOD', version ) + idiag => Atm(1)%idiag ! For total energy diagnostics: @@ -686,6 +725,29 @@ subroutine fv_diag_init(Atm, axes, Time, npx, npy, npz, p_ref) id_zsurf = register_diag_field ( trim(field), 'zsurf', axes(1:2), Time, & 'surface height', 'm') #endif + +#ifdef MOVING_NEST +!------------------- +! 
Time-dependent lon-lat (moving nest) [Ahern, AOML/HRD] +!------------------- + id_mlon = register_diag_field ( trim(field), 'grid_mlon', (/id_x,id_y/), Time, & + 'longitude', 'degrees_E' ) + id_mlat = register_diag_field ( trim(field), 'grid_mlat', (/id_x,id_y/), Time, & + 'latitude', 'degrees_N' ) + id_mlont = register_diag_field ( trim(field), 'grid_mlont', (/id_xt,id_yt/), Time, & + 'longitude', 'degrees_E' ) + id_mlatt = register_diag_field ( trim(field), 'grid_mlatt', (/id_xt,id_yt/), Time, & + 'latitude', 'degrees_N' ) + id_marea = register_diag_field ( trim(field), 'marea', axes(1:2), Time, & + 'cell area', 'm**2' ) + if (id_marea > 0) then + call diag_field_add_attribute (id_marea, 'cell_methods', 'area: sum') + endif + id_mdx = register_diag_field ( trim(field), 'mdx', (/id_xt,id_y/), Time, & + 'dx', 'm') + id_mdy = register_diag_field ( trim(field), 'mdy', (/id_x,id_yt/), Time, & + 'dy', 'm') +#endif !------------------- ! Surface pressure !------------------- @@ -1533,6 +1595,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) integer :: isd, ied, jsd, jed, npz, itrac integer :: ngc, nwater + real, allocatable :: dx(:,:), dy(:,:) real, allocatable :: a2(:,:),a3(:,:,:),a4(:,:,:), wk(:,:,:), wz(:,:,:), ucoor(:,:,:), vcoor(:,:,:) real, allocatable :: ustm(:,:), vstm(:,:) real, allocatable :: slp(:,:), depress(:,:), ws_max(:,:), tc_count(:,:) @@ -1770,6 +1833,26 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) #ifdef DYNAMICS_ZS if(id_zsurf > 0) used=send_data(id_zsurf, zsurf, Time) +#endif +#ifdef MOVING_NEST + ! send current lon/lat and orog data for moving nest/grid [Ahern, AOML/HRD] + if (id_mlon > 0) used = send_data(id_mlon, rad2deg*Atm(n)%gridstruct%grid(isc:iec+1,jsc:jec+1,1), Time) + if (id_mlat > 0) used = send_data(id_mlat, rad2deg*Atm(n)%gridstruct%grid(isc:iec+1,jsc:jec+1,2), Time) + if (id_mlont > 0) used = send_data(id_mlont, rad2deg*Atm(n)%gridstruct%agrid(isc:iec,jsc:jec,1), Time) + if (id_mlatt > 0) used = send_data(id_mlatt, rad2deg*Atm(n)%gridstruct%agrid(isc:iec,jsc:jec,2), Time) + if (id_marea > 0) used = send_data(id_marea, Atm(n)%gridstruct%area(isc:iec,jsc:jec), Time) + if (id_mdx > 0) then + allocate(dx(isc:iec+1,jsc:jec+1)) + dx(isc:iec,jsc:jec+1) = Atm(n)%gridstruct%dx(isc:iec,jsc:jec+1) + used = send_data(id_mdx, dx, Time) + deallocate(dx) + endif + if (id_mdy > 0) then + allocate(dy(isc:iec+1,jsc:jec+1)) + dy(isc:iec+1,jsc:jec) = Atm(n)%gridstruct%dy(isc:iec+1,jsc:jec) + used = send_data(id_mdy, dy, Time) + deallocate(dy) + endif #endif if(id_ps > 0) used=send_data(id_ps, Atm(n)%ps(isc:iec,jsc:jec), Time) @@ -4058,6 +4141,7 @@ subroutine fv_diag(Atm, zvir, Time, print_freq) end subroutine fv_diag + subroutine wind_max(isc, iec, jsc, jec ,isd, ied, jsd, jed, us, vs, ws_max, domain) integer isc, iec, jsc, jec integer isd, ied, jsd, jed diff --git a/tools/fv_diagnostics.h b/tools/fv_diagnostics.h index 9622577a8..e4525e0e0 100644 --- a/tools/fv_diagnostics.h +++ b/tools/fv_diagnostics.h @@ -35,6 +35,9 @@ id_dbz, id_maxdbz, id_basedbz, id_dbz4km, id_dbztop, id_dbz_m10C, & id_ctz, id_w1km, id_wmaxup, id_wmaxdn, id_cape, id_cin +! Time-dependent lon-lat fields, moving grids: + integer :: id_mlon, id_mlat, id_mlont, id_mlatt, id_marea, id_mdx, id_mdy + ! Selected theta-level fields from 3D variables: integer :: id_pv350K, id_pv550K diff --git a/tools/fv_grid_tools.F90 b/tools/fv_grid_tools.F90 index d0d8a0a4b..680d74fbf 100644 --- a/tools/fv_grid_tools.F90 +++ b/tools/fv_grid_tools.F90 @@ -115,16 +115,17 @@ module fv_grid_tools_mod ! 
- use constants_mod, only: grav, omega, pi=>pi_8, cnst_radius=>radius, small_fac - use fv_arrays_mod, only: fv_atmos_type, fv_grid_type, fv_grid_bounds_type, R_GRID - use fv_grid_utils_mod, only: gnomonic_grids, great_circle_dist, & - mid_pt_sphere, spherical_angle, & + use constants_mod, only: grav, omega, pi=>pi_8, cnst_radius=>radius, small_fac + use fms_mod, only: mpp_clock_id, mpp_clock_begin, mpp_clock_end, CLOCK_ROUTINE, clock_flag_default + use fv_arrays_mod, only: fv_atmos_type, fv_grid_type, fv_grid_bounds_type, R_GRID + use fv_grid_utils_mod, only: gnomonic_grids, great_circle_dist, & + mid_pt_sphere, spherical_angle, & cell_center2, get_area, inner_prod, fill_ghost, & - direct_transform, cube_transform, dist2side_latlon, & - spherical_linear_interpolation, big_number - use fv_timing_mod, only: timing_on, timing_off - use fv_mp_mod, only: is_master, fill_corners, XDir, YDir - use fv_mp_mod, only: mp_bcst, mp_reduce_max, mp_stop, grids_master_procs + direct_transform, cube_transform, dist2side_latlon, & + spherical_linear_interpolation, big_number + use fv_timing_mod, only: timing_on, timing_off + use fv_mp_mod, only: is_master, fill_corners, XDir, YDir + use fv_mp_mod, only: mp_bcst, mp_reduce_max, mp_stop, grids_master_procs use sorted_index_mod, only: sorted_inta, sorted_intb use mpp_mod, only: mpp_error, FATAL, get_unit, mpp_chksum, mpp_pe, stdout, & mpp_send, mpp_recv, mpp_sync_self, EVENT_RECV, mpp_npes, & @@ -134,7 +135,7 @@ module fv_grid_tools_mod mpp_get_compute_domains, mpp_global_field, & mpp_get_data_domain, mpp_get_compute_domain, & mpp_get_global_domain, mpp_global_sum, mpp_global_max, mpp_global_min - use mpp_domains_mod, only: domain2d + use mpp_domains_mod, only: domain2d use mpp_parameter_mod, only: AGRID_PARAM=>AGRID, & DGRID_NE_PARAM=>DGRID_NE, & @@ -147,7 +148,7 @@ module fv_grid_tools_mod use fms2_io_mod, only: file_exists, variable_exists, open_file, read_data, & get_global_attribute, get_variable_attribute, & close_file, get_mosaic_tile_grid, FmsNetcdfFile_t - use mosaic_mod, only : get_mosaic_ntiles + use mosaic_mod, only: get_mosaic_ntiles implicit none private @@ -155,9 +156,9 @@ module fv_grid_tools_mod real(kind=R_GRID), parameter:: radius = cnst_radius - real(kind=R_GRID) , parameter:: todeg = 180.0d0/pi !< convert to degrees - real(kind=R_GRID) , parameter:: torad = pi/180.0d0 !< convert to radians - real(kind=R_GRID) , parameter:: missing = 1.d25 + real(kind=R_GRID), parameter:: todeg = 180.0d0/pi !< convert to degrees + real(kind=R_GRID), parameter:: torad = pi/180.0d0 !< convert to radians + real(kind=R_GRID), parameter:: missing = 1.d25 real(kind=R_GRID) :: csFac @@ -601,6 +602,33 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, integer :: istart, iend, jstart, jend integer :: isection_s, isection_e, jsection_s, jsection_e + ! Setup timing variables + + logical, save :: first_time = .true. + integer, save :: id_timer1, id_timer2, id_timer3, id_timer3a, id_timer4, id_timer5, id_timer6, id_timer7, id_timer8 + logical :: use_timer = .false. ! Set to True for detailed performance profiling + logical :: debug_log = .false. 
+ integer :: this_pe + + this_pe = mpp_pe() + + if (first_time) then + if (use_timer) then + id_timer1 = mpp_clock_id ('init_grid Step 1', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer2 = mpp_clock_id ('init_grid Step 2', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer3 = mpp_clock_id ('init_grid Step 3', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer3a = mpp_clock_id ('init_grid Step 3a', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer4 = mpp_clock_id ('init_grid Step 4', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer5 = mpp_clock_id ('init_grid Step 5', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer6 = mpp_clock_id ('init_grid Step 6', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer7 = mpp_clock_id ('init_grid Step 7', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer8 = mpp_clock_id ('init_grid Step 8', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + end if + first_time = .false. + end if + + if (use_timer) call mpp_clock_begin (id_timer1) + is = Atm%bd%is ie = Atm%bd%ie js = Atm%bd%js @@ -649,8 +677,10 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, e2 => Atm%gridstruct%e2 if (Atm%neststruct%nested .or. ANY(Atm%neststruct%child_grids)) then + if (debug_log) print '("[INFO] WDR grid_global => Atm%grid_global in init_grid fv_grid_tools.F90. npe=",I0)', this_pe grid_global => Atm%grid_global else if( trim(grid_file) .EQ. 'Inline') then + if (debug_log) print '("[INFO] WDR inline, allocating grid_global in init_grid fv_grid_tools.F90. npe=",I0)', this_pe allocate(grid_global(1-ng:npx +ng,1-ng:npy +ng,ndims,1:nregions)) endif @@ -688,7 +718,11 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, endif endif + if (use_timer) call mpp_clock_end (id_timer1) + if (Atm%flagstruct%grid_type>3) then + if (use_timer) call mpp_clock_begin (id_timer2) + if (Atm%flagstruct%grid_type == 4) then call setup_cartesian(npx, npy, Atm%flagstruct%dx_const, Atm%flagstruct%dy_const, & Atm%flagstruct%deglat, Atm%bd) @@ -697,13 +731,17 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, else call mpp_error(FATAL, 'init_grid: unsupported grid type') endif + if (use_timer) call mpp_clock_end (id_timer2) + else + if (use_timer) call mpp_clock_begin (id_timer3) cubed_sphere = .true. if (Atm%neststruct%nested) then !Read grid if it exists + if (use_timer) call mpp_clock_begin (id_timer3a) if (Atm%flagstruct%grid_type < 0) then !Note that read_grid only reads in grid corners. Will still need to compute all other grid metrics. !NOTE: cannot currently read in mosaic for both coarse and nested grids simultaneously @@ -711,6 +749,7 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, endif ! still need to set up weights call setup_aligned_nest(Atm) + if (use_timer) call mpp_clock_end (id_timer3a) else if(trim(grid_file) .NE. 'Inline' .or. Atm%flagstruct%grid_type < 0) then @@ -945,6 +984,8 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, end if !if nested + if (use_timer) call mpp_clock_end (id_timer3) + if (use_timer) call mpp_clock_begin (id_timer4) do j=jsd,jed do i=isd+1,ied @@ -972,6 +1013,9 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, dyc(i,jed+1) = dyc(i,jed) end do + if (use_timer) call mpp_clock_end (id_timer4) + if (use_timer) call mpp_clock_begin (id_timer5) + if ( Atm%flagstruct%molecular_diffusion ) then ! 
dx6, dy6 do j=jsd,jed+1 @@ -1050,7 +1094,6 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, endif ! MOLECULAR_DIFFUSION - if( .not. stretched_grid ) & call sorted_intb(isd, ied, jsd, jed, is, ie, js, je, npx, npy, & cubed_sphere, agrid, iintb, jintb) @@ -1058,9 +1101,12 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, call grid_area( npx, npy, ndims, nregions, Atm%gridstruct%bounded_domain, Atm%gridstruct, Atm%domain, Atm%bd ) ! stretched_grid = .false. + if (use_timer) call mpp_clock_end (id_timer5) + !---------------------------------- ! Compute area_c, rarea_c, dxc, dyc !---------------------------------- + if (use_timer) call mpp_clock_begin (id_timer6) if ( .not. stretched_grid .and. (.not. (Atm%gridstruct%bounded_domain))) then ! For symmetrical grids: if ( is==1 ) then @@ -1158,6 +1204,8 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, endif endif !----------------- + if (use_timer) call mpp_clock_end (id_timer6) + if (use_timer) call mpp_clock_begin (id_timer7) call mpp_update_domains( dxc, dyc, Atm%domain, flags=SCALAR_PAIR, & gridtype=CGRID_NE_PARAM, complete=.true.) @@ -1237,6 +1285,9 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, enddo enddo + if (use_timer) call mpp_clock_end (id_timer7) + if (use_timer) call mpp_clock_begin (id_timer8) + if ( Atm%flagstruct%molecular_diffusion ) then do j=jsd,jed+1 do i=isd,ied @@ -1410,6 +1461,8 @@ subroutine init_grid(Atm, grid_name, grid_file, npx, npy, npz, ndims, nregions, nullify(domain) + if (use_timer) call mpp_clock_end (id_timer8) + contains subroutine setup_cartesian(npx, npy, dx_const, dy_const, deglat, bd) @@ -1480,6 +1533,81 @@ subroutine setup_cartesian(npx, npy, dx_const, dy_const, deglat, bd) end subroutine setup_cartesian + + ! Subroutine to be used by setup_aligned_nest to configure the nest grid -- either the entire grid, or just the leading edge + ! based on the input dimensions in range_x and range_y. Algorithm copied from setup_aligned_nest. + subroutine compute_nest_points(p_grid, p_ind, out_grid, refinement, ioffset, joffset, range_x, range_y, isg, ieg, jsg, jeg) + real(kind=R_GRID), allocatable, intent(in) :: p_grid(:,:,:) + !integer, intent(inout) :: p_ind(:,:,:) + integer, intent(inout) :: p_ind(1-ng:npx +ng,1-ng:npy +ng,4) + real(kind=R_GRID), allocatable, intent(inout) :: out_grid(:,:,:,:) + integer, intent(in) :: refinement, ioffset, joffset + integer, intent(in) :: range_x(2), range_y(2) + integer, intent(in) :: isg, ieg, jsg, jeg + + real(kind=R_GRID), dimension(2) :: q1, q2 + integer :: i, j, ic, jc, imod, jmod + integer :: this_pe + + ! Need isg, ieg, jsg, jeg + + this_pe = mpp_pe() + + if (debug_log) print '("[INFO] Filling out_grid(",I0,"-",I0,",",I0,"-",I0,",1-2,1) in compute_nest_points fv_grid_tools.F90. npe=",I0)', range_x(1), range_x(2), range_y(1), range_y(2), this_pe + + do j=range_y(1), range_y(2) + jc = joffset + (j-1)/refinement !int( real(j-1) / real(refinement) ) + jmod = mod(j-1,refinement) + if (j-1 < 0 .and. jmod /= 0) jc = jc - 1 + if (jmod < 0) jmod = jmod + refinement + + do i=range_x(1), range_x(2) + ic = ioffset + (i-1)/refinement !int( real(i-1) / real(refinement) ) + imod = mod(i-1,refinement) + if (i-1 < 0 .and. imod /= 0) ic = ic - 1 + if (imod < 0) imod = imod + refinement + + if (ic+1 > ieg+1 .or. ic < isg .or. jc+1 > jeg+1 .or. 
jc < jsg) then + print*, 'p_grid:', i, j, ' OUT OF BOUNDS' + print*, ic, jc + print*, isg, ieg, jsg, jeg + print*, imod, jmod + end if + + if (jmod == 0) then + q1 = p_grid(ic, jc, 1:2) + q2 = p_grid(ic+1,jc,1:2) + else + call spherical_linear_interpolation( real(jmod,kind=R_GRID)/real(refinement,kind=R_GRID), & + p_grid(ic, jc, 1:2), p_grid(ic, jc+1, 1:2), q1) + call spherical_linear_interpolation( real(jmod,kind=R_GRID)/real(refinement,kind=R_GRID), & + p_grid(ic+1, jc, 1:2), p_grid(ic+1, jc+1, 1:2), q2) + end if + + if (imod == 0) then + out_grid(i,j,:,1) = q1 + else + call spherical_linear_interpolation( real(imod,kind=R_GRID)/real(refinement,kind=R_GRID), & + q1,q2,out_grid(i,j,:,1)) + end if + + !SW coarse-grid index; assumes grid does + !not overlie other cube panels. (These indices + !are also for the corners and thus need modification + !to be used for cell-centered and edge- + !centered variables; see below) + p_ind(i,j,1) = ic + p_ind(i,j,2) = jc + p_ind(i,j,3) = imod + p_ind(i,j,4) = jmod + + if (out_grid(i,j,1,1) > 2.*pi) out_grid(i,j,1,1) = out_grid(i,j,1,1) - 2.*pi + if (out_grid(i,j,1,1) < 0.) out_grid(i,j,1,1) = out_grid(i,j,1,1) + 2.*pi + + end do + end do + end subroutine compute_nest_points + subroutine setup_orthogonal_grid(npx, npy, bd, grid_file) type(fv_grid_bounds_type), intent(IN) :: bd character(len=*), intent(IN) :: grid_file @@ -1826,12 +1954,15 @@ subroutine setup_aligned_nest(Atm) integer :: isg, ieg, jsg, jeg integer :: ic, jc, imod, jmod - - real(kind=R_GRID), allocatable, dimension(:,:,:) :: p_grid_u, p_grid_v, pa_grid, p_grid, c_grid_u, c_grid_v + ! Hold these between executions if moving nest + real(kind=R_GRID), allocatable, dimension(:,:,:), save :: p_grid_u, p_grid_v, pa_grid, p_grid + real(kind=R_GRID), allocatable, dimension(:,:,:) :: c_grid_u, c_grid_v integer :: p_ind(1-ng:npx +ng,1-ng:npy +ng,4) !< First two entries along dim 3 are !! for the corner source indices; !! the last two are for the remainders + integer, allocatable, save :: shift_p_ind(:,:,:) + integer i,j,k, p real(kind=R_GRID) sum real(kind=R_GRID) :: dist1, dist2, dist3, dist4 @@ -1847,6 +1978,40 @@ subroutine setup_aligned_nest(Atm) integer :: is, ie, js, je integer :: isd, ied, jsd, jed + ! Setup timing variables + + logical, save :: first_time = .true. + integer, save :: id_timer1, id_timer2, id_timer3a, id_timer3b, id_timer3c, id_timer3d, id_timer4, id_timer5, id_timer6, id_timer7, id_timer8 + integer, save :: prev_ioffset, prev_joffset ! not pointers, because we want to save them between runs of this subroutine + integer, save :: move_step + integer :: delta_i_c, delta_j_c + integer :: range_x(2), range_y(2) + + real(kind=R_GRID), allocatable, dimension(:,:,:,:) :: out_grid + + logical :: moving_nest = .true. ! TODO set this from the Atm structure + + if (first_time .and. 
use_timer) then + id_timer1 = mpp_clock_id ('setup_aligned_nest Step 1', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer2 = mpp_clock_id ('setup_aligned_nest Step 2 sph_lin_interp', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer3a = mpp_clock_id ('setup_aligned_nest Step 3a mid_pt_sphere', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer3b = mpp_clock_id ('setup_aligned_nest Step 3b mid_pt_sphere', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer3c = mpp_clock_id ('setup_aligned_nest Step 3c cell_ctr', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer3d = mpp_clock_id ('setup_aligned_nest Step 3d', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer4 = mpp_clock_id ('setup_aligned_nest Step 4', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer5 = mpp_clock_id ('setup_aligned_nest Step 5', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer6 = mpp_clock_id ('setup_aligned_nest Step 6', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer7 = mpp_clock_id ('setup_aligned_nest Step 7', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + id_timer8 = mpp_clock_id ('setup_aligned_nest Step 8', flags = clock_flag_default, grain=CLOCK_ROUTINE ) + + prev_ioffset = Atm%neststruct%ioffset + prev_joffset = Atm%neststruct%joffset + + !first_time = .false. + end if + + if (use_timer) call mpp_clock_begin (id_timer1) + is = Atm%bd%is ie = Atm%bd%ie js = Atm%bd%js @@ -1874,30 +2039,136 @@ subroutine setup_aligned_nest(Atm) ind_b => Atm%neststruct%ind_b wt_b => Atm%neststruct%wt_b + ! For moving nest + if (first_time) then + delta_i_c = 0 + delta_j_c = 0 + prev_ioffset = ioffset + prev_joffset = joffset + else + delta_i_c = ioffset - prev_ioffset + delta_j_c = joffset - prev_joffset + end if + + if (debug_log) print '("[INFO] WDR setup_aligned_nest fv_grid_tools.F90. npe=",I0," delta_i_c=",I0," delta_j_c=",I0," ioffset=",I0," joffset=",I0)', this_pe, delta_i_c, delta_j_c, ioffset, joffset + call mpp_get_data_domain( Atm%parent_grid%domain, & isd_p, ied_p, jsd_p, jed_p ) call mpp_get_global_domain( Atm%parent_grid%domain, & isg, ieg, jsg, jeg) - allocate(p_grid_u(isg:ieg ,jsg:jeg+1,1:2)) - allocate(p_grid_v(isg:ieg+1,jsg:jeg ,1:2)) - allocate(pa_grid(isg:ieg,jsg:jeg ,1:2)) p_ind = -1000000000 - allocate(p_grid( isg-ng:ieg+1+ng, jsg-ng:jeg+1+ng,1:2) ) - p_grid = 1.e25 + if (first_time) then + !! Initial allocation of p_grid_u, pgrid_v, pa_grid, and p_grid + !! Save these parent grids between executions if using moving nest. + + allocate(p_grid_u(isg:ieg ,jsg:jeg+1,1:2)) + allocate(p_grid_v(isg:ieg+1,jsg:jeg ,1:2)) + allocate(pa_grid(isg:ieg,jsg:jeg ,1:2)) + + allocate(p_grid( isg-ng:ieg+1+ng, jsg-ng:jeg+1+ng,1:2) ) + p_grid = 1.e25 + + end if + + ! Note this will be called during model initialization, then not repeated once moving nest functionality is used + ! Moving nest will rely on the saved data in p_grid, which does not change (as long as nest remains on same parent tile). 
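A minimal standalone sketch (not part of the patch) of the allocate-once/SAVE pattern the comment above describes: the parent-grid array is allocated and filled on the first call and simply reused on later calls after a nest move. Names, sizes and the fill are invented stand-ins for the real receive/broadcast of p_grid.

program save_pattern_demo
  implicit none
  call setup_parent_grid(10, 10)   ! first call: allocates and fills
  call setup_parent_grid(10, 10)   ! later call (e.g. after a nest move): reuses the SAVEd array
contains
  subroutine setup_parent_grid(nx, ny)
    integer, intent(in) :: nx, ny
    real,    allocatable, save :: p_grid_demo(:,:)   ! persists between calls
    logical, save :: first_time = .true.
    if (first_time) then
       allocate(p_grid_demo(nx, ny))   ! expensive one-time setup; stands in for
       p_grid_demo = 0.0               ! receiving/broadcasting the parent grid
       first_time  = .false.
    else
       ! nothing to do: the parent grid was kept from the first call
    end if
  end subroutine setup_parent_grid
end program save_pattern_demo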
+ if (first_time) then !Need to RECEIVE parent grid_global; - !matching mpp_send of grid_global from parent grid is in init_grid() - if( is_master() ) then + !matching mpp_send of grid_global from parent grid is in init_grid() + if( is_master() ) then - call mpp_recv(p_grid( isg-ng:ieg+1+ng, jsg-ng:jeg+1+ng,1:2), size(p_grid( isg-ng:ieg+1+ng, jsg-ng:jeg+1+ng,1:2)), & - Atm%parent_grid%pelist(1)) - endif + call mpp_recv(p_grid( isg-ng:ieg+1+ng, jsg-ng:jeg+1+ng,1:2), size(p_grid( isg-ng:ieg+1+ng, jsg-ng:jeg+1+ng,1:2)), & + Atm%parent_grid%pelist(1)) + + endif + + call mpp_broadcast( p_grid(isg-ng:ieg+ng+1, jsg-ng:jeg+ng+1, :), & + (ieg-isg+2+2*ng)*(jeg-jsg+2+2*ng)*ndims, mpp_root_pe() ) - call mpp_broadcast( p_grid(isg-ng:ieg+ng+1, jsg-ng:jeg+ng+1, :), & - (ieg-isg+2+2*ng)*(jeg-jsg+2+2*ng)*ndims, mpp_root_pe() ) + end if + + if (use_timer) call mpp_clock_end (id_timer1) + if (use_timer) call mpp_clock_begin (id_timer2) + + !! Setup full grid for nest; confusingly called grid_global. Each nest PE is computing the grid lat/lons for the entire nest + !! not just its section. + !! INPUTS: ioffset, joffset, p_grid + !! OUTPUTS: grid_global + + ! Begin calculate shifted version of global_grid + + if (first_time) allocate(shift_p_ind(1-ng:npx +ng,1-ng:npy +ng,4)) ! TODO need to deallocate this somewhere + + if (.not. first_time) then + + ! Make copies of grid_global and p_ind to validate that code is correct + allocate( out_grid( lbound(grid_global,1):ubound(grid_global,1), & + lbound(grid_global,2):ubound(grid_global,2), & + lbound(grid_global,3):ubound(grid_global,3), & + lbound(grid_global,4):ubound(grid_global,4) ) ) + + if (debug_log) print '("[INFO] WDR bounds grid_global setup_nest_grid npe=",I0," grid_global(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', this_pe, lbound(grid_global,1), ubound(grid_global,1), & + lbound(grid_global,2), ubound(grid_global,2), & + lbound(grid_global,3), ubound(grid_global,3), & + lbound(grid_global,4), ubound(grid_global,4) + + if (debug_log) print '("[INFO] WDR bounds out_grid setup_nest_grid npe=",I0," out_grid(",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,",",I0,"-",I0,")")', this_pe, lbound(out_grid,1), ubound(out_grid,1), & + lbound(out_grid,2), ubound(out_grid,2), & + lbound(out_grid,3), ubound(out_grid,3), & + lbound(out_grid,4), ubound(out_grid,4) + + out_grid = grid_global + + if ( delta_i_c .ne. 0 ) then + if (debug_log) print '("[INFO] setup_nest_grid EOSHIFT delta_i_c=",I0," start. npe=",I0)', delta_i_c, this_pe + out_grid = eoshift(out_grid, refinement * delta_i_c, DIM=1) + end if + + if (delta_j_c .ne. 0) then + if (debug_log) print '("[INFO] setup_nest_grid EOSHIFT delta_j_c=",I0," start. npe=",I0)', delta_j_c, this_pe + out_grid = eoshift(out_grid, refinement * delta_j_c, DIM=2) + end if + + shift_p_ind(:,:,1) = shift_p_ind(:,:,1) + delta_i_c + shift_p_ind(:,:,2) = shift_p_ind(:,:,2) + delta_j_c + + ! Compute nest points on any of the halo edges that are empty. This could be 1 leading edge for N,S,E, or W motion + ! or two leading edges for NW, NE, SW, or SE motion. + range_y(1) = 1-ng + range_y(2) = npy+ng + if (delta_i_c .lt. 0) then + range_x(1) = 1-ng + range_x(2) = 0 + call compute_nest_points(p_grid, shift_p_ind, out_grid, refinement, ioffset, joffset, range_x, range_y, isg, ieg, jsg, jeg) + elseif (delta_i_c .gt. 
0) then + range_x(1) = npx + range_x(2) = npx+ng + call compute_nest_points(p_grid, shift_p_ind, out_grid, refinement, ioffset, joffset, range_x, range_y, isg, ieg, jsg, jeg) + end if + + range_x(1) = 1-ng + range_x(2) = npx+ng + if (delta_j_c .lt. 0) then + range_y(1) = 1-ng + range_y(2) = 0 + call compute_nest_points(p_grid, shift_p_ind, out_grid, refinement, ioffset, joffset, range_x, range_y, isg, ieg, jsg, jeg) + elseif (delta_j_c .gt. 0) then + range_y(1) = npy + range_y(2) = npy+ng + call compute_nest_points(p_grid, shift_p_ind, out_grid, refinement, ioffset, joffset, range_x, range_y, isg, ieg, jsg, jeg) + end if + + end if + + ! End calculate shifted version of global_grid + ! Validate that they match + + if (debug_log) print '("[INFO] Filling grid_global(",I0,"-",I0,",",I0,"-",I0,",1-2,1) in setup_aligned_grid fv_grid_tools.F90. npe=",I0)', 1-ng, npx+ng, 1-ng, npy+ng, this_pe + if (first_time) then ! Generate grid global and parent_grid indices ! Grid global only needed in case we create a new child nest on-the-fly? !TODO If reading in grid from disk then simply mpp_GATHER grid global from local grid arrays @@ -1954,27 +2225,111 @@ subroutine setup_aligned_nest(Atm) end do end do + else + p_ind = shift_p_ind + grid_global = out_grid + end if + + if (use_timer) call mpp_clock_end (id_timer2) + + ! Move this elsewhere later. + if (.not. first_time) deallocate(out_grid) - ! Set up parent grids for interpolation purposes - do j=jsg,jeg+1 - do i=isg,ieg - call mid_pt_sphere(p_grid(i, j,1:2), p_grid(i+1, j,1:2), p_grid_u(i,j,:)) - !call mid_pt_sphere(p_grid(i, j,1:2), p_grid(i, j+1,1:2), p_grid_u(i,j,:)) + if (first_time) shift_p_ind = p_ind + + !if (.not. first_time) then + if (.false.) then + ! Do fully recomputed p_ind and grid_global match with shifted grids? + do i=1-ng,npx+ng + do j=1-ng,npy+ng + do k=1,4 + if (p_ind(i,j,k) .ne. shift_p_ind(i,j,k)) then + print '("[ERROR] WDR setup_nest_grid MISMATCH p_ind(",I0,",",I0,",",I0,")=",I0," shift_p_ind(",I0,",",I0,",",I0,")=",I0," npe=",I0, " move_step=",I0," ")', & + i, j, k, p_ind(i,j,k), i, j, k, shift_p_ind(i,j,k), this_pe, move_step + end if + end do + end do end do - end do - do j=jsg,jeg - do i=isg,ieg+1 - call mid_pt_sphere(p_grid(i, j,1:2), p_grid(i, j+1,1:2), p_grid_v(i,j,:)) - !call mid_pt_sphere(p_grid(i, j,1:2), p_grid(i+1, j,1:2), p_grid_v(i,j,:)) + + do i=1-ng,npx+ng + do j=1-ng,npy+ng + if (abs(grid_global(i,j,1,1) - out_grid(i,j,1,1)) .gt. 0.01) then + print '("[ERROR] WDR setup_nest_grid MISMATCH grid_global(",I0,",",I0,",",I0,",1)=",F18.12," out_grid(",I0,",",I0,",",I0,",1)=",F18.12," npe=",I0," move_step=",I0," ")', & + i, j, 1, grid_global(i,j,1,1)*180.0/pi, i, j, 1, out_grid(i,j,1,1)*180.0/pi, this_pe, move_step + end if + if (abs(grid_global(i,j,2,1) - out_grid(i,j,2,1)) .gt. 0.01) then + print '("[ERROR] WDR setup_nest_grid MISMATCH grid_global(",I0,",",I0,",",I0,",1)=",F18.12," out_grid(",I0,",",I0,",",I0,",1)=",F18.12," npe=",I0, " move_step=",I0," ")', & + i, j, 2, grid_global(i,j,2,1)*180.0/pi, i, j, 2, out_grid(i,j,2,1)*180.0/pi, this_pe, move_step + end if + end do end do - end do - do j=jsg,jeg - do i=isg,ieg - call cell_center2(p_grid(i,j, 1:2), p_grid(i+1,j, 1:2), & - p_grid(i,j+1,1:2), p_grid(i+1,j+1,1:2), & - pa_grid(i,j,1:2) ) + + ! Move this elsewhere later. + deallocate(out_grid) + + end if + + if (first_time) then + ! These are the various staggers of the parent grid + ! They do not vary if the nest moves. Safe to preserve them between + ! calls to this routine to save processing time. 
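The shifted-grid path above reuses the stored grid_global after a nest move: the saved nest coordinates are slid by refinement*delta_i_c (and refinement*delta_j_c) grid points with eoshift, and only the newly exposed leading-edge rows or columns are refilled from the parent p_grid via compute_nest_points. A small standalone sketch of the eoshift behaviour this relies on (toy sizes and values only, not part of the patch):

! Toy demonstration of the eoshift-based reuse of a stored grid after a
! nest move; the 1-D array stands in for one row of grid_global.
! Sizes and values are illustrative only.
program demo_nest_shift
  implicit none
  integer, parameter :: refinement = 3   ! nest refinement ratio (example value)
  integer, parameter :: n = 12
  integer :: delta_i_c                   ! nest motion in parent cells
  real    :: row(n)
  integer :: i

  row = (/ (real(i), i = 1, n) /)        ! pretend these are stored longitudes

  ! A move of +1 parent cell shifts the nest grid by refinement points;
  ! eoshift moves data toward lower indices and zero-fills the vacated
  ! trailing slots, which the model then refills from the parent grid
  ! (compute_nest_points) instead of recomputing the whole grid.
  delta_i_c = 1
  row = eoshift(row, refinement*delta_i_c)

  print '(12F6.1)', row   ! 4.0 ... 12.0 followed by three 0.0 "to be refilled" slots
end program demo_nest_shift

For negative deltas the leading (low-index) band is vacated instead, which is why the code above selects range_x/range_y on the low or high side depending on the sign of delta_i_c and delta_j_c.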
+ + if (use_timer) call mpp_clock_begin (id_timer3a) + + !! Setup parent staggered grids + !! INPUTS: p_grid + !! OUTPUTS: p_grid_u, p_grid_v, pa_grid + + ! Set up parent grids for interpolation purposes + do j=jsg,jeg+1 + do i=isg,ieg + call mid_pt_sphere(p_grid(i, j,1:2), p_grid(i+1, j,1:2), p_grid_u(i,j,:)) + !call mid_pt_sphere(p_grid(i, j,1:2), p_grid(i, j+1,1:2), p_grid_u(i,j,:)) + end do end do - end do + + if (use_timer) call mpp_clock_end (id_timer3a) + if (use_timer) call mpp_clock_begin (id_timer3b) + + do j=jsg,jeg + do i=isg,ieg+1 + call mid_pt_sphere(p_grid(i, j,1:2), p_grid(i, j+1,1:2), p_grid_v(i,j,:)) + !call mid_pt_sphere(p_grid(i, j,1:2), p_grid(i+1, j,1:2), p_grid_v(i,j,:)) + end do + end do + if (use_timer) call mpp_clock_end (id_timer3b) + if (use_timer) call mpp_clock_begin (id_timer3c) + + do j=jsg,jeg + do i=isg,ieg + call cell_center2(p_grid(i,j, 1:2), p_grid(i+1,j, 1:2), & + p_grid(i,j+1,1:2), p_grid(i+1,j+1,1:2), & + pa_grid(i,j,1:2) ) + end do + end do + +!!$ !TODO: can we just send around ONE grid and re-calculate +!!$ ! staggered grids from that?? +!!$ call mpp_broadcast(grid_global(1-ng:npx+ng, 1-ng:npy+ng ,:,1), & +!!$ ((npx+ng)-(1-ng)+1)*((npy+ng)-(1-ng)+1)*ndims, mpp_root_pe() ) +!!$ call mpp_broadcast( p_ind(1-ng:npx+ng, 1-ng:npy+ng ,1:4), & +!!$ ((npx+ng)-(1-ng)+1)*((npy+ng)-(1-ng)+1)*4, mpp_root_pe() ) +!!$ call mpp_broadcast( pa_grid( isg:ieg , jsg:jeg , :), & +!!$ ((ieg-isg+1))*(jeg-jsg+1)*ndims, mpp_root_pe()) +!!$ call mpp_broadcast( p_grid_u( isg:ieg , jsg:jeg+1, :), & +!!$ (ieg-isg+1)*(jeg-jsg+2)*ndims, mpp_root_pe()) +!!$ call mpp_broadcast( p_grid_v( isg:ieg+1, jsg:jeg , :), & +!!$ (ieg-isg+2)*(jeg-jsg+1)*ndims, mpp_root_pe()) + + if (use_timer) call mpp_clock_end (id_timer3c) + + end if + + if (use_timer) call mpp_clock_begin (id_timer3d) + + !! Setup "grid" -- what is this doing?? + !! INPUTS: grid_global + !! OUTPUTS: grid if (Atm%flagstruct%grid_type >= 0) then do n=1,ndims @@ -1986,6 +2341,9 @@ subroutine setup_aligned_nest(Atm) enddo endif + if (use_timer) call mpp_clock_end (id_timer3d) + if (use_timer) call mpp_clock_begin (id_timer4) + ind_h = -999999999 do j=jsd,jed do i=isd,ied @@ -2096,6 +2454,9 @@ subroutine setup_aligned_nest(Atm) call mpp_update_domains( agrid, Atm%domain, position=CENTER, complete=.true. ) + if (use_timer) call mpp_clock_end (id_timer4) + if (use_timer) call mpp_clock_begin (id_timer5) + ! Compute dx do j=jsd,jed+1 do i=isd,ied @@ -2135,9 +2496,10 @@ subroutine setup_aligned_nest(Atm) end do end do + if (.not. moving_nest) deallocate(pa_grid) - - deallocate(pa_grid) + if (use_timer) call mpp_clock_end (id_timer5) + if (use_timer) call mpp_clock_begin (id_timer6) do j=jsd,jed+1 do i=isd,ied+1 @@ -2161,7 +2523,7 @@ subroutine setup_aligned_nest(Atm) enddo enddo - deallocate(p_grid) + if (.not. moving_nest) deallocate(p_grid) allocate(c_grid_u(isd:ied+1,jsd:jed,2)) @@ -2192,6 +2554,8 @@ subroutine setup_aligned_nest(Atm) end do end do + if (use_timer) call mpp_clock_end (id_timer6) + if (use_timer) call mpp_clock_begin (id_timer7) !Compute interpolation weights. (Recall that the weights are defined with respect to a d-grid) @@ -2244,6 +2608,9 @@ subroutine setup_aligned_nest(Atm) end do !v weights + if (use_timer) call mpp_clock_end (id_timer7) + if (use_timer) call mpp_clock_begin (id_timer8) + do j=jsd,jed do i=isd,ied+1 @@ -2289,9 +2656,10 @@ subroutine setup_aligned_nest(Atm) deallocate(c_grid_u) deallocate(c_grid_v) + if (.not. moving_nest) deallocate(p_grid_u) + if (.not. 
moving_nest) deallocate(p_grid_v) - deallocate(p_grid_u) - deallocate(p_grid_v) + if (use_timer) call mpp_clock_end (id_timer8) if (is_master()) then if (Atm%neststruct%nested) then @@ -2319,6 +2687,12 @@ subroutine setup_aligned_nest(Atm) endif end if + ! Finalize variables in case moving nest calls this again + first_time = .false. + move_step = move_step + 1 + prev_ioffset = ioffset + prev_joffset = joffset + end subroutine setup_aligned_nest subroutine setup_latlon(deglon_start,deglon_stop, deglat_start, deglat_stop, bd ) diff --git a/tools/fv_mp_mod.F90 b/tools/fv_mp_mod.F90 index 497964697..b6f279e31 100644 --- a/tools/fv_mp_mod.F90 +++ b/tools/fv_mp_mod.F90 @@ -138,6 +138,7 @@ module fv_mp_mod public mp_start, mp_assign_gid, mp_barrier, mp_stop!, npes public domain_decomp, mp_bcst, mp_reduce_max, mp_reduce_sum, mp_gather public mp_reduce_min + public mp_reduce_minval, mp_reduce_maxval, mp_reduce_minloc, mp_reduce_maxloc public fill_corners, XDir, YDir public switch_current_domain, switch_current_Atm, broadcast_domains public is_master, setup_master @@ -213,6 +214,38 @@ module fv_mp_mod MODULE PROCEDURE mp_reduce_max_i4 END INTERFACE + !> The interface 'mp_reduce_minval' contains routines that call SPMD_REDUCE. + !! The routines compute the minima of values and place the + !! absolute minimum value in a result together with the index location. + INTERFACE mp_reduce_minval + MODULE PROCEDURE mp_reduce_minval_r4 + MODULE PROCEDURE mp_reduce_minval_r8 + END INTERFACE + + !> The interface 'mp_reduce_maxval' contains routines that call SPMD_REDUCE. + !! The routines compute the maxima of values and place the + !! absolute maximum value in a result together with the index location. + INTERFACE mp_reduce_maxval + MODULE PROCEDURE mp_reduce_maxval_r4 + MODULE PROCEDURE mp_reduce_maxval_r8 + END INTERFACE + + !> The interface 'mp_reduce_minloc' contains routines that call SPMD_REDUCE. + !! The routines compute the minima of values and place the + !! absolute minimum value in a result together with the index and lat/lon/lev location. + + INTERFACE mp_reduce_minloc + MODULE PROCEDURE mp_reduce_minloc_r4 + MODULE PROCEDURE mp_reduce_minloc_r8 + END INTERFACE + + !> The interface 'mp_reduce_maxloc' contains routines that call SPMD_REDUCE. + !! The routines compute the maxima of values and place the + !! absolute minimum value in a result together with the index and lat/lon/lev location. + INTERFACE mp_reduce_maxloc + MODULE PROCEDURE mp_reduce_maxloc_r4 + MODULE PROCEDURE mp_reduce_maxloc_r8 + END INTERFACE !> The interface 'mp_reduce_sum' contains routines that call SPMD_REDUCE. !! The routines compute the sums of values and place the @@ -1949,6 +1982,9 @@ subroutine mp_reduce_max_r4(mymax) mymax = gmax end subroutine mp_reduce_max_r4 +! +! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!------------------------------------------------------------------------------- !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! @@ -1966,7 +2002,135 @@ subroutine mp_reduce_max_r8(mymax) mymax = gmax end subroutine mp_reduce_max_r8 +! +! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!------------------------------------------------------------------------------- + +!------------------------------------------------------------------------------- +! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +! +! 
mp_reduce_maxval_r4 :: Call SPMD REDUCE_MAX +! + subroutine mp_reduce_maxval_r4(mymax, idex, jdex) + real(kind=4), intent(INOUT) :: mymax + integer, intent(INOUT) :: idex, jdex + + integer :: bcast(2), mrank + real(kind=4) :: inreduce(2), outreduce(2) + call MPI_COMM_RANK( commglobal, mrank, ierror ) + inreduce=(/mymax, real(mrank,4)/) + bcast=(/idex, jdex/) + call MPI_ALLREDUCE( inreduce, outreduce, 1, MPI_2REAL, MPI_MAXLOC, & + commglobal, ierror ) + mymax=outreduce(1) + mrank=outreduce(2) + call MPI_BCAST( bcast, 2, MPI_INTEGER, mrank, commglobal, ierror ) + idex=bcast(1) + jdex=bcast(2) + + end subroutine mp_reduce_maxval_r4 +! +! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!------------------------------------------------------------------------------- + +!------------------------------------------------------------------------------- +! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +! +! mp_reduce_maxval_r8 :: Call SPMD REDUCE_MAX +! + subroutine mp_reduce_maxval_r8(mymax, idex, jdex) + real(kind=8), intent(INOUT) :: mymax + integer, intent(INOUT) :: idex, jdex + + integer :: bcast(2), mrank + real(kind=8) :: inreduce(2), outreduce(2) + + call MPI_COMM_RANK( commglobal, mrank, ierror ) + inreduce=(/mymax, real(mrank,8)/) + bcast=(/idex, jdex/) + call MPI_ALLREDUCE( inreduce, outreduce, 1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, & + commglobal, ierror ) + mymax=outreduce(1) + mrank=outreduce(2) + call MPI_BCAST( bcast, 2, MPI_INTEGER, mrank, commglobal, ierror ) + idex=bcast(1) + jdex=bcast(2) + + end subroutine mp_reduce_maxval_r8 +! +! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!------------------------------------------------------------------------------- + +!------------------------------------------------------------------------------- +! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +! +! mp_reduce_maxloc_r4 :: Call SPMD REDUCE_MAX +! + subroutine mp_reduce_maxloc_r4(mymax, lat, lon, lev, idex, jdex) + real(kind=4), intent(INOUT) :: mymax + real(kind=4), intent(INOUT) :: lat, lon, lev + integer, intent(INOUT) :: idex, jdex + + integer :: mrank + real(kind=4) :: inreduce(2), outreduce(2), bcast(5) + + call MPI_COMM_RANK( commglobal, mrank, ierror ) + inreduce=(/mymax, real(mrank,4)/) + call MPI_ALLREDUCE( inreduce, outreduce, 1, MPI_2REAL, MPI_MAXLOC, & + commglobal, ierror ) + mymax=outreduce(1) + mrank=outreduce(2) + bcast=(/lat, lon, lev, real(idex,4), real(jdex,4)/) + call MPI_BCAST( bcast, 5, MPI_REAL, mrank, commglobal, ierror ) + lat=bcast(1) + lon=bcast(2) + lev=bcast(3) + idex=bcast(4) + jdex=bcast(5) + + end subroutine mp_reduce_maxloc_r4 +! +! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!------------------------------------------------------------------------------- + +!------------------------------------------------------------------------------- +! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +! +! mp_reduce_maxloc_r8 :: Call SPMD REDUCE_MAX +! 
+ subroutine mp_reduce_maxloc_r8(mymax, lat, lon, lev, idex, jdex) + real(kind=8), intent(INOUT) :: mymax + real(kind=8), intent(INOUT) :: lat, lon, lev + integer, intent(INOUT) :: idex, jdex + + integer :: mrank + real(kind=8) :: inreduce(2), outreduce(2), bcast(5) + + call MPI_COMM_RANK( commglobal, mrank, ierror ) + inreduce=(/mymax, real(mrank,8)/) + call MPI_ALLREDUCE( inreduce, outreduce, 1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, & + commglobal, ierror ) + mymax=outreduce(1) + mrank=outreduce(2) + bcast=(/lat, lon, lev, real(idex,8), real(jdex,8)/) + call MPI_BCAST( bcast, 5, MPI_DOUBLE_PRECISION, mrank, commglobal, ierror ) + lat=bcast(1) + lon=bcast(2) + lev=bcast(3) + idex=bcast(4) + jdex=bcast(5) + + end subroutine mp_reduce_maxloc_r8 +! +! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!------------------------------------------------------------------------------- + +!------------------------------------------------------------------------------- +! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +! +! mp_reduce_min_r4 :: Call SPMD REDUCE_MIN +! subroutine mp_reduce_min_r4(mymin) real(kind=4), intent(INOUT) :: mymin @@ -1978,7 +2142,15 @@ subroutine mp_reduce_min_r4(mymin) mymin = gmin end subroutine mp_reduce_min_r4 +! +! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!------------------------------------------------------------------------------- +!------------------------------------------------------------------------------- +! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +! +! mp_reduce_min_r8 :: Call SPMD REDUCE_MIN +! subroutine mp_reduce_min_r8(mymin) real(kind=8), intent(INOUT) :: mymin @@ -1994,6 +2166,126 @@ end subroutine mp_reduce_min_r8 ! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! !------------------------------------------------------------------------------- +!------------------------------------------------------------------------------- +! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +! +! mp_reduce_minval_r4 :: Call SPMD REDUCE_MIN +! + subroutine mp_reduce_minval_r4(mymin, idex, jdex) + real(kind=4), intent(INOUT) :: mymin + integer, intent(INOUT) :: idex, jdex + + integer :: bcast(2), mrank + real(kind=4) :: inreduce(2), outreduce(2) + + call MPI_COMM_RANK( commglobal, mrank, ierror ) + inreduce=(/mymin, real(mrank,4)/) + bcast=(/idex, jdex/) + call MPI_ALLREDUCE( inreduce, outreduce, 1, MPI_2REAL, MPI_MINLOC, & + commglobal, ierror ) + mymin=outreduce(1) + mrank=outreduce(2) + call MPI_BCAST( bcast, 2, MPI_INTEGER, mrank, commglobal, ierror ) + idex=bcast(1) + jdex=bcast(2) + + end subroutine mp_reduce_minval_r4 +! +! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!------------------------------------------------------------------------------- + +!------------------------------------------------------------------------------- +! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +! +! mp_reduce_minval_r8 :: Call SPMD REDUCE_MIN +! 
+ subroutine mp_reduce_minval_r8(mymin, idex, jdex) + real(kind=8), intent(INOUT) :: mymin + integer, intent(INOUT) :: idex, jdex + + integer :: bcast(2), mrank + real(kind=8) :: inreduce(2), outreduce(2) + + call MPI_COMM_RANK( commglobal, mrank, ierror ) + inreduce=(/mymin, real(mrank,8)/) + bcast=(/idex, jdex/) + call MPI_ALLREDUCE( inreduce, outreduce, 1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, & + commglobal, ierror ) + mymin=outreduce(1) + mrank=outreduce(2) + call MPI_BCAST( bcast, 2, MPI_INTEGER, mrank, commglobal, ierror ) + idex=bcast(1) + jdex=bcast(2) + + end subroutine mp_reduce_minval_r8 +! +! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!------------------------------------------------------------------------------- + +!------------------------------------------------------------------------------- +! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +! +! mp_reduce_minloc_r4 :: Call SPMD REDUCE_MIN +! + subroutine mp_reduce_minloc_r4(mymin, lat, lon, lev, idex, jdex) + real(kind=4), intent(INOUT) :: mymin + real(kind=4), intent(INOUT) :: lat, lon, lev + integer, intent(INOUT) :: idex, jdex + + integer :: mrank + real(kind=4) :: inreduce(2), outreduce(2), bcast(5) + + call MPI_COMM_RANK( commglobal, mrank, ierror ) + inreduce=(/mymin, real(mrank,4)/) + call MPI_ALLREDUCE( inreduce, outreduce, 1, MPI_2REAL, MPI_MINLOC, & + commglobal, ierror ) + mymin=outreduce(1) + mrank=outreduce(2) + bcast=(/lat, lon, lev, real(idex,4), real(jdex,4)/) + call MPI_BCAST( bcast, 5, MPI_REAL, mrank, commglobal, ierror ) + lat=bcast(1) + lon=bcast(2) + lev=bcast(3) + idex=bcast(4) + jdex=bcast(5) + + end subroutine mp_reduce_minloc_r4 +! +! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!------------------------------------------------------------------------------- + +!------------------------------------------------------------------------------- +! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! +! +! mp_reduce_minloc_r8 :: Call SPMD REDUCE_MIN +! + subroutine mp_reduce_minloc_r8(mymin, lat, lon, lev, idex, jdex) + real(kind=8), intent(INOUT) :: mymin + real(kind=8), intent(INOUT) :: lat, lon, lev + integer, intent(INOUT) :: idex, jdex + + integer :: mrank + real(kind=8) :: inreduce(2), outreduce(2), bcast(5) + + call MPI_COMM_RANK( commglobal, mrank, ierror ) + inreduce=(/mymin, real(mrank,8)/) + call MPI_ALLREDUCE( inreduce, outreduce, 1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, & + commglobal, ierror ) + mymin=outreduce(1) + mrank=outreduce(2) + bcast=(/lat, lon, lev, real(idex,8), real(jdex,8)/) + call MPI_BCAST( bcast, 5, MPI_DOUBLE_PRECISION, mrank, commglobal, ierror ) + lat=bcast(1) + lon=bcast(2) + lev=bcast(3) + idex=bcast(4) + jdex=bcast(5) + + end subroutine mp_reduce_minloc_r8 +! +! ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ! +!------------------------------------------------------------------------------- + !------------------------------------------------------------------------------- ! vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ! ! 
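The four new reductions pair an MPI_MAXLOC/MPI_MINLOC allreduce on a (value, rank) pair with a broadcast of the owning rank's index metadata (plus lat/lon/lev for the *loc variants), so every PE ends up holding the global extremum and its location. A minimal usage sketch, assuming fv_mp_mod has already been initialized via mp_start; the subroutine and array names (report_max_wind, wind10m, lat2d, lon2d) are illustrative and not part of this patch:

! Illustrative only: finds the global maximum 10 m wind speed and the
! (i,j) indices plus lat/lon of the grid cell that holds it.
subroutine report_max_wind(isc, iec, jsc, jec, wind10m, lat2d, lon2d)
  use fv_mp_mod, only: mp_reduce_maxloc, is_master
  implicit none
  integer, intent(in) :: isc, iec, jsc, jec
  real(kind=8), intent(in) :: wind10m(isc:iec,jsc:jec)
  real(kind=8), intent(in) :: lat2d(isc:iec,jsc:jec), lon2d(isc:iec,jsc:jec)

  integer :: ij(2), idex, jdex
  real(kind=8) :: wmax, wlat, wlon, wlev

  ! Local maximum and its indices on this PE
  ij   = maxloc(wind10m)
  idex = ij(1) + isc - 1
  jdex = ij(2) + jsc - 1
  wmax = wind10m(idex, jdex)
  wlat = lat2d(idex, jdex)
  wlon = lon2d(idex, jdex)
  wlev = 1.0d0                  ! single level in this example

  ! Global reduction: wmax becomes the global maximum; lat/lon/lev and
  ! idex/jdex are overwritten with the values from the PE that owns it.
  call mp_reduce_maxloc(wmax, wlat, wlon, wlev, idex, jdex)

  if (is_master()) write(*,'(A,F8.2,A,2I6)') 'global max wind ', wmax, ' at i,j ', idex, jdex
end subroutine report_max_wind

mp_reduce_maxval and mp_reduce_minval work the same way but carry only the (i,j) indices, without the lat/lon/lev payload.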
diff --git a/tools/fv_surf_map.F90 b/tools/fv_surf_map.F90
index eff6b8a19..e0a7bb66d 100644
--- a/tools/fv_surf_map.F90
+++ b/tools/fv_surf_map.F90
@@ -523,7 +523,7 @@ subroutine FV3_zs_filter (bd, isd, ied, jsd, jed, npx, npy, npx_global, &
 real(kind=R_GRID), intent(IN):: stretch_fac
 logical, intent(IN) :: bounded_domain
 real, intent(inout):: phis(isd:ied,jsd:jed)
-real, intent(inout):: oro(isd:ied,jsd:jed)
+real, intent(in):: oro(isd:ied,jsd:jed)
 type(domain2d), intent(INOUT) :: domain
 integer mdim
 real(kind=R_GRID) da_max