diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index 28a047c4e..e240fc8f1 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -25,6 +25,7 @@ module ice_dyn_eap p001, p027, p055, p111, p166, p222, p25, p333 use ice_fileunits, only: nu_diag, nu_dump_eap, nu_restart_eap use ice_exit, only: abort_ice + use ice_flux, only: rdg_shear ! use ice_timers, only: & ! ice_timer_start, ice_timer_stop, & ! timer_tmp1, timer_tmp2, timer_tmp3, timer_tmp4, & @@ -36,8 +37,7 @@ module ice_dyn_eap implicit none private - public :: eap, init_eap, write_restart_eap, read_restart_eap, & - alloc_dyn_eap + public :: eap, init_eap, write_restart_eap, read_restart_eap ! Look-up table needed for calculating structure tensor integer (int_kind), parameter :: & @@ -71,42 +71,16 @@ module ice_dyn_eap real (kind=dbl_kind) :: & puny, pi, pi2, piq, pih -!======================================================================= - - contains - -!======================================================================= -! Allocate space for all variables -! - subroutine alloc_dyn_eap + real (kind=dbl_kind), parameter :: & + kfriction = 0.45_dbl_kind - integer (int_kind) :: ierr + real (kind=dbl_kind), save :: & + invdx, invdy, invda, invsin - character(len=*), parameter :: subname = '(alloc_dyn_eap)' - allocate( a11_1 (nx_block,ny_block,max_blocks), & - a11_2 (nx_block,ny_block,max_blocks), & - a11_3 (nx_block,ny_block,max_blocks), & - a11_4 (nx_block,ny_block,max_blocks), & - a12_1 (nx_block,ny_block,max_blocks), & - a12_2 (nx_block,ny_block,max_blocks), & - a12_3 (nx_block,ny_block,max_blocks), & - a12_4 (nx_block,ny_block,max_blocks), & - e11 (nx_block,ny_block,max_blocks), & - e12 (nx_block,ny_block,max_blocks), & - e22 (nx_block,ny_block,max_blocks), & - yieldstress11(nx_block,ny_block,max_blocks), & - yieldstress12(nx_block,ny_block,max_blocks), & - yieldstress22(nx_block,ny_block,max_blocks), & - s11 (nx_block,ny_block,max_blocks), & - s12 (nx_block,ny_block,max_blocks), & - s22 (nx_block,ny_block,max_blocks), & - a11 (nx_block,ny_block,max_blocks), & - a12 (nx_block,ny_block,max_blocks), & - stat=ierr) - if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory') +!======================================================================= - end subroutine alloc_dyn_eap + contains !======================================================================= ! Elastic-anisotropic-plastic dynamics driver @@ -134,7 +108,8 @@ subroutine eap (dt) dyn_prep1, dyn_prep2, stepu, dyn_finish, & seabed_stress_factor_LKD, seabed_stress_factor_prob, & seabed_stress_method, seabed_stress, & - stack_fields, unstack_fields, iceTmask, iceUmask + stack_fields, unstack_fields, iceTmask, iceUmask, & + fld2, fld3, fld4 use ice_flux, only: rdg_conv, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & @@ -186,11 +161,6 @@ subroutine eap (dt) umass , & ! total mass of ice and snow (u grid) umassdti ! mass of U-cell/dte (kg/m^2 s) - real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:), & ! temporary for stacking fields for halo update - fld3(:,:,:,:), & ! temporary for stacking fields for halo update - fld4(:,:,:,:) ! temporary for stacking fields for halo update - real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & strtmp ! stress combinations for momentum equation @@ -214,10 +184,6 @@ subroutine eap (dt) ! 
Initialize !----------------------------------------------------------------- - allocate(fld2(nx_block,ny_block,2,max_blocks)) - allocate(fld3(nx_block,ny_block,3,max_blocks)) - allocate(fld4(nx_block,ny_block,4,max_blocks)) - ! This call is needed only if dt changes during runtime. ! call set_evp_parameters (dt) @@ -226,7 +192,7 @@ subroutine eap (dt) do j = 1, ny_block do i = 1, nx_block rdg_conv (i,j,iblk) = c0 -! rdg_shear(i,j,iblk) = c0 + rdg_shear(i,j,iblk) = c0 ! always zero. Could be moved divu (i,j,iblk) = c0 shear(i,j,iblk) = c0 e11(i,j,iblk) = c0 @@ -554,7 +520,6 @@ subroutine eap (dt) enddo ! subcycling - deallocate(fld2,fld3,fld4) if (maskhalo_dyn) call ice_HaloDestroy(halo_info_mask) !----------------------------------------------------------------- @@ -588,6 +553,8 @@ subroutine init_eap use ice_blocks, only: nx_block, ny_block use ice_domain, only: nblocks + use ice_calendar, only: dt_dyn + use ice_dyn_shared, only: init_dyn_shared ! local variables @@ -599,7 +566,7 @@ subroutine init_eap eps6 = 1.0e-6_dbl_kind integer (kind=int_kind) :: & - ix, iy, iz, ia + ix, iy, iz, ia, ierr integer (kind=int_kind), parameter :: & nz = 100 @@ -609,6 +576,8 @@ subroutine init_eap da, dx, dy, dz, & phi + real (kind=dbl_kind) :: invstressconviso + character(len=*), parameter :: subname = '(init_eap)' call icepack_query_parameters(puny_out=puny, & @@ -619,6 +588,31 @@ subroutine init_eap phi = pi/c12 ! diamond shaped floe smaller angle (default phi = 30 deg) + call init_dyn_shared(dt_dyn) + + allocate( a11_1 (nx_block,ny_block,max_blocks), & + a11_2 (nx_block,ny_block,max_blocks), & + a11_3 (nx_block,ny_block,max_blocks), & + a11_4 (nx_block,ny_block,max_blocks), & + a12_1 (nx_block,ny_block,max_blocks), & + a12_2 (nx_block,ny_block,max_blocks), & + a12_3 (nx_block,ny_block,max_blocks), & + a12_4 (nx_block,ny_block,max_blocks), & + e11 (nx_block,ny_block,max_blocks), & + e12 (nx_block,ny_block,max_blocks), & + e22 (nx_block,ny_block,max_blocks), & + yieldstress11(nx_block,ny_block,max_blocks), & + yieldstress12(nx_block,ny_block,max_blocks), & + yieldstress22(nx_block,ny_block,max_blocks), & + s11 (nx_block,ny_block,max_blocks), & + s12 (nx_block,ny_block,max_blocks), & + s22 (nx_block,ny_block,max_blocks), & + a11 (nx_block,ny_block,max_blocks), & + a12 (nx_block,ny_block,max_blocks), & + stat=ierr) + if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory') + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) do iblk = 1, nblocks do j = 1, ny_block @@ -640,6 +634,7 @@ subroutine init_eap a12_2 (i,j,iblk) = c0 a12_3 (i,j,iblk) = c0 a12_4 (i,j,iblk) = c0 + rdg_shear (i,j,iblk) = c0 enddo ! i enddo ! j enddo ! iblk @@ -657,6 +652,9 @@ subroutine init_eap zinit = -pih dy = pi/real(ny_yield-1,kind=dbl_kind) yinit = -dy + invdx = c1/dx + invdy = c1/dy + invda = c1/da do ia=1,na_yield do ix=1,nx_yield @@ -712,6 +710,12 @@ subroutine init_eap enddo enddo + ! Factor to maintain the same stress as in EVP (see Section 3) + ! 
Can be set to 1 otherwise
+
+      invstressconviso = c1/(c1+kfriction*kfriction)
+      invsin = c1/sin(pi2/c12) * invstressconviso
+
 end subroutine init_eap

!=======================================================================
@@ -1590,22 +1594,12 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, &
         rotstemp11s, rotstemp12s, rotstemp22s, &
         sig11, sig12, sig22, &
         sgprm11, sgprm12, sgprm22, &
-        invstressconviso, &
         Angle_denom_gamma, Angle_denom_alpha, &
         Tany_1, Tany_2, &
         x, y, dx, dy, da, &
         dtemp1, dtemp2, atempprime, &
         kxw, kyw, kaw

-      real (kind=dbl_kind), save :: &
-         invdx, invdy, invda, invsin
-
-      logical (kind=log_kind), save :: &
-         first_call = .true.
-
-      real (kind=dbl_kind), parameter :: &
-         kfriction = 0.45_dbl_kind
-
      ! tcraig, temporary, should be moved to namelist
      ! turns on interpolation in stress_rdg
      logical(kind=log_kind), parameter :: &
@@ -1613,14 +1607,6 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, &

      character(len=*), parameter :: subname = '(update_stress_rdg)'

-      ! Factor to maintain the same stress as in EVP (see Section 3)
-      ! Can be set to 1 otherwise
-
-      if (first_call) then
-         invstressconviso = c1/(c1+kfriction*kfriction)
-         invsin = c1/sin(pi2/c12) * invstressconviso
-      endif
-
      ! compute eigenvalues, eigenvectors and angles for structure tensor, strain rates

      ! 1) structure tensor
@@ -1717,17 +1703,6 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, &
      if (y > pi) y = y - pi
      if (y < 0) y = y + pi

-      ! Now calculate updated stress tensor
-
-      if (first_call) then
-         dx = pi/real(nx_yield-1,kind=dbl_kind)
-         dy = pi/real(ny_yield-1,kind=dbl_kind)
-         da = p5/real(na_yield-1,kind=dbl_kind)
-         invdx = c1/dx
-         invdy = c1/dy
-         invda = c1/da
-      endif
-
      if (interpolate_stress_rdg) then

         ! Interpolated lookup
@@ -1869,8 +1844,6 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, &
                       + rotstemp22s*dtemp22
      endif

-      first_call = .false.
-
 end subroutine update_stress_rdg

!=======================================================================
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index 8eab5e260..69305e131 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -55,7 +55,61 @@ module ice_dyn_evp
      implicit none
      private
-     public :: evp
+! all c or cd
+      real (kind=dbl_kind), allocatable :: &
+         uocnN    (:,:,:) , & ! i ocean current (m/s)
+         vocnN    (:,:,:) , & ! j ocean current (m/s)
+         ss_tltxN (:,:,:) , & ! sea surface slope, x-direction (m/m)
+         ss_tltyN (:,:,:) , & ! sea surface slope, y-direction (m/m)
+         cdn_ocnN (:,:,:) , & ! ocn drag coefficient
+         waterxN  (:,:,:) , & ! for ocean stress calculation, x (m/s)
+         wateryN  (:,:,:) , & ! for ocean stress calculation, y (m/s)
+         forcexN  (:,:,:) , & ! work array: combined atm stress and ocn tilt, x
+         forceyN  (:,:,:) , & ! work array: combined atm stress and ocn tilt, y
+         aiN      (:,:,:) , & ! ice fraction on N-grid
+         nmass    (:,:,:) , & ! total mass of ice and snow (N grid)
+         nmassdti (:,:,:)     ! mass of N-cell/dte (kg/m^2 s)
+! all c or cd
+      real (kind=dbl_kind), allocatable :: &
+         uocnE    (:,:,:) , & ! i ocean current (m/s)
+         vocnE    (:,:,:) , & ! j ocean current (m/s)
+         ss_tltxE (:,:,:) , & ! sea surface slope, x-direction (m/m)
+         ss_tltyE (:,:,:) , & ! sea surface slope, y-direction (m/m)
+         cdn_ocnE (:,:,:) , & ! ocn drag coefficient
+         waterxE  (:,:,:) , & ! for ocean stress calculation, x (m/s)
+         wateryE  (:,:,:) , & ! for ocean stress calculation, y (m/s)
+         forcexE  (:,:,:) , & ! 
work array: combined atm stress and ocn tilt, x
+         forceyE  (:,:,:) , & ! work array: combined atm stress and ocn tilt, y
+         aiE      (:,:,:) , & ! ice fraction on E-grid
+         emass    (:,:,:) , & ! total mass of ice and snow (E grid)
+         emassdti (:,:,:)     ! mass of E-cell/dte (kg/m^2 s)
+
+      real (kind=dbl_kind), allocatable :: &
+         strengthU(:,:,:) , & ! strength averaged to U points
+         divergU  (:,:,:) , & ! div array on U points, differentiate from divu
+         tensionU (:,:,:) , & ! tension array on U points
+         shearU   (:,:,:) , & ! shear array on U points
+         deltaU   (:,:,:) , & ! delta array on U points
+         zetax2T  (:,:,:) , & ! zetax2 = 2*zeta (bulk viscosity)
+         zetax2U  (:,:,:) , & ! zetax2T averaged to U points
+         etax2T   (:,:,:) , & ! etax2 = 2*eta (shear viscosity)
+         etax2U   (:,:,:)     ! etax2T averaged to U points
+
+      real (kind=dbl_kind), allocatable :: &
+         uocnU    (:,:,:) , & ! i ocean current (m/s)
+         vocnU    (:,:,:) , & ! j ocean current (m/s)
+         ss_tltxU (:,:,:) , & ! sea surface slope, x-direction (m/m)
+         ss_tltyU (:,:,:) , & ! sea surface slope, y-direction (m/m)
+         cdn_ocnU (:,:,:) , & ! ocn drag coefficient
+         tmass    (:,:,:) , & ! total mass of ice and snow (kg/m^2)
+         waterxU  (:,:,:) , & ! for ocean stress calculation, x (m/s)
+         wateryU  (:,:,:) , & ! for ocean stress calculation, y (m/s)
+         forcexU  (:,:,:) , & ! work array: combined atm stress and ocn tilt, x
+         forceyU  (:,:,:) , & ! work array: combined atm stress and ocn tilt, y
+         umass    (:,:,:) , & ! total mass of ice and snow (u grid)
+         umassdti (:,:,:)     ! mass of U-cell/dte (kg/m^2 s)
+
+      public :: evp, init_evp

!=======================================================================
@@ -64,6 +118,84 @@ module ice_dyn_evp
!=======================================================================
! Elastic-viscous-plastic dynamics driver
!
+      subroutine init_evp
+      use ice_blocks, only: nx_block, ny_block
+      use ice_domain_size, only: max_blocks
+      use ice_grid, only: grid_ice
+      use ice_calendar, only: dt_dyn
+      use ice_dyn_shared, only: init_dyn_shared
+
+! allocate C and CD grid variables; follows the structure of eap
+      integer (int_kind) :: ierr
+
+      character(len=*), parameter :: subname = '(init_evp)'
+
+      call init_dyn_shared(dt_dyn)
+
+      allocate( uocnU    (nx_block,ny_block,max_blocks), & ! i ocean current (m/s)
+                vocnU    (nx_block,ny_block,max_blocks), & ! j ocean current (m/s)
+                ss_tltxU (nx_block,ny_block,max_blocks), & ! sea surface slope, x-direction (m/m)
+                ss_tltyU (nx_block,ny_block,max_blocks), & ! sea surface slope, y-direction (m/m)
+                cdn_ocnU (nx_block,ny_block,max_blocks), & ! ocn drag coefficient
+                tmass    (nx_block,ny_block,max_blocks), & ! total mass of ice and snow (kg/m^2)
+                waterxU  (nx_block,ny_block,max_blocks), & ! for ocean stress calculation, x (m/s)
+                wateryU  (nx_block,ny_block,max_blocks), & ! for ocean stress calculation, y (m/s)
+                forcexU  (nx_block,ny_block,max_blocks), & ! work array: combined atm stress and ocn tilt, x
+                forceyU  (nx_block,ny_block,max_blocks), & ! work array: combined atm stress and ocn tilt, y
+                umass    (nx_block,ny_block,max_blocks), & ! total mass of ice and snow (u grid)
+                umassdti (nx_block,ny_block,max_blocks), & ! mass of U-cell/dte (kg/m^2 s)
+                stat=ierr)
+      if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory B-Grid evp')
+
+
+      if (grid_ice == 'CD' .or. 
grid_ice == 'C') then + + allocate( strengthU(nx_block,ny_block,max_blocks), & + divergU (nx_block,ny_block,max_blocks), & + tensionU (nx_block,ny_block,max_blocks), & + shearU (nx_block,ny_block,max_blocks), & + deltaU (nx_block,ny_block,max_blocks), & + zetax2T (nx_block,ny_block,max_blocks), & + zetax2U (nx_block,ny_block,max_blocks), & + etax2T (nx_block,ny_block,max_blocks), & + etax2U (nx_block,ny_block,max_blocks), & + stat=ierr) + if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory U evp') + + allocate( uocnN (nx_block,ny_block,max_blocks), & + vocnN (nx_block,ny_block,max_blocks), & + ss_tltxN (nx_block,ny_block,max_blocks), & + ss_tltyN (nx_block,ny_block,max_blocks), & + cdn_ocnN (nx_block,ny_block,max_blocks), & + waterxN (nx_block,ny_block,max_blocks), & + wateryN (nx_block,ny_block,max_blocks), & + forcexN (nx_block,ny_block,max_blocks), & + forceyN (nx_block,ny_block,max_blocks), & + aiN (nx_block,ny_block,max_blocks), & + nmass (nx_block,ny_block,max_blocks), & + nmassdti (nx_block,ny_block,max_blocks), & + stat=ierr) + if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory N evp') + + allocate( uocnE (nx_block,ny_block,max_blocks), & + vocnE (nx_block,ny_block,max_blocks), & + ss_tltxE (nx_block,ny_block,max_blocks), & + ss_tltyE (nx_block,ny_block,max_blocks), & + cdn_ocnE (nx_block,ny_block,max_blocks), & + waterxE (nx_block,ny_block,max_blocks), & + wateryE (nx_block,ny_block,max_blocks), & + forcexE (nx_block,ny_block,max_blocks), & + forceyE (nx_block,ny_block,max_blocks), & + aiE (nx_block,ny_block,max_blocks), & + emass (nx_block,ny_block,max_blocks), & + emassdti (nx_block,ny_block,max_blocks), & + stat=ierr) + if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory E evp') + + endif + + end subroutine init_evp + #ifdef CICE_IN_NEMO ! Wind stress is set during this routine from the values supplied ! via NEMO (unless calc_strair is true). These values are supplied @@ -116,7 +248,7 @@ subroutine evp (dt) DminTarea, visc_method, deformations, deformationsC_T, deformationsCD_T, & strain_rates_U, & iceTmask, iceUmask, iceEmask, iceNmask, & - dyn_haloUpdate + dyn_haloUpdate, fld2, fld3, fld4 real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -145,64 +277,6 @@ subroutine evp (dt) indxUi , & ! compressed index in i-direction indxUj ! compressed index in j-direction - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - uocnU , & ! i ocean current (m/s) - vocnU , & ! j ocean current (m/s) - ss_tltxU , & ! sea surface slope, x-direction (m/m) - ss_tltyU , & ! sea surface slope, y-direction (m/m) - cdn_ocnU , & ! ocn drag coefficient - tmass , & ! total mass of ice and snow (kg/m^2) - waterxU , & ! for ocean stress calculation, x (m/s) - wateryU , & ! for ocean stress calculation, y (m/s) - forcexU , & ! work array: combined atm stress and ocn tilt, x - forceyU , & ! work array: combined atm stress and ocn tilt, y - umass , & ! total mass of ice and snow (u grid) - umassdti ! mass of U-cell/dte (kg/m^2 s) - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - uocnN , & ! i ocean current (m/s) - vocnN , & ! j ocean current (m/s) - ss_tltxN , & ! sea surface slope, x-direction (m/m) - ss_tltyN , & ! sea surface slope, y-direction (m/m) - cdn_ocnN , & ! ocn drag coefficient - waterxN , & ! for ocean stress calculation, x (m/s) - wateryN , & ! for ocean stress calculation, y (m/s) - forcexN , & ! work array: combined atm stress and ocn tilt, x - forceyN , & ! work array: combined atm stress and ocn tilt, y - aiN , & ! 
ice fraction on N-grid - nmass , & ! total mass of ice and snow (N grid) - nmassdti ! mass of N-cell/dte (kg/m^2 s) - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - uocnE , & ! i ocean current (m/s) - vocnE , & ! j ocean current (m/s) - ss_tltxE , & ! sea surface slope, x-direction (m/m) - ss_tltyE , & ! sea surface slope, y-direction (m/m) - cdn_ocnE , & ! ocn drag coefficient - waterxE , & ! for ocean stress calculation, x (m/s) - wateryE , & ! for ocean stress calculation, y (m/s) - forcexE , & ! work array: combined atm stress and ocn tilt, x - forceyE , & ! work array: combined atm stress and ocn tilt, y - aiE , & ! ice fraction on E-grid - emass , & ! total mass of ice and snow (E grid) - emassdti ! mass of E-cell/dte (kg/m^2 s) - - real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:), & ! 2 bundled fields - fld3(:,:,:,:), & ! 3 bundled fields - fld4(:,:,:,:) ! 4 bundled fields - - real (kind=dbl_kind), allocatable :: & - strengthU(:,:,:), & ! strength averaged to U points - divergU (:,:,:), & ! div array on U points, differentiate from divu - tensionU (:,:,:), & ! tension array on U points - shearU (:,:,:), & ! shear array on U points - deltaU (:,:,:), & ! delta array on U points - zetax2T (:,:,:), & ! zetax2 = 2*zeta (bulk viscosity) - zetax2U (:,:,:), & ! zetax2T averaged to U points - etax2T (:,:,:), & ! etax2 = 2*eta (shear viscosity) - etax2U (:,:,:) ! etax2T averaged to U points - real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & strtmp ! stress combinations for momentum equation @@ -218,9 +292,6 @@ subroutine evp (dt) type (block) :: & this_block ! block information for current block - logical (kind=log_kind), save :: & - first_time = .true. ! first time logical - character(len=*), parameter :: subname = '(evp)' call ice_timer_start(timer_dynamics) ! dynamics @@ -229,21 +300,8 @@ subroutine evp (dt) ! Initialize !----------------------------------------------------------------- - allocate(fld2(nx_block,ny_block,2,max_blocks)) - allocate(fld3(nx_block,ny_block,3,max_blocks)) - allocate(fld4(nx_block,ny_block,4,max_blocks)) - if (grid_ice == 'CD' .or. grid_ice == 'C') then - allocate(strengthU(nx_block,ny_block,max_blocks)) - allocate(divergU (nx_block,ny_block,max_blocks)) - allocate(tensionU (nx_block,ny_block,max_blocks)) - allocate(shearU (nx_block,ny_block,max_blocks)) - allocate(deltaU (nx_block,ny_block,max_blocks)) - allocate(zetax2T (nx_block,ny_block,max_blocks)) - allocate(zetax2U (nx_block,ny_block,max_blocks)) - allocate(etax2T (nx_block,ny_block,max_blocks)) - allocate(etax2U (nx_block,ny_block,max_blocks)) strengthU(:,:,:) = c0 divergU (:,:,:) = c0 tensionU (:,:,:) = c0 @@ -383,20 +441,20 @@ subroutine evp (dt) endif endif - !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) - do iblk = 1, nblocks + if (trim(grid_ice) == 'B') then + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) + do iblk = 1, nblocks - !----------------------------------------------------------------- - ! more preparation for dynamics - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! 
more preparation for dynamics + !----------------------------------------------------------------- - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi - if (trim(grid_ice) == 'B') then call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & icellT (iblk), icellU (iblk), & @@ -409,7 +467,7 @@ subroutine evp (dt) strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & iceTmask (:,:,iblk), iceUmask (:,:,iblk), & - fmU (:,:,iblk), dt, & + fmU (:,:,iblk), dt , & strtltxU (:,:,iblk), strtltyU (:,:,iblk), & strocnxU (:,:,iblk), strocnyU (:,:,iblk), & strintxU (:,:,iblk), strintyU (:,:,iblk), & @@ -426,7 +484,35 @@ subroutine evp (dt) uvel (:,:,iblk), vvel (:,:,iblk), & TbU (:,:,iblk)) - elseif (trim(grid_ice) == 'CD' .or. grid_ice == 'C') then + !----------------------------------------------------------------- + ! ice strength + !----------------------------------------------------------------- + + strength(:,:,iblk) = c0 ! initialize + do ij = 1, icellT(iblk) + i = indxTi(ij, iblk) + j = indxTj(ij, iblk) + call icepack_ice_strength(ncat = ncat, & + aice = aice (i,j, iblk), & + vice = vice (i,j, iblk), & + aice0 = aice0 (i,j, iblk), & + aicen = aicen (i,j,:,iblk), & + vicen = vicen (i,j,:,iblk), & + strength = strength(i,j, iblk)) + enddo ! ij + + enddo ! iblk + !$OMP END PARALLEL DO + elseif (trim(grid_ice) == 'CD' .or. grid_ice == 'C') then + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) + do iblk = 1, nblocks + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & icellT (iblk), icellU (iblk), & @@ -455,122 +541,108 @@ subroutine evp (dt) uvel_init (:,:,iblk), vvel_init (:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & TbU (:,:,iblk)) - endif - !----------------------------------------------------------------- - ! ice strength - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! ice strength + !----------------------------------------------------------------- - strength(:,:,iblk) = c0 ! initialize - do ij = 1, icellT(iblk) - i = indxTi(ij, iblk) - j = indxTj(ij, iblk) - call icepack_ice_strength(ncat = ncat, & - aice = aice (i,j, iblk), & - vice = vice (i,j, iblk), & - aice0 = aice0 (i,j, iblk), & - aicen = aicen (i,j,:,iblk), & - vicen = vicen (i,j,:,iblk), & - strength = strength(i,j, iblk) ) - enddo ! ij - - enddo ! iblk - !$OMP END PARALLEL DO + strength(:,:,iblk) = c0 ! initialize + do ij = 1, icellT(iblk) + i = indxTi(ij, iblk) + j = indxTj(ij, iblk) + call icepack_ice_strength(ncat = ncat, & + aice = aice (i,j, iblk), & + vice = vice (i,j, iblk), & + aice0 = aice0 (i,j, iblk), & + aicen = aicen (i,j,:,iblk), & + vicen = vicen (i,j,:,iblk), & + strength = strength(i,j, iblk) ) + enddo ! ij - if (grid_ice == 'CD' .or. grid_ice == 'C') then - !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,i,j) SCHEDULE(runtime) - do iblk = 1, nblocks + !----------------------------------------------------------------- + ! 
more preparation for dynamics on N grid + !----------------------------------------------------------------- - !----------------------------------------------------------------- - ! more preparation for dynamics on N grid - !----------------------------------------------------------------- + call dyn_prep2 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icellT (iblk), icellN (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & + aiN (:,:,iblk), nmass (:,:,iblk), & + nmassdti (:,:,iblk), fcorN_blk (:,:,iblk), & + nmask (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + strairxN (:,:,iblk), strairyN (:,:,iblk), & + ss_tltxN (:,:,iblk), ss_tltyN (:,:,iblk), & + iceTmask (:,:,iblk), iceNmask (:,:,iblk), & + fmN (:,:,iblk), dt , & + strtltxN (:,:,iblk), strtltyN (:,:,iblk), & + strocnxN (:,:,iblk), strocnyN (:,:,iblk), & + strintxN (:,:,iblk), strintyN (:,:,iblk), & + taubxN (:,:,iblk), taubyN (:,:,iblk), & + waterxN (:,:,iblk), wateryN (:,:,iblk), & + forcexN (:,:,iblk), forceyN (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + TbN (:,:,iblk)) - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi + !----------------------------------------------------------------- + ! more preparation for dynamics on E grid + !----------------------------------------------------------------- - call dyn_prep2 (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - icellT (iblk), icellN (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - indxNi (:,iblk), indxNj (:,iblk), & - aiN (:,:,iblk), nmass (:,:,iblk), & - nmassdti (:,:,iblk), fcorN_blk (:,:,iblk), & - nmask (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - strairxN (:,:,iblk), strairyN (:,:,iblk), & - ss_tltxN (:,:,iblk), ss_tltyN (:,:,iblk), & - iceTmask (:,:,iblk), iceNmask (:,:,iblk), & - fmN (:,:,iblk), dt, & - strtltxN (:,:,iblk), strtltyN (:,:,iblk), & - strocnxN (:,:,iblk), strocnyN (:,:,iblk), & - strintxN (:,:,iblk), strintyN (:,:,iblk), & - taubxN (:,:,iblk), taubyN (:,:,iblk), & - waterxN (:,:,iblk), wateryN (:,:,iblk), & - forcexN (:,:,iblk), forceyN (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - TbN (:,:,iblk)) + call dyn_prep2 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icellT (iblk), icellE (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & + aiE (:,:,iblk), emass (:,:,iblk), & + emassdti (:,:,iblk), fcorE_blk (:,:,iblk), & + emask (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + strairxE (:,:,iblk), strairyE (:,:,iblk), & + ss_tltxE (:,:,iblk), ss_tltyE (:,:,iblk), & + iceTmask (:,:,iblk), iceEmask (:,:,iblk), & + fmE (:,:,iblk), dt , & + strtltxE (:,:,iblk), strtltyE (:,:,iblk), & + strocnxE (:,:,iblk), strocnyE (:,:,iblk), & + strintxE (:,:,iblk), strintyE (:,:,iblk), & + taubxE (:,:,iblk), taubyE 
(:,:,iblk), & + waterxE (:,:,iblk), wateryE (:,:,iblk), & + forcexE (:,:,iblk), forceyE (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + TbE (:,:,iblk)) - !----------------------------------------------------------------- - ! more preparation for dynamics on E grid - !----------------------------------------------------------------- - call dyn_prep2 (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - icellT (iblk), icellE (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - indxEi (:,iblk), indxEj (:,iblk), & - aiE (:,:,iblk), emass (:,:,iblk), & - emassdti (:,:,iblk), fcorE_blk (:,:,iblk), & - emask (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - strairxE (:,:,iblk), strairyE (:,:,iblk), & - ss_tltxE (:,:,iblk), ss_tltyE (:,:,iblk), & - iceTmask (:,:,iblk), iceEmask (:,:,iblk), & - fmE (:,:,iblk), dt, & - strtltxE (:,:,iblk), strtltyE (:,:,iblk), & - strocnxE (:,:,iblk), strocnyE (:,:,iblk), & - strintxE (:,:,iblk), strintyE (:,:,iblk), & - taubxE (:,:,iblk), taubyE (:,:,iblk), & - waterxE (:,:,iblk), wateryE (:,:,iblk), & - forcexE (:,:,iblk), forceyE (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - TbE (:,:,iblk)) - - - do i=1,nx_block - do j=1,ny_block - if (.not.iceUmask(i,j,iblk)) then - stresspU (i,j,iblk) = c0 - stressmU (i,j,iblk) = c0 - stress12U(i,j,iblk) = c0 - endif - if (.not.iceTmask(i,j,iblk)) then - stresspT (i,j,iblk) = c0 - stressmT (i,j,iblk) = c0 - stress12T(i,j,iblk) = c0 - endif - enddo - enddo - enddo ! iblk - !$OMP END PARALLEL DO + do i=1,nx_block + do j=1,ny_block + if (.not.iceUmask(i,j,iblk)) then + stresspU (i,j,iblk) = c0 + stressmU (i,j,iblk) = c0 + stress12U(i,j,iblk) = c0 + endif + if (.not.iceTmask(i,j,iblk)) then + stresspT (i,j,iblk) = c0 + stressmT (i,j,iblk) = c0 + stress12T(i,j,iblk) = c0 + endif + enddo + enddo + enddo ! iblk + !$OMP END PARALLEL DO endif ! grid_ice @@ -721,10 +793,6 @@ subroutine evp (dt) if (evp_algorithm == "shared_mem_1d" ) then - if (first_time .and. my_task == master_task) then - write(nu_diag,'(3a)') subname,' Entering evp_algorithm version ',evp_algorithm - first_time = .false. - endif if (trim(grid_type) == 'tripole') then call abort_ice(trim(subname)//' & & Kernel not tested on tripole grid. Set evp_algorithm=standard_2d') @@ -1211,12 +1279,6 @@ subroutine evp (dt) call ice_timer_stop(timer_evp_2d) endif ! evp_algorithm - deallocate(fld2,fld3,fld4) - if (grid_ice == 'CD' .or. grid_ice == 'C') then - deallocate(strengthU, divergU, tensionU, shearU, deltaU) - deallocate(zetax2T, zetax2U, etax2T, etax2U) - endif - if (maskhalo_dyn) then call ice_HaloDestroy(halo_info_mask) endif @@ -1658,17 +1720,17 @@ end subroutine stress ! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method ! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. 
- subroutine stressC_T (nx_block, ny_block , & - icellT , & - indxTi , indxTj , & - uvelE , vvelE , & - uvelN , vvelN , & - dxN , dyE , & - dxT , dyT , & - uarea , DminTarea, & - strength, shearU , & - zetax2T , etax2T , & - stressp , stressm ) + subroutine stressC_T (nx_block, ny_block , & + icellT , & + indxTi , indxTj , & + uvelE , vvelE , & + uvelN , vvelN , & + dxN , dyE , & + dxT , dyT , & + uarea , DminTarea , & + strength , shearU , & + zetax2T , etax2T , & + stressp , stressm ) use ice_dyn_shared, only: strain_rates_T, capping, & visc_replpress, e_factor @@ -1682,24 +1744,24 @@ subroutine stressC_T (nx_block, ny_block , & indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvelE , & ! x-component of velocity (m/s) at the E point - vvelE , & ! y-component of velocity (m/s) at the E point - uvelN , & ! x-component of velocity (m/s) at the N point - vvelN , & ! y-component of velocity (m/s) at the N point - dxN , & ! width of N-cell through the middle (m) - dyE , & ! height of E-cell through the middle (m) - dxT , & ! width of T-cell through the middle (m) - dyT , & ! height of T-cell through the middle (m) - strength , & ! ice strength (N/m) - shearU , & ! shearU - uarea , & ! area of u cell - DminTarea ! deltaminEVP*tarea + uvelE , & ! x-component of velocity (m/s) at the E point + vvelE , & ! y-component of velocity (m/s) at the E point + uvelN , & ! x-component of velocity (m/s) at the N point + vvelN , & ! y-component of velocity (m/s) at the N point + dxN , & ! width of N-cell through the middle (m) + dyE , & ! height of E-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT , & ! height of T-cell through the middle (m) + strength , & ! ice strength (N/m) + shearU , & ! shearU local for this routine + uarea , & ! area of u cell + DminTarea ! deltaminEVP*tarea real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - zetax2T , & ! zetax2 = 2*zeta (bulk viscosity) - etax2T , & ! etax2 = 2*eta (shear viscosity) - stressp , & ! sigma11+sigma22 - stressm ! sigma11-sigma22 + zetax2T , & ! zetax2 = 2*zeta (bulk viscosity) + etax2T , & ! etax2 = 2*eta (shear viscosity) + stressp , & ! sigma11+sigma22 + stressm ! sigma11-sigma22 ! local variables @@ -1707,8 +1769,8 @@ subroutine stressC_T (nx_block, ny_block , & i, j, ij real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - divT , & ! divergence at T point - tensionT ! tension at T point + divT , & ! divergence at T point + tensionT ! tension at T point real (kind=dbl_kind) :: & shearTsqr , & ! strain rates squared at T point @@ -1728,7 +1790,7 @@ subroutine stressC_T (nx_block, ny_block , & uvelN (:,:), vvelN (:,:), & dxN (:,:), dyE (:,:), & dxT (:,:), dyT (:,:), & - divT (:,:), tensionT(:,:) ) + divT (:,:), tensionT(:,:)) do ij = 1, icellT i = indxTi(ij) @@ -1752,7 +1814,7 @@ subroutine stressC_T (nx_block, ny_block , & !----------------------------------------------------------------- call visc_replpress (strength(i,j), DminTarea(i,j), DeltaT, & - zetax2T (i,j), etax2T (i,j), rep_prsT, capping) + zetax2T (i,j), etax2T(i,j), rep_prsT, capping) !----------------------------------------------------------------- ! the stresses ! kg/s^2 @@ -1783,13 +1845,13 @@ end subroutine stressC_T ! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method ! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. 
- subroutine stressC_U (nx_block , ny_block, & - icellU, & - indxUi , indxUj, & + subroutine stressC_U (nx_block , ny_block ,& + icellU ,& + indxUi , indxUj ,& uarea , & - etax2U , deltaU, & - strengthU, shearU, & - stress12 ) + etax2U , deltaU ,& + strengthU, shearU ,& + stress12) use ice_dyn_shared, only: visc_replpress, & visc_method, deltaminEVP, capping @@ -1847,7 +1909,7 @@ subroutine stressC_U (nx_block , ny_block, & DminUarea = deltaminEVP*uarea(i,j) ! only need etax2U here, but other terms are calculated with etax2U ! minimal extra calculations here even though it seems like there is - call visc_replpress (strengthU(i,j), DminUarea, DeltaU(i,j), & + call visc_replpress (strengthU(i,j), DminUarea, deltaU(i,j), & lzetax2U , letax2U , lrep_prsU , capping) stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) & + arlx1i*p5*letax2U*shearU(i,j)) * denom1 @@ -1863,18 +1925,18 @@ end subroutine stressC_U ! author: JF Lemieux, ECCC ! Nov 2021 - subroutine stressCD_T (nx_block, ny_block, & - icellT, & - indxTi, indxTj, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - DminTarea, & - strength, & - zetax2T, etax2T, & - stresspT, stressmT, & - stress12T ) + subroutine stressCD_T (nx_block, ny_block , & + icellT , & + indxTi , indxTj , & + uvelE , vvelE , & + uvelN , vvelN , & + dxN , dyE , & + dxT , dyT , & + DminTarea, & + strength, & + zetax2T , etax2T , & + stresspT, stressmT , & + stress12T) use ice_dyn_shared, only: strain_rates_T, capping, & visc_replpress @@ -1929,13 +1991,13 @@ subroutine stressCD_T (nx_block, ny_block, & call strain_rates_T (nx_block , ny_block , & icellT , & - indxTi(:) , indxTj (:) , & + indxTi (:), indxTj (:) , & uvelE (:,:), vvelE (:,:), & uvelN (:,:), vvelN (:,:), & dxN (:,:), dyE (:,:), & dxT (:,:), dyT (:,:), & divT (:,:), tensionT(:,:), & - shearT(:,:), DeltaT (:,:) ) + shearT(:,:), DeltaT (:,:)) do ij = 1, icellT i = indxTi(ij) @@ -1946,7 +2008,7 @@ subroutine stressCD_T (nx_block, ny_block, & !----------------------------------------------------------------- call visc_replpress (strength(i,j), DminTarea(i,j), DeltaT(i,j), & - zetax2T (i,j), etax2T (i,j), rep_prsT , capping) + zetax2T (i,j), etax2T(i,j), rep_prsT , capping) !----------------------------------------------------------------- ! the stresses ! kg/s^2 @@ -1973,16 +2035,16 @@ end subroutine stressCD_T ! author: JF Lemieux, ECCC ! Nov 2021 - subroutine stressCD_U (nx_block, ny_block, & - icellU, & - indxUi, indxUj, & - uarea, & - zetax2U, etax2U, & - strengthU, & - divergU, tensionU, & - shearU, DeltaU, & - stresspU, stressmU, & - stress12U ) + subroutine stressCD_U (nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + uarea , & + zetax2U , etax2U , & + strengthU , & + divergU , tensionU, & + shearU , deltaU , & + stresspU , stressmU, & + stress12U) use ice_dyn_shared, only: strain_rates_U, & visc_replpress, & @@ -1997,7 +2059,7 @@ subroutine stressCD_U (nx_block, ny_block, & indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uarea , & ! area of U-cell (m^2) + uarea , & ! area of U-cell (m^2) zetax2U , & ! 2*zeta at U point etax2U , & ! 2*eta at U point strengthU, & ! ice strength at U point @@ -2043,7 +2105,7 @@ subroutine stressCD_U (nx_block, ny_block, & DminUarea = deltaminEVP*uarea(i,j) ! only need etax2U here, but other terms are calculated with etax2U ! 
minimal extra calculations here even though it seems like there is
-         call visc_replpress (strengthU(i,j), DminUarea, DeltaU(i,j), &
+         call visc_replpress (strengthU(i,j), DminUarea, deltaU(i,j), &
                               lzetax2U , letax2U , lrep_prsU , capping)
         endif
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
index 187ec55cc..5e2757b93 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
@@ -24,9 +24,8 @@ module ice_dyn_shared
      implicit none
      private
      public :: set_evp_parameters, stepu, stepuv_CD, stepu_C, stepv_C, &
-        principal_stress, init_dyn, dyn_prep1, dyn_prep2, dyn_finish, &
+        principal_stress, init_dyn_shared, dyn_prep1, dyn_prep2, dyn_finish, &
         seabed_stress_factor_LKD, seabed_stress_factor_prob, &
-        alloc_dyn_shared, &
         deformations, deformationsC_T, deformationsCD_T, &
         strain_rates, strain_rates_T, strain_rates_U, &
         visc_replpress, &
@@ -94,6 +93,11 @@ module ice_dyn_shared
         fcorE_blk(:,:,:), & ! Coriolis parameter at E points (1/s)
         fcorN_blk(:,:,:)    ! Coriolis parameter at N points (1/s)

+      real (kind=dbl_kind), allocatable, public :: &
+         fld2(:,:,:,:), & ! 2 bundled fields
+         fld3(:,:,:,:), & ! 3 bundled fields
+         fld4(:,:,:,:)    ! 4 bundled fields
+
      real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: &
         uvel_init , & ! x-component of velocity (m/s), beginning of timestep
         vvel_init     ! y-component of velocity (m/s), beginning of timestep
@@ -176,6 +180,15 @@ subroutine alloc_dyn_shared
         vvel_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep
         iceTmask  (nx_block,ny_block,max_blocks), & ! T mask for dynamics
         iceUmask  (nx_block,ny_block,max_blocks), & ! U mask for dynamics
+        fcor_blk  (nx_block,ny_block,max_blocks), & ! Coriolis
+        DminTarea (nx_block,ny_block,max_blocks), & ! deltaminEVP*tarea
+        stat=ierr)
+      if (ierr/=0) call abort_ice(subname//': Out of memory')
+
+      allocate( &
+         fld2(nx_block,ny_block,2,max_blocks), &
+         fld3(nx_block,ny_block,3,max_blocks), &
+         fld4(nx_block,ny_block,4,max_blocks), &
         stat=ierr)
      if (ierr/=0) call abort_ice(subname//': Out of memory')

@@ -187,6 +200,8 @@ subroutine alloc_dyn_shared
         vvelN_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep
         iceEmask   (nx_block,ny_block,max_blocks), & ! E mask for dynamics
         iceNmask   (nx_block,ny_block,max_blocks), & ! N mask for dynamics
+        fcorE_blk  (nx_block,ny_block,max_blocks), & ! Coriolis
+        fcorN_blk  (nx_block,ny_block,max_blocks), & ! Coriolis
         stat=ierr)
      if (ierr/=0) call abort_ice(subname//': Out of memory')
      endif
@@ -197,18 +212,18 @@ end subroutine alloc_dyn_shared
! Initialize parameters and variables needed for the dynamics
! author: Elizabeth C. Hunke, LANL

-      subroutine init_dyn (dt)
+      subroutine init_dyn_shared (dt)

      use ice_blocks, only: nx_block, ny_block
      use ice_domain, only: nblocks, halo_dynbundle
      use ice_domain_size, only: max_blocks
-      use ice_flux, only: rdg_conv, rdg_shear, &
+      use ice_flux, only: &
         stressp_1, stressp_2, stressp_3, stressp_4, &
         stressm_1, stressm_2, stressm_3, stressm_4, &
         stress12_1, stress12_2, stress12_3, stress12_4, &
         stresspT, stressmT, stress12T, &
         stresspU, stressmU, stress12U
-      use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN, divu, shear
+      use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN
      use ice_grid, only: ULAT, NLAT, ELAT, tarea

      real (kind=dbl_kind), intent(in) :: &
         dt ! time step

      ! local variables

      integer (kind=int_kind) :: &
         i, j, &
         nprocs, & ! number of processors
         iblk      ! 
block index - character(len=*), parameter :: subname = '(init_dyn)' + character(len=*), parameter :: subname = '(init_dyn_shared)' call set_evp_parameters (dt) - + ! allocate dyn shared (init_uvel,init_vvel) + call alloc_dyn_shared ! Set halo_dynbundle, this is empirical at this point, could become namelist halo_dynbundle = .true. nprocs = get_num_procs() @@ -237,14 +253,6 @@ subroutine init_dyn (dt) write(nu_diag,*) 'halo_dynbundle =', halo_dynbundle endif - allocate(fcor_blk(nx_block,ny_block,max_blocks)) - allocate(DminTarea(nx_block,ny_block,max_blocks)) - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - allocate(fcorE_blk(nx_block,ny_block,max_blocks)) - allocate(fcorN_blk(nx_block,ny_block,max_blocks)) - endif - !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) do iblk = 1, nblocks do j = 1, ny_block @@ -260,11 +268,6 @@ subroutine init_dyn (dt) vvelN(i,j,iblk) = c0 endif - ! strain rates - divu (i,j,iblk) = c0 - shear(i,j,iblk) = c0 - rdg_conv (i,j,iblk) = c0 - rdg_shear(i,j,iblk) = c0 ! Coriolis parameter if (trim(coriolis) == 'constant') then @@ -330,7 +333,7 @@ subroutine init_dyn (dt) enddo ! iblk !$OMP END PARALLEL DO - end subroutine init_dyn + end subroutine init_dyn_shared !======================================================================= ! Set parameters needed for the evp dynamics. diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 6534e7568..5a01f4308 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -48,7 +48,7 @@ module ice_dyn_vp use ice_dyn_shared, only: dyn_prep1, dyn_prep2, dyn_finish, & cosw, sinw, fcor_blk, uvel_init, vvel_init, & seabed_stress_factor_LKD, seabed_stress_factor_prob, seabed_stress_method, & - seabed_stress, Ktens, stack_fields, unstack_fields + seabed_stress, Ktens, stack_fields, unstack_fields, fld2, fld3, fld4 use ice_fileunits, only: nu_diag use ice_flux, only: fmU use ice_global_reductions, only: global_sum @@ -105,11 +105,6 @@ module ice_dyn_vp indxUi(:,:) , & ! compressed index in i-direction indxUj(:,:) ! compressed index in j-direction - real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:), & ! work array for boundary updates - fld3(:,:,:,:), & ! work array for boundary updates - fld4(:,:,:,:) ! work array for boundary updates - !======================================================================= contains @@ -126,6 +121,8 @@ subroutine init_vp use ice_constants, only: c1, & field_loc_center, field_type_scalar use ice_domain, only: blocks_ice, halo_info + use ice_calendar, only: dt_dyn + use ice_dyn_shared, only: init_dyn_shared ! use ice_grid, only: tarea ! local variables @@ -137,15 +134,14 @@ subroutine init_vp type (block) :: & this_block ! block information for current block + call init_dyn_shared(dt_dyn) + ! 
Initialize module variables allocate(icellT(max_blocks), icellU(max_blocks)) allocate(indxTi(nx_block*ny_block, max_blocks), & indxTj(nx_block*ny_block, max_blocks), & indxUi(nx_block*ny_block, max_blocks), & indxUj(nx_block*ny_block, max_blocks)) - allocate(fld2(nx_block,ny_block,2,max_blocks)) - allocate(fld3(nx_block,ny_block,3,max_blocks)) - allocate(fld4(nx_block,ny_block,4,max_blocks)) end subroutine init_vp diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index bd5a49eaf..ffe9ec587 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -55,7 +55,7 @@ subroutine dumpfile(filename_spec) use ice_blocks, only: nx_block, ny_block use ice_domain, only: nblocks use ice_domain_size, only: nilyr, nslyr, ncat, max_blocks - use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask + use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask, kdyn use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & @@ -215,45 +215,52 @@ subroutine dumpfile(filename_spec) !----------------------------------------------------------------- ! ice mask for dynamics !----------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = c0 - if (iceUmask(i,j,iblk)) work1(i,j,iblk) = c1 - enddo - enddo - enddo - !$OMP END PARALLEL DO - call write_restart_field(nu_dump,0,work1,'ruf8','iceumask',1,diag) - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - + if (kdyn > 0) then !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block work1(i,j,iblk) = c0 - if (iceNmask(i,j,iblk)) work1(i,j,iblk) = c1 + if (iceUmask(i,j,iblk)) work1(i,j,iblk) = c1 enddo enddo enddo !$OMP END PARALLEL DO - call write_restart_field(nu_dump,0,work1,'ruf8','icenmask',1,diag) + call write_restart_field(nu_dump,0,work1,'ruf8','iceumask',1,diag) - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = c0 - if (iceEmask(i,j,iblk)) work1(i,j,iblk) = c1 - enddo + if (grid_ice == 'CD' .or. grid_ice == 'C') then + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + if (iceNmask(i,j,iblk)) work1(i,j,iblk) = c1 + enddo + enddo enddo - enddo - !$OMP END PARALLEL DO - call write_restart_field(nu_dump,0,work1,'ruf8','iceemask',1,diag) + !$OMP END PARALLEL DO + call write_restart_field(nu_dump,0,work1,'ruf8','icenmask',1,diag) + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + if (iceEmask(i,j,iblk)) work1(i,j,iblk) = c1 + enddo + enddo + enddo + !$OMP END PARALLEL DO + call write_restart_field(nu_dump,0,work1,'ruf8','iceemask',1,diag) + endif + else + work1(:,:,:) = c0 + call write_restart_field(nu_dump,0,work1,'ruf8','iceumask',1,diag) + if (grid_ice == 'CD' .or. grid_ice == 'C') then + call write_restart_field(nu_dump,0,work1,'ruf8','icenmask',1,diag) + call write_restart_field(nu_dump,0,work1,'ruf8','iceemask',1,diag) + endif endif ! 
for mixed layer model
@@ -277,7 +284,7 @@ subroutine restartfile (ice_ic)
      use ice_domain, only: nblocks, halo_info
      use ice_domain_size, only: nilyr, nslyr, ncat, &
          max_blocks
-      use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask
+      use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask, kdyn
      use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, &
          strocnxT_iavg, strocnyT_iavg, sst, frzmlt, &
          stressp_1, stressp_2, stressp_3, stressp_4, &
@@ -524,57 +531,76 @@ subroutine restartfile (ice_ic)
      !-----------------------------------------------------------------
      ! ice mask for dynamics
      !-----------------------------------------------------------------
-      if (my_task == master_task) &
-         write(nu_diag,*) 'ice mask for dynamics'
-
-      call read_restart_field(nu_restart,0,work1,'ruf8', &
-           'iceumask',1,diag,field_loc_center, field_type_scalar)
-
-      iceUmask(:,:,:) = .false.
-      !$OMP PARALLEL DO PRIVATE(iblk,i,j)
-      do iblk = 1, nblocks
-         do j = 1, ny_block
-         do i = 1, nx_block
-            if (work1(i,j,iblk) > p5) iceUmask(i,j,iblk) = .true.
-         enddo
-         enddo
-      enddo
-      !$OMP END PARALLEL DO
+      if (kdyn > 0) then

-      if (grid_ice == 'CD' .or. grid_ice == 'C') then
-
-         if (query_field(nu_restart,'icenmask')) then
+         if (my_task == master_task) &
+            write(nu_diag,*) 'ice mask for dynamics'
+         if (query_field(nu_restart,'iceumask')) then
           call read_restart_field(nu_restart,0,work1,'ruf8', &
-               'icenmask',1,diag,field_loc_center, field_type_scalar)
+               'iceumask',1,diag,field_loc_center, field_type_scalar)

-          iceNmask(:,:,:) = .false.
+          iceUmask(:,:,:) = .false.
          !$OMP PARALLEL DO PRIVATE(iblk,i,j)
          do iblk = 1, nblocks
             do j = 1, ny_block
             do i = 1, nx_block
-               if (work1(i,j,iblk) > p5) iceNmask(i,j,iblk) = .true.
+               if (work1(i,j,iblk) > p5) iceUmask(i,j,iblk) = .true.
             enddo
             enddo
          enddo
          !$OMP END PARALLEL DO
         endif
-
-         if (query_field(nu_restart,'iceemask')) then
-           call read_restart_field(nu_restart,0,work1,'ruf8', &
-                'iceemask',1,diag,field_loc_center, field_type_scalar)
-
-           iceEmask(:,:,:) = .false.
-           !$OMP PARALLEL DO PRIVATE(iblk,i,j)
-           do iblk = 1, nblocks
-              do j = 1, ny_block
-              do i = 1, nx_block
-                 if (work1(i,j,iblk) > p5) iceEmask(i,j,iblk) = .true.
+         if (grid_ice == 'CD' .or. grid_ice == 'C') then
+
+            if (query_field(nu_restart,'icenmask')) then
+              call read_restart_field(nu_restart,0,work1,'ruf8', &
+                   'icenmask',1,diag,field_loc_center, field_type_scalar)
+
+              iceNmask(:,:,:) = .false.
+              !$OMP PARALLEL DO PRIVATE(iblk,i,j)
+              do iblk = 1, nblocks
+                 do j = 1, ny_block
+                 do i = 1, nx_block
+                    if (work1(i,j,iblk) > p5) iceNmask(i,j,iblk) = .true.
+                 enddo
+                 enddo
              enddo
+              !$OMP END PARALLEL DO
+            endif
+
+            if (query_field(nu_restart,'iceemask')) then
+              call read_restart_field(nu_restart,0,work1,'ruf8', &
+                   'iceemask',1,diag,field_loc_center, field_type_scalar)
+
+              iceEmask(:,:,:) = .false.
+              !$OMP PARALLEL DO PRIVATE(iblk,i,j)
+              do iblk = 1, nblocks
+                 do j = 1, ny_block
+                 do i = 1, nx_block
+                    if (work1(i,j,iblk) > p5) iceEmask(i,j,iblk) = .true.
+                 enddo
+                 enddo
              enddo
-         enddo
-         !$OMP END PARALLEL DO
+              !$OMP END PARALLEL DO
+            endif
         endif
+      else
+         if (my_task == master_task) &
+            write(nu_diag,*) 'ice mask for dynamics - not used, but mandatory to read from binary restart files'
+         if (query_field(nu_restart,'iceumask')) then
+            call read_restart_field(nu_restart,0,work1,'ruf8', &
+                 'iceumask',1,diag,field_loc_center, field_type_scalar)
+         endif
+         if (grid_ice == 'CD' .or. 
grid_ice == 'C') then + if (query_field(nu_restart,'icenmask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'icenmask',1,diag,field_loc_center, field_type_scalar) + endif + if (query_field(nu_restart,'iceemask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'iceemask',1,diag,field_loc_center, field_type_scalar) + endif endif - endif ! set Tsfcn to c0 on land diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 679a2b6e6..10fcf8b81 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -942,11 +942,8 @@ logical function query_field(nu,vname) query_field = .false. - if (my_task == master_task) then - status = pio_inq_varid(File,trim(vname),vardesc) - if (status == PIO_noerr) query_field = .true. - endif - call broadcast_scalar(query_field,master_task) + status = pio_inq_varid(File,trim(vname),vardesc) + if (status == PIO_noerr) query_field = .true. end function query_field diff --git a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 index 0b8ed689e..cd27f296e 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 @@ -70,9 +70,10 @@ subroutine cice_init use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & @@ -111,7 +112,6 @@ subroutine cice_init call alloc_grid ! allocate grid call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state - call alloc_dyn_shared ! allocate dyn shared (init_uvel,init_vvel) call alloc_flux_bgc ! allocate flux_bgc call alloc_flux ! allocate flux call init_ice_timers ! initialize all timers @@ -122,9 +122,9 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! 
define vp dynamics parameters, variables @@ -262,7 +262,7 @@ subroutine init_restart nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, & nt_iage, nt_FY, nt_aero, nt_fsd - character(len=*),parameter :: subname = '(init_restart)' + character(len=*), parameter :: subname = '(init_restart)' call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_warnings_flush(nu_diag) diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 index 0b8ed689e..cd27f296e 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 @@ -70,9 +70,10 @@ subroutine cice_init use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & @@ -111,7 +112,6 @@ subroutine cice_init call alloc_grid ! allocate grid call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state - call alloc_dyn_shared ! allocate dyn shared (init_uvel,init_vvel) call alloc_flux_bgc ! allocate flux_bgc call alloc_flux ! allocate flux call init_ice_timers ! initialize all timers @@ -122,9 +122,9 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! define vp dynamics parameters, variables @@ -262,7 +262,7 @@ subroutine init_restart nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, & nt_iage, nt_FY, nt_aero, nt_fsd - character(len=*),parameter :: subname = '(init_restart)' + character(len=*), parameter :: subname = '(init_restart)' call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_warnings_flush(nu_diag) diff --git a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 index a8bf96ad2..5ee070673 100644 --- a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 @@ -72,9 +72,10 @@ subroutine cice_init(mpicom_ice) use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & @@ -125,7 +126,6 @@ subroutine cice_init(mpicom_ice) call alloc_grid ! allocate grid arrays call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state arrays - call alloc_dyn_shared ! allocate dyn shared arrays call alloc_flux_bgc ! allocate flux_bgc arrays call alloc_flux ! 
allocate flux arrays
      call init_ice_timers     ! initialize all timers
      call init_calendar       ! initialize some calendar stuff
      call init_hist (dt)      ! initialize output history file
-     call init_dyn (dt_dyn)   ! define dynamics parameters, variables
-     if (kdyn == 2) then
-        call alloc_dyn_eap    ! allocate dyn_eap arrays
+     if (kdyn == 1) then
+        call init_evp
+     else if (kdyn == 2) then
         call init_eap         ! define eap dynamics parameters, variables
      else if (kdyn == 3) then
         call init_vp          ! define vp dynamics parameters, variables
diff --git a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90
index 5fbde9cce..091a948bb 100644
--- a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90
+++ b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90
@@ -36,7 +36,6 @@ subroutine cice_init1()
      use ice_domain        , only: init_domain_blocks
      use ice_arrays_column , only: alloc_arrays_column
      use ice_state         , only: alloc_state
-     use ice_dyn_shared    , only: alloc_dyn_shared
      use ice_flux_bgc      , only: alloc_flux_bgc
      use ice_flux          , only: alloc_flux
      use ice_timers        , only: timer_total, init_ice_timers, ice_timer_start
@@ -59,7 +58,6 @@ subroutine cice_init1()
      call alloc_grid          ! allocate grid arrays
      call alloc_arrays_column ! allocate column arrays
      call alloc_state         ! allocate state arrays
-     call alloc_dyn_shared    ! allocate dyn shared arrays
      call alloc_flux_bgc      ! allocate flux_bgc arrays
      call alloc_flux          ! allocate flux arrays
      call init_ice_timers     ! initialize all timers
@@ -79,9 +77,10 @@ subroutine cice_init2()
      use ice_communicate   , only: my_task, master_task
      use ice_diagnostics   , only: init_diags
      use ice_domain_size   , only: ncat, nfsd, nfreq
-     use ice_dyn_eap       , only: init_eap, alloc_dyn_eap
-     use ice_dyn_shared    , only: kdyn, init_dyn
+     use ice_dyn_eap       , only: init_eap
+     use ice_dyn_evp       , only: init_evp
      use ice_dyn_vp        , only: init_vp
+     use ice_dyn_shared    , only: kdyn
      use ice_flux          , only: init_coupler_flux, init_history_therm
      use ice_flux          , only: init_history_dyn, init_flux_atm, init_flux_ocn
      use ice_forcing       , only: init_snowtable
@@ -107,9 +106,9 @@ subroutine cice_init2()
      call init_calendar ! initialize some calendar stuff
      call init_hist (dt) ! initialize output history file
-     call init_dyn (dt_dyn) ! define dynamics parameters, variables
-     if (kdyn == 2) then
-        call alloc_dyn_eap ! allocate dyn_eap arrays
+     if (kdyn == 1) then
+        call init_evp ! allocate dyn_evp arrays
+     else if (kdyn == 2) then
         call init_eap ! define eap dynamics parameters, variables
      else if (kdyn == 3) then
         call init_vp ! define vp dynamics parameters, variables
diff --git a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90
index 22596429d..02356e2ba 100644
--- a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90
+++ b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90
@@ -15,6 +15,7 @@ module CICE_InitMod
      use ice_kinds_mod
      use ice_exit, only: abort_ice
      use ice_fileunits, only: init_fileunits, nu_diag
+     use ice_memusage, only: ice_memusage_init, ice_memusage_print
      use icepack_intfc, only: icepack_aggregate
      use icepack_intfc, only: icepack_init_itd, icepack_init_itd_hist
      use icepack_intfc, only: icepack_init_fsd_bounds, icepack_init_wave
@@ -47,9 +48,9 @@ subroutine CICE_Initialize(mpi_comm)
      integer (kind=int_kind), optional, intent(in) :: mpi_comm ! communicator from nuopc
      character(len=*), parameter :: subname='(CICE_Initialize)'
-   !--------------------------------------------------------------------
-   ! 
-     ! model initialization
-     !--------------------------------------------------------------------
+      !--------------------------------------------------------------------
+      ! model initialization
+      !--------------------------------------------------------------------

       if (present(mpi_comm)) then
          call cice_init(mpi_comm)
@@ -70,15 +71,16 @@ subroutine cice_init(mpi_comm)
          floe_binwidth, c_fsd_range
       use ice_state, only: alloc_state
       use ice_flux_bgc, only: alloc_flux_bgc
-      use ice_calendar, only: dt, dt_dyn, istep, istep1, write_ic, &
+      use ice_calendar, only: dt, dt_dyn, write_ic, &
          init_calendar, advance_timestep, calc_timesteps
       use ice_communicate, only: init_communicate, my_task, master_task
       use ice_diagnostics, only: init_diags
       use ice_domain, only: init_domain_blocks
       use ice_domain_size, only: ncat, nfsd
-      use ice_dyn_eap, only: init_eap, alloc_dyn_eap
-      use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared
+      use ice_dyn_eap, only: init_eap
+      use ice_dyn_evp, only: init_evp
       use ice_dyn_vp, only: init_vp
+      use ice_dyn_shared, only: kdyn
       use ice_flux, only: init_coupler_flux, init_history_therm, &
          init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux
       use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, &
@@ -122,12 +124,17 @@ subroutine cice_init(mpi_comm)
       call input_zbgc           ! vertical biogeochemistry namelist
       call count_tracers        ! count tracers

+      ! Call this as early as possible, must be after memory_stats is read
+      if (my_task == master_task) then
+         call ice_memusage_init(nu_diag)
+         call ice_memusage_print(nu_diag,subname//':start')
+      endif
+
       call init_domain_blocks   ! set up block decomposition
       call init_grid1           ! domain distribution
       call alloc_grid           ! allocate grid arrays
       call alloc_arrays_column  ! allocate column arrays
       call alloc_state          ! allocate state arrays
-      call alloc_dyn_shared     ! allocate dyn shared arrays
       call alloc_flux_bgc       ! allocate flux_bgc arrays
       call alloc_flux           ! allocate flux arrays
       call init_ice_timers      ! initialize all timers
@@ -137,9 +144,9 @@ subroutine cice_init(mpi_comm)
       call init_calendar        ! initialize some calendar stuff
       call init_hist (dt)       ! initialize output history file

-      call init_dyn (dt_dyn)    ! define dynamics parameters, variables
-      if (kdyn == 2) then
-         call alloc_dyn_eap     ! allocate dyn_eap arrays
+      if (kdyn == 1) then
+         call init_evp
+      else if (kdyn == 2) then
          call init_eap          ! define eap dynamics parameters, variables
       else if (kdyn == 3) then
          call init_vp           ! define vp dynamics parameters, variables
@@ -254,6 +261,10 @@ subroutine cice_init(mpi_comm)
       if (write_ic) call accum_hist(dt) ! write initial conditions

+      if (my_task == master_task) then
+         call ice_memusage_print(nu_diag,subname//':end')
+      endif
+
       end subroutine cice_init

 !=======================================================================
diff --git a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 b/cicecore/drivers/standalone/cice/CICE_InitMod.F90
index 9c30b15a3..c91dae4b4 100644
--- a/cicecore/drivers/standalone/cice/CICE_InitMod.F90
+++ b/cicecore/drivers/standalone/cice/CICE_InitMod.F90
@@ -72,9 +72,10 @@ subroutine cice_init
       use ice_diagnostics, only: init_diags
       use ice_domain, only: init_domain_blocks
       use ice_domain_size, only: ncat, nfsd
-      use ice_dyn_eap, only: init_eap, alloc_dyn_eap
-      use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared
+      use ice_dyn_eap, only: init_eap
+      use ice_dyn_evp, only: init_evp
       use ice_dyn_vp, only: init_vp
+      use ice_dyn_shared, only: kdyn
       use ice_flux, only: init_coupler_flux, init_history_therm, &
          init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux
       use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, &
@@ -122,7 +123,6 @@ subroutine cice_init
       call alloc_grid          ! allocate grid arrays
       call alloc_arrays_column ! allocate column arrays
       call alloc_state         ! allocate state arrays
-      call alloc_dyn_shared    ! allocate dyn shared arrays
       call alloc_flux_bgc      ! allocate flux_bgc arrays
       call alloc_flux          ! allocate flux arrays
       call init_ice_timers     ! initialize all timers
@@ -132,9 +132,9 @@ subroutine cice_init
       call init_calendar       ! initialize some calendar stuff
       call init_hist (dt)      ! initialize output history file

-      call init_dyn (dt_dyn)   ! define dynamics parameters, variables
-      if (kdyn == 2) then
-         call alloc_dyn_eap    ! allocate dyn_eap arrays
+      if (kdyn == 1) then
+         call init_evp
+      else if (kdyn == 2) then
          call init_eap         ! define eap dynamics parameters, variables
       else if (kdyn == 3) then
          call init_vp          ! define vp dynamics parameters, variables
diff --git a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90
index 84d1a3a60..c65f04150 100644
--- a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90
+++ b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90
@@ -70,9 +70,10 @@ subroutine cice_init
       use ice_diagnostics, only: init_diags
       use ice_domain, only: init_domain_blocks
       use ice_domain_size, only: ncat, nfsd
-      use ice_dyn_eap, only: init_eap, alloc_dyn_eap
-      use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared
+      use ice_dyn_eap, only: init_eap
+      use ice_dyn_evp, only: init_evp
       use ice_dyn_vp, only: init_vp
+      use ice_dyn_shared, only: kdyn
       use ice_flux, only: init_coupler_flux, init_history_therm, &
          init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux
       use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, &
@@ -113,7 +114,6 @@ subroutine cice_init
       call alloc_grid          ! allocate grid arrays
       call alloc_arrays_column ! allocate column arrays
       call alloc_state         ! allocate state arrays
-      call alloc_dyn_shared    ! allocate dyn shared arrays
       call alloc_flux_bgc      ! allocate flux_bgc arrays
       call alloc_flux          ! allocate flux arrays
       call init_ice_timers     ! initialize all timers
@@ -123,9 +123,9 @@ subroutine cice_init
       call init_calendar       ! initialize some calendar stuff
       call init_hist (dt)      ! initialize output history file

-      call init_dyn (dt_dyn)   ! define dynamics parameters, variables
-      if (kdyn == 2) then
-         call alloc_dyn_eap    ! allocate dyn_eap arrays
+      if (kdyn == 1) then
+         call init_evp
+      else if (kdyn == 2) then
          call init_eap         ! define eap dynamics parameters, variables
       else if (kdyn == 3) then
          call init_vp          ! define vp dynamics parameters, variables
diff --git a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90
index 84d1a3a60..f0877d502 100644
--- a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90
+++ b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90
@@ -70,8 +70,9 @@ subroutine cice_init
       use ice_diagnostics, only: init_diags
       use ice_domain, only: init_domain_blocks
       use ice_domain_size, only: ncat, nfsd
-      use ice_dyn_eap, only: init_eap, alloc_dyn_eap
-      use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared
+      use ice_dyn_eap, only: init_eap
+      use ice_dyn_evp, only: init_evp
+      use ice_dyn_shared, only: kdyn
       use ice_dyn_vp, only: init_vp
       use ice_flux, only: init_coupler_flux, init_history_therm, &
          init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux
@@ -113,7 +114,6 @@ subroutine cice_init
       call alloc_grid          ! allocate grid arrays
       call alloc_arrays_column ! allocate column arrays
       call alloc_state         ! allocate state arrays
-      call alloc_dyn_shared    ! allocate dyn shared arrays
       call alloc_flux_bgc      ! allocate flux_bgc arrays
       call alloc_flux          ! allocate flux arrays
       call init_ice_timers     ! initialize all timers
@@ -123,9 +123,9 @@ subroutine cice_init
       call init_calendar       ! initialize some calendar stuff
       call init_hist (dt)      ! initialize output history file

-      call init_dyn (dt_dyn)   ! define dynamics parameters, variables
-      if (kdyn == 2) then
-         call alloc_dyn_eap    ! allocate dyn_eap arrays
+      if (kdyn == 1) then
+         call init_evp
+      else if (kdyn == 2) then
          call init_eap         ! define eap dynamics parameters, variables
       else if (kdyn == 3) then
          call init_vp          ! define vp dynamics parameters, variables
diff --git a/configuration/scripts/options/set_nml.dyneap b/configuration/scripts/options/set_nml.dyneap
new file mode 100644
index 000000000..0a5140ac7
--- /dev/null
+++ b/configuration/scripts/options/set_nml.dyneap
@@ -0,0 +1,2 @@
+kdyn = 2