From 1cc4372f944a7e726f6db94e239c8aaafc886d54 Mon Sep 17 00:00:00 2001 From: apcraig Date: Fri, 29 Oct 2021 18:35:19 -0600 Subject: [PATCH 001/109] initial implementation of N+E LON/LAT variables --- cicecore/cicedynB/infrastructure/ice_grid.F90 | 175 ++++++++++++++++-- 1 file changed, 162 insertions(+), 13 deletions(-) diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 18dbaaefe..04b1fe9b9 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -14,6 +14,12 @@ ! 2006: Converted to free source form (F90) by Elizabeth Hunke ! 2007: Option to read from netcdf files (A. Keen, Met Office) ! Grid reading routines reworked by E. Hunke for boundary values +! 2021: Add N (center of north face) and E (center of east face) grids +! to support CD solvers. Defining T at center of cells, U at +! NE corner, N at center of top face, E at center of right face. +! All cells are quadrilaterals with NE, E, and N associated with +! directions relative to logical grid. E is increasing i (x) and +! N is increasing j (y) direction. module ice_grid @@ -68,10 +74,14 @@ module ice_grid tinyarea,& ! puny*tarea tarean , & ! area of NH T-cells tareas , & ! area of SH T-cells - ULON , & ! longitude of velocity pts (radians) - ULAT , & ! latitude of velocity pts (radians) - TLON , & ! longitude of temp pts (radians) - TLAT , & ! latitude of temp pts (radians) + ULON , & ! longitude of velocity pts, NE corner of T pts (radians) + ULAT , & ! latitude of velocity pts, NE corner of T pts (radians) + TLON , & ! longitude of temp (T) pts (radians) + TLAT , & ! latitude of temp (T) pts (radians) + NLON , & ! longitude of center of north face of T pts (radians) + NLAT , & ! latitude of center of north face of T pts (radians) + ELON , & ! longitude of center of east face of T pts (radians) + ELAT , & ! latitude of center of east face of T pts (radians) ANGLE , & ! for conversions between POP grid and lat/lon ANGLET , & ! ANGLE converted to T-cells bathymetry , & ! ocean depth, for grounding keels and bergs (m) @@ -146,7 +156,6 @@ module ice_grid logical (kind=log_kind), private :: & l_readCenter ! If anglet exist in grid file read it otherwise calculate it - !======================================================================= contains @@ -175,10 +184,14 @@ subroutine alloc_grid tinyarea (nx_block,ny_block,max_blocks), & ! puny*tarea tarean (nx_block,ny_block,max_blocks), & ! area of NH T-cells tareas (nx_block,ny_block,max_blocks), & ! area of SH T-cells - ULON (nx_block,ny_block,max_blocks), & ! longitude of velocity pts (radians) - ULAT (nx_block,ny_block,max_blocks), & ! latitude of velocity pts (radians) - TLON (nx_block,ny_block,max_blocks), & ! longitude of temp pts (radians) - TLAT (nx_block,ny_block,max_blocks), & ! latitude of temp pts (radians) + ULON (nx_block,ny_block,max_blocks), & ! longitude of U pts, NE corner (radians) + ULAT (nx_block,ny_block,max_blocks), & ! latitude of U pts, NE corner (radians) + TLON (nx_block,ny_block,max_blocks), & ! longitude of T pts (radians) + TLAT (nx_block,ny_block,max_blocks), & ! latitude of T pts (radians) + NLON (nx_block,ny_block,max_blocks), & ! longitude of N pts, N face (radians) + NLAT (nx_block,ny_block,max_blocks), & ! latitude of N pts, N face (radians) + ELON (nx_block,ny_block,max_blocks), & ! longitude of E pts, E face (radians) + ELAT (nx_block,ny_block,max_blocks), & ! 
latitude of E pts, E face (radians) ANGLE (nx_block,ny_block,max_blocks), & ! for conversions between POP grid and lat/lon ANGLET (nx_block,ny_block,max_blocks), & ! ANGLE converted to T-cells bathymetry(nx_block,ny_block,max_blocks),& ! ocean depth, for grounding keels and bergs (m) @@ -1158,6 +1171,10 @@ subroutine latlongrid endif endif ULON (i,j,iblk) = c0 + NLON (i,j,iblk) = c0 + NLAT (i,j,iblk) = c0 + ELON (i,j,iblk) = c0 + ELAT (i,j,iblk) = c0 ANGLE (i,j,iblk) = c0 ANGLET(i,j,iblk) = c0 @@ -1749,8 +1766,9 @@ end subroutine makemask subroutine Tlatlon - use ice_constants, only: c0, c1, c2, c4, & - field_loc_center, field_type_scalar + use ice_constants, only: c0, c1, c1p5, c2, c4, p5, & + field_loc_center, field_loc_Nface, field_loc_Eface, & + field_type_scalar integer (kind=int_kind) :: & i, j, iblk , & ! horizontal indices @@ -1772,6 +1790,10 @@ subroutine Tlatlon TLAT(:,:,:) = c0 TLON(:,:,:) = c0 + NLAT(:,:,:) = c0 + NLON(:,:,:) = c0 + ELAT(:,:,:) = c0 + ELON(:,:,:) = c0 !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & !$OMP x1,y1,z1,x2,y2,z2,x3,y3,z3,x4,y4,z4, & @@ -1806,6 +1828,10 @@ subroutine Tlatlon y4 = sin(ULON(i,j,iblk))*z4 z4 = sin(ULAT(i,j,iblk)) + ! --------- + ! TLON/TLAT 4 pt computation (pts 1, 2, 3, 4) + ! --------- + tx = (x1+x2+x3+x4)/c4 ty = (y1+y2+y3+y4)/c4 tz = (z1+z2+z3+z4)/c4 @@ -1819,11 +1845,90 @@ subroutine Tlatlon ! TLAT in radians North TLAT(i,j,iblk) = asin(tz) + +! these two loops should be merged to save cos/sin calculations, +! but atan2 is not bit-for-bit. This suggests the result for atan2 depends on +! the prior atan2 call ??? not sure what's going on. +#if (1 == 1) + enddo ! i + enddo ! j + enddo ! iblk + !$OMP END PARALLEL DO + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & + !$OMP x1,y1,z1,x2,y2,z2,x3,y3,z3,x4,y4,z4, & + !$OMP tx,ty,tz,da) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + + z1 = cos(ULAT(i-1,j-1,iblk)) + x1 = cos(ULON(i-1,j-1,iblk))*z1 + y1 = sin(ULON(i-1,j-1,iblk))*z1 + z1 = sin(ULAT(i-1,j-1,iblk)) + + z2 = cos(ULAT(i,j-1,iblk)) + x2 = cos(ULON(i,j-1,iblk))*z2 + y2 = sin(ULON(i,j-1,iblk))*z2 + z2 = sin(ULAT(i,j-1,iblk)) + + z3 = cos(ULAT(i-1,j,iblk)) + x3 = cos(ULON(i-1,j,iblk))*z3 + y3 = sin(ULON(i-1,j,iblk))*z3 + z3 = sin(ULAT(i-1,j,iblk)) + + z4 = cos(ULAT(i,j,iblk)) + x4 = cos(ULON(i,j,iblk))*z4 + y4 = sin(ULON(i,j,iblk))*z4 + z4 = sin(ULAT(i,j,iblk)) +#endif + ! --------- + ! NLON/NLAT 2 pt computation (pts 3, 4) + ! --------- + + tx = (x3+x4)/c2 + ty = (y3+y4)/c2 + tz = (z3+z4)/c2 + da = sqrt(tx**2+ty**2+tz**2) + + tz = tz/da + + ! NLON in radians East + NLON(i,j,iblk) = c0 + if (tx /= c0 .or. ty /= c0) NLON(i,j,iblk) = atan2(ty,tx) + + ! NLAT in radians North + NLAT(i,j,iblk) = asin(tz) + + ! --------- + ! ELON/ELAT 2 pt computation (pts 2, 4) + ! --------- + + tx = (x2+x4)/c2 + ty = (y2+y4)/c2 + tz = (z2+z4)/c2 + da = sqrt(tx**2+ty**2+tz**2) + + tz = tz/da + + ! ELON in radians East + ELON(i,j,iblk) = c0 + if (tx /= c0 .or. ty /= c0) ELON(i,j,iblk) = atan2(ty,tx) + + ! ELAT in radians North + ELAT(i,j,iblk) = asin(tz) enddo ! i enddo ! j enddo ! iblk !$OMP END PARALLEL DO + if (trim(grid_type) == 'regional') then ! 
for W boundary extrapolate from interior !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) @@ -1841,6 +1946,10 @@ subroutine Tlatlon TLON(i+2,j,iblk) TLAT(i,j,iblk) = c2*TLAT(i+1,j,iblk) - & TLAT(i+2,j,iblk) + NLON(i,j,iblk) = c1p5*TLON(i+1,j,iblk) - & + p5*TLON(i+2,j,iblk) + NLAT(i,j,iblk) = c1p5*TLAT(i+1,j,iblk) - & + p5*TLAT(i+2,j,iblk) enddo endif enddo @@ -1854,10 +1963,30 @@ subroutine Tlatlon call ice_HaloUpdate (TLAT, halo_info, & field_loc_center, field_type_scalar, & fillValue=c1) + call ice_HaloUpdate (NLON, halo_info, & + field_loc_Nface, field_type_scalar, & + fillValue=c1) + call ice_HaloUpdate (NLAT, halo_info, & + field_loc_Nface, field_type_scalar, & + fillValue=c1) + call ice_HaloUpdate (ELON, halo_info, & + field_loc_Eface, field_type_scalar, & + fillValue=c1) + call ice_HaloUpdate (ELAT, halo_info, & + field_loc_Eface, field_type_scalar, & + fillValue=c1) call ice_HaloExtrapolate(TLON, distrb_info, & ew_boundary_type, ns_boundary_type) call ice_HaloExtrapolate(TLAT, distrb_info, & ew_boundary_type, ns_boundary_type) + call ice_HaloExtrapolate(NLON, distrb_info, & + ew_boundary_type, ns_boundary_type) + call ice_HaloExtrapolate(NLAT, distrb_info, & + ew_boundary_type, ns_boundary_type) + call ice_HaloExtrapolate(ELON, distrb_info, & + ew_boundary_type, ns_boundary_type) + call ice_HaloExtrapolate(ELAT, distrb_info, & + ew_boundary_type, ns_boundary_type) call ice_timer_stop(timer_bound) x1 = global_minval(TLON, distrb_info, tmask) @@ -1872,14 +2001,34 @@ subroutine Tlatlon if (my_task==master_task) then write(nu_diag,*) ' ' - if (nx_block > 5+2*nghost .and. ny_block > 5+2*nghost) then +! if (nx_block > 5+2*nghost .and. ny_block > 5+2*nghost) then write(nu_diag,*) 'min/max ULON:', y1*rad_to_deg, y2*rad_to_deg write(nu_diag,*) 'min/max ULAT:', y3*rad_to_deg, y4*rad_to_deg - endif +! endif write(nu_diag,*) 'min/max TLON:', x1*rad_to_deg, x2*rad_to_deg write(nu_diag,*) 'min/max TLAT:', x3*rad_to_deg, x4*rad_to_deg endif ! my_task + x1 = global_minval(NLON, distrb_info, tmask) + x2 = global_maxval(NLON, distrb_info, tmask) + x3 = global_minval(NLAT, distrb_info, tmask) + x4 = global_maxval(NLAT, distrb_info, tmask) + + y1 = global_minval(ELON, distrb_info, umask) + y2 = global_maxval(ELON, distrb_info, umask) + y3 = global_minval(ELAT, distrb_info, umask) + y4 = global_maxval(ELAT, distrb_info, umask) + + if (my_task==master_task) then + write(nu_diag,*) ' ' +! if (nx_block > 5+2*nghost .and. ny_block > 5+2*nghost) then + write(nu_diag,*) 'min/max NLON:', x1*rad_to_deg, x2*rad_to_deg + write(nu_diag,*) 'min/max NLAT:', x3*rad_to_deg, x4*rad_to_deg + write(nu_diag,*) 'min/max ELON:', y1*rad_to_deg, y2*rad_to_deg + write(nu_diag,*) 'min/max ELAT:', y3*rad_to_deg, y4*rad_to_deg +! endif + endif ! 
my_task + end subroutine Tlatlon !======================================================================= From 7f738900f94fcbf67bca48bf555a89ff747a91cb Mon Sep 17 00:00:00 2001 From: apcraig Date: Sat, 30 Oct 2021 20:20:49 -0600 Subject: [PATCH 002/109] add dx,dy,area,mask for N and E and write to history files --- cicecore/cicedynB/analysis/ice_history.F90 | 18 + .../cicedynB/analysis/ice_history_shared.F90 | 81 ++- cicecore/cicedynB/infrastructure/ice_grid.F90 | 532 ++++++++++++++++-- .../io/io_netcdf/ice_history_write.F90 | 346 +++++++----- 4 files changed, 764 insertions(+), 213 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90 index 4b295b54d..a2806b429 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -290,13 +290,22 @@ subroutine init_hist (dt) if (tr_fsd) f_NFSD = .true. call broadcast_scalar (f_tmask, master_task) + call broadcast_scalar (f_umask, master_task) + call broadcast_scalar (f_nmask, master_task) + call broadcast_scalar (f_emask, master_task) call broadcast_scalar (f_blkmask, master_task) call broadcast_scalar (f_tarea, master_task) call broadcast_scalar (f_uarea, master_task) + call broadcast_scalar (f_narea, master_task) + call broadcast_scalar (f_earea, master_task) call broadcast_scalar (f_dxt, master_task) call broadcast_scalar (f_dyt, master_task) call broadcast_scalar (f_dxu, master_task) call broadcast_scalar (f_dyu, master_task) + call broadcast_scalar (f_dxn, master_task) + call broadcast_scalar (f_dyn, master_task) + call broadcast_scalar (f_dxe, master_task) + call broadcast_scalar (f_dye, master_task) call broadcast_scalar (f_HTN, master_task) call broadcast_scalar (f_HTE, master_task) call broadcast_scalar (f_ANGLE, master_task) @@ -1592,13 +1601,22 @@ subroutine init_hist (dt) igrd=.true. igrd(n_tmask ) = f_tmask + igrd(n_umask ) = f_umask + igrd(n_nmask ) = f_nmask + igrd(n_emask ) = f_emask igrd(n_blkmask ) = f_blkmask igrd(n_tarea ) = f_tarea igrd(n_uarea ) = f_uarea + igrd(n_narea ) = f_narea + igrd(n_earea ) = f_earea igrd(n_dxt ) = f_dxt igrd(n_dyt ) = f_dyt igrd(n_dxu ) = f_dxu igrd(n_dyu ) = f_dyu + igrd(n_dxn ) = f_dxn + igrd(n_dyn ) = f_dyn + igrd(n_dxe ) = f_dxe + igrd(n_dye ) = f_dye igrd(n_HTN ) = f_HTN igrd(n_HTE ) = f_HTE igrd(n_ANGLE ) = f_ANGLE diff --git a/cicecore/cicedynB/analysis/ice_history_shared.F90 b/cicecore/cicedynB/analysis/ice_history_shared.F90 index 9b58deeec..1c8823b62 100644 --- a/cicecore/cicedynB/analysis/ice_history_shared.F90 +++ b/cicecore/cicedynB/analysis/ice_history_shared.F90 @@ -120,9 +120,9 @@ module ice_history_shared avail_hist_fields(max_avail_hist_fields) integer (kind=int_kind), parameter, public :: & - nvar = 12 , & ! number of grid fields that can be written + nvar_grd = 21 , & ! number of grid fields that can be written ! excluding grid vertices - nvarz = 6 ! number of category/vertical grid fields written + nvar_grdz = 6 ! number of category/vertical grid fields written integer (kind=int_kind), public :: & ncat_hist , & ! number of thickness categories written <= ncat @@ -152,32 +152,52 @@ module ice_history_shared avgct(max_nstrm) ! average sample counter logical (kind=log_kind), public :: & - igrd (nvar), & ! true if grid field is written to output file - igrdz(nvarz) ! true if category/vertical grid field is written + igrd (nvar_grd), & ! true if grid field is written to output file + igrdz(nvar_grdz) ! 
true if category/vertical grid field is written character (len=25), public, parameter :: & tcstr = 'area: tarea' , & ! vcellmeas for T cell quantities ucstr = 'area: uarea' , & ! vcellmeas for U cell quantities + ncstr = 'area: narea' , & ! vcellmeas for N cell quantities + ecstr = 'area: earea' , & ! vcellmeas for E cell quantities tstr2D = 'TLON TLAT time' , & ! vcoord for T cell quantities, 2D ustr2D = 'ULON ULAT time' , & ! vcoord for U cell quantities, 2D + nstr2D = 'NLON NLAT time' , & ! vcoord for N cell quantities, 2D + estr2D = 'ELON ELAT time' , & ! vcoord for E cell quantities, 2D tstr3Dz = 'TLON TLAT VGRDi time',& ! vcoord for T cell quantities, 3D ustr3Dz = 'ULON ULAT VGRDi time',& ! vcoord for U cell quantities, 3D + nstr3Dz = 'NLON NLAT VGRDi time',& ! vcoord for N cell quantities, 3D + estr3Dz = 'ELON ELAT VGRDi time',& ! vcoord for E cell quantities, 3D tstr3Dc = 'TLON TLAT NCAT time',& ! vcoord for T cell quantities, 3D ustr3Dc = 'ULON ULAT NCAT time',& ! vcoord for U cell quantities, 3D + nstr3Dc = 'NLON NLAT NCAT time',& ! vcoord for N cell quantities, 3D + estr3Dc = 'ELON ELAT NCAT time',& ! vcoord for E cell quantities, 3D tstr3Db = 'TLON TLAT VGRDb time',& ! vcoord for T cell quantities, 3D ustr3Db = 'ULON ULAT VGRDb time',& ! vcoord for U cell quantities, 3D + nstr3Db = 'NLON NLAT VGRDb time',& ! vcoord for N cell quantities, 3D + estr3Db = 'ELON ELAT VGRDb time',& ! vcoord for E cell quantities, 3D tstr3Da = 'TLON TLAT VGRDa time',& ! vcoord for T cell quantities, 3D ustr3Da = 'ULON ULAT VGRDa time',& ! vcoord for U cell quantities, 3D + nstr3Da = 'NLON NLAT VGRDa time',& ! vcoord for N cell quantities, 3D + estr3Da = 'ELON ELAT VGRDa time',& ! vcoord for E cell quantities, 3D tstr3Df = 'TLON TLAT NFSD time',& ! vcoord for T cell quantities, 3D ustr3Df = 'ULON ULAT NFSD time',& ! vcoord for U cell quantities, 3D + nstr3Df = 'NLON NLAT NFSD time',& ! vcoord for N cell quantities, 3D + estr3Df = 'ELON ELAT NFSD time',& ! vcoord for E cell quantities, 3D !ferret tstr4Di = 'TLON TLAT VGRDi NCAT', & ! vcoord for T cell, 4D, ice ustr4Di = 'ULON ULAT VGRDi NCAT', & ! vcoord for U cell, 4D, ice + nstr4Di = 'NLON NLAT VGRDi NCAT', & ! vcoord for N cell, 4D, ice + estr4Di = 'ELON ELAT VGRDi NCAT', & ! vcoord for E cell, 4D, ice tstr4Ds = 'TLON TLAT VGRDs NCAT', & ! vcoord for T cell, 4D, snow ustr4Ds = 'ULON ULAT VGRDs NCAT', & ! vcoord for U cell, 4D, snow + nstr4Ds = 'NLON NLAT VGRDs NCAT', & ! vcoord for N cell, 4D, snow + estr4Ds = 'ELON ELAT VGRDs NCAT', & ! vcoord for E cell, 4D, snow tstr4Df = 'TLON TLAT NFSD NCAT', & ! vcoord for T cell, 4D, fsd - ustr4Df = 'ULON ULAT NFSD NCAT' ! vcoord for U cell, 4D, fsd + ustr4Df = 'ULON ULAT NFSD NCAT', & ! vcoord for U cell, 4D, fsd + nstr4Df = 'NLON NLAT NFSD NCAT', & ! vcoord for N cell, 4D, fsd + estr4Df = 'ELON ELAT NFSD NCAT' ! vcoord for E cell, 4D, fsd !ferret ! tstr4Di = 'TLON TLAT VGRDi NCAT time', & ! ferret can not handle time ! ustr4Di = 'ULON ULAT VGRDi NCAT time', & ! index on 4D variables. 
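
The T/U/N/E coordinate and cell-measure strings above are consumed when history fields are registered in init_hist via define_hist_field. A minimal sketch of how an E-grid field would select them, assuming the usual calling context inside the stream loop of init_hist (ns1 is the stream index there; the field uvelE, its id n_uvelE, and its frequency flag f_uvelE are hypothetical names used only for illustration, not names added by this patch):

      if (f_uvelE(1:1) /= 'x') &                                   ! f_uvelE is hypothetical
         call define_hist_field(n_uvelE,"uvelE","m/s",estr2D, ecstr, & ! hypothetical E-grid field
             "ice velocity (x) at E point",                         &
             "positive is x direction on E grid", c1, c0,           &
             ns1, f_uvelE)

Relative to a U-grid field, the only changes are estr2D in place of ustr2D (the coordinates attribute) and ecstr in place of ucstr (the cell measures attribute).
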
@@ -193,10 +213,15 @@ module ice_history_shared !--------------------------------------------------------------- logical (kind=log_kind), public :: & - f_tmask = .true., f_blkmask = .true., & + f_tmask = .true., f_umask = .true., & + f_nmask = .true., f_emask = .true., & + f_blkmask = .true., & f_tarea = .true., f_uarea = .true., & + f_narea = .true., f_earea = .true., & f_dxt = .true., f_dyt = .true., & f_dxu = .true., f_dyu = .true., & + f_dxn = .true., f_dyn = .true., & + f_dxe = .true., f_dye = .true., & f_HTN = .true., f_HTE = .true., & f_ANGLE = .true., f_ANGLET = .true., & f_bounds = .true., f_NCAT = .true., & @@ -339,10 +364,15 @@ module ice_history_shared !--------------------------------------------------------------- namelist / icefields_nml / & - f_tmask , f_blkmask , & + f_tmask , f_umask , & + f_nmask , f_emask , & + f_blkmask , & f_tarea , f_uarea , & + f_narea , f_earea , & f_dxt , f_dyt , & f_dxu , f_dyu , & + f_dxn , f_dyn , & + f_dxe , f_dye , & f_HTN , f_HTE , & f_ANGLE , f_ANGLET , & f_bounds , f_NCAT , & @@ -484,17 +514,26 @@ module ice_history_shared integer (kind=int_kind), parameter, public :: & n_tmask = 1, & - n_blkmask = 2, & - n_tarea = 3, & - n_uarea = 4, & - n_dxt = 5, & - n_dyt = 6, & - n_dxu = 7, & - n_dyu = 8, & - n_HTN = 9, & - n_HTE = 10, & - n_ANGLE = 11, & - n_ANGLET = 12, & + n_umask = 2, & + n_nmask = 3, & + n_emask = 4, & + n_blkmask = 5, & + n_tarea = 6, & + n_uarea = 7, & + n_narea = 8, & + n_earea = 9, & + n_dxt = 10, & + n_dyt = 11, & + n_dxu = 12, & + n_dyu = 13, & + n_dxn = 14, & + n_dyn = 15, & + n_dxe = 16, & + n_dye = 17, & + n_HTN = 18, & + n_HTE = 19, & + n_ANGLE = 20, & + n_ANGLET = 21, & n_NCAT = 1, & n_VGRDi = 2, & @@ -506,7 +545,11 @@ module ice_history_shared n_lont_bnds = 1, & n_latt_bnds = 2, & n_lonu_bnds = 3, & - n_latu_bnds = 4 + n_latu_bnds = 4, & + n_lonn_bnds = 5, & + n_latn_bnds = 6, & + n_lone_bnds = 7, & + n_late_bnds = 8 integer (kind=int_kind), dimension(max_nstrm), public :: & ! n_example , & diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 04b1fe9b9..0b6fba962 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -65,10 +65,16 @@ module ice_grid dyt , & ! height of T-cell through the middle (m) dxu , & ! width of U-cell through the middle (m) dyu , & ! height of U-cell through the middle (m) + dxn , & ! width of N-cell through the middle (m) + dyn , & ! height of N-cell through the middle (m) + dxe , & ! width of E-cell through the middle (m) + dye , & ! height of E-cell through the middle (m) HTE , & ! length of eastern edge of T-cell (m) HTN , & ! length of northern edge of T-cell (m) tarea , & ! area of T-cell (m^2) uarea , & ! area of U-cell (m^2) + narea , & ! area of N-cell (m^2) + earea , & ! area of E-cell (m^2) tarear , & ! 1/tarea uarear , & ! 1/uarea tinyarea,& ! puny*tarea @@ -110,7 +116,11 @@ module ice_grid lont_bounds, & ! longitude of gridbox corners for T point latt_bounds, & ! latitude of gridbox corners for T point lonu_bounds, & ! longitude of gridbox corners for U point - latu_bounds ! latitude of gridbox corners for U point + latu_bounds, & ! latitude of gridbox corners for U point + lonn_bounds, & ! longitude of gridbox corners for N point + latn_bounds, & ! latitude of gridbox corners for N point + lone_bounds, & ! longitude of gridbox corners for E point + late_bounds ! latitude of gridbox corners for E point ! 
geometric quantities used for remapping transport real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & @@ -137,6 +147,8 @@ module ice_grid hm , & ! land/boundary mask, thickness (T-cell) bm , & ! task/block id uvm , & ! land/boundary mask, velocity (U-cell) + npm , & ! land/boundary mask (N-cell) + epm , & ! land/boundary mask (E-cell) kmt ! ocean topography mask for bathymetry (T-cell) logical (kind=log_kind), public :: & @@ -147,6 +159,8 @@ module ice_grid dimension (:,:,:), allocatable, public :: & tmask , & ! land/boundary mask, thickness (T-cell) umask , & ! land/boundary mask, velocity (U-cell) + nmask , & ! land/boundary mask, (N-cell) + emask , & ! land/boundary mask, (E-cell) lmask_n, & ! northern hemisphere mask lmask_s ! southern hemisphere mask @@ -175,10 +189,16 @@ subroutine alloc_grid dyt (nx_block,ny_block,max_blocks), & ! height of T-cell through the middle (m) dxu (nx_block,ny_block,max_blocks), & ! width of U-cell through the middle (m) dyu (nx_block,ny_block,max_blocks), & ! height of U-cell through the middle (m) + dxn (nx_block,ny_block,max_blocks), & ! width of N-cell through the middle (m) + dyn (nx_block,ny_block,max_blocks), & ! height of N-cell through the middle (m) + dxe (nx_block,ny_block,max_blocks), & ! width of E-cell through the middle (m) + dye (nx_block,ny_block,max_blocks), & ! height of E-cell through the middle (m) HTE (nx_block,ny_block,max_blocks), & ! length of eastern edge of T-cell (m) HTN (nx_block,ny_block,max_blocks), & ! length of northern edge of T-cell (m) tarea (nx_block,ny_block,max_blocks), & ! area of T-cell (m^2) uarea (nx_block,ny_block,max_blocks), & ! area of U-cell (m^2) + narea (nx_block,ny_block,max_blocks), & ! area of N-cell (m^2) + earea (nx_block,ny_block,max_blocks), & ! area of E-cell (m^2) tarear (nx_block,ny_block,max_blocks), & ! 1/tarea uarear (nx_block,ny_block,max_blocks), & ! 1/uarea tinyarea (nx_block,ny_block,max_blocks), & ! puny*tarea @@ -209,16 +229,24 @@ subroutine alloc_grid hm (nx_block,ny_block,max_blocks), & ! land/boundary mask, thickness (T-cell) bm (nx_block,ny_block,max_blocks), & ! task/block id uvm (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) + npm (nx_block,ny_block,max_blocks), & ! land/boundary mask (N-cell) + epm (nx_block,ny_block,max_blocks), & ! land/boundary mask (E-cell) kmt (nx_block,ny_block,max_blocks), & ! ocean topography mask for bathymetry (T-cell) tmask (nx_block,ny_block,max_blocks), & ! land/boundary mask, thickness (T-cell) umask (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) + nmask (nx_block,ny_block,max_blocks), & ! land/boundary mask (N-cell) + emask (nx_block,ny_block,max_blocks), & ! land/boundary mask (E-cell) lmask_n (nx_block,ny_block,max_blocks), & ! northern hemisphere mask lmask_s (nx_block,ny_block,max_blocks), & ! southern hemisphere mask rndex_global(nx_block,ny_block,max_blocks), & ! global index for local subdomain (dbl) lont_bounds(4,nx_block,ny_block,max_blocks), & ! longitude of gridbox corners for T point latt_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for T point lonu_bounds(4,nx_block,ny_block,max_blocks), & ! longitude of gridbox corners for U point - latu_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for U point + latu_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for U point + lonn_bounds(4,nx_block,ny_block,max_blocks), & ! 
longitude of gridbox corners for N point + latn_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for N point + lone_bounds(4,nx_block,ny_block,max_blocks), & ! longitude of gridbox corners for E point + late_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for E point mne (2,2,nx_block,ny_block,max_blocks), & ! matrices used for coordinate transformations in remapping mnw (2,2,nx_block,ny_block,max_blocks), & ! ne = northeast corner, nw = northwest, etc. mse (2,2,nx_block,ny_block,max_blocks), & @@ -368,7 +396,7 @@ subroutine init_grid2 use ice_blocks, only: get_block, block, nx_block, ny_block use ice_constants, only: c0, c1, c2, p5, p25, c1p5, & - field_loc_center, field_loc_NEcorner, & + field_loc_center, field_loc_NEcorner, field_loc_Nface, field_loc_Eface, & field_type_scalar, field_type_vector, field_type_angle use ice_domain_size, only: max_blocks @@ -435,6 +463,9 @@ subroutine init_grid2 do i = ilo, ihi tarea(i,j,iblk) = dxt(i,j,iblk)*dyt(i,j,iblk) uarea(i,j,iblk) = dxu(i,j,iblk)*dyu(i,j,iblk) + narea(i,j,iblk) = dxn(i,j,iblk)*dyn(i,j,iblk) + earea(i,j,iblk) = dxe(i,j,iblk)*dye(i,j,iblk) + if (tarea(i,j,iblk) > c0) then tarear(i,j,iblk) = c1/tarea(i,j,iblk) else @@ -482,6 +513,12 @@ subroutine init_grid2 call ice_HaloUpdate (uarea, halo_info, & field_loc_NEcorner, field_type_scalar, & fillValue=c1) + call ice_HaloUpdate (narea, halo_info, & + field_loc_Nface, field_type_scalar, & + fillValue=c1) + call ice_HaloUpdate (earea, halo_info, & + field_loc_Eface, field_type_scalar, & + fillValue=c1) call ice_HaloUpdate (tarear, halo_info, & field_loc_center, field_type_scalar, & fillValue=c1) @@ -597,6 +634,7 @@ subroutine init_grid2 !---------------------------------------------------------------- call gridbox_corners + call gridbox_edges !----------------------------------------------------------------- ! Compute global index (used for unpacking messages from coupler) @@ -725,10 +763,10 @@ subroutine popgrid !----------------------------------------------------------------- call ice_read_global(nu_grid,3,work_g1,'rda8',.true.) ! HTN - call primary_grid_lengths_HTN(work_g1) ! dxu, dxt + call primary_grid_lengths_HTN(work_g1) ! dxu, dxt, dxn, dxe call ice_read_global(nu_grid,4,work_g1,'rda8',.true.) ! HTE - call primary_grid_lengths_HTE(work_g1) ! dyu, dyt + call primary_grid_lengths_HTE(work_g1) ! dyu, dyt, dyn, dye deallocate(work_g1) @@ -902,10 +940,10 @@ subroutine popgrid_nc fieldname='htn' call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! HTN - call primary_grid_lengths_HTN(work_g1) ! dxu, dxt + call primary_grid_lengths_HTN(work_g1) ! dxu, dxt, dxn, dxe fieldname='hte' call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! HTE - call primary_grid_lengths_HTE(work_g1) ! dyu, dyt + call primary_grid_lengths_HTE(work_g1) ! dyu, dyt, dyn, dye deallocate(work_g1) @@ -1184,6 +1222,10 @@ subroutine latlongrid dyt (i,j,iblk) = 1.e36_dbl_kind dxu (i,j,iblk) = 1.e36_dbl_kind dyu (i,j,iblk) = 1.e36_dbl_kind + dxn (i,j,iblk) = 1.e36_dbl_kind + dyn (i,j,iblk) = 1.e36_dbl_kind + dxe (i,j,iblk) = 1.e36_dbl_kind + dye (i,j,iblk) = 1.e36_dbl_kind dxhy (i,j,iblk) = 1.e36_dbl_kind dyhx (i,j,iblk) = 1.e36_dbl_kind cyp (i,j,iblk) = 1.e36_dbl_kind @@ -1295,7 +1337,7 @@ subroutine rectgrid enddo enddo endif - call primary_grid_lengths_HTN(work_g1) ! dxu, dxt + call primary_grid_lengths_HTN(work_g1) ! 
dxu, dxt, dxn, dxe

       if (my_task == master_task) then
          do j = 1, ny_global
          do i = 1, nx_global
             work_g1(i,j) = dyrect
          enddo
          enddo
       endif
-      call primary_grid_lengths_HTE(work_g1) ! dyu, dyt
+      call primary_grid_lengths_HTE(work_g1) ! dyu, dyt, dyn, dye

    !-----------------------------------------------------------------
    ! Construct T-cell land mask
    !-----------------------------------------------------------------
@@ -1458,11 +1500,11 @@ subroutine cpomgrid

       call ice_read_global(nu_grid,3,work_g1, 'rda8',diag)
       work_g1 = work_g1 * m_to_cm
-      call primary_grid_lengths_HTN(work_g1) ! dxu, dxt
+      call primary_grid_lengths_HTN(work_g1) ! dxu, dxt, dxn, dxe

       call ice_read_global(nu_grid,4,work_g1, 'rda8',diag)
       work_g1 = work_g1 * m_to_cm
-      call primary_grid_lengths_HTE(work_g1) ! dyu, dyt
+      call primary_grid_lengths_HTE(work_g1) ! dyu, dyt, dyn, dye

       call ice_read_global(nu_grid,7,work_g1,'rda8',diag)
       call scatter_global(ANGLE, work_g1, master_task, distrb_info, &
@@ -1494,7 +1536,7 @@ end subroutine cpomgrid

 subroutine primary_grid_lengths_HTN(work_g)

-      use ice_constants, only: p5, c2, cm_to_m, &
+      use ice_constants, only: p25, p5, c2, cm_to_m, &
           field_loc_center, field_loc_NEcorner, &
           field_loc_Nface, field_type_scalar
@@ -1517,20 +1559,22 @@ subroutine primary_grid_lengths_HTN(work_g)
          allocate(work_g2(1,1))
       endif

+      ! HTN, dxu = average of 2 neighbor HTNs in i
+
       if (my_task == master_task) then
-         do j = 1, ny_global
-         do i = 1, nx_global
-            work_g(i,j) = work_g(i,j) * cm_to_m ! HTN
-         enddo
-         enddo
-         do j = 1, ny_global
-         do i = 1, nx_global
-            ! assume cyclic; noncyclic will be handled during scatter
-            ip1 = i+1
-            if (i == nx_global) ip1 = 1
-            work_g2(i,j) = p5*(work_g(i,j) + work_g(ip1,j)) ! dxu
-         enddo
-         enddo
+         do j = 1, ny_global
+         do i = 1, nx_global
+            work_g(i,j) = work_g(i,j) * cm_to_m ! HTN
+         enddo
+         enddo
+         do j = 1, ny_global
+         do i = 1, nx_global
+            ! assume cyclic; noncyclic will be handled during scatter
+            ip1 = i+1
+            if (i == nx_global) ip1 = 1
+            work_g2(i,j) = p5*(work_g(i,j) + work_g(ip1,j)) ! dxu
+         enddo
+         enddo
       endif
       if (pgl_global_ext) then
          call primary_grid_lengths_global_ext( &
@@ -1541,20 +1585,49 @@ subroutine primary_grid_lengths_HTN(work_g)
       call scatter_global(dxu, work_g2, master_task, distrb_info, &
                           field_loc_NEcorner, field_type_scalar)

+      ! dxt = average of 2 neighbor HTNs in j
+
       if (my_task == master_task) then
-         do j = 2, ny_global
+         do j = 2, ny_global
          do i = 1, nx_global
             work_g2(i,j) = p5*(work_g(i,j) + work_g(i,j-1)) ! dxt
          enddo
-         enddo
-         ! extrapolate to obtain dxt along j=1
-         do i = 1, nx_global
-            work_g2(i,1) = c2*work_g(i,2) - work_g(i,3) ! dxt
-         enddo
+         enddo
+         ! extrapolate to obtain dxt along j=1
+         do i = 1, nx_global
+            work_g2(i,1) = c2*work_g(i,2) - work_g(i,3) ! dxt
+         enddo
       endif
       call scatter_global(dxt, work_g2, master_task, distrb_info, &
                           field_loc_center, field_type_scalar)

+      ! dxn = HTN
+
+      dxn(:,:,:) = HTN(:,:,:) ! dxn
+
+      ! dxe = average of 4 surrounding HTNs
+
+      if (my_task == master_task) then
+         do j = 2, ny_global
+         do i = 1, nx_global
+            ! assume cyclic; noncyclic will be handled during scatter
+            ip1 = i+1
+            if (i == nx_global) ip1 = 1
+            work_g2(i,j) = p25*(work_g(i,j)+work_g(ip1,j)+work_g(i,j-1)+work_g(ip1,j-1)) ! dxe
+         enddo
+         enddo
+         ! extrapolate to obtain dxe along j=1
+         do i = 1, nx_global
+            ! assume cyclic; noncyclic will be handled during scatter
+            ip1 = i+1
+            if (i == nx_global) ip1 = 1
+            work_g2(i,1) = p5*(c2*work_g(i ,2) - work_g(i ,3) + &
+                               c2*work_g(ip1,2) - work_g(ip1,3)) ! 
dxe + enddo + endif + call scatter_global(dxe, work_g2, master_task, distrb_info, & + field_loc_center, field_type_scalar) + deallocate(work_g2) end subroutine primary_grid_lengths_HTN @@ -1568,7 +1641,7 @@ end subroutine primary_grid_lengths_HTN subroutine primary_grid_lengths_HTE(work_g) - use ice_constants, only: p5, c2, cm_to_m, & + use ice_constants, only: p25, p5, c2, cm_to_m, & field_loc_center, field_loc_NEcorner, & field_loc_Eface, field_type_scalar @@ -1591,6 +1664,8 @@ subroutine primary_grid_lengths_HTE(work_g) allocate(work_g2(1,1)) endif + ! HTE, dyu = average of 2 neighbor HTE in j + if (my_task == master_task) then do j = 1, ny_global do i = 1, nx_global @@ -1605,8 +1680,7 @@ subroutine primary_grid_lengths_HTE(work_g) ! extrapolate to obtain dyu along j=ny_global if (ny_global > 1) then do i = 1, nx_global - work_g2(i,ny_global) = c2*work_g(i,ny_global-1) & - - work_g(i,ny_global-2) ! dyu + work_g2(i,ny_global) = c2*work_g(i,ny_global-1) - work_g(i,ny_global-2) ! dyu enddo endif endif @@ -1619,19 +1693,50 @@ subroutine primary_grid_lengths_HTE(work_g) call scatter_global(dyu, work_g2, master_task, distrb_info, & field_loc_NEcorner, field_type_scalar) + ! dyt = average of 2 neighbor HTE in i + if (my_task == master_task) then - do j = 1, ny_global - do i = 1, nx_global - ! assume cyclic; noncyclic will be handled during scatter - im1 = i-1 - if (i == 1) im1 = nx_global - work_g2(i,j) = p5*(work_g(i,j) + work_g(im1,j)) ! dyt - enddo - enddo + do j = 1, ny_global + do i = 1, nx_global + ! assume cyclic; noncyclic will be handled during scatter + im1 = i-1 + if (i == 1) im1 = nx_global + work_g2(i,j) = p5*(work_g(i,j) + work_g(im1,j)) ! dyt + enddo + enddo endif call scatter_global(dyt, work_g2, master_task, distrb_info, & field_loc_center, field_type_scalar) + ! dyn = average of 4 neighbor HTEs + + if (my_task == master_task) then + do j = 1, ny_global-1 + do i = 1, nx_global + ! assume cyclic; noncyclic will be handled during scatter + im1 = i-1 + if (i == 1) im1 = nx_global + work_g2(i,j) = p25*(work_g(i,j) + work_g(im1,j) + work_g(i,j+1) + work_g(im1,j+1)) ! dyn + enddo + enddo + ! extrapolate to obtain dyn along j=ny_global + if (ny_global > 1) then + do i = 1, nx_global + ! assume cyclic; noncyclic will be handled during scatter + im1 = i-1 + if (i == 1) im1 = nx_global + work_g2(i,ny_global) = p5*(c2*work_g(i ,ny_global-1) - work_g(i ,ny_global-2) + & + c2*work_g(im1,ny_global-1) - work_g(im1,ny_global-2)) ! dyn + enddo + endif + endif + call scatter_global(dyn, work_g2, master_task, distrb_info, & + field_loc_center, field_type_scalar) + + ! dye = HTE + + dye(:,:,:) = HTE(:,:,:) + deallocate(work_g2) end subroutine primary_grid_lengths_HTE @@ -1639,7 +1744,8 @@ end subroutine primary_grid_lengths_HTE !======================================================================= ! Sets the boundary values for the T cell land mask (hm) and -! makes the logical land masks for T and U cells (tmask, umask). +! makes the logical land masks for T and U cells (tmask, umask) +! and N and E cells (nmask, emask). ! Also creates hemisphere masks (mask-n northern, mask-s southern) ! ! author: Elizabeth C. 
Hunke, LANL @@ -1647,7 +1753,8 @@ end subroutine primary_grid_lengths_HTE subroutine makemask use ice_constants, only: c0, p5, & - field_loc_center, field_loc_NEcorner, field_type_scalar + field_loc_center, field_loc_NEcorner, field_type_scalar, & + field_loc_Nface, field_loc_Eface integer (kind=int_kind) :: & i, j, iblk, & @@ -1667,7 +1774,7 @@ subroutine makemask file=__FILE__, line=__LINE__) call ice_timer_start(timer_bound) - call ice_HaloUpdate (kmt, halo_info, & + call ice_HaloUpdate (kmt, halo_info, & field_loc_center, field_type_scalar) call ice_HaloUpdate (hm, halo_info, & field_loc_center, field_type_scalar) @@ -1691,6 +1798,8 @@ subroutine makemask do i = ilo, ihi uvm(i,j,iblk) = min (hm(i,j, iblk), hm(i+1,j, iblk), & hm(i,j+1,iblk), hm(i+1,j+1,iblk)) + npm(i,j,iblk) = min (hm(i,j, iblk), hm(i,j+1,iblk)) + epm(i,j,iblk) = min (hm(i,j, iblk), hm(i+1,j,iblk)) bm(i,j,iblk) = my_task + iblk/100.0_dbl_kind enddo enddo @@ -1700,8 +1809,12 @@ subroutine makemask call ice_timer_start(timer_bound) call ice_HaloUpdate (uvm, halo_info, & field_loc_NEcorner, field_type_scalar) - call ice_HaloUpdate (bm, halo_info, & - field_loc_center, field_type_scalar) + call ice_HaloUpdate (npm, halo_info, & + field_loc_Nface, field_type_scalar) + call ice_HaloUpdate (epm, halo_info, & + field_loc_Eface, field_type_scalar) + call ice_HaloUpdate (bm, halo_info, & + field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) @@ -1715,10 +1828,14 @@ subroutine makemask ! needs to cover halo (no halo update for logicals) tmask(:,:,iblk) = .false. umask(:,:,iblk) = .false. + nmask(:,:,iblk) = .false. + emask(:,:,iblk) = .false. do j = jlo-nghost, jhi+nghost do i = ilo-nghost, ihi+nghost if ( hm(i,j,iblk) > p5) tmask(i,j,iblk) = .true. if (uvm(i,j,iblk) > p5) umask(i,j,iblk) = .true. + if (npm(i,j,iblk) > p5) nmask(i,j,iblk) = .true. + if (epm(i,j,iblk) > p5) emask(i,j,iblk) = .true. enddo enddo @@ -2009,15 +2126,15 @@ subroutine Tlatlon write(nu_diag,*) 'min/max TLAT:', x3*rad_to_deg, x4*rad_to_deg endif ! 
my_task - x1 = global_minval(NLON, distrb_info, tmask) - x2 = global_maxval(NLON, distrb_info, tmask) - x3 = global_minval(NLAT, distrb_info, tmask) - x4 = global_maxval(NLAT, distrb_info, tmask) + x1 = global_minval(NLON, distrb_info, nmask) + x2 = global_maxval(NLON, distrb_info, nmask) + x3 = global_minval(NLAT, distrb_info, nmask) + x4 = global_maxval(NLAT, distrb_info, nmask) - y1 = global_minval(ELON, distrb_info, umask) - y2 = global_maxval(ELON, distrb_info, umask) - y3 = global_minval(ELAT, distrb_info, umask) - y4 = global_maxval(ELAT, distrb_info, umask) + y1 = global_minval(ELON, distrb_info, emask) + y2 = global_maxval(ELON, distrb_info, emask) + y3 = global_minval(ELAT, distrb_info, emask) + y4 = global_maxval(ELAT, distrb_info, emask) if (my_task==master_task) then write(nu_diag,*) ' ' @@ -2268,12 +2385,12 @@ subroutine gridbox_corners latu_bounds(1,i,j,iblk)=TLAT(i ,j ,iblk)*rad_to_deg latu_bounds(2,i,j,iblk)=TLAT(i+1,j ,iblk)*rad_to_deg latu_bounds(3,i,j,iblk)=TLAT(i+1,j+1,iblk)*rad_to_deg - latu_bounds(4,i,j,iblk)=TLAT(i ,j+1,iblk)*rad_to_deg + latu_bounds(4,i,j,iblk)=TLAT(i ,j+1,iblk)*rad_to_deg lonu_bounds(1,i,j,iblk)=TLON(i ,j ,iblk)*rad_to_deg lonu_bounds(2,i,j,iblk)=TLON(i+1,j ,iblk)*rad_to_deg lonu_bounds(3,i,j,iblk)=TLON(i+1,j+1,iblk)*rad_to_deg - lonu_bounds(4,i,j,iblk)=TLON(i ,j+1,iblk)*rad_to_deg + lonu_bounds(4,i,j,iblk)=TLON(i ,j+1,iblk)*rad_to_deg enddo enddo @@ -2402,6 +2519,307 @@ subroutine gridbox_corners end subroutine gridbox_corners +!======================================================================= +! The following code is used for obtaining the coordinates of the grid +! vertices for CF-compliant netCDF history output. Approximate! +!======================================================================= + +! These fields are only used for netcdf history output, and the +! ghost cell values are not needed. +! NOTE: Extrapolations were used: these fields are approximate! +! + + subroutine gridbox_edges + + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, c2, c360, & + field_loc_NEcorner, field_type_scalar + use ice_domain_size, only: max_blocks + + integer (kind=int_kind) :: & + i,j,iblk,icorner,& ! index counters + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g2 + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 + + real (kind=dbl_kind) :: & + rad_to_deg + + type (block) :: & + this_block ! block information for current block + + character(len=*), parameter :: subname = '(gridbox_edges)' + + call icepack_query_parameters(rad_to_deg_out=rad_to_deg) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + !------------------------------------------------------------- + ! Get coordinates of grid boxes for each block as follows: + ! for N pt: (1) W edge, (2) E edge, (3) E edge j+1, (4) W edge j+1 + ! 
for E pt: (1) S edge, (2) S edge i+1, (3) N edge i+1, (4) N edge
+      !-------------------------------------------------------------
+
+      latn_bounds(:,:,:,:) = c0
+      lonn_bounds(:,:,:,:) = c0
+      late_bounds(:,:,:,:) = c0
+      lone_bounds(:,:,:,:) = c0
+
+      !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block)
+      do iblk = 1, nblocks
+         this_block = get_block(blocks_ice(iblk),iblk)
+         ilo = this_block%ilo
+         ihi = this_block%ihi
+         jlo = this_block%jlo
+         jhi = this_block%jhi
+
+         do j = jlo, jhi
+         do i = ilo, ihi
+
+            latn_bounds(1,i,j,iblk)=ELAT(i-1,j  ,iblk)*rad_to_deg
+            latn_bounds(2,i,j,iblk)=ELAT(i  ,j  ,iblk)*rad_to_deg
+            latn_bounds(3,i,j,iblk)=ELAT(i  ,j+1,iblk)*rad_to_deg
+            latn_bounds(4,i,j,iblk)=ELAT(i-1,j+1,iblk)*rad_to_deg
+
+            lonn_bounds(1,i,j,iblk)=ELON(i-1,j  ,iblk)*rad_to_deg
+            lonn_bounds(2,i,j,iblk)=ELON(i  ,j  ,iblk)*rad_to_deg
+            lonn_bounds(3,i,j,iblk)=ELON(i  ,j+1,iblk)*rad_to_deg
+            lonn_bounds(4,i,j,iblk)=ELON(i-1,j+1,iblk)*rad_to_deg
+
+            late_bounds(1,i,j,iblk)=NLAT(i  ,j-1,iblk)*rad_to_deg
+            late_bounds(2,i,j,iblk)=NLAT(i+1,j-1,iblk)*rad_to_deg
+            late_bounds(3,i,j,iblk)=NLAT(i+1,j  ,iblk)*rad_to_deg
+            late_bounds(4,i,j,iblk)=NLAT(i  ,j  ,iblk)*rad_to_deg
+
+            lone_bounds(1,i,j,iblk)=NLON(i  ,j-1,iblk)*rad_to_deg
+            lone_bounds(2,i,j,iblk)=NLON(i+1,j-1,iblk)*rad_to_deg
+            lone_bounds(3,i,j,iblk)=NLON(i+1,j  ,iblk)*rad_to_deg
+            lone_bounds(4,i,j,iblk)=NLON(i  ,j  ,iblk)*rad_to_deg
+
+         enddo
+         enddo
+      enddo
+      !$OMP END PARALLEL DO
+
+      !----------------------------------------------------------------
+      ! extrapolate on global grid to get edge values
+      !----------------------------------------------------------------
+
+      if (my_task == master_task) then
+         allocate(work_g2(nx_global,ny_global))
+      else
+         allocate(work_g2(1,1))
+      endif
+
+      ! latn_bounds
+
+      work1(:,:,:) = latn_bounds(1,:,:,:)
+      call gather_global(work_g2, work1, master_task, distrb_info)
+      if (my_task == master_task) then
+         do j = 1, ny_global
+            work_g2(1,j) = c2*work_g2(2,j) &
+                             - work_g2(3,j)
+         enddo
+      endif
+      call scatter_global(work1, work_g2, &
+                          master_task, distrb_info, &
+                          field_loc_NEcorner, field_type_scalar)
+      latn_bounds(1,:,:,:) = work1(:,:,:)
+
+      work1(:,:,:) = latn_bounds(3,:,:,:)
+      call gather_global(work_g2, work1, master_task, distrb_info)
+      if (my_task == master_task) then
+         do i = 1, nx_global
+            work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) &
+                                     - work_g2(i,ny_global-2)
+         enddo
+      endif
+      call scatter_global(work1, work_g2, &
+                          master_task, distrb_info, &
+                          field_loc_NEcorner, field_type_scalar)
+      latn_bounds(3,:,:,:) = work1(:,:,:)
+
+      work1(:,:,:) = latn_bounds(4,:,:,:)
+      call gather_global(work_g2, work1, master_task, distrb_info)
+      if (my_task == master_task) then
+         do i = 1, nx_global
+            work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) &
+                                     - work_g2(i,ny_global-2)
+         enddo
+         do j = 1, ny_global
+            work_g2(1,j) = c2*work_g2(2,j) &
+                             - work_g2(3,j)
+         enddo
+      endif
+      call scatter_global(work1, work_g2, &
+                          master_task, distrb_info, &
+                          field_loc_NEcorner, field_type_scalar)
+      latn_bounds(4,:,:,:) = work1(:,:,:)
+
+      ! 
lonn_bounds + + work1(:,:,:) = lonn_bounds(1,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do j = 1, ny_global + work_g2(1,j) = c2*work_g2(2,j) & + - work_g2(3,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + lonn_bounds(1,:,:,:) = work1(:,:,:) + + work1(:,:,:) = lonn_bounds(3,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & + - work_g2(i,ny_global-2) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + lonn_bounds(3,:,:,:) = work1(:,:,:) + + work1(:,:,:) = lonn_bounds(4,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & + - work_g2(i,ny_global-2) + enddo + do j = 1, ny_global + work_g2(1,j) = c2*work_g2(2,j) & + - work_g2(3,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + lonn_bounds(4,:,:,:) = work1(:,:,:) + + ! late_bounds + + work1(:,:,:) = late_bounds(1,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,1) = c2*work_g2(i,2) & + - work_g2(i,3) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + late_bounds(1,:,:,:) = work1(:,:,:) + + work1(:,:,:) = late_bounds(2,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,1) = c2*work_g2(i,2) & + - work_g2(i,3) + enddo + do j = 1, ny_global + work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & + - work_g2(nx_global-2,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + late_bounds(2,:,:,:) = work1(:,:,:) + + work1(:,:,:) = late_bounds(3,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do j = 1, ny_global + work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & + - work_g2(nx_global-2,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + late_bounds(3,:,:,:) = work1(:,:,:) + + ! 
lone_bounds + + work1(:,:,:) = lone_bounds(1,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,1) = c2*work_g2(i,2) & + - work_g2(i,3) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + lone_bounds(1,:,:,:) = work1(:,:,:) + + work1(:,:,:) = lone_bounds(2,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do i = 1, nx_global + work_g2(i,1) = c2*work_g2(i,2) & + - work_g2(i,3) + enddo + do j = 1, ny_global + work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & + - work_g2(nx_global-2,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + lone_bounds(2,:,:,:) = work1(:,:,:) + + work1(:,:,:) = lone_bounds(3,:,:,:) + call gather_global(work_g2, work1, master_task, distrb_info) + if (my_task == master_task) then + do j = 1, ny_global + work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & + - work_g2(nx_global-2,j) + enddo + endif + call scatter_global(work1, work_g2, & + master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + lone_bounds(3,:,:,:) = work1(:,:,:) + + deallocate(work_g2) + + !---------------------------------------------------------------- + ! Convert longitude to Degrees East >0 for history output + !---------------------------------------------------------------- + + allocate(work_g2(nx_block,ny_block)) ! not used as global here + !OMP fails in this loop + do iblk = 1, nblocks + do icorner = 1, 4 + work_g2(:,:) = lonn_bounds(icorner,:,:,iblk) + c360 + where (work_g2 > c360) work_g2 = work_g2 - c360 + where (work_g2 < c0 ) work_g2 = work_g2 + c360 + lonn_bounds(icorner,:,:,iblk) = work_g2(:,:) + work_g2(:,:) = lone_bounds(icorner,:,:,iblk) + c360 + where (work_g2 > c360) work_g2 = work_g2 - c360 + where (work_g2 < c0 ) work_g2 = work_g2 + c360 + lone_bounds(icorner,:,:,iblk) = work_g2(:,:) + enddo + enddo + deallocate(work_g2) + + end subroutine gridbox_edges + !======================================================================= ! NOTE: Boundary conditions for fields on NW, SW, SE corners diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 index 493a91c1e..f48371f8d 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 @@ -54,9 +54,11 @@ subroutine ice_write_hist (ns) use ice_domain, only: distrb_info use ice_domain_size, only: nx_global, ny_global, max_nstrm, max_blocks use ice_gather_scatter, only: gather_global - use ice_grid, only: TLON, TLAT, ULON, ULAT, hm, bm, tarea, uarea, & - dxu, dxt, dyu, dyt, HTN, HTE, ANGLE, ANGLET, & - lont_bounds, latt_bounds, lonu_bounds, latu_bounds + use ice_grid, only: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT, & + hm, uvm, npm, epm, bm, tarea, uarea, narea, earea, & + dxu, dxt, dyu, dyt, dxn, dyn, dxe, dye, HTN, HTE, ANGLE, ANGLET, & + lont_bounds, latt_bounds, lonu_bounds, latu_bounds, & + lonn_bounds, latn_bounds, lone_bounds, late_bounds use ice_history_shared use ice_restart_shared, only: runid, lcdf64 #ifdef USE_NETCDF @@ -91,15 +93,15 @@ subroutine ice_write_hist (ns) character (char_len) :: start_time,current_date,current_time character (len=8) :: cdate - ! 
4 coordinate variables: TLON, TLAT, ULON, ULAT
-      INTEGER (kind=int_kind), PARAMETER :: ncoord = 4
+      ! 8 coordinate variables: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT
+      INTEGER (kind=int_kind), PARAMETER :: ncoord = 8

       ! 4 vertices in each grid cell
       INTEGER (kind=int_kind), PARAMETER :: nverts = 4

-      ! 4 variables describe T, U grid boundaries:
+      ! 8 variables describe T, U, N, E grid boundaries:
       ! lont_bounds, latt_bounds, lonu_bounds, latu_bounds
+      ! lonn_bounds, latn_bounds, lone_bounds, late_bounds
-      INTEGER (kind=int_kind), PARAMETER :: nvar_verts = 4
+      INTEGER (kind=int_kind), PARAMETER :: nvar_verts = 8

       TYPE coord_attributes ! netcdf coordinate attributes
         character (len=11) :: short_name
         character (len=45) :: long_name
         character (len=20) :: units
       END TYPE coord_attributes

       TYPE req_attributes ! req'd netcdf attributes
         TYPE (coord_attributes) :: req
         character (len=20) :: coordinates
       END TYPE req_attributes

-      TYPE(req_attributes), dimension(nvar) :: var
-      TYPE(coord_attributes), dimension(ncoord) :: coord_var
+      TYPE(req_attributes), dimension(nvar_grd) :: var_grd
+      TYPE(coord_attributes), dimension(ncoord) :: var_coord
       TYPE(coord_attributes), dimension(nvar_verts) :: var_nverts
-      TYPE(coord_attributes), dimension(nvarz) :: var_nz
+      TYPE(coord_attributes), dimension(nvar_grdz) :: var_grdz
       CHARACTER (char_len), dimension(ncoord) :: coord_bounds

       character(len=*), parameter :: subname = '(ice_write_hist)'
@@ -270,65 +272,118 @@ subroutine ice_write_hist (ns)

       ind = 0
       ind = ind + 1
-      coord_var(ind) = coord_attributes('TLON', &
+      var_coord(ind) = coord_attributes('TLON', &
                        'T grid center longitude', 'degrees_east')
       coord_bounds(ind) = 'lont_bounds'
       ind = ind + 1
-      coord_var(ind) = coord_attributes('TLAT', &
+      var_coord(ind) = coord_attributes('TLAT', &
                        'T grid center latitude', 'degrees_north')
       coord_bounds(ind) = 'latt_bounds'
       ind = ind + 1
-      coord_var(ind) = coord_attributes('ULON', &
+      var_coord(ind) = coord_attributes('ULON', &
                        'U grid center longitude', 'degrees_east')
       coord_bounds(ind) = 'lonu_bounds'
       ind = ind + 1
-      coord_var(ind) = coord_attributes('ULAT', &
+      var_coord(ind) = coord_attributes('ULAT', &
                        'U grid center latitude', 'degrees_north')
       coord_bounds(ind) = 'latu_bounds'
+      ind = ind + 1
+      var_coord(ind) = coord_attributes('NLON', &
+                       'N grid center longitude', 'degrees_east')
+      coord_bounds(ind) = 'lonn_bounds'
+      ind = ind + 1
+      var_coord(ind) = coord_attributes('NLAT', &
+                       'N grid center latitude', 'degrees_north')
+      coord_bounds(ind) = 'latn_bounds'
+      ind = ind + 1
+      var_coord(ind) = coord_attributes('ELON', &
+                       'E grid center longitude', 'degrees_east')
+      coord_bounds(ind) = 'lone_bounds'
+      ind = ind + 1
+      var_coord(ind) = coord_attributes('ELAT', &
+                       'E grid center latitude', 'degrees_north')
+      coord_bounds(ind) = 'late_bounds'

-      var_nz(1) = coord_attributes('NCAT', 'category maximum thickness', 'm')
-      var_nz(2) = coord_attributes('VGRDi', 'vertical ice levels', '1')
-      var_nz(3) = coord_attributes('VGRDs', 'vertical snow levels', '1')
-      var_nz(4) = coord_attributes('VGRDb', 'vertical ice-bio levels', '1')
-      var_nz(5) = coord_attributes('VGRDa', 'vertical snow-ice-bio levels', '1')
-      var_nz(6) = coord_attributes('NFSD', 'category floe size (center)', 'm')
+      var_grdz(1) = coord_attributes('NCAT', 'category maximum thickness', 'm')
+      var_grdz(2) = coord_attributes('VGRDi', 'vertical ice levels', '1')
+      var_grdz(3) = coord_attributes('VGRDs', 'vertical snow levels', '1')
+      var_grdz(4) = coord_attributes('VGRDb', 'vertical ice-bio levels', '1')
+      var_grdz(5) = coord_attributes('VGRDa', 'vertical snow-ice-bio levels', '1')
+      var_grdz(6) = 
coord_attributes('NFSD', 'category floe size (center)', 'm') !----------------------------------------------------------------- ! define information for optional time-invariant variables !----------------------------------------------------------------- - var(n_tarea)%req = coord_attributes('tarea', & + var_grd(n_tmask)%req = coord_attributes('tmask', & + 'mask of T grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_tmask)%coordinates = 'TLON TLAT' + var_grd(n_umask)%req = coord_attributes('umask', & + 'mask of U grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_umask)%coordinates = 'ULON ULAT' + var_grd(n_nmask)%req = coord_attributes('nmask', & + 'mask of N grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_nmask)%coordinates = 'NLON NLAT' + var_grd(n_emask)%req = coord_attributes('emask', & + 'mask of E grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_emask)%coordinates = 'ELON ELAT' + + var_grd(n_tarea)%req = coord_attributes('tarea', & 'area of T grid cells', 'm^2') - var(n_tarea)%coordinates = 'TLON TLAT' - var(n_uarea)%req = coord_attributes('uarea', & + var_grd(n_tarea)%coordinates = 'TLON TLAT' + var_grd(n_uarea)%req = coord_attributes('uarea', & 'area of U grid cells', 'm^2') - var(n_uarea)%coordinates = 'ULON ULAT' - var(n_dxt)%req = coord_attributes('dxt', & + var_grd(n_uarea)%coordinates = 'ULON ULAT' + var_grd(n_narea)%req = coord_attributes('narea', & + 'area of N grid cells', 'm^2') + var_grd(n_narea)%coordinates = 'NLON NLAT' + var_grd(n_earea)%req = coord_attributes('earea', & + 'area of E grid cells', 'm^2') + var_grd(n_earea)%coordinates = 'ELON ELAT' + + var_grd(n_blkmask)%req = coord_attributes('blkmask', & + 'block id of T grid cells, mytask + iblk/100', 'unitless') + var_grd(n_blkmask)%coordinates = 'TLON TLAT' + + var_grd(n_dxt)%req = coord_attributes('dxt', & 'T cell width through middle', 'm') - var(n_dxt)%coordinates = 'TLON TLAT' - var(n_dyt)%req = coord_attributes('dyt', & + var_grd(n_dxt)%coordinates = 'TLON TLAT' + var_grd(n_dyt)%req = coord_attributes('dyt', & 'T cell height through middle', 'm') - var(n_dyt)%coordinates = 'TLON TLAT' - var(n_dxu)%req = coord_attributes('dxu', & + var_grd(n_dyt)%coordinates = 'TLON TLAT' + var_grd(n_dxu)%req = coord_attributes('dxu', & 'U cell width through middle', 'm') - var(n_dxu)%coordinates = 'ULON ULAT' - var(n_dyu)%req = coord_attributes('dyu', & + var_grd(n_dxu)%coordinates = 'ULON ULAT' + var_grd(n_dyu)%req = coord_attributes('dyu', & 'U cell height through middle', 'm') - var(n_dyu)%coordinates = 'ULON ULAT' - var(n_HTN)%req = coord_attributes('HTN', & + var_grd(n_dyu)%coordinates = 'ULON ULAT' + var_grd(n_dxn)%req = coord_attributes('dxn', & + 'N cell width through middle', 'm') + var_grd(n_dxn)%coordinates = 'NLON NLAT' + var_grd(n_dyn)%req = coord_attributes('dyn', & + 'N cell height through middle', 'm') + var_grd(n_dyn)%coordinates = 'NLON NLAT' + var_grd(n_dxe)%req = coord_attributes('dxe', & + 'E cell width through middle', 'm') + var_grd(n_dxe)%coordinates = 'ELON ELAT' + var_grd(n_dye)%req = coord_attributes('dye', & + 'E cell height through middle', 'm') + var_grd(n_dye)%coordinates = 'ELON ELAT' + + var_grd(n_HTN)%req = coord_attributes('HTN', & 'T cell width on North side','m') - var(n_HTN)%coordinates = 'TLON TLAT' - var(n_HTE)%req = coord_attributes('HTE', & + var_grd(n_HTN)%coordinates = 'TLON TLAT' + var_grd(n_HTE)%req = coord_attributes('HTE', & 'T cell width on East side', 'm') - var(n_HTE)%coordinates = 'TLON TLAT' - var(n_ANGLE)%req = coord_attributes('ANGLE', & + 
var_grd(n_HTE)%coordinates = 'TLON TLAT' + var_grd(n_ANGLE)%req = coord_attributes('ANGLE', & 'angle grid makes with latitude line on U grid', & 'radians') - var(n_ANGLE)%coordinates = 'ULON ULAT' - var(n_ANGLET)%req = coord_attributes('ANGLET', & + var_grd(n_ANGLE)%coordinates = 'ULON ULAT' + var_grd(n_ANGLET)%req = coord_attributes('ANGLET', & 'angle grid makes with latitude line on T grid', & 'radians') - var(n_ANGLET)%coordinates = 'TLON TLAT' + var_grd(n_ANGLET)%coordinates = 'TLON TLAT' ! These fields are required for CF compliance ! dimensions (nx,ny,nverts) @@ -340,6 +395,14 @@ subroutine ice_write_hist (ns) 'longitude boundaries of U cells', 'degrees_east') var_nverts(n_latu_bnds) = coord_attributes('latu_bounds', & 'latitude boundaries of U cells', 'degrees_north') + var_nverts(n_lonn_bnds) = coord_attributes('lonn_bounds', & + 'longitude boundaries of N cells', 'degrees_east') + var_nverts(n_latn_bnds) = coord_attributes('latn_bounds', & + 'latitude boundaries of N cells', 'degrees_north') + var_nverts(n_lone_bnds) = coord_attributes('lone_bounds', & + 'longitude boundaries of E cells', 'degrees_east') + var_nverts(n_late_bnds) = coord_attributes('late_bounds', & + 'latitude boundaries of E cells', 'degrees_north') !----------------------------------------------------------------- ! define attributes for time-invariant variables @@ -350,28 +413,28 @@ subroutine ice_write_hist (ns) dimid(3) = timid do i = 1, ncoord - status = nf90_def_var(ncid, coord_var(i)%short_name, lprecision, & + status = nf90_def_var(ncid, var_coord(i)%short_name, lprecision, & dimid(1:2), varid) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining short_name for '//coord_var(i)%short_name) - status = nf90_put_att(ncid,varid,'long_name',coord_var(i)%long_name) + 'ERROR: defining short_name for '//var_coord(i)%short_name) + status = nf90_put_att(ncid,varid,'long_name',var_coord(i)%long_name) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//coord_var(i)%short_name) - status = nf90_put_att(ncid, varid, 'units', coord_var(i)%units) + 'ERROR: defining long_name for '//var_coord(i)%short_name) + status = nf90_put_att(ncid, varid, 'units', var_coord(i)%units) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//coord_var(i)%short_name) - call ice_write_hist_fill(ncid,varid,coord_var(i)%short_name,history_precision) - if (coord_var(i)%short_name == 'ULAT') then + 'ERROR: defining units for '//var_coord(i)%short_name) + call ice_write_hist_fill(ncid,varid,var_coord(i)%short_name,history_precision) + if (var_coord(i)%short_name == 'ULAT') then status = nf90_put_att(ncid,varid,'comment', & 'Latitude of NE corner of T grid cell') if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining comment for '//coord_var(i)%short_name) + 'ERROR: defining comment for '//var_coord(i)%short_name) endif if (f_bounds) then status = nf90_put_att(ncid, varid, 'bounds', coord_bounds(i)) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining bounds for '//coord_var(i)%short_name) - endif + 'ERROR: defining bounds for '//var_coord(i)%short_name) + endif enddo ! 
Extra dimensions (NCAT, NZILYR, NZSLYR, NZBLYR, NZALYR, NFSD) @@ -382,62 +445,37 @@ subroutine ice_write_hist (ns) dimidex(5)=kmtida dimidex(6)=fmtid - do i = 1, nvarz + do i = 1, nvar_grdz if (igrdz(i)) then - status = nf90_def_var(ncid, var_nz(i)%short_name, & + status = nf90_def_var(ncid, var_grdz(i)%short_name, & lprecision, dimidex(i), varid) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining short_name for '//var_nz(i)%short_name) - status = nf90_put_att(ncid,varid,'long_name',var_nz(i)%long_name) + 'ERROR: defining short_name for '//var_grdz(i)%short_name) + status = nf90_put_att(ncid,varid,'long_name',var_grdz(i)%long_name) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//var_nz(i)%short_name) - status = nf90_put_att(ncid, varid, 'units', var_nz(i)%units) + 'ERROR: defining long_name for '//var_grdz(i)%short_name) + status = nf90_put_att(ncid, varid, 'units', var_grdz(i)%units) if (Status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//var_nz(i)%short_name) + 'ERROR: defining units for '//var_grdz(i)%short_name) endif enddo - ! Attributes for tmask, blkmask defined separately, since they have no units - if (igrd(n_tmask)) then - status = nf90_def_var(ncid, 'tmask', lprecision, dimid(1:2), varid) - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: defining var tmask') - status = nf90_put_att(ncid,varid, 'long_name', 'ocean grid mask') - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: tmask long_name') - status = nf90_put_att(ncid, varid, 'coordinates', 'TLON TLAT') - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: tmask units') - status = nf90_put_att(ncid,varid,'comment', '0 = land, 1 = ocean') - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: tmask comment') - call ice_write_hist_fill(ncid,varid,'tmask',history_precision) - endif - - if (igrd(n_blkmask)) then - status = nf90_def_var(ncid, 'blkmask', lprecision, dimid(1:2), varid) - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: defining var blkmask') - status = nf90_put_att(ncid,varid, 'long_name', 'ice grid block mask') - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: blkmask long_name') - status = nf90_put_att(ncid, varid, 'coordinates', 'TLON TLAT') - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: blkmask units') - status = nf90_put_att(ncid,varid,'comment', 'mytask + iblk/100') - if (status /= nf90_noerr) call abort_ice(subname//'ERROR: blkmask comment') - call ice_write_hist_fill(ncid,varid,'blkmask',history_precision) - endif - - do i = 3, nvar ! 
note n_tmask=1, n_blkmask=2 + do i = 1, nvar_grd if (igrd(i)) then - status = nf90_def_var(ncid, var(i)%req%short_name, & + status = nf90_def_var(ncid, var_grd(i)%req%short_name, & lprecision, dimid(1:2), varid) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining variable '//var(i)%req%short_name) - status = nf90_put_att(ncid,varid, 'long_name', var(i)%req%long_name) + 'ERROR: defining variable '//var_grd(i)%req%short_name) + status = nf90_put_att(ncid,varid, 'long_name', var_grd(i)%req%long_name) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//var(i)%req%short_name) - status = nf90_put_att(ncid, varid, 'units', var(i)%req%units) + 'ERROR: defining long_name for '//var_grd(i)%req%short_name) + status = nf90_put_att(ncid, varid, 'units', var_grd(i)%req%units) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//var(i)%req%short_name) - status = nf90_put_att(ncid, varid, 'coordinates', var(i)%coordinates) + 'ERROR: defining units for '//var_grd(i)%req%short_name) + status = nf90_put_att(ncid, varid, 'coordinates', var_grd(i)%coordinates) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//var(i)%req%short_name) - call ice_write_hist_fill(ncid,varid,var(i)%req%short_name,history_precision) + 'ERROR: defining coordinates for '//var_grd(i)%req%short_name) + call ice_write_hist_fill(ncid,varid,var_grd(i)%req%short_name,history_precision) endif enddo @@ -951,8 +989,8 @@ subroutine ice_write_hist (ns) !----------------------------------------------------------------- do i = 1,ncoord - call broadcast_scalar(coord_var(i)%short_name,master_task) - SELECT CASE (coord_var(i)%short_name) + call broadcast_scalar(var_coord(i)%short_name,master_task) + SELECT CASE (var_coord(i)%short_name) CASE ('TLON') ! Convert T grid longitude from -180 -> 180 to 0 to 360 work1 = TLON*rad_to_deg + c360 @@ -968,28 +1006,40 @@ subroutine ice_write_hist (ns) CASE ('ULAT') work1 = ULAT*rad_to_deg call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('NLON') + work1 = NLON*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('NLAT') + work1 = NLAT*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('ELON') + work1 = ELON*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('ELAT') + work1 = ELAT*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) END SELECT if (my_task == master_task) then - status = nf90_inq_varid(ncid, coord_var(i)%short_name, varid) + status = nf90_inq_varid(ncid, var_coord(i)%short_name, varid) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//coord_var(i)%short_name) + 'ERROR: getting varid for '//var_coord(i)%short_name) status = nf90_put_var(ncid,varid,work_g1) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing'//coord_var(i)%short_name) + 'ERROR: writing'//var_coord(i)%short_name) endif enddo ! 
Extra dimensions (NCAT, NFSD, VGRD*) - do i = 1, nvarz + do i = 1, nvar_grdz if (igrdz(i)) then - call broadcast_scalar(var_nz(i)%short_name,master_task) + call broadcast_scalar(var_grdz(i)%short_name,master_task) if (my_task == master_task) then - status = nf90_inq_varid(ncid, var_nz(i)%short_name, varid) + status = nf90_inq_varid(ncid, var_grdz(i)%short_name, varid) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//var_nz(i)%short_name) - SELECT CASE (var_nz(i)%short_name) + 'ERROR: getting varid for '//var_grdz(i)%short_name) + SELECT CASE (var_grdz(i)%short_name) CASE ('NCAT') status = nf90_put_var(ncid,varid,hin_max(1:ncat_hist)) CASE ('NFSD') @@ -1004,7 +1054,7 @@ subroutine ice_write_hist (ns) status = nf90_put_var(ncid,varid,(/(k, k=1,nzalyr)/)) END SELECT if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing'//var_nz(i)%short_name) + 'ERROR: writing'//var_grdz(i)%short_name) endif endif enddo @@ -1013,38 +1063,28 @@ subroutine ice_write_hist (ns) ! write grid masks, area and rotation angle !----------------------------------------------------------------- - if (igrd(n_tmask)) then - call gather_global(work_g1, hm, master_task, distrb_info) - if (my_task == master_task) then - status = nf90_inq_varid(ncid, 'tmask', varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for tmask') - status = nf90_put_var(ncid,varid,work_g1) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable tmask') - endif - endif - - if (igrd(n_blkmask)) then - call gather_global(work_g1, bm, master_task, distrb_info) - if (my_task == master_task) then - status = nf90_inq_varid(ncid, 'blkmask', varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for blkmask') - status = nf90_put_var(ncid,varid,work_g1) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable blkmask') - endif - endif - - do i = 3, nvar ! 
note n_tmask=1, n_blkmask=2 + do i = 1, nvar_grd if (igrd(i)) then - call broadcast_scalar(var(i)%req%short_name,master_task) - SELECT CASE (var(i)%req%short_name) + call broadcast_scalar(var_grd(i)%req%short_name,master_task) + SELECT CASE (var_grd(i)%req%short_name) + CASE ('tmask') + call gather_global(work_g1, hm, master_task, distrb_info) + CASE ('umask') + call gather_global(work_g1, uvm, master_task, distrb_info) + CASE ('nmask') + call gather_global(work_g1, npm, master_task, distrb_info) + CASE ('emask') + call gather_global(work_g1, epm, master_task, distrb_info) CASE ('tarea') call gather_global(work_g1, tarea, master_task, distrb_info) CASE ('uarea') call gather_global(work_g1, uarea, master_task, distrb_info) + CASE ('narea') + call gather_global(work_g1, narea, master_task, distrb_info) + CASE ('earea') + call gather_global(work_g1, earea, master_task, distrb_info) + CASE ('blkmask') + call gather_global(work_g1, bm, master_task, distrb_info) CASE ('dxu') call gather_global(work_g1, dxu, master_task, distrb_info) CASE ('dyu') @@ -1053,6 +1093,14 @@ subroutine ice_write_hist (ns) call gather_global(work_g1, dxt, master_task, distrb_info) CASE ('dyt') call gather_global(work_g1, dyt, master_task, distrb_info) + CASE ('dxn') + call gather_global(work_g1, dxn, master_task, distrb_info) + CASE ('dyn') + call gather_global(work_g1, dyn, master_task, distrb_info) + CASE ('dxe') + call gather_global(work_g1, dxe, master_task, distrb_info) + CASE ('dye') + call gather_global(work_g1, dye, master_task, distrb_info) CASE ('HTN') call gather_global(work_g1, HTN, master_task, distrb_info) CASE ('HTE') @@ -1064,12 +1112,12 @@ subroutine ice_write_hist (ns) END SELECT if (my_task == master_task) then - status = nf90_inq_varid(ncid, var(i)%req%short_name, varid) + status = nf90_inq_varid(ncid, var_grd(i)%req%short_name, varid) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//var(i)%req%short_name) + 'ERROR: getting varid for '//var_grd(i)%req%short_name) status = nf90_put_var(ncid,varid,work_g1) if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable '//var(i)%req%short_name) + 'ERROR: writing variable '//var_grd(i)%req%short_name) endif endif enddo @@ -1086,7 +1134,7 @@ subroutine ice_write_hist (ns) endif work1_3(:,:,:) = c0 - work1 (:,:,:) = c0 + work1 (:,:,:) = c0 do i = 1, nvar_verts call broadcast_scalar(var_nverts(i)%short_name,master_task) @@ -1115,6 +1163,30 @@ subroutine ice_write_hist (ns) call gather_global(work_g1, work1, master_task, distrb_info) if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) enddo + CASE ('lonn_bounds') + do ivertex = 1, nverts + work1(:,:,:) = lonn_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('latn_bounds') + do ivertex = 1, nverts + work1(:,:,:) = latn_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('lone_bounds') + do ivertex = 1, nverts + work1(:,:,:) = lone_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('late_bounds') + do ivertex = 1, nverts + work1(:,:,:) = late_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) + enddo END 
SELECT if (my_task == master_task) then From 87df8d4caa97b5977057ccf3cfc46145edc15d44 Mon Sep 17 00:00:00 2001 From: apcraig Date: Tue, 2 Nov 2021 11:04:20 -0600 Subject: [PATCH 003/109] add grid_average_X2Y method to average fields from one grid to another --- cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 22 +- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 22 +- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 23 +- cicecore/cicedynB/general/ice_forcing.F90 | 16 +- cicecore/cicedynB/infrastructure/ice_grid.F90 | 364 +++++++++++------- cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 | 2 +- cicecore/drivers/mct/cesm1/ice_comp_mct.F90 | 2 +- .../drivers/mct/cesm1/ice_import_export.F90 | 14 +- .../drivers/nuopc/cmeps/ice_import_export.F90 | 14 +- cicecore/drivers/nuopc/dmi/cice_cap.info | 15 +- 10 files changed, 312 insertions(+), 182 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index 83374d4dd..c550a4b14 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -134,7 +134,7 @@ subroutine eap (dt) stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_grid, only: tmask, umask, dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, to_ugrid, t2ugrid_vector, u2tgrid_vector + tarear, uarear, grid_average_X2Y use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & aice_init, aice0, aicen, vicen, strength ! use ice_timers, only: timer_dynamics, timer_bound, & @@ -254,8 +254,8 @@ subroutine eap (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call to_ugrid(tmass,umass) - call to_ugrid(aice_init, aiu) + call grid_average_X2Y('T2U',tmass,umass) + call grid_average_X2Y('T2U',aice_init, aiu) !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing @@ -270,8 +270,12 @@ subroutine eap (dt) strairx(:,:,:) = strax(:,:,:) strairy(:,:,:) = stray(:,:,:) else - call t2ugrid_vector(strairx) - call t2ugrid_vector(strairy) + call ice_HaloUpdate (strairx, halo_info, & + field_loc_center, field_type_vector) + call ice_HaloUpdate (strairy, halo_info, & + field_loc_center, field_type_vector) + call grid_average_X2Y('T2U',strairx) + call grid_average_X2Y('T2U',strairy) endif ! tcraig, tcx, turned off this threaded region, in evp, this block and @@ -548,8 +552,12 @@ subroutine eap (dt) enddo !$OMP END PARALLEL DO - call u2tgrid_vector(strocnxT) ! shift - call u2tgrid_vector(strocnyT) + call ice_HaloUpdate (strocnxT, halo_info, & + field_loc_NEcorner, field_type_vector) + call ice_HaloUpdate (strocnyT, halo_info, & + field_loc_NEcorner, field_type_vector) + call grid_average_X2Y('U2T',strocnxT) ! shift + call grid_average_X2Y('U2T',strocnyT) call ice_timer_stop(timer_dynamics) ! 
dynamics

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index 8f3fc4910..cf7048e15 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -89,7 +89,7 @@ subroutine evp (dt)
          stressm_1, stressm_2, stressm_3, stressm_4, &
          stress12_1, stress12_2, stress12_3, stress12_4
       use ice_grid, only: tmask, umask, dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, &
-          tarear, uarear, tinyarea, to_ugrid, t2ugrid_vector, u2tgrid_vector, &
+          tarear, uarear, tinyarea, grid_average_X2Y, &
           grid_type
       use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, &
           aice_init, aice0, aicen, vicen, strength
@@ -219,8 +219,8 @@ subroutine evp (dt)
       ! convert fields from T to U grid
       !-----------------------------------------------------------------

-      call to_ugrid(tmass,umass)
-      call to_ugrid(aice_init, aiu)
+      call grid_average_X2Y('T2U',tmass,umass)
+      call grid_average_X2Y('T2U',aice_init, aiu)

       !----------------------------------------------------------------
       ! Set wind stress to values supplied via NEMO or other forcing
@@ -235,8 +235,12 @@ subroutine evp (dt)
          strairx(:,:,:) = strax(:,:,:)
          strairy(:,:,:) = stray(:,:,:)
       else
-         call t2ugrid_vector(strairx)
-         call t2ugrid_vector(strairy)
+         call ice_HaloUpdate (strairx, halo_info, &
+                              field_loc_center, field_type_vector)
+         call ice_HaloUpdate (strairy, halo_info, &
+                              field_loc_center, field_type_vector)
+         call grid_average_X2Y('T2U',strairx)
+         call grid_average_X2Y('T2U',strairy)
       endif

       ! tcraig, tcx, threading here leads to some non-reproducible results and failures in icepack_ice_strength
@@ -557,8 +561,12 @@ subroutine evp (dt)
       enddo
       !$OMP END PARALLEL DO

-      call u2tgrid_vector(strocnxT)    ! shift
-      call u2tgrid_vector(strocnyT)
+      call ice_HaloUpdate (strocnxT, halo_info, &
+                           field_loc_NEcorner, field_type_vector)
+      call ice_HaloUpdate (strocnyT, halo_info, &
+                           field_loc_NEcorner, field_type_vector)
+      call grid_average_X2Y('U2T',strocnxT)    ! shift
+      call grid_average_X2Y('U2T',strocnyT)

       call ice_timer_stop(timer_dynamics)    ! dynamics
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
index 1a6c68548..a8bf7be89 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
@@ -200,8 +200,7 @@ subroutine implicit_solver (dt)
          stressm_1, stressm_2, stressm_3, stressm_4, &
          stress12_1, stress12_2, stress12_3, stress12_4
       use ice_grid, only: tmask, umask, dxt, dyt, cxp, cyp, cxm, cym, &
-          tarear, to_ugrid, t2ugrid_vector, u2tgrid_vector, &
-          grid_type
+          tarear, grid_type, grid_average_X2Y
       use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, &
           aice_init, aice0, aicen, vicen, strength
       use ice_timers, only: timer_dynamics, timer_bound, &
@@ -321,8 +320,8 @@ subroutine implicit_solver (dt)
       ! convert fields from T to U grid
       !-----------------------------------------------------------------

-      call to_ugrid(tmass,umass)
-      call to_ugrid(aice_init, aiu)
+      call grid_average_X2Y('T2U',tmass,umass)
+      call grid_average_X2Y('T2U',aice_init, aiu)

       !----------------------------------------------------------------
       ! Set wind stress to values supplied via NEMO or other forcing
@@ -337,8 +336,12 @@ subroutine implicit_solver (dt)
          strairx(:,:,:) = strax(:,:,:)
          strairy(:,:,:) = stray(:,:,:)
       else
-         call t2ugrid_vector(strairx)
-         call t2ugrid_vector(strairy)
+         call ice_HaloUpdate (strairx, halo_info, &
+                              field_loc_center, field_type_vector)
+         call ice_HaloUpdate (strairy, halo_info, &
+                              field_loc_center, field_type_vector)
+         call grid_average_X2Y('T2U',strairx)
+         call grid_average_X2Y('T2U',strairy)
       endif

       ! tcraig, tcx, threading here leads to some non-reproducible results and failures in icepack_ice_strength
@@ -648,8 +651,12 @@ subroutine implicit_solver (dt)
       enddo
       !$OMP END PARALLEL DO

-      call u2tgrid_vector(strocnxT)    ! shift
-      call u2tgrid_vector(strocnyT)
+      call ice_HaloUpdate (strocnxT, halo_info, &
+                           field_loc_NEcorner, field_type_vector)
+      call ice_HaloUpdate (strocnyT, halo_info, &
+                           field_loc_NEcorner, field_type_vector)
+      call grid_average_X2Y('U2T',strocnxT)    ! shift
+      call grid_average_X2Y('U2T',strocnyT)

       call ice_timer_stop(timer_dynamics)    ! dynamics
diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90
index cede58950..a4dd66c67 100755
--- a/cicecore/cicedynB/general/ice_forcing.F90
+++ b/cicecore/cicedynB/general/ice_forcing.F90
@@ -3954,7 +3954,7 @@ subroutine ocn_data_ncar_init_3D
       use ice_blocks, only: nx_block, ny_block
       use ice_domain_size, only: max_blocks
-      use ice_grid, only: to_ugrid, ANGLET
+      use ice_grid, only: grid_average_X2Y, ANGLET
       use ice_read_write, only: ice_read_nc_uv
 #ifdef USE_NETCDF
       use netcdf
@@ -4072,8 +4072,8 @@ subroutine ocn_data_ncar_init_3D
             work1(:,:,:) = ocn_frc_m(:,:,:,n  ,m)
             work2(:,:,:) = ocn_frc_m(:,:,:,n+1,m)
-            call to_ugrid(work1,ocn_frc_m(:,:,:,n  ,m))
-            call to_ugrid(work2,ocn_frc_m(:,:,:,n+1,m))
+            call grid_average_X2Y('T2U',work1,ocn_frc_m(:,:,:,n  ,m))
+            call grid_average_X2Y('T2U',work2,ocn_frc_m(:,:,:,n+1,m))
          enddo               ! month loop
       enddo               ! field loop
@@ -4315,7 +4315,7 @@ subroutine ocn_data_hadgem(dt)
       use ice_domain, only: nblocks
       use ice_flux, only: sst, uocn, vocn
-      use ice_grid, only: t2ugrid_vector, ANGLET
+      use ice_grid, only: grid_average_X2Y, ANGLET

       real (kind=dbl_kind), intent(in) :: &
            dt                   ! time step
@@ -4474,8 +4474,8 @@ subroutine ocn_data_hadgem(dt)
       ! Interpolate to U grid
       !-----------------------------------------------------------------

-      call t2ugrid_vector(uocn)
-      call t2ugrid_vector(vocn)
+      call grid_average_X2Y('T2U',uocn)
+      call grid_average_X2Y('T2U',vocn)

       endif   ! ocn_data_type = hadgem_sst_uvocn
@@ -5257,7 +5257,7 @@ subroutine box2001_data
       use ice_calendar, only: timesecs
       use ice_blocks, only: nx_block, ny_block, nghost
       use ice_flux, only: uocn, vocn, uatm, vatm, wind, rhoa, strax, stray
-      use ice_grid, only: uvm, to_ugrid
+      use ice_grid, only: uvm, grid_average_X2Y
       use ice_state, only: aice

      !
local parameters @@ -5278,7 +5278,7 @@ subroutine box2001_data call icepack_query_parameters(pi_out=pi, pi2_out=pi2, puny_out=puny) call icepack_query_parameters(secday_out=secday) - call to_ugrid(aice, aiu) + call grid_average_X2Y('T2U',aice, aiu) period = c4*secday diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 0b6fba962..7a80d963f 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -45,9 +45,8 @@ module ice_grid implicit none private - public :: init_grid1, init_grid2, & - t2ugrid_vector, u2tgrid_vector, & - to_ugrid, to_tgrid, alloc_grid, makemask + public :: init_grid1, init_grid2, grid_average_X2Y, & + alloc_grid, makemask character (len=char_len_long), public :: & grid_format , & ! file format ('bin'=binary or 'nc'=netcdf) @@ -2150,52 +2149,87 @@ end subroutine Tlatlon !======================================================================= -! Transfer vector component from T-cell centers to U-cell centers. +! Shifts quantities from one grid to another +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. ! -! author: Elizabeth C. Hunke, LANL +! author: T. Craig - subroutine t2ugrid_vector (work) + subroutine grid_average_X2Y(X2Y,work1,work2) - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: field_loc_center, field_type_vector - use ice_domain_size, only: max_blocks + character(len=*) , intent(in) :: & + X2Y + + real (kind=dbl_kind), intent(inout) :: & + work1(nx_block,ny_block,max_blocks) - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(inout) :: & - work + real (kind=dbl_kind), intent(out), optional :: & + work2(nx_block,ny_block,max_blocks) ! local variables real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 - - character(len=*), parameter :: subname = '(t2ugrid_vector)' - - work1(:,:,:) = work(:,:,:) - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (work1, halo_info, & - field_loc_center, field_type_vector) - call ice_timer_stop(timer_bound) - - call to_ugrid(work1,work) + work2tmp + + character(len=*), parameter :: subname = '(grid_average_X2Y)' + + select case (trim(X2Y)) + + case('T2U') + call grid_average_X2Y_compute('NE',work1,tarea,work2tmp,uarea) + case('T2E') + call grid_average_X2Y_compute('E' ,work1,tarea,work2tmp,earea) + case('T2N') + call grid_average_X2Y_compute('N' ,work1,tarea,work2tmp,narea) + case('U2T') + call grid_average_X2Y_compute('SW',work1,uarea,work2tmp,tarea) + case('U2E') + call grid_average_X2Y_compute('S' ,work1,uarea,work2tmp,earea) + case('U2N') + call grid_average_X2Y_compute('W' ,work1,uarea,work2tmp,narea) + case('E2T') + call grid_average_X2Y_compute('W' ,work1,earea,work2tmp,tarea) + case('E2U') + call grid_average_X2Y_compute('N' ,work1,earea,work2tmp,uarea) + case('E2N') + call grid_average_X2Y_compute('NW',work1,earea,work2tmp,narea) + case('N2T') + call grid_average_X2Y_compute('S' ,work1,narea,work2tmp,tarea) + case('N2U') + call grid_average_X2Y_compute('E' ,work1,narea,work2tmp,uarea) + case('N2E') + call grid_average_X2Y_compute('SE',work1,narea,work2tmp,earea) + case default + call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) + end select + + if (present(work2)) then + work2 = work2tmp + else + work1 = work2tmp + endif - end subroutine t2ugrid_vector + end subroutine grid_average_X2Y !======================================================================= -! 
Shifts quantities from the T-cell midpoint (work1) to the U-cell -! midpoint (work2) +! Shifts quantities from one grid to another ! NOTE: Input array includes ghost cells that must be updated before ! calling this routine. ! -! author: Elizabeth C. Hunke, LANL +! author: T. Craig - subroutine to_ugrid(work1,work2) + subroutine grid_average_X2Y_compute(dir,work1,area1,work2,area2) - use ice_constants, only: c0, p25 + use ice_constants, only: c0, p25, p5 + + character(len=*) , intent(in) :: & + dir real (kind=dbl_kind), intent(in) :: & - work1(nx_block,ny_block,max_blocks) + work1(nx_block,ny_block,max_blocks), & + area1(nx_block,ny_block,max_blocks), & + area2(nx_block,ny_block,max_blocks) real (kind=dbl_kind), intent(out) :: & work2(nx_block,ny_block,max_blocks) @@ -2203,7 +2237,7 @@ subroutine to_ugrid(work1,work2) type (block) :: & this_block ! block information for current block - character(len=*), parameter :: subname = '(to_ugrid)' + character(len=*), parameter :: subname = '(grid_average_X2Y_compute)' ! local variables @@ -2213,113 +2247,173 @@ subroutine to_ugrid(work1,work2) work2(:,:,:) = c0 - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - work2(i,j,iblk) = p25 * & - (work1(i, j, iblk)*tarea(i, j, iblk) & - + work1(i+1,j, iblk)*tarea(i+1,j, iblk) & - + work1(i, j+1,iblk)*tarea(i, j+1,iblk) & - + work1(i+1,j+1,iblk)*tarea(i+1,j+1,iblk)) & - / uarea(i, j, iblk) - enddo - enddo - enddo - !$OMP END PARALLEL DO - - end subroutine to_ugrid - -!======================================================================= - -! Transfer from U-cell centers to T-cell centers. Writes work into -! another array that has ghost cells -! NOTE: Input array is dimensioned only over physical cells. -! -! author: Elizabeth C. Hunke, LANL - - subroutine u2tgrid_vector (work) - - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: field_loc_NEcorner, field_type_vector - use ice_domain_size, only: max_blocks - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & - work - - ! local variables - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 - - character(len=*), parameter :: subname = '(u2tgrid_vector)' - - work1(:,:,:) = work(:,:,:) - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (work1, halo_info, & - field_loc_NEcorner, field_type_vector) - call ice_timer_stop(timer_bound) - - call to_tgrid(work1,work) - - end subroutine u2tgrid_vector - -!======================================================================= - -! Shifts quantities from the U-cell midpoint (work1) to the T-cell -! midpoint (work2) -! NOTE: Input array includes ghost cells that must be updated before -! calling this routine. -! -! author: Elizabeth C. Hunke, LANL - - subroutine to_tgrid(work1, work2) - - use ice_constants, only: p25 - - real (kind=dbl_kind) :: work1(nx_block,ny_block,max_blocks), & - work2(nx_block,ny_block,max_blocks) - - ! local variables - - integer (kind=int_kind) :: & - i, j, iblk, & - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - type (block) :: & - this_block ! 
block information for current block - - character(len=*), parameter :: subname = '(to_tgrid)' + select case (trim(dir)) - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi + case('NE') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p25 * & + (work1(i, j, iblk)*area1(i, j, iblk) & + + work1(i+1,j, iblk)*area1(i+1,j, iblk) & + + work1(i, j+1,iblk)*area1(i, j+1,iblk) & + + work1(i+1,j+1,iblk)*area1(i+1,j+1,iblk)) & + / area2(i, j, iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('SW') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p25 * & + (work1(i, j, iblk) * area1(i, j, iblk) & + + work1(i-1,j, iblk) * area1(i-1,j, iblk) & + + work1(i, j-1,iblk) * area1(i, j-1,iblk) & + + work1(i-1,j-1,iblk) * area1(i-1,j-1,iblk)) & + / area2(i, j, iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('NW') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p25 * & + (work1(i-1,j, iblk)*area1(i-1,j, iblk) & + + work1(i, j, iblk)*area1(i, j, iblk) & + + work1(i-1,j+1,iblk)*area1(i-1,j+1,iblk) & + + work1(i, j+1,iblk)*area1(i ,j+1,iblk)) & + / area2(i, j, iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('SE') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p25 * & + (work1(i ,j-1,iblk) * area1(i ,j-1,iblk)) & + + work1(i+1,j-1,iblk) * area1(i+1,j-1,iblk) & + + work1(i ,j ,iblk) * area1(i ,j, iblk) & + + work1(i+1,j ,iblk) * area1(i+1,j, iblk) & + / area2(i, j, iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('E') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p5 * & + (work1(i, j,iblk)*area1(i, j,iblk) & + + work1(i+1,j,iblk)*area1(i+1,j,iblk)) & + / area2(i, j,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('W') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p5 * & + (work1(i-1,j,iblk)*area1(i-1,j,iblk) & + + work1(i, j,iblk)*area1(i, j,iblk)) & + / area2(i, j,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('N') + !$OMP PARALLEL DO 
PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p5 * & + (work1(i,j, iblk)*area1(i,j, iblk) & + + work1(i,j+1,iblk)*area1(i,j+1,iblk)) & + / area2(i, j,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('S') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + work2(i,j,iblk) = p5 * & + (work1(i,j-1,iblk)*area1(i,j-1,iblk) & + + work1(i,j, iblk)*area1(i,j, iblk)) & + / area2(i, j,iblk) + enddo + enddo + enddo + !$OMP END PARALLEL DO - do j = jlo, jhi - do i = ilo, ihi - work2(i,j,iblk) = p25 * & - (work1(i, j ,iblk) * uarea(i, j, iblk) & - + work1(i-1,j ,iblk) * uarea(i-1,j, iblk) & - + work1(i, j-1,iblk) * uarea(i, j-1,iblk) & - + work1(i-1,j-1,iblk) * uarea(i-1,j-1,iblk)) & - / tarea(i, j, iblk) - enddo - enddo - enddo - !$OMP END PARALLEL DO + case default + call abort_ice(subname//'ERROR: unknown dir '//trim(dir)) + end select - end subroutine to_tgrid + end subroutine grid_average_X2Y_compute !======================================================================= ! The following code is used for obtaining the coordinates of the grid diff --git a/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 b/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 index 08681d84f..b0a78bfcd 100644 --- a/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 +++ b/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 @@ -44,7 +44,7 @@ module ice_comp_esmf use ice_domain, only : nblocks, blocks_ice, halo_info, distrb_info use ice_blocks, only : block, get_block, nx_block, ny_block use ice_grid, only : tlon, tlat, tarea, tmask, anglet, hm, & - grid_type, t2ugrid_vector, gridcpl_file, ocn_gridcell_frac + grid_type, gridcpl_file, ocn_gridcell_frac use ice_constants, only : c0, c1, spval_dbl, rad_to_deg, radius, secday use ice_communicate, only : my_task, master_task, MPI_COMM_ICE use ice_calendar, only : istep, istep1, force_restart_now, write_ic,& diff --git a/cicecore/drivers/mct/cesm1/ice_comp_mct.F90 b/cicecore/drivers/mct/cesm1/ice_comp_mct.F90 index 64dff54e2..d663d0f97 100644 --- a/cicecore/drivers/mct/cesm1/ice_comp_mct.F90 +++ b/cicecore/drivers/mct/cesm1/ice_comp_mct.F90 @@ -42,7 +42,7 @@ module ice_comp_mct use ice_domain, only : nblocks, blocks_ice, halo_info, distrb_info use ice_blocks, only : block, get_block, nx_block, ny_block use ice_grid, only : tlon, tlat, tarea, tmask, anglet, hm, & - grid_type, t2ugrid_vector, gridcpl_file, ocn_gridcell_frac + grid_type, gridcpl_file, ocn_gridcell_frac use ice_constants, only : c0, c1, spval_dbl, radius use ice_constants, only : ice_init_constants use ice_communicate, only : my_task, master_task, MPI_COMM_ICE diff --git a/cicecore/drivers/mct/cesm1/ice_import_export.F90 b/cicecore/drivers/mct/cesm1/ice_import_export.F90 index d42d3f8a1..d0eac5a19 100644 --- a/cicecore/drivers/mct/cesm1/ice_import_export.F90 +++ b/cicecore/drivers/mct/cesm1/ice_import_export.F90 @@ -29,7 +29,7 @@ module ice_import_export use ice_domain , only: nblocks, blocks_ice, halo_info, distrb_info use ice_domain_size , only: nx_global, ny_global, block_size_x, block_size_y, max_blocks use ice_grid , only: tlon, tlat, tarea, tmask, anglet, hm - use ice_grid , only: grid_type, 
t2ugrid_vector + use ice_grid , only: grid_type, grid_average_X2Y use ice_boundary , only: ice_HaloUpdate use ice_communicate , only: my_task, master_task, MPI_COMM_ICE, get_num_procs use ice_calendar , only: istep, istep1, diagfreq @@ -468,10 +468,14 @@ subroutine ice_import( x2i ) if (.not.prescribed_ice) then call t_startf ('cice_imp_t2u') - call t2ugrid_vector(uocn) - call t2ugrid_vector(vocn) - call t2ugrid_vector(ss_tltx) - call t2ugrid_vector(ss_tlty) + call ice_HaloUpdate(uocn, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(vocn, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(ss_tltx, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(ss_tlty, halo_info, field_loc_center, field_type_scalar) + call grid_average_X2Y('T2U',uocn) + call grid_average_X2Y('T2U',vocn) + call grid_average_X2Y('T2U',ss_tltx) + call grid_average_X2Y('T2U',ss_tlty) call t_stopf ('cice_imp_t2u') end if diff --git a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 index 62ff2727d..50cba8883 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 @@ -25,7 +25,7 @@ module ice_import_export use ice_flux , only : sss, Tf, wind, fsw use ice_state , only : vice, vsno, aice, aicen_init, trcr use ice_grid , only : tlon, tlat, tarea, tmask, anglet, hm - use ice_grid , only : grid_type, t2ugrid_vector + use ice_grid , only : grid_type, grid_average_X2Y use ice_mesh_mod , only : ocn_gridcell_frac use ice_boundary , only : ice_HaloUpdate use ice_fileunits , only : nu_diag, flush_fileunit @@ -797,10 +797,14 @@ subroutine ice_import( importState, rc ) if (.not.prescribed_ice) then call t_startf ('cice_imp_t2u') - call t2ugrid_vector(uocn) - call t2ugrid_vector(vocn) - call t2ugrid_vector(ss_tltx) - call t2ugrid_vector(ss_tlty) + call ice_HaloUpdate(uocn, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(vocn, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(ss_tltx, halo_info, field_loc_center, field_type_scalar) + call ice_HaloUpdate(ss_tlty, halo_info, field_loc_center, field_type_scalar) + call grid_average_X2Y('T2U',uocn) + call grid_average_X2Y('T2U',vocn) + call grid_average_X2Y('T2U',ss_tltx) + call grid_average_X2Y('T2U',ss_tlty) call t_stopf ('cice_imp_t2u') end if diff --git a/cicecore/drivers/nuopc/dmi/cice_cap.info b/cicecore/drivers/nuopc/dmi/cice_cap.info index 49127cc15..d1eee8ae0 100644 --- a/cicecore/drivers/nuopc/dmi/cice_cap.info +++ b/cicecore/drivers/nuopc/dmi/cice_cap.info @@ -18,7 +18,7 @@ module cice_cap use ice_calendar, only: dt use ice_flux use ice_grid, only: TLAT, TLON, ULAT, ULON, hm, tarea, ANGLET, ANGLE, & - dxt, dyt, t2ugrid_vector + dxt, dyt, grid_average_X2Y use ice_state use CICE_RunMod use CICE_InitMod @@ -934,12 +934,17 @@ module cice_cap ss_tlty(i,j,iblk) = ue*sin(AngT_s) + vn*cos(AngT_s) enddo enddo - call t2ugrid_vector(ss_tltx) - call t2ugrid_vector(ss_tlty) - call t2ugrid_vector(uocn) - call t2ugrid_vector(vocn) enddo +! call ice_HaloUpdate(uocn, halo_info, field_loc_center, field_type_scalar) +! call ice_HaloUpdate(vocn, halo_info, field_loc_center, field_type_scalar) +! call ice_HaloUpdate(ss_tltx, halo_info, field_loc_center, field_type_scalar) +! 
call ice_HaloUpdate(ss_tlty, halo_info, field_loc_center, field_type_scalar) + call grid_average_X2Y('T2U',uocn) + call grid_average_X2Y('T2U',vocn) + call grid_average_X2Y('T2U',ss_tltx) + call grid_average_X2Y('T2U',ss_tlty) + end subroutine subroutine CICE_Export(st,rc) type(ESMF_State) :: st From f37f4fd68b5ecd80cfe7b3d47e3410c77d381ee2 Mon Sep 17 00:00:00 2001 From: apcraig Date: Wed, 3 Nov 2021 18:32:16 -0600 Subject: [PATCH 004/109] add gridavgchk, add blockall, add set_nml.dwblockall --- .../cicedynB/infrastructure/ice_domain.F90 | 1 + cicecore/cicedynB/infrastructure/ice_grid.F90 | 4 +- .../unittest/gridavgchk/CICE_InitMod.F90 | 486 ++++++++++++++++++ .../unittest/gridavgchk/gridavgchk.F90 | 416 +++++++++++++++ configuration/scripts/Makefile | 6 +- .../scripts/options/set_env.gridavgchk | 2 + .../scripts/options/set_nml.dwblockall | 1 + configuration/scripts/tests/unittest_suite.ts | 2 + doc/source/user_guide/ug_case_settings.rst | 3 +- doc/source/user_guide/ug_implementation.rst | 4 +- 10 files changed, 919 insertions(+), 6 deletions(-) create mode 100644 cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 create mode 100644 cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 create mode 100644 configuration/scripts/options/set_env.gridavgchk create mode 100644 configuration/scripts/options/set_nml.dwblockall diff --git a/cicecore/cicedynB/infrastructure/ice_domain.F90 b/cicecore/cicedynB/infrastructure/ice_domain.F90 index 1dfdd0428..ae6e83195 100644 --- a/cicecore/cicedynB/infrastructure/ice_domain.F90 +++ b/cicecore/cicedynB/infrastructure/ice_domain.F90 @@ -523,6 +523,7 @@ subroutine init_domain_distribution(KMTG,ULATG) #else if (distribution_wght == 'block' .and. & ! POP style nocn(n) > 0) nocn(n) = nx_block*ny_block + if (distribution_wght == 'blockall') nocn(n) = nx_block*ny_block #endif end do endif ! distribution_wght = file diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 7a80d963f..70329bfe7 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -2323,10 +2323,10 @@ subroutine grid_average_X2Y_compute(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p25 * & - (work1(i ,j-1,iblk) * area1(i ,j-1,iblk)) & + (work1(i ,j-1,iblk) * area1(i ,j-1,iblk) & + work1(i+1,j-1,iblk) * area1(i+1,j-1,iblk) & + work1(i ,j ,iblk) * area1(i ,j, iblk) & - + work1(i+1,j ,iblk) * area1(i+1,j, iblk) & + + work1(i+1,j ,iblk) * area1(i+1,j, iblk)) & / area2(i, j, iblk) enddo enddo diff --git a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 new file mode 100644 index 000000000..60f71fa8a --- /dev/null +++ b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 @@ -0,0 +1,486 @@ +!======================================================================= +! +! This module contains the CICE initialization routine that sets model +! parameters and initializes the grid and CICE state variables. +! +! authors Elizabeth C. Hunke, LANL +! William H. Lipscomb, LANL +! Philip W. Jones, LANL +! +! 2006: Converted to free form source (F90) by Elizabeth Hunke +! 2008: E. 
Hunke moved ESMF code to its own driver + + module CICE_InitMod + + use ice_kinds_mod + use ice_exit, only: abort_ice + use ice_fileunits, only: init_fileunits, nu_diag + use icepack_intfc, only: icepack_aggregate + use icepack_intfc, only: icepack_init_itd, icepack_init_itd_hist + use icepack_intfc, only: icepack_init_fsd_bounds, icepack_init_wave + use icepack_intfc, only: icepack_configure + use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted + use icepack_intfc, only: icepack_query_parameters, icepack_query_tracer_flags, & + icepack_query_tracer_indices, icepack_query_tracer_sizes + + implicit none + private + public :: CICE_Initialize, cice_init + +!======================================================================= + + contains + +!======================================================================= + +! Initialize the basic state, grid and all necessary parameters for +! running the CICE model. Return the initial state in routine +! export state. +! Note: This initialization driver is designed for standalone and +! CESM-coupled applications. For other +! applications (e.g., standalone CAM), this driver would be +! replaced by a different driver that calls subroutine cice_init, +! where most of the work is done. + + subroutine CICE_Initialize + + character(len=*), parameter :: subname='(CICE_Initialize)' + !-------------------------------------------------------------------- + ! model initialization + !-------------------------------------------------------------------- + + call cice_init + + end subroutine CICE_Initialize + +!======================================================================= +! +! Initialize CICE model. + + subroutine cice_init + + use ice_arrays_column, only: hin_max, c_hi_range, alloc_arrays_column + use ice_arrays_column, only: floe_rad_l, floe_rad_c, & + floe_binwidth, c_fsd_range + use ice_state, only: alloc_state + use ice_flux_bgc, only: alloc_flux_bgc + use ice_calendar, only: dt, dt_dyn, istep, istep1, write_ic, & + init_calendar, advance_timestep, calc_timesteps + use ice_communicate, only: init_communicate, my_task, master_task + use ice_diagnostics, only: init_diags + use ice_domain, only: init_domain_blocks + use ice_domain_size, only: ncat, nfsd + use ice_dyn_eap, only: init_eap, alloc_dyn_eap + use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_vp, only: init_vp + use ice_flux, only: init_coupler_flux, init_history_therm, & + init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux + use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & + get_forcing_atmo, get_forcing_ocn, get_wave_spec + use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, & + faero_default, faero_optics, alloc_forcing_bgc, fiso_default + use ice_grid, only: init_grid1, init_grid2, alloc_grid + use ice_history, only: init_hist, accum_hist + use ice_restart_shared, only: restart, runtype + use ice_init, only: input_data, init_state + use ice_init_column, only: init_thermo_vertical, init_shortwave, init_zbgc, input_zbgc, count_tracers + use ice_kinds_mod + use ice_restoring, only: ice_HaloRestore_init + use ice_timers, only: timer_total, init_ice_timers, ice_timer_start + use ice_transport_driver, only: init_transport + + logical(kind=log_kind) :: tr_aero, tr_zaero, skl_bgc, z_tracers, & + tr_iso, tr_fsd, wave_spec + character(len=*), parameter :: subname = '(cice_init)' + + call init_communicate ! initial setup for message passing + call init_fileunits ! unit numbers + + ! 
tcx debug, this will create a different logfile for each pe + ! if (my_task /= master_task) nu_diag = 100+my_task + + call icepack_configure() ! initialize icepack + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + call input_data ! namelist variables + call input_zbgc ! vertical biogeochemistry namelist + call count_tracers ! count tracers + + call init_domain_blocks ! set up block decomposition + call init_grid1 ! domain distribution + call alloc_grid ! allocate grid arrays + call alloc_arrays_column ! allocate column arrays + call alloc_state ! allocate state arrays + call alloc_dyn_shared ! allocate dyn shared arrays + call alloc_flux_bgc ! allocate flux_bgc arrays + call alloc_flux ! allocate flux arrays + call init_ice_timers ! initialize all timers + call ice_timer_start(timer_total) ! start timing entire run + call init_grid2 ! grid variables + call init_zbgc ! vertical biogeochemistry initialization + call init_calendar ! initialize some calendar stuff + call init_hist (dt) ! initialize output history file + + call init_dyn (dt_dyn) ! define dynamics parameters, variables + if (kdyn == 2) then + call alloc_dyn_eap ! allocate dyn_eap arrays + call init_eap ! define eap dynamics parameters, variables + else if (kdyn == 3) then + call init_vp ! define vp dynamics parameters, variables + endif + + call init_coupler_flux ! initialize fluxes exchanged with coupler + + call init_thermo_vertical ! initialize vertical thermodynamics + + call icepack_init_itd(ncat=ncat, hin_max=hin_max) ! ice thickness distribution + if (my_task == master_task) then + call icepack_init_itd_hist(ncat=ncat, hin_max=hin_max, c_hi_range=c_hi_range) ! output + endif + + call icepack_query_tracer_flags(tr_fsd_out=tr_fsd) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + if (tr_fsd) call icepack_init_fsd_bounds (nfsd, & ! floe size distribution + floe_rad_l, & ! fsd size lower bound in m (radius) + floe_rad_c, & ! fsd size bin centre in m (radius) + floe_binwidth, & ! fsd size bin width in m (radius) + c_fsd_range, & ! string for history output + write_diags=(my_task == master_task)) ! write diag on master only + + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + call init_forcing_ocn(dt) ! initialize sss and sst from data + call init_state ! initialize the ice state + call init_transport ! initialize horizontal transport + call ice_HaloRestore_init ! restored boundary conditions + + call icepack_query_parameters(skl_bgc_out=skl_bgc, z_tracers_out=z_tracers, & + wave_spec_out=wave_spec) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + if (skl_bgc .or. z_tracers) call alloc_forcing_bgc ! allocate biogeochemistry arrays + + call init_restart ! initialize restart variables + call init_diags ! initialize diagnostic output points + call init_history_therm ! initialize thermo history variables + call init_history_dyn ! initialize dynamic history variables + call calc_timesteps ! 
update timestep counter if not using npt_unit="1" + + call icepack_query_tracer_flags(tr_aero_out=tr_aero, tr_zaero_out=tr_zaero) + call icepack_query_tracer_flags(tr_iso_out=tr_iso) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical + !property tables + + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing + ! in prep_radiation. + if (trim(runtype) == 'continue' .or. restart) & + call init_shortwave ! initialize radiative transfer + +! tcraig, use advance_timestep here +! istep = istep + 1 ! update time step counters +! istep1 = istep1 + 1 +! time = time + dt ! determine the time and date +! call calendar(time) ! at the end of the first timestep + call advance_timestep() + + !-------------------------------------------------------------------- + ! coupler communication or forcing data initialization + !-------------------------------------------------------------------- + + call init_forcing_atmo ! initialize atmospheric forcing (standalone) + + if (tr_fsd .and. wave_spec) call get_wave_spec ! wave spectrum in ice + call get_forcing_atmo ! atmospheric forcing from data + call get_forcing_ocn(dt) ! ocean forcing from data + + ! isotopes + if (tr_iso) call fiso_default ! default values + ! aerosols + ! if (tr_aero) call faero_data ! data file + ! if (tr_zaero) call fzaero_data ! data file (gx1) + if (tr_aero .or. tr_zaero) call faero_default ! default values + if (skl_bgc .or. z_tracers) call get_forcing_bgc ! biogeochemistry + if (z_tracers) call get_atm_bgc ! biogeochemistry + + if (runtype == 'initial' .and. .not. restart) & + call init_shortwave ! initialize radiative transfer using current swdn + + call init_flux_atm ! initialize atmosphere fluxes sent to coupler + call init_flux_ocn ! initialize ocean fluxes sent to coupler + + if (write_ic) call accum_hist(dt) ! write initial conditions + + end subroutine cice_init + +!======================================================================= + + subroutine init_restart + + use ice_arrays_column, only: dhsn + use ice_blocks, only: nx_block, ny_block + use ice_calendar, only: calendar + use ice_constants, only: c0 + use ice_domain, only: nblocks + use ice_domain_size, only: ncat, n_iso, n_aero, nfsd + use ice_dyn_eap, only: read_restart_eap + use ice_dyn_shared, only: kdyn + use ice_grid, only: tmask + use ice_init, only: ice_ic + use ice_init_column, only: init_age, init_FY, init_lvl, & + init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & + init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd + use ice_restart_column, only: restart_age, read_restart_age, & + restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & + restart_pond_cesm, read_restart_pond_cesm, & + restart_pond_lvl, read_restart_pond_lvl, & + restart_pond_topo, read_restart_pond_topo, & + restart_fsd, read_restart_fsd, & + restart_iso, read_restart_iso, & + restart_aero, read_restart_aero, & + restart_hbrine, read_restart_hbrine, & + restart_zsal, restart_bgc + use ice_restart_driver, only: restartfile + use ice_restart_shared, only: runtype, restart + use ice_state ! almost everything + + integer(kind=int_kind) :: & + i, j , & ! horizontal indices + iblk ! 
block index + logical(kind=log_kind) :: & + tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & + tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, & + skl_bgc, z_tracers, solve_zsal + integer(kind=int_kind) :: & + ntrcr + integer(kind=int_kind) :: & + nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, & + nt_iage, nt_FY, nt_aero, nt_fsd, nt_isosno, nt_isoice + + character(len=*), parameter :: subname = '(init_restart)' + + call icepack_query_tracer_sizes(ntrcr_out=ntrcr) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + call icepack_query_parameters(skl_bgc_out=skl_bgc, & + z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) + call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & + tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & + tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & + tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) + call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & + nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd, nt_ipnd_out=nt_ipnd, & + nt_iage_out=nt_iage, nt_FY_out=nt_FY, nt_aero_out=nt_aero, nt_fsd_out=nt_fsd, & + nt_isosno_out=nt_isosno, nt_isoice_out=nt_isoice) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + if (trim(runtype) == 'continue') then + ! start from core restart file + call restartfile() ! given by pointer in ice_in + call calendar() ! update time parameters + if (kdyn == 2) call read_restart_eap ! EAP + else if (restart) then ! ice_ic = core restart file + call restartfile (ice_ic) ! or 'default' or 'none' + !!! uncomment to create netcdf + ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file + !!! uncomment if EAP restart data exists + ! if (kdyn == 2) call read_restart_eap + endif + + ! tracers + ! ice age tracer + if (tr_iage) then + if (trim(runtype) == 'continue') & + restart_age = .true. + if (restart_age) then + call read_restart_age + else + do iblk = 1, nblocks + call init_age(trcrn(:,:,nt_iage,:,iblk)) + enddo ! iblk + endif + endif + ! first-year area tracer + if (tr_FY) then + if (trim(runtype) == 'continue') restart_FY = .true. + if (restart_FY) then + call read_restart_FY + else + do iblk = 1, nblocks + call init_FY(trcrn(:,:,nt_FY,:,iblk)) + enddo ! iblk + endif + endif + ! level ice tracer + if (tr_lvl) then + if (trim(runtype) == 'continue') restart_lvl = .true. + if (restart_lvl) then + call read_restart_lvl + else + do iblk = 1, nblocks + call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & + trcrn(:,:,nt_vlvl,:,iblk)) + enddo ! iblk + endif + endif + ! CESM melt ponds + if (tr_pond_cesm) then + if (trim(runtype) == 'continue') & + restart_pond_cesm = .true. + if (restart_pond_cesm) then + call read_restart_pond_cesm + else + do iblk = 1, nblocks + call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & + trcrn(:,:,nt_hpnd,:,iblk)) + enddo ! iblk + endif + endif + ! level-ice melt ponds + if (tr_pond_lvl) then + if (trim(runtype) == 'continue') & + restart_pond_lvl = .true. + if (restart_pond_lvl) then + call read_restart_pond_lvl + else + do iblk = 1, nblocks + call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & + trcrn(:,:,nt_hpnd,:,iblk), & + trcrn(:,:,nt_ipnd,:,iblk), & + dhsn(:,:,:,iblk)) + enddo ! iblk + endif + endif + ! topographic melt ponds + if (tr_pond_topo) then + if (trim(runtype) == 'continue') & + restart_pond_topo = .true. 
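+         ! Note (editorial comment, summarizing the pattern visible above and
+         ! below): every tracer restart block in this routine follows the same
+         ! logic. A 'continue' run forces the tracer's restart flag on, and the
+         ! field is then either read from the restart file or initialized to
+         ! its default values block by block.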
+ if (restart_pond_topo) then + call read_restart_pond_topo + else + do iblk = 1, nblocks + call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & + trcrn(:,:,nt_hpnd,:,iblk), & + trcrn(:,:,nt_ipnd,:,iblk)) + enddo ! iblk + endif ! .not. restart_pond + endif + ! floe size distribution + if (tr_fsd) then + if (trim(runtype) == 'continue') restart_fsd = .true. + if (restart_fsd) then + call read_restart_fsd + else + call init_fsd(trcrn(:,:,nt_fsd:nt_fsd+nfsd-1,:,:)) + endif + endif + + ! isotopes + if (tr_iso) then + if (trim(runtype) == 'continue') restart_iso = .true. + if (restart_iso) then + call read_restart_iso + else + do iblk = 1, nblocks + call init_isotope(trcrn(:,:,nt_isosno:nt_isosno+n_iso-1,:,iblk), & + trcrn(:,:,nt_isoice:nt_isoice+n_iso-1,:,iblk)) + enddo ! iblk + endif + endif + + if (tr_aero) then ! ice aerosol + if (trim(runtype) == 'continue') restart_aero = .true. + if (restart_aero) then + call read_restart_aero + else + do iblk = 1, nblocks + call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) + enddo ! iblk + endif ! .not. restart_aero + endif + + if (trim(runtype) == 'continue') then + if (tr_brine) & + restart_hbrine = .true. + if (solve_zsal) & + restart_zsal = .true. + if (skl_bgc .or. z_tracers) & + restart_bgc = .true. + endif + + if (tr_brine .or. skl_bgc) then ! brine height tracer + call init_hbrine + if (tr_brine .and. restart_hbrine) call read_restart_hbrine + endif + + if (solve_zsal .or. skl_bgc .or. z_tracers) then ! biogeochemistry + if (tr_fsd) then + write (nu_diag,*) 'FSD implementation incomplete for use with BGC' + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + endif + call init_bgc + endif + + !----------------------------------------------------------------- + ! aggregate tracers + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j,iblk)) then + call icepack_aggregate(ncat = ncat, & + aicen = aicen(i,j,:,iblk), & + trcrn = trcrn(i,j,:,:,iblk), & + vicen = vicen(i,j,:,iblk), & + vsnon = vsnon(i,j,:,iblk), & + aice = aice (i,j, iblk), & + trcr = trcr (i,j,:,iblk), & + vice = vice (i,j, iblk), & + vsno = vsno (i,j, iblk), & + aice0 = aice0(i,j, iblk), & + ntrcr = ntrcr, & + trcr_depend = trcr_depend, & + trcr_base = trcr_base, & + n_trcr_strata = n_trcr_strata, & + nt_strata = nt_strata) + else + ! tcraig, reset all tracer values on land to zero + trcrn(i,j,:,:,iblk) = c0 + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + end subroutine init_restart + +!======================================================================= + + end module CICE_InitMod + +!======================================================================= diff --git a/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 b/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 new file mode 100644 index 000000000..e4c650649 --- /dev/null +++ b/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 @@ -0,0 +1,416 @@ + + program gridavgchk + + ! This tests the CICE grid_average_X2Y methods by + ! using CICE_InitMod (from the standalone model) to read/initialize + ! a CICE grid/configuration. Then methods in grid_average_X2Y + ! are verified using hardwired inputs with known outputs. 
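+ ! For reference, the twelve mappings exercised below are T2U, T2N, T2E,
+ ! U2T, U2N, U2E, N2T, N2U, N2E, E2T, E2U, and E2N. As an example of the
+ ! expected answer, the T2U ('NE') case in grid_average_X2Y_compute
+ ! (ice_grid.F90) forms the area-weighted average
+ !
+ !   u(i,j) = p25 * ( t(i  ,j  )*tarea(i  ,j  ) + t(i+1,j  )*tarea(i+1,j  )
+ !                  + t(i  ,j+1)*tarea(i  ,j+1) + t(i+1,j+1)*tarea(i+1,j+1) )
+ !                  / uarea(i,j)
+ !
+ ! and the other directions follow the same pattern with 2 or 4 neighbors.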
+ + use CICE_InitMod + use ice_kinds_mod, only: int_kind, dbl_kind + use ice_blocks, only: block, get_block, nx_block, ny_block, nblocks_tot + use ice_boundary, only: ice_HaloUpdate + use ice_constants, only: c0, c1, c2, p25, & + field_loc_center, field_loc_NEcorner, & + field_loc_Nface, field_loc_Eface, field_type_scalar + use ice_communicate, only: my_task, master_task, get_num_procs, MPI_COMM_ICE + use ice_distribution, only: ice_distributionGetBlockID, ice_distributionGet + use ice_domain_size, only: nx_global, ny_global, & + block_size_x, block_size_y, max_blocks + use ice_domain, only: distrb_info, halo_info + use ice_fileunits, only: bfbflag + use ice_exit, only: abort_ice, end_run + use ice_global_reductions, only: global_minval, global_maxval + use ice_grid, only: grid_average_X2Y,tarea,uarea,narea,earea,tmask,umask,nmask,emask + + implicit none + + integer(int_kind) :: i, j, n, ib, ie, jb, je, iblock + integer(int_kind) :: iglob, jglob + integer(int_kind) :: blockID, numBlocks + type (block) :: this_block + + real(dbl_kind) ,allocatable :: array1x(:,:,:), array1y(:,:,:) + real(dbl_kind) ,allocatable :: array2x(:,:,:), array2y(:,:,:) + real(dbl_kind) ,allocatable :: array3x(:,:,:), array3y(:,:,:) + real(dbl_kind) :: amin, amax, errtol, errx, erry + real(dbl_kind) :: deltax0, deltay0, deltax, deltay + + integer(int_kind) :: npes, ierr, ntask, ntest + integer(int_kind), parameter :: maxtest = 36 + integer(int_kind) :: errorflag0,errorflag(maxtest),gflag + character(len=32) :: stringflag(maxtest) + integer(int_kind), parameter :: & + passflag = 0, & + failflag = 1 + integer(int_kind), parameter :: navg = 12 + character(len=8) :: avgname(navg) + logical, allocatable :: dmask(:,:,:,:) + + real(dbl_kind), parameter :: fillval = -1.0e36_dbl_kind + real(dbl_kind), parameter :: testconst = 100._dbl_kind + real(dbl_kind), parameter :: errtolconst = 0.01_dbl_kind ! error tolerance relative to const field + real(dbl_kind), parameter :: errtolijind = 0.65_dbl_kind ! absolute error tolerance for ij index field + real(dbl_kind), parameter :: errtolarea = 0.06_dbl_kind ! relative error tolerance for area field ratio + character(len=*), parameter :: subname='(gridavgchk)' + + !----------------------------------------------------------------- + ! Initialize CICE + !----------------------------------------------------------------- + + call CICE_Initialize + npes = get_num_procs() + + !----------------------------------------------------------------- + ! Testing + !----------------------------------------------------------------- + + if (my_task == master_task) then + write(6,*) ' ' + write(6,*) '==========================================================' + write(6,*) ' ' + write(6,*) 'RunningUnitTest GRIDAVGCHK' + write(6,*) ' ' + write(6,*) ' npes = ',npes + write(6,*) ' my_task = ',my_task + write(6,*) ' nx_global = ',nx_global + write(6,*) ' ny_global = ',ny_global + write(6,*) ' block_size_x = ',block_size_x + write(6,*) ' block_size_y = ',block_size_y + write(6,*) ' nblocks_tot = ',nblocks_tot + write(6,*) ' ' + endif + + errorflag0 = passflag + errorflag = passflag + stringflag = ' ' + + ! --------------------------- + ! TEST GRID AVERAGES + ! 
--------------------------- + + if (my_task == master_task) write(6,*) ' ' + + allocate(array1x(nx_block,ny_block,max_blocks)) + allocate(array1y(nx_block,ny_block,max_blocks)) + allocate(array2x(nx_block,ny_block,max_blocks)) + allocate(array2y(nx_block,ny_block,max_blocks)) + allocate(array3x(nx_block,ny_block,max_blocks)) + allocate(array3y(nx_block,ny_block,max_blocks)) + + call ice_distributionGet(distrb_info, numLocalBlocks = numBlocks) + + allocate(dmask(nx_block,ny_block,max_blocks,navg)) + avgname(1) = 'T2U'; dmask(:,:,:,1) = umask(:,:,:) + avgname(2) = 'T2N'; dmask(:,:,:,2) = nmask(:,:,:) + avgname(3) = 'T2E'; dmask(:,:,:,3) = emask(:,:,:) + avgname(4) = 'U2T'; dmask(:,:,:,4) = tmask(:,:,:) + avgname(5) = 'U2N'; dmask(:,:,:,5) = nmask(:,:,:) + avgname(6) = 'U2E'; dmask(:,:,:,6) = emask(:,:,:) + avgname(7) = 'N2T'; dmask(:,:,:,7) = tmask(:,:,:) + avgname(8) = 'N2U'; dmask(:,:,:,8) = umask(:,:,:) + avgname(9) = 'N2E'; dmask(:,:,:,9) = emask(:,:,:) + avgname(10) = 'E2T'; dmask(:,:,:,10) = tmask(:,:,:) + avgname(11) = 'E2U'; dmask(:,:,:,11) = umask(:,:,:) + avgname(12) = 'E2N'; dmask(:,:,:,12) = nmask(:,:,:) + + ntest = 0 + + !---------------- + ! Test constant field + !---------------- + + if (my_task == master_task) then + write(6,*) '' + write(6,*) 'TEST constant field' + endif + + array1x = testconst + + do n = 1,navg + ntest = ntest + 1 + + stringflag(ntest) = trim(avgname(n))//' const' + if (my_task == master_task) then + write(6,*) '' + write(6,*) trim(stringflag(ntest)),' test ',ntest + endif + + array2x = c0 + call grid_average_X2Y(trim(avgname(n)),array1x,array2x) + + array3x = c0 + do iblock = 1,numBlocks + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + do j = jb,je + jglob = this_block%j_glob(j) + errtol = errtolconst * testconst + do i = ib,ie + iglob = this_block%i_glob(i) + array3x(i,j,iblock) = array2x(i,j,iblock) - array1x(i,j,iblock) + errx = abs(array3x(i,j,iblock)) + if (dmask(i,j,iblock,n) .and. errx > errtol) then + errorflag(ntest) = failflag + errorflag0 = failflag + write(100+my_task,*) '' + write(100+my_task,100) 'error const '//trim(avgname(n)),my_task,iblock,i,j,iglob,jglob + write(100+my_task,101) 'value, error ',array2x(i,j,iblock),errx + endif + enddo + enddo + enddo + amin = global_minval(array1x, distrb_info) + amax = global_maxval(array1x, distrb_info) + if (my_task == master_task) write(6,102) 'input min/max = ',amin,amax + amin = global_minval(array2x, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array2x, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,102) 'result min/max = ',amin,amax + amin = global_minval(array3x, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array3x, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,102) 'error min/max = ',amin,amax + enddo + + !---------------- + ! Test global i, j fields + !---------------- + + if (my_task == master_task) then + write(6,*) '' + write(6,*) 'TEST global i, j fields' + endif + + do iblock = 1,numBlocks + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi +! write(6,*) 'tcx1x ',my_task,iblock,minval(this_block%i_glob(ib:ie)),maxval(this_block%i_glob(ib:ie)) +! 
write(6,*) 'tcx1y ',my_task,iblock,minval(this_block%j_glob(jb:je)),maxval(this_block%j_glob(jb:je)) + do j = jb,je + do i = ib,ie + array1x(i,j,iblock) = real(this_block%i_glob(i),kind=dbl_kind) + array1y(i,j,iblock) = real(this_block%j_glob(j),kind=dbl_kind) + enddo + enddo + enddo + + call ice_HaloUpdate(array1x, halo_info, field_loc_center, field_type_scalar, fillval) + call ice_HaloUpdate(array1y, halo_info, field_loc_center, field_type_scalar, fillval) + + do n = 1,navg + ntest = ntest + 1 + + stringflag(ntest) = trim(avgname(n))//' ijind' + if (my_task == master_task) then + write(6,*) '' + write(6,*) trim(stringflag(ntest)),' test ',ntest + endif + + deltax0 = 0.0_dbl_kind + deltay0 = 0.0_dbl_kind + if (avgname(n) == 'T2U' .or. & + avgname(n) == 'T2E' .or. & + avgname(n) == 'N2U' .or. & + avgname(n) == 'N2E') then + deltax0 = 0.5_dbl_kind + elseif (avgname(n) == 'U2T' .or. & + avgname(n) == 'U2N' .or. & + avgname(n) == 'E2T' .or. & + avgname(n) == 'E2N') then + deltax0 = -0.5_dbl_kind + endif + if (avgname(n) == 'T2U' .or. & + avgname(n) == 'T2N' .or. & + avgname(n) == 'E2U' .or. & + avgname(n) == 'E2N') then + deltay0 = 0.5_dbl_kind + elseif (avgname(n) == 'U2T' .or. & + avgname(n) == 'U2E' .or. & + avgname(n) == 'N2T' .or. & + avgname(n) == 'N2E') then + deltay0 = -0.5_dbl_kind + endif + + array2x = c0 + array2y = c0 + call grid_average_X2Y(trim(avgname(n)),array1x,array2x) + call grid_average_X2Y(trim(avgname(n)),array1y,array2y) + + array3x = c0 + errtol = errtolijind + do iblock = 1,numBlocks + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + do j = jb,je + jglob = this_block%j_glob(j) + do i = ib,ie + iglob = this_block%i_glob(i) + deltax = deltax0 + deltay = deltay0 + ! adjust deltax at wraparound + if (iglob == 1 .and. deltax < -0.01_dbl_kind) deltax = deltax0 + nx_global/c2 + if (iglob == nx_global .and. deltax > 0.01_dbl_kind) deltax = deltax0 - nx_global/c2 + array3x(i,j,iblock) = array2x(i,j,iblock)-array1x(i,j,iblock)-deltax + errx = abs(array3x(i,j,iblock)) + array3y(i,j,iblock) = array2y(i,j,iblock)-array1y(i,j,iblock)-deltay + erry = abs(array3y(i,j,iblock)) + if (dmask(i,j,iblock,n) .and. (errx > errtol .or. erry > errtol)) then + errorflag(ntest) = failflag + errorflag0 = failflag + write(100+my_task,*) '' + write(100+my_task,100) 'error ijind '//trim(avgname(n)),my_task,iblock,i,j,iglob,jglob + write(100+my_task,101) 'array2x, err',array2x(i,j,iblock),errx + write(100+my_task,101) 'array1x j+1 ',array1x(i-1,j+1,iblock),array1x(i,j+1,iblock),array1x(i+1,j+1,iblock) + write(100+my_task,101) 'array1x j ',array1x(i-1,j ,iblock),array1x(i,j ,iblock),array1x(i+1,j ,iblock) + write(100+my_task,101) 'array1x j-1 ',array1x(i-1,j-1,iblock),array1x(i,j-1,iblock),array1x(i+1,j-1,iblock) + write(100+my_task,101) 'array2y, err',array2y(i,j,iblock),erry + write(100+my_task,101) 'array1y j+1 ',array1y(i-1,j+1,iblock),array1y(i,j+1,iblock),array1y(i+1,j+1,iblock) + write(100+my_task,101) 'array1y j ',array1y(i-1,j ,iblock),array1y(i,j ,iblock),array1y(i+1,j ,iblock) + write(100+my_task,101) 'array1y j-1 ',array1y(i-1,j-1,iblock),array1y(i,j-1,iblock),array1y(i+1,j-1,iblock) +! write(100+my_task,101) 'uarea ',uarea(i,j,iblock) +! write(100+my_task,101) 'tarea j+1 ',tarea (i-1,j+1,iblock),tarea (i,j+1,iblock),tarea (i+1,j+1,iblock) +! write(100+my_task,101) 'tarea j ',tarea (i-1,j ,iblock),tarea (i,j ,iblock),tarea (i+1,j ,iblock) +! 
write(100+my_task,101) 'tarea j-1 ',tarea (i-1,j-1,iblock),tarea (i,j-1,iblock),tarea (i+1,j-1,iblock) + endif + enddo + enddo + enddo + + amin = global_minval(array1x, distrb_info) + amax = global_maxval(array1x, distrb_info) + if (my_task == master_task) write(6,102) 'i_glob min/max = ',amin,amax + amin = global_minval(array1y, distrb_info) + amax = global_maxval(array1y, distrb_info) + if (my_task == master_task) write(6,102) 'j_glob min/max = ',amin,amax + amin = global_minval(array2x, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array2x, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,102) 'i result min/max = ',amin,amax + amin = global_minval(array2y, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array2y, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,102) 'j result min/max = ',amin,amax + amin = global_minval(array3x, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array3x, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,102) 'i error min/max = ',amin,amax + amin = global_minval(array3y, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array3y, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,102) 'j error min/max = ',amin,amax + + enddo + + !---------------- + ! Test area fields + !---------------- + + if (my_task == master_task) then + write(6,*) '' + write(6,*) 'TEST area fields' + endif + + do n = 1,navg + ntest = ntest + 1 + + stringflag(ntest) = trim(avgname(n))//' area' + if (my_task == master_task) then + write(6,*) '' + write(6,*) trim(stringflag(ntest)),' test ',ntest + endif + + array1x = tarea ! input + array2y = uarea ! result + call ice_HaloUpdate(array1x, halo_info, field_loc_center, field_type_scalar, fillval) + array2x = c0 + call grid_average_X2Y('T2U',array1x,array2x) + + array3x = c1 + array3y = c1 + + do iblock = 1,numBlocks + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + do j = jb,je + jglob = this_block%j_glob(j) + do i = ib,ie + iglob = this_block%i_glob(i) + array3x(i,j,iblock) = array2x(i,j,iblock)/array2y(i,j,iblock) - c1 + errx = abs(array3x(i,j,iblock)) + if (dmask(i,j,iblock,n) .and. 
errx > errtolarea) then + errorflag(ntest) = failflag + errorflag0 = failflag + write(100+my_task,*) '' + write(100+my_task,100) 'error area '//trim(avgname(n)),my_task,iblock,i,j,iglob,jglob + write(100+my_task,101) 'out,exact,err',array2x(i,j,iblock),array2y(i,j,iblock),array3x(i,j,iblock) + endif + enddo + enddo + enddo + amin = global_minval(array1x, distrb_info) + amax = global_maxval(array1x, distrb_info) + if (my_task == master_task) write(6,103) 'input min/max = ',amin,amax + amin = global_minval(array2x, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array2x, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,103) 'output min/max = ',amin,amax + amin = global_minval(array2y, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array2y, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,103) 'exact min/max = ',amin,amax + amin = global_minval(array3x, distrb_info, dmask(:,:,:,n)) + amax = global_maxval(array3x, distrb_info, dmask(:,:,:,n)) + if (my_task == master_task) write(6,102) 'error min/max = ',amin,amax + enddo + +100 format(a,10i8) +101 format(a,3g16.7) +102 format(a,3f16.7) +103 format(a,2g16.7,f16.7) + + gflag = global_maxval(errorflag0, MPI_COMM_ICE) + errorflag0 = gflag + do n = 1,maxtest + gflag = global_maxval(errorflag(n), MPI_COMM_ICE) + errorflag(n) = gflag + enddo + + if (my_task == master_task) then + write(6,*) ' ' + write(6,*) 'GRIDAVGCHK COMPLETED SUCCESSFULLY' + do n = 1,maxtest + if (errorflag(n) == passflag) then + write(6,*) 'PASS ',trim(stringflag(n)) + else + write(6,*) 'FAIL ',trim(stringflag(n)) + endif + enddo + if (errorflag0 == passflag) then + write(6,*) 'GRIDAVGCHK TEST COMPLETED SUCCESSFULLY' + else + write(6,*) 'GRIDAVGCHK TEST FAILED' + endif + write(6,*) ' ' + write(6,*) '==========================================================' + write(6,*) ' ' + endif + + + !----------------------------------------------------------------- + ! 
Gracefully end + !----------------------------------------------------------------- + + call end_run() + + end program gridavgchk + +!======================================================================= diff --git a/configuration/scripts/Makefile b/configuration/scripts/Makefile index 51c36cee3..1633b1542 100644 --- a/configuration/scripts/Makefile +++ b/configuration/scripts/Makefile @@ -75,7 +75,7 @@ AR := ar .SUFFIXES: .SUFFIXES: .F90 .F .c .o -.PHONY: all cice libcice targets target db_files db_flags clean realclean helloworld calchk sumchk bcstchk +.PHONY: all cice libcice targets target db_files db_flags clean realclean helloworld calchk sumchk bcstchk gridavgchk all: $(EXEC) cice: $(EXEC) @@ -94,7 +94,7 @@ targets: @echo " " @echo "Supported Makefile Targets are: cice, libcice, makdep, depends, clean, realclean" @echo " Diagnostics: targets, db_files, db_flags" - @echo " Unit Tests : helloworld, calchk, sumchk, bcstchk" + @echo " Unit Tests : helloworld, calchk, sumchk, bcstchk, gridavgchk" target: targets db_files: @@ -150,6 +150,8 @@ sumchk: $(EXEC) bcstchk: $(EXEC) +gridavgchk: $(EXEC) + # this builds just a subset of source code specified explicitly and requires a separate target HWOBJS := helloworld.o diff --git a/configuration/scripts/options/set_env.gridavgchk b/configuration/scripts/options/set_env.gridavgchk new file mode 100644 index 000000000..740d1e9c3 --- /dev/null +++ b/configuration/scripts/options/set_env.gridavgchk @@ -0,0 +1,2 @@ +setenv ICE_DRVOPT unittest/gridavgchk +setenv ICE_TARGET gridavgchk diff --git a/configuration/scripts/options/set_nml.dwblockall b/configuration/scripts/options/set_nml.dwblockall new file mode 100644 index 000000000..161c86b01 --- /dev/null +++ b/configuration/scripts/options/set_nml.dwblockall @@ -0,0 +1 @@ +distribution_wght = 'blockall' diff --git a/configuration/scripts/tests/unittest_suite.ts b/configuration/scripts/tests/unittest_suite.ts index 21810a1e3..3f9a306c8 100644 --- a/configuration/scripts/tests/unittest_suite.ts +++ b/configuration/scripts/tests/unittest_suite.ts @@ -6,3 +6,5 @@ unittest gx3 1x1x25x29x16 sumchk unittest tx1 8x1 sumchk unittest gx3 4x1 bcstchk unittest gx3 1x1 bcstchk +unittest gx3 8x2 gridavgchk,dwblockall +unittest gx3 8x2 gridavgchk diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 23e6951fc..9d8e5fbc7 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -278,7 +278,8 @@ domain_nml "", "``spacecurve``", "distribute blocks via space-filling curves", "" "", "``spiralcenter``", "distribute blocks via roundrobin from center of grid outward in a spiral", "" "", "``wghtfile``", "distribute blocks based on weights specified in ``distribution_wght_file``", "" - "``distribution_wght``", "``block``", "full block size distribution weight method", "``latitude``" + "``distribution_wght``", "``block``", "full block weight method with land block elimination", "``latitude``" + "", "``blockall``", "full block weight method without land block elimination", "" "", "``latitude``", "latitude/ocean sets ``work_per_block``", "" "``distribution_wght_file``", "string", "distribution weight file when distribution_type is ``wghtfile``", "'unknown'" "``ew_boundary_type``", "``cyclic``", "periodic boundary conditions in x-direction", "``cyclic``" diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 650d3d6c7..3fb63c11d 100644 --- 
a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -452,7 +452,9 @@ block equally. This is useful in POP which always has work in each block and is written with a lot of array syntax requiring calculations over entire blocks (whether or not land is present). This option is provided in CICE as well for -direct-communication compatibility with POP. The ‘latitude’ option +direct-communication compatibility with POP. Blocks that contain 100% +land grid cells are eliminated with 'block'. The 'blockall' option is identical +to 'block' but does not do land block elimination. The ‘latitude’ option weights the blocks based on latitude and the number of ocean grid cells they contain. Many of the non-cartesian decompositions support automatic land block elimination and provide alternative ways to From 262eb60bbad672ff8b18a3d8a4960e5148e33400 Mon Sep 17 00:00:00 2001 From: apcraig Date: Fri, 5 Nov 2021 09:40:48 -0600 Subject: [PATCH 005/109] add X2YS and update gridavgchk --- .../cicedynB/infrastructure/ice_domain.F90 | 10 +- cicecore/cicedynB/infrastructure/ice_grid.F90 | 341 +++++++++++++++--- .../unittest/gridavgchk/gridavgchk.F90 | 188 +++++++--- .../scripts/machines/env.cheyenne_gnu | 2 +- .../scripts/machines/env.cheyenne_intel | 2 +- .../scripts/machines/env.cheyenne_pgi | 2 +- configuration/scripts/tests/unittest_suite.ts | 5 +- 7 files changed, 445 insertions(+), 105 deletions(-) diff --git a/cicecore/cicedynB/infrastructure/ice_domain.F90 b/cicecore/cicedynB/infrastructure/ice_domain.F90 index ae6e83195..ee7d98b50 100644 --- a/cicecore/cicedynB/infrastructure/ice_domain.F90 +++ b/cicecore/cicedynB/infrastructure/ice_domain.F90 @@ -63,6 +63,7 @@ module ice_domain maskhalo_dyn , & ! if true, use masked halo updates for dynamics maskhalo_remap , & ! if true, use masked halo updates for transport maskhalo_bound , & ! if true, use masked halo updates for bound_state + landblockelim , & ! if true, land block elimination is on orca_halogrid ! if true, input fields are haloed as defined by orca grid !----------------------------------------------------------------------- @@ -79,6 +80,7 @@ module ice_domain ! 'rake', 'spacecurve', etc distribution_wght ! method for weighting work per block ! 'block' = POP default configuration + ! 'blockall' = no land block elimination ! 'latitude' = no. ocean points * |lat| ! 'file' = read distribution_wgth_file character (char_len_long) :: & @@ -155,11 +157,12 @@ subroutine init_domain_blocks maskhalo_bound = .false. ! if true, use masked halos for bound_state add_mpi_barriers = .false. ! if true, throttle communication debug_blocks = .false. ! if true, print verbose block information - max_blocks = -1 ! max number of blocks per processor + max_blocks = -1 ! max number of blocks per processor block_size_x = -1 ! size of block in first horiz dimension block_size_y = -1 ! size of block in second horiz dimension nx_global = -1 ! NXGLOB, i-axis size ny_global = -1 ! NYGLOB, j-axis size + landblockelim = .true. ! on by default call get_fileunit(nu_nml) if (my_task == master_task) then @@ -446,6 +449,8 @@ subroutine init_domain_distribution(KMTG,ULATG) flat = 1 endif + if (distribution_wght == 'blockall') landblockelim = .false. + allocate(nocn(nblocks_tot)) if (distribution_wght == 'file') then @@ -523,7 +528,8 @@ subroutine init_domain_distribution(KMTG,ULATG) #else if (distribution_wght == 'block' .and. & ! 
POP style nocn(n) > 0) nocn(n) = nx_block*ny_block - if (distribution_wght == 'blockall') nocn(n) = nx_block*ny_block + if (distribution_wght == 'blockall') & + nocn(n) = nx_block*ny_block #endif end do endif ! distribution_wght = file diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 70329bfe7..ce5d89dbc 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -677,7 +677,7 @@ end subroutine init_grid2 subroutine popgrid use ice_blocks, only: nx_block, ny_block - use ice_constants, only: c0, c1, & + use ice_constants, only: c0, c1, p5, & field_loc_center, field_loc_NEcorner, & field_type_scalar, field_type_angle use ice_domain_size, only: max_blocks @@ -725,7 +725,7 @@ subroutine popgrid do j = jlo, jhi do i = ilo, ihi kmt(i,j,iblk) = work1(i,j,iblk) - if (kmt(i,j,iblk) >= c1) hm(i,j,iblk) = c1 + if (kmt(i,j,iblk) >= p5) hm(i,j,iblk) = c1 enddo enddo enddo @@ -2175,30 +2175,58 @@ subroutine grid_average_X2Y(X2Y,work1,work2) select case (trim(X2Y)) - case('T2U') - call grid_average_X2Y_compute('NE',work1,tarea,work2tmp,uarea) - case('T2E') - call grid_average_X2Y_compute('E' ,work1,tarea,work2tmp,earea) - case('T2N') - call grid_average_X2Y_compute('N' ,work1,tarea,work2tmp,narea) - case('U2T') - call grid_average_X2Y_compute('SW',work1,uarea,work2tmp,tarea) - case('U2E') - call grid_average_X2Y_compute('S' ,work1,uarea,work2tmp,earea) - case('U2N') - call grid_average_X2Y_compute('W' ,work1,uarea,work2tmp,narea) - case('E2T') - call grid_average_X2Y_compute('W' ,work1,earea,work2tmp,tarea) - case('E2U') - call grid_average_X2Y_compute('N' ,work1,earea,work2tmp,uarea) - case('E2N') - call grid_average_X2Y_compute('NW',work1,earea,work2tmp,narea) - case('N2T') - call grid_average_X2Y_compute('S' ,work1,narea,work2tmp,tarea) - case('N2U') - call grid_average_X2Y_compute('E' ,work1,narea,work2tmp,uarea) - case('N2E') - call grid_average_X2Y_compute('SE',work1,narea,work2tmp,earea) + ! flux unmasked + case('T2UF','T2U') + call grid_average_X2YF('NE',work1,tarea,work2tmp,uarea) + case('T2EF','T2E') + call grid_average_X2YF('E' ,work1,tarea,work2tmp,earea) + case('T2NF','T2N') + call grid_average_X2YF('N' ,work1,tarea,work2tmp,narea) + case('U2TF','U2T') + call grid_average_X2YF('SW',work1,uarea,work2tmp,tarea) + case('U2EF','U2E') + call grid_average_X2YF('S' ,work1,uarea,work2tmp,earea) + case('U2NF','U2N') + call grid_average_X2YF('W' ,work1,uarea,work2tmp,narea) + case('E2TF','E2T') + call grid_average_X2YF('W' ,work1,earea,work2tmp,tarea) + case('E2UF','E2U') + call grid_average_X2YF('N' ,work1,earea,work2tmp,uarea) + case('E2NF','E2N') + call grid_average_X2YF('NW',work1,earea,work2tmp,narea) + case('N2TF','N2T') + call grid_average_X2YF('S' ,work1,narea,work2tmp,tarea) + case('N2UF','N2U') + call grid_average_X2YF('E' ,work1,narea,work2tmp,uarea) + case('N2EF','N2E') + call grid_average_X2YF('SE',work1,narea,work2tmp,earea) + + ! 
state masked + case('T2US') + call grid_average_X2YS('NE',work1,tarea,hm ,work2tmp) + case('T2ES') + call grid_average_X2YS('E' ,work1,tarea,hm ,work2tmp) + case('T2NS') + call grid_average_X2YS('N' ,work1,tarea,hm ,work2tmp) + case('U2TS') + call grid_average_X2YS('SW',work1,uarea,uvm,work2tmp) + case('U2ES') + call grid_average_X2YS('S' ,work1,uarea,uvm,work2tmp) + case('U2NS') + call grid_average_X2YS('W' ,work1,uarea,uvm,work2tmp) + case('E2TS') + call grid_average_X2YS('W' ,work1,earea,epm,work2tmp) + case('E2US') + call grid_average_X2YS('N' ,work1,earea,epm,work2tmp) + case('E2NS') + call grid_average_X2YS('NW',work1,earea,epm,work2tmp) + case('N2TS') + call grid_average_X2YS('S' ,work1,narea,npm,work2tmp) + case('N2US') + call grid_average_X2YS('E' ,work1,narea,npm,work2tmp) + case('N2ES') + call grid_average_X2YS('SE',work1,narea,npm,work2tmp) + case default call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) end select @@ -2212,16 +2240,16 @@ subroutine grid_average_X2Y(X2Y,work1,work2) end subroutine grid_average_X2Y !======================================================================= - ! Shifts quantities from one grid to another +! State masked version, simple area weighted averager ! NOTE: Input array includes ghost cells that must be updated before ! calling this routine. ! ! author: T. Craig - subroutine grid_average_X2Y_compute(dir,work1,area1,work2,area2) + subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) - use ice_constants, only: c0, p25, p5 + use ice_constants, only: c0 character(len=*) , intent(in) :: & dir @@ -2229,15 +2257,241 @@ subroutine grid_average_X2Y_compute(dir,work1,area1,work2,area2) real (kind=dbl_kind), intent(in) :: & work1(nx_block,ny_block,max_blocks), & area1(nx_block,ny_block,max_blocks), & - area2(nx_block,ny_block,max_blocks) + mask1(nx_block,ny_block,max_blocks) real (kind=dbl_kind), intent(out) :: & work2(nx_block,ny_block,max_blocks) + ! local variables + + integer (kind=int_kind) :: & + i, j, iblk, & + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind) :: & + wtmp + type (block) :: & this_block ! 
block information for current block - character(len=*), parameter :: subname = '(grid_average_X2Y_compute)' + character(len=*), parameter :: subname = '(grid_average_X2YS)' + + work2(:,:,:) = c0 + + select case (trim(dir)) + + case('NE') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i, j, iblk)*area1(i, j, iblk) & + + mask1(i+1,j, iblk)*area1(i+1,j, iblk) & + + mask1(i, j+1,iblk)*area1(i, j+1,iblk) & + + mask1(i+1,j+1,iblk)*area1(i+1,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i, j, iblk)*work1(i, j, iblk)*area1(i, j, iblk) & + + mask1(i+1,j, iblk)*work1(i+1,j, iblk)*area1(i+1,j, iblk) & + + mask1(i, j+1,iblk)*work1(i, j+1,iblk)*area1(i, j+1,iblk) & + + mask1(i+1,j+1,iblk)*work1(i+1,j+1,iblk)*area1(i+1,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('SW') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i, j, iblk)*area1(i, j, iblk) & + + mask1(i-1,j, iblk)*area1(i-1,j, iblk) & + + mask1(i, j-1,iblk)*area1(i, j-1,iblk) & + + mask1(i-1,j-1,iblk)*area1(i-1,j-1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i, j, iblk)*work1(i, j, iblk)*area1(i, j, iblk) & + + mask1(i-1,j, iblk)*work1(i-1,j, iblk)*area1(i-1,j, iblk) & + + mask1(i, j-1,iblk)*work1(i, j-1,iblk)*area1(i, j-1,iblk) & + + mask1(i-1,j-1,iblk)*work1(i-1,j-1,iblk)*area1(i-1,j-1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('NW') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i-1,j, iblk)*area1(i-1,j, iblk) & + + mask1(i, j, iblk)*area1(i, j, iblk) & + + mask1(i-1,j+1,iblk)*area1(i-1,j+1,iblk) & + + mask1(i, j+1,iblk)*area1(i ,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i-1,j, iblk)*work1(i-1,j, iblk)*area1(i-1,j, iblk) & + + mask1(i, j, iblk)*work1(i, j, iblk)*area1(i, j, iblk) & + + mask1(i-1,j+1,iblk)*work1(i-1,j+1,iblk)*area1(i-1,j+1,iblk) & + + mask1(i, j+1,iblk)*work1(i, j+1,iblk)*area1(i ,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('SE') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i ,j-1,iblk)*area1(i ,j-1,iblk) & + + mask1(i+1,j-1,iblk)*area1(i+1,j-1,iblk) & + + mask1(i ,j ,iblk)*area1(i ,j, iblk) & + + mask1(i+1,j ,iblk)*area1(i+1,j, iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i ,j-1,iblk)*work1(i ,j-1,iblk)*area1(i ,j-1,iblk) & + + mask1(i+1,j-1,iblk)*work1(i+1,j-1,iblk)*area1(i+1,j-1,iblk) & + + mask1(i ,j ,iblk)*work1(i ,j ,iblk)*area1(i ,j, iblk) & + + mask1(i+1,j ,iblk)*work1(i+1,j ,iblk)*area1(i+1,j, iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('E') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + 
this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i, j,iblk)*area1(i, j,iblk) & + + mask1(i+1,j,iblk)*area1(i+1,j,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i, j,iblk)*work1(i, j,iblk)*area1(i, j,iblk) & + + mask1(i+1,j,iblk)*work1(i+1,j,iblk)*area1(i+1,j,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('W') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i-1,j,iblk)*area1(i-1,j,iblk) & + + mask1(i, j,iblk)*area1(i, j,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i-1,j,iblk)*work1(i-1,j,iblk)*area1(i-1,j,iblk) & + + mask1(i, j,iblk)*work1(i, j,iblk)*area1(i, j,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('N') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i,j, iblk)*area1(i,j, iblk) & + + mask1(i,j+1,iblk)*area1(i,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i,j, iblk)*work1(i,j, iblk)*area1(i,j, iblk) & + + mask1(i,j+1,iblk)*work1(i,j+1,iblk)*area1(i,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('S') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1(i,j-1,iblk)*area1(i,j-1,iblk) & + + mask1(i,j, iblk)*area1(i,j, iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1(i,j-1,iblk)*work1(i,j-1,iblk)*area1(i,j-1,iblk) & + + mask1(i,j, iblk)*work1(i,j, iblk)*area1(i,j, iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case default + call abort_ice(subname//'ERROR: unknown option '//trim(dir)) + end select + + end subroutine grid_average_X2YS + +!======================================================================= +! Shifts quantities from one grid to another +! Flux masked, original implementation based on earlier t2u and u2t versions +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2YF(dir,work1,area1,work2,area2) + + use ice_constants, only: c0, p25, p5 + + character(len=*) , intent(in) :: & + dir + + real (kind=dbl_kind), intent(in) :: & + work1(nx_block,ny_block,max_blocks), & + area1(nx_block,ny_block,max_blocks), & + area2(nx_block,ny_block,max_blocks) + + real (kind=dbl_kind), intent(out) :: & + work2(nx_block,ny_block,max_blocks) ! local variables @@ -2245,6 +2499,11 @@ subroutine grid_average_X2Y_compute(dir,work1,area1,work2,area2) i, j, iblk, & ilo,ihi,jlo,jhi ! beginning and end of physical domain + type (block) :: & + this_block ! 
block information for current block + + character(len=*), parameter :: subname = '(grid_average_X2YF)' + + work2(:,:,:) = c0 select case (trim(dir)) @@ -2281,10 +2540,10 @@ subroutine grid_average_X2Y_compute(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p25 * & - (work1(i, j, iblk) * area1(i, j, iblk) & - + work1(i-1,j, iblk) * area1(i-1,j, iblk) & - + work1(i, j-1,iblk) * area1(i, j-1,iblk) & - + work1(i-1,j-1,iblk) * area1(i-1,j-1,iblk)) & + (work1(i, j, iblk)*area1(i, j, iblk) & + + work1(i-1,j, iblk)*area1(i-1,j, iblk) & + + work1(i, j-1,iblk)*area1(i, j-1,iblk) & + + work1(i-1,j-1,iblk)*area1(i-1,j-1,iblk)) & / area2(i, j, iblk) enddo enddo @@ -2323,10 +2582,10 @@ subroutine grid_average_X2Y_compute(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p25 * & - (work1(i ,j-1,iblk) * area1(i ,j-1,iblk) & - + work1(i+1,j-1,iblk) * area1(i+1,j-1,iblk) & - + work1(i ,j ,iblk) * area1(i ,j, iblk) & - + work1(i+1,j ,iblk) * area1(i+1,j, iblk)) & + (work1(i ,j-1,iblk)*area1(i ,j-1,iblk) & + + work1(i+1,j-1,iblk)*area1(i+1,j-1,iblk) & + + work1(i ,j ,iblk)*area1(i ,j, iblk) & + + work1(i+1,j ,iblk)*area1(i+1,j, iblk)) & / area2(i, j, iblk) enddo enddo @@ -2410,10 +2669,10 @@ subroutine grid_average_X2Y_compute(dir,work1,area1,work2,area2) !$OMP END PARALLEL DO case default - call abort_ice(subname//'ERROR: unknown dir '//trim(dir)) + call abort_ice(subname//'ERROR: unknown option '//trim(dir)) end select - end subroutine grid_average_X2Y_compute + end subroutine grid_average_X2YF !======================================================================= ! The following code is used for obtaining the coordinates of the grid diff --git a/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 index e4c650649..73ff10cad 100644 --- a/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 +++ b/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 @@ -5,6 +5,13 @@ program gridavgchk ! using CICE_InitMod (from the standalone model) to read/initialize ! a CICE grid/configuration. Then methods in grid_average_X2Y ! are verified using hardwired inputs with known outputs. + ! There are lots of issues here + ! areas (T, U, N, E) are not locally conservative, which affects X2YF + ! X2YF is unmasked which can create havoc in U2T type directions + ! X2YS is masked but there can be no active cells to average (for instance, + ! single gridcell wide channels U2T where result is zero) + ! land block elimination can lead to missing data on halo + ! This test tries to deal with all these things.... 
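+  ! Sketch of the two averager forms exercised below (from ice_grid, written
+  ! here for a 4-point direction such as 'NE'):
+  !   X2YF (flux, unmasked):  work2 = p25*sum(work1*area1)/area2
+  !   X2YS (state, masked):   wtmp  = sum(mask1*area1)
+  !                           work2 = sum(mask1*work1*area1)/wtmp  (if wtmp /= c0)
+  ! X2YS leaves work2 at c0 where no surrounding source points are active,
+  ! which is why the checks below ignore points where the result is c0.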
use CICE_InitMod use ice_kinds_mod, only: int_kind, dbl_kind @@ -17,11 +24,12 @@ program gridavgchk use ice_distribution, only: ice_distributionGetBlockID, ice_distributionGet use ice_domain_size, only: nx_global, ny_global, & block_size_x, block_size_y, max_blocks - use ice_domain, only: distrb_info, halo_info + use ice_domain, only: distrb_info, halo_info, landblockelim use ice_fileunits, only: bfbflag use ice_exit, only: abort_ice, end_run use ice_global_reductions, only: global_minval, global_maxval - use ice_grid, only: grid_average_X2Y,tarea,uarea,narea,earea,tmask,umask,nmask,emask + use ice_grid, only: grid_average_X2Y,tarea,uarea,narea,earea,tmask,umask,nmask,emask, & + hm,uvm implicit none @@ -36,22 +44,20 @@ program gridavgchk real(dbl_kind) :: amin, amax, errtol, errx, erry real(dbl_kind) :: deltax0, deltay0, deltax, deltay - integer(int_kind) :: npes, ierr, ntask, ntest - integer(int_kind), parameter :: maxtest = 36 - integer(int_kind) :: errorflag0,errorflag(maxtest),gflag - character(len=32) :: stringflag(maxtest) + integer(int_kind) :: npes, ierr, ntask, ntest, maxtest, navg + integer(int_kind), parameter :: maxgroup = 3 + integer(int_kind) :: errorflag0,gflag + integer(int_kind), allocatable :: errorflag(:) + character(len=32), allocatable :: stringflag(:) integer(int_kind), parameter :: & passflag = 0, & failflag = 1 - integer(int_kind), parameter :: navg = 12 - character(len=8) :: avgname(navg) + character(len=8), allocatable :: avgname(:) logical, allocatable :: dmask(:,:,:,:) + real(dbl_kind), allocatable :: errtolconst(:),errtolijind(:),errtolarea(:) real(dbl_kind), parameter :: fillval = -1.0e36_dbl_kind real(dbl_kind), parameter :: testconst = 100._dbl_kind - real(dbl_kind), parameter :: errtolconst = 0.01_dbl_kind ! error tolerance relative to const field - real(dbl_kind), parameter :: errtolijind = 0.65_dbl_kind ! absolute error tolerance for ij index field - real(dbl_kind), parameter :: errtolarea = 0.06_dbl_kind ! relative error tolerance for area field ratio character(len=*), parameter :: subname='(gridavgchk)' !----------------------------------------------------------------- @@ -61,6 +67,57 @@ program gridavgchk call CICE_Initialize npes = get_num_procs() + navg = 12 + if (.not. landblockelim) navg=24 ! no land block elimination, can test F mappings + allocate(avgname(navg)) + allocate(errtolconst(navg)) + allocate(errtolijind(navg)) + allocate(errtolarea(navg)) + maxtest = maxgroup * navg + allocate(errorflag(maxtest)) + allocate(stringflag(maxtest)) + allocate(dmask(nx_block,ny_block,max_blocks,navg)) + + errtolconst(1:12) = 0.0001_dbl_kind + errtolijind(1:12) = 0.51_dbl_kind + errtolarea (1:12) = 0.75_dbl_kind + if (nx_global > 200 .and. 
ny_global > 200) then + errtolarea (1:12) = 0.20_dbl_kind + endif + avgname(1) = 'T2US'; dmask(:,:,:,1) = umask(:,:,:) + avgname(2) = 'T2NS'; dmask(:,:,:,2) = nmask(:,:,:) + avgname(3) = 'T2ES'; dmask(:,:,:,3) = emask(:,:,:) + avgname(4) = 'U2TS'; dmask(:,:,:,4) = tmask(:,:,:) + avgname(5) = 'U2NS'; dmask(:,:,:,5) = nmask(:,:,:) + avgname(6) = 'U2ES'; dmask(:,:,:,6) = emask(:,:,:) + avgname(7) = 'N2TS'; dmask(:,:,:,7) = tmask(:,:,:) + avgname(8) = 'N2US'; dmask(:,:,:,8) = umask(:,:,:) + avgname(9) = 'N2ES'; dmask(:,:,:,9) = emask(:,:,:) + avgname(10) = 'E2TS'; dmask(:,:,:,10) = tmask(:,:,:) + avgname(11) = 'E2US'; dmask(:,:,:,11) = umask(:,:,:) + avgname(12) = 'E2NS'; dmask(:,:,:,12) = nmask(:,:,:) + if (navg > 12) then + errtolconst(13:24) = 0.008_dbl_kind + errtolijind(13:24) = 0.65_dbl_kind + errtolarea (13:24) = 0.55_dbl_kind + if (nx_global > 200 .and. ny_global > 200) then + errtolijind(13:24) = 0.25_dbl_kind + errtolarea (13:24) = 0.15_dbl_kind + endif + avgname(13) = 'T2UF'; dmask(:,:,:,13) = umask(:,:,:) + avgname(14) = 'T2NF'; dmask(:,:,:,14) = nmask(:,:,:) + avgname(15) = 'T2EF'; dmask(:,:,:,15) = emask(:,:,:) + avgname(16) = 'U2TF'; dmask(:,:,:,16) = tmask(:,:,:) + avgname(17) = 'U2NF'; dmask(:,:,:,17) = nmask(:,:,:) + avgname(18) = 'U2EF'; dmask(:,:,:,18) = emask(:,:,:) + avgname(19) = 'N2TF'; dmask(:,:,:,19) = tmask(:,:,:) + avgname(20) = 'N2UF'; dmask(:,:,:,20) = umask(:,:,:) + avgname(21) = 'N2EF'; dmask(:,:,:,21) = emask(:,:,:) + avgname(22) = 'E2TF'; dmask(:,:,:,22) = tmask(:,:,:) + avgname(23) = 'E2UF'; dmask(:,:,:,23) = umask(:,:,:) + avgname(24) = 'E2NF'; dmask(:,:,:,24) = nmask(:,:,:) + endif + !----------------------------------------------------------------- ! Testing !----------------------------------------------------------------- @@ -78,6 +135,7 @@ program gridavgchk write(6,*) ' block_size_x = ',block_size_x write(6,*) ' block_size_y = ',block_size_y write(6,*) ' nblocks_tot = ',nblocks_tot + write(6,*) ' maxtest = ',maxtest write(6,*) ' ' endif @@ -100,20 +158,6 @@ program gridavgchk call ice_distributionGet(distrb_info, numLocalBlocks = numBlocks) - allocate(dmask(nx_block,ny_block,max_blocks,navg)) - avgname(1) = 'T2U'; dmask(:,:,:,1) = umask(:,:,:) - avgname(2) = 'T2N'; dmask(:,:,:,2) = nmask(:,:,:) - avgname(3) = 'T2E'; dmask(:,:,:,3) = emask(:,:,:) - avgname(4) = 'U2T'; dmask(:,:,:,4) = tmask(:,:,:) - avgname(5) = 'U2N'; dmask(:,:,:,5) = nmask(:,:,:) - avgname(6) = 'U2E'; dmask(:,:,:,6) = emask(:,:,:) - avgname(7) = 'N2T'; dmask(:,:,:,7) = tmask(:,:,:) - avgname(8) = 'N2U'; dmask(:,:,:,8) = umask(:,:,:) - avgname(9) = 'N2E'; dmask(:,:,:,9) = emask(:,:,:) - avgname(10) = 'E2T'; dmask(:,:,:,10) = tmask(:,:,:) - avgname(11) = 'E2U'; dmask(:,:,:,11) = umask(:,:,:) - avgname(12) = 'E2N'; dmask(:,:,:,12) = nmask(:,:,:) - ntest = 0 !---------------- @@ -133,7 +177,7 @@ program gridavgchk stringflag(ntest) = trim(avgname(n))//' const' if (my_task == master_task) then write(6,*) '' - write(6,*) trim(stringflag(ntest)),' test ',ntest + write(6,*) trim(stringflag(ntest)),' test ',ntest,errtolconst(n) endif array2x = c0 @@ -149,12 +193,15 @@ program gridavgchk je = this_block%jhi do j = jb,je jglob = this_block%j_glob(j) - errtol = errtolconst * testconst + errtol = errtolconst(n) * testconst do i = ib,ie iglob = this_block%i_glob(i) array3x(i,j,iblock) = array2x(i,j,iblock) - array1x(i,j,iblock) + ! 
if array2 is c0, then there are no valid surrounding points and ignore it + if (array2x(i,j,iblock) == c0) array3x(i,j,iblock) = c0 errx = abs(array3x(i,j,iblock)) - if (dmask(i,j,iblock,n) .and. errx > errtol) then + ! flag points that are active and error numerically + if (dmask(i,j,iblock,n) .and. errx > errtol .and. array2x(i,j,iblock) /= c0) then errorflag(ntest) = failflag errorflag0 = failflag write(100+my_task,*) '' @@ -191,8 +238,6 @@ program gridavgchk ie = this_block%ihi jb = this_block%jlo je = this_block%jhi -! write(6,*) 'tcx1x ',my_task,iblock,minval(this_block%i_glob(ib:ie)),maxval(this_block%i_glob(ib:ie)) -! write(6,*) 'tcx1y ',my_task,iblock,minval(this_block%j_glob(jb:je)),maxval(this_block%j_glob(jb:je)) do j = jb,je do i = ib,ie array1x(i,j,iblock) = real(this_block%i_glob(i),kind=dbl_kind) @@ -204,37 +249,53 @@ program gridavgchk call ice_HaloUpdate(array1x, halo_info, field_loc_center, field_type_scalar, fillval) call ice_HaloUpdate(array1y, halo_info, field_loc_center, field_type_scalar, fillval) + ! Overwrite the i wraparound points to deal with i/j index average on wraparound + do iblock = 1,numBlocks + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + do j = 1,ny_block + do i = ib,ie + if (this_block%i_glob(i) == 1 ) array1x(i-1,j,iblock) = 0 + if (this_block%i_glob(i) == nx_global) array1x(i+1,j,iblock) = nx_global+1 + enddo + enddo + enddo + do n = 1,navg ntest = ntest + 1 stringflag(ntest) = trim(avgname(n))//' ijind' if (my_task == master_task) then write(6,*) '' - write(6,*) trim(stringflag(ntest)),' test ',ntest + write(6,*) trim(stringflag(ntest)),' test ',ntest,errtolijind(n) endif deltax0 = 0.0_dbl_kind deltay0 = 0.0_dbl_kind - if (avgname(n) == 'T2U' .or. & - avgname(n) == 'T2E' .or. & - avgname(n) == 'N2U' .or. & - avgname(n) == 'N2E') then + if (avgname(n)(1:3) == 'T2U' .or. & + avgname(n)(1:3) == 'T2E' .or. & + avgname(n)(1:3) == 'N2U' .or. & + avgname(n)(1:3) == 'N2E') then deltax0 = 0.5_dbl_kind - elseif (avgname(n) == 'U2T' .or. & - avgname(n) == 'U2N' .or. & - avgname(n) == 'E2T' .or. & - avgname(n) == 'E2N') then + elseif (avgname(n)(1:3) == 'U2T' .or. & + avgname(n)(1:3) == 'U2N' .or. & + avgname(n)(1:3) == 'E2T' .or. & + avgname(n)(1:3) == 'E2N') then deltax0 = -0.5_dbl_kind endif - if (avgname(n) == 'T2U' .or. & - avgname(n) == 'T2N' .or. & - avgname(n) == 'E2U' .or. & - avgname(n) == 'E2N') then + if (avgname(n)(1:3) == 'T2U' .or. & + avgname(n)(1:3) == 'T2N' .or. & + avgname(n)(1:3) == 'E2U' .or. & + avgname(n)(1:3) == 'E2N') then deltay0 = 0.5_dbl_kind - elseif (avgname(n) == 'U2T' .or. & - avgname(n) == 'U2E' .or. & - avgname(n) == 'N2T' .or. & - avgname(n) == 'N2E') then + elseif (avgname(n)(1:3) == 'U2T' .or. & + avgname(n)(1:3) == 'U2E' .or. & + avgname(n)(1:3) == 'N2T' .or. & + avgname(n)(1:3) == 'N2E') then deltay0 = -0.5_dbl_kind endif @@ -244,7 +305,7 @@ program gridavgchk call grid_average_X2Y(trim(avgname(n)),array1y,array2y) array3x = c0 - errtol = errtolijind + errtol = errtolijind(n) do iblock = 1,numBlocks call ice_distributionGetBlockID(distrb_info, iblock, blockID) this_block = get_block(blockID, blockID) @@ -258,13 +319,15 @@ program gridavgchk iglob = this_block%i_glob(i) deltax = deltax0 deltay = deltay0 - ! adjust deltax at wraparound - if (iglob == 1 .and. deltax < -0.01_dbl_kind) deltax = deltax0 + nx_global/c2 - if (iglob == nx_global .and. 
deltax > 0.01_dbl_kind) deltax = deltax0 - nx_global/c2 array3x(i,j,iblock) = array2x(i,j,iblock)-array1x(i,j,iblock)-deltax + ! if array2 is c0, then there are no valid surrounding points and ignore it + if (array2x(i,j,iblock) == c0) array3x(i,j,iblock) = c0 errx = abs(array3x(i,j,iblock)) array3y(i,j,iblock) = array2y(i,j,iblock)-array1y(i,j,iblock)-deltay + ! if array2 is c0, then there are no valid surrounding points and ignore it + if (array2y(i,j,iblock) == c0) array3y(i,j,iblock) = c0 erry = abs(array3y(i,j,iblock)) + ! flag points that are active and error numerically if (dmask(i,j,iblock,n) .and. (errx > errtol .or. erry > errtol)) then errorflag(ntest) = failflag errorflag0 = failflag @@ -278,10 +341,16 @@ program gridavgchk write(100+my_task,101) 'array1y j+1 ',array1y(i-1,j+1,iblock),array1y(i,j+1,iblock),array1y(i+1,j+1,iblock) write(100+my_task,101) 'array1y j ',array1y(i-1,j ,iblock),array1y(i,j ,iblock),array1y(i+1,j ,iblock) write(100+my_task,101) 'array1y j-1 ',array1y(i-1,j-1,iblock),array1y(i,j-1,iblock),array1y(i+1,j-1,iblock) -! write(100+my_task,101) 'uarea ',uarea(i,j,iblock) -! write(100+my_task,101) 'tarea j+1 ',tarea (i-1,j+1,iblock),tarea (i,j+1,iblock),tarea (i+1,j+1,iblock) -! write(100+my_task,101) 'tarea j ',tarea (i-1,j ,iblock),tarea (i,j ,iblock),tarea (i+1,j ,iblock) -! write(100+my_task,101) 'tarea j-1 ',tarea (i-1,j-1,iblock),tarea (i,j-1,iblock),tarea (i+1,j-1,iblock) + write(100+my_task,101) 'tarea ',tarea(i,j,iblock) + write(100+my_task,101) 'uarea j+1 ',uarea (i-1,j+1,iblock),uarea (i,j+1,iblock),uarea (i+1,j+1,iblock) + write(100+my_task,101) 'uarea j ',uarea (i-1,j ,iblock),uarea (i,j ,iblock),uarea (i+1,j ,iblock) + write(100+my_task,101) 'uarea j-1 ',uarea (i-1,j-1,iblock),uarea (i,j-1,iblock),uarea (i+1,j-1,iblock) + write(100+my_task,101) 'hm j+1 ',hm (i-1,j+1,iblock),hm (i,j+1,iblock),hm (i+1,j+1,iblock) + write(100+my_task,101) 'hm j ',hm (i-1,j ,iblock),hm (i,j ,iblock),hm (i+1,j ,iblock) + write(100+my_task,101) 'hm j-1 ',hm (i-1,j-1,iblock),hm (i,j-1,iblock),hm (i+1,j-1,iblock) + write(100+my_task,101) 'uvm j+1 ',uvm (i-1,j+1,iblock),uvm (i,j+1,iblock),uvm (i+1,j+1,iblock) + write(100+my_task,101) 'uvm j ',uvm (i-1,j ,iblock),uvm (i,j ,iblock),uvm (i+1,j ,iblock) + write(100+my_task,101) 'uvm j-1 ',uvm (i-1,j-1,iblock),uvm (i,j-1,iblock),uvm (i+1,j-1,iblock) endif enddo enddo @@ -323,14 +392,14 @@ program gridavgchk stringflag(ntest) = trim(avgname(n))//' area' if (my_task == master_task) then write(6,*) '' - write(6,*) trim(stringflag(ntest)),' test ',ntest + write(6,*) trim(stringflag(ntest)),' test ',ntest,errtolarea(n) endif array1x = tarea ! input array2y = uarea ! result call ice_HaloUpdate(array1x, halo_info, field_loc_center, field_type_scalar, fillval) array2x = c0 - call grid_average_X2Y('T2U',array1x,array2x) + call grid_average_X2Y(trim(avgname(n)),array1x,array2x) array3x = c1 array3y = c1 @@ -347,8 +416,11 @@ program gridavgchk do i = ib,ie iglob = this_block%i_glob(i) array3x(i,j,iblock) = array2x(i,j,iblock)/array2y(i,j,iblock) - c1 + ! if array2 is c0, then there are no valid surrounding points and ignore it + if (array2x(i,j,iblock) == c0) array3x(i,j,iblock) = c0 errx = abs(array3x(i,j,iblock)) - if (dmask(i,j,iblock,n) .and. errx > errtolarea) then + ! flag points that are active and error numerically + if (dmask(i,j,iblock,n) .and. 
errx > errtolarea(n)) then errorflag(ntest) = failflag errorflag0 = failflag write(100+my_task,*) '' diff --git a/configuration/scripts/machines/env.cheyenne_gnu b/configuration/scripts/machines/env.cheyenne_gnu index 8ddc443b1..c68a87d5c 100755 --- a/configuration/scripts/machines/env.cheyenne_gnu +++ b/configuration/scripts/machines/env.cheyenne_gnu @@ -53,7 +53,7 @@ setenv ICE_MACHINE_SUBMIT "qsub" setenv ICE_MACHINE_ACCT P00000000 setenv ICE_MACHINE_QUEUE "regular" setenv ICE_MACHINE_TPNODE 36 -setenv ICE_MACHINE_BLDTHRDS 1 +setenv ICE_MACHINE_BLDTHRDS 8 setenv ICE_MACHINE_QSTAT "qstat " # For lcov diff --git a/configuration/scripts/machines/env.cheyenne_intel b/configuration/scripts/machines/env.cheyenne_intel index 28df6647d..d6eeb67ea 100755 --- a/configuration/scripts/machines/env.cheyenne_intel +++ b/configuration/scripts/machines/env.cheyenne_intel @@ -53,5 +53,5 @@ setenv ICE_MACHINE_SUBMIT "qsub" setenv ICE_MACHINE_ACCT P00000000 setenv ICE_MACHINE_QUEUE "regular" setenv ICE_MACHINE_TPNODE 36 -setenv ICE_MACHINE_BLDTHRDS 1 +setenv ICE_MACHINE_BLDTHRDS 8 setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.cheyenne_pgi b/configuration/scripts/machines/env.cheyenne_pgi index d492129fb..9c559b90c 100755 --- a/configuration/scripts/machines/env.cheyenne_pgi +++ b/configuration/scripts/machines/env.cheyenne_pgi @@ -53,5 +53,5 @@ setenv ICE_MACHINE_SUBMIT "qsub" setenv ICE_MACHINE_ACCT P00000000 setenv ICE_MACHINE_QUEUE "regular" setenv ICE_MACHINE_TPNODE 36 -setenv ICE_MACHINE_BLDTHRDS 1 +setenv ICE_MACHINE_BLDTHRDS 8 setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/tests/unittest_suite.ts b/configuration/scripts/tests/unittest_suite.ts index 3f9a306c8..76c9f4312 100644 --- a/configuration/scripts/tests/unittest_suite.ts +++ b/configuration/scripts/tests/unittest_suite.ts @@ -7,4 +7,7 @@ unittest tx1 8x1 sumchk unittest gx3 4x1 bcstchk unittest gx3 1x1 bcstchk unittest gx3 8x2 gridavgchk,dwblockall -unittest gx3 8x2 gridavgchk +unittest gx3 12x1 gridavgchk +unittest gx1 28x1 gridavgchk,dwblockall +unittest gx1 16x2 gridavgchk +unittest gbox128 8x2 gridavgchk From 6bffe27a1cb384d5e608fbe3a33800e1c0d5ad5c Mon Sep 17 00:00:00 2001 From: apcraig Date: Fri, 5 Nov 2021 12:19:09 -0600 Subject: [PATCH 006/109] add grid_system namelist, update computation of areas to use local data on halos and avoid halo updates to improve halo values when neighbor cells are land block eliminated --- cicecore/cicedynB/analysis/ice_history.F90 | 11 +++++- cicecore/cicedynB/general/ice_init.F90 | 13 +++++-- cicecore/cicedynB/infrastructure/ice_grid.F90 | 34 ++++++------------- configuration/scripts/ice_in | 1 + 4 files changed, 33 insertions(+), 26 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90 index a2806b429..cf6b470d1 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -68,6 +68,7 @@ subroutine init_hist (dt) use ice_domain_size, only: max_blocks, max_nstrm, nilyr, nslyr, nblyr, ncat, nfsd use ice_dyn_shared, only: kdyn use ice_flux, only: mlt_onset, frz_onset, albcnt, snwcnt + use ice_grid, only: grid_system use ice_history_shared ! everything use ice_history_mechred, only: init_hist_mechred_2D, init_hist_mechred_3Dc use ice_history_pond, only: init_hist_pond_2D, init_hist_pond_3Dc @@ -547,11 +548,19 @@ subroutine init_hist (dt) "snow/ice surface temperature", & "averaged with Tf if no ice is present", c1, c0, & ns1, f_Tsfc) - + +! 
tcraig, just to test capability, tcx +! if (grid_system == 'CD') then +! call define_hist_field(n_aice,"aice","1",nstr2D, ncstr, & +! "ice area (aggregate)", & +! "none", c1, c0, & +! ns1, f_aice) +! else call define_hist_field(n_aice,"aice","1",tstr2D, tcstr, & "ice area (aggregate)", & "none", c1, c0, & ns1, f_aice) +! endif call define_hist_field(n_uvel,"uvel","m/s",ustr2D, ucstr, & "ice velocity (x)", & diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 2e67af51c..6754f97db 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -99,7 +99,7 @@ subroutine input_data use ice_grid, only: grid_file, gridcpl_file, kmt_file, & bathymetry_file, use_bathymetry, & bathymetry_format, & - grid_type, grid_format, & + grid_type, grid_format, grid_system, & dxrect, dyrect, & pgl_global_ext use ice_dyn_shared, only: ndte, kdyn, revised_evp, yield_curve, & @@ -184,7 +184,7 @@ subroutine input_data bathymetry_file, use_bathymetry, nfsd, bathymetry_format, & ncat, nilyr, nslyr, nblyr, & kcatbound, gridcpl_file, dxrect, dyrect, & - close_boundaries, orca_halogrid + close_boundaries, orca_halogrid, grid_system namelist /tracer_nml/ & tr_iage, restart_age, & @@ -324,6 +324,7 @@ subroutine input_data ice_ic = 'default' ! latitude and sst-dependent grid_format = 'bin' ! file format ('bin'=binary or 'nc'=netcdf) grid_type = 'rectangular' ! define rectangular grid internally + grid_system = 'B' ! underlying grid system grid_file = 'unknown_grid_file' gridcpl_file = 'unknown_gridcpl_file' orca_halogrid = .false. ! orca haloed grid @@ -696,6 +697,7 @@ subroutine input_data call broadcast_scalar(dyrect, master_task) call broadcast_scalar(close_boundaries, master_task) call broadcast_scalar(grid_type, master_task) + call broadcast_scalar(grid_system, master_task) call broadcast_scalar(grid_file, master_task) call broadcast_scalar(gridcpl_file, master_task) call broadcast_scalar(orca_halogrid, master_task) @@ -1356,6 +1358,7 @@ subroutine input_data if (trim(grid_type) == 'displaced_pole') tmpstr2 = ' : user-defined grid with rotated north pole' if (trim(grid_type) == 'tripole') tmpstr2 = ' : user-defined grid with northern hemisphere zipper' write(nu_diag,1030) ' grid_type = ',trim(grid_type),trim(tmpstr2) + write(nu_diag,1030) ' grid_system = ',trim(grid_system) if (trim(grid_type) /= 'rectangular') then if (use_bathymetry) then tmpstr2 = ' : bathymetric input data is used' @@ -2024,6 +2027,12 @@ subroutine input_data abort_list = trim(abort_list)//":20" endif + if (grid_system /= 'B' .and. & + grid_system /= 'CD' ) then + if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown grid_system=',trim(grid_system) + abort_list = trim(abort_list)//":26" + endif + if (kdyn == 1 .and. & evp_algorithm /= 'standard_2d' .and. & evp_algorithm /= 'shared_mem_1d') then diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index ce5d89dbc..8f8c85904 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -56,6 +56,7 @@ module ice_grid bathymetry_file, & ! input bathymetry for seabed stress bathymetry_format, & ! bathymetry file format (default or pop) grid_spacing , & ! default of 30.e3m or set by user in namelist + grid_system , & ! Underlying grid structure (i.e. B, C, CD, etc) grid_type ! current options are rectangular (default), ! 
displaced_pole, tripole, regional @@ -448,6 +449,10 @@ subroutine init_grid2 !----------------------------------------------------------------- ! T-grid cell and U-grid cell quantities + ! Fill halo data locally where possible to avoid missing + ! data associated with land block elimination + ! Note: HTN, HTE, dx*, dy* are all defined from global arrays + ! at halos. !----------------------------------------------------------------- !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) @@ -458,8 +463,8 @@ subroutine init_grid2 jlo = this_block%jlo jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi + do j = 1,ny_block + do i = 1,nx_block tarea(i,j,iblk) = dxt(i,j,iblk)*dyt(i,j,iblk) uarea(i,j,iblk) = dxu(i,j,iblk)*dyu(i,j,iblk) narea(i,j,iblk) = dxn(i,j,iblk)*dyn(i,j,iblk) @@ -476,7 +481,11 @@ subroutine init_grid2 uarear(i,j,iblk) = c0 ! possible on boundaries endif tinyarea(i,j,iblk) = puny*tarea(i,j,iblk) + enddo + enddo + do j = jlo, jhi + do i = ilo, ihi dxhy(i,j,iblk) = p5*(HTE(i,j,iblk) - HTE(i-1,j,iblk)) dyhx(i,j,iblk) = p5*(HTN(i,j,iblk) - HTN(i,j-1,iblk)) enddo @@ -506,27 +515,6 @@ subroutine init_grid2 !----------------------------------------------------------------- call ice_timer_start(timer_bound) - call ice_HaloUpdate (tarea, halo_info, & - field_loc_center, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (uarea, halo_info, & - field_loc_NEcorner, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (narea, halo_info, & - field_loc_Nface, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (earea, halo_info, & - field_loc_Eface, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (tarear, halo_info, & - field_loc_center, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (uarear, halo_info, & - field_loc_NEcorner, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (tinyarea, halo_info, & - field_loc_center, field_type_scalar, & - fillValue=c1) call ice_HaloUpdate (dxhy, halo_info, & field_loc_center, field_type_vector, & fillValue=c1) diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index 443ff1cbb..11520e36b 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -60,6 +60,7 @@ &grid_nml grid_format = 'bin' grid_type = 'displaced_pole' + grid_system = 'B' grid_file = 'grid' kmt_file = 'kmt' bathymetry_file = 'unknown_bathymetry_file' From 5de9a2c886714ad144dd8f6d22792455145f7589 Mon Sep 17 00:00:00 2001 From: apcraig Date: Fri, 5 Nov 2021 13:32:10 -0600 Subject: [PATCH 007/109] update documentation --- doc/source/cice_index.rst | 11 ++++++++- doc/source/user_guide/ug_case_settings.rst | 3 +++ doc/source/user_guide/ug_implementation.rst | 27 ++++++++++++++++++--- 3 files changed, 37 insertions(+), 4 deletions(-) diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 12bc8d32e..97afb8016 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -184,10 +184,14 @@ either Celsius or Kelvin units). 
"dumpfreq_n", "restart output frequency", "" "dump_last", "if true, write restart on last time step of simulation", "" "dwavefreq", "widths of wave frequency bins", "1/s" + "dxe", "width of E cell (:math:`\Delta x`) through the middle", "m" "dxhy", "combination of HTE values", "" + "dxn", "width of N cell (:math:`\Delta x`) through the middle", "m" "dxt", "width of T cell (:math:`\Delta x`) through the middle", "m" "dxu", "width of U cell (:math:`\Delta x`) through the middle", "m" + "dye", "height of E cell (:math:`\Delta y`) through the middle", "m" "dyhx", "combination of HTN values", "" + "dyn", "height of N cell (:math:`\Delta y`) through the middle", "m" "dyn_dt", "dynamics and transport time step (:math:`\Delta t_{dyn}`)", "s" "dyt", "height of T cell (:math:`\Delta y`) through the middle", "m" "dyu", "height of U cell (:math:`\Delta y`) through the middle", "m" @@ -196,8 +200,10 @@ either Celsius or Kelvin units). "dvirdg(n)dt", "ice volume ridging rate (category n)", "m/s" "**E**", "", "" "e11, e12, e22", "strain rate tensor components", "" + "earea", "area of E-cell", "m\ :math:`^2`" "ecci", "yield curve minor/major axis ratio, squared", "1/4" "eice(n)", "energy of melting of ice per unit area (in category n)", "J/m\ :math:`^2`" + "emask", "land/boundary mask, T east edge (E-cell)", "" "emissivity", "emissivity of snow and ice", "0.985" "eps13", "a small number", "10\ :math:`^{-13}`" "eps16", "a small number", "10\ :math:`^{-16}`" @@ -277,6 +283,7 @@ either Celsius or Kelvin units). "gravit", "gravitational acceleration", "9.80616 m/s\ :math:`^2`" "grid_file", "input file for grid info", "" "grid_format", "format of grid files", "" + "grid_system", "structure of the grid, ‘B’, ‘CD’, etc", "" "grid_type", "‘rectangular’, ‘displaced_pole’, ‘column’ or ‘regional’", "" "gridcpl_file", "input file for coupling grid info", "" "grow_net", "specific biogeochemistry growth rate per grid cell", "s :math:`^{-1}`" @@ -413,6 +420,7 @@ either Celsius or Kelvin units). "my_task", "task ID for the current processor", "" "**N**", "", "" "n_aero", "number of aerosol species", "" + "narea", "area of N-cell", "m\ :math:`^2`" "natmiter", "number of atmo boundary layer iterations", "5" "nblocks", "number of blocks on current processor", "" "nblocks_tot", "total number of blocks in decomposition", "" @@ -434,6 +442,7 @@ either Celsius or Kelvin units). "nilyr", "number of ice layers in each category", "7" "nit", "nitrate concentration", "mmol/m\ :math:`^3`" "nlt_bgc_[chem]", "ocean sources and sinks for biogeochemistry", "" + "nmask", "land/boundary mask, T north edge (N-cell)", "" "nml_filename", "namelist file name", "" "nprocs", "total number of processors", "" "npt", "total run length values associate with npt_unit", "" @@ -690,7 +699,7 @@ either Celsius or Kelvin units). "uatm", "wind velocity in the x direction", "m/s" "ULAT", "latitude of U-cell centers", "radians" "ULON", "longitude of U-cell centers", "radians" - "umask", "land/boundary mask, velocity (U-cell)", "" + "umask", "land/boundary mask, velocity corner (U-cell)", "" "umax_stab", "ice speed threshold (diagnostics)", "1. m/s" "umin", "min wind speed for turbulent fluxes", "1. 
m/s" "uocn", "ocean current in the x-direction", "m/s" diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 9d8e5fbc7..851f39883 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -239,6 +239,9 @@ grid_nml "``grid_file``", "string", "name of grid file to be read", "'unknown_grid_file'" "``grid_format``", "``bin``", "read direct access grid and kmt files", "``bin``" "", "``nc``", "read grid and kmt files", "" + "``grid_system``", "``B``", "use B grid structure with T at center and U at NE corner, "``B``" + "", "``C``", "use C grid structure with T at center, U at E edge, and V at N edge", "" + "", "``CD``", "use CD grid structure with T at center and U/V at N and E edge", "" "``grid_type``", "``displaced_pole``", "read from file in *popgrid*", "``rectangular``" "", "``rectangular``", "defined in *rectgrid*", "" "", "``regional``", "read from file in *popgrid*", "" diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 3fb63c11d..f855e003b 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -79,8 +79,8 @@ this tool. Grid, boundary conditions and masks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The spatial discretization is specialized for a generalized orthogonal -B-grid as in :cite:`Murray96` or +The spatial discretization of the original implementation was specialized +for a generalized orthogonal B-grid as in :cite:`Murray96` or :cite:`Smith95`. Figure :ref:`fig-Bgrid` is a schematic of CICE B-grid. This cell with the tracer point :math:`t(i,j)` in the middle is referred to as T-cell. The ice and snow area, volume and energy are @@ -111,7 +111,23 @@ distribution, http://mitgcm.org/viewvc/MITgcm/MITgcm/pkg/seaice/. Schematic of CICE B-grid. -The user has several choices of grid routines: *popgrid* reads grid +The ability to solve on the CD-grid was added later. With the CD grid, +the u and v velocity points are located on the N and E edges of the T cell +rather than the T cell corners. To support this capability, N and E grids +were added to the existing T and U grids, and the N and E grids are defined +at the northern and eastern edge of the T cell. This is shown in +Figure :ref:`fig-Cgrid`. + +.. _fig-Cgrid: + +.. figure:: ./figures/CICE_Cgrid.pdf + :align: center + :scale: 55% + + Schematic of CICE CD-grid. + + +The user has several ways to initialize the grid: *popgrid* reads grid lengths and other parameters for a nonuniform grid (including tripole and regional grids), and *rectgrid* creates a regular rectangular grid. The input files **global\_gx3.grid** and **global\_gx3.kmt** contain the @@ -122,6 +138,11 @@ and **global\_tx1.kmt** contain the :math:`\left<1^\circ\right>` POP tripole grid and land mask. These are binary unformatted, direct access, Big Endian files. +The input grid file for the B-grid and CD-grid is identical. That file +contains each cells' HTN, HTE, ULON, ULAT, and kmt value. From those +variables, the longitude, latitude, grid lengths (dx and dy), areas, +and masks can be derived for all grids. + In CESM, the sea ice model may exchange coupling fluxes using a different grid than the computational grid. This functionality is activated using the namelist variable ``gridcpl_file``. 
From e0b1b1916bf8eda6d05d1b1c6898b9794a4aed40 Mon Sep 17 00:00:00 2001
From: apcraig
Date: Fri, 5 Nov 2021 13:35:30 -0600
Subject: [PATCH 008/109] add CICE_Cgrid.pdf

---
 doc/source/user_guide/figures/CICE_Cgrid.pdf | Bin 0 -> 29946 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 doc/source/user_guide/figures/CICE_Cgrid.pdf

diff --git a/doc/source/user_guide/figures/CICE_Cgrid.pdf b/doc/source/user_guide/figures/CICE_Cgrid.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..dcf0ae479560e268312b44cbfb85950a80fd913c
GIT binary patch
literal 29946
[base85-encoded PDF data omitted: CICE CD-grid schematic figure]

From: apcraig
Date: Fri, 5 Nov 2021 13:49:25 -0600
Subject: [PATCH 009/109] update documentation

---
 doc/source/user_guide/figures/CICE_Cgrid.pdf | Bin 29946 -> 0 bytes
 doc/source/user_guide/figures/CICE_Cgrid.png | Bin 0 -> 77699 bytes
 doc/source/user_guide/ug_implementation.rst  | 28 +++++++++++++++++--
 3 files changed, 26 insertions(+), 2 deletions(-)
 delete mode 100644 doc/source/user_guide/figures/CICE_Cgrid.pdf
 create mode 100644 doc/source/user_guide/figures/CICE_Cgrid.png
diff --git a/doc/source/user_guide/figures/CICE_Cgrid.pdf b/doc/source/user_guide/figures/CICE_Cgrid.pdf
deleted file mode 100644
index dcf0ae479560e268312b44cbfb85950a80fd913c..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 29946
[base85-encoded binary data omitted: reverse literal duplicating the CICE_Cgrid.pdf contents above; the remainder of the section, including the new CICE_Cgrid.png binary, is truncated here]
zt4|)?RouOouRhU2kL25a)Q8fU#ijw3qy;vfZPdg<3@JFDH&<3xx(>8ktp(!nd8gM* z5@6W$f!C~^?V(qRYWDZU|5x?8cKC3x3KDXIP-@5TX4Ly$U*C;oN#OThpidGWr0>rU zGFIbY4_+bxa%iXKvPR68EWl+)jy!~f>iRp5lqpH;S0Iq|_lkA7j?S+U4+{81cPJMs z+ZxgEf}>s>BSBP5Gn#?u1yPL$_ha}dMOI}k=e|}`?{G#5+00>AQIDO`>WvzVqz82a zxa{D5{%k_U0lVXTACWAfO&!{M%&K_A1y;on>8uw}c<1bz+*3<(F>!zQ%is2f`1)uC z1XRMj4h5qI&IB~$)6qrxQobVFwg=(KGS7lnlVZ8oP`%#(_Z_s; zo1>Xivo8}P`iD+J-dZN@9}bKmX=U#wMC@ z)H53Gp!+NLBJU@ImDC`@F|Id0*;B>q>k}1mx%d8Kyx{QQb(Z{cb2JfCg$>+mKU@m) zy*p)7>4seeX$$jRe3i{gqH{TUMS$?_`JX3c6%UVxyec)`y*KW={0ysx$1kMB;Snxk z*h)}YIEpnhKdD6*4VVr2p=5_a@#5Lc8vAZc56;$z z=t2Qe<`Jgi0s&sN5tMfnJP019dGTC}xVECe`W@M2I5wWeu(C0|<}mHkKX$wVgNfM3 z*h||fn0Uao{j6-+rg19;mb+ovnj9TJn`+9cMp>DEL`mDoviK&>3dA$b&um!>aMWv* zZw`tYb$5qy^7D}M46iN$JNL7%F60@bS*56g1H;LWJzQ)L#`t@1hTM9G6=T>dZOJXR z3s2Jfy7{Yl>>P8w@m5#2hgzSvD;^IGE!XLfyefA%I&$f}q0cvJsf=NKRiH&z>cern zwL6GMhO@!z`~1+nq+xVFd-whPM;uWe8zpX;Qso3h=F&I`#ef8>dKIf}eQlA8#3*7s zs(sVBN40)nUu__h`4I@cWF%@+i4;cgi)T`z;$zuqR7oo@@vVP$JO<@*y&wXACt9)7 z;)JcGaJ{e0!L;9~-k6-!c%+G#{16_u}e(UFOsTp&|40CAk?=&eDcBoiFxX z=u{H&xkKV0+af!q;PcBEhJ$J&j;;VQkRuXPD{9a;ZH?uN!>zi?DuS=;(%@-t32yR{ zxKgg_3tRrWCMr;EmT&Rj5T!v7hA8;rfZd>9(r6&ZL$>)WAy*Mg^8}k7GOqqm-gzK= zS4mPLy{gzdwXG_+$2&C5dG!YyZk`oM+=JcY2VA^k=ayOK+MCp`TA@)rhI}O2M33fd z6CO$@ZgG=-y~k$;d$XI=(kOaesr2so8r9i~c3otBEpRH8iyL{rlbQ>YkoY<0Po}Xp z!`99-+Yx%)2vRrexJjofB^ln%jf>W`2g{?>b?k-&=SwpV)FqA9n`#MNV`Tp2maL6i4!u1)u-|77K7w^^0T z2zUTIWMXXdx$v{1ihTU7z5KGHoJWP9zmJH0J^8Ix7!3#uF;NolA44XmI;3fTcY$5^jgOQub=V#O%}d!AV7$9)lds6=4M#TN3o)KO&5R zBmHAVRf`z4l>zE67EA>K4`pobv)eJh?jv=2+w;j-fllbcYT-qPly%kBaxT5luQf;M$}brCTaxyx+0y}{;5FP4T96JwVn!tY zqer`C6E3U$H6|va%n~7O*+E}y7N!^Sx9l+VCjf`(S(VbdBQLdgHW2q2JfbRqFK^%YEh=yVw+pM3Jk;f8p58X$f8lpu+5oaR;6d0*^t z_=GI zXUaI=Rx^ctvDVt7I34>L!Cj9J98wQ3Eb8pS)XI!MeOF?C8>aT~ZPJkJwl-k*z28Ih zZ*!VlO0kN9LS%6g9t2m5B-9dm3OPkF<(D=WwCvvVovdy6$IpMRgdNetX@*=5&ekaR zz3J}~Cy^_ms+)aB#X1tE$bGSTK-20@Xjlo0roy>_14q_aVSzCZsshg_t1F*W@PlXS z9(FM0aYh<(n=01u<6zJ{rII?5wG4lZy)nVe4XF8p)$>OmfqSn6AtdkHq~g&+CAcI= zuU3Ej7|*GfaInjHqn69=t>V%sOP=B%=&T8DZ1AW2esQ& z(>;1Q4Gnr{XXlGi|Gf`U)w?TorQuupzTRb#E+*~Ht*^Ge5;MZMB6F~MwxjsY$(+P- ztJw%@0F#CaSBhwL@Uc+%l)TbHjKEPl(K@Hg*$=j3#CkK)&#@>U?qk^{I&aU)XmY$o zdTF=UC}qgEiYe+MD$-`w7tJ;N;&PvJM>~+np5$TZdU9Jm|F{vNC~Ol_zALliyPCKa zwQvnRW3Rg)a=D25KZmeL2|RxIn)^> zmFR?i_+hf|TL1$iY&pbjM{rHLT~%Wx1#C6R!g#fh=+Rj>($izRM)564o)BQhI#k6N zRF56X5N1S5*?hg!tyMyz8a1b5=k4QvDlEf4dFDJe##{YNeaEr5 z3ph~a!0RCcHBycm=<<%)C`Rgq#)6zlUU|WO5v$hOl>iMx)u5|j{R;TOWjl%D5C;K z^GkSva#*+hedlYtB2>RQ3r-xUhD$5_I?PZ~f7HYEyP25+l<#>DOJwLnBA23AVE;wp z15zB+6i#bXf>dZh%+oIh$DKcH+gkOT+XdVKpXipbfgiuk3i67W>i{NMHZXU~w42#Y zV{$5f_RF|(S1^)(eqcMok8RXTNJ|+JloIuMlX2##)-T0_o2r(kVz%iGsQyP!fm*!5 zcH4rM3`ig)>TIw?YgNkLVt6;PZ@&SV_o!d^AVB;4wMEUo=o#$&3pD%t-GKzr-{$lG z2S{F2f^Zc5qJbyB69LR{2uMgs`3)lGc_3BvOAD>&$!_&4Vu^>+@ihCklSbN3IbBZ( zsaX`RmKnMt*D`{vEmmj3du7SuUO?K^ZIa&2Gk7p34zW^NoLw$c>`q!<>iMseX>XM3 z4T)TZ=2HGY%zgDkl-={TAP5M8bSWSq9g@-@0s_*F2q@i1cZh-tNXa51-QC@yba#ia zbi>l|o)zoy^Zf(f-^jhsea_6BnQLaQi9g`VNKX#|jcdweF7w#x6jUrMU0OF%;DxKd zTyBlWpe&7vn}QSN5;S5&n_paPm*ttHPLd3U0MZAAYGl@9yK8~%{MT1F*evdK*k6_g`b?p#sH(PtUR@S)&%=c7fJo*i zcij#)V48l9J}Qjbs2MJ zrl-fg9XMWQ9rq$fC?juE>N=X~^qw}#YKM1B-f&}Q|N8O*_HV50BGvc|=4+ ztF_n36PLqT{~(cV&~`{sP*4!l_TqQ{06?Ai(Lu%C?oz+grQW)uF0u9r9nM_UEO`F9 zV6Am!FslY41XwAc#r<--pPHqRj%DI&Yjtw#&RzAbeX?>Z+vZC7Yzi#LxiyPTLYt3b zt9xZGmb}9~2c_yN$w3Wa?J>g~V|RA5IF;yljSj`AzV{y z)Jf7SBhmSpx8GQk9UGL*f5BPA-GBvA@*fcFAFNl9gT|W_g>29nnJOKM6!aZo;dSmm6L!Ekn&+|s*({BnT2zw2o0>IkY8-0wvCRPR)^1MQ_1 z#yiteg)oAGZfcr2IvBaRRM^Lx^uuRM2AcP+ZQ~sR_QxK^S5$~kxqBs57p|GNb}}9; 
zIXG*)2_z1sB#h+}Blvt@RM2G#CUk=@1kS?KMt*T9shO|mBPm!d3YuI>H#)+{$(_Zn ztFYzPrm8&nLP^6UAhCWAhg$f)SL*3uUpO_b@x?RNJA=AsGeW*k^Z>n1p~I?Ll%QMb zwwrRd8HaL_>(;Y?>y%zcojRk{>+vcogT!vd$!Sfkd-RP4vlCKbA8uL_EaD@yUtb$l zr^XUK3bmN1)U|vx=dQ-iz^>9IQEDoI=5d$$&S{o;N~N0Y?UfL%!D-xM>p)PBU9sYC2Mzny>`2$} z1Sy+p>r3=1^abohL!3o`WN>K6>{Lfi_C1(PQTdp?A6H<<#rK!dvZocflEi;!2d*GI zKI>KJ^L18kIwULRG}N3*ZRk#|wU(PQs>_}*a^r@u5C{zwEo<1vLyWo1I9-=_+V1=J zthj96)Yqe_WTv@#?F)BH>pXiszi|o*z&q3cb-46C#3@B@+SSz5ij8Cu6*HoEmcw9A z(OBl8LE!1^5W2xoVDHson~gwkLZ1WSBRS8DCH|&G$BR;|z-}95UpwOBt@MYmBOyih z=1z{6r2VwiE#Hr*Q1zT&%n#I~eo!B1%Q>iFdEzuL<#2@1I$kAl#+(32P^t58Ov0gd zSu&6!=aNBYdl0Bs6gzt)h+kh0T7OVd8z%4($PqW{g6$mLnP^bT9?2-jJp7g-voOLN z_Ccui&`sZ2B23_H3tz|>Sic*ZzXV6wN3UeBF@}4ce-fT`ab>nSxv0$;+GP%(39)IN zE|V+Gb&()3lL@+@Ggp?+A6CVe65Z!}D?^-9>;341b?JOdMVfs0o2?$5=>YVbOXfbp z0{gLAvGd**Cp#|0+*7>#OVJTtkit{TL}Sb-DHN_fLNjsR=|{E;_L@a^B)bF*Garik zMTt9$>xm$@4Mf%n*PS85zF9>qb=#OushL`udcCYk$CMZ4Rvy0 z=+dEm2Gb;Mm8}_Q^S~qK5+IgBVxsEl>7gDzU(j8djU_o+m=n&L`DWDHU~oGyD~~?( z%jX+S9rLdSm@=|+BM0U+qC&X7vahGz`L%%_2#a4n3|Z6uC| zNJE#Vin}^>Y`}%|P7Pl7K8!UzTg_CZT`XHA+AaJhkqJW0>}j*EMP&6FD-HWy)h|?Y z1}EyQB%)=xW2uoJkL8e;S5~5-N5eyos)4(xdsX#zdD*J%hb65}=5Y~~E8kf*^A2do z&N^4kM8hdG_qS77+CzOryslU84XGD87}u^%9j4w=D4MO54syUR-0r7QdZJ3Fc+a#i z8S$3uTe4ENHR-_Jt9Q0GGMW1ZCK#$}U1ZOMSa!xRhnib_;Lh@M-P~N+x!lox#ZZ4fzvS2p$(>(qlp93f=Ev5^Vu_o)lHw9z6x9 zo7hY0KI^>4j-qDE>8k8UugIk=4Slu;T%Ce?!D?8#nNJ}!)Cm(8@UwZ2p8AW zEt%(QV3sR(6~iB&^-fOiZLaS178y7>+Bu20O4e^8c6KuFu;Sm$W$BK%S|zl;@5$T= zTk(0TZa7wLo?NU~H!u0MmAm9KG`V1TQbz^>=~zq;rtUUq5gKgy@H~hN?M%zeO7t;k?K`bSpQ`xAR zpS5j!WpD^@ucZ+vF(cOC+E8~!G%e`q^Q!8qa}PjBN8jra_-sKRlKng*d(iRFJNk+P z2q-`qN0w0M+_T0QDJUyXU}-SF=o@CYS3j>Ku~xsdE!0KET(f0SPUMLj7QJ61DI%}D zn^1TsVHSza^_>K79onl`;v^0{TztN5f)$E(`Z327DD9uPRtHu*JH9TxB{n>zisy+W zkEU#jC8(+JjU9{%d-37Jf>&Csy|tPyLD}Y$foNB{01;jE$DgB8xMSjyW49E2&`R9R z)akW5^@dD4P+`JXaDK}c;1egRsZRuLFe6X&PCs&96|sKwQNX?sEQ2`O(pCBGdr}6_ zVE8ak*JJf*aBJd`ay9y@sZNu~5XrQT)PX-G==dFu%R&4S@jLYs0$5JC_ zHUlX?{c-|&2HuoV9DJUyBmI;wTAX2q z8XvoBv;&ROX&IE+gTo4vlni^nAbXk+t5Wv3`4~@+F>*yHKeWaef7+*e+>{mw>5U3y z_c}j?cZz!JBAY$oAw|HrO*C|32;*E%>9^nFy-)p^rOG5Bnflw_o>JSL8FiniqE7~c z$91hENvxdQEBpHuWJ232C9D&ubJlO?QiJ!*-O_LUOebH9yMnHOS$i)<5|v_x7NKUX ztK`C^@ZN4>Y38FazsDi^a#d1&lAS8L*VAO|-Te&SN7FWE+9?*8n%%7tW4Ui9kv1b9 zJ>>&=B_fReS-`FOMf0<{g{(8y+7JaUUz7Ry{wm9FY_O;y-On*~OG}qMu{igQXYJpr z)j7d%YbH~$aphEXkNw&~W0r&Z2X~}X7Mp>ukLSpwmT}$OS#Mj&Jpz*N^TM5=A%Lu1 zF<%&olIj68H@aIDYyE5My|gL1DE-CE6t(byMxjugNCB<8oyVbpv9_tyO_Hs;o?e<> zoNs@@_w^LGsI|%|qC>QmaJS=7R&PGvpkz%f%e^e^G0DJ=!m)5ypC36hu_mo_)5lQ> z1+iw@mxH)<{j|ny7ge)N4r_tukHp8z8ijz?yY$;rtmNC z*!16`oT<1Tr_ZINr1Xx=jEsx(-H+rt6Nq9L67$>+NB(Yu{JB6J zTV7611Yj3Yzx((j1!(3B4Xzux`>Y@-h}Lt}{9R4yv!x|mgN}hB!{=&+OdkcMo8I4E zExARDy~OHsIPysI1g4W=9iZMYUozSKQuzj{&%5QVR^r~I3kg|Ll(?yX_zJm6OsLmvVeuBe^GW;UbR;VN8pUsvyapdtNgi>R&c=F> zE*RKVLg&Rs*`1Mo#+1}j#dY-&%FfOkO@)}4l5dvsoLTu+i7FO^7onWHxF~ ztXwuK76&4~G4FiEm2Z*vI#wPhMeMl%z1c9^LFVo@pHth>D(F)wHTf&eX@QE-ts9Ym zMIgEI)LZK)TekBqiJALyNXRFeB!Aj=F(++GDYV|RX}t}TCq8yvyv<8ho?Qzg`Y}aJ zWX>}PM}PtE?s%zXcu2_2)>g)F-Kp(Fyhs%p(XMOsBV}@CjJtzIlbHHgi3w&!^T~T? zO<^=VI=nKN^G5A}R?Na&x;ckxYkf$&Vs@V}CDA$9;&=^B-!wf}D=qSbf8{Ia3n#>! zbK1Tq8MKs1w}?$-18;&XnO1v$x`E{d$(ddcxv1GZu>Eo`$?%ccgm+ocwV$!L$P7Gn zH&KuTMWtY?M;zq3vC}#hv$L?%J-<7+YPca)WVrFR{~_yOQryGw0)77pyKcdq)7@@T zAM~OMd7-1^(|KCqd(%7bMCz{NMXI&uc)800dH~KL-8^OU{N~IXi?GU-4JnlGECS|! z+Ay&ayodMYA6Jgstax|!A%)GP(+?F6&(OI_c=BJsgOBswSO-{|3h2Me^p!Y6O9S3+C&^K>AY zeI8_lD=3ug&E1iT;RI3|8gB$_7Z1K_d$uJ}Sl`T#uMhUzqtdLj&j1~bn)wc#ze++7 z0h^sfd=Sw(NJ+J$RiEy(1A5g+IQf#n?3dSt9w(0aP2T_=pmlKl>rop-qEw{3t28~? 
zu_4;2M%Xo#?!IfC^xGy9nPWuzAHwgp1hWb%Qc?-{l8_<0_#5k<82!5UP%9kVp&Po){Y&Gn@sd_|;r5sU7{8f{K%KK*LsCJ%xvA^KfPT zvu2aJj(pqRH<;@(bB#fuOWpgt+Kt&?z=DtanN%|-?pacto^HhodK8~!C5F`7a3lb! zZP@9H#c*;Bk;Xx1X7uz;>HWt(PDT5d*$EchYhO~wK@`OAIYFBQjiRt$6eQgVTz z{fn|;9@9A?3MJj&UWU8YiWX+!VBTCZ_9>E&$%G6OQ{^$8XbGuu@Y(J54{lCyzg0Rx zx+Q2>&cx^Pu0#0Q=hXu%VV>ae-}V8OLJtjW1BB=r38*OW6&3MnIIo&MlS@oY1iYIx zeLX!hB~Nx2yCXp`-mJ5G>Y%h~9zU*c>Zt_VFikeRK{8mRisA+44MKgOa)~3xe(u+YxEwx#PE& zqz)R_jN=bd>c;B4E-e>8rPLuqFjsp z-EY?suBdVd=X?v{aP(|1K}15rZ(BTHC+`d}+WIa*6Zx*OcccxTY?V%`rEC&o&uUAj zjDNw)kEqu)j-df2UP(iP4nRCfImdFwIgDG!t9k99&(wpPO+aA6SSSj>9Ny;_gGj81 zmIEfNzkDWd?>c0}ivkHR^dH0yHbjU~QBgaQb*JKv>ti&-I&L{*-r+S!n7*Mhr%YRe z>bk8v(DtCJQ=qyZK7Aq)o(^5XUWyWtH_D#yH~_iE4XVG+1^yx!I^b-4oh+7#5m7Zn z^hWZZ?TqLL6Z-_3=T`cA)^*w6ugo7{6 zz+2Ai?m%^Q&EUx}44>IX|Mt0HVV1Hf*DMuv^`w{e-snqEmnrAD z8^MkOTlH6)0u##1?+5R~aTsL#cqhPaq@=6Mdhg!7-0{W5MHyGu%GI;B2uA2T#$214 z#h)4JU#ST_8_?oSq&+8P4TuW9BT`8O<~yUC#?~S25f-=#e>glmY^;4f^{Og6`!VR% z#jL{I{o=M!k=EbT=gNvvlW-8@G-XOKhwp=c?Yt0WOwGkr(q9wFq)Dm%)Il|E_wyw( z_4}DbpTWCv-EWT2`LUTAk3?Ag?ErIM+em@_)0|9jN9Q~#bG%Mjem(taO>G@Zk}o-5 z4ItZWimbdiT@(&F@IUE(f17_E5E1XV9yMYKTUe~DbA^Ya?uC1o&4>$6ej5J+(f;^K z1Ed%HUSIP~{qfyiPW?cJvf#^W8zrk}WP6dxtaYb_aKdGFqYk1tVNFfV#Ni2HK=l@A z8>#DAQCwPDscw2pK2|`p>;4=@@GhfTHX9-~pagY{5e7_laFXa4$O~J11(dQ$$7sa` zKpw{-x9TeP;K6)GTC6SW(CQq#mE3&XK^(xUiIHKH{|FI$RaZf4vVOdz_sI{`|4^2K zhezGk*0y>`7o>-dC0SeX@bL|iIP68lE(Qh$TBd5;f{1S}@YYx;+t}p01GYYisoid2 z)+wV0aKE6is7P=lBeJGFPItp z;AeObSAm~8^-v$tJSH4@Bk(82p+Y!S*?2tRx5X3WO((&eSwA*u`t#YqVCTOlP z=Jh?-JrjMY%%&!ZjI1mU?g+2FUdbTCq<8P$J+uzXj{L|MR>P+q1pO^)e)ppA;&T9HZK2|Q z9-nifFA*uXg^l4;n+9YYJUnVZGn~453+30_|JjFnJv<8Udqk2ELO)LVP*r5pW4l@S zycd;v0`OWYDJb+n&xZ1Jt>*!i+*DEr;FG@N+PL<18I z^v4Rixm#Vaf&k3(m$v?XJ5&{)eM!-)B`C$Dm%k(jQGaLCWB>Q!YI!=7+bY4+WhEu8 zfOh<$^?VM`l{#Ru=kvVIvW8aykE+4vVK<#U)jo&il-Qk}9qkkSJ=Mv9-!S3NV+Aj! zAr7=hRpO=Nj~^Y1i*!SJIBuVY0?S1PcRmD;=EH3P&w9&zYdC<(tnqS{by3*%Ixq#% z8z*L}`h$4?Qwn#vWk}^$WF8?OUg?9Aqg+9lModUZCB#9UNxKOZXx{chc^1b z??8^D0t$Ijt)r-@`0Vp=yAVuOT3((fv(L>Y3un2!<;Qk!$6+|@xJ&?|k^e?@%T{;vjEOHywyK+e}aOJ;$EA}Ff>&~5qmiE@x z<)Yg8g#{9aD2D^hZz}(}2`~3R<5B(Y69ROuz)QPCagz=aSj>pWAK64f+Jqw`FYmyl zl=q|&gU{K((RmjT?2{Jp@(uMSeEbML)Tv_;POJ5o3c}aP1_m4IVfZN~WBvHmRfd~5 zIFtv|UUkFVfJe3Qeq2n6)?MVfb?q8iD~d$YUy4c7GIw2<%Ic_19XP))qwwi0R*>=rsUzUI zoW5UXG0J}MJ@=$?x~{Zzh%OugSx_803yMwYotR)Us@ZV4^?&TMFW}{C(j$>QehGi8 znLF{3TI@X4`z;D^*6Qg;s;Uui4)5DV^MHkseWy$K;#AJXl{LU}ywrg&PhJ4nW#Ac6 zFskR;LxYJqB=(pB0o8pwn8U2ScmYI!rcMSs9DfkUb31{h#-S|LYx)51o(w{f^fsy z+%)Mb9CUo_O3ZEX@wD0N)Ze^22+R&>FX|Rz1Q+MN78WhGNc<+!hNokZ#l%w!42Q`rm_ulXQ$Xx>ehW4|~%cZ#3 zWX&R@x97_rC4$MAkNtsuo)gm1<-n^mGt5o4+dG9$D@CNUNDR;SIME!DS{I|=L3zBU z>6WFicfPFCI=FNF50QP4S-^^1Wg%jvm6N)MNYq}}LFL_>LPGqvZr)4>J`a96 zm1NbQydX<~gC~zj6LF)Ex9SV3Mn~-D=M#o~EzX*r7p;?1+dr$Y z32t#bsZsqg%#3FBFtS-#9h(u$(kHQ6er zhDn^9$Hv{5yE^|+?3@Amx+LL}iXX<}$3AZ(q3ZL@nc79e+rp2VWJUH?-&Z7f%#d>T{=wH4#+=nG zhK!%Z2R}E2$Oagvt3pVee{=v)D(+bz*Q_ZigtT~hStouK_m@_a$QE#3zZaIv-OkHySh|Qi) zLup!8s_>6A13TnX5BeLh@9T@p{Q2>*LVSG3k)fC0Sl$=Gk2gWPf{}>GGz;~}>?gx* z(7PsqzQ-ZQWW-Fh9_;6GelJk_FTYOYhb#c(wRh1I_TS&b@_yf$} z)}45SKR>>DM?rZLgZfp5Cf;O8^4=UDXm_b?`Ea1sIOvJb zr#f`s__;IdU{%pVHYQiptN&il7$iO$7v`2d_7U&@2oyyRO9L@wy>iJvpE8WIe^mXi+#+4}o8 zH$p8>7(2_F%#Eazx3yF@-0I#{hw-_XjanVq>nAkBrlyuC0_CFU6{iY$f1oSB?EzBq zm0%O(`TSm!OYu6Ei%&@r*Lw2g+w4q*AHaB1MQ-+R+$HGl`miY60zuIoyPH);Amic? z?w-WGxDO%7geIOJFR+;QM(d{AAHV4G_S4+|nxq6>A4Gu$bz{AWeX6L`OTll;kMf`? 
zWAxUhbnKW-Raf=EY8K`zfg;K$sm3%d6Q-s*?MvbKX7rh3egtUfFIwK*L1L?XMS1ma zpBPcj+nNEOuu(sFq)-n?~-iSLc*gZlS~>ao09$oJR-bMuZ%6*?j5A-i&IoSu8F zdXgu(IB55tvD+yXDmprAeRzBe+xS-W1+bk=@N&%;oh0}etzCJc=nEL(9LBs?Z#Oh_ zRlX;S)YDG9inq|Gy5P~lUg@oqD@`jX^Hu%$xm{?e$H0{N8@wvURL>NX9~ZoIh0Yp*v4_8g;3MviQDCrg=w*`A#Tak!a0E9jfUC!U&BO%e%& z^Z#C)h>yLAi4eH;I0=93vrjDn+f^cur>w9$Hys1Qe?=X}xF$%IsK1q1oGDs6 zqHSTn3JR#I)?zgyb^4@etsP?S_46{$%~_8fOufOta-6(4gB^wGYOxY_k=|8qrhZdf zHYGC`z}$6`*2K?4k*7rJ=T%ap^5wy8+nru;#<%d;|90{zC_vvA(%<3;fX-=6rT!TL z=goEjeWNDM654M)28$9C{^N6?#_ulnGt46k<g$VEPZ+yJl&;4WsnI`;^audx6gwT-wd$3GUO=LT+^(hJlpzpchko9Rsh@6xl? zFu}Xj$TM2-;EU`E+{2o~X+| z{_YB*~b1kRkfR6BQF#%WKDHT|vaP9!0YMXeY96jaH)%; z6aX)}i9z}F_o4jU0KJ>=o`YAu(vg=xSPB9HG-aZA`G0@!(=iw&A>k!u_cOd;V%WME zt-?CYeCO^ncd>{0O5_0=!UOBR*Q#oh_zw}V6~Df#%neAlevzYx!q~l-xwgVmnfQH- zZY2>5{l%Isce-EN~1W6 zpVhwyI|Bs=TYY+?e(o|Bj7KB_fw;*39K#hv7WfXO`1$@kqMj|#8ffBdxAEqGc5D!w zSo+i(oXd@LE(XMY!DKpQf3X){?nW6t=*=q?lWe~9TGGjZIK$>1G5&J^s1yoyV38}s z;X{`@YXe4r6It;X*c1J|tMK3D!MpwN8#}}0GY5aWT{^z|#c)_!vBKI%lr+yqrjq~`M-RghWSPo(xi~}NC*Go$|EnM%QKAbz#DmcGa#>p>2wYdi)_wma--IiSwv@L# zqqZsMZ$th+8x9_A0=B#w`{#cB@uvTLRSyNbTRsuebLU^{^VdI7csIEH`IrCwN*m0~ z@@v0=cKaWW(EsQ)SP05rpZTBPfoK1JypZ}DWF=+gl=AX&IV?pbB}#gFdIvM#wtvK7 z15V6Y(;Pf?DK_BXS!me2&fQIW6JLWdXU%gfCD->-1_j?Qnr>mmhcgD({lh?Vne5iPAxI<8X8b1Fjc zI%_Z#>LGJ?GT>~OU0h5H4tV#@;ADUO`|2tqL;kbWtt}gma?dl@)|Qr#XN`VQkxY4_ zqN2ynVERwtv&L)0eD>+~OFg2C;t~?l;df<|mDA)PJ5VeY4}1;9yp?kXadGk1t=UGy zqaA2iNJzXdDRlM4`~TX?nNeU11l-(R_oAEUyRvH{2wjcVnKj}?Bs4QnEteN^)g5hkl8ZR zOh0~XwiyT})HJ0^mGG905$a!@!_j?;7=EYH3Y}rxl8OVkqykXf!SdblKg+ec3zpk? zEpX%Vh5!Q3g{@KLn3EoN{FuQTOeT69eRr?}9l!pOlvM8#!;C_lR7sg(I|1ls99Z-) z`2lYKu_g6j(5GJc)W;sGUmG3Yg@`4%7xFlY{ab2GU!T(QSV@+wj-}kd-6<(4DQ;Ct zO3FPKkb?D8l@S^=24kNhta491R(HvY|JzSYF#~7p)wa`K!vAG&6=;3Z)9IE6GQwp^ z9VR6oSf)=Vilaw!cjA^Dyp~>GUN%yzsZLEvS#~@8VpanptzfqUCAt{^e( z*PeK{U*|L*dVH`B5`eI==DFtJ)%}kB)shz^zYp`$_}Q2OlguJ0VEfTJhQql)^hc?&A&mV0Fuj26E7rR zM}sMs`h7`~SZ{%K08Je4wDvQ=j4S-MDB^Yqb2!L)O6FFsFu>CuQBP-8EpO)T*S-l1 z{D66zPo-S$+OIvje5N=2GTd6*hA#iK-SzEAr)S+~yItvk|>HULvk+i}#8-Fiyk_J?xV&=Utqpk-z;o&N>$yuBlV&$|`ovvBX1=<*{0Bv=Rxc?<)PwYsw&etb(?8;d8(c0AoiI1m{iGdeTO)vVO zkmUp59Qt<5d4jJqPFOeYJy2b~=>Ot!R}jEVFYj5JtDX8k1>O4q_2I*ZD#!UC#S)=| zj)s=8rzOi0c#Bs_tHg|E+I=_3z{m)0$m~n<^VwuzaME;;=ZC(fjScZUJ6?vSf~l=3 z##+Ue<3mbv%<3@E8YA5gs+}jwUqIY3|G6ZqCF8#jZ&ojkx+wk9j4M8_cnU>K=OB|T zBP!~{q*Fa&)jsWU5EDoys1@sZY^o@Foon^^mse<&YhaA7VMY=pxDD7|0AI_fA`r_U z|N9I9IS?h2@gxh}{B`<0&WH@&3EaD6;9{AS>-fage)j^51B;xP(5{#dlX=)V!=~IF zD+mRt>ks$zn?7p(gBYSZN5P#VQX8_%z#F)iHmTlPEYg-MPy3T0VTF-=j%#!)aLD?X z3c}6{Of;oWQq~kCb#BT+CO29O=0~Q)pYwD zV_I8L7FopizUOI^IqAudrsuXJuv+}|%|APmlk5xlfE!s#B8Y?ro(pRhoajrDfo5jq zhFu)RU2K;d&-wg59&2npEDUR6(^%Z!zH|H#;%l1MVV~Sm+gyddQ}xc~d>wAyuM$l$ zOxYQ%6%TtpU`>WYb6&f6u^glOfp+?~7K^D1J$rhm!ZP+`TASNPLV4r3u(Fj5yu~z6Va+JDyIw>iC@fpcyH$c=$r6d@X03Y#+$LGoz|^A6VJ=~xJ~0pz6Zd7RGNRqM;gG%z=EAjs>HzxD z(b3Ah%x(tg;^L~YLNic~GHs9|UJ3bgI5QH6_!r2l5;196e;wC-<(UXB4w`#sA|+*q z?c)6@>h`+Pw&U_vLk;K=yyffS+{2EsQ!KitJ5gOSUA#}{t~4=FEhQk5GrUF-ZyDJb zUezZjCnp}*2mCZMIJ}S-8t-ETnO=1AEt#M0Jce6EUfcOcIb>GHd&@iD16Z|IET(Ip zqOhtofq~7;M=N?@hGL=FAVUm~Ne{*HR83VUtTGm!3Ipllc~A)x@%{UA!K((~3dZE( zH}1L>#cla1H|zXKIUv{HEBK^|gN=O$luCfXo7yfQS$^9rPWsQ=1<~rAa1}JIqB#Ag zQ7;w|V=)FTonuVzi;2AU_eDh6$l9O-=c#PW{gr>_8>fahiM0&SGW|ZLISHcMlKo&U92PELPpGw(zz) znXTtv-(V-cWP%<{<>Tdk69sutspuHnUt9pypyb5STpJ9s=F1O_jm?b=rSyIl`SvGF z141rCka21=^!I!JQ($xub1Of;g@xT2k_n)DYS)TaqVn$8j|I)=7-uBrv?zt%P4pF_ zKJLc1I+hROZbG4F^mQ6-YECu`{8F_eCsF4GWfkRPD2D=yd%v%}!9(*uZdiG-cTpsV;`BAo^P|O-`1pDp 
z=XA73sxkrg8xxvfzAW97GONMq4i8W(W8Oz?cr#sXW@}WeGB7SUuK2yllY~=Eed`iZjlKjkzBE~jsg339YjlS?x)Q^ZxCm@VVs z*N+q#2U|>(D-LLFQ6sHT#_!j$8B22Bu(~@$>NuP-`Dp;g0jX3vP(cXe;BDo~ctFgG zr-1PGHoxN%t-2SLprEEO6#|Alxn=AxlwSZ;-TI{k(RE*z40uOhS1J4aR_k2KQZ5T} zaX7i@No&>}^ohEpxCP7>O$h)FlMe91Eixx<8gJ~hPz5gul&D-sz#t`)J7-idVLIB| zUl_#~a1Xpuows~XbU})8ue&;7klZF&GaG;G8-py>v8dX@&JWc z1`8cgTs!GjrlFvs&~&Kdu<3ZT2!xIbN!;}F9*8uCG=R@`&S0vpJtmm3CsPko_20jYyaXz0?{nV=7@K45OX;h^WyofwwrSIK)NXS@;d1N37mvmVfZ~~%fk_K>; zf&@<$XvY0A3Y2Xckqs@WGB+E<4B#jlDS64{ zT*Q7l3OdVt$-?j^Aq-Z;<5*YzYX}lB z=bj=aAS~8R#!jIE&msP;gKo7sp7U5H=wG2?jtnLjw&L9%xVlOa1S@h|+1=6p<)wJj zqJb+vTq4}~GC-}DMZWd4E4h~J03c3~R1KZ44;p-Y5r?8(pP=BHjcF@NCAhoUM9t$b z5|jELgl;gq4HjpzM8-OK`YsI3TAHawewn({lc7wDhKd@Jm`GW0ZZjPbcMh#G?j2E9}A_Kwew zeYB9-RJOz>l~b1TiO?`ndz()m)N6^X%l2levyOsh2#t%gB9^)AIZ!*Lx!*&_#8f=7 z&=N{jXxe|*BbWPPW4f-c(-R*KC%A*F;ka_lDamPb3Q{?ETSGX>Eb>2B3KH9darP4Z z<#Ea3nyhJ}49U1^c=gN|qp%F;JM?_5WQD$au>wUT-*nGY^0|3&^GJ4r2C}*y{cZJ% zU)%kUrFCkeZ@w2km*GeLb_ALGbnDh*nS4BD4~jbqQT6pcR~@cUQBjFuk_ph6srgZ5 zfC2XG04>>u!Ezig0zi#dKxJM{vE#ZHF}uMVXdfu&Hk@w>h3taZtNn$JPAs(iE(*BG zO7KX*wF&)r^|yc{3=@KoWSE@F^)i#--H$9SBa@Py-LZU-v3702_U=lh16xAj&i)P| zs}9p<%|R?E_Zq9xHjE|aghNbl2tX4OFh$do;WA7jm>-w7e(;jz4etf>CNnL!ib>;i zru)$-@v_S@WT!xO{K7Mqt^zq7@S;bU zhI?(C2s2h~@0#(3fL#+z+?qUA0VS=N~`O*q`Z3l zT1rmteFp#>Qe~r!l-)t0JG|oltyx;;6+hV&Gr8|`bIk3Ms>%Zkg<4u>b4_-e>%iaz zT>k`Oh*4srnQzs(Mz#QBSm_6Y(E9cF zhVup-GNNa=BO{PvDLF&h<-(+@r;L`A&n;JCI{2RPTH_3K*k)ty$BgQaUL^V#966cH z9M!*(6_pdvq7V?!M!B8o_K2(T87SPW9NAwV7e8YzmSZr%#>ZD(&%VCO*To0-)g1cQ z$IBuKb=?cTSY{hfMZ`Tzc*((#v8ae(^{2eV+YO{~Z1T1f%P!5|eerrtgur9%u7~UW z&sYSN&Phub`Y5k2zC>q9j|hcX1Suv-3}&*t#)ccB{Z}=H$!)``?d2Xdn-h1xtp}zg zNjTh23;5hA9|=VZFg)MzYOhQZUJ~Gn7PIWLjNzt@(amf-o*FW|F9A$V=MY;sZ2AF5 zH@=S7`H^uujsl1wbJMQ`Ni%4i0sN-zWV4Rd>tuY|KK7F$4yl{aQe<45b%xuZs`}fp zJm0?zj1C;((3umrzJ#=F-gy^+TwMk5ve+MXb}6hzMNXTz?XQiFiF-IZ02Li0((G=b zBR^C??zCQ%c=)j84K|g3FfQ{U4Zb``u%akS$%nuwFx{9zIrZ^9`Iw!| z{dffqy0F6c0iQHq$V>~5bd=J0^%qyGNOkct%LYZ^dlA{>eLOLsPQ4xS+D)jGUWj`C zxnhw(M=~T>=|F+LRO<8I`(<;T@Ay8_$nb*Imi%XpdtR(7*0X!d{fx}x8iWI0-3!zE z?W}bZeK$((9h3~~nDeTm!1065wQzuiOM&5eEW5xf9Qgo6`VErQ3&N4!=KH-emxk*z z!%X%Szm?)6%h46GdhM=xP+^H{#~Tjr`EeCx^5+@ktI0xNmB^Jc_IBD?WvU$4vsuS2 zR-SsAhMt`qw1fND{s`nB)T)+qWE(%1XM1ZI*HxxG7do29O*_zK*0dAb9G|ZeYp1T~ zQBVnKa3v4ixvg;WyxIYV0xhCp=agpUig@yYidjs5{-xTPp?i0>b9XUVCl?~X6<0p1%)%waTwr_2GaUc$(y zj!~Ule+8ZsV!*F<3&gFt)XMq{#4ZfIfz&xOUR)4wNHH5LgxWZlA}W%Mg4O-wUcYIFE1k{1iSC-Fj6cJ-WBY z8ePeEWdhROJiv-pUe6~Wnh{7G876=9m`(8Q)V~aee*)l=*V&w=sQe@RA6)VAyLsP{ z{i)EoZMb{YJ@HD+Pju#`F_5oGi;zsyHz#&jNGr| z$1~}z%PNjva3@G|bFfY5e_o;i`QZ=|QPI#2PwNt}LAq1QXTgrtTYTjpY0I;%L=rmB-rCB?{OxtccR~UhYp-I7skk~hvZ@1V;`8xycE>dh zsYj1ONUh2fQahC~eXil&cV`-O?mnkpExR4orvw{(W0#>{MOR_>UEFrLC)bD|@0WWG z4$C>lJ8nCt*tljR!$l8BRK2JV@5Q;ITHZ9PzoQ4UzatCe%PEtLj5zXWn4@jp{9Z(qex^PiO)B;N{47F)GfU|}i@IL)7pk5$AYYqFqB!*~_RQwbc}ij$2hIQ)eV z^0mw3WqC+QNGcg{4kF_FHu2bbbm7E#9?XoMr{7TTT1QTrU(ht~=Ro=;xk)z{$zg81 z#hRb9r(>zVeWS76eZ`mz+~B@6`8bC0MM}gQ%v+6qNG^vJ$YIWFf!pVk3{oh=Em3p) z)U5pC*QFchn`QlJ)-B$j8i`bchB{9BgK~1j5*!8HcM2CxhmPyd&`hUfJOw{fq4|f2 zpYc=jF(j&4D*lSLZn6Po{5idI5Y@m4+RBmVNkdQT6>*hcD6iOFP9hA+zbvGG`J8>H zPX`B^%tCnxP-}q%@f_(E4GuA})Rjhwx46b=^H9Q8-zKKSj-u8=OPB?HM|-wSR`nW3 zQ+A+lU$v|kZ%i0-U0wN;*X#xq3hD@4C(nBm#lvD^nBl1zOoQ~!`4iZxx+z5?i2CZk z)d^42(PCzYzOqD5PA;!c(0k-igEKPl=}%kq#IP?7|z3`^X}rwoc&S@#x}x)3j5a^!UuLAe}-_-dnbMJtG8O z$~uBQQaqQR9`rsOGq=QUkXt4~F+e08O3{&wNC>|v3YtZXT~>3-lJi4ufP|(FsoYF+ z5L#Gh1S^UYOK4c)skHUy2*-zVw?`?3iJ^mKUQ(#{pUi;%!5x6GNxQpOSA(>4SY)L7 zGP5xEemPx7H=9wThpH(t>|Ac7wC8ZBGOzkPPu2F+z%|9N!dpZi^>n>PRY_Zgj^E4u 
zNq0Hjw0~b1<&i^wv#Ufe3i^m{Ci&Y^Vs_`4%5wS!XxtheS1!p79BX}g;)zcjp{-RI|rRZ_r za#128A#2TE*tz2iQJ?j)SDk(;Z2BM*wU8l}XC@Uf3@QJxnO3p6^|CMrJRX&eT_c~U?m z9h&;WO;z_&9%F85vdBJ}V9Xj`4@c9#rltsyLFKIWK0djaSe2os<&k%Ezev3ouv9Kf zdwrJ!TEE{-!@3$6>J{Xkujf1AvC9_C{`q&#OBFc2>C*n@z%F@5hC>f~eYa1;xw&b{QQyb>W%g`<3YMlI*SLg}p?(Ssd z6xw6v-oKfC?5hjgXJlx>hIz8m(Ph|6kylx3Wp(TvlvS!EiLJP8{J-|TJ09!rjXP3S z6tamzHihg}ME1;{Ss{CGQYa+J$S&k2p=7VSVcu5AUKLr{dq3x6$M^Tw^ZfI?Uizn- z`*wfMxz2UYwcgkJta0)4m((r^13xF(pz0atXUw~w*9nTcs1mQzv(i>{Bnm2fa)3LW z9sBPr;=s^JjAB``g=RjLiMxK>5RP|1WmCjm=%3i|Z}{<{tnxb8JJ}BH3I?xWhCC%s zw;vBYcIcNSTb6heWVG?eYlt>1ll0!YMV4zL)p*ffGnKa!RD7PC86E|lHU!Az_mb>K z-zty?Tg|^!QCA9ANE0chgUUWo@96A&iyQ(_BG4vyEWUzyKX$;t#*Tpe9o|3#l-Mgx z7D`Y5@~K=Y^Q>HLt(>L4o6>ZZnWjWt6G}aQV7)n<{?r!BM*|a-NWNN<=b~1!H}X@p zYuN?&>WSRBlbAV7p;uh~ytgt)ddvGlskoS>=kpKl8u1+FxEksTvpDy!A>m7_q!moQ=vJT{sr<^AnXjfnSGIb=I0SR5`^&eI&6c;X9qWtV4S~H66=7X!2zLr2M@D@hE z(0T)#rXzkVikgJuE^^mo7Insrv|-*54>&Oa@wNOmtkzk46tn1T|Kv!#9>at zuq0dbsmxM_53K|3TSdi>`<0JnQnun@r)=(Im(LTi)u0)6px-098HimfCh-oNP_ipc zm->quhRm7YchCx+dar7_Z;bd<+VaK)dvR$go95q&`F5uSC%~MdTgwldTj*U*GlrIM zqR$y#rw67e9qU2Ce;Sz+G=5CP5c`;fE0gcX-ttOFpu_9Or)xvowX_CwCQWcf_WGK* zL}PW%Wd$keUY?nI#9nu|BqN1Bq``rqq@JFTE3Zaav2bxlvWC{pC%y3Gy*Y)~l-F__ z_aX`XdkwMbjH;}bwE_-BLV$zBN9privsh_K*i4>V`YZQh?b4Q7ZIIQ*+k`$V6b8>5 zi+I)H1KD&`)jhhmzGzl=4xYv(`K}nv5%|KZantt$N@Jms8v}>*S>#|!kTIeLqqSk+>#H85ykBE)XO!Z?bim*xTt{T3q9uh2NhMRLj=+vHa!xF862g-Iv#N zg>lydzFVAf zjaB3YF4rB|N(qoVZI_bDopgS^yn)6b8d^h zX&=AhrGoLLW<&>dp*erQhSkW-faf#x`Keyn*7N8{P8X+VX1;%Zf>i0IR{sr+!&pm= zcA>9qxJ@eAZCItPbI-B?tQ@W$-W17oh3!5CI3CJdxdawMZTnDaTH1ERZ?*or@_AUAwdr+NKSR1{X>x=GoAI(>i61gR}S7_pjR(dmXO=& z&w(W9io0zSD1U*=k)3*Ln{m!t9Ah2y-F}1+oVB&-k;6&|zck@*QP&OZ-OSrELP02C z7K!V&K2w#Vnx(^L-o~UY$J{+QHN|Q--0(iqlcyO}HN;zCdu_-=XOP`7si}s%&yA4N z*2g>!FR$B8H2{1%BM3Eff4$jmfbz~q?{OV}-~3jFR6<7P{q%^LfM+Zb!MM^SF&zIqyB0%{ryjx9) zcm)Ahp-KjuX*0Ey=ZY`D3vM@w_ZB&XZEmcMBX&`&JoY2cVVmiCVj@?K^*E9_myQ5h zfhtF%T|1q0Rr2L_-rt(hXE@$X7M0bq;OAKhDXB036{#Nh?0cJe&G0JzKmp)MSXcmv z6J<$BNkfoCa3y$%p#r0P)b-a5KrD)C!Lt_%3%|8wY8Tsk0t`i`VVv$=&6SlE*P3)* zE)~RCVDxc=ejUmR?0Dt__CSGTY5$zveg}&_(U=wTL~)6T=ol`1&roL{?aWDq(If%z zv3^VV)>`>i3+erG0A8`3tLH-GS#-~jgO>W&h{yi^8Y>Dm=k0yUgBhgJ%G;by ziQGsx*5|Ol;{CnpSI=IB{s7F94yu_7aTnUR6}v}Lc*5iL;@gql;(QLPzMf_$d~bOv z5%p98W{nY=1mz}!&Gi)!L(@ZeaEDOI5WsTAkYz!L z_VO|c3hIEfkP>3Jalk>?n3|>ysgc7{(v$&$Fv|0Nh;CH~MSFdeMZ0*+r~hE-D0l~e zh)(1@tJ@ct=&1@bpZ<}l%}1`LR_dCYQa7C7#R5SmhqTB`0DkBr04TrWwt2&(nI}Ev zGy_0qPrI_arT<6=K6-@0E>-S0)+$jReG@e%P=z)vEsYJJ${_ZMyO|pzIm|g0Ybn+H zweEwJDuJ@UPC@K)5P;q}9A3KA_b$N+z%yq7m>&U{hv>&TTnQ=wL(&l7^JpWrD+f8* z=3H}Cm%&i+WMHWvnv%fCT2)iP(=$7g6f!2j-s%Fd+CKtej_HyN*WPr+TsIKATLE#l zwe}G*oKo`4dd9$Li?g3}-#_-I<-s~$;?3qK0LF2k9+{yDDo+6f)ej_pU;%5kIr;?_ zql^{S1nOdWl)zbNulE|%Re84ne)U4`%a_rG`VDRJ3q?*-ECAyl21;7D0l3VeQ;M}z zxtvA2NVz;+_m3d%=H_Muzr*=(fKOm{S;SV!)S@@Tv&hkQGparUiH1_ZB@zScv`l^T zt^C{lGv4Bu%d4x<>5zFE+e}3S>`H7F7%>sVa|h~YqegMYAT4g97e!=NNQV!rMj*lh z0?^{f2?=R|ef*I_rR5@HEzkFoRn#1BV0v$p=?{HXF5OLHJ)~VUaVpKnjJC z;T1wD?e_|}xQ&ol$g{+A=%CAe|BM`%07#?hMW<7ZP`jXWwy?0U_b03VJ~sASXE>Fo zZ_zj~gJAhf%R4<^;*x8utINv)(JA_8S?ksTQ@#jA^k-CuH(gvNdKqdTZgE=mWte!* zkGIRAia|C95y{%VQ1^=B@#Dv{hmgRlB;^MhwExNqy3uuF>Qp?%w$;-2DV{8nisrIn z!6abH?)KC0GXua7*DS2N0}xu5)1s>yfFx3Y9gY}!(>kjF@Skihvx69)@rtp+_S-pj zjx)$)Ir{)g8<)?Na{=3XzX3-+hZ*V>5D&SO>*fo{COQX0$oW)&1GS*21R-SO&%D8y z|J$h~xIQfc3Lrr;E{Ccf2csqcqpnYC%-KfuwyNumG`)V~Y4*bJ9PyR0RQIut4@oeJ z{b*y)c7h?mT|n;wuhnOqJs%DqjFx_*2s;#J~a2UI3Z2T5*<+P+a2=mpUzvmaPv|(2ih1(niS3^{Vnku7FX)7+ z`JN=s90{S2 z22iUDlh+I--!#rTru!h0Rw9I)ep;6MI5*T^9uly33#ZV*pIcQ-Ev{40$tz0 
zy4WXvZC~mUGKJhY50}ZF!lQS@ZtJzonf2aJ{r0Lc78634**To8&H@1ZBFLvt8v#6X zgj9N5`Q}2_N82Gu2&YPua0th-tCiNQ;H;j!p)B&JMVpw7ORBC-}j0uLn&h^Hb=W}ApZkGa_Q}N zc`xBc33-))j?QXuU{^#3$f&gLy5zAxu;L_k?w^jQvg~xm-%@zonepEr{md-DU z`WNyN^-Tc*0ZkTZb1z?BT)1M~bTgrKCbE+NDTw!$BKZ;H=_3YCvG3KV;S;-+`@25i zN#mK?uA?cO+JAxLRll;(uA6ll7Xqxn)JYgT7CTKq51gjH?zxz!l}`#4oTR0tSER5L z-U)C)&Zunx7Q=q?)Sj~?{2L@a@2_7^+-(LdNcPqskfgC&6g7%dW@l)v>;yM0&4SZ? zT!7hgWr%PEfHGAln&r)=FvCF@%V=KKwqjhqvA%Am9f`5`rF&;3}Q&`6NXF|MD7s_YMD$8Bxb zmu<9r9~84#DC{?P>~Aa*Ie-HD-DzjCqr+MniB|n(MPAU&FHt_IxhsnXAp$C|`=JUs zNNLri-+ebP)^BE9;n1#aLT}RW>CT-y*@J1m0Q>J@d6xVEO9Y=f=nz^T*WG#nda-O( zAIR0`@j8qdy7{mlI=gqyb89Ggw7j8Zzq?=j@S=b#2i#$;vaL*QO;0R}LyCL=(aNDp}1c&A@4 zHe8GJWE+3!6~|`u6e81feUd)gp3zue%BQTmU%faGbs2OfAJoxh@zF3bTi6*OFuU2} zSJ0y{?@1)Ixt!FOSiF7-FTdZj7(~(|g5kP?l4lq|yLp<4Z3zelU0>1LsrW~izHSEK zeGo|*3+TMa9I%F2Pcgk&%%fxF&I~sjy#f%?J*q67SMdq6Lt@Fpy!;1qlc$ zX3e~ppp_Fqn{V6zEnU#9DMtNJUzNfV8st}PHx*X;F?7EcL5)lGObh;c*ZP|@ux1}! z=8Q`@!V6I%T!R0(OKe+fj-GO2ICMhepL<%q6tSBbKkgb>10069Fh(1A?caEW)T$$Tk9tDbac-FMY1ZL z=U_FC0O@bwv4;YyN6*C6n!^IpxrvBApuQ2jW(5Ax=(Zr5P@m=A-w^w*#?nQ-vwm*R zNK1G6N#H3ct^oD_Q1l!x_=TV1fBSZpbim=jLJej)=20E&vQaQ)evd~kf(K7O`l?4Bcy^@NB-;C( zJjaF%ZvhUX^D5e$`~RMJ!D}#aE;5dyf63$yZ#VB$sA4sb^JnGQ6ZR)=EdrfV-1qHw zt~ozqWp;j)gWUX4s=B$uP?Y^SZ!g7wAGFriO(SgoTYm9SM{$dI)bM{qT)P8=V}Xem z{7n3AbW~~wGvL<-@=Fc5*7z^6(mxX?1(Y4u|IZc2+Rr#^t-Ui6- zia(=Bb9V+h&5eW)r(*gDE3EP7bFyZz(wgt8+^5KhrmOa9N?`KQ2Eo3NmaNMXfVu4aaSlu<#sSH{=1M; zJY#Cy>9!VT1_J)?G?_9Gp6pPH&5q=I#KGUKfy=CqCQ9I>pr+#dm2NI*XA8CpgZW`S zr#ntLf^`*8j(mm{?nZ@~5c5KAm4DyS%`#vCWL{$N%|&PD(F|Zw70}Oh9^2wgbl`&| zkKh-S>GSC3&ImXM6C^h%{xotoB?JYLauS?+~Lg)%`P9Z zP!V6nQgaz3N=m1Z*c;;w=tw$-sUN>K_%6HAEq0DfEbccgYt( zi&By&ndx?U;opr5c7WIhcQeNB#)TGt!v8tyQI`Rp1Z{{`>)C5NgW`rxa~nRhqcZmv z-hlS3my*UVx)j=q1w+~2O?TAfV5IyiKwqn{na}T(#(=OuQ(+2d$#e{-+qvKQ9~;9B z9U5VJrn|gLP9W&|X-OsKb#!&6AAqUH3zYGb@66~1fjm;s$B!RLB2y`Mesfr=|74Pn zF3awSq03bLL>?#~%lseuya}WOZ&@sk>A=ApyhVUElHE%>c5ELS4>v^H!|q_ZmuonX z*1;<>Trl$sq~Qm@Ira+v3Op({O{)9QSMO-`f!!K-4phm%1Gbkv`mc{RKzb>y-Sxj+ zl~YLmKrgtziP0Qg-a~`3ehVIDGSksx;id-qbeWUk=&`x~0pKCC16ATyip)E@ zei^Jj_6LQ1)W@Os_Vhj$5Qu(#RrKjUlx$x}*buxwAox%V_A&XKsLqM%98(IQ zt|zMVUsBoq-N~u*e}C$DnQ+SPo$Lc$$Pl*;8NahUh#%$~U$L7Qf_|L{yYQ_p^DL+^ zuf2A2WY-iPS{LV7$QL11ytGq<@QKERfC>{AUX$0H&Eu`03DmKd0xUpCP28a@Y17GrgZl85_Yg zvFqoaMfUXcoc){A*fn7q!)YvPtk? 
zYa&bqOz?eC_w74+fw5pj>v8@a-26^83quqj5ZfgZ6!DCMIc+!e@qV zCy6i!7Xp`#_>EvJ9kCYosev^KMrpLzu{hTFfNw$f6_4Fp87ELl90Hevg~?o^MdL{J zE-x>Ku5AeKvmJo@Ks@lOu5p-$|wt(8UxOaHSMV5~l~IoF~*6m&m++5_3_ ze6pYn=-h5Ys3qk91rfu_85+wbrJ?|D51-$(Ek-TRL}$)hFup2GS;Pzt1k{8ubs|98 zzdE^StPAq#wQ@1oBjaaq>2vy4#mZLnvo2mF;8F}Z%3LVyfwugTI8Bh`4Hg(UpmtPj z$ua8tg7xvi4_SIT-lfz}w+*AT7S{S~TW$3xLlNCc?Ki5@pCTx?n?OUu_BRs>f)+$tpf zJLpvZvUuD|d?;*V^MpyeRJ_l9*p4Sgn>voKLaKe42;6$v?QGqTjb9tKAF{MU&CN0| z=^*jG#Oo+!mAK?)6KZHecxUz1=@;RS%3f*&#J#t_P#L5+*yF8?v*RVpNaxtz;JFRI zf%5dJtezDR$)SV`B0Yv@gXy#oS8V*EEpY;`N^~gk)*1&w72xV zI!LW*$?A2E^|B533rV_)={zo580`~#+viYdKT4$i6R`OgJQW5A`pWWPp~xiK<`fvE z_*Tr9c2?nNju1QwU7?kYieAGm4^>I|#PWCcuRM2^*gFQQUWWO+i2;R1USzXB70IZ5 zVGw)JWUd_z`$D$Nu*RneMgf)*iC1Yuc|lezoR zzT}3sk^4)^-%+Axrhn1FsWAKVZ}GQ;PSG}Zvf=>v)`ne*`@)25O`}A(!CF~l4n1me z!8P57B2+1luP4#vA+DjHJNLqbqC~?_yQA~As|?o#Zr11FVXR*6&hfDv4~bA6-A&!; z@A=xy^!zm}_tqQjaQpP>$yTry0PgZ{z(?lrdG@0aGs=Obj8HTAXIHkj*A{-%+;_Q; zbCGx-=Th?-QCO8jdoo%oxw_bCDBpyMa$<$YYik(zdrP4|==YG`Tm`@U$zS53Xfi$; z-e}#yj1Lc#jm_?*P^V&By|_XZFw@(m%gFro3<`;q-uGsU%Amk2JooELU@YW)#XZZ{ zn8gX)E!KQQ9Tp0ndFPxVa4(f%?R6R%&a%&bzrG>)6f@|2i{Ldulm<=}F~%@~^7xoE zg)*{XG>!=G=$$6l3u}4q8a7vZy!hT48^c&-E)ra7U?0u6pt!`V+#EIQ&(C+mo66f^QvsmQE=~2X*ca5Z~&{J%+O4iMT#>8T>VDi>1}l z0vZxe<~oVf#0N)aKFtx6T^_iC9OFnt!4}(-T0W$y9oag|XCnLeg+OH)>q@1+2{nq= zZP0wDRB5a5_ziMpI1TBT5n7*pk&!@0JJqdgZa{NW0TsY)5Of( zYT@%MjhZ64{?Ce9&lP^VUK9l}?0dIfhBGULtE8oOd&QZ@Qx(gBFEX_qkiU{A2WDKJV)6p^X zv~sZ&_QJptC|{lDJ`4sCWR| zVS4Z76LW;8#$6Gcy9gf8*zl*e4HY-uh zM$P6KCcIy%OT1d&Ep`8nxBEt?*SE%yAGDD zeZ5s(+8qvLTem1)TZ^N9%2P`=M?J@&$0b2OLiGGIXlJ@wWH@vA{yniEvER7)QP(yC zM<_ObE4_JA69g(=qBiI^(HmsGvSFQje);l+OI2ZZaBs!z_-><`YZ-%#73faj<4bAX?Ys8pQ9a+E5d*G< z{P=}MFDu8nHtn2yI4@cGt!!|ggG-)K&@NC44`8985m}u=q~cNv>!}Z)z? zyWwXgm^AmBqS!Sr;a8{i7HgCo_kCn1&!Nzj5!%cc@?qT@PMe5?#<|9lRSyxDiF6z4LIZ1CtZfmdsw%zUi#a+;!E2&wf+Y`$9Q4f>s7q3-Mr;vcwL^7 zZ0xvIiA98L2WN8E$ihchreO`mDxbF{Co4&~U_X}F_c6Z;qt3WWe@ zQ8eG=#_DE&asTqF!CZ4-zx$K8p~&Da+pgO=6Mq?v5q}ouYAPD1W~VF(k^Nu5yF)bd zx146uE7%$1T?bEPNfmOB4E0x;{9Ub%wVm}_sW`7|P1lQ6bxV&kHv7|ZjMGq$;}Twu z%eYIuLpbwB(tkebqJOUAbuV6^^ccab!VKiP-N*i}jfL40m4e5lbUb-w&`L`XasFcQ1@4PyGB=V)hjY04 zd6qm@rU<6YXXLV60>m@b1WYD3S1Nozr#9qLl-oK^xNoo`El2wb-WhPCwr&>)X_(p= zm6?N!$kpdxGE(C;u3!H~W6~&|$?w!gUi!z)9$}2WEd!sg>cVjD*z)2@kkf`V!y8U$ zRqW-tbbiw_-o>))id>S7nLlJZn9FxAMSsXA{*8m6CAzqRdJl> z&cr*fb{5V=6#t1|yOcnM99s$ zjF~sqk5#buN1Xl`=8FBqD$sX#A=QzPelxGVw;R~m>&1l5n~x*^_<0o=L?ndHG$qNg z_KEBDKj{19=~%u-Znr{XQs4YR++&IBo)6Fil(Hmve56ji(0ZWeA1kXOk1h3a1nc-x zErPY7_6OqpdS;XXp-J{uEvptsWZ6>CRKZV-ol|9dbG`HSwkhy5>193m9ZDY?a)|pv z`EZ0+smWyNR*lQ%V}Z3sEurBc=DDQeJ|1z3+*M{fAu_t%7+y+=`Rp`-4fKimD7kC-t0KH2;K@h%ebYj1Cyd(l_^ z^KZc*NRW_{Y(CxkuWyEi_y`$UEWq^X;o%-0-hC95pvvzUhbD9QUCIj>XhfIa(*Lii zCE{GTMk3Dm&z~F`=gGuRR{S3coT&Pt-EnwJp6v0%(l|IzP7b&KKABHW_5U_WC+6&6 z?;Q3wPVD$$X&fBFCl1ViyPYSV+dqEG{vmwgz#QzIe`$qyNF6)WQ`i}%oeDxDw{T~UO1Suy$%0Ch~iOx= Date: Fri, 5 Nov 2021 13:58:09 -0600 Subject: [PATCH 010/109] update documentation --- doc/source/user_guide/figures/CICE_Cgrid.png | Bin 77699 -> 51536 bytes doc/source/user_guide/ug_implementation.rst | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user_guide/figures/CICE_Cgrid.png b/doc/source/user_guide/figures/CICE_Cgrid.png index 659babe7ad33d68b6406da72658b36953b0a832c..db665ff7261d24e6992be012254cf19e57a8c605 100644 GIT binary patch literal 51536 zcmeFZbyQSc8$L`*sDP9rDcva`JxF&qLr4ioNJzJU(mm2BokPcvD&38A*9;*s49#!w zdEFBg34qoC8cgJB`4+d&dEi~+1$cP($eXTm8RTl9wDJuC@9SF7G`Fe 
z^6bpL=7eTuy+dp)xGo-=pFTxtng#TC40ZH({50#%N&8}Kyz-1-LI4;wW#BNKy8scD8)Ihk3_&CCW-s*?wlZ~=Q%G)D9+h~bb&ox0QvOlMro0p7_n_vwc zMQcORiNu#?0BJdCD}4oP6%`a# zg3g5sB&Mv3ODi3V6_1*MUl!Yyv95~JItJMbuLmdFWu}W=`Yj z@Xpay*i(%5PYq$@@qIBDEzO@QZuVld`YP%)Qcf;bGypBU|1H#cWtE-nub4^9t0PA3-|F62}TadGo<@$zyYYjC)FIl7s9ayYuu{ng38 z`;oSCwQ#X@cC&SIq`B|c{Ed^ln;0$a{e}MX=dW?P*;@biN{+7ox-H}la^0Wd;^E}x z`oA@^^0fVbXm)?*uV#Pl>#xg+-ghRf=3;Aw9Ps_H#Cb&jT;PAo{yRL;`(A|AZ9T2t z=}X%>SUI}>X~X~Or6||`_0<2hslnfx^78&q)4!eiS5r~0drSSza(_+BpHk$!h~tWK z{b!EFahF*!Kck=kP!yykwLMYy7O{L6HZD4jdVFF%yHY!6BiU&p30~8DY-Opqt9whcn#uu4uzA?;@ud54Q*&Op%1C}PRQRF(N^$>(-iFJ@S{M?9NwUU;@06zeLy zn|unrqAK(`ZSyA41}DT308mgd{`2z`2cx;DjF{waMHmF!7}%EQG&F%IXxRVy34|ew zjP22xqeQ8`Pi@6D?hB8 zTv+dt&ek!R_Oq4HHyK4mJ)bsG{lXnL0{-d#^I#vx@{lVbR>G7HCSX%M;!>Aa*gdde zaXFxn7%nf*bP1$Dx(2bdj=plXt*-&Y@m%eT^lgS_9 zlBK&25v;l;}CaB?qAZ$K*^vLLr#Mz8Q~Ta%P8YH+gCE1mK)gY+ul}qzoa_b zvPL=fG{p^97uCzV8-t7+N}-?-qtzbC1yOYMYR25^=(p&+U;;pBI}lByu*e%M|@CA-nwe%b0_0@Pu(Z}-x57h;R=$+}RsvxBwp z{l-HL$27^P>urpyHQy~Y`R+OKW~!*-F*&G>af;ua7Zxy(&bcLq7K1H%GKa>(S**TX zGvZZYENB|%JzIZ?-nZDSyL7uFGF#8qaPe+Xrb9AF=wzC#ov6Od6Wn#Ue(jfTd6&h1 zJUau6&RO`rR^L|vB}wYMBe!SsKzw|E&{6f<9K)<@cYoQM;2Mm(DR6CKm4 z+F}3QRPvzrCitQiYG!=}X{y#+lrP7^pm{;Sj3&%%db%hu#oCEOQpF|J>->P zg`vrZivPoc z=`b#1Z>m1Z)ZZ@JEyWsreK6$Wg9OZ4<&LK_3bEQ-9-4mWT3XEmajASaCZSQ-wc!90 zhX59KmN>{9Yr7!0d2_NTC3VRUt8dJ0=xjbZYM)IVNX>wy?bLHAb)fOitKA*yV}#G+ z4=pY%CNBnxytN*{>Lr}PPMXeFs=CBZA=&=L>T0VI)5!$a$zDx`^1ZXy;iVg`Knr!R z($r07e(^P4+{ahCr;vPeG}hbsesB|F48zW`F^3glM#J`Xc&}g6(TlQ5T4#tFI$pv3 z%D0%%(e_!K*QuA{WR!GgmqMvxE`C|W>iqn??BoN~1>}ffC@QjTmSu|9o4+}BCC+)5->mxdAfDOJn~^`gM}-gQ6+2j!8_Qb}oCg={=7LR%Tj<=H zrE=G2n_l#N1o+^(>EY)`d{RwXiQ{hfAS%gmNQ_qPhgjD$K~kc*o+>P;XZ^P9G_15;N~WD!dr4+hVN(cbwNXhc0$y`R2| zZ7}m2dn$s*!uPaHe^K-77DB7=-giqy;Pn^z>Ai3Hg6K}sWHx?&yt zvb?fcV~SP5mZQHOGUx-fFTWey0zcYWg|N%6e8T9C-pdt_{f2f_B3^ zPmHfZ$8>x6)n8W+eCo*SyQa3W!_uCVb;CrSedJH43c@K|w!L<|oyBMDo5TqbuC&~5 zV4D$_v+j(`I-U(aWDxKwN&->5+CY7m#?JV)FiL!t^>dVJ9G0Vqn)e5%n+HW_xz+>N zFR@P*)skY{^y$88>_TTTlnh&Elp4CDzIZmB9;C9)n7&W&E=pztOQ_#uYUh|9TFzF4 zeH&tn)F}_s&@9S|&db&_Q}MnXJ(5V+$fDa|da4|!rTWYoIQSCjtE8nr76&om(u+t) zAG-)#@EpJH=maAw@Sp+(-ebLR=avJg( z>lF5)0m!E{ocjK)y-G7_38f&FHds@{u^lMbYOj{HaMm%OPzRP*JoXs4ZMC1A@O(Hq z^__V)MOZ$;=Mm`lZ}p3xP9q5{nJZr<_9ObbMYLB2oCKfaKN(zZFqr1@m0V2Ho87y0 zC*X5A9$^q~%D&#Lc&Lr&jj}%BAwp%sJgsRxxSM}>VRvL*T*01_0bEwFHLu4gzQSH3 z^ie{zSup6k+cut;y?5DHD;#6!>Fa;9Y=U;>61fqPZkiN)?P~y^YM5p#z;t1?~iH6Fet^=e4nZ7Ugz3uWfXtq-*+{-$E0%Qa1`uj^a9=rcs z!pg2l5QV?PYeMZAth zLOg%(F=-)dVjx{Qt&NC)OIu#exyVP!>9tt18z@wuXfxBCA6yi>wv`38!zTX-P! zNo_bzq30BIjv=kI?TRbY`=_ z=pd=^_QIoSCwPKEbTB!jIyJo0{wcwi_P2?mN7*CMSeDR(S5r_gxl5C0esoz>Ip2H~ z%dXErd{K!uPghn5L_Py_RkI$E3CMaQ4j3ZaZ#385(24ZLzkimgW44`ze*9a@)oLiL zwfn`icCx&Rd%m=ES7}NYH{6F-np10$Ho#?7da4x^-!BZ?&f%ldv%$9%9^&%z3(s?&~h+X*J0Ecc+lzi+YhLEGOL~2>ZZD=tO zGY+aCj+@p`HW(JE9+MnFXJhdi#MvMgn#``YbSE`d&bQ3INZULa(lGD`Oqbbd>@i$6 z8wOM;VABxjqh&J2;jrr}h6LWBJ%Nf>?DpXE++LeTX8d8x4jkOxo=#%{^cV!r;V7A+ ziyCT4gnwtv>}VLgq>mV%{EdfqP$J6?ikK8X|Ce&3ppl}Ykv)8n_-_t=e`*B}S>`cG zlrQ~nzID&!BT<5)Fhdmn%j*AJIw(p6`dO3q?<`&>5YQd`o&G38pm~FS0_Co7Xd>c#>7YcHR5Uqp5^;n6N2{j2|Mf>fgs+TW? 
zb}mYgaxSJ8HqB^62mg$*MFz#zVCsaTDuQFHbGuDAw-fsnhq|5~6F)zH{cRfC$zi~q zPpX(teHpNM+Wsho!)T+ADLV;?@Mi7(joO8rR|4P7*Q@mR_v86YU}9n(|9HstY;$U2 zKlz%)UPdN#=On4#sMXuuha`0WK_K$lDB5T;fSgv$y{xzJz9!o=iGVgy_@q{M-`DEu z@tVf^dfg?jZA!1xF*&P~t)Z-oTV;0RMs+HmJ)VoTNUrNXe|;~vi+ZPJPM@>Aqb6}C z@oZ6cX_O#qj7!8Jm$|ul{jEQudD+Xswkv{!*J(-AVu=A{yjHgdEd>i0lDH`ig*_&3 zNvZ2fKR$M8T8|N-7ZU?Mc=U{>{c;M7=Tmcci)i1BV5{Knzv^cLQ=EM;G#pbkaEIv{ z8)sA%Z)f>bN3F5(7Widyq~%}_YtqcdN_kcaM`!U*n2m>+)AQsQ&|1En0M_CHF>KK zLZISReeDZXUyeR*YE%qELS}aMceWalCvX`e`^z4Q+f}dmmvO}v6=&=dPp#&7{Vp~^ zymph!UOQjIqevNA_vvQkbaavnJ>hV8)^7cmN~7uCUU?zJf@?GoHqCB7JNHfW9i21A zYxF|qBn(uHIjI$A)!Iv?#It-us zpl;83c?ST6z$y}Lvi;7S1;_ThA^fr)DzNEtqjX?~a6&aSM?C-pt9X0Ze%EH~d(<_Q zlAd}%io}Z^HyYj;h9r%9)dNwaI4~04oAul4lM%t`DZP!`^BxIr2f4eV84fOMZ&-PQ zD5~VdbRvsZLGKM>nG4cvFj4w>>vE_SO0&9Km@RP0rIsz;+Qc5tuV&hGH9O+ZD12U> z{_1F!giZntXk>Q0(HD-wZcyFHcR0v5l4Slf%xg(7b97nH%gd{KuVA^{yWrM;DxjR9 zHM;o#q7)nw5>1-&>(?(*J%dWKAf`{LJQiU)X7+<$csRBY9zl-q+llPkNxQQcR#jj&occuvzao3#2Z))Mr zxx$~u^eE?PcT1h*5(X_cIz8wk$|6zF%5io&EI0d;Wd#;FA}7z#boJ0u>k*GXa!`4_ zrN)+%BqIRiEMg3@g?VF5C%$gpA6UDR9T$a1&LBVfQi!q(A*leApbx~Oxwff&2OhBQXmT+r zMwo+sdm;R!rR?Wp7-vn}K`PI{s0mm#WeVCKT6|VwZquMdvtB0QBS*7>8M9&O(}>mc z`OtY&A0HKCfi~w*;$4 zQ3Nq~_kr`n@3H~iWFBV`f%X`KiREo2g-%&chn=6nt)a8xXxJFRf$*G|WI=A~dlagtR~-Yvo?1YC z1=JPG38spZWoq6|*@_AG3St7#5P7=4{!D<)jCx{O3bx*jZ<%{@u4anW`*{R}H-Ma) z8srS1@$GH{XB8j=N&LGN7S`j7lY4GDd^7f7J*I&YbaBeT9ZR?)*uturk_$jHakRDf z1XT~`i|H*Swo07kC88{(UYthN*MXz|i~vA0is8oAkow_p$wBpaE(~oNWemC))@-JQ z_mKO#`X@=p!4qsZR2V2g@71i!+QU;d`c0`ESL{d6xB)+Y{D=yv@;}5l9^3y_C1v7! zB;$qjmuO92vZ@V64B8&Z|1RtJ2*Y?A{~nmEVTK{B+i|%GR;wpsRIb!P@motr##wrL z`d+W?OdES36R+!<>+4KE+EF;hX)7C@Y!o+Dge`I|u|FTST@Aa&M!OenWr}$8Com{Q z(eC)r_QX)a+@1Q6K7kz>8)B-AmqC0k8@(U64pviio(@7wVnZh{kjz@xG}A>IyS6wG zKCtYq`P+ku(r<0TuM6JyOBo`Pk$Q?V*K(xgVG5gX05|oX*on!)s(g64gOIs{+V~Eh z9tsN#-q6s{YGk|}D}NcDc_>-?PkGDD+58acVf6s#$zb-Y_ur66WAtPxqmqh_Nn9&b4;jkzXjoYlX=!Pr!Zo3K zrV0`#o3l3A{MGl#t%JsOJn&w8e0;ahqW8M}WbgU8XVcX|bL+Iju8Nym?Z=NFJspFc zShBOgv0Z6{4<#X@Qels(IFEfGx&4L)1}yikQiIcv3bxeRylh3E>x0oK*DdBQqwwS{ zAF9~#7xC}V_5!Ao0+BI2glzP4tHIlq5Pa!uiZTU!+hVl{;Z!Cd?6_X^XwiKJSe_!t z$vF(UA}vf8aYSMVmntM^y(S)A)@o{M($v(Xy}3dlnx<_FGZ}e!cx1DX=;I`=1z+qm zuBtpZ7%e+!@CDoDm#+hPSWd-YuoK(~I$4~>u}iw&75S9>5ZrcYhm0ir23&^x9-Nh1 zaF0vpfszKWQDQ_yL=+H+L^!tx3BH^}$lz-bs$q;V_Zoo3d)e6-aOdxJ(6nv1nCC?} z$`q<5e$mO+E}pvU_{7_DOXRR+!alW~sjXgkcQ;ew?)s!WD+7*WEMfoEcCB+p`y&FI z#`zVR_b>0o<1xeWr`p59Fm}562og3+q?0!(-O+t^Q#rWB+2U!n7bu8W4rs3e<;Dmr zQu+S=E_K%EclBYU1@QKU8(qr5b4=!`SAVzkHX+iX7{mL6c%aQDe zi5sFCpj@h3e(W|e4qP4Fj@;FKCKf#VkgJ6z;O=TU`Ww7{U)I{L z5AKM>oo~}x1X~oRv-fwdfD5H|!(35v+wjk`H8x5}s9I$OwAS1vS+TwwHfo?CLxvLP zd54C+g(lZ%@g9iJtk8DGy6e@}=fgDniX-8pMgOaPlSPOd2*+>DRrvjyE9Aj1Bm&Y~ z840+p*^vl1Jc7Mt5u;7JwNE?UiM>?w;GlZtoMY_#9n%Hrrv!$kR}bN>>DEevwYC$& zzIS_->-X+rI8E%uiXEyS$-zC+bC|Ecn4&DtC(8DB45nJs@=$rkXB{Pa+)Fo}D~I3X z@ajm&1d|vgh?C9v8Yydnyl%pg#Ls_6xhAgO?|(LH6M0ML?tlNjoAyw=KyzR;I|~v| zcznC>6hL(I^zCON)0`>nP02rWuA>13(R+So&hqzsAx6$ULD9vPdHwf{=rS^&+UBPp z@%MBpD)Xok`MqX@n zZmRvV#szmXzu{?Li<6+DflQr-t@#T{m$f{k_@Ikuv&f_m_>ERb9`SHbM4dgdeV)_c z3YUSMU7GhYulJnihF+L;;RdVciJNy&M8BQaNx;Bn?#Q(K*)8q5CkzI&JpC>1(kIl@ zpS1qqgEu-}r7Lz)=`x{`t|Q2y*(DzW(H);4L`p*Z6H*A5h4A8XcQpSb>UXfpq2cNt z-5XGi0aEr^T7>wmL~%(;d`re-&7_8bqTFAdCdala<-Wzi>GKt-EQKoMV`Q)x3n)Dr zQblsS@$h70ooiZtVgPqc!`x*PzpyvdCJII`H(>52i|Qx2ixeRw43?p>u=S^)8!RiS zxHn06(<=d0^2=`hV~@mqVPLi7(KywKCpmn&Rv?K24;yftjfoLWzLky%sU&o9cS5co zu9wl-#lS_j)F?^wihcpRA1*UKoMJprTjMxk(sX-g0;*0xuRfdK&adSjD0TeyM$57@ zFI&OQ7jnO#WBKFPBk*R6Bn<(E;sERB+?|g~{h=_;6|bJ+bbY9%VN!n91R*O93)#5G 
zP9I_7sMfHMd(wjMR1|=j*9y>~1i)I;Te?ixTS{jl2eq?Yec(VCQV~9Xfi9cPwtLe( zfLR!uOdi0qXfNbMfK43Kb9d6zB50F;+cHiT+2j ztfC`Z?ZIP@`xVujCa>egZ8do>g@YI2>RLLCR~}$ZrV$IP#ey4`TU(23?@!Aw$lp9b z+h_j0vb^0JQI}e+aD%ddRH$gDY?3n#8@iJ;modv#H*k?sCLInxPC1?QBuk2)j>$Kl z!2b2r^ZoQ|gG#s3clyUAdn3m0dmlhyb~+D-gy9z4zz(# zH+tc+L!{cvsD9&co)PP;B6%%XOw&Yz3)Vndv|=3{B?1A{dql#UCm zy)l=2xHEDz0rs`RU?&H)7Kj(t4UxXE+18vbH5_U;Y%vgU7_b$<=X>I8Xm!Ax*HCRxL^n?uE4sCqJaBLFPFDD;UxQmL#BN}!Mqp#ksePKv)QT% zf&PE`KQu6Ho+yuoJpAp?U(|)v;9U0i^I`~V157D~hS-u9jYvP;w!xD> zI60oT-P(;=o6hQo1=68;=S0Ze-4wq+a^1WRD4|li!e^1ox@G_4HDnc||C(84W2tz!27?;ZyRUE44d|EU*IbTb`-@3HNRKvUDS;rd> zf#t69H}PM8Vz%j)hlX~MRJ+6FG>sO28#Oh(CNBM6|Dg zVrV9=OUDK$&iT~Nd*I95KMzK84%IBZK zi0|&cn^t6KlpN~%+K#8EqaoV0DJ85C(d;r^2mWnQZ0M=t(4Gu`b+ZxF9wlSV0aQgd zUyof8OUNV2UqD&B-C4uTYKkB{k}tyMaaHY0Aq5u;PU_B|PO032p9AS-{Zc0Uwya(t z`~MwM6T>e%Js}m^kQ6-WycW`JViwh_XJBIaQS_)vgTu;39F3Bd+Y;cRpb`-;9#Zh> zw(L@|6-$keG<|uw?YR0;PzEqYD? zO@WTc>DD^6JcKuEJ&wORGsH`>iJdsPujAwB}+O9N=vw!j{tA|s*~vv^a}g@cQm z@}lQf9Vyb;D51{G2_r!GO*PL(a}yyc?W?QFv2l(UhL_Q=R4!wMjZR%H%2)wyq&ZosP6kpMh2^599bz{{S#HQOKurZ1&tnRgqKXM^`qwD_6l9EFbFV!U;78iPF z4c01zSUKMv90&mv=d3WgA06h3w@W6B!*+CIh4K;1>)*!5qFmd}dFgiXcvR|} zK2Y7PdveuIyZ4>fl|}rssdNjQL@eYwJ!2i>S>H|~d&4bsl9RdfMYE(?|9fF@!r zfLAiZdTP>4_p(Yg(W}rFKA%wUw`M`_lhqmqwRMYDQxOijsF!a@R+QsRN+LJTHCi`Q zN(&Vb2gYM~8<%?}Rz7!$I^BF$iv1-m*r%Q-Hp&u)6<4z_{q;Qielw0@kRml&2cEwv zEu<&mk)|nxa2jQMMJ?396${RkJjbKsJvxw*VnQ=@#O@`a+fc}<6`pdRG350)FuqT~ zxj&h}#%KXJntWzAsRCBJ;6^(Nl9ezVHICiTv+}a945*?$DJm~dY!^d>wiNnfPwRpP z1gRG|E=x?os9N+aewAZRK1ic7aIk9CJ1nxRi5_9R#m2}3$XL~}8#GBFJxMYYYN((V z3Bs}9lg4+HeBJRF&D6I>lFHYm-Y&j4nU=q-(LYSD60$6y;l~7f5-_2)zVUS#Sj2hF z)V%8Kn-UM`X2O5?XI*^{yUeiNXvs4m{(9HrsnST0mgVHfNUO=pC6}`(4ZFCMHeRz1 zl)j;%Ki>4Dv>=gaU_08?H;)8eHwKqPibypedS)k;P#0+hiUFPnmlF|B3?11%7H+Q2v4D?U-rx z-Cx+oSuGTQ|H1M|Zi&Ggp4%<_r!#+?+R5h@70RViEZ^Dw^an)BBbwiA$lZ7K+bAc0 z`k5kjOhnCV)>jmTPQcj~=BKYZZELg@(GvY?FE7eZsjP zK@c8N;IKHCGG9fbKyY$!MqnTb{h1G|y-q^o0j+lo8 zy|3=s&%K%c{j-Nl(EIB?6aAMx{xpBZiOdpvlBlEntsX%o8&ZU!7j^x&dNeDINU6uV z4gmGP{Sg2{-g8dV5{^Ko>(f*&<{Wo;;{|3Myu=BwDgKY``7t`&ER$H#L z$VEqq_Z1l79(+&hcI|;vs4RwVY4X5)X$to`X)ELTf_WPYpj=K#fvV$Wk4J)@HUmic zkZqf4xp@)qb`i|CS$>h4KurB2)>K6IwN&GWUyi@tA$PVy@{qY?P}v7O2pjI?%#5PL zDsC~D<0%eq;^@aJLSn)(b{Rj1)X5Dl!dBI{*KbJ*^C%O*Z3X(Qh&IPTe9a(b^|xhA zYCxHkB&PsPXz7`fKD3x{?1XlN*OZ7Kb0FUZ5s+_!$sX?qP8#jBcwSHaoJ;ciYWH-PzCk2es^06CRC;s)KM$?X5FYAd& z=bq^&=}h*Du>S7t11H&9jBkATt{{%E0-hhOwI1IL(~^zy=?c`ft_y1#g$M7UIq|2u03hki`CIkdMjk7$mh6JS{B|8LjPeP|?;YY4?+9G>AEE;#t`Z<<27WXG&d=bF#hdJ70)j4Z zMZrsQv0gslwVO9*naF3o@+$@pC#JQ4qYzn?ykoYQ6#;LWHc0e7JhxcuQ<(JColDR24lMM&YFF`G?*y>J@v=9-Nk_YGC3@? 
z*ddF*xUg{33R?8i%XeC~xV-c!g0}b7p)4?nxoycjh#ylX>s;mDv^N`!gG#SpYd2{Vbuc{PxEmGbVPaDecj^6 z#;+HTIYflj``Id9)*Ss|FryF9YI|!@Y1086W$o=j&rhjrp56#YsUo3B^h@pLZNN?z+H4PCJDSM(^f#+s)47N^?asb%twoS;#1x#9_ zN|wxO?u4P>?jVKWMN1_u*1w1Czep=z7R;M~?x!;!?K@A1C3`oU-fqiJ@L9TmJGgOFqm|1GVQ)^u#E=h(*6(x@qdXl*Q0T{)0WDI_30CAcW)vQ~R2{z-pSi2Q&Bv^D(fQv1IxwWuv~9j81vWE1PuDn^jVS zR{mv3(bO(}`y$<)E^Npk!CijoORsuvogCM%WGzOYsiC_+t4l%kNCIg~E>|BMSc*aG zZ9*&g1gm*^Qb#`Yx%uF-wy7yoT^G6Ntkj!u&jPE}B)Wfp3t$LmvAj%po340CLa*e6 z^h=GAU%3WiU;FT`^SqS9BtYID)&^z$KwT}WZoP1~Tz4GeL)v?A^El*1tyNRW)bHu+&Ov~Hgg%h9A!z{+wVYn63Z;6WpDR_tvM0!%V69<6D?Tl~jRB$o zf%KU*@`Lxvemvw}f-UY*eN5Duhecnb=&tz1BV~|}3&-rnjQLcugC3%d;+lt)aG!s# z+LZiNurg#IjeXq~3Xg?H&0H3Pm44tO5snIz_}C2g5{KZ9}qYf0z5B;2r2l^%~oOgzOY<>N_n7H0h4Lv zY{`1+#4S;l&0NvgDAeJ@XZySHx;rXy!^WOpeF_#6q9U;3tW-jB64uY}SF^lSm>^6t zik6<0dG7WU8VB_5bP&~=snL#1v3 z`1TF6fll+bsA+A1J>?S+`^;2LqUY!jT}^!242=Tc#@njd)<>hD0acJHr3&433NAF# zpvpW2LOUGe{s!@BJCAaRoeetKNUIq1722vencu{X>GB-&rFj&hrBar(VxSH?()XJq z%FKy`$;Kk|XkpKx`x+>ef4l{1j@m7da52?g>^SgyA5Ec-aN4PTXOUGx9gp)fk>NEa zpS8EscSj39zY~8Y1&|U*%IT*v#C|R*1V7Xm^7VaNQwv`!sR25fk{&qbmFx|-8sGbx z_P_Ey&KvhL>DWt3;9LjLzDw7>AaBEkDhiVSe10XGBRn8=GbzSPnHkJ}^$6FcUc)OF za=4b(&PhTkU#7*ua7mRcGIhghUoZH;=l$5%?&}^Khu;g=cSC76$Ky=%G7>pF1~QOc{^8D1E33+1)qg-R0agw5d$+XcInlgS z&{RIWv|0SEdexux=6C;F{setF#>M>+WgWZ92ZZA%t^Pq>QhV+;7PY7nNwTgIFw-u^?!~TEJ_&#K_~>YuBte-$F70!}O1H=q_YDF2 ztg|#uX4~t#V%4XCNYx=ywa17wy~B@QSpw&VK%QU&=btxE;C4G3NGM|{nv*lyO%3(_ zvKgZ~ZL_35Fg~_9Dn$uns)w;%3*MO(b@qJh3(R_dyu-gf*~++tFk5i`ZU$dSAaLhW z%rZVX=e3ZoQJ~YDB-$W%`h_UGwni|+hSqP(w%KBv$^rYwmP-w}=-+Qt49m)T^0ULW za*uZ=3l$=hvuqqlc(R#tw>_lCM{al$>nOHFP5$Ix@zju>>Gy*E!yr`#0*ZLnCw~4= zp@OvF6lwu7;^&8bHji0F`R2X8Rea;|tWL?#^ziB38p2Y5KKVIl2Cx~I&@M+v?|6A% zB-=b5P5fr0bGl>R-um{_(bBL)zFm=JCZw4Bbe`2c(&5=|S*{VwHAg`EX*X#H*W~B~ zM?P4`LyV8o%AhBsu=zZ-@45b$lerTe{4R%r#-jRC5!duT*-4-dQeILzE!qjalY@`>+5#O0^IC25LA07*E@vrO%xT z@IO1UM@qmNtf*?pwLHHBnC6PH(*=iymONR<``})kQe3Ah_k*NlaP4!?-)~*hs~)IMySxN}y#5g7+lMbrz8wcpy3hs!n21LO$6%b+o9PE|FSqaU ztum*6ymgoU(ZXVYdI>_^%UD?30CBFX?_Zzw$ySr+N13iI3bgAx;`<%F4> z1+;f7I-IL%`u>#HzKY^haVLB}{%rJR>sKR*XIioE3KAWwSU5}L1NfTTiVTH-)V*rC$aRDQW?;0tL<_~ z#YTW=;Mr82D~n){NZSfI^K>(^v^mk~pjPr`>9n4AoG05lSUg)ac)6EM{bSIPWy}43 zfnH6#fElL0Qtl^c(Kbt4Aik$a5)l{Jw%dO6B6WzmvCW10`OI&0<837L829Z?*)lYA zhH^*=H6$$xFGIj_jjPi#l;W8b)SNibwr74dciRKx&7!Sm7kKdXIUmbP8;K=W2;1C8 z^S$9&{O*UDs02Ip1xMTSXY8=w1KAk$?bOR_&)6} z)9E_4I6K$KG~oR(dq6*A{m#6Azi2Y3I5RK%#-iZ6%7YN@?UO$+*31;SZLp2Zo@PZc zC}+hPCqkX$ndFOah3QI?b_Ro$SUOK@v`5zBz6532y3)S1EfHeTx!U$F7{Y;_g#@=A z;uLY%@1YSB&R@r>x{9cnmY(^orIMuh2HVVi##xMTna(rmuAugF4LOQRSr)y>yuSKb zZqhaOy?5T!N>N`vZXw6t5`T|`X*)B=uuj|iH2N|00bF>DE`9g(R?sJEHQl}R=ZWj{ z%&1Ukwxk^}^O}tHVi&)mH?pL8YPvQ2Ll>`r%kS*6g|HXtLk>nD%>G&`>D6`*m4t_d zXYrl`t(?{u5Rb^D(h(|VwPlx#nH$9|mz$Ch3fGSUVtaKL@2L1<4-RY!oCj*N^h#x= z2ek0%__btXyB+PeT2f5l{-?KY(0B?KHx7G+hzsPa5$J~sNtqS2c#L7pLNEr?#8H#b zOKvsk;$0efQAHJp$4ST-zzMH|VpVvnDop^Sw;wBs$*|UoS(oW1BFZOch)c_&fgVu~ z${Hsy`WsEcoZvXi9H zM&8^uc@UV!aiX`Mi0$4!*wiD4At=l|R*rM|9pv4)NIkMsXsG>sA>e&CN-8*O_uJwA zx!sh%knOv)>z$3K6-H=A(|L17f$=DgGa z_^sRc0Bh-U$O=O(FL%f5I*H8gM^NjQ2T%#`FHqRjj?<+YzH+5cK{v?mdWB}Faa_Dy?cQYmAziA(q=h|MJ`e(UsQz zUZ2a9ze?&Ntp!ZNUL>A@r%Zfv+yhByy!b zKxmY$tJqEMb+iNpm(G-Xx60k6+?h^^%xc8M75Th^_F|D+Y2Z@K?P^6sZlYuA=;FXx zk#5qVz~+GGl#WVaGbg$Ef|JEodJVl&|J38%9y|qJLs~bglb(7Zv%oCG&G75K;Zkmh zL-Zaf$EJyvp(K9#Gs{6K2 z11;3i|8j{E8LqtT)3%CfdX*=-33|Vf{s}-1FGhc*ERh;nkR$gQe*ri;R#;kDx&OMQ z^ETa@1=S6JY^)PMU(Vqc5Bm0fA*Yy<`*yi(raY7KE}=Raq0k4l*Nz-$$f^s@1y3&lJM6Hj3Yj`W*G3=fAr_+X~_(vG(CrNkq*@sEZ*+%OI(+>nN2C{{Jd$AJRaeH&Mc{c9^^1r$4gHDy+ z804!Qf6UJ;IB42+e5d4bQ37T;XTJpmpsf?wMJA=$9U|9 
ziT*qp{48cQ_a$tk*<`!j)uZU@YrW?K%h8za;)B5HyynMgyW_i$ulScwpcx&JTy4yl zn2!-0LVCDoQOI2qF90*u_3D=7*so|RotDMD&R4@Xk^5K@#x=!=t0DO^?i$b{Vqywb z4Rj|x$zBn=p3AV?umgUMeTkQjMf9pbm)^CNUK6NznkNZ%a?!yXF@~463DS=nXW2~s zS|QZ*wh?1>>@7Vq&{t9|DIWQPM2) zFq?do59nw^E}r`FO#?bKf-*8F)KIi^x0*@vS|2{IaJ-xekW2Uhi3WXSi7w|Ea$tMK zZt^Df*2pyyvLSjyK@>2t6SOFO#}6vwcr?Uu7F;o;8yS^`2_8UzQ!V^^ ztq<|=bP>Cg#X)G#O@``9OG~2_{HCt%H&WSgUu@zV z1d$;>o}-5LSN z?TE%6i|*a6Sb9MB|xjBqY$Aru)1Q3Xe{6a+nKlxejT(l2^;K=dEE9-vb#uhHaT6s6U>ZA1@!Nd>) z>{m>chk&0XhY_stL-XWWI_t)^zxdFW(5%@Kggn+dKlA#Wy;~Pars>vRt*gPFoWfGD zRE|J?JnlpPa+f4DU?)t#8#Mh3VN|{O`OcT~vhGzt1M!w)Vp0~JPCkP|Lnm|7*|9CJ z1vEZZeO;3(f^r3opTF+-dtvG3er$WO(!7W?PU}z6G_v_P;^pdg7`WPms;3-`O!D5m3cU$tjxM1i} zvfirrd0;qgu=uJXhU6K~n~!Ct?Iw&WX_mfDP+dp9siKEDu*t4G2DXL)$*CN%!;zbI0G;Ysl?)Xnrb>*lj_HLxWZ598AuD1+`s_Wi|1t~=k zMLI=7KtQ@n1f-Gf0U5d^hE5StDe3O+ZU(UEj-f-kJEY$|df)f+`+s;pGG~~x&)Ivg zz1Fp^buEoi{f~N1ZG82B215~gf0KH^7PSF=B~%p7tn0EOYvShDP|m0J?TN8^>M3dd zQ-11#)0UU{g-^dePLD`<#N2YT9nWDIcY6CYfv}{aLIu>GB=iB*uWi=7^Na4KRJLAD z+b?5d?Un^2-<3u?RWUgQ*4=JjWOwS1$%nJ(@=zg!Ikvs=w$kLV^xT%C9Xjb^JAz3# zh1zxX^_eS0bXZH9Dz#+0)KuR@s^xkst<4etGJ5PV8m9Cms9zvQ26vVQj}Nufr%XOY zWv-p+-9A&4H+|x@fucfaIKvKH%{}`!s zY{HYtM4}g(^a~0J{GdPLsw-_-psio)u}68S^tz!!EWDxXw0r8B84%n} zPFG5^%x+{h0u7S4w0NZf)iZK3LDrvvk5+!@JUBWy+)Ng#R&=@EcWCl$Jh()@y&N;G zH5cN==Ht2WWYMwF>bqH+SzSX#x(D>76Yud^$}>2I0{6S5L5YXUMfevFJJ1Y;okwX1 z9>sJf--k17@|wuVe5LFJejM6JTJx2ImWK7 zbu{S~>uzhco_i(Lugme5xg?Kh#L59$`4K6fWW-P)Nel1!k$eOZ)lGc=EG3F(TuMej z-p$>pCHbY|@;pOiY4Aung3 z%MJ4Ut0S);lBB1KDZXgc_7lBY#2>XW3Ipux&qtKg13GmMvvWwU-dfB{&h>ezRIO99 zlBgwvOq0RS^88m>nRK8ypywMiYUtgiLMp$pn`_wd?NY5HGd?ye_{b=a!C*;oi84_F z57~^js=9Ksl}l(aX%bClXij%&CP8~2j$gqBY5j7T9f^1kf}5%<$HeZ{r;Qu~_1DlU zrVZAJGk!8UvoQ`C`E@q-JL{k>K(z)MBO{giXnl-1BLqWe3dU{+yW4Bl0}3fWe=Gi zyxy;{KO>Xcn^Bo?8mrt@FEcQB{iL*MQ&>e?Zu2>klIVK2{j()j>HbFso9)#1=IQ5oIy?c|j|{ zqfnrpmf5&dcgeFDgByFsN)X5ClMKR&&Y0-kg5AZdUdo@Lu*wjRuFcVb?L$IaYo4$~ z(+>^#Wv1GiJ}pym_rhd>=%Dd{`*DAdakECua$zAuBdy5#Q@6$BerR_>e~)An&HTsK zHhK6I^=P(r^SxBp`Y3{BV$E`0=HTYLT4g?oo_w{6O!qx=8`i68FuEU@Lr{d(dKkJl zMpqpg2N#AWXOB){K&o+~VQ~QSVNDGy_=**Z@OW3@#y4f4tT<7*ys5zp(N#@nO=^CU znR6|B;NUFd3~jPjo|z8Bk+*~Cb5IXFK*f%=(=29Fk7zWj?w?sFY^xhw?Zkx{jc#{F z`^l$lbfral=lI%Q+pw$)!H%mBdM}lhk8@khSp>oTH2r|sTR}|RBbbCw$;)<_=2dy3 zPo0Tlz|)=?rce1-%O#Dw>MRjB64N}fVQxS@P3)ywhCJWmO>l|v^>(SjXWXslA~%r? 
zN+)&%4vz;EQN@4dKTP0IKe0W&5v()3A=>k7y zC->82URsW%LSOIi;pO^hpXCadeywU0UNn&^e!kwK72;47)|6W6W?Ng?3?p)zRGKd| z(LR{?CfpV>;CzzN7`Og_E==?krQX%lhu^*IX6~2siG%^#*4=0Nl@O7nT2=JPCu(12 z4&Waxyu#RZY`oth%AH*vygdV2lJ}@aB;2!p%H=fNB_<_glxV-vLTpk6D3t636XIde0HZ2W{QOH~_n5;u zt)wjLh%`UtXE+n5nS5=v5>PudK)tzo+ZaU{qAU^EZwn(jZ-=}imEp+Lj7oA{`1FsW zp|VCT0tGvnA#Af(pWXY2kPLic(R7^^wu{<((^-em23IP!MaAo4ciWyj z8-&5Mi*7;tog^IA-`jkO(pHcv*8b)VR)B$#+w)%NnI)3G7X zhi6uOpWJNNz9c8Jl$XzHCUtjpLHm;UIiMYQB-1U8q>7&Y@7qm?HE%J?CQ_m2O1HgS z_c@B^a1yJKPaRRhz$G6)2`~x58pnI}F(~5aoK5WKb2Qj%3;WID1Q7Y zG`ci{c)Kwxi8^?(V>@puU8we6tFM1=r&0l0Z&3rfc`8|-2)awC2{lk8XR3=z3dap# z+cq{V{BhSROCsc#Z$qC33M!g1{pe|)v!(2d{z6Twc!bhSOC3f)c7 zr@uw><<9mhr9$<^+M(n@*L$j>d5_v^zI|wCIbzE}@%*cKqHV5RyYUB^7cY{U_BwvP zaglTEYv(~tfBJSgTC*!lCX!ZERP<4y=AwY)OKM|g`4k~-sW3{GO4Htmbcw*O$!aSF z?4Cd>@3n`m<7aoE)^`l7S$n{VQtcpjJ*X#vP{b zPZk>tiXeSaSCd^*3zl*zw{o&BWh z4D5KiFXiQ~I3y_BGyC5X=U%Js%|*5<*tmUL&$|k5$gAjvnI12=X*$Vu+l?Q zND1~OsfTXImnBsfRbXnWsJv+iUe65xLX!QNSKsf8y54)s#N0yPzPPxkpm~o)f`R3& z&v8|M-{N*0o7<&*vvcQg3GK=8Gz6~yeTu!2+|I3u0(kNKSO{dgmfFtH0bU zTe1>rVI7h^=)ua=6T6UsdRR$r)vH^m-}k2I&NCr5*U8nOkYs6y4AyU^v_+g$&jT5x zN)CB0mx2YgS+aAm^_5M`58Xw>YuC@axAQ3i%KXH>p!W$@hK+91E9A&SXhd~{g`Br6 zEUx;5pUEKj-g(0(JIgrTJJ%VUVVAQw4##!ms90*$c;SA**XdV4I9EnRC8Fv2d{aeB zYi<7ocHZA|wQWZkhHjaA{h;8uJdmEhMZj+JSQvf^9(NP@xG&B;ixV-jXE)ZiwVqD5 zV33evS>k1sIwx;wQrPWHJN9>umm8btzIfN}aqJv68#*zz7t=n)UfhTfoQ|9g^h*8Z z&Gxn6rp9Kf$zTI%9|&1=I^x)kn^qR7QPWumxU!qqzZK579-qFi>7&)r?*z3Icnzoz zE_}>^luj?z`^nS`fY3eMmRg}47*938JltR723N>>M|Mi^tBdnV+EoL`Mo5`wh zq@t2(TLqb2(;#VHyOzIf2cI4`YRlz8P*0paRQzjhs<#dPcA9g;Tyx+%d9gPQX8lAV7qBRY|fk=argQFvJSDQE5JNmR4 zXOWvr$Jotv0gB0YaY^Lbhw#Q}TDxyA#6LvuZOO05~Tg96{x(9!p|0Wl3ixmcMC5R zh^&%Jz@ME6xG@H!YFuPnFm_WHP0A##>dBJKql4Y#;ym`+OC1HK9Ak3wdu=>QwtYXePR_N zu2Ey8=98->$wZlvzx71L&*g+k3aR1jSc8r6QhaePi=meZJT@EStIdN2>dY47W$9~G zvk6?gUmi;=wNv>-9dC{`nw_r05&F8$PT^|F5sd(vU*V>s`{|@TT+I(;6=(ofxtA}* z@plD*Wi&_&8YTHm zPFG}l`10&<#^vnNz>IWruBBzo*C+YuJkoY)LcB&7&zLoeo?pP^gIgkEvh`a*o!K_A z=wfLFBj$hP;Iub@L-^T-Z@~=3MtCqLOpI1P$Ydx><^Xx{v(!%02pGqE4BbKJIurad z7(;0jUCw~|wuN=~f4a1nY+_>^H;F25&`|{hWBX`Vu3w;wHMLA z^o6zKZ%?#T3C6WVz$@xQ2Os~pAh6JoDopd*(pS+={=OUbAOCyl~nI2!?34(9_XL3N)|`XtQz({%|0Xmu>m&?>xOP zwCB6DwA3-G3EB33mEy6(?`Zww(so<=~t}x@XXHiHgBYPnhmfj-% zO8Z4A?FOfO&k73^S37;Y1GE*vqqtsL4 zzY+zk6hKSdE6O~V1&3V*BqgGc6_F->TPd)|_v&-#XtA#5!-o%rQgQ{4<1Ttkkhz6U zJIu%DQfB0i%^yl+GCA|%mj~Susi}7F5`bb+!r^39}s992s0EAR+=xyRzDC5ifvqH1hsnCk2658BA=K03>JfFw-si&&tM7|E*N zz}FCNQOltpYYK$qhRTd2lqAc6eAl;E)W$x3)-#TIQrseH$~0sy%dcL`XoqzDg8s24 zb7)%9ZM+N)k{`vc&W|M^ntW|bNS!o#`8ewD zf%i|fa0MTTfvp{;q{Z0HyA!)=OG<5%$Z%FVKPSgjgBr%(&l;h}j2FM(L*npz&qy17 zoW@l|#E`7xX3K(5bv*cNvVGz|l&v6PF-5$tWkru2d z@ZQs)jJ!~EMWOwdgKUtYI}AzG=gTR;V$z-il@`BN?5$p=FGp7a5fClXFF{_F&{B}8 z=v?Wqyx&xU-!py>k|pSAo~kri|7RNz}&=-!R);Ss0-U!0_MD za%Y$DLm(b5mb6pDhtQunS4L08qk)z2+k2QMN#B%0RQ;zCbuY8E**~Ddp&JFiE@`AP zp7AUWHw4<&(w}$4CZoAM-3u| zIqf6h5?C}B{-l?Tp{q>1SnsaKVs}V|2c-FOdzvej5TBwnV^KAMo}nw`Nki8U{;48+ zR>XU9pADMg1cYPM{_GZusn~VzlLnb#sZSNIHZk1vQcLes6zB#m0|uj_RiEBmLE%}| zC2X*D$A1EHBDerxo{|4m+Igx6=N(dO7%s>qSvy$WK<-a zm4eK35J?qWK7L%BlJWK1Zr)c_RsRQ_E4IR zt^77l&{3smm;1mSG*OC(>Pe_#9xo-Upko)2Qrw=J?%NpN6fcxZpcnkTx1J@-!Ck9k z!nHlAsVt+2`=|PLFCp{V1FaCb zzr6HYch#=hq;9b(4*s>=d1ClhHS7xO8r||T7V#4`Ed<(MKRa#=<)k}V42wAwBck#} zx49xw%v~z|TKdUz+^UzQfL5)3dk@*Bu6@3h`Ec57cnq=r+hK@IBiv#LLHn2II*&Aq zW^$caYTDEyvpzGr1)lbbVrhiv2|7W_JQZy}*9>p$y!nT|WsIKfC9+hUoW}o5Dl}8- zz8qrM?6K8v`H5@b6d04+=|;NbVrCn`J%yWabyCaI8!F$MR6cj%N|=1$HhmOwuz28f zF>)8eO*}l3qpcF)#)ou}sFfX2L|6;zAJb9K^ZE@9sW37#N5&D081IoWp$7!`Vmxl3 
z?7o25g_|*ygr6a}It6eJIIQqma=At}q)s+Qa8wHXjf=nwm2OC!a8ufunU){CJZnnM zC}z1RvWOewSBSJ;-L$OU>4};_w0iDx23v@6c_Rqdr|Qd-EQ<+LUkez*UUxG;i+*26 zY%%vvEQf~S1i4bK@#ahEe4eUcpn5<27!pE&WBLl~WoGN>s#C?gc*F{#<%WC4Qkb8b zC~2t8bG>goE;Cb+l=l^0@>hA)#u3`}L6HgTsX4`poVVBA@f1D$qPoC;trT*2tINOo z{t1eb^;USrIn2gdaZv2%&X6gjy)TobiY$@dhWjAKTPNNK_jG5ZBRimDil6+?p_!L* zi-lvFoyQr<|zcPKV65=ZY?2hNC} zSCql7q)U+Yg!Z!rQCN6kzYM1dca7=8VU33@n>QgFD^KNeT@gDbf}DpYoekFbbmtkJ0?ch>mCh zw~Z8E1!?HZZj;0xL&927^68 zC>nOMTbb6Y?d)W&2>+qgeryJgEsu)3&=CupY$YAzO52do>NAVc@Xe`ViB&Cp?8^$C z?w%%RDN$6C+kL!9+vJNREd0neM=Jm>ld3xVfplT3Cbml=BHFl^ow>NE+RQS7l`p$^ zq&mkarIF@4Y37PRVSLQR6vCgQOc%f2)|JRHejj2JzL~0vJaDq&tA8)=3x_({Cz_#a zOfEQr^tZ(r@88X0hz?A$7DQG$O%|23W6jV9eC$dzumg#`F85Dzwszbl@#0Ic4)HiU zJ-+H4>DLIWa#%kJ@~GRo@E@IgXa}>hLh!x*aL?QV#uCKc?g6u=J%!8;KNzn|`a$-` zeP{ua1izD`P#@$qBPYCev1je!fOh9mH)LI_usg0a-!AsiAlqqHB<>o~-_w*2)rkc= zlv7i@l>r#i)IX}Te@zCK-#t+@toL8QiH{|GuK+2LfIN&Jvw*N`C@io`G7(&<61Lv~ ztk^%UX8}+&P}S$}lc2tZe5+#Ful|0H_wg0*X+L@vO{>)bb%ghuk(sl9Z$XV`4Xzjl zISQjS0dj`x*I#&lZvw076NJSl z{@Zp!6kPd@5>A7+7b_+&9~>1Eb6&{T^;caE96dBZ;g9|e6$>w6C@9i;zo?qo_s=kt z$-scC)BCs|9UJm3_wR86(|cNlzkz%Yh~GfEzSco}~` zERO8IVSp>Y??k-8+Y1EbBhg2Uf3X7;vtg96(?)e$j(_+bRX0OI3l%cqwXN zi~G-HTs{TwPg?%-i?kf@v)lwrp8rk;=sOZ`?lv@SriFZwKaDdY{+$d|@Xg12czLL( zHDKT@e?>Qt(>|{c=)jHlr5|7wfZHh7*AxFU==bH|Z(>oQ0!laVzu~9y^-un}4OTF? zVyMSR;AaV5hgwj`Um7Raa(9vLz+SpzcRDj3KK_&J?Cf-1amx??EHAA0AHhF<6v8U- z^SwJj^SvMC-}f^Q)?2Wh;?fK1r{G@VMk_S`jvEF1{a_ETKA_{_Q&!f{BC-YBe`6uP zTi_wlZbj2P$OFDX#~L5t_5Aa1zP>If*KZDXzQ6uw$bSmsvBbga!Sj5o_Gf$i8*ns% zedT)k>%X}IuO91t8W<>+$fJTk6%)u$klSMxJYCbp+eiNe#z>z>8O%Ex)m`=ffh^!& zyI|C_UVQ(Lh~Z-}YxwW(2Qd9N_25UrI8jMCq>?{ zwGq4`7phQXX`~SglJ3XI$e2GU40eL7si|p5*Zkio1ML!QsztlkUC6@$cw~TLG*qO` z>9$=xM$SFioVLP59L$#I~Z0<`3N?Ez0sBx0pEu^T9aE{?Zj{C(E{vo*zj+?9`e z{uS+3Y0Gs(y9Ypx;N(7+`zz!2?Gy|a78V9Aw(w&>b_VEa@$zy zt-)$vvQ;0MfK%R+u#2{@{+QVy#2Ko#(qv>};t1>)xv>5e6chfXN!qsyzws7N!vK)Iv(pPuj>u)+GICYfIQ(Sa^esGk^T}@eza1*>WJw zZz2gu?~?Pq6~nksX-(zOL{CqjuuZ_M(E-SG&lpl%Gn}@@2e;&8WWtx0pzG81l}}nO zKBSbQ#58Xlb938Fs(rXeLIV;vNYpBky8TLM_ODU+pEUzh;qQxa$9bp(0@_hJ^su*g zyhF#{nqe*7-`_s~czfNJqc^A90rc=>NRAD*wKG~6d&ietP{`W+Rwamn9i&<7mmcDG zm>2e_DhaFa@ck3qW7Pm2;X9VTV=(J})L0<2{H|v*lJ`2aaCv2=3qXyGi|t`9TZdKq zV8&vwHIR7{*v3dWQu{_A!0YeIM+3W=er-t)4=fq1B41|(U4STz6lyAgEXHbGoU^?L z5bj(C^6ziK52d>yg^gn+LP)tI0FE7pAl)XE{=1ITYQTA>@z*qq{lEtKnc|`NZv~&z zjIAiWe;dUPJK?p2CTiE(CO;x1l#ON8*9#px3Qi`|=ZZ2pV{CW+5W(WRkpas7I?=#PlX?%CaS#3Qr z^k~&v-^ake;_oKf12sBrnv4g?%{TgJU_EFiSEyv251+!dRF%g=R)9xJ)!Dfw_xtE< z?y!tiMee5(XYr(ue>T1^6&mr@F6PV6$o=_3CJijs9DxSK5YgdK3PE{$=df)`C8_|=AGpYQOY8qGC_nkKpY=em>1BwR-gG#m{G{+AWYb%z-Ra)vP`^QxtCWZm zoJyo@@ac?8H$^j$@Rz4{Y$Gv?3t;2ss|PTOf{2H8&>{DI3IS+nc1}(VbGKVN0J*ZW zvR*2RUOBDKSpVG~%2vSQRZH~;JP@NELbq*Lr>{TgV$K%#$5^&iza0(ext~5U&VAR^ zRpx4ai_4dUJ@Av2S&qoSHT@10a0i#LpibvY)6;q~^72ke*9cv^_|0kODVtC)q#!^3 zBfeZDR8$L~B8S=DKQZtv)M{`(s#+cCf$IMZ@z#`w6T@)OtYB=|wz5CPt?1jmXmVm6 z>$o+iPKeiWG`WzgaztcgFA3PzE=l}Wh-cbW=HWtnt+$6tA)>=L+f#Ne+$?M$gJQol zY-_w{qlw_r-)$tCzof))9yIRK0jk$}+x%X7!1)_;Ce91j2&SRHyuIj%e$ z6db72zzr~~tNRE2LWjZK*8o4)Lo!~gh86K`2Fq3ZQ-n>vy}!fZ`TMO0(OL?^?AgkoxfZU6J%>_7sl)%J^u!sLY zN8$?^U`3)*FkpZDE3CNIrsBEIt4h0Q>1MdDY?mymbMnH_8^@3fvpITEyHv8WP35;s zcP@V~hB*NJ=`I3Iy$ANZxmG&s-x_arZ?0NCHuBo$ouCmvQS~^{BS8)(?JlZhwqykx z+xJZpSXO8jk2xbdOiwhISx}E2Qj|;+k2q7<;Et|~%e=WHc(mHhn`kPl;hjT1cK(u| zzB+8oY8>-)Whb1f)|*+9E`TGen?FLQC7VCXgo>#vdh6Y@Gt2L>4sRO2o-j5^<%wx) z$FFIsN94jXoG`v3aZ35X9X$kSO#%hoScdBCjoTZP``WLn;owx|lt@>cNr{S5D}Dnj zH%KngA1fOA=~~^F07w`;!fPkhV+PFalpaO45qNpwt%Gw*+)N)L|03O3MXf$7BJ_!s zylU>@K1rUbN3z?S5q9f4lJp-3ll>=?wj8X_X0s30?WhvH)&oz@$vZr)&_zjaSy+A< 
zSMJ-j51&B%R1C9njOdx=Q>dXDr;rs(B>>xUuWsC6H#{$+x3L&npMpJ}2bQf9=xH0C9OC#TVAXj;)gvSLKnOsiNF6sr z?&Qp(KMcLTJbSR(fpaE3FD722gdXkw8+zH;TS);VIuP7n>}}!vp1;eUKupNar4y2? z=sNiMA;vp3{C$Drp<*cdX88p&x=)jayV+W{TsJ8@9uW}bkvz%@tg1q z&~pUY!^Q!)9&%$P`d&kMdEI^~O>chQJV^+{b-zl+ z($DCJE)yxM;x*YDw${yvPrO)aAPUL3s|A&$ z)4^H(nZeKo}`*HZ8)ScY>K`m?LnqZB2rRVffpkZEt9i`|MAnO zNNeShKI_rhduOkhisB5B0Ac{x$$PdR1l;k zt(&L&9eqko*4&kWLVgCzGaOQ`xBUyQqCg{38jsUyO?l9;5Yplux;Q#xK2=kUY+6LA zB)VBt`SNzs)|T}!JXoyl3BF(f|$QLo^JhD+vME|P%?11ZzUub0caH&VkI4J2= zRumI$Esx9<-5D#=Hhve{rBhfPeF;(^(KlDSJ{2CPwvDgCN)-}$qET*RbS!m7njng_ zbE?~o+n9Dn3^$O?b|3HMP+)>HZPvfp`bFZjo~Vos++Fr;bv^Ca5OFk3i%2+Q5?A0nlZ_XubI;PIx9@N zduBFwJ6`JCz3gLX1m%#m4WbA$rBlG6uzqCRyh1`~3D_izA0bIP-%( z0&Mb#*O^lB`FzR!jo?qfb=~1;tine33$DbSs_`wEIylcLKhbS3?4xo1%AQHFS-<87 zua|Bd%hK9xM-yD<=KjL3TnBNYNP0j1-cZwTGoa4b8M7Ot$*P^MpNRe_)ZgBywi@fR zFPRm*11uum-|73P?AO)6zb2;OA+{lUbEf3BRo?Cykyq3$4xF{#1q*@3iSy%4FQ!QO zK?Y^_IyJ(b%g)7+Q#GMQk*a7D2p#QIuy$Sc`si1i6xY2Rrc3kNNlCFu64iZ9$J zNsWqb*_h~m{Uq1#-c76I{QcNeTbVJh^@-`jg83a|Lp{{^N%Qc0k1B$vPwd$If>XUQ zX!40YYB=BwtD*`+_E~8inm+%4~X^z z?-yz{u(*vBMSAr^MX&LHjZ7PGqdzQOkFXOw>*)FwP4HSvXRRaRQR>n-F5<%>caU6= zKTNv>UkbI4(_>x!O*1_XMnOap(c9uKx9h$_d;%`p>G7h*#c9&z-A!c!CoHw?n|Eu^ zf3ZIJXo<4wqg8OQp4Kw3Wko6EI`Aw(Bid*DN*%N76FCZw`<7E#8E7>&wW@K-ox7K| zsFn;5e(&4pd#J+(>J3X}%-ly9V_@tGg6leZ$Zle87u&US4Ni2!B_`=*BC$2+R;N$} zb)2>y8Bd8;;PkZJ@mD~@!)wp5wlu;d`08ZeoZ~DAg7F;hCH?MtNm}NcHLdrCGZxNY z2dxCg=(NUG1=88lit(K%o4p$0>%)c4!w!m*upY(p#*wqre0WO@g!}i9p;Y7{e9ReM z=k0oT#FOS=ugT65NOlllsnsS*p#M~Nt96`Tm=;i5e#eJ`B1g@5z1FodQ4Iu0F_rpY zzkX30Vz0@bI!^+|2Arh4wC`)eAzXS3$I%#t0633nO^-swe%}E<<=?Wy_2kL-*Na$P6P^LKWj5Gk9B+PnWKtjG*E;0U7LUG5=Qsl3zL(pX zt8EYVp3P5>K*(Jjx9GOWL0K|OJ-4VRs&;@wcSB5Ft+UmTeT&#mKQ_opv&m8MH7LpDc zYZ1`Zo92XUn8uV8MOX5sO?yW2Aw}Q?-{{x!$dHeO19L{mY-`34Jr=^2N`P?m%C zCa7+@NU521O_bv2=ht=$U%;ev6+1eg1ubqO0RH0My}-9(5F@-9hsXs!3n-Md0cb!M z5yaD|!UAtJLo%pK@fRPe0lJ>glt*uuIDjNR_+Ed{;A|KHxhjQ5esf-bY}UF~WqqeX z$%S<)6T47n>fu}^s??kCV-3o78fn9xG+oJ-f`s#G0mU{pkM9YWzsoKwto$yscSkdV zEGP3VR5;Ie_gu2FipCSj(?E36g&W&hgab)9Er-1^d9~_D-tLsynjg?6d~3+@SSR6V z+M7k#=fH+zkA()EE3WPsp{ua@5d&v-2+*upKyVl<5s2IQ;}ib!tcU)Xj5{c5Tmr0z z2Q2Nn)Qj7sJMBY5L*z4E(_wBNrTTq@&-e~1g-v^i2j@Ha`D+p5C~;nTfaGN9rGN}= z;fb1P1uSomc-^Xl=3fF?<6K{gj^C9%1u-Tryk(92YRjIhK0lIby%>$Zf4~X#S=ti# z?Lg^<2Q;>yvulT1cxT=BQ5+Q4PONXxJ83|;bK*QCZ9wCbb7qMWu5Xl|J!}YzLf}j_ zB-IL<YTx_yJ)|9k z4U#@Y0t`3?PEyjRF%MRSzD{~0DGu2JPHWVlaS*h=dn2RE*}=|!kXj*fHq3iK^8cXC zm(TF>#H4Oejx>2OZCENAY3qCzOkMbuV*sjCBP-BX%zKhytIp)x1K67L(+Lat9`P3z zlkmgtYH1A4+RBEgBcgC$Q1wm zDJ(v0^UaXtBp;&5ixvWjTc`Br8B5`f0SV-4Ip)zo$4lG@iF zJ$l5~Ck*=Y=@D+;3-HLq{ly4;#63^j5rKE;s03gF5&>gt0DxV)TO!mxTjS*`>uaKK zkVVGbkq)-$_V<0cGOsN3!IYS+4WzTK9?%P+BS~tGt9>vvgT`p{vO<0Nt&w6~-uho3 z$hK-x+(y}%CNnCS%>vEkx=p-pR))?Oyk93TKf4+*uz*%qUD~vUya~&kNU9d6FFkLi zySQVc^uXr0JkptY*vi~8aoWJWcFl@vfxz!Bx}v~3Es2n8+1U-4pjnqWFM^fg;@jsp zl~;{dK=Im1O#*W-jGTXH-+;FSexehL$Rpu2{Rmf_(07?xj)(;zfF^Lz8XS@W#eC7g z)0)G!mHL?q8$d7snv)VXCQ2Z|I>lk19>9WF7d1T(i_SrELXr5O=eWsUi$|iw~CG9XDvc+xo6!`rEUn%Y~H`zI1+G$w9R9DvskU{A#PIpVoy8XOx z9N!)H)&~W^!^ZX=4Zvu)o{z?F3DN1}?E+Ww#HnNVqRJ#Ir}zm)u|*Vw?-ZvLHjGG2 zRIK}MMTtu@FEbyds%Y=ja^NJS2HH&ZH@fVe=)gs<)|E0Kv}l)%t;8fgmMVm(-gsEi zVr@GsX|+(7vjb-jfvG;QY*_?uIZfBuhsMLUSTOsP00m|;Bo={!shYo6$9y<6%q!P`p|lUX;i;whtPiIx_}djHOMhRJv=070z#4#^R@1%mbW zupoIOqw-)ck5Ksw_ke1H%8p+ZDl#@xkY?Sgv2%CLKJsaaojTi(BTBMGCHGe!<2^#b z5B6zL_SUO2sIk#|4BPClH1(E=OwT=z3JScxWXAVY$uDR`&2YO7&SBgqs}1*#CFeb| zwUWG`al08t=kV+(AIdWlwzrK==g!HQOrm<_H!TrR;iVb#h#*$Ba0Du20s)X>14@Ph*e!C>wYtga5|fdjFnM= zzn3*}-Wq=lF!|gt?`#$5UT$lZokmAxX>U#2&nOw^Huuiy>K6<6WLkc11 
z>M8X;QBHD>`fvMFVQEEcgab098YOy+#%;m*h^}8h5mX**p%)s>aq#K$m+NKZU7-1c zq}xN?mi(scYdtr!1to|65VP(ts$o5bHGo8$)~iN1qZHFvTF@G^eNmJ;L1l6sp`fCX zLlsUblRU{CUC73-tteWa2uHGYuu%N8Ht{Q@U!+m0mwdC@y`pwpDUDQMZjMsu&Xs*F z9-0}v@J+9C60URipn|SO+-xG&>y@H#bI@Yl#RrV%0@Yif9D|OG*A}{==|Vk=DL;Ra zI@Y;pY^EjtoAvBCP!qa(PDRJ-5lG%KP20$G1$}pT&Zxh9k;s4n0|4zcPx8X{hP~uJ_-@NQ=CPq za}elQTYW^ias1M%sjQK<*O}2T!p=#bSBj)xR^p=B$6-tE$XW%e(2Y4_NdFYgal%mq zw=Qnyxx#vek8~CfPA}}UC!BZH(}<*lyr(lZF!kyMhf8TVPPd4CGga)i1g5jh3`Q^1 z&N=CxAUu}KT6t`_&hgeEu$zg1d>ZY5D0^$Ppx}#kfc<&dxlnz7{@O{K!>s7hGSxGe z?91tUh0&&&ToyBs6VsJ0Dy4M;*875ZdHKFIZEK%rklaH6Iefz45?FuP;(f(*&wNWv z>UP|zh&Ht#Z%_2G9KDQ%Ou8HlOcr#v29l+{Ia}dw9CNfWsLe{s{)6tm9bTRd$~K|f z7=9$(so{rbiHDGhN|W?6>h*!I!WE)kaDtIbDD2FJNsz<1$WT=3{KQ&!N`2)VOYLbI zOHi%IWx1z_RsExwHaYE2Cwm=Hu*6D}$y#Q~u+)K;$m0v)nr|H6mc_JiPat*HL2JHi z`edu>8%1ta_F9RbwzE)zSrNH~{nwxyY-9!vhQ<_66Nb`!Ao}LAn4+Jxo``~M5H?fO zTSbJ%L?x{J(tLSL>9!0OnvNlWxE+9lD+iJ%LZC|_AaX)}(TSF|wLT&sFVksn$$K7l z?O6)pm^gnVC}rS!s5@b#F!169fVU0YFLW6hStFK3n{0Iqr#Ce;be>redK>Bx&n0s~ zI4T`xD)%V;zTl15aLU=%?RrgAu*{s7-Kd^fY)DkNp7=n&9x*JfuHSILg_|qgxz;iB z1>~Pht7?UYPegmS)=>$}dl6{&LRTlMU0Td<9RiMdyb>8Uw!SEL6;&1nH$Xlr<}^0d z8+Ar#?Y*6TE2P$o;p&KYHS04oU)h6KA3jps=B;q|gZc(SPH z7dH?#`A7*jAXwZOTdeVuf_0RciSH>{j5p00KMCR*duc{Sw7c4eWGjMUdiK2uH9nO~ z7Gsg3{IbFuSkX^(?w{H-v{h3tH8f%Xe5?683A7`J-dtZ%hK=Q`wl8`^TpYmDs~;uJ zoUSetY5QD8YZ>2Tf?QVA?R{D6_b@WD2+;Rpy5M(e@isY|33K#by1wKwA85U@Z)_Tq zR1k!1EIs^Tj&Y%CK_M|O*CiTdp#Tl171g?`*@asWFW&PMf1=cij}Ak7ir@Gypp}{&o&yTsS#j{5X@K?8__7KD6_8tK4QxG8)l+hG^oWfXOPKCnOhiPYoPV1QwAB{{Q5V2E zTj6lej|uykZcWc3zkt*Vl6jAedW;8N@19OjFD~e}Q@KCunIs{C^arPRGQX;7P+yT- zT*~@fQ9z&ls;wG_Qwb&)%GsO99k{^cty&%6<4eV zyeUj7Ps1FtD}YNsV{fUQ~*p+@P=t_lZoKT>S4 z_5rgT*{v1|xZsrj#PC;zBT@)b={pfa83!^?on-ehLUeT(Ekc7?KIR)?yM)+9pyK`3 zPdnS?hOMw1^Z<~JL zsUP70wT^5W%6Uzu!Lhw5HOOuH_xUpH_qQ_s_Y(ie|Z9Ya0E z^Q#bVWwlbKSVMYX{b10%T%Hs;Myd1d_w2|l<|c^nq2ORMjC~c!Yg~IRzU4Jca4beP}b^u z#{%Qtx5|}_-j-)OWmoZ15BF;WMa=yli+(FIgAoDa5?ZP5gsc9u~5!&jIw>jo&EJsg<*B zxVddra3O%`-xXjs)HV*`?cm(BHGgigL*C~w3H_zp5<{%9xWWci=jnzy!4{}f>}YLLarSpV0ZM_w+s`-`Vp8!= z$3xLpE}dqO9#Bx@p-<_tW?O%La9Au^*c6g_2lqLk-x2v8_4ptBS`9MU$vCd?+T>yE z>5fA2R}4?jrF`ettodDp*?->ckdr6kxCjMD{PMu{ zjK)|cYs432&6FmW_u3zOMf#Xg(`mjq=82^c3reiqRCI2jx{W7f@XVZFWOO8)HwSPt z%Dt;HM<`#z#?Xi0jzP-#9LHj5NxmhHTsw=YMaQdz@yCt0vGsky30kG$M;AaT#4clOiBSyBF-N86VYp7*G}k0*WhTF` z%e^X`$2$LHZ!1EbeWyP{Dcnoc$!fPZJM$bdu}OJc(NBlU4|NO;LOiE93@ANV`8e&L zj=_0IS~|yEGYRB_8YXFq=ef-9&Avw9D8M>6ExNM%j5>aOSYPLzzAM^&ThUyDae^Lo z(ubRwqRIYrK;%lSlXOYTnsgW=lU+ZTSGZ1>-cM^YcYmrECN*5f@9X8=d@vK~+*?WXmBAl*Qf|xK(HBV;=91YRS7y(r+t+*EyJS|_GXWVSzR$k2zVkCACoXQo_XF*Ki z$Iuknq%o^qp{G}_wwz0s!ZWP>^-jW|2ppPrW&`C#GjPq;w^<7>4|#CUWV*=z3thSB zww9>mXh?&PMZPR1142?rGJ=i8dKx2;#*oC5e%%4ebS&+`pW;x@P(beCxdgX9pnzjH z=}wz1Tgwz&)68yAcakG_4C>42hlv=g@j1qB^y%zG_E_=WVvtt@;AUgL#eWsQ#eY^s zpTMb3jRRQ{{}2$OzTiwCX?swki^YFx9M&{aLeyO4vVwZCC0e!>p}_jBE99#-e_oB+ zv8moEHciSmZKZQrcz4_gT)pGKJBU~hdcj50;~<6iVg*(OqR;$et05a#LGk*zs4j?R zUMYPm7UE&i7T2`PJ8Vry8$yg{Ujnnmzlvjj^BB6Ipg>P2)Z&l{)W1}dZKS`}P@7e& zS!@}?(ZYKZlt|`}j!}j0fk3dE=)vRwxSmuIatqC-|0r9#q6?FKO{Y?A7nHxUaC7`; z&JS=S-jk6!U6(PE!W`|-oReQGW6B)`%3qf{i6H!!QO_H=Iygc#W+b5sM&fOqhu?9g zfZF;sKQv_6s{Ta-V|pQ$n@U|))Z_@hak2zsbf4CV{Wo8$3_$k-4?BIOc_a@W<1^$w zZ$z>Tuan_1F5#KkQ^uDF9dhEGHsLRucY%xsN?tK)GXSk2SA!X810(DutnCUAWU$im?cpXM%Sph|O z>pYM3H6gdvv$HZzfZ&yW`?$9=w1@Ut<)2;D-CZJ6hbV!I5X6h+s&EOJPM z_4W0!2B3Hy?;P@ZLHas}{hb0f`#aJJ*Zm=Z3WO2yeZ8YLwFQu{`dlQ6pmA=#q5O=7 zBcV8^DcZgb=W@~2Xj~D9=AVssoY|O2bZ$}lUVQ3jcj~~q?Ffxx!2-pOTlEkeBds<^0Skpx1g}lqjByo zb;VpC|CyP46x}CRh3tg(=Yi+}@w-icGowOCH94Q~O*OZBZS`zJdGHtjEzNQ!a}!f; 
zYvgZdyiFh)nNXl=F$9=evkOQXuRJ*bbzur)A$T#H20_=H{%l_nF(DhoCPt2IfW;b( zwW+eL>?Vvak~5)cXNqx@KS!my3?D$QppvkGr5{y_X>TXfAQ|z-fUYBBVYiN(yu+ul zPodh8+D=9re1V5_5lRcD?XauVKqqpAyZx6GsUYs@a)e_@?r+Jt!Y{FW^R$p5?!q)L zdA#HX61_d4tK+q^a*D&vbqr8HF$X7(&smKL-0Dq>k({D!OVl2}?%4FEmX@kHSiZo$ zKWE@lnb071&^bq7w+uvZ06#x#>e2d~lEEu3E-s7<(df1};#=lO>Pd%DnPtDPO z-NL&yU7n(F&*wNbYW99QGoRCR0830STzp>G)$FyqOw$XfY$I}sd~7#RcCo5E#z7qU zWDOl2tKnaOnAj|?<%G<|t!5wZZ)bngIc-_+l_7BNYE>9)3r;T&#N zL8r+^#jp-^O^=R`%iO=8+?X;n=sL|@h|0hfcWOH#8#Z7iU*g!sDgWP8ltSPyyfKd! zHwFs59;JUOp? za~STizi!bf7sJ^&(%UCA3yT_ufi35IFqtelnkA7S!IuV4H03}8MiZSY%K>DZomhDN zquzfGyHfEgkdav=PAordj#_!3fu%usWrNl~WE?nIbOFhdBg0(HEv>~6m_0?r@{r2W zaDCxB%f{cbTMd}gFINO#$rED#56Z@U@<7$p9V4jtLz4dS z$I@cJEZx}JP!u>Eeee*V`)Nmx`fC6E->5#M_#O9GR9HoHhun72X==gCtB+E+t~}eP z$KbiA;hWToJ-(oWu3F?{z3r@(GZrdoLvY!;8v6jpM~ z_@AYp9dMiF!FC=i4>Le*9A^8J{r&yrNVT6!9ZTY~AAfjA7Xy$T0CENM5A}_p;;Dfs zQkukb(j^e$7ix{EW~*Hg+1oBO8=LCMPvJ`l4W(;rY-}sDpRn7QrGn^U8w_vag%2Mn zH;U{aeMHQASVIf<@76)q<8^s&oa;N1(%?e&Lvk3WW@I#hf3HdT;nHZMaU#`|$ylAY zu8k0A{WRt{_|m5t^zU)bXdEf7b1^W*x7VuoBI*JtDkvui5Aa3+P_WQ3leR2aVx-Y9 zfSTj@?mDpTLI#d22*a0x+ZSK~0U&nMue2LYy1PF>G3eR?-gyRQ&Ejz7EETi?qWFaq z@>k0t$8PPmS9j=<8*E)f%sVbGzaHXTa2XAeYA!-D5Y>CG1%Mm<9*C1ifzPw2Y_mh; z(r6*KVuS+CPdrR1aA3pTu^(MrSa^TB38H4kxhfe7R2%SH8f=CFv!<&cih2hF3k@Mq z<$(r(zU6IiPUj^bTkzT#BP;ZRwIW$J1|fB%Wn@CfnxbUix~`;ZS2!70d$uP{K{PW0 zi0FSxL?xF^$y=L2 zGknv!7vv-C5(R@zx_g_d_OeLEiIFa3M_lLudg$m}-9GbVX-&)X+?J-F+SXtN-@I>3 z!$IM>E=fDGk7gs&GYFYM>OKF6nj#03kM|}aw&I)_o!kB+4#^)u91ZsnlO^YH#|ZDP z8?>T7e5?P=o0!qR>g^fd+N>*GKWYkru&8SPRLX;CT?5q5!eejIb9koC*X5loWF(Zk zNJ**m5)%_yA;-->L2Nl0|ZcAVZC7#L)^vs^mlZ3kiU zR8BoO>4PGAqsa~Xx}Qq)v)1DY4l~agXLZtZuE(&z)_M8oc20J7^G1$=>*#EMNySWF zV=R?`Q*=ApwT#jKk3bdXO9CPs%6xO$2Sq#y8|}~<*|cWGHJj&V zd6+T4Kgm5J5a{@8pjrzo~9%C&A}okv2M?T(*! 
zqLU3bBd~%8x+ieYm?h2H z361Ihz6t_acoeC8QoPXjCM;XTkllpoS^;D^xaI0vZg!^?n0PZXj){+~j)}N@ee5YY zyyd}ZZf!X1HdPzOjSUUKpl*?3Z^?T2VcrcHmOjVXNPJXk-*FBr3Sam@#u zdAZsKm*VwmTznL?w~SkpCDNN>2Dn;UnttDd{JRo&5vxUDCk$5WfelW6eI>8zO-&7q zMQ`HXwu!B^v~5euyXRCjJzHFpt^eR4=aWJ;W5{X2m&$y@sm+&cd?3p}tk1dz&6>4QMCTn%ZR@fzQY5JZ z98@BkatqVVrkf$9(1r3rxjT14%zVj(TrrJqkSu|>1vo^_rwH(0FU);)VqhuLdHE?f|!1`M+GItLQBmE|c0 z$fO4Ijn|pysw(Ya;tL;h;EnDP2ezZ|l#f!eF2lT5t#IuN0LcQ&@`gL1nqV3cz2w8b z&%%cjmld`T%O~i452qij%A#zrY(L8UTy$7f_<9X1SPf78RMf%0!uA-2mkv}tTTvP5 z!CLZriIRZf$ok^YqWxay-yw3Xbnfo|M*&t1W@Ftqfdc{@xx8BO;e- zHYL2j%fW_W|M#iu_fxh3_8TFSB=`;G|Jj8c@zH~B!M+>&?*S5??X3%@VY~L$g}=)| z$-$t~Gq-2{yO9geK~$!DjtTo<1B5*G5fhkcIjdaX!<`s3-4g(+#s6JrZi5E!{a(y2 zNgC&Vna?z@rNoHEbrzJ>5n!UuN7haq zv_f+&hlCv9IJ*40uIw@FD78Gf9Xg z=}k)Ri8_42^H2by`4i1SI8g!tvVBdT4;u(O@l*UHlv9wLKduQ}&Ih`{PGX?a5-3bA zpcJ9n-b+fn`^AWRL*=jq#jP(a!Kj zA8)?~X7L0VMv&?!#M$5*(P2@D+F3+Q=_J-JRPXodu|JYS5DDf#w{g zhUyV9K#lGMg~k45k7)U!{>dYJ_E87VgJy^$iDkrv6s&RowK6`F$SVw-Xx83u>F>V2 zgH&~ew~YKR8aF!cUUCI8ct{~42iOvL|o;(UWt`XUhQ52$J*tCowh#nH!;6v$2g1K6$=St)*N2Zq z*#Q9c{o&Bdo5%S8x1^l!-QSeJ!lLe-Yt*{^t86X!OerD+wdl?z>6!w~S;YGKIzf;< zvcvlQ?PrW&T1;L0zFFKl0Z8#D`XIp{`GJUsC@Zh3Z{#{;QZ)5x47()zoRnFs3USUy ztpUkzos=cE@yEs>IU`CREa3X|k(7ci3JGA0U9s)fc9y<>|9;udj416nEPBZo!63%+&7jUb#>`_2H_3NL$YwkKW@Y}s!H7Lu+4Px+I05-t`b{0j$N5U)(>&T2mc6I0`2 z0wK2fP0{)Xpw@02NuvS%bkNqmpG%Y$_M=#tvb}isZYIH5DNqHC=(`r{D6;MlWY7-l z`uJ-YnFcK8aiskY5v;So9`L z3>T%v^vycYHUx;`;m`EMGoOgEiY!g#wqwh?_QYIL8HEC`BuEi&e$V7e$6(}u(0fP8 z8h+Mg+)KAnbto^gHu=o@pLe^EB^v);Wy#{7BE_Dn87*>=4HNX@tR2%eVLO4(NMur3#SZG%*9gt}(t|Kl?*1K_#PsraLr1@R zZA$~(G;Mdv28Ac#bLJ|oZj~#f^$KNiagk_Cwe^_l&&qMI?B_h2B^!^rd`VGB=}P-V zc4*Yly7=k@scck>SsQbXZ=A)BmEbFzdb^GHj#53x*{^obg}Nog(Uq0?hQ{ZM*%cI{ zy*TeGRmeOJFK)ZSrJmsS5QDL9<<&|gS_ypM^W%q-%a1g2_KA!lrF#!VoosY2mu)%w zt0i9Vu<4M|aH72EIL#9=vgH^qHNPHqu~YWh10rkI4je$;%Nxr?FB zufEQ_Hjrnq&Zyl!c7`nKGQQ(VPkKaQH+EHv{2*&5VU3+Xu)4-})A}plQ zl+zu@q%DvZDNuI`qW*f_gF9VAb%_nd@x4k~4;!}SiHIp!8|C|C|vI`m9enInsHVn02}@4iKu^E_nb80v!a zvR7V9Bx{up3y$Av<7&}R<8CvdlF~g|$1tT-bZ2c(@wA@wty|$z)_Y+mg{x$reB&x1 zlE&imBN&}`4vZR5VJp<{HRTjifrFR_rYcGO@3 zy;})OBmjD`D$z?Nu4<c1XUW9AmBC<=ATQ8Lde*CGQy*F4a%!rSg4 zGXupRHu*fY?FvfN#pDu5yvfjAOFL833?5sc;wZ@2#yI%JvA=pgWjCJB=0v9`i!~w~ zb4i4CfG*1`L2svW_lMH+_g(X*JHbpZtqTlCJ0gSu0w`TmLT;9*>0Co;`NJ;>nT)?s*T(~HP^3_x#HuYY1AK>EwdbQ)CVM0 zGlpAEd0dH`%1&0Zkzx7vzS#wJz9edU&q5K0ND;-F+CYQ9xCrm-%Vuna0H9tS&ZH+- z=OZyP5CKR2!pb);B`q!E9UqK3MHwL=WwpEnPeJs=5vyL+-x&z)&ah@R~vuHMm!wpyYg>uk?BovVj5R^&2 zHX}tliRE~oI90oP`l1NW`w5F_k)*!%qT54aN=grFCYBqX!Q!E@Qm#dCVyXF(`(SR$ zfCm>9OX2Rz&n>Hw5e9hJ%EM8*Jh9ic`$*cNFgD#XX6;6AwksdT$9dj2(N(x`8tW)o zPs#i(J#7_@4TrVO@1}Qb$2PEZrq1ioS{@^hFJme)JI2?M9DD0yj)}3;Q%QLN8F#q^ zhz}G!)rt%B-orj6OT|>+-;Ne{mv7fxs$m#@iN>`%O;cI5G;$^9;HA&1mgC&dUfn?c+@_d3^0L>%+G5 zVQP(>Yh4#_G^!-5TGe`0TJ_+vZ-zaXquFW@*s@D=sn`w8_sFcL30xica#s7|@;gn| z&2DPH8vHY3_`LZ8cWPmLnJXR=k1=R2J$zS zgn=&$(^*cLwXV*s>g$!*Z121akJe_Oz zs~F1NRWff&uILPSNg?(^;MBZ@@^B@GTa#ZRb2!T}7O+T_KWvj6%qxzzmAa6TyC{g1 zeb(@M{6zFJe&xn@Ei{{vPe-9~SJM~iJdAm{N__g(e3f*$L#kt?5*s_Vyxv$$yxqt# z8kzaKxgilEZy8*UYuCSYo7OL1L6J6%8 zQRDa=CXpz#nef2!q+9&fPS3k;nYWvb#ygKjaUF$>3}|3p!3~ivkl!ncWcqaUPzC== z0n6O$sl5_s)(a|;w&!a0BGxox()o-H$s|rhV4-K|{F5Y#-*7(?zmr9)fh{Hxj0x4i z;G2_SPCUA-`GkQ~JocHKxZFiC!sQ$KNzJL9TU53e8c^&Uyr-)RWBb?QC)Q|AO>DIA z&G)~JxBMRJ=s4kj$y|RZyRU0?Xp*{O08dL6i|2D(pW8L+AssWmPExovqDzRHb$Upn zWQe;YVQA^AV)aiGY)Ngsxj6E-0W4m9UdfxJLSOsU+?FuCdQ}PQM~>j=NQ;Zz>x`I} zSQFMLVbQsJ!WykSU%7aXK``oEJH>sS&G|_u$)UxS!M@H&HDUK(_W~?V9Qjscs$*p- 
zU)cW^qsdSoP*ZeeVc@ji&z0H4uHiC{m8PIXjB@j;wv(z!@$tluvodR`nf5)3R*Xvsc$D`YgSBe6%th1TI>bkG8TD~gEiaNR1NEK4UMi9rlqv}!Src1^9%T1GU z+x>GQPYSiQKvJH@6#ft!yFdpX-bswntgh?wNT1ij!+62+j6lksPT}_tvz^oDW`7n= zuv)2NXtEmHt#c=e1=Vhg^on4u)n33qnJ4O;U#%JGa>I?G(rxVe?2jrsj>%SH{)@5B~OK4u6=C>#cUH=VGs8k^bj`3qd1(*my4jz3DjkTUu^- zAAy8scrS>24C?o?2Zm@~J+4m3E}X!1RV`3u(NqiiH8h|S7{?~5R2qxDgmV_XO)h)u z&;LNFvl_Jgg9>1 ztqsU2Ok1D28^5(NTftIWcOip!hAyIhpD38q2Tb!XQoz z)HTA;MfSvqqxhp?zW%$H*9!XU?zAYc4_=S&-Mdo!98K772M@UK1F#K~4CqX5WqwJ( z%8api5AHeQTNOU@r%f=)y)t+&VJIyPM+^Im3vHkiYj&cqc9GoDW#2X<+C;06dlz2@80Oy{^%L#BO1_DYb5;BGqGI^Y_(3irl z>@ud5QYHG$90vdQra9O}jKHLa7E18Ys!#Md*l&>JktH#ZF}_ni4~0XS;4GFZ1M`>n z+EE#W#Yh;g@^-j|B!#;6@qy#7E-sV$^xfRS@k0i-1}Kl<5fG6v$OS#&{`rBN!YiL( zBnA7wu6qh933`4Y|JQr3pEiMl%v@~d_vzoC4(kE$!}2e~{+@_FHF-mOKIOw_|J4H# zzxDsj_;wAU37&8i)sZj0{#OqS9sGO$v1(622Lyz`ZM78g|5`U}(WrNe{;O#3cKWZ< zxK*q>^}e;o8ymi}>3@TiJ0SaqH(cU6}W1eB0@$BTDy=6r6$dgZhQ0`^|CaBsgNSgd{UnBX(~Hf`xQ^kydN# zcoV-#YU$cIST|_y0K^} zr4`g-zH9FL3sI6wknJWkRJpr+kGW7V$op{-Hm*Ve8*z>;wdnk0ah&FX29LA@fjLp4 zU{ZkYUg1SbhARl4-KM+(a!{hzyeDLCEe<8`&p-gnx<0+UaIFvcB02l@$gIIeVv0ZKjDe7nHSj{H$&ZFDl})wX{11{8-%Vw{M@ZFysY=pw(;hmolxEJe)ubBl^Eh%+|zyY2$VUpSu&tmRNFRzV@rxk zQnpldUXJoe9%kJR|3N=KySt4&!VZ?lMer-=van?S_yP(W!(Nj*sUPU_)a!lG>59|k zew^;H|5*?R@hdwiVyjcv6vz6R#ZK8o0uP5H_#iFtXTcZ*)-Ll9VW^An=2cL*xWY+iYZmbiEsWe(h!5 z`^x1Nrv*RtXGonU8)_(vJ|XE z!?|wgPisK8jKOuBg-RJLf>k2+7FCK&hI|;xrikgHdV^L13|kr&u$I0=;kMFH6BoB| zBx1f0)9GwtYF43}JCQa*lTJDa>U`4_zp47hek5(b#&m*iA0ipjnmp6iishk!=}TM~ zWhpfZf5t=PwPsXHngpFB87Vp+zL*O>HXBlX6BNZpP@}WE#A4U{*-0N_W)0z?ego3t zBeYy5@WMpIWz-@dgKo>}o%tK=FMqdGZrf-*HX~9j<{GUldbO(Jjyq-t!s!}){8Ni` z=V#~21wEn-)_XaEzNAXX=zA>`rVN!<+nv~zV@iwJnkcyn$i{c!UHsotdiR{Zw_bEEz~IU^mfREe%d@*B0D*WTcf<&(_Q_H!p&) zLtmKhAiPriu0BtSo<6{cw1%HhHjXuOEDr6GeCCem&dC}tCc!kScJo+A(v#puM3TL-Z_ zKe-f8Y^uQQvN!%+3)7*-#?aaREUoYo9$^KUmtW5!SF`-7R>GNJz(c2LQ`5Zxw`$9PRApY- zXnVaLV>O*!vnyNkO*_Azk@P?_mFB9>B#_osc*P=?5pbEI>}kIRf-9MuDC*==XlEK| zh=ISgQ6;0z-8YfM<9MO3*{tuXss&W?6t>j$38V5?d*dOBOp=3xOKE;Ul{NGbA@BRZ zBZe}Heny0Qz9iVGRPlI^*7ye&FA6kkpUY!7E}3^Nc6Rle?L$9`CsQD{NsfbqDSmQVowE=0Z5}N7V`;_J#m+7AY76>o1M8C$2p2t z>BQ-j9`iTJwTO`)iLts=7y_h;%+%e!+MOk!MX<7n?v{Aebfk}|ZyPq9;JmM|bfnMM z82M9{qU^k<)Z-+7BD#32BBZBC;uG;4g5^g-QaUm^ z4pW&geW+Dht#W2f^$>Pw3*8jz-`85p4cl!2dsum)sQ6lWdZ&f(J8B^bSif0Xb)qMn z-q==_>>E!(9+YjCQV;t+!2Rtf8&jb@4D2&5q9L>QmYN-<0=q6(J6Act++Geir>6oJ z^rVR=`Z*y3=`R%=nxBX@k3?yYY=T|xd7WUqKW?phb#XB!nsn(}>IA-a6hil)%|T-q zr7fT$*p^_NZZNr%%|ZHTrhr@dnr4ee0#FmmhESc4c*&f=fCk}^9{;X-C$qqDP(}3N zwq+cz=vXW%i|%$2i%p+U5m>95y)r1`R&DZ7Dju|s>gQ4mdN$x9h)GSCL-N`_K#A*G z=%fW^03eBoo$A(i;iBxdUO+@{Ih~N12XFfLy;IDj!MJ2V^$`AnJl7;~5D=#D$=Y*? z*8P{Rt%bGJ%B{8`hFzNzMpmekfRDt0@o2+xAk4Sp;$LPaW!BRDP&13Hi(ryPB^gw4 z8X#nwa`@UX;2RXY|6rQ{O6n$$O2SfP)qV6OalfrbpbETAhNrz(*0`m73rDe77&>g! z%l0sJKbO$y_vE)e*k95cZ}($y3bCHDlQ7Z>bRNnoGFo9fD<@`gh?{cxCMbqX%w?@; zu$V*TvuiINfL`uukefNY+Y_hwigkQ8XHlJbA(syA|rbz$Z-)+Wru$ zXPUK}exO&t(x%*Y`g1?C`CX=42E_I7!7Mz5 zxwNR^>w@=U6quJ}#sxj(j7i$;M2m4_J78Z-8=nF^$;a<7fHDBg0r8evOKw{{Kokuk z#w=N*_ovK(D)nM+l62wK;m6T|iA^igjvtVo;j!o!?C;mv2KzhK9Vh!Rxk@EtdnNK! 
zt603Kr(b2t^P?;ffVuxjPW85mra6+0OZcSAxQ?Q!poA<=HO4><4(HbgQq|9g<`PNUNGi3t zM7U|as1SDndzGitW4m9|A8CCR;I$K8F^t=a9jB$$tCS_S+176Le44u0r4p}Di8{GX zRbCPXqZ(VIc_BGAkVS4bOC*P2*VCjbG0PDtRo%}P_H%&2K|x#8O) zHtSI%3Hm-k-)8Z=P-nouVttUTxj0>NspIO>!C<=A?8v2wI(5#@WgF12Z{151zPRLZ zeB#68EJZHqHF-?5*ciZ%Szm4Rw%f@jwoA@%f>PwkFZP`v3ByC%V@V+FpvtpC7ishJ zTpNmB?eo=+_r)LW0ZM$X!_E6`!2toYfR8H&@9u4lxeAiwd0jakHtp9YNd?)PMYi5> zvS1iW=B$j^ZQ)dh#ABqkB@xfKqJ6!BMo~9cW|BrqZfugvVTjK)RT^#D?k`Pcv~)+| zj@BpcTJtU>f7`!23=VFhu0T}d3w$tv%zNgmvN)tNN1UTl5X99tLv&~)pmA1to>{^| z8^tRLHYZT3%xr*(bt$^3tyw9mmab~#6&7RMeOmoE1UxOL57Tx-(nW?nvE~+QBENmE zr=TPvB0j^rVl-I1SN1HFsxQBKA#>j`>0HWfD{idWc`Y83ugY<%b5t3tT^Pox`r7I} zK#oNfn04u0ySducDJ}&8cHTX9OJ{sOCjs>r^L`@|CA_rCNM5Vg!N&7e5@#a|J4gmc>XgN}%O#Z|%0C?$|ceSErn(E4-6EUg)lQw9S`nL-h{a3BqEx7Fszl z0`Y*?I}Ofhv*N!r$UGx&7?&VSm-;pH!f@nhgQ?$N53t2K8Hva1U31?b*lgFSGcAZD zn#+&iB3zT$k+?r(07or1u~9?>9EEP*%!AhJ*r-jpZfC0YrM2PHLRJ)1(s62K#ss1* z-y>|TZKu|^?G@CNC*RzaGp)^^{`upW z{um@g{DevrfG?NO@lllKu4uKO*pW@(i)> zh5w$f{aBi%R&%vj5cq8#b&6mf8RAxW(riiw=Wbr{ru{fCXNE&h@V!yiz>j zQjK-5HQ-;LAy*8$zI^``?|*iH@iuJf-2X}9uTO{F^cEKHe4mE%g}JwXt5qQ&`on(h zlj6tz@594N!5y&cVMOzL|JmW4pyxZbBAFJPN|rv!IOaHj-!NN|S) zckbZM9o%t(J5KQbIx!R%k8rWe#)`2S0!z^yia2?d2H$nvqzg6qKcM?V}p zmM%6`>irdDxG4Egib8h6lKOmaUo!BW5(a&ev(~?(6)#*A4Vwtd?s^+6{i)oTk5LiadrnyQ@wxvlY74~XC?ZCwn6xR=gACnky170kHf(mk#&M>I7Z zXYAFd(vSa)Ve(+nTPbGu>hA`@Lxc^YqPzs1t(j`aT#d@&fnJ6F&`7U)4_Z5@lQga- zz0vvc*AiVV+z!H%0{h%gN{+YUTti<+bc9p4rAKKIf&d`bxGA@4Em`=EW6gL8 zT*VNyrM#W?&tt!moH}L*T~SPPlalJ1NsWU(U7U6ML6A)K`}kX91qB`qUy95tr(Hp{ zEFT&_pAJUVDe`$g-*FWSey6Uku06Zj6SqZH-r_wvItfdIIE_`$D)HHIyQY6IH-8rb zxQ*bra|FwKA%6fYeg|C17l>s>pBSW1*mxi5^~Q5qw~;t12Jmj>mZTWq76NE9a@IVt3VD{m^`>#j@Acs3P>@@1(S;! zm^{w9-j^D`OO4pr_9n8dDXQ9~(>{AkY}Ej(p2_|&oeBFit!`NhgdF`$4fY>M6&tHn zynt@z6a-IMR5P>&5#c=`bF)evD+5bBl;ejCgtUU|uP%0GM8ZLmM%rnTa%iNy6+N#v zNV3?!2yGZSS1k{FT#R|NoUJ9z>CY&`Ch;4kN;L#M2W;rnZyFe|E9O5v$~GcrI~V;J zJ1hWGnH*Q~xrU21kIOYtSIO|R4XJ>tl{JAYX8=aw$^<4%9vT|3}QczFV_% z!*evmr`JZcdE7B~c~Z^^>~P4FoEyGqc)*WAV`s9X+n58?Eu-$QL<;6R8AoU+H5u9* zOK_MpvF;aoFd{H_AOvv+>UV@~ZZde3WkhVOnL&o8!%nPQa8%xm{+e z&wD-CYzn}Vw1+;mZY3~cb5VzkTF%(O9^M*bl&A_W?Wnh`J6qj^lomMF z9VP^oNeI1(eKnOx`Gt-_RgHBaf>AG`F#x+k!+9lw#c_A~6oycfVNIV=3UFB_zQTEN zVE*JOVc5yfLOHRU3&>K)RHk0+<64W)BW0giTbr7@bT~eibz8(Ly1Il~+D{$Xo)IxO3uIc>Uz}_zXN^|5%6&X53(jW0nEKv}UvmlBTH37!1=|_U zmVHWK*gA^8O;sfSjRkzC52Z>UtbINhMwMP=Z+1>6$z4LB$w(>vIKiqBWu$~pP0gl? z<+MA7HD^xFVl%GW9%|C_StYhYa?a_p5w8xT$poWOR?aW+RwQzJh2(jtHoFy$Z?S=DFUXeNeT8wKaJ ziYQOc0)QNC8g3j&Mwa0juhTXdyK1{W|GoL#Ne#gs2^ohmXU#WI#`9KpIE9RW+MDFCN@e}CukZ3vbsHV09&EdXn6 zX2m=kdY^^vGAWj`#Tyr>mjmmEf@+5V?qy~1FeCdCrR8yPyvgVFkg4?0zu1g~-_7Z` zPMT)>iB73)$<9rtYaJVA9x?-lWMRKnwXK0*AO3Mey=Eh0e5W>g&(!h*lv)NbeyR&8ua5eZ27fInwJHXeMNORTOC z+;Yd7X<>2W{cVxt=w4M4Wq)`-5EGO79JAM%3d)87B>)L>yrtsdiS!6}SioZU4|nJe z-r;JGnH@@2W|BL%4BzqaraGu{Em1ToaP#M0WmZ*JKKslIz{X(u$e`tD{AO6b5d7&b0QcVn&t8ki6Vs8-T?Nq|nC94Ugy3WYk84Aw4gc);{F9U=} zlQ3%`jPZ$5d0N*y;A)tnJ;h&SJkj6_4Dvz-n5MOff_G45I_4$i{l~=kMhW)HIkW1mtyDFga*k3639{rfak}TH``6}Vd}^CX@toA-d>`U! 
zJUlu&N_Makr42hEl%bTA=}Iac%SJ;u%dHeAg9cO`uZw)?k=KQ_<{>KPt)Br*mSgl%)gl{1tA1R4yVKdThBBzQ{x_e}% zpItMJ-4Ar z_1ZJb8F3c=Fp^d?unHrd+5ECbW@)U?%F0mjnaTN~6MYstE23M_qYW%}cJ^b=a*Lxy zT-iY*lZs)ahO9*ErB->d{0uz(UzU^Ijuap3!btzQrFT7QKCPcKO4h5DZX^| z`&;!Y61?`WSY{?R=3OY+4CoPcJv=ZjwYjN%zKHjY>I)&`#=$d_m-Gzw;pS-CmR%?X zbomu26E09NXl`#$9WXt-#<2iYVq}5=+-t8RThoCQks{NPYRiqjV-EF!19gCf zbTNPe=?Pkt&j)*jD_pkevhOyOVZ*9J*{@ZWxHFyNFNys>5At&_`idDGrU8 z>88P)Ius$0?P;0SLSw~l)mH3`^~|ztEGp!nKw$k>R5O8uO)Jl=_W7}1(W_4NJ>~)|u2;ijulk@X1 z_MdP65lq%ZdRy_y!+o`$?7f&4E|Xoyq^aQ6)g{gjY|4{%x`63%h#gHfI$&LRq@&S>TrsMI$P_1lzI~8$FYkr7HE{{yvh+G&$(dVK^essy9YVS6qs2*klw(gHEGP*ZuzV- zL1N{Gkf1WJeYv9j?80P&&y8Dc69yfDM@C%+r+W+EF4H8DcgyG8t$VrFnqgG!>?X2ck_sXd z;D-T`S}kG+3?m4&VKJ^AEbOD#Z2zpX!5|0cB}2CtSzeQV879Vq^}32}sK)uQnC>i) zwfE~$+y7Qx1{-C4_?`xa7}`@!NP5j^k?s4*g+2cF-)XCt?r*a5K#55hvFeGG#OfT((CbA zA21V4Ys0;s1?;F@OEz$zq;LK!Bl`;W{LjA)-oJ{V$)OU-F_%LE!z&#;yl61w;IuD} zEEsmegi*ObQ~RQH#;mG2iG-Drk@sqS+_5=SQ=_46N(m8{UW*`vm!D^q;XUb0^;q+I zt#9i$-(Y|KTGwq_vVpbnFG!>L>SUa=`|r;W%4OTmztCkT*&VJ*v+U2;F7X+WK546U zuGc&rf*MzcHNmvCTOh|ourown)<;OPn zfSVebo(i_O&_QNC#JM}6oL;#gnk-=Lf(3R86ss2U*iV{hv_-+g4G6J4Rq(Ktx682} z+|Pe?-yx#UZYx2y+0o-b5Egcp4Tx>ZJ3Dkvq5Fks+I^C%{1!;f<1Gj~bTq=u2;geG z)vxg<@J2e@UlYNYb}~?!tQKEM#B#@AE^9fu)GC$Y$G+8T?19yrb*Ku@-!K+s%$Gd+)?r zxiY2vL#{@qh-!CHyfhFmcVLHnr*>ntE&#f{CG`QqZlwvpwz zfQsoWyJp5)v5CAN;Rn)W2$Q2kd1KFehBkbo2=#b@5(+0K+4v3r^=nDEp#Zdu3AU+m z9jm69VoLQ|@MV(o7|2vbE$_KHU2P7S6Z{Q4wvCuw0}#2y6S0JdaF1qhHaqX2xt^aDp(9SfbrAXa)?8CwUQa7%W$|8I zn%0H;V?R=gO4E#iPvKOk`nHvv2>eL1XIA0on~5q0Yl&_&bkh`+4>%>2&fv3hEW`7v zyOiYg!iib!KR>5#J${Cr`N9 zS^a`?aQHpNgu05jkadI`(8MEZrCm^m9MQo5kFYC#Wk-|ZY(m?`{L`$k4ithnjAxvF z(0rIwEL5vPDf9Td=VPk+<3Gyfm8xWs0_*oD0%wTkUPg*TXDGT(iJ{-0mDD30tvPiZ zCgFSe?`&P=@n!V^?5X*LBcql!WV7=uU$&`;D^0+wmM_9j z98F+Olx#3eT=bX~UtU}(l|&|mErz|U+|Q5Ffa0Lp#Z?NxR^$nXfQ%Rn2Oro#K&R#p zhj5R+&;TlwiUi4;-IbS^RZjitEbX|`A(mW^$gf%5RiU&Sti*4bm88=*apw2^`&g%A zHE-10L+7V4$fv4;manw6RGC_Q-TNR_35l9%Q4l)$xDlo+r;>t{<){4x2#_CN7Y7MD zW{zW6^xDUIY#q5hv^|OqGYfQ=pE8ppxZHz0xH_rZ?{DJFxs>gZW854LmOq9J-^;!IFQ}9h-6tFw)R$U0Rx0+FL!ond&wTY^I4zG6hcr=a~`1uu_6@8dEW=Yq=NY zV$weUwCIwD^SZ=C-n>$`5krKCH=Ng+UMBA3t7+zE70Hx=Xuh{*tnT`UhX`IFBksFa zcqCT_)FW|N@Fag~&Xb??Cg^n0#!KKJiyhepmA$*&SJcPw2>9N5#2ZKm$Vc7{s6dol z8Iz`v-4lzfCL??ULt^%nsbXxwWC0`}bZKlctDkRLGaOu}#^0r3=P}7W7gFYbZ6*V=2EdQ! 
zaQ$7_H~X+=DtnUXul_{OD5&T|{q%g>-(kIUQ+dmXa6!*(Ir?fTxT!T^`>f8$MX)+vNPzEis}Kzx~13WQ}JV+Hx&?+t&`D(Z~LMI z05p_a)mzVk*-YZwch?&%hroHU&E^;67rJqn|E=^%o5M?q-dAr8Er|j3o}x zBw@j9RB-gCqN>#(A_2EcWLgOSA7-|nJ`ulMr~pYiDTZkrdsi-6ow|3RX$V1B zi|5zGTuRj+{?4l3U26OMJzv6ZVv4Ts&$*#11H?G+M>Q%5;nt2cYhg+@w((m0He}Rj z!BDl=o10y067Tg>q7N79`x>IQqGr z&M1{gzc)D(anLfr3!E0$Kvm&^ec_-6k{Zp1=o!&PRU-d#>Rl94gbN-f!F9pVM#^8cRJNq^6Bp?wI z#;xq2b98o&@aRMHz2!AE8B%>R+qnXQqYj6||9JOpMmDv)gbYCDyfu%gmFb!pgZwLu8Eyr6KlKf9k*z`&WfIl{_9xJj2*W0eUb4D5!4 z2HENThB8D{oZ5h5RDwpV?MR|GgwHK&zEXWoyUW6<91R414KYAJMJXYlwn_ zN7d$DehPCCd>Bq=D^V+9l8oPQ=#<+VzdUlaLK49!3>LID0p`vWsvJC|4P3Do&503i zlO?14_TJ;O}loy>@2cWPfT%C|12xzmnQBAZ@G%Yy5V&ENcT;c)PeP%s(_ z`*r%U5iyXZV5sQDWl>59k9a?%p3^#NmDW?glmt7XhEp!iyZIB8-_-GyYT z+PP;%8eG_KH2@p$+EXEjdh#(N82&VVacfwfo}6AA;l3T8Spt;cgrC1XFgUa2tnlZ% zJY5Qli!-aK2h~BNzbu3;TrXf<-HVM~Z+%UGUt~1z+?QSxQan5|a{rUKYWIW@KAHTz@Q)452l#-F{uuP{Wi0il8j1u%kD?7I*+7$UM9xov#K{&mB zbQY3Zc_G4h1+2g`!w!A@Xn1TaAn}XP>l;ztXb3XuwHR4sN-=LCQ7{<~fy78VI_l-4 zrtkAOe>Pv=S6ByAr>&4D41W%Qv7Rp2Hi5Ug(%r~R>2x!Zn|pF$(`ka}y?ONH*L<)4 zxjgM3*?v^=(C|YvX3eu}*7A$GT>l%zCHMTWv;V{Xe@(U6>!v4PRMAS9rBJl~_mJzo zFnjd*eYJDxtz(8YdCmSR-P)W?7*#WJTg`iJ&(^BLs&{MLc!Y$T+^c_Iw_rmo0wQxR z7h~zeKUX-EtzUDH(WHY%Wy!D#`{VAZod@W3e30wU!PlD+Gz5S+J!{qfM?k4kp z<$NcZe=6mzjP4|JCz-!V;0|a03t#Ufb0?XP#;^K{gLG*9@J5%3~yItV;Nw96EnU~U6gcdM_~otmIV#G- z$KZ#9qw;h;24-)xu`oF`Tc`)QBh1Y5!+gA%@YlQ*;V51_Z&k_w^2=_XT#8H*20Ujw zwnOQw7>`)nO0GPoGie7XPbwxq(`}Zee7^AB=;En0<}xyCFDI!6?GxIsPnPFpskI}$ zDM5nPdEbBP2>U-o&`)~<*^X4@VCUEHp>kz{{-JVK>}>hvovg9AtKg5aI#n$tc;8RO z_qop2*|;3s!unh*jTqHkIzwGQSQ1mjx(X&LUenFV)bEz*`fnNe(8=q`xAXzq^e9SBhq1! z7>rCa8@1bm5zrv+*Pp!g^VJ1E1U+QtJ?kZ<03|<+J!12G_ipGp_zKFiR3)dhwm3nM zAA@_Ck>1fHSuQDQ7Pn6R03J1|NsQ_*i7@2%8p#(=m;*S!uU@BZVQDA9d6ZIi6&JRu zbemK*Qn!{R4QO21NfTr6bOD;QLKjaZwb&bIb=m64(r)muRPf32#C@3W`Ov)j&lyP-szi8vL#cz$=?;$~6cgBfBA55F8I;gE0&acN_D0YjU5 z4n}Up;JEqtjvx1)rz@cuviv>!=H-c{b6>OpjR5U;YKpuNX|v=?w$Ud(FVe$*S4m7Q z+wrZLW|_6LoN-tzjde}i@sF%WBMvxcrhWc~>ji$X-@tV%yWR^5Kb z_WVPn+8M1-Ps0lpR%U*jS|%R~`iWFjOIUrdaC7Rl%1Krmsx`@$cT9NLia)EUq=Ic+ zchc-(dZHc1a)u<;TbU1eQY2VRA7E9u4Hvv_{AXtMbS#D+a^FWWMKR!K$%g;UA;Z={ z?)k(@Sk(Ks!XbZzuO(Ih^a>1TjE{k{DWqG-Dmu6L7`YjRSCE51_6b#u^U>Cr1c!6^ zRWs;EiFiL~G9~n8yHYp~YmT1z!Fqx>+$jXi z;{3Cm_RFgwzqw8+zcMW)_%(koIBev;3dK*2=nOxG)R>N4uLhU2N`b()k|P`{XYa?!1 z5Xi2S$1b+KKNhr1gB%M#!}+K@m-%br+H(a7S?9g`S7Y+uChYyD;R)4~)O=feQKq(G z`P!!A*)bur9W7va%MYdx-l@Zv1T1y_V3S0x*td_(F=o{~?8db~>)T8$z+Duc3m#Uxf6KPJKY_UvJZ0M3jJ+ko zhmAFV3YYUAxg1$j7s=(i3BAD4m5N@jZdKxZ)=L4a&zW47gQN83wHlDltGvT2l37(i zLq!J->tn8;jszXwFtG@pp%i`4Wdfrb+VA)&{9-4M&H2`Au( z0gW;>2JlhkXR3tdp^UuB#s~ps_&_^$c>VeTpU5f=_NRJ3bNIY3lGBQXIE+-p_1_n= z+-uqff!uj}c()IZ1@MA!FP0Jw10}zIj+Q$!6^mJp8kKO7Pjfv<<{p(aj-g?%<4e{_ zJY36x2BI74oGQfL&g7ThZ%9JuZrICYw5Pa%W@@kgv4;h8!YR)nVB6Fv@Jof%$Yi`E6CyKc0-8GvT1SxkVlZ1Z! 
zt4|)?RouOouRhU2kL25a)Q8fU#ijw3qy;vfZPdg<3@JFDH&<3xx(>8ktp(!nd8gM* z5@6W$f!C~^?V(qRYWDZU|5x?8cKC3x3KDXIP-@5TX4Ly$U*C;oN#OThpidGWr0>rU zGFIbY4_+bxa%iXKvPR68EWl+)jy!~f>iRp5lqpH;S0Iq|_lkA7j?S+U4+{81cPJMs z+ZxgEf}>s>BSBP5Gn#?u1yPL$_ha}dMOI}k=e|}`?{G#5+00>AQIDO`>WvzVqz82a zxa{D5{%k_U0lVXTACWAfO&!{M%&K_A1y;on>8uw}c<1bz+*3<(F>!zQ%is2f`1)uC z1XRMj4h5qI&IB~$)6qrxQobVFwg=(KGS7lnlVZ8oP`%#(_Z_s; zo1>Xivo8}P`iD+J-dZN@9}bKmX=U#wMC@ z)H53Gp!+NLBJU@ImDC`@F|Id0*;B>q>k}1mx%d8Kyx{QQb(Z{cb2JfCg$>+mKU@m) zy*p)7>4seeX$$jRe3i{gqH{TUMS$?_`JX3c6%UVxyec)`y*KW={0ysx$1kMB;Snxk z*h)}YIEpnhKdD6*4VVr2p=5_a@#5Lc8vAZc56;$z z=t2Qe<`Jgi0s&sN5tMfnJP019dGTC}xVECe`W@M2I5wWeu(C0|<}mHkKX$wVgNfM3 z*h||fn0Uao{j6-+rg19;mb+ovnj9TJn`+9cMp>DEL`mDoviK&>3dA$b&um!>aMWv* zZw`tYb$5qy^7D}M46iN$JNL7%F60@bS*56g1H;LWJzQ)L#`t@1hTM9G6=T>dZOJXR z3s2Jfy7{Yl>>P8w@m5#2hgzSvD;^IGE!XLfyefA%I&$f}q0cvJsf=NKRiH&z>cern zwL6GMhO@!z`~1+nq+xVFd-whPM;uWe8zpX;Qso3h=F&I`#ef8>dKIf}eQlA8#3*7s zs(sVBN40)nUu__h`4I@cWF%@+i4;cgi)T`z;$zuqR7oo@@vVP$JO<@*y&wXACt9)7 z;)JcGaJ{e0!L;9~-k6-!c%+G#{16_u}e(UFOsTp&|40CAk?=&eDcBoiFxX z=u{H&xkKV0+af!q;PcBEhJ$J&j;;VQkRuXPD{9a;ZH?uN!>zi?DuS=;(%@-t32yR{ zxKgg_3tRrWCMr;EmT&Rj5T!v7hA8;rfZd>9(r6&ZL$>)WAy*Mg^8}k7GOqqm-gzK= zS4mPLy{gzdwXG_+$2&C5dG!YyZk`oM+=JcY2VA^k=ayOK+MCp`TA@)rhI}O2M33fd z6CO$@ZgG=-y~k$;d$XI=(kOaesr2so8r9i~c3otBEpRH8iyL{rlbQ>YkoY<0Po}Xp z!`99-+Yx%)2vRrexJjofB^ln%jf>W`2g{?>b?k-&=SwpV)FqA9n`#MNV`Tp2maL6i4!u1)u-|77K7w^^0T z2zUTIWMXXdx$v{1ihTU7z5KGHoJWP9zmJH0J^8Ix7!3#uF;NolA44XmI;3fTcY$5^jgOQub=V#O%}d!AV7$9)lds6=4M#TN3o)KO&5R zBmHAVRf`z4l>zE67EA>K4`pobv)eJh?jv=2+w;j-fllbcYT-qPly%kBaxT5luQf;M$}brCTaxyx+0y}{;5FP4T96JwVn!tY zqer`C6E3U$H6|va%n~7O*+E}y7N!^Sx9l+VCjf`(S(VbdBQLdgHW2q2JfbRqFK^%YEh=yVw+pM3Jk;f8p58X$f8lpu+5oaR;6d0*^t z_=GI zXUaI=Rx^ctvDVt7I34>L!Cj9J98wQ3Eb8pS)XI!MeOF?C8>aT~ZPJkJwl-k*z28Ih zZ*!VlO0kN9LS%6g9t2m5B-9dm3OPkF<(D=WwCvvVovdy6$IpMRgdNetX@*=5&ekaR zz3J}~Cy^_ms+)aB#X1tE$bGSTK-20@Xjlo0roy>_14q_aVSzCZsshg_t1F*W@PlXS z9(FM0aYh<(n=01u<6zJ{rII?5wG4lZy)nVe4XF8p)$>OmfqSn6AtdkHq~g&+CAcI= zuU3Ej7|*GfaInjHqn69=t>V%sOP=B%=&T8DZ1AW2esQ& z(>;1Q4Gnr{XXlGi|Gf`U)w?TorQuupzTRb#E+*~Ht*^Ge5;MZMB6F~MwxjsY$(+P- ztJw%@0F#CaSBhwL@Uc+%l)TbHjKEPl(K@Hg*$=j3#CkK)&#@>U?qk^{I&aU)XmY$o zdTF=UC}qgEiYe+MD$-`w7tJ;N;&PvJM>~+np5$TZdU9Jm|F{vNC~Ol_zALliyPCKa zwQvnRW3Rg)a=D25KZmeL2|RxIn)^> zmFR?i_+hf|TL1$iY&pbjM{rHLT~%Wx1#C6R!g#fh=+Rj>($izRM)564o)BQhI#k6N zRF56X5N1S5*?hg!tyMyz8a1b5=k4QvDlEf4dFDJe##{YNeaEr5 z3ph~a!0RCcHBycm=<<%)C`Rgq#)6zlUU|WO5v$hOl>iMx)u5|j{R;TOWjl%D5C;K z^GkSva#*+hedlYtB2>RQ3r-xUhD$5_I?PZ~f7HYEyP25+l<#>DOJwLnBA23AVE;wp z15zB+6i#bXf>dZh%+oIh$DKcH+gkOT+XdVKpXipbfgiuk3i67W>i{NMHZXU~w42#Y zV{$5f_RF|(S1^)(eqcMok8RXTNJ|+JloIuMlX2##)-T0_o2r(kVz%iGsQyP!fm*!5 zcH4rM3`ig)>TIw?YgNkLVt6;PZ@&SV_o!d^AVB;4wMEUo=o#$&3pD%t-GKzr-{$lG z2S{F2f^Zc5qJbyB69LR{2uMgs`3)lGc_3BvOAD>&$!_&4Vu^>+@ihCklSbN3IbBZ( zsaX`RmKnMt*D`{vEmmj3du7SuUO?K^ZIa&2Gk7p34zW^NoLw$c>`q!<>iMseX>XM3 z4T)TZ=2HGY%zgDkl-={TAP5M8bSWSq9g@-@0s_*F2q@i1cZh-tNXa51-QC@yba#ia zbi>l|o)zoy^Zf(f-^jhsea_6BnQLaQi9g`VNKX#|jcdweF7w#x6jUrMU0OF%;DxKd zTyBlWpe&7vn}QSN5;S5&n_paPm*ttHPLd3U0MZAAYGl@9yK8~%{MT1F*evdK*k6_g`b?p#sH(PtUR@S)&%=c7fJo*i zcij#)V48l9J}Qjbs2MJ zrl-fg9XMWQ9rq$fC?juE>N=X~^qw}#YKM1B-f&}Q|N8O*_HV50BGvc|=4+ ztF_n36PLqT{~(cV&~`{sP*4!l_TqQ{06?Ai(Lu%C?oz+grQW)uF0u9r9nM_UEO`F9 zV6Am!FslY41XwAc#r<--pPHqRj%DI&Yjtw#&RzAbeX?>Z+vZC7Yzi#LxiyPTLYt3b zt9xZGmb}9~2c_yN$w3Wa?J>g~V|RA5IF;yljSj`AzV{y z)Jf7SBhmSpx8GQk9UGL*f5BPA-GBvA@*fcFAFNl9gT|W_g>29nnJOKM6!aZo;dSmm6L!Ekn&+|s*({BnT2zw2o0>IkY8-0wvCRPR)^1MQ_1 z#yiteg)oAGZfcr2IvBaRRM^Lx^uuRM2AcP+ZQ~sR_QxK^S5$~kxqBs57p|GNb}}9; 
zIXG*)2_z1sB#h+}Blvt@RM2G#CUk=@1kS?KMt*T9shO|mBPm!d3YuI>H#)+{$(_Zn ztFYzPrm8&nLP^6UAhCWAhg$f)SL*3uUpO_b@x?RNJA=AsGeW*k^Z>n1p~I?Ll%QMb zwwrRd8HaL_>(;Y?>y%zcojRk{>+vcogT!vd$!Sfkd-RP4vlCKbA8uL_EaD@yUtb$l zr^XUK3bmN1)U|vx=dQ-iz^>9IQEDoI=5d$$&S{o;N~N0Y?UfL%!D-xM>p)PBU9sYC2Mzny>`2$} z1Sy+p>r3=1^abohL!3o`WN>K6>{Lfi_C1(PQTdp?A6H<<#rK!dvZocflEi;!2d*GI zKI>KJ^L18kIwULRG}N3*ZRk#|wU(PQs>_}*a^r@u5C{zwEo<1vLyWo1I9-=_+V1=J zthj96)Yqe_WTv@#?F)BH>pXiszi|o*z&q3cb-46C#3@B@+SSz5ij8Cu6*HoEmcw9A z(OBl8LE!1^5W2xoVDHson~gwkLZ1WSBRS8DCH|&G$BR;|z-}95UpwOBt@MYmBOyih z=1z{6r2VwiE#Hr*Q1zT&%n#I~eo!B1%Q>iFdEzuL<#2@1I$kAl#+(32P^t58Ov0gd zSu&6!=aNBYdl0Bs6gzt)h+kh0T7OVd8z%4($PqW{g6$mLnP^bT9?2-jJp7g-voOLN z_Ccui&`sZ2B23_H3tz|>Sic*ZzXV6wN3UeBF@}4ce-fT`ab>nSxv0$;+GP%(39)IN zE|V+Gb&()3lL@+@Ggp?+A6CVe65Z!}D?^-9>;341b?JOdMVfs0o2?$5=>YVbOXfbp z0{gLAvGd**Cp#|0+*7>#OVJTtkit{TL}Sb-DHN_fLNjsR=|{E;_L@a^B)bF*Garik zMTt9$>xm$@4Mf%n*PS85zF9>qb=#OushL`udcCYk$CMZ4Rvy0 z=+dEm2Gb;Mm8}_Q^S~qK5+IgBVxsEl>7gDzU(j8djU_o+m=n&L`DWDHU~oGyD~~?( z%jX+S9rLdSm@=|+BM0U+qC&X7vahGz`L%%_2#a4n3|Z6uC| zNJE#Vin}^>Y`}%|P7Pl7K8!UzTg_CZT`XHA+AaJhkqJW0>}j*EMP&6FD-HWy)h|?Y z1}EyQB%)=xW2uoJkL8e;S5~5-N5eyos)4(xdsX#zdD*J%hb65}=5Y~~E8kf*^A2do z&N^4kM8hdG_qS77+CzOryslU84XGD87}u^%9j4w=D4MO54syUR-0r7QdZJ3Fc+a#i z8S$3uTe4ENHR-_Jt9Q0GGMW1ZCK#$}U1ZOMSa!xRhnib_;Lh@M-P~N+x!lox#ZZ4fzvS2p$(>(qlp93f=Ev5^Vu_o)lHw9z6x9 zo7hY0KI^>4j-qDE>8k8UugIk=4Slu;T%Ce?!D?8#nNJ}!)Cm(8@UwZ2p8AW zEt%(QV3sR(6~iB&^-fOiZLaS178y7>+Bu20O4e^8c6KuFu;Sm$W$BK%S|zl;@5$T= zTk(0TZa7wLo?NU~H!u0MmAm9KG`V1TQbz^>=~zq;rtUUq5gKgy@H~hN?M%zeO7t;k?K`bSpQ`xAR zpS5j!WpD^@ucZ+vF(cOC+E8~!G%e`q^Q!8qa}PjBN8jra_-sKRlKng*d(iRFJNk+P z2q-`qN0w0M+_T0QDJUyXU}-SF=o@CYS3j>Ku~xsdE!0KET(f0SPUMLj7QJ61DI%}D zn^1TsVHSza^_>K79onl`;v^0{TztN5f)$E(`Z327DD9uPRtHu*JH9TxB{n>zisy+W zkEU#jC8(+JjU9{%d-37Jf>&Csy|tPyLD}Y$foNB{01;jE$DgB8xMSjyW49E2&`R9R z)akW5^@dD4P+`JXaDK}c;1egRsZRuLFe6X&PCs&96|sKwQNX?sEQ2`O(pCBGdr}6_ zVE8ak*JJf*aBJd`ay9y@sZNu~5XrQT)PX-G==dFu%R&4S@jLYs0$5JC_ zHUlX?{c-|&2HuoV9DJUyBmI;wTAX2q z8XvoBv;&ROX&IE+gTo4vlni^nAbXk+t5Wv3`4~@+F>*yHKeWaef7+*e+>{mw>5U3y z_c}j?cZz!JBAY$oAw|HrO*C|32;*E%>9^nFy-)p^rOG5Bnflw_o>JSL8FiniqE7~c z$91hENvxdQEBpHuWJ232C9D&ubJlO?QiJ!*-O_LUOebH9yMnHOS$i)<5|v_x7NKUX ztK`C^@ZN4>Y38FazsDi^a#d1&lAS8L*VAO|-Te&SN7FWE+9?*8n%%7tW4Ui9kv1b9 zJ>>&=B_fReS-`FOMf0<{g{(8y+7JaUUz7Ry{wm9FY_O;y-On*~OG}qMu{igQXYJpr z)j7d%YbH~$aphEXkNw&~W0r&Z2X~}X7Mp>ukLSpwmT}$OS#Mj&Jpz*N^TM5=A%Lu1 zF<%&olIj68H@aIDYyE5My|gL1DE-CE6t(byMxjugNCB<8oyVbpv9_tyO_Hs;o?e<> zoNs@@_w^LGsI|%|qC>QmaJS=7R&PGvpkz%f%e^e^G0DJ=!m)5ypC36hu_mo_)5lQ> z1+iw@mxH)<{j|ny7ge)N4r_tukHp8z8ijz?yY$;rtmNC z*!16`oT<1Tr_ZINr1Xx=jEsx(-H+rt6Nq9L67$>+NB(Yu{JB6J zTV7611Yj3Yzx((j1!(3B4Xzux`>Y@-h}Lt}{9R4yv!x|mgN}hB!{=&+OdkcMo8I4E zExARDy~OHsIPysI1g4W=9iZMYUozSKQuzj{&%5QVR^r~I3kg|Ll(?yX_zJm6OsLmvVeuBe^GW;UbR;VN8pUsvyapdtNgi>R&c=F> zE*RKVLg&Rs*`1Mo#+1}j#dY-&%FfOkO@)}4l5dvsoLTu+i7FO^7onWHxF~ ztXwuK76&4~G4FiEm2Z*vI#wPhMeMl%z1c9^LFVo@pHth>D(F)wHTf&eX@QE-ts9Ym zMIgEI)LZK)TekBqiJALyNXRFeB!Aj=F(++GDYV|RX}t}TCq8yvyv<8ho?Qzg`Y}aJ zWX>}PM}PtE?s%zXcu2_2)>g)F-Kp(Fyhs%p(XMOsBV}@CjJtzIlbHHgi3w&!^T~T? zO<^=VI=nKN^G5A}R?Na&x;ckxYkf$&Vs@V}CDA$9;&=^B-!wf}D=qSbf8{Ia3n#>! zbK1Tq8MKs1w}?$-18;&XnO1v$x`E{d$(ddcxv1GZu>Eo`$?%ccgm+ocwV$!L$P7Gn zH&KuTMWtY?M;zq3vC}#hv$L?%J-<7+YPca)WVrFR{~_yOQryGw0)77pyKcdq)7@@T zAM~OMd7-1^(|KCqd(%7bMCz{NMXI&uc)800dH~KL-8^OU{N~IXi?GU-4JnlGECS|! z+Ay&ayodMYA6Jgstax|!A%)GP(+?F6&(OI_c=BJsgOBswSO-{|3h2Me^p!Y6O9S3+C&^K>AY zeI8_lD=3ug&E1iT;RI3|8gB$_7Z1K_d$uJ}Sl`T#uMhUzqtdLj&j1~bn)wc#ze++7 z0h^sfd=Sw(NJ+J$RiEy(1A5g+IQf#n?3dSt9w(0aP2T_=pmlKl>rop-qEw{3t28~? 
zu_4;2M%Xo#?!IfC^xGy9nPWuzAHwgp1hWb%Qc?-{l8_<0_#5k<82!5UP%9kVp&Po){Y&Gn@sd_|;r5sU7{8f{K%KK*LsCJ%xvA^KfPT zvu2aJj(pqRH<;@(bB#fuOWpgt+Kt&?z=DtanN%|-?pacto^HhodK8~!C5F`7a3lb! zZP@9H#c*;Bk;Xx1X7uz;>HWt(PDT5d*$EchYhO~wK@`OAIYFBQjiRt$6eQgVTz z{fn|;9@9A?3MJj&UWU8YiWX+!VBTCZ_9>E&$%G6OQ{^$8XbGuu@Y(J54{lCyzg0Rx zx+Q2>&cx^Pu0#0Q=hXu%VV>ae-}V8OLJtjW1BB=r38*OW6&3MnIIo&MlS@oY1iYIx zeLX!hB~Nx2yCXp`-mJ5G>Y%h~9zU*c>Zt_VFikeRK{8mRisA+44MKgOa)~3xe(u+YxEwx#PE& zqz)R_jN=bd>c;B4E-e>8rPLuqFjsp z-EY?suBdVd=X?v{aP(|1K}15rZ(BTHC+`d}+WIa*6Zx*OcccxTY?V%`rEC&o&uUAj zjDNw)kEqu)j-df2UP(iP4nRCfImdFwIgDG!t9k99&(wpPO+aA6SSSj>9Ny;_gGj81 zmIEfNzkDWd?>c0}ivkHR^dH0yHbjU~QBgaQb*JKv>ti&-I&L{*-r+S!n7*Mhr%YRe z>bk8v(DtCJQ=qyZK7Aq)o(^5XUWyWtH_D#yH~_iE4XVG+1^yx!I^b-4oh+7#5m7Zn z^hWZZ?TqLL6Z-_3=T`cA)^*w6ugo7{6 zz+2Ai?m%^Q&EUx}44>IX|Mt0HVV1Hf*DMuv^`w{e-snqEmnrAD z8^MkOTlH6)0u##1?+5R~aTsL#cqhPaq@=6Mdhg!7-0{W5MHyGu%GI;B2uA2T#$214 z#h)4JU#ST_8_?oSq&+8P4TuW9BT`8O<~yUC#?~S25f-=#e>glmY^;4f^{Og6`!VR% z#jL{I{o=M!k=EbT=gNvvlW-8@G-XOKhwp=c?Yt0WOwGkr(q9wFq)Dm%)Il|E_wyw( z_4}DbpTWCv-EWT2`LUTAk3?Ag?ErIM+em@_)0|9jN9Q~#bG%Mjem(taO>G@Zk}o-5 z4ItZWimbdiT@(&F@IUE(f17_E5E1XV9yMYKTUe~DbA^Ya?uC1o&4>$6ej5J+(f;^K z1Ed%HUSIP~{qfyiPW?cJvf#^W8zrk}WP6dxtaYb_aKdGFqYk1tVNFfV#Ni2HK=l@A z8>#DAQCwPDscw2pK2|`p>;4=@@GhfTHX9-~pagY{5e7_laFXa4$O~J11(dQ$$7sa` zKpw{-x9TeP;K6)GTC6SW(CQq#mE3&XK^(xUiIHKH{|FI$RaZf4vVOdz_sI{`|4^2K zhezGk*0y>`7o>-dC0SeX@bL|iIP68lE(Qh$TBd5;f{1S}@YYx;+t}p01GYYisoid2 z)+wV0aKE6is7P=lBeJGFPItp z;AeObSAm~8^-v$tJSH4@Bk(82p+Y!S*?2tRx5X3WO((&eSwA*u`t#YqVCTOlP z=Jh?-JrjMY%%&!ZjI1mU?g+2FUdbTCq<8P$J+uzXj{L|MR>P+q1pO^)e)ppA;&T9HZK2|Q z9-nifFA*uXg^l4;n+9YYJUnVZGn~453+30_|JjFnJv<8Udqk2ELO)LVP*r5pW4l@S zycd;v0`OWYDJb+n&xZ1Jt>*!i+*DEr;FG@N+PL<18I z^v4Rixm#Vaf&k3(m$v?XJ5&{)eM!-)B`C$Dm%k(jQGaLCWB>Q!YI!=7+bY4+WhEu8 zfOh<$^?VM`l{#Ru=kvVIvW8aykE+4vVK<#U)jo&il-Qk}9qkkSJ=Mv9-!S3NV+Aj! zAr7=hRpO=Nj~^Y1i*!SJIBuVY0?S1PcRmD;=EH3P&w9&zYdC<(tnqS{by3*%Ixq#% z8z*L}`h$4?Qwn#vWk}^$WF8?OUg?9Aqg+9lModUZCB#9UNxKOZXx{chc^1b z??8^D0t$Ijt)r-@`0Vp=yAVuOT3((fv(L>Y3un2!<;Qk!$6+|@xJ&?|k^e?@%T{;vjEOHywyK+e}aOJ;$EA}Ff>&~5qmiE@x z<)Yg8g#{9aD2D^hZz}(}2`~3R<5B(Y69ROuz)QPCagz=aSj>pWAK64f+Jqw`FYmyl zl=q|&gU{K((RmjT?2{Jp@(uMSeEbML)Tv_;POJ5o3c}aP1_m4IVfZN~WBvHmRfd~5 zIFtv|UUkFVfJe3Qeq2n6)?MVfb?q8iD~d$YUy4c7GIw2<%Ic_19XP))qwwi0R*>=rsUzUI zoW5UXG0J}MJ@=$?x~{Zzh%OugSx_803yMwYotR)Us@ZV4^?&TMFW}{C(j$>QehGi8 znLF{3TI@X4`z;D^*6Qg;s;Uui4)5DV^MHkseWy$K;#AJXl{LU}ywrg&PhJ4nW#Ac6 zFskR;LxYJqB=(pB0o8pwn8U2ScmYI!rcMSs9DfkUb31{h#-S|LYx)51o(w{f^fsy z+%)Mb9CUo_O3ZEX@wD0N)Ze^22+R&>FX|Rz1Q+MN78WhGNc<+!hNokZ#l%w!42Q`rm_ulXQ$Xx>ehW4|~%cZ#3 zWX&R@x97_rC4$MAkNtsuo)gm1<-n^mGt5o4+dG9$D@CNUNDR;SIME!DS{I|=L3zBU z>6WFicfPFCI=FNF50QP4S-^^1Wg%jvm6N)MNYq}}LFL_>LPGqvZr)4>J`a96 zm1NbQydX<~gC~zj6LF)Ex9SV3Mn~-D=M#o~EzX*r7p;?1+dr$Y z32t#bsZsqg%#3FBFtS-#9h(u$(kHQ6er zhDn^9$Hv{5yE^|+?3@Amx+LL}iXX<}$3AZ(q3ZL@nc79e+rp2VWJUH?-&Z7f%#d>T{=wH4#+=nG zhK!%Z2R}E2$Oagvt3pVee{=v)D(+bz*Q_ZigtT~hStouK_m@_a$QE#3zZaIv-OkHySh|Qi) zLup!8s_>6A13TnX5BeLh@9T@p{Q2>*LVSG3k)fC0Sl$=Gk2gWPf{}>GGz;~}>?gx* z(7PsqzQ-ZQWW-Fh9_;6GelJk_FTYOYhb#c(wRh1I_TS&b@_yf$} z)}45SKR>>DM?rZLgZfp5Cf;O8^4=UDXm_b?`Ea1sIOvJb zr#f`s__;IdU{%pVHYQiptN&il7$iO$7v`2d_7U&@2oyyRO9L@wy>iJvpE8WIe^mXi+#+4}o8 zH$p8>7(2_F%#Eazx3yF@-0I#{hw-_XjanVq>nAkBrlyuC0_CFU6{iY$f1oSB?EzBq zm0%O(`TSm!OYu6Ei%&@r*Lw2g+w4q*AHaB1MQ-+R+$HGl`miY60zuIoyPH);Amic? z?w-WGxDO%7geIOJFR+;QM(d{AAHV4G_S4+|nxq6>A4Gu$bz{AWeX6L`OTll;kMf`? 
zWAxUhbnKW-Raf=EY8K`zfg;K$sm3%d6Q-s*?MvbKX7rh3egtUfFIwK*L1L?XMS1ma zpBPcj+nNEOuu(sFq)-n?~-iSLc*gZlS~>ao09$oJR-bMuZ%6*?j5A-i&IoSu8F zdXgu(IB55tvD+yXDmprAeRzBe+xS-W1+bk=@N&%;oh0}etzCJc=nEL(9LBs?Z#Oh_ zRlX;S)YDG9inq|Gy5P~lUg@oqD@`jX^Hu%$xm{?e$H0{N8@wvURL>NX9~ZoIh0Yp*v4_8g;3MviQDCrg=w*`A#Tak!a0E9jfUC!U&BO%e%& z^Z#C)h>yLAi4eH;I0=93vrjDn+f^cur>w9$Hys1Qe?=X}xF$%IsK1q1oGDs6 zqHSTn3JR#I)?zgyb^4@etsP?S_46{$%~_8fOufOta-6(4gB^wGYOxY_k=|8qrhZdf zHYGC`z}$6`*2K?4k*7rJ=T%ap^5wy8+nru;#<%d;|90{zC_vvA(%<3;fX-=6rT!TL z=goEjeWNDM654M)28$9C{^N6?#_ulnGt46k<g$VEPZ+yJl&;4WsnI`;^audx6gwT-wd$3GUO=LT+^(hJlpzpchko9Rsh@6xl? zFu}Xj$TM2-;EU`E+{2o~X+| z{_YB*~b1kRkfR6BQF#%WKDHT|vaP9!0YMXeY96jaH)%; z6aX)}i9z}F_o4jU0KJ>=o`YAu(vg=xSPB9HG-aZA`G0@!(=iw&A>k!u_cOd;V%WME zt-?CYeCO^ncd>{0O5_0=!UOBR*Q#oh_zw}V6~Df#%neAlevzYx!q~l-xwgVmnfQH- zZY2>5{l%Isce-EN~1W6 zpVhwyI|Bs=TYY+?e(o|Bj7KB_fw;*39K#hv7WfXO`1$@kqMj|#8ffBdxAEqGc5D!w zSo+i(oXd@LE(XMY!DKpQf3X){?nW6t=*=q?lWe~9TGGjZIK$>1G5&J^s1yoyV38}s z;X{`@YXe4r6It;X*c1J|tMK3D!MpwN8#}}0GY5aWT{^z|#c)_!vBKI%lr+yqrjq~`M-RghWSPo(xi~}NC*Go$|EnM%QKAbz#DmcGa#>p>2wYdi)_wma--IiSwv@L# zqqZsMZ$th+8x9_A0=B#w`{#cB@uvTLRSyNbTRsuebLU^{^VdI7csIEH`IrCwN*m0~ z@@v0=cKaWW(EsQ)SP05rpZTBPfoK1JypZ}DWF=+gl=AX&IV?pbB}#gFdIvM#wtvK7 z15V6Y(;Pf?DK_BXS!me2&fQIW6JLWdXU%gfCD->-1_j?Qnr>mmhcgD({lh?Vne5iPAxI<8X8b1Fjc zI%_Z#>LGJ?GT>~OU0h5H4tV#@;ADUO`|2tqL;kbWtt}gma?dl@)|Qr#XN`VQkxY4_ zqN2ynVERwtv&L)0eD>+~OFg2C;t~?l;df<|mDA)PJ5VeY4}1;9yp?kXadGk1t=UGy zqaA2iNJzXdDRlM4`~TX?nNeU11l-(R_oAEUyRvH{2wjcVnKj}?Bs4QnEteN^)g5hkl8ZR zOh0~XwiyT})HJ0^mGG905$a!@!_j?;7=EYH3Y}rxl8OVkqykXf!SdblKg+ec3zpk? zEpX%Vh5!Q3g{@KLn3EoN{FuQTOeT69eRr?}9l!pOlvM8#!;C_lR7sg(I|1ls99Z-) z`2lYKu_g6j(5GJc)W;sGUmG3Yg@`4%7xFlY{ab2GU!T(QSV@+wj-}kd-6<(4DQ;Ct zO3FPKkb?D8l@S^=24kNhta491R(HvY|JzSYF#~7p)wa`K!vAG&6=;3Z)9IE6GQwp^ z9VR6oSf)=Vilaw!cjA^Dyp~>GUN%yzsZLEvS#~@8VpanptzfqUCAt{^e( z*PeK{U*|L*dVH`B5`eI==DFtJ)%}kB)shz^zYp`$_}Q2OlguJ0VEfTJhQql)^hc?&A&mV0Fuj26E7rR zM}sMs`h7`~SZ{%K08Je4wDvQ=j4S-MDB^Yqb2!L)O6FFsFu>CuQBP-8EpO)T*S-l1 z{D66zPo-S$+OIvje5N=2GTd6*hA#iK-SzEAr)S+~yItvk|>HULvk+i}#8-Fiyk_J?xV&=Utqpk-z;o&N>$yuBlV&$|`ovvBX1=<*{0Bv=Rxc?<)PwYsw&etb(?8;d8(c0AoiI1m{iGdeTO)vVO zkmUp59Qt<5d4jJqPFOeYJy2b~=>Ot!R}jEVFYj5JtDX8k1>O4q_2I*ZD#!UC#S)=| zj)s=8rzOi0c#Bs_tHg|E+I=_3z{m)0$m~n<^VwuzaME;;=ZC(fjScZUJ6?vSf~l=3 z##+Ue<3mbv%<3@E8YA5gs+}jwUqIY3|G6ZqCF8#jZ&ojkx+wk9j4M8_cnU>K=OB|T zBP!~{q*Fa&)jsWU5EDoys1@sZY^o@Foon^^mse<&YhaA7VMY=pxDD7|0AI_fA`r_U z|N9I9IS?h2@gxh}{B`<0&WH@&3EaD6;9{AS>-fage)j^51B;xP(5{#dlX=)V!=~IF zD+mRt>ks$zn?7p(gBYSZN5P#VQX8_%z#F)iHmTlPEYg-MPy3T0VTF-=j%#!)aLD?X z3c}6{Of;oWQq~kCb#BT+CO29O=0~Q)pYwD zV_I8L7FopizUOI^IqAudrsuXJuv+}|%|APmlk5xlfE!s#B8Y?ro(pRhoajrDfo5jq zhFu)RU2K;d&-wg59&2npEDUR6(^%Z!zH|H#;%l1MVV~Sm+gyddQ}xc~d>wAyuM$l$ zOxYQ%6%TtpU`>WYb6&f6u^glOfp+?~7K^D1J$rhm!ZP+`TASNPLV4r3u(Fj5yu~z6Va+JDyIw>iC@fpcyH$c=$r6d@X03Y#+$LGoz|^A6VJ=~xJ~0pz6Zd7RGNRqM;gG%z=EAjs>HzxD z(b3Ah%x(tg;^L~YLNic~GHs9|UJ3bgI5QH6_!r2l5;196e;wC-<(UXB4w`#sA|+*q z?c)6@>h`+Pw&U_vLk;K=yyffS+{2EsQ!KitJ5gOSUA#}{t~4=FEhQk5GrUF-ZyDJb zUezZjCnp}*2mCZMIJ}S-8t-ETnO=1AEt#M0Jce6EUfcOcIb>GHd&@iD16Z|IET(Ip zqOhtofq~7;M=N?@hGL=FAVUm~Ne{*HR83VUtTGm!3Ipllc~A)x@%{UA!K((~3dZE( zH}1L>#cla1H|zXKIUv{HEBK^|gN=O$luCfXo7yfQS$^9rPWsQ=1<~rAa1}JIqB#Ag zQ7;w|V=)FTonuVzi;2AU_eDh6$l9O-=c#PW{gr>_8>fahiM0&SGW|ZLISHcMlKo&U92PELPpGw(zz) znXTtv-(V-cWP%<{<>Tdk69sutspuHnUt9pypyb5STpJ9s=F1O_jm?b=rSyIl`SvGF z141rCka21=^!I!JQ($xub1Of;g@xT2k_n)DYS)TaqVn$8j|I)=7-uBrv?zt%P4pF_ zKJLc1I+hROZbG4F^mQ6-YECu`{8F_eCsF4GWfkRPD2D=yd%v%}!9(*uZdiG-cTpsV;`BAo^P|O-`1pDp 
z=XA73sxkrg8xxvfzAW97GONMq4i8W(W8Oz?cr#sXW@}WeGB7SUuK2yllY~=Eed`iZjlKjkzBE~jsg339YjlS?x)Q^ZxCm@VVs z*N+q#2U|>(D-LLFQ6sHT#_!j$8B22Bu(~@$>NuP-`Dp;g0jX3vP(cXe;BDo~ctFgG zr-1PGHoxN%t-2SLprEEO6#|Alxn=AxlwSZ;-TI{k(RE*z40uOhS1J4aR_k2KQZ5T} zaX7i@No&>}^ohEpxCP7>O$h)FlMe91Eixx<8gJ~hPz5gul&D-sz#t`)J7-idVLIB| zUl_#~a1Xpuows~XbU})8ue&;7klZF&GaG;G8-py>v8dX@&JWc z1`8cgTs!GjrlFvs&~&Kdu<3ZT2!xIbN!;}F9*8uCG=R@`&S0vpJtmm3CsPko_20jYyaXz0?{nV=7@K45OX;h^WyofwwrSIK)NXS@;d1N37mvmVfZ~~%fk_K>; zf&@<$XvY0A3Y2Xckqs@WGB+E<4B#jlDS64{ zT*Q7l3OdVt$-?j^Aq-Z;<5*YzYX}lB z=bj=aAS~8R#!jIE&msP;gKo7sp7U5H=wG2?jtnLjw&L9%xVlOa1S@h|+1=6p<)wJj zqJb+vTq4}~GC-}DMZWd4E4h~J03c3~R1KZ44;p-Y5r?8(pP=BHjcF@NCAhoUM9t$b z5|jELgl;gq4HjpzM8-OK`YsI3TAHawewn({lc7wDhKd@Jm`GW0ZZjPbcMh#G?j2E9}A_Kwew zeYB9-RJOz>l~b1TiO?`ndz()m)N6^X%l2levyOsh2#t%gB9^)AIZ!*Lx!*&_#8f=7 z&=N{jXxe|*BbWPPW4f-c(-R*KC%A*F;ka_lDamPb3Q{?ETSGX>Eb>2B3KH9darP4Z z<#Ea3nyhJ}49U1^c=gN|qp%F;JM?_5WQD$au>wUT-*nGY^0|3&^GJ4r2C}*y{cZJ% zU)%kUrFCkeZ@w2km*GeLb_ALGbnDh*nS4BD4~jbqQT6pcR~@cUQBjFuk_ph6srgZ5 zfC2XG04>>u!Ezig0zi#dKxJM{vE#ZHF}uMVXdfu&Hk@w>h3taZtNn$JPAs(iE(*BG zO7KX*wF&)r^|yc{3=@KoWSE@F^)i#--H$9SBa@Py-LZU-v3702_U=lh16xAj&i)P| zs}9p<%|R?E_Zq9xHjE|aghNbl2tX4OFh$do;WA7jm>-w7e(;jz4etf>CNnL!ib>;i zru)$-@v_S@WT!xO{K7Mqt^zq7@S;bU zhI?(C2s2h~@0#(3fL#+z+?qUA0VS=N~`O*q`Z3l zT1rmteFp#>Qe~r!l-)t0JG|oltyx;;6+hV&Gr8|`bIk3Ms>%Zkg<4u>b4_-e>%iaz zT>k`Oh*4srnQzs(Mz#QBSm_6Y(E9cF zhVup-GNNa=BO{PvDLF&h<-(+@r;L`A&n;JCI{2RPTH_3K*k)ty$BgQaUL^V#966cH z9M!*(6_pdvq7V?!M!B8o_K2(T87SPW9NAwV7e8YzmSZr%#>ZD(&%VCO*To0-)g1cQ z$IBuKb=?cTSY{hfMZ`Tzc*((#v8ae(^{2eV+YO{~Z1T1f%P!5|eerrtgur9%u7~UW z&sYSN&Phub`Y5k2zC>q9j|hcX1Suv-3}&*t#)ccB{Z}=H$!)``?d2Xdn-h1xtp}zg zNjTh23;5hA9|=VZFg)MzYOhQZUJ~Gn7PIWLjNzt@(amf-o*FW|F9A$V=MY;sZ2AF5 zH@=S7`H^uujsl1wbJMQ`Ni%4i0sN-zWV4Rd>tuY|KK7F$4yl{aQe<45b%xuZs`}fp zJm0?zj1C;((3umrzJ#=F-gy^+TwMk5ve+MXb}6hzMNXTz?XQiFiF-IZ02Li0((G=b zBR^C??zCQ%c=)j84K|g3FfQ{U4Zb``u%akS$%nuwFx{9zIrZ^9`Iw!| z{dffqy0F6c0iQHq$V>~5bd=J0^%qyGNOkct%LYZ^dlA{>eLOLsPQ4xS+D)jGUWj`C zxnhw(M=~T>=|F+LRO<8I`(<;T@Ay8_$nb*Imi%XpdtR(7*0X!d{fx}x8iWI0-3!zE z?W}bZeK$((9h3~~nDeTm!1065wQzuiOM&5eEW5xf9Qgo6`VErQ3&N4!=KH-emxk*z z!%X%Szm?)6%h46GdhM=xP+^H{#~Tjr`EeCx^5+@ktI0xNmB^Jc_IBD?WvU$4vsuS2 zR-SsAhMt`qw1fND{s`nB)T)+qWE(%1XM1ZI*HxxG7do29O*_zK*0dAb9G|ZeYp1T~ zQBVnKa3v4ixvg;WyxIYV0xhCp=agpUig@yYidjs5{-xTPp?i0>b9XUVCl?~X6<0p1%)%waTwr_2GaUc$(y zj!~Ule+8ZsV!*F<3&gFt)XMq{#4ZfIfz&xOUR)4wNHH5LgxWZlA}W%Mg4O-wUcYIFE1k{1iSC-Fj6cJ-WBY z8ePeEWdhROJiv-pUe6~Wnh{7G876=9m`(8Q)V~aee*)l=*V&w=sQe@RA6)VAyLsP{ z{i)EoZMb{YJ@HD+Pju#`F_5oGi;zsyHz#&jNGr| z$1~}z%PNjva3@G|bFfY5e_o;i`QZ=|QPI#2PwNt}LAq1QXTgrtTYTjpY0I;%L=rmB-rCB?{OxtccR~UhYp-I7skk~hvZ@1V;`8xycE>dh zsYj1ONUh2fQahC~eXil&cV`-O?mnkpExR4orvw{(W0#>{MOR_>UEFrLC)bD|@0WWG z4$C>lJ8nCt*tljR!$l8BRK2JV@5Q;ITHZ9PzoQ4UzatCe%PEtLj5zXWn4@jp{9Z(qex^PiO)B;N{47F)GfU|}i@IL)7pk5$AYYqFqB!*~_RQwbc}ij$2hIQ)eV z^0mw3WqC+QNGcg{4kF_FHu2bbbm7E#9?XoMr{7TTT1QTrU(ht~=Ro=;xk)z{$zg81 z#hRb9r(>zVeWS76eZ`mz+~B@6`8bC0MM}gQ%v+6qNG^vJ$YIWFf!pVk3{oh=Em3p) z)U5pC*QFchn`QlJ)-B$j8i`bchB{9BgK~1j5*!8HcM2CxhmPyd&`hUfJOw{fq4|f2 zpYc=jF(j&4D*lSLZn6Po{5idI5Y@m4+RBmVNkdQT6>*hcD6iOFP9hA+zbvGG`J8>H zPX`B^%tCnxP-}q%@f_(E4GuA})Rjhwx46b=^H9Q8-zKKSj-u8=OPB?HM|-wSR`nW3 zQ+A+lU$v|kZ%i0-U0wN;*X#xq3hD@4C(nBm#lvD^nBl1zOoQ~!`4iZxx+z5?i2CZk z)d^42(PCzYzOqD5PA;!c(0k-igEKPl=}%kq#IP?7|z3`^X}rwoc&S@#x}x)3j5a^!UuLAe}-_-dnbMJtG8O z$~uBQQaqQR9`rsOGq=QUkXt4~F+e08O3{&wNC>|v3YtZXT~>3-lJi4ufP|(FsoYF+ z5L#Gh1S^UYOK4c)skHUy2*-zVw?`?3iJ^mKUQ(#{pUi;%!5x6GNxQpOSA(>4SY)L7 zGP5xEemPx7H=9wThpH(t>|Ac7wC8ZBGOzkPPu2F+z%|9N!dpZi^>n>PRY_Zgj^E4u 
zNq0Hjw0~b1<&i^wv#Ufe3i^m{Ci&Y^Vs_`4%5wS!XxtheS1!p79BX}g;)zcjp{-RI|rRZ_r za#128A#2TE*tz2iQJ?j)SDk(;Z2BM*wU8l}XC@Uf3@QJxnO3p6^|CMrJRX&eT_c~U?m z9h&;WO;z_&9%F85vdBJ}V9Xj`4@c9#rltsyLFKIWK0djaSe2os<&k%Ezev3ouv9Kf zdwrJ!TEE{-!@3$6>J{Xkujf1AvC9_C{`q&#OBFc2>C*n@z%F@5hC>f~eYa1;xw&b{QQyb>W%g`<3YMlI*SLg}p?(Ssd z6xw6v-oKfC?5hjgXJlx>hIz8m(Ph|6kylx3Wp(TvlvS!EiLJP8{J-|TJ09!rjXP3S z6tamzHihg}ME1;{Ss{CGQYa+J$S&k2p=7VSVcu5AUKLr{dq3x6$M^Tw^ZfI?Uizn- z`*wfMxz2UYwcgkJta0)4m((r^13xF(pz0atXUw~w*9nTcs1mQzv(i>{Bnm2fa)3LW z9sBPr;=s^JjAB``g=RjLiMxK>5RP|1WmCjm=%3i|Z}{<{tnxb8JJ}BH3I?xWhCC%s zw;vBYcIcNSTb6heWVG?eYlt>1ll0!YMV4zL)p*ffGnKa!RD7PC86E|lHU!Az_mb>K z-zty?Tg|^!QCA9ANE0chgUUWo@96A&iyQ(_BG4vyEWUzyKX$;t#*Tpe9o|3#l-Mgx z7D`Y5@~K=Y^Q>HLt(>L4o6>ZZnWjWt6G}aQV7)n<{?r!BM*|a-NWNN<=b~1!H}X@p zYuN?&>WSRBlbAV7p;uh~ytgt)ddvGlskoS>=kpKl8u1+FxEksTvpDy!A>m7_q!moQ=vJT{sr<^AnXjfnSGIb=I0SR5`^&eI&6c;X9qWtV4S~H66=7X!2zLr2M@D@hE z(0T)#rXzkVikgJuE^^mo7Insrv|-*54>&Oa@wNOmtkzk46tn1T|Kv!#9>at zuq0dbsmxM_53K|3TSdi>`<0JnQnun@r)=(Im(LTi)u0)6px-098HimfCh-oNP_ipc zm->quhRm7YchCx+dar7_Z;bd<+VaK)dvR$go95q&`F5uSC%~MdTgwldTj*U*GlrIM zqR$y#rw67e9qU2Ce;Sz+G=5CP5c`;fE0gcX-ttOFpu_9Or)xvowX_CwCQWcf_WGK* zL}PW%Wd$keUY?nI#9nu|BqN1Bq``rqq@JFTE3Zaav2bxlvWC{pC%y3Gy*Y)~l-F__ z_aX`XdkwMbjH;}bwE_-BLV$zBN9privsh_K*i4>V`YZQh?b4Q7ZIIQ*+k`$V6b8>5 zi+I)H1KD&`)jhhmzGzl=4xYv(`K}nv5%|KZantt$N@Jms8v}>*S>#|!kTIeLqqSk+>#H85ykBE)XO!Z?bim*xTt{T3q9uh2NhMRLj=+vHa!xF862g-Iv#N zg>lydzFVAf zjaB3YF4rB|N(qoVZI_bDopgS^yn)6b8d^h zX&=AhrGoLLW<&>dp*erQhSkW-faf#x`Keyn*7N8{P8X+VX1;%Zf>i0IR{sr+!&pm= zcA>9qxJ@eAZCItPbI-B?tQ@W$-W17oh3!5CI3CJdxdawMZTnDaTH1ERZ?*or@_AUAwdr+NKSR1{X>x=GoAI(>i61gR}S7_pjR(dmXO=& z&w(W9io0zSD1U*=k)3*Ln{m!t9Ah2y-F}1+oVB&-k;6&|zck@*QP&OZ-OSrELP02C z7K!V&K2w#Vnx(^L-o~UY$J{+QHN|Q--0(iqlcyO}HN;zCdu_-=XOP`7si}s%&yA4N z*2g>!FR$B8H2{1%BM3Eff4$jmfbz~q?{OV}-~3jFR6<7P{q%^LfM+Zb!MM^SF&zIqyB0%{ryjx9) zcm)Ahp-KjuX*0Ey=ZY`D3vM@w_ZB&XZEmcMBX&`&JoY2cVVmiCVj@?K^*E9_myQ5h zfhtF%T|1q0Rr2L_-rt(hXE@$X7M0bq;OAKhDXB036{#Nh?0cJe&G0JzKmp)MSXcmv z6J<$BNkfoCa3y$%p#r0P)b-a5KrD)C!Lt_%3%|8wY8Tsk0t`i`VVv$=&6SlE*P3)* zE)~RCVDxc=ejUmR?0Dt__CSGTY5$zveg}&_(U=wTL~)6T=ol`1&roL{?aWDq(If%z zv3^VV)>`>i3+erG0A8`3tLH-GS#-~jgO>W&h{yi^8Y>Dm=k0yUgBhgJ%G;by ziQGsx*5|Ol;{CnpSI=IB{s7F94yu_7aTnUR6}v}Lc*5iL;@gql;(QLPzMf_$d~bOv z5%p98W{nY=1mz}!&Gi)!L(@ZeaEDOI5WsTAkYz!L z_VO|c3hIEfkP>3Jalk>?n3|>ysgc7{(v$&$Fv|0Nh;CH~MSFdeMZ0*+r~hE-D0l~e zh)(1@tJ@ct=&1@bpZ<}l%}1`LR_dCYQa7C7#R5SmhqTB`0DkBr04TrWwt2&(nI}Ev zGy_0qPrI_arT<6=K6-@0E>-S0)+$jReG@e%P=z)vEsYJJ${_ZMyO|pzIm|g0Ybn+H zweEwJDuJ@UPC@K)5P;q}9A3KA_b$N+z%yq7m>&U{hv>&TTnQ=wL(&l7^JpWrD+f8* z=3H}Cm%&i+WMHWvnv%fCT2)iP(=$7g6f!2j-s%Fd+CKtej_HyN*WPr+TsIKATLE#l zwe}G*oKo`4dd9$Li?g3}-#_-I<-s~$;?3qK0LF2k9+{yDDo+6f)ej_pU;%5kIr;?_ zql^{S1nOdWl)zbNulE|%Re84ne)U4`%a_rG`VDRJ3q?*-ECAyl21;7D0l3VeQ;M}z zxtvA2NVz;+_m3d%=H_Muzr*=(fKOm{S;SV!)S@@Tv&hkQGparUiH1_ZB@zScv`l^T zt^C{lGv4Bu%d4x<>5zFE+e}3S>`H7F7%>sVa|h~YqegMYAT4g97e!=NNQV!rMj*lh z0?^{f2?=R|ef*I_rR5@HEzkFoRn#1BV0v$p=?{HXF5OLHJ)~VUaVpKnjJC z;T1wD?e_|}xQ&ol$g{+A=%CAe|BM`%07#?hMW<7ZP`jXWwy?0U_b03VJ~sASXE>Fo zZ_zj~gJAhf%R4<^;*x8utINv)(JA_8S?ksTQ@#jA^k-CuH(gvNdKqdTZgE=mWte!* zkGIRAia|C95y{%VQ1^=B@#Dv{hmgRlB;^MhwExNqy3uuF>Qp?%w$;-2DV{8nisrIn z!6abH?)KC0GXua7*DS2N0}xu5)1s>yfFx3Y9gY}!(>kjF@Skihvx69)@rtp+_S-pj zjx)$)Ir{)g8<)?Na{=3XzX3-+hZ*V>5D&SO>*fo{COQX0$oW)&1GS*21R-SO&%D8y z|J$h~xIQfc3Lrr;E{Ccf2csqcqpnYC%-KfuwyNumG`)V~Y4*bJ9PyR0RQIut4@oeJ z{b*y)c7h?mT|n;wuhnOqJs%DqjFx_*2s;#J~a2UI3Z2T5*<+P+a2=mpUzvmaPv|(2ih1(niS3^{Vnku7FX)7+ z`JN=s90{S2 z22iUDlh+I--!#rTru!h0Rw9I)ep;6MI5*T^9uly33#ZV*pIcQ-Ev{40$tz0 
zy4WXvZC~mUGKJhY50}ZF!lQS@ZtJzonf2aJ{r0Lc78634**To8&H@1ZBFLvt8v#6X zgj9N5`Q}2_N82Gu2&YPua0th-tCiNQ;H;j!p)B&JMVpw7ORBC-}j0uLn&h^Hb=W}ApZkGa_Q}N zc`xBc33-))j?QXuU{^#3$f&gLy5zAxu;L_k?w^jQvg~xm-%@zonepEr{md-DU z`WNyN^-Tc*0ZkTZb1z?BT)1M~bTgrKCbE+NDTw!$BKZ;H=_3YCvG3KV;S;-+`@25i zN#mK?uA?cO+JAxLRll;(uA6ll7Xqxn)JYgT7CTKq51gjH?zxz!l}`#4oTR0tSER5L z-U)C)&Zunx7Q=q?)Sj~?{2L@a@2_7^+-(LdNcPqskfgC&6g7%dW@l)v>;yM0&4SZ? zT!7hgWr%PEfHGAln&r)=FvCF@%V=KKwqjhqvA%Am9f`5`rF&;3}Q&`6NXF|MD7s_YMD$8Bxb zmu<9r9~84#DC{?P>~Aa*Ie-HD-DzjCqr+MniB|n(MPAU&FHt_IxhsnXAp$C|`=JUs zNNLri-+ebP)^BE9;n1#aLT}RW>CT-y*@J1m0Q>J@d6xVEO9Y=f=nz^T*WG#nda-O( zAIR0`@j8qdy7{mlI=gqyb89Ggw7j8Zzq?=j@S=b#2i#$;vaL*QO;0R}LyCL=(aNDp}1c&A@4 zHe8GJWE+3!6~|`u6e81feUd)gp3zue%BQTmU%faGbs2OfAJoxh@zF3bTi6*OFuU2} zSJ0y{?@1)Ixt!FOSiF7-FTdZj7(~(|g5kP?l4lq|yLp<4Z3zelU0>1LsrW~izHSEK zeGo|*3+TMa9I%F2Pcgk&%%fxF&I~sjy#f%?J*q67SMdq6Lt@Fpy!;1qlc$ zX3e~ppp_Fqn{V6zEnU#9DMtNJUzNfV8st}PHx*X;F?7EcL5)lGObh;c*ZP|@ux1}! z=8Q`@!V6I%T!R0(OKe+fj-GO2ICMhepL<%q6tSBbKkgb>10069Fh(1A?caEW)T$$Tk9tDbac-FMY1ZL z=U_FC0O@bwv4;YyN6*C6n!^IpxrvBApuQ2jW(5Ax=(Zr5P@m=A-w^w*#?nQ-vwm*R zNK1G6N#H3ct^oD_Q1l!x_=TV1fBSZpbim=jLJej)=20E&vQaQ)evd~kf(K7O`l?4Bcy^@NB-;C( zJjaF%ZvhUX^D5e$`~RMJ!D}#aE;5dyf63$yZ#VB$sA4sb^JnGQ6ZR)=EdrfV-1qHw zt~ozqWp;j)gWUX4s=B$uP?Y^SZ!g7wAGFriO(SgoTYm9SM{$dI)bM{qT)P8=V}Xem z{7n3AbW~~wGvL<-@=Fc5*7z^6(mxX?1(Y4u|IZc2+Rr#^t-Ui6- zia(=Bb9V+h&5eW)r(*gDE3EP7bFyZz(wgt8+^5KhrmOa9N?`KQ2Eo3NmaNMXfVu4aaSlu<#sSH{=1M; zJY#Cy>9!VT1_J)?G?_9Gp6pPH&5q=I#KGUKfy=CqCQ9I>pr+#dm2NI*XA8CpgZW`S zr#ntLf^`*8j(mm{?nZ@~5c5KAm4DyS%`#vCWL{$N%|&PD(F|Zw70}Oh9^2wgbl`&| zkKh-S>GSC3&ImXM6C^h%{xotoB?JYLauS?+~Lg)%`P9Z zP!V6nQgaz3N=m1Z*c;;w=tw$-sUN>K_%6HAEq0DfEbccgYt( zi&By&ndx?U;opr5c7WIhcQeNB#)TGt!v8tyQI`Rp1Z{{`>)C5NgW`rxa~nRhqcZmv z-hlS3my*UVx)j=q1w+~2O?TAfV5IyiKwqn{na}T(#(=OuQ(+2d$#e{-+qvKQ9~;9B z9U5VJrn|gLP9W&|X-OsKb#!&6AAqUH3zYGb@66~1fjm;s$B!RLB2y`Mesfr=|74Pn zF3awSq03bLL>?#~%lseuya}WOZ&@sk>A=ApyhVUElHE%>c5ELS4>v^H!|q_ZmuonX z*1;<>Trl$sq~Qm@Ira+v3Op({O{)9QSMO-`f!!K-4phm%1Gbkv`mc{RKzb>y-Sxj+ zl~YLmKrgtziP0Qg-a~`3ehVIDGSksx;id-qbeWUk=&`x~0pKCC16ATyip)E@ zei^Jj_6LQ1)W@Os_Vhj$5Qu(#RrKjUlx$x}*buxwAox%V_A&XKsLqM%98(IQ zt|zMVUsBoq-N~u*e}C$DnQ+SPo$Lc$$Pl*;8NahUh#%$~U$L7Qf_|L{yYQ_p^DL+^ zuf2A2WY-iPS{LV7$QL11ytGq<@QKERfC>{AUX$0H&Eu`03DmKd0xUpCP28a@Y17GrgZl85_Yg zvFqoaMfUXcoc){A*fn7q!)YvPtk? 
zYa&bqOz?eC_w74+fw5pj>v8@a-26^83quqj5ZfgZ6!DCMIc+!e@qV zCy6i!7Xp`#_>EvJ9kCYosev^KMrpLzu{hTFfNw$f6_4Fp87ELl90Hevg~?o^MdL{J zE-x>Ku5AeKvmJo@Ks@lOu5p-$|wt(8UxOaHSMV5~l~IoF~*6m&m++5_3_ ze6pYn=-h5Ys3qk91rfu_85+wbrJ?|D51-$(Ek-TRL}$)hFup2GS;Pzt1k{8ubs|98 zzdE^StPAq#wQ@1oBjaaq>2vy4#mZLnvo2mF;8F}Z%3LVyfwugTI8Bh`4Hg(UpmtPj z$ua8tg7xvi4_SIT-lfz}w+*AT7S{S~TW$3xLlNCc?Ki5@pCTx?n?OUu_BRs>f)+$tpf zJLpvZvUuD|d?;*V^MpyeRJ_l9*p4Sgn>voKLaKe42;6$v?QGqTjb9tKAF{MU&CN0| z=^*jG#Oo+!mAK?)6KZHecxUz1=@;RS%3f*&#J#t_P#L5+*yF8?v*RVpNaxtz;JFRI zf%5dJtezDR$)SV`B0Yv@gXy#oS8V*EEpY;`N^~gk)*1&w72xV zI!LW*$?A2E^|B533rV_)={zo580`~#+viYdKT4$i6R`OgJQW5A`pWWPp~xiK<`fvE z_*Tr9c2?nNju1QwU7?kYieAGm4^>I|#PWCcuRM2^*gFQQUWWO+i2;R1USzXB70IZ5 zVGw)JWUd_z`$D$Nu*RneMgf)*iC1Yuc|lezoR zzT}3sk^4)^-%+Axrhn1FsWAKVZ}GQ;PSG}Zvf=>v)`ne*`@)25O`}A(!CF~l4n1me z!8P57B2+1luP4#vA+DjHJNLqbqC~?_yQA~As|?o#Zr11FVXR*6&hfDv4~bA6-A&!; z@A=xy^!zm}_tqQjaQpP>$yTry0PgZ{z(?lrdG@0aGs=Obj8HTAXIHkj*A{-%+;_Q; zbCGx-=Th?-QCO8jdoo%oxw_bCDBpyMa$<$YYik(zdrP4|==YG`Tm`@U$zS53Xfi$; z-e}#yj1Lc#jm_?*P^V&By|_XZFw@(m%gFro3<`;q-uGsU%Amk2JooELU@YW)#XZZ{ zn8gX)E!KQQ9Tp0ndFPxVa4(f%?R6R%&a%&bzrG>)6f@|2i{Ldulm<=}F~%@~^7xoE zg)*{XG>!=G=$$6l3u}4q8a7vZy!hT48^c&-E)ra7U?0u6pt!`V+#EIQ&(C+mo66f^QvsmQE=~2X*ca5Z~&{J%+O4iMT#>8T>VDi>1}l z0vZxe<~oVf#0N)aKFtx6T^_iC9OFnt!4}(-T0W$y9oag|XCnLeg+OH)>q@1+2{nq= zZP0wDRB5a5_ziMpI1TBT5n7*pk&!@0JJqdgZa{NW0TsY)5Of( zYT@%MjhZ64{?Ce9&lP^VUK9l}?0dIfhBGULtE8oOd&QZ@Qx(gBFEX_qkiU{A2WDKJV)6p^X zv~sZ&_QJptC|{lDJ`4sCWR| zVS4Z76LW;8#$6Gcy9gf8*zl*e4HY-uh zM$P6KCcIy%OT1d&Ep`8nxBEt?*SE%yAGDD zeZ5s(+8qvLTem1)TZ^N9%2P`=M?J@&$0b2OLiGGIXlJ@wWH@vA{yniEvER7)QP(yC zM<_ObE4_JA69g(=qBiI^(HmsGvSFQje);l+OI2ZZaBs!z_-><`YZ-%#73faj<4bAX?Ys8pQ9a+E5d*G< z{P=}MFDu8nHtn2yI4@cGt!!|ggG-)K&@NC44`8985m}u=q~cNv>!}Z)z? zyWwXgm^AmBqS!Sr;a8{i7HgCo_kCn1&!Nzj5!%cc@?qT@PMe5?#<|9lRSyxDiF6z4LIZ1CtZfmdsw%zUi#a+;!E2&wf+Y`$9Q4f>s7q3-Mr;vcwL^7 zZ0xvIiA98L2WN8E$ihchreO`mDxbF{Co4&~U_X}F_c6Z;qt3WWe@ zQ8eG=#_DE&asTqF!CZ4-zx$K8p~&Da+pgO=6Mq?v5q}ouYAPD1W~VF(k^Nu5yF)bd zx146uE7%$1T?bEPNfmOB4E0x;{9Ub%wVm}_sW`7|P1lQ6bxV&kHv7|ZjMGq$;}Twu z%eYIuLpbwB(tkebqJOUAbuV6^^ccab!VKiP-N*i}jfL40m4e5lbUb-w&`L`XasFcQ1@4PyGB=V)hjY04 zd6qm@rU<6YXXLV60>m@b1WYD3S1Nozr#9qLl-oK^xNoo`El2wb-WhPCwr&>)X_(p= zm6?N!$kpdxGE(C;u3!H~W6~&|$?w!gUi!z)9$}2WEd!sg>cVjD*z)2@kkf`V!y8U$ zRqW-tbbiw_-o>))id>S7nLlJZn9FxAMSsXA{*8m6CAzqRdJl> z&cr*fb{5V=6#t1|yOcnM99s$ zjF~sqk5#buN1Xl`=8FBqD$sX#A=QzPelxGVw;R~m>&1l5n~x*^_<0o=L?ndHG$qNg z_KEBDKj{19=~%u-Znr{XQs4YR++&IBo)6Fil(Hmve56ji(0ZWeA1kXOk1h3a1nc-x zErPY7_6OqpdS;XXp-J{uEvptsWZ6>CRKZV-ol|9dbG`HSwkhy5>193m9ZDY?a)|pv z`EZ0+smWyNR*lQ%V}Z3sEurBc=DDQeJ|1z3+*M{fAu_t%7+y+=`Rp`-4fKimD7kC-t0KH2;K@h%ebYj1Cyd(l_^ z^KZc*NRW_{Y(CxkuWyEi_y`$UEWq^X;o%-0-hC95pvvzUhbD9QUCIj>XhfIa(*Lii zCE{GTMk3Dm&z~F`=gGuRR{S3coT&Pt-EnwJp6v0%(l|IzP7b&KKABHW_5U_WC+6&6 z?;Q3wPVD$$X&fBFCl1ViyPYSV+dqEG{vmwgz#QzIe`$qyNF6)WQ`i}%oeDxDw{T~UO1Suy$%0Ch~iOx= Date: Fri, 5 Nov 2021 14:03:50 -0600 Subject: [PATCH 011/109] update documentation --- doc/source/user_guide/ug_implementation.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 01daef5d5..ebc218e34 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -148,23 +148,23 @@ the primary prognostic grid variables name on the different grids. .. 
 
 .. table:: Primary CICE Prognostic Grid Variable Names
 
-   +================+=======+=======+=======+=======+
+   +----------------+-------+-------+-------+-------+
    | variable       | T     | U     | N     | E     |
    +================+=======+=======+=======+=======+
    | longitude      | TLON  | ULON  | NLON  | ELON  |
-   +================+=======+=======+=======+=======+
+   +----------------+-------+-------+-------+-------+
    | latitude       | TLAT  | ULAT  | NLAT  | ELAT  |
-   +================+=======+=======+=======+=======+
+   +----------------+-------+-------+-------+-------+
    | dx             | dxt   | dxu   | dxn   | dxe   |
-   +================+=======+=======+=======+=======+
+   +----------------+-------+-------+-------+-------+
    | dy             | dyt   | dyu   | dyn   | dye   |
-   +================+=======+=======+=======+=======+
+   +----------------+-------+-------+-------+-------+
    | area           | tarea | uarea | narea | earea |
-   +================+=======+=======+=======+=======+
+   +----------------+-------+-------+-------+-------+
    | mask (logical) | tmask | umask | nmask | emask |
-   +================+=======+=======+=======+=======+
+   +----------------+-------+-------+-------+-------+
    | mask (real)    | hm    | uvm   | npm   | epm   |
-   +================+=======+=======+=======+=======+
+   +----------------+-------+-------+-------+-------+
 
 
 In CESM, the sea ice model may exchange coupling fluxes using a

From dc72321f14d7335a6970a06846b2a4a0cb514ca6 Mon Sep 17 00:00:00 2001
From: apcraig
Date: Fri, 5 Nov 2021 14:10:29 -0600
Subject: [PATCH 012/109] update documentation

---
 doc/source/user_guide/ug_implementation.rst | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst
index ebc218e34..b3da2bd8e 100644
--- a/doc/source/user_guide/ug_implementation.rst
+++ b/doc/source/user_guide/ug_implementation.rst
@@ -375,15 +375,19 @@ testing.
 Masks
 *****
 
-A land mask hm (:math:`M_h`) is specified in the cell centers, with 0
-representing land and 1 representing ocean cells. A corresponding mask
-uvm (:math:`M_u`) for velocity and other corner quantities is given by
+A land mask hm (:math:`M_h`) is specified in the cell centers (on the
+T-grid), with 0
+representing land and 1 representing ocean cells. Corresponding masks
+for the U, N, and E grids are given by
 
 .. math:: M_u(i,j)=\min\{M_h(l),\,l=(i,j),\,(i+1,j),\,(i,j+1),\,(i+1,j+1)\}.
+   M_n(i,j)=\min\{M_h(l),\,l=(i,j),\,(i,j+1)\}.
+   M_e(i,j)=\min\{M_h(l),\,l=(i,j),\,(i+1,j)\}.
 
-The logical masks ``tmask`` and ``umask`` (which correspond to the real masks
-``hm`` and ``uvm``, respectively) are useful in conditional statements.
+The logical masks ``tmask``, ``umask``, ``nmask``, and ``emask``
+(which correspond to the real masks ``hm``, ``uvm``, ``npm``, and ``epm``
+respectively) are useful in conditional statements.
 
 In addition to the land masks, two other masks are implemented in
 *dyn\_prep* in order to reduce the dynamics component’s work on a global
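In grid-point terms, the three mask formulas above reduce to min operations over the T cells that touch each point. A minimal Fortran sketch of that derivation (the loop bounds and any halo handling are simplified here and are not part of the patch):

      ! derive U-, N-, and E-grid ocean masks from the T-cell land mask hm
      ! (1 = ocean, 0 = land); a point is ocean only if every contributing
      ! T cell is ocean
      do j = jlo, jhi
      do i = ilo, ihi
         uvm(i,j) = min(hm(i,j), hm(i+1,j), hm(i,j+1), hm(i+1,j+1)) ! U, NE corner
         npm(i,j) = min(hm(i,j), hm(i,j+1))                         ! N, north face
         epm(i,j) = min(hm(i,j), hm(i+1,j))                         ! E, east face
      enddo
      enddo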

From ce5461d087b823b245f2e02698c70b523d5b1c9a Mon Sep 17 00:00:00 2001
From: apcraig
Date: Fri, 5 Nov 2021 14:12:36 -0600
Subject: [PATCH 013/109] update documentation

---
 doc/source/user_guide/ug_implementation.rst | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst
index b3da2bd8e..2560fe030 100644
--- a/doc/source/user_guide/ug_implementation.rst
+++ b/doc/source/user_guide/ug_implementation.rst
@@ -382,7 +382,11 @@ for the U, N, and E grids are given by
 
 .. math:: M_u(i,j)=\min\{M_h(l),\,l=(i,j),\,(i+1,j),\,(i,j+1),\,(i+1,j+1)\}.
+
+.. math::
    M_n(i,j)=\min\{M_h(l),\,l=(i,j),\,(i,j+1)\}.
+
+.. math::
    M_e(i,j)=\min\{M_h(l),\,l=(i,j),\,(i+1,j)\}.
 
 The logical masks ``tmask``, ``umask``, ``nmask``, and ``emask``

From b58e3856c808b88fcb7efb87e4a3b8438b085859 Mon Sep 17 00:00:00 2001
From: apcraig
Date: Fri, 5 Nov 2021 14:21:41 -0600
Subject: [PATCH 014/109] update documentation

---
 doc/source/user_guide/ug_implementation.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst
index 2560fe030..5bccd33e1 100644
--- a/doc/source/user_guide/ug_implementation.rst
+++ b/doc/source/user_guide/ug_implementation.rst
@@ -142,7 +142,7 @@ The input grid file for the B-grid and CD-grid is identical. That file
 contains each cell's HTN, HTE, ULON, ULAT, and kmt value. From those
 variables, the longitude, latitude, grid lengths (dx and dy), areas, and
 masks can be derived for all grids. Table :ref:`tab-gridvars` lists
-the primary prognostic grid variables name on the different grids.
+the primary prognostic grid variable names on the different grids.
 
 .. _tab-gridvars:

From 4cd4039ab1d7a25b6a5af991edf1b67eb011a5dc Mon Sep 17 00:00:00 2001
From: apcraig
Date: Sun, 7 Nov 2021 22:27:18 -0700
Subject: [PATCH 015/109] update pio history_write for new grids

shift to only support S and F X2Y transforms for clarity

add uveln, vvele if grid_system=CD. proposed placeholder for extra
velocity variables (but this could change)

extend io to start to support different grids for history variables
depending on grid_system
---
 cicecore/cicedynB/analysis/ice_history.F90     |  50 +++-
 .../cicedynB/analysis/ice_history_shared.F90   |   3 +
 cicecore/cicedynB/dynamics/ice_dyn_eap.F90     |  12 +-
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90     |  12 +-
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90  |   7 +-
 cicecore/cicedynB/dynamics/ice_dyn_vp.F90      |  12 +-
 cicecore/cicedynB/general/ice_forcing.F90      |  10 +-
 cicecore/cicedynB/general/ice_state.F90        |   4 +
 cicecore/cicedynB/infrastructure/ice_grid.F90  |  24 +-
 .../io/io_netcdf/ice_history_write.F90         |   4 +-
 .../io/io_pio2/ice_history_write.F90           | 277 +++++++++++-------
 .../drivers/mct/cesm1/ice_import_export.F90    |   8 +-
 .../drivers/nuopc/cmeps/ice_import_export.F90  |   8 +-
 cicecore/drivers/nuopc/dmi/cice_cap.info       |   8 +-
 14 files changed, 274 insertions(+), 165 deletions(-)

diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90
index cf6b470d1..fea284495 100644
--- a/cicecore/cicedynB/analysis/ice_history.F90
+++ b/cicecore/cicedynB/analysis/ice_history.F90
@@ -94,6 +94,7 @@ subroutine init_hist (dt)
       integer (kind=int_kind), dimension(max_nstrm) :: &
         ntmp
       integer (kind=int_kind) :: nml_error ! namelist i/o error flag
+      character(len=25) :: l_ustr2d, l_vstr2d, l_ucstr, l_vcstr ! define location of u and v fields
       character(len=*), parameter :: subname = '(init_hist)'
 
       !-----------------------------------------------------------------
@@ -107,6 +108,18 @@ subroutine init_hist (dt)
      nzblyr = nblyr+2   ! bio grid
      nzalyr = nblyr+4   ! aerosols (2 snow & nblyr+2 bio)
 
+     ! B grid default
+     l_ustr2d = ustr2d
+     l_vstr2d = ustr2d
+     l_ucstr = ucstr
+     l_vcstr = ucstr
+     if (grid_system == 'C' .or. grid_system == 'CD') then
+        l_ustr2d = estr2d
+        l_vstr2d = nstr2d
+        l_ucstr = ecstr
+        l_vcstr = ncstr
+     endif
+
      !-----------------------------------------------------------------
      ! read namelist
      !-----------------------------------------------------------------
@@ -278,6 +291,11 @@ subroutine init_hist (dt)
          f_sispeed = f_CMIP
       endif
 
+      if (grid_system == 'CD') then
+         f_uveln = f_uvel
+         f_vvele = f_vvel
+      endif
+
 #ifndef ncdf
       f_bounds = .false.
 #endif
@@ -328,6 +346,8 @@ subroutine init_hist (dt)
       call broadcast_scalar (f_aice, master_task)
       call broadcast_scalar (f_uvel, master_task)
       call broadcast_scalar (f_vvel, master_task)
+      call broadcast_scalar (f_uveln, master_task)
+      call broadcast_scalar (f_vvele, master_task)
       call broadcast_scalar (f_uatm, master_task)
       call broadcast_scalar (f_vatm, master_task)
       call broadcast_scalar (f_atmspd, master_task)
@@ -549,29 +569,31 @@ subroutine init_hist (dt)
             "averaged with Tf if no ice is present", c1, c0, &
             ns1, f_Tsfc)
 
-!     tcraig, just to test capability, tcx
-!      if (grid_system == 'CD') then
-!      call define_hist_field(n_aice,"aice","1",nstr2D, ncstr, &
-!          "ice area (aggregate)", &
-!          "none", c1, c0, &
-!          ns1, f_aice)
-!      else
       call define_hist_field(n_aice,"aice","1",tstr2D, tcstr, &
           "ice area (aggregate)", &
          "none", c1, c0, &
          ns1, f_aice)
-!      endif
 
-      call define_hist_field(n_uvel,"uvel","m/s",ustr2D, ucstr, &
+      call define_hist_field(n_uvel,"uvel","m/s",l_ustr2D, l_ucstr, &
          "ice velocity (x)", &
-         "positive is x direction on U grid", c1, c0, &
+         "positive is x direction on u grid", c1, c0, &
          ns1, f_uvel)
 
-      call define_hist_field(n_vvel,"vvel","m/s",ustr2D, ucstr, &
+      call define_hist_field(n_vvel,"vvel","m/s",l_vstr2D, l_vcstr, &
         "ice velocity (y)", &
-        "positive is y direction on U grid", c1, c0, &
+        "positive is y direction on v grid", c1, c0, &
        ns1, f_vvel)
 
+      call define_hist_field(n_uveln,"uveln","m/s",nstr2D, ncstr, &
+        "ice velocity (x)", &
+        "positive is x direction on N grid", c1, c0, &
+        ns1, f_uveln)
+
+      call define_hist_field(n_vvele,"vvele","m/s",estr2D, ecstr, &
+        "ice velocity (y)", &
+        "positive is y direction on E grid", c1, c0, &
+        ns1, f_vvele)
+
       call define_hist_field(n_uatm,"uatm","m/s",ustr2D, ucstr, &
         "atm velocity (x)", &
         "positive is x direction on U grid", c1, c0, &
@@ -1945,6 +1967,10 @@ subroutine accum_hist (dt)
          call accum_hist_field(n_uvel, iblk, uvel(:,:,iblk), a2D)
          if (f_vvel (1:1) /= 'x') &
          call accum_hist_field(n_vvel, iblk, vvel(:,:,iblk), a2D)
+         if (f_uveln (1:1) /= 'x') &
+         call accum_hist_field(n_uveln, iblk, uveln(:,:,iblk), a2D)
+         if (f_vvele (1:1) /= 'x') &
+         call accum_hist_field(n_vvele, iblk, vvele(:,:,iblk), a2D)
         if (f_uatm (1:1) /= 'x') &
         call accum_hist_field(n_uatm, iblk, uatm(:,:,iblk), a2D)
         if (f_vatm (1:1) /= 'x') &
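The registration pattern above generalizes to any new CD-grid diagnostic: pick the coordinate and cell-measure strings for the target grid and register the field. A hedged sketch using the same define_hist_field interface (the field name "example_n" and its n_/f_ identifiers are hypothetical, not part of the patch):

      ! hypothetical scalar diagnostic located at N points (north face)
      call define_hist_field(n_example_n,"example_n","1",nstr2D, ncstr, &
        "example diagnostic at N points", &
        "none", c1, c0, &
        ns1, f_example_n)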

diff --git a/cicecore/cicedynB/analysis/ice_history_shared.F90 b/cicecore/cicedynB/analysis/ice_history_shared.F90
index 1c8823b62..fc4b68ae3 100644
--- a/cicecore/cicedynB/analysis/ice_history_shared.F90
+++ b/cicecore/cicedynB/analysis/ice_history_shared.F90
@@ -235,6 +235,7 @@ module ice_history_shared
          f_snowfrac = 'x', f_snowfracn = 'x', &
          f_Tsfc = 'm', f_aice = 'm', &
          f_uvel = 'm', f_vvel = 'm', &
+         f_uveln = 'x', f_vvele = 'x', &
          f_uatm = 'm', f_vatm = 'm', &
          f_atmspd = 'm', f_atmdir = 'm', &
          f_fswup = 'm', &
@@ -384,6 +385,7 @@ module ice_history_shared
          f_snowfrac, f_snowfracn, &
          f_Tsfc, f_aice , &
          f_uvel, f_vvel , &
+!        f_uveln, f_vvele , & ! for now, have this set from f_uvel, f_vvel
          f_uatm, f_vatm , &
          f_atmspd, f_atmdir , &
          f_fswup, &
@@ -557,6 +559,7 @@ module ice_history_shared
          n_snowfrac , n_snowfracn , &
          n_Tsfc , n_aice , &
          n_uvel , n_vvel , &
+         n_uveln , n_vvele , &
          n_uatm , n_vatm , &
          n_atmspd , n_atmdir , &
          n_sice , &

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90
index c550a4b14..cf5a2fd67 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90
@@ -254,8 +254,8 @@ subroutine eap (dt)
       ! convert fields from T to U grid
       !-----------------------------------------------------------------
 
-      call grid_average_X2Y('T2U',tmass,umass)
-      call grid_average_X2Y('T2U',aice_init, aiu)
+      call grid_average_X2Y('T2UF',tmass,umass)
+      call grid_average_X2Y('T2UF',aice_init, aiu)
 
      !----------------------------------------------------------------
      ! Set wind stress to values supplied via NEMO or other forcing
@@ -274,8 +274,8 @@ subroutine eap (dt)
             field_loc_center, field_type_vector)
         call ice_HaloUpdate (strairy, halo_info, &
            field_loc_center, field_type_vector)
-         call grid_average_X2Y('T2U',strairx)
-         call grid_average_X2Y('T2U',strairy)
+         call grid_average_X2Y('T2UF',strairx)
+         call grid_average_X2Y('T2UF',strairy)
      endif
 
 ! tcraig, tcx, turned off this threaded region, in evp, this block and
@@ -556,8 +556,8 @@ subroutine eap (dt)
             field_loc_NEcorner, field_type_vector)
       call ice_HaloUpdate (strocnyT, halo_info, &
            field_loc_NEcorner, field_type_vector)
-      call grid_average_X2Y('U2T',strocnxT) ! shift
-      call grid_average_X2Y('U2T',strocnyT)
+      call grid_average_X2Y('U2TF',strocnxT) ! shift
+      call grid_average_X2Y('U2TF',strocnyT)
 
       call ice_timer_stop(timer_dynamics) ! dynamics

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index cf7048e15..861f8780d 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -219,8 +219,8 @@ subroutine evp (dt)
       ! convert fields from T to U grid
       !-----------------------------------------------------------------
 
-      call grid_average_X2Y('T2U',tmass,umass)
-      call grid_average_X2Y('T2U',aice_init, aiu)
+      call grid_average_X2Y('T2UF',tmass,umass)
+      call grid_average_X2Y('T2UF',aice_init, aiu)
 
      !----------------------------------------------------------------
      ! Set wind stress to values supplied via NEMO or other forcing
@@ -239,8 +239,8 @@ subroutine evp (dt)
             field_loc_center, field_type_vector)
         call ice_HaloUpdate (strairy, halo_info, &
            field_loc_center, field_type_vector)
-         call grid_average_X2Y('T2U',strairx)
-         call grid_average_X2Y('T2U',strairy)
+         call grid_average_X2Y('T2UF',strairx)
+         call grid_average_X2Y('T2UF',strairy)
      endif
 
 ! tcraig, tcx, threading here leads to some non-reproducible results and failures in icepack_ice_strength
@@ -565,8 +565,8 @@ subroutine evp (dt)
             field_loc_NEcorner, field_type_vector)
       call ice_HaloUpdate (strocnyT, halo_info, &
            field_loc_NEcorner, field_type_vector)
-      call grid_average_X2Y('U2T',strocnxT) ! shift
-      call grid_average_X2Y('U2T',strocnyT)
+      call grid_average_X2Y('U2TF',strocnxT) ! shift
+      call grid_average_X2Y('U2TF',strocnyT)
 
       call ice_timer_stop(timer_dynamics) ! dynamics
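As the eap and evp hunks show, callers must now name the transform variant explicitly; the bare 'T2U'/'U2T' aliases are dropped (see the ice_grid.F90 hunk at the end of this patch). A sketch of the migration for any remaining caller (illustrative; 'F' selects the flux-style average used at these call sites, and per the commit message 'S' remains as the other supported variant):

      ! before: flux variant implied by the bare direction string
      call grid_average_X2Y('T2U', tmass, umass)
      ! after: flux ('F') variant named explicitly
      call grid_average_X2Y('T2UF', tmass, umass)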
dynamics diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 5e14d9686..0cd7f70a5 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -17,6 +17,7 @@ module ice_dyn_shared use ice_domain_size, only: max_blocks use ice_fileunits, only: nu_diag use ice_exit, only: abort_ice + use ice_grid, only: grid_system use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_query_parameters @@ -130,7 +131,7 @@ subroutine init_dyn (dt) stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 - use ice_state, only: uvel, vvel, divu, shear + use ice_state, only: uvel, vvel, uveln, vvele, divu, shear use ice_grid, only: ULAT real (kind=dbl_kind), intent(in) :: & @@ -162,6 +163,10 @@ subroutine init_dyn (dt) ! velocity uvel(i,j,iblk) = c0 ! m/s vvel(i,j,iblk) = c0 ! m/s + if (grid_system == 'CD') then ! extra velocity variables + uveln = c0 + vvele = c0 + endif ! strain rates divu (i,j,iblk) = c0 diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index a8bf7be89..6a8095c69 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -320,8 +320,8 @@ subroutine implicit_solver (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('T2U',tmass,umass) - call grid_average_X2Y('T2U',aice_init, aiu) + call grid_average_X2Y('T2UF',tmass,umass) + call grid_average_X2Y('T2UF',aice_init, aiu) !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing @@ -340,8 +340,8 @@ subroutine implicit_solver (dt) field_loc_center, field_type_vector) call ice_HaloUpdate (strairy, halo_info, & field_loc_center, field_type_vector) - call grid_average_X2Y('T2U',strairx) - call grid_average_X2Y('T2U',strairy) + call grid_average_X2Y('T2UF',strairx) + call grid_average_X2Y('T2UF',strairy) endif ! tcraig, tcx, threading here leads to some non-reproducbile results and failures in icepack_ice_strength @@ -655,8 +655,8 @@ subroutine implicit_solver (dt) field_loc_NEcorner, field_type_vector) call ice_HaloUpdate (strocnyT, halo_info, & field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('U2T',strocnxT) ! shift - call grid_average_X2Y('U2T',strocnyT) + call grid_average_X2Y('U2TF',strocnxT) ! shift + call grid_average_X2Y('U2TF',strocnyT) call ice_timer_stop(timer_dynamics) ! dynamics diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90 index a4dd66c67..7b1e2eac7 100755 --- a/cicecore/cicedynB/general/ice_forcing.F90 +++ b/cicecore/cicedynB/general/ice_forcing.F90 @@ -4072,8 +4072,8 @@ subroutine ocn_data_ncar_init_3D work1(:,:,:) = ocn_frc_m(:,:,:,n ,m) work2(:,:,:) = ocn_frc_m(:,:,:,n+1,m) - call grid_average_X2Y('T2U',work1,ocn_frc_m(:,:,:,n ,m)) - call grid_average_X2Y('T2U',work2,ocn_frc_m(:,:,:,n+1,m)) + call grid_average_X2Y('T2UF',work1,ocn_frc_m(:,:,:,n ,m)) + call grid_average_X2Y('T2UF',work2,ocn_frc_m(:,:,:,n+1,m)) enddo ! month loop enddo ! field loop @@ -4474,8 +4474,8 @@ subroutine ocn_data_hadgem(dt) ! 
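One detail worth flagging in the init_dyn hunk above: uveln = c0 and vvele = c0 are whole-array assignments, and they sit inside the i/j/iblk loop nest next to the indexed uvel/vvel assignments, so the full arrays are re-zeroed on every iteration. That is correct but redundant. A sketch of the equivalent initialization hoisted out of the loops, with invented array dimensions:

    program cd_init_sketch
       implicit none
       integer, parameter :: nx = 3, ny = 3, nblk = 1
       real, parameter :: c0 = 0.0
       character(len=2) :: grid_system
       real :: uvel(nx,ny,nblk), vvel(nx,ny,nblk)
       real :: uveln(nx,ny,nblk), vvele(nx,ny,nblk)
       integer :: i, j, iblk

       grid_system = 'CD'

       do iblk = 1, nblk
          do j = 1, ny
             do i = 1, nx
                uvel(i,j,iblk) = c0   ! per-point assignments belong in the loop
                vvel(i,j,iblk) = c0
             enddo
          enddo
       enddo

       ! whole-array assignments only need to run once, outside the loop nest
       if (grid_system == 'CD') then
          uveln = c0
          vvele = c0
       endif

       print *, 'zeroed CD points:', count(uveln == c0) + count(vvele == c0)
    end program cd_init_sketch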
Interpolate to U grid !----------------------------------------------------------------- - call grid_average_X2Y('T2U',uocn) - call grid_average_X2Y('T2U',vocn) + call grid_average_X2Y('T2UF',uocn) + call grid_average_X2Y('T2UF',vocn) endif ! ocn_data_type = hadgem_sst_uvocn @@ -5278,7 +5278,7 @@ subroutine box2001_data call icepack_query_parameters(pi_out=pi, pi2_out=pi2, puny_out=puny) call icepack_query_parameters(secday_out=secday) - call grid_average_X2Y('T2U',aice, aiu) + call grid_average_X2Y('T2UF',aice, aiu) period = c4*secday diff --git a/cicecore/cicedynB/general/ice_state.F90 b/cicecore/cicedynB/general/ice_state.F90 index 362fd1413..514d30b9a 100644 --- a/cicecore/cicedynB/general/ice_state.F90 +++ b/cicecore/cicedynB/general/ice_state.F90 @@ -108,7 +108,9 @@ module ice_state real (kind=dbl_kind), dimension(:,:,:), allocatable, & public :: & uvel , & ! x-component of velocity (m/s) + uveln , & ! extra x-component of velocity on CD grid (m/s) vvel , & ! y-component of velocity (m/s) + vvele , & ! extra y-component of velocity on CD grid (m/s) divu , & ! strain rate I component, velocity divergence (1/s) shear , & ! strain rate II component (1/s) strength ! ice strength (N/m) @@ -150,7 +152,9 @@ subroutine alloc_state vsno (nx_block,ny_block,max_blocks) , & ! volume per unit area of snow (m) aice0 (nx_block,ny_block,max_blocks) , & ! concentration of open water uvel (nx_block,ny_block,max_blocks) , & ! x-component of velocity (m/s) + uveln (nx_block,ny_block,max_blocks) , & ! extra x-component of velocity on CD grid (m/s) vvel (nx_block,ny_block,max_blocks) , & ! y-component of velocity (m/s) + vvele (nx_block,ny_block,max_blocks) , & ! extra y-component of velocity on CD grid (m/s) divu (nx_block,ny_block,max_blocks) , & ! strain rate I component, velocity divergence (1/s) shear (nx_block,ny_block,max_blocks) , & ! strain rate II component (1/s) strength (nx_block,ny_block,max_blocks) , & ! ice strength (N/m) diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 8f8c85904..f6322606a 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -2164,29 +2164,29 @@ subroutine grid_average_X2Y(X2Y,work1,work2) select case (trim(X2Y)) ! flux unmasked - case('T2UF','T2U') + case('T2UF') call grid_average_X2YF('NE',work1,tarea,work2tmp,uarea) - case('T2EF','T2E') + case('T2EF') call grid_average_X2YF('E' ,work1,tarea,work2tmp,earea) - case('T2NF','T2N') + case('T2NF') call grid_average_X2YF('N' ,work1,tarea,work2tmp,narea) - case('U2TF','U2T') + case('U2TF') call grid_average_X2YF('SW',work1,uarea,work2tmp,tarea) - case('U2EF','U2E') + case('U2EF') call grid_average_X2YF('S' ,work1,uarea,work2tmp,earea) - case('U2NF','U2N') + case('U2NF') call grid_average_X2YF('W' ,work1,uarea,work2tmp,narea) - case('E2TF','E2T') + case('E2TF') call grid_average_X2YF('W' ,work1,earea,work2tmp,tarea) - case('E2UF','E2U') + case('E2UF') call grid_average_X2YF('N' ,work1,earea,work2tmp,uarea) - case('E2NF','E2N') + case('E2NF') call grid_average_X2YF('NW',work1,earea,work2tmp,narea) - case('N2TF','N2T') + case('N2TF') call grid_average_X2YF('S' ,work1,narea,work2tmp,tarea) - case('N2UF','N2U') + case('N2UF') call grid_average_X2YF('E' ,work1,narea,work2tmp,uarea) - case('N2EF','N2E') + case('N2EF') call grid_average_X2YF('SE',work1,narea,work2tmp,earea) ! 
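With the bare 'T2U'-style aliases dropped from the select case above, callers must name the flux form ('T2UF', 'U2TF', and so on) explicitly. The call sites in this series use two forms: a two-argument form with separate input and output arrays, and an in-place form with a single array. Here is a sketch of a dispatcher with that optional-argument shape; the real interface and averaging live in ice_grid.F90, and the average itself is stubbed out because only the calling pattern matters:

    module x2y_iface_sketch
       implicit none
    contains
       subroutine grid_average_sketch(X2Y, work1, work2)
          character(len=*), intent(in) :: X2Y      ! e.g. 'T2UF' (selects shift/weights)
          real, intent(inout)          :: work1(:,:)
          real, intent(out), optional  :: work2(:,:)
          real :: tmp(size(work1,1), size(work1,2))
          tmp = work1      ! a real implementation averages according to X2Y here
          if (present(work2)) then
             work2 = tmp   ! two-array form: input preserved
          else
             work1 = tmp   ! in-place form: input overwritten
          endif
       end subroutine grid_average_sketch
    end module x2y_iface_sketch

    program x2y_demo
       use x2y_iface_sketch
       implicit none
       real :: a(2,2), b(2,2)
       a = 1.0
       call grid_average_sketch('T2UF', a, b)   ! as in grid_average_X2Y('T2UF',tmass,umass)
       call grid_average_sketch('T2UF', a)      ! as in grid_average_X2Y('T2UF',uocn)
       print *, b(1,1), a(1,1)
    end program x2y_demo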
state masked diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 index f48371f8d..ddfd95297 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 @@ -96,8 +96,8 @@ subroutine ice_write_hist (ns) ! 8 coordinate variables: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT INTEGER (kind=int_kind), PARAMETER :: ncoord = 8 - ! 8 vertices in each grid cell - INTEGER (kind=int_kind), PARAMETER :: nverts = 8 + ! 4 vertices in each grid cell + INTEGER (kind=int_kind), PARAMETER :: nverts = 4 ! 8 variables describe T, U grid boundaries: ! lont_bounds, latt_bounds, lonu_bounds, latu_bounds diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 index 0e91d42d0..00a121a59 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 @@ -49,9 +49,12 @@ subroutine ice_write_hist (ns) use ice_domain, only: distrb_info, nblocks use ice_domain_size, only: nx_global, ny_global, max_blocks, max_nstrm use ice_gather_scatter, only: gather_global - use ice_grid, only: TLON, TLAT, ULON, ULAT, hm, bm, tarea, uarea, & - dxu, dxt, dyu, dyt, HTN, HTE, ANGLE, ANGLET, tmask, & - lont_bounds, latt_bounds, lonu_bounds, latu_bounds + use ice_grid, only: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT, & + hm, bm, uvm, npm, epm, & + dxu, dxt, dyu, dyt, dxn, dyn, dxe, dye, HTN, HTE, ANGLE, ANGLET, & + tarea, uarea, narea, earea, tmask, umask, nmask, emask, & + lont_bounds, latt_bounds, lonu_bounds, latu_bounds, & + lonn_bounds, latn_bounds, lone_bounds, late_bounds use ice_history_shared use ice_arrays_column, only: hin_max, floe_rad_c use ice_restart_shared, only: runid, lcdf64 @@ -89,15 +92,16 @@ subroutine ice_write_hist (ns) iodesc4di, iodesc4ds, iodesc4df type(var_desc_t) :: varid - ! 4 coordinate variables: TLON, TLAT, ULON, ULAT - INTEGER (kind=int_kind), PARAMETER :: ncoord = 4 + ! 8 coordinate variables: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT + INTEGER (kind=int_kind), PARAMETER :: ncoord = 8 ! 4 vertices in each grid cell INTEGER (kind=int_kind), PARAMETER :: nverts = 4 - ! 4 variables describe T, U grid boundaries: + ! 8 variables describe T, U, N, E grid boundaries: ! lont_bounds, latt_bounds, lonu_bounds, latu_bounds - INTEGER (kind=int_kind), PARAMETER :: nvar_verts = 4 + ! lonn_bounds, latn_bounds, lone_bounds, late_bounds + INTEGER (kind=int_kind), PARAMETER :: nvar_verts = 8 TYPE coord_attributes ! 
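The counters being adjusted here are easy to conflate: adding the N and E grids doubles the coordinate variables (ncoord = 8) and the lon/lat bounds variables (nvar_verts = 8), but every cell is still a quadrilateral, so nverts stays 4. That is why the netcdf writer's nverts is corrected from 8 back to 4 while the pio2 writer's ncoord and nvar_verts go up. A small sketch of the resulting shapes, with invented local sizes:

    program hist_dims_sketch
       implicit none
       ! 8 coordinate variables: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT
       integer, parameter :: ncoord = 8
       ! every cell is a quadrilateral, so 4 vertices on any of the four grids
       integer, parameter :: nverts = 4
       ! 8 bounds variables: lon/lat bounds for each of T, U, N, E
       integer, parameter :: nvar_verts = 8
       integer, parameter :: nx = 5, ny = 5    ! invented local sizes
       ! (vertex, i, j) ordering as in workd3v, block dimension dropped
       real :: lonn_bounds(nverts, nx, ny)
       lonn_bounds = 0.0
       print *, ncoord, nverts, nvar_verts, shape(lonn_bounds)
    end program hist_dims_sketch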
netcdf coordinate attributes character (len=11) :: short_name @@ -110,10 +114,10 @@ subroutine ice_write_hist (ns) character (len=20) :: coordinates END TYPE req_attributes - TYPE(req_attributes), dimension(nvar) :: var - TYPE(coord_attributes), dimension(ncoord) :: coord_var + TYPE(req_attributes), dimension(nvar_grd) :: var_grd + TYPE(coord_attributes), dimension(ncoord) :: var_coord TYPE(coord_attributes), dimension(nvar_verts) :: var_nverts - TYPE(coord_attributes), dimension(nvarz) :: var_nz + TYPE(coord_attributes), dimension(nvar_grdz) :: var_grdz CHARACTER (char_len), dimension(ncoord) :: coord_bounds real (kind=dbl_kind) , allocatable :: workd2(:,:,:) @@ -252,74 +256,118 @@ subroutine ice_write_hist (ns) ind = 0 ind = ind + 1 - coord_var(ind) = coord_attributes('TLON', & + var_coord(ind) = coord_attributes('TLON', & 'T grid center longitude', 'degrees_east') coord_bounds(ind) = 'lont_bounds' ind = ind + 1 - coord_var(ind) = coord_attributes('TLAT', & + var_coord(ind) = coord_attributes('TLAT', & 'T grid center latitude', 'degrees_north') coord_bounds(ind) = 'latt_bounds' ind = ind + 1 - coord_var(ind) = coord_attributes('ULON', & + var_coord(ind) = coord_attributes('ULON', & 'U grid center longitude', 'degrees_east') coord_bounds(ind) = 'lonu_bounds' ind = ind + 1 - coord_var(ind) = coord_attributes('ULAT', & + var_coord(ind) = coord_attributes('ULAT', & 'U grid center latitude', 'degrees_north') coord_bounds(ind) = 'latu_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('NLON', & + 'N grid center longitude', 'degrees_east') + coord_bounds(ind) = 'lonn_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('NLAT', & + 'N grid center latitude', 'degrees_north') + coord_bounds(ind) = 'latn_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('ELON', & + 'E grid center longitude', 'degrees_east') + coord_bounds(ind) = 'lone_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('ELAT', & + 'E grid center latitude', 'degrees_north') + coord_bounds(ind) = 'late_bounds' - var_nz(1) = coord_attributes('NCAT', 'category maximum thickness', 'm') - var_nz(2) = coord_attributes('VGRDi', 'vertical ice levels', '1') - var_nz(3) = coord_attributes('VGRDs', 'vertical snow levels', '1') - var_nz(4) = coord_attributes('VGRDb', 'vertical ice-bio levels', '1') - var_nz(5) = coord_attributes('VGRDa', 'vertical snow-ice-bio levels', '1') - var_nz(6) = coord_attributes('NFSD', 'category floe size (center)', 'm') + var_grdz(1) = coord_attributes('NCAT', 'category maximum thickness', 'm') + var_grdz(2) = coord_attributes('VGRDi', 'vertical ice levels', '1') + var_grdz(3) = coord_attributes('VGRDs', 'vertical snow levels', '1') + var_grdz(4) = coord_attributes('VGRDb', 'vertical ice-bio levels', '1') + var_grdz(5) = coord_attributes('VGRDa', 'vertical snow-ice-bio levels', '1') + var_grdz(6) = coord_attributes('NFSD', 'category floe size (center)', 'm') !----------------------------------------------------------------- ! 
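The renames above (coord_var to var_coord, var_nz to var_grdz, var to var_grd) keep the attribute tables distinct now that each carries N- and E-grid entries. A standalone sketch of registering one new entry with the same derived-type pattern follows; only short_name's len=11 appears in the source, so the other component lengths are invented:

    program coord_attrs_sketch
       implicit none
       type coord_attributes                 ! same shape as the writer's type
          character(len=11) :: short_name
          character(len=45) :: long_name     ! length invented for this sketch
          character(len=20) :: units         ! length invented for this sketch
       end type coord_attributes

       type(coord_attributes) :: var_coord(8)
       character(len=20)      :: coord_bounds(8)
       integer :: ind

       ind = 1
       var_coord(ind)    = coord_attributes('NLON', &
                           'N grid center longitude', 'degrees_east')
       coord_bounds(ind) = 'lonn_bounds'

       print *, trim(var_coord(1)%short_name)//' is bounded by '// &
                trim(coord_bounds(1))
    end program coord_attrs_sketch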
define information for optional time-invariant variables !----------------------------------------------------------------- - var(n_tmask)%req = coord_attributes('tmask', & - 'ocean grid mask', ' ') - var(n_tmask)%coordinates = 'TLON TLAT' - - var(n_blkmask)%req = coord_attributes('blkmask', & - 'ice grid block mask', ' ') - var(n_blkmask)%coordinates = 'TLON TLAT' - - var(n_tarea)%req = coord_attributes('tarea', & + var_grd(n_tmask)%req = coord_attributes('tmask', & + 'mask of T grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_tmask)%coordinates = 'TLON TLAT' + var_grd(n_umask)%req = coord_attributes('umask', & + 'mask of U grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_umask)%coordinates = 'ULON ULAT' + var_grd(n_nmask)%req = coord_attributes('nmask', & + 'mask of N grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_nmask)%coordinates = 'NLON NLAT' + var_grd(n_emask)%req = coord_attributes('emask', & + 'mask of E grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_emask)%coordinates = 'ELON ELAT' + + var_grd(n_blkmask)%req = coord_attributes('blkmask', & + 'ice grid block mask, mytask + iblk/100', 'unitless') + var_grd(n_blkmask)%coordinates = 'TLON TLAT' + + var_grd(n_tarea)%req = coord_attributes('tarea', & 'area of T grid cells', 'm^2') - var(n_tarea)%coordinates = 'TLON TLAT' - - var(n_uarea)%req = coord_attributes('uarea', & + var_grd(n_tarea)%coordinates = 'TLON TLAT' + var_grd(n_uarea)%req = coord_attributes('uarea', & 'area of U grid cells', 'm^2') - var(n_uarea)%coordinates = 'ULON ULAT' - var(n_dxt)%req = coord_attributes('dxt', & + var_grd(n_uarea)%coordinates = 'ULON ULAT' + var_grd(n_narea)%req = coord_attributes('narea', & + 'area of N grid cells', 'm^2') + var_grd(n_narea)%coordinates = 'NLON NLAT' + var_grd(n_earea)%req = coord_attributes('earea', & + 'area of E grid cells', 'm^2') + var_grd(n_earea)%coordinates = 'ELON ELAT' + + var_grd(n_dxt)%req = coord_attributes('dxt', & 'T cell width through middle', 'm') - var(n_dxt)%coordinates = 'TLON TLAT' - var(n_dyt)%req = coord_attributes('dyt', & + var_grd(n_dxt)%coordinates = 'TLON TLAT' + var_grd(n_dyt)%req = coord_attributes('dyt', & 'T cell height through middle', 'm') - var(n_dyt)%coordinates = 'TLON TLAT' - var(n_dxu)%req = coord_attributes('dxu', & + var_grd(n_dyt)%coordinates = 'TLON TLAT' + var_grd(n_dxu)%req = coord_attributes('dxu', & 'U cell width through middle', 'm') - var(n_dxu)%coordinates = 'ULON ULAT' - var(n_dyu)%req = coord_attributes('dyu', & + var_grd(n_dxu)%coordinates = 'ULON ULAT' + var_grd(n_dyu)%req = coord_attributes('dyu', & 'U cell height through middle', 'm') - var(n_dyu)%coordinates = 'ULON ULAT' - var(n_HTN)%req = coord_attributes('HTN', & + var_grd(n_dyu)%coordinates = 'ULON ULAT' + var_grd(n_dxn)%req = coord_attributes('dxn', & + 'N cell width through middle', 'm') + var_grd(n_dxn)%coordinates = 'NLON NLAT' + var_grd(n_dyn)%req = coord_attributes('dyn', & + 'N cell height through middle', 'm') + var_grd(n_dyn)%coordinates = 'NLON NLAT' + var_grd(n_dxe)%req = coord_attributes('dxe', & + 'E cell width through middle', 'm') + var_grd(n_dxe)%coordinates = 'ELON ELAT' + var_grd(n_dye)%req = coord_attributes('dye', & + 'E cell height through middle', 'm') + var_grd(n_dye)%coordinates = 'ELON ELAT' + + var_grd(n_HTN)%req = coord_attributes('HTN', & 'T cell width on North side','m') - var(n_HTN)%coordinates = 'TLON TLAT' - var(n_HTE)%req = coord_attributes('HTE', & + var_grd(n_HTN)%coordinates = 'TLON TLAT' + var_grd(n_HTE)%req = coord_attributes('HTE', & 'T cell 
width on East side', 'm') - var(n_HTE)%coordinates = 'TLON TLAT' - var(n_ANGLE)%req = coord_attributes('ANGLE', & + var_grd(n_HTE)%coordinates = 'TLON TLAT' + var_grd(n_ANGLE)%req = coord_attributes('ANGLE', & 'angle grid makes with latitude line on U grid', & 'radians') - var(n_ANGLE)%coordinates = 'ULON ULAT' - var(n_ANGLET)%req = coord_attributes('ANGLET', & + var_grd(n_ANGLE)%coordinates = 'ULON ULAT' + var_grd(n_ANGLET)%req = coord_attributes('ANGLET', & 'angle grid makes with latitude line on T grid', & 'radians') - var(n_ANGLET)%coordinates = 'TLON TLAT' + var_grd(n_ANGLET)%coordinates = 'TLON TLAT' ! These fields are required for CF compliance ! dimensions (nx,ny,nverts) @@ -331,6 +379,14 @@ subroutine ice_write_hist (ns) 'longitude boundaries of U cells', 'degrees_east') var_nverts(n_latu_bnds) = coord_attributes('latu_bounds', & 'latitude boundaries of U cells', 'degrees_north') + var_nverts(n_lonn_bnds) = coord_attributes('lonn_bounds', & + 'longitude boundaries of N cells', 'degrees_east') + var_nverts(n_latn_bnds) = coord_attributes('latn_bounds', & + 'latitude boundaries of N cells', 'degrees_north') + var_nverts(n_lone_bnds) = coord_attributes('lone_bounds', & + 'longitude boundaries of E cells', 'degrees_east') + var_nverts(n_late_bnds) = coord_attributes('late_bounds', & + 'latitude boundaries of E cells', 'degrees_north') !----------------------------------------------------------------- ! define attributes for time-invariant variables @@ -340,12 +396,12 @@ subroutine ice_write_hist (ns) dimid2(2) = jmtid do i = 1, ncoord - status = pio_def_var(File, trim(coord_var(i)%short_name), lprecision, & + status = pio_def_var(File, trim(var_coord(i)%short_name), lprecision, & dimid2, varid) - status = pio_put_att(File,varid,'long_name',trim(coord_var(i)%long_name)) - status = pio_put_att(File, varid, 'units', trim(coord_var(i)%units)) - call ice_write_hist_fill(File,varid,coord_var(i)%short_name,history_precision) - if (coord_var(i)%short_name == 'ULAT') then + status = pio_put_att(File,varid,'long_name',trim(var_coord(i)%long_name)) + status = pio_put_att(File, varid, 'units', trim(var_coord(i)%units)) + call ice_write_hist_fill(File,varid,var_coord(i)%short_name,history_precision) + if (var_coord(i)%short_name == 'ULAT') then status = pio_put_att(File,varid,'comment', & trim('Latitude of NE corner of T grid cell')) endif @@ -362,39 +418,23 @@ subroutine ice_write_hist (ns) dimidex(5)=kmtida dimidex(6)=fmtid - do i = 1, nvarz + do i = 1, nvar_grdz if (igrdz(i)) then - status = pio_def_var(File, trim(var_nz(i)%short_name), lprecision, & + status = pio_def_var(File, trim(var_grdz(i)%short_name), lprecision, & (/dimidex(i)/), varid) - status = pio_put_att(File, varid, 'long_name', var_nz(i)%long_name) - status = pio_put_att(File, varid, 'units' , var_nz(i)%units) + status = pio_put_att(File, varid, 'long_name', var_grdz(i)%long_name) + status = pio_put_att(File, varid, 'units' , var_grdz(i)%units) endif enddo - ! 
Attributes for tmask defined separately, since it has no units - if (igrd(n_tmask)) then - status = pio_def_var(File, 'tmask', lprecision, dimid2, varid) - status = pio_put_att(File,varid, 'long_name', 'ocean grid mask') - status = pio_put_att(File, varid, 'coordinates', 'TLON TLAT') - call ice_write_hist_fill(File,varid,'tmask',history_precision) - status = pio_put_att(File,varid,'comment', '0 = land, 1 = ocean') - endif - if (igrd(n_blkmask)) then - status = pio_def_var(File, 'blkmask', lprecision, dimid2, varid) - status = pio_put_att(File,varid, 'long_name', 'ice grid block mask') - status = pio_put_att(File, varid, 'coordinates', 'TLON TLAT') - status = pio_put_att(File,varid,'comment', 'mytask + iblk/100') - call ice_write_hist_fill(File,varid,'blkmask',history_precision) - endif - - do i = 3, nvar ! note: n_tmask=1, n_blkmask=2 + do i = 1, nvar_grd if (igrd(i)) then - status = pio_def_var(File, trim(var(i)%req%short_name), & + status = pio_def_var(File, trim(var_grd(i)%req%short_name), & lprecision, dimid2, varid) - status = pio_put_att(File,varid, 'long_name', trim(var(i)%req%long_name)) - status = pio_put_att(File, varid, 'units', trim(var(i)%req%units)) - status = pio_put_att(File, varid, 'coordinates', trim(var(i)%coordinates)) - call ice_write_hist_fill(File,varid,var(i)%req%short_name,history_precision) + status = pio_put_att(File,varid, 'long_name', trim(var_grd(i)%req%long_name)) + status = pio_put_att(File, varid, 'units', trim(var_grd(i)%req%units)) + status = pio_put_att(File, varid, 'coordinates', trim(var_grd(i)%coordinates)) + call ice_write_hist_fill(File,varid,var_grd(i)%req%short_name,history_precision) endif enddo @@ -853,8 +893,8 @@ subroutine ice_write_hist (ns) allocate(workr2(nx_block,ny_block,nblocks)) do i = 1,ncoord - status = pio_inq_varid(File, coord_var(i)%short_name, varid) - SELECT CASE (coord_var(i)%short_name) + status = pio_inq_varid(File, var_coord(i)%short_name, varid) + SELECT CASE (var_coord(i)%short_name) CASE ('TLON') ! Convert T grid longitude from -180 -> 180 to 0 to 360 workd2(:,:,:) = mod(tlon(:,:,1:nblocks)*rad_to_deg + c360, c360) @@ -864,6 +904,14 @@ subroutine ice_write_hist (ns) workd2(:,:,:) = ulon(:,:,1:nblocks)*rad_to_deg CASE ('ULAT') workd2(:,:,:) = ulat(:,:,1:nblocks)*rad_to_deg + CASE ('NLON') + workd2(:,:,:) = nlon(:,:,1:nblocks)*rad_to_deg + CASE ('NLAT') + workd2(:,:,:) = nlat(:,:,1:nblocks)*rad_to_deg + CASE ('ELON') + workd2(:,:,:) = elon(:,:,1:nblocks)*rad_to_deg + CASE ('ELAT') + workd2(:,:,:) = elat(:,:,1:nblocks)*rad_to_deg END SELECT if (history_precision == 8) then call pio_write_darray(File, varid, iodesc2d, & @@ -877,10 +925,10 @@ subroutine ice_write_hist (ns) ! Extra dimensions (NCAT, NFSD, VGRD*) - do i = 1, nvarz + do i = 1, nvar_grdz if (igrdz(i)) then - status = pio_inq_varid(File, var_nz(i)%short_name, varid) - SELECT CASE (var_nz(i)%short_name) + status = pio_inq_varid(File, var_grdz(i)%short_name, varid) + SELECT CASE (var_grdz(i)%short_name) CASE ('NCAT') status = pio_put_var(File, varid, hin_max(1:ncat_hist)) CASE ('NFSD') @@ -901,36 +949,43 @@ subroutine ice_write_hist (ns) ! write grid masks, area and rotation angle !----------------------------------------------------------------- -! if (igrd(n_tmask)) then -! status = pio_inq_varid(File, 'tmask', varid) -! call pio_write_darray(File, varid, iodesc2d, & -! hm(:,:,1:nblocks), status, fillval=spval_dbl) -! endif -! if (igrd(n_blkmask)) then -! status = pio_inq_varid(File, 'blkmask', varid) -! call pio_write_darray(File, varid, iodesc2d, & -! 
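In the coordinate-writing loop above, only TLON receives the mod(...) wrap into 0-360 degrees; ULON and the new NLON/ELON are written as bare rad_to_deg conversions and so keep the sign convention of the underlying radians. A runnable demonstration of the wrap itself, with an invented sample longitude:

    program lon_wrap_sketch
       implicit none
       integer, parameter :: dbl = selected_real_kind(13)
       real (kind=dbl), parameter :: c360 = 360.0_dbl
       real (kind=dbl), parameter :: pi = 3.14159265358979324_dbl
       real (kind=dbl), parameter :: rad_to_deg = 180.0_dbl/pi
       real (kind=dbl) :: tlon_rad, tlon_deg

       tlon_rad = -2.0_dbl   ! invented T-point longitude west of Greenwich (radians)
       ! the same wrap the writer applies to TLON: -180..180 deg -> 0..360 deg
       tlon_deg = mod(tlon_rad*rad_to_deg + c360, c360)
       print *, 'wrapped longitude (expect ~245.4):', tlon_deg
    end program lon_wrap_sketch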
bm(:,:,1:nblocks), status, fillval=spval_dbl) -! endif - - do i = 1, nvar ! note: n_tmask=1, n_blkmask=2 + do i = 1, nvar_grd if (igrd(i)) then - SELECT CASE (var(i)%req%short_name) + SELECT CASE (var_grd(i)%req%short_name) CASE ('tmask') workd2 = hm(:,:,1:nblocks) + CASE ('umask') + workd2 = uvm(:,:,1:nblocks) + CASE ('nmask') + workd2 = npm(:,:,1:nblocks) + CASE ('emask') + workd2 = epm(:,:,1:nblocks) CASE ('blkmask') workd2 = bm(:,:,1:nblocks) CASE ('tarea') workd2 = tarea(:,:,1:nblocks) CASE ('uarea') workd2 = uarea(:,:,1:nblocks) - CASE ('dxu') - workd2 = dxu(:,:,1:nblocks) - CASE ('dyu') - workd2 = dyu(:,:,1:nblocks) + CASE ('narea') + workd2 = narea(:,:,1:nblocks) + CASE ('earea') + workd2 = earea(:,:,1:nblocks) CASE ('dxt') workd2 = dxt(:,:,1:nblocks) CASE ('dyt') workd2 = dyt(:,:,1:nblocks) + CASE ('dxu') + workd2 = dxu(:,:,1:nblocks) + CASE ('dyu') + workd2 = dyu(:,:,1:nblocks) + CASE ('dxn') + workd2 = dxn(:,:,1:nblocks) + CASE ('dyn') + workd2 = dyn(:,:,1:nblocks) + CASE ('dxe') + workd2 = dxe(:,:,1:nblocks) + CASE ('dye') + workd2 = dye(:,:,1:nblocks) CASE ('HTN') workd2 = HTN(:,:,1:nblocks) CASE ('HTE') @@ -940,7 +995,7 @@ subroutine ice_write_hist (ns) CASE ('ANGLET') workd2 = ANGLET(:,:,1:nblocks) END SELECT - status = pio_inq_varid(File, var(i)%req%short_name, varid) + status = pio_inq_varid(File, var_grd(i)%req%short_name, varid) if (history_precision == 8) then call pio_write_darray(File, varid, iodesc2d, & workd2, status, fillval=spval_dbl) @@ -978,6 +1033,22 @@ subroutine ice_write_hist (ns) do ivertex = 1, nverts workd3v(ivertex,:,:,:) = latu_bounds(ivertex,:,:,1:nblocks) enddo + CASE ('lonn_bounds') + do ivertex = 1, nverts + workd3v(ivertex,:,:,:) = lonn_bounds(ivertex,:,:,1:nblocks) + enddo + CASE ('latn_bounds') + do ivertex = 1, nverts + workd3v(ivertex,:,:,:) = latn_bounds(ivertex,:,:,1:nblocks) + enddo + CASE ('lone_bounds') + do ivertex = 1, nverts + workd3v(ivertex,:,:,:) = lone_bounds(ivertex,:,:,1:nblocks) + enddo + CASE ('late_bounds') + do ivertex = 1, nverts + workd3v(ivertex,:,:,:) = late_bounds(ivertex,:,:,1:nblocks) + enddo END SELECT status = pio_inq_varid(File, var_nverts(i)%short_name, varid) diff --git a/cicecore/drivers/mct/cesm1/ice_import_export.F90 b/cicecore/drivers/mct/cesm1/ice_import_export.F90 index d0eac5a19..7aa60dbdf 100644 --- a/cicecore/drivers/mct/cesm1/ice_import_export.F90 +++ b/cicecore/drivers/mct/cesm1/ice_import_export.F90 @@ -472,10 +472,10 @@ subroutine ice_import( x2i ) call ice_HaloUpdate(vocn, halo_info, field_loc_center, field_type_scalar) call ice_HaloUpdate(ss_tltx, halo_info, field_loc_center, field_type_scalar) call ice_HaloUpdate(ss_tlty, halo_info, field_loc_center, field_type_scalar) - call grid_average_X2Y('T2U',uocn) - call grid_average_X2Y('T2U',vocn) - call grid_average_X2Y('T2U',ss_tltx) - call grid_average_X2Y('T2U',ss_tlty) + call grid_average_X2Y('T2UF',uocn) + call grid_average_X2Y('T2UF',vocn) + call grid_average_X2Y('T2UF',ss_tltx) + call grid_average_X2Y('T2UF',ss_tlty) call t_stopf ('cice_imp_t2u') end if diff --git a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 index 50cba8883..10d42137f 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 @@ -801,10 +801,10 @@ subroutine ice_import( importState, rc ) call ice_HaloUpdate(vocn, halo_info, field_loc_center, field_type_scalar) call ice_HaloUpdate(ss_tltx, halo_info, field_loc_center, field_type_scalar) call 
ice_HaloUpdate(ss_tlty, halo_info, field_loc_center, field_type_scalar) - call grid_average_X2Y('T2U',uocn) - call grid_average_X2Y('T2U',vocn) - call grid_average_X2Y('T2U',ss_tltx) - call grid_average_X2Y('T2U',ss_tlty) + call grid_average_X2Y('T2UF',uocn) + call grid_average_X2Y('T2UF',vocn) + call grid_average_X2Y('T2UF',ss_tltx) + call grid_average_X2Y('T2UF',ss_tlty) call t_stopf ('cice_imp_t2u') end if diff --git a/cicecore/drivers/nuopc/dmi/cice_cap.info b/cicecore/drivers/nuopc/dmi/cice_cap.info index d1eee8ae0..4b2d6d65f 100644 --- a/cicecore/drivers/nuopc/dmi/cice_cap.info +++ b/cicecore/drivers/nuopc/dmi/cice_cap.info @@ -940,10 +940,10 @@ module cice_cap ! call ice_HaloUpdate(vocn, halo_info, field_loc_center, field_type_scalar) ! call ice_HaloUpdate(ss_tltx, halo_info, field_loc_center, field_type_scalar) ! call ice_HaloUpdate(ss_tlty, halo_info, field_loc_center, field_type_scalar) - call grid_average_X2Y('T2U',uocn) - call grid_average_X2Y('T2U',vocn) - call grid_average_X2Y('T2U',ss_tltx) - call grid_average_X2Y('T2U',ss_tlty) + call grid_average_X2Y('T2UF',uocn) + call grid_average_X2Y('T2UF',vocn) + call grid_average_X2Y('T2UF',ss_tltx) + call grid_average_X2Y('T2UF',ss_tlty) end subroutine subroutine CICE_Export(st,rc) From 63a1f1f9d04776223f0b44fde1f14357b6989b61 Mon Sep 17 00:00:00 2001 From: apcraig Date: Mon, 8 Nov 2021 21:24:36 -0700 Subject: [PATCH 016/109] update history coords and attributes --- cicecore/cicedynB/analysis/ice_history.F90 | 33 +- .../io/io_netcdf/ice_history_write.F90 | 351 +++++------------- .../io/io_pio2/ice_history_write.F90 | 277 ++++---------- 3 files changed, 194 insertions(+), 467 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90 index fea284495..aa2a05cd8 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -94,7 +94,6 @@ subroutine init_hist (dt) integer (kind=int_kind), dimension(max_nstrm) :: & ntmp integer (kind=int_kind) :: nml_error ! namelist i/o error flag - character(len=25) :: l_ustr2d, l_vstr2d, l_ucstr, l_vcstr ! define location of u and v fields character(len=*), parameter :: subname = '(init_hist)' !----------------------------------------------------------------- @@ -108,18 +107,6 @@ subroutine init_hist (dt) nzblyr = nblyr+2 ! bio grid nzalyr = nblyr+4 ! aerosols (2 snow & nblyr+2 bio) - ! B grid default - l_ustr2d = ustr2d - l_vstr2d = ustr2d - l_ucstr = ucstr - l_vcstr = ucstr - if (grid_system == 'C' .or. grid_system == 'CD') then - l_ustr2d = estr2d - l_vstr2d = nstr2d - l_ucstr = ecstr - l_vcstr = ncstr - endif - !----------------------------------------------------------------- ! 
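The ice_history.F90 hunk that follows replaces the l_ustr2d indirection with an explicit grid_system branch: on the CD grid uvel is registered on E-grid coordinates and vvel on N-grid coordinates, while the B grid keeps both components at the U point. A compact sketch of just that selection; the coordinate strings are stand-ins for what estr2D/ecstr and nstr2D/ncstr presumably resolve to in ice_history_shared:

    program hist_coord_select_sketch
       implicit none
       character(len=2)  :: grid_system
       character(len=16) :: ucoord, vcoord

       grid_system = 'CD'
       ! B grid: both velocity components at the U point
       ! CD grid: uvel carried on the E grid, vvel on the N grid
       if (grid_system == 'CD') then
          ucoord = 'ELON ELAT'   ! what estr2D/ecstr presumably resolve to
          vcoord = 'NLON NLAT'   ! what nstr2D/ncstr presumably resolve to
       else
          ucoord = 'ULON ULAT'
          vcoord = 'ULON ULAT'
       endif
       print *, 'uvel on '//trim(ucoord)//', vvel on '//trim(vcoord)
    end program hist_coord_select_sketch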
read namelist !----------------------------------------------------------------- @@ -573,16 +560,28 @@ subroutine init_hist (dt) "ice area (aggregate)", & "none", c1, c0, & ns1, f_aice) + + if (grid_system == 'CD') then + call define_hist_field(n_uvel,"uvel","m/s",estr2D, ecstr, & + "ice velocity (x)", & + "positive is x direction on E grid", c1, c0, & + ns1, f_uvel) - call define_hist_field(n_uvel,"uvel","m/s",l_ustr2D, l_ucstr, & + call define_hist_field(n_vvel,"vvel","m/s",nstr2D, ncstr, & + "ice velocity (y)", & + "positive is y direction on N grid", c1, c0, & + ns1, f_vvel) + else + call define_hist_field(n_uvel,"uvel","m/s",ustr2D, ucstr, & "ice velocity (x)", & - "positive is x direction on u grid", c1, c0, & + "positive is x direction on U grid", c1, c0, & ns1, f_uvel) - call define_hist_field(n_vvel,"vvel","m/s",l_vstr2D, l_vcstr, & + call define_hist_field(n_vvel,"vvel","m/s",ustr2D, ucstr, & "ice velocity (y)", & - "positive is y direction on v grid", c1, c0, & + "positive is y direction on U grid", c1, c0, & ns1, f_vvel) + endif call define_hist_field(n_uveln,"uveln","m/s",nstr2D, ncstr, & "ice velocity (x)", & diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 index ddfd95297..5587f2b6b 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 @@ -505,51 +505,7 @@ subroutine ice_write_hist (ns) lprecision, dimid, varid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - - !----------------------------------------------------------------- - ! Add cell_methods attribute to variables if averaged - !----------------------------------------------------------------- - if (hist_avg) then - if (TRIM(avail_hist_fields(n)%vname)/='sig1' & - .or.TRIM(avail_hist_fields(n)%vname)/='sig2' & - .or.TRIM(avail_hist_fields(n)%vname)/='sistreave' & - .or.TRIM(avail_hist_fields(n)%vname)/='sistremax' & - .or.TRIM(avail_hist_fields(n)%vname)/='sigP') then - status = nf90_put_att(ncid,varid,'cell_methods','time: mean') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell methods for '//avail_hist_fields(n)%vname) - endif - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg & - .or. n==n_divu(ns) .or. n==n_shear(ns) & ! snapshots - .or. n==n_sig1(ns) .or. n==n_sig2(ns) & - .or. n==n_sigP(ns) .or. n==n_trsig(ns) & - .or. n==n_sistreave(ns) .or. n==n_sistremax(ns) & - .or. n==n_mlt_onset(ns) .or. 
n==n_frz_onset(ns) & - .or. n==n_hisnap(ns) .or. n==n_aisnap(ns)) then - status = nf90_put_att(ncid,varid,'time_rep','instantaneous') - else - status = nf90_put_att(ncid,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_2D @@ -564,39 +520,7 @@ subroutine ice_write_hist (ns) lprecision, dimidz, varid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - - !----------------------------------------------------------------- - ! Add cell_methods attribute to variables if averaged - !----------------------------------------------------------------- - if (hist_avg) then - status = nf90_put_att(ncid,varid,'cell_methods','time: mean') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell methods for '//avail_hist_fields(n)%vname) - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = nf90_put_att(ncid,varid,'time_rep','instantaneous') - else - status = nf90_put_att(ncid,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Dc @@ -611,24 +535,7 @@ subroutine ice_write_hist (ns) lprecision, dimidz, varid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! 
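A genuine fix rides along with this consolidation: the inline test deleted above chained /= comparisons with .or., which is true for every variable name (no name can equal both 'sig1' and 'sig2'), so cell_methods was attached even to the snapshot fields. The version inside ice_write_hist_attrs further down chains them with .and., which excludes those fields as intended. A short demonstration of the difference:

    program cell_methods_logic_sketch
       implicit none
       character(len=9) :: vname
       logical :: old_test, new_test

       vname = 'sig1'
       ! deleted form: /= chained with .or. is true for any name at all
       old_test = vname /= 'sig1' .or. vname /= 'sig2'
       ! refactored form: /= chained with .and. excludes the snapshot fields
       new_test = vname /= 'sig1' .and. vname /= 'sig2'
       print *, 'old test tags sig1 with cell_methods: ', old_test   ! T (bug)
       print *, 'new test tags sig1 with cell_methods: ', new_test   ! F (intended)
    end program cell_methods_logic_sketch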
num_avail_hist_fields_3Dz @@ -643,24 +550,7 @@ subroutine ice_write_hist (ns) lprecision, dimidz, varid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Db @@ -675,24 +565,7 @@ subroutine ice_write_hist (ns) lprecision, dimidz, varid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Da @@ -707,24 +580,7 @@ subroutine ice_write_hist (ns) lprecision, dimidz, varid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! 
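The companion change is how snapshot fields are recognized for the time_rep attribute: instead of comparing n against per-stream indices (n_divu(ns), n_sig1(ns), and so on), the new ice_write_hist_attrs matches on name prefixes such as vname(1:4) == 'sig1'. A sketch of that test with an abbreviated prefix list; the full list below also covers sigP, trsig, mlt_onset, frz_onset, hisnap, and aisnap:

    program time_rep_sketch
       implicit none
       character(len=16) :: vname
       logical :: snapshot

       vname = 'sistreave'
       ! prefix tests as in ice_write_hist_attrs: substring, then compare
       snapshot = vname(1:4) == 'sig1' .or. vname(1:4) == 'sig2' .or. &
                  vname(1:4) == 'divu' .or. vname(1:5) == 'shear' .or. &
                  vname(1:9) == 'sistreave' .or. vname(1:9) == 'sistremax'
       if (snapshot) then
          print *, trim(vname)//' gets time_rep = instantaneous'
       else
          print *, trim(vname)//' gets time_rep = averaged'
       endif
    end program time_rep_sketch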
num_avail_hist_fields_3Df @@ -741,39 +597,7 @@ subroutine ice_write_hist (ns) lprecision, dimidcz(1:4), varid) ! ferret if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - - !----------------------------------------------------------------- - ! Add cell_methods attribute to variables if averaged - !----------------------------------------------------------------- - if (hist_avg) then - status = nf90_put_att(ncid,varid,'cell_methods','time: mean') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell methods for '//avail_hist_fields(n)%vname) - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = nf90_put_att(ncid,varid,'time_rep','instantaneous') - else - status = nf90_put_att(ncid,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_4Di @@ -790,39 +614,7 @@ subroutine ice_write_hist (ns) lprecision, dimidcz(1:4), varid) ! ferret if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - - !----------------------------------------------------------------- - ! Add cell_methods attribute to variables if averaged - !----------------------------------------------------------------- - if (hist_avg) then - status = nf90_put_att(ncid,varid,'cell_methods','time: mean') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell methods for '//avail_hist_fields(n)%vname) - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. 
hist_avg) then - status = nf90_put_att(ncid,varid,'time_rep','instantaneous') - else - status = nf90_put_att(ncid,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_4Ds @@ -839,39 +631,7 @@ subroutine ice_write_hist (ns) lprecision, dimidcz(1:4), varid) ! ferret if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining variable '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'units', & - avail_hist_fields(n)%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid, 'long_name', & - avail_hist_fields(n)%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'coordinates', & - avail_hist_fields(n)%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//avail_hist_fields(n)%vname) - status = nf90_put_att(ncid,varid,'cell_measures', & - avail_hist_fields(n)%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//avail_hist_fields(n)%vname) - call ice_write_hist_fill(ncid,varid,avail_hist_fields(n)%vname,history_precision) - - !----------------------------------------------------------------- - ! Add cell_methods attribute to variables if averaged - !----------------------------------------------------------------- - if (hist_avg) then - status = nf90_put_att(ncid,varid,'cell_methods','time: mean') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell methods for '//avail_hist_fields(n)%vname) - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = nf90_put_att(ncid,varid,'time_rep','instantaneous') - else - status = nf90_put_att(ncid,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_4Df @@ -1448,6 +1208,94 @@ subroutine ice_write_hist (ns) end subroutine ice_write_hist +!======================================================================= + + subroutine ice_write_hist_attrs(ncid, varid, hfield, ns) + + use ice_kinds_mod + use ice_calendar, only: histfreq, histfreq_n + use ice_history_shared, only: ice_hist_field, history_precision, & + hist_avg +#ifdef USE_NETCDF + use netcdf +#endif + + integer (kind=int_kind), intent(in) :: ncid ! netcdf file id + integer (kind=int_kind), intent(in) :: varid ! netcdf variable id + type (ice_hist_field) , intent(in) :: hfield ! history file info + integer (kind=int_kind), intent(in) :: ns ! history stream + + ! 
local variables + + integer (kind=int_kind) :: status + character(len=*), parameter :: subname = '(ice_write_hist_attrs)' + +#ifdef USE_NETCDF + status = nf90_put_att(ncid,varid,'units', hfield%vunit) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining units for '//hfield%vname) + + status = nf90_put_att(ncid,varid, 'long_name', hfield%vdesc) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining long_name for '//hfield%vname) + + status = nf90_put_att(ncid,varid,'coordinates', hfield%vcoord) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining coordinates for '//hfield%vname) + + status = nf90_put_att(ncid,varid,'cell_measures', hfield%vcellmeas) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining cell measures for '//hfield%vname) + + if (hfield%vcomment /= "none") then + status = nf90_put_att(ncid,varid,'comment', hfield%vcomment) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining comment for '//hfield%vname) + endif + + call ice_write_hist_fill(ncid,varid,hfield%vname,history_precision) + + ! Add cell_methods attribute to variables if averaged + if (hist_avg) then + if (TRIM(hfield%vname(1:4))/='sig1' & + .and.TRIM(hfield%vname(1:4))/='sig2' & + .and.TRIM(hfield%vname(1:9))/='sistreave' & + .and.TRIM(hfield%vname(1:9))/='sistremax' & + .and.TRIM(hfield%vname(1:4))/='sigP') then + status = nf90_put_att(ncid,varid,'cell_methods','time: mean') + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining cell methods for '//hfield%vname) + endif + endif + + if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & + .or..not. hist_avg & + .or.TRIM(hfield%vname(1:4))=='divu' & + .or.TRIM(hfield%vname(1:5))=='shear' & + .or.TRIM(hfield%vname(1:4))=='sig1' & + .or.TRIM(hfield%vname(1:4))=='sig2' & + .or.TRIM(hfield%vname(1:4))=='sigP' & + .or.TRIM(hfield%vname(1:5))=='trsig' & + .or.TRIM(hfield%vname(1:9))=='sistreave' & + .or.TRIM(hfield%vname(1:9))=='sistremax' & + .or.TRIM(hfield%vname(1:9))=='mlt_onset' & + .or.TRIM(hfield%vname(1:9))=='frz_onset' & + .or.TRIM(hfield%vname(1:6))=='hisnap' & + .or.TRIM(hfield%vname(1:6))=='aisnap') then + status = nf90_put_att(ncid,varid,'time_rep','instantaneous') + else + status = nf90_put_att(ncid,varid,'time_rep','averaged') + endif + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining time rep for '//hfield%vname) + +#else + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & + file=__FILE__, line=__LINE__) +#endif + + end subroutine ice_write_hist_attrs + !======================================================================= subroutine ice_write_hist_fill(ncid,varid,vname,precision) @@ -1467,6 +1315,7 @@ subroutine ice_write_hist_fill(ncid,varid,vname,precision) integer (kind=int_kind) :: status character(len=*), parameter :: subname = '(ice_write_hist_fill)' +#ifdef USE_NETCDF if (precision == 8) then status = nf90_put_att(ncid,varid,'missing_value',spval_dbl) else @@ -1482,6 +1331,10 @@ subroutine ice_write_hist_fill(ncid,varid,vname,precision) endif if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining _FillValue for '//trim(vname)) +#else + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & + file=__FILE__, line=__LINE__) +#endif end subroutine ice_write_hist_fill diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 index 00a121a59..a6660544e 100644 --- 
a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 @@ -470,39 +470,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimid3, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - if (TRIM(avail_hist_fields(n)%vname)/='sig1' & - .or.TRIM(avail_hist_fields(n)%vname)/='sig2' & - .or.TRIM(avail_hist_fields(n)%vname)/='sistreave' & - .or.TRIM(avail_hist_fields(n)%vname)/='sistremax' & - .or.TRIM(avail_hist_fields(n)%vname)/='sigP') then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg & - .or. n==n_divu(ns) .or. n==n_shear(ns) & ! snapshots - .or. n==n_sig1(ns) .or. n==n_sig2(ns) & - .or. n==n_sigP(ns) .or. n==n_trsig(ns) & - .or. n==n_sistreave(ns) .or. n==n_sistremax(ns) & - .or. n==n_mlt_onset(ns) .or. n==n_frz_onset(ns) & - .or. n==n_hisnap(ns) .or. n==n_aisnap(ns)) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_2D @@ -519,27 +487,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Dc @@ -556,27 +504,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Dz @@ -593,27 +521,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Db @@ -630,27 +538,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Da @@ -667,27 +555,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Df @@ -710,27 +578,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidcz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_4Di @@ -748,27 +596,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidcz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_4Ds @@ -787,27 +615,7 @@ subroutine ice_write_hist (ns) if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & lprecision, dimidcz, varid) - status = pio_put_att(File,varid,'units', & - trim(avail_hist_fields(n)%vunit)) - status = pio_put_att(File,varid, 'long_name', & - trim(avail_hist_fields(n)%vdesc)) - status = pio_put_att(File,varid,'coordinates', & - trim(avail_hist_fields(n)%vcoord)) - status = pio_put_att(File,varid,'cell_measures', & - trim(avail_hist_fields(n)%vcellmeas)) - call ice_write_hist_fill(File,varid,avail_hist_fields(n)%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg) then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_4Df @@ -1388,6 +1196,73 @@ subroutine ice_write_hist (ns) end subroutine ice_write_hist +!======================================================================= + + subroutine ice_write_hist_attrs(File, varid, hfield, ns) + + use ice_kinds_mod + use ice_calendar, only: histfreq, histfreq_n + use ice_history_shared, only: ice_hist_field, history_precision, & + hist_avg + use ice_pio + use pio + + type(file_desc_t) :: File ! file id + type(var_desc_t) :: varid ! variable id + type (ice_hist_field), intent(in) :: hfield ! history file info + integer (kind=int_kind), intent(in) :: ns + + ! local variables + + integer (kind=int_kind) :: status + character(len=*), parameter :: subname = '(ice_write_hist_attrs)' + + status = pio_put_att(File,varid,'units', trim(hfield%vunit)) + + status = pio_put_att(File,varid, 'long_name', trim(hfield%vdesc)) + + status = pio_put_att(File,varid,'coordinates', trim(hfield%vcoord)) + + status = pio_put_att(File,varid,'cell_measures', trim(hfield%vcellmeas)) + + if (hfield%vcomment /= "none") then + status = pio_put_att(File,varid,'comment', trim(hfield%vcomment)) + endif + + call ice_write_hist_fill(File,varid,hfield%vname,history_precision) + + ! Add cell_methods attribute to variables if averaged + if (hist_avg) then + if (TRIM(hfield%vname(1:4))/='sig1' & + .and.TRIM(hfield%vname(1:4))/='sig2' & + .and.TRIM(hfield%vname(1:9))/='sistreave' & + .and.TRIM(hfield%vname(1:9))/='sistremax' & + .and.TRIM(hfield%vname(1:4))/='sigP') then + status = pio_put_att(File,varid,'cell_methods','time: mean') + endif + endif + + if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & + .or..not. 
hist_avg & + .or.TRIM(hfield%vname(1:4))=='divu' & + .or.TRIM(hfield%vname(1:5))=='shear' & + .or.TRIM(hfield%vname(1:4))=='sig1' & + .or.TRIM(hfield%vname(1:4))=='sig2' & + .or.TRIM(hfield%vname(1:4))=='sigP' & + .or.TRIM(hfield%vname(1:5))=='trsig' & + .or.TRIM(hfield%vname(1:9))=='sistreave' & + .or.TRIM(hfield%vname(1:9))=='sistremax' & + .or.TRIM(hfield%vname(1:9))=='mlt_onset' & + .or.TRIM(hfield%vname(1:9))=='frz_onset' & + .or.TRIM(hfield%vname(1:6))=='hisnap' & + .or.TRIM(hfield%vname(1:6))=='aisnap') then + status = pio_put_att(File,varid,'time_rep','instantaneous') + else + status = pio_put_att(File,varid,'time_rep','averaged') + endif + + end subroutine ice_write_hist_attrs + !======================================================================= subroutine ice_write_hist_fill(File,varid,vname,precision) From e0914ccdd08675b635222b5de5a2ca39e5f8764c Mon Sep 17 00:00:00 2001 From: apcraig Date: Wed, 10 Nov 2021 17:04:55 -0700 Subject: [PATCH 017/109] switch to six velocity variables (uvel, vvel, uvele, vvele, uveln, vveln) --- cicecore/cicedynB/analysis/ice_history.F90 | 30 +++++++++---------- .../cicedynB/analysis/ice_history_shared.F90 | 9 ++++-- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 6 ++-- cicecore/cicedynB/general/ice_state.F90 | 20 ++++++++----- 4 files changed, 37 insertions(+), 28 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90 index aa2a05cd8..ef7d96e6a 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -279,8 +279,10 @@ subroutine init_hist (dt) endif if (grid_system == 'CD') then - f_uveln = f_uvel + f_uvele = f_uvel f_vvele = f_vvel + f_uveln = f_uvel + f_vveln = f_vvel endif #ifndef ncdf @@ -333,8 +335,10 @@ subroutine init_hist (dt) call broadcast_scalar (f_aice, master_task) call broadcast_scalar (f_uvel, master_task) call broadcast_scalar (f_vvel, master_task) + call broadcast_scalar (f_uvele, master_task) call broadcast_scalar (f_uveln, master_task) call broadcast_scalar (f_vvele, master_task) + call broadcast_scalar (f_vveln, master_task) call broadcast_scalar (f_uatm, master_task) call broadcast_scalar (f_vatm, master_task) call broadcast_scalar (f_atmspd, master_task) @@ -562,12 +566,19 @@ subroutine init_hist (dt) ns1, f_aice) if (grid_system == 'CD') then - call define_hist_field(n_uvel,"uvel","m/s",estr2D, ecstr, & + call define_hist_field(n_uvele,"uvele","m/s",estr2D, ecstr, & "ice velocity (x)", & "positive is x direction on E grid", c1, c0, & ns1, f_uvel) - - call define_hist_field(n_vvel,"vvel","m/s",nstr2D, ncstr, & + call define_hist_field(n_vvele,"vvele","m/s",estr2D, ecstr, & + "ice velocity (y)", & + "positive is y direction on E grid", c1, c0, & + ns1, f_uvel) + call define_hist_field(n_uveln,"uveln","m/s",nstr2D, ncstr, & + "ice velocity (x)", & + "positive is x direction on N grid", c1, c0, & + ns1, f_vvel) + call define_hist_field(n_vveln,"vveln","m/s",nstr2D, ncstr, & "ice velocity (y)", & "positive is y direction on N grid", c1, c0, & ns1, f_vvel) @@ -576,23 +587,12 @@ subroutine init_hist (dt) "ice velocity (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_uvel) - call define_hist_field(n_vvel,"vvel","m/s",ustr2D, ucstr, & "ice velocity (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_vvel) endif - call define_hist_field(n_uveln,"uveln","m/s",nstr2D, ncstr, & - "ice velocity (x)", & - "positive is x direction on N grid", c1, c0, & - ns1, f_uveln) - - call 
define_hist_field(n_vvele,"vvele","m/s",estr2D, ecstr, & - "ice velocity (y)", & - "positive is y direction on E grid", c1, c0, & - ns1, f_vvele) - call define_hist_field(n_uatm,"uatm","m/s",ustr2D, ucstr, & "atm velocity (x)", & "positive is x direction on U grid", c1, c0, & diff --git a/cicecore/cicedynB/analysis/ice_history_shared.F90 b/cicecore/cicedynB/analysis/ice_history_shared.F90 index fc4b68ae3..74e97d489 100644 --- a/cicecore/cicedynB/analysis/ice_history_shared.F90 +++ b/cicecore/cicedynB/analysis/ice_history_shared.F90 @@ -235,7 +235,8 @@ module ice_history_shared f_snowfrac = 'x', f_snowfracn = 'x', & f_Tsfc = 'm', f_aice = 'm', & f_uvel = 'm', f_vvel = 'm', & - f_uveln = 'x', f_vvele = 'x', & + f_uvele = 'x', f_vvele = 'x', & + f_uveln = 'x', f_vveln = 'x', & f_uatm = 'm', f_vatm = 'm', & f_atmspd = 'm', f_atmdir = 'm', & f_fswup = 'm', & @@ -385,7 +386,8 @@ module ice_history_shared f_snowfrac, f_snowfracn, & f_Tsfc, f_aice , & f_uvel, f_vvel , & -! f_uveln, f_vvele , & ! for now, have this set from f_uvel, f_vvel +! f_uvele, f_vvele , & ! for now, have this set from f_uvel, f_vvel +! f_uveln, f_vveln , & ! for now, have this set from f_uvel, f_vvel f_uatm, f_vatm , & f_atmspd, f_atmdir , & f_fswup, & @@ -559,7 +561,8 @@ module ice_history_shared n_snowfrac , n_snowfracn , & n_Tsfc , n_aice , & n_uvel , n_vvel , & - n_uveln , n_vvele , & + n_uvele , n_vvele , & + n_uveln , n_vveln , & n_uatm , n_vatm , & n_atmspd , n_atmdir , & n_sice , & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 0cd7f70a5..a5ac8a4d9 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -131,7 +131,7 @@ subroutine init_dyn (dt) stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 - use ice_state, only: uvel, vvel, uveln, vvele, divu, shear + use ice_state, only: uvel, vvel, uvele, vvele, uveln, vveln, divu, shear use ice_grid, only: ULAT real (kind=dbl_kind), intent(in) :: & @@ -164,8 +164,10 @@ subroutine init_dyn (dt) uvel(i,j,iblk) = c0 ! m/s vvel(i,j,iblk) = c0 ! m/s if (grid_system == 'CD') then ! extra velocity variables - uveln = c0 + uvele = c0 vvele = c0 + uveln = c0 + vveln = c0 endif ! strain rates diff --git a/cicecore/cicedynB/general/ice_state.F90 b/cicecore/cicedynB/general/ice_state.F90 index 514d30b9a..7f93f0392 100644 --- a/cicecore/cicedynB/general/ice_state.F90 +++ b/cicecore/cicedynB/general/ice_state.F90 @@ -107,10 +107,12 @@ module ice_state real (kind=dbl_kind), dimension(:,:,:), allocatable, & public :: & - uvel , & ! x-component of velocity (m/s) - uveln , & ! extra x-component of velocity on CD grid (m/s) - vvel , & ! y-component of velocity (m/s) - vvele , & ! extra y-component of velocity on CD grid (m/s) + uvel , & ! x-component of velocity on U grid (m/s) + vvel , & ! y-component of velocity on U grid (m/s) + uvele , & ! x-component of velocity on E grid (m/s) + vvele , & ! y-component of velocity on E grid (m/s) + uveln , & ! x-component of velocity on N grid (m/s) + vveln , & ! y-component of velocity on N grid (m/s) divu , & ! strain rate I component, velocity divergence (1/s) shear , & ! strain rate II component (1/s) strength ! ice strength (N/m) @@ -151,10 +153,12 @@ subroutine alloc_state vice (nx_block,ny_block,max_blocks) , & ! volume per unit area of ice (m) vsno (nx_block,ny_block,max_blocks) , & ! 
volume per unit area of snow (m)
          aice0     (nx_block,ny_block,max_blocks) , & ! concentration of open water
-         uvel      (nx_block,ny_block,max_blocks) , & ! x-component of velocity (m/s)
-         uveln     (nx_block,ny_block,max_blocks) , & ! extra x-component of velocity on CD grid (m/s)
-         vvel      (nx_block,ny_block,max_blocks) , & ! y-component of velocity (m/s)
-         vvele     (nx_block,ny_block,max_blocks) , & ! extra y-component of velocity on CD grid (m/s)
+         uvel      (nx_block,ny_block,max_blocks) , & ! x-component of velocity on U grid (m/s)
+         vvel      (nx_block,ny_block,max_blocks) , & ! y-component of velocity on U grid (m/s)
+         uvele     (nx_block,ny_block,max_blocks) , & ! x-component of velocity on E grid (m/s)
+         vvele     (nx_block,ny_block,max_blocks) , & ! y-component of velocity on E grid (m/s)
+         uveln     (nx_block,ny_block,max_blocks) , & ! x-component of velocity on N grid (m/s)
+         vveln     (nx_block,ny_block,max_blocks) , & ! y-component of velocity on N grid (m/s)
          divu      (nx_block,ny_block,max_blocks) , & ! strain rate I component, velocity divergence (1/s)
          shear     (nx_block,ny_block,max_blocks) , & ! strain rate II component (1/s)
          strength  (nx_block,ny_block,max_blocks) , & ! ice strength (N/m)

From 8f2e63400a52d5710c5cb7a706f2a64789b6e5d1 Mon Sep 17 00:00:00 2001
From: Jean-Francois Lemieux
Date: Mon, 15 Nov 2021 20:53:04 +0000
Subject: [PATCH 018/109] Added strain_rates_T for calc of strain rates at the
 T point

---
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90    |  2 +-
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 59 +++++++++++++++++++
 2 files changed, 60 insertions(+), 1 deletion(-)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index 861f8780d..afb8eadb7 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -90,7 +90,7 @@ subroutine evp (dt)
          stress12_1, stress12_2, stress12_3, stress12_4
       use ice_grid, only: tmask, umask, dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, &
           tarear, uarear, tinyarea, grid_average_X2Y, &
-          grid_type
+          grid_type, grid_system
       use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, &
           aice_init, aice0, aicen, vicen, strength
       use ice_timers, only: timer_dynamics, timer_bound, &
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
index ab6466ee2..fc7a36899 100755
--- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
@@ -1368,6 +1368,65 @@ subroutine strain_rates (nx_block, ny_block, &
 
       end subroutine strain_rates
 
+!=======================================================================
+
+! Compute strain rates at the T point
+!
+! author: JF Lemieux, ECCC
+! Nov 2021
+
+      subroutine strain_rates_T (nx_block, ny_block, &
+                                 i, j, &
+                                 uvelE, vvelE, &
+                                 uvelN, vvelN, &
+                                 dxN, dyE, &
+                                 dxT, dyT, &
+                                 divT, tensionT, &
+                                 shearT, DeltaT )
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block ! block dimensions
+
+      integer (kind=int_kind) :: &
+         i, j ! indices
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+         uvelE , & ! x-component of velocity (m/s) at the E point
+         vvelE , & ! y-component of velocity (m/s) at the E point
+         uvelN , & ! x-component of velocity (m/s) at the N point
+         vvelN , & ! y-component of velocity (m/s) at the N point
+         dxN , & ! width of N-cell through the middle (m)
+         dyE , & ! height of E-cell through the middle (m)
+         dxT , & ! width of T-cell through the middle (m)
+         dyT ! 
height of T-cell through the middle (m) + + real (kind=dbl_kind), intent(out):: & + divT, tensionT, shearT, DeltaT ! strain rates at the T point + + character(len=*), parameter :: subname = '(strain_rates_T)' + + !----------------------------------------------------------------- + ! strain rates + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + + ! divergence = e_11 + e_22 + divT = dyE(i,j)*uvelE(i ,j ) - dyE(i-1,j)*uvelE(i-1,j ) & + + dxN(i,j)*vvelN(i ,j ) - dxN(i,j-1)*vvelN(i ,j-1) + + ! tension strain rate = e_11 - e_22 + tensionT = (dyT(i,j)**2)*(uvelE(i,j)/dyE(i,j) - uvelE(i-1,j)/dyE(i-1,j)) & + - (dxT(i,j)**2)*(vvelN(i,j)/dxN(i,j) - vvelN(i,j-1)/dxN(i,j-1)) + + ! shearing strain rate = 2*e_12 + shearT = (dxT(i,j)**2)*(uvelN(i,j)/dxN(i,j) - uvelN(i,j-1)/dxN(i,j-1)) & + + (dyT(i,j)**2)*(vvelE(i,j)/dyN(i,j) - vvelE(i-1,j)/dyE(i-1,j)) + + ! Delta (in the denominator of zeta, eta) + DeltaT = sqrt(divT**2 + e_factor*(tensionT**2 + shearT**2)) + + end subroutine strain_rates_T + !======================================================================= ! Computes viscous coefficients and replacement pressure for stress ! calculations. Note that tensile strength is included here. From bfdef303b055d1aed8d10ed89f7f8dbf558ee8aa Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Mon, 15 Nov 2021 13:55:28 -0700 Subject: [PATCH 019/109] Additional CD grid variable (#2) --- cicecore/cicedynB/analysis/ice_history.F90 | 32 +++++++------ cicecore/cicedynB/general/ice_flux.F90 | 53 +++++++++++++++++++++- 2 files changed, 71 insertions(+), 14 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90 index ef7d96e6a..d9d9215a1 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -565,33 +565,35 @@ subroutine init_hist (dt) "none", c1, c0, & ns1, f_aice) - if (grid_system == 'CD') then - call define_hist_field(n_uvele,"uvele","m/s",estr2D, ecstr, & + call define_hist_field(n_uvele,"uvele","m/s",estr2D, ecstr, & "ice velocity (x)", & "positive is x direction on E grid", c1, c0, & - ns1, f_uvel) - call define_hist_field(n_vvele,"vvele","m/s",estr2D, ecstr, & + ns1, f_uvele) + + call define_hist_field(n_vvele,"vvele","m/s",estr2D, ecstr, & "ice velocity (y)", & "positive is y direction on E grid", c1, c0, & - ns1, f_uvel) - call define_hist_field(n_uveln,"uveln","m/s",nstr2D, ncstr, & + ns1, f_vvele) + + call define_hist_field(n_uveln,"uveln","m/s",nstr2D, ncstr, & "ice velocity (x)", & "positive is x direction on N grid", c1, c0, & - ns1, f_vvel) - call define_hist_field(n_vveln,"vveln","m/s",nstr2D, ncstr, & + ns1, f_uveln) + + call define_hist_field(n_vveln,"vveln","m/s",nstr2D, ncstr, & "ice velocity (y)", & "positive is y direction on N grid", c1, c0, & - ns1, f_vvel) - else - call define_hist_field(n_uvel,"uvel","m/s",ustr2D, ucstr, & + ns1, f_vveln) + + call define_hist_field(n_uvel,"uvel","m/s",ustr2D, ucstr, & "ice velocity (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_uvel) - call define_hist_field(n_vvel,"vvel","m/s",ustr2D, ucstr, & + + call define_hist_field(n_vvel,"vvel","m/s",ustr2D, ucstr, & "ice velocity (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_vvel) - endif call define_hist_field(n_uatm,"uatm","m/s",ustr2D, ucstr, & "atm velocity (x)", & @@ -1968,6 +1970,10 @@ subroutine accum_hist (dt) call accum_hist_field(n_vvel, iblk, vvel(:,:,iblk), a2D) if (f_uveln (1:1) /= 'x') 
& call accum_hist_field(n_uveln, iblk, uveln(:,:,iblk), a2D) + if (f_vveln (1:1) /= 'x') & + call accum_hist_field(n_vveln, iblk, vveln(:,:,iblk), a2D) + if (f_uvele (1:1) /= 'x') & + call accum_hist_field(n_uvele, iblk, uvele(:,:,iblk), a2D) if (f_vvele (1:1) /= 'x') & call accum_hist_field(n_vvele, iblk, vvele(:,:,iblk), a2D) if (f_uatm (1:1) /= 'x') & diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90 index 23fb9df63..1b17c130d 100644 --- a/cicecore/cicedynB/general/ice_flux.F90 +++ b/cicecore/cicedynB/general/ice_flux.F90 @@ -73,6 +73,26 @@ module ice_flux strtlty , & ! stress due to sea surface slope, y-direction strintx , & ! divergence of internal ice stress, x (N/m^2) strinty , & ! divergence of internal ice stress, y (N/m^2) + taubxN , & ! seabed stress (x) at N points (N/m^2) + taubyN , & ! seabed stress (y) at N points (N/m^2) + strairxN, & ! stress on ice by air, x-direction at N points + strairyN, & ! stress on ice by air, y-direction at N points + strocnxN, & ! ice-ocean stress, x-direction at N points + strocnyN, & ! ice-ocean stress, y-direction at N points + strtltxN, & ! stress due to sea surface slope, x-direction at N points + strtltyN, & ! stress due to sea surface slope, y-direction at N points + strintxN, & ! divergence of internal ice stress, x at N points (N/m^2) + strintyN, & ! divergence of internal ice stress, y at N points (N/m^2) + taubxE , & ! seabed stress (x) at E points (N/m^2) + taubyE , & ! seabed stress (y) at E points (N/m^2) + strairxE, & ! stress on ice by air, x-direction at E points + strairyE, & ! stress on ice by air, y-direction at E points + strocnxE, & ! ice-ocean stress, x-direction at E points + strocnyE, & ! ice-ocean stress, y-direction at E points + strtltxE, & ! stress due to sea surface slope, x-direction at E points + strtltyE, & ! stress due to sea surface slope, y-direction at E points + strintxE, & ! divergence of internal ice stress, x at E points (N/m^2) + strintyE, & ! divergence of internal ice stress, y at E points (N/m^2) daidtd , & ! ice area tendency due to transport (1/s) dvidtd , & ! ice volume tendency due to transport (m/s) dagedtd , & ! ice age tendency due to transport (s/s) @@ -112,7 +132,11 @@ module ice_flux real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & fm , & ! Coriolis param. * mass in U-cell (kg/s) - Tbu ! factor for seabed stress (N/m^2) + Tbu , & ! factor for seabed stress (N/m^2) + fmE , & ! Coriolis param. * mass in E-cell (kg/s) + TbE , & ! factor for seabed stress (N/m^2) + fmN , & ! Coriolis param. * mass in N-cell (kg/s) + TbN ! factor for seabed stress (N/m^2) !----------------------------------------------------------------- ! Thermodynamic component @@ -348,6 +372,8 @@ module ice_flux ! subroutine alloc_flux + use ice_grid, only : grid_system + integer (int_kind) :: ierr allocate( & @@ -537,6 +563,31 @@ subroutine alloc_flux stat=ierr) if (ierr/=0) call abort_ice('(alloc_flux): Out of memory') + if (grid_system == "CD") & + allocate( & + taubxN (nx_block,ny_block,max_blocks), & ! seabed stress (x) at N points (N/m^2) + taubyN (nx_block,ny_block,max_blocks), & ! seabed stress (y) at N points (N/m^2) + strairxN (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction at N points + strairyN (nx_block,ny_block,max_blocks), & ! stress on ice by air, y-direction at N points + strocnxN (nx_block,ny_block,max_blocks), & ! ice-ocean stress, x-direction at N points + strocnyN (nx_block,ny_block,max_blocks), & ! 
ice-ocean stress, y-direction at N points + strtltxN (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, x-direction at N points + strtltyN (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, y-direction at N points + strintxN (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, x at N points (N/m^2) + strintyN (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, y at N points (N/m^2) + taubxE (nx_block,ny_block,max_blocks), & ! seabed stress (x) at E points (N/m^2) + taubyE (nx_block,ny_block,max_blocks), & ! seabed stress (y) at E points (N/m^2) + strairxE (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction at E points + strairyE (nx_block,ny_block,max_blocks), & ! stress on ice by air, y-direction at E points + strocnxE (nx_block,ny_block,max_blocks), & ! ice-ocean stress, x-direction at E points + strocnyE (nx_block,ny_block,max_blocks), & ! ice-ocean stress, y-direction at E points + strtltxE (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, x-direction at E points + strtltyE (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, y-direction at E points + strintxE (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, x at E points (N/m^2) + strintyE (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, y at E points (N/m^2) + stat=ierr) + if (ierr/=0) call abort_ice('(alloc_flux): Out of memory') + end subroutine alloc_flux !======================================================================= From 58c20f7db104e152ccbe56120ae8e92e38d65121 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Mon, 15 Nov 2021 15:31:43 -0800 Subject: [PATCH 020/109] fix recent bug preventing compiling (#5) --- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index fc7a36899..24f4621c0 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -1420,7 +1420,7 @@ subroutine strain_rates_T (nx_block, ny_block, & ! shearing strain rate = 2*e_12 shearT = (dxT(i,j)**2)*(uvelN(i,j)/dxN(i,j) - uvelN(i,j-1)/dxN(i,j-1)) & - + (dyT(i,j)**2)*(vvelE(i,j)/dyN(i,j) - vvelE(i-1,j)/dyE(i-1,j)) + + (dyT(i,j)**2)*(vvelE(i,j)/dyE(i,j) - vvelE(i-1,j)/dyE(i-1,j)) ! Delta (in the denominator of zeta, eta) DeltaT = sqrt(divT**2 + e_factor*(tensionT**2 + shearT**2)) From 50d67f8e8e47de1a9a91e6c7adbc66ecb9621f36 Mon Sep 17 00:00:00 2001 From: Philippe Blain Date: Tue, 16 Nov 2021 09:28:22 -0500 Subject: [PATCH 021/109] ice_dyn_shared: add 'viscous_coeffs_and_rep_pressure_T' subroutine (#3) Add a subroutine mimicking what 'viscous_coeffs_and_rep_pressure' does, but at a single location. Name it '*_T' since it's going to be used to compute the viscous coefficients and replacement pressure at the T point. --- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 24f4621c0..81dafb307 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -1507,6 +1507,63 @@ subroutine viscous_coeffs_and_rep_pressure (strength, tinyarea, & end subroutine viscous_coeffs_and_rep_pressure + + !======================================================================= + ! 
Computes viscous coefficients and replacement pressure for stress
+ ! calculations. Note that tensile strength is included here.
+ !
+ ! Hibler, W. D. (1979). A dynamic thermodynamic sea ice model. J. Phys.
+ ! Oceanogr., 9, 817-846.
+ !
+ ! Konig Beatty, C. and Holland, D. M. (2010). Modeling landfast ice by
+ ! adding tensile strength. J. Phys. Oceanogr. 40, 185-198.
+ !
+ ! Lemieux, J. F. et al. (2016). Improving the simulation of landfast ice
+ ! by combining tensile strength and a parameterization for grounded ridges.
+ ! J. Geophys. Res. Oceans, 121, 7354-7368.
+
+      subroutine viscous_coeffs_and_rep_pressure_T (strength, tinyarea, &
+                                                    Delta   , zetax2  , &
+                                                    etax2   , rep_prs , &
+                                                    capping)
+
+      real (kind=dbl_kind), intent(in):: &
+         strength, tinyarea
+
+      real (kind=dbl_kind), intent(in):: &
+         Delta
+
+      logical, intent(in):: capping
+
+      real (kind=dbl_kind), intent(out):: &
+         zetax2, etax2, rep_prs ! 2 x viscous coeffs, replacement pressure
+
+      ! local variables
+      real (kind=dbl_kind) :: &
+         tmpcalc
+
+      character(len=*), parameter :: subname = '(viscous_coeffs_and_rep_pressure_T)'
+
+      ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code
+
+!     if (trim(yield_curve) == 'ellipse') then
+
+      if (capping) then
+         tmpcalc = strength/max(Delta,tinyarea)
+      else
+         tmpcalc = strength/(Delta + tinyarea)
+      endif
+
+      zetax2  = (c1+Ktens)*tmpcalc
+      rep_prs = (c1-Ktens)*tmpcalc*Delta
+      etax2   = epp2i*zetax2
+
+!     else
+
+!     endif
+
+      end subroutine viscous_coeffs_and_rep_pressure_T
+
 !=======================================================================
 ! Load velocity components into array for boundary updates

From 509079660afbfefe4d5a45cd1769c7dcd5c43f89 Mon Sep 17 00:00:00 2001
From: "David A. Bailey"
Date: Tue, 16 Nov 2021 07:29:23 -0700
Subject: [PATCH 022/109] Add stress terms to history for N and E grids. (#4)

* Add stresses on N and E grids to history.
* Add back in JFs changes

* Turn off new history variables
---
 cicecore/cicedynB/analysis/ice_history.F90    | 275 ++++++++++++++++--
 .../cicedynB/analysis/ice_history_shared.F90  |  48 ++-
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 |  10 +-
 cicecore/cicedynB/general/ice_state.F90       |  16 +-
 4 files changed, 303 insertions(+), 46 deletions(-)

diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90
index d9d9215a1..a257cc19a 100644
--- a/cicecore/cicedynB/analysis/ice_history.F90
+++ b/cicecore/cicedynB/analysis/ice_history.F90
@@ -279,10 +279,34 @@ subroutine init_hist (dt)
       endif
 
       if (grid_system == 'CD') then
-         f_uvele = f_uvel
-         f_vvele = f_vvel
-         f_uveln = f_uvel
-         f_vveln = f_vvel
+         f_uvelE = f_uvel
+         f_vvelE = f_vvel
+         f_uvelN = f_uvel
+         f_vvelN = f_vvel
+         f_strairxN = f_strairx
+         f_strairyN = f_strairy
+         f_strairxE = f_strairx
+         f_strairyE = f_strairy
+         f_strocnxN = f_strocnx
+         f_strocnyN = f_strocny
+         f_strocnxE = f_strocnx
+         f_strocnyE = f_strocny
+         f_strcorxN = f_strcorx
+         f_strcoryN = f_strcory
+         f_strcorxE = f_strcorx
+         f_strcoryE = f_strcory
+         f_strintxN = f_strintx
+         f_strintyN = f_strinty
+         f_strintxE = f_strintx
+         f_strintyE = f_strinty
+         f_strtltxN = f_strtltx
+         f_strtltyN = f_strtlty
+         f_strtltxE = f_strtltx
+         f_strtltyE = f_strtlty
+         f_taubxN = f_taubx
+         f_taubyN = f_tauby
+         f_taubxE = f_taubx
+         f_taubyE = f_tauby
       endif
 
 #ifndef ncdf
@@ -335,10 +359,10 @@ subroutine init_hist (dt)
       call broadcast_scalar (f_aice, master_task)
       call broadcast_scalar (f_uvel, master_task)
       call broadcast_scalar (f_vvel, master_task)
-      call broadcast_scalar (f_uvele, master_task)
-      call broadcast_scalar (f_uveln, master_task)
-      call broadcast_scalar (f_vvele, master_task)
-      call broadcast_scalar (f_vveln, master_task)
+      call broadcast_scalar (f_uvelE, master_task)
+      call broadcast_scalar (f_uvelN, master_task)
+      call broadcast_scalar (f_vvelE, master_task)
+      call broadcast_scalar (f_vvelN, master_task)
       call broadcast_scalar (f_uatm, master_task)
       call broadcast_scalar (f_vatm, master_task)
       call broadcast_scalar (f_atmspd, master_task)
@@ -415,6 +439,30 @@ subroutine init_hist (dt)
       call broadcast_scalar (f_strinty, master_task)
       call broadcast_scalar (f_taubx, master_task)
       call broadcast_scalar (f_tauby, master_task)
+      call broadcast_scalar (f_strairxN, master_task)
+      call broadcast_scalar (f_strairyN, master_task)
+      call broadcast_scalar (f_strtltxN, master_task)
+      call broadcast_scalar (f_strtltyN, master_task)
+      call broadcast_scalar (f_strcorxN, master_task)
+      call broadcast_scalar (f_strcoryN, master_task)
+      call broadcast_scalar (f_strocnxN, master_task)
+      call broadcast_scalar (f_strocnyN, master_task)
+      call broadcast_scalar (f_strintxN, master_task)
+      call broadcast_scalar (f_strintyN, master_task)
+      call broadcast_scalar (f_taubxN, master_task)
+      call broadcast_scalar (f_taubyN, master_task)
+      call broadcast_scalar (f_strairxE, master_task)
+      call broadcast_scalar (f_strairyE, master_task)
+      call broadcast_scalar (f_strtltxE, master_task)
+      call broadcast_scalar (f_strtltyE, master_task)
+      call broadcast_scalar (f_strcorxE, master_task)
+      call broadcast_scalar (f_strcoryE, master_task)
+      call broadcast_scalar (f_strocnxE, master_task)
+      call broadcast_scalar (f_strocnyE, master_task)
+      call broadcast_scalar (f_strintxE, master_task)
+      call broadcast_scalar (f_strintyE, master_task)
+      call broadcast_scalar (f_taubxE, master_task)
+      call broadcast_scalar (f_taubyE, master_task)
       call broadcast_scalar (f_strength, master_task)
       call broadcast_scalar (f_divu, 
master_task) call broadcast_scalar (f_shear, master_task) @@ -565,25 +613,25 @@ subroutine init_hist (dt) "none", c1, c0, & ns1, f_aice) - call define_hist_field(n_uvele,"uvele","m/s",estr2D, ecstr, & + call define_hist_field(n_uvelE,"uvelE","m/s",estr2D, ecstr, & "ice velocity (x)", & "positive is x direction on E grid", c1, c0, & - ns1, f_uvele) + ns1, f_uvelE) - call define_hist_field(n_vvele,"vvele","m/s",estr2D, ecstr, & + call define_hist_field(n_vvelE,"vvelE","m/s",estr2D, ecstr, & "ice velocity (y)", & "positive is y direction on E grid", c1, c0, & - ns1, f_vvele) + ns1, f_vvelE) - call define_hist_field(n_uveln,"uveln","m/s",nstr2D, ncstr, & + call define_hist_field(n_uvelN,"uvelN","m/s",nstr2D, ncstr, & "ice velocity (x)", & "positive is x direction on N grid", c1, c0, & - ns1, f_uveln) + ns1, f_uvelN) - call define_hist_field(n_vveln,"vveln","m/s",nstr2D, ncstr, & + call define_hist_field(n_vvelN,"vvelN","m/s",nstr2D, ncstr, & "ice velocity (y)", & "positive is y direction on N grid", c1, c0, & - ns1, f_vveln) + ns1, f_vvelN) call define_hist_field(n_uvel,"uvel","m/s",ustr2D, ucstr, & "ice velocity (x)", & @@ -976,6 +1024,126 @@ subroutine init_hist (dt) "positive is y direction on U grid", c1, c0, & ns1, f_tauby) + call define_hist_field(n_strairxN,"strairxN","N/m^2",nstr2D, ncstr, & + "atm/ice stress (x)", & + "positive is x direction on N grid", c1, c0, & + ns1, f_strairxN) + + call define_hist_field(n_strairyN,"strairyN","N/m^2",nstr2D, ncstr, & + "atm/ice stress (y)", & + "positive is y direction on N grid", c1, c0, & + ns1, f_strairyN) + + call define_hist_field(n_strairxE,"strairxE","N/m^2",estr2D, ecstr, & + "atm/ice stress (x)", & + "positive is x direction on E grid", c1, c0, & + ns1, f_strairxE) + + call define_hist_field(n_strairyE,"strairyE","N/m^2",estr2D, ecstr, & + "atm/ice stress (y)", & + "positive is y direction on E grid", c1, c0, & + ns1, f_strairyE) + + call define_hist_field(n_strtltxN,"strtltxN","N/m^2",nstr2D, ncstr, & + "sea sfc tilt stress (x)", & + "positive is x direction on N grid", c1, c0, & + ns1, f_strtltxN) + + call define_hist_field(n_strtltyN,"strtltyN","N/m^2",nstr2D, ncstr, & + "sea sfc tilt stress (y)", & + "positive is y direction on N grid", c1, c0, & + ns1, f_strtltyN) + + call define_hist_field(n_strtltxE,"strtltxE","N/m^2",estr2D, ecstr, & + "sea sfc tilt stress (x)", & + "positive is x direction on E grid", c1, c0, & + ns1, f_strtltxE) + + call define_hist_field(n_strtltyE,"strtltyE","N/m^2",estr2D, ecstr, & + "sea sfc tilt stress (y)", & + "positive is y direction on E grid", c1, c0, & + ns1, f_strtltyE) + + call define_hist_field(n_strcorxN,"strcorxN","N/m^2",nstr2D, ncstr, & + "coriolis stress (x)", & + "positive is x direction on N grid", c1, c0, & + ns1, f_strcorxN) + + call define_hist_field(n_strcoryN,"strcoryN","N/m^2",nstr2D, ncstr, & + "coriolis stress (y)", & + "positive is y direction on N grid", c1, c0, & + ns1, f_strcoryN) + + call define_hist_field(n_strcorxE,"strcorxE","N/m^2",estr2D, ecstr, & + "coriolis stress (x)", & + "positive is x direction on E grid", c1, c0, & + ns1, f_strcorxE) + + call define_hist_field(n_strcoryE,"strcoryE","N/m^2",estr2D, ecstr, & + "coriolis stress (y)", & + "positive is y direction on E grid", c1, c0, & + ns1, f_strcoryE) + + call define_hist_field(n_strocnxN,"strocnxN","N/m^2",nstr2D, ncstr, & + "ocean/ice stress (x)", & + "positive is x direction on N grid", c1, c0, & + ns1, f_strocnxN) + + call define_hist_field(n_strocnyN,"strocnyN","N/m^2",nstr2D, ncstr, & + "ocean/ice stress (y)", & 
+ "positive is y direction on N grid", c1, c0, & + ns1, f_strocnyN) + + call define_hist_field(n_strocnxE,"strocnxE","N/m^2",estr2D, ecstr, & + "ocean/ice stress (x)", & + "positive is x direction on E grid", c1, c0, & + ns1, f_strocnxE) + + call define_hist_field(n_strocnyE,"strocnyE","N/m^2",estr2D, ecstr, & + "ocean/ice stress (y)", & + "positive is y direction on E grid", c1, c0, & + ns1, f_strocnyE) + + call define_hist_field(n_strintxN,"strintxN","N/m^2",nstr2D, ncstr, & + "internal ice stress (x)", & + "positive is x direction on N grid", c1, c0, & + ns1, f_strintxN) + + call define_hist_field(n_strintyN,"strintyN","N/m^2",nstr2D, ncstr, & + "internal ice stress (y)", & + "positive is y direction on N grid", c1, c0, & + ns1, f_strintyN) + + call define_hist_field(n_strintxE,"strintxE","N/m^2",estr2D, ecstr, & + "internal ice stress (x)", & + "positive is x direction on E grid", c1, c0, & + ns1, f_strintxE) + + call define_hist_field(n_strintyE,"strintyE","N/m^2",estr2D, ecstr, & + "internal ice stress (y)", & + "positive is y direction on E grid", c1, c0, & + ns1, f_strintyE) + + call define_hist_field(n_taubxN,"taubxN","N/m^2",nstr2D, ncstr, & + "seabed (basal) stress (x)", & + "positive is x direction on N grid", c1, c0, & + ns1, f_taubxN) + + call define_hist_field(n_taubyN,"taubyN","N/m^2",nstr2D, ncstr, & + "seabed (basal) stress (y)", & + "positive is y direction on N grid", c1, c0, & + ns1, f_taubyN) + + call define_hist_field(n_taubxE,"taubxE","N/m^2",estr2D, ecstr, & + "seabed (basal) stress (x)", & + "positive is x direction on E grid", c1, c0, & + ns1, f_taubxE) + + call define_hist_field(n_taubyE,"taubyE","N/m^2",estr2D, ecstr, & + "seabed (basal) stress (y)", & + "positive is y direction on E grid", c1, c0, & + ns1, f_taubyE) + call define_hist_field(n_strength,"strength","N/m",tstr2D, tcstr, & "compressive ice strength", & "none", c1, c0, & @@ -1783,9 +1951,14 @@ subroutine accum_hist (dt) albice, albsno, albpnd, coszen, flat, fsens, flwout, evap, evaps, evapi, & Tair, Tref, Qref, congel, frazil, frazil_diag, snoice, dsnow, & melts, meltb, meltt, meltl, fresh, fsalt, fresh_ai, fsalt_ai, & - fhocn, fhocn_ai, uatm, vatm, fbot, Tbot, Tsnice, & - fswthru_ai, strairx, strairy, strtltx, strtlty, strintx, strinty, & - taubx, tauby, strocnx, strocny, fm, daidtt, dvidtt, daidtd, dvidtd, fsurf, & + fhocn, fhocn_ai, uatm, vatm, fbot, Tbot, Tsnice, fswthru_ai, & + strairx, strairy, strtltx, strtlty, strintx, strinty, & + taubx, tauby, strocnx, strocny, & + strairxN, strairyN, strtltxN, strtltyN, strintxN, strintyN, & + taubxN, taubyN, strocnxN, strocnyN, & + strairxE, strairyE, strtltxE, strtltyE, strintxE, strintyE, & + taubxE, taubyE, strocnxE, strocnyE, & + fm, fmN, fmE, daidtt, dvidtt, daidtd, dvidtd, fsurf, & fcondtop, fcondbot, fsurfn, fcondtopn, flatn, fsensn, albcnt, snwcnt, & stressp_1, stressm_1, stress12_1, & stressp_2, & @@ -1968,14 +2141,14 @@ subroutine accum_hist (dt) call accum_hist_field(n_uvel, iblk, uvel(:,:,iblk), a2D) if (f_vvel (1:1) /= 'x') & call accum_hist_field(n_vvel, iblk, vvel(:,:,iblk), a2D) - if (f_uveln (1:1) /= 'x') & - call accum_hist_field(n_uveln, iblk, uveln(:,:,iblk), a2D) - if (f_vveln (1:1) /= 'x') & - call accum_hist_field(n_vveln, iblk, vveln(:,:,iblk), a2D) - if (f_uvele (1:1) /= 'x') & - call accum_hist_field(n_uvele, iblk, uvele(:,:,iblk), a2D) - if (f_vvele (1:1) /= 'x') & - call accum_hist_field(n_vvele, iblk, vvele(:,:,iblk), a2D) + if (f_uvelN (1:1) /= 'x') & + call accum_hist_field(n_uvelN, iblk, uvelN(:,:,iblk), a2D) + if (f_vvelN 
(1:1) /= 'x') & + call accum_hist_field(n_vvelN, iblk, vvelN(:,:,iblk), a2D) + if (f_uvelE (1:1) /= 'x') & + call accum_hist_field(n_uvelE, iblk, uvelE(:,:,iblk), a2D) + if (f_vvelE (1:1) /= 'x') & + call accum_hist_field(n_vvelE, iblk, vvelE(:,:,iblk), a2D) if (f_uatm (1:1) /= 'x') & call accum_hist_field(n_uatm, iblk, uatm(:,:,iblk), a2D) if (f_vatm (1:1) /= 'x') & @@ -2181,6 +2354,54 @@ subroutine accum_hist (dt) call accum_hist_field(n_taubx, iblk, taubx(:,:,iblk), a2D) if (f_tauby(1:1) /= 'x') & call accum_hist_field(n_tauby, iblk, tauby(:,:,iblk), a2D) + if (f_strairxN(1:1) /= 'x') & + call accum_hist_field(n_strairxN, iblk, strairxN(:,:,iblk), a2D) + if (f_strairyN(1:1) /= 'x') & + call accum_hist_field(n_strairyN, iblk, strairyN(:,:,iblk), a2D) + if (f_strairxE(1:1) /= 'x') & + call accum_hist_field(n_strairxE, iblk, strairxE(:,:,iblk), a2D) + if (f_strairyE(1:1) /= 'x') & + call accum_hist_field(n_strairyE, iblk, strairyE(:,:,iblk), a2D) + if (f_strtltxN(1:1) /= 'x') & + call accum_hist_field(n_strtltxN, iblk, strtltxN(:,:,iblk), a2D) + if (f_strtltyN(1:1) /= 'x') & + call accum_hist_field(n_strtltyN, iblk, strtltyN(:,:,iblk), a2D) + if (f_strtltxE(1:1) /= 'x') & + call accum_hist_field(n_strtltxE, iblk, strtltxE(:,:,iblk), a2D) + if (f_strtltyE(1:1) /= 'x') & + call accum_hist_field(n_strtltyE, iblk, strtltyE(:,:,iblk), a2D) + if (f_strcorxN(1:1) /= 'x') & + call accum_hist_field(n_strcorxN, iblk, fmN(:,:,iblk)*vvelN(:,:,iblk), a2D) + if (f_strcoryN(1:1) /= 'x') & + call accum_hist_field(n_strcoryN, iblk,-fmN(:,:,iblk)*uvelN(:,:,iblk), a2D) + if (f_strcorxE(1:1) /= 'x') & + call accum_hist_field(n_strcorxE, iblk, fmE(:,:,iblk)*vvelE(:,:,iblk), a2D) + if (f_strcoryE(1:1) /= 'x') & + call accum_hist_field(n_strcoryE, iblk,-fmE(:,:,iblk)*uvelE(:,:,iblk), a2D) + if (f_strocnxN(1:1) /= 'x') & + call accum_hist_field(n_strocnxN, iblk, strocnxN(:,:,iblk), a2D) + if (f_strocnyN(1:1) /= 'x') & + call accum_hist_field(n_strocnyN, iblk, strocnyN(:,:,iblk), a2D) + if (f_strocnxE(1:1) /= 'x') & + call accum_hist_field(n_strocnxE, iblk, strocnxE(:,:,iblk), a2D) + if (f_strocnyE(1:1) /= 'x') & + call accum_hist_field(n_strocnyE, iblk, strocnyE(:,:,iblk), a2D) + if (f_strintxN(1:1) /= 'x') & + call accum_hist_field(n_strintxN, iblk, strintxN(:,:,iblk), a2D) + if (f_strintyN(1:1) /= 'x') & + call accum_hist_field(n_strintyN, iblk, strintyN(:,:,iblk), a2D) + if (f_strintxE(1:1) /= 'x') & + call accum_hist_field(n_strintxE, iblk, strintxE(:,:,iblk), a2D) + if (f_strintyE(1:1) /= 'x') & + call accum_hist_field(n_strintyE, iblk, strintyE(:,:,iblk), a2D) + if (f_taubxN(1:1) /= 'x') & + call accum_hist_field(n_taubxN, iblk, taubxN(:,:,iblk), a2D) + if (f_taubyN(1:1) /= 'x') & + call accum_hist_field(n_taubyN, iblk, taubyN(:,:,iblk), a2D) + if (f_taubxE(1:1) /= 'x') & + call accum_hist_field(n_taubxE, iblk, taubxE(:,:,iblk), a2D) + if (f_taubyE(1:1) /= 'x') & + call accum_hist_field(n_taubyE, iblk, taubyE(:,:,iblk), a2D) if (f_strength(1:1)/= 'x') & call accum_hist_field(n_strength,iblk, strength(:,:,iblk), a2D) diff --git a/cicecore/cicedynB/analysis/ice_history_shared.F90 b/cicecore/cicedynB/analysis/ice_history_shared.F90 index 74e97d489..c4698cfc6 100644 --- a/cicecore/cicedynB/analysis/ice_history_shared.F90 +++ b/cicecore/cicedynB/analysis/ice_history_shared.F90 @@ -235,8 +235,8 @@ module ice_history_shared f_snowfrac = 'x', f_snowfracn = 'x', & f_Tsfc = 'm', f_aice = 'm', & f_uvel = 'm', f_vvel = 'm', & - f_uvele = 'x', f_vvele = 'x', & - f_uveln = 'x', f_vveln = 'x', & + f_uvelE = 'x', f_vvelE = 
'x', & + f_uvelN = 'x', f_vvelN = 'x', & f_uatm = 'm', f_vatm = 'm', & f_atmspd = 'm', f_atmdir = 'm', & f_fswup = 'm', & @@ -277,6 +277,18 @@ module ice_history_shared f_strocnx = 'm', f_strocny = 'm', & f_strintx = 'm', f_strinty = 'm', & f_taubx = 'm', f_tauby = 'm', & + f_strairxN = 'x', f_strairyN = 'x', & + f_strtltxN = 'x', f_strtltyN = 'x', & + f_strcorxN = 'x', f_strcoryN = 'x', & + f_strocnxN = 'x', f_strocnyN = 'x', & + f_strintxN = 'x', f_strintyN = 'x', & + f_taubxN = 'x', f_taubyN = 'x', & + f_strairxE = 'x', f_strairyE = 'x', & + f_strtltxE = 'x', f_strtltyE = 'x', & + f_strcorxE = 'x', f_strcoryE = 'x', & + f_strocnxE = 'x', f_strocnyE = 'x', & + f_strintxE = 'x', f_strintyE = 'x', & + f_taubxE = 'x', f_taubyE = 'x', & f_strength = 'm', & f_divu = 'm', f_shear = 'm', & f_sig1 = 'm', f_sig2 = 'm', & @@ -386,8 +398,8 @@ module ice_history_shared f_snowfrac, f_snowfracn, & f_Tsfc, f_aice , & f_uvel, f_vvel , & -! f_uvele, f_vvele , & ! for now, have this set from f_uvel, f_vvel -! f_uveln, f_vveln , & ! for now, have this set from f_uvel, f_vvel + f_uvelE, f_vvelE , & + f_uvelN, f_vvelN , & f_uatm, f_vatm , & f_atmspd, f_atmdir , & f_fswup, & @@ -428,6 +440,18 @@ module ice_history_shared f_strocnx, f_strocny , & f_strintx, f_strinty , & f_taubx, f_tauby , & + f_strairxN, f_strairyN , & + f_strtltxN, f_strtltyN , & + f_strcorxN, f_strcoryN , & + f_strocnxN, f_strocnyN , & + f_strintxN, f_strintyN , & + f_taubxN, f_taubyN , & + f_strairxE, f_strairyE , & + f_strtltxE, f_strtltyE , & + f_strcorxE, f_strcoryE , & + f_strocnxE, f_strocnyE , & + f_strintxE, f_strintyE , & + f_taubxE, f_taubyE , & f_strength, & f_divu, f_shear , & f_sig1, f_sig2 , & @@ -561,8 +585,8 @@ module ice_history_shared n_snowfrac , n_snowfracn , & n_Tsfc , n_aice , & n_uvel , n_vvel , & - n_uvele , n_vvele , & - n_uveln , n_vveln , & + n_uvelE , n_vvelE , & + n_uvelN , n_vvelN , & n_uatm , n_vatm , & n_atmspd , n_atmdir , & n_sice , & @@ -605,6 +629,18 @@ module ice_history_shared n_strocnx , n_strocny , & n_strintx , n_strinty , & n_taubx , n_tauby , & + n_strairxN , n_strairyN , & + n_strtltxN , n_strtltyN , & + n_strcorxN , n_strcoryN , & + n_strocnxN , n_strocnyN , & + n_strintxN , n_strintyN , & + n_taubxN , n_taubyN , & + n_strairxE , n_strairyE , & + n_strtltxE , n_strtltyE , & + n_strcorxE , n_strcoryE , & + n_strocnxE , n_strocnyE , & + n_strintxE , n_strintyE , & + n_taubxE , n_taubyE , & n_strength , & n_divu , n_shear , & n_sig1 , n_sig2 , & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 81dafb307..fb86f520e 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -134,7 +134,7 @@ subroutine init_dyn (dt) stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 - use ice_state, only: uvel, vvel, uvele, vvele, uveln, vveln, divu, shear + use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN, divu, shear use ice_grid, only: ULAT real (kind=dbl_kind), intent(in) :: & @@ -167,10 +167,10 @@ subroutine init_dyn (dt) uvel(i,j,iblk) = c0 ! m/s vvel(i,j,iblk) = c0 ! m/s if (grid_system == 'CD') then ! extra velocity variables - uvele = c0 - vvele = c0 - uveln = c0 - vveln = c0 + uvelE = c0 + vvelE = c0 + uvelN = c0 + vvelN = c0 endif ! 
strain rates diff --git a/cicecore/cicedynB/general/ice_state.F90 b/cicecore/cicedynB/general/ice_state.F90 index 7f93f0392..e07eca209 100644 --- a/cicecore/cicedynB/general/ice_state.F90 +++ b/cicecore/cicedynB/general/ice_state.F90 @@ -109,10 +109,10 @@ module ice_state public :: & uvel , & ! x-component of velocity on U grid (m/s) vvel , & ! y-component of velocity on U grid (m/s) - uvele , & ! x-component of velocity on E grid (m/s) - vvele , & ! y-component of velocity on E grid (m/s) - uveln , & ! x-component of velocity on N grid (m/s) - vveln , & ! y-component of velocity on N grid (m/s) + uvelE , & ! x-component of velocity on E grid (m/s) + vvelE , & ! y-component of velocity on E grid (m/s) + uvelN , & ! x-component of velocity on N grid (m/s) + vvelN , & ! y-component of velocity on N grid (m/s) divu , & ! strain rate I component, velocity divergence (1/s) shear , & ! strain rate II component (1/s) strength ! ice strength (N/m) @@ -155,10 +155,10 @@ subroutine alloc_state aice0 (nx_block,ny_block,max_blocks) , & ! concentration of open water uvel (nx_block,ny_block,max_blocks) , & ! x-component of velocity on U grid (m/s) vvel (nx_block,ny_block,max_blocks) , & ! y-component of velocity on U grid (m/s) - uvele (nx_block,ny_block,max_blocks) , & ! x-component of velocity on E grid (m/s) - vvele (nx_block,ny_block,max_blocks) , & ! y-component of velocity on E grid (m/s) - uveln (nx_block,ny_block,max_blocks) , & ! x-component of velocity on N grid (m/s) - vveln (nx_block,ny_block,max_blocks) , & ! y-component of velocity on N grid (m/s) + uvelE (nx_block,ny_block,max_blocks) , & ! x-component of velocity on E grid (m/s) + vvelE (nx_block,ny_block,max_blocks) , & ! y-component of velocity on E grid (m/s) + uvelN (nx_block,ny_block,max_blocks) , & ! x-component of velocity on N grid (m/s) + vvelN (nx_block,ny_block,max_blocks) , & ! y-component of velocity on N grid (m/s) divu (nx_block,ny_block,max_blocks) , & ! strain rate I component, velocity divergence (1/s) shear (nx_block,ny_block,max_blocks) , & ! strain rate II component (1/s) strength (nx_block,ny_block,max_blocks) , & ! ice strength (N/m) From 35dd3f863d0689b574afcf637d40b4ae853233ce Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Tue, 16 Nov 2021 10:37:50 -0700 Subject: [PATCH 023/109] Fix diagnostics for CD grid (#6) --- .../cicedynB/analysis/ice_diagnostics.F90 | 57 +++++++++++++++++-- 1 file changed, 53 insertions(+), 4 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_diagnostics.F90 b/cicecore/cicedynB/analysis/ice_diagnostics.F90 index d4e7066fb..1b9f70044 100644 --- a/cicecore/cicedynB/analysis/ice_diagnostics.F90 +++ b/cicecore/cicedynB/analysis/ice_diagnostics.F90 @@ -127,7 +127,7 @@ subroutine runtime_diags (dt) alvdr_init, alvdf_init, alidr_init, alidf_init use ice_flux_bgc, only: faero_atm, faero_ocn, fiso_atm, fiso_ocn use ice_global_reductions, only: global_sum, global_sum_prod, global_maxval - use ice_grid, only: lmask_n, lmask_s, tarean, tareas + use ice_grid, only: lmask_n, lmask_s, tarean, tareas, grid_system use ice_state ! everything ! tcraig, this is likely to cause circular dependency because ice_prescribed_mod is high level routine #ifdef CESMCOUPLED @@ -201,6 +201,9 @@ subroutine runtime_diags (dt) real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & work1, work2 +! real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & +! 
uvelT, vvelT
+
       character(len=*), parameter :: subname = '(runtime_diags)'
 
       call icepack_query_parameters(ktherm_out=ktherm, calc_Tsfc_out=calc_Tsfc)
@@ -293,7 +296,23 @@ subroutine runtime_diags (dt)
          enddo
          enddo
       enddo
-      !$OMP END PARALLEL DO
+      ! Eventually do energy diagnostic on T points.
+!      if (grid_system == 'CD') then
+!      !$OMP PARALLEL DO PRIVATE(iblk,i,j)
+!      do iblk = 1, nblocks
+!         do j = 1, ny_block
+!         do i = 1, nx_block
+!            call grid_average_X2Y('E2TS',uvelE,uvelT)
+!            call grid_average_X2Y('N2TS',vvelN,vvelT)
+!            work1(i,j,iblk) = p5 &
+!                            * (rhos*vsno(i,j,iblk) + rhoi*vice(i,j,iblk)) &
+!                            * (uvelT(i,j,iblk)*uvelT(i,j,iblk) &
+!                            +  vvelT(i,j,iblk)*vvelT(i,j,iblk))
+!         enddo
+!         enddo
+!      enddo
+!      endif
+!      !$OMP END PARALLEL DO
 
       ketotn = global_sum(work1, distrb_info, field_loc_center, tarean)
       ketots = global_sum(work1, distrb_info, field_loc_center, tareas)
@@ -384,6 +403,20 @@ subroutine runtime_diags (dt)
          enddo
          enddo
       !$OMP END PARALLEL DO
+      if (grid_system == 'CD') then
+      !$OMP PARALLEL DO PRIVATE(iblk,i,j)
+      do iblk = 1, nblocks
+         do j = 1, ny_block
+         do i = 1, nx_block
+            work1(i,j,iblk) = max(sqrt(uvelE(i,j,iblk)**2 &
+                                     + vvelE(i,j,iblk)**2), &
+                                  sqrt(uvelN(i,j,iblk)**2 &
+                                     + vvelN(i,j,iblk)**2))
+         enddo
+         enddo
+      enddo
+      !$OMP END PARALLEL DO
+      endif
 
       umaxn = global_maxval(work1, distrb_info, lmask_n)
       umaxs = global_maxval(work1, distrb_info, lmask_s)
@@ -1630,10 +1663,12 @@ end subroutine debug_ice
 
       subroutine print_state(plabel,i,j,iblk)
 
+      use ice_grid, only: grid_system
       use ice_blocks, only: block, get_block
       use ice_domain, only: blocks_ice
       use ice_domain_size, only: ncat, nilyr, nslyr, nfsd
-      use ice_state, only: aice0, aicen, vicen, vsnon, uvel, vvel, trcrn
+      use ice_state, only: aice0, aicen, vicen, vsnon, uvel, vvel, &
+          uvelE, vvelE, uvelN, vvelN, trcrn
       use ice_flux, only: uatm, vatm, potT, Tair, Qa, flw, frain, fsnow, &
           fsens, flat, evap, flwout, swvdr, swvdf, swidr, swidf, rhoa, &
           frzmlt, sst, sss, Tf, Tref, Qref, Uref, uocn, vocn, strtltx, strtlty
@@ -1755,6 +1790,12 @@ subroutine print_state(plabel,i,j,iblk)
 
       write(nu_diag,*) 'uvel(i,j)',uvel(i,j,iblk)
       write(nu_diag,*) 'vvel(i,j)',vvel(i,j,iblk)
+      if (grid_system == 'CD') then
+         write(nu_diag,*) 'uvelE(i,j)',uvelE(i,j,iblk)
+         write(nu_diag,*) 'vvelE(i,j)',vvelE(i,j,iblk)
+         write(nu_diag,*) 'uvelN(i,j)',uvelN(i,j,iblk)
+         write(nu_diag,*) 'vvelN(i,j)',vvelN(i,j,iblk)
+      endif
 
       write(nu_diag,*) ' '
       write(nu_diag,*) 'atm states and fluxes'
@@ -1802,10 +1843,12 @@ end subroutine print_state
 
       subroutine print_points_state(plabel,ilabel)
 
+      use ice_grid, only: grid_system
       use ice_blocks, only: block, get_block
       use ice_domain, only: blocks_ice
       use ice_domain_size, only: ncat, nilyr, nslyr
-      use ice_state, only: aice0, aicen, vicen, vsnon, uvel, vvel, trcrn
+      use ice_state, only: aice0, aicen, vicen, vsnon, uvel, vvel, &
+          uvelE, vvelE, uvelN, vvelN, trcrn
       use ice_flux, only: uatm, vatm, potT, Tair, Qa, flw, frain, fsnow, &
           fsens, flat, evap, flwout, swvdr, swvdf, swidr, swidf, rhoa, &
           frzmlt, sst, sss, Tf, Tref, Qref, Uref, uocn, vocn, strtltx, strtlty
@@ -1897,6 +1940,12 @@ subroutine print_points_state(plabel,ilabel)
 
          write(nu_diag,*) trim(llabel),'uvel=',uvel(i,j,iblk)
         write(nu_diag,*) trim(llabel),'vvel=',vvel(i,j,iblk)
+         if (grid_system == 'CD') then
+            write(nu_diag,*) trim(llabel),'uvelE=',uvelE(i,j,iblk)
+            write(nu_diag,*) trim(llabel),'vvelE=',vvelE(i,j,iblk)
+            write(nu_diag,*) trim(llabel),'uvelN=',uvelN(i,j,iblk)
+            write(nu_diag,*) trim(llabel),'vvelN=',vvelN(i,j,iblk)
+         endif
 
          write(nu_diag,*) ' '
         write(nu_diag,*) 'atm states and 
fluxes' From 63d0f48f37769ce1966bf212ac82af66d137af77 Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Tue, 16 Nov 2021 11:20:23 -0700 Subject: [PATCH 024/109] Remove CD history variables from namelist (#7) --- .../cicedynB/analysis/ice_history_shared.F90 | 29 ++++++++++--------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_history_shared.F90 b/cicecore/cicedynB/analysis/ice_history_shared.F90 index c4698cfc6..cbb660ae5 100644 --- a/cicecore/cicedynB/analysis/ice_history_shared.F90 +++ b/cicecore/cicedynB/analysis/ice_history_shared.F90 @@ -398,8 +398,9 @@ module ice_history_shared f_snowfrac, f_snowfracn, & f_Tsfc, f_aice , & f_uvel, f_vvel , & - f_uvelE, f_vvelE , & - f_uvelN, f_vvelN , & +! For now, don't allow the users to modify the CD grid quantities. +! f_uvelE, f_vvelE , & +! f_uvelN, f_vvelN , & f_uatm, f_vatm , & f_atmspd, f_atmdir , & f_fswup, & @@ -440,18 +441,18 @@ module ice_history_shared f_strocnx, f_strocny , & f_strintx, f_strinty , & f_taubx, f_tauby , & - f_strairxN, f_strairyN , & - f_strtltxN, f_strtltyN , & - f_strcorxN, f_strcoryN , & - f_strocnxN, f_strocnyN , & - f_strintxN, f_strintyN , & - f_taubxN, f_taubyN , & - f_strairxE, f_strairyE , & - f_strtltxE, f_strtltyE , & - f_strcorxE, f_strcoryE , & - f_strocnxE, f_strocnyE , & - f_strintxE, f_strintyE , & - f_taubxE, f_taubyE , & +! f_strairxN, f_strairyN , & +! f_strtltxN, f_strtltyN , & +! f_strcorxN, f_strcoryN , & +! f_strocnxN, f_strocnyN , & +! f_strintxN, f_strintyN , & +! f_taubxN, f_taubyN , & +! f_strairxE, f_strairyE , & +! f_strtltxE, f_strtltyE , & +! f_strcorxE, f_strcoryE , & +! f_strocnxE, f_strocnyE , & +! f_strintxE, f_strintyE , & +! f_taubxE, f_taubyE , & f_strength, & f_divu, f_shear , & f_sig1, f_sig2 , & From 34a3adaf8434af40eea8d06d1c4bfb7a82f8b0a6 Mon Sep 17 00:00:00 2001 From: Elizabeth Hunke Date: Tue, 16 Nov 2021 12:57:21 -0700 Subject: [PATCH 025/109] Landmaskb (#8) * boxislands land mask * adjusted landmask * add kmt_type option to grid namelist --- cicecore/cicedynB/general/ice_init.F90 | 24 +- cicecore/cicedynB/infrastructure/ice_grid.F90 | 205 +++++++++++++++--- configuration/scripts/ice_in | 1 + .../scripts/options/set_nml.boxislands | 38 ++++ doc/source/cice_index.rst | 1 + doc/source/user_guide/ug_case_settings.rst | 1 + 6 files changed, 235 insertions(+), 35 deletions(-) create mode 100644 configuration/scripts/options/set_nml.boxislands diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 6314bbea4..45af42adf 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -98,7 +98,7 @@ subroutine input_data use ice_arrays_column, only: bgc_data_dir, fe_data_type use ice_grid, only: grid_file, gridcpl_file, kmt_file, & bathymetry_file, use_bathymetry, & - bathymetry_format, & + bathymetry_format, kmt_type, & grid_type, grid_format, grid_system, & dxrect, dyrect, & pgl_global_ext @@ -185,7 +185,7 @@ subroutine input_data bathymetry_file, use_bathymetry, nfsd, bathymetry_format, & ncat, nilyr, nslyr, nblyr, & kcatbound, gridcpl_file, dxrect, dyrect, & - close_boundaries, orca_halogrid, grid_system + close_boundaries, orca_halogrid, grid_system, kmt_type namelist /tracer_nml/ & tr_iage, restart_age, & @@ -332,6 +332,7 @@ subroutine input_data bathymetry_file = 'unknown_bathymetry_file' bathymetry_format = 'default' use_bathymetry = .false. + kmt_type = 'file' kmt_file = 'unknown_kmt_file' version_name = 'unknown_version_name' ncat = 0 ! 
number of ice thickness categories @@ -706,6 +707,7 @@ subroutine input_data call broadcast_scalar(bathymetry_file, master_task) call broadcast_scalar(bathymetry_format, master_task) call broadcast_scalar(use_bathymetry, master_task) + call broadcast_scalar(kmt_type, master_task) call broadcast_scalar(kmt_file, master_task) call broadcast_scalar(kitd, master_task) call broadcast_scalar(kcatbound, master_task) @@ -1362,6 +1364,7 @@ subroutine input_data if (trim(grid_type) == 'tripole') tmpstr2 = ' : user-defined grid with northern hemisphere zipper' write(nu_diag,1030) ' grid_type = ',trim(grid_type),trim(tmpstr2) write(nu_diag,1030) ' grid_system = ',trim(grid_system) + write(nu_diag,1030) ' kmt_type = ',trim(kmt_type) if (trim(grid_type) /= 'rectangular') then if (use_bathymetry) then tmpstr2 = ' : bathymetric input data is used' @@ -1945,7 +1948,8 @@ subroutine input_data write(nu_diag,1031) ' grid_file = ', trim(grid_file) write(nu_diag,1031) ' gridcpl_file = ', trim(gridcpl_file) write(nu_diag,1031) ' bathymetry_file = ', trim(bathymetry_file) - write(nu_diag,1031) ' kmt_file = ', trim(kmt_file) + if (trim(kmt_type) == 'file') & + write(nu_diag,1031) ' kmt_file = ', trim(kmt_file) endif write(nu_diag,1011) ' orca_halogrid = ', orca_halogrid @@ -2037,6 +2041,20 @@ subroutine input_data abort_list = trim(abort_list)//":26" endif + if (kmt_type /= 'file' .and. & + kmt_type /= 'default' .and. & + kmt_type /= 'boxislands') then + if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown kmt_type=',trim(kmt_type) + abort_list = trim(abort_list)//":27" + endif + + if (grid_type /= 'column' .and. & + grid_type /= 'rectangular' .and. & + kmt_type /= 'file') then + if (my_task == master_task) write(nu_diag,*) subname//' ERROR: need kmt file, kmt_type=',trim(kmt_type) + abort_list = trim(abort_list)//":28" + endif + if (kdyn == 1 .and. & evp_algorithm /= 'standard_2d' .and. & evp_algorithm /= 'shared_mem_1d') then diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index f6322606a..0e1934fdf 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -53,6 +53,7 @@ module ice_grid gridcpl_file , & ! input file for POP coupling grid info grid_file , & ! input file for POP grid info kmt_file , & ! input file for POP grid info + kmt_type , & ! options are file, default, boxislands bathymetry_file, & ! input bathymetry for seabed stress bathymetry_format, & ! bathymetry file format (default or pop) grid_spacing , & ! default of 30.e3m or set by user in namelist @@ -1345,47 +1346,63 @@ subroutine rectgrid if (trim(ew_boundary_type) == 'cyclic') then - do j = 3,ny_global-2 ! closed top and bottom - do i = 1,nx_global ! open sides - work_g1(i,j) = c1 ! NOTE nx_global > 5 - enddo - enddo + if (trim(kmt_type) == 'boxislands') then + + call get_box_kmt(work_g1) + + else ! default + + do j = 3,ny_global-2 ! closed top and bottom + do i = 1,nx_global ! open sides + work_g1(i,j) = c1 ! NOTE nx_global > 5 + enddo + enddo + + endif ! kmt_type elseif (trim(ew_boundary_type) == 'open') then - ! land in the upper left and lower right corners, - ! otherwise open boundaries - imid = nint(aint(real(nx_global)/c2)) - jmid = nint(aint(real(ny_global)/c2)) + if (trim(kmt_type) == 'boxislands') then - do j = 3,ny_global-2 - do i = 3,nx_global-2 - work_g1(i,j) = c1 ! open central domain - enddo - enddo + call get_box_kmt(work_g1) - if (nx_global > 5 .and. ny_global > 5) then + else ! 
default - do j = 1, jmid+2 - do i = 1, imid+2 - work_g1(i,j) = c1 ! open lower left corner - enddo - enddo + ! land in the upper left and lower right corners, + ! otherwise open boundaries + imid = nint(aint(real(nx_global)/c2)) + jmid = nint(aint(real(ny_global)/c2)) - do j = max(jmid-2,1), ny_global - do i = max(imid-2,1), nx_global - work_g1(i,j) = c1 ! open upper right corner - enddo - enddo + do j = 3,ny_global-2 + do i = 3,nx_global-2 + work_g1(i,j) = c1 ! open central domain + enddo + enddo - endif + if (nx_global > 5 .and. ny_global > 5) then - if (close_boundaries) then - work_g1(:, 1:2) = c0 - work_g1(:, ny_global-1:ny_global) = c0 - work_g1(1:2, :) = c0 - work_g1(nx_global-1:nx_global, :) = c0 - endif + do j = 1, jmid+2 + do i = 1, imid+2 + work_g1(i,j) = c1 ! open lower left corner + enddo + enddo + + do j = max(jmid-2,1), ny_global + do i = max(imid-2,1), nx_global + work_g1(i,j) = c1 ! open upper right corner + enddo + enddo + + endif ! > 5x5 grid + + if (close_boundaries) then + work_g1(:, 1:2) = c0 + work_g1(:, ny_global-1:ny_global) = c0 + work_g1(1:2, :) = c0 + work_g1(nx_global-1:nx_global, :) = c0 + endif + + endif ! kmt_type elseif (trim(ew_boundary_type) == 'closed') then @@ -1401,6 +1418,130 @@ subroutine rectgrid end subroutine rectgrid +!======================================================================= + + ! Complex land mask for testing box cases + ! Requires nx_global, ny_global > 20 + ! Assumes work array has been initialized to 1 (ocean) and north and + ! south land boundaries have been applied (ew_boundary_type='cyclic') + + subroutine get_box_kmt (work) + + use ice_constants, only: c0, c1, c20 + + real (kind=dbl_kind), dimension(:,:), intent(inout) :: work + + integer (kind=int_kind) :: & + i, j, k, & ! indices + nxb, nyb ! convenient cell-block sizes for building the mask + + character(len=*), parameter :: subname = '(get_box_kmt)' + + ! number of cells in 5% of global grid x and y lengths + nxb = int(real(nx_global, dbl_kind) / c20, int_kind) + nyb = int(real(ny_global, dbl_kind) / c20, int_kind) + + if (nxb < 1 .or. nyb < 1) & + call abort_ice(subname//'ERROR: requires larger grid size') + + ! northeast triangle + k = 0 + do j = ny_global, ny_global-3*nyb, -1 + k = k+1 + do i = nx_global-3*nxb+k, nx_global + work(i,j) = c0 + enddo + enddo + + ! northwest docks + do j = ny_global-3*nyb, ny_global + do i = 1, 1 + work(i,j) = c0 + enddo + enddo + do i = 1, 2*nxb + do j = ny_global-3*nyb, ny_global-nyb-2 + work(i,j) = c0 + enddo + do j = ny_global-nyb, ny_global-nyb+1 + work(i,j) = c0 + enddo + enddo + + ! southwest docks + do j = 2*nyb, 3*nyb + do i = 1, 1 + work(i,j) = c0 + enddo + enddo + do j = 1, 2*nyb + do i = 2, nxb + work(i,j) = c0 + enddo + do i = 2*nxb-1, 2*nxb + work(i,j) = c0 + enddo + do i = 2*nxb+2,4*nxb + work(i,j) = c0 + enddo + enddo + + ! tiny island + do j = 14*nyb, 14*nyb+1 + do i = 14*nxb, 14*nxb+1 + work(i,j) = c0 + enddo + enddo + + ! X islands + ! left triangle + k = 0 + do i = 2*nxb, 4*nxb + k=k+1 + do j = 10*nyb+k, 14*nyb-k + work(i,j) = c0 + enddo + enddo + ! upper triangle + k = 0 + do j = 14*nyb, 12*nyb, -1 + k=k+1 + do i = 2*nxb+2+k, 6*nxb-2-k + work(i,j) = c0 + enddo + enddo + ! diagonal + k = 0 + do j = 10*nyb, 14*nyb + k=k+1 + do i = 2*nxb+4+k, 2*nxb+6+k + work(i,j) = c0 + enddo + enddo + ! lower right triangle + k = 0 + do j = 12*nyb, 10*nyb, -1 + k=k+1 + do i = 5*nxb+k, 8*nxb + work(i,j) = c0 + enddo + enddo + + ! 
bar islands + do i = 10*nxb, 16*nxb + do j = 4*nyb, 5*nyb + work(i,j) = c0 + enddo + do j = 6*nyb+2, 8*nyb + work(i,j) = c0 + enddo + do j = 8*nyb+2, 8*nyb+3 + work(i,j) = c0 + enddo + enddo + + end subroutine get_box_kmt + !======================================================================= ! CPOM displaced pole grid and land mask. \\ diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index 8656c6588..a5dd3c058 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -62,6 +62,7 @@ grid_type = 'displaced_pole' grid_system = 'B' grid_file = 'grid' + kmt_type = 'file' kmt_file = 'kmt' bathymetry_file = 'unknown_bathymetry_file' bathymetry_format = 'default' diff --git a/configuration/scripts/options/set_nml.boxislands b/configuration/scripts/options/set_nml.boxislands new file mode 100644 index 000000000..eb39a4e79 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxislands @@ -0,0 +1,38 @@ +npt = 48 +kmt_type = 'boxislands' +ice_ic = 'default' +histfreq = 'd','x','x','x','x' +grid_type = 'rectangular' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .false. +ew_boundary_type = 'cyclic' +ns_boundary_type = 'open' +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'constant' +atmbndy = 'constant' +atm_data_type = 'box2001' +ice_data_type = 'box2001' +calc_strair = .false. +restore_ice = .false. +f_aice = 'd' +f_hi = 'd' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd' +f_vvel = 'd' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd' +f_strairy = 'd' +f_strocnx = 'd' +f_strocny = 'd' +f_divu = 'd' +f_sig1 = 'd' +f_sig2 = 'd' diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index cbaed705d..da4f1280a 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -364,6 +364,7 @@ either Celsius or Kelvin units). "kice", "thermal conductivity of fresh ice (:cite:`Bitz99`)", "2.03 W/m/deg" "kitd", "type of itd conversions (0 = delta function, 1 = linear remap)", "1" "kmt_file", "input file for land mask info", "" + "kmt_type", "file, default or boxislands", "file" "krdg_partic", "ridging participation function", "1" "krdg_redist", "ridging redistribution function", "1" "krgdn", "mean ridge thickness per thickness of ridging ice", "" diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 851f39883..6df4c228d 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -251,6 +251,7 @@ grid_nml "", "``1``", "new formulation with round numbers", "" "", "``2``", "WMO standard categories", "" "", "``3``", "asymptotic scheme", "" + "``kmt_type``", "string", "file, default or boxislands", "file" "``kmt_file``", "string", "name of land mask file to be read", "'unknown_kmt_file'" "``nblyr``", "integer", "number of zbgc layers", "0" "``ncat``", "integer", "number of ice thickness categories", "0" From e9aa13264e24892acc7908ef30ad4df08501cd1e Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Tue, 16 Nov 2021 16:22:41 -0500 Subject: [PATCH 026/109] Subroutines for CD-grid rheology (#10) * In process of coding stress_T subroutine * Almost done with stress_T subroutine...it compiles. 
* Done with stress_T...it compiles

* Minor modif to deformations_T subroutine
---
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90    | 144 ++++++++++++++++++
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 100 +++++++++++-
 2 files changed, 241 insertions(+), 3 deletions(-)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index afb8eadb7..1e308b65a 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -898,6 +898,150 @@ subroutine stress (nx_block, ny_block, &
 
 end subroutine stress
 
+!=======================================================================
+
+! Computes the strain rates and internal stress components for T points
+! Computes stress terms for the momentum equation
+
+! author: JF Lemieux, ECCC
+! Nov 2021
+
+      subroutine stress_T (nx_block,  ny_block, &
+                           ksub,      icellt,   &
+                           indxti,    indxtj,   &
+                           uvelE,     vvelE,    &
+                           uvelN,     vvelN,    &
+                           dxN,       dyE,      &
+                           dxT,       dyT,      &
+                           tarear,    tinyarea, &
+                           strength,            &
+                           stresspT,  stressmT, &
+                           stress12T,           &
+                           shear,     divu,     &
+                           rdg_conv,  rdg_shear )
+
+      use ice_dyn_shared, only: strain_rates_T, deformations_T, &
+          viscous_coeffs_and_rep_pressure_T
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block, & ! block dimensions
+         ksub              , & ! subcycling step
+         icellt                ! no. of cells where icetmask = 1
+
+      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
+         indxti   , & ! compressed index in i-direction
+         indxtj       ! compressed index in j-direction
+
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+         uvelE    , & ! x-component of velocity (m/s) at the E point
+         vvelE    , & ! y-component of velocity (m/s) at the E point
+         uvelN    , & ! x-component of velocity (m/s) at the N point
+         vvelN    , & ! y-component of velocity (m/s) at the N point
+         dxN      , & ! width of N-cell through the middle (m)
+         dyE      , & ! height of E-cell through the middle (m)
+         dxT      , & ! width of T-cell through the middle (m)
+         dyT      , & ! height of T-cell through the middle (m)
+         strength , & ! ice strength (N/m)
+         tarear   , & ! 1/tarea
+         tinyarea     ! puny*tarea
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: &
+         stresspT , & ! sigma11+sigma22
+         stressmT , & ! sigma11-sigma22
+         stress12T    ! sigma12
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: &
+         shear    , & ! strain rate II component (1/s)
+         divu     , & ! strain rate I component, velocity divergence (1/s)
+         rdg_conv , & ! convergence term for ridging (1/s)
+         rdg_shear    ! shear term for ridging (1/s)
+
+      ! local variables
+
+      integer (kind=int_kind) :: &
+         i, j, ij
+
+      real (kind=dbl_kind) :: &
+         divT, tensionT, shearT, DeltaT, & ! strain rates at T point
+         zetax2T , & ! 2 x zeta (visc coeff) at T point
+         etax2T  , & ! 2 x eta  (visc coeff) at T point
+         rep_prsT    ! replacement pressure at T point
+
+      logical :: capping ! caps the viscous coefficients
+
+      character(len=*), parameter :: subname = '(stress_T)'
+
+      !-----------------------------------------------------------------
+      ! Initialize
+      !-----------------------------------------------------------------
+
+      capping = .true. ! could be later included in ice_in
+
+      do ij = 1, icellt
+         i = indxti(ij)
+         j = indxtj(ij)
+
+         !-----------------------------------------------------------------
+         ! strain rates at T point
+         ! 
NOTE these are actually strain rates * area (m^2/s)
+         !-----------------------------------------------------------------
+
+         call strain_rates_T (nx_block, ny_block, &
+                              i,        j,        &
+                              uvelE,    vvelE,    &
+                              uvelN,    vvelN,    &
+                              dxN,      dyE,      &
+                              dxT,      dyT,      &
+                              divT,     tensionT, &
+                              shearT,   DeltaT )
+
+         !-----------------------------------------------------------------
+         ! viscous coefficients and replacement pressure at T point
+         !-----------------------------------------------------------------
+
+         call viscous_coeffs_and_rep_pressure_T (strength(i,j),    &
+                                                 tinyarea(i,j),    &
+                                                 DeltaT, zetax2T, etax2T, &
+                                                 rep_prsT, capping )
+
+         !-----------------------------------------------------------------
+         ! the stresses ! kg/s^2
+         !-----------------------------------------------------------------
+
+         ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code
+
+         stresspT(i,j)  = (stresspT(i,j)*(c1-arlx1i*revp) + &
+                           arlx1i*(zetax2T*divT - rep_prsT)) * denom1
+
+         stressmT(i,j)  = (stressmT(i,j)*(c1-arlx1i*revp) + &
+                           arlx1i*etax2T*tensionT) * denom1
+
+         stress12T(i,j) = (stress12T(i,j)*(c1-arlx1i*revp) + &
+                           arlx1i*p5*etax2T*shearT) * denom1
+
+      enddo ! ij
+
+      !-----------------------------------------------------------------
+      ! on last subcycle, save quantities for mechanical redistribution
+      !-----------------------------------------------------------------
+      if (ksub == ndte) then
+
+         call deformations_T (nx_block , ny_block , &
+                              icellt   ,            &
+                              indxti   , indxtj   , &
+                              uvelE,     vvelE,     &
+                              uvelN,     vvelN,     &
+                              dxN,       dyE,       &
+                              dxT,       dyT,       &
+                              tarear   ,            &
+                              shear    , divu     , &
+                              rdg_conv , rdg_shear )
+
+      endif
+
+      end subroutine stress_T
+
 !=======================================================================
 
 end module ice_dyn_evp
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
index fb86f520e..1d27ff792 100755
--- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
@@ -26,8 +26,11 @@ module ice_dyn_shared
    public :: init_dyn, set_evp_parameters, stepu, principal_stress, &
              dyn_prep1, dyn_prep2, dyn_finish, &
              seabed_stress_factor_LKD, seabed_stress_factor_prob, &
-             alloc_dyn_shared, deformations, strain_rates, &
+             alloc_dyn_shared, &
+             deformations, deformations_T, &
+             strain_rates, strain_rates_T, &
              viscous_coeffs_and_rep_pressure, &
+             viscous_coeffs_and_rep_pressure_T, &
              stack_velocity_field, unstack_velocity_field
 
 ! namelist parameters
@@ -1276,8 +1279,100 @@ subroutine deformations (nx_block, ny_block, &
 
       enddo ! ij
 
-      end subroutine deformations
+      end subroutine deformations
+
+!=======================================================================
+
+! Compute deformations for mechanical redistribution at T point
+!
+! author: JF Lemieux, ECCC
+! Nov 2021
+
+      subroutine deformations_T (nx_block, ny_block, &
+                                 icellt,             &
+                                 indxti,   indxtj,   &
+                                 uvelE,    vvelE,    &
+                                 uvelN,    vvelN,    &
+                                 dxN,      dyE,      &
+                                 dxT,      dyT,      &
+                                 tarear,             &
+                                 shear,    divu,     &
+                                 rdg_conv, rdg_shear )
+
+      use ice_constants, only: p5
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block, & ! block dimensions
+         icellt                ! no. of cells where icetmask = 1
+
+      integer (kind=int_kind), dimension (nx_block*ny_block), &
+         intent(in) :: &
+         indxti   , & ! compressed index in i-direction
+         indxtj       ! compressed index in j-direction
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+         uvelE    , & ! x-component of velocity (m/s) at the E point
+         vvelE    , & ! y-component of velocity (m/s) at the E point
+         uvelN    , & ! x-component of velocity (m/s) at the N point
+         vvelN    , & ! y-component of velocity (m/s) at the N point
+         dxN      , & ! width of N-cell through the middle (m)
+         dyE      , & ! height of E-cell through the middle (m)
+         dxT      , & ! width of T-cell through the middle (m)
+         dyT      , & ! height of T-cell through the middle (m)
+         tarear       ! 1/tarea
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), &
+         intent(inout) :: &
+         shear    , & ! strain rate II component (1/s)
+         divu     , & ! strain rate I component, velocity divergence (1/s)
+         rdg_conv , & ! convergence term for ridging (1/s)
+         rdg_shear    ! shear term for ridging (1/s)
+
+      ! local variables
+
+      integer (kind=int_kind) :: &
+         i, j, ij
+
+      real (kind=dbl_kind) :: &
+         divT, tensionT, shearT, DeltaT, & ! strain rates at T point
+         tmp                               ! useful combination
+
+      character(len=*), parameter :: subname = '(deformations_T)'
+
+      do ij = 1, icellt
+         i = indxti(ij)
+         j = indxtj(ij)
+
+         !-----------------------------------------------------------------
+         ! strain rates
+         ! NOTE these are actually strain rates * area (m^2/s)
+         !-----------------------------------------------------------------
+
+         call strain_rates_T (nx_block, ny_block, &
+                              i,        j,        &
+                              uvelE,    vvelE,    &
+                              uvelN,    vvelN,    &
+                              dxN,      dyE,      &
+                              dxT,      dyT,      &
+                              divT,     tensionT, &
+                              shearT,   DeltaT )
+
+         !-----------------------------------------------------------------
+         ! deformations for mechanical redistribution
+         !-----------------------------------------------------------------
+
+         divu(i,j) = divT * tarear(i,j)
+         tmp = DeltaT * tarear(i,j)
+         rdg_conv(i,j)  = -min(divu(i,j),c0)
+         rdg_shear(i,j) = p5*(tmp-abs(divu(i,j)))
+
+         ! diagnostic only
+         ! shear = sqrt(tension**2 + shearing**2)
+         shear(i,j) = tarear(i,j)*sqrt( tensionT**2 + shearT**2 )
+
+      enddo ! ij
+
+      end subroutine deformations_T
+
 !=======================================================================
 
 ! Compute strain rates
@@ -1506,7 +1601,6 @@ subroutine viscous_coeffs_and_rep_pressure (strength, tinyarea, &
 !         endif
 
       end subroutine viscous_coeffs_and_rep_pressure
-
 !=======================================================================
 
! Computes viscous coefficients and replacement pressure for stress
From 1b4c03e38bae2bfa40e9b02963f0fc637b3a3b93 Mon Sep 17 00:00:00 2001
From: Tony Craig
Date: Tue, 16 Nov 2021 14:15:14 -0800
Subject: [PATCH 027/109] Add symmetry test and gridsys_suite (#11)

* Add ice_data_type = uniform to set uniform initial conditions in
ice_init.F90 set_state_var

* add uniform atm_data_type options

* add box sym tests, gridsys testsuites

Co-authored-by: daveh150
---
 cicecore/cicedynB/general/ice_forcing.F90     | 72 +++++++++++++++
 cicecore/cicedynB/general/ice_init.F90        | 89 +++++++++++--------
 configuration/scripts/options/set_nml.boxsyme | 54 +++++++++++
 configuration/scripts/options/set_nml.boxsymn | 54 +++++++++++
 .../scripts/options/set_nml.boxsymne          | 54 +++++++++++
 configuration/scripts/options/set_nml.gridb   |  2 +
 configuration/scripts/options/set_nml.gridcd  |  2 +
 configuration/scripts/tests/gridsys_suite.ts  | 16 ++++
 8 files changed, 308 insertions(+), 35 deletions(-)
 create mode 100644 configuration/scripts/options/set_nml.boxsyme
 create mode 100644 configuration/scripts/options/set_nml.boxsymn
 create mode 100644 configuration/scripts/options/set_nml.boxsymne
 create mode 100644 configuration/scripts/options/set_nml.gridb
 create mode 100644 configuration/scripts/options/set_nml.gridcd
 create mode 100644 configuration/scripts/tests/gridsys_suite.ts

diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90
index 7b1e2eac7..9e75c8170 100755
--- a/cicecore/cicedynB/general/ice_forcing.F90
+++ b/cicecore/cicedynB/general/ice_forcing.F90
@@ -310,6 +310,12 @@ subroutine init_forcing_atmo
          call ISPOL_files
       elseif (trim(atm_data_type) == 'box2001') then
          call box2001_data
+      elseif (trim(atm_data_type) == 'uniform_northeast') then
+         call uniform_data('NE')
+      elseif (trim(atm_data_type) == 'uniform_east') then
+         call uniform_data('E')
+      elseif (trim(atm_data_type) == 'uniform_north') then
+         call uniform_data('N')
       elseif (trim(atm_data_type) == 'hycom') then
          call hycom_atm_files
       endif
@@ -626,6 +632,12 @@ subroutine get_forcing_atmo
          call oned_data
       elseif (trim(atm_data_type) == 'box2001') then
          call box2001_data
+      elseif (trim(atm_data_type) == 'uniform_northeast') then
+         call uniform_data('NE')
+      elseif (trim(atm_data_type) == 'uniform_east') then
+         call uniform_data('E')
+      elseif (trim(atm_data_type) == 'uniform_north') then
+         call uniform_data('N')
       elseif (trim(atm_data_type) == 'hycom') then
         call hycom_atm_data
       else ! default values set in init_flux
@@ -5343,6 +5355,66 @@ subroutine box2001_data
 
 end subroutine box2001_data
 
+!=======================================================================
+!
+      subroutine uniform_data(dir)
+
+! uniform wind fields in some direction
+
+      use ice_domain, only: nblocks
+      use ice_domain_size, only: max_blocks
+      use ice_blocks, only: nx_block, ny_block, nghost
+      use ice_flux, only: uocn, vocn, uatm, vatm, wind, rhoa, strax, stray
+      use ice_grid, only: uvm, grid_average_X2Y
+
+      character(len=*), intent(in) :: dir
+
+      ! local parameters
+
+      integer (kind=int_kind) :: &
+         iblk, i,j           ! loop indices
+
+      real (kind=dbl_kind) :: &
+         tau
+
+      character(len=*), parameter :: subname = '(uniform_data)'
+
+      if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start'
+
+      ! ocean currents
+      uocn = c0
+      vocn = c0
+      ! 
wind components + if (dir == 'NE') then + uatm = c5 + vatm = c5 + elseif (dir == 'N') then + uatm = c0 + vatm = c5 + elseif (dir == 'E') then + uatm = c5 + vatm = c0 + else + call abort_ice (subname//'ERROR: dir unknown, dir = '//trim(dir), & + file=__FILE__, line=__LINE__) + endif + + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + + ! wind stress + wind(i,j,iblk) = sqrt(uatm(i,j,iblk)**2 + vatm(i,j,iblk)**2) + tau = rhoa(i,j,iblk) * 0.0012_dbl_kind * wind(i,j,iblk) + strax(i,j,iblk) = tau * uatm(i,j,iblk) + stray(i,j,iblk) = tau * vatm(i,j,iblk) + + enddo + enddo + enddo ! nblocks + + end subroutine uniform_data + !======================================================================= subroutine get_wave_spec diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 45af42adf..1b4d50927 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -2632,42 +2632,56 @@ subroutine set_state_var (nx_block, ny_block, & endif ! ice_data_type - if (trim(grid_type) == 'rectangular') then - - ! place ice on left side of domain - icells = 0 - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j)) then - if (ULON(i,j) < -50./rad_to_deg) then - icells = icells + 1 - indxi(icells) = i - indxj(icells) = j - endif ! ULON - endif ! tmask - enddo ! i - enddo ! j - - else - - ! place ice at high latitudes where ocean sfc is cold - icells = 0 - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j)) then - ! place ice in high latitudes where ocean sfc is cold - if ( (sst (i,j) <= Tf(i,j)+p2) .and. & - (TLAT(i,j) < edge_init_sh/rad_to_deg .or. & - TLAT(i,j) > edge_init_nh/rad_to_deg) ) then + if ((trim(ice_data_type) == 'box2001') .or. & + (trim(ice_data_type) == 'boxslotcyl')) then + + ! place ice on left side of domain + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j)) then + if (ULON(i,j) < -50./rad_to_deg) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif ! ULON + endif ! tmask + enddo ! i + enddo ! j + + else if (trim(ice_data_type) == 'uniform') then + ! all cells not land mask are ice + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j)) then icells = icells + 1 indxi(icells) = i indxj(icells) = j - endif ! cold surface - endif ! tmask - enddo ! i - enddo ! j - - endif ! rectgrid + endif + enddo + enddo + + else ! default behavior + + ! place ice at high latitudes where ocean sfc is cold + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j)) then + ! place ice in high latitudes where ocean sfc is cold + if ( (sst (i,j) <= Tf(i,j)+p2) .and. & + (TLAT(i,j) < edge_init_sh/rad_to_deg .or. & + TLAT(i,j) > edge_init_nh/rad_to_deg) ) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif ! cold surface + endif ! tmask + enddo ! i + enddo ! j + + endif ! ice_data_type do n = 1, ncat @@ -2699,7 +2713,9 @@ subroutine set_state_var (nx_block, ny_block, & ! / (real(ny_global,kind=dbl_kind)) * p5) endif vicen(i,j,n) = hinit(n) * aicen(i,j,n) ! m + elseif (trim(ice_data_type) == 'boxslotcyl') then + if (hinit(n) > c0) then ! slotted cylinder call boxslotcyl_data_aice(aicen, i, j, & @@ -2708,9 +2724,12 @@ subroutine set_state_var (nx_block, ny_block, & iglob, jglob) endif vicen(i,j,n) = hinit(n) * aicen(i,j,n) ! m - else + + else ! default case. ice_data_type = uniform + vicen(i,j,n) = hinit(n) * ainit(n) ! m - endif + endif ! 
ice_data_type + vsnon(i,j,n) = min(aicen(i,j,n)*hsno_init,p2*vicen(i,j,n)) call icepack_init_trcr(Tair = Tair(i,j), Tf = Tf(i,j), & diff --git a/configuration/scripts/options/set_nml.boxsyme b/configuration/scripts/options/set_nml.boxsyme new file mode 100644 index 000000000..ca8f81e63 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxsyme @@ -0,0 +1,54 @@ +days_per_year = 360 +use_leap_years = .false. +npt_unit = 'd' +npt = 5 +ice_ic = 'default' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .true. +ew_boundary_type = 'open' +ns_boundary_type = 'open' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'constant' +atmbndy = 'constant' +atm_data_type = 'uniform_east' +ice_data_type = 'uniform' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .true. +f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.boxsymn b/configuration/scripts/options/set_nml.boxsymn new file mode 100644 index 000000000..2a1449ae2 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxsymn @@ -0,0 +1,54 @@ +days_per_year = 360 +use_leap_years = .false. +npt_unit = 'd' +npt = 5 +ice_ic = 'default' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .true. +ew_boundary_type = 'open' +ns_boundary_type = 'open' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'constant' +atmbndy = 'constant' +atm_data_type = 'uniform_north' +ice_data_type = 'uniform' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .true. +f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.boxsymne b/configuration/scripts/options/set_nml.boxsymne new file mode 100644 index 000000000..af38fa6fe --- /dev/null +++ b/configuration/scripts/options/set_nml.boxsymne @@ -0,0 +1,54 @@ +days_per_year = 360 +use_leap_years = .false. +npt_unit = 'd' +npt = 5 +ice_ic = 'default' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .true. +ew_boundary_type = 'open' +ns_boundary_type = 'open' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'constant' +atmbndy = 'constant' +atm_data_type = 'uniform_northeast' +ice_data_type = 'uniform' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .true. 
+f_aice = 'd1'
+f_hi = 'd1'
+f_hs = 'd'
+f_Tsfc = 'd'
+f_uvel = 'd1'
+f_vvel = 'd1'
+f_uatm = 'd'
+f_vatm = 'd'
+f_uocn = 'd'
+f_vocn = 'd'
+f_strairx = 'd1'
+f_strairy = 'd1'
+f_strtltx = 'd1'
+f_strtlty = 'd1'
+f_strcorx = 'd1'
+f_strcory = 'd1'
+f_strocnx = 'd1'
+f_strocny = 'd1'
+f_strintx = 'd1'
+f_strinty = 'd1'
+f_taubx = 'd1'
+f_tauby = 'd1'
+f_divu = 'd1'
+f_sig1 = 'd1'
+f_sig2 = 'd1'
diff --git a/configuration/scripts/options/set_nml.gridb b/configuration/scripts/options/set_nml.gridb
new file mode 100644
index 000000000..2a209410b
--- /dev/null
+++ b/configuration/scripts/options/set_nml.gridb
@@ -0,0 +1,2 @@
+grid_system = 'B'
+
diff --git a/configuration/scripts/options/set_nml.gridcd b/configuration/scripts/options/set_nml.gridcd
new file mode 100644
index 000000000..9426056e9
--- /dev/null
+++ b/configuration/scripts/options/set_nml.gridcd
@@ -0,0 +1,2 @@
+grid_system = 'CD'
+
diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts
new file mode 100644
index 000000000..0a3d07025
--- /dev/null
+++ b/configuration/scripts/tests/gridsys_suite.ts
@@ -0,0 +1,16 @@
+# Test         Grid    PEs     Sets    BFB-compare
+smoke          gx3     8x2     diag1,run5day
+restart        gx3     4x2     debug,diag1
+smoke          gbox80  1x1     box2001
+smoke          gbox80  1x1     boxslotcyl
+smoke          gbox80  1x1     boxsymn
+smoke          gbox80  1x1     boxsyme
+smoke          gbox80  1x1     boxsymne
+
+smoke          gx3     8x2     diag1,run5day,gridcd
+restart        gx3     4x2     debug,diag1,gridcd
+smoke          gbox80  1x1     box2001,gridcd
+smoke          gbox80  1x1     boxslotcyl,gridcd
+smoke          gbox80  1x1     boxsymn,gridcd
+smoke          gbox80  1x1     boxsyme,gridcd
+smoke          gbox80  1x1     boxsymne,gridcd
From 97791af750bd3f23584d07c5d4ff3f1817a7487f Mon Sep 17 00:00:00 2001
From: daveh150
Date: Tue, 16 Nov 2021 18:03:57 -0700
Subject: [PATCH 028/109] Cgrid dev forcing (#13)

* Add ice_data_type = uniform to set uniform initial conditions in
ice_init.F90 set_state_var

* add uniform atm_data_type options

* added uniform_data_ocn options to ice_forcing.F90

* Added options for calm atm/ocn forcing.

* removing set_nml.boxdsym

Co-authored-by: apcraig
---
 cicecore/cicedynB/general/ice_forcing.F90 | 129 +++++++++++++++++-----
 1 file changed, 102 insertions(+), 27 deletions(-)

diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90
index 9e75c8170..77da544a6 100755
--- a/cicecore/cicedynB/general/ice_forcing.F90
+++ b/cicecore/cicedynB/general/ice_forcing.F90
@@ -118,11 +118,11 @@ module ice_forcing
         atm_data_format, & ! 'bin'=binary or 'nc'=netcdf
         ocn_data_format, & ! 'bin'=binary or 'nc'=netcdf
         atm_data_type,   & ! 'default', 'monthly', 'ncar',
-                           ! 'hadgem' or 'oned' or
+                           ! 'hadgem' or 'oned' or 'calm'
                           ! 'JRA55_gx1' or 'JRA55_gx3' or 'JRA55_tx1'
         bgc_data_type,   & ! 'default', 'clim'
-        ocn_data_type,   & ! 'default', 'clim', 'ncar', 'oned',
-                           ! 'hadgem_sst' or 'hadgem_sst_uvocn'
+        ocn_data_type,   & ! 'default', 'clim', 'ncar', 'oned', 'calm',
+                           ! 'hadgem_sst' or 'hadgem_sst_uvocn', 'uniform'
         ice_data_type,   & ! 'default', 'box2001', 'boxslotcyl'
         precip_units       ! 'mm_per_month', 'mm_per_sec', 'mks','m_per_sec'
@@ -316,6 +316,8 @@ subroutine init_forcing_atmo
          call uniform_data('E')
       elseif (trim(atm_data_type) == 'uniform_north') then
          call uniform_data('N')
+      elseif (trim(atm_data_type) == 'calm') then
+         call uniform_data('N',c0) ! direction does not matter when c0
       elseif (trim(atm_data_type) == 'hycom') then
          call hycom_atm_files
       endif
@@ -517,6 +519,21 @@ subroutine init_forcing_ocn(dt)
          call ocn_data_hycom_init
       endif

+      ! uniform forcing options
+      if (trim(ocn_data_type) == 'uniform_northeast') then
+         call uniform_data_ocn('NE',p1)
+      endif
+      if (trim(ocn_data_type) == 'uniform_east') then
+         call uniform_data_ocn('E',p1)
+      endif
+      if (trim(ocn_data_type) == 'uniform_north') then
+         call uniform_data_ocn('N',p1)
+      endif
+
+      if (trim(ocn_data_type) == 'calm') then
+         call uniform_data_ocn('N',c0) ! direction does not matter for c0
+      endif
+
 end subroutine init_forcing_ocn

 !=======================================================================
@@ -633,11 +650,13 @@ subroutine get_forcing_atmo
       elseif (trim(atm_data_type) == 'box2001') then
         call box2001_data
       elseif (trim(atm_data_type) == 'uniform_northeast') then
-         call uniform_data('NE')
+         ! dah: uniform options included here to allow call to prepare_forcing
+         ! is prepare_forcing required? zlvl0 and precip options are set in prepare_forcing.
+         ! call uniform_data('NE')
       elseif (trim(atm_data_type) == 'uniform_east') then
-         call uniform_data('E')
+         ! call uniform_data('E')
       elseif (trim(atm_data_type) == 'uniform_north') then
-         call uniform_data('N')
+         ! call uniform_data('N')
       elseif (trim(atm_data_type) == 'hycom') then
         call hycom_atm_data
       else ! default values set in init_flux
@@ -5357,17 +5376,17 @@ end subroutine box2001_data

 !=======================================================================
 !
-      subroutine uniform_data(dir)
-
+      subroutine uniform_data(dir,spd)
 ! uniform wind fields in some direction

       use ice_domain, only: nblocks
       use ice_domain_size, only: max_blocks
       use ice_blocks, only: nx_block, ny_block, nghost
       use ice_flux, only: uocn, vocn, uatm, vatm, wind, rhoa, strax, stray
-      use ice_grid, only: uvm, grid_average_X2Y
+      use ice_grid, only: grid_average_X2Y

       character(len=*), intent(in) :: dir
+      real(kind=dbl_kind), intent(in), optional :: spd ! speed for test

       ! local parameters

       integer (kind=int_kind) :: &
          iblk, i,j           ! loop indices

       real (kind=dbl_kind) :: &
-         tau
+         tau, &
+         atm_val             ! value to use for atm speed

       character(len=*), parameter :: subname = '(uniform_data)'

       if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start'

-      ! ocean currents
-      uocn = c0
-      vocn = c0
+      ! check for optional spd
+      if (present(spd)) then
+         atm_val = spd
+      else
+         atm_val = c5 ! default
+      endif
+
       ! wind components
       if (dir == 'NE') then
-         uatm = c5
-         vatm = c5
+         uatm = atm_val
+         vatm = atm_val
       elseif (dir == 'N') then
          uatm = c0
-         vatm = c5
+         vatm = atm_val
       elseif (dir == 'E') then
-         uatm = c5
+         uatm = atm_val
+         vatm = c0
       else
          call abort_ice (subname//'ERROR: dir unknown, dir = '//trim(dir), &
             file=__FILE__, line=__LINE__)
       endif

       do iblk = 1, nblocks
-         do j = 1, ny_block
-            do i = 1, nx_block
-
-               ! wind stress
-               wind(i,j,iblk) = sqrt(uatm(i,j,iblk)**2 + vatm(i,j,iblk)**2)
-               tau = rhoa(i,j,iblk) * 0.0012_dbl_kind * wind(i,j,iblk)
-               strax(i,j,iblk) = tau * uatm(i,j,iblk)
-               stray(i,j,iblk) = tau * vatm(i,j,iblk)
-
-            enddo
-         enddo
+      do j = 1, ny_block
+      do i = 1, nx_block
+
+         ! wind stress
+         wind(i,j,iblk) = sqrt(uatm(i,j,iblk)**2 + vatm(i,j,iblk)**2)
+         tau = rhoa(i,j,iblk) * 0.0012_dbl_kind * wind(i,j,iblk)
+         strax(i,j,iblk) = tau * uatm(i,j,iblk)
+         stray(i,j,iblk) = tau * vatm(i,j,iblk)
+
+      enddo
+      enddo
       enddo ! nblocks

       end subroutine uniform_data
+!=======================================================================
+
+!
+      subroutine uniform_data_ocn(dir,spd)
+
+! uniform ocean current fields in some direction
+
+      use ice_domain, only: nblocks
+      use ice_domain_size, only: max_blocks
+      use ice_blocks, only: nx_block, ny_block, nghost
+      use ice_flux, only: uocn, vocn, uatm, vatm, wind, strax, stray
+      use ice_grid, only: grid_average_X2Y
+
+      character(len=*), intent(in) :: dir
+
+      real(kind=dbl_kind), intent(in), optional :: spd ! speed for test
+
+      ! local parameters
+
+      integer (kind=int_kind) :: &
+         iblk, i,j           ! loop indices
+
+      real(kind=dbl_kind) :: &
+         ocn_val             ! value to use for ocean currents
+
+      character(len=*), parameter :: subname = '(uniform_data_ocn)'
+
+      if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start'
+
+      if (present(spd)) then
+         ocn_val = spd
+      else
+         ocn_val = p1 ! default
+      endif
+
+      ! ocn components
+      if (dir == 'NE') then
+         uocn = ocn_val
+         vocn = ocn_val
+      elseif (dir == 'N') then
+         uocn = c0
+         vocn = ocn_val
+      elseif (dir == 'E') then
+         uocn = ocn_val
+         vocn = c0
+      else
+         call abort_ice (subname//'ERROR: dir unknown, dir = '//trim(dir), &
+            file=__FILE__, line=__LINE__)
+      endif
+      end subroutine uniform_data_ocn

 !=======================================================================

 subroutine get_wave_spec
From 2f65ca8a0818da48c40b1b20df3f333ff4243894 Mon Sep 17 00:00:00 2001
From: "David A. Bailey"
Date: Tue, 16 Nov 2021 18:04:12 -0700
Subject: [PATCH 029/109] Additional CD variables for the EVP dynamics (#14)

---
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90    | 168 +++++++++++++++++-
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 |  52 +++++-
 cicecore/cicedynB/general/ice_flux.F90        |  34 ++++
 3 files changed, 246 insertions(+), 8 deletions(-)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index 1e308b65a..aefb7679b 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -42,7 +42,8 @@ module ice_dyn_evp
       use ice_constants, only: c0, p027, p055, p111, p166, &
           p222, p25, p333, p5, c1
       use ice_dyn_shared, only: stepu, dyn_prep1, dyn_prep2, dyn_finish, &
-          ndte, yield_curve, ecci, denom1, arlx1i, fcor_blk, uvel_init, vvel_init, &
+          ndte, yield_curve, ecci, denom1, arlx1i, fcor_blk, fcorE_blk, fcorN_blk, &
+          uvel_init, vvel_init, uvelE_init, vvelE_init, uvelN_init, vvelN_init, &
           seabed_stress_factor_LKD, seabed_stress_factor_prob, seabed_stress_method, &
           seabed_stress, Ktens, revp
       use ice_fileunits, only: nu_diag
@@ -84,14 +85,22 @@ subroutine evp (dt)
          strairx, strairy, uocn, vocn, ss_tltx, ss_tlty, iceumask, fm, &
          strtltx, strtlty, strocnx, strocny, strintx, strinty, taubx, tauby, &
          strocnxT, strocnyT, strax, stray, &
+         strairxN, strairyN, uocnN, vocnN, ss_tltxN, ss_tltyN, icenmask, fmN, &
+         strtltxN, strtltyN, strocnxN, strocnyN, strintxN, strintyN, taubxN, taubyN, &
+         straxN, strayN, TbN, &
+         strairxE, strairyE, uocnE, vocnE, ss_tltxE, ss_tltyE, iceemask, fmE, &
+         strtltxE, strtltyE, strocnxE, strocnyE, strintxE, strintyE, taubxE, taubyE, &
+         straxE, strayE, TbE, &
          Tbu, hwater, &
          stressp_1, stressp_2, stressp_3, stressp_4, &
          stressm_1, stressm_2, stressm_3, stressm_4, &
         stress12_1, stress12_2, stress12_3, stress12_4
-      use ice_grid, only: tmask, umask, dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, &
+      use ice_grid, only: tmask, umask, nmask, emask, dxt, dyt, &
+          dxhy, dyhx, cxp, cyp, cxm, cym, &
           tarear, uarear, tinyarea, grid_average_X2Y, &
           grid_type, grid_system
-      use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, &
+      use ice_state, only: aice, vice, vsno, uvel, vvel, uvelN, vvelN, &
+          uvelE, vvelE, divu, shear, &
          aice_init, aice0, aicen, vicen, strength
       use ice_timers, only: timer_dynamics, timer_bound, &
          ice_timer_start, ice_timer_stop, timer_evp_1d, timer_evp_2d
@@ -112,11 +121,17 @@ subroutine evp (dt)
      integer (kind=int_kind), dimension(max_blocks) :: &
         icellt   , & ! no. of cells where icetmask = 1
+        icelln   , & ! no. of cells where icenmask = 1
+        icelle   , & ! no. of cells where iceemask = 1
        icellu       ! no. of cells where iceumask = 1

      integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks) :: &
        indxti   , & ! compressed index in i-direction
        indxtj   , & ! compressed index in j-direction
+        indxei   , & ! compressed index in i-direction
+        indxej   , & ! compressed index in j-direction
+        indxni   , & ! compressed index in i-direction
+        indxnj   , & ! compressed index in j-direction
        indxui   , & ! compressed index in i-direction
        indxuj       ! compressed index in j-direction
@@ -130,6 +145,24 @@ subroutine evp (dt)
        umass    , & ! total mass of ice and snow (u grid)
        umassdti     ! mass of U-cell/dte (kg/m^2 s)

+     real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: &
+       waterxN  , & ! for ocean stress calculation, x (m/s)
+       wateryN  , & ! for ocean stress calculation, y (m/s)
+       forcexN  , & ! work array: combined atm stress and ocn tilt, x
+       forceyN  , & ! work array: combined atm stress and ocn tilt, y
+       aiN      , & ! ice fraction on N-grid
+       nmass    , & ! total mass of ice and snow (N grid)
+       nmassdti     ! mass of N-cell/dte (kg/m^2 s)
+
+     real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: &
+       waterxE  , & ! for ocean stress calculation, x (m/s)
+       wateryE  , & ! for ocean stress calculation, y (m/s)
+       forcexE  , & ! work array: combined atm stress and ocn tilt, x
+       forceyE  , & ! work array: combined atm stress and ocn tilt, y
+       aiE      , & ! ice fraction on E-grid
+       emass    , & ! total mass of ice and snow (E grid)
+       emassdti     ! mass of E-cell/dte (kg/m^2 s)
+
      real (kind=dbl_kind), allocatable :: fld2(:,:,:,:)

      real (kind=dbl_kind), dimension(nx_block,ny_block,8):: &
@@ -207,6 +240,12 @@ subroutine evp (dt)
                         strairx  (:,:,iblk), strairy  (:,:,iblk), &
                         tmass    (:,:,iblk), icetmask (:,:,iblk))

+         if (grid_system == 'CD') then
+            strairxN(:,:,iblk) = strairxT(:,:,iblk)
+            strairyN(:,:,iblk) = strairyT(:,:,iblk)
+            strairxE(:,:,iblk) = strairxT(:,:,iblk)
+            strairyE(:,:,iblk) = strairyT(:,:,iblk)
+         endif
      enddo                     ! iblk
      !$OMP END PARALLEL DO
@@ -222,6 +261,12 @@ subroutine evp (dt)
      call grid_average_X2Y('T2UF',tmass,umass)
      call grid_average_X2Y('T2UF',aice_init, aiu)
+     if (grid_system == 'CD') then
+        call grid_average_X2Y('T2EF',tmass,emass)
+        call grid_average_X2Y('T2EF',aice_init, aiE)
+        call grid_average_X2Y('T2NF',tmass,nmass)
+        call grid_average_X2Y('T2NF',aice_init, aiN)
+     endif
      !----------------------------------------------------------------
      ! Set wind stress to values supplied via NEMO or other forcing
      ! This wind stress is rotated on u grid and multiplied by aice
@@ -243,6 +288,29 @@ subroutine evp (dt)
         call grid_average_X2Y('T2UF',strairy)
      endif

+     if (grid_system == 'CD') then
+
+     if (.not. 
calc_strair) then + strairxN(:,:,:) = strax(:,:,:) + strairyN(:,:,:) = stray(:,:,:) + strairxE(:,:,:) = strax(:,:,:) + strairyE(:,:,:) = stray(:,:,:) + else + call ice_HaloUpdate (strairxN, halo_info, & + field_loc_center, field_type_vector) + call ice_HaloUpdate (strairyN, halo_info, & + field_loc_center, field_type_vector) + call ice_HaloUpdate (strairxE, halo_info, & + field_loc_center, field_type_vector) + call ice_HaloUpdate (strairyE, halo_info, & + field_loc_center, field_type_vector) + call grid_average_X2Y('T2NF',strairxN) + call grid_average_X2Y('T2NF',strairyN) + call grid_average_X2Y('T2EF',strairxE) + call grid_average_X2Y('T2EF',strairyE) + endif + + endif ! tcraig, tcx, threading here leads to some non-reproducbile results and failures in icepack_ice_strength ! need to do more debugging !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) @@ -307,6 +375,100 @@ subroutine evp (dt) enddo ! iblk !$TCXOMP END PARALLEL DO + if (grid_system == 'CD') then + + !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + + !----------------------------------------------------------------- + ! more preparation for dynamics on N grid + !----------------------------------------------------------------- + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + call dyn_prep2 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icellt(iblk), icelln(iblk), & + indxti (:,iblk), indxtj (:,iblk), & + indxni (:,iblk), indxnj (:,iblk), & + aiN (:,:,iblk), nmass (:,:,iblk), & + nmassdti (:,:,iblk), fcorN_blk (:,:,iblk), & + nmask (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + strairxN (:,:,iblk), strairyN (:,:,iblk), & + ss_tltxN (:,:,iblk), ss_tltyN (:,:,iblk), & + icetmask (:,:,iblk), icenmask (:,:,iblk), & + fmN (:,:,iblk), dt, & + strtltxN (:,:,iblk), strtltyN (:,:,iblk), & + strocnxN (:,:,iblk), strocnyN (:,:,iblk), & + strintxN (:,:,iblk), strintyN (:,:,iblk), & + taubxN (:,:,iblk), taubyN (:,:,iblk), & + waterxN (:,:,iblk), wateryN (:,:,iblk), & + forcexN (:,:,iblk), forceyN (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + uvelN_init (:,:,iblk), vvelN_init (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + TbN (:,:,iblk)) + + enddo ! iblk + !$TCXOMP END PARALLEL DO + + !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + + !----------------------------------------------------------------- + ! 
more preparation for dynamics on N grid
+      !-----------------------------------------------------------------

+         this_block = get_block(blocks_ice(iblk),iblk)
+         ilo = this_block%ilo
+         ihi = this_block%ihi
+         jlo = this_block%jlo
+         jhi = this_block%jhi
+
+         call dyn_prep2 (nx_block,             ny_block,             &
+                         ilo, ihi,             jlo, jhi,             &
+                         icellt(iblk),         icelle(iblk),         &
+                         indxti      (:,iblk), indxtj      (:,iblk), &
+                         indxei      (:,iblk), indxej      (:,iblk), &
+                         aiE       (:,:,iblk), emass     (:,:,iblk), &
+                         emassdti  (:,:,iblk), fcorE_blk (:,:,iblk), &
+                         emask     (:,:,iblk),                       &
+                         uocnE     (:,:,iblk), vocnE     (:,:,iblk), &
+                         strairxE  (:,:,iblk), strairyE  (:,:,iblk), &
+                         ss_tltxE  (:,:,iblk), ss_tltyE  (:,:,iblk), &
+                         icetmask  (:,:,iblk), iceemask  (:,:,iblk), &
+                         fmE       (:,:,iblk), dt,                   &
+                         strtltxE  (:,:,iblk), strtltyE  (:,:,iblk), &
+                         strocnxE  (:,:,iblk), strocnyE  (:,:,iblk), &
+                         strintxE  (:,:,iblk), strintyE  (:,:,iblk), &
+                         taubxE    (:,:,iblk), taubyE    (:,:,iblk), &
+                         waterxE   (:,:,iblk), wateryE   (:,:,iblk), &
+                         forcexE   (:,:,iblk), forceyE   (:,:,iblk), &
+                         stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), &
+                         stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), &
+                         stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), &
+                         stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), &
+                         stress12_1(:,:,iblk), stress12_2(:,:,iblk), &
+                         stress12_3(:,:,iblk), stress12_4(:,:,iblk), &
+                         uvelE_init (:,:,iblk), vvelE_init (:,:,iblk), &
+                         uvelE     (:,:,iblk), vvelE     (:,:,iblk), &
+                         TbE       (:,:,iblk))
+
+      enddo  ! iblk
+      !$TCXOMP END PARALLEL DO
+
+      endif ! grid_system
+
      call icepack_warnings_flush(nu_diag)
      if (icepack_warnings_aborted()) call abort_ice(error_message=subname, &
         file=__FILE__, line=__LINE__)
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
index 1d27ff792..2f569d62f 100755
--- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
@@ -84,10 +84,22 @@ module ice_dyn_shared
    real (kind=dbl_kind), allocatable, public :: &
       fcor_blk(:,:,:)   ! Coriolis parameter (1/s)

+   real (kind=dbl_kind), allocatable, public :: &
+      fcorE_blk(:,:,:), & ! Coriolis parameter at E points (1/s)
+      fcorN_blk(:,:,:)    ! Coriolis parameter at N points (1/s)
+
    real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: &
       uvel_init, & ! x-component of velocity (m/s), beginning of timestep
       vvel_init    ! y-component of velocity (m/s), beginning of timestep

+   real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: &
+      uvelN_init, & ! x-component of velocity (m/s), beginning of timestep
+      vvelN_init    ! y-component of velocity (m/s), beginning of timestep
+
+   real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: &
+      uvelE_init, & ! x-component of velocity (m/s), beginning of timestep
+      vvelE_init    ! y-component of velocity (m/s), beginning of timestep
+
    ! ice isotropic tensile strength parameter
    real (kind=dbl_kind), public :: &
       Ktens         ! T=Ktens*P (tensile strength: see Konig and Holland, 2010)
@@ -121,6 +133,16 @@ subroutine alloc_dyn_shared
         stat=ierr)
      if (ierr/=0) call abort_ice('(alloc_dyn_shared): Out of memory')

+      if (grid_system == 'CD') then
+         allocate( &
+            uvelE_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep
+            vvelE_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep
+            uvelN_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep
+            vvelN_init (nx_block,ny_block,max_blocks), & ! 
y-component of velocity (m/s), beginning of timestep + stat=ierr) + if (ierr/=0) call abort_ice('(alloc_dyn_shared): Out of memory') + endif + end subroutine alloc_dyn_shared !======================================================================= @@ -138,7 +160,7 @@ subroutine init_dyn (dt) stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN, divu, shear - use ice_grid, only: ULAT + use ice_grid, only: ULAT, NLAT, ELAT real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -161,6 +183,11 @@ subroutine init_dyn (dt) allocate(fcor_blk(nx_block,ny_block,max_blocks)) + if (grid_system == 'CD') then + allocate(fcorE_blk(nx_block,ny_block,max_blocks)) + allocate(fcorN_blk(nx_block,ny_block,max_blocks)) + endif + !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block @@ -170,10 +197,10 @@ subroutine init_dyn (dt) uvel(i,j,iblk) = c0 ! m/s vvel(i,j,iblk) = c0 ! m/s if (grid_system == 'CD') then ! extra velocity variables - uvelE = c0 - vvelE = c0 - uvelN = c0 - vvelN = c0 + uvelE(i,j,iblk) = c0 + vvelE(i,j,iblk) = c0 + uvelN(i,j,iblk) = c0 + vvelN(i,j,iblk) = c0 endif ! strain rates @@ -191,6 +218,21 @@ subroutine init_dyn (dt) fcor_blk(i,j,iblk) = c2*omega*sin(ULAT(i,j,iblk)) ! 1/s endif + if (grid_system == 'CD') then + + if (trim(coriolis) == 'constant') then + fcorE_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. Hem; 1/s + fcorN_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. Hem; 1/s + else if (trim(coriolis) == 'zero') then + fcorE_blk(i,j,iblk) = 0.0 + fcorN_blk(i,j,iblk) = 0.0 + else + fcorE_blk(i,j,iblk) = c2*omega*sin(ELAT(i,j,iblk)) ! 1/s + fcorN_blk(i,j,iblk) = c2*omega*sin(NLAT(i,j,iblk)) ! 1/s + endif + + endif + ! stress tensor, kg/s^2 stressp_1 (i,j,iblk) = c0 stressp_2 (i,j,iblk) = c0 diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90 index 1b17c130d..65c21f6d9 100644 --- a/cicecore/cicedynB/general/ice_flux.F90 +++ b/cicecore/cicedynB/general/ice_flux.F90 @@ -40,12 +40,24 @@ module ice_flux ! in from atmos (if .not.calc_strair) strax , & ! wind stress components (N/m^2) stray , & ! + straxE , & ! wind stress components (N/m^2) + strayE , & ! + straxN , & ! wind stress components (N/m^2) + strayN , & ! ! in from ocean uocn , & ! ocean current, x-direction (m/s) vocn , & ! ocean current, y-direction (m/s) + uocnE , & ! ocean current, x-direction (m/s) + vocnE , & ! ocean current, y-direction (m/s) + uocnN , & ! ocean current, x-direction (m/s) + vocnN , & ! ocean current, y-direction (m/s) ss_tltx , & ! sea surface slope, x-direction (m/m) ss_tlty , & ! sea surface slope, y-direction + ss_tltxE, & ! sea surface slope, x-direction (m/m) + ss_tltyE, & ! sea surface slope, y-direction + ss_tltxN, & ! sea surface slope, x-direction (m/m) + ss_tltyN, & ! sea surface slope, y-direction hwater , & ! water depth for seabed stress calc (landfast ice) ! out to atmosphere @@ -128,6 +140,14 @@ module ice_flux dimension (:,:,:), allocatable, public :: & iceumask ! ice extent mask (U-cell) + logical (kind=log_kind), & + dimension (:,:,:), allocatable, public :: & + icenmask ! ice extent mask (N-cell) + + logical (kind=log_kind), & + dimension (:,:,:), allocatable, public :: & + iceemask ! ice extent mask (E-cell) + ! internal real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & @@ -565,6 +585,12 @@ subroutine alloc_flux if (grid_system == "CD") & allocate( & + straxN (nx_block,ny_block,max_blocks), & ! 
wind stress components (N/m^2) + strayN (nx_block,ny_block,max_blocks), & ! + uocnN (nx_block,ny_block,max_blocks), & ! ocean current, x-direction (m/s) + vocnN (nx_block,ny_block,max_blocks), & ! ocean current, y-direction (m/s) + ss_tltxN (nx_block,ny_block,max_blocks), & ! sea surface slope, x-direction (m/m) + ss_tltyN (nx_block,ny_block,max_blocks), & ! sea surface slope, y-direction taubxN (nx_block,ny_block,max_blocks), & ! seabed stress (x) at N points (N/m^2) taubyN (nx_block,ny_block,max_blocks), & ! seabed stress (y) at N points (N/m^2) strairxN (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction at N points @@ -575,6 +601,13 @@ subroutine alloc_flux strtltyN (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, y-direction at N points strintxN (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, x at N points (N/m^2) strintyN (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, y at N points (N/m^2) + icenmask (nx_block,ny_block,max_blocks), & ! ice extent mask (N-cell) + straxE (nx_block,ny_block,max_blocks), & ! wind stress components (N/m^2) + strayE (nx_block,ny_block,max_blocks), & ! + uocnE (nx_block,ny_block,max_blocks), & ! ocean current, x-direction (m/s) + vocnE (nx_block,ny_block,max_blocks), & ! ocean current, y-direction (m/s) + ss_tltxE (nx_block,ny_block,max_blocks), & ! sea surface slope, x-direction (m/m) + ss_tltyE (nx_block,ny_block,max_blocks), & ! sea surface slope, y-direction taubxE (nx_block,ny_block,max_blocks), & ! seabed stress (x) at E points (N/m^2) taubyE (nx_block,ny_block,max_blocks), & ! seabed stress (y) at E points (N/m^2) strairxE (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction at E points @@ -585,6 +618,7 @@ subroutine alloc_flux strtltyE (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, y-direction at E points strintxE (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, x at E points (N/m^2) strintyE (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, y at E points (N/m^2) + iceemask (nx_block,ny_block,max_blocks), & ! ice extent mask (E-cell) stat=ierr) if (ierr/=0) call abort_ice('(alloc_flux): Out of memory') From 5435aba06ff349ea06ee5d0427453e81b2cb28dc Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Wed, 17 Nov 2021 12:37:08 -0700 Subject: [PATCH 030/109] Change some indents and change 0 to c0. (#15) * Additional CD variables for the EVP dynamics * Fix some indents and change 0 to c0 --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 40 +++++++++---------- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 22 +++++----- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index aefb7679b..fe2ceaf3e 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -290,25 +290,25 @@ subroutine evp (dt) if (grid_system == 'CD') then - if (.not. 
calc_strair) then - strairxN(:,:,:) = strax(:,:,:) - strairyN(:,:,:) = stray(:,:,:) - strairxE(:,:,:) = strax(:,:,:) - strairyE(:,:,:) = stray(:,:,:) - else - call ice_HaloUpdate (strairxN, halo_info, & - field_loc_center, field_type_vector) - call ice_HaloUpdate (strairyN, halo_info, & - field_loc_center, field_type_vector) - call ice_HaloUpdate (strairxE, halo_info, & - field_loc_center, field_type_vector) - call ice_HaloUpdate (strairyE, halo_info, & - field_loc_center, field_type_vector) - call grid_average_X2Y('T2NF',strairxN) - call grid_average_X2Y('T2NF',strairyN) - call grid_average_X2Y('T2EF',strairxE) - call grid_average_X2Y('T2EF',strairyE) - endif + if (.not. calc_strair) then + strairxN(:,:,:) = strax(:,:,:) + strairyN(:,:,:) = stray(:,:,:) + strairxE(:,:,:) = strax(:,:,:) + strairyE(:,:,:) = stray(:,:,:) + else + call ice_HaloUpdate (strairxN, halo_info, & + field_loc_center, field_type_vector) + call ice_HaloUpdate (strairyN, halo_info, & + field_loc_center, field_type_vector) + call ice_HaloUpdate (strairxE, halo_info, & + field_loc_center, field_type_vector) + call ice_HaloUpdate (strairyE, halo_info, & + field_loc_center, field_type_vector) + call grid_average_X2Y('T2NF',strairxN) + call grid_average_X2Y('T2NF',strairyN) + call grid_average_X2Y('T2EF',strairxE) + call grid_average_X2Y('T2EF',strairyE) + endif endif ! tcraig, tcx, threading here leads to some non-reproducbile results and failures in icepack_ice_strength @@ -426,7 +426,7 @@ subroutine evp (dt) do iblk = 1, nblocks !----------------------------------------------------------------- - ! more preparation for dynamics on N grid + ! more preparation for dynamics on E grid !----------------------------------------------------------------- this_block = get_block(blocks_ice(iblk),iblk) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 2f569d62f..66b8a2aa0 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -213,23 +213,23 @@ subroutine init_dyn (dt) if (trim(coriolis) == 'constant') then fcor_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. Hem; 1/s else if (trim(coriolis) == 'zero') then - fcor_blk(i,j,iblk) = 0.0 + fcor_blk(i,j,iblk) = c0 else fcor_blk(i,j,iblk) = c2*omega*sin(ULAT(i,j,iblk)) ! 1/s endif if (grid_system == 'CD') then - if (trim(coriolis) == 'constant') then - fcorE_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. Hem; 1/s - fcorN_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. Hem; 1/s - else if (trim(coriolis) == 'zero') then - fcorE_blk(i,j,iblk) = 0.0 - fcorN_blk(i,j,iblk) = 0.0 - else - fcorE_blk(i,j,iblk) = c2*omega*sin(ELAT(i,j,iblk)) ! 1/s - fcorN_blk(i,j,iblk) = c2*omega*sin(NLAT(i,j,iblk)) ! 1/s - endif + if (trim(coriolis) == 'constant') then + fcorE_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. Hem; 1/s + fcorN_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. Hem; 1/s + else if (trim(coriolis) == 'zero') then + fcorE_blk(i,j,iblk) = c0 + fcorN_blk(i,j,iblk) = c0 + else + fcorE_blk(i,j,iblk) = c2*omega*sin(ELAT(i,j,iblk)) ! 1/s + fcorN_blk(i,j,iblk) = c2*omega*sin(NLAT(i,j,iblk)) ! 1/s + endif endif From 256faed2ef07d5294ccdcf96967c635f12221740 Mon Sep 17 00:00:00 2001 From: "David A. 
Bailey" Date: Wed, 17 Nov 2021 12:38:28 -0700 Subject: [PATCH 031/109] Add icespd and icedir at U, E, and N points (#16) --- cicecore/cicedynB/analysis/ice_history.F90 | 87 ++++++++++++++++++- .../cicedynB/analysis/ice_history_shared.F90 | 9 ++ 2 files changed, 95 insertions(+), 1 deletion(-) diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90 index a257cc19a..4ade53cfb 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -281,8 +281,12 @@ subroutine init_hist (dt) if (grid_system == 'CD') then f_uvelE = f_uvel f_vvelE = f_vvel + f_icespdE = f_icespd + f_icedirE = f_icedir f_uvelN = f_uvel f_vvelN = f_vvel + f_icespdN = f_icespd + f_icedirN = f_icedir f_strairxN = f_strairx f_strairyN = f_strairy f_strairxE = f_strairx @@ -359,10 +363,16 @@ subroutine init_hist (dt) call broadcast_scalar (f_aice, master_task) call broadcast_scalar (f_uvel, master_task) call broadcast_scalar (f_vvel, master_task) + call broadcast_scalar (f_icespd, master_task) + call broadcast_scalar (f_icedir, master_task) call broadcast_scalar (f_uvelE, master_task) - call broadcast_scalar (f_uvelN, master_task) call broadcast_scalar (f_vvelE, master_task) + call broadcast_scalar (f_icespdE, master_task) + call broadcast_scalar (f_icedirE, master_task) + call broadcast_scalar (f_uvelN, master_task) call broadcast_scalar (f_vvelN, master_task) + call broadcast_scalar (f_icespdN, master_task) + call broadcast_scalar (f_icedirN, master_task) call broadcast_scalar (f_uatm, master_task) call broadcast_scalar (f_vatm, master_task) call broadcast_scalar (f_atmspd, master_task) @@ -623,6 +633,16 @@ subroutine init_hist (dt) "positive is y direction on E grid", c1, c0, & ns1, f_vvelE) + call define_hist_field(n_icespdE,"icespdE","m/s",estr2D, ecstr, & + "sea ice speed", & + "vector magnitude on E grid", c1, c0, & + ns1, f_icespdE) + + call define_hist_field(n_icedirE,"icedirE","deg",estr2D, ecstr, & + "sea ice direction", & + "vector direction - coming from on E grid", c1, c0, & + ns1, f_icedirE) + call define_hist_field(n_uvelN,"uvelN","m/s",nstr2D, ncstr, & "ice velocity (x)", & "positive is x direction on N grid", c1, c0, & @@ -633,6 +653,16 @@ subroutine init_hist (dt) "positive is y direction on N grid", c1, c0, & ns1, f_vvelN) + call define_hist_field(n_icespdN,"icespdN","m/s",nstr2D, ncstr, & + "sea ice speed", & + "vector magnitude on N grid", c1, c0, & + ns1, f_icespdN) + + call define_hist_field(n_icedirN,"icedirN","deg",nstr2D, ncstr, & + "sea ice direction", & + "vector direction - coming from on N grid", c1, c0, & + ns1, f_icedirN) + call define_hist_field(n_uvel,"uvel","m/s",ustr2D, ucstr, & "ice velocity (x)", & "positive is x direction on U grid", c1, c0, & @@ -643,6 +673,16 @@ subroutine init_hist (dt) "positive is y direction on U grid", c1, c0, & ns1, f_vvel) + call define_hist_field(n_icespd,"icespd","m/s",ustr2D, ucstr, & + "sea ice speed", & + "vector magnitude", c1, c0, & + ns1, f_icespd) + + call define_hist_field(n_icedir,"icedir","deg",ustr2D, ucstr, & + "sea ice direction", & + "vector direction - coming from", c1, c0, & + ns1, f_icedir) + call define_hist_field(n_uatm,"uatm","m/s",ustr2D, ucstr, & "atm velocity (x)", & "positive is x direction on U grid", c1, c0, & @@ -2141,14 +2181,59 @@ subroutine accum_hist (dt) call accum_hist_field(n_uvel, iblk, uvel(:,:,iblk), a2D) if (f_vvel (1:1) /= 'x') & call accum_hist_field(n_vvel, iblk, vvel(:,:,iblk), a2D) + if (f_icespd (1:1) /= 'x') & + call 
accum_hist_field(n_icespd, iblk, sqrt( & + (uvel(:,:,iblk)*uvel(:,:,iblk)) + & + (vvel(:,:,iblk)*vvel(:,:,iblk))), a2D) + if (f_icedir(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (abs(uvel(i,j,iblk)) > puny .or. abs(vvel(i,j,iblk)) > puny) & + worka(i,j) = atan2(uvel(i,j,iblk),vvel(i,j,iblk))*rad_to_deg + worka(i,j) = worka(i,j) + c180 + enddo + enddo + call accum_hist_field(n_icedir, iblk, worka(:,:), a2D) + endif if (f_uvelN (1:1) /= 'x') & call accum_hist_field(n_uvelN, iblk, uvelN(:,:,iblk), a2D) if (f_vvelN (1:1) /= 'x') & call accum_hist_field(n_vvelN, iblk, vvelN(:,:,iblk), a2D) + if (f_icespdN (1:1) /= 'x') & + call accum_hist_field(n_icespdN, iblk, sqrt( & + (uvelN(:,:,iblk)*uvelN(:,:,iblk)) + & + (vvelN(:,:,iblk)*vvelN(:,:,iblk))), a2D) + if (f_icedirN(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (abs(uvelN(i,j,iblk)) > puny .or. abs(vvelN(i,j,iblk)) > puny) & + worka(i,j) = atan2(uvelN(i,j,iblk),vvelN(i,j,iblk))*rad_to_deg + worka(i,j) = worka(i,j) + c180 + enddo + enddo + call accum_hist_field(n_icedirN, iblk, worka(:,:), a2D) + endif if (f_uvelE (1:1) /= 'x') & call accum_hist_field(n_uvelE, iblk, uvelE(:,:,iblk), a2D) if (f_vvelE (1:1) /= 'x') & call accum_hist_field(n_vvelE, iblk, vvelE(:,:,iblk), a2D) + if (f_icespdE (1:1) /= 'x') & + call accum_hist_field(n_icespdE, iblk, sqrt( & + (uvelE(:,:,iblk)*uvelE(:,:,iblk)) + & + (vvelE(:,:,iblk)*vvelE(:,:,iblk))), a2D) + if (f_icedirE(1:1) /= 'x') then + worka(:,:) = c0 + do j = jlo, jhi + do i = ilo, ihi + if (abs(uvelE(i,j,iblk)) > puny .or. abs(vvelE(i,j,iblk)) > puny) & + worka(i,j) = atan2(uvelE(i,j,iblk),vvelE(i,j,iblk))*rad_to_deg + worka(i,j) = worka(i,j) + c180 + enddo + enddo + call accum_hist_field(n_icedirE, iblk, worka(:,:), a2D) + endif if (f_uatm (1:1) /= 'x') & call accum_hist_field(n_uatm, iblk, uatm(:,:,iblk), a2D) if (f_vatm (1:1) /= 'x') & diff --git a/cicecore/cicedynB/analysis/ice_history_shared.F90 b/cicecore/cicedynB/analysis/ice_history_shared.F90 index cbb660ae5..aea1d4bcf 100644 --- a/cicecore/cicedynB/analysis/ice_history_shared.F90 +++ b/cicecore/cicedynB/analysis/ice_history_shared.F90 @@ -235,8 +235,11 @@ module ice_history_shared f_snowfrac = 'x', f_snowfracn = 'x', & f_Tsfc = 'm', f_aice = 'm', & f_uvel = 'm', f_vvel = 'm', & + f_icespd = 'm', f_icedir = 'm', & f_uvelE = 'x', f_vvelE = 'x', & + f_icespdE = 'x', f_icedirE = 'x', & f_uvelN = 'x', f_vvelN = 'x', & + f_icespdN = 'x', f_icedirN = 'x', & f_uatm = 'm', f_vatm = 'm', & f_atmspd = 'm', f_atmdir = 'm', & f_fswup = 'm', & @@ -398,9 +401,12 @@ module ice_history_shared f_snowfrac, f_snowfracn, & f_Tsfc, f_aice , & f_uvel, f_vvel , & + f_icespd, f_icedir , & ! For now, don't allow the users to modify the CD grid quantities. ! f_uvelE, f_vvelE , & +! f_icespdE, f_icedirE , & ! f_uvelN, f_vvelN , & +! 
f_icespdN, f_icedirN , &
        f_uatm, f_vatm , &
        f_atmspd, f_atmdir , &
        f_fswup, &
@@ -586,8 +592,11 @@ module ice_history_shared
        n_snowfrac , n_snowfracn , &
        n_Tsfc , n_aice , &
        n_uvel , n_vvel , &
+       n_icespd , n_icedir , &
        n_uvelE , n_vvelE , &
+       n_icespdE , n_icedirE , &
        n_uvelN , n_vvelN , &
+       n_icespdN , n_icedirN , &
        n_uatm , n_vatm , &
        n_atmspd , n_atmdir , &
        n_sice , &

From 0aeee2cb81150b3cc0ad6c526e7e4c8abf072c33 Mon Sep 17 00:00:00 2001
From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com>
Date: Wed, 17 Nov 2021 17:30:50 -0500
Subject: [PATCH 032/109] New div_stress subroutine (#18)

* Initial coding for strain_rates_U
* Initial coding of div_stress subroutine
* finished div_stress subroutine...compiles and runs
* Modified if to case select following comments from Phil
---
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90    | 107 ++++++++++++++++++
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 |   2 +-
 2 files changed, 108 insertions(+), 1 deletion(-)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index fe2ceaf3e..843a000b9 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -1203,7 +1203,114 @@ subroutine stress_T (nx_block, ny_block, &
      endif

      end subroutine stress_T
+
+!=======================================================================
+
+! Computes divergence of stress tensor at the E or N point for the mom equation
+
+! author: JF Lemieux, ECCC
+! Nov 2021
+
+      subroutine div_stress (nx_block, ny_block, &
+                             ksub, icell, &
+                             indxi, indxj, &
+                             dxE_N, dyE_N, &
+                             dxT_U, dyT_U, &
+                             arear, &
+                             stressp, stressm, &
+                             stress12, &
+                             F1, F2, &
+                             grid_location)
+
+      use ice_dyn_shared, only: strain_rates_T, deformations_T, &
+          viscous_coeffs_and_rep_pressure_T
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block, & ! block dimensions
+         ksub , &              ! subcycling step
+         icell                 ! no. of cells where epm (or npm) = 1
+
+      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
+         indxi , & ! compressed index in i-direction
+         indxj     ! compressed index in j-direction
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+         dxE_N , &   ! width of E or N-cell through the middle (m)
+         dyE_N , &   ! height of E or N-cell through the middle (m)
+         dxT_U , &   ! width of T or U-cell through the middle (m)
+         dyT_U , &   ! height of T or U-cell through the middle (m)
+         arear , &   ! 1/earea or 1/narea
+         stressp , & ! sigma11+sigma22
+         stressm , & ! sigma11-sigma22
+         stress12    ! sigma12
+
+      character(len=*), intent(in) :: &
+         grid_location ! E (East) or N (North) ! TO BE IMPROVED!!!!
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: &
+         F1 , & ! div of stress tensor for u component
+         F2     ! div of stress tensor for v component
+
+      ! local variables
+
+      integer (kind=int_kind) :: &
+         i, j, ij
+
+      character(len=*), parameter :: subname = '(div_stress)'
+
+!!! Instead of having the if statements below we could define for example
+! i+ci, j+cj where ci, cj would change with grid_location
+      do ij = 1, icell
+         i = indxi(ij)
+         j = indxj(ij)
+
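+         ! Schematically, with sigP = sigma11+sigma22 and sigM = sigma11-sigma22,
+         ! the E-point x-component assembled below is
+         !    F1 = arear * (   dy*delta_x(sigP)/2
+         !                   + delta_x(dyT**2 * sigM)/(2*dy)
+         !                   + delta_y(dxT**2 * sigma12)/dx )
+         ! i.e. a metric-weighted form of d(sigma11)/dx + d(sigma12)/dy;
+         ! F2 and the N-point expressions follow by symmetry.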
+         !-----------------------------------------------------------------
+         ! F1,F2 : div of stress tensor for u,v components
+         !-----------------------------------------------------------------
+
+         select case (trim(grid_location))
+         case('E')
+
+            F1(i,j) = arear(i,j) * &
+                ( p5 * dyE_N(i,j) * ( stressp(i+1,j)-stressp(i,j) ) &
+                + (p5/dyE_N(i,j)) * ( (dyT_U(i+1,j)**2) * stressm(i+1,j) &
+                                     -(dyT_U(i,j)**2)*stressm(i,j) ) &
+                + (c1/dxE_N(i,j)) * ( (dxT_U(i,j)**2) * stress12(i,j) &
+                                     -(dxT_U(i,j-1)**2)*stress12(i,j-1) ) )
+
+            F2(i,j) = arear(i,j) * &
+                ( p5 * dxE_N(i,j) * ( stressp(i,j)-stressp(i,j-1) ) &
+                - (p5/dxE_N(i,j)) * ( (dxT_U(i,j)**2) * stressm(i,j) &
+                                     -(dxT_U(i,j-1)**2)*stressm(i,j-1) ) &
+                + (c1/dyE_N(i,j)) * ( (dyT_U(i+1,j)**2) * stress12(i+1,j) &
+                                     -(dyT_U(i,j)**2)*stress12(i,j) ) )
+
+         case('N')
+
+            F1(i,j) = arear(i,j) * &
+                ( p5 * dyE_N(i,j) * ( stressp(i,j)-stressp(i-1,j) ) &
+                + (p5/dyE_N(i,j)) * ( (dyT_U(i,j)**2) * stressm(i,j) &
+                                     -(dyT_U(i-1,j)**2)*stressm(i-1,j) ) &
+                + (c1/dxE_N(i,j)) * ( (dxT_U(i,j+1)**2) * stress12(i,j+1) &
+                                     -(dxT_U(i,j)**2)*stress12(i,j) ) )
+
+            F2(i,j) = arear(i,j) * &
+                ( p5 * dxE_N(i,j) * ( stressp(i,j+1)-stressp(i,j) ) &
+                - (p5/dxE_N(i,j)) * ( (dxT_U(i,j+1)**2) * stressm(i,j+1) &
+                                     -(dxT_U(i,j)**2)*stressm(i,j) ) &
+                + (c1/dyE_N(i,j)) * ( (dyT_U(i,j)**2) * stress12(i,j) &
+                                     -(dyT_U(i-1,j)**2)*stress12(i-1,j) ) )
+         case default
+            call abort_ice(subname // ' unknown grid_location: ' // grid_location)
+         end select
+
+      enddo ! ij
+
+      end subroutine div_stress
+
!=======================================================================

end module ice_dyn_evp
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
index 66b8a2aa0..40996b03b 100755
--- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
@@ -1563,7 +1563,7 @@ subroutine strain_rates_T (nx_block, ny_block, &
      DeltaT = sqrt(divT**2 + e_factor*(tensionT**2 + shearT**2))

      end subroutine strain_rates_T
-
+
!=======================================================================
! Computes viscous coefficients and replacement pressure for stress
! calculations. Note that tensile strength is included here.

From 838bf3093f0e4bbececa6e922bdfa8f731b3a609 Mon Sep 17 00:00:00 2001
From: TRasmussen <33480590+TillRasmussen@users.noreply.github.com>
Date: Thu, 18 Nov 2021 00:47:56 +0100
Subject: [PATCH 033/109] Changes in ice_dyn* are interpolation of uvelE/vvelN
 to B grid. ice_t… (#17)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Changes in ice_dyn* are interpolation of uvelE/vvelN to B grid. ice_transport
  files are changed so that velocities are interpolated to the B grid for the
  departure point function and can be kept at the E or N grid where possible.

* changed according to comments. changed average from F to S and
commented out in vp and eap * comment out grid_system, uvelE and vvelN --- cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 13 +++- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 6 +- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 13 +++- .../dynamics/ice_transport_driver.F90 | 77 +++++++++++-------- .../cicedynB/dynamics/ice_transport_remap.F90 | 30 +++++++- 5 files changed, 96 insertions(+), 43 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index cf5a2fd67..e5a89b118 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -134,9 +134,9 @@ subroutine eap (dt) stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_grid, only: tmask, umask, dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, grid_average_X2Y + tarear, uarear, grid_average_X2Y!, grid_system commented out until implementation of cd-grid use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & - aice_init, aice0, aicen, vicen, strength + aice_init, aice0, aicen, vicen, strength !, uvelE, vvelN grid_system commented out until implementation of cd-grid ! use ice_timers, only: timer_dynamics, timer_bound, & ! ice_timer_start, ice_timer_stop, & ! timer_tmp1, timer_tmp2, timer_tmp3 @@ -558,7 +558,14 @@ subroutine eap (dt) field_loc_NEcorner, field_type_vector) call grid_average_X2Y('U2TF',strocnxT) ! shift call grid_average_X2Y('U2TF',strocnyT) - +! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport +! commented out in order to focus on EVP for now within the cdgrid +! should be used when routine is ready +! if (grid_system == 'CD') then +! call grid_average_X2Y('E2US',uvelE,uvel) +! call grid_average_X2Y('N2US',vvelN,vvel) +! endif +!end comment out call ice_timer_stop(timer_dynamics) ! dynamics end subroutine eap diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 843a000b9..6ad42838f 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -729,7 +729,11 @@ subroutine evp (dt) field_loc_NEcorner, field_type_vector) call grid_average_X2Y('U2TF',strocnxT) ! shift call grid_average_X2Y('U2TF',strocnyT) - +! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport + if (grid_system == 'CD') then + call grid_average_X2Y('E2US',uvelE,uvel) + call grid_average_X2Y('N2US',vvelN,vvel) + endif call ice_timer_stop(timer_dynamics) ! 
dynamics

      end subroutine evp

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
index 0b11439ef..2c1b23032 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
@@ -200,9 +200,9 @@ subroutine implicit_solver (dt)
         stressm_1, stressm_2, stressm_3, stressm_4, &
         stress12_1, stress12_2, stress12_3, stress12_4
      use ice_grid, only: tmask, umask, dxt, dyt, cxp, cyp, cxm, cym, &
-         tarear, grid_type, grid_average_X2Y
+         tarear, grid_type, grid_average_X2Y !, grid_system commented out until implementation of c grid
      use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, &
-         aice_init, aice0, aicen, vicen, strength
+         aice_init, aice0, aicen, vicen, strength !, uvelE, vvelN commented out until implementation of c grid
      use ice_timers, only: timer_dynamics, timer_bound, &
          ice_timer_start, ice_timer_stop
@@ -657,7 +657,14 @@ subroutine implicit_solver (dt)
                           field_loc_NEcorner, field_type_vector)
      call grid_average_X2Y('U2TF',strocnxT) ! shift
      call grid_average_X2Y('U2TF',strocnyT)
-
+! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport
+! commented out in order to focus on EVP for now within the cdgrid
+! should be used when routine is ready
+!      if (grid_system == 'CD') then
+!         call grid_average_X2Y('E2US',uvelE,uvel)
+!         call grid_average_X2Y('N2US',vvelN,vvel)
+!      endif
+!end comment out
      call ice_timer_stop(timer_dynamics) ! dynamics

      end subroutine implicit_solver

diff --git a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 b/cicecore/cicedynB/dynamics/ice_transport_driver.F90
index f2dff2367..5d34b60fc 100644
--- a/cicecore/cicedynB/dynamics/ice_transport_driver.F90
+++ b/cicecore/cicedynB/dynamics/ice_transport_driver.F90
@@ -260,8 +260,8 @@ subroutine transport_remap (dt)
      use ice_domain_size, only: ncat, max_blocks
      use ice_blocks, only: nx_block, ny_block, block, get_block, nghost
      use ice_state, only: aice0, aicen, vicen, vsnon, trcrn, &
-         uvel, vvel, bound_state
-     use ice_grid, only: tarea
+         uvel, vvel, bound_state, uvelE, vvelN
+     use ice_grid, only: tarea, grid_system
      use ice_calendar, only: istep1
      use ice_timers, only: ice_timer_start, ice_timer_stop, &
          timer_advect, timer_bound
@@ -538,14 +538,24 @@
      !-------------------------------------------------------------------
      ! Main remapping routine: Step ice area and tracers forward in time.
      !-------------------------------------------------------------------
-
-     call horizontal_remap (dt, ntrace, &
+     if (grid_system == 'CD') then
+        call horizontal_remap (dt, ntrace, &
                            uvel (:,:,:), vvel (:,:,:), &
                            aim (:,:,:,:), trm (:,:,:,:,:), &
                            l_fixed_area, &
                            tracer_type, depend, &
                            has_dependents, integral_order, &
-                           l_dp_midpt)
+                           l_dp_midpt, grid_system, &
+                           uvelE(:,:,:),vvelN(:,:,:))
+     else
+        call horizontal_remap (dt, ntrace, &
+                           uvel (:,:,:), vvel (:,:,:), &
+                           aim (:,:,:,:), trm (:,:,:,:,:), &
+                           l_fixed_area, &
+                           tracer_type, depend, &
+                           has_dependents, integral_order, &
+                           l_dp_midpt, grid_system)
+     endif

      !-------------------------------------------------------------------
      ! Given new fields, recompute state variables.
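The duplicated 'horizontal_remap' call above exists because 'uvelE' and 'vvelN'
are optional dummy arguments of 'horizontal_remap' (see its signature change
below), and a Fortran optional argument may not be referenced unless it is
present. A minimal standalone sketch of this present()/optional pattern
(all names here are illustrative, not CICE code):

    program optional_demo
      implicit none
      real :: u(3) = (/1.0, 2.0, 3.0/)
      call remap(u)               ! B grid: edge velocities not passed
      call remap(u, uedge=2.0*u)  ! CD grid: edge velocities passed
    contains
      subroutine remap(u, uedge)
        real, intent(in)           :: u(:)
        real, intent(in), optional :: uedge(:)
        if (present(uedge)) then  ! CD path: edge values used directly
          print *, 'edge sum =', sum(uedge)
        else                      ! B path: average adjacent corner values
          print *, 'avg  sum =', sum(0.5*(u(1:2) + u(2:3)))
        end if
      end subroutine remap
    end program optional_demo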
@@ -709,8 +719,8 @@ subroutine transport_upwind (dt) use ice_domain_size, only: ncat, max_blocks use ice_state, only: aice0, aicen, vicen, vsnon, trcrn, & uvel, vvel, trcr_depend, bound_state, trcr_base, & - n_trcr_strata, nt_strata - use ice_grid, only: HTE, HTN, tarea + n_trcr_strata, nt_strata, uvelE, vvelN + use ice_grid, only: HTE, HTN, tarea, grid_system use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_bound, timer_advect @@ -761,31 +771,34 @@ subroutine transport_upwind (dt) !------------------------------------------------------------------- ! Average corner velocities to edges. !------------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - uee(i,j,iblk) = p5*(uvel(i,j,iblk) + uvel(i,j-1,iblk)) - vnn(i,j,iblk) = p5*(vvel(i,j,iblk) + vvel(i-1,j,iblk)) - enddo - enddo - enddo - !$OMP END PARALLEL DO - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (uee, halo_info, & - field_loc_Eface, field_type_vector) - call ice_HaloUpdate (vnn, halo_info, & - field_loc_Nface, field_type_vector) - call ice_timer_stop(timer_bound) - + if (grid_system == 'CD') then + uee(:,:,:)=uvelE(:,:,:) + vnn(:,:,:)=vvelN(:,:,:) + else + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + uee(i,j,iblk) = p5*(uvel(i,j,iblk) + uvel(i,j-1,iblk)) + vnn(i,j,iblk) = p5*(vvel(i,j,iblk) + vvel(i-1,j,iblk)) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (uee, halo_info, & + field_loc_Eface, field_type_vector) + call ice_HaloUpdate (vnn, halo_info, & + field_loc_Nface, field_type_vector) + call ice_timer_stop(timer_bound) + endif !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks this_block = get_block(blocks_ice(iblk),iblk) diff --git a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 index 070f3b7ad..aae19378a 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 @@ -319,7 +319,8 @@ subroutine horizontal_remap (dt, ntrace, & tracer_type, depend, & has_dependents, & integral_order, & - l_dp_midpt) + l_dp_midpt, grid_system, & + uvelE, vvelN) use ice_boundary, only: ice_halo, ice_HaloMask, ice_HaloUpdate, & ice_HaloDestroy @@ -339,8 +340,12 @@ subroutine horizontal_remap (dt, ntrace, & ntrace ! number of tracers in use real (kind=dbl_kind), intent(in), dimension(nx_block,ny_block,max_blocks) :: & - uvel ,&! x-component of velocity (m/s) - vvel ! y-component of velocity (m/s) + uvel ,&! x-component of velocity (m/s) ugrid + vvel ! y-component of velocity (m/s) ugrid + + real (kind=dbl_kind), intent(in), optional, dimension(nx_block,ny_block,max_blocks) :: & + uvelE ,&! x-component of velocity (m/s) egrid + vvelN ! y-component of velocity (m/s) ngrid real (kind=dbl_kind), intent(inout), dimension (nx_block,ny_block,0:ncat,max_blocks) :: & mm ! 
mean mass values in each grid cell @@ -348,6 +353,8 @@ subroutine horizontal_remap (dt, ntrace, & real (kind=dbl_kind), intent(inout), dimension (nx_block,ny_block,ntrace,ncat,max_blocks) :: & tm ! mean tracer values in each grid cell + character (len=char_len_long), intent(in) :: grid_system + !------------------------------------------------------------------- ! If l_fixed_area is true, the area of each departure region is ! computed in advance (e.g., by taking the divergence of the @@ -663,6 +670,20 @@ subroutine horizontal_remap (dt, ntrace, & enddo if (l_fixed_area) then + if (grid_system == 'CD') then ! velocities are already on the center + do j = jlo, jhi + do i = ilo-1, ihi + edgearea_e(i,j) = uvelE(i,j,iblk) * HTE(i,j,iblk) * dt + enddo + enddo + + do j = jlo-1, jhi + do i = ilo, ihi + edgearea_n(i,j) = vvelN(i,j,iblk)*HTN(i,j,iblk) * dt + enddo + enddo + + else do j = jlo, jhi do i = ilo-1, ihi edgearea_e(i,j) = (uvel(i,j,iblk) + uvel(i,j-1,iblk)) & @@ -676,7 +697,8 @@ subroutine horizontal_remap (dt, ntrace, & * p5 * HTN(i,j,iblk) * dt enddo enddo - endif + endif + endif !------------------------------------------------------------------- ! Transports for east cell edges. From 052caab07e9ce87da99758408bccf7a43e56b652 Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Wed, 17 Nov 2021 16:48:32 -0700 Subject: [PATCH 034/109] Bug fix for missing allocation of CD variables (#19) * Bug fix for missing allocation of CD variables * Adjusting indents and fixing a comment --- cicecore/cicedynB/general/ice_flux.F90 | 44 ++++++++++++++------------ 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90 index 65c21f6d9..e17faab6a 100644 --- a/cicecore/cicedynB/general/ice_flux.F90 +++ b/cicecore/cicedynB/general/ice_flux.F90 @@ -591,34 +591,38 @@ subroutine alloc_flux vocnN (nx_block,ny_block,max_blocks), & ! ocean current, y-direction (m/s) ss_tltxN (nx_block,ny_block,max_blocks), & ! sea surface slope, x-direction (m/m) ss_tltyN (nx_block,ny_block,max_blocks), & ! sea surface slope, y-direction - taubxN (nx_block,ny_block,max_blocks), & ! seabed stress (x) at N points (N/m^2) - taubyN (nx_block,ny_block,max_blocks), & ! seabed stress (y) at N points (N/m^2) - strairxN (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction at N points - strairyN (nx_block,ny_block,max_blocks), & ! stress on ice by air, y-direction at N points - strocnxN (nx_block,ny_block,max_blocks), & ! ice-ocean stress, x-direction at N points - strocnyN (nx_block,ny_block,max_blocks), & ! ice-ocean stress, y-direction at N points - strtltxN (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, x-direction at N points - strtltyN (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, y-direction at N points - strintxN (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, x at N points (N/m^2) - strintyN (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, y at N points (N/m^2) + taubxN (nx_block,ny_block,max_blocks), & ! seabed stress (x) at N points (N/m^2) + taubyN (nx_block,ny_block,max_blocks), & ! seabed stress (y) at N points (N/m^2) + strairxN (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction at N points + strairyN (nx_block,ny_block,max_blocks), & ! stress on ice by air, y-direction at N points + strocnxN (nx_block,ny_block,max_blocks), & ! 
ice-ocean stress, x-direction at N points + strocnyN (nx_block,ny_block,max_blocks), & ! ice-ocean stress, y-direction at N points + strtltxN (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, x-direction at N points + strtltyN (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, y-direction at N points + strintxN (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, x at N points (N/m^2) + strintyN (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, y at N points (N/m^2) icenmask (nx_block,ny_block,max_blocks), & ! ice extent mask (N-cell) + fmN (nx_block,ny_block,max_blocks), & ! Coriolis param. * mass in N-cell (kg/s) + TbN (nx_block,ny_block,max_blocks), & ! factor for seabed stress (landfast ice) straxE (nx_block,ny_block,max_blocks), & ! wind stress components (N/m^2) strayE (nx_block,ny_block,max_blocks), & ! uocnE (nx_block,ny_block,max_blocks), & ! ocean current, x-direction (m/s) vocnE (nx_block,ny_block,max_blocks), & ! ocean current, y-direction (m/s) ss_tltxE (nx_block,ny_block,max_blocks), & ! sea surface slope, x-direction (m/m) ss_tltyE (nx_block,ny_block,max_blocks), & ! sea surface slope, y-direction - taubxE (nx_block,ny_block,max_blocks), & ! seabed stress (x) at E points (N/m^2) - taubyE (nx_block,ny_block,max_blocks), & ! seabed stress (y) at E points (N/m^2) - strairxE (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction at E points - strairyE (nx_block,ny_block,max_blocks), & ! stress on ice by air, y-direction at E points - strocnxE (nx_block,ny_block,max_blocks), & ! ice-ocean stress, x-direction at E points - strocnyE (nx_block,ny_block,max_blocks), & ! ice-ocean stress, y-direction at E points - strtltxE (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, x-direction at E points - strtltyE (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, y-direction at E points - strintxE (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, x at E points (N/m^2) - strintyE (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, y at E points (N/m^2) + taubxE (nx_block,ny_block,max_blocks), & ! seabed stress (x) at E points (N/m^2) + taubyE (nx_block,ny_block,max_blocks), & ! seabed stress (y) at E points (N/m^2) + strairxE (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction at E points + strairyE (nx_block,ny_block,max_blocks), & ! stress on ice by air, y-direction at E points + strocnxE (nx_block,ny_block,max_blocks), & ! ice-ocean stress, x-direction at E points + strocnyE (nx_block,ny_block,max_blocks), & ! ice-ocean stress, y-direction at E points + strtltxE (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, x-direction at E points + strtltyE (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, y-direction at E points + strintxE (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, x at E points (N/m^2) + strintyE (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, y at E points (N/m^2) iceemask (nx_block,ny_block,max_blocks), & ! ice extent mask (E-cell) + fmE (nx_block,ny_block,max_blocks), & ! Coriolis param. * mass in E-cell (kg/s) + TbE (nx_block,ny_block,max_blocks), & ! 
factor for seabed stress (landfast ice)
         stat=ierr)
      if (ierr/=0) call abort_ice('(alloc_flux): Out of memory')

From 256c3dc819771f4018e916e12979b86afcb9cbd0 Mon Sep 17 00:00:00 2001
From: Tony Craig
Date: Wed, 17 Nov 2021 16:07:35 -0800
Subject: [PATCH 035/109] fix advection none setup for multiproc runs, remove
 obsolete box2001 env setting (#22)

---
 cice.setup                                    | 4 ++++
 cicecore/cicedynB/general/ice_init.F90        | 7 ++++---
 configuration/scripts/options/set_env.box2001 | 1 -
 3 files changed, 8 insertions(+), 4 deletions(-)
 delete mode 100644 configuration/scripts/options/set_env.box2001

diff --git a/cice.setup b/cice.setup
index be9266dd2..aae4319d4 100755
--- a/cice.setup
+++ b/cice.setup
@@ -1070,6 +1070,10 @@ EOF2
end

${casescr}/parse_settings.sh cice.settings ${fsmods}
+if ($status != 0) then
+  echo "${0}: ERROR, parse_settings.sh aborted"
+  exit -1
+endif

${casescr}/parse_namelist.sh ice_in ${fimods}
if ($status != 0) then
  echo "${0}: ERROR, parse_namelist.sh aborted"
  exit -1
endif
diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90
index 1b4d50927..125b73e95 100644
--- a/cicecore/cicedynB/general/ice_init.F90
+++ b/cicecore/cicedynB/general/ice_init.F90
@@ -962,6 +962,10 @@ subroutine input_data
         abort_list = trim(abort_list)//":1"
      endif

+     if (ktransport <= 0) then
+        advection = 'none'
+     endif
+
      if (ktransport > 0 .and. advection /= 'remap' .and. advection /= 'upwind') then
         if (my_task == master_task) write(nu_diag,*) subname//' ERROR: invalid advection=',trim(advection)
         abort_list = trim(abort_list)//":3"
      endif
@@ -1473,9 +1477,6 @@ subroutine input_data
      endif
      write(nu_diag,1030) ' ssh_stress = ',trim(ssh_stress),trim(tmpstr2)

-     if (ktransport <= 0) then
-        advection = 'none'
-     endif
      if (trim(advection) == 'remap') then
         tmpstr2 = ' : linear remapping advection'
      elseif (trim(advection) == 'upwind') then
diff --git a/configuration/scripts/options/set_env.box2001 b/configuration/scripts/options/set_env.box2001
deleted file mode 100644
index a3f7c10f5..000000000
--- a/configuration/scripts/options/set_env.box2001
+++ /dev/null
@@ -1 +0,0 @@
-setenv NICELYR 1

From b401c089595b547502393b05b86d16e664b201e9 Mon Sep 17 00:00:00 2001
From: Philippe Blain
Date: Thu, 18 Nov 2021 15:28:52 -0500
Subject: [PATCH 036/109] Compute seabed stress at CD-grid locations (#20)

* ice_dyn_shared: fix subname for 'seabed_stress_factor_LKD'

* ice_dyn_shared: add optional 'grid_location' argument to seabed_stress_factor_LKD

In subsequent commits we want to compute the seabed stress factor 'Tb' not only
at the U point (NE corner) but also at the E (center of E face) and N (center
of N face) points. In order to dispatch the computation in this subroutine to
different code paths depending on the grid location (U, N or E), add an
optional argument 'grid_location' that can be used to indicate at which point
we want the factor to be computed. Default it to the 'U' point for backwards
compatibility, such that the existing calls do not have to be changed.

Note that we need an additional 'l_grid_location' variable to set the default
value, as the Fortran standard does not allow setting the value of an optional
argument if it is not present.

Note also that 'icellu' was incorrectly referencing 'icetmask'. Fix that by
generalizing the description.

* ice_grid: add 'grid_neighbor_{min,max}' functions

Add functions 'grid_neighbor_min' and 'grid_neighbor_max', used to compute the
minimum/maximum of neighboring T-point values of a field at the U, E or N
location.
* ice_dyn_shared: use 'grid_neighbor_{min,max}' in seabed_stress_factor_LKD Use the 'grid_neighbor_{min,max}' subroutines introduced in the preceding commit to compute the min and max of neighbor values of hwater, aice and vice at 'grid_location'. This generalizes the subroutine for the CD-grid. A subsequent commit will call it twice to compute the seabed stress factor at the E and N locations. * ice_dyn_shared: fix subname for 'seabed_stress_factor_prob' * ice_dyn_shared: add optional CD-grid arguments to 'seabed_stress_factor_prob' In contrast to 'seabed_stress_factor_LKD', in 'seabed_stress_factor_prob' the seabed stress factor is computed at the T location and only transfered to the U location at the end of the subroutine. So for efficiency it does not make sense to call that subroutine twice for computing the factor at the E and N locations. Instead, add optional arguments for 'TbE' and 'TbN', as well as the corresponding indices paraphernalia, and compute the factor at the E and N location at the end of the subroutine depending on 'grid_system' and the presence of the optional arguments. Use 'grid_neighbor_max' to abstract away the operation of finding the maximum value of neighboring cells. * ice_dyn_evp: compute seabed stress factor at CD-grid locations Leverage the changes in the previous commits to compute the seabed stress factor at the E and N locations if grid_system == 'CD'. For seabed_stress_method == 'LKD', simply call seabed_stress_factor_LKD twice, once for each of the E and N locations. For seabed_stress_method == 'probabilistic', call seabed_stress_factor_prob with the additional arguments for the E and N locations. --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 40 ++++++- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 103 ++++++++++++++---- cicecore/cicedynB/infrastructure/ice_grid.F90 | 64 ++++++++++- 3 files changed, 180 insertions(+), 27 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 6ad42838f..5fa3e8a26 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -502,13 +502,40 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks + select case (trim(grid_system)) + case('B') + + if ( seabed_stress_method == 'LKD' ) then + + call seabed_stress_factor_LKD (nx_block, ny_block, & + icellu (iblk), & + indxui(:,iblk), indxuj(:,iblk), & + vice(:,:,iblk), aice(:,:,iblk), & + hwater(:,:,iblk), Tbu(:,:,iblk)) + + elseif ( seabed_stress_method == 'probabilistic' ) then + + call seabed_stress_factor_prob (nx_block, ny_block, & + icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & + icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + aicen(:,:,:,iblk), vicen(:,:,:,iblk), & + hwater(:,:,iblk), Tbu(:,:,iblk)) + endif + + case('CD') + if ( seabed_stress_method == 'LKD' ) then call seabed_stress_factor_LKD (nx_block, ny_block, & - icellu (iblk), & - indxui(:,iblk), indxuj(:,iblk), & + icelle (iblk), & + indxei(:,iblk), indxej(:,iblk), & + vice(:,:,iblk), aice(:,:,iblk), & + hwater(:,:,iblk), TbE(:,:,iblk)) + call seabed_stress_factor_LKD (nx_block, ny_block, & + icelln (iblk), & + indxni(:,iblk), indxnj(:,iblk), & vice(:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), Tbu(:,:,iblk)) + hwater(:,:,iblk), TbN(:,:,iblk)) elseif ( seabed_stress_method == 'probabilistic' ) then @@ -516,8 +543,13 @@ subroutine evp (dt) icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk), 
& - hwater(:,:,iblk), Tbu(:,:,iblk)) + hwater(:,:,iblk), Tbu(:,:,iblk), & + TbE(:,:,iblk), TbN(:,:,iblk), & + icelle(iblk), indxei(:,iblk), indxej(:,iblk), & + icelln(iblk), indxni(:,iblk), indxnj(:,iblk) ) endif + + end select enddo !$OMP END PARALLEL DO diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 40996b03b..2e4430c3d 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -930,11 +930,14 @@ subroutine seabed_stress_factor_LKD (nx_block, ny_block, & icellu, & indxui, indxuj, & vice, aice, & - hwater, Tbu) + hwater, Tbu, & + grid_location) + + use ice_grid, only: grid_neighbor_min, grid_neighbor_max integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! no. of cells where icetmask = 1 + icellu ! no. of cells where ice[uen]mask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxui , & ! compressed index in i-direction @@ -946,31 +949,44 @@ subroutine seabed_stress_factor_LKD (nx_block, ny_block, & hwater ! water depth at tracer location (m) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - Tbu ! seabed stress factor (N/m^2) + Tbu ! seabed stress factor at 'grid_location' (N/m^2) + + character(len=*), optional, intent(inout) :: & + grid_location ! grid location (U, E, N), U assumed if not present real (kind=dbl_kind) :: & - au, & ! concentration of ice at u location - hu, & ! volume per unit area of ice at u location (mean thickness, m) - hwu, & ! water depth at u location (m) - hcu ! critical thickness at u location (m) + au, & ! concentration of ice at 'grid_location' + hu, & ! volume per unit area of ice at 'grid_location' (mean thickness, m) + hwu, & ! water depth at 'grid_location' (m) + hcu ! critical thickness at 'grid_location' (m) integer (kind=int_kind) :: & i, j, ij - character(len=*), parameter :: subname = '(seabed1_stress_coeff)' + character(len=char_len) :: & + l_grid_location ! local version of 'grid_location' + + character(len=*), parameter :: subname = '(seabed_stress_factor_LKD)' + ! Assume U location (NE corner) if grid_location not present + if (.not. (present(grid_location))) then + l_grid_location = 'U' + else + l_grid_location = grid_location + endif + do ij = 1, icellu i = indxui(ij) j = indxuj(ij) - ! convert quantities to u-location + ! convert quantities to grid_location - hwu = min(hwater(i,j),hwater(i+1,j),hwater(i,j+1),hwater(i+1,j+1)) + hwu = grid_neighbor_min(hwater, i, j, l_grid_location) if (hwu < threshold_hw) then - au = max(aice(i,j),aice(i+1,j),aice(i,j+1),aice(i+1,j+1)) - hu = max(vice(i,j),vice(i+1,j),vice(i,j+1),vice(i+1,j+1)) + au = grid_neighbor_max(aice, i, j, l_grid_location) + hu = grid_neighbor_max(vice, i, j, l_grid_location) ! 1- calculate critical thickness hcu = au * hwu / k1 @@ -1002,15 +1018,19 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & icellt, indxti, indxtj, & icellu, indxui, indxuj, & aicen, vicen, & - hwater, Tbu) + hwater, Tbu, & + TbE, TbN, & + icelle, indxei, indxej, & + icelln, indxni, indxnj) ! use modules use ice_arrays_column, only: hin_max use ice_domain_size, only: ncat + use ice_grid, only: grid_neighbor_min, grid_neighbor_max integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt, icellu ! no. of cells where icetmask = 1 + icellt, icellu ! no. 
of cells where ice[tu]mask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), & intent(in) :: & @@ -1027,7 +1047,21 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & vicen ! partial volume for last thickness category in ITD (m) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - Tbu ! seabed stress factor (N/m^2) + Tbu ! seabed stress factor at U location (N/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout), optional :: & + TbE, & ! seabed stress factor at E location (N/m^2) + TbN ! seabed stress factor at N location (N/m^2) + + integer (kind=int_kind), intent(in), optional :: & + icelle, icelln ! no. of cells where ice[en]mask = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), & + intent(in), optional :: & + indxei , & ! compressed index in i-direction + indxej , & ! compressed index in j-direction + indxni , & ! compressed index in i-direction + indxnj ! compressed index in j-direction ! local variables @@ -1068,7 +1102,7 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & real (kind=dbl_kind) :: atot, x_kmax real (kind=dbl_kind) :: cut, rhoi, rhow, gravit, pi, puny - character(len=*), parameter :: subname = '(seabed2_stress_coeff)' + character(len=*), parameter :: subname = '(seabed_stress_factor_prob)' call icepack_query_parameters(rhow_out=rhow, rhoi_out=rhoi) call icepack_query_parameters(gravit_out=gravit) @@ -1149,12 +1183,37 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & endif enddo - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) - ! convert quantities to u-location - Tbu(i,j) = max(Tbt(i,j),Tbt(i+1,j),Tbt(i,j+1),Tbt(i+1,j+1)) - enddo ! ij + select case (trim(grid_system)) + case('B') + do ij = 1, icellu + i = indxui(ij) + j = indxuj(ij) + ! convert quantities to U-location + Tbu(i,j) = grid_neighbor_max(Tbt, i, j, 'U') + enddo ! ij + case('CD') + if(present(Tbe) .and. present(TbN) .and. & + present(icelle) .and. present(icelln) .and. & + present(indxei) .and. present(indxej) .and. & + present(indxni) .and. present(indxnj)) then + + do ij = 1, icelle + i = indxei(ij) + j = indxej(ij) + ! convert quantities to E-location + TbE(i,j) = grid_neighbor_max(Tbt, i, j, 'E') + enddo + do ij = 1, icelln + i = indxni(ij) + j = indxnj(ij) + ! convert quantities to N-location + TbN(i,j) = grid_neighbor_max(Tbt, i, j, 'N') + enddo + + else + call abort_ice(subname // ' insufficient number of arguments for grid_system:' // grid_system) + endif + end select end subroutine seabed_stress_factor_prob diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 0e1934fdf..6e554b001 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -46,7 +46,7 @@ module ice_grid implicit none private public :: init_grid1, init_grid2, grid_average_X2Y, & - alloc_grid, makemask + alloc_grid, makemask, grid_neighbor_min, grid_neighbor_max character (len=char_len_long), public :: & grid_format , & ! file format ('bin'=binary or 'nc'=netcdf) @@ -2803,6 +2803,68 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) end subroutine grid_average_X2YF +!======================================================================= +! Compute the minimum of adjacent values of a field at specific indices, +! depending on the grid location (U, E, N) +! 
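! A usage sketch (illustrative; 'hwu' is a local name in the seabed stress
! code, 'hwe' is hypothetical):
!    hwu = grid_neighbor_min(hwater, i, j, 'U') ! min of the 4 T cells around the NE corner
!    hwe = grid_neighbor_min(hwater, i, j, 'E') ! min of the 2 T cells sharing the E face
! Any other grid_location string aborts via abort_ice.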
+ real(kind=dbl_kind) function grid_neighbor_min(field, i, j, grid_location) result(mini)
+
+    real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+       field ! field defined at T point
+
+    integer (kind=int_kind), intent(in) :: &
+       i, j
+
+    character(len=*), intent(in) :: &
+       grid_location ! grid location at which to compute the minimum (U, E, N)
+
+    character(len=*), parameter :: subname = '(grid_neighbor_min)'
+
+    select case (trim(grid_location))
+       case('U')
+          mini = min(field(i,j), field(i+1,j), field(i,j+1), field(i+1,j+1))
+       case('E')
+          mini = min(field(i,j), field(i+1,j))
+       case('N')
+          mini = min(field(i,j), field(i,j+1))
+       case default
+          call abort_ice(subname // ' unknown grid_location: ' // grid_location)
+    end select
+
+ end function grid_neighbor_min
+
+
+!=======================================================================
+! Compute the maximum of adjacent values of a field at specific indices,
+! depending on the grid location (U, E, N)
+
+ real(kind=dbl_kind) function grid_neighbor_max(field, i, j, grid_location) result(maxi)
+
+    real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+       field ! field defined at T point
+
+    integer (kind=int_kind), intent(in) :: &
+       i, j
+
+    character(len=*), intent(in) :: &
+       grid_location ! grid location at which to compute the maximum (U, E, N)
+
+    character(len=*), parameter :: subname = '(grid_neighbor_max)'
+
+    select case (trim(grid_location))
+       case('U')
+          maxi = max(field(i,j), field(i+1,j), field(i,j+1), field(i+1,j+1))
+       case('E')
+          maxi = max(field(i,j), field(i+1,j))
+       case('N')
+          maxi = max(field(i,j), field(i,j+1))
+       case default
+          call abort_ice(subname // ' unknown grid_location: ' // grid_location)
+    end select
+
+ end function grid_neighbor_max
+
!=======================================================================
! The following code is used for obtaining the coordinates of the grid
! vertices for CF-compliant netCDF history output. Approximate!

From e3112ebb854d2e7f6b660f5823af9705dd9da602 Mon Sep 17 00:00:00 2001
From: "David A. Bailey"
Date: Thu, 18 Nov 2021 13:29:36 -0700
Subject: [PATCH 037/109] Add some variables for CD discretization (#21)

* Prototype variables for CD grid discretization
* Fix syntax error
* Change variable names to ratio and only need 4
* Needed minus signs and fix alignment
* Make the ratios consistent with the doc
---
 cicecore/cicedynB/infrastructure/ice_grid.F90 | 27 +++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90
index 6e554b001..6a3cf1171 100644
--- a/cicecore/cicedynB/infrastructure/ice_grid.F90
+++ b/cicecore/cicedynB/infrastructure/ice_grid.F90
@@ -107,6 +107,12 @@ module ice_grid
         dxhy , & ! 0.5*(HTE(i,j) - HTW(i,j)) = 0.5*(HTE(i,j) - HTE(i-1,j))
         dyhx     ! 0.5*(HTN(i,j) - HTS(i,j)) = 0.5*(HTN(i,j) - HTN(i,j-1))

+     real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: &
+        ratiodxN , &  ! - dxn(i+1,j) / dxn(i,j)
+        ratiodyE , &  ! - dye(i ,j+1) / dye(i,j)
+        ratiodxNr , & ! 1 / ratiodxN
+        ratiodyEr     ! 1 / ratiodyE
+
      ! grid dimensions for rectangular grid
      real (kind=dbl_kind), public :: &
         dxrect, &   !
user_specified spacing (cm) in x-direction (uniform HTN) @@ -255,6 +261,16 @@ subroutine alloc_grid stat=ierr) if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory') + if (grid_system == 'CD') then + allocate( & + ratiodxN (nx_block,ny_block,max_blocks), & + ratiodyE (nx_block,ny_block,max_blocks), & + ratiodxNr(nx_block,ny_block,max_blocks), & + ratiodyEr(nx_block,ny_block,max_blocks), & + stat=ierr) + if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory') + endif + if (pgl_global_ext) then allocate( & G_HTE(nx_global+2*nghost, ny_global+2*nghost), & ! length of eastern edge of T-cell (global ext.) @@ -502,6 +518,17 @@ subroutine init_grid2 enddo enddo + if (grid_system == 'CD') then + do j = jlo, jhi + do i = ilo, ihi + ratiodxN (i,j,iblk) = - dxn(i+1,j ,iblk) / dxn(i,j,iblk) + ratiodyE (i,j,iblk) = - dye(i ,j+1,iblk) / dye(i,j,iblk) + ratiodxNr(i,j,iblk) = c1 / ratiodxn(i,j,iblk) + ratiodyEr(i,j,iblk) = c1 / ratiodye(i,j,iblk) + enddo + enddo + endif + enddo ! iblk !$OMP END PARALLEL DO From 249639ac84a7daf6cfb5e8c3cd1d19d767f06dc7 Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Thu, 18 Nov 2021 13:30:00 -0700 Subject: [PATCH 038/109] Add halo updates on uvelE, vvelE, uvelN, and vvelN (#23) * Additional CD variables for the EVP dynamics * Add halo updates for uvel and vvel at N and E * Add field_loc_Nface and field_loc_Eface to the use statement. --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 35 ++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 5fa3e8a26..f2a506922 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -38,6 +38,7 @@ module ice_dyn_evp use ice_kinds_mod use ice_communicate, only: my_task, master_task use ice_constants, only: field_loc_center, field_loc_NEcorner, & + field_loc_Nface, field_loc_Eface, & field_type_scalar, field_type_vector use ice_constants, only: c0, p027, p055, p111, p166, & p222, p25, p333, p5, c1 @@ -483,6 +484,23 @@ subroutine evp (dt) call unstack_velocity_field(fld2, uvel, vvel) call ice_timer_stop(timer_bound) + if (grid_system == 'CD') then + + call ice_timer_start(timer_bound) + ! velocities may have changed in dyn_prep2 + call stack_velocity_field(uvelN, vvelN, fld2) + call ice_HaloUpdate (fld2, halo_info, & + field_loc_Nface, field_type_vector) + call unstack_velocity_field(fld2, uvelN, vvelN) + ! velocities may have changed in dyn_prep2 + call stack_velocity_field(uvelE, vvelE, fld2) + call ice_HaloUpdate (fld2, halo_info, & + field_loc_Eface, field_type_vector) + call unstack_velocity_field(fld2, uvelE, vvelE) + call ice_timer_stop(timer_bound) + + endif + if (maskhalo_dyn) then call ice_timer_start(timer_bound) halomask = 0 @@ -657,6 +675,23 @@ subroutine evp (dt) call ice_timer_stop(timer_bound) call unstack_velocity_field(fld2, uvel, vvel) + if (grid_system == 'CD') then + + call ice_timer_start(timer_bound) + ! velocities may have changed in dyn_prep2 + call stack_velocity_field(uvelN, vvelN, fld2) + call ice_HaloUpdate (fld2, halo_info, & + field_loc_Nface, field_type_vector) + call unstack_velocity_field(fld2, uvelN, vvelN) + ! velocities may have changed in dyn_prep2 + call stack_velocity_field(uvelE, vvelE, fld2) + call ice_HaloUpdate (fld2, halo_info, & + field_loc_Eface, field_type_vector) + call unstack_velocity_field(fld2, uvelE, vvelE) + call ice_timer_stop(timer_bound) + + endif + enddo ! subcycling endif ! 
evp_algorithm From 8dc3d0bd9a7624511ffe16c46eb3496d8121a427 Mon Sep 17 00:00:00 2001 From: Philippe Blain Date: Thu, 18 Nov 2021 15:30:57 -0500 Subject: [PATCH 039/109] Compute velocities at CD-grid locations (#24) * ice_dyn_shared: add 'step_vel' to step velocities at CD-grid locations Add a new subroutine 'step_vel', similar to the existing 'stepu', to step the CD-grid velocities forward by solving the momentum equation. In contrast to 'stepu', the components of the divergence of the internal stress tensor are computed in a separate subroutine, 'div_stress', and so are simply passed as arguments in 'step_vel'. We mostly follow 'stepu' for the rest of the computation, apart from two micro-optimizations: - Introduce a new intermediate variable, 'ccc', to compute the denominator of the seabed stress coefficient, and reuse it when computing the seabed stress on the last iteration. - Fold the two 'if's at the end of the subroutine into a single one. * ice_dyn_evp: compute velocities at CD-grid locations Call the 'step_vel' subroutine introduced in the previous commit from 'evp' to step the CD-grid velocities '[uv]vel[EN]' forward when 'grid_system == 'CD'. --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 68 +++++++--- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 123 +++++++++++++++++- 2 files changed, 174 insertions(+), 17 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index f2a506922..5f237d9f0 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -42,7 +42,7 @@ module ice_dyn_evp field_type_scalar, field_type_vector use ice_constants, only: c0, p027, p055, p111, p166, & p222, p25, p333, p5, c1 - use ice_dyn_shared, only: stepu, dyn_prep1, dyn_prep2, dyn_finish, & + use ice_dyn_shared, only: stepu, step_vel, dyn_prep1, dyn_prep2, dyn_finish, & ndte, yield_curve, ecci, denom1, arlx1i, fcor_blk, fcorE_blk, fcorN_blk, & uvel_init, vvel_init, uvelE_init, vvelE_init, uvelN_init, vvelN_init, & seabed_stress_factor_LKD, seabed_stress_factor_prob, seabed_stress_method, & @@ -644,21 +644,57 @@ subroutine evp (dt) ! 
momentum equation
      !-----------------------------------------------------------------

-         call stepu (nx_block, ny_block, &
-                     icellu (iblk), Cdn_ocn (:,:,iblk), &
-                     indxui (:,iblk), indxuj (:,iblk), &
-                     ksub, &
-                     aiu (:,:,iblk), strtmp (:,:,:), &
-                     uocn (:,:,iblk), vocn (:,:,iblk), &
-                     waterx (:,:,iblk), watery (:,:,iblk), &
-                     forcex (:,:,iblk), forcey (:,:,iblk), &
-                     umassdti (:,:,iblk), fm (:,:,iblk), &
-                     uarear (:,:,iblk), &
-                     strintx (:,:,iblk), strinty (:,:,iblk), &
-                     taubx (:,:,iblk), tauby (:,:,iblk), &
-                     uvel_init(:,:,iblk), vvel_init(:,:,iblk),&
-                     uvel (:,:,iblk), vvel (:,:,iblk), &
-                     Tbu (:,:,iblk))
+         select case (grid_system)
+         case('B')
+
+            call stepu (nx_block, ny_block, &
+                        icellu (iblk), Cdn_ocn (:,:,iblk), &
+                        indxui (:,iblk), indxuj (:,iblk), &
+                        ksub, &
+                        aiu (:,:,iblk), strtmp (:,:,:), &
+                        uocn (:,:,iblk), vocn (:,:,iblk), &
+                        waterx (:,:,iblk), watery (:,:,iblk), &
+                        forcex (:,:,iblk), forcey (:,:,iblk), &
+                        umassdti (:,:,iblk), fm (:,:,iblk), &
+                        uarear (:,:,iblk), &
+                        strintx (:,:,iblk), strinty (:,:,iblk), &
+                        taubx (:,:,iblk), tauby (:,:,iblk), &
+                        uvel_init(:,:,iblk), vvel_init(:,:,iblk),&
+                        uvel (:,:,iblk), vvel (:,:,iblk), &
+                        Tbu (:,:,iblk))
+
+         case('CD')
+
+            call step_vel (nx_block, ny_block, &
+                           icelle (iblk), Cdn_ocn (:,:,iblk), &
+                           indxei (:,iblk), indxej (:,iblk), &
+                           ksub, aiE (:,:,iblk), &
+                           uocnE (:,:,iblk), vocnE (:,:,iblk), &
+                           waterxE (:,:,iblk), wateryE (:,:,iblk), &
+                           forcexE (:,:,iblk), forceyE (:,:,iblk), &
+                           emassdti (:,:,iblk), fmE (:,:,iblk), &
+                           strintxE (:,:,iblk), strintyE (:,:,iblk), &
+                           taubxE (:,:,iblk), taubyE (:,:,iblk), &
+                           uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), &
+                           uvelE (:,:,iblk), vvelE (:,:,iblk), &
+                           TbE (:,:,iblk))
+
+            call step_vel (nx_block, ny_block, &
+                           icelln (iblk), Cdn_ocn (:,:,iblk), &
+                           indxni (:,iblk), indxnj (:,iblk), &
+                           ksub, aiN (:,:,iblk), &
+                           uocnN (:,:,iblk), vocnN (:,:,iblk), &
+                           waterxN (:,:,iblk), wateryN (:,:,iblk), &
+                           forcexN (:,:,iblk), forceyN (:,:,iblk), &
+                           nmassdti (:,:,iblk), fmN (:,:,iblk), &
+                           strintxN (:,:,iblk), strintyN (:,:,iblk), &
+                           taubxN (:,:,iblk), taubyN (:,:,iblk), &
+                           uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), &
+                           uvelN (:,:,iblk), vvelN (:,:,iblk), &
+                           TbN (:,:,iblk))
+
+         end select
      enddo
      !$TCXOMP END PARALLEL DO

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
index 2e4430c3d..353c57575 100755
--- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
@@ -23,7 +23,7 @@ module ice_dyn_shared
 implicit none
 private
- public :: init_dyn, set_evp_parameters, stepu, principal_stress, &
+ public :: init_dyn, set_evp_parameters, stepu, step_vel, principal_stress, &
           dyn_prep1, dyn_prep2, dyn_finish, &
           seabed_stress_factor_LKD, seabed_stress_factor_prob, &
           alloc_dyn_shared, &
@@ -807,6 +807,127 @@ end subroutine stepu

!=======================================================================

+! Integration of the momentum equation to find velocity (u,v) at E and N locations
+
+      subroutine step_vel (nx_block, ny_block, &
+                           icell, Cw, &
+                           indxi, indxj, &
+                           ksub, aiu, &
+                           uocn, vocn, &
+                           waterx, watery, &
+                           forcex, forcey, &
+                           massdti, fm, &
+                           strintx, strinty, &
+                           taubx, tauby, &
+                           uvel_init, vvel_init,&
+                           uvel, vvel, &
+                           Tb)
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block, & ! block dimensions
+         icell, &              ! total count when ice[en]mask is true
+         ksub                  ! subcycling iteration
+
+      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
+         indxi , & !
compressed index in i-direction + indxj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + Tb, & ! seabed stress factor (N/m^2) + uvel_init,& ! x-component of velocity (m/s), beginning of timestep + vvel_init,& ! y-component of velocity (m/s), beginning of timestep + aiu , & ! ice fraction on [en]-grid + waterx , & ! for ocean stress calculation, x (m/s) + watery , & ! for ocean stress calculation, y (m/s) + forcex , & ! work array: combined atm stress and ocn tilt, x + forcey , & ! work array: combined atm stress and ocn tilt, y + massdti , & ! mass of [EN]-cell/dt (kg/m^2 s) + uocn , & ! ocean current, x-direction (m/s) + vocn , & ! ocean current, y-direction (m/s) + fm , & ! Coriolis param. * mass in [EN]-cell (kg/s) + strintx , & ! divergence of internal ice stress, x (N/m^2) + strinty ! divergence of internal ice stress, y (N/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + uvel , & ! x-component of velocity (m/s) + vvel ! y-component of velocity (m/s) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + taubx , & ! seabed stress, x-direction (N/m^2) + tauby ! seabed stress, y-direction (N/m^2) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + Cw ! ocean-ice neutral drag coefficient + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + real (kind=dbl_kind) :: & + uold, vold , & ! old-time uvel, vvel + vrel , & ! relative ice-ocean velocity + cca,ccb,ccc,ab2 , & ! intermediate variables + cc1,cc2 , & ! " + taux, tauy , & ! part of ocean stress term + Cb , & ! complete seabed (basal) stress coeff + rhow ! + + character(len=*), parameter :: subname = '(step_vel)' + + !----------------------------------------------------------------- + ! integrate the momentum equation + !----------------------------------------------------------------- + + call icepack_query_parameters(rhow_out=rhow) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + do ij =1, icell + i = indxi(ij) + j = indxj(ij) + + uold = uvel(i,j) + vold = vvel(i,j) + + ! (magnitude of relative ocean current)*rhow*drag*aice + vrel = aiu(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & + (vocn(i,j) - vold)**2) ! m/s + ! ice/ocean stress + taux = vrel*waterx(i,j) ! NOTE this is not the entire + tauy = vrel*watery(i,j) ! ocn stress term + + ccc = sqrt(uold**2 + vold**2) + u0 + Cb = Tb(i,j) / ccc ! for seabed stress + ! revp = 0 for classic evp, 1 for revised evp + cca = (brlx + revp)*massdti(i,j) + vrel * cosw + Cb ! kg/m^2 s + + ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s + + ab2 = cca**2 + ccb**2 + + ! compute the velocity components + cc1 = strintx(i,j) + forcex(i,j) + taux & + + massdti(i,j)*(brlx*uold + revp*uvel_init(i,j)) + cc2 = strinty(i,j) + forcey(i,j) + tauy & + + massdti(i,j)*(brlx*vold + revp*vvel_init(i,j)) + + uvel(i,j) = (cca*cc1 + ccb*cc2) / ab2 ! m/s + vvel(i,j) = (cca*cc2 - ccb*cc1) / ab2 + + ! calculate seabed stress component for outputs + if (ksub == ndte .and. seabed_stress) then ! on last subcycling iteration + taubx(i,j) = -uvel(i,j)*Tb(i,j) / ccc + tauby(i,j) = -vvel(i,j)*Tb(i,j) / ccc + endif + + enddo ! ij + + end subroutine step_vel + +!======================================================================= + ! Calculation of the ice-ocean stress. ! ...the sign will be reversed later... ! 
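Both 'stepu' and the new 'step_vel' advance the velocity by solving a 2x2
linear system implicitly: with the coefficients above, cca*u - ccb*v = cc1
and ccb*u + cca*v = cc2, so Cramer's rule with determinant
ab2 = cca**2 + ccb**2 gives u = (cca*cc1 + ccb*cc2)/ab2 and
v = (cca*cc2 - ccb*cc1)/ab2, exactly the expressions in the loop. A
standalone numerical check of that algebra (a sketch, not CICE code):

    program check_2x2
      implicit none
      real(kind=8) :: cca, ccb, cc1, cc2, ab2, u, v
      cca = 3.0d0; ccb = 1.5d0; cc1 = 2.0d0; cc2 = -1.0d0
      ab2 = cca**2 + ccb**2
      u = (cca*cc1 + ccb*cc2)/ab2
      v = (cca*cc2 - ccb*cc1)/ab2
      ! both residuals should print as (numerically) zero
      print *, cca*u - ccb*v - cc1, ccb*u + cca*v - cc2
    end program check_2x2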
From 216acf8a660f1d8c96641cd37b387c312cb0ed23 Mon Sep 17 00:00:00 2001
From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com>
Date: Thu, 18 Nov 2021 16:16:01 -0500
Subject: [PATCH 040/109] Strain_rates_U subroutine (#26)

* Initial coding of strain_rates_U
* strain_rates_U is done
* Small corrections following Philippe's comments
---
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 105 ++++++++++++++++++
 1 file changed, 105 insertions(+)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
index 353c57575..7ceb715af 100755
--- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
@@ -1744,6 +1744,111 @@ subroutine strain_rates_T (nx_block, ny_block, &

      end subroutine strain_rates_T

+
+!=======================================================================
+
+! Compute strain rates at the U point including boundary conditions
+!
+! author: JF Lemieux, ECCC
+! Nov 2021
+
+      subroutine strain_rates_U (nx_block, ny_block, &
+                                 i, j, &
+                                 uvelE, vvelE, &
+                                 uvelN, vvelN, &
+                                 uvelU, vvelU, &
+                                 dxE, dyN, &
+                                 dxU, dyU, &
+                                 ratiodxN, ratiodxNr, &
+                                 ratiodyE, ratiodyEr, &
+                                 epm, npm, uvm, &
+                                 divU, tensionU, &
+                                 shearU, DeltaU )
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block ! block dimensions
+
+      integer (kind=int_kind), intent(in) :: &
+         i, j ! indices
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
+         uvelE , &    ! x-component of velocity (m/s) at the E point
+         vvelE , &    ! y-component of velocity (m/s) at the E point
+         uvelN , &    ! x-component of velocity (m/s) at the N point
+         vvelN , &    ! y-component of velocity (m/s) at the N point
+         uvelU , &    ! x-component of velocity (m/s) interp. at U point
+         vvelU , &    ! y-component of velocity (m/s) interp. at U point
+         dxE , &      ! width of E-cell through the middle (m)
+         dyN , &      ! height of N-cell through the middle (m)
+         dxU , &      ! width of U-cell through the middle (m)
+         dyU , &      ! height of U-cell through the middle (m)
+         ratiodxN , & ! -dxN(i+1,j)/dxN(i,j) for BCs
+         ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) for BCs
+         ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) for BCs
+         ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) for BCs
+         epm , &      ! E-cell mask
+         npm , &      ! N-cell mask
+         uvm          ! U-cell mask
+
+      real (kind=dbl_kind), intent(out):: &
+         divU, tensionU, shearU, DeltaU ! strain rates at the U point
+
+      ! local variables
+
+      real (kind=dbl_kind) :: &
+         uNip1j, uNij, vEijp1, vEij, uEijp1, uEij, vNip1j, vNij
+
+      character(len=*), parameter :: subname = '(strain_rates_U)'
+
+      !-----------------------------------------------------------------
+      ! strain rates
+      ! NOTE these are actually strain rates * area (m^2/s)
+      !-----------------------------------------------------------------
+
+      uNip1j = uvelN(i+1,j) * npm(i+1,j) &
+             +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * uvelN(i,j)
+      uNij = uvelN(i,j) * npm(i,j) &
+           +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * uvelN(i+1,j)
+      vEijp1 = vvelE(i,j+1) * epm(i,j+1) &
+             +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * vvelE(i,j)
+      vEij = vvelE(i,j) * epm(i,j) &
+           +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * vvelE(i,j+1)
+
+      ! MIGHT NOT NEED TO mult by uvm...if done before in calc of uvelU...
+
+      ! divergence = e_11 + e_22
+      divU = dyU(i,j) * ( uNip1j - uNij ) &
+           + uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) &
+           + dxU(i,j) * ( vEijp1 - vEij ) &
+           + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) )
+
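+      ! On the masked combinations above: where both N points are ocean
+      ! (npm(i,j) = npm(i+1,j) = 1) the ratio terms vanish and uNip1j, uNij
+      ! reduce to uvelN(i+1,j), uvelN(i,j). Where one neighbor is land, e.g.
+      ! npm(i+1,j) = 0, uNip1j becomes ratiodxN(i,j)*uvelN(i,j)
+      ! = -dxN(i+1,j)/dxN(i,j) * uvelN(i,j), a ghost value chosen so that
+      ! the distance-weighted velocity interpolated to the boundary
+      ! vanishes (consistent with a u = 0 boundary condition). The vE terms
+      ! above and the uE, vN terms below apply the same construction.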
+      ! tension strain rate = e_11 - e_22
+      tensionU = dyU(i,j) * ( uNip1j - uNij ) &
+               - uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) &
+               - dxU(i,j) * ( vEijp1 - vEij ) &
+               + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) )
+
+      uEijp1 = uvelE(i,j+1) * epm(i,j+1) &
+              +(epm(i,j)-epm(i,j+1)) * epm(i,j)   * ratiodyE(i,j)  * uvelE(i,j)
+      uEij   = uvelE(i,j) * epm(i,j) &
+              +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * uvelE(i,j+1)
+      vNip1j = vvelN(i+1,j) * npm(i+1,j) &
+              +(npm(i,j)-npm(i+1,j)) * npm(i,j)   * ratiodxN(i,j)  * vvelN(i,j)
+      vNij   = vvelN(i,j) * npm(i,j) &
+              +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * vvelN(i+1,j)
+
+      ! shearing strain rate = 2*e_12
+      shearU   = dxU(i,j) * ( uEijp1 - uEij ) &
+               - uvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) &
+               + dyU(i,j) * ( vNip1j - vNij ) &
+               + vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) )
+
+      ! Delta (in the denominator of zeta, eta)
+      DeltaU = sqrt(divU**2 + e_factor*(tensionU**2 + shearU**2))
+
+      end subroutine strain_rates_U
+
 !=======================================================================
 
 ! Computes viscous coefficients and replacement pressure for stress
 ! calculations. Note that tensile strength is included here.
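The masked expressions in strain_rates_U (uNip1j, uNij, vEijp1, vEij and their
shear counterparts) read the neighboring N- or E-point velocity where that point
is ocean, and otherwise substitute a ghost value built from the velocity on the
ocean side scaled by the ratiodx/ratiody factors. Since ratiodxN = -dxN(i+1,j)/dxN(i,j),
the ghost value on a uniform grid is simply -uvelN(i,j), consistent with a zero
velocity at the intervening land boundary. A minimal standalone sketch of the
selection logic (the values are made up, and scalars stand in for the 2D arrays):

      program check_masked_neighbor
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind) :: npm_ij, npm_ip1j, ratiodxN_ij, uNij, uNip1j_in, uNip1j

      uNij        = 0.3_dbl_kind    ! uvelN(i,j), arbitrary
      uNip1j_in   = 0.7_dbl_kind    ! uvelN(i+1,j), arbitrary
      ratiodxN_ij = -1.0_dbl_kind   ! -dxN(i+1,j)/dxN(i,j) on a uniform grid

      ! case 1: both N points are ocean -> the real neighbor is used
      npm_ij = 1.0_dbl_kind ; npm_ip1j = 1.0_dbl_kind
      uNip1j = uNip1j_in * npm_ip1j &
             + (npm_ij - npm_ip1j) * npm_ij * ratiodxN_ij * uNij
      write(*,*) 'ocean neighbor:', uNip1j   ! prints 0.7

      ! case 2: the (i+1,j) N point is land -> ghost value -uvelN(i,j)
      npm_ip1j = 0.0_dbl_kind
      uNip1j = uNip1j_in * npm_ip1j &
             + (npm_ij - npm_ip1j) * npm_ij * ratiodxN_ij * uNij
      write(*,*) 'land neighbor: ', uNip1j   ! prints -0.3
      end program check_masked_neighbor

When both points are land, both terms vanish and the contribution is zero.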
From 028037df6bdeefb3e8520dc9a42b8587e5d8e6f9 Mon Sep 17 00:00:00 2001
From: daveh150
Date: Thu, 18 Nov 2021 16:28:59 -0700
Subject: [PATCH 041/109] Cgrid update box tests (#25)

* split up atm and ocn forcing

* Changed restore_ice = false for symtests

* Added more box tests to gridsys_suite.ts. Modified several set_nml
  for use with new islands grid and uniform forcing

* Fix for TPM. This was also fixed by Dave Bailey

* Added CD forcing to boxslotcyl forcing data

* ice_init.F90: moved grid_average_X2Y to be in proper place after
  set_state_var. ice_grid.F90: modified variable declaration in
  grid_average_X2Y to use (:,:,:) for dimensions of passed arrays

* Bug fix in ice_grid.F90:get_box_kmt. Need to first initialize land
  mask to c1 before adding land locations

* ice_grid.F90 - renamed subroutine box_grid_kmt to grid_boxislands_kmt.
  Changed names of set_nml to remove underscores. Updated gridsys_suite
  to be consistent with set_nml.

* revert boxadv to have ktherm=0. Change gridsys_suite.ts options to
  run box test on multiple PEs. Removed boxadv from gridsys_suite.ts

* Clean up blank lines and unused use statements. Removed check for
  uniform atm_data_type in get_forcing_atmo

Co-authored-by: apcraig
---
 cicecore/cicedynB/general/ice_forcing.F90     | 167 ++++++++++++------
 cicecore/cicedynB/general/ice_init.F90        | 100 +++++++----
 cicecore/cicedynB/infrastructure/ice_grid.F90 |  34 ++--
 configuration/scripts/options/set_nml.box2001 |   1 +
 ...set_nml.boxislands => set_nml.boxislandse} |  17 +-
 .../scripts/options/set_nml.boxislandsn       |  43 +++++
 .../scripts/options/set_nml.boxislandsne      |  43 +++++
 .../scripts/options/set_nml.boxslotcyl        |   3 +-
 configuration/scripts/options/set_nml.boxsyme |   3 +-
 configuration/scripts/options/set_nml.boxsymn |   3 +-
 .../scripts/options/set_nml.boxsymne          |   3 +-
 configuration/scripts/options/set_nml.gbox128 |   1 +
 configuration/scripts/options/set_nml.gbox180 |   1 +
 configuration/scripts/options/set_nml.gbox80  |   1 +
 .../scripts/options/set_nml.kmtislands        |   1 +
 configuration/scripts/tests/gridsys_suite.ts  |  25 ++-
 16 files changed, 323 insertions(+), 123 deletions(-)
 rename configuration/scripts/options/{set_nml.boxislands => set_nml.boxislandse} (70%)
 create mode 100644 configuration/scripts/options/set_nml.boxislandsn
 create mode 100644 configuration/scripts/options/set_nml.boxislandsne
 create mode 100644 configuration/scripts/options/set_nml.kmtislands

diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90
index 77da544a6..f3b749f77 100755
--- a/cicecore/cicedynB/general/ice_forcing.F90
+++ b/cicecore/cicedynB/general/ice_forcing.F90
@@ -117,11 +117,11 @@ module ice_forcing
       character(char_len), public :: &
          atm_data_format, & ! 'bin'=binary or 'nc'=netcdf
          ocn_data_format, & ! 'bin'=binary or 'nc'=netcdf
-        atm_data_type,   & ! 'default', 'monthly', 'ncar',
-                           ! 'hadgem' or 'oned' or 'calm'
+        atm_data_type,   & ! 'default', 'monthly', 'ncar', 'box2001'
+                           ! 'hadgem', 'oned', 'calm', 'uniform'
                            ! 'JRA55_gx1' or 'JRA55_gx3' or 'JRA55_tx1'
          bgc_data_type,   & ! 'default', 'clim'
-        ocn_data_type,   & ! 'default', 'clim', 'ncar', 'oned', 'calm',
+        ocn_data_type,   & ! 'default', 'clim', 'ncar', 'oned', 'calm', 'box2001'
                            ! 'hadgem_sst' or 'hadgem_sst_uvocn', 'uniform'
          ice_data_type,   & ! 'default', 'box2001', 'boxslotcyl'
          precip_units       ! 'mm_per_month', 'mm_per_sec', 'mks','m_per_sec'
@@ -309,17 +309,21 @@ subroutine init_forcing_atmo
       elseif (trim(atm_data_type) == 'ISPOL') then
          call ISPOL_files
       elseif (trim(atm_data_type) == 'box2001') then
-        call box2001_data
+        call box2001_data_atm
       elseif (trim(atm_data_type) == 'uniform_northeast') then
-        call uniform_data('NE')
+        call uniform_data_atm('NE')
       elseif (trim(atm_data_type) == 'uniform_east') then
-        call uniform_data('E')
+        call uniform_data_atm('E')
       elseif (trim(atm_data_type) == 'uniform_north') then
-        call uniform_data('N')
+        call uniform_data_atm('N')
       elseif (trim(atm_data_type) == 'calm') then
-        call uniform_data('N',c0) ! direction does not matter when c0
+        call uniform_data_atm('N',c0) ! direction does not matter when c0
       elseif (trim(atm_data_type) == 'hycom') then
          call hycom_atm_files
+     elseif (trim(atm_data_type) == 'default') then
+        ! don't need to do anything more
+     else
+        call abort_ice (error_message=subname//' ERROR atm_data_type unknown = '//trim(atm_data_type), file=__FILE__, line=__LINE__)
       endif
 
       end subroutine init_forcing_atmo
@@ -473,10 +477,7 @@ subroutine init_forcing_ocn(dt)
          enddo
          !$OMP END PARALLEL DO
 
-     endif                     ! init_sst_data
-
-
-     if (trim(ocn_data_type) == 'hadgem_sst' .or.  &
+     elseif (trim(ocn_data_type) == 'hadgem_sst' .or.  &
          trim(ocn_data_type) == 'hadgem_sst_uvocn') then
 
          diag = .true.   ! write diagnostic information
@@ -508,30 +509,29 @@ subroutine init_forcing_ocn(dt)
          enddo
          !$OMP END PARALLEL DO
 
-     endif  ! ocn_data_type
-
-     if (trim(ocn_data_type) == 'ncar') then
+     elseif (trim(ocn_data_type) == 'ncar') then
          call ocn_data_ncar_init
 !         call ocn_data_ncar_init_3D
-     endif
 
-     if (trim(ocn_data_type) == 'hycom') then
+     elseif (trim(ocn_data_type) == 'hycom') then
          call ocn_data_hycom_init
-     endif
+
+     elseif (trim(ocn_data_type) == 'box2001') then
+        call box2001_data_ocn
 
       ! uniform forcing options
-     if (trim(ocn_data_type) == 'uniform_northeast') then
+     elseif (trim(ocn_data_type) == 'uniform_northeast') then
          call uniform_data_ocn('NE',p1)
-     endif
-     if (trim(ocn_data_type) == 'uniform_east') then
+     elseif (trim(ocn_data_type) == 'uniform_east') then
          call uniform_data_ocn('E',p1)
-     endif
-     if (trim(ocn_data_type) == 'uniform_north') then
+     elseif (trim(ocn_data_type) == 'uniform_north') then
          call uniform_data_ocn('N',p1)
-     endif
-
-     if (trim(ocn_data_type) == 'calm') then
+     elseif (trim(ocn_data_type) == 'calm') then
          call uniform_data_ocn('N',c0) ! direction does not matter for c0
+     elseif (trim(ocn_data_type) == 'default') then
+        ! don't need to do anything more
+     else
+        call abort_ice (error_message=subname//' ERROR ocn_data_type unknown = '//trim(ocn_data_type), file=__FILE__, line=__LINE__)
       endif
 
       end subroutine init_forcing_ocn
@@ -648,17 +648,12 @@ subroutine get_forcing_atmo
       elseif (trim(atm_data_type) == 'oned') then
          call oned_data
       elseif (trim(atm_data_type) == 'box2001') then
-        call box2001_data
-     elseif (trim(atm_data_type) == 'uniform_northeast') then
-        ! dah: uniform options included here to allow call to prepare_forcing
-        ! is prepare_forcing required? zlvl0 and precip options are set in prepare_forcing.
-        ! call uniform_data('NE')
-     elseif (trim(atm_data_type) == 'uniform_east') then
-        ! call uniform_data('E')
-     elseif (trim(atm_data_type) == 'uniform_north') then
-        ! call uniform_data('N')
+        call box2001_data_atm
       elseif (trim(atm_data_type) == 'hycom') then
          call hycom_atm_data
+     !elseif (trim(atm_data_type) == 'uniform_northeast') then
+     !elseif (trim(atm_data_type) == 'uniform_east') then
+     !elseif (trim(atm_data_type) == 'uniform_north') then
       else    ! default values set in init_flux
          return
       endif
@@ -756,6 +751,18 @@ subroutine get_forcing_ocn (dt)
       elseif (trim(ocn_data_type) == 'hycom') then
 !        call ocn_data_hycom(dt)
 !MHRI: NOT IMPLEMENTED YET
+     elseif (trim(ocn_data_type) == 'box2001') then
+        call box2001_data_ocn
+     ! uniform forcing options
+     elseif (trim(ocn_data_type) == 'uniform_northeast') then
+! tcraig, not time varying
+        call uniform_data_ocn('NE',p1)
+     elseif (trim(ocn_data_type) == 'uniform_east') then
+        call uniform_data_ocn('E',p1)
+     elseif (trim(ocn_data_type) == 'uniform_north') then
+        call uniform_data_ocn('N',p1)
+     elseif (trim(ocn_data_type) == 'calm') then
+        call uniform_data_ocn('N',c0) ! direction does not matter for c0
       endif
 
       call ice_timer_stop(timer_forcing)
@@ -5277,7 +5284,7 @@ end subroutine ocn_data_ispol_init
 
 !=======================================================================
 !
-      subroutine box2001_data
+      subroutine box2001_data_atm
 
 ! wind and current fields as in Hunke, JCP 2001
! 
these are defined at the u point @@ -5287,7 +5294,7 @@ subroutine box2001_data use ice_domain_size, only: max_blocks use ice_calendar, only: timesecs use ice_blocks, only: nx_block, ny_block, nghost - use ice_flux, only: uocn, vocn, uatm, vatm, wind, rhoa, strax, stray + use ice_flux, only: uatm, vatm, wind, rhoa, strax, stray use ice_grid, only: uvm, grid_average_X2Y use ice_state, only: aice @@ -5302,7 +5309,7 @@ subroutine box2001_data real (kind=dbl_kind) :: & secday, pi , puny, period, pi2, tau - character(len=*), parameter :: subname = '(box2001_data)' + character(len=*), parameter :: subname = '(box2001_data_atm)' if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' @@ -5317,15 +5324,16 @@ subroutine box2001_data do j = 1, ny_block do i = 1, nx_block - ! ocean current - ! constant in time, could be initialized in ice_flux.F90 - uocn(i,j,iblk) = p2*real(j-nghost, kind=dbl_kind) & - / real(nx_global,kind=dbl_kind) - p1 - vocn(i,j,iblk) = -p2*real(i-nghost, kind=dbl_kind) & - / real(ny_global,kind=dbl_kind) + p1 - - uocn(i,j,iblk) = uocn(i,j,iblk) * uvm(i,j,iblk) - vocn(i,j,iblk) = vocn(i,j,iblk) * uvm(i,j,iblk) +!tcraig, move to box2001_data_ocn +! ! ocean current +! ! constant in time, could be initialized in ice_flux.F90 +! uocn(i,j,iblk) = p2*real(j-nghost, kind=dbl_kind) & +! / real(nx_global,kind=dbl_kind) - p1 +! vocn(i,j,iblk) = -p2*real(i-nghost, kind=dbl_kind) & +! / real(ny_global,kind=dbl_kind) + p1 +! +! uocn(i,j,iblk) = uocn(i,j,iblk) * uvm(i,j,iblk) +! vocn(i,j,iblk) = vocn(i,j,iblk) * uvm(i,j,iblk) ! wind components uatm(i,j,iblk) = c5 + (sin(pi2*timesecs/period)-c3) & @@ -5372,18 +5380,67 @@ subroutine box2001_data enddo enddo ! nblocks - end subroutine box2001_data + end subroutine box2001_data_atm + +!======================================================================= +! + subroutine box2001_data_ocn + +! wind and current fields as in Hunke, JCP 2001 +! these are defined at the u point +! authors: Elizabeth Hunke, LANL + + use ice_domain, only: nblocks + use ice_domain_size, only: max_blocks + use ice_calendar, only: timesecs + use ice_blocks, only: nx_block, ny_block, nghost + use ice_flux, only: uocn, vocn + use ice_grid, only: uvm + + ! local parameters + + integer (kind=int_kind) :: & + iblk, i,j ! loop indices + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + aiu ! ice fraction on u-grid + + real (kind=dbl_kind) :: & + secday, pi , puny, period, pi2, tau + + character(len=*), parameter :: subname = '(box2001_data_ocn)' + + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + + ! ocean current + ! constant in time, could be initialized in ice_flux.F90 + uocn(i,j,iblk) = p2*real(j-nghost, kind=dbl_kind) & + / real(nx_global,kind=dbl_kind) - p1 + vocn(i,j,iblk) = -p2*real(i-nghost, kind=dbl_kind) & + / real(ny_global,kind=dbl_kind) + p1 + + uocn(i,j,iblk) = uocn(i,j,iblk) * uvm(i,j,iblk) + vocn(i,j,iblk) = vocn(i,j,iblk) * uvm(i,j,iblk) + + enddo + enddo + enddo ! nblocks + + end subroutine box2001_data_ocn !======================================================================= ! - subroutine uniform_data(dir,spd) + subroutine uniform_data_atm(dir,spd) ! 
uniform wind fields in some direction use ice_domain, only: nblocks use ice_domain_size, only: max_blocks use ice_blocks, only: nx_block, ny_block, nghost - use ice_flux, only: uocn, vocn, uatm, vatm, wind, rhoa, strax, stray - use ice_grid, only: grid_average_X2Y + use ice_flux, only: uatm, vatm, wind, rhoa, strax, stray character(len=*), intent(in) :: dir real(kind=dbl_kind), intent(in), optional :: spd ! speed for test @@ -5397,7 +5454,7 @@ subroutine uniform_data(dir,spd) tau, & atm_val ! value to use for atm speed - character(len=*), parameter :: subname = '(uniform_data)' + character(len=*), parameter :: subname = '(uniform_data_atm)' if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' @@ -5417,7 +5474,6 @@ subroutine uniform_data(dir,spd) vatm = atm_val elseif (dir == 'E') then uatm = atm_val - vatm = c0 else call abort_ice (subname//'ERROR: dir unknown, dir = '//trim(dir), & @@ -5438,7 +5494,7 @@ subroutine uniform_data(dir,spd) enddo enddo ! nblocks - end subroutine uniform_data + end subroutine uniform_data_atm !======================================================================= ! @@ -5449,8 +5505,7 @@ subroutine uniform_data_ocn(dir,spd) use ice_domain, only: nblocks use ice_domain_size, only: max_blocks use ice_blocks, only: nx_block, ny_block, nghost - use ice_flux, only: uocn, vocn, uatm, vatm, wind, strax, stray - use ice_grid, only: grid_average_X2Y + use ice_flux, only: uocn, vocn character(len=*), intent(in) :: dir diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 125b73e95..95223d7ae 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -2134,13 +2134,17 @@ end subroutine input_data subroutine init_state use ice_blocks, only: block, get_block, nx_block, ny_block - use ice_domain, only: nblocks, blocks_ice + use ice_domain, only: nblocks, blocks_ice, halo_info use ice_domain_size, only: ncat, nilyr, nslyr, n_iso, n_aero, nfsd use ice_flux, only: sst, Tf, Tair, salinz, Tmltz - use ice_grid, only: tmask, ULON, TLAT + use ice_grid, only: tmask, ULON, TLAT, grid_system, grid_average_X2Y + use ice_boundary, only: ice_HaloUpdate + use ice_forcing, only: ice_data_type + use ice_constants, only: field_loc_Nface, field_loc_Eface, field_type_scalar use ice_state, only: trcr_depend, aicen, trcrn, vicen, vsnon, & aice0, aice, vice, vsno, trcr, aice_init, bound_state, & - n_trcr_strata, nt_strata, trcr_base, uvel, vvel + n_trcr_strata, nt_strata, trcr_base, uvel, vvel, & + uvelN, vvelN, uvelE, vvelE integer (kind=int_kind) :: & ilo, ihi , & ! physical domain indices @@ -2372,6 +2376,30 @@ subroutine init_state vicen, vsnon, & ntrcr, trcrn) + if (trim(grid_system) == 'CD') then + + ! move from B-grid to CD-grid for boxslotcyl test + if (trim(ice_data_type) == 'boxslotcyl') then + call grid_average_X2Y('U2NS',uvel,uvelN) + call grid_average_X2Y('U2NS',vvel,vvelN) + call grid_average_X2Y('U2ES',uvel,uvelE) + call grid_average_X2Y('U2ES',vvel,vvelE) + endif + + ! Halo update on North, East faces + call ice_HaloUpdate(uvelN, halo_info, & + field_loc_Nface, field_type_scalar) + call ice_HaloUpdate(vvelN, halo_info, & + field_loc_Nface, field_type_scalar) + + call ice_HaloUpdate(uvelE, halo_info, & + field_loc_Eface, field_type_scalar) + call ice_HaloUpdate(vvelE, halo_info, & + field_loc_Eface, field_type_scalar) + + endif + + !----------------------------------------------------------------- ! 
compute aggregate ice state and open water area !----------------------------------------------------------------- @@ -2439,6 +2467,7 @@ subroutine set_state_var (nx_block, ny_block, & vicen, vsnon, & uvel, vvel) + use ice_arrays_column, only: hin_max use ice_domain_size, only: nilyr, nslyr, nx_global, ny_global, ncat use ice_grid, only: grid_type @@ -2480,11 +2509,10 @@ subroutine set_state_var (nx_block, ny_block, & ! 1: surface temperature of ice/snow (C) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - uvel , & ! ice velocity + uvel , & ! ice velocity B grid vvel ! ! local variables - integer (kind=int_kind) :: & i, j , & ! horizontal indices ij , & ! horizontal index, combines i and j loops @@ -2578,13 +2606,6 @@ subroutine set_state_var (nx_block, ny_block, & if (trim(ice_ic) == 'default') then - !----------------------------------------------------------------- - ! Place ice where ocean surface is cold. - ! Note: If SST is not read from a file, then the ocean is assumed - ! to be at its freezing point everywhere, and ice will - ! extend to the prescribed edges. - !----------------------------------------------------------------- - if (trim(ice_data_type) == 'box2001') then hbar = c2 ! initial ice thickness @@ -2611,25 +2632,25 @@ subroutine set_state_var (nx_block, ny_block, & else - ! initial category areas in cells with ice - hbar = c3 ! initial ice thickness with greatest area - ! Note: the resulting average ice thickness - ! tends to be less than hbar due to the - ! nonlinear distribution of ice thicknesses - sum = c0 - do n = 1, ncat - if (n < ncat) then - hinit(n) = p5*(hin_max(n-1) + hin_max(n)) ! m - else ! n=ncat - hinit(n) = (hin_max(n-1) + c1) ! m - endif - ! parabola, max at h=hbar, zero at h=0, 2*hbar - ainit(n) = max(c0, (c2*hbar*hinit(n) - hinit(n)**2)) - sum = sum + ainit(n) - enddo - do n = 1, ncat - ainit(n) = ainit(n) / (sum + puny/ncat) ! normalize - enddo + ! initial category areas in cells with ice + hbar = c3 ! initial ice thickness with greatest area + ! Note: the resulting average ice thickness + ! tends to be less than hbar due to the + ! nonlinear distribution of ice thicknesses + sum = c0 + do n = 1, ncat + if (n < ncat) then + hinit(n) = p5*(hin_max(n-1) + hin_max(n)) ! m + else ! n=ncat + hinit(n) = (hin_max(n-1) + c1) ! m + endif + ! parabola, max at h=hbar, zero at h=0, 2*hbar + ainit(n) = max(c0, (c2*hbar*hinit(n) - hinit(n)**2)) + sum = sum + ainit(n) + enddo + do n = 1, ncat + ainit(n) = ainit(n) / (sum + puny/ncat) ! normalize + enddo endif ! ice_data_type @@ -2665,7 +2686,13 @@ subroutine set_state_var (nx_block, ny_block, & else ! default behavior - ! place ice at high latitudes where ocean sfc is cold + !----------------------------------------------------------------- + ! Place ice where ocean surface is cold. + ! Note: If SST is not read from a file, then the ocean is assumed + ! to be at its freezing point everywhere, and ice will + ! extend to the prescribed edges. + !----------------------------------------------------------------- + icells = 0 do j = jlo, jhi do i = ilo, ihi @@ -2673,7 +2700,7 @@ subroutine set_state_var (nx_block, ny_block, & ! place ice in high latitudes where ocean sfc is cold if ( (sst (i,j) <= Tf(i,j)+p2) .and. & (TLAT(i,j) < edge_init_sh/rad_to_deg .or. 
&
-               TLAT(i,j) > edge_init_nh/rad_to_deg) ) then
+              TLAT(i,j) > edge_init_nh/rad_to_deg) ) then
                icells = icells + 1
                indxi(icells) = i
                indxj(icells) = j
@@ -2726,9 +2753,10 @@ subroutine set_state_var (nx_block, ny_block, &
                endif
                vicen(i,j,n) = hinit(n) * aicen(i,j,n) ! m
 
-            else ! default case. ice_data_type = uniform
+            else ! default or uniform
 
                vicen(i,j,n) = hinit(n) * ainit(n) ! m
+
             endif ! ice_data_type
 
             vsnon(i,j,n) = min(aicen(i,j,n)*hsno_init,p2*vicen(i,j,n))
@@ -2757,7 +2785,8 @@ subroutine set_state_var (nx_block, ny_block, &
          enddo                  ! ij
       enddo                     ! ncat
 
-     ! velocity initialization for special tests
+     ! velocity initialization for special tests.
+     ! these velocities are defined on the B grid
      if (trim(ice_data_type) == 'boxslotcyl') then
         do j = 1, ny_block
         do i = 1, nx_block
@@ -2880,7 +2909,6 @@ subroutine boxslotcyl_data_vel(i, j, &
          uvel, vvel          ! ice velocity
 
       ! local variables
-
       real (kind=dbl_kind) :: &
          pi     , &          ! pi
          secday , &          ! seconds per day
diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90
index 6a3cf1171..ad5fb32da 100644
--- a/cicecore/cicedynB/infrastructure/ice_grid.F90
+++ b/cicecore/cicedynB/infrastructure/ice_grid.F90
@@ -1375,7 +1375,7 @@ subroutine rectgrid
 
          if (trim(kmt_type) == 'boxislands') then
 
-           call get_box_kmt(work_g1)
+           call grid_boxislands_kmt(work_g1)
 
          else ! default
 
@@ -1391,7 +1391,7 @@ subroutine rectgrid
 
          if (trim(kmt_type) == 'boxislands') then
 
-           call get_box_kmt(work_g1)
+           call grid_boxislands_kmt(work_g1)
 
         else ! default
 
@@ -1452,7 +1452,7 @@ end subroutine rectgrid
 ! Assumes work array has been initialized to 1 (ocean) and north and
 ! south land boundaries have been applied (ew_boundary_type='cyclic')
 
-      subroutine get_box_kmt (work)
+      subroutine grid_boxislands_kmt (work)
 
       use ice_constants, only: c0, c1, c20
 
@@ -1462,7 +1462,7 @@ subroutine get_box_kmt (work)
          i, j, k, &    ! indices
          nxb, nyb      ! convenient cell-block sizes for building the mask
 
-      character(len=*), parameter :: subname = '(get_box_kmt)'
+      character(len=*), parameter :: subname = '(grid_boxislands_kmt)'
 
       ! number of cells in 5% of global grid x and y lengths
       nxb = int(real(nx_global, dbl_kind) / c20, int_kind)
@@ -1471,6 +1471,10 @@ subroutine get_box_kmt (work)
       if (nxb < 1 .or. nyb < 1) &
          call abort_ice(subname//'ERROR: requires larger grid size')
 
+      ! initialize work area as all ocean (c1).
+      work(:,:) = c1
+
+      ! now add land points (c0)
       ! northeast triangle
       k = 0
       do j = ny_global, ny_global-3*nyb, -1
@@ -1567,7 +1571,7 @@ subroutine get_box_kmt (work)
       enddo
       enddo
 
-      end subroutine get_box_kmt
+      end subroutine grid_boxislands_kmt
 
 !=======================================================================
 
@@ -2317,10 +2321,10 @@ subroutine grid_average_X2Y(X2Y,work1,work2)
          X2Y
 
       real (kind=dbl_kind), intent(inout) :: &
-        work1(nx_block,ny_block,max_blocks)
+        work1(:,:,:)
 
       real (kind=dbl_kind), intent(out), optional :: &
-        work2(nx_block,ny_block,max_blocks)
+        work2(:,:,:)
 
       ! local variables
 
@@ -2411,12 +2415,12 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2)
         dir
 
      real (kind=dbl_kind), intent(in) :: &
-       work1(nx_block,ny_block,max_blocks), &
-       area1(nx_block,ny_block,max_blocks), &
-       mask1(nx_block,ny_block,max_blocks)
+       work1(:,:,:), &
+       area1(:,:,:), &
+       mask1(:,:,:)
 
      real (kind=dbl_kind), intent(out) :: &
-       work2(nx_block,ny_block,max_blocks)
+       work2(:,:,:)
 
      ! local variables
 
@@ -2642,12 +2646,12 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2)
        dir
 
      real (kind=dbl_kind), intent(in) :: &
-       work1(nx_block,ny_block,max_blocks), &
-       area1(nx_block,ny_block,max_blocks), &
-       area2(nx_block,ny_block,max_blocks)
+       work1(:,:,:), &
+       area1(:,:,:), &
+       area2(:,:,:)
 
      real (kind=dbl_kind), intent(out) :: &
-       work2(nx_block,ny_block,max_blocks)
+       work2(:,:,:)
 
      ! local variables
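The hunks above change grid_average_X2Y's dummy arrays from explicit shape,
(nx_block,ny_block,max_blocks), to assumed shape, (:,:,:). With assumed shape the
routine picks up its extents from the actual argument rather than hard-wiring the
module block sizes; the only language requirement is an explicit interface, which
these routines already have as module procedures. A small self-contained
illustration of the point (the module and variable names here are invented for
the demo):

      module shape_demo
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      contains
      subroutine print_extents(work)
         ! assumed shape: extents come from the actual argument
         real (kind=dbl_kind), intent(in) :: work(:,:,:)
         write(*,*) 'extents:', size(work,1), size(work,2), size(work,3)
      end subroutine print_extents
      end module shape_demo

      program test_shape_demo
      use shape_demo
      implicit none
      real (kind=dbl_kind) :: a(4,3,2), b(5,5,1)
      a = 0.0_dbl_kind
      b = 1.0_dbl_kind
      call print_extents(a)   ! prints 4 3 2
      call print_extents(b)   ! prints 5 5 1
      end program test_shape_demo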
diff --git a/configuration/scripts/options/set_nml.box2001 b/configuration/scripts/options/set_nml.box2001
index c166b4217..c087466c1 100644
--- a/configuration/scripts/options/set_nml.box2001
+++ b/configuration/scripts/options/set_nml.box2001
@@ -22,6 +22,7 @@ ktransport = -1
 coriolis = 'constant'
 atmbndy = 'constant'
 atm_data_type = 'box2001'
+ocn_data_type = 'box2001'
 ice_data_type = 'box2001'
 calc_strair = .false.
 restore_ice = .true.
diff --git a/configuration/scripts/options/set_nml.boxislands b/configuration/scripts/options/set_nml.boxislandse
similarity index 70%
rename from configuration/scripts/options/set_nml.boxislands
rename to configuration/scripts/options/set_nml.boxislandse
index eb39a4e79..d27b26a8d 100644
--- a/configuration/scripts/options/set_nml.boxislands
+++ b/configuration/scripts/options/set_nml.boxislandse
@@ -1,13 +1,14 @@
 npt = 48
 kmt_type = 'boxislands'
-ice_ic = 'default'
+ice_ic           = 'default'
+use_leap_years   = .false.
 histfreq = 'd','x','x','x','x'
-grid_type = 'rectangular'
+grid_type        = 'rectangular'
 dxrect = 16.e5
 dyrect = 16.e5
 close_boundaries = .false.
-ew_boundary_type = 'cyclic'
-ns_boundary_type = 'open'
+ew_boundary_type = 'cyclic'
+ns_boundary_type = 'open'
 ktherm = -1
 kstrength = 0
 kdyn = 1
@@ -15,10 +16,14 @@ kridge = -1
 ktransport = -1
 coriolis = 'constant'
 atmbndy = 'constant'
-atm_data_type = 'box2001'
-ice_data_type = 'box2001'
+atm_data_type = 'uniform_east'
+ice_data_type = 'uniform'
+rotate_wind = .false.
 calc_strair = .false.
 restore_ice = .false.
+tr_iage = .false.
+tr_FY = .false.
+tr_pond_lvl = .false.
 f_aice = 'd'
 f_hi = 'd'
 f_hs = 'd'
diff --git a/configuration/scripts/options/set_nml.boxislandsn b/configuration/scripts/options/set_nml.boxislandsn
new file mode 100644
index 000000000..48ee103f5
--- /dev/null
+++ b/configuration/scripts/options/set_nml.boxislandsn
@@ -0,0 +1,43 @@
+npt = 48
+kmt_type = 'boxislands'
+ice_ic = 'default'
+use_leap_years = .false.
+histfreq = 'd','x','x','x','x'
+grid_type = 'rectangular'
+dxrect = 16.e5
+dyrect = 16.e5
+close_boundaries = .false.
+ew_boundary_type = 'cyclic'
+ns_boundary_type = 'open'
+ktherm = -1
+kstrength = 0
+kdyn = 1
+kridge = -1
+ktransport = -1
+coriolis = 'constant'
+atmbndy = 'constant'
+atm_data_type = 'uniform_north'
+ice_data_type = 'uniform'
+rotate_wind = .false.
+calc_strair = .false.
+restore_ice = .false.
+tr_iage = .false.
+tr_FY = .false.
+tr_pond_lvl = .false.
+f_aice = 'd'
+f_hi = 'd'
+f_hs = 'd'
+f_Tsfc = 'd'
+f_uvel = 'd'
+f_vvel = 'd'
+f_uatm = 'd'
+f_vatm = 'd'
+f_uocn = 'd'
+f_vocn = 'd'
+f_strairx = 'd'
+f_strairy = 'd'
+f_strocnx = 'd'
+f_strocny = 'd'
+f_divu = 'd'
+f_sig1 = 'd'
+f_sig2 = 'd'
diff --git a/configuration/scripts/options/set_nml.boxislandsne b/configuration/scripts/options/set_nml.boxislandsne
new file mode 100644
index 000000000..3dec1d246
--- /dev/null
+++ b/configuration/scripts/options/set_nml.boxislandsne
@@ -0,0 +1,43 @@
+npt = 48
+kmt_type = 'boxislands'
+ice_ic = 'default'
+use_leap_years = .false.
+histfreq = 'd','x','x','x','x'
+grid_type = 'rectangular'
+dxrect = 16.e5
+dyrect = 16.e5
+close_boundaries = .false.
+ew_boundary_type = 'cyclic' +ns_boundary_type = 'open' +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'constant' +atmbndy = 'constant' +atm_data_type = 'uniform_northeast' +ice_data_type = 'uniform' +rotate_wind = .false. +calc_strair = .false. +restore_ice = .false. +tr_iage = .false. +tr_FY = .false. +tr_pond_lvl = .false. +f_aice = 'd' +f_hi = 'd' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd' +f_vvel = 'd' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd' +f_strairy = 'd' +f_strocnx = 'd' +f_strocny = 'd' +f_divu = 'd' +f_sig1 = 'd' +f_sig2 = 'd' diff --git a/configuration/scripts/options/set_nml.boxslotcyl b/configuration/scripts/options/set_nml.boxslotcyl index 9985cef13..b38d0efce 100644 --- a/configuration/scripts/options/set_nml.boxslotcyl +++ b/configuration/scripts/options/set_nml.boxslotcyl @@ -4,13 +4,14 @@ restart_ext = .false. dt = 3600.0 npt = 288 grid_type = 'rectangular' +kmt_type = 'default' dxrect = 10.e5 dyrect = 10.e5 kcatbound = 2 ew_boundary_type = 'open' ns_boundary_type = 'open' close_boundaries = .true. -tr_lvl = .false. +tr_lvl = .false. tr_pond_lvl = .false. ktherm = -1 kdyn = -1 diff --git a/configuration/scripts/options/set_nml.boxsyme b/configuration/scripts/options/set_nml.boxsyme index ca8f81e63..01be549b4 100644 --- a/configuration/scripts/options/set_nml.boxsyme +++ b/configuration/scripts/options/set_nml.boxsyme @@ -23,10 +23,11 @@ ktransport = -1 coriolis = 'constant' atmbndy = 'constant' atm_data_type = 'uniform_east' +ocn_data_type = 'calm' ice_data_type = 'uniform' calc_strair = .false. rotate_wind = .false. -restore_ice = .true. +restore_ice = .false. f_aice = 'd1' f_hi = 'd1' f_hs = 'd' diff --git a/configuration/scripts/options/set_nml.boxsymn b/configuration/scripts/options/set_nml.boxsymn index 2a1449ae2..66fa95a9e 100644 --- a/configuration/scripts/options/set_nml.boxsymn +++ b/configuration/scripts/options/set_nml.boxsymn @@ -23,10 +23,11 @@ ktransport = -1 coriolis = 'constant' atmbndy = 'constant' atm_data_type = 'uniform_north' +ocn_data_type = 'calm' ice_data_type = 'uniform' calc_strair = .false. rotate_wind = .false. -restore_ice = .true. +restore_ice = .false. f_aice = 'd1' f_hi = 'd1' f_hs = 'd' diff --git a/configuration/scripts/options/set_nml.boxsymne b/configuration/scripts/options/set_nml.boxsymne index af38fa6fe..54add03b8 100644 --- a/configuration/scripts/options/set_nml.boxsymne +++ b/configuration/scripts/options/set_nml.boxsymne @@ -23,10 +23,11 @@ ktransport = -1 coriolis = 'constant' atmbndy = 'constant' atm_data_type = 'uniform_northeast' +ocn_data_type = 'calm' ice_data_type = 'uniform' calc_strair = .false. rotate_wind = .false. -restore_ice = .true. +restore_ice = .false. 
f_aice = 'd1' f_hi = 'd1' f_hs = 'd' diff --git a/configuration/scripts/options/set_nml.gbox128 b/configuration/scripts/options/set_nml.gbox128 index 7b139f94a..2371b65ed 100644 --- a/configuration/scripts/options/set_nml.gbox128 +++ b/configuration/scripts/options/set_nml.gbox128 @@ -1,4 +1,5 @@ ice_ic = 'default' grid_type = 'rectangular' atm_data_type = 'box2001' +ocn_data_type = 'box2001' ice_data_type = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox180 b/configuration/scripts/options/set_nml.gbox180 index 7b139f94a..8063bee67 100644 --- a/configuration/scripts/options/set_nml.gbox180 +++ b/configuration/scripts/options/set_nml.gbox180 @@ -1,4 +1,5 @@ ice_ic = 'default' grid_type = 'rectangular' atm_data_type = 'box2001' +ocn_data_type = 'calm' ice_data_type = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox80 b/configuration/scripts/options/set_nml.gbox80 index 7b139f94a..8063bee67 100644 --- a/configuration/scripts/options/set_nml.gbox80 +++ b/configuration/scripts/options/set_nml.gbox80 @@ -1,4 +1,5 @@ ice_ic = 'default' grid_type = 'rectangular' atm_data_type = 'box2001' +ocn_data_type = 'calm' ice_data_type = 'box2001' diff --git a/configuration/scripts/options/set_nml.kmtislands b/configuration/scripts/options/set_nml.kmtislands new file mode 100644 index 000000000..238c67d3f --- /dev/null +++ b/configuration/scripts/options/set_nml.kmtislands @@ -0,0 +1 @@ +kmt_type = 'boxislands' diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts index 0a3d07025..54eeb45ea 100644 --- a/configuration/scripts/tests/gridsys_suite.ts +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -3,14 +3,27 @@ smoke gx3 8x2 diag1,run5day restart gx3 4x2 debug,diag1 smoke gbox80 1x1 box2001 smoke gbox80 1x1 boxslotcyl -smoke gbox80 1x1 boxsymn -smoke gbox80 1x1 boxsyme -smoke gbox80 1x1 boxsymne +smoke gbox80 2x2 boxsymn +smoke gbox80 4x2 boxsyme +smoke gbox80 4x1 boxsymne +smoke gbox80 2x2 boxsymn,kmtislands +smoke gbox80 4x1 boxsyme,kmtislands +smoke gbox80 4x2 boxsymne,kmtislands +smoke gbox80 8x1 boxislandsn +smoke gbox80 4x2 boxislandse +smoke gbox80 2x4 boxislandsne + smoke gx3 8x2 diag1,run5day,gridcd restart gx3 4x2 debug,diag1,gridcd smoke gbox80 1x1 box2001,gridcd smoke gbox80 1x1 boxslotcyl,gridcd -smoke gbox80 1x1 boxsymn,gridcd -smoke gbox80 1x1 boxsyme,gridcd -smoke gbox80 1x1 boxsymne,gridcd +smoke gbox80 2x2 boxsymn,gridcd +smoke gbox80 4x2 boxsyme,gridcd +smoke gbox80 4x1 boxsymne,gridcd +smoke gbox80 2x2 boxsymn,kmtislands,gridcd +smoke gbox80 4x1 boxsyme,kmtislands,gridcd +smoke gbox80 4x2 boxsymne,kmtislands,gridcd +smoke gbox80 8x1 boxislandsn,gridcd +smoke gbox80 4x2 boxislandse,gridcd +smoke gbox80 2x4 boxislandsne,gridcd From f91adf0837c33d6b4805d97e41f407214eb42aaf Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Thu, 18 Nov 2021 17:06:22 -0700 Subject: [PATCH 042/109] Fix some initialization of CD variables. (#27) * Fix some variables initialization on CD variables. 
* Combine iblk loops * Fix some arrangement and alignment * Need to add grid_system * add wtmp to OMP PRIVATE --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 14 +--------- cicecore/cicedynB/general/ice_flux.F90 | 27 +++++++++++++------ cicecore/cicedynB/infrastructure/ice_grid.F90 | 16 +++++------ 3 files changed, 28 insertions(+), 29 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 5f237d9f0..779e84452 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -266,7 +266,7 @@ subroutine evp (dt) call grid_average_X2Y('T2EF',tmass,emass) call grid_average_X2Y('T2EF',aice_init, aie) call grid_average_X2Y('T2NF',tmass,nmass) - call grid_average_X2Y('T2NF',aice_init, aie) + call grid_average_X2Y('T2NF',aice_init, ain) endif !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing @@ -420,22 +420,10 @@ subroutine evp (dt) uvelN (:,:,iblk), vvelN (:,:,iblk), & TbN (:,:,iblk)) - enddo ! iblk - !$TCXOMP END PARALLEL DO - - !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - !----------------------------------------------------------------- ! more preparation for dynamics on E grid !----------------------------------------------------------------- - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & icellt(iblk), icelle(iblk), & diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90 index e17faab6a..c593d91b1 100644 --- a/cicecore/cicedynB/general/ice_flux.F90 +++ b/cicecore/cicedynB/general/ice_flux.F90 @@ -641,7 +641,7 @@ subroutine init_coupler_flux use ice_flux_bgc, only: flux_bio_atm, flux_bio, faero_atm, fiso_atm, & fnit, famm, fsil, fdmsp, fdms, fhum, fdust, falgalN, & fdoc, fdon, fdic, ffed, ffep - use ice_grid, only: bathymetry + use ice_grid, only: bathymetry, grid_system integer (kind=int_kind) :: n @@ -738,13 +738,24 @@ subroutine init_coupler_flux ! fluxes received from ocean !----------------------------------------------------------------- - ss_tltx(:,:,:)= c0 ! sea surface tilt (m/m) - ss_tlty(:,:,:)= c0 - uocn (:,:,:) = c0 ! surface ocean currents (m/s) - vocn (:,:,:) = c0 - frzmlt(:,:,:) = c0 ! freezing/melting potential (W/m^2) - frzmlt_init(:,:,:) = c0 ! freezing/melting potential (W/m^2) - sss (:,:,:) = 34.0_dbl_kind ! sea surface salinity (ppt) + ss_tltx (:,:,:) = c0 ! sea surface tilt (m/m) + ss_tlty (:,:,:) = c0 + uocn (:,:,:) = c0 ! surface ocean currents (m/s) + vocn (:,:,:) = c0 + frzmlt (:,:,:) = c0 ! freezing/melting potential (W/m^2) + frzmlt_init(:,:,:) = c0 ! freezing/melting potential (W/m^2) + sss (:,:,:) = 34.0_dbl_kind ! sea surface salinity (ppt) + + if (grid_system == 'CD') then + ss_tltxN(:,:,:) = c0 ! sea surface tilt (m/m) + ss_tltyN(:,:,:) = c0 + ss_tltxE(:,:,:) = c0 ! sea surface tilt (m/m) + ss_tltyE(:,:,:) = c0 + uocnN (:,:,:) = c0 ! surface ocean currents (m/s) + vocnN (:,:,:) = c0 + uocnE (:,:,:) = c0 ! 
surface ocean currents (m/s)
+        vocnE   (:,:,:) = c0
+     endif
 
       do iblk = 1, size(Tf,3)
       do j = 1, size(Tf,2)
diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90
index ad5fb32da..688b7155e 100644
--- a/cicecore/cicedynB/infrastructure/ice_grid.F90
+++ b/cicecore/cicedynB/infrastructure/ice_grid.F90
@@ -2441,7 +2441,7 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2)
       select case (trim(dir))
 
       case('NE')
-        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block)
+        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp)
          do iblk = 1, nblocks
             this_block = get_block(blocks_ice(iblk),iblk)
             ilo = this_block%ilo
@@ -2466,7 +2466,7 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2)
          !$OMP END PARALLEL DO
 
       case('SW')
-        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block)
+        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp)
          do iblk = 1, nblocks
             this_block = get_block(blocks_ice(iblk),iblk)
             ilo = this_block%ilo
@@ -2491,7 +2491,7 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2)
          !$OMP END PARALLEL DO
 
       case('NW')
-        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block)
+        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp)
          do iblk = 1, nblocks
             this_block = get_block(blocks_ice(iblk),iblk)
             ilo = this_block%ilo
@@ -2516,7 +2516,7 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2)
          !$OMP END PARALLEL DO
 
       case('SE')
-        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block)
+        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp)
          do iblk = 1, nblocks
             this_block = get_block(blocks_ice(iblk),iblk)
             ilo = this_block%ilo
@@ -2541,7 +2541,7 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2)
          !$OMP END PARALLEL DO
 
       case('E')
-        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block)
+        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp)
          do iblk = 1, nblocks
            this_block = get_block(blocks_ice(iblk),iblk)
            ilo = this_block%ilo
@@ -2562,7 +2562,7 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2)
          !$OMP END PARALLEL DO
 
       case('W')
-        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block)
+        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp)
          do iblk = 1, nblocks
            this_block = get_block(blocks_ice(iblk),iblk)
            ilo = this_block%ilo
@@ -2583,7 +2583,7 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2)
          !$OMP END PARALLEL DO
 
       case('N')
-        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block)
+        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp)
          do iblk = 1, nblocks
            this_block = get_block(blocks_ice(iblk),iblk)
            ilo = this_block%ilo
@@ -2604,7 +2604,7 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2)
          !$OMP END PARALLEL DO
 
       case('S')
-        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block)
+        !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp)
          do iblk = 1, nblocks
            this_block = get_block(blocks_ice(iblk),iblk)
            ilo = this_block%ilo
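Patch 042's last bullet ("add wtmp to OMP PRIVATE") is the fix shown in the hunks
above: wtmp is a per-iteration scratch scalar, and without PRIVATE every thread
would read and write one shared copy, a data race. A minimal sketch of the
pattern (hypothetical names, not CICE code; it compiles with or without OpenMP
since the directives are comments to a serial compiler):

      program omp_private_demo
      implicit none
      integer :: i, wtmp
      integer :: out(8)

      ! without wtmp in PRIVATE, all threads would share one wtmp and the
      ! result would be timing-dependent; with it, each thread gets a copy
      !$OMP PARALLEL DO PRIVATE(i,wtmp)
      do i = 1, 8
         wtmp   = 10*i
         out(i) = wtmp + 1
      enddo
      !$OMP END PARALLEL DO

      write(*,*) out   ! 11 21 31 41 51 61 71 81
      end program omp_private_demo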
From bc38c073a0c058e8e79baf1e4df545464d570f45 Mon Sep 17 00:00:00 2001
From: Philippe Blain
Date: Fri, 19 Nov 2021 11:35:39 -0500
Subject: [PATCH 043/109] Add CD-grid stress arrays (#28)

* ice_flux: add stress arrays at CD-grid locations (T,U)

* ice_dyn_shared: initialize CD-grid stress arrays

* infrastructure: add CD-grid stress arrays to restarts

* ice_dyn_shared: generalize 'principal_stress' arguments names

  In a subsequent commit we will call 'principal_stress' with the
  CD-grid stress arrays 'stress{p,m,12}T' to compute the principal
  stresses at the tracer point when using the CD grid. In that light,
  remove the '_1' suffix from the stress arguments since they won't
  always be located at the NE corner anymore.

* ice_history: compute 'sig[12P]' at T-point for CD-grid

  For the CD-grid, compute the principal stresses and the ice pressure
  at the T point by passing the appropriate arrays to
  'principal_stress'. These three history variables are computed using
  the NE-corner values on the B-grid, but this is not reflected in the
  description of the fields in the history output. Add the location at
  which the stresses are computed to the comment argument in the call
  to 'define_hist_field', for both the B and CD grid.

* ice_dyn_evp: pass 'stress{p,m,12}[TU]' to dyn_prep2

  For the CD-grid, the 'stress{p,m,12}_[1-4]' arrays are not used. Pass
  the CD-grid location stress arrays 'stress{p,m,12}[TU]' to
  'dyn_prep2', which zero-initializes them anywhere icetmask is zero.
  Also, add a TODO in the `if (grid_type == 'tripole')` block, since it
  is not yet clear how the halo updates should be done for the new
  stress arrays on the tripole grid.
---
 cicecore/cicedynB/analysis/ice_history.F90    | 50 +++++++++++++------
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90    | 17 ++++---
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 35 ++++++++-----
 cicecore/cicedynB/general/ice_flux.F90        | 11 +++-
 .../infrastructure/ice_restart_driver.F90     | 37 ++++++++++++--
 .../io/io_netcdf/ice_restart.F90              | 10 ++++
 .../infrastructure/io/io_pio2/ice_restart.F90 | 10 ++++
 7 files changed, 132 insertions(+), 38 deletions(-)

diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90
index 4ade53cfb..e0855aa1c 100644
--- a/cicecore/cicedynB/analysis/ice_history.F90
+++ b/cicecore/cicedynB/analysis/ice_history.F90
@@ -94,6 +94,7 @@ subroutine init_hist (dt)
       integer (kind=int_kind), dimension(max_nstrm) :: &
          ntmp
       integer (kind=int_kind) :: nml_error ! namelist i/o error flag
+      character(len=char_len) :: description
 
       character(len=*), parameter :: subname = '(init_hist)'
 
      !-----------------------------------------------------------------
@@ -1199,19 +1200,26 @@ subroutine init_hist (dt)
            "none", secday*c100, c0,            &
            ns1, f_shear)
 
+      select case (grid_system)
+      case('B')
+         description = ", on U grid (NE corner values)"
+      case ('CD')
+         description = ", on T grid"
+      end select
+
       call define_hist_field(n_sig1,"sig1","1",ustr2D, ucstr, &
           "norm. principal stress 1", &
-         "sig1 is instantaneous", c1, c0, &
+         "sig1 is instantaneous" // trim(description), c1, c0, &
          ns1, f_sig1)
 
       call define_hist_field(n_sig2,"sig2","1",ustr2D, ucstr, &
          "norm. 
principal stress 2", & - "sig2 is instantaneous", c1, c0, & + "sig2 is instantaneous" // trim(description), c1, c0, & ns1, f_sig2) call define_hist_field(n_sigP,"sigP","1",ustr2D, ucstr, & "ice pressure", & - "sigP is instantaneous", c1, c0, & + "sigP is instantaneous" // trim(description), c1, c0, & ns1, f_sigP) call define_hist_field(n_dvidtt,"dvidtt","cm/day",tstr2D, tcstr, & @@ -1979,7 +1987,7 @@ subroutine accum_hist (dt) use ice_blocks, only: block, get_block, nx_block, ny_block use ice_domain, only: blocks_ice, nblocks use ice_domain_size, only: nfsd - use ice_grid, only: tmask, lmask_n, lmask_s, dxu, dyu + use ice_grid, only: tmask, lmask_n, lmask_s, dxu, dyu, grid_system use ice_calendar, only: new_year, write_history, & write_ic, timesecs, histfreq, nstreams, mmonth, & new_month @@ -2001,6 +2009,7 @@ subroutine accum_hist (dt) fm, fmN, fmE, daidtt, dvidtt, daidtd, dvidtd, fsurf, & fcondtop, fcondbot, fsurfn, fcondtopn, flatn, fsensn, albcnt, snwcnt, & stressp_1, stressm_1, stress12_1, & + stresspT, stressmT, stress12T, & stressp_2, & stressp_3, & stressp_4, sig1, sig2, sigP, & @@ -4287,17 +4296,28 @@ subroutine accum_hist (dt) ! snapshots !--------------------------------------------------------------- - ! compute sig1 and sig2 - - call principal_stress (nx_block, ny_block, & - stressp_1 (:,:,iblk), & - stressm_1 (:,:,iblk), & - stress12_1(:,:,iblk), & - strength (:,:,iblk), & - sig1 (:,:,iblk), & - sig2 (:,:,iblk), & - sigP (:,:,iblk)) - + ! compute sig1 and sig2 + select case (grid_system) + case('B') + call principal_stress (nx_block, ny_block, & + stressp_1 (:,:,iblk), & + stressm_1 (:,:,iblk), & + stress12_1(:,:,iblk), & + strength (:,:,iblk), & + sig1 (:,:,iblk), & + sig2 (:,:,iblk), & + sigP (:,:,iblk)) + case('CD') + call principal_stress (nx_block, ny_block, & + stresspT (:,:,iblk), & + stressmT (:,:,iblk), & + stress12T (:,:,iblk), & + strength (:,:,iblk), & + sig1 (:,:,iblk), & + sig2 (:,:,iblk), & + sigP (:,:,iblk)) + end select + do j = jlo, jhi do i = ilo, ihi if (.not. tmask(i,j,iblk)) then ! 
mask out land points diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 779e84452..a78cdd457 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -95,7 +95,9 @@ subroutine evp (dt) Tbu, hwater, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & - stress12_1, stress12_2, stress12_3, stress12_4 + stress12_1, stress12_2, stress12_3, stress12_4, & + stresspT, stressmT, stress12T, & + stresspU, stressmU, stress12U use ice_grid, only: tmask, umask, nmask, emask, dxt, dyt, & dxhy, dyhx, cxp, cyp, cxm, cym, & tarear, uarear, tinyarea, grid_average_X2Y, & @@ -410,11 +412,11 @@ subroutine evp (dt) taubxN (:,:,iblk), taubyN (:,:,iblk), & waterxN (:,:,iblk), wateryN (:,:,iblk), & forcexN (:,:,iblk), forceyN (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stresspT (:,:,iblk), stressp_2 (:,:,iblk), & stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressmT (:,:,iblk), stressm_2 (:,:,iblk), & stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12T (:,:,iblk), stress12_2(:,:,iblk), & stress12_3(:,:,iblk), stress12_4(:,:,iblk), & uvelN_init (:,:,iblk), vvelN_init (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & @@ -443,11 +445,11 @@ subroutine evp (dt) taubxE (:,:,iblk), taubyE (:,:,iblk), & waterxE (:,:,iblk), wateryE (:,:,iblk), & forcexE (:,:,iblk), forceyE (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stresspU (:,:,iblk), stressp_2 (:,:,iblk), & stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressmU (:,:,iblk), stressm_2 (:,:,iblk), & stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12U (:,:,iblk), stress12_2(:,:,iblk), & stress12_3(:,:,iblk), stress12_4(:,:,iblk), & uvelE_init (:,:,iblk), vvelE_init (:,:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & @@ -726,6 +728,7 @@ subroutine evp (dt) ! Force symmetry across the tripole seam if (trim(grid_type) == 'tripole') then + ! TODO: CD-grid if (maskhalo_dyn) then !------------------------------------------------------- ! set halomask to zero because ice_HaloMask always keeps diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 7ceb715af..fb0a65d68 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -158,7 +158,9 @@ subroutine init_dyn (dt) use ice_flux, only: rdg_conv, rdg_shear, iceumask, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & - stress12_1, stress12_2, stress12_3, stress12_4 + stress12_1, stress12_2, stress12_3, stress12_4, & + stresspT, stressmT, stress12T, & + stresspU, stressmU, stress12U use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN, divu, shear use ice_grid, only: ULAT, NLAT, ELAT @@ -247,6 +249,15 @@ subroutine init_dyn (dt) stress12_3(i,j,iblk) = c0 stress12_4(i,j,iblk) = c0 + if (grid_system == 'CD') then + stresspT (i,j,iblk) = c0 + stressmT (i,j,iblk) = c0 + stress12T (i,j,iblk) = c0 + stresspU (i,j,iblk) = c0 + stressmU (i,j,iblk) = c0 + stress12U (i,j,iblk) = c0 + endif + ! ice extent mask on velocity points iceumask(i,j,iblk) = .false. 
@@ -1341,13 +1352,13 @@ end subroutine seabed_stress_factor_prob !======================================================================= ! Computes principal stresses for comparison with the theoretical -! yield curve; northeast values +! yield curve ! ! author: Elizabeth C. Hunke, LANL subroutine principal_stress(nx_block, ny_block, & - stressp_1, stressm_1, & - stress12_1, strength, & + stressp, stressm, & + stress12, strength, & sig1, sig2, & sigP) @@ -1355,9 +1366,9 @@ subroutine principal_stress(nx_block, ny_block, & nx_block, ny_block ! block dimensions real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - stressp_1 , & ! sigma11 + sigma22 - stressm_1 , & ! sigma11 - sigma22 - stress12_1, & ! sigma12 + stressp , & ! sigma11 + sigma22 + stressm , & ! sigma11 - sigma22 + stress12 , & ! sigma12 strength ! for normalization of sig1 and sig2 real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & @@ -1382,14 +1393,14 @@ subroutine principal_stress(nx_block, ny_block, & do i = 1, nx_block if (strength(i,j) > puny) then ! ice internal pressure - sigP(i,j) = -p5*stressp_1(i,j) + sigP(i,j) = -p5*stressp(i,j) ! normalized principal stresses - sig1(i,j) = (p5*(stressp_1(i,j) & - + sqrt(stressm_1(i,j)**2+c4*stress12_1(i,j)**2))) & + sig1(i,j) = (p5*(stressp(i,j) & + + sqrt(stressm(i,j)**2+c4*stress12(i,j)**2))) & / strength(i,j) - sig2(i,j) = (p5*(stressp_1(i,j) & - - sqrt(stressm_1(i,j)**2+c4*stress12_1(i,j)**2))) & + sig2(i,j) = (p5*(stressp(i,j) & + - sqrt(stressm(i,j)**2+c4*stress12(i,j)**2))) & / strength(i,j) else sig1(i,j) = spval_dbl diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90 index c593d91b1..00d9aac97 100644 --- a/cicecore/cicedynB/general/ice_flux.F90 +++ b/cicecore/cicedynB/general/ice_flux.F90 @@ -134,7 +134,10 @@ module ice_flux ! ice stress tensor in each corner of T cell (kg/s^2) stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22 stressm_1, stressm_2, stressm_3, stressm_4 , & ! sigma11-sigma22 - stress12_1,stress12_2,stress12_3,stress12_4 ! sigma12 + stress12_1,stress12_2,stress12_3,stress12_4, & ! sigma12 + ! ice stress tensor at U and T locations (grid_system = 'CD') (kg/s^2) + stresspT, stressmT, stress12T, & ! sigma11+sigma22, sigma11-sigma22, sigma12 + stresspU, stressmU, stress12U ! " logical (kind=log_kind), & dimension (:,:,:), allocatable, public :: & @@ -623,6 +626,12 @@ subroutine alloc_flux iceemask (nx_block,ny_block,max_blocks), & ! ice extent mask (E-cell) fmE (nx_block,ny_block,max_blocks), & ! Coriolis param. * mass in E-cell (kg/s) TbE (nx_block,ny_block,max_blocks), & ! factor for seabed stress (landfast ice) + stresspT (nx_block,ny_block,max_blocks), & ! sigma11+sigma22 + stressmT (nx_block,ny_block,max_blocks), & ! sigma11-sigma22 + stress12T (nx_block,ny_block,max_blocks), & ! sigma12 + stresspU (nx_block,ny_block,max_blocks), & ! sigma11+sigma22 + stressmU (nx_block,ny_block,max_blocks), & ! sigma11-sigma22 + stress12U (nx_block,ny_block,max_blocks), & ! 
sigma12 stat=ierr) if (ierr/=0) call abort_ice('(alloc_flux): Out of memory') diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index 1a5681b38..9a5a75bea 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -57,8 +57,11 @@ subroutine dumpfile(filename_spec) strocnxT, strocnyT, sst, frzmlt, iceumask, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & - stress12_1, stress12_2, stress12_3, stress12_4 + stress12_1, stress12_2, stress12_3, stress12_4, & + stresspT, stressmT, stress12T, & + stresspU, stressmU, stress12U use ice_flux, only: coszen + use ice_grid, only: grid_system use ice_state, only: aicen, vicen, vsnon, trcrn, uvel, vvel character(len=char_len_long), intent(in), optional :: filename_spec @@ -164,6 +167,15 @@ subroutine dumpfile(filename_spec) call write_restart_field(nu_dump,0,stress12_2,'ruf8','stress12_2',1,diag) call write_restart_field(nu_dump,0,stress12_4,'ruf8','stress12_4',1,diag) + if (grid_system == 'CD') then + call write_restart_field(nu_dump,0,stresspT ,'ruf8','stresspT' ,1,diag) + call write_restart_field(nu_dump,0,stressmT ,'ruf8','stressmT' ,1,diag) + call write_restart_field(nu_dump,0,stress12T,'ruf8','stress12T',1,diag) + call write_restart_field(nu_dump,0,stresspU ,'ruf8','stresspU' ,1,diag) + call write_restart_field(nu_dump,0,stressmU ,'ruf8','stressmU' ,1,diag) + call write_restart_field(nu_dump,0,stress12U,'ruf8','stress12U',1,diag) + endif + !----------------------------------------------------------------- ! ice mask for dynamics !----------------------------------------------------------------- @@ -206,9 +218,11 @@ subroutine restartfile (ice_ic) strocnxT, strocnyT, sst, frzmlt, iceumask, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & - stress12_1, stress12_2, stress12_3, stress12_4 + stress12_1, stress12_2, stress12_3, stress12_4, & + stresspT, stressmT, stress12T, & + stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_grid, only: tmask, grid_type + use ice_grid, only: tmask, grid_type, grid_system use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & aice0, aicen, vicen, vsnon, trcrn, aice_init, uvel, vvel, & trcr_base, nt_strata, n_trcr_strata @@ -367,6 +381,21 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,stress12_4,'ruf8', & 'stress12_4',1,diag,field_loc_center,field_type_scalar) ! stress12_4 + if (grid_system == 'CD') then + call read_restart_field(nu_restart,0,stresspT,'ruf8', & + 'stresspT' ,1,diag,field_loc_center,field_type_scalar) ! stresspT + call read_restart_field(nu_restart,0,stressmT,'ruf8', & + 'stressmT' ,1,diag,field_loc_center,field_type_scalar) ! stressmT + call read_restart_field(nu_restart,0,stress12T,'ruf8', & + 'stress12T',1,diag,field_loc_center,field_type_scalar) ! stress12T + call read_restart_field(nu_restart,0,stresspU,'ruf8', & + 'stresspU' ,1,diag,field_loc_center,field_type_scalar) ! stresspU + call read_restart_field(nu_restart,0,stressmU,'ruf8', & + 'stressmU' ,1,diag,field_loc_center,field_type_scalar) ! stressmU + call read_restart_field(nu_restart,0,stress12U,'ruf8', & + 'stress12U',1,diag,field_loc_center,field_type_scalar) ! 
stress12U + endif + if (trim(grid_type) == 'tripole') then call ice_HaloUpdate_stress(stressp_1, stressp_3, halo_info, & field_loc_center, field_type_scalar) @@ -394,6 +423,7 @@ subroutine restartfile (ice_ic) field_loc_center, field_type_scalar) call ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info, & field_loc_center, field_type_scalar) + ! TODO: CD-grid endif !----------------------------------------------------------------- @@ -465,6 +495,7 @@ subroutine restartfile (ice_ic) stress12_4(i,j,iblk) = c0 enddo enddo + ! TODO: CD-grid ? enddo !$OMP END PARALLEL DO diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 index f6002ff40..0bba6e36e 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 @@ -137,6 +137,7 @@ subroutine init_restart_write(filename_spec) n_dic, n_don, n_fed, n_fep, nfsd use ice_arrays_column, only: oceanmixed_ice use ice_dyn_shared, only: kdyn + use ice_grid, only: grid_system character(len=char_len_long), intent(in), optional :: filename_spec @@ -273,6 +274,15 @@ subroutine init_restart_write(filename_spec) call define_rest_field(ncid,'stress12_3',dims) call define_rest_field(ncid,'stress12_4',dims) + if (grid_system == 'CD') then + call define_rest_field(ncid,'stresspT' ,dims) + call define_rest_field(ncid,'stressmT' ,dims) + call define_rest_field(ncid,'stress12T',dims) + call define_rest_field(ncid,'stresspU' ,dims) + call define_rest_field(ncid,'stressmU' ,dims) + call define_rest_field(ncid,'stress12U',dims) + endif + call define_rest_field(ncid,'iceumask',dims) if (oceanmixed_ice) then diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 0ec6b7628..2e5338fc0 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -145,6 +145,7 @@ subroutine init_restart_write(filename_spec) n_dic, n_don, n_fed, n_fep, nfsd use ice_dyn_shared, only: kdyn use ice_arrays_column, only: oceanmixed_ice + use ice_grid, only: grid_system logical (kind=log_kind) :: & solve_zsal, skl_bgc, z_tracers @@ -276,6 +277,15 @@ subroutine init_restart_write(filename_spec) call define_rest_field(File,'stress12_3',dims) call define_rest_field(File,'stress12_4',dims) + if (grid_system == 'CD') then + call define_rest_field(File,'stresspT' ,dims) + call define_rest_field(File,'stressmT' ,dims) + call define_rest_field(File,'stress12T',dims) + call define_rest_field(File,'stresspU' ,dims) + call define_rest_field(File,'stressmU' ,dims) + call define_rest_field(File,'stress12U',dims) + endif + call define_rest_field(File,'iceumask',dims) if (oceanmixed_ice) then From 0025eae550369cd1f88a73c5b2765dda5902b99b Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Fri, 19 Nov 2021 12:45:41 -0500 Subject: [PATCH 044/109] Stress_U subroutine (#29) * Initial coding of stress_U * Almost done with stressU * Done with stress_U * Added calls for stress_T and stress_U * Added grid variables from ice_grid in evp * Rm empty line * Cosmetic changes * Fixed compilation errors...works now --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 213 ++++++++++++++++-- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 2 +- 2 files changed, 194 insertions(+), 21 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 
b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index a78cdd457..7cc9b2e2b 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -98,7 +98,9 @@ subroutine evp (dt) stress12_1, stress12_2, stress12_3, stress12_4, & stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U - use ice_grid, only: tmask, umask, nmask, emask, dxt, dyt, & + use ice_grid, only: tmask, umask, nmask, emask, uvm, epm, npm, & + dxe, dxn, dxt, dxu, dye, dyn, dyt, dyu, & + ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & dxhy, dyhx, cxp, cyp, cxm, cym, & tarear, uarear, tinyarea, grid_average_X2Y, & grid_type, grid_system @@ -168,6 +170,10 @@ subroutine evp (dt) real (kind=dbl_kind), allocatable :: fld2(:,:,:,:) + real (kind=dbl_kind), allocatable :: & + zetax2T(:,:,:), & ! zetax2 = 2*zeta (bulk viscous coeff) + etax2T(:,:,:) ! etax2 = 2*eta (shear viscous coeff) + real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & strtmp ! stress combinations for momentum equation @@ -195,6 +201,13 @@ subroutine evp (dt) allocate(fld2(nx_block,ny_block,2,max_blocks)) + if (grid_system == 'CD') then + + allocate(zetax2T(nx_block,ny_block,max_blocks)) + allocate(etax2T(nx_block,ny_block,max_blocks)) + + endif + ! This call is needed only if dt changes during runtime. ! call set_evp_parameters (dt) @@ -608,7 +621,8 @@ subroutine evp (dt) !$TCXOMP PARALLEL DO PRIVATE(iblk,strtmp) do iblk = 1, nblocks -! if (trim(yield_curve) == 'ellipse') then + select case (grid_system) + case('B') call stress (nx_block, ny_block, & ksub, icellt(iblk), & indxti (:,iblk), indxtj (:,iblk), & @@ -628,15 +642,11 @@ subroutine evp (dt) shear (:,:,iblk), divu (:,:,iblk), & rdg_conv (:,:,iblk), rdg_shear (:,:,iblk), & strtmp (:,:,:) ) -! endif ! yield_curve !----------------------------------------------------------------- ! momentum equation !----------------------------------------------------------------- - select case (grid_system) - case('B') - call stepu (nx_block, ny_block, & icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & @@ -655,7 +665,38 @@ subroutine evp (dt) case('CD') - call step_vel (nx_block, ny_block, & + call stress_T (nx_block, ny_block, & + ksub, icellt(iblk), & + indxti (:,iblk), indxtj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + tarear (:,:,iblk), tinyarea (:,:,iblk), & + strength (:,:,iblk), & + zetax2T (:,:,iblk), etax2T (:,:,iblk), & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12T (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv (:,:,iblk), rdg_shear (:,:,iblk) ) + + call stress_U (nx_block, ny_block, & + ksub, icellu(iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxE (:,:,iblk), dyN (:,:,iblk), & + dxU (:,:,iblk), dyU (:,:,iblk), & + ratiodxN (:,:,iblk), ratiodxNr (:,:,iblk), & + ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & + epm (:,:,iblk), npm (:,:,iblk), & + uvm (:,:,iblk), & + zetax2T (:,:,iblk), etax2T (:,:,iblk), & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12U (:,:,iblk)) + + call step_vel (nx_block, ny_block, & ! 
E point icelle (iblk), Cdn_ocn (:,:,iblk), & indxei (:,iblk), indxej (:,iblk), & ksub, aiE (:,:,iblk), & @@ -669,7 +710,7 @@ subroutine evp (dt) uvelE (:,:,iblk), vvelE (:,:,iblk), & TbE (:,:,iblk)) - call step_vel (nx_block, ny_block, & + call step_vel (nx_block, ny_block, & ! N point icelln (iblk), Cdn_ocn (:,:,iblk), & indxni (:,iblk), indxnj (:,iblk), & ksub, aiN (:,:,iblk), & @@ -683,7 +724,6 @@ subroutine evp (dt) uvelN (:,:,iblk), vvelN (:,:,iblk), & TbN (:,:,iblk)) - end select enddo @@ -724,6 +764,10 @@ subroutine evp (dt) call ice_timer_stop(timer_evp_2d) deallocate(fld2) + if (grid_system == 'CD') then + deallocate(zetax2T, etax2T) + endif + if (maskhalo_dyn) call ice_HaloDestroy(halo_info_mask) ! Force symmetry across the tripole seam @@ -1161,7 +1205,6 @@ end subroutine stress !======================================================================= ! Computes the strain rates and internal stress components for T points -! Computes stress terms for the momentum equation ! author: JF Lemieux, ECCC ! Nov 2021 @@ -1174,7 +1217,8 @@ subroutine stress_T (nx_block, ny_block, & dxN, dyE, & dxT, dyT, & tarear, tinyarea, & - strength, & + strength, & + zetax2T, etax2T, & stresspT, stressmT, & stress12T, & shear, divu, & @@ -1207,9 +1251,11 @@ subroutine stress_T (nx_block, ny_block, & tinyarea ! puny*tarea real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + zetax2T , & ! zetax2 = 2*zeta (bulk viscous coeff) + etax2T , & ! etax2 = 2*eta (shear viscous coeff) stresspT , & ! sigma11+sigma22 stressmT , & ! sigma11-sigma22 - stress12T ! sigma12 + stress12T ! sigma12 real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & shear , & ! strain rate II component (1/s) @@ -1224,8 +1270,6 @@ subroutine stress_T (nx_block, ny_block, & real (kind=dbl_kind) :: & divT, tensionT, shearT, DeltaT, & ! strain rates at T point - zetax2T , & ! 2 x zeta (visc coeff) at T point - etax2T , & ! 2 x eta (visc coeff) at T point rep_prsT ! replacement pressure at T point logical :: capping ! of the viscous coef @@ -1262,7 +1306,8 @@ subroutine stress_T (nx_block, ny_block, & call viscous_coeffs_and_rep_pressure_T (strength(i,j), & tinyarea(i,j), & - DeltaT, zetax2T, etax2T, & + DeltaT, & + zetax2T(i,j),etax2T(i,j),& rep_prsT, capping ) !----------------------------------------------------------------- @@ -1272,13 +1317,13 @@ subroutine stress_T (nx_block, ny_block, & ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code stresspT(i,j) = (stresspT(i,j)*(c1-arlx1i*revp) + & - arlx1i*(zetax2T*divT - rep_prsT)) * denom1 + arlx1i*(zetax2T(i,j)*divT - rep_prsT)) * denom1 stressmT(i,j) = (stressmT(i,j)*(c1-arlx1i*revp) + & - arlx1i*etax2T*tensionT) * denom1 + arlx1i*etax2T(i,j)*tensionT) * denom1 stress12T(i,j) = (stress12T(i,j)*(c1-arlx1i*revp) + & - arlx1i*p5*etax2T*shearT) * denom1 + arlx1i*p5*etax2T(i,j)*shearT) * denom1 enddo ! ij @@ -1296,13 +1341,141 @@ subroutine stress_T (nx_block, ny_block, & dxT, dyT, & tarear , & shear , divu , & - rdg_conv , rdg_shear ) + rdg_conv , rdg_shear ) endif end subroutine stress_T - !======================================================================= +!======================================================================= + +! Computes the strain rates and internal stress components for U points + +! author: JF Lemieux, ECCC +! 
Nov 2021 + + subroutine stress_U (nx_block, ny_block, & + ksub, icellu, & + indxui, indxuj, & + uvelE, vvelE, & + uvelN, vvelN, & + uvelU, vvelU, & + dxE, dyN, & + dxU, dyU, & + ratiodxN, ratiodxNr, & + ratiodyE, ratiodyEr, & + epm, npm, uvm, & + zetax2T, etax2T, & + stresspU, stressmU, & + stress12U ) + + use ice_dyn_shared, only: strain_rates_U!, & + ! viscous_coeffs_and_rep_pressure_U + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + ksub , & ! subcycling step + icellu ! no. of cells where iceumask = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxui , & ! compressed index in i-direction + indxuj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + uvelE , & ! x-component of velocity (m/s) at the E point + vvelE , & ! y-component of velocity (m/s) at the N point + uvelN , & ! x-component of velocity (m/s) at the E point + vvelN , & ! y-component of velocity (m/s) at the N point + uvelU , & ! x-component of velocity (m/s) at the U point + vvelU , & ! y-component of velocity (m/s) at the U point + dxE , & ! width of E-cell through the middle (m) + dyN , & ! height of N-cell through the middle (m) + dxU , & ! width of U-cell through the middle (m) + dyU , & ! height of U-cell through the middle (m) + ratiodxN , & ! -dxN(i+1,j)/dxN(i,j) for BCs + ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) for BCs + ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) for BCs + ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) for BCs + epm , & ! E-cell mask + npm , & ! E-cell mask + uvm , & ! U-cell mask + zetax2T , & ! 2*zeta at the T point + etax2T ! 2*eta at the T point + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + stresspU , & ! sigma11+sigma22 + stressmU , & ! sigma11-sigma22 + stress12U ! sigma12 + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + real (kind=dbl_kind) :: & + divU, tensionU, shearU, DeltaU, & ! strain rates at U point + zetax2U, etax2U, rep_prsU ! replacement pressure at U point + + character(len=*), parameter :: subname = '(stress_U)' + + do ij = 1, icellu + i = indxui(ij) + j = indxuj(ij) + + !----------------------------------------------------------------- + ! strain rates at T point + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + + call strain_rates_U (nx_block, ny_block, & + i, j, & + uvelE, vvelE, & + uvelN, vvelN, & + uvelU, vvelU, & + dxE, dyN, & + dxU, dyU, & + ratiodxN, ratiodxNr, & + ratiodyE, ratiodyEr, & + epm, npm, uvm, & + divU, tensionU, & + shearU, DeltaU ) + + !----------------------------------------------------------------- + ! viscous coefficients and replacement pressure at T point + !----------------------------------------------------------------- + +! COMING SOON!!! + +! call viscous_coeffs_and_rep_pressure_U (zetax2T(i,j), zetax2T(i,j+1), & +! zetax2T(i+1,j+1),zetax2T(i+1,j), & +! etax2T(i,j), etax2T(i,j+1), & +! etax2T(i+1,j+1), etax2T(i+1,j), & +! hm(i,j), hm(i,j+1), & +! hm(i+1,j+1), hm(i+1,j), & +! tarea(i,j), tarea(i,j+1), & +! tarea(i+1,j+1), tarea(i+1,j), & +! DeltaU ) + + !----------------------------------------------------------------- + ! the stresses ! kg/s^2 + !----------------------------------------------------------------- + + ! NOTE: for comp. 
efficiency 2 x zeta and 2 x eta are used in the code + + stresspU(i,j) = (stresspU(i,j)*(c1-arlx1i*revp) + & + arlx1i*(zetax2U*divU - rep_prsU)) * denom1 + + stressmU(i,j) = (stressmU(i,j)*(c1-arlx1i*revp) + & + arlx1i*etax2U*tensionU) * denom1 + + stress12U(i,j) = (stress12U(i,j)*(c1-arlx1i*revp) + & + arlx1i*p5*etax2U*shearU) * denom1 + + enddo ! ij + + end subroutine stress_U + +!======================================================================= ! Computes divergence of stress tensor at the E or N point for the mom equation diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index fb0a65d68..8bf892a53 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -28,7 +28,7 @@ module ice_dyn_shared seabed_stress_factor_LKD, seabed_stress_factor_prob, & alloc_dyn_shared, & deformations, deformations_T, & - strain_rates, strain_rates_T, & + strain_rates, strain_rates_T, strain_rates_U, & viscous_coeffs_and_rep_pressure, & viscous_coeffs_and_rep_pressure_T, & stack_velocity_field, unstack_velocity_field From 3e1ab269c46739a17dfb22d0bfa24cffb01e7cd4 Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Fri, 19 Nov 2021 11:20:53 -0700 Subject: [PATCH 045/109] Add calls to dyn_finish for CD grid (#30) --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 47 +++++++++++++++++++ cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 30 +++++++----- 2 files changed, 66 insertions(+), 11 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 7cc9b2e2b..fc6d8eed0 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -861,6 +861,53 @@ subroutine evp (dt) enddo !$OMP END PARALLEL DO + if (grid_system == 'CD') then + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call dyn_finish & + (nx_block, ny_block, & + icelln (iblk), Cdn_ocn (:,:,iblk), & + indxni (:,iblk), indxnj (:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + aiN (:,:,iblk), fmN (:,:,iblk), & + strintxN(:,:,iblk), strintyN(:,:,iblk), & + strairxN(:,:,iblk), strairyN(:,:,iblk), & + strocnxN(:,:,iblk), strocnyN(:,:,iblk)) + + call dyn_finish & + (nx_block, ny_block, & + icelle (iblk), Cdn_ocn (:,:,iblk), & + indxei (:,iblk), indxej (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + aiE (:,:,iblk), fmE (:,:,iblk), & + strintxE(:,:,iblk), strintyE(:,:,iblk), & + strairxE(:,:,iblk), strairyE(:,:,iblk), & + strocnxE(:,:,iblk), strocnyE(:,:,iblk)) + + ! If we are coupling to a C grid ocean model +! do ij =1, icelle +! i = indxei(ij,iblk) +! j = indxej(ij,iblk) + +! strocnxT(i,j,iblk) = strocnxE(i,j,iblk) / aiE(i,j,iblk) +! enddo + +! do ij =1, icelln +! i = indxni(ij,iblk) +! j = indxnj(ij,iblk) + +! strocnyT(i,j,iblk) = strocnyN(i,j,iblk) / aiN(i,j,iblk) +! enddo + + enddo + !$OMP END PARALLEL DO + + endif + call ice_HaloUpdate (strocnxT, halo_info, & field_loc_NEcorner, field_type_vector) call ice_HaloUpdate (strocnyT, halo_info, & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 8bf892a53..3a3f4b154 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -979,7 +979,7 @@ subroutine dyn_finish (nx_block, ny_block, & strocnx , & ! ice-ocean stress, x-direction strocny ! 
ice-ocean stress, y-direction - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out), optional :: & strocnxT, & ! ice-ocean stress, x-direction strocnyT ! ice-ocean stress, y-direction @@ -999,12 +999,16 @@ subroutine dyn_finish (nx_block, ny_block, & if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - do j = 1, ny_block - do i = 1, nx_block - strocnxT(i,j) = c0 - strocnyT(i,j) = c0 - enddo - enddo + if (present(strocnxT)) then + + do j = 1, ny_block + do i = 1, nx_block + strocnxT(i,j) = c0 + strocnyT(i,j) = c0 + enddo + enddo + + endif ! ocean-ice stress for coupling do ij =1, icellu @@ -1031,10 +1035,14 @@ subroutine dyn_finish (nx_block, ny_block, & ! strocnx(i,j) = -(strairx(i,j) + strintx(i,j)) ! strocny(i,j) = -(strairy(i,j) + strinty(i,j)) - ! Prepare to convert to T grid - ! divide by aice for coupling - strocnxT(i,j) = strocnx(i,j) / aiu(i,j) - strocnyT(i,j) = strocny(i,j) / aiu(i,j) + if (present(strocnxT)) then + + ! Prepare to convert to T grid + ! divide by aice for coupling + strocnxT(i,j) = strocnx(i,j) / aiu(i,j) + strocnyT(i,j) = strocny(i,j) / aiu(i,j) + + endif enddo end subroutine dyn_finish From e2658d14e7663d7eb5e8a997353e69d10487f569 Mon Sep 17 00:00:00 2001 From: Elizabeth Hunke Date: Fri, 19 Nov 2021 12:46:02 -0700 Subject: [PATCH 046/109] corrections to boxnodyn test: run dynamics with zero forcing (#31) --- .../scripts/options/set_nml.boxnodyn | 31 +++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/configuration/scripts/options/set_nml.boxnodyn b/configuration/scripts/options/set_nml.boxnodyn index 35a224c92..8e5d4a692 100644 --- a/configuration/scripts/options/set_nml.boxnodyn +++ b/configuration/scripts/options/set_nml.boxnodyn @@ -8,6 +8,30 @@ dumpfreq_n = 2 histfreq = 'd','x','x','x','x' histfreq_n = 2,1,1,1,1 f_aice = 'd' +f_uvel = 'd' +f_vvel = 'd' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd' +f_strairy = 'd' +f_strtltx = 'd' +f_strtlty = 'd' +f_strcorx = 'd' +f_strcory = 'd' +f_strocnx = 'd' +f_strocny = 'd' +f_strintx = 'd' +f_strinty = 'd' +f_taubx = 'd' +f_tauby = 'd' +f_strength = 'd' +f_divu = 'd' +f_shear = 'd' +f_sig1 = 'd' +f_sig2 = 'd' +f_sigP = 'd' kcatbound = 0 ew_boundary_type = 'open' ns_boundary_type = 'open' @@ -20,10 +44,13 @@ tr_pond_lvl = .false. tr_aero = .false. kitd = 0 ktherm = 0 -kdyn = 0 +kdyn = 1 revised_evp = .false. -kstrength = 0 +kstrength = 1 krdg_partic = 1 krdg_redist = 1 +seabed_stress = .true. +atm_data_type = 'calm' +ocn_data_type = 'calm' shortwave = 'ccsm3' albedo_type = 'constant' From d4acb36aacf3e50bc5cd747be54847302b1a7070 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Fri, 19 Nov 2021 13:37:59 -0800 Subject: [PATCH 047/109] ifdef out the CD stress restart read to allow usage of B grid restart files for initial CD testing (#32) --- cicecore/cicedynB/infrastructure/ice_restart_driver.F90 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index 9a5a75bea..11836c073 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -381,6 +381,9 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,stress12_4,'ruf8', & 'stress12_4',1,diag,field_loc_center,field_type_scalar) ! stress12_4 +! 
tcraig, comment these out now to allow restarts from B grid file +! this will affect exact restart when we get to that point +#if (1 == 0) if (grid_system == 'CD') then call read_restart_field(nu_restart,0,stresspT,'ruf8', & 'stresspT' ,1,diag,field_loc_center,field_type_scalar) ! stresspT @@ -395,6 +398,7 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,stress12U,'ruf8', & 'stress12U',1,diag,field_loc_center,field_type_scalar) ! stress12U endif +#endif if (trim(grid_type) == 'tripole') then call ice_HaloUpdate_stress(stressp_1, stressp_3, halo_info, & From f46e35e7d3e8734876917a6e1a7602928969ee2a Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Fri, 19 Nov 2021 18:23:44 -0700 Subject: [PATCH 048/109] Add earear and narear (#35) --- cicecore/cicedynB/infrastructure/ice_grid.F90 | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 688b7155e..3f4556b2c 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -78,6 +78,8 @@ module ice_grid earea , & ! area of E-cell (m^2) tarear , & ! 1/tarea uarear , & ! 1/uarea + narear , & ! 1/narea + earear , & ! 1/earea tinyarea,& ! puny*tarea tarean , & ! area of NH T-cells tareas , & ! area of SH T-cells @@ -208,6 +210,8 @@ subroutine alloc_grid earea (nx_block,ny_block,max_blocks), & ! area of E-cell (m^2) tarear (nx_block,ny_block,max_blocks), & ! 1/tarea uarear (nx_block,ny_block,max_blocks), & ! 1/uarea + narear (nx_block,ny_block,max_blocks), & ! 1/narea + earear (nx_block,ny_block,max_blocks), & ! 1/earea tinyarea (nx_block,ny_block,max_blocks), & ! puny*tarea tarean (nx_block,ny_block,max_blocks), & ! area of NH T-cells tareas (nx_block,ny_block,max_blocks), & ! area of SH T-cells @@ -497,6 +501,16 @@ subroutine init_grid2 else uarear(i,j,iblk) = c0 ! possible on boundaries endif + if (narea(i,j,iblk) > c0) then + narear(i,j,iblk) = c1/narea(i,j,iblk) + else + narear(i,j,iblk) = c0 ! possible on boundaries + endif + if (earea(i,j,iblk) > c0) then + earear(i,j,iblk) = c1/earea(i,j,iblk) + else + earear(i,j,iblk) = c0 ! possible on boundaries + endif tinyarea(i,j,iblk) = puny*tarea(i,j,iblk) enddo enddo From 922e672aa70add8dd42d2e3360528ff583e2d61e Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Fri, 19 Nov 2021 20:31:46 -0500 Subject: [PATCH 049/109] div_stress calls and bug correction (#34) * Added calls for div_stress * Corrected bug in div_Stress * Changed variable descriptions --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 111 +++++++++++++-------- 1 file changed, 70 insertions(+), 41 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index fc6d8eed0..4e7b33a7e 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -102,7 +102,7 @@ subroutine evp (dt) dxe, dxn, dxt, dxu, dye, dyn, dyt, dyu, & ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, tinyarea, grid_average_X2Y, & + tarear, uarear, earea, narea, tinyarea, grid_average_X2Y, & grid_type, grid_system use ice_state, only: aice, vice, vsno, uvel, vvel, uvelN, vvelN, & uvelE, vvelE, divu, shear, & @@ -695,6 +695,32 @@ subroutine evp (dt) zetax2T (:,:,iblk), etax2T (:,:,iblk), & stresspU (:,:,iblk), stressmU (:,:,iblk), & stress12U (:,:,iblk)) + + call div_stress (nx_block, ny_block, & ! 
E point + ksub, icelle(iblk), & + indxei (:,iblk), indxej (:,iblk), & + dxE (:,:,iblk), dyE (:,:,iblk), & + dxU (:,:,iblk), dyT (:,:,iblk), & + earea (:,:,iblk), & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12T (:,:,iblk), & + strintxE (:,:,iblk), strintyE (:,:,iblk), & + 'E') + + call div_stress (nx_block, ny_block, & ! N point + ksub, icelln(iblk), & + indxni (:,iblk), indxnj (:,iblk), & + dxN (:,:,iblk), dyN (:,:,iblk), & + dxT (:,:,iblk), dyU (:,:,iblk), & + narea (:,:,iblk), & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12T (:,:,iblk), & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), & + strintxN (:,:,iblk), strintyN (:,:,iblk), & + 'N') call step_vel (nx_block, ny_block, & ! E point icelle (iblk), Cdn_ocn (:,:,iblk), & @@ -1530,18 +1556,18 @@ end subroutine stress_U ! Nov 2021 subroutine div_stress (nx_block, ny_block, & - ksub, icell, & - indxi, indxj, & - dxE_N, dyE_N, & - dxT_U, dyT_U, & - arear, & - stressp, stressm, & - stress12, & - F1, F2, & + ksub, icell, & + indxi, indxj, & + dxE_N, dyE_N, & + dxT_U, dyT_U, & + area, & + stresspF1, stressmF1, & + stress12F1, & + stresspF2, stressmF2, & + stress12F2, & + F1, F2, & grid_location) - use ice_dyn_shared, only: strain_rates_T, deformations_T, & - viscous_coeffs_and_rep_pressure_T integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1558,10 +1584,13 @@ subroutine div_stress (nx_block, ny_block, & dyE_N , & ! height of E or N-cell through the middle (m) dxT_U , & ! width of T or U-cell through the middle (m) dyT_U , & ! height of T or U-cell through the middle (m) - arear , & ! 1/earea or 1/narea - stressp , & ! sigma11+sigma22 - stressm , & ! sigma11-sigma22 - stress12 ! sigma12 + area , & ! earea or narea + stresspF1 , & ! stressp (U or T) used for F1 calculation + stressmF1 , & ! stressm (U or T) used for F1 calculation + stress12F1 , & ! stress12 (U or T) used for F1 calculation + stresspF2 , & ! stressp (U or T) used for F2 calculation + stressmF2 , & ! stressm (U or T) used for F2 calculation + stress12F2 ! stress12 (U or T) used for F2 calculation character(len=*), intent(in) :: & grid_location ! E (East) or N (North) ! TO BE IMPROVED!!!! 
@@ -1591,35 +1620,35 @@ subroutine div_stress (nx_block, ny_block, & select case (trim(grid_location)) case('E') - F1(i,j) = arear(i,j) * & - ( p5 * dyE_N(i,j) * ( stressp(i+1,j)-stressp(i,j) ) & - + (p5/dyE_N(i,j)) * ( (dyT_U(i+1,j)**2) * stressm(i+1,j) & - -(dyT_U(i,j)**2)*stressm(i,j) ) & - + (c1/dxE_N(i,j)) * ( (dxT_U(i,j)**2) * stress12(i,j) & - -(dxT_U(i,j-1)**2)*stress12(i,j-1) ) ) - - F2(i,j) = arear(i,j) * & - ( p5 * dxE_N(i,j) * ( stressp(i,j)-stressp(i,j-1) ) & - - (p5/dxE_N(i,j)) * ( (dxT_U(i,j)**2) * stressm(i,j) & - -(dxT_U(i,j-1)**2)*stressm(i,j-1) ) & - + (c1/dyE_N(i,j)) * ( (dyT_U(i+1,j)**2) * stress12(i+1,j) & - -(dyT_U(i,j)**2)*stress12(i,j) ) ) + F1(i,j) = (c1/area(i,j)) * & + ( p5 * dyE_N(i,j) * ( stresspF1(i+1,j)-stresspF1(i,j) ) & + + (p5/dyE_N(i,j)) * ( (dyT_U(i+1,j)**2) * stressmF1(i+1,j) & + -(dyT_U(i,j)**2)*stressmF1(i,j) ) & + + (c1/dxE_N(i,j)) * ( (dxT_U(i,j)**2) * stress12F1(i,j) & + -(dxT_U(i,j-1)**2)*stress12F1(i,j-1) ) ) + + F2(i,j) = (c1/area(i,j)) * & + ( p5 * dxE_N(i,j) * ( stresspF2(i,j)-stresspF2(i,j-1) ) & + - (p5/dxE_N(i,j)) * ( (dxT_U(i,j)**2) * stressmF2(i,j) & + -(dxT_U(i,j-1)**2)*stressmF2(i,j-1) ) & + + (c1/dyE_N(i,j)) * ( (dyT_U(i+1,j)**2) * stress12F2(i+1,j) & + -(dyT_U(i,j)**2)*stress12F2(i,j) ) ) case('N') - F1(i,j) = arear(i,j) * & - ( p5 * dyE_N(i,j) * ( stressp(i,j)-stressp(i-1,j) ) & - + (p5/dyE_N(i,j)) * ( (dyT_U(i,j)**2) * stressm(i,j) & - -(dyT_U(i-1,j)**2)*stressm(i-1,j) ) & - + (c1/dxE_N(i,j)) * ( (dxT_U(i,j+1)**2) * stress12(i,j+1) & - -(dxT_U(i,j)**2)*stress12(i,j) ) ) - - F2(i,j) = arear(i,j) * & - ( p5 * dxE_N(i,j) * ( stressp(i,j+1)-stressp(i,j) ) & - - (p5/dxE_N(i,j)) * ( (dxT_U(i,j+1)**2) * stressm(i,j+1) & - -(dxT_U(i,j)**2)*stressm(i,j) ) & - + (c1/dyE_N(i,j)) * ( (dyT_U(i,j)**2) * stress12(i,j) & - -(dyT_U(i-1,j)**2)*stress12(i-1,j) ) ) + F1(i,j) = (c1/area(i,j)) * & + ( p5 * dyE_N(i,j) * ( stresspF1(i,j)-stresspF1(i-1,j) ) & + + (p5/dyE_N(i,j)) * ( (dyT_U(i,j)**2) * stressmF1(i,j) & + -(dyT_U(i-1,j)**2)*stressmF1(i-1,j) ) & + + (c1/dxE_N(i,j)) * ( (dxT_U(i,j+1)**2) * stress12F1(i,j+1) & + -(dxT_U(i,j)**2)*stress12F1(i,j) ) ) + + F2(i,j) = (c1/area(i,j)) * & + ( p5 * dxE_N(i,j) * ( stresspF2(i,j+1)-stresspF2(i,j) ) & + - (p5/dxE_N(i,j)) * ( (dxT_U(i,j+1)**2) * stressmF2(i,j+1) & + -(dxT_U(i,j)**2)*stressmF2(i,j) ) & + + (c1/dyE_N(i,j)) * ( (dyT_U(i,j)**2) * stress12F2(i,j) & + -(dyT_U(i-1,j)**2)*stress12F2(i-1,j) ) ) case default call abort_ice(subname // ' unkwown grid_location: ' // grid_location) end select From bf34414c0fc4247480e48af22a696d6aed5055f0 Mon Sep 17 00:00:00 2001 From: Philippe Blain Date: Fri, 19 Nov 2021 20:32:14 -0500 Subject: [PATCH 050/109] cicedynB: fix typos in abort messages (#36) --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 2 +- cicecore/cicedynB/infrastructure/ice_grid.F90 | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 4e7b33a7e..f74b3b49a 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -1650,7 +1650,7 @@ subroutine div_stress (nx_block, ny_block, & + (c1/dyE_N(i,j)) * ( (dyT_U(i,j)**2) * stress12F2(i,j) & -(dyT_U(i-1,j)**2)*stress12F2(i-1,j) ) ) case default - call abort_ice(subname // ' unkwown grid_location: ' // grid_location) + call abort_ice(subname // ' unknown grid_location: ' // grid_location) end select diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 
index 3f4556b2c..8212def06 100644
--- a/cicecore/cicedynB/infrastructure/ice_grid.F90
+++ b/cicecore/cicedynB/infrastructure/ice_grid.F90
@@ -2873,7 +2873,7 @@ real(kind=dbl_kind) function grid_neighbor_min(field, i, j, grid_location) resul
       case('N')
          mini = min(field(i,j), field(i,j+1))
       case default
-         call abort_ice(subname // ' unkwown grid_location: ' // grid_location)
+         call abort_ice(subname // ' unknown grid_location: ' // grid_location)
       end select

 end function grid_neighbor_min
@@ -2905,7 +2905,7 @@ real(kind=dbl_kind) function grid_neighbor_max(field, i, j, grid_location) resul
       case('N')
          maxi = max(field(i,j), field(i,j+1))
       case default
-         call abort_ice(subname // ' unkwown grid_location: ' // grid_location)
+         call abort_ice(subname // ' unknown grid_location: ' // grid_location)
       end select

 end function grid_neighbor_max

From 207be2da249634a2028eba1c76657e862fdd1e0b Mon Sep 17 00:00:00 2001
From: "David A. Bailey"
Date: Fri, 19 Nov 2021 18:32:29 -0700
Subject: [PATCH 051/109] Add present on strocnyT (#37)

---
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
index 3a3f4b154..dd2e45237 100755
--- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
@@ -999,7 +999,7 @@ subroutine dyn_finish (nx_block, ny_block, &
       if (icepack_warnings_aborted()) call abort_ice(error_message=subname, &
          file=__FILE__, line=__LINE__)

-      if (present(strocnxT)) then
+      if (present(strocnxT) .and. present(strocnyT)) then

          do j = 1, ny_block
          do i = 1, nx_block
@@ -1035,7 +1035,7 @@ subroutine dyn_finish (nx_block, ny_block, &
 !        strocnx(i,j) = -(strairx(i,j) + strintx(i,j))
 !        strocny(i,j) = -(strairy(i,j) + strinty(i,j))

-         if (present(strocnxT)) then
+         if (present(strocnxT) .and. present(strocnyT)) then

            ! Prepare to convert to T grid
            ! divide by aice for coupling

From c308719c478853f082d317a0005a19b7505ef0c2 Mon Sep 17 00:00:00 2001
From: Tony Craig
Date: Fri, 19 Nov 2021 18:48:20 -0800
Subject: [PATCH 052/109] quick fix for uninitialized variables (#38)

---
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index f74b3b49a..10a756de6 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -1518,7 +1518,9 @@ subroutine stress_U (nx_block, ny_block, &
      !-----------------------------------------------------------------

 ! COMING SOON!!!
-
+      zetax2U = c0
+      etax2U = c0
+      rep_prsU = c0
 !     call viscous_coeffs_and_rep_pressure_U (zetax2T(i,j), zetax2T(i,j+1), &
 !                                             zetax2T(i+1,j+1),zetax2T(i+1,j), &
 !                                             etax2T(i,j), etax2T(i,j+1), &

From c4e02043bb148dfcb7bd0e25daa772eac6c6b582 Mon Sep 17 00:00:00 2001
From: TRasmussen <33480590+TillRasmussen@users.noreply.github.com>
Date: Wed, 24 Nov 2021 17:57:59 +0100
Subject: [PATCH 053/109] umaskCD and more (#33)

* Changes in ice_dyn* are interpolation of uvelE/vvelN to B grid.
  ice_transport files are changed in a way so that velocities are
  interpolated to the B grid for the departure point function and kept
  at the E or N grid where possible.

* changed according to comments. changed average from F to S and
  commented out in vp and eap

* Create local uvmCD that is projected into umaskCD and icellu. Added
  function to find zeta, eta and replacement pressure at U

* Added call to routine

* bug fixes

* last bugfixes. Code compiles and is bit for bit with code before.
  Only quick suite has been run.

* First move towards merge with commit 39

* update ice_dyn_evp

* It is now bit for bit. The number of masks is reduced from two to
  one; instead an additional if/else on grid_system is introduced.
  Subroutine evp should be refactored.

* moved trim

* Based on Quicksuite: B grid bit for bit. CD grid passes 13 of 16;
  2 tests fail and 1 is pending. Comments from Philippe also included
---
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90    | 205 +++++++++++-------
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 135 +++++++-----
 cicecore/cicedynB/dynamics/ice_dyn_vp.F90     |   4 +-
 cicecore/cicedynB/infrastructure/ice_grid.F90 |  49 ++++-
 4 files changed, 243 insertions(+), 150 deletions(-)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index 10a756de6..0c9e1126e 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -98,11 +98,11 @@ subroutine evp (dt)
         stress12_1, stress12_2, stress12_3, stress12_4, &
         stresspT, stressmT, stress12T, &
         stresspU, stressmU, stress12U
-    use ice_grid, only: tmask, umask, nmask, emask, uvm, epm, npm, &
+    use ice_grid, only: hm, tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, &
         dxe, dxn, dxt, dxu, dye, dyn, dyt, dyu, &
         ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, &
         dxhy, dyhx, cxp, cyp, cxm, cym, &
-        tarear, uarear, earea, narea, tinyarea, grid_average_X2Y, &
+        tarear, uarear, earea, narea, tinyarea, grid_average_X2Y, tarea, &
         grid_type, grid_system
     use ice_state, only: aice, vice, vsno, uvel, vvel, uvelN, vvelN, &
         uvelE, vvelE, divu, shear, &
@@ -341,35 +341,67 @@ subroutine evp (dt)
        ihi = this_block%ihi
        jlo = this_block%jlo
        jhi = this_block%jhi
-
-      call dyn_prep2 (nx_block, ny_block, &
-                      ilo, ihi, jlo, jhi, &
-                      icellt(iblk), icellu(iblk), &
-                      indxti (:,iblk), indxtj (:,iblk), &
-                      indxui (:,iblk), indxuj (:,iblk), &
-                      aiu (:,:,iblk), umass (:,:,iblk), &
-                      umassdti (:,:,iblk), fcor_blk (:,:,iblk), &
-                      umask (:,:,iblk), &
-                      uocn (:,:,iblk), vocn (:,:,iblk), &
-                      strairx (:,:,iblk), strairy (:,:,iblk), &
-                      ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), &
-                      icetmask (:,:,iblk), iceumask (:,:,iblk), &
-                      fm (:,:,iblk), dt, &
-                      strtltx (:,:,iblk), strtlty (:,:,iblk), &
-                      strocnx (:,:,iblk), strocny (:,:,iblk), &
-                      strintx (:,:,iblk), strinty (:,:,iblk), &
-                      taubx (:,:,iblk), tauby (:,:,iblk), &
-                      waterx (:,:,iblk), watery (:,:,iblk), &
-                      forcex (:,:,iblk), forcey (:,:,iblk), &
-                      stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), &
-                      stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), &
-                      stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), &
-                      stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), &
-                      stress12_1(:,:,iblk), stress12_2(:,:,iblk), &
-                      stress12_3(:,:,iblk), stress12_4(:,:,iblk), &
-                      uvel_init (:,:,iblk), vvel_init (:,:,iblk), &
-                      uvel (:,:,iblk), vvel (:,:,iblk), &
-                      Tbu (:,:,iblk))
+
+      if (trim(grid_system) == 'B') then
+      call dyn_prep2 (nx_block, ny_block, &
+                      ilo, ihi, jlo, jhi, &
+                      icellt(iblk), icellu(iblk), &
+                      indxti (:,iblk), indxtj (:,iblk), &
+                      indxui (:,iblk), indxuj (:,iblk), &
+                      aiu (:,:,iblk), umass
(:,:,iblk), & + umassdti (:,:,iblk), fcor_blk (:,:,iblk), & + umask (:,:,iblk), & + uocn (:,:,iblk), vocn (:,:,iblk), & + strairx (:,:,iblk), strairy (:,:,iblk), & + ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & + icetmask (:,:,iblk), iceumask (:,:,iblk), & + fm (:,:,iblk), dt, & + strtltx (:,:,iblk), strtlty (:,:,iblk), & + strocnx (:,:,iblk), strocny (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + taubx (:,:,iblk), tauby (:,:,iblk), & + waterx (:,:,iblk), watery (:,:,iblk), & + forcex (:,:,iblk), forcey (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + uvel_init (:,:,iblk), vvel_init (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + Tbu (:,:,iblk)) + + elseif (trim(grid_system) == 'CD') then + call dyn_prep2 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icellt(iblk), icellu(iblk), & + indxti (:,iblk), indxtj (:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + aiu (:,:,iblk), umass (:,:,iblk), & + umassdti (:,:,iblk), fcor_blk (:,:,iblk), & + umaskCD (:,:,iblk), & + uocn (:,:,iblk), vocn (:,:,iblk), & + strairx (:,:,iblk), strairy (:,:,iblk), & + ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & + icetmask (:,:,iblk), iceumask (:,:,iblk), & + fm (:,:,iblk), dt, & + strtltx (:,:,iblk), strtlty (:,:,iblk), & + strocnx (:,:,iblk), strocny (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + taubx (:,:,iblk), tauby (:,:,iblk), & + waterx (:,:,iblk), watery (:,:,iblk), & + forcex (:,:,iblk), forcey (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + uvel_init (:,:,iblk), vvel_init (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + Tbu (:,:,iblk)) + endif !----------------------------------------------------------------- ! ice strength @@ -412,8 +444,8 @@ subroutine evp (dt) indxti (:,iblk), indxtj (:,iblk), & indxni (:,iblk), indxnj (:,iblk), & aiN (:,:,iblk), nmass (:,:,iblk), & - nmassdti (:,:,iblk), fcorN_blk (:,:,iblk), & - nmask (:,:,iblk), & + nmassdti (:,:,iblk), fcorN_blk (:,:,iblk),& + nmask (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & strairxN (:,:,iblk), strairyN (:,:,iblk), & ss_tltxN (:,:,iblk), ss_tltyN (:,:,iblk), & @@ -445,7 +477,7 @@ subroutine evp (dt) indxti (:,iblk), indxtj (:,iblk), & indxei (:,iblk), indxej (:,iblk), & aiE (:,:,iblk), emass (:,:,iblk), & - emassdti (:,:,iblk), fcorE_blk (:,:,iblk), & + emassdti (:,:,iblk), fcorE_blk (:,:,iblk),& emask (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & strairxE (:,:,iblk), strairyE (:,:,iblk), & @@ -643,10 +675,9 @@ subroutine evp (dt) rdg_conv (:,:,iblk), rdg_shear (:,:,iblk), & strtmp (:,:,:) ) - !----------------------------------------------------------------- - ! momentum equation - !----------------------------------------------------------------- - + !----------------------------------------------------------------- + ! 
momentum equation + !----------------------------------------------------------------- call stepu (nx_block, ny_block, & icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & @@ -688,13 +719,14 @@ subroutine evp (dt) uvel (:,:,iblk), vvel (:,:,iblk), & dxE (:,:,iblk), dyN (:,:,iblk), & dxU (:,:,iblk), dyU (:,:,iblk), & + tarea (:,:,iblk), & ratiodxN (:,:,iblk), ratiodxNr (:,:,iblk), & ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & epm (:,:,iblk), npm (:,:,iblk), & - uvm (:,:,iblk), & + hm (:,:,iblk), uvm (:,:,iblk), & zetax2T (:,:,iblk), etax2T (:,:,iblk), & stresspU (:,:,iblk), stressmU (:,:,iblk), & - stress12U (:,:,iblk)) + stress12U (:,:,iblk)) call div_stress (nx_block, ny_block, & ! E point ksub, icelle(iblk), & @@ -977,7 +1009,7 @@ subroutine stress (nx_block, ny_block, & rdg_conv, rdg_shear, & str ) - use ice_dyn_shared, only: strain_rates, deformations, viscous_coeffs_and_rep_pressure + use ice_dyn_shared, only: strain_rates, deformations, viscous_coeffs_and_rep_pressure_T integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1041,8 +1073,8 @@ subroutine stress (nx_block, ny_block, & str12ew, str12we, str12ns, str12sn , & strp_tmp, strm_tmp, tmp - logical :: capping ! of the viscous coef - + real(kind=dbl_kind),parameter :: capping = c1 ! of the viscous coef + character(len=*), parameter :: subname = '(stress)' !----------------------------------------------------------------- @@ -1050,7 +1082,6 @@ subroutine stress (nx_block, ny_block, & !----------------------------------------------------------------- str(:,:,:) = c0 - capping = .true. ! could be later included in ice_in do ij = 1, icellt i = indxti(ij) @@ -1079,17 +1110,27 @@ subroutine stress (nx_block, ny_block, & !----------------------------------------------------------------- ! viscous coefficients and replacement pressure !----------------------------------------------------------------- - - call viscous_coeffs_and_rep_pressure (strength(i,j), tinyarea(i,j),& - Deltane, Deltanw, & - Deltasw, Deltase, & - zetax2ne, zetax2nw, & - zetax2sw, zetax2se, & - etax2ne, etax2nw, & - etax2sw, etax2se, & - rep_prsne, rep_prsnw, & - rep_prssw, rep_prsse, & - capping) + + call viscous_coeffs_and_rep_pressure_T (strength(i,j), tinyarea(i,j),& + Deltane, zetax2ne, & + etax2ne, rep_prsne, & + capping) + + call viscous_coeffs_and_rep_pressure_T (strength(i,j), tinyarea(i,j),& + Deltanw, zetax2nw, & + etax2nw, rep_prsnw, & + capping) + + call viscous_coeffs_and_rep_pressure_T (strength(i,j), tinyarea(i,j),& + Deltasw, zetax2sw, & + etax2sw, rep_prssw, & + capping) + + call viscous_coeffs_and_rep_pressure_T (strength(i,j), tinyarea(i,j),& + Deltase, zetax2se, & + etax2se, rep_prsse, & + capping) + !----------------------------------------------------------------- ! the stresses ! kg/s^2 @@ -1345,15 +1386,14 @@ subroutine stress_T (nx_block, ny_block, & divT, tensionT, shearT, DeltaT, & ! strain rates at T point rep_prsT ! replacement pressure at T point - logical :: capping ! of the viscous coef - + real(kind=dbl_kind), parameter :: capping = c1 ! of the viscous coef + character(len=*), parameter :: subname = '(stress_T)' !----------------------------------------------------------------- ! Initialize !----------------------------------------------------------------- - capping = .true. ! 
could be later included in ice_in do ij = 1, icellt i = indxti(ij) @@ -1435,15 +1475,16 @@ subroutine stress_U (nx_block, ny_block, & uvelU, vvelU, & dxE, dyN, & dxU, dyU, & + tarea, & ratiodxN, ratiodxNr, & ratiodyE, ratiodyEr, & - epm, npm, uvm, & + epm, npm, hm, uvm, & zetax2T, etax2T, & stresspU, stressmU, & - stress12U ) + stress12U ) - use ice_dyn_shared, only: strain_rates_U!, & - ! viscous_coeffs_and_rep_pressure_U + use ice_dyn_shared, only: strain_rates_U, & + viscous_coeffs_and_rep_pressure_T2U integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1456,21 +1497,23 @@ subroutine stress_U (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvelE , & ! x-component of velocity (m/s) at the E point - vvelE , & ! y-component of velocity (m/s) at the N point - uvelN , & ! x-component of velocity (m/s) at the E point + vvelE , & ! y-component of velocity (m/s) at the E point + uvelN , & ! x-component of velocity (m/s) at the N point vvelN , & ! y-component of velocity (m/s) at the N point uvelU , & ! x-component of velocity (m/s) at the U point vvelU , & ! y-component of velocity (m/s) at the U point - dxE , & ! width of E-cell through the middle (m) + dxE , & ! width of E-cell through the middle (m) dyN , & ! height of N-cell through the middle (m) - dxU , & ! width of U-cell through the middle (m) + dxU , & ! width of U-cell through the middle (m) dyU , & ! height of U-cell through the middle (m) - ratiodxN , & ! -dxN(i+1,j)/dxN(i,j) for BCs - ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) for BCs - ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) for BCs - ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) for BCs + tarea , & ! area of T-cell (m^2) + ratiodxN , & ! -dxN(i+1,j)/dxN(i,j) factor for BCs across coastline + ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) factor for BCs across coastline + ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) factor for BCs across coastline + ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) factor for BCs across coastline epm , & ! E-cell mask npm , & ! E-cell mask + hm , & ! T-cell mask uvm , & ! U-cell mask zetax2T , & ! 2*zeta at the T point etax2T ! 2*eta at the T point @@ -1517,19 +1560,15 @@ subroutine stress_U (nx_block, ny_block, & ! viscous coefficients and replacement pressure at T point !----------------------------------------------------------------- -! COMING SOON!!! - zetax2U = c0 - etax2U = c0 - rep_prsU = c0 -! call viscous_coeffs_and_rep_pressure_U (zetax2T(i,j), zetax2T(i,j+1), & -! zetax2T(i+1,j+1),zetax2T(i+1,j), & -! etax2T(i,j), etax2T(i,j+1), & -! etax2T(i+1,j+1), etax2T(i+1,j), & -! hm(i,j), hm(i,j+1), & -! hm(i+1,j+1), hm(i+1,j), & -! tarea(i,j), tarea(i,j+1), & -! tarea(i+1,j+1), tarea(i+1,j), & -! DeltaU ) + call viscous_coeffs_and_rep_pressure_T2U (zetax2T(i ,j ), zetax2T(i ,j+1), & + zetax2T(i+1,j+1), zetax2T(i+1,j ), & + etax2T (i ,j ), etax2T (i ,j+1), & + etax2T (i+1,j+1), etax2T (i+1,j ), & + hm (i ,j ), hm (i ,j+1), & + hm (i+1,j+1), hm (i+1,j ), & + tarea (i ,j ), tarea (i ,j+1), & + tarea (i+1,j+1), tarea (i+1,j ), & + DeltaU,zetax2U, etax2U, rep_prsU) !----------------------------------------------------------------- ! the stresses ! 
kg/s^2 diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index dd2e45237..d4622cb6e 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -31,6 +31,7 @@ module ice_dyn_shared strain_rates, strain_rates_T, strain_rates_U, & viscous_coeffs_and_rep_pressure, & viscous_coeffs_and_rep_pressure_T, & + viscous_coeffs_and_rep_pressure_T2U, & stack_velocity_field, unstack_velocity_field ! namelist parameters @@ -605,13 +606,17 @@ subroutine dyn_prep2 (nx_block, ny_block, & !----------------------------------------------------------------- icellu = 0 + do j = jlo, jhi do i = ilo, ihi - - ! ice extent mask (U-cells) iceumask_old(i,j) = iceumask(i,j) ! save +! if (grid_system == 'B') then ! include ice mask. + ! ice extent mask (U-cells) iceumask(i,j) = (umask(i,j)) .and. (aiu (i,j) > a_min) & - .and. (umass(i,j) > m_min) + .and. (umass(i,j) > m_min) +! else ! ice mask shpuld be applied to cd grid. For now it is not implemented. +! iceumask(i,j) = umask(i,j) +! endif if (iceumask(i,j)) then icellu = icellu + 1 @@ -637,7 +642,6 @@ subroutine dyn_prep2 (nx_block, ny_block, & vvel_init(i,j) = vvel(i,j) enddo enddo - !----------------------------------------------------------------- ! Define variables for momentum equation !----------------------------------------------------------------- @@ -916,17 +920,15 @@ subroutine step_vel (nx_block, ny_block, & ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s - ab2 = cca**2 + ccb**2 + ab2 = cca**2 + ccb**2 ! compute the velocity components cc1 = strintx(i,j) + forcex(i,j) + taux & + massdti(i,j)*(brlx*uold + revp*uvel_init(i,j)) cc2 = strinty(i,j) + forcey(i,j) + tauy & + massdti(i,j)*(brlx*vold + revp*vvel_init(i,j)) - uvel(i,j) = (cca*cc1 + ccb*cc2) / ab2 ! m/s vvel(i,j) = (cca*cc2 - ccb*cc1) / ab2 - ! calculate seabed stress component for outputs if (ksub == ndte .and. seabed_stress) then ! on last subcycling iteration taubx(i,j) = -uvel(i,j)*Tb(i,j) / ccc @@ -1899,7 +1901,7 @@ subroutine viscous_coeffs_and_rep_pressure (strength, tinyarea, & real (kind=dbl_kind), intent(in):: & Deltane, Deltanw, Deltasw, Deltase ! Delta at each corner - logical , intent(in):: capping + real(kind=dbl_kind) , intent(in):: capping real (kind=dbl_kind), intent(out):: & zetax2ne, zetax2nw, zetax2sw, zetax2se, & ! zetax2 at each corner @@ -1912,39 +1914,30 @@ subroutine viscous_coeffs_and_rep_pressure (strength, tinyarea, & ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code -! if (trim(yield_curve) == 'ellipse') then - - if (capping) then - tmpcalcne = strength/max(Deltane,tinyarea) - tmpcalcnw = strength/max(Deltanw,tinyarea) - tmpcalcsw = strength/max(Deltasw,tinyarea) - tmpcalcse = strength/max(Deltase,tinyarea) - else - tmpcalcne = strength/(Deltane + tinyarea) - tmpcalcnw = strength/(Deltanw + tinyarea) - tmpcalcsw = strength/(Deltasw + tinyarea) - tmpcalcse = strength/(Deltase + tinyarea) - endif - - zetax2ne = (c1+Ktens)*tmpcalcne ! 
northeast - rep_prsne = (c1-Ktens)*tmpcalcne*Deltane - etax2ne = epp2i*zetax2ne + tmpcalcne = capping *(strength/max(Deltane, tinyarea))+ & + (c1-capping)* strength/ (Deltane+ tinyarea) + tmpcalcnw = capping *(strength/max(Deltanw, tinyarea))+ & + (c1-capping)* strength/ (Deltanw+ tinyarea) + tmpcalcsw = capping *(strength/max(Deltasw, tinyarea))+ & + (c1-capping)* strength/ (Deltasw+ tinyarea) + tmpcalcse = capping *(strength/max(Deltase, tinyarea))+ & + (c1-capping)* strength/ (Deltase+ tinyarea) + + zetax2ne = (c1+Ktens)*tmpcalcne ! northeast + rep_prsne = (c1-Ktens)*tmpcalcne*Deltane + etax2ne = epp2i*zetax2ne - zetax2nw = (c1+Ktens)*tmpcalcnw ! northwest - rep_prsnw = (c1-Ktens)*tmpcalcnw*Deltanw - etax2nw = epp2i*zetax2nw + zetax2nw = (c1+Ktens)*tmpcalcnw ! northwest + rep_prsnw = (c1-Ktens)*tmpcalcnw*Deltanw + etax2nw = epp2i*zetax2nw - zetax2sw = (c1+Ktens)*tmpcalcsw ! southwest - rep_prssw = (c1-Ktens)*tmpcalcsw*Deltasw - etax2sw = epp2i*zetax2sw + zetax2sw = (c1+Ktens)*tmpcalcsw ! southwest + rep_prssw = (c1-Ktens)*tmpcalcsw*Deltasw + etax2sw = epp2i*zetax2sw - zetax2se = (c1+Ktens)*tmpcalcse ! southeast - rep_prsse = (c1-Ktens)*tmpcalcse*Deltase - etax2se = epp2i*zetax2se - -! else - -! endif + zetax2se = (c1+Ktens)*tmpcalcse ! southeast + rep_prsse = (c1-Ktens)*tmpcalcse*Deltase + etax2se = epp2i*zetax2se end subroutine viscous_coeffs_and_rep_pressure @@ -1961,6 +1954,7 @@ end subroutine viscous_coeffs_and_rep_pressure ! Lemieux, J. F. et al. (2016). Improving the simulation of landfast ice ! by combining tensile strength and a parameterization for grounded ridges. ! J. Geophys. Res. Oceans, 121, 7354-7368. +! capping must be 1 (c1) for evp and 0 for vp solver subroutine viscous_coeffs_and_rep_pressure_T (strength, tinyarea, & Delta , zetax2 , & @@ -1971,9 +1965,7 @@ subroutine viscous_coeffs_and_rep_pressure_T (strength, tinyarea, & strength, tinyarea real (kind=dbl_kind), intent(in):: & - Delta - - logical, intent(in):: capping + Delta, capping real (kind=dbl_kind), intent(out):: & zetax2, etax2, rep_prs ! 2 x visous coeffs, replacement pressure @@ -1986,23 +1978,62 @@ subroutine viscous_coeffs_and_rep_pressure_T (strength, tinyarea, & ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code -! if (trim(yield_curve) == 'ellipse') then - - if (capping) then - tmpcalc = strength/max(Delta,tinyarea) - else - tmpcalc = strength/(Delta + tinyarea) - endif - + tmpcalc = capping *(strength/max(Delta,tinyarea))+ & + (c1-capping)*(strength/(Delta + tinyarea)) zetax2 = (c1+Ktens)*tmpcalc rep_prs = (c1-Ktens)*tmpcalc*Delta etax2 = epp2i*zetax2 -! else + end subroutine viscous_coeffs_and_rep_pressure_T -! endif - end subroutine viscous_coeffs_and_rep_pressure_T + subroutine viscous_coeffs_and_rep_pressure_T2U (zetax2T_00, zetax2T_01, & + zetax2T_11, zetax2T_10, & + etax2T_00, etax2T_01, & + etax2T_11, etax2T_10, & + maskT_00, maskT_01, & + maskT_11, maskT_10, & + tarea_00, tarea_01, & + tarea_11, tarea_10, & + deltaU, & + zetax2U, etax2U, & + rep_prsU) + + + real (kind=dbl_kind), intent(in):: & + zetax2T_00,zetax2T_10,zetax2T_11,zetax2T_01, & + etax2T_00, etax2T_10, etax2T_11, etax2T_01, & ! 2 x visous coeffs, replacement pressure + maskT_00, maskT_10, maskT_11, maskT_01, & + tarea_00, tarea_10, tarea_11, tarea_01, & + deltaU + + real (kind=dbl_kind), intent(out):: zetax2U, etax2U, rep_prsU + + ! local variables + + real (kind=dbl_kind) :: & + Totarea + + character(len=*), parameter :: subname = '(viscous_coeffs_and_rep_pressure_T2U)' + + ! NOTE: for comp. 
efficiency 2 x zeta and 2 x eta are used in the code + Totarea = maskT_00*Tarea_00 + & + maskT_10*Tarea_10 + & + maskT_11*Tarea_11 + & + maskT_01*Tarea_01 + zetax2U = (maskT_00*Tarea_00 *zetax2T_00 + & + maskT_10*Tarea_10 *zetax2T_10 + & + maskT_11*Tarea_11 *zetax2T_11 + & + maskT_01*Tarea_01 *zetax2T_01)/Totarea + + etax2U = (maskT_00*Tarea_00 *etax2T_00 + & + maskT_10*Tarea_10 *etax2T_10 + & + maskT_11*Tarea_11 *etax2T_11 + & + maskT_01*Tarea_01 *etax2T_01)/Totarea + + rep_prsU = (c1-ktens)/(1+Ktens)*zetax2U*deltaU + + end subroutine viscous_coeffs_and_rep_pressure_T2U !======================================================================= diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 2c1b23032..5bbef02a4 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -1202,13 +1202,11 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & stressp_1, stressp_2, stressp_3, stressp_4 , & strp_tmp - logical :: capping ! of the viscous coeff - + real(kind=dbl_kind) ,parameter :: capping = c0 ! of the viscous coef character(len=*), parameter :: subname = '(calc_zeta_dPr)' ! Initialize - capping = .false. ! Initialize stPr, zetax2 and etax2 to zero ! (for cells where icetmask is false) diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 8212def06..9dfc7b27b 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -155,7 +155,7 @@ module ice_grid real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & hm , & ! land/boundary mask, thickness (T-cell) bm , & ! task/block id - uvm , & ! land/boundary mask, velocity (U-cell) + uvm , & ! land/boundary mask, velocity (U-cell) npm , & ! land/boundary mask (N-cell) epm , & ! land/boundary mask (E-cell) kmt ! ocean topography mask for bathymetry (T-cell) @@ -167,7 +167,8 @@ module ice_grid logical (kind=log_kind), & dimension (:,:,:), allocatable, public :: & tmask , & ! land/boundary mask, thickness (T-cell) - umask , & ! land/boundary mask, velocity (U-cell) + umask , & ! land/boundary mask, velocity (U-cell) (1 if all surrounding T cells are ocean) + umaskCD, & ! land/boundary mask, velocity (U-cell) (1 if at least two surrounding T cells are ocean) nmask , & ! land/boundary mask, (N-cell) emask , & ! land/boundary mask, (E-cell) lmask_n, & ! northern hemisphere mask @@ -239,12 +240,13 @@ subroutine alloc_grid yyav (nx_block,ny_block,max_blocks), & ! mean T-cell value of yy hm (nx_block,ny_block,max_blocks), & ! land/boundary mask, thickness (T-cell) bm (nx_block,ny_block,max_blocks), & ! task/block id - uvm (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) + uvm (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) - water in case of all water point npm (nx_block,ny_block,max_blocks), & ! land/boundary mask (N-cell) epm (nx_block,ny_block,max_blocks), & ! land/boundary mask (E-cell) kmt (nx_block,ny_block,max_blocks), & ! ocean topography mask for bathymetry (T-cell) tmask (nx_block,ny_block,max_blocks), & ! land/boundary mask, thickness (T-cell) umask (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) + umaskCD (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) nmask (nx_block,ny_block,max_blocks), & ! land/boundary mask (N-cell) emask (nx_block,ny_block,max_blocks), & ! 
land/boundary mask (E-cell)
       lmask_n (nx_block,ny_block,max_blocks), & ! northern hemisphere mask
@@ -1925,7 +1927,7 @@ end subroutine primary_grid_lengths_HTE

    subroutine makemask

-      use ice_constants, only: c0, p5, &
+      use ice_constants, only: c0, p5, c1p5, &
          field_loc_center, field_loc_NEcorner, field_type_scalar, &
          field_loc_Nface, field_loc_Eface

@@ -1936,6 +1938,9 @@ subroutine makemask
       real (kind=dbl_kind) :: &
          puny

+      real (kind=dbl_kind), dimension(:,:,:), allocatable :: &
+         uvmCD
+
       type (block) :: &
          this_block           ! block information for current block

@@ -1958,6 +1963,7 @@ subroutine makemask
      !-----------------------------------------------------------------

       bm = c0
+      allocate(uvmCD(nx_block,ny_block,max_blocks))

       !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block)
       do iblk = 1, nblocks
@@ -1974,6 +1980,8 @@ subroutine makemask
             npm(i,j,iblk) = min (hm(i,j, iblk), hm(i,j+1,iblk))
             epm(i,j,iblk) = min (hm(i,j, iblk), hm(i+1,j,iblk))
             bm(i,j,iblk) = my_task + iblk/100.0_dbl_kind
+            uvmCD(i,j,iblk) = (hm(i,j, iblk)+hm(i+1,j, iblk) &
+                             + hm(i,j+1,iblk)+hm(i+1,j+1,iblk))
          enddo
          enddo
       enddo
@@ -1982,6 +1990,8 @@ subroutine makemask
       call ice_timer_start(timer_bound)
       call ice_HaloUpdate (uvm, halo_info, &
                            field_loc_NEcorner, field_type_scalar)
+      call ice_HaloUpdate (uvmCD, halo_info, &
+                           field_loc_NEcorner, field_type_scalar)
       call ice_HaloUpdate (npm, halo_info, &
                            field_loc_Nface, field_type_scalar)
       call ice_HaloUpdate (epm, halo_info, &
@@ -1999,19 +2009,32 @@ subroutine makemask
          jhi = this_block%jhi

          ! needs to cover halo (no halo update for logicals)
-         tmask(:,:,iblk) = .false.
-         umask(:,:,iblk) = .false.
-         nmask(:,:,iblk) = .false.
-         emask(:,:,iblk) = .false.
+         tmask(:,:,iblk)   = .false.
+         umask(:,:,iblk)   = .false.
+         umaskCD(:,:,iblk) = .false.
+         nmask(:,:,iblk)   = .false.
+         emask(:,:,iblk)   = .false.
          do j = jlo-nghost, jhi+nghost
          do i = ilo-nghost, ihi+nghost
-            if ( hm(i,j,iblk) > p5) tmask(i,j,iblk) = .true.
-            if (uvm(i,j,iblk) > p5) umask(i,j,iblk) = .true.
-            if (npm(i,j,iblk) > p5) nmask(i,j,iblk) = .true.
-            if (epm(i,j,iblk) > p5) emask(i,j,iblk) = .true.
+            if ( hm(i,j,iblk)   > p5  ) tmask  (i,j,iblk) = .true.
+            if (uvm(i,j,iblk)   > p5  ) umask  (i,j,iblk) = .true.
+            if (uvmCD(i,j,iblk) > c1p5) umaskCD(i,j,iblk) = .true.
+            if (npm(i,j,iblk)   > p5  ) nmask  (i,j,iblk) = .true.
+            if (epm(i,j,iblk)   > p5  ) emask  (i,j,iblk) = .true.
          enddo
          enddo
+      enddo
+      !$OMP END PARALLEL DO
+
+      !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block)
+      do iblk = 1, nblocks
+         this_block = get_block(blocks_ice(iblk),iblk)
+         ilo = this_block%ilo
+         ihi = this_block%ihi
+         jlo = this_block%jlo
+         jhi = this_block%jhi
+
      !-----------------------------------------------------------------
      ! create hemisphere masks
      !-----------------------------------------------------------------
@@ -2045,6 +2068,8 @@ subroutine makemask
       enddo  ! iblk
       !$OMP END PARALLEL DO

+      deallocate(uvmCD)
+
 end subroutine makemask

 !=======================================================================

From 9e6e8f330efbadb7d1613862e32c3b8c5cebfa8b Mon Sep 17 00:00:00 2001
From: daveh150
Date: Wed, 24 Nov 2021 10:58:32 -0600
Subject: [PATCH 054/109] Added boxnodyn tests to gridsys_suite.ts. Also added
 machinery to sub… (#40)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Added boxnodyn tests to gridsys_suite.ts.
Also added machinery to submit batch jobs on local NRLSSC grid system * Change NRLSSC to lowercase nrlssc --- configuration/scripts/cice.batch.csh | 16 +++++ configuration/scripts/cice.launch.csh | 13 ++++ .../scripts/machines/Macros.nrlssc_gnu | 59 +++++++++++++++++++ configuration/scripts/machines/env.nrlssc_gnu | 16 +++++ configuration/scripts/tests/gridsys_suite.ts | 3 +- 5 files changed, 106 insertions(+), 1 deletion(-) create mode 100644 configuration/scripts/machines/Macros.nrlssc_gnu create mode 100755 configuration/scripts/machines/env.nrlssc_gnu diff --git a/configuration/scripts/cice.batch.csh b/configuration/scripts/cice.batch.csh index 024270039..04f397034 100755 --- a/configuration/scripts/cice.batch.csh +++ b/configuration/scripts/cice.batch.csh @@ -104,6 +104,22 @@ cat >> ${jobfile} << EOFB ###PBS -m be EOFB +else if (${ICE_MACHINE} =~ nrlssc*) then +# nrlssc queue system has nodes with different task per node +if (${taskpernode} <= 12) set tpnstr = 'twelve' +if (${taskpernode} == 20) set tpnstr = 'twenty' +if (${taskpernode} == 24) set tpnstr = 'twentyfour' +if (${taskpernode} == 28) set tpnstr = 'twentyeight' + +cat >> ${jobfile} <&! \$ICE_RUNLOG_FILE EOFR endif +#======= +else if (${ICE_MACHINE} =~ nrlssc*) then +if (${ICE_COMMDIR} =~ serial*) then +cat >> ${jobfile} << EOFR +./cice >&! \$ICE_RUNLOG_FILE +EOFR +else +cat >> ${jobfile} << EOFR +mpirun -np ${ntasks} ./cice >&! \$ICE_RUNLOG_FILE +EOFR +endif + + #======= else if (${ICE_MACHINE} =~ onyx*) then cat >> ${jobfile} << EOFR diff --git a/configuration/scripts/machines/Macros.nrlssc_gnu b/configuration/scripts/machines/Macros.nrlssc_gnu new file mode 100644 index 000000000..91d2fae0b --- /dev/null +++ b/configuration/scripts/machines/Macros.nrlssc_gnu @@ -0,0 +1,59 @@ +#============================================================================== +# Makefile macros for NRLSSC GCC and openmpi compilers +#============================================================================== + +# specific Netcdf and MPI paths, since we use /common instead of /usr + +#MPIHOME := /common/openmpi/gnu10.1.0/3.1.6 +#CDFHOME := /common/netcdf/gnu10.1.0/openmpi3.1.6/4.7.4 + +# use MY defined vars from .setenv_linux +MPIHOME := ${MPI_HOME} +CDFHOME := ${NETCDF_HOME} + +CPP := cpp +CPPDEFS := -DFORTRANUNDERSCORE -DNO_R16 -DHAVE_F2008_CONTIGUOUS -DLINUX -DCPRINTEL ${ICE_CPPDEFS} +CFLAGS := -c -O2 + +FIXEDFLAGS := -ffixed-line-length-132 +FREEFLAGS := -ffree-form +FFLAGS := -fconvert=big-endian -fbacktrace -ffree-line-length-none +FFLAGS_NOOPT := -O0 + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -g -fcheck=bounds -finit-real=nan -fimplicit-none -ffpe-trap=invalid,zero,overflow +else + FFLAGS += -O2 +endif + +FC := $(MPIHOME)/bin/mpif90 + +CC:= $(MPIHOME)/bin/mpicc + +MPICC:= $(MPIHOME)/bin/mpicc + +MPIFC:= $(MPIHOME)/bin/mpif90 +LD:= $(FC) + +NETCDF_PATH := $(CDFHOME) + +ifeq ($(ICE_IOTYPE), netcdf) + # NETCDF_PATH := $(shell nc-config --prefix) + INCLDIR := $(INCLDIR) -I$(NETCDF_PATH)/include + LIB_NETCDF := $(NETCDF_PATH)/lib + LIB_PNETCDF := + LD := $(LD) -Wl,-rpath,$(LIB_NETCDF) + SLIBS := -L$(LIB_NETCDF) -lnetcdf -lnetcdff +else + SLIBS := +endif + +LIB_MPI := +SCC:= gcc +SFC:= gfortran + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -fopenmp + CFLAGS += -fopenmp + FFLAGS += -fopenmp +endif diff --git a/configuration/scripts/machines/env.nrlssc_gnu b/configuration/scripts/machines/env.nrlssc_gnu new file mode 100755 index 000000000..f050a61b8 --- /dev/null +++ b/configuration/scripts/machines/env.nrlssc_gnu @@ -0,0 +1,16 @@ 
+#!/bin/csh -f
+
+setenv ICE_MACHINE_MACHNAME NRLSSC
+setenv ICE_MACHINE_MACHINFO "NRLSSC"
+setenv ICE_MACHINE_ENVNAME gnu
+setenv ICE_MACHINE_ENVINFO "gnu"
+setenv ICE_MACHINE_MAKE gmake
+setenv ICE_MACHINE_WKDIR /u/data/hebert/CICE_RUNS
+setenv ICE_MACHINE_INPUTDATA /u/data/hebert/CICE_RUNS
+setenv ICE_MACHINE_BASELINE /u/data/hebert/CICE_BASELINE
+setenv ICE_MACHINE_SUBMIT "qsub "
+setenv ICE_MACHINE_ACCT P00000000
+setenv ICE_MACHINE_QUEUE "standard"
+setenv ICE_MACHINE_TPNODE 20   # tasks per node
+setenv ICE_MACHINE_BLDTHRDS 1
+setenv ICE_MACHINE_QSTAT "qstat "
diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts
index 54eeb45ea..afc91fa4f 100644
--- a/configuration/scripts/tests/gridsys_suite.ts
+++ b/configuration/scripts/tests/gridsys_suite.ts
@@ -3,6 +3,7 @@ smoke gx3 8x2 diag1,run5day
 restart gx3 4x2 debug,diag1
 smoke gbox80 1x1 box2001
 smoke gbox80 1x1 boxslotcyl
+smoke gbox80 2x4 boxnodyn
 smoke gbox80 2x2 boxsymn
 smoke gbox80 4x2 boxsyme
 smoke gbox80 4x1 boxsymne
@@ -13,11 +14,11 @@ smoke gbox80 8x1 boxislandsn
 smoke gbox80 4x2 boxislandse
 smoke gbox80 2x4 boxislandsne
-
 smoke gx3 8x2 diag1,run5day,gridcd
 restart gx3 4x2 debug,diag1,gridcd
 smoke gbox80 1x1 box2001,gridcd
 smoke gbox80 1x1 boxslotcyl,gridcd
+smoke gbox80 2x4 boxnodyn,gridcd
 smoke gbox80 2x2 boxsymn,gridcd
 smoke gbox80 4x2 boxsyme,gridcd
 smoke gbox80 4x1 boxsymne,gridcd

From c2c0c4a021ab348dd64796fcb552fdfa704bdf78 Mon Sep 17 00:00:00 2001
From: "David A. Bailey"
Date: Mon, 29 Nov 2021 14:39:42 -0700
Subject: [PATCH 055/109] Initialize uvel, vvel at every substep (#43)

---
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index 0c9e1126e..a1f09ff9d 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -646,6 +646,13 @@ subroutine evp (dt)

       do ksub = 1,ndte        ! subcycling

+      ! shift velocity components from CD grid locations (N, E) to B grid location (U) for stress_U
+
+      if (grid_system == 'CD') then
+         call grid_average_X2Y('E2US',uvelE,uvel)
+         call grid_average_X2Y('N2US',vvelN,vvel)
+      endif
+
       !-----------------------------------------------------------------
       ! stress tensor equation, total surface stress
       !-----------------------------------------------------------------

From 54e5c34fbbab0218ec3567cb2cc72ac03b2e8849 Mon Sep 17 00:00:00 2001
From: Tony Craig
Date: Tue, 30 Nov 2021 13:01:15 -0800
Subject: [PATCH 056/109] Update to Consortium #162aee9, update scripts (#44)

* update icepack, rename snwITDrdg to snwitdrdg (#658)

* Change max_blocks for rake tests on izumi (nothread). (#665)

* Fix some raketests for izumi

* fix some rake tests

* Makefile: make Fortran object files depend on their dependency files (#667)

When 'make' is invoked on the CICE Makefile, the first thing it does is
to try to make the included dependency files (*.d) (which are in fact
Makefiles themselves) [1], in alphabetical order.

The rule to make the dep files has the dependency generator, 'makdep', as
a prerequisite, so when processing the first dep file, make notices 'makdep'
does not exist and proceeds to build it. If for whatever reason this
compilation fails, make will then proceed to the second dep file, notice
that it recently tried and failed to build its dependency 'makdep', give up
on the second dep file, proceed to the third, and so on.

In the end, no dep file is produced.
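Before continuing with the script updates, one note on the PATCH 055 hunk above: uvel and vvel must be refreshed from uvelE and vvelN at every subcycle because stress_U consumes B-grid (U point) velocities while the CD solver advances the face-centered ones. Geometrically, U(i,j) is the NE corner of T cell (i,j), and the east-face points E(i,j) and E(i,j+1) straddle it, so an E-to-U state average is a two-point stencil in j (likewise, N-to-U is two-point in i). A minimal standalone sketch of such a masked, area-weighted two-point average follows; the program, the sample values, and the epm/earea weighting are illustrative assumptions, and the authoritative stencil is grid_average_X2Y in ice_grid.F90:

program e2u_demo
   implicit none
   integer, parameter :: dbl = selected_real_kind(13)
   real (kind=dbl), parameter :: puny = 1.0e-11_dbl
   real (kind=dbl) :: uvelE(2)   ! E values on the faces just below and above one U point
   real (kind=dbl) :: earea(2)   ! E-cell areas, used as weights
   real (kind=dbl) :: epm  (2)   ! E-cell ocean mask (1=ocean, 0=land)
   real (kind=dbl) :: uvelU      ! result at the U point

   uvelE = (/ 0.10_dbl, 0.30_dbl /)
   earea = (/ 1.0e8_dbl, 1.0e8_dbl /)
   epm   = (/ 1.0_dbl, 1.0_dbl /)

   ! masked, area-weighted two-point state average onto the U point
   uvelU = sum(uvelE*earea*epm) / max(sum(earea*epm), puny)

   print *, 'uvelU =', uvelU     ! 0.20 with these values
end program e2u_demo

Doing the shift inside the ksub loop keeps the B-grid and CD-grid states consistent at the cost of one extra average per subcycle.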
Make then restarts itself and proceeds to build the code, which of course fails catastrophically because the Fortran source files are not compiled in the right order because the dependency files are missing. To avoid that, add a dependency on the dep file to the rules that make the object file out of the Fortran source files. Since old-fashioned suffix rules cannot have their own prerequisites [2], migrate the rules for the Fortran source files to use pattern rules [3] instead. While at it, also migrate the rule for the C source files. With this new dependency, the builds abort early, before trying to compile the Fortran sources, making it easier to understand what has gone wrong. Since we do not use suffix rules anymore, remove the '.SUFFIXES' line that indicates which extension to use suffix rules for (but keep the line that eliminates all default suffix rules). [1] https://www.gnu.org/software/make/manual/html_node/Remaking-Makefiles.html [2] https://www.gnu.org/software/make/manual/html_node/Suffix-Rules.html [3] https://www.gnu.org/software/make/manual/html_node/Pattern-Rules.html#Pattern-Rules * Fix multi-pe advection=none bug (#664) * update parsing scripts to improve robustness, fix multi-pe advection=none * Update cice script to improve performance including minor refactoring of parse_namelist and parse_settings to reduce cost and ability to use already setup ice_in file from a prior case in the suite. Added commented out timing ability in cice.setup. Change test default to PEND from FAIL. * fix cice.setup for case * add sedbak implementation to support Mac sed * s/spend/spent Co-authored-by: David A. Bailey Co-authored-by: Philippe Blain --- cice.setup | 71 ++++++++++++++++--- configuration/scripts/Makefile | 7 +- .../{set_nml.snwITDrdg => set_nml.snwitdrdg} | 0 configuration/scripts/parse_namelist.sh | 18 +++-- .../scripts/parse_namelist_from_env.sh | 3 +- configuration/scripts/parse_settings.sh | 34 +++++---- configuration/scripts/tests/base_suite.ts | 4 +- configuration/scripts/tests/nothread_suite.ts | 4 +- icepack | 2 +- 9 files changed, 99 insertions(+), 44 deletions(-) rename configuration/scripts/options/{set_nml.snwITDrdg => set_nml.snwitdrdg} (100%) diff --git a/cice.setup b/cice.setup index aae4319d4..60c56e5c2 100755 --- a/cice.setup +++ b/cice.setup @@ -1,5 +1,7 @@ #!/bin/csh -f +#set pd0 = `date -u "+%s%N"` + set ICE_SANDBOX = `pwd` set ICE_VERSION = unknown if (-e cicecore/version.txt) then @@ -824,8 +826,8 @@ EOF # set default test output as failure if (${docase} == 0) then echo "#---" >! test_output - echo "FAIL ${testname_noid} build" >> test_output - echo "FAIL ${testname_noid} run" >> test_output + echo "PEND ${testname_noid} build" >> test_output + echo "PEND ${testname_noid} run" >> test_output endif # from basic script dir to case @@ -934,9 +936,21 @@ EOF if (-e ${fimods}) rm ${fimods} if (-e ${fsmods}) rm ${fsmods} + # Use an existing ice_in file from the suite if it exists + # to reduce time spent in parse_namelist + set skip_parse_namelist = spval + if (${dosuite} == 1) then + set iceinfn = ../ice_in_save_${grid}${soptions} + if (-e ${iceinfn}) then + echo "use ${iceinfn}" + cp ${iceinfn} ice_in + set skip_parse_namelist = true + endif + endif + + # Set decomp info in namelist cat >! ${fimods} << EOF1 # cice.setup settings - nprocs = ${task} nx_global = ${ICE_DECOMP_NXGLOB} ny_global = ${ICE_DECOMP_NYGLOB} @@ -965,7 +979,6 @@ EOF1 cat >! 
${fsmods} << EOF1 # cice.setup settings - setenv ICE_SANDBOX ${ICE_SANDBOX} setenv ICE_SCRIPTS ${ICE_SCRIPTS} setenv ICE_CASENAME ${casename} @@ -1034,44 +1047,57 @@ EOF1 foreach name (${grid} $setsx) set found = 0 + if (-e ${ICE_SCRIPTS}/options/set_nml.${name}) then cat >> ${fimods} << EOF2 # set_nml.${name} - EOF2 - cat ${ICE_SCRIPTS}/options/set_nml.${name} >> ${fimods} - cat >> ${fimods} << EOF2 - + if ("${skip_parse_namelist}" == "true") then + # need to make sure the decomp info from the set_nml is picked up. each case + # has a slightly different decomp that is independent of the ice_in_save file. + # compute that then overwrite by set_nml as needed. + grep -i "distribution_type" ${ICE_SCRIPTS}/options/set_nml.${name} >> ${fimods} + grep -i "processor_shape" ${ICE_SCRIPTS}/options/set_nml.${name} >> ${fimods} + cat >> ${fimods} << EOF2 +# using saved ice_in EOF2 + else + cat ${ICE_SCRIPTS}/options/set_nml.${name} >> ${fimods} + cat >> ${fimods} << EOF2 +EOF2 + endif echo "adding namelist mods set_nml.${name}" echo "`date` ${0} adding namelist modes set_nml.${name}" >> ${casedir}/README.case set found = 1 endif + if (-e ${ICE_SCRIPTS}/options/set_env.${name}) then cat >> ${fsmods} << EOF2 # set_env.${name} - EOF2 cat ${ICE_SCRIPTS}/options/set_env.${name} >> ${fsmods} cat >> ${fsmods} << EOF2 - EOF2 echo "adding env mods set_env.${name}" echo "`date` ${0} adding namelist modes set_env.${name}" >> ${casedir}/README.case set found = 1 endif + if (${found} == 0) then echo "${0}: ERROR, ${ICE_SCRIPTS}/options/set_[nml,env].${name} not found" exit -1 endif end +#set pd1 = `date -u "+%s%N"` +#@ pdd = ( $pd1 - $pd0 ) / 1000000 +#echo "tcxp b4 parse $pdd" ${casescr}/parse_settings.sh cice.settings ${fsmods} if ($status != 0) then - echo "${0}: ERROR, parse_namelist.sh aborted" + echo "${0}: ERROR, parse_settings.sh aborted" exit -1 endif ${casescr}/parse_namelist.sh ice_in ${fimods} @@ -1082,6 +1108,20 @@ EOF2 source ./cice.settings source ./env.${machcomp} -nomodules || exit 2 ${casescr}/parse_namelist_from_env.sh ice_in + if ($status != 0) then + echo "${0}: ERROR, parse_namelist_from_env.sh aborted" + exit -1 + endif +#set pd1 = `date -u "+%s%N"` +#@ pdd = ( $pd1 - $pd0 ) / 1000000 +#echo "tcxp after parse $pdd" + + # Save ice_in in the suite to reduce time spent in parse_namelist + if (${dosuite} == 1) then + if !(-e ${iceinfn}) then + cp ice_in ${iceinfn} + endif + endif #------------------------------------------------------------ # Generate run script @@ -1166,6 +1206,10 @@ EOF echo "" endif +#set pd1 = `date -u "+%s%N"` +#@ pdd = ( $pd1 - $pd0 ) / 1000000 +#echo "tcxp case done $pdd" + # This is the foreach end for the testsuite end # This is the foreach end for the envnames @@ -1180,6 +1224,7 @@ if ( ${dosuite} == 1 ) then cat >> ${tsdir}/suite.submit << EOF0 set nonomatch && rm -f ciceexe.* && unset nonomatch +set nonomatch && rm -f ice_in_save* && unset nonomatch EOF0 @@ -1222,6 +1267,10 @@ endif #--------------------------------------------- +#set pd1 = `date -u "+%s%N"` +#@ pdd = ( $pd1 - $pd0 ) / 1000000 +#echo "tcxp done $pdd" + echo " " echo "${0} done" echo " " diff --git a/configuration/scripts/Makefile b/configuration/scripts/Makefile index 1633b1542..0322513d2 100644 --- a/configuration/scripts/Makefile +++ b/configuration/scripts/Makefile @@ -73,7 +73,6 @@ RM := rm AR := ar .SUFFIXES: -.SUFFIXES: .F90 .F .c .o .PHONY: all cice libcice targets target db_files db_flags clean realclean helloworld calchk sumchk bcstchk gridavgchk all: $(EXEC) @@ -169,13 +168,13 @@ libcice: 
$(OBJS) @ echo "$(AR) -r $(EXEC) $(OBJS)" $(AR) -r $(EXEC) $(OBJS) -.c.o: +%.o : %.c $(CC) $(CFLAGS) $(CPPDEFS) $(INCLDIR) $< -.F.o: +%.o : %.F %.d $(FC) -c $(FFLAGS) $(FIXEDFLAGS) $(CPPDEFS) $(INCLDIR) $< -.F90.o: +%.o : %.F90 %.d $(FC) -c $(FFLAGS) $(FREEFLAGS) $(CPPDEFS) $(MODDIR) $(INCLDIR) $< clean: diff --git a/configuration/scripts/options/set_nml.snwITDrdg b/configuration/scripts/options/set_nml.snwitdrdg similarity index 100% rename from configuration/scripts/options/set_nml.snwITDrdg rename to configuration/scripts/options/set_nml.snwitdrdg diff --git a/configuration/scripts/parse_namelist.sh b/configuration/scripts/parse_namelist.sh index ea539a2d0..dcb0d1ccc 100755 --- a/configuration/scripts/parse_namelist.sh +++ b/configuration/scripts/parse_namelist.sh @@ -10,7 +10,7 @@ filename=$1 filemods=$2 #echo "$0 $1 $2" -echo "running parse_namelist.sh" +echo "running ${scriptname}" foundstring="FoundSTRING" vnamearray=() valuearray=() @@ -43,11 +43,9 @@ do fi done - #sed -i 's|\(^\s*'"$vname"'\s*\=\s*\)\(.*$\)|\1'"$value"'|g' $filename - cp ${filename} ${filename}.check - sed -i.sedbak -e 's|\(^[[:space:]]*'"$vname"'[[:space:]]*=[[:space:]]*\)\(.*$\)|\1'"$foundstring"'|g' ${filename}.check - grep -q ${foundstring} ${filename}.check - if [ $? -eq 0 ]; then + grep -q "^[[:space:]]*${vname}[[:space:]]*=" $filename + grepout=$? + if [ ${grepout} -eq 0 ]; then sed -i.sedbak -e 's|\(^[[:space:]]*'"$vname"'[[:space:]]*=[[:space:]]*\)\(.*$\)|\1'"$value"'|g' ${filename} if [[ "${found}" == "${foundstring}" ]]; then vnamearray+=($vname) @@ -55,17 +53,17 @@ do else valuearray[$found]=${value} fi - if [[ -e "${filename}.sedbak" ]]; then - rm ${filename}.sedbak - fi else echo "${scriptname} ERROR: parsing error for ${vname}" exit -99 fi - rm ${filename}.check ${filename}.check.sedbak fi done < "$filemods" +if [[ -e "${filename}.sedbak" ]]; then + rm ${filename}.sedbak +fi + exit 0 diff --git a/configuration/scripts/parse_namelist_from_env.sh b/configuration/scripts/parse_namelist_from_env.sh index 4d829450f..4c25d358d 100755 --- a/configuration/scripts/parse_namelist_from_env.sh +++ b/configuration/scripts/parse_namelist_from_env.sh @@ -5,10 +5,11 @@ if [[ "$#" -ne 1 ]]; then exit -1 fi +scriptname=`basename "$0"` filename=$1 #echo "$0 $1" -echo "running parse_namelist_from_env.sh" +echo "running $scriptname" sed -i.sedbak -e 's|ICE_SANDBOX|'"${ICE_SANDBOX}"'|g' $filename sed -i.sedbak -e 's|ICE_MACHINE_INPUTDATA|'"${ICE_MACHINE_INPUTDATA}"'|g' $filename diff --git a/configuration/scripts/parse_settings.sh b/configuration/scripts/parse_settings.sh index d6ed31c15..a3f432801 100755 --- a/configuration/scripts/parse_settings.sh +++ b/configuration/scripts/parse_settings.sh @@ -10,7 +10,7 @@ filename=$1 filemods=$2 #echo "$0 $1 $2" -echo "running parse_settings.sh" +echo "running ${scriptname}" foundstring="FoundSTRING" vnamearray=() valuearray=() @@ -23,8 +23,11 @@ do else #vname=`echo $line | sed "s|\(^\s*set\S*\)\s\{1,100\}\(\S*\)\s\{1,100\}\(\S*\).*$|\2|g"` #value=`echo $line | sed "s|\(^\s*set\S*\)\s\{1,100\}\(\S*\)\s\{1,100\}\(\S*\).*$|\3|g"` - vname=`echo $line | sed "s|\(^[[:space:]]*set[^[:space:]]*\)[[:space:]][[:space:]]*\([^[:space:]]*\)[[:space:]][[:space:]]*\([^[:space:]]*\).*$|\2|g"` + vname=`echo $line | sed "s|\(^[[:space:]]*set[^[:space:]]*\)[[:space:]][[:space:]]*\([^[:space:]]*\).*$|\2|g"` value=`echo $line | sed "s|\(^[[:space:]]*set[^[:space:]]*\)[[:space:]][[:space:]]*\([^[:space:]]*\)[[:space:]][[:space:]]*\([^[:space:]]*\).*$|\3|g"` + if [[ "${value}" == "${line}" ]]; then + 
value="" + fi # echo "$line $vname $value" found=${foundstring} @@ -43,22 +46,27 @@ do fi done - #sed -i 's|\(^\s*set.* '"$vname"' \)[^#]*\(#*.*$\)|\1 '"$value"' \2|g' $filename - sed -i.sedbak -e 's|\(^[[:space:]]*set.* '"$vname"' \)[^#]*\(#*.*$\)|\1 '"$value"' \2|g' $filename - - if [[ "${found}" == "${foundstring}" ]]; then - vnamearray+=($vname) - valuearray+=($value) + grep -q "^[[:space:]]*set.* ${vname}[[:space:]]*" $filename + grepout=$? + if [ ${grepout} -eq 0 ]; then + sed -i.sedbak -e 's|\(^[[:space:]]*set.* '"$vname"' \)[^#]*\(#*.*$\)|\1 '"$value"' \2|g' $filename + if [[ "${found}" == "${foundstring}" ]]; then + vnamearray+=($vname) + valuearray+=($value) + else + valuearray[$found]=${value} + fi else - valuearray[$found]=${value} - fi - - if [[ -e "${filename}.sedbak" ]]; then - rm ${filename}.sedbak + echo "${scriptname} ERROR: parsing error for ${vname}" + exit -99 fi fi done < "$filemods" +if [[ -e "${filename}.sedbak" ]]; then + rm ${filename}.sedbak +fi + exit 0 diff --git a/configuration/scripts/tests/base_suite.ts b/configuration/scripts/tests/base_suite.ts index 3dc4905b3..e4c376ad4 100644 --- a/configuration/scripts/tests/base_suite.ts +++ b/configuration/scripts/tests/base_suite.ts @@ -58,9 +58,9 @@ restart gx3 4x2 fsd12,debug,short smoke gx3 8x2 fsd12ww3,diag24,run1day smoke gx3 4x1 isotope,debug restart gx3 8x2 isotope -smoke gx3 4x1 snwITDrdg,snwgrain,icdefault,debug +smoke gx3 4x1 snwitdrdg,snwgrain,icdefault,debug smoke gx3 4x1 snw30percent,icdefault,debug -restart gx3 8x2 snwITDrdg,icdefault,snwgrain +restart gx3 8x2 snwitdrdg,icdefault,snwgrain restart gx3 4x4 gx3ncarbulk,iobinary restart gx3 4x4 histall,precision8,cdf64 smoke gx3 30x1 bgcz,histall diff --git a/configuration/scripts/tests/nothread_suite.ts b/configuration/scripts/tests/nothread_suite.ts index da1267e86..616741aa2 100644 --- a/configuration/scripts/tests/nothread_suite.ts +++ b/configuration/scripts/tests/nothread_suite.ts @@ -19,7 +19,7 @@ restart gx3 16x1 gx3ncarbulk,iobinary restart gx3 12x1 alt01 restart gx3 16x1 alt02 restart gx3 8x1 alt03 -restart gx3 16x1 alt04 +restart gx3 16x1x5x29x6 alt04 restart gx3 16x1 alt05 restart gx3 20x1 alt06 restart gx3 18x1 alt01,debug,short @@ -66,7 +66,7 @@ restart gx3 16x1x8x10x10 droundrobin restart_gx3_8x1x25x29x2_ restart gx3 6x1x50x58x1 droundrobin restart_gx3_8x1x25x29x2_dslenderX2 restart gx3 8x1x19x19x5 droundrobin restart_gx3_8x1x25x29x2_dslenderX2 restart gx3 20x1x5x29x20 dsectrobin,short restart_gx3_8x1x25x29x2_dslenderX2 -restart gx3 32x1x5x10x10 drakeX2 restart_gx3_8x1x25x29x2_dslenderX2 +restart gx3 32x1x5x10x12 drakeX2 restart_gx3_8x1x25x29x2_dslenderX2 restart gx3 16x1x8x10x10 droundrobin,maskhalo restart_gx3_8x1x25x29x2_dslenderX2 restart gx3 4x1x25x29x4 droundrobin restart_gx3_8x1x25x29x2_dslenderX2 diff --git a/icepack b/icepack index f9c9e480f..152bd701e 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit f9c9e480f6ce482317734be80719178c8e1b5121 +Subproject commit 152bd701e0cf3ec4385e5ce81918ba94e7a791cb From cc0e71613f88841016e5868fb0f4f55260b389e6 Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Tue, 30 Nov 2021 16:01:28 -0500 Subject: [PATCH 057/109] Modif strain u (#45) * Corrected wrong sign for shearU * Use arear instead of area in div_stress * Minor modif in viscous_coeffs_and_rep_pressure_T2U * Changed erea to earear in use ice grid...it now compiles --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 18 +++++++++--------- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 
4 ++-- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index a1f09ff9d..bc0863827 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -102,7 +102,7 @@ subroutine evp (dt) dxe, dxn, dxt, dxu, dye, dyn, dyt, dyu, & ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, earea, narea, tinyarea, grid_average_X2Y, tarea, & + tarear, uarear, earear, narear, tinyarea, grid_average_X2Y, tarea, & grid_type, grid_system use ice_state, only: aice, vice, vsno, uvel, vvel, uvelN, vvelN, & uvelE, vvelE, divu, shear, & @@ -740,7 +740,7 @@ subroutine evp (dt) indxei (:,iblk), indxej (:,iblk), & dxE (:,:,iblk), dyE (:,:,iblk), & dxU (:,:,iblk), dyT (:,:,iblk), & - earea (:,:,iblk), & + earear (:,:,iblk), & stresspT (:,:,iblk), stressmT (:,:,iblk), & stress12U (:,:,iblk), & stresspU (:,:,iblk), stressmU (:,:,iblk), & @@ -753,7 +753,7 @@ subroutine evp (dt) indxni (:,iblk), indxnj (:,iblk), & dxN (:,:,iblk), dyN (:,:,iblk), & dxT (:,:,iblk), dyU (:,:,iblk), & - narea (:,:,iblk), & + narear (:,:,iblk), & stresspU (:,:,iblk), stressmU (:,:,iblk), & stress12T (:,:,iblk), & stresspT (:,:,iblk), stressmT (:,:,iblk), & @@ -1608,7 +1608,7 @@ subroutine div_stress (nx_block, ny_block, & indxi, indxj, & dxE_N, dyE_N, & dxT_U, dyT_U, & - area, & + arear, & stresspF1, stressmF1, & stress12F1, & stresspF2, stressmF2, & @@ -1632,7 +1632,7 @@ subroutine div_stress (nx_block, ny_block, & dyE_N , & ! height of E or N-cell through the middle (m) dxT_U , & ! width of T or U-cell through the middle (m) dyT_U , & ! height of T or U-cell through the middle (m) - area , & ! earea or narea + arear , & ! earear or narear stresspF1 , & ! stressp (U or T) used for F1 calculation stressmF1 , & ! stressm (U or T) used for F1 calculation stress12F1 , & ! 
stress12 (U or T) used for F1 calculation @@ -1668,14 +1668,14 @@ subroutine div_stress (nx_block, ny_block, & select case (trim(grid_location)) case('E') - F1(i,j) = (c1/area(i,j)) * & + F1(i,j) = arear(i,j) * & ( p5 * dyE_N(i,j) * ( stresspF1(i+1,j)-stresspF1(i,j) ) & + (p5/dyE_N(i,j)) * ( (dyT_U(i+1,j)**2) * stressmF1(i+1,j) & -(dyT_U(i,j)**2)*stressmF1(i,j) ) & + (c1/dxE_N(i,j)) * ( (dxT_U(i,j)**2) * stress12F1(i,j) & -(dxT_U(i,j-1)**2)*stress12F1(i,j-1) ) ) - F2(i,j) = (c1/area(i,j)) * & + F2(i,j) = arear(i,j) * & ( p5 * dxE_N(i,j) * ( stresspF2(i,j)-stresspF2(i,j-1) ) & - (p5/dxE_N(i,j)) * ( (dxT_U(i,j)**2) * stressmF2(i,j) & -(dxT_U(i,j-1)**2)*stressmF2(i,j-1) ) & @@ -1684,14 +1684,14 @@ subroutine div_stress (nx_block, ny_block, & case('N') - F1(i,j) = (c1/area(i,j)) * & + F1(i,j) = arear(i,j) * & ( p5 * dyE_N(i,j) * ( stresspF1(i,j)-stresspF1(i-1,j) ) & + (p5/dyE_N(i,j)) * ( (dyT_U(i,j)**2) * stressmF1(i,j) & -(dyT_U(i-1,j)**2)*stressmF1(i-1,j) ) & + (c1/dxE_N(i,j)) * ( (dxT_U(i,j+1)**2) * stress12F1(i,j+1) & -(dxT_U(i,j)**2)*stress12F1(i,j) ) ) - F2(i,j) = (c1/area(i,j)) * & + F2(i,j) = arear(i,j) * & ( p5 * dxE_N(i,j) * ( stresspF2(i,j+1)-stresspF2(i,j) ) & - (p5/dxE_N(i,j)) * ( (dxT_U(i,j+1)**2) * stressmF2(i,j+1) & -(dxT_U(i,j)**2)*stressmF2(i,j) ) & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index d4622cb6e..6a18d7454 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -1863,7 +1863,7 @@ subroutine strain_rates_U (nx_block, ny_block, & shearU = dxU(i,j) * ( uEijp1 - uEij ) & - uvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & + dyU(i,j) * ( vNip1j - vNij ) & - + vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) + - vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) ! Delta (in the denominator of zeta, eta) DeltaU = sqrt(divU**2 + e_factor*(tensionU**2 + shearU**2)) @@ -2031,7 +2031,7 @@ subroutine viscous_coeffs_and_rep_pressure_T2U (zetax2T_00, zetax2T_01, & maskT_11*Tarea_11 *etax2T_11 + & maskT_01*Tarea_01 *etax2T_01)/Totarea - rep_prsU = (c1-ktens)/(1+Ktens)*zetax2U*deltaU + rep_prsU = (c1-Ktens)/(c1+Ktens)*zetax2U*deltaU end subroutine viscous_coeffs_and_rep_pressure_T2U From a1415359ae8b6dd73451f410f682301d45f3ceaf Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Wed, 1 Dec 2021 13:32:00 -0800 Subject: [PATCH 058/109] turn off OMP around seabed stress, causes aborts on cheyenne with pgi (#46) --- cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 5 +++-- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 5 +++-- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 5 +++-- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index e5a89b118..74e08bdcd 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -400,7 +400,8 @@ subroutine eap (dt) if (seabed_stress) then - !$OMP PARALLEL DO PRIVATE(iblk) + ! tcraig, evp omp causes abort on cheyenne with pgi, turn off here too + !$TCXOMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks if ( seabed_stress_method == 'LKD' ) then @@ -421,7 +422,7 @@ subroutine eap (dt) endif enddo - !$OMP END PARALLEL DO + !$TCXOMP END PARALLEL DO endif do ksub = 1,ndte ! 
subcycling
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index bc0863827..c29f06497 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -552,7 +552,8 @@ subroutine evp (dt)

       if (seabed_stress) then

-      !$OMP PARALLEL DO PRIVATE(iblk)
+      ! tcraig, causes abort with pgi compiler on cheyenne
+      !$TCXOMP PARALLEL DO PRIVATE(iblk)
       do iblk = 1, nblocks

          select case (trim(grid_system))
@@ -605,7 +606,7 @@ subroutine evp (dt)
          end select
       enddo
-      !$OMP END PARALLEL DO
+      !$TCXOMP END PARALLEL DO
    endif

    call ice_timer_start(timer_evp_2d)
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
index 5bbef02a4..66499037a 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
@@ -446,7 +446,8 @@ subroutine implicit_solver (dt)

       if (seabed_stress) then

-      !$OMP PARALLEL DO PRIVATE(iblk)
+      ! tcraig, evp omp causes abort on cheyenne with pgi, turn off here too
+      !$TCXOMP PARALLEL DO PRIVATE(iblk)
       do iblk = 1, nblocks

          if ( seabed_stress_method == 'LKD' ) then
@@ -467,7 +468,7 @@ subroutine implicit_solver (dt)
          endif
       enddo
-      !$OMP END PARALLEL DO
+      !$TCXOMP END PARALLEL DO
    endif

    !-----------------------------------------------------------------

From f128adee50d1074e1713dd3b8c76174ddfb4c40e Mon Sep 17 00:00:00 2001
From: TRasmussen <33480590+TillRasmussen@users.noreply.github.com>
Date: Thu, 2 Dec 2021 21:18:27 +0100
Subject: [PATCH 059/109] Cgrid debug (#42)

* All CD grid on quicksuite runs. Avoid spurious values of zetax2T and
  etax2T. The real problem is most likely difference between U and T mask.

* added space
---
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index c29f06497..d06ea1bc9 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -205,6 +205,8 @@ subroutine evp (dt)

       allocate(zetax2T(nx_block,ny_block,max_blocks))
       allocate(etax2T(nx_block,ny_block,max_blocks))
+      zetax2T(:,:,:) = c0
+      etax2T (:,:,:) = c0

    endif

From 0809cde2250858b5f33d09db7cb764c33ff398fa Mon Sep 17 00:00:00 2001
From: Tony Craig
Date: Thu, 16 Dec 2021 14:01:36 -0800
Subject: [PATCH 060/109] Add grid_ocn, grid_atm feature (#47)

* - Refactor strair and strocn implementation a bit
  - Note strocnxT, strocnyT are per ice area and different units from other
    strocn[x,y] variables. Added task to C-grid clean issue.
  - Remove strocnxT_f, strocnyT_f from dyn_finish interface and compute separately
  - Remove strairxU/T, strairyU/T arguments from dyn_prep1, compute strairxU,
    strairyU more cleanly
  - Update variable documentation in code
  - Remove straxE, strayE, straxN, strayN, probably won't need
  - Update grid_average_X2Y
  - Add grid_average_X2Y for 'NE2T' and 'NE2U' as an overloaded interface
  - Add grid_average_X2Y "A" implementation which is unmasked normalized
    weighted average. This is like S but ignores masks
  - Add grid_average_X2Y explicit implementation (dir,work1,wght1,mask1,work2)
    for 'S' and 'A' averaging options
  - Eliminate "in-place" operation, not really needed, can only cause confusion,
    require all averaging from one variable to a different variable.
  - Update gridavgchk unit test to test new grid_average_X2Y options
  - Identify bug related to location of uocn,vocn variables. Used on U grid but
    variables on T grid. Added task to C-grid clean issue.
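To pin down the 'S' versus 'A' distinction described in the bullets above: both are normalized weighted averages, but 'S' zeroes the weights of masked (land) neighbors while 'A' keeps them. A minimal standalone sketch over a generic four-point stencil, reusing the commit's (work1, wght1, mask1) naming; the program and the sample values are illustrative only:

program avg_s_vs_a
   implicit none
   integer, parameter :: dbl = selected_real_kind(13)
   real (kind=dbl), parameter :: puny = 1.0e-11_dbl
   real (kind=dbl) :: work1(4), wght1(4), mask1(4)
   real (kind=dbl) :: avgS, avgA

   work1 = (/ 1.0_dbl, 2.0_dbl, 3.0_dbl, 4.0_dbl /)   ! source values
   wght1 = (/ 1.0_dbl, 1.0_dbl, 1.0_dbl, 1.0_dbl /)   ! weights, e.g. cell areas
   mask1 = (/ 1.0_dbl, 1.0_dbl, 0.0_dbl, 0.0_dbl /)   ! last two neighbors are land

   ! 'S': masked normalized weighted average
   avgS = sum(work1*wght1*mask1) / max(sum(wght1*mask1), puny)

   ! 'A': unmasked normalized weighted average, same form but mask1 ignored
   avgA = sum(work1*wght1) / max(sum(wght1), puny)

   print *, 'S =', avgS, '  A =', avgA   ! 1.5 and 2.5 here
end program avg_s_vs_a

With two land neighbors the two options diverge (1.5 versus 2.5 here), which is exactly why the choice matters for fields such as uocn and vocn that get averaged near coastlines.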
* - Add grid_atm and grid_ocn to namelist as well as grid_*_[thrm,dynu,dynv] to support flexible grid definitions - Refactor grid_average_X2Y interfaces to better support flexibility wrt grids - Update gridavgchk unit test * - Update [u,v]ocn, ss_tlt[x,y] implementation to improve flexibility and use grid_atm and grid_ocn info - Migrate averaging and memory allocation of U, N, E fields to dynamics - Update history capability to support grid_atm and grid_ocn values for some history variables * - Update u/vocn and ss_tltx/y usage to support the grid_ocn value - Fix bug in init/get_forcing_ocn, affects only cases with atm_data_type=box2001 and ocn_data_type/=box2001 - Update set_nml files for box cases to more clearly specify atm/ocn/ice_data_type and define grid_ocn where needed * update documentation * recover lost grid_file initialization line * update uatm, vatm handling * - Update strax, stray implementation to support flexible grid interpolation - Update grid_average_X2Y internal interface names - Update documentation * - Rename grid_system to grid_ice - Update documentation * rename grid_system to grid_ice * rename grid_system to grid_ice * update comment for uocn, vocn --- .../cicedynB/analysis/ice_diagnostics.F90 | 14 +- cicecore/cicedynB/analysis/ice_history.F90 | 127 ++- cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 67 +- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 205 +++-- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 60 +- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 69 +- .../dynamics/ice_transport_driver.F90 | 12 +- .../cicedynB/dynamics/ice_transport_remap.F90 | 6 +- cicecore/cicedynB/general/ice_flux.F90 | 99 +-- cicecore/cicedynB/general/ice_forcing.F90 | 24 +- cicecore/cicedynB/general/ice_init.F90 | 109 ++- cicecore/cicedynB/general/ice_step_mod.F90 | 37 +- cicecore/cicedynB/infrastructure/ice_grid.F90 | 838 +++++++++++++++--- .../infrastructure/ice_restart_driver.F90 | 8 +- .../io/io_netcdf/ice_restart.F90 | 4 +- .../infrastructure/io/io_pio2/ice_restart.F90 | 4 +- .../drivers/direct/hadgem3/CICE_RunMod.F90 | 4 +- .../direct/nemo_concepts/CICE_RunMod.F90 | 4 +- cicecore/drivers/mct/cesm1/CICE_RunMod.F90 | 4 +- .../drivers/mct/cesm1/ice_import_export.F90 | 16 +- cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 | 4 +- .../drivers/nuopc/cmeps/ice_import_export.F90 | 16 +- cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 | 4 +- cicecore/drivers/nuopc/dmi/cice_cap.info | 13 +- .../drivers/standalone/cice/CICE_RunMod.F90 | 4 +- .../unittest/gridavgchk/gridavgchk.F90 | 436 +++++++-- configuration/scripts/ice_in | 4 +- configuration/scripts/options/set_nml.box2001 | 2 + configuration/scripts/options/set_nml.boxadv | 4 + .../scripts/options/set_nml.boxislandse | 1 + .../scripts/options/set_nml.boxislandsn | 1 + .../scripts/options/set_nml.boxislandsne | 1 + .../scripts/options/set_nml.boxnodyn | 1 + .../scripts/options/set_nml.boxrestore | 4 + .../scripts/options/set_nml.boxslotcyl | 4 + configuration/scripts/options/set_nml.gbox128 | 2 + configuration/scripts/options/set_nml.gridb | 2 +- configuration/scripts/options/set_nml.gridcd | 2 +- doc/source/cice_index.rst | 13 +- doc/source/master_list.bib | 24 +- doc/source/user_guide/ug_case_settings.rst | 13 +- doc/source/user_guide/ug_implementation.rst | 116 +++ 42 files changed, 1800 insertions(+), 582 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_diagnostics.F90 b/cicecore/cicedynB/analysis/ice_diagnostics.F90 index 1b9f70044..d07cc0a93 100644 --- a/cicecore/cicedynB/analysis/ice_diagnostics.F90 +++ 
b/cicecore/cicedynB/analysis/ice_diagnostics.F90 @@ -127,7 +127,7 @@ subroutine runtime_diags (dt) alvdr_init, alvdf_init, alidr_init, alidf_init use ice_flux_bgc, only: faero_atm, faero_ocn, fiso_atm, fiso_ocn use ice_global_reductions, only: global_sum, global_sum_prod, global_maxval - use ice_grid, only: lmask_n, lmask_s, tarean, tareas, grid_system + use ice_grid, only: lmask_n, lmask_s, tarean, tareas, grid_ice use ice_state ! everything ! tcraig, this is likely to cause circular dependency because ice_prescribed_mod is high level routine #ifdef CESMCOUPLED @@ -297,7 +297,7 @@ subroutine runtime_diags (dt) enddo enddo ! Eventually do energy diagnostic on T points. -! if (grid_system == 'CD') then +! if (grid_ice == 'CD') then ! !$OMP PARALLEL DO PRIVATE(iblk,i,j) ! do iblk = 1, nblocks ! do j = 1, ny_block @@ -403,7 +403,7 @@ subroutine runtime_diags (dt) enddo enddo !$OMP END PARALLEL DO - if (grid_system == 'CD') then + if (grid_ice == 'CD') then !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block @@ -1663,7 +1663,7 @@ end subroutine debug_ice subroutine print_state(plabel,i,j,iblk) - use ice_grid, only: grid_system + use ice_grid, only: grid_ice use ice_blocks, only: block, get_block use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, nfsd @@ -1790,7 +1790,7 @@ subroutine print_state(plabel,i,j,iblk) write(nu_diag,*) 'uvel(i,j)',uvel(i,j,iblk) write(nu_diag,*) 'vvel(i,j)',vvel(i,j,iblk) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then write(nu_diag,*) 'uvelE(i,j)',uvelE(i,j,iblk) write(nu_diag,*) 'vvelE(i,j)',vvelE(i,j,iblk) write(nu_diag,*) 'uvelN(i,j)',uvelN(i,j,iblk) @@ -1843,7 +1843,7 @@ end subroutine print_state subroutine print_points_state(plabel,ilabel) - use ice_grid, only: grid_system + use ice_grid, only: grid_ice use ice_blocks, only: block, get_block use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr @@ -1940,7 +1940,7 @@ subroutine print_points_state(plabel,ilabel) write(nu_diag,*) trim(llabel),'uvel=',uvel(i,j,iblk) write(nu_diag,*) trim(llabel),'vvel=',vvel(i,j,iblk) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then write(nu_diag,*) trim(llabel),'uvelE=',uvelE(i,j,iblk) write(nu_diag,*) trim(llabel),'vvelE=',vvelE(i,j,iblk) write(nu_diag,*) trim(llabel),'uvelN=',uvelN(i,j,iblk) diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90 index e0855aa1c..a9cf22529 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -68,7 +68,9 @@ subroutine init_hist (dt) use ice_domain_size, only: max_blocks, max_nstrm, nilyr, nslyr, nblyr, ncat, nfsd use ice_dyn_shared, only: kdyn use ice_flux, only: mlt_onset, frz_onset, albcnt, snwcnt - use ice_grid, only: grid_system + use ice_grid, only: grid_ice, & + grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, & + grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv use ice_history_shared ! everything use ice_history_mechred, only: init_hist_mechred_2D, init_hist_mechred_3Dc use ice_history_pond, only: init_hist_pond_2D, init_hist_pond_3Dc @@ -94,9 +96,108 @@ subroutine init_hist (dt) integer (kind=int_kind), dimension(max_nstrm) :: & ntmp integer (kind=int_kind) :: nml_error ! namelist i/o error flag + character (len=25) :: & + str2D_gat, str2d_gau, str2d_gav, & ! dimensions for t, u, v atm grid (ga) + str2D_got, str2d_gou, str2d_gov ! dimensions for t, u, v ocn grid (go) + character (len=25) :: & + cstr_gat, cstr_gau, cstr_gav, & ! 
mask area name for t, u, v atm grid (ga) + cstr_got, cstr_gou, cstr_gov ! mask area name for t, u, v ocn grid (go) character(len=char_len) :: description character(len=*), parameter :: subname = '(init_hist)' + !----------------------------------------------------------------- + ! set atm/ocn forcing grids + !----------------------------------------------------------------- + + !--- ATM --- + + if (grid_atm_thrm == 'T') then + str2D_gat = tstr2D + cstr_gat = tcstr + elseif (grid_atm_thrm == 'U') then + str2D_gat = ustr2D + cstr_gat = ucstr + elseif (grid_atm_thrm == 'N') then + str2D_gat = nstr2D + cstr_gat = ncstr + elseif (grid_atm_thrm == 'E') then + str2D_gat = estr2D + cstr_gat = ecstr + endif + + if (grid_atm_dynu == 'T') then + str2D_gau = tstr2D + cstr_gau = tcstr + elseif (grid_atm_dynu == 'U') then + str2D_gau = ustr2D + cstr_gau = ucstr + elseif (grid_atm_dynu == 'N') then + str2D_gau = nstr2D + cstr_gau = ncstr + elseif (grid_atm_dynu == 'E') then + str2D_gau = estr2D + cstr_gau = ecstr + endif + + if (grid_atm_dynv == 'T') then + str2D_gav = tstr2D + cstr_gav = tcstr + elseif (grid_atm_dynv == 'U') then + str2D_gav = ustr2D + cstr_gav = ucstr + elseif (grid_atm_dynv == 'N') then + str2D_gav = nstr2D + cstr_gav = ncstr + elseif (grid_atm_dynv == 'E') then + str2D_gav = estr2D + cstr_gav = ecstr + endif + + !--- OCN --- + + if (grid_ocn_thrm == 'T') then + str2D_got = tstr2D + cstr_got = tcstr + elseif (grid_ocn_thrm == 'U') then + str2D_got = ustr2D + cstr_got = ucstr + elseif (grid_ocn_thrm == 'N') then + str2D_got = nstr2D + cstr_got = ncstr + elseif (grid_ocn_thrm == 'E') then + str2D_got = estr2D + cstr_got = ecstr + endif + + if (grid_ocn_dynu == 'T') then + str2D_gou = tstr2D + cstr_gou = tcstr + elseif (grid_ocn_dynu == 'U') then + str2D_gou = ustr2D + cstr_gou = ucstr + elseif (grid_ocn_dynu == 'N') then + str2D_gou = nstr2D + cstr_gou = ncstr + elseif (grid_ocn_dynu == 'E') then + str2D_gou = estr2D + cstr_gou = ecstr + endif + + if (grid_ocn_dynv == 'T') then + str2D_gov = tstr2D + cstr_gov = tcstr + elseif (grid_ocn_dynv == 'U') then + str2D_gov = ustr2D + cstr_gov = ucstr + elseif (grid_ocn_dynv == 'N') then + str2D_gov = nstr2D + cstr_gov = ncstr + elseif (grid_ocn_dynv == 'E') then + str2D_gov = estr2D + cstr_gov = ecstr + endif + + !----------------------------------------------------------------- ! 
set history dimensions !----------------------------------------------------------------- @@ -279,7 +380,7 @@ subroutine init_hist (dt) f_sispeed = f_CMIP endif - if (grid_system == 'CD') then + if (grid_ice == 'CD') then f_uvelE = f_uvel f_vvelE = f_vvel f_icespdE = f_icespd @@ -684,22 +785,22 @@ subroutine init_hist (dt) "vector direction - coming from", c1, c0, & ns1, f_icedir) - call define_hist_field(n_uatm,"uatm","m/s",ustr2D, ucstr, & + call define_hist_field(n_uatm,"uatm","m/s",str2D_gau, cstr_gau, & "atm velocity (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_uatm) - call define_hist_field(n_vatm,"vatm","m/s",ustr2D, ucstr, & + call define_hist_field(n_vatm,"vatm","m/s",str2D_gav, cstr_gav, & "atm velocity (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_vatm) - call define_hist_field(n_atmspd,"atmspd","m/s",ustr2D, ucstr, & + call define_hist_field(n_atmspd,"atmspd","m/s",str2D_gau, cstr_gau, & "atmosphere wind speed", & "vector magnitude", c1, c0, & ns1, f_atmspd) - call define_hist_field(n_atmdir,"atmdir","deg",ustr2D, ucstr, & + call define_hist_field(n_atmdir,"atmdir","deg",str2D_gau, cstr_gau, & "atmosphere wind direction", & "vector direction - coming from", c1, c0, & ns1, f_atmdir) @@ -754,22 +855,22 @@ subroutine init_hist (dt) "none", c1, c0, & ns1, f_sss) - call define_hist_field(n_uocn,"uocn","m/s",ustr2D, ucstr, & + call define_hist_field(n_uocn,"uocn","m/s",str2D_gou, cstr_gou, & "ocean current (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_uocn) - call define_hist_field(n_vocn,"vocn","m/s",ustr2D, ucstr, & + call define_hist_field(n_vocn,"vocn","m/s",str2D_gov, cstr_gov, & "ocean current (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_vocn) - call define_hist_field(n_ocnspd,"ocnspd","m/s",ustr2D, ucstr, & + call define_hist_field(n_ocnspd,"ocnspd","m/s",str2D_gou, cstr_gou, & "ocean current speed", & "vector magnitude", c1, c0, & ns1, f_ocnspd) - call define_hist_field(n_ocndir,"ocndir","deg",ustr2D, ucstr, & + call define_hist_field(n_ocndir,"ocndir","deg",str2D_gou, cstr_gou, & "ocean current direction", & "vector direction - going to", c1, c0, & ns1, f_ocndir) @@ -1200,7 +1301,7 @@ subroutine init_hist (dt) "none", secday*c100, c0, & ns1, f_shear) - select case (grid_system) + select case (grid_ice) case('B') description = ", on U grid (NE corner values)" case ('CD') @@ -1987,7 +2088,7 @@ subroutine accum_hist (dt) use ice_blocks, only: block, get_block, nx_block, ny_block use ice_domain, only: blocks_ice, nblocks use ice_domain_size, only: nfsd - use ice_grid, only: tmask, lmask_n, lmask_s, dxu, dyu, grid_system + use ice_grid, only: tmask, lmask_n, lmask_s, dxu, dyu, grid_ice use ice_calendar, only: new_year, write_history, & write_ic, timesecs, histfreq, nstreams, mmonth, & new_month @@ -4297,7 +4398,7 @@ subroutine accum_hist (dt) !--------------------------------------------------------------- ! 
compute sig1 and sig2 - select case (grid_system) + select case (grid_ice) case('B') call principal_stress (nx_block, ny_block, & stressp_1 (:,:,iblk), & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index 74e08bdcd..6ede5e667 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -134,9 +134,10 @@ subroutine eap (dt) stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_grid, only: tmask, umask, dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, grid_average_X2Y!, grid_system commented out until implementation of cd-grid + tarear, uarear, grid_average_X2Y, & + grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & - aice_init, aice0, aicen, vicen, strength !, uvelE, vvelN grid_system commented out until implementation of cd-grid + aice_init, aice0, aicen, vicen, strength ! use ice_timers, only: timer_dynamics, timer_bound, & ! ice_timer_start, ice_timer_stop, & ! timer_tmp1, timer_tmp2, timer_tmp3 @@ -165,6 +166,8 @@ subroutine eap (dt) indxuj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + uocnU , & ! i ocean current (m/s) + vocnU , & ! j ocean current (m/s) tmass , & ! total mass of ice and snow (kg/m^2) waterx , & ! for ocean stress calculation, x (m/s) watery , & ! for ocean stress calculation, y (m/s) @@ -191,6 +194,10 @@ subroutine eap (dt) type (block) :: & this_block ! block information for current block + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1, & ! temporary + work2 ! temporary + character(len=*), parameter :: subname = '(eap)' call ice_timer_start(timer_dynamics) ! dynamics @@ -238,8 +245,6 @@ subroutine eap (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - strairxT(:,:,iblk), strairyT(:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & tmass (:,:,iblk), icetmask(:,:,iblk)) enddo ! iblk @@ -254,8 +259,10 @@ subroutine eap (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('T2UF',tmass,umass) - call grid_average_X2Y('T2UF',aice_init, aiu) + call grid_average_X2Y('F',tmass,'T',umass,'U') + call grid_average_X2Y('F',aice_init,'T',aiu,'U') + call grid_average_X2Y('S',uocn,grid_ocn_dynu,uocnU,'U') + call grid_average_X2Y('S',vocn,grid_ocn_dynv,vocnU,'U') !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing @@ -266,16 +273,16 @@ subroutine eap (dt) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (.not. calc_strair) then - strairx(:,:,:) = strax(:,:,:) - strairy(:,:,:) = stray(:,:,:) + if (.not. calc_strair) then + call grid_average_X2Y('F', strax, grid_atm_dynu, strairx, 'U') + call grid_average_X2Y('F', stray, grid_atm_dynv, strairy, 'U') else - call ice_HaloUpdate (strairx, halo_info, & + call ice_HaloUpdate (strairxT, halo_info, & field_loc_center, field_type_vector) - call ice_HaloUpdate (strairy, halo_info, & + call ice_HaloUpdate (strairyT, halo_info, & field_loc_center, field_type_vector) - call grid_average_X2Y('T2UF',strairx) - call grid_average_X2Y('T2UF',strairy) + call grid_average_X2Y('F',strairxT,'T',strairx,'U') + call grid_average_X2Y('F',strairyT,'T',strairy,'U') endif ! 
tcraig, tcx, turned off this threaded region, in evp, this block and @@ -302,7 +309,7 @@ subroutine eap (dt) aiu (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & strairx (:,:,iblk), strairy (:,:,iblk), & ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & icetmask (:,:,iblk), iceumask (:,:,iblk), & @@ -478,7 +485,7 @@ subroutine eap (dt) indxui (:,iblk), indxuj (:,iblk), & ksub, & aiu (:,:,iblk), strtmp (:,:,:), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & waterx (:,:,iblk), watery (:,:,iblk), & forcex (:,:,iblk), forcey (:,:,iblk), & umassdti (:,:,iblk), fm (:,:,iblk), & @@ -543,26 +550,40 @@ subroutine eap (dt) icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & aiu (:,:,iblk), fm (:,:,iblk), & strintx (:,:,iblk), strinty (:,:,iblk), & strairx (:,:,iblk), strairy (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strocnxT(:,:,iblk), strocnyT(:,:,iblk)) + strocnx (:,:,iblk), strocny (:,:,iblk)) enddo !$OMP END PARALLEL DO - call ice_HaloUpdate (strocnxT, halo_info, & + ! strocn computed on U, N, E as needed. Map strocn U divided by aiu to T + ! TODO: This should be done elsewhere as part of generalization? + ! conservation requires aiu be divided before averaging + work1 = c0 + work2 = c0 + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) + do iblk = 1, nblocks + do ij = 1, icellu(iblk) + i = indxui(ij,iblk) + j = indxuj(ij,iblk) + work1(i,j,iblk) = strocnx(i,j,iblk)/aiu(i,j,iblk) + work2(i,j,iblk) = strocny(i,j,iblk)/aiu(i,j,iblk) + enddo + enddo + call ice_HaloUpdate (work1, halo_info, & field_loc_NEcorner, field_type_vector) - call ice_HaloUpdate (strocnyT, halo_info, & + call ice_HaloUpdate (work2, halo_info, & field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('U2TF',strocnxT) ! shift - call grid_average_X2Y('U2TF',strocnyT) + call grid_average_X2Y('F',work1,'U',strocnxT,'T') ! shift + call grid_average_X2Y('F',work2,'U',strocnyT,'T') + ! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport ! commented out in order to focus on EVP for now within the cdgrid ! should be used when routine is ready -! if (grid_system == 'CD') then +! if (grid_ice == 'CD') then ! call grid_average_X2Y('E2US',uvelE,uvel) ! call grid_average_X2Y('N2US',vvelN,vvel) ! 
endif diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index d06ea1bc9..377f1205d 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -86,13 +86,13 @@ subroutine evp (dt) strairx, strairy, uocn, vocn, ss_tltx, ss_tlty, iceumask, fm, & strtltx, strtlty, strocnx, strocny, strintx, strinty, taubx, tauby, & strocnxT, strocnyT, strax, stray, & - strairxN, strairyN, uocnN, vocnN, ss_tltxN, ss_tltyN, icenmask, fmN, & + Tbu, hwater, & + strairxN, strairyN, icenmask, fmN, & strtltxN, strtltyN, strocnxN, strocnyN, strintxN, strintyN, taubxN, taubyN, & - straxN, strayN, TbN, & - strairxE, strairyE, uocnE, vocnE, ss_tltxE, ss_tltyE, iceemask, fmE, & + TbN, & + strairxE, strairyE, iceemask, fmE, & strtltxE, strtltyE, strocnxE, strocnyE, strintxE, strintyE, taubxE, taubyE, & - straxE, strayE, TbE, & - Tbu, hwater, & + TbE, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & @@ -103,7 +103,8 @@ subroutine evp (dt) ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & dxhy, dyhx, cxp, cyp, cxm, cym, & tarear, uarear, earear, narear, tinyarea, grid_average_X2Y, tarea, & - grid_type, grid_system + grid_type, grid_ice, & + grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, uvelN, vvelN, & uvelE, vvelE, divu, shear, & aice_init, aice0, aicen, vicen, strength @@ -141,6 +142,10 @@ subroutine evp (dt) indxuj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + uocnU , & ! i ocean current (m/s) + vocnU , & ! j ocean current (m/s) + ss_tltxU , & ! sea surface slope, x-direction (m/m) + ss_tltyU , & ! sea surface slope, y-direction (m/m) tmass , & ! total mass of ice and snow (kg/m^2) waterx , & ! for ocean stress calculation, x (m/s) watery , & ! for ocean stress calculation, y (m/s) @@ -151,6 +156,10 @@ subroutine evp (dt) umassdti ! mass of U-cell/dte (kg/m^2 s) real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + uocnN , & ! i ocean current (m/s) + vocnN , & ! j ocean current (m/s) + ss_tltxN , & ! sea surface slope, x-direction (m/m) + ss_tltyN , & ! sea surface slope, y-direction (m/m) waterxN , & ! for ocean stress calculation, x (m/s) wateryN , & ! for ocean stress calculation, y (m/s) forcexN , & ! work array: combined atm stress and ocn tilt, x @@ -160,6 +169,10 @@ subroutine evp (dt) nmassdti ! mass of N-cell/dte (kg/m^2 s) real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + uocnE , & ! i ocean current (m/s) + vocnE , & ! j ocean current (m/s) + ss_tltxE , & ! sea surface slope, x-direction (m/m) + ss_tltyE , & ! sea surface slope, y-direction (m/m) waterxE , & ! for ocean stress calculation, x (m/s) wateryE , & ! for ocean stress calculation, y (m/s) forcexE , & ! work array: combined atm stress and ocn tilt, x @@ -189,6 +202,10 @@ subroutine evp (dt) type (block) :: & this_block ! block information for current block + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1, & ! temporary + work2 ! temporary + logical (kind=log_kind), save :: first_time = .true. 
character(len=*), parameter :: subname = '(evp)' @@ -201,7 +218,7 @@ subroutine evp (dt) allocate(fld2(nx_block,ny_block,2,max_blocks)) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then allocate(zetax2T(nx_block,ny_block,max_blocks)) allocate(etax2T(nx_block,ny_block,max_blocks)) @@ -254,16 +271,8 @@ subroutine evp (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - strairxT(:,:,iblk), strairyT(:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & tmass (:,:,iblk), icetmask(:,:,iblk)) - if (grid_system == 'CD') then - strairxN(:,:,iblk) = strairxT(:,:,iblk) - strairyN(:,:,iblk) = strairyT(:,:,iblk) - strairxE(:,:,iblk) = strairxT(:,:,iblk) - strairyE(:,:,iblk) = strairyT(:,:,iblk) - endif enddo ! iblk !$OMP END PARALLEL DO @@ -276,59 +285,59 @@ subroutine evp (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('T2UF',tmass,umass) - call grid_average_X2Y('T2UF',aice_init, aiu) - - if (grid_system == 'CD') then - call grid_average_X2Y('T2EF',tmass,emass) - call grid_average_X2Y('T2EF',aice_init, aie) - call grid_average_X2Y('T2NF',tmass,nmass) - call grid_average_X2Y('T2NF',aice_init, ain) + call grid_average_X2Y('F',tmass,'T',umass,'U') + call grid_average_X2Y('F',aice_init,'T',aiu,'U') + call grid_average_X2Y('S',uocn,grid_ocn_dynu,uocnU,'U') + call grid_average_X2Y('S',vocn,grid_ocn_dynv,vocnU,'U') + call grid_average_X2Y('S',ss_tltx,grid_ocn_dynu,ss_tltxU,'U') + call grid_average_X2Y('S',ss_tlty,grid_ocn_dynv,ss_tltyU,'U') + + if (grid_ice == 'CD') then + call grid_average_X2Y('F',tmass,'T',emass,'E') + call grid_average_X2Y('F',aice_init,'T', aie,'E') + call grid_average_X2Y('F',tmass,'T',nmass,'N') + call grid_average_X2Y('F',aice_init,'T', ain,'N') + call grid_average_X2Y('S',uocn,grid_ocn_dynu,uocnN,'N') + call grid_average_X2Y('S',vocn,grid_ocn_dynv,vocnN,'N') + call grid_average_X2Y('S',uocn,grid_ocn_dynu,uocnE,'E') + call grid_average_X2Y('S',vocn,grid_ocn_dynv,vocnE,'E') endif !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing - ! This wind stress is rotated on u grid and multiplied by aice + ! Map T to U, N, E as needed + ! This wind stress is rotated on u grid and multiplied by aice in coupler !---------------------------------------------------------------- call icepack_query_parameters(calc_strair_out=calc_strair) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (.not. calc_strair) then - strairx(:,:,:) = strax(:,:,:) - strairy(:,:,:) = stray(:,:,:) + if (.not. calc_strair) then + call grid_average_X2Y('F', strax, grid_atm_dynu, strairx, 'U') + call grid_average_X2Y('F', stray, grid_atm_dynv, strairy, 'U') else - call ice_HaloUpdate (strairx, halo_info, & + call ice_HaloUpdate (strairxT, halo_info, & field_loc_center, field_type_vector) - call ice_HaloUpdate (strairy, halo_info, & + call ice_HaloUpdate (strairyT, halo_info, & field_loc_center, field_type_vector) - call grid_average_X2Y('T2UF',strairx) - call grid_average_X2Y('T2UF',strairy) - endif - - if (grid_system == 'CD') then + call grid_average_X2Y('F',strairxT,'T',strairx,'U') + call grid_average_X2Y('F',strairyT,'T',strairy,'U') + endif - if (.not. 
calc_strair) then - strairxN(:,:,:) = strax(:,:,:) - strairyN(:,:,:) = stray(:,:,:) - strairxE(:,:,:) = strax(:,:,:) - strairyE(:,:,:) = stray(:,:,:) + if (grid_ice == 'CD') then + if (.not. calc_strair) then + call grid_average_X2Y('F', strax, grid_atm_dynu, strairxN, 'N') + call grid_average_X2Y('F', stray, grid_atm_dynv, strairyN, 'N') + call grid_average_X2Y('F', strax, grid_atm_dynu, strairxE, 'E') + call grid_average_X2Y('F', stray, grid_atm_dynv, strairyE, 'E') else - call ice_HaloUpdate (strairxN, halo_info, & - field_loc_center, field_type_vector) - call ice_HaloUpdate (strairyN, halo_info, & - field_loc_center, field_type_vector) - call ice_HaloUpdate (strairxE, halo_info, & - field_loc_center, field_type_vector) - call ice_HaloUpdate (strairyE, halo_info, & - field_loc_center, field_type_vector) - call grid_average_X2Y('T2NF',strairxN) - call grid_average_X2Y('T2NF',strairyN) - call grid_average_X2Y('T2EF',strairxE) - call grid_average_X2Y('T2EF',strairyE) + call grid_average_X2Y('F',strairxT,'T',strairxN,'N') + call grid_average_X2Y('F',strairyT,'T',strairyN,'N') + call grid_average_X2Y('F',strairxT,'T',strairxE,'E') + call grid_average_X2Y('F',strairyT,'T',strairyE,'E') endif - endif + ! tcraig, tcx, threading here leads to some non-reproducbile results and failures in icepack_ice_strength ! need to do more debugging !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) @@ -344,7 +353,7 @@ subroutine evp (dt) jlo = this_block%jlo jhi = this_block%jhi - if (trim(grid_system) == 'B') then + if (trim(grid_ice) == 'B') then call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & icellt(iblk), icellu(iblk), & @@ -353,9 +362,9 @@ subroutine evp (dt) aiu (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & strairx (:,:,iblk), strairy (:,:,iblk), & - ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & + ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & icetmask (:,:,iblk), iceumask (:,:,iblk), & fm (:,:,iblk), dt, & strtltx (:,:,iblk), strtlty (:,:,iblk), & @@ -374,7 +383,7 @@ subroutine evp (dt) uvel (:,:,iblk), vvel (:,:,iblk), & Tbu (:,:,iblk)) - elseif (trim(grid_system) == 'CD') then + elseif (trim(grid_ice) == 'CD') then call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & icellt(iblk), icellu(iblk), & @@ -383,9 +392,9 @@ subroutine evp (dt) aiu (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umaskCD (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & strairx (:,:,iblk), strairy (:,:,iblk), & - ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & + ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & icetmask (:,:,iblk), iceumask (:,:,iblk), & fm (:,:,iblk), dt, & strtltx (:,:,iblk), strtlty (:,:,iblk), & @@ -425,7 +434,7 @@ subroutine evp (dt) enddo ! iblk !$TCXOMP END PARALLEL DO - if (grid_system == 'CD') then + if (grid_ice == 'CD') then !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks @@ -505,7 +514,7 @@ subroutine evp (dt) enddo ! iblk !$TCXOMP END PARALLEL DO - endif ! grid_system + endif ! grid_ice call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -521,7 +530,7 @@ subroutine evp (dt) call unstack_velocity_field(fld2, uvel, vvel) call ice_timer_stop(timer_bound) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then call ice_timer_start(timer_bound) ! 
velocities may have changed in dyn_prep2 @@ -558,7 +567,7 @@ subroutine evp (dt) !$TCXOMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - select case (trim(grid_system)) + select case (trim(grid_ice)) case('B') if ( seabed_stress_method == 'LKD' ) then @@ -627,7 +636,7 @@ subroutine evp (dt) call ice_dyn_evp_1d_copyin( & nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & icetmask, iceumask, & - cdn_ocn,aiu,uocn,vocn,forcex,forcey,Tbu, & + cdn_ocn,aiu,uocnU,vocnU,forcex,forcey,Tbu, & umassdti,fm,uarear,tarear,strintx,strinty,uvel_init,vvel_init,& strength,uvel,vvel,dxt,dyt, & stressp_1 ,stressp_2, stressp_3, stressp_4, & @@ -651,9 +660,9 @@ subroutine evp (dt) ! shift velocity components from CD grid locations (N, E) to B grid location (U) for stress_U - if (grid_system == 'CD') then - call grid_average_X2Y('E2US',uvelE,uvel) - call grid_average_X2Y('N2US',vvelN,vvel) + if (grid_ice == 'CD') then + call grid_average_X2Y('S',uvelE,'E',uvel,'U') + call grid_average_X2Y('S',vvelN,'N',vvel,'U') endif !----------------------------------------------------------------- @@ -663,7 +672,7 @@ subroutine evp (dt) !$TCXOMP PARALLEL DO PRIVATE(iblk,strtmp) do iblk = 1, nblocks - select case (grid_system) + select case (grid_ice) case('B') call stress (nx_block, ny_block, & ksub, icellt(iblk), & @@ -693,7 +702,7 @@ subroutine evp (dt) indxui (:,iblk), indxuj (:,iblk), & ksub, & aiu (:,:,iblk), strtmp (:,:,:), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & waterx (:,:,iblk), watery (:,:,iblk), & forcex (:,:,iblk), forcey (:,:,iblk), & umassdti (:,:,iblk), fm (:,:,iblk), & @@ -782,7 +791,7 @@ subroutine evp (dt) icelln (iblk), Cdn_ocn (:,:,iblk), & indxni (:,iblk), indxnj (:,iblk), & ksub, aiN (:,:,iblk), & - uocnN (:,:,iblk), vocnE (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & waterxN (:,:,iblk), wateryN (:,:,iblk), & forcexN (:,:,iblk), forceyN (:,:,iblk), & nmassdti (:,:,iblk), fmN (:,:,iblk), & @@ -809,7 +818,7 @@ subroutine evp (dt) call ice_timer_stop(timer_bound) call unstack_velocity_field(fld2, uvel, vvel) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then call ice_timer_start(timer_bound) ! velocities may have changed in dyn_prep2 @@ -832,7 +841,7 @@ subroutine evp (dt) call ice_timer_stop(timer_evp_2d) deallocate(fld2) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then deallocate(zetax2T, etax2T) endif @@ -919,17 +928,16 @@ subroutine evp (dt) icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & aiu (:,:,iblk), fm (:,:,iblk), & strintx (:,:,iblk), strinty (:,:,iblk), & strairx (:,:,iblk), strairy (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strocnxT(:,:,iblk), strocnyT(:,:,iblk)) + strocnx (:,:,iblk), strocny (:,:,iblk)) enddo !$OMP END PARALLEL DO - if (grid_system == 'CD') then + if (grid_ice == 'CD') then !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -956,37 +964,40 @@ subroutine evp (dt) strairxE(:,:,iblk), strairyE(:,:,iblk), & strocnxE(:,:,iblk), strocnyE(:,:,iblk)) - ! If we are coupling to a C grid ocean model -! do ij =1, icelle -! i = indxei(ij,iblk) -! j = indxej(ij,iblk) - -! strocnxT(i,j,iblk) = strocnxE(i,j,iblk) / aiE(i,j,iblk) -! enddo - -! do ij =1, icelln -! i = indxni(ij,iblk) -! j = indxnj(ij,iblk) - -! strocnyT(i,j,iblk) = strocnyN(i,j,iblk) / aiN(i,j,iblk) -! 
enddo - enddo !$OMP END PARALLEL DO endif - call ice_HaloUpdate (strocnxT, halo_info, & + ! strocn computed on U, N, E as needed. Map strocn U divided by aiu to T + ! TODO: This should be done elsewhere as part of generalization? + ! TODO: Rename strocn[x,y]T since it's different than strocn[x,y][U,N,E] + ! conservation requires aiu be divided before averaging + work1 = c0 + work2 = c0 + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) + do iblk = 1, nblocks + do ij = 1, icellu(iblk) + i = indxui(ij,iblk) + j = indxuj(ij,iblk) + work1(i,j,iblk) = strocnx(i,j,iblk)/aiu(i,j,iblk) + work2(i,j,iblk) = strocny(i,j,iblk)/aiu(i,j,iblk) + enddo + enddo + !$OMP END PARALLEL DO + call ice_HaloUpdate (work1, halo_info, & field_loc_NEcorner, field_type_vector) - call ice_HaloUpdate (strocnyT, halo_info, & + call ice_HaloUpdate (work2, halo_info, & field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('U2TF',strocnxT) ! shift - call grid_average_X2Y('U2TF',strocnyT) + call grid_average_X2Y('F',work1,'U',strocnxT,'T') ! shift + call grid_average_X2Y('F',work2,'U',strocnyT,'T') + ! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport - if (grid_system == 'CD') then - call grid_average_X2Y('E2US',uvelE,uvel) - call grid_average_X2Y('N2US',vvelN,vvel) + if (grid_ice == 'CD') then + call grid_average_X2Y('S',uvelE,'E',uvel,'U') + call grid_average_X2Y('S',vvelN,'N',vvel,'U') endif + call ice_timer_stop(timer_dynamics) ! dynamics end subroutine evp diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 6a18d7454..bc0c25f75 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -17,7 +17,7 @@ module ice_dyn_shared use ice_domain_size, only: max_blocks use ice_fileunits, only: nu_diag use ice_exit, only: abort_ice - use ice_grid, only: grid_system + use ice_grid, only: grid_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_query_parameters @@ -134,7 +134,7 @@ subroutine alloc_dyn_shared stat=ierr) if (ierr/=0) call abort_ice('(alloc_dyn_shared): Out of memory') - if (grid_system == 'CD') then + if (grid_ice == 'CD') then allocate( & uvelE_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep vvelE_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep @@ -186,7 +186,7 @@ subroutine init_dyn (dt) allocate(fcor_blk(nx_block,ny_block,max_blocks)) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then allocate(fcorE_blk(nx_block,ny_block,max_blocks)) allocate(fcorN_blk(nx_block,ny_block,max_blocks)) endif @@ -199,7 +199,7 @@ subroutine init_dyn (dt) ! velocity uvel(i,j,iblk) = c0 ! m/s vvel(i,j,iblk) = c0 ! m/s - if (grid_system == 'CD') then ! extra velocity variables + if (grid_ice == 'CD') then ! extra velocity variables uvelE(i,j,iblk) = c0 vvelE(i,j,iblk) = c0 uvelN(i,j,iblk) = c0 @@ -221,7 +221,7 @@ subroutine init_dyn (dt) fcor_blk(i,j,iblk) = c2*omega*sin(ULAT(i,j,iblk)) ! 1/s endif - if (grid_system == 'CD') then + if (grid_ice == 'CD') then if (trim(coriolis) == 'constant') then fcorE_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. 
Hem; 1/s @@ -250,7 +250,7 @@ subroutine init_dyn (dt) stress12_3(i,j,iblk) = c0 stress12_4(i,j,iblk) = c0 - if (grid_system == 'CD') then + if (grid_ice == 'CD') then stresspT (i,j,iblk) = c0 stressmT (i,j,iblk) = c0 stress12T (i,j,iblk) = c0 @@ -339,8 +339,6 @@ subroutine dyn_prep1 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & aice, vice, & vsno, tmask, & - strairxT, strairyT, & - strairx, strairy, & tmass, icetmask) integer (kind=int_kind), intent(in) :: & @@ -350,16 +348,12 @@ subroutine dyn_prep1 (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & aice , & ! concentration of ice vice , & ! volume per unit area of ice (m) - vsno , & ! volume per unit area of snow (m) - strairxT, & ! stress on ice by air, x-direction - strairyT ! stress on ice by air, y-direction + vsno ! volume per unit area of snow (m) logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & tmask ! land/boundary mask, thickness (T-cell) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - strairx , & ! stress on ice by air, x-direction - strairy , & ! stress on ice by air, y-direction tmass ! total mass of ice and snow (kg/m^2) integer (kind=int_kind), dimension (nx_block,ny_block), intent(out) :: & @@ -403,14 +397,6 @@ subroutine dyn_prep1 (nx_block, ny_block, & tmphm(i,j) = tmask(i,j) .and. (aice (i,j) > a_min) & .and. (tmass(i,j) > m_min) - !----------------------------------------------------------------- - ! prep to convert to U grid - !----------------------------------------------------------------- - ! these quantities include the factor of aice needed for - ! correct treatment of free drift - strairx(i,j) = strairxT(i,j) - strairy(i,j) = strairyT(i,j) - !----------------------------------------------------------------- ! augmented mask (land + open ocean) !----------------------------------------------------------------- @@ -610,7 +596,7 @@ subroutine dyn_prep2 (nx_block, ny_block, & do j = jlo, jhi do i = ilo, ihi iceumask_old(i,j) = iceumask(i,j) ! save -! if (grid_system == 'B') then ! include ice mask. +! if (grid_ice == 'B') then ! include ice mask. ! ice extent mask (U-cells) iceumask(i,j) = (umask(i,j)) .and. (aiu (i,j) > a_min) & .and. (umass(i,j) > m_min) @@ -954,8 +940,7 @@ subroutine dyn_finish (nx_block, ny_block, & aiu, fm, & strintx, strinty, & strairx, strairy, & - strocnx, strocny, & - strocnxT, strocnyT) + strocnx, strocny) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -981,10 +966,6 @@ subroutine dyn_finish (nx_block, ny_block, & strocnx , & ! ice-ocean stress, x-direction strocny ! ice-ocean stress, y-direction - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out), optional :: & - strocnxT, & ! ice-ocean stress, x-direction - strocnyT ! ice-ocean stress, y-direction - ! local variables integer (kind=int_kind) :: & @@ -1001,17 +982,6 @@ subroutine dyn_finish (nx_block, ny_block, & if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (present(strocnxT) .and. present(strocnyT)) then - - do j = 1, ny_block - do i = 1, nx_block - strocnxT(i,j) = c0 - strocnyT(i,j) = c0 - enddo - enddo - - endif - ! ocean-ice stress for coupling do ij =1, icellu i = indxui(ij) @@ -1037,14 +1007,6 @@ subroutine dyn_finish (nx_block, ny_block, & ! strocnx(i,j) = -(strairx(i,j) + strintx(i,j)) ! strocny(i,j) = -(strairy(i,j) + strinty(i,j)) - if (present(strocnxT) .and. present(strocnyT)) then - - ! Prepare to convert to T grid - ! 
divide by aice for coupling - strocnxT(i,j) = strocnx(i,j) / aiu(i,j) - strocnyT(i,j) = strocny(i,j) / aiu(i,j) - - endif enddo end subroutine dyn_finish @@ -1325,7 +1287,7 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & endif enddo - select case (trim(grid_system)) + select case (trim(grid_ice)) case('B') do ij = 1, icellu i = indxui(ij) @@ -1353,7 +1315,7 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & enddo else - call abort_ice(subname // ' insufficient number of arguments for grid_system:' // grid_system) + call abort_ice(subname // ' insufficient number of arguments for grid_ice:' // grid_ice) endif end select diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 66499037a..9d4d220fe 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -200,9 +200,10 @@ subroutine implicit_solver (dt) stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_grid, only: tmask, umask, dxt, dyt, cxp, cyp, cxm, cym, & - tarear, grid_type, grid_average_X2Y !, grid_system commented out until implementation of c grid + tarear, grid_type, grid_average_X2Y, & + grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & - aice_init, aice0, aicen, vicen, strength!, uvelE, vvelN ommented out until implementation of c grid + aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & ice_timer_start, ice_timer_stop @@ -218,6 +219,8 @@ subroutine implicit_solver (dt) i, j, ij real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + uocnU , & ! i ocean current (m/s) + vocnU , & ! j ocean current (m/s) tmass , & ! total mass of ice and snow (kg/m^2) waterx , & ! for ocean stress calculation, x (m/s) watery , & ! for ocean stress calculation, y (m/s) @@ -252,6 +255,10 @@ subroutine implicit_solver (dt) real (kind=dbl_kind), allocatable :: & sol(:) ! solution vector + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1, & ! temporary + work2 ! temporary + character(len=*), parameter :: subname = '(implicit_solver)' call ice_timer_start(timer_dynamics) ! dynamics @@ -304,8 +311,6 @@ subroutine implicit_solver (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - strairxT(:,:,iblk), strairyT(:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & tmass (:,:,iblk), icetmask(:,:,iblk)) enddo ! iblk @@ -320,8 +325,10 @@ subroutine implicit_solver (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('T2UF',tmass,umass) - call grid_average_X2Y('T2UF',aice_init, aiu) + call grid_average_X2Y('F',tmass,'T',umass,'U') + call grid_average_X2Y('F',aice_init,'T', aiu,'U') + call grid_average_X2Y('S',uocn,grid_ocn_dynu,uocnU,'U') + call grid_average_X2Y('S',vocn,grid_ocn_dynv,vocnU,'U') !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing @@ -333,15 +340,15 @@ subroutine implicit_solver (dt) file=__FILE__, line=__LINE__) if (.not. 
calc_strair) then
-         strairx(:,:,:) = strax(:,:,:)
-         strairy(:,:,:) = stray(:,:,:)
+         call grid_average_X2Y('F', strax, grid_atm_dynu, strairx, 'U')
+         call grid_average_X2Y('F', stray, grid_atm_dynv, strairy, 'U')
       else
-         call ice_HaloUpdate (strairx, halo_info, &
+         call ice_HaloUpdate (strairxT, halo_info, &
                               field_loc_center, field_type_vector)
-         call ice_HaloUpdate (strairy, halo_info, &
+         call ice_HaloUpdate (strairyT, halo_info, &
                               field_loc_center, field_type_vector)
-         call grid_average_X2Y('T2UF',strairx)
-         call grid_average_X2Y('T2UF',strairy)
+         call grid_average_X2Y('F',strairxT,'T',strairx,'U')
+         call grid_average_X2Y('F',strairyT,'T',strairy,'U')
       endif

       ! tcraig, tcx, threading here leads to some non-reproducible results and failures in icepack_ice_strength
@@ -367,7 +374,7 @@ subroutine implicit_solver (dt)
                         aiu       (:,:,iblk), umass     (:,:,iblk), &
                         umassdti  (:,:,iblk), fcor_blk  (:,:,iblk), &
                         umask     (:,:,iblk),                       &
-                        uocn      (:,:,iblk), vocn      (:,:,iblk), &
+                        uocnU     (:,:,iblk), vocnU     (:,:,iblk), &
                         strairx   (:,:,iblk), strairy   (:,:,iblk), &
                         ss_tltx   (:,:,iblk), ss_tlty   (:,:,iblk), &
                         icetmask  (:,:,iblk), iceumask  (:,:,iblk), &
@@ -490,6 +497,7 @@ subroutine implicit_solver (dt)
                            indxti  , indxtj,  &
                            indxui  , indxuj,  &
                            aiu     , ntot  ,  &
+                           uocnU   , vocnU ,  &
                            waterx  , watery,  &
                            bxfix   , byfix ,  &
                            umassdti, sol   ,  &
@@ -642,26 +650,41 @@ subroutine implicit_solver (dt)
                          icellu  (iblk), Cdn_ocn (:,:,iblk), &
                          indxui  (:,iblk), indxuj  (:,iblk), &
                          uvel    (:,:,iblk), vvel    (:,:,iblk), &
-                         uocn    (:,:,iblk), vocn    (:,:,iblk), &
+                         uocnU   (:,:,iblk), vocnU   (:,:,iblk), &
                          aiu     (:,:,iblk), fm      (:,:,iblk), &
                          strintx (:,:,iblk), strinty (:,:,iblk), &
                          strairx (:,:,iblk), strairy (:,:,iblk), &
-                         strocnx (:,:,iblk), strocny (:,:,iblk), &
-                         strocnxT(:,:,iblk), strocnyT(:,:,iblk))
+                         strocnx (:,:,iblk), strocny (:,:,iblk))

      enddo
      !$OMP END PARALLEL DO

-      call ice_HaloUpdate (strocnxT, halo_info, &
+      ! strocn computed on U, N, E as needed. Map strocn U divided by aiu to T
+      ! TODO: This should be done elsewhere as part of generalization?
+      ! conservation requires aiu be divided before averaging
+      work1 = c0
+      work2 = c0
+      !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j)
+      do iblk = 1, nblocks
+         do ij = 1, icellu(iblk)
+            i = indxui(ij,iblk)
+            j = indxuj(ij,iblk)
+            work1(i,j,iblk) = strocnx(i,j,iblk)/aiu(i,j,iblk)
+            work2(i,j,iblk) = strocny(i,j,iblk)/aiu(i,j,iblk)
+         enddo
+      enddo
+      !$OMP END PARALLEL DO
+      call ice_HaloUpdate (work1, halo_info, &
                           field_loc_NEcorner, field_type_vector)
-      call ice_HaloUpdate (strocnyT, halo_info, &
+      call ice_HaloUpdate (work2, halo_info, &
                           field_loc_NEcorner, field_type_vector)
-      call grid_average_X2Y('U2TF',strocnxT)    ! shift
-      call grid_average_X2Y('U2TF',strocnyT)
+      call grid_average_X2Y('F',work1,'U',strocnxT,'T')    ! shift
+      call grid_average_X2Y('F',work2,'U',strocnyT,'T')
+
      ! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport
      ! commented out in order to focus on EVP for now within the CD grid
      ! should be used when routine is ready
-!      if (grid_system == 'CD') then
+!      if (grid_ice == 'CD') then
!         call grid_average_X2Y('E2US',uvelE,uvel)
!         call grid_average_X2Y('N2US',vvelN,vvel)
! 
endif @@ -686,6 +708,7 @@ subroutine anderson_solver (icellt , icellu, & indxti , indxtj, & indxui , indxuj, & aiu , ntot , & + uocn , vocn , & waterx , watery, & bxfix , byfix , & umassdti, sol , & @@ -700,7 +723,7 @@ subroutine anderson_solver (icellt , icellu, & use ice_constants, only: c1 use ice_domain, only: maskhalo_dyn, halo_info use ice_domain_size, only: max_blocks - use ice_flux, only: uocn, vocn, fm, Tbu + use ice_flux, only: fm, Tbu use ice_grid, only: dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, & uarear, tinyarea use ice_state, only: uvel, vvel, strength @@ -721,6 +744,8 @@ subroutine anderson_solver (icellt , icellu, & real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & aiu , & ! ice fraction on u-grid + uocn , & ! i ocean current (m/s) + vocn , & ! j ocean current (m/s) waterx , & ! for ocean stress calculation, x (m/s) watery , & ! for ocean stress calculation, y (m/s) bxfix , & ! part of bx that is constant during Picard diff --git a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 index 5d34b60fc..01eb6b989 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 @@ -261,7 +261,7 @@ subroutine transport_remap (dt) use ice_blocks, only: nx_block, ny_block, block, get_block, nghost use ice_state, only: aice0, aicen, vicen, vsnon, trcrn, & uvel, vvel, bound_state, uvelE, vvelN - use ice_grid, only: tarea, grid_system + use ice_grid, only: tarea, grid_ice use ice_calendar, only: istep1 use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_advect, timer_bound @@ -538,14 +538,14 @@ subroutine transport_remap (dt) !------------------------------------------------------------------- ! Main remapping routine: Step ice area and tracers forward in time. !------------------------------------------------------------------- - if (grid_system == 'CD') then + if (grid_ice == 'CD') then call horizontal_remap (dt, ntrace, & uvel (:,:,:), vvel (:,:,:), & aim (:,:,:,:), trm (:,:,:,:,:), & l_fixed_area, & tracer_type, depend, & has_dependents, integral_order, & - l_dp_midpt, grid_system, & + l_dp_midpt, grid_ice, & uvelE(:,:,:),vvelN(:,:,:)) else call horizontal_remap (dt, ntrace, & @@ -554,7 +554,7 @@ subroutine transport_remap (dt) l_fixed_area, & tracer_type, depend, & has_dependents, integral_order, & - l_dp_midpt, grid_system) + l_dp_midpt, grid_ice) endif !------------------------------------------------------------------- @@ -720,7 +720,7 @@ subroutine transport_upwind (dt) use ice_state, only: aice0, aicen, vicen, vsnon, trcrn, & uvel, vvel, trcr_depend, bound_state, trcr_base, & n_trcr_strata, nt_strata, uvelE, vvelN - use ice_grid, only: HTE, HTN, tarea, grid_system + use ice_grid, only: HTE, HTN, tarea, grid_ice use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_bound, timer_advect @@ -771,7 +771,7 @@ subroutine transport_upwind (dt) !------------------------------------------------------------------- ! Average corner velocities to edges. 
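! (For grid_ice = 'CD' no corner-to-edge average is needed below:
!  uvelE and vvelN are already defined at the E and N face centers,
!  so they are copied directly into the edge arrays, while the B-grid
!  branch averages the U corner velocities as before.)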
!------------------------------------------------------------------- - if (grid_system == 'CD') then + if (grid_ice == 'CD') then uee(:,:,:)=uvelE(:,:,:) vnn(:,:,:)=vvelN(:,:,:) else diff --git a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 index aae19378a..662fa7e60 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 @@ -319,7 +319,7 @@ subroutine horizontal_remap (dt, ntrace, & tracer_type, depend, & has_dependents, & integral_order, & - l_dp_midpt, grid_system, & + l_dp_midpt, grid_ice, & uvelE, vvelN) use ice_boundary, only: ice_halo, ice_HaloMask, ice_HaloUpdate, & @@ -353,7 +353,7 @@ subroutine horizontal_remap (dt, ntrace, & real (kind=dbl_kind), intent(inout), dimension (nx_block,ny_block,ntrace,ncat,max_blocks) :: & tm ! mean tracer values in each grid cell - character (len=char_len_long), intent(in) :: grid_system + character (len=char_len_long), intent(in) :: grid_ice !------------------------------------------------------------------- ! If l_fixed_area is true, the area of each departure region is @@ -670,7 +670,7 @@ subroutine horizontal_remap (dt, ntrace, & enddo if (l_fixed_area) then - if (grid_system == 'CD') then ! velocities are already on the center + if (grid_ice == 'CD') then ! velocities are already on the center do j = jlo, jhi do i = ilo-1, ihi edgearea_e(i,j) = uvelE(i,j,iblk) * HTE(i,j,iblk) * dt diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90 index 00d9aac97..50f383568 100644 --- a/cicecore/cicedynB/general/ice_flux.F90 +++ b/cicecore/cicedynB/general/ice_flux.F90 @@ -33,41 +33,31 @@ module ice_flux !----------------------------------------------------------------- ! Dynamics component + ! All variables are assumed to be on the atm or ocn thermodynamic + ! grid except as noted !----------------------------------------------------------------- real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & ! in from atmos (if .not.calc_strair) - strax , & ! wind stress components (N/m^2) - stray , & ! - straxE , & ! wind stress components (N/m^2) - strayE , & ! - straxN , & ! wind stress components (N/m^2) - strayN , & ! + strax , & ! wind stress components (N/m^2), on grid_atm_dynu + stray , & ! on grid_atm_dynv ! in from ocean - uocn , & ! ocean current, x-direction (m/s) - vocn , & ! ocean current, y-direction (m/s) - uocnE , & ! ocean current, x-direction (m/s) - vocnE , & ! ocean current, y-direction (m/s) - uocnN , & ! ocean current, x-direction (m/s) - vocnN , & ! ocean current, y-direction (m/s) - ss_tltx , & ! sea surface slope, x-direction (m/m) - ss_tlty , & ! sea surface slope, y-direction - ss_tltxE, & ! sea surface slope, x-direction (m/m) - ss_tltyE, & ! sea surface slope, y-direction - ss_tltxN, & ! sea surface slope, x-direction (m/m) - ss_tltyN, & ! sea surface slope, y-direction + uocn , & ! ocean current, x-direction (m/s), on grid_ocn_dynu + vocn , & ! ocean current, y-direction (m/s), on grid_ocn_dynv + ss_tltx , & ! sea surface slope, x-direction (m/m), on grid_ocn_dynu + ss_tlty , & ! sea surface slope, y-direction, on grid_ocn_dynv hwater , & ! water depth for seabed stress calc (landfast ice) ! out to atmosphere - strairxT, & ! stress on ice by air, x-direction - strairyT, & ! stress on ice by air, y-direction + strairxT, & ! stress on ice by air, x-direction at T points, computed in icepack + strairyT, & ! 
stress on ice by air, y-direction at T points, computed in icepack

      ! out to ocean T-cell (kg/m s^2)
      ! Note, CICE_IN_NEMO uses strocnx and strocny for coupling
-      strocnxT, & ! ice-ocean stress, x-direction
-      strocnyT    ! ice-ocean stress, y-direction
+      strocnxT, & ! ice-ocean stress, x-direction at T points, mapped from strocnx, divided by ice fraction
+      strocnyT    ! ice-ocean stress, y-direction at T points, mapped from strocny, divided by ice fraction

      ! diagnostic

@@ -77,30 +67,30 @@ module ice_flux
      sigP    , & ! internal ice pressure (N/m)
      taubx   , & ! seabed stress (x) (N/m^2)
      tauby   , & ! seabed stress (y) (N/m^2)
-      strairx , & ! stress on ice by air, x-direction
-      strairy , & ! stress on ice by air, y-direction
-      strocnx , & ! ice-ocean stress, x-direction
-      strocny , & ! ice-ocean stress, y-direction
+      strairx , & ! stress on ice by air, x-direction at U points, mapped from strairxT
+      strairy , & ! stress on ice by air, y-direction at U points, mapped from strairyT
+      strocnx , & ! ice-ocean stress, x-direction at U points, computed in dyn_finish
+      strocny , & ! ice-ocean stress, y-direction at U points, computed in dyn_finish
      strtltx , & ! stress due to sea surface slope, x-direction
      strtlty , & ! stress due to sea surface slope, y-direction
      strintx , & ! divergence of internal ice stress, x (N/m^2)
      strinty , & ! divergence of internal ice stress, y (N/m^2)
      taubxN  , & ! seabed stress (x) at N points (N/m^2)
      taubyN  , & ! seabed stress (y) at N points (N/m^2)
-      strairxN, & ! stress on ice by air, x-direction at N points
-      strairyN, & ! stress on ice by air, y-direction at N points
-      strocnxN, & ! ice-ocean stress, x-direction at N points
-      strocnyN, & ! ice-ocean stress, y-direction at N points
+      strairxN, & ! stress on ice by air, x-direction at N points, mapped from strairxT
+      strairyN, & ! stress on ice by air, y-direction at N points, mapped from strairyT
+      strocnxN, & ! ice-ocean stress, x-direction at N points, computed in dyn_finish
+      strocnyN, & ! ice-ocean stress, y-direction at N points, computed in dyn_finish
      strtltxN, & ! stress due to sea surface slope, x-direction at N points
      strtltyN, & ! stress due to sea surface slope, y-direction at N points
      strintxN, & ! divergence of internal ice stress, x at N points (N/m^2)
      strintyN, & ! divergence of internal ice stress, y at N points (N/m^2)
      taubxE  , & ! seabed stress (x) at E points (N/m^2)
      taubyE  , & ! seabed stress (y) at E points (N/m^2)
-      strairxE, & ! stress on ice by air, x-direction at E points
-      strairyE, & ! stress on ice by air, y-direction at E points
-      strocnxE, & ! ice-ocean stress, x-direction at E points
-      strocnyE, & ! ice-ocean stress, y-direction at E points
+      strairxE, & ! stress on ice by air, x-direction at E points, mapped from strairxT
+      strairyE, & ! stress on ice by air, y-direction at E points, mapped from strairyT
+      strocnxE, & ! ice-ocean stress, x-direction at E points, computed in dyn_finish
+      strocnyE, & ! ice-ocean stress, y-direction at E points, computed in dyn_finish
      strtltxE, & ! stress due to sea surface slope, x-direction at E points
      strtltyE, & ! stress due to sea surface slope, y-direction at E points
      strintxE, & ! divergence of internal ice stress, x at E points (N/m^2)
@@ -135,7 +125,7 @@ module ice_flux
      stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22
      stressm_1, stressm_2, stressm_3, stressm_4 , & ! sigma11-sigma22
      stress12_1,stress12_2,stress12_3,stress12_4, & ! sigma12
-      ! ice stress tensor at U and T locations (grid_system = 'CD') (kg/s^2)
+      ! 
ice stress tensor at U and T locations (grid_ice = 'CD') (kg/s^2) stresspT, stressmT, stress12T, & ! sigma11+sigma22, sigma11-sigma22, sigma12 stresspU, stressmU, stress12U ! " @@ -170,9 +160,9 @@ module ice_flux real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & zlvl , & ! atm level height (momentum) (m) zlvs , & ! atm level height (scalar quantities) (m) - uatm , & ! wind velocity components (m/s) - vatm , & - wind , & ! wind speed (m/s) + uatm , & ! wind velocity components (m/s), on grid_atm_dynu + vatm , & ! on grid_atm_dynv + wind , & ! wind speed (m/s) , on grid_atm_dynu potT , & ! air potential temperature (K) Tair , & ! air temperature (K) Qa , & ! specific humidity (kg/kg) @@ -374,6 +364,8 @@ module ice_flux !----------------------------------------------------------------- real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & + uatmT , & ! uatm mapped to T grid (m/s) + vatmT , & ! vatm mapped to T grid (m/s) rside , & ! fraction of ice that melts laterally fside , & ! lateral heat flux (W/m^2) fsw , & ! incoming shortwave radiation (W/m^2) @@ -395,7 +387,7 @@ module ice_flux ! subroutine alloc_flux - use ice_grid, only : grid_system + use ice_grid, only : grid_ice integer (int_kind) :: ierr @@ -547,6 +539,8 @@ subroutine alloc_flux fswthru_ai (nx_block,ny_block,max_blocks), & ! shortwave penetrating to ocean (W/m^2) fresh_da (nx_block,ny_block,max_blocks), & ! fresh water flux to ocean due to data assim (kg/m^2/s) fsalt_da (nx_block,ny_block,max_blocks), & ! salt flux to ocean due to data assimilation(kg/m^2/s) + uatmT (nx_block,ny_block,max_blocks), & ! uatm mapped to T grid + vatmT (nx_block,ny_block,max_blocks), & ! vatm mapped to T grid rside (nx_block,ny_block,max_blocks), & ! fraction of ice that melts laterally fside (nx_block,ny_block,max_blocks), & ! lateral melt rate (W/m^2) fsw (nx_block,ny_block,max_blocks), & ! incoming shortwave radiation (W/m^2) @@ -586,14 +580,8 @@ subroutine alloc_flux stat=ierr) if (ierr/=0) call abort_ice('(alloc_flux): Out of memory') - if (grid_system == "CD") & + if (grid_ice == "CD") & allocate( & - straxN (nx_block,ny_block,max_blocks), & ! wind stress components (N/m^2) - strayN (nx_block,ny_block,max_blocks), & ! - uocnN (nx_block,ny_block,max_blocks), & ! ocean current, x-direction (m/s) - vocnN (nx_block,ny_block,max_blocks), & ! ocean current, y-direction (m/s) - ss_tltxN (nx_block,ny_block,max_blocks), & ! sea surface slope, x-direction (m/m) - ss_tltyN (nx_block,ny_block,max_blocks), & ! sea surface slope, y-direction taubxN (nx_block,ny_block,max_blocks), & ! seabed stress (x) at N points (N/m^2) taubyN (nx_block,ny_block,max_blocks), & ! seabed stress (y) at N points (N/m^2) strairxN (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction at N points @@ -607,12 +595,6 @@ subroutine alloc_flux icenmask (nx_block,ny_block,max_blocks), & ! ice extent mask (N-cell) fmN (nx_block,ny_block,max_blocks), & ! Coriolis param. * mass in N-cell (kg/s) TbN (nx_block,ny_block,max_blocks), & ! factor for seabed stress (landfast ice) - straxE (nx_block,ny_block,max_blocks), & ! wind stress components (N/m^2) - strayE (nx_block,ny_block,max_blocks), & ! - uocnE (nx_block,ny_block,max_blocks), & ! ocean current, x-direction (m/s) - vocnE (nx_block,ny_block,max_blocks), & ! ocean current, y-direction (m/s) - ss_tltxE (nx_block,ny_block,max_blocks), & ! sea surface slope, x-direction (m/m) - ss_tltyE (nx_block,ny_block,max_blocks), & ! sea surface slope, y-direction taubxE (nx_block,ny_block,max_blocks), & ! 
seabed stress (x) at E points (N/m^2) taubyE (nx_block,ny_block,max_blocks), & ! seabed stress (y) at E points (N/m^2) strairxE (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction at E points @@ -650,7 +632,7 @@ subroutine init_coupler_flux use ice_flux_bgc, only: flux_bio_atm, flux_bio, faero_atm, fiso_atm, & fnit, famm, fsil, fdmsp, fdms, fhum, fdust, falgalN, & fdoc, fdon, fdic, ffed, ffep - use ice_grid, only: bathymetry, grid_system + use ice_grid, only: bathymetry, grid_ice integer (kind=int_kind) :: n @@ -755,17 +737,6 @@ subroutine init_coupler_flux frzmlt_init(:,:,:) = c0 ! freezing/melting potential (W/m^2) sss (:,:,:) = 34.0_dbl_kind ! sea surface salinity (ppt) - if (grid_system == 'CD') then - ss_tltxN(:,:,:) = c0 ! sea surface tilt (m/m) - ss_tltyN(:,:,:) = c0 - ss_tltxE(:,:,:) = c0 ! sea surface tilt (m/m) - ss_tltyE(:,:,:) = c0 - uocnN (:,:,:) = c0 ! surface ocean currents (m/s) - vocnN (:,:,:) = c0 - uocnE (:,:,:) = c0 ! surface ocean currents (m/s) - vocnE (:,:,:) = c0 - endif - do iblk = 1, size(Tf,3) do j = 1, size(Tf,2) do i = 1, size(Tf,1) diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90 index f3b749f77..013915683 100755 --- a/cicecore/cicedynB/general/ice_forcing.F90 +++ b/cicecore/cicedynB/general/ice_forcing.F90 @@ -516,7 +516,7 @@ subroutine init_forcing_ocn(dt) elseif (trim(ocn_data_type) == 'hycom') then call ocn_data_hycom_init - elseif (trim(atm_data_type) == 'box2001') then + elseif (trim(ocn_data_type) == 'box2001') then call box2001_data_ocn ! uniform forcing options @@ -751,7 +751,7 @@ subroutine get_forcing_ocn (dt) elseif (trim(ocn_data_type) == 'hycom') then ! call ocn_data_hycom(dt) !MHRI: NOT IMPLEMENTED YET - elseif (trim(atm_data_type) == 'box2001') then + elseif (trim(ocn_data_type) == 'box2001') then call box2001_data_ocn ! uniform forcing options elseif (trim(ocn_data_type) == 'uniform_northeast') then @@ -4110,8 +4110,8 @@ subroutine ocn_data_ncar_init_3D work1(:,:,:) = ocn_frc_m(:,:,:,n ,m) work2(:,:,:) = ocn_frc_m(:,:,:,n+1,m) - call grid_average_X2Y('T2UF',work1,ocn_frc_m(:,:,:,n ,m)) - call grid_average_X2Y('T2UF',work2,ocn_frc_m(:,:,:,n+1,m)) + call grid_average_X2Y('F',work1,'T',ocn_frc_m(:,:,:,n ,m),'U') + call grid_average_X2Y('F',work2,'T',ocn_frc_m(:,:,:,n+1,m),'U') enddo ! month loop enddo ! field loop @@ -4368,6 +4368,9 @@ subroutine ocn_data_hadgem(dt) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: & sstdat ! data value toward which SST is restored + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 ! temporary array + real (kind=dbl_kind) :: workx, worky logical (kind=log_kind) :: readm @@ -4512,8 +4515,11 @@ subroutine ocn_data_hadgem(dt) ! Interpolate to U grid !----------------------------------------------------------------- - call grid_average_X2Y('T2UF',uocn) - call grid_average_X2Y('T2UF',vocn) + ! tcraig, this is now computed in dynamics for consistency + !work1 = uocn + !call grid_average_X2Y('F',work1,'T',uocn,'U') + !work1 = vocn + !call grid_average_X2Y('F',work1,'T',vocn,'U') endif ! 
ocn_data_type = hadgem_sst_uvocn

@@ -5316,7 +5322,7 @@ subroutine box2001_data_atm
      call icepack_query_parameters(pi_out=pi, pi2_out=pi2, puny_out=puny)
      call icepack_query_parameters(secday_out=secday)

-      call grid_average_X2Y('T2UF',aice, aiu)
+      call grid_average_X2Y('F',aice,'T',aiu,'U')

      period = c4*secday

@@ -5443,7 +5449,7 @@ subroutine box2001_data_atm
      use ice_flux, only: uatm, vatm, wind, rhoa, strax, stray

      character(len=*), intent(in) :: dir
-      real(kind=dbl_kind), intent(in), optional :: spd    ! speed for test
+      real(kind=dbl_kind), intent(in), optional :: spd    ! velocity

      ! local parameters

@@ -5509,7 +5515,7 @@ subroutine uniform_data_ocn(dir,spd)

      character(len=*), intent(in) :: dir

-      real(kind=dbl_kind), intent(in), optional :: spd    ! speed for test
+      real(kind=dbl_kind), intent(in), optional :: spd    ! velocity

      ! local parameters

diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90
index 95223d7ae..3292f6274 100644
--- a/cicecore/cicedynB/general/ice_init.F90
+++ b/cicecore/cicedynB/general/ice_init.F90
@@ -99,7 +99,10 @@ subroutine input_data
      use ice_grid, only: grid_file, gridcpl_file, kmt_file, &
                          bathymetry_file, use_bathymetry, &
                          bathymetry_format, kmt_type, &
-                         grid_type, grid_format, grid_system, &
+                         grid_type, grid_format, &
+                         grid_ice, grid_ice_thrm, grid_ice_dynu, grid_ice_dynv, &
+                         grid_ocn, grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv, &
+                         grid_atm, grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, &
                          dxrect, dyrect, &
                          pgl_global_ext
      use ice_dyn_shared, only: ndte, kdyn, revised_evp, yield_curve, &
@@ -185,7 +188,8 @@ subroutine input_data
        bathymetry_file, use_bathymetry, nfsd, bathymetry_format, &
        ncat, nilyr, nslyr, nblyr, &
        kcatbound, gridcpl_file, dxrect, dyrect, &
-       close_boundaries, orca_halogrid, grid_system, kmt_type
+       close_boundaries, orca_halogrid, grid_ice, kmt_type, &
+       grid_atm, grid_ocn

      namelist /tracer_nml/ &
        tr_iage, restart_age, &
@@ -325,8 +329,10 @@ subroutine input_data
      ice_ic = 'default'        ! latitude and sst-dependent
      grid_format = 'bin'       ! file format ('bin'=binary or 'nc'=netcdf)
      grid_type = 'rectangular' ! define rectangular grid internally
-      grid_system = 'B'         ! underlying grid system
      grid_file = 'unknown_grid_file'
+      grid_ice = 'B'            ! underlying grid system
+      grid_atm = 'A'            ! underlying atm forcing/coupling grid
+      grid_ocn = 'A'            ! underlying ocn forcing/coupling grid
      gridcpl_file = 'unknown_gridcpl_file'
      orca_halogrid = .false.   ! orca haloed grid
      bathymetry_file = 'unknown_bathymetry_file'
@@ -700,7 +706,9 @@ subroutine input_data
      call broadcast_scalar(dyrect,        master_task)
      call broadcast_scalar(close_boundaries, master_task)
      call broadcast_scalar(grid_type,     master_task)
-      call broadcast_scalar(grid_system,   master_task)
+      call broadcast_scalar(grid_ice,      master_task)
+      call broadcast_scalar(grid_ocn,      master_task)
+      call broadcast_scalar(grid_atm,      master_task)
      call broadcast_scalar(grid_file,     master_task)
      call broadcast_scalar(gridcpl_file,  master_task)
      call broadcast_scalar(orca_halogrid, master_task)
@@ -1336,6 +1344,68 @@ subroutine input_data
      wave_spec = .false.
      if (tr_fsd .and. (trim(wave_spec_type) /= 'none')) wave_spec = .true.

+      ! 
compute grid locations for thermo, u and v fields + + grid_ice_thrm = 'T' + if (grid_ice == 'A') then + grid_ice_dynu = 'T' + grid_ice_dynv = 'T' + elseif (grid_ice == 'B') then + grid_ice_dynu = 'U' + grid_ice_dynv = 'U' + elseif (grid_ice == 'C') then + grid_ice_dynu = 'E' + grid_ice_dynv = 'N' + elseif (grid_ice == 'CD') then + grid_ice_dynu = 'NE' + grid_ice_dynv = 'NE' + else + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: unknown grid_ice: '//trim(grid_ice) + endif + abort_list = trim(abort_list)//":64" + endif + + grid_atm_thrm = 'T' + if (grid_atm == 'A') then + grid_atm_dynu = 'T' + grid_atm_dynv = 'T' + elseif (grid_atm == 'B') then + grid_atm_dynu = 'U' + grid_atm_dynv = 'U' + elseif (grid_atm == 'C') then + grid_atm_dynu = 'E' + grid_atm_dynv = 'N' + elseif (grid_atm == 'CD') then + grid_atm_dynu = 'NE' + grid_atm_dynv = 'NE' + else + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: unknown grid_atm: '//trim(grid_atm) + endif + abort_list = trim(abort_list)//":65" + endif + + grid_ocn_thrm = 'T' + if (grid_ocn == 'A') then + grid_ocn_dynu = 'T' + grid_ocn_dynv = 'T' + elseif (grid_ocn == 'B') then + grid_ocn_dynu = 'U' + grid_ocn_dynv = 'U' + elseif (grid_ocn == 'C') then + grid_ocn_dynu = 'E' + grid_ocn_dynv = 'N' + elseif (grid_ocn == 'CD') then + grid_ocn_dynu = 'NE' + grid_ocn_dynv = 'NE' + else + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: unknown grid_ocn: '//trim(grid_ocn) + endif + abort_list = trim(abort_list)//":66" + endif + !----------------------------------------------------------------- ! spew !----------------------------------------------------------------- @@ -1367,7 +1437,18 @@ subroutine input_data if (trim(grid_type) == 'displaced_pole') tmpstr2 = ' : user-defined grid with rotated north pole' if (trim(grid_type) == 'tripole') tmpstr2 = ' : user-defined grid with northern hemisphere zipper' write(nu_diag,1030) ' grid_type = ',trim(grid_type),trim(tmpstr2) - write(nu_diag,1030) ' grid_system = ',trim(grid_system) + write(nu_diag,1030) ' grid_ice = ',trim(grid_ice) + write(nu_diag,1030) ' grid_ice_thrm = ',trim(grid_ice_thrm) + write(nu_diag,1030) ' grid_ice_dynu = ',trim(grid_ice_dynu) + write(nu_diag,1030) ' grid_ice_dynv = ',trim(grid_ice_dynv) + write(nu_diag,1030) ' grid_atm = ',trim(grid_atm) + write(nu_diag,1030) ' grid_atm_thrm = ',trim(grid_atm_thrm) + write(nu_diag,1030) ' grid_atm_dynu = ',trim(grid_atm_dynu) + write(nu_diag,1030) ' grid_atm_dynv = ',trim(grid_atm_dynv) + write(nu_diag,1030) ' grid_ocn = ',trim(grid_ocn) + write(nu_diag,1030) ' grid_ocn_thrm = ',trim(grid_ocn_thrm) + write(nu_diag,1030) ' grid_ocn_dynu = ',trim(grid_ocn_dynu) + write(nu_diag,1030) ' grid_ocn_dynv = ',trim(grid_ocn_dynv) write(nu_diag,1030) ' kmt_type = ',trim(kmt_type) if (trim(grid_type) /= 'rectangular') then if (use_bathymetry) then @@ -2036,9 +2117,9 @@ subroutine input_data abort_list = trim(abort_list)//":20" endif - if (grid_system /= 'B' .and. & - grid_system /= 'CD' ) then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown grid_system=',trim(grid_system) + if (grid_ice /= 'B' .and. 
& + grid_ice /= 'CD' ) then + if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown grid_ice=',trim(grid_ice) abort_list = trim(abort_list)//":26" endif @@ -2137,7 +2218,7 @@ subroutine init_state use ice_domain, only: nblocks, blocks_ice, halo_info use ice_domain_size, only: ncat, nilyr, nslyr, n_iso, n_aero, nfsd use ice_flux, only: sst, Tf, Tair, salinz, Tmltz - use ice_grid, only: tmask, ULON, TLAT, grid_system, grid_average_X2Y + use ice_grid, only: tmask, ULON, TLAT, grid_ice, grid_average_X2Y use ice_boundary, only: ice_HaloUpdate use ice_forcing, only: ice_data_type use ice_constants, only: field_loc_Nface, field_loc_Eface, field_type_scalar @@ -2376,14 +2457,14 @@ subroutine init_state vicen, vsnon, & ntrcr, trcrn) - if (trim(grid_system) == 'CD') then + if (trim(grid_ice) == 'CD') then ! move from B-grid to CD-grid for boxslotcyl test if (trim(ice_data_type) == 'boxslotcyl') then - call grid_average_X2Y('U2NS',uvel,uvelN) - call grid_average_X2Y('U2NS',vvel,vvelN) - call grid_average_X2Y('U2ES',uvel,uvelE) - call grid_average_X2Y('U2ES',vvel,vvelE) + call grid_average_X2Y('S',uvel,'U',uvelN,'N') + call grid_average_X2Y('S',vvel,'U',vvelN,'N') + call grid_average_X2Y('S',uvel,'U',uvelE,'E') + call grid_average_X2Y('S',vvel,'U',vvelE,'E') endif ! Halo update on North, East faces diff --git a/cicecore/cicedynB/general/ice_step_mod.F90 b/cicecore/cicedynB/general/ice_step_mod.F90 index 976e95361..46a9c9389 100644 --- a/cicecore/cicedynB/general/ice_step_mod.F90 +++ b/cicecore/cicedynB/general/ice_step_mod.F90 @@ -37,7 +37,7 @@ module ice_step_mod public :: step_therm1, step_therm2, step_dyn_horiz, step_dyn_ridge, & step_snow, prep_radiation, step_radiation, ocean_mixed_layer, & - update_state, biogeochemistry, save_init, step_dyn_wave + update_state, biogeochemistry, step_dyn_wave, step_prep !======================================================================= @@ -51,6 +51,8 @@ subroutine save_init use ice_state, only: aice, aicen, aice_init, aicen_init, & vicen, vicen_init, vsnon, vsnon_init + character(len=*), parameter :: subname = '(save_init)' + !----------------------------------------------------------------- ! Save the ice area passed to the coupler (so that history fields ! can be made consistent with coupler fields). @@ -64,6 +66,27 @@ subroutine save_init end subroutine save_init +!======================================================================= + + subroutine step_prep +! prep for step, called outside nblock loop + + use ice_flux, only: uatm, vatm, uatmT, vatmT + use ice_grid, only: grid_atm_dynu, grid_atm_dynv, grid_average_X2Y + + character(len=*), parameter :: subname = '(step_prep)' + + ! Save initial state + + call save_init + + ! Compute uatmT, vatmT + + call grid_average_X2Y('S',uatm,grid_atm_dynu,uatmT,'T') + call grid_average_X2Y('S',vatm,grid_atm_dynv,vatmT,'T') + + end subroutine step_prep + !======================================================================= ! ! Scales radiation fields computed on the previous time step. 
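! For reference, a minimal standalone sketch (not part of this patch) of
! the weighted average that grid_average_X2Y applies between grid
! locations; the toy arrays, sizes, and values below are invented for
! illustration and stand in for the model's block-decomposed fields.
! The 'SW' stencil shown is the U-to-T case used, e.g., when mapping
! work1/work2 from U back to strocnxT/strocnyT above.

program u2t_average_sketch
   implicit none
   integer, parameter :: dbl_kind = selected_real_kind(13)
   integer, parameter :: nx = 5, ny = 5
   real (kind=dbl_kind) :: worku(nx,ny)   ! source field at U (NE corner) points
   real (kind=dbl_kind) :: uarea(nx,ny)   ! U-cell areas used as weights
   real (kind=dbl_kind) :: workt(nx,ny)   ! destination field at T (center) points
   real (kind=dbl_kind) :: wtmp
   integer :: i, j

   worku = 1.0_dbl_kind
   uarea = 2.0_dbl_kind
   workt = 0.0_dbl_kind

   ! 'SW' stencil: each T point averages the four surrounding U points,
   ! weighted by the source cell areas (a land mask enters the same way
   ! in the masked 'S' variants)
   do j = 2, ny
      do i = 2, nx
         wtmp = uarea(i  ,j  ) + uarea(i-1,j  ) &
              + uarea(i  ,j-1) + uarea(i-1,j-1)
         if (wtmp /= 0.0_dbl_kind) &
            workt(i,j) = (worku(i  ,j  )*uarea(i  ,j  ) &
                        + worku(i-1,j  )*uarea(i-1,j  ) &
                        + worku(i  ,j-1)*uarea(i  ,j-1) &
                        + worku(i-1,j-1)*uarea(i-1,j-1)) / wtmp
      enddo
   enddo

   write(*,*) 'T-point value at (2,2):', workt(2,2)   ! uniform input: prints 1.0
end program u2t_average_sketch

! In the new interface the source and destination locations are explicit
! arguments, e.g. call grid_average_X2Y('S', uatm, grid_atm_dynu, uatmT, 'T'),
! instead of being fused into a single string such as 'U2TS'.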
@@ -170,7 +193,7 @@ subroutine step_therm1 (dt, iblk)
      use ice_domain, only: blocks_ice
      use ice_domain_size, only: ncat, nilyr, nslyr, n_iso, n_aero
      use ice_flux, only: frzmlt, sst, Tf, strocnxT, strocnyT, rside, fbot, Tbot, Tsnice, &
-          meltsn, melttn, meltbn, congeln, snoicen, uatm, vatm, fside, &
+          meltsn, melttn, meltbn, congeln, snoicen, uatmT, vatmT, fside, &
          wind, rhoa, potT, Qa, zlvl, zlvs, strax, stray, flatn, fsensn, fsurfn, fcondtopn, &
          flw, fsnow, fpond, sss, mlt_onset, frz_onset, fcondbotn, fcondbot, fsloss, &
          frain, Tair, strairxT, strairyT, fsurf, fcondtop, fsens, &
@@ -374,8 +397,8 @@ subroutine step_therm1 (dt, iblk)
                            aeroice     = aeroice  (:,:,:), &
                            isosno      = isosno   (:,:),   &
                            isoice      = isoice   (:,:),   &
-                           uatm        = uatm     (i,j, iblk), &
-                           vatm        = vatm     (i,j, iblk), &
+                           uatm        = uatmT    (i,j, iblk), &
+                           vatm        = vatmT    (i,j, iblk), &
                            wind        = wind     (i,j, iblk), &
                            zlvl        = zlvl     (i,j, iblk), &
                            zlvs        = zlvs     (i,j, iblk), &
@@ -1381,7 +1404,7 @@ subroutine ocean_mixed_layer (dt, iblk)
      use ice_arrays_column, only: Cdn_atm, Cdn_atm_ratio
      use ice_blocks, only: nx_block, ny_block
-      use ice_flux, only: sst, Tf, Qa, uatm, vatm, wind, potT, rhoa, zlvl, &
+      use ice_flux, only: sst, Tf, Qa, uatmT, vatmT, wind, potT, rhoa, zlvl, &
          frzmlt, fhocn, fswthru, flw, flwout_ocn, fsens_ocn, flat_ocn, evap_ocn, &
          alvdr_ocn, alidr_ocn, alvdf_ocn, alidf_ocn, swidf, swvdf, swidr, swvdr, &
          qdp, hmix, strairx_ocn, strairy_ocn, Tref_ocn, Qref_ocn
@@ -1464,8 +1487,8 @@ subroutine ocean_mixed_layer (dt, iblk)
            call icepack_atm_boundary(sfctype = 'ocn',          &
                                      Tsf     = sst  (i,j,iblk), &
                                      potT    = potT (i,j,iblk), &
-                                     uatm    = uatm (i,j,iblk), &
-                                     vatm    = vatm (i,j,iblk), &
+                                     uatm    = uatmT(i,j,iblk), &
+                                     vatm    = vatmT(i,j,iblk), &
                                      wind    = wind (i,j,iblk), &
                                      zlvl    = zlvl (i,j,iblk), &
                                      Qa      = Qa   (i,j,iblk), &
diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90
index 9dfc7b27b..ecca13bd9 100644
--- a/cicecore/cicedynB/infrastructure/ice_grid.F90
+++ b/cicecore/cicedynB/infrastructure/ice_grid.F90
@@ -57,7 +57,18 @@ module ice_grid
      bathymetry_file,   & ! input bathymetry for seabed stress
      bathymetry_format, & ! bathymetry file format (default or pop)
      grid_spacing ,     & ! default of 30.e3m or set by user in namelist
-      grid_system ,      & ! Underlying grid structure (i.e. B, C, CD, etc)
+      grid_ice ,         & ! Underlying model grid structure (A, B, C, CD)
+      grid_ice_thrm,     & ! ice model grid for thermo fields (T, U, N, E)
+      grid_ice_dynu,     & ! ice model grid for dyn U fields (T, U, N, E)
+      grid_ice_dynv,     & ! ice model grid for dyn V fields (T, U, N, E)
+      grid_atm ,         & ! atmos forcing grid structure (A, B, C, CD)
+      grid_atm_thrm,     & ! atmos forcing grid for thermo fields (T, U, N, E)
+      grid_atm_dynu,     & ! atmos forcing grid for dyn U fields (T, U, N, E)
+      grid_atm_dynv,     & ! atmos forcing grid for dyn V fields (T, U, N, E)
+      grid_ocn ,         & ! ocean forcing grid structure (A, B, C, CD)
+      grid_ocn_thrm,     & ! ocean forcing grid for thermo fields (T, U, N, E)
+      grid_ocn_dynu,     & ! ocean forcing grid for dyn U fields (T, U, N, E)
+      grid_ocn_dynv,     & ! ocean forcing grid for dyn V fields (T, U, N, E)
      grid_type            ! current options are rectangular (default),
                           ! displaced_pole, tripole, regional
@@ -180,6 +191,12 @@ module ice_grid
      logical (kind=log_kind), private :: &
         l_readCenter ! 
If anglet exist in grid file read it otherwise calculate it + interface grid_average_X2Y + module procedure grid_average_X2Y_base , & + grid_average_X2Y_userwghts, & + grid_average_X2Y_NEversion + end interface + !======================================================================= contains @@ -267,7 +284,7 @@ subroutine alloc_grid stat=ierr) if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory') - if (grid_system == 'CD') then + if (grid_ice == 'CD') then allocate( & ratiodxN (nx_block,ny_block,max_blocks), & ratiodyE (nx_block,ny_block,max_blocks), & @@ -534,7 +551,7 @@ subroutine init_grid2 enddo enddo - if (grid_system == 'CD') then + if (grid_ice == 'CD') then do j = jlo, jhi do i = ilo, ihi ratiodxN (i,j,iblk) = - dxn(i+1,j ,iblk) / dxn(i,j,iblk) @@ -2349,94 +2366,339 @@ end subroutine Tlatlon !======================================================================= ! Shifts quantities from one grid to another +! Constructs the shift based on the grid ! NOTE: Input array includes ghost cells that must be updated before ! calling this routine. ! ! author: T. Craig - subroutine grid_average_X2Y(X2Y,work1,work2) + subroutine grid_average_X2Y_base(type,work1,grid1,work2,grid2) character(len=*) , intent(in) :: & - X2Y + type, grid1, grid2 - real (kind=dbl_kind), intent(inout) :: & + real (kind=dbl_kind), intent(in) :: & work1(:,:,:) - real (kind=dbl_kind), intent(out), optional :: & + real (kind=dbl_kind), intent(out) :: & work2(:,:,:) ! local variables - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work2tmp + character(len=16) :: X2Y + + character(len=*), parameter :: subname = '(grid_average_X2Y_base)' + + if (trim(grid1) == trim(grid2)) then + work2 = work1 + else + X2Y = trim(grid1)//'2'//trim(grid2)//trim(type) + call grid_average_X2Y_1(X2Y,work1,work2) + endif + + end subroutine grid_average_X2Y_base + +!======================================================================= + +! Shifts quantities from one grid to another +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2Y_userwghts(type,work1,grid1,wght1,mask1,work2,grid2) + + character(len=*) , intent(in) :: & + type, grid1, grid2 + + real (kind=dbl_kind), intent(in) :: & + work1(:,:,:), & + wght1(:,:,:), & + mask1(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! local variables + + character(len=16) :: X2Y + + character(len=*), parameter :: subname = '(grid_average_X2Y_userwghts)' + + if (trim(grid1) == trim(grid2)) then + work2 = work1 + else + X2Y = trim(grid1)//'2'//trim(grid2)//trim(type) + call grid_average_X2Y_1f(X2Y,work1,wght1,mask1,work2) + endif + + end subroutine grid_average_X2Y_userwghts + +!======================================================================= + +! Shifts quantities from one grid to another +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2Y_NEversion(type,work1a,grid1a,work1b,grid1b,work2,grid2) + + character(len=*) , intent(in) :: & + type, grid1a, grid1b, grid2 + + real (kind=dbl_kind), intent(in) :: & + work1a(:,:,:), work1b(:,:,:) - character(len=*), parameter :: subname = '(grid_average_X2Y)' + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! 
local variables + + character(len=16) :: X2Y + + character(len=*), parameter :: subname = '(grid_average_X2Y_NEversion)' + + X2Y = trim(grid1a)//trim(grid1b)//'2'//trim(grid2)//trim(type) + + select case (trim(X2Y)) + + ! state masked + case('NE2US') + call grid_average_X2YS_2('NE2US',work1a,narea,npm,work1b,earea,epm,work2) + case('EN2US') + call grid_average_X2YS_2('NE2US',work1b,narea,npm,work1a,earea,epm,work2) + case('NE2TS') + call grid_average_X2YS_2('NE2TS',work1a,narea,npm,work1b,earea,epm,work2) + case('EN2TS') + call grid_average_X2YS_2('NE2TS',work1b,narea,npm,work1a,earea,epm,work2) + + case default + call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) + end select + + end subroutine grid_average_X2Y_NEversion + +!======================================================================= + +! Shifts quantities from one grid to another +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2Y_1(X2Y,work1,work2) + + character(len=*) , intent(in) :: & + X2Y + + real (kind=dbl_kind), intent(in) :: & + work1(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! local variables + + character(len=*), parameter :: subname = '(grid_average_X2Y_1)' select case (trim(X2Y)) ! flux unmasked case('T2UF') - call grid_average_X2YF('NE',work1,tarea,work2tmp,uarea) + call grid_average_X2YF('NE',work1,tarea,work2,uarea) case('T2EF') - call grid_average_X2YF('E' ,work1,tarea,work2tmp,earea) + call grid_average_X2YF('E' ,work1,tarea,work2,earea) case('T2NF') - call grid_average_X2YF('N' ,work1,tarea,work2tmp,narea) + call grid_average_X2YF('N' ,work1,tarea,work2,narea) case('U2TF') - call grid_average_X2YF('SW',work1,uarea,work2tmp,tarea) + call grid_average_X2YF('SW',work1,uarea,work2,tarea) case('U2EF') - call grid_average_X2YF('S' ,work1,uarea,work2tmp,earea) + call grid_average_X2YF('S' ,work1,uarea,work2,earea) case('U2NF') - call grid_average_X2YF('W' ,work1,uarea,work2tmp,narea) + call grid_average_X2YF('W' ,work1,uarea,work2,narea) case('E2TF') - call grid_average_X2YF('W' ,work1,earea,work2tmp,tarea) + call grid_average_X2YF('W' ,work1,earea,work2,tarea) case('E2UF') - call grid_average_X2YF('N' ,work1,earea,work2tmp,uarea) + call grid_average_X2YF('N' ,work1,earea,work2,uarea) case('E2NF') - call grid_average_X2YF('NW',work1,earea,work2tmp,narea) + call grid_average_X2YF('NW',work1,earea,work2,narea) case('N2TF') - call grid_average_X2YF('S' ,work1,narea,work2tmp,tarea) + call grid_average_X2YF('S' ,work1,narea,work2,tarea) case('N2UF') - call grid_average_X2YF('E' ,work1,narea,work2tmp,uarea) + call grid_average_X2YF('E' ,work1,narea,work2,uarea) case('N2EF') - call grid_average_X2YF('SE',work1,narea,work2tmp,earea) + call grid_average_X2YF('SE',work1,narea,work2,earea) ! 
state masked case('T2US') - call grid_average_X2YS('NE',work1,tarea,hm ,work2tmp) + call grid_average_X2YS('NE',work1,tarea,hm ,work2) case('T2ES') - call grid_average_X2YS('E' ,work1,tarea,hm ,work2tmp) + call grid_average_X2YS('E' ,work1,tarea,hm ,work2) case('T2NS') - call grid_average_X2YS('N' ,work1,tarea,hm ,work2tmp) + call grid_average_X2YS('N' ,work1,tarea,hm ,work2) case('U2TS') - call grid_average_X2YS('SW',work1,uarea,uvm,work2tmp) + call grid_average_X2YS('SW',work1,uarea,uvm,work2) case('U2ES') - call grid_average_X2YS('S' ,work1,uarea,uvm,work2tmp) + call grid_average_X2YS('S' ,work1,uarea,uvm,work2) case('U2NS') - call grid_average_X2YS('W' ,work1,uarea,uvm,work2tmp) + call grid_average_X2YS('W' ,work1,uarea,uvm,work2) case('E2TS') - call grid_average_X2YS('W' ,work1,earea,epm,work2tmp) + call grid_average_X2YS('W' ,work1,earea,epm,work2) case('E2US') - call grid_average_X2YS('N' ,work1,earea,epm,work2tmp) + call grid_average_X2YS('N' ,work1,earea,epm,work2) case('E2NS') - call grid_average_X2YS('NW',work1,earea,epm,work2tmp) + call grid_average_X2YS('NW',work1,earea,epm,work2) case('N2TS') - call grid_average_X2YS('S' ,work1,narea,npm,work2tmp) + call grid_average_X2YS('S' ,work1,narea,npm,work2) case('N2US') - call grid_average_X2YS('E' ,work1,narea,npm,work2tmp) + call grid_average_X2YS('E' ,work1,narea,npm,work2) case('N2ES') - call grid_average_X2YS('SE',work1,narea,npm,work2tmp) + call grid_average_X2YS('SE',work1,narea,npm,work2) + + ! state unmasked + case('T2UA') + call grid_average_X2YA('NE',work1,tarea,work2) + case('T2EA') + call grid_average_X2YA('E' ,work1,tarea,work2) + case('T2NA') + call grid_average_X2YA('N' ,work1,tarea,work2) + case('U2TA') + call grid_average_X2YA('SW',work1,uarea,work2) + case('U2EA') + call grid_average_X2YA('S' ,work1,uarea,work2) + case('U2NA') + call grid_average_X2YA('W' ,work1,uarea,work2) + case('E2TA') + call grid_average_X2YA('W' ,work1,earea,work2) + case('E2UA') + call grid_average_X2YA('N' ,work1,earea,work2) + case('E2NA') + call grid_average_X2YA('NW',work1,earea,work2) + case('N2TA') + call grid_average_X2YA('S' ,work1,narea,work2) + case('N2UA') + call grid_average_X2YA('E' ,work1,narea,work2) + case('N2EA') + call grid_average_X2YA('SE',work1,narea,work2) case default call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) end select - if (present(work2)) then - work2 = work2tmp - else - work1 = work2tmp - endif + end subroutine grid_average_X2Y_1 - end subroutine grid_average_X2Y +!======================================================================= + +! Shifts quantities from one grid to another +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2Y_1f(X2Y,work1,wght1,mask1,work2) + + character(len=*) , intent(in) :: & + X2Y + + real (kind=dbl_kind), intent(in) :: & + work1(:,:,:), & + wght1(:,:,:), & + mask1(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! local variables + + character(len=*), parameter :: subname = '(grid_average_X2Y_1f)' + + select case (trim(X2Y)) + +! don't support these for now, requires extra destination wght +! ! flux unmasked +! case('T2UF') +! call grid_average_X2YF('NE',work1,tarea,work2,uarea) +! case('T2EF') +! call grid_average_X2YF('E' ,work1,tarea,work2,earea) +! case('T2NF') +! call grid_average_X2YF('N' ,work1,tarea,work2,narea) +! case('U2TF') +! call grid_average_X2YF('SW',work1,uarea,work2,tarea) +! case('U2EF') +! 
call grid_average_X2YF('S' ,work1,uarea,work2,earea) +! case('U2NF') +! call grid_average_X2YF('W' ,work1,uarea,work2,narea) +! case('E2TF') +! call grid_average_X2YF('W' ,work1,earea,work2,tarea) +! case('E2UF') +! call grid_average_X2YF('N' ,work1,earea,work2,uarea) +! case('E2NF') +! call grid_average_X2YF('NW',work1,earea,work2,narea) +! case('N2TF') +! call grid_average_X2YF('S' ,work1,narea,work2,tarea) +! case('N2UF') +! call grid_average_X2YF('E' ,work1,narea,work2,uarea) +! case('N2EF') +! call grid_average_X2YF('SE',work1,narea,work2,earea) + + ! state masked + case('T2US') + call grid_average_X2YS('NE',work1,wght1,mask1,work2) + case('T2ES') + call grid_average_X2YS('E' ,work1,wght1,mask1,work2) + case('T2NS') + call grid_average_X2YS('N' ,work1,wght1,mask1,work2) + case('U2TS') + call grid_average_X2YS('SW',work1,wght1,mask1,work2) + case('U2ES') + call grid_average_X2YS('S' ,work1,wght1,mask1,work2) + case('U2NS') + call grid_average_X2YS('W' ,work1,wght1,mask1,work2) + case('E2TS') + call grid_average_X2YS('W' ,work1,wght1,mask1,work2) + case('E2US') + call grid_average_X2YS('N' ,work1,wght1,mask1,work2) + case('E2NS') + call grid_average_X2YS('NW',work1,wght1,mask1,work2) + case('N2TS') + call grid_average_X2YS('S' ,work1,wght1,mask1,work2) + case('N2US') + call grid_average_X2YS('E' ,work1,wght1,mask1,work2) + case('N2ES') + call grid_average_X2YS('SE',work1,wght1,mask1,work2) + + ! state unmasked + case('T2UA') + call grid_average_X2YA('NE',work1,wght1,work2) + case('T2EA') + call grid_average_X2YA('E' ,work1,wght1,work2) + case('T2NA') + call grid_average_X2YA('N' ,work1,wght1,work2) + case('U2TA') + call grid_average_X2YA('SW',work1,wght1,work2) + case('U2EA') + call grid_average_X2YA('S' ,work1,wght1,work2) + case('U2NA') + call grid_average_X2YA('W' ,work1,wght1,work2) + case('E2TA') + call grid_average_X2YA('W' ,work1,wght1,work2) + case('E2UA') + call grid_average_X2YA('N' ,work1,wght1,work2) + case('E2NA') + call grid_average_X2YA('NW',work1,wght1,work2) + case('N2TA') + call grid_average_X2YA('S' ,work1,wght1,work2) + case('N2UA') + call grid_average_X2YA('E' ,work1,wght1,work2) + case('N2EA') + call grid_average_X2YA('SE',work1,wght1,work2) + + case default + call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) + end select + + end subroutine grid_average_X2Y_1f !======================================================================= ! Shifts quantities from one grid to another @@ -2446,7 +2708,7 @@ end subroutine grid_average_X2Y ! ! author: T. 
Craig - subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) + subroutine grid_average_X2YS(dir,work1,wght1,mask1,work2) use ice_constants, only: c0 @@ -2455,7 +2717,7 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) real (kind=dbl_kind), intent(in) :: & work1(:,:,:), & - area1(:,:,:), & + wght1(:,:,:), & mask1(:,:,:) real (kind=dbl_kind), intent(out) :: & @@ -2489,15 +2751,15 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i, j, iblk)*area1(i, j, iblk) & - + mask1(i+1,j, iblk)*area1(i+1,j, iblk) & - + mask1(i, j+1,iblk)*area1(i, j+1,iblk) & - + mask1(i+1,j+1,iblk)*area1(i+1,j+1,iblk)) + wtmp = (mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i+1,j ,iblk)*wght1(i+1,j ,iblk) & + + mask1(i ,j+1,iblk)*wght1(i ,j+1,iblk) & + + mask1(i+1,j+1,iblk)*wght1(i+1,j+1,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i, j, iblk)*work1(i, j, iblk)*area1(i, j, iblk) & - + mask1(i+1,j, iblk)*work1(i+1,j, iblk)*area1(i+1,j, iblk) & - + mask1(i, j+1,iblk)*work1(i, j+1,iblk)*area1(i, j+1,iblk) & - + mask1(i+1,j+1,iblk)*work1(i+1,j+1,iblk)*area1(i+1,j+1,iblk)) & + work2(i,j,iblk) = (mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i+1,j ,iblk)*work1(i+1,j ,iblk)*wght1(i+1,j ,iblk) & + + mask1(i ,j+1,iblk)*work1(i ,j+1,iblk)*wght1(i ,j+1,iblk) & + + mask1(i+1,j+1,iblk)*work1(i+1,j+1,iblk)*wght1(i+1,j+1,iblk)) & / wtmp enddo enddo @@ -2514,15 +2776,15 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i, j, iblk)*area1(i, j, iblk) & - + mask1(i-1,j, iblk)*area1(i-1,j, iblk) & - + mask1(i, j-1,iblk)*area1(i, j-1,iblk) & - + mask1(i-1,j-1,iblk)*area1(i-1,j-1,iblk)) + wtmp = (mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + mask1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + mask1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i, j, iblk)*work1(i, j, iblk)*area1(i, j, iblk) & - + mask1(i-1,j, iblk)*work1(i-1,j, iblk)*area1(i-1,j, iblk) & - + mask1(i, j-1,iblk)*work1(i, j-1,iblk)*area1(i, j-1,iblk) & - + mask1(i-1,j-1,iblk)*work1(i-1,j-1,iblk)*area1(i-1,j-1,iblk)) & + work2(i,j,iblk) = (mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i-1,j ,iblk)*work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + mask1(i ,j-1,iblk)*work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + mask1(i-1,j-1,iblk)*work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & / wtmp enddo enddo @@ -2539,15 +2801,15 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i-1,j, iblk)*area1(i-1,j, iblk) & - + mask1(i, j, iblk)*area1(i, j, iblk) & - + mask1(i-1,j+1,iblk)*area1(i-1,j+1,iblk) & - + mask1(i, j+1,iblk)*area1(i ,j+1,iblk)) + wtmp = (mask1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i-1,j+1,iblk)*wght1(i-1,j+1,iblk) & + + mask1(i ,j+1,iblk)*wght1(i ,j+1,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i-1,j, iblk)*work1(i-1,j, iblk)*area1(i-1,j, iblk) & - + mask1(i, j, iblk)*work1(i, j, iblk)*area1(i, j, iblk) & - + mask1(i-1,j+1,iblk)*work1(i-1,j+1,iblk)*area1(i-1,j+1,iblk) & - + mask1(i, j+1,iblk)*work1(i, j+1,iblk)*area1(i ,j+1,iblk)) & + work2(i,j,iblk) = (mask1(i-1,j ,iblk)*work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i-1,j+1,iblk)*work1(i-1,j+1,iblk)*wght1(i-1,j+1,iblk) & + + mask1(i ,j+1,iblk)*work1(i 
,j+1,iblk)*wght1(i ,j+1,iblk)) & / wtmp enddo enddo @@ -2564,15 +2826,15 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i ,j-1,iblk)*area1(i ,j-1,iblk) & - + mask1(i+1,j-1,iblk)*area1(i+1,j-1,iblk) & - + mask1(i ,j ,iblk)*area1(i ,j, iblk) & - + mask1(i+1,j ,iblk)*area1(i+1,j, iblk)) + wtmp = (mask1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + mask1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i ,j-1,iblk)*work1(i ,j-1,iblk)*area1(i ,j-1,iblk) & - + mask1(i+1,j-1,iblk)*work1(i+1,j-1,iblk)*area1(i+1,j-1,iblk) & - + mask1(i ,j ,iblk)*work1(i ,j ,iblk)*area1(i ,j, iblk) & - + mask1(i+1,j ,iblk)*work1(i+1,j ,iblk)*area1(i+1,j, iblk)) & + work2(i,j,iblk) = (mask1(i ,j-1,iblk)*work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + mask1(i+1,j-1,iblk)*work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i+1,j ,iblk)*work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & / wtmp enddo enddo @@ -2589,11 +2851,11 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i, j,iblk)*area1(i, j,iblk) & - + mask1(i+1,j,iblk)*area1(i+1,j,iblk)) + wtmp = (mask1(i ,j,iblk)*wght1(i ,j,iblk) & + + mask1(i+1,j,iblk)*wght1(i+1,j,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i, j,iblk)*work1(i, j,iblk)*area1(i, j,iblk) & - + mask1(i+1,j,iblk)*work1(i+1,j,iblk)*area1(i+1,j,iblk)) & + work2(i,j,iblk) = (mask1(i ,j,iblk)*work1(i ,j,iblk)*wght1(i ,j,iblk) & + + mask1(i+1,j,iblk)*work1(i+1,j,iblk)*wght1(i+1,j,iblk)) & / wtmp enddo enddo @@ -2610,11 +2872,11 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i-1,j,iblk)*area1(i-1,j,iblk) & - + mask1(i, j,iblk)*area1(i, j,iblk)) + wtmp = (mask1(i-1,j,iblk)*wght1(i-1,j,iblk) & + + mask1(i ,j,iblk)*wght1(i ,j,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i-1,j,iblk)*work1(i-1,j,iblk)*area1(i-1,j,iblk) & - + mask1(i, j,iblk)*work1(i, j,iblk)*area1(i, j,iblk)) & + work2(i,j,iblk) = (mask1(i-1,j,iblk)*work1(i-1,j,iblk)*wght1(i-1,j,iblk) & + + mask1(i ,j,iblk)*work1(i ,j,iblk)*wght1(i ,j,iblk)) & / wtmp enddo enddo @@ -2631,11 +2893,11 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i,j, iblk)*area1(i,j, iblk) & - + mask1(i,j+1,iblk)*area1(i,j+1,iblk)) + wtmp = (mask1(i,j ,iblk)*wght1(i,j ,iblk) & + + mask1(i,j+1,iblk)*wght1(i,j+1,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i,j, iblk)*work1(i,j, iblk)*area1(i,j, iblk) & - + mask1(i,j+1,iblk)*work1(i,j+1,iblk)*area1(i,j+1,iblk)) & + work2(i,j,iblk) = (mask1(i,j ,iblk)*work1(i,j ,iblk)*wght1(i,j ,iblk) & + + mask1(i,j+1,iblk)*work1(i,j+1,iblk)*wght1(i,j+1,iblk)) & / wtmp enddo enddo @@ -2652,11 +2914,11 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i,j-1,iblk)*area1(i,j-1,iblk) & - + mask1(i,j, iblk)*area1(i,j, iblk)) + wtmp = (mask1(i,j-1,iblk)*wght1(i,j-1,iblk) & + + mask1(i,j ,iblk)*wght1(i,j ,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i,j-1,iblk)*work1(i,j-1,iblk)*area1(i,j-1,iblk) & - + mask1(i,j, iblk)*work1(i,j, iblk)*area1(i,j, iblk)) & + work2(i,j,iblk) = (mask1(i,j-1,iblk)*work1(i,j-1,iblk)*wght1(i,j-1,iblk) & + + mask1(i,j ,iblk)*work1(i,j 
,iblk)*wght1(i,j ,iblk)) & / wtmp enddo enddo @@ -2669,6 +2931,236 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) end subroutine grid_average_X2YS +!======================================================================= +! Shifts quantities from one grid to another +! State unmasked version, simple weighted averager +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2YA(dir,work1,wght1,work2) + + use ice_constants, only: c0 + + character(len=*) , intent(in) :: & + dir + + real (kind=dbl_kind), intent(in) :: & + work1(:,:,:), & + wght1(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! local variables + + integer (kind=int_kind) :: & + i, j, iblk, & + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind) :: & + wtmp + + type (block) :: & + this_block ! block information for current block + + character(len=*), parameter :: subname = '(grid_average_X2YA)' + + work2(:,:,:) = c0 + + select case (trim(dir)) + + case('NE') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i ,j ,iblk) & + + wght1(i+1,j ,iblk) & + + wght1(i ,j+1,iblk) & + + wght1(i+1,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk) & + + work1(i ,j+1,iblk)*wght1(i ,j+1,iblk) & + + work1(i+1,j+1,iblk)*wght1(i+1,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('SW') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i ,j ,iblk) & + + wght1(i-1,j ,iblk) & + + wght1(i ,j-1,iblk) & + + wght1(i-1,j-1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('NW') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i-1,j ,iblk) & + + wght1(i ,j ,iblk) & + + wght1(i-1,j+1,iblk) & + + wght1(i ,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i-1,j+1,iblk)*wght1(i-1,j+1,iblk) & + + work1(i ,j+1,iblk)*wght1(i ,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('SE') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i ,j-1,iblk) & + + wght1(i+1,j-1,iblk) & + + wght1(i ,j ,iblk) & + + wght1(i+1,j ,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + 
work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('E') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i ,j,iblk) & + + wght1(i+1,j,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i ,j,iblk)*wght1(i ,j,iblk) & + + work1(i+1,j,iblk)*wght1(i+1,j,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('W') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i-1,j,iblk) & + + wght1(i ,j,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i-1,j,iblk)*wght1(i-1,j,iblk) & + + work1(i ,j,iblk)*wght1(i ,j,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('N') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i,j ,iblk) & + + wght1(i,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i,j ,iblk)*wght1(i,j ,iblk) & + + work1(i,j+1,iblk)*wght1(i,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('S') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i,j-1,iblk) & + + wght1(i,j ,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i,j-1,iblk)*wght1(i,j-1,iblk) & + + work1(i,j ,iblk)*wght1(i,j ,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case default + call abort_ice(subname//'ERROR: unknown option '//trim(dir)) + end select + + end subroutine grid_average_X2YA + !======================================================================= ! Shifts quantities from one grid to another ! Flux masked, original implementation based on earlier t2u and u2t versions @@ -2677,7 +3169,7 @@ end subroutine grid_average_X2YS ! ! author: T. 
Craig - subroutine grid_average_X2YF(dir,work1,area1,work2,area2) + subroutine grid_average_X2YF(dir,work1,wght1,work2,wght2) use ice_constants, only: c0, p25, p5 @@ -2686,8 +3178,8 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) real (kind=dbl_kind), intent(in) :: & work1(:,:,:), & - area1(:,:,:), & - area2(:,:,:) + wght1(:,:,:), & + wght2(:,:,:) real (kind=dbl_kind), intent(out) :: & work2(:,:,:) @@ -2718,11 +3210,11 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p25 * & - (work1(i, j, iblk)*area1(i, j, iblk) & - + work1(i+1,j, iblk)*area1(i+1,j, iblk) & - + work1(i, j+1,iblk)*area1(i, j+1,iblk) & - + work1(i+1,j+1,iblk)*area1(i+1,j+1,iblk)) & - / area2(i, j, iblk) + (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk) & + + work1(i ,j+1,iblk)*wght1(i ,j+1,iblk) & + + work1(i+1,j+1,iblk)*wght1(i+1,j+1,iblk)) & + / wght2(i ,j ,iblk) enddo enddo enddo @@ -2739,11 +3231,11 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p25 * & - (work1(i, j, iblk)*area1(i, j, iblk) & - + work1(i-1,j, iblk)*area1(i-1,j, iblk) & - + work1(i, j-1,iblk)*area1(i, j-1,iblk) & - + work1(i-1,j-1,iblk)*area1(i-1,j-1,iblk)) & - / area2(i, j, iblk) + (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & + / wght2(i ,j ,iblk) enddo enddo enddo @@ -2760,11 +3252,11 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p25 * & - (work1(i-1,j, iblk)*area1(i-1,j, iblk) & - + work1(i, j, iblk)*area1(i, j, iblk) & - + work1(i-1,j+1,iblk)*area1(i-1,j+1,iblk) & - + work1(i, j+1,iblk)*area1(i ,j+1,iblk)) & - / area2(i, j, iblk) + (work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i-1,j+1,iblk)*wght1(i-1,j+1,iblk) & + + work1(i ,j+1,iblk)*wght1(i ,j+1,iblk)) & + / wght2(i ,j ,iblk) enddo enddo enddo @@ -2781,11 +3273,11 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p25 * & - (work1(i ,j-1,iblk)*area1(i ,j-1,iblk) & - + work1(i+1,j-1,iblk)*area1(i+1,j-1,iblk) & - + work1(i ,j ,iblk)*area1(i ,j, iblk) & - + work1(i+1,j ,iblk)*area1(i+1,j, iblk)) & - / area2(i, j, iblk) + (work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & + / wght2(i ,j ,iblk) enddo enddo enddo @@ -2802,9 +3294,9 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p5 * & - (work1(i, j,iblk)*area1(i, j,iblk) & - + work1(i+1,j,iblk)*area1(i+1,j,iblk)) & - / area2(i, j,iblk) + (work1(i ,j,iblk)*wght1(i ,j,iblk) & + + work1(i+1,j,iblk)*wght1(i+1,j,iblk)) & + / wght2(i ,j,iblk) enddo enddo enddo @@ -2821,9 +3313,9 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p5 * & - (work1(i-1,j,iblk)*area1(i-1,j,iblk) & - + work1(i, j,iblk)*area1(i, j,iblk)) & - / area2(i, j,iblk) + (work1(i-1,j,iblk)*wght1(i-1,j,iblk) & + + work1(i ,j,iblk)*wght1(i ,j,iblk)) & + / wght2(i ,j,iblk) enddo enddo enddo @@ -2840,9 +3332,9 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p5 * & - (work1(i,j, iblk)*area1(i,j, iblk) & - + 
work1(i,j+1,iblk)*area1(i,j+1,iblk)) & - / area2(i, j,iblk) + (work1(i,j ,iblk)*wght1(i,j ,iblk) & + + work1(i,j+1,iblk)*wght1(i,j+1,iblk)) & + / wght2(i ,j,iblk) enddo enddo enddo @@ -2859,9 +3351,9 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p5 * & - (work1(i,j-1,iblk)*area1(i,j-1,iblk) & - + work1(i,j, iblk)*area1(i,j, iblk)) & - / area2(i, j,iblk) + (work1(i,j-1,iblk)*wght1(i,j-1,iblk) & + + work1(i,j ,iblk)*wght1(i,j ,iblk)) & + / wght2(i ,j,iblk) enddo enddo enddo @@ -2903,6 +3395,102 @@ real(kind=dbl_kind) function grid_neighbor_min(field, i, j, grid_location) resul end function grid_neighbor_min +!======================================================================= +! Shifts quantities from one grid to another +! State masked version, simple weighted averager +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2YS_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,work2) + + use ice_constants, only: c0 + + character(len=*) , intent(in) :: & + dir + + real (kind=dbl_kind), intent(in) :: & + work1a(:,:,:), work1b(:,:,:), & + wght1a(:,:,:), wght1b(:,:,:), & + mask1a(:,:,:), mask1b(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! local variables + + integer (kind=int_kind) :: & + i, j, iblk, & + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind) :: & + wtmp + + type (block) :: & + this_block ! block information for current block + + character(len=*), parameter :: subname = '(grid_average_X2YS_2)' + + work2(:,:,:) = c0 + + select case (trim(dir)) + + case('NE2US') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + mask1a(i+1,j ,iblk)*wght1a(i+1,j ,iblk) & + + mask1b(i ,j ,iblk)*wght1b(i ,j ,iblk) & + + mask1b(i ,j+1,iblk)*wght1b(i ,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1a(i ,j ,iblk)*work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + mask1a(i+1,j ,iblk)*work1a(i+1,j ,iblk)*wght1a(i+1,j ,iblk) & + + mask1b(i ,j ,iblk)*work1b(i ,j ,iblk)*wght1b(i ,j ,iblk) & + + mask1b(i ,j+1,iblk)*work1b(i ,j+1,iblk)*wght1b(i ,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('NE2TS') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1a(i ,j-1,iblk)*wght1a(i ,j-1,iblk) & + + mask1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + mask1b(i-1,j ,iblk)*wght1b(i-1,j ,iblk) & + + mask1b(i ,j ,iblk)*wght1b(i ,j ,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1a(i ,j-1,iblk)*work1a(i ,j-1,iblk)*wght1a(i ,j-1,iblk) & + + mask1a(i ,j ,iblk)*work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + mask1b(i-1,j ,iblk)*work1b(i-1,j ,iblk)*wght1b(i-1,j ,iblk) & + + mask1b(i ,j ,iblk)*work1b(i ,j ,iblk)*wght1b(i ,j ,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case default + call abort_ice(subname//'ERROR: unknown option '//trim(dir)) + end select + + end subroutine grid_average_X2YS_2 !======================================================================= ! 
Compute the maximum of adjacent values of a field at specific indices, diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index 11836c073..e4f5a89e9 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -61,7 +61,7 @@ subroutine dumpfile(filename_spec) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_grid, only: grid_system + use ice_grid, only: grid_ice use ice_state, only: aicen, vicen, vsnon, trcrn, uvel, vvel character(len=char_len_long), intent(in), optional :: filename_spec @@ -167,7 +167,7 @@ subroutine dumpfile(filename_spec) call write_restart_field(nu_dump,0,stress12_2,'ruf8','stress12_2',1,diag) call write_restart_field(nu_dump,0,stress12_4,'ruf8','stress12_4',1,diag) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then call write_restart_field(nu_dump,0,stresspT ,'ruf8','stresspT' ,1,diag) call write_restart_field(nu_dump,0,stressmT ,'ruf8','stressmT' ,1,diag) call write_restart_field(nu_dump,0,stress12T,'ruf8','stress12T',1,diag) @@ -222,7 +222,7 @@ subroutine restartfile (ice_ic) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_grid, only: tmask, grid_type, grid_system + use ice_grid, only: tmask, grid_type, grid_ice use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & aice0, aicen, vicen, vsnon, trcrn, aice_init, uvel, vvel, & trcr_base, nt_strata, n_trcr_strata @@ -384,7 +384,7 @@ subroutine restartfile (ice_ic) ! tcraig, comment these out now to allow restarts from B grid file ! this will affect exact restart when we get to that point #if (1 == 0) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then call read_restart_field(nu_restart,0,stresspT,'ruf8', & 'stresspT' ,1,diag,field_loc_center,field_type_scalar) ! 
stresspT call read_restart_field(nu_restart,0,stressmT,'ruf8', & diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 index 0bba6e36e..e62a1f67f 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 @@ -137,7 +137,7 @@ subroutine init_restart_write(filename_spec) n_dic, n_don, n_fed, n_fep, nfsd use ice_arrays_column, only: oceanmixed_ice use ice_dyn_shared, only: kdyn - use ice_grid, only: grid_system + use ice_grid, only: grid_ice character(len=char_len_long), intent(in), optional :: filename_spec @@ -274,7 +274,7 @@ subroutine init_restart_write(filename_spec) call define_rest_field(ncid,'stress12_3',dims) call define_rest_field(ncid,'stress12_4',dims) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then call define_rest_field(ncid,'stresspT' ,dims) call define_rest_field(ncid,'stressmT' ,dims) call define_rest_field(ncid,'stress12T',dims) diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 2e5338fc0..12e4365e9 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -145,7 +145,7 @@ subroutine init_restart_write(filename_spec) n_dic, n_don, n_fed, n_fep, nfsd use ice_dyn_shared, only: kdyn use ice_arrays_column, only: oceanmixed_ice - use ice_grid, only: grid_system + use ice_grid, only: grid_ice logical (kind=log_kind) :: & solve_zsal, skl_bgc, z_tracers @@ -277,7 +277,7 @@ subroutine init_restart_write(filename_spec) call define_rest_field(File,'stress12_3',dims) call define_rest_field(File,'stress12_4',dims) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then call define_rest_field(File,'stresspT' ,dims) call define_rest_field(File,'stressmT' ,dims) call define_rest_field(File,'stress12T',dims) diff --git a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 index cd81de879..61f261bb2 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 @@ -153,7 +153,7 @@ subroutine ice_step use ice_state, only: trcrn use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave + biogeochemistry, step_prep, step_dyn_wave use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -210,7 +210,7 @@ subroutine ice_step call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! 
thermodynamics - call save_init + call step_prep !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 index ecd95e3c3..eb2bdcbf1 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 @@ -153,7 +153,7 @@ subroutine ice_step use ice_state, only: trcrn use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave + biogeochemistry, step_prep, step_dyn_wave use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -210,7 +210,7 @@ subroutine ice_step call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! thermodynamics - call save_init + call step_prep !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks diff --git a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 index e9ab0d7e4..365322dde 100644 --- a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 @@ -157,7 +157,7 @@ subroutine ice_step use ice_restoring, only: restore_ice, ice_HaloRestore use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave + biogeochemistry, step_prep, step_dyn_wave use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -213,7 +213,7 @@ subroutine ice_step call t_stopf ('cice_run_presc') endif - call save_init + call step_prep call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! thermodynamics diff --git a/cicecore/drivers/mct/cesm1/ice_import_export.F90 b/cicecore/drivers/mct/cesm1/ice_import_export.F90 index 7aa60dbdf..f88cc2b2d 100644 --- a/cicecore/drivers/mct/cesm1/ice_import_export.F90 +++ b/cicecore/drivers/mct/cesm1/ice_import_export.F90 @@ -9,7 +9,7 @@ module ice_import_export use ice_constants , only: field_type_vector, c100 use ice_constants , only: p001, p5 use ice_blocks , only: block, get_block, nx_block, ny_block - use ice_flux , only: strairxt, strairyt, strocnxt, strocnyt + use ice_flux , only: strairxT, strairyT, strocnxT, strocnyT use ice_flux , only: alvdr, alidr, alvdf, alidf, Tref, Qref, Uref use ice_flux , only: flat, fsens, flwout, evap, fswabs, fhocn, fswthru use ice_flux , only: fresh, fsalt, zlvl, uatm, vatm, potT, Tair, Qa @@ -65,6 +65,7 @@ subroutine ice_import( x2i ) type(block) :: this_block ! 
block information for current block integer,parameter :: nflds=17,nfldv=6,nfldb=27 real (kind=dbl_kind),allocatable :: aflds(:,:,:,:) + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: work real (kind=dbl_kind) :: workx, worky real (kind=dbl_kind) :: MIN_RAIN_TEMP, MAX_SNOW_TEMP character(len=char_len) :: tfrz_option @@ -472,10 +473,15 @@ subroutine ice_import( x2i ) call ice_HaloUpdate(vocn, halo_info, field_loc_center, field_type_scalar) call ice_HaloUpdate(ss_tltx, halo_info, field_loc_center, field_type_scalar) call ice_HaloUpdate(ss_tlty, halo_info, field_loc_center, field_type_scalar) - call grid_average_X2Y('T2UF',uocn) - call grid_average_X2Y('T2UF',vocn) - call grid_average_X2Y('T2UF',ss_tltx) - call grid_average_X2Y('T2UF',ss_tlty) + ! tcraig, moved to dynamics for consistency + !work = uocn + !call grid_average_X2Y('F',work,'T',uocn,'U') + !work = vocn + !call grid_average_X2Y('F',work,'T',vocn,'U') + !work = ss_tltx + !call grid_average_X2Y('F',work,'T',ss_tltx,'U') + !work = ss_tlty + !call grid_average_X2Y('F',work,'T',ss_tlty,'U') call t_stopf ('cice_imp_t2u') end if diff --git a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 index 81fa367c1..d4b100518 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 @@ -128,7 +128,7 @@ subroutine ice_step use ice_restoring, only: restore_ice, ice_HaloRestore use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave + biogeochemistry, step_prep, step_dyn_wave use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -186,7 +186,7 @@ subroutine ice_step endif #endif - call save_init + call step_prep call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! thermodynamics diff --git a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 index 10d42137f..0f7f1ebd4 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 @@ -10,7 +10,7 @@ module ice_import_export use ice_domain , only : nblocks, blocks_ice, halo_info, distrb_info use ice_domain_size , only : nx_global, ny_global, block_size_x, block_size_y, max_blocks, ncat use ice_exit , only : abort_ice - use ice_flux , only : strairxt, strairyt, strocnxt, strocnyt + use ice_flux , only : strairxT, strairyT, strocnxT, strocnyT use ice_flux , only : alvdr, alidr, alvdf, alidf, Tref, Qref, Uref use ice_flux , only : flat, fsens, flwout, evap, fswabs, fhocn, fswthru use ice_flux , only : fswthru_vdr, fswthru_vdf, fswthru_idr, fswthru_idf @@ -405,6 +405,7 @@ subroutine ice_import( importState, rc ) integer :: ilo, ihi, jlo, jhi !beginning and end of physical domain type(block) :: this_block ! 
block information for current block real (kind=dbl_kind),allocatable :: aflds(:,:,:,:) + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: work real (kind=dbl_kind) :: workx, worky real (kind=dbl_kind) :: MIN_RAIN_TEMP, MAX_SNOW_TEMP real (kind=dbl_kind) :: Tffresh @@ -801,10 +802,15 @@ subroutine ice_import( importState, rc ) call ice_HaloUpdate(vocn, halo_info, field_loc_center, field_type_scalar) call ice_HaloUpdate(ss_tltx, halo_info, field_loc_center, field_type_scalar) call ice_HaloUpdate(ss_tlty, halo_info, field_loc_center, field_type_scalar) - call grid_average_X2Y('T2UF',uocn) - call grid_average_X2Y('T2UF',vocn) - call grid_average_X2Y('T2UF',ss_tltx) - call grid_average_X2Y('T2UF',ss_tlty) + ! tcraig, moved to dynamics for consistency + !work = uocn + !call grid_average_X2Y('F',work,'T',uocn,'U') + !work = vocn + !call grid_average_X2Y('F',work,'T',vocn,'U') + !work = ss_tltx + !call grid_average_X2Y('F',work,'T',ss_tltx,'U') + !work = ss_tlty + !call grid_average_X2Y('F',work,'T',ss_tlty,'U') call t_stopf ('cice_imp_t2u') end if diff --git a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 index 2d3e22973..7da73db1d 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 @@ -165,7 +165,7 @@ subroutine ice_step use ice_restoring, only: restore_ice, ice_HaloRestore use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave, step_snow + biogeochemistry, step_prep, step_dyn_wave, step_snow use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -224,7 +224,7 @@ subroutine ice_step call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! thermodynamics - call save_init + call step_prep !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks diff --git a/cicecore/drivers/nuopc/dmi/cice_cap.info b/cicecore/drivers/nuopc/dmi/cice_cap.info index 4b2d6d65f..2faa623ec 100644 --- a/cicecore/drivers/nuopc/dmi/cice_cap.info +++ b/cicecore/drivers/nuopc/dmi/cice_cap.info @@ -940,10 +940,15 @@ module cice_cap ! call ice_HaloUpdate(vocn, halo_info, field_loc_center, field_type_scalar) ! call ice_HaloUpdate(ss_tltx, halo_info, field_loc_center, field_type_scalar) ! call ice_HaloUpdate(ss_tlty, halo_info, field_loc_center, field_type_scalar) - call grid_average_X2Y('T2UF',uocn) - call grid_average_X2Y('T2UF',vocn) - call grid_average_X2Y('T2UF',ss_tltx) - call grid_average_X2Y('T2UF',ss_tlty) + ! 
tcraig, moved to dynamics for consistency + !work = uocn + !call grid_average_X2Y('F',work,'T',uocn,'U') + !work = vocn + !call grid_average_X2Y('F',work,'T',vocn,'U') + !work = ss_tltx + !call grid_average_X2Y('F',work,'T',ss_tltx,'U') + !work = ss_tlty + !call grid_average_X2Y('F',work,'T',ss_tlty,'U') end subroutine subroutine CICE_Export(st,rc) diff --git a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 index 0fde18e04..27d61db84 100644 --- a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 @@ -157,7 +157,7 @@ subroutine ice_step use ice_restoring, only: restore_ice, ice_HaloRestore use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave, step_snow + biogeochemistry, step_prep, step_dyn_wave, step_snow use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -216,7 +216,7 @@ subroutine ice_step call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! thermodynamics - call save_init + call step_prep !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks diff --git a/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 b/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 index 73ff10cad..5a4b3d54e 100644 --- a/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 +++ b/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 @@ -29,7 +29,7 @@ program gridavgchk use ice_exit, only: abort_ice, end_run use ice_global_reductions, only: global_minval, global_maxval use ice_grid, only: grid_average_X2Y,tarea,uarea,narea,earea,tmask,umask,nmask,emask, & - hm,uvm + hm,uvm,epm,npm implicit none @@ -38,14 +38,22 @@ program gridavgchk integer(int_kind) :: blockID, numBlocks type (block) :: this_block - real(dbl_kind) ,allocatable :: array1x(:,:,:), array1y(:,:,:) - real(dbl_kind) ,allocatable :: array2x(:,:,:), array2y(:,:,:) - real(dbl_kind) ,allocatable :: array3x(:,:,:), array3y(:,:,:) - real(dbl_kind) :: amin, amax, errtol, errx, erry + real(dbl_kind) ,allocatable :: array1x(:,:,:), array1y(:,:,:) ! input + real(dbl_kind) ,allocatable :: arraysx(:,:,:), arraysy(:,:,:) ! extra input for NE2T, NE2U + real(dbl_kind) ,allocatable :: array2x(:,:,:), array2y(:,:,:) ! output + real(dbl_kind) ,allocatable :: array3x(:,:,:), array3y(:,:,:) ! error + real(dbl_kind) ,allocatable :: wght1(:,:,:), mask1(:,:,:), array2z(:,:,:) ! extra for explicit + real(dbl_kind) :: amin, amax, fmax, errtol, errx, erry real(dbl_kind) :: deltax0, deltay0, deltax, deltay - integer(int_kind) :: npes, ierr, ntask, ntest, maxtest, navg - integer(int_kind), parameter :: maxgroup = 3 + integer(int_kind), parameter :: maxtests = 3 + integer(int_kind), parameter :: maxgroups = 4 + integer(int_kind) :: numtests_cnt, numgroups_cnt + character(len=16) :: numtests_name(maxtests) + integer(int_kind) :: nbase(maxgroups) + character(len=16) :: numgroups_name(maxgroups) + real(dbl_kind) :: errmax(maxgroups,maxtests) + integer(int_kind) :: npes, ierr, ntask, testcnt, tottest, mtests, cnt, ng integer(int_kind) :: errorflag0,gflag integer(int_kind), allocatable :: errorflag(:) character(len=32), allocatable :: stringflag(:) @@ -67,55 +75,122 @@ program gridavgchk call CICE_Initialize npes = get_num_procs() - navg = 12 - if (.not. landblockelim) navg=24 ! 
no land block elimination, can test F mappings - allocate(avgname(navg)) - allocate(errtolconst(navg)) - allocate(errtolijind(navg)) - allocate(errtolarea(navg)) - maxtest = maxgroup * navg - allocate(errorflag(maxtest)) - allocate(stringflag(maxtest)) - allocate(dmask(nx_block,ny_block,max_blocks,navg)) - - errtolconst(1:12) = 0.0001_dbl_kind - errtolijind(1:12) = 0.51_dbl_kind - errtolarea (1:12) = 0.75_dbl_kind + numtests_name(1) = 'constant' + numtests_name(2) = 'ijindex' + numtests_name(3) = 'area' + numgroups_name(1) = 'X2YA' + numgroups_name(2) = 'X2YS' + numgroups_name(3) = 'X2YF' + numgroups_name(4) = 'NE2YS' + nbase(1) = 16 + nbase(2) = 16 + nbase(3) = 0 + nbase(4) = 4 + errmax = c0 + + if (.not. landblockelim) nbase(3) = nbase(2) ! no land block elimination, can test F mappings + mtests = nbase(1) + nbase(2) + nbase(3) + nbase(4) + + allocate(avgname(mtests)) + allocate(errtolconst(mtests)) + allocate(errtolijind(mtests)) + allocate(errtolarea(mtests)) + errtolconst = c0 + errtolijind = c0 + errtolarea = c0 + tottest = maxtests * mtests + allocate(errorflag(tottest)) + allocate(stringflag(tottest)) + allocate(dmask(nx_block,ny_block,max_blocks,mtests)) + + n = 0 + errtolconst(n+1:n+nbase(1)) = 0.00001_dbl_kind + errtolijind(n+1:n+nbase(1)) = 0.10_dbl_kind + errtolarea (n+1:n+nbase(1)) = 0.04_dbl_kind if (nx_global > 200 .and. ny_global > 200) then - errtolarea (1:12) = 0.20_dbl_kind + errtolijind(n+1:n+nbase(1)) = 0.03_dbl_kind + errtolarea (n+1:n+nbase(1)) = 0.003_dbl_kind endif - avgname(1) = 'T2US'; dmask(:,:,:,1) = umask(:,:,:) - avgname(2) = 'T2NS'; dmask(:,:,:,2) = nmask(:,:,:) - avgname(3) = 'T2ES'; dmask(:,:,:,3) = emask(:,:,:) - avgname(4) = 'U2TS'; dmask(:,:,:,4) = tmask(:,:,:) - avgname(5) = 'U2NS'; dmask(:,:,:,5) = nmask(:,:,:) - avgname(6) = 'U2ES'; dmask(:,:,:,6) = emask(:,:,:) - avgname(7) = 'N2TS'; dmask(:,:,:,7) = tmask(:,:,:) - avgname(8) = 'N2US'; dmask(:,:,:,8) = umask(:,:,:) - avgname(9) = 'N2ES'; dmask(:,:,:,9) = emask(:,:,:) - avgname(10) = 'E2TS'; dmask(:,:,:,10) = tmask(:,:,:) - avgname(11) = 'E2US'; dmask(:,:,:,11) = umask(:,:,:) - avgname(12) = 'E2NS'; dmask(:,:,:,12) = nmask(:,:,:) - if (navg > 12) then - errtolconst(13:24) = 0.008_dbl_kind - errtolijind(13:24) = 0.65_dbl_kind - errtolarea (13:24) = 0.55_dbl_kind + n=n+1; avgname(n) = 'T2TA' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'T2UA' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'T2NA' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'T2EA' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'U2TA' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'U2UA' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'U2NA' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'U2EA' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'N2TA' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'N2UA' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'N2NA' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'N2EA' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'E2TA' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'E2UA' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'E2NA' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'E2EA' ; dmask(:,:,:,n) = emask(:,:,:) + + errtolconst(n+1:n+nbase(2)) = 0.00001_dbl_kind + errtolijind(n+1:n+nbase(2)) = 0.51_dbl_kind + errtolarea (n+1:n+nbase(2)) = 0.19_dbl_kind if (nx_global > 200 .and. 
ny_global > 200) then - errtolijind(13:24) = 0.25_dbl_kind - errtolarea (13:24) = 0.15_dbl_kind + errtolarea (n+1:n+nbase(2)) = 0.06_dbl_kind endif - avgname(13) = 'T2UF'; dmask(:,:,:,13) = umask(:,:,:) - avgname(14) = 'T2NF'; dmask(:,:,:,14) = nmask(:,:,:) - avgname(15) = 'T2EF'; dmask(:,:,:,15) = emask(:,:,:) - avgname(16) = 'U2TF'; dmask(:,:,:,16) = tmask(:,:,:) - avgname(17) = 'U2NF'; dmask(:,:,:,17) = nmask(:,:,:) - avgname(18) = 'U2EF'; dmask(:,:,:,18) = emask(:,:,:) - avgname(19) = 'N2TF'; dmask(:,:,:,19) = tmask(:,:,:) - avgname(20) = 'N2UF'; dmask(:,:,:,20) = umask(:,:,:) - avgname(21) = 'N2EF'; dmask(:,:,:,21) = emask(:,:,:) - avgname(22) = 'E2TF'; dmask(:,:,:,22) = tmask(:,:,:) - avgname(23) = 'E2UF'; dmask(:,:,:,23) = umask(:,:,:) - avgname(24) = 'E2NF'; dmask(:,:,:,24) = nmask(:,:,:) + n=n+1; avgname(n) = 'T2TS' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'T2US' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'T2NS' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'T2ES' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'U2TS' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'U2US' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'U2NS' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'U2ES' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'N2TS' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'N2US' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'N2NS' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'N2ES' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'E2TS' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'E2US' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'E2NS' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'E2ES' ; dmask(:,:,:,n) = emask(:,:,:) + + if (nbase(3) > 0) then + errtolconst(n+1:n+nbase(3)) = 0.0065_dbl_kind + errtolijind(n+1:n+nbase(3)) = 0.65_dbl_kind + errtolarea (n+1:n+nbase(3)) = 0.04_dbl_kind + if (nx_global > 200 .and. ny_global > 200) then + errtolijind(n+1:n+nbase(3)) = 0.22_dbl_kind + errtolarea (n+1:n+nbase(3)) = 0.004_dbl_kind + endif + n=n+1; avgname(n) = 'T2TF' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'T2UF' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'T2NF' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'T2EF' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'U2TF' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'U2UF' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'U2NF' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'U2EF' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'N2TF' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'N2UF' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'N2NF' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'N2EF' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'E2TF' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'E2UF' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'E2NF' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'E2EF' ; dmask(:,:,:,n) = emask(:,:,:) + endif + + errtolconst(n+1:n+nbase(4)) = 0.00001_dbl_kind + errtolijind(n+1:n+nbase(4)) = 0.51_dbl_kind + errtolarea (n+1:n+nbase(4)) = 0.12_dbl_kind + if (nx_global > 200 .and. 
ny_global > 200) then + errtolijind(n+1:n+nbase(4)) = 0.26_dbl_kind + errtolarea (n+1:n+nbase(4)) = 0.03_dbl_kind + endif + n=n+1; avgname(n) = 'NE2TS'; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'EN2TS'; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'NE2US'; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'EN2US'; dmask(:,:,:,n) = umask(:,:,:) + + if (n /= mtests) then + call abort_ice(subname//' n ne mtests') endif !----------------------------------------------------------------- @@ -135,7 +210,7 @@ program gridavgchk write(6,*) ' block_size_x = ',block_size_x write(6,*) ' block_size_y = ',block_size_y write(6,*) ' nblocks_tot = ',nblocks_tot - write(6,*) ' maxtest = ',maxtest + write(6,*) ' tottest = ',tottest write(6,*) ' ' endif @@ -151,37 +226,59 @@ program gridavgchk allocate(array1x(nx_block,ny_block,max_blocks)) allocate(array1y(nx_block,ny_block,max_blocks)) + allocate(arraysx(nx_block,ny_block,max_blocks)) + allocate(arraysy(nx_block,ny_block,max_blocks)) allocate(array2x(nx_block,ny_block,max_blocks)) allocate(array2y(nx_block,ny_block,max_blocks)) allocate(array3x(nx_block,ny_block,max_blocks)) allocate(array3y(nx_block,ny_block,max_blocks)) + allocate(wght1 (nx_block,ny_block,max_blocks)) + allocate(mask1 (nx_block,ny_block,max_blocks)) + allocate(array2z(nx_block,ny_block,max_blocks)) call ice_distributionGet(distrb_info, numLocalBlocks = numBlocks) - ntest = 0 + testcnt = 0 !---------------- ! Test constant field !---------------- + numtests_cnt = 1 if (my_task == master_task) then write(6,*) '' - write(6,*) 'TEST constant field' + write(6,*) 'TEST constant field, test ',numtests_cnt endif array1x = testconst + arraysx = testconst - do n = 1,navg - ntest = ntest + 1 + do n = 1,mtests + testcnt = testcnt + 1 - stringflag(ntest) = trim(avgname(n))//' const' + cnt = 0 + do ng = 1,maxgroups + if (n > cnt) numgroups_cnt = ng + cnt = cnt + nbase(ng) + enddo + + errtol = errtolconst(n) + stringflag(testcnt) = trim(avgname(n))//' const' if (my_task == master_task) then write(6,*) '' - write(6,*) trim(stringflag(ntest)),' test ',ntest,errtolconst(n) + write(6,110) trim(stringflag(testcnt))//' test ',testcnt,errtol,numtests_cnt,numgroups_cnt endif array2x = c0 - call grid_average_X2Y(trim(avgname(n)),array1x,array2x) + if (len_trim(avgname(n)) == 4) then + call grid_average_X2Y(avgname(n)(4:4),array1x,avgname(n)(1:1),array2x,avgname(n)(3:3)) + else ! len_trim(avgname(n)) == 5 + if (avgname(n)(1:2) == 'NE') then + call grid_average_X2Y(avgname(n)(5:5),array1x,avgname(n)(1:1),arraysx,avgname(n)(2:2),array2x,avgname(n)(4:4)) + else ! EN, swap needed + call grid_average_X2Y(avgname(n)(5:5),arraysx,avgname(n)(1:1),array1x,avgname(n)(2:2),array2x,avgname(n)(4:4)) + endif + endif array3x = c0 do iblock = 1,numBlocks @@ -193,17 +290,16 @@ program gridavgchk je = this_block%jhi do j = jb,je jglob = this_block%j_glob(j) - errtol = errtolconst(n) * testconst do i = ib,ie iglob = this_block%i_glob(i) - array3x(i,j,iblock) = array2x(i,j,iblock) - array1x(i,j,iblock) + array3x(i,j,iblock) = (array2x(i,j,iblock) - testconst)/testconst ! if array2 is c0, then there are no valid surrounding points and ignore it if (array2x(i,j,iblock) == c0) array3x(i,j,iblock) = c0 errx = abs(array3x(i,j,iblock)) ! flag points that are active and error numerically if (dmask(i,j,iblock,n) .and. errx > errtol .and. 
array2x(i,j,iblock) /= c0) then - errorflag(ntest) = failflag - errorflag0 = failflag + errorflag(testcnt) = failflag + errorflag0 = failflag write(100+my_task,*) '' write(100+my_task,100) 'error const '//trim(avgname(n)),my_task,iblock,i,j,iglob,jglob write(100+my_task,101) 'value, error ',array2x(i,j,iblock),errx @@ -211,6 +307,8 @@ program gridavgchk enddo enddo enddo + gflag = global_maxval(errorflag(testcnt), MPI_COMM_ICE) + if (my_task == master_task .and. gflag == failflag) write(6,*) ' *** FAIL ***' amin = global_minval(array1x, distrb_info) amax = global_maxval(array1x, distrb_info) if (my_task == master_task) write(6,102) 'input min/max = ',amin,amax @@ -220,17 +318,24 @@ program gridavgchk amin = global_minval(array3x, distrb_info, dmask(:,:,:,n)) amax = global_maxval(array3x, distrb_info, dmask(:,:,:,n)) if (my_task == master_task) write(6,102) 'error min/max = ',amin,amax + amax = global_maxval(abs(array3x), distrb_info, dmask(:,:,:,n)) + errmax(numgroups_cnt,numtests_cnt) = max(errmax(numgroups_cnt,numtests_cnt), amax) enddo !---------------- ! Test global i, j fields + ! for NE2T, NE2U, inputs should result in exact calcs !---------------- + numtests_cnt = 2 if (my_task == master_task) then write(6,*) '' - write(6,*) 'TEST global i, j fields' + write(6,*) 'TEST global i, j fields, test ',numtests_cnt endif + array1x = -999. + arraysx = -999. + do iblock = 1,numBlocks call ice_distributionGetBlockID(distrb_info, iblock, blockID) this_block = get_block(blockID, blockID) @@ -246,10 +351,7 @@ program gridavgchk enddo enddo - call ice_HaloUpdate(array1x, halo_info, field_loc_center, field_type_scalar, fillval) - call ice_HaloUpdate(array1y, halo_info, field_loc_center, field_type_scalar, fillval) - - ! Overwrite the i wraparound points to deal with i/j index average on wraparound + ! Fill in ghost cells with locally appropriate value do iblock = 1,numBlocks call ice_distributionGetBlockID(distrb_info, iblock, blockID) this_block = get_block(blockID, blockID) @@ -257,21 +359,38 @@ program gridavgchk ie = this_block%ihi jb = this_block%jlo je = this_block%jhi - do j = 1,ny_block + ! skip corners do i = ib,ie - if (this_block%i_glob(i) == 1 ) array1x(i-1,j,iblock) = 0 - if (this_block%i_glob(i) == nx_global) array1x(i+1,j,iblock) = nx_global+1 + array1x(i,jb-1,iblock) = array1x(i,jb,iblock) + array1y(i,jb-1,iblock) = array1y(i,jb,iblock) - 1.0_dbl_kind + array1x(i,je+1,iblock) = array1x(i,je,iblock) + array1y(i,je+1,iblock) = array1y(i,je,iblock) + 1.0_dbl_kind enddo + ! set corners + do j = 1,ny_block + array1x(ib-1,j,iblock) = array1x(ib,j,iblock) - 1.0_dbl_kind + array1y(ib-1,j,iblock) = array1y(ib,j,iblock) + array1x(ie+1,j,iblock) = array1x(ie,j,iblock) + 1.0_dbl_kind + array1y(ie+1,j,iblock) = array1y(ie,j,iblock) enddo enddo - do n = 1,navg - ntest = ntest + 1 + arraysx = array1x + 0.5_dbl_kind + arraysy = array1y - 0.5_dbl_kind - stringflag(ntest) = trim(avgname(n))//' ijind' + do n = 1,mtests + testcnt = testcnt + 1 + + cnt = 0 + do ng = 1,maxgroups + if (n > cnt) numgroups_cnt = ng + cnt = cnt + nbase(ng) + enddo + + stringflag(testcnt) = trim(avgname(n))//' ijind' if (my_task == master_task) then write(6,*) '' - write(6,*) trim(stringflag(ntest)),' test ',ntest,errtolijind(n) + write(6,110) trim(stringflag(testcnt))//' test ',testcnt,errtolijind(n),numtests_cnt,numgroups_cnt endif deltax0 = 0.0_dbl_kind @@ -279,30 +398,44 @@ program gridavgchk if (avgname(n)(1:3) == 'T2U' .or. & avgname(n)(1:3) == 'T2E' .or. & avgname(n)(1:3) == 'N2U' .or. 
& - avgname(n)(1:3) == 'N2E') then + avgname(n)(1:3) == 'N2E' .or. & + avgname(n)(1:4) == 'NE2U'.or. & + avgname(n)(1:4) == 'EN2U') then deltax0 = 0.5_dbl_kind elseif (avgname(n)(1:3) == 'U2T' .or. & avgname(n)(1:3) == 'U2N' .or. & avgname(n)(1:3) == 'E2T' .or. & - avgname(n)(1:3) == 'E2N') then + avgname(n)(1:3) == 'E2N' ) then deltax0 = -0.5_dbl_kind endif if (avgname(n)(1:3) == 'T2U' .or. & avgname(n)(1:3) == 'T2N' .or. & avgname(n)(1:3) == 'E2U' .or. & - avgname(n)(1:3) == 'E2N') then + avgname(n)(1:3) == 'E2N' ) then deltay0 = 0.5_dbl_kind elseif (avgname(n)(1:3) == 'U2T' .or. & avgname(n)(1:3) == 'U2E' .or. & avgname(n)(1:3) == 'N2T' .or. & - avgname(n)(1:3) == 'N2E') then + avgname(n)(1:3) == 'N2E' .or. & + avgname(n)(1:4) == 'NE2T'.or. & + avgname(n)(1:4) == 'EN2T') then deltay0 = -0.5_dbl_kind endif array2x = c0 array2y = c0 - call grid_average_X2Y(trim(avgname(n)),array1x,array2x) - call grid_average_X2Y(trim(avgname(n)),array1y,array2y) + if (len_trim(avgname(n)) == 4) then + call grid_average_X2Y(avgname(n)(4:4),array1x,avgname(n)(1:1),array2x,avgname(n)(3:3)) + call grid_average_X2Y(avgname(n)(4:4),array1y,avgname(n)(1:1),array2y,avgname(n)(3:3)) + else ! len_trim(avgname(n)) == 5 + if (avgname(n)(1:2) == 'NE') then + call grid_average_X2Y(avgname(n)(5:5),array1x,avgname(n)(1:1),arraysx,avgname(n)(2:2),array2x,avgname(n)(4:4)) + call grid_average_X2Y(avgname(n)(5:5),array1y,avgname(n)(1:1),arraysy,avgname(n)(2:2),array2y,avgname(n)(4:4)) + else ! EN, swap needed array1 is N, arrays is E + call grid_average_X2Y(avgname(n)(5:5),arraysx,avgname(n)(1:1),array1x,avgname(n)(2:2),array2x,avgname(n)(4:4)) + call grid_average_X2Y(avgname(n)(5:5),arraysy,avgname(n)(1:1),array1y,avgname(n)(2:2),array2y,avgname(n)(4:4)) + endif + endif array3x = c0 errtol = errtolijind(n) @@ -329,8 +462,8 @@ program gridavgchk erry = abs(array3y(i,j,iblock)) ! flag points that are active and error numerically if (dmask(i,j,iblock,n) .and. (errx > errtol .or. erry > errtol)) then - errorflag(ntest) = failflag - errorflag0 = failflag + errorflag(testcnt) = failflag + errorflag0 = failflag write(100+my_task,*) '' write(100+my_task,100) 'error ijind '//trim(avgname(n)),my_task,iblock,i,j,iglob,jglob write(100+my_task,101) 'array2x, err',array2x(i,j,iblock),errx @@ -356,6 +489,8 @@ program gridavgchk enddo enddo + gflag = global_maxval(errorflag(testcnt), MPI_COMM_ICE) + if (my_task == master_task .and. gflag == failflag) write(6,*) ' *** FAIL ***' amin = global_minval(array1x, distrb_info) amax = global_maxval(array1x, distrb_info) if (my_task == master_task) write(6,102) 'i_glob min/max = ',amin,amax @@ -374,6 +509,10 @@ program gridavgchk amin = global_minval(array3y, distrb_info, dmask(:,:,:,n)) amax = global_maxval(array3y, distrb_info, dmask(:,:,:,n)) if (my_task == master_task) write(6,102) 'j error min/max = ',amin,amax + amax = global_maxval(abs(array3x), distrb_info, dmask(:,:,:,n)) + errmax(numgroups_cnt,numtests_cnt) = max(errmax(numgroups_cnt,numtests_cnt), amax) + amax = global_maxval(abs(array3y), distrb_info, dmask(:,:,:,n)) + errmax(numgroups_cnt,numtests_cnt) = max(errmax(numgroups_cnt,numtests_cnt), amax) enddo @@ -381,25 +520,113 @@ program gridavgchk ! 
Test area fields !---------------- + numtests_cnt = 3 if (my_task == master_task) then write(6,*) '' - write(6,*) 'TEST area fields' + write(6,*) 'TEST area fields, test ',numtests_cnt endif - do n = 1,navg - ntest = ntest + 1 + do n = 1,mtests + testcnt = testcnt + 1 - stringflag(ntest) = trim(avgname(n))//' area' + cnt = 0 + do ng = 1,maxgroups + if (n > cnt) numgroups_cnt = ng + cnt = cnt + nbase(ng) + enddo + + stringflag(testcnt) = trim(avgname(n))//' area' if (my_task == master_task) then write(6,*) '' - write(6,*) trim(stringflag(ntest)),' test ',ntest,errtolarea(n) + write(6,110) trim(stringflag(testcnt))//' test ',testcnt,errtolarea(n),numtests_cnt,numgroups_cnt + endif + + array1x = -999. + arraysx = -999. + mask1 = -999. + wght1 = -999. + if (avgname(n)(1:2) == 'T2') then + array1x = tarea + wght1 = tarea + mask1 = hm + elseif (avgname(n)(1:2) == 'U2') then + array1x = uarea + wght1 = uarea + mask1 = uvm + elseif (avgname(n)(1:2) == 'E2') then + array1x = earea + wght1 = earea + mask1 = epm + elseif (avgname(n)(1:2) == 'N2') then + array1x = narea + wght1 = narea + mask1 = npm + elseif (avgname(n)(1:3) == 'NE2') then + array1x = narea + arraysx = earea + elseif (avgname(n)(1:3) == 'EN2') then + array1x = earea + arraysx = narea + else + call abort_ice(subname//' avgname not supported 1x = '//trim(avgname(n))) + endif + + array2y = -999. + if (avgname(n)(2:3) == '2T' .or. & + avgname(n)(3:4) == '2T') then + array2y = tarea + elseif (avgname(n)(2:3) == '2U' .or. & + avgname(n)(3:4) == '2U') then + array2y = uarea + elseif (avgname(n)(2:3) == '2E') then + array2y = earea + elseif (avgname(n)(2:3) == '2N') then + array2y = narea + else + call abort_ice(subname//' avgname not supported 2y = '//trim(avgname(n))) endif - array1x = tarea ! input - array2y = uarea ! result - call ice_HaloUpdate(array1x, halo_info, field_loc_center, field_type_scalar, fillval) array2x = c0 - call grid_average_X2Y(trim(avgname(n)),array1x,array2x) + if (len_trim(avgname(n)) == 4) then +! call grid_average_X2Y(trim(avgname(n)),array1x,array2x) + call grid_average_X2Y(avgname(n)(4:4),array1x,avgname(n)(1:1),array2x,avgname(n)(3:3)) + ! ------ + ! Extra Explicit Calc Test + ! ------ + if (avgname(n)(2:2) == '2' .and. (avgname(n)(4:4) == 'S' .or. avgname(n)(4:4) == 'A')) then + stringflag(testcnt) = trim(stringflag(testcnt))//' + explicit' + if (avgname(n)(4:4) == 'S') then + ! test direct mapping compared to S, array1x*wght1*mask1 where wght1=area and mask1=mask + call grid_average_X2Y(avgname(n)(4:4),array1x,avgname(n)(1:1),wght1,mask1,array2z,avgname(n)(3:3)) + elseif (avgname(n)(4:4) == 'A') then + ! test direct mapping compared to A, array1x*wght1 where wght1=area and mask1=1.0 + mask1 = c1 + call grid_average_X2Y(avgname(n)(4:4),array1x,avgname(n)(1:1),wght1,mask1,array2z,avgname(n)(3:3)) + endif + fmax = global_maxval(abs(array1x), distrb_info) + amax = global_maxval(abs(array2z-array2x), distrb_info) +! tcraig, errtol=c0 doesn't work here, diff seems smaller than roundoff? - interesting +! 
errtol = c0 + errtol = 1.0e-20_dbl_kind + if (amax < fmax * errtol) then + if (my_task == master_task) write(6,103) 'PASS explicit avg vs implicit avg ',errtol + else + errorflag(testcnt) = failflag + errorflag0 = failflag + if (my_task == master_task) write(6,103) 'FAIL explicit avg vs implicit avg ',amax,fmax*errtol + amin = global_minval(array2x, distrb_info) + amax = global_maxval(array2x, distrb_info) + if (my_task == master_task) write(6,103) 'output min/max = ',amin,amax + amin = global_minval(array2z, distrb_info) + amax = global_maxval(array2z, distrb_info) + if (my_task == master_task) write(6,103) 'expout min/max = ',amin,amax + endif + endif + + else ! len_trim(avgname(n)) == 5 + ! no swap needed 1x and sx set based on NE or EN + call grid_average_X2Y(avgname(n)(5:5),array1x,avgname(n)(1:1),arraysx,avgname(n)(2:2),array2x,avgname(n)(4:4)) + endif array3x = c1 array3y = c1 @@ -421,8 +648,8 @@ program gridavgchk errx = abs(array3x(i,j,iblock)) ! flag points that are active and error numerically if (dmask(i,j,iblock,n) .and. errx > errtolarea(n)) then - errorflag(ntest) = failflag - errorflag0 = failflag + errorflag(testcnt) = failflag + errorflag0 = failflag write(100+my_task,*) '' write(100+my_task,100) 'error area '//trim(avgname(n)),my_task,iblock,i,j,iglob,jglob write(100+my_task,101) 'out,exact,err',array2x(i,j,iblock),array2y(i,j,iblock),array3x(i,j,iblock) @@ -430,6 +657,8 @@ program gridavgchk enddo enddo enddo + gflag = global_maxval(errorflag(testcnt), MPI_COMM_ICE) + if (my_task == master_task .and. gflag == failflag) write(6,*) ' *** FAIL ***' amin = global_minval(array1x, distrb_info) amax = global_maxval(array1x, distrb_info) if (my_task == master_task) write(6,103) 'input min/max = ',amin,amax @@ -442,16 +671,29 @@ program gridavgchk amin = global_minval(array3x, distrb_info, dmask(:,:,:,n)) amax = global_maxval(array3x, distrb_info, dmask(:,:,:,n)) if (my_task == master_task) write(6,102) 'error min/max = ',amin,amax + amax = global_maxval(abs(array3x), distrb_info, dmask(:,:,:,n)) + errmax(numgroups_cnt,numtests_cnt) = max(errmax(numgroups_cnt,numtests_cnt), amax) enddo 100 format(a,10i8) 101 format(a,3g16.7) 102 format(a,3f16.7) 103 format(a,2g16.7,f16.7) +110 format(a,i8,g16.7,6i8) + + if (my_task == master_task) then + write(6,*) ' ' + write(6,*) 'Max Errors:' + do i = 1,maxgroups + do j = 1,maxtests + write(6,'(2x,a16,2x,a16,2x,f23.16)') trim(numgroups_name(i)),trim(numtests_name(j)),errmax(i,j) + enddo + enddo + endif gflag = global_maxval(errorflag0, MPI_COMM_ICE) errorflag0 = gflag - do n = 1,maxtest + do n = 1,tottest gflag = global_maxval(errorflag(n), MPI_COMM_ICE) errorflag(n) = gflag enddo @@ -459,7 +701,7 @@ program gridavgchk if (my_task == master_task) then write(6,*) ' ' write(6,*) 'GRIDAVGCHK COMPLETED SUCCESSFULLY' - do n = 1,maxtest + do n = 1,tottest if (errorflag(n) == passflag) then write(6,*) 'PASS ',trim(stringflag(n)) else diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index a5dd3c058..86efbf65e 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -60,7 +60,9 @@ &grid_nml grid_format = 'bin' grid_type = 'displaced_pole' - grid_system = 'B' + grid_ice = 'B' + grid_atm = 'A' + grid_ocn = 'A' grid_file = 'grid' kmt_type = 'file' kmt_file = 'kmt' diff --git a/configuration/scripts/options/set_nml.box2001 b/configuration/scripts/options/set_nml.box2001 index c087466c1..6039335bc 100644 --- a/configuration/scripts/options/set_nml.box2001 +++ b/configuration/scripts/options/set_nml.box2001 @@ 
-1,3 +1,5 @@ +grid_atm = 'B' +grid_ocn = 'B' days_per_year = 360 use_leap_years = .false. npt = 240 diff --git a/configuration/scripts/options/set_nml.boxadv b/configuration/scripts/options/set_nml.boxadv index 7fc70713e..f7daef019 100644 --- a/configuration/scripts/options/set_nml.boxadv +++ b/configuration/scripts/options/set_nml.boxadv @@ -1,9 +1,13 @@ +grid_ocn = 'B' nilyr = 1 ice_ic = 'default' restart_ext = .false. kcatbound = 2 ew_boundary_type = 'cyclic' ns_boundary_type = 'cyclic' +atm_data_type = 'box2001' +ocn_data_type = 'box2001' +ice_data_type = 'box2001' tr_iage = .true. tr_FY = .false. tr_lvl = .true. diff --git a/configuration/scripts/options/set_nml.boxislandse b/configuration/scripts/options/set_nml.boxislandse index d27b26a8d..561cdb2b1 100644 --- a/configuration/scripts/options/set_nml.boxislandse +++ b/configuration/scripts/options/set_nml.boxislandse @@ -17,6 +17,7 @@ ktransport = -1 coriolis = 'constant' atmbndy = 'constant' atm_data_type = 'uniform_east' +ocn_data_type = 'calm' ice_data_type = 'uniform' rotate_wind = .false. calc_strair = .false. diff --git a/configuration/scripts/options/set_nml.boxislandsn b/configuration/scripts/options/set_nml.boxislandsn index 48ee103f5..35f321ee4 100644 --- a/configuration/scripts/options/set_nml.boxislandsn +++ b/configuration/scripts/options/set_nml.boxislandsn @@ -17,6 +17,7 @@ ktransport = -1 coriolis = 'constant' atmbndy = 'constant' atm_data_type = 'uniform_north' +ocn_data_type = 'calm' ice_data_type = 'uniform' rotate_wind = .false. calc_strair = .false. diff --git a/configuration/scripts/options/set_nml.boxislandsne b/configuration/scripts/options/set_nml.boxislandsne index 3dec1d246..c572e7e2c 100644 --- a/configuration/scripts/options/set_nml.boxislandsne +++ b/configuration/scripts/options/set_nml.boxislandsne @@ -17,6 +17,7 @@ ktransport = -1 coriolis = 'constant' atmbndy = 'constant' atm_data_type = 'uniform_northeast' +ocn_data_type = 'calm' ice_data_type = 'uniform' rotate_wind = .false. calc_strair = .false. diff --git a/configuration/scripts/options/set_nml.boxnodyn b/configuration/scripts/options/set_nml.boxnodyn index 8e5d4a692..25ef3cbd4 100644 --- a/configuration/scripts/options/set_nml.boxnodyn +++ b/configuration/scripts/options/set_nml.boxnodyn @@ -52,5 +52,6 @@ krdg_redist = 1 seabed_stress = .true. atm_data_type = 'calm' ocn_data_type = 'calm' +ice_data_type = 'box2001' shortwave = 'ccsm3' albedo_type = 'constant' diff --git a/configuration/scripts/options/set_nml.boxrestore b/configuration/scripts/options/set_nml.boxrestore index ac0266aeb..a3bacd6d3 100644 --- a/configuration/scripts/options/set_nml.boxrestore +++ b/configuration/scripts/options/set_nml.boxrestore @@ -1,3 +1,4 @@ +grid_ocn = 'B' nilyr = 1 ice_ic = 'default' restart_ext = .true. @@ -6,6 +7,9 @@ ndtd = 2 kcatbound = 1 ew_boundary_type = 'cyclic' ns_boundary_type = 'open' +atm_data_type = 'box2001' +ocn_data_type = 'box2001' +ice_data_type = 'box2001' histfreq = 'd','x','x','x','x' histfreq_n = 1,1,1,1,1 f_aice = 'd' diff --git a/configuration/scripts/options/set_nml.boxslotcyl b/configuration/scripts/options/set_nml.boxslotcyl index b38d0efce..9bbe59dd6 100644 --- a/configuration/scripts/options/set_nml.boxslotcyl +++ b/configuration/scripts/options/set_nml.boxslotcyl @@ -1,3 +1,5 @@ +grid_atm = 'B' +grid_ocn = 'B' nilyr = 1 ice_ic = 'default' restart_ext = .false. 
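For reference, a minimal &grid_nml sketch showing how the renamed and new grid options above fit together. This is illustrative only and not part of the patch; the values simply mirror the ice_in defaults changed earlier in this diff, and the '!' comments assume a Fortran 2003 namelist reader that accepts inline comments:

    &grid_nml
      grid_type = 'displaced_pole'
      grid_ice  = 'B'   ! structure of the model ice grid: 'B' or 'CD'
      grid_atm  = 'A'   ! atm forcing/coupling grid: 'A', 'B', 'C', or 'CD'
      grid_ocn  = 'A'   ! ocn forcing/coupling grid: 'A', 'B', 'C', or 'CD'
    /

The set_nml options that follow override these defaults per test case (e.g., box2001 and boxslotcyl force grid_atm and grid_ocn to 'B').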
@@ -17,6 +19,8 @@ ktherm = -1 kdyn = -1 kridge = -1 ktransport = 1 +atm_data_type = 'box2001' +ocn_data_type = 'box2001' ice_data_type = 'boxslotcyl' histfreq = 'h','x','x','x','x' histfreq_n = 6 , 1 , 1 , 1 , 1 diff --git a/configuration/scripts/options/set_nml.gbox128 b/configuration/scripts/options/set_nml.gbox128 index 2371b65ed..40e81553f 100644 --- a/configuration/scripts/options/set_nml.gbox128 +++ b/configuration/scripts/options/set_nml.gbox128 @@ -1,5 +1,7 @@ +grid_ocn = 'B' ice_ic = 'default' grid_type = 'rectangular' atm_data_type = 'box2001' ocn_data_type = 'box2001' ice_data_type = 'box2001' + diff --git a/configuration/scripts/options/set_nml.gridb b/configuration/scripts/options/set_nml.gridb index 2a209410b..eadfc15ce 100644 --- a/configuration/scripts/options/set_nml.gridb +++ b/configuration/scripts/options/set_nml.gridb @@ -1,2 +1,2 @@ -grid_system = 'B' +grid_ice = 'B' diff --git a/configuration/scripts/options/set_nml.gridcd b/configuration/scripts/options/set_nml.gridcd index 9426056e9..c4198f382 100644 --- a/configuration/scripts/options/set_nml.gridcd +++ b/configuration/scripts/options/set_nml.gridcd @@ -1,2 +1,2 @@ -grid_system = 'CD' +grid_ice = 'CD' diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index da4f1280a..8ca6e5414 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -283,9 +283,20 @@ either Celsius or Kelvin units). "fyear_init", "initial forcing data year", "" "**G**", "", "" "gravit", "gravitational acceleration", "9.80616 m/s\ :math:`^2`" + "grid_atm", "grid structure for atm forcing/coupling fields, 'A', 'B', 'C', etc", "" + "grid_atm_dynu", "grid for atm dynamic-u forcing/coupling fields, 'T', 'U', 'N', 'E'", "" + "grid_atm_dynv", "grid for atm dynamic-v forcing/coupling fields, 'T', 'U', 'N', 'E'", "" + "grid_atm_thrm", "grid for atm thermodynamic forcing/coupling fields, 'T', 'U', 'N', 'E'", "" "grid_file", "input file for grid info", "" "grid_format", "format of grid files", "" - "grid_system", "structure of the grid, ‘B’, ‘CD’, etc", "" + "grid_ice", "structure of the model ice grid, ‘B’, ‘CD’, etc", "" + "grid_ice_dynu", "grid for ice dynamic-u model fields, 'T', 'U', 'N', 'E'", "" + "grid_ice_dynv", "grid for ice dynamic-v model fields, 'T', 'U', 'N', 'E'", "" + "grid_ice_thrm", "grid for ice thermodynamic model fields, 'T', 'U', 'N', 'E'", "" + "grid_ocn", "grid structure for ocn forcing/coupling fields, 'A', 'B', 'C', etc", "" + "grid_ocn_dynu", "grid for ocn dynamic-u forcing/coupling fields, 'T', 'U', 'N', 'E'", "" + "grid_ocn_dynv", "grid for ocn dynamic-v forcing/coupling fields, 'T', 'U', 'N', 'E'", "" + "grid_ocn_thrm", "grid for ocn thermodynamic forcing/coupling fields, 'T', 'U', 'N', 'E'", "" "grid_type", "‘rectangular’, ‘displaced_pole’, ‘column’ or ‘regional’", "" "gridcpl_file", "input file for coupling grid info", "" "grow_net", "specific biogeochemistry growth rate per grid cell", "s :math:`^{-1}`" diff --git a/doc/source/master_list.bib b/doc/source/master_list.bib index e4afa0c46..295f5df9d 100644 --- a/doc/source/master_list.bib +++ b/doc/source/master_list.bib @@ -950,13 +950,27 @@ @Article{Roberts18 } @article{Roach19, -author = "L.A. Roach and C. M. Bitz and C. Horvat and S. M. Dean", -title = {{Advances in modelling interactions between sea ice and ocean surface waves}}, -journal = {Journal of Advances in Modeling Earth Systems}, -url = {http://doi.wiley.com/10.1029/2019MS001836}, -year={2019} + author = "L.A. Roach and C. M. Bitz and C. Horvat and S. M. 
Dean", + title = {{Advances in modelling interactions between sea ice and ocean surface waves}}, + journal = {Journal of Advances in Modeling Earth Systems}, + url = {http://doi.wiley.com/10.1029/2019MS001836}, + year={2019} } +@incollection{Arakawa77, + author = "A. Arakawa and V.R. Lamb", + title = "Computational Design of the Basic Dynamical Processes of the UCLA General Circulation Model", + editor = "Julius Chang", + series = "Methods in Computational Physics: Advances in Research and Applications", + publisher = {Elsevier}, + volume = {17}, + pages = {173-265}, + year = {1977}, + booktitle = "General Circulation Models of the Atmosphere", + issn = {0076-6860}, + doi = {https://doi.org/10.1016/B978-0-12-460817-7.50009-4}, + url = {https://www.sciencedirect.com/science/article/pii/B9780124608177500094}, +} ======= @article{Horvat15, diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 6df4c228d..8f763db2f 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -236,12 +236,19 @@ grid_nml "``dxrect``", "real", "x-direction grid spacing for rectangular grid in cm", "0.0" "``dyrect``", "real", "y-direction grid spacing for rectangular grid in cm", "0.0" "``gridcpl_file``", "string", "input file for coupling grid info", "'unknown_gridcpl_file'" + "``grid_atm``", "``A``", "atm forcing/coupling grid, all fields on T grid", "``A``" + "", "``B``", "atm forcing/coupling grid, thermo fields on T grid, dyn fields on U grid", "" + "", "``C``", "atm forcing/coupling grid, thermo fields on T grid, dynu fields on E grid, dynv fields on N grid", "" + "", "``CD``", "atm forcing/coupling grid, thermo fields on T grid, dyn fields on N and E grid", "" "``grid_file``", "string", "name of grid file to be read", "'unknown_grid_file'" "``grid_format``", "``bin``", "read direct access grid and kmt files", "``bin``" "", "``nc``", "read grid and kmt files", "" - "``grid_system``", "``B``", "use B grid structure with T at center and U at NE corner, "``B``" - "", "``C``", "use C grid structure with T at center, U at E edge, and V at N edge", "" + "``grid_ice``", "``B``", "use B grid structure with T at center and U at NE corner", "``B``" "", "``CD``", "use CD grid structure with T at center and U/V at N and E edge", "" + "``grid_ocn``", "``A``", "ocn forcing/coupling grid, all fields on T grid", "``A``" + "", "``B``", "ocn forcing/coupling grid, thermo fields on T grid, dyn fields on U grid", "" + "", "``C``", "ocn forcing/coupling grid, thermo fields on T grid, dynu fields on E grid, dynv fields on N grid", "" + "", "``CD``", "ocn forcing/coupling grid, thermo fields on T grid, dyn fields on N and E grid", "" "``grid_type``", "``displaced_pole``", "read from file in *popgrid*", "``rectangular``" "", "``rectangular``", "defined in *rectgrid*", "" "", "``regional``", "read from file in *popgrid*", "" @@ -252,7 +259,7 @@ grid_nml "", "``2``", "WMO standard categories", "" "", "``3``", "asymptotic scheme", "" "``kmt_type``", "string", "file, default or boxislands", "file" - "``kmt_file``", "string", "name of land mask file to be read", "'unknown_kmt_file'" + "``kmt_file``", "string", "name of land mask file to be read", "``unknown_kmt_file``" "``nblyr``", "integer", "number of zbgc layers", "0" "``ncat``", "integer", "number of ice thickness categories", "0" "``nfsd``", "integer", "number of floe size categories", "1" diff --git a/doc/source/user_guide/ug_implementation.rst 
b/doc/source/user_guide/ug_implementation.rst index 5bccd33e1..a838f887b 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -419,6 +419,122 @@ or southern hemispheres, respectively. Special constants (``spval`` and points in the history files and diagnostics. +.. _interpolation: + +**************************** +Interpolating between grids +**************************** + +Fields in CICE are generally defined at particular grid locations, such as T cell centers, +U corners, or N or E edges. These are assigned internally in CICE based on the ``grid_ice`` +namelist variable. Forcing/coupling fields are also associated with a +specific set of grid locations that may or may not be the same as on the internal CICE model grid. +The namelist variables ``grid_atm`` and ``grid_ocn`` define the forcing/coupling grids. +The ``grid_ice``, ``grid_atm``, and ``grid_ocn`` variables are independent and take +values like ``A``, ``B``, ``C``, or ``CD`` consistent with the Arakawa grid convention :cite:`Arakawa77`. +The relationship between the grid system and the internal grids is shown in :ref:`tab-gridsys`. + +.. _tab-gridsys: + +.. table:: Grid System and Type Definitions + :align: center + + +--------------+----------------+----------------+----------------+ + | grid system | thermo grid | u dynamic grid | v dynamic grid | + +==============+================+================+================+ + | A | T | T | T | + +--------------+----------------+----------------+----------------+ + | B | T | U | U | + +--------------+----------------+----------------+----------------+ + | C | T | E | N | + +--------------+----------------+----------------+----------------+ + | CD | T | N+E | N+E | + +--------------+----------------+----------------+----------------+ + +For all grid systems, thermodynamic variables are always defined on the ``T`` grid for the model and +model forcing/coupling fields. However, the dynamics u and v fields vary. +In the ``CD`` grid, there are twice as many u and v fields as on the other grids. Within the CICE model, +the variables ``grid_ice_thrm``, ``grid_ice_dynu``, ``grid_ice_dynv``, ``grid_atm_thrm``, +``grid_atm_dynu``, ``grid_atm_dynv``, ``grid_ocn_thrm``, ``grid_ocn_dynu``, and ``grid_ocn_dynv`` are +character strings (``T``, ``U``, ``N``, ``E`` , ``NE``) derived from the ``grid_ice``, ``grid_atm``, +and ``grid_ocn`` namelist values. + +The CICE model has several internal methods that will interpolate (a.k.a. map or average) fields on +(``T``, ``U``, ``N``, ``E``, ``NE``) grids to (``T``, ``U``, ``N``, ``E``). An interpolation +to an identical grid results in a field copy. The generic interface to this method is ``grid_average_X2Y``, +and there are several forms. + +.. code-block:: fortran + + subroutine grid_average_X2Y(type,work1,grid1,work2,grid2) + character(len=*) , intent(in) :: type ! mapping type (S, A, F) + real (kind=dbl_kind), intent(in) :: work1(:,:,:) ! input field(nx_block, ny_block, max_blocks) + character(len=*) , intent(in) :: grid1 ! work1 grid (T, U, N, E) + real (kind=dbl_kind), intent(out) :: work2(:,:,:) ! output field(nx_block, ny_block, max_blocks) + character(len=*) , intent(in) :: grid2 ! work2 grid (T, U, N, E) + +where type is an interpolation type with the following valid values, + +type = ``S`` is a normalized, masked, area-weighted interpolation + +.. 
math:: + work2 = \frac{\sum_{i=1}^{n} (M_{1i}A_{1i}work1_{i})} {\sum_{i=1}^{n} (M_{1i}A_{1i})} + +type = ``A`` is a normalized, unmasked, area-weighted interpolation + +.. math:: + work2 = \frac{\sum_{i=1}^{n} (A_{1i}work1_{i})} {\sum_{i=1}^{n} (A_{1i})} + +type = ``F`` is a normalized, unmasked, conservative flux interpolation + +.. math:: + work2 = \frac{\sum_{i=1}^{n} (A_{1i}work1_{i})} {n*A_{2}} + +with A defined as the appropriate gridcell area and M as the gridcell mask. +Another form of the ``grid_average_X2Y`` is + +.. code-block:: fortran + + subroutine grid_average_X2Y(type,work1,grid1,wght1,mask1,work2,grid2) + character(len=*) , intent(in) :: type ! mapping type (S, A, F) + real (kind=dbl_kind), intent(in) :: work1(:,:,:) ! input field(nx_block, ny_block, max_blocks) + real (kind=dbl_kind), intent(in) :: wght1(:,:,:) ! input weight(nx_block, ny_block, max_blocks) + real (kind=dbl_kind), intent(in) :: mask1(:,:,:) ! input mask(nx_block, ny_block, max_blocks) + character(len=*) , intent(in) :: grid1 ! work1 grid (T, U, N, E) + real (kind=dbl_kind), intent(out) :: work2(:,:,:) ! output field(nx_block, ny_block, max_blocks) + character(len=*) , intent(in) :: grid2 ! work2 grid (T, U, N, E) + +In this case, the input arrays `wght1` and `mask1` are used in the interpolation equations instead of gridcell +area and mask. This version allows the user to define the weights and mask +explicitly. This implementation is supported only for type = ``S`` or ``A`` interpolations. + +A final form of the ``grid_average_X2Y`` interface is + +.. code-block:: fortran + + subroutine grid_average_X2Y(type,work1a,grid1a,work1b,grid1b,work2,grid2) + character(len=*) , intent(in) :: type ! mapping type (S, A, F) + real (kind=dbl_kind), intent(in) :: work1a(:,:,:) ! input field(nx_block, ny_block, max_blocks) + character(len=*) , intent(in) :: grid1a ! work1 grid (N, E) + real (kind=dbl_kind), intent(in) :: work1b(:,:,:) ! input field(nx_block, ny_block, max_blocks) + character(len=*) , intent(in) :: grid1b ! work1 grid (N, E) + real (kind=dbl_kind), intent(out) :: work2(:,:,:) ! output field(nx_block, ny_block, max_blocks) + character(len=*) , intent(in) :: grid2 ! work2 grid (T, U) + +This version supports mapping from an ``NE`` grid to a ``T`` or ``U`` grid. In this case, the ``1a`` arguments +are for either the `N` or `E` field and the 1b arguments are for the complementary field (``E`` or ``N`` respectively). +At present, only ``S`` type mappings are supported with this interface. + +In all cases, the work1, wght1, and mask1 input arrays should have correct halo values when called. Examples of usage +can be found in the source code, but the following example maps the uocn and vocn fields from their native +forcing/coupling grid to the ``U`` grid using a masked, area-weighted, average method. + +.. code-block:: fortran + + call grid_average_X2Y('S', uocn, grid_ocn_dynu, uocnU, 'U') + call grid_average_X2Y('S', vocn, grid_ocn_dynv, vocnU, 'U') + + .. _performance: *************** From bd59fb041b7be177c11b498c56c6cfbc39e610b8 Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Wed, 22 Dec 2021 11:24:32 -0700 Subject: [PATCH 061/109] Fix a bug with dyn_prep2. 
(#48) * Add some print statements * Some print statements * Fix a bug with dyn_prep2 and change the order of interpolations and halo updates * Update icepack --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 70 ++++++++----------- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 2 +- 2 files changed, 31 insertions(+), 41 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 377f1205d..ddbc91f1c 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -493,7 +493,7 @@ subroutine evp (dt) uocnE (:,:,iblk), vocnE (:,:,iblk), & strairxE (:,:,iblk), strairyE (:,:,iblk), & ss_tltxE (:,:,iblk), ss_tltyE (:,:,iblk), & - icetmask (:,:,iblk), icenmask (:,:,iblk), & + icetmask (:,:,iblk), iceemask (:,:,iblk), & fmE (:,:,iblk), dt, & strtltxE (:,:,iblk), strtltyE (:,:,iblk), & strocnxE (:,:,iblk), strocnyE (:,:,iblk), & @@ -520,16 +520,6 @@ subroutine evp (dt) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - call ice_timer_start(timer_bound) - call ice_HaloUpdate (strength, halo_info, & - field_loc_center, field_type_scalar) - ! velocities may have changed in dyn_prep2 - call stack_velocity_field(uvel, vvel, fld2) - call ice_HaloUpdate (fld2, halo_info, & - field_loc_NEcorner, field_type_vector) - call unstack_velocity_field(fld2, uvel, vvel) - call ice_timer_stop(timer_bound) - if (grid_ice == 'CD') then call ice_timer_start(timer_bound) @@ -545,8 +535,21 @@ subroutine evp (dt) call unstack_velocity_field(fld2, uvelE, vvelE) call ice_timer_stop(timer_bound) + call grid_average_X2Y('S',uvelE,'E',uvel,'U') + call grid_average_X2Y('S',vvelN,'N',vvel,'U') endif + call ice_timer_start(timer_bound) + call ice_HaloUpdate (strength, halo_info, & + field_loc_center, field_type_scalar) + + ! velocities may have changed in dyn_prep2 + call stack_velocity_field(uvel, vvel, fld2) + call ice_HaloUpdate (fld2, halo_info, & + field_loc_NEcorner, field_type_vector) + call unstack_velocity_field(fld2, uvel, vvel) + call ice_timer_stop(timer_bound) + if (maskhalo_dyn) then call ice_timer_start(timer_bound) halomask = 0 @@ -658,13 +661,6 @@ subroutine evp (dt) do ksub = 1,ndte ! subcycling - ! shift velocity components from CD grid locations (N, E) to B grid location (U) for stress_U - - if (grid_ice == 'CD') then - call grid_average_X2Y('S',uvelE,'E',uvel,'U') - call grid_average_X2Y('S',vvelN,'N',vvel,'U') - endif - !----------------------------------------------------------------- ! stress tensor equation, total surface stress !----------------------------------------------------------------- @@ -806,35 +802,36 @@ subroutine evp (dt) enddo !$TCXOMP END PARALLEL DO - call stack_velocity_field(uvel, vvel, fld2) - call ice_timer_start(timer_bound) - if (maskhalo_dyn) then - call ice_HaloUpdate (fld2, halo_info_mask, & - field_loc_NEcorner, field_type_vector) - else - call ice_HaloUpdate (fld2, halo_info, & - field_loc_NEcorner, field_type_vector) - endif - call ice_timer_stop(timer_bound) - call unstack_velocity_field(fld2, uvel, vvel) - if (grid_ice == 'CD') then call ice_timer_start(timer_bound) - ! velocities may have changed in dyn_prep2 call stack_velocity_field(uvelN, vvelN, fld2) call ice_HaloUpdate (fld2, halo_info, & field_loc_Nface, field_type_vector) call unstack_velocity_field(fld2, uvelN, vvelN) - ! 
velocities may have changed in dyn_prep2 call stack_velocity_field(uvelE, vvelE, fld2) call ice_HaloUpdate (fld2, halo_info, & field_loc_Eface, field_type_vector) call unstack_velocity_field(fld2, uvelE, vvelE) call ice_timer_stop(timer_bound) + call grid_average_X2Y('S',uvelE,'E',uvel,'U') + call grid_average_X2Y('S',vvelN,'N',vvel,'U') + endif + call ice_timer_start(timer_bound) + call stack_velocity_field(uvel, vvel, fld2) + if (maskhalo_dyn) then + call ice_HaloUpdate (fld2, halo_info_mask, & + field_loc_NEcorner, field_type_vector) + else + call ice_HaloUpdate (fld2, halo_info, & + field_loc_NEcorner, field_type_vector) + endif + call unstack_velocity_field(fld2, uvel, vvel) + call ice_timer_stop(timer_bound) + enddo ! subcycling endif ! evp_algorithm @@ -992,12 +989,6 @@ subroutine evp (dt) call grid_average_X2Y('F',work1,'U',strocnxT,'T') ! shift call grid_average_X2Y('F',work2,'U',strocnyT,'T') -! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport - if (grid_ice == 'CD') then - call grid_average_X2Y('S',uvelE,'E',uvel,'U') - call grid_average_X2Y('S',vvelN,'N',vvel,'U') - endif - call ice_timer_stop(timer_dynamics) ! dynamics end subroutine evp @@ -1533,7 +1524,7 @@ subroutine stress_U (nx_block, ny_block, & ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) factor for BCs across coastline ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) factor for BCs across coastline epm , & ! E-cell mask - npm , & ! E-cell mask + npm , & ! N-cell mask hm , & ! T-cell mask uvm , & ! U-cell mask zetax2T , & ! 2*zeta at the T point @@ -1714,7 +1705,6 @@ subroutine div_stress (nx_block, ny_block, & case default call abort_ice(subname // ' unknown grid_location: ' // grid_location) end select - enddo ! ij diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index bc0c25f75..57d951be0 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -1770,7 +1770,7 @@ subroutine strain_rates_U (nx_block, ny_block, & ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) for BCs ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) for BCs epm , & ! E-cell mask - npm , & ! E-cell mask + npm , & ! N-cell mask uvm ! U-cell mask From e63964311a8aa1e0174074bca4d0ec5a63fc5716 Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Tue, 25 Jan 2022 09:57:13 -0700 Subject: [PATCH 062/109] Dynhalo (#49) * Rearrange the calls in ice_dyn_evp.F90 and add some halo updates * Fix spaces --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 108 +++++++++++++++++---- 1 file changed, 89 insertions(+), 19 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index ddbc91f1c..d517d5b55 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -127,9 +127,9 @@ subroutine evp (dt) integer (kind=int_kind), dimension(max_blocks) :: & icellt , & ! no. of cells where icetmask = 1 - icelln , & ! no. of cells where icenmask = 1 - icelle , & ! no. of cells where iceemask = 1 - icellu ! no. of cells where iceumask = 1 + icelln , & ! no. of cells where icenmask = .true. + icelle , & ! no. of cells where iceemask = .true. + icellu ! no. of cells where iceumask = .true. integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks) :: & indxti , & ! 
compressed index in i-direction @@ -468,11 +468,11 @@ subroutine evp (dt) taubxN (:,:,iblk), taubyN (:,:,iblk), & waterxN (:,:,iblk), wateryN (:,:,iblk), & forcexN (:,:,iblk), forceyN (:,:,iblk), & - stresspT (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressmT (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12T (:,:,iblk), stress12_2(:,:,iblk), & + stress12_1 (:,:,iblk), stress12_2(:,:,iblk), & stress12_3(:,:,iblk), stress12_4(:,:,iblk), & uvelN_init (:,:,iblk), vvelN_init (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & @@ -501,16 +501,31 @@ subroutine evp (dt) taubxE (:,:,iblk), taubyE (:,:,iblk), & waterxE (:,:,iblk), wateryE (:,:,iblk), & forcexE (:,:,iblk), forceyE (:,:,iblk), & - stresspU (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressmU (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12U (:,:,iblk), stress12_2(:,:,iblk), & + stress12_1 (:,:,iblk), stress12_2(:,:,iblk), & stress12_3(:,:,iblk), stress12_4(:,:,iblk), & uvelE_init (:,:,iblk), vvelE_init (:,:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & TbE (:,:,iblk)) + + do i=1,nx_block + do j=1,ny_block + if (.not.iceumask(i,j,iblk)) then + stresspU(i,j,iblk) = c0 + stressmU(i,j,iblk) = c0 + stress12U(i,j,iblk) = c0 + endif + if (icetmask(i,j,iblk) == 0) then + stresspT(i,j,iblk) = c0 + stressmT(i,j,iblk) = c0 + stress12T(i,j,iblk) = c0 + endif + enddo + enddo enddo ! iblk !$TCXOMP END PARALLEL DO @@ -537,6 +552,9 @@ subroutine evp (dt) call grid_average_X2Y('S',uvelE,'E',uvel,'U') call grid_average_X2Y('S',vvelN,'N',vvel,'U') + + uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) + vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) endif call ice_timer_start(timer_bound) @@ -665,11 +683,12 @@ subroutine evp (dt) ! stress tensor equation, total surface stress !----------------------------------------------------------------- + select case (grid_ice) + case('B') + !$TCXOMP PARALLEL DO PRIVATE(iblk,strtmp) do iblk = 1, nblocks - select case (grid_ice) - case('B') call stress (nx_block, ny_block, & ksub, icellt(iblk), & indxti (:,iblk), indxtj (:,iblk), & @@ -709,7 +728,13 @@ subroutine evp (dt) uvel (:,:,iblk), vvel (:,:,iblk), & Tbu (:,:,iblk)) - case('CD') + enddo + !$TCXOMP END PARALLEL DO + + case('CD') + + !$TCXOMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks call stress_T (nx_block, ny_block, & ksub, icellt(iblk), & @@ -726,6 +751,19 @@ subroutine evp (dt) shear (:,:,iblk), divu (:,:,iblk), & rdg_conv (:,:,iblk), rdg_shear (:,:,iblk) ) + enddo + + ! Need to update the halos for the stress components + call ice_timer_start(timer_bound) + call ice_HaloUpdate (zetax2T, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (etax2T, halo_info, & + field_loc_center, field_type_scalar) + call ice_timer_stop(timer_bound) + + !$TCXOMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call stress_U (nx_block, ny_block, & ksub, icellu(iblk), & indxui (:,iblk), indxuj (:,iblk), & @@ -743,6 +781,28 @@ subroutine evp (dt) stresspU (:,:,iblk), stressmU (:,:,iblk), & stress12U (:,:,iblk)) + enddo + !$TCXOMP END PARALLEL DO + + ! 
Need to update the halos for the stress components + call ice_timer_start(timer_bound) + call ice_HaloUpdate (stresspT, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (stressmT, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (stress12T, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (stresspU, halo_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloUpdate (stressmU, halo_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloUpdate (stress12U, halo_info, & + field_loc_NEcorner, field_type_scalar) + call ice_timer_stop(timer_bound) + + !$TCXOMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call div_stress (nx_block, ny_block, & ! E point ksub, icelle(iblk), & indxei (:,iblk), indxej (:,iblk), & @@ -769,6 +829,12 @@ subroutine evp (dt) strintxN (:,:,iblk), strintyN (:,:,iblk), & 'N') + enddo + !$TCXOMP END PARALLEL DO + + !$TCXOMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call step_vel (nx_block, ny_block, & ! E point icelle (iblk), Cdn_ocn (:,:,iblk), & indxei (:,iblk), indxej (:,iblk), & @@ -797,12 +863,8 @@ subroutine evp (dt) uvelN (:,:,iblk), vvelN (:,:,iblk), & TbN (:,:,iblk)) - end select - - enddo - !$TCXOMP END PARALLEL DO - - if (grid_ice == 'CD') then + enddo + !$TCXOMP END PARALLEL DO call ice_timer_start(timer_bound) call stack_velocity_field(uvelN, vvelN, fld2) @@ -818,7 +880,10 @@ subroutine evp (dt) call grid_average_X2Y('S',uvelE,'E',uvel,'U') call grid_average_X2Y('S',vvelN,'N',vvel,'U') - endif + uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) + vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) + + end select call ice_timer_start(timer_bound) call stack_velocity_field(uvel, vvel, fld2) @@ -989,6 +1054,11 @@ subroutine evp (dt) call grid_average_X2Y('F',work1,'U',strocnxT,'T') ! shift call grid_average_X2Y('F',work2,'U',strocnyT,'T') + if (grid_ice == 'CD') then + call grid_average_X2Y('S',strintxE,'E',strintx,'U') ! diagnostic + call grid_average_X2Y('S',strintyN,'N',strinty,'U') ! diagnostic + endif + call ice_timer_stop(timer_dynamics) ! 
dynamics end subroutine evp From 311302accb3140c55f2c50cef482cc7872d746e2 Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Tue, 25 Jan 2022 11:57:44 -0500 Subject: [PATCH 063/109] new box symmetry tests and coriolis=zero (#50) --- cicecore/cicedynB/general/ice_forcing.F90 | 14 ++++- configuration/scripts/options/set_nml.boxsyme | 2 +- configuration/scripts/options/set_nml.boxsymn | 2 +- configuration/scripts/options/set_nml.boxsyms | 55 +++++++++++++++++++ configuration/scripts/options/set_nml.boxsymw | 55 +++++++++++++++++++ 5 files changed, 124 insertions(+), 4 deletions(-) create mode 100644 configuration/scripts/options/set_nml.boxsyms create mode 100644 configuration/scripts/options/set_nml.boxsymw diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90 index 013915683..1f88b4c14 100755 --- a/cicecore/cicedynB/general/ice_forcing.F90 +++ b/cicecore/cicedynB/general/ice_forcing.F90 @@ -312,10 +312,14 @@ subroutine init_forcing_atmo call box2001_data_atm elseif (trim(atm_data_type) == 'uniform_northeast') then call uniform_data_atm('NE') - elseif (trim(atm_data_type) == 'uniform_east') then - call uniform_data_atm('E') elseif (trim(atm_data_type) == 'uniform_north') then call uniform_data_atm('N') + elseif (trim(atm_data_type) == 'uniform_east') then + call uniform_data_atm('E') + elseif (trim(atm_data_type) == 'uniform_south') then + call uniform_data_atm('S') + elseif (trim(atm_data_type) == 'uniform_west') then + call uniform_data_atm('W') elseif (trim(atm_data_type) == 'calm') then call uniform_data_atm('N',c0) ! direction does not matter when c0 elseif (trim(atm_data_type) == 'hycom') then @@ -5481,6 +5485,12 @@ subroutine uniform_data_atm(dir,spd) elseif (dir == 'E') then uatm = atm_val vatm = c0 + elseif (dir == 'S') then + uatm = c0 + vatm = -atm_val + elseif (dir == 'W') then + uatm = -atm_val + vatm = c0 else call abort_ice (subname//'ERROR: dir unknown, dir = '//trim(dir), & file=__FILE__, line=__LINE__) diff --git a/configuration/scripts/options/set_nml.boxsyme b/configuration/scripts/options/set_nml.boxsyme index 01be549b4..73eda501d 100644 --- a/configuration/scripts/options/set_nml.boxsyme +++ b/configuration/scripts/options/set_nml.boxsyme @@ -20,7 +20,7 @@ kstrength = 0 kdyn = 1 kridge = -1 ktransport = -1 -coriolis = 'constant' +coriolis = 'zero' atmbndy = 'constant' atm_data_type = 'uniform_east' ocn_data_type = 'calm' diff --git a/configuration/scripts/options/set_nml.boxsymn b/configuration/scripts/options/set_nml.boxsymn index 66fa95a9e..bb1205adb 100644 --- a/configuration/scripts/options/set_nml.boxsymn +++ b/configuration/scripts/options/set_nml.boxsymn @@ -20,7 +20,7 @@ kstrength = 0 kdyn = 1 kridge = -1 ktransport = -1 -coriolis = 'constant' +coriolis = 'zero' atmbndy = 'constant' atm_data_type = 'uniform_north' ocn_data_type = 'calm' diff --git a/configuration/scripts/options/set_nml.boxsyms b/configuration/scripts/options/set_nml.boxsyms new file mode 100644 index 000000000..06d922fe1 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxsyms @@ -0,0 +1,55 @@ +days_per_year = 360 +use_leap_years = .false. +npt_unit = 'd' +npt = 5 +ice_ic = 'default' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .true. +ew_boundary_type = 'open' +ns_boundary_type = 'open' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. 
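+! boxsyms mirrors the boxsymn/boxsyme/boxsymw options; only the
+! atm_data_type direction differs. From the uniform_data_atm branches
+! added in ice_forcing.F90 earlier in this patch, the direction string
+! sets the uniform wind components as
+!   'E' -> (uatm,vatm) = ( atm_val,  c0      )
+!   'S' -> (uatm,vatm) = ( c0,      -atm_val )
+!   'W' -> (uatm,vatm) = (-atm_val,  c0      )
+! and presumably 'N' -> ( c0, atm_val ), though that branch is not
+! shown here. With coriolis = 'zero' and a calm ocean, each case
+! should produce a wind-aligned, symmetric solution.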
+ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'zero' +atmbndy = 'constant' +atm_data_type = 'uniform_south' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .false. +f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.boxsymw b/configuration/scripts/options/set_nml.boxsymw new file mode 100644 index 000000000..92dc31f6e --- /dev/null +++ b/configuration/scripts/options/set_nml.boxsymw @@ -0,0 +1,55 @@ +days_per_year = 360 +use_leap_years = .false. +npt_unit = 'd' +npt = 5 +ice_ic = 'default' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .true. +ew_boundary_type = 'open' +ns_boundary_type = 'open' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'zero' +atmbndy = 'constant' +atm_data_type = 'uniform_west' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .false. +f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' From 14f42ab803344102f8b9def313f48b7c048c090a Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Tue, 25 Jan 2022 08:58:00 -0800 Subject: [PATCH 064/109] Add gbox12 and ice_data_type='smallblock' (#51) * add gbox12 and ice_data_type='smallblock' * update documentation * update gbox12 default --- cicecore/cicedynB/general/ice_init.F90 | 16 +++++++++++++++- configuration/scripts/cice_decomp.csh | 11 +++++++++++ configuration/scripts/options/set_nml.gbox12 | 5 +++++ doc/source/user_guide/ug_case_settings.rst | 4 +++- 4 files changed, 34 insertions(+), 2 deletions(-) create mode 100644 configuration/scripts/options/set_nml.gbox12 diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 3292f6274..2c2327dd8 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -2687,7 +2687,7 @@ subroutine set_state_var (nx_block, ny_block, & if (trim(ice_ic) == 'default') then - if (trim(ice_data_type) == 'box2001') then + if (trim(ice_data_type) == 'box2001' .or. trim(ice_data_type) == 'smallblock') then hbar = c2 ! initial ice thickness do n = 1, ncat @@ -2764,6 +2764,20 @@ subroutine set_state_var (nx_block, ny_block, & endif enddo enddo + + else if (trim(ice_data_type) == 'smallblock') then + ! 2x2 ice in center of domain + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if ((iglob(i) == nx_global/2 .or. iglob(i) == nx_global/2+1) .and. & + (jglob(j) == ny_global/2 .or. jglob(j) == ny_global/2+1)) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo else ! 
default behavior diff --git a/configuration/scripts/cice_decomp.csh b/configuration/scripts/cice_decomp.csh index aa1bb9a54..0c6715f3b 100755 --- a/configuration/scripts/cice_decomp.csh +++ b/configuration/scripts/cice_decomp.csh @@ -66,6 +66,17 @@ else if (${grid} == 'gbox80') then set blckx = 8; set blcky = 8 endif +else if (${grid} == 'gbox12') then + set nxglob = 12 + set nyglob = 12 + if (${cicepes} <= 1) then + set blckx = 12; set blcky = 12 + else if (${cicepes} <= 8) then + set blckx = 4; set blcky = 4 + else + set blckx = 2; set blcky = 2 + endif + else if (${grid} == 'gx3') then set nxglob = 100 set nyglob = 116 diff --git a/configuration/scripts/options/set_nml.gbox12 b/configuration/scripts/options/set_nml.gbox12 new file mode 100644 index 000000000..8063bee67 --- /dev/null +++ b/configuration/scripts/options/set_nml.gbox12 @@ -0,0 +1,5 @@ +ice_ic = 'default' +grid_type = 'rectangular' +atm_data_type = 'box2001' +ocn_data_type = 'calm' +ice_data_type = 'box2001' diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 8f763db2f..3f70ebb02 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -577,7 +577,9 @@ forcing_nml "``highfreq``", "logical", "high-frequency atmo coupling", "``.false.``" "``ice_data_type``", "``boxslotcyl``", "initialize ice concentration and velocity for :ref:`boxslotcyl` test (:cite:`Zalesak79`)", "``default``" "", "``box2001``", "initialize ice concentration for :ref:`box2001` test (:cite:`Hunke01`)", "" - "", "``default``", "no special initialization", "" + "", "``default``", "ice dependent on latitude and ocean temperature", "" + "", "``smallblock``", "uniform 2x2 block ice concentration and thickness in center of domain", "" + "", "``uniform``", "uniform ice concentration and thickness across domain", "" "``iceruf``", "real", "ice surface roughness at atmosphere interface", "0.0005" "``l_mpond_fresh``", "``.false.``", "release pond water immediately to ocean", "``.false.``" "", "``true``", "retain (topo) pond water until ponds drain", "" From 94618ea8f0c1627511aec9e95cacaecd183dc8e2 Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Thu, 3 Feb 2022 16:41:16 -0500 Subject: [PATCH 065/109] New subroutines stepu_Cgrid and stepv_Cgrid (#52) * new subroutines stepu_Cgrid and stepv_Cgrid * Corrected compilation errors --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 4 +- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 208 +++++++++++++++++- 2 files changed, 208 insertions(+), 4 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index d517d5b55..1045cd14f 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -1621,7 +1621,7 @@ subroutine stress_U (nx_block, ny_block, & j = indxuj(ij) !----------------------------------------------------------------- - ! strain rates at T point + ! strain rates at U point ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- @@ -1639,7 +1639,7 @@ subroutine stress_U (nx_block, ny_block, & shearU, DeltaU ) !----------------------------------------------------------------- - ! viscous coefficients and replacement pressure at T point + ! 
viscous coefficients and replacement pressure at U point !----------------------------------------------------------------- call viscous_coeffs_and_rep_pressure_T2U (zetax2T(i ,j ), zetax2T(i ,j+1), & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 57d951be0..48006fb6c 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -23,8 +23,8 @@ module ice_dyn_shared implicit none private - public :: init_dyn, set_evp_parameters, stepu, step_vel, principal_stress, & - dyn_prep1, dyn_prep2, dyn_finish, & + public :: set_evp_parameters, stepu, step_vel, stepu_Cgrid, stepv_Cgrid, & + principal_stress, init_dyn, dyn_prep1, dyn_prep2, dyn_finish, & seabed_stress_factor_LKD, seabed_stress_factor_prob, & alloc_dyn_shared, & deformations, deformations_T, & @@ -927,6 +927,210 @@ end subroutine step_vel !======================================================================= +! Integration of the momentum equation to find velocity u at E location on C grid + + subroutine stepu_Cgrid (nx_block, ny_block, & + icell, Cw, & + indxi, indxj, & + ksub, aiu, & + uocn, vocn, & + waterx, forcex, & + massdti, fm, & + strintx, taubx, & + uvel_init, & + uvel, vvel, & + Tb) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icell, & ! total count when ice[en]mask is true + ksub ! subcycling iteration + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + Tb, & ! seabed stress factor (N/m^2) + uvel_init,& ! x-component of velocity (m/s), beginning of timestep + aiu , & ! ice fraction on [en]-grid + waterx , & ! for ocean stress calculation, x (m/s) + forcex , & ! work array: combined atm stress and ocn tilt, x + massdti , & ! mass of [EN]-cell/dt (kg/m^2 s) + uocn , & ! ocean current, x-direction (m/s) + vocn , & ! ocean current, y-direction (m/s) + fm , & ! Coriolis param. * mass in [EN]-cell (kg/s) + strintx , & ! divergence of internal ice stress, x (N/m^2) + Cw , & ! ocean-ice neutral drag coefficient + vvel ! y-component of velocity (m/s) interpolated to E location + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + uvel , & ! x-component of velocity (m/s) + taubx ! seabed stress, x-direction (N/m^2) + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + real (kind=dbl_kind) :: & + uold, vold , & ! old-time uvel, vvel + vrel , & ! relative ice-ocean velocity + cca,ccb,ccc,cc1 , & ! intermediate variables + taux , & ! part of ocean stress term + Cb , & ! complete seabed (basal) stress coeff + rhow ! + + character(len=*), parameter :: subname = '(stepu_Cgrid)' + + !----------------------------------------------------------------- + ! integrate the momentum equation + !----------------------------------------------------------------- + + call icepack_query_parameters(rhow_out=rhow) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + do ij =1, icell + i = indxi(ij) + j = indxj(ij) + + uold = uvel(i,j) + vold = vvel(i,j) + + ! (magnitude of relative ocean current)*rhow*drag*aice + vrel = aiu(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & + (vocn(i,j) - vold)**2) ! m/s + ! ice/ocean stress + taux = vrel*waterx(i,j) ! 
NOTE this is not the entire + + ccc = sqrt(uold**2 + vold**2) + u0 + Cb = Tb(i,j) / ccc ! for seabed stress + ! revp = 0 for classic evp, 1 for revised evp + cca = (brlx + revp)*massdti(i,j) + vrel * cosw + Cb ! kg/m^2 s + + ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s + + ! compute the velocity components + cc1 = strintx(i,j) + forcex(i,j) + taux & + + massdti(i,j)*(brlx*uold + revp*uvel_init(i,j)) + + uvel(i,j) = (ccb*vold + cc1) / cca ! m/s + + ! calculate seabed stress component for outputs + if (ksub == ndte .and. seabed_stress) then ! on last subcycling iteration + taubx(i,j) = -uvel(i,j)*Tb(i,j) / ccc + endif + + enddo ! ij + + end subroutine stepu_Cgrid + +!======================================================================= + +! Integration of the momentum equation to find velocity v at N location on C grid + + subroutine stepv_Cgrid (nx_block, ny_block, & + icell, Cw, & + indxi, indxj, & + ksub, aiu, & + uocn, vocn, & + watery, forcey, & + massdti, fm, & + strinty, tauby, & + vvel_init, & + uvel, vvel, & + Tb) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icell, & ! total count when ice[en]mask is true + ksub ! subcycling iteration + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + Tb, & ! seabed stress factor (N/m^2) + vvel_init,& ! y-component of velocity (m/s), beginning of timestep + aiu , & ! ice fraction on [en]-grid + watery , & ! for ocean stress calculation, y (m/s) + forcey , & ! work array: combined atm stress and ocn tilt, y + massdti , & ! mass of [EN]-cell/dt (kg/m^2 s) + uocn , & ! ocean current, x-direction (m/s) + vocn , & ! ocean current, y-direction (m/s) + fm , & ! Coriolis param. * mass in [EN]-cell (kg/s) + strinty , & ! divergence of internal ice stress, y (N/m^2) + Cw , & ! ocean-ice neutral drag coefficient + uvel ! x-component of velocity (m/s) interpolated to N location + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + vvel , & ! y-component of velocity (m/s) + tauby ! seabed stress, y-direction (N/m^2) + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + real (kind=dbl_kind) :: & + uold, vold , & ! old-time uvel, vvel + vrel , & ! relative ice-ocean velocity + cca,ccb,ccc,cc2 , & ! intermediate variables + tauy , & ! part of ocean stress term + Cb , & ! complete seabed (basal) stress coeff + rhow ! + + character(len=*), parameter :: subname = '(stepv_Cgrid)' + + !----------------------------------------------------------------- + ! integrate the momentum equation + !----------------------------------------------------------------- + + call icepack_query_parameters(rhow_out=rhow) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + do ij =1, icell + i = indxi(ij) + j = indxj(ij) + + uold = uvel(i,j) + vold = vvel(i,j) + + ! (magnitude of relative ocean current)*rhow*drag*aice + vrel = aiu(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & + (vocn(i,j) - vold)**2) ! m/s + ! ice/ocean stress + tauy = vrel*watery(i,j) ! NOTE this is not the entire ocn stress + + ccc = sqrt(uold**2 + vold**2) + u0 + Cb = Tb(i,j) / ccc ! for seabed stress + ! revp = 0 for classic evp, 1 for revised evp + cca = (brlx + revp)*massdti(i,j) + vrel * cosw + Cb ! 
kg/m^2 s + + ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s + + ! compute the velocity components + cc2 = strinty(i,j) + forcey(i,j) + tauy & + + massdti(i,j)*(brlx*vold + revp*vvel_init(i,j)) + + vvel(i,j) = (-ccb*uold + cc2) / cca + + ! calculate seabed stress component for outputs + if (ksub == ndte .and. seabed_stress) then ! on last subcycling iteration + tauby(i,j) = -vvel(i,j)*Tb(i,j) / ccc + endif + + enddo ! ij + + end subroutine stepv_Cgrid + +!======================================================================= + ! Calculation of the ice-ocean stress. ! ...the sign will be reversed later... ! From 52184352a53fedaa2d34bf6b1c144789d5f18e83 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Fri, 4 Feb 2022 12:55:25 -0800 Subject: [PATCH 066/109] - Implement support for grid_ice = "C". Currently bit-for-bit with "CD". (#53) - Add ice_data_type = bigblock and ice_data_type = gauss. Both cover 90% of the domain with no ice on the boundaries. bigblock is a constant depth block of ice. gauss is a block of ice in a gauss distribution. --- .../cicedynB/analysis/ice_diagnostics.F90 | 62 ++++++++++++------- cicecore/cicedynB/analysis/ice_history.F90 | 6 +- cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 2 +- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 22 +++---- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 12 ++-- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 2 +- .../dynamics/ice_transport_driver.F90 | 4 +- .../cicedynB/dynamics/ice_transport_remap.F90 | 2 +- cicecore/cicedynB/general/ice_flux.F90 | 4 +- cicecore/cicedynB/general/ice_init.F90 | 40 +++++++++++- cicecore/cicedynB/infrastructure/ice_grid.F90 | 4 +- configuration/scripts/options/set_nml.gridc | 2 + configuration/scripts/tests/gridsys_suite.ts | 15 +++++ doc/source/user_guide/ug_case_settings.rst | 2 + 14 files changed, 125 insertions(+), 54 deletions(-) create mode 100644 configuration/scripts/options/set_nml.gridc diff --git a/cicecore/cicedynB/analysis/ice_diagnostics.F90 b/cicecore/cicedynB/analysis/ice_diagnostics.F90 index d07cc0a93..520e72141 100644 --- a/cicecore/cicedynB/analysis/ice_diagnostics.F90 +++ b/cicecore/cicedynB/analysis/ice_diagnostics.F90 @@ -393,29 +393,41 @@ subroutine runtime_diags (dt) hmaxs = global_maxval(vice, distrb_info, lmask_s) ! 
maximum ice speed - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = sqrt(uvel(i,j,iblk)**2 & - + vvel(i,j,iblk)**2) - enddo - enddo - enddo - !$OMP END PARALLEL DO if (grid_ice == 'CD') then - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = max(sqrt(uvelE(i,j,iblk)**2 & - + vvelE(i,j,iblk)**2), & - sqrt(uvelN(i,j,iblk)**2 & - + vvelN(i,j,iblk)**2)) + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = max(sqrt(uvelE(i,j,iblk)**2 & + + vvelE(i,j,iblk)**2), & + sqrt(uvelN(i,j,iblk)**2 & + + vvelN(i,j,iblk)**2)) + enddo + enddo enddo + !$OMP END PARALLEL DO + elseif (grid_ice == 'C') then + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = sqrt(uvelE(i,j,iblk)**2 & + + vvelN(i,j,iblk)**2) + enddo + enddo enddo - enddo - !$OMP END PARALLEL DO + !$OMP END PARALLEL DO + else + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = sqrt(uvel(i,j,iblk)**2 & + + vvel(i,j,iblk)**2) + enddo + enddo + enddo + !$OMP END PARALLEL DO endif umaxn = global_maxval(work1, distrb_info, lmask_n) @@ -1790,7 +1802,10 @@ subroutine print_state(plabel,i,j,iblk) write(nu_diag,*) 'uvel(i,j)',uvel(i,j,iblk) write(nu_diag,*) 'vvel(i,j)',vvel(i,j,iblk) - if (grid_ice == 'CD') then + if (grid_ice == 'C') then + write(nu_diag,*) 'uvelE(i,j)',uvelE(i,j,iblk) + write(nu_diag,*) 'uvelN(i,j)',uvelN(i,j,iblk) + elseif (grid_ice == 'CD') then write(nu_diag,*) 'uvelE(i,j)',uvelE(i,j,iblk) write(nu_diag,*) 'vvelE(i,j)',vvelE(i,j,iblk) write(nu_diag,*) 'uvelN(i,j)',uvelN(i,j,iblk) @@ -1940,7 +1955,10 @@ subroutine print_points_state(plabel,ilabel) write(nu_diag,*) trim(llabel),'uvel=',uvel(i,j,iblk) write(nu_diag,*) trim(llabel),'vvel=',vvel(i,j,iblk) - if (grid_ice == 'CD') then + if (grid_ice == 'C') then + write(nu_diag,*) trim(llabel),'uvelE=',uvelE(i,j,iblk) + write(nu_diag,*) trim(llabel),'vvelN=',vvelN(i,j,iblk) + elseif (grid_ice == 'CD') then write(nu_diag,*) trim(llabel),'uvelE=',uvelE(i,j,iblk) write(nu_diag,*) trim(llabel),'vvelE=',vvelE(i,j,iblk) write(nu_diag,*) trim(llabel),'uvelN=',uvelN(i,j,iblk) diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90 index a9cf22529..d1fce0d67 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -380,7 +380,7 @@ subroutine init_hist (dt) f_sispeed = f_CMIP endif - if (grid_ice == 'CD') then + if (grid_ice == 'CD' .or. grid_ice == 'C') then f_uvelE = f_uvel f_vvelE = f_vvel f_icespdE = f_icespd @@ -1304,7 +1304,7 @@ subroutine init_hist (dt) select case (grid_ice) case('B') description = ", on U grid (NE corner values)" - case ('CD') + case ('CD','C') description = ", on T grid" end select @@ -4408,7 +4408,7 @@ subroutine accum_hist (dt) sig1 (:,:,iblk), & sig2 (:,:,iblk), & sigP (:,:,iblk)) - case('CD') + case('CD','C') call principal_stress (nx_block, ny_block, & stresspT (:,:,iblk), & stressmT (:,:,iblk), & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index 6ede5e667..2b735b71c 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -583,7 +583,7 @@ subroutine eap (dt) ! 
shift velocity components from CD grid locations (N, E) to B grid location (U) for transport ! commented out in order to focus on EVP for now within the cdgrid ! should be used when routine is ready -! if (grid_ice == 'CD') then +! if (grid_ice == 'CD' .or. grid_ice == 'C') then ! call grid_average_X2Y('E2US',uvelE,uvel) ! call grid_average_X2Y('N2US',vvelN,vvel) ! endif diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 1045cd14f..8bb35e960 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -218,7 +218,7 @@ subroutine evp (dt) allocate(fld2(nx_block,ny_block,2,max_blocks)) - if (grid_ice == 'CD') then + if (grid_ice == 'CD' .or. grid_ice == 'C') then allocate(zetax2T(nx_block,ny_block,max_blocks)) allocate(etax2T(nx_block,ny_block,max_blocks)) @@ -292,7 +292,7 @@ subroutine evp (dt) call grid_average_X2Y('S',ss_tltx,grid_ocn_dynu,ss_tltxU,'U') call grid_average_X2Y('S',ss_tlty,grid_ocn_dynv,ss_tltyU,'U') - if (grid_ice == 'CD') then + if (grid_ice == 'CD' .or. grid_ice == 'C') then call grid_average_X2Y('F',tmass,'T',emass,'E') call grid_average_X2Y('F',aice_init,'T', aie,'E') call grid_average_X2Y('F',tmass,'T',nmass,'N') @@ -324,7 +324,7 @@ subroutine evp (dt) call grid_average_X2Y('F',strairyT,'T',strairy,'U') endif - if (grid_ice == 'CD') then + if (grid_ice == 'CD' .or. grid_ice == 'C') then if (.not. calc_strair) then call grid_average_X2Y('F', strax, grid_atm_dynu, strairxN, 'N') call grid_average_X2Y('F', stray, grid_atm_dynv, strairyN, 'N') @@ -383,7 +383,7 @@ subroutine evp (dt) uvel (:,:,iblk), vvel (:,:,iblk), & Tbu (:,:,iblk)) - elseif (trim(grid_ice) == 'CD') then + elseif (trim(grid_ice) == 'CD' .or. grid_ice == 'C') then call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & icellt(iblk), icellu(iblk), & @@ -434,7 +434,7 @@ subroutine evp (dt) enddo ! iblk !$TCXOMP END PARALLEL DO - if (grid_ice == 'CD') then + if (grid_ice == 'CD' .or. grid_ice == 'C') then !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks @@ -535,7 +535,7 @@ subroutine evp (dt) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (grid_ice == 'CD') then + if (grid_ice == 'CD' .or. grid_ice == 'C') then call ice_timer_start(timer_bound) ! velocities may have changed in dyn_prep2 @@ -608,7 +608,7 @@ subroutine evp (dt) hwater(:,:,iblk), Tbu(:,:,iblk)) endif - case('CD') + case('CD','C') if ( seabed_stress_method == 'LKD' ) then @@ -731,7 +731,7 @@ subroutine evp (dt) enddo !$TCXOMP END PARALLEL DO - case('CD') + case('CD','C') !$TCXOMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -903,7 +903,7 @@ subroutine evp (dt) call ice_timer_stop(timer_evp_2d) deallocate(fld2) - if (grid_ice == 'CD') then + if (grid_ice == 'CD' .or. grid_ice == 'C') then deallocate(zetax2T, etax2T) endif @@ -999,7 +999,7 @@ subroutine evp (dt) enddo !$OMP END PARALLEL DO - if (grid_ice == 'CD') then + if (grid_ice == 'CD' .or. grid_ice == 'C') then !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -1054,7 +1054,7 @@ subroutine evp (dt) call grid_average_X2Y('F',work1,'U',strocnxT,'T') ! shift call grid_average_X2Y('F',work2,'U',strocnyT,'T') - if (grid_ice == 'CD') then + if (grid_ice == 'CD' .or. grid_ice == 'C') then call grid_average_X2Y('S',strintxE,'E',strintx,'U') ! diagnostic call grid_average_X2Y('S',strintyN,'N',strinty,'U') ! 
       diagnostic
       endif

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
index 48006fb6c..e4af6d432 100755
--- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
@@ -134,7 +134,7 @@ subroutine alloc_dyn_shared
          stat=ierr)
       if (ierr/=0) call abort_ice('(alloc_dyn_shared): Out of memory')

-      if (grid_ice == 'CD') then
+      if (grid_ice == 'CD' .or. grid_ice == 'C') then
          allocate( &
            uvelE_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep
            vvelE_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep
@@ -186,7 +186,7 @@ subroutine init_dyn (dt)

       allocate(fcor_blk(nx_block,ny_block,max_blocks))

-      if (grid_ice == 'CD') then
+      if (grid_ice == 'CD' .or. grid_ice == 'C') then
          allocate(fcorE_blk(nx_block,ny_block,max_blocks))
          allocate(fcorN_blk(nx_block,ny_block,max_blocks))
       endif
@@ -199,7 +199,7 @@ subroutine init_dyn (dt)
             ! velocity
             uvel(i,j,iblk) = c0    ! m/s
             vvel(i,j,iblk) = c0    ! m/s
-            if (grid_ice == 'CD') then ! extra velocity variables
+            if (grid_ice == 'CD' .or. grid_ice == 'C') then ! extra velocity variables
               uvelE(i,j,iblk) = c0
               vvelE(i,j,iblk) = c0
               uvelN(i,j,iblk) = c0
@@ -221,7 +221,7 @@ subroutine init_dyn (dt)
                fcor_blk(i,j,iblk) = c2*omega*sin(ULAT(i,j,iblk)) ! 1/s
             endif

-            if (grid_ice == 'CD') then
+            if (grid_ice == 'CD' .or. grid_ice == 'C') then

                if (trim(coriolis) == 'constant') then
                   fcorE_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. Hem; 1/s
@@ -250,7 +250,7 @@ subroutine init_dyn (dt)
             stress12_3(i,j,iblk) = c0
             stress12_4(i,j,iblk) = c0

-            if (grid_ice == 'CD') then
+            if (grid_ice == 'CD' .or. grid_ice == 'C') then
               stresspT  (i,j,iblk) = c0
               stressmT  (i,j,iblk) = c0
               stress12T (i,j,iblk) = c0
@@ -1499,7 +1499,7 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, &
             ! convert quantities to U-location
             Tbu(i,j) = grid_neighbor_max(Tbt, i, j, 'U')
          enddo ! ij
-      case('CD')
+      case('CD','C')
         if(present(Tbe)    .and. present(TbN)    .and. &
            present(icelle) .and. present(icelln) .and. &
            present(indxei) .and. present(indxej) .and. &
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
index 9d4d220fe..5806b0f00 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
@@ -683,7 +683,7 @@ subroutine implicit_solver (dt)
       ! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport
       ! commented out in order to focus on EVP for now within the cdgrid
       ! should be used when routine is ready
-!      if (grid_ice == 'CD') then
+!      if (grid_ice == 'CD' .or. grid_ice == 'C') then
!         call grid_average_X2Y('E2US',uvelE,uvel)
!         call grid_average_X2Y('N2US',vvelN,vvel)
!      endif
diff --git a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 b/cicecore/cicedynB/dynamics/ice_transport_driver.F90
index 01eb6b989..dc6425adb 100644
--- a/cicecore/cicedynB/dynamics/ice_transport_driver.F90
+++ b/cicecore/cicedynB/dynamics/ice_transport_driver.F90
@@ -538,7 +538,7 @@ subroutine transport_remap (dt)
    !-------------------------------------------------------------------
    ! Main remapping routine: Step ice area and tracers forward in time.
    !-------------------------------------------------------------------
-      if (grid_ice == 'CD') then
+      if (grid_ice == 'CD' .or. grid_ice == 'C') then
         call horizontal_remap (dt, ntrace, &
                                uvel    (:,:,:), vvel      (:,:,:), &
                                aim   (:,:,:,:), trm   (:,:,:,:,:), &
@@ -771,7 +771,7 @@ subroutine transport_upwind (dt)
    !-------------------------------------------------------------------
    ! Average corner velocities to edges.
    !-------------------------------------------------------------------
-      if (grid_ice == 'CD') then
+      if (grid_ice == 'CD' .or. grid_ice == 'C') then
         uee(:,:,:)=uvelE(:,:,:)
         vnn(:,:,:)=vvelN(:,:,:)
       else
diff --git a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 b/cicecore/cicedynB/dynamics/ice_transport_remap.F90
index 662fa7e60..922b3f06b 100644
--- a/cicecore/cicedynB/dynamics/ice_transport_remap.F90
+++ b/cicecore/cicedynB/dynamics/ice_transport_remap.F90
@@ -670,7 +670,7 @@ subroutine horizontal_remap (dt, ntrace, &
         enddo

      if (l_fixed_area) then
-        if (grid_ice == 'CD') then ! velocities are already on the center
+        if (grid_ice == 'CD' .or. grid_ice == 'C') then ! velocities are already on the center
           do j = jlo, jhi
           do i = ilo-1, ihi
              edgearea_e(i,j) = uvelE(i,j,iblk) * HTE(i,j,iblk) * dt
diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90
index 50f383568..72d9ea972 100644
--- a/cicecore/cicedynB/general/ice_flux.F90
+++ b/cicecore/cicedynB/general/ice_flux.F90
@@ -580,7 +580,7 @@ subroutine alloc_flux
          stat=ierr)
       if (ierr/=0) call abort_ice('(alloc_flux): Out of memory')

-      if (grid_ice == "CD") &
+      if (grid_ice == "CD" .or. grid_ice == "C") &
         allocate( &
         taubxN   (nx_block,ny_block,max_blocks), & ! seabed stress (x) at N points (N/m^2)
         taubyN   (nx_block,ny_block,max_blocks), & ! seabed stress (y) at N points (N/m^2)
@@ -632,7 +632,7 @@ subroutine init_coupler_flux
       use ice_flux_bgc, only: flux_bio_atm, flux_bio, faero_atm, fiso_atm, &
           fnit, famm, fsil, fdmsp, fdms, fhum, fdust, falgalN, &
           fdoc, fdon, fdic, ffed, ffep
-      use ice_grid, only: bathymetry, grid_ice
+      use ice_grid, only: bathymetry

       integer (kind=int_kind) :: n

diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90
index 2c2327dd8..a20496bf7 100644
--- a/cicecore/cicedynB/general/ice_init.F90
+++ b/cicecore/cicedynB/general/ice_init.F90
@@ -2118,6 +2118,7 @@ subroutine input_data
       endif

       if (grid_ice /= 'B' .and. &
+          grid_ice /= 'C' .and. &
           grid_ice /= 'CD' ) then
          if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown grid_ice=',trim(grid_ice)
          abort_list = trim(abort_list)//":26"
@@ -2457,7 +2458,7 @@ subroutine init_state
                          vicen, vsnon, &
                          ntrcr, trcrn)

-      if (trim(grid_ice) == 'CD') then
+      if (grid_ice == 'CD' .or. grid_ice == 'C') then

         ! move from B-grid to CD-grid for boxslotcyl test
         if (trim(ice_data_type) == 'boxslotcyl') then
@@ -2600,13 +2601,15 @@ subroutine set_state_var (nx_block, ny_block, &
         k           , & ! ice layer index
         n           , & ! thickness category index
         it          , & ! tracer index
+        iedge       , & ! edge around big block
+        jedge       , & ! edge around big block
         icells          ! number of cells initialized with ice

      integer (kind=int_kind), dimension(nx_block*ny_block) :: &
         indxi, indxj    ! compressed indices for cells with aicen > puny

      real (kind=dbl_kind) :: &
-        Tsfc, sum, hbar, puny, rhos, Lfresh, rad_to_deg, rsnw_fall
+        Tsfc, sum, hbar, puny, rhos, Lfresh, rad_to_deg, rsnw_fall, dist_ratio

      real (kind=dbl_kind), dimension(ncat) :: &
         ainit, hinit    ! initial area, thickness
@@ -2687,7 +2690,10 @@ subroutine set_state_var (nx_block, ny_block, &

      if (trim(ice_ic) == 'default') then

-        if (trim(ice_data_type) == 'box2001' .or. trim(ice_data_type) == 'smallblock') then
+        if (trim(ice_data_type) == 'box2001'    .or. &
+            trim(ice_data_type) == 'smallblock' .or. &
+            trim(ice_data_type) == 'bigblock'   .or. &
+            trim(ice_data_type) == 'gauss') then

            hbar = c2  ! initial ice thickness
            do n = 1, ncat
@@ -2779,6 +2785,23 @@ subroutine set_state_var (nx_block, ny_block, &
               enddo
            enddo

+        else if (trim(ice_data_type) == 'bigblock' .or. &
+                 trim(ice_data_type) == 'gauss') then
+           ! ice in 90% of domain, not at edges
+           icells = 0
+           iedge = int(real(nx_global,kind=dbl_kind) * 0.05) + 1
+           jedge = int(real(ny_global,kind=dbl_kind) * 0.05) + 1
+           do j = jlo, jhi
+           do i = ilo, ihi
+              if ((iglob(i) > iedge .and. iglob(i) < nx_global-iedge+1) .and. &
+                  (jglob(j) > jedge .and. jglob(j) < ny_global-jedge+1)) then
+                 icells = icells + 1
+                 indxi(icells) = i
+                 indxj(icells) = j
+              endif
+           enddo
+           enddo
+
         else ! default behavior

         !-----------------------------------------------------------------
@@ -2848,6 +2871,17 @@ subroutine set_state_var (nx_block, ny_block, &
                  endif
                  vicen(i,j,n) = hinit(n) * aicen(i,j,n) ! m

+              elseif (trim(ice_data_type) == 'gauss') then
+                 if (hinit(n) > c0) then
+                    dist_ratio = 8._dbl_kind * &
+                                 sqrt((real(iglob(i),kind=dbl_kind)-real(nx_global+1,kind=dbl_kind)/c2)**2 + &
+                                      (real(jglob(j),kind=dbl_kind)-real(ny_global+1,kind=dbl_kind)/c2)**2) / &
+                                 sqrt((real(nx_global,kind=dbl_kind))**2 + &
+                                      (real(ny_global,kind=dbl_kind))**2)
+                    aicen(i,j,n) = ainit(n) * exp(-dist_ratio)
+                 endif
+                 vicen(i,j,n) = hinit(n) * aicen(i,j,n) ! m
+
               else ! default or uniform

                  vicen(i,j,n) = hinit(n) * ainit(n) ! m
diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90
index ecca13bd9..0d0a59d98 100644
--- a/cicecore/cicedynB/infrastructure/ice_grid.F90
+++ b/cicecore/cicedynB/infrastructure/ice_grid.F90
@@ -284,7 +284,7 @@ subroutine alloc_grid
          stat=ierr)
       if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory')

-      if (grid_ice == 'CD') then
+      if (grid_ice == 'CD' .or. grid_ice == 'C') then
         allocate( &
            ratiodxN (nx_block,ny_block,max_blocks), &
            ratiodyE (nx_block,ny_block,max_blocks), &
@@ -551,7 +551,7 @@ subroutine init_grid2
          enddo
       enddo

-      if (grid_ice == 'CD') then
+      if (grid_ice == 'CD' .or. grid_ice == 'C') then
         do j = jlo, jhi
         do i = ilo, ihi
            ratiodxN (i,j,iblk) = - dxn(i+1,j  ,iblk) / dxn(i,j,iblk)
diff --git a/configuration/scripts/options/set_nml.gridc b/configuration/scripts/options/set_nml.gridc
new file mode 100644
index 000000000..a04fab4fd
--- /dev/null
+++ b/configuration/scripts/options/set_nml.gridc
@@ -0,0 +1,2 @@
+grid_ice = 'C'
+
diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts
index afc91fa4f..8d86544fa 100644
--- a/configuration/scripts/tests/gridsys_suite.ts
+++ b/configuration/scripts/tests/gridsys_suite.ts
@@ -28,3 +28,18 @@ smoke   gbox80  4x2  boxsymne,kmtislands,gridcd
 smoke   gbox80  8x1  boxislandsn,gridcd
 smoke   gbox80  4x2  boxislandse,gridcd
 smoke   gbox80  2x4  boxislandsne,gridcd
+
+smoke   gx3     8x2  diag1,run5day,gridc
+restart gx3     4x2  debug,diag1,gridc
+smoke   gbox80  1x1  box2001,gridc
+smoke   gbox80  1x1  boxslotcyl,gridc
+smoke   gbox80  2x4  boxnodyn,gridc
+smoke   gbox80  2x2  boxsymn,gridc
+smoke   gbox80  4x2  boxsyme,gridc
+smoke   gbox80  4x1  boxsymne,gridc
+smoke   gbox80  2x2  boxsymn,kmtislands,gridc
+smoke   gbox80  4x1  boxsyme,kmtislands,gridc
+smoke   gbox80  4x2  boxsymne,kmtislands,gridc
+smoke   gbox80  8x1  boxislandsn,gridc
+smoke   gbox80  4x2  boxislandse,gridc
+smoke   gbox80  2x4  boxislandsne,gridc
diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst
index 3f70ebb02..16a35a92f 100644
--- a/doc/source/user_guide/ug_case_settings.rst
+++ b/doc/source/user_guide/ug_case_settings.rst
@@ -576,8 +576,10 @@ forcing_nml
    "``fyear_init``", "integer", "first year of atmospheric forcing data", "1900"
    "``highfreq``", "logical", "high-frequency atmo coupling", "``.false.``"
    "``ice_data_type``", "``boxslotcyl``", "initialize ice concentration and velocity for :ref:`boxslotcyl` test (:cite:`Zalesak79`)", "``default``"
+   "", "``bigblock``", "uniform ice block covering about 90 percent of the area in center of domain", ""
    "", "``box2001``", "initialize ice concentration for :ref:`box2001` test (:cite:`Hunke01`)", ""
    "", "``default``", "ice dependent on latitude and ocean temperature", ""
+   "", "``gauss``", "gauss distributed ice block covering about 90 percent of the area in center of domain", ""
    "", "``smallblock``", "uniform 2x2 block ice concentration and thickness in center of domain", ""
    "", "``uniform``", "uniform ice concentration and thickness across domain", ""
    "``iceruf``", "real", "ice surface roughness at atmosphere interface", "0.0005"
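For reference, the 'gauss' profile added above reduces to a few lines of
arithmetic; the following self-contained sketch (hypothetical 80x80 global
grid and unit ainit are assumptions; the names mirror the diff) shows the
concentration decaying from the domain center with an e-folding scale set by
the domain diagonal:

   program gauss_ic_sketch
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      integer, parameter :: nx_global = 80, ny_global = 80  ! assumed test domain
      real (kind=dbl_kind), parameter :: c2 = 2.0_dbl_kind
      real (kind=dbl_kind) :: ainit, dist_ratio, aicen
      integer :: iglob, jglob
      ainit = 1.0_dbl_kind          ! hypothetical category area fraction
      iglob = 20 ; jglob = 40       ! sample global grid indices
      dist_ratio = 8._dbl_kind * &
                   sqrt((real(iglob,kind=dbl_kind)-real(nx_global+1,kind=dbl_kind)/c2)**2 + &
                        (real(jglob,kind=dbl_kind)-real(ny_global+1,kind=dbl_kind)/c2)**2) / &
                   sqrt(real(nx_global,kind=dbl_kind)**2 + real(ny_global,kind=dbl_kind)**2)
      aicen = ainit * exp(-dist_ratio)  ! = ainit at the center, near zero at the edges
      print *, dist_ratio, aicen       ! ~1.45 and ~0.23 for this sample point
   end program gauss_ic_sketch
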
From c22f906f2162d3e4b6d70aea657665c9a0845191 Mon Sep 17 00:00:00 2001
From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com>
Date: Mon, 7 Feb 2022 12:56:04 -0500
Subject: [PATCH 067/109] calls for stepu,v_Cgrid and avg of uvelE at N point
 and vvelN at E point. (#54)

* Calls for stepu,v_Cgrid + avg of uE(vN) at N(E)

* corrected typo

* minor bug corrected...it compiles
---
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 104 +++++++++++++++------
 1 file changed, 73 insertions(+), 31 deletions(-)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index 8bb35e960..79709f9da 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -42,7 +42,8 @@ module ice_dyn_evp
       field_type_scalar, field_type_vector
  use ice_constants, only: c0, p027, p055, p111, p166, &
       p222, p25, p333, p5, c1
- use ice_dyn_shared, only: stepu, step_vel, dyn_prep1, dyn_prep2, dyn_finish, &
+ use ice_dyn_shared, only: stepu, step_vel, stepu_Cgrid, stepv_Cgrid, &
+      dyn_prep1, dyn_prep2, dyn_finish, &
       ndte, yield_curve, ecci, denom1, arlx1i, fcor_blk, fcorE_blk, fcorN_blk, &
       uvel_init, vvel_init, uvelE_init, vvelE_init, uvelN_init, vvelN_init, &
       seabed_stress_factor_LKD, seabed_stress_factor_prob, seabed_stress_method, &
@@ -733,6 +734,11 @@ subroutine evp (dt)

      case('CD','C')

+        if (grid_ice == 'C') then
+           call grid_average_X2Y('A',uvelE,'E',uvelN,'N')
+           call grid_average_X2Y('A',vvelN,'N',vvelE,'E')
+        endif
+
      !$TCXOMP PARALLEL DO PRIVATE(iblk)
      do iblk = 1, nblocks
@@ -832,40 +838,76 @@ subroutine evp (dt)
      enddo
      !$TCXOMP END PARALLEL DO

+      if (grid_ice == 'CD') then
+
      !$TCXOMP PARALLEL DO PRIVATE(iblk)
-      do iblk = 1, nblocks
+         do iblk = 1, nblocks
+
+            call step_vel (nx_block,             ny_block,             & ! E point
+                           icelle        (iblk), Cdn_ocn   (:,:,iblk), &
+                           indxei      (:,iblk), indxej      (:,iblk), &
+                           ksub,                 aiE       (:,:,iblk), &
+                           uocnE     (:,:,iblk), vocnE     (:,:,iblk), &
+                           waterxE   (:,:,iblk), wateryE   (:,:,iblk), &
+                           forcexE   (:,:,iblk), forceyE   (:,:,iblk), &
+                           emassdti  (:,:,iblk), fmE       (:,:,iblk), &
+                           strintxE  (:,:,iblk), strintyE  (:,:,iblk), &
+                           taubxE    (:,:,iblk), taubyE    (:,:,iblk), &
+                           uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), &
+                           uvelE     (:,:,iblk), vvelE     (:,:,iblk), &
+                           TbE       (:,:,iblk))
+
+            call step_vel (nx_block,             ny_block,             & ! N point
+                           icelln        (iblk), Cdn_ocn   (:,:,iblk), &
+                           indxni      (:,iblk), indxnj      (:,iblk), &
+                           ksub,                 aiN       (:,:,iblk), &
+                           uocnN     (:,:,iblk), vocnN     (:,:,iblk), &
+                           waterxN   (:,:,iblk), wateryN   (:,:,iblk), &
+                           forcexN   (:,:,iblk), forceyN   (:,:,iblk), &
+                           nmassdti  (:,:,iblk), fmN       (:,:,iblk), &
+                           strintxN  (:,:,iblk), strintyN  (:,:,iblk), &
+                           taubxN    (:,:,iblk), taubyN    (:,:,iblk), &
+                           uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), &
+                           uvelN     (:,:,iblk), vvelN     (:,:,iblk), &
+                           TbN       (:,:,iblk))
+
+         enddo
+         !$TCXOMP END PARALLEL DO

-         call step_vel (nx_block,             ny_block,             & ! E point
-                        icelle        (iblk), Cdn_ocn   (:,:,iblk), &
-                        indxei      (:,iblk), indxej      (:,iblk), &
-                        ksub,                 aiE       (:,:,iblk), &
-                        uocnE     (:,:,iblk), vocnE     (:,:,iblk), &
-                        waterxE   (:,:,iblk), wateryE   (:,:,iblk), &
-                        forcexE   (:,:,iblk), forceyE   (:,:,iblk), &
-                        emassdti  (:,:,iblk), fmE       (:,:,iblk), &
-                        strintxE  (:,:,iblk), strintyE  (:,:,iblk), &
-                        taubxE    (:,:,iblk), taubyE    (:,:,iblk), &
-                        uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), &
-                        uvelE     (:,:,iblk), vvelE     (:,:,iblk), &
-                        TbE       (:,:,iblk))
-
-         call step_vel (nx_block,             ny_block,             & ! N point
-                        icelln        (iblk), Cdn_ocn   (:,:,iblk), &
-                        indxni      (:,iblk), indxnj      (:,iblk), &
-                        ksub,                 aiN       (:,:,iblk), &
-                        uocnN     (:,:,iblk), vocnN     (:,:,iblk), &
-                        waterxN   (:,:,iblk), wateryN   (:,:,iblk), &
-                        forcexN   (:,:,iblk), forceyN   (:,:,iblk), &
-                        nmassdti  (:,:,iblk), fmN       (:,:,iblk), &
-                        strintxN  (:,:,iblk), strintyN  (:,:,iblk), &
-                        taubxN    (:,:,iblk), taubyN    (:,:,iblk), &
-                        uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), &
-                        uvelN     (:,:,iblk), vvelN     (:,:,iblk), &
-                        TbN       (:,:,iblk))
+      elseif (grid_ice == 'C') then

-      enddo
+         !$TCXOMP PARALLEL DO PRIVATE(iblk)
+         do iblk = 1, nblocks
+
+            call stepu_Cgrid (nx_block,           ny_block,           & ! u, E point
+                              icelle      (iblk), Cdn_ocn (:,:,iblk), &
+                              indxei    (:,iblk), indxej    (:,iblk), &
+                              ksub,               aiE     (:,:,iblk), &
+                              uocnE   (:,:,iblk), vocnE   (:,:,iblk), &
+                              waterxE (:,:,iblk), forcexE (:,:,iblk), &
+                              emassdti(:,:,iblk), fmE     (:,:,iblk), &
+                              strintxE(:,:,iblk), taubxE  (:,:,iblk), &
+                              uvelE_init(:,:,iblk),                   &
+                              uvelE   (:,:,iblk), vvelE   (:,:,iblk), &
+                              TbE     (:,:,iblk))
+
+            call stepv_Cgrid (nx_block,           ny_block,           & ! v, N point
+                              icelln      (iblk), Cdn_ocn (:,:,iblk), &
+                              indxni    (:,iblk), indxnj    (:,iblk), &
+                              ksub,               aiN     (:,:,iblk), &
+                              uocnN   (:,:,iblk), vocnN   (:,:,iblk), &
+                              wateryN (:,:,iblk), forceyN (:,:,iblk), &
+                              nmassdti(:,:,iblk), fmN     (:,:,iblk), &
+                              strintyN(:,:,iblk), taubyN  (:,:,iblk), &
+                              vvelN_init(:,:,iblk),                   &
+                              uvelN   (:,:,iblk), vvelN   (:,:,iblk), &
+                              TbN     (:,:,iblk))
+
+         enddo
      !$TCXOMP END PARALLEL DO
-
+
+      endif
+
      call ice_timer_start(timer_bound)
      call stack_velocity_field(uvelN, vvelN, fld2)
      call ice_HaloUpdate (fld2, halo_info, &
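Schematically, the new grid_average_X2Y('A',...) calls fill in the velocity
component the C grid does not carry at a given point: uvelE is averaged to N
points and vvelN to E points. The 4-point stencil below is an illustrative
assumption (the area weights and land masks that the 'A' averager applies are
omitted); it is a sketch, not the exact grid_average_X2Y implementation:

   program e2n_average_sketch
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      integer, parameter :: nx = 5, ny = 5      ! hypothetical block size
      real (kind=dbl_kind) :: uvelE(nx,ny), uvelN(nx,ny)
      integer :: i, j
      uvelE = 1.0_dbl_kind                      ! uniform test field
      uvelN = 0.0_dbl_kind
      do j = 1, ny-1
         do i = 2, nx
            ! an N point sits between the four nearest E points
            uvelN(i,j) = 0.25_dbl_kind * (uvelE(i-1,j  ) + uvelE(i,j  ) + &
                                          uvelE(i-1,j+1) + uvelE(i,j+1))
         enddo
      enddo
      print *, uvelN(3,3)                       ! 1.0 for a uniform field
   end program e2n_average_sketch
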
From e01b2e82295d0753bb7ff2fe235948adae4787a1 Mon Sep 17 00:00:00 2001
From: Tony Craig
Date: Mon, 7 Feb 2022 15:07:31 -0800
Subject: [PATCH 068/109] Modify uniform atm/ice stress formulation, multiply
 by aiu (#55)

* - Modify uniform atm/ice stress formulation, multiply by aiu
  - Add channel ice_data_type initialization

* Update aice/aiu application of air/ice stress in box and uniform forcing
---
 cicecore/cicedynB/general/ice_forcing.F90  | 36 +++++++++++++---------
 cicecore/cicedynB/general/ice_init.F90     | 14 +++++++++
 doc/source/user_guide/ug_case_settings.rst |  1 +
 3 files changed, 36 insertions(+), 15 deletions(-)

diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90
index 1f88b4c14..c53f944ac 100755
--- a/cicecore/cicedynB/general/ice_forcing.F90
+++ b/cicecore/cicedynB/general/ice_forcing.F90
@@ -19,7 +19,9 @@ module ice_forcing

       use ice_kinds_mod
+      use ice_boundary, only: ice_HaloUpdate
       use ice_blocks, only: nx_block, ny_block
+      use ice_domain, only: halo_info
       use ice_domain_size, only: ncat, max_blocks, nx_global, ny_global
       use ice_communicate, only: my_task, master_task
       use ice_calendar, only: istep, istep1, &
@@ -585,8 +587,7 @@ subroutine get_forcing_atmo

 ! Get atmospheric forcing data and interpolate as necessary

       use ice_blocks, only: block, get_block
-      use ice_boundary, only: ice_HaloUpdate
-      use ice_domain, only: nblocks, blocks_ice, halo_info
+      use ice_domain, only: nblocks, blocks_ice
       use ice_flux, only: Tair, fsw, flw, frain, fsnow, Qa, rhoa, &
           uatm, vatm, strax, stray, zlvl, wind, swvdr, swvdf, swidr, swidf, &
           potT, sst
@@ -653,6 +654,18 @@ subroutine get_forcing_atmo
          call oned_data
       elseif (trim(atm_data_type) == 'box2001') then
          call box2001_data_atm
+      elseif (trim(atm_data_type) == 'uniform_northeast') then
+         call uniform_data_atm('NE')
+      elseif (trim(atm_data_type) == 'uniform_north') then
+         call uniform_data_atm('N')
+      elseif (trim(atm_data_type) == 'uniform_east') then
+         call uniform_data_atm('E')
+      elseif (trim(atm_data_type) == 'uniform_south') then
+         call uniform_data_atm('S')
+      elseif (trim(atm_data_type) == 'uniform_west') then
+         call uniform_data_atm('W')
+      elseif (trim(atm_data_type) == 'calm') then
+         call uniform_data_atm('N',c0)   ! direction does not matter when c0
       elseif (trim(atm_data_type) == 'hycom') then
          call hycom_atm_data
!      elseif (trim(atm_data_type) == 'uniform_northeast') then
@@ -5305,7 +5318,6 @@ subroutine box2001_data_atm

       use ice_calendar, only: timesecs
       use ice_blocks, only: nx_block, ny_block, nghost
       use ice_flux, only: uatm, vatm, wind, rhoa, strax, stray
-      use ice_grid, only: uvm, grid_average_X2Y
       use ice_state, only: aice

       ! local parameters

       integer (kind=int_kind) :: &
          iblk, i,j           ! loop indices

-      real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: &
-         aiu                 ! ice fraction on u-grid

       real (kind=dbl_kind) :: &
          secday, pi , puny, period, pi2, tau
@@ -5326,8 +5335,6 @@ subroutine box2001_data_atm
       call icepack_query_parameters(pi_out=pi, pi2_out=pi2, puny_out=puny)
       call icepack_query_parameters(secday_out=secday)

-      call grid_average_X2Y('F',aice,'T',aiu,'U')

       period = c4*secday

       do iblk = 1, nblocks
@@ -5359,8 +5366,9 @@ subroutine box2001_data_atm
          ! wind stress
          wind(i,j,iblk) = sqrt(uatm(i,j,iblk)**2 + vatm(i,j,iblk)**2)
          tau = rhoa(i,j,iblk) * 0.0012_dbl_kind * wind(i,j,iblk)
-         strax(i,j,iblk) = aiu(i,j,iblk) * tau * uatm(i,j,iblk)
-         stray(i,j,iblk) = aiu(i,j,iblk) * tau * vatm(i,j,iblk)
+
+         strax(i,j,iblk) = aice(i,j,iblk) * tau * uatm(i,j,iblk)
+         stray(i,j,iblk) = aice(i,j,iblk) * tau * vatm(i,j,iblk)

! initialization test
! Diagonal wind vectors 1
@@ -5412,9 +5420,6 @@ subroutine box2001_data_ocn
       integer (kind=int_kind) :: &
          iblk, i,j           ! loop indices

-      real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: &
-         aiu                 ! ice fraction on u-grid

       real (kind=dbl_kind) :: &
          secday, pi , puny, period, pi2, tau
@@ -5451,6 +5456,7 @@ subroutine uniform_data_atm(dir,spd)
       use ice_domain_size, only: max_blocks
       use ice_blocks, only: nx_block, ny_block, nghost
       use ice_flux, only: uatm, vatm, wind, rhoa, strax, stray
+      use ice_state, only: aice

       character(len=*), intent(in) :: dir
       real(kind=dbl_kind), intent(in), optional :: spd ! velocity
@@ -5503,8 +5509,8 @@ subroutine uniform_data_atm(dir,spd)
          ! wind stress
          wind(i,j,iblk) = sqrt(uatm(i,j,iblk)**2 + vatm(i,j,iblk)**2)
          tau = rhoa(i,j,iblk) * 0.0012_dbl_kind * wind(i,j,iblk)
-         strax(i,j,iblk) = tau * uatm(i,j,iblk)
-         stray(i,j,iblk) = tau * vatm(i,j,iblk)
+         strax(i,j,iblk) = aice(i,j,iblk) * tau * uatm(i,j,iblk)
+         stray(i,j,iblk) = aice(i,j,iblk) * tau * vatm(i,j,iblk)

       enddo
       enddo
diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90
index a20496bf7..626b9fad9 100644
--- a/cicecore/cicedynB/general/ice_init.F90
+++ b/cicecore/cicedynB/general/ice_init.F90
@@ -2692,6 +2692,7 @@ subroutine set_state_var (nx_block, ny_block, &

         if (trim(ice_data_type) == 'box2001'    .or. &
             trim(ice_data_type) == 'smallblock' .or. &
+            trim(ice_data_type) == 'channel'    .or. &
             trim(ice_data_type) == 'bigblock'   .or. &
             trim(ice_data_type) == 'gauss') then

@@ -2784,6 +2785,19 @@ subroutine set_state_var (nx_block, ny_block, &
              endif
           enddo
           enddo
+
+        else if (trim(ice_data_type) == 'channel') then
+           ! channel ice in center of domain in i direction
+           icells = 0
+           do j = jlo, jhi
+           do i = ilo, ihi
+              if (jglob(j) > ny_global/4 .and. jglob(j) < 3*ny_global/4) then
+                 icells = icells + 1
+                 indxi(icells) = i
+                 indxj(icells) = j
+              endif
+           enddo
+           enddo

         else if (trim(ice_data_type) == 'bigblock' .or. &
                  trim(ice_data_type) == 'gauss') then
diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst
index 16a35a92f..9c289f35d 100644
--- a/doc/source/user_guide/ug_case_settings.rst
+++ b/doc/source/user_guide/ug_case_settings.rst
@@ -578,6 +578,7 @@ forcing_nml
    "``ice_data_type``", "``boxslotcyl``", "initialize ice concentration and velocity for :ref:`boxslotcyl` test (:cite:`Zalesak79`)", "``default``"
    "", "``bigblock``", "uniform ice block covering about 90 percent of the area in center of domain", ""
    "", "``box2001``", "initialize ice concentration for :ref:`box2001` test (:cite:`Hunke01`)", ""
+   "", "``channel``", "uniform block ice concentration and thickness in i-direction in 50% of domain in j-direction", ""
    "", "``default``", "ice dependent on latitude and ocean temperature", ""
    "", "``gauss``", "gauss distributed ice block covering about 90 percent of the area in center of domain", ""
    "", "``smallblock``", "uniform 2x2 block ice concentration and thickness in center of domain", ""
    "", "``uniform``", "uniform ice concentration and thickness across domain", ""
    "``iceruf``", "real", "ice surface roughness at atmosphere interface", "0.0005"
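In scalar form, the stress change above is the standard quadratic bulk
formula, now weighted by the local ice concentration rather than a U-cell
average. A minimal sketch (the 0.0012 drag factor matches the diff; the wind
and air-density values are hypothetical):

   program bulk_drag_sketch
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind) :: uatm, vatm, rhoa, aice, wind, tau, strax, stray
      uatm = 5.0_dbl_kind           ! m/s, assumed eastward wind
      vatm = 0.0_dbl_kind           ! m/s
      rhoa = 1.3_dbl_kind           ! kg/m^3, assumed air density
      aice = 0.8_dbl_kind           ! ice concentration
      wind = sqrt(uatm**2 + vatm**2)
      tau  = rhoa * 0.0012_dbl_kind * wind
      strax = aice * tau * uatm     ! N/m^2, x stress on the ice
      stray = aice * tau * vatm     ! N/m^2, y stress on the ice
      print *, strax, stray
   end program bulk_drag_sketch
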
Bailey" Date: Tue, 8 Feb 2022 18:54:51 -0700 Subject: [PATCH 069/109] Restarts for C and D (#56) --- .../infrastructure/ice_restart_driver.F90 | 115 ++++++++++++++++-- .../io/io_netcdf/ice_restart.F90 | 19 ++- .../infrastructure/io/io_pio2/ice_restart.F90 | 22 +++- 3 files changed, 138 insertions(+), 18 deletions(-) diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index e4f5a89e9..5595d0bf9 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -21,6 +21,7 @@ module ice_restart_driver use ice_arrays_column, only: oceanmixed_ice use ice_constants, only: c0, c1, p5, & field_loc_center, field_loc_NEcorner, & + field_loc_Eface, field_loc_Nface, & field_type_scalar, field_type_vector use ice_restart_shared, only: restart_dir, pointer_file, & runid, use_restart_time, lenstr, restart_coszen @@ -54,15 +55,16 @@ subroutine dumpfile(filename_spec) use ice_domain, only: nblocks use ice_domain_size, only: nilyr, nslyr, ncat, max_blocks use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, iceumask, & + strocnxT, strocnyT, sst, frzmlt, iceumask, iceemask, icenmask, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & stresspT, stressmT, stress12T, & - stresspU, stressmU, stress12U + stresspU, stressmU, stress12U use ice_flux, only: coszen use ice_grid, only: grid_ice - use ice_state, only: aicen, vicen, vsnon, trcrn, uvel, vvel + use ice_state, only: aicen, vicen, vsnon, trcrn, uvel, vvel, & + uvelE, vvelE, uvelN, vvelN character(len=char_len_long), intent(in), optional :: filename_spec @@ -130,6 +132,18 @@ subroutine dumpfile(filename_spec) call write_restart_field(nu_dump,0,uvel,'ruf8','uvel',1,diag) call write_restart_field(nu_dump,0,vvel,'ruf8','vvel',1,diag) + if (grid_ice == 'CD') then + call write_restart_field(nu_dump,0,uvelE,'ruf8','uvelE',1,diag) + call write_restart_field(nu_dump,0,vvelE,'ruf8','vvelE',1,diag) + call write_restart_field(nu_dump,0,uvelN,'ruf8','uvelN',1,diag) + call write_restart_field(nu_dump,0,vvelN,'ruf8','vvelN',1,diag) + endif + + if (grid_ice == 'C') then + call write_restart_field(nu_dump,0,uvelE,'ruf8','uvelE',1,diag) + call write_restart_field(nu_dump,0,vvelN,'ruf8','vvelN',1,diag) + endif + !----------------------------------------------------------------- ! radiation fields !----------------------------------------------------------------- @@ -167,7 +181,7 @@ subroutine dumpfile(filename_spec) call write_restart_field(nu_dump,0,stress12_2,'ruf8','stress12_2',1,diag) call write_restart_field(nu_dump,0,stress12_4,'ruf8','stress12_4',1,diag) - if (grid_ice == 'CD') then + if (grid_ice == 'CD' .or. grid_ice == 'C') then call write_restart_field(nu_dump,0,stresspT ,'ruf8','stresspT' ,1,diag) call write_restart_field(nu_dump,0,stressmT ,'ruf8','stressmT' ,1,diag) call write_restart_field(nu_dump,0,stress12T,'ruf8','stress12T',1,diag) @@ -192,6 +206,34 @@ subroutine dumpfile(filename_spec) !$OMP END PARALLEL DO call write_restart_field(nu_dump,0,work1,'ruf8','iceumask',1,diag) + if (grid_ice == 'CD' .or. 
grid_ice == 'C') then + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + if (icenmask(i,j,iblk)) work1(i,j,iblk) = c1 + enddo + enddo + enddo + !$OMP END PARALLEL DO + call write_restart_field(nu_dump,0,work1,'ruf8','icenmask',1,diag) + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + if (iceemask(i,j,iblk)) work1(i,j,iblk) = c1 + enddo + enddo + enddo + !$OMP END PARALLEL DO + call write_restart_field(nu_dump,0,work1,'ruf8','iceemask',1,diag) + + endif + ! for mixed layer model if (oceanmixed_ice) then call write_restart_field(nu_dump,0,sst,'ruf8','sst',1,diag) @@ -215,7 +257,7 @@ subroutine restartfile (ice_ic) use ice_domain_size, only: nilyr, nslyr, ncat, & max_blocks use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, iceumask, & + strocnxT, strocnyT, sst, frzmlt, iceumask, iceemask, icenmask, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & @@ -225,6 +267,7 @@ subroutine restartfile (ice_ic) use ice_grid, only: tmask, grid_type, grid_ice use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & aice0, aicen, vicen, vsnon, trcrn, aice_init, uvel, vvel, & + uvelE, vvelE, uvelN, vvelN, & trcr_base, nt_strata, n_trcr_strata character (*), optional :: ice_ic @@ -314,6 +357,24 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,vvel,'ruf8', & 'vvel',1,diag,field_loc_NEcorner, field_type_vector) + if (grid_ice == 'CD') then + call read_restart_field(nu_restart,0,uvelE,'ruf8', & + 'uvelE',1,diag,field_loc_Eface, field_type_vector) + call read_restart_field(nu_restart,0,vvelE,'ruf8', & + 'vvelE',1,diag,field_loc_Eface, field_type_vector) + call read_restart_field(nu_restart,0,uvelN,'ruf8', & + 'uvelN',1,diag,field_loc_Nface, field_type_vector) + call read_restart_field(nu_restart,0,vvelN,'ruf8', & + 'vvelN',1,diag,field_loc_Nface, field_type_vector) + endif + + if (grid_ice == 'C') then + call read_restart_field(nu_restart,0,uvelE,'ruf8', & + 'uvelE',1,diag,field_loc_Eface, field_type_vector) + call read_restart_field(nu_restart,0,vvelN,'ruf8', & + 'vvelN',1,diag,field_loc_Nface, field_type_vector) + endif + !----------------------------------------------------------------- ! radiation fields !----------------------------------------------------------------- @@ -381,10 +442,7 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,stress12_4,'ruf8', & 'stress12_4',1,diag,field_loc_center,field_type_scalar) ! stress12_4 -! tcraig, comment these out now to allow restarts from B grid file -! this will affect exact restart when we get to that point -#if (1 == 0) - if (grid_ice == 'CD') then + if (grid_ice == 'CD' .or. grid_ice == 'C') then call read_restart_field(nu_restart,0,stresspT,'ruf8', & 'stresspT' ,1,diag,field_loc_center,field_type_scalar) ! stresspT call read_restart_field(nu_restart,0,stressmT,'ruf8', & @@ -392,13 +450,12 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,stress12T,'ruf8', & 'stress12T',1,diag,field_loc_center,field_type_scalar) ! stress12T call read_restart_field(nu_restart,0,stresspU,'ruf8', & - 'stresspU' ,1,diag,field_loc_center,field_type_scalar) ! stresspU + 'stresspU' ,1,diag,field_loc_NEcorner,field_type_scalar) ! 
stresspU call read_restart_field(nu_restart,0,stressmU,'ruf8', & - 'stressmU' ,1,diag,field_loc_center,field_type_scalar) ! stressmU + 'stressmU' ,1,diag,field_loc_NEcorner,field_type_scalar) ! stressmU call read_restart_field(nu_restart,0,stress12U,'ruf8', & - 'stress12U',1,diag,field_loc_center,field_type_scalar) ! stress12U + 'stress12U',1,diag,field_loc_NEcorner,field_type_scalar) ! stress12U endif -#endif if (trim(grid_type) == 'tripole') then call ice_HaloUpdate_stress(stressp_1, stressp_3, halo_info, & @@ -450,6 +507,38 @@ subroutine restartfile (ice_ic) enddo !$OMP END PARALLEL DO + if (grid_ice == 'CD' .or. grid_ice == 'C') then + + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'icenmask',1,diag,field_loc_center, field_type_scalar) + + icenmask(:,:,:) = .false. + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (work1(i,j,iblk) > p5) icenmask(i,j,iblk) = .true. + enddo + enddo + enddo + !$OMP END PARALLEL DO + + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'iceemask',1,diag,field_loc_center, field_type_scalar) + + iceemask(:,:,:) = .false. + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (work1(i,j,iblk) > p5) iceemask(i,j,iblk) = .true. + enddo + enddo + enddo + !$OMP END PARALLEL DO + + endif + ! for mixed layer model if (oceanmixed_ice) then diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 index e62a1f67f..949a17cf8 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 @@ -247,6 +247,18 @@ subroutine init_restart_write(filename_spec) call define_rest_field(ncid,'uvel',dims) call define_rest_field(ncid,'vvel',dims) + + if (grid_ice == 'CD') then + call define_rest_field(ncid,'uvelE',dims) + call define_rest_field(ncid,'vvelE',dims) + call define_rest_field(ncid,'uvelN',dims) + call define_rest_field(ncid,'vvelN',dims) + endif + + if (grid_ice == 'C') then + call define_rest_field(ncid,'uvelE',dims) + call define_rest_field(ncid,'vvelN',dims) + endif if (restart_coszen) call define_rest_field(ncid,'coszen',dims) @@ -274,16 +286,19 @@ subroutine init_restart_write(filename_spec) call define_rest_field(ncid,'stress12_3',dims) call define_rest_field(ncid,'stress12_4',dims) - if (grid_ice == 'CD') then + call define_rest_field(ncid,'iceumask',dims) + + if (grid_ice == 'CD' .or. 
grid_ice == 'C') then call define_rest_field(ncid,'stresspT' ,dims) call define_rest_field(ncid,'stressmT' ,dims) call define_rest_field(ncid,'stress12T',dims) call define_rest_field(ncid,'stresspU' ,dims) call define_rest_field(ncid,'stressmU' ,dims) call define_rest_field(ncid,'stress12U',dims) + call define_rest_field(ncid,'icenmask',dims) + call define_rest_field(ncid,'iceemask',dims) endif - call define_rest_field(ncid,'iceumask',dims) if (oceanmixed_ice) then call define_rest_field(ncid,'sst',dims) diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 12e4365e9..2a7efd65d 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -252,6 +252,20 @@ subroutine init_restart_write(filename_spec) call define_rest_field(File,'uvel',dims) call define_rest_field(File,'vvel',dims) + + if (grid_ice == 'CD') then + call define_rest_field(ncid,'uvelE',dims) + call define_rest_field(ncid,'vvelE',dims) + call define_rest_field(ncid,'uvelN',dims) + call define_rest_field(ncid,'vvelN',dims) + endif + + if (grid_ice == 'C') then + call define_rest_field(ncid,'uvelE',dims) + call define_rest_field(ncid,'vvelN',dims) + endif + + if (restart_coszen) call define_rest_field(File,'coszen',dims) call define_rest_field(File,'scale_factor',dims) call define_rest_field(File,'swvdr',dims) @@ -277,17 +291,19 @@ subroutine init_restart_write(filename_spec) call define_rest_field(File,'stress12_3',dims) call define_rest_field(File,'stress12_4',dims) - if (grid_ice == 'CD') then + call define_rest_field(File,'iceumask',dims) + + if (grid_ice == 'CD' .or. grid_ice == 'C') then call define_rest_field(File,'stresspT' ,dims) call define_rest_field(File,'stressmT' ,dims) call define_rest_field(File,'stress12T',dims) call define_rest_field(File,'stresspU' ,dims) call define_rest_field(File,'stressmU' ,dims) call define_rest_field(File,'stress12U',dims) + call define_rest_field(File,'icenmask',dims) + call define_rest_field(File,'iceemask',dims) endif - call define_rest_field(File,'iceumask',dims) - if (oceanmixed_ice) then call define_rest_field(File,'sst',dims) call define_rest_field(File,'frzmlt',dims) From 00c43c8d49ee1a00f1f53296540b145b24550d31 Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Wed, 9 Feb 2022 11:43:54 -0500 Subject: [PATCH 070/109] New subroutine for viscous coeff at U point (#58) * Adding new viscous_coeffs_and_rep_pressure_U subroutine * added call for viscous_coeffs_and_rep_pressure_U * It now compiles * Use of uarea instead of uarear for tinyareaU --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 51 +++++++++++++++--- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 54 ++++++++++++++++++- 2 files changed, 96 insertions(+), 9 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 79709f9da..129da34dc 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -103,7 +103,7 @@ subroutine evp (dt) dxe, dxn, dxt, dxu, dye, dyn, dyt, dyu, & ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, earear, narear, tinyarea, grid_average_X2Y, tarea, & + tarear, uarear, earear, narear, tinyarea, grid_average_X2Y, tarea, uarea, & grid_type, grid_ice, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, 
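The new icenmask/iceemask restart fields follow the same round trip as the
existing iceumask: the logical mask is dumped as a 0/1 real field and
recovered on read with a 0.5 threshold. A minimal sketch of that round trip
(the 2x2 array shape and values are hypothetical):

   program mask_restart_sketch
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      logical :: icenmask(2,2), recovered(2,2)
      real (kind=dbl_kind) :: work1(2,2)
      icenmask = reshape([.true., .false., .false., .true.], [2,2])
      work1 = merge(1.0_dbl_kind, 0.0_dbl_kind, icenmask)  ! dump: logical -> real
      recovered = work1 > 0.5_dbl_kind                     ! read: real -> logical
      print *, all(recovered .eqv. icenmask)               ! T
   end program mask_restart_sketch
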
From 00c43c8d49ee1a00f1f53296540b145b24550d31 Mon Sep 17 00:00:00 2001
From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com>
Date: Wed, 9 Feb 2022 11:43:54 -0500
Subject: [PATCH 070/109] New subroutine for viscous coeff at U point (#58)

* Adding new viscous_coeffs_and_rep_pressure_U subroutine

* added call for viscous_coeffs_and_rep_pressure_U

* It now compiles

* Use of uarea instead of uarear for tinyareaU
---
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90    | 51 +++++++++++++---
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 54 ++++++++++++++++++-
 2 files changed, 96 insertions(+), 9 deletions(-)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index 79709f9da..129da34dc 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -103,7 +103,7 @@ subroutine evp (dt)
       dxe, dxn, dxt, dxu, dye, dyn, dyt, dyu, &
       ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, &
       dxhy, dyhx, cxp, cyp, cxm, cym, &
-      tarear, uarear, earear, narear, tinyarea, grid_average_X2Y, tarea, &
+      tarear, uarear, earear, narear, tinyarea, grid_average_X2Y, tarea, uarea, &
       grid_type, grid_ice, &
       grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv
  use ice_state, only: aice, vice, vsno, uvel, vvel, uvelN, vvelN, &
@@ -778,12 +778,13 @@ subroutine evp (dt)
                           uvel      (:,:,iblk), vvel      (:,:,iblk), &
                           dxE       (:,:,iblk), dyN       (:,:,iblk), &
                           dxU       (:,:,iblk), dyU       (:,:,iblk), &
-                          tarea     (:,:,iblk),                       &
+                          tarea     (:,:,iblk), uarea     (:,:,iblk), &
                           ratiodxN  (:,:,iblk), ratiodxNr (:,:,iblk), &
                           ratiodyE  (:,:,iblk), ratiodyEr (:,:,iblk), &
                           epm       (:,:,iblk), npm       (:,:,iblk), &
                           hm        (:,:,iblk), uvm       (:,:,iblk), &
                           zetax2T   (:,:,iblk), etax2T    (:,:,iblk), &
+                          strength  (:,:,iblk),                       &
                           stresspU  (:,:,iblk), stressmU  (:,:,iblk), &
                           stress12U (:,:,iblk))
@@ -1186,7 +1187,7 @@ subroutine stress (nx_block, ny_block, &
        zetax2ne, zetax2nw, zetax2se, zetax2sw  , & ! 2 x zeta (visc coeff)
        etax2ne, etax2nw, etax2se, etax2sw      , & ! 2 x eta (visc coeff)
        rep_prsne, rep_prsnw, rep_prsse, rep_prssw, & ! replacement pressure
-!       puny                                   , & ! puny 
+!       puny                                   , & ! puny
        ssigpn, ssigps, ssigpe, ssigpw          , &
        ssigmn, ssigms, ssigme, ssigmw          , &
        ssig12n, ssig12s, ssig12e, ssig12w      , &
@@ -1599,16 +1600,18 @@ subroutine stress_U (nx_block, ny_block, &
                         uvelU,    vvelU,    &
                         dxE,      dyN,      &
                         dxU,      dyU,      &
-                        tarea,              &
+                        tarea,    uarea,    &
                         ratiodxN, ratiodxNr, &
                         ratiodyE, ratiodyEr, &
                         epm, npm, hm, uvm,  &
                         zetax2T,  etax2T,   &
+                        strength,           &
                         stresspU, stressmU, &
                         stress12U )

      use ice_dyn_shared, only: strain_rates_U, &
-          viscous_coeffs_and_rep_pressure_T2U
+          viscous_coeffs_and_rep_pressure_T2U, &
+          viscous_coeffs_and_rep_pressure_U

      integer (kind=int_kind), intent(in) :: &
         nx_block, ny_block, & ! block dimensions
         dxU      , & ! width of U-cell through the middle (m)
         dyU      , & ! height of U-cell through the middle (m)
         tarea    , & ! area of T-cell (m^2)
+        uarea    , & ! area of U-cell (m^2)
         ratiodxN , & ! -dxN(i+1,j)/dxN(i,j) factor for BCs across coastline
         ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) factor for BCs across coastline
         ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) factor for BCs across coastline
         epm      , & ! E-cell mask
         npm      , & ! N-cell mask
         hm       , & ! T-cell mask
         uvm      , & ! U-cell mask
         zetax2T  , & ! 2*zeta at the T point
-        etax2T       ! 2*eta at the T point
+        etax2T   , & ! 2*eta at the T point
+        strength     ! ice strength at the T point

      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: &
         stresspU , & ! sigma11+sigma22
         stress12U    ! sigma12

      ! local variables

      integer (kind=int_kind) :: &
-        i, j, ij
+        i, j, ij, method

      real (kind=dbl_kind) :: &
        divU, tensionU, shearU, DeltaU, & ! strain rates at U point
-       zetax2U, etax2U, rep_prsU         ! replacement pressure at U point
+       zetax2U, etax2U, rep_prsU, &      ! replacement pressure at U point
+       puny, tinyareaU

+      real(kind=dbl_kind), parameter :: capping = c1 ! capping of the viscous coefficient

      character(len=*), parameter :: subname = '(stress_U)'

+      call icepack_query_parameters(puny_out=puny)
+      call icepack_warnings_flush(nu_diag)
+      if (icepack_warnings_aborted()) then
+         call abort_ice(error_message=subname, file=__FILE__, &
+            line=__LINE__)
+      end if

+      method=2
+
      do ij = 1, icellu
         i = indxui(ij)
         j = indxuj(ij)
@@ -1684,6 +1701,8 @@ subroutine stress_U (nx_block, ny_block, &
      !-----------------------------------------------------------------
      ! viscous coefficients and replacement pressure at U point
      !-----------------------------------------------------------------

+         if (method == 1) then
+
         call viscous_coeffs_and_rep_pressure_T2U (zetax2T(i  ,j  ), zetax2T(i  ,j+1), &
                                                   zetax2T(i+1,j+1), zetax2T(i+1,j  ), &
                                                   etax2T (i  ,j  ), etax2T (i  ,j+1), &
                                                   etax2T (i+1,j+1), etax2T (i+1,j  ), &
                                                   tarea  (i  ,j  ), tarea  (i  ,j+1), &
                                                   tarea  (i+1,j+1), tarea  (i+1,j  ), &
                                                   DeltaU,zetax2U, etax2U, rep_prsU)

+         elseif (method == 2) then
+
+            tinyareaU = puny*uarea(i,j)
+
+            call viscous_coeffs_and_rep_pressure_U (strength(i  ,j  ), strength(i  ,j+1), &
+                                                    strength(i+1,j+1), strength(i+1,j  ), &
+                                                    hm      (i  ,j  ), hm      (i  ,j+1), &
+                                                    hm      (i+1,j+1), hm      (i+1,j  ), &
+                                                    tarea   (i  ,j  ), tarea   (i  ,j+1), &
+                                                    tarea   (i+1,j+1), tarea   (i+1,j  ), &
+                                                    tinyareaU, &
+                                                    DeltaU , capping, &
+                                                    zetax2U, etax2U, rep_prsU)
+
+         endif
+
      !-----------------------------------------------------------------
      ! the stresses                            ! kg/s^2
      !-----------------------------------------------------------------
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
index e4af6d432..4bbc98902 100755
--- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
@@ -32,6 +32,7 @@ module ice_dyn_shared
       viscous_coeffs_and_rep_pressure, &
       viscous_coeffs_and_rep_pressure_T, &
       viscous_coeffs_and_rep_pressure_T2U, &
+      viscous_coeffs_and_rep_pressure_U, &
       stack_velocity_field, unstack_velocity_field

 ! namelist parameters
@@ -2168,7 +2169,7 @@ subroutine viscous_coeffs_and_rep_pressure_T2U (zetax2T_00, zetax2T_01, &

      real (kind=dbl_kind), intent(in):: &
        zetax2T_00,zetax2T_10,zetax2T_11,zetax2T_01, &
-       etax2T_00, etax2T_10, etax2T_11, etax2T_01,  & ! 2 x visous coeffs, replacement pressure
+       etax2T_00, etax2T_10, etax2T_11, etax2T_01,  & ! 2 x viscous coeffs, replacement pressure
        maskT_00, maskT_10, maskT_11, maskT_01,      &
        tarea_00, tarea_10, tarea_11, tarea_01,      &
        deltaU
@@ -2201,6 +2202,57 @@ subroutine viscous_coeffs_and_rep_pressure_T2U (zetax2T_00, zetax2T_01, &

  end subroutine viscous_coeffs_and_rep_pressure_T2U

+!=======================================================================
+
+  subroutine viscous_coeffs_and_rep_pressure_U (strength_00, strength_01, &
+                                                strength_11, strength_10, &
+                                                maskT_00, maskT_01, &
+                                                maskT_11, maskT_10, &
+                                                tarea_00, tarea_01, &
+                                                tarea_11, tarea_10, &
+                                                tinyareaU,          &
+                                                deltaU, capping,    &
+                                                zetax2U, etax2U,    &
+                                                rep_prsU)
+
+
+     real (kind=dbl_kind), intent(in):: &
+       strength_00,strength_10,strength_11,strength_01, &
+       maskT_00, maskT_10, maskT_11, maskT_01, &
+       tarea_00, tarea_10, tarea_11, tarea_01, &
+       tinyareaU, deltaU, capping
+
+     real (kind=dbl_kind), intent(out):: zetax2U, etax2U, rep_prsU
+
+     ! local variables
+
+     real (kind=dbl_kind) :: &
+       Totarea, tmpcalc, strength
+
+     character(len=*), parameter :: subname = '(viscous_coeffs_and_rep_pressure_U)'
+
+     ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code
+     Totarea  = maskT_00*Tarea_00 + &
+                maskT_10*Tarea_10 + &
+                maskT_11*Tarea_11 + &
+                maskT_01*Tarea_01
+     strength = (maskT_00*Tarea_00 *strength_00 + &
+                 maskT_10*Tarea_10 *strength_10 + &
+                 maskT_11*Tarea_11 *strength_11 + &
+                 maskT_01*Tarea_01 *strength_01)/Totarea
+
+     ! rep_prsU = (c1-Ktens)/(c1+Ktens)*zetax2U*deltaU
+     ! IMPROVE: the calcs below are the same as in the other viscous coeff routines...could reduce redundancy
+     ! we could have a strength_U subroutine and then calc the visc coeff
+
+     tmpcalc  = capping *(strength/max(deltaU,tinyareaU))+ &
+                (c1-capping)*(strength/(deltaU + tinyareaU))
+     zetax2U  = (c1+Ktens)*tmpcalc
+     rep_prsU = (c1-Ktens)*tmpcalc*deltaU
+     etax2U   = epp2i*zetax2U
+
+  end subroutine viscous_coeffs_and_rep_pressure_U
+
 !=======================================================================

 ! Load velocity components into array for boundary updates
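The viscous-coefficient calculation added above is compact enough to restate
on its own: an area-weighted ice strength at the U point is divided by the
regularized deformation Delta, with capping selecting between the max() form
and the smooth denominator. A minimal sketch with hypothetical inputs
(epp2i = 1/e^2 = 0.25 assumes the standard e = 2 yield-curve ellipse):

   program visc_coeff_sketch
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind), parameter :: Ktens   = 0.0_dbl_kind, &
                                         epp2i   = 0.25_dbl_kind, &
                                         capping = 1.0_dbl_kind
      real (kind=dbl_kind) :: strength, deltaU, tinyareaU, tmpcalc, &
                              zetax2U, etax2U, rep_prsU
      strength  = 2.75e4_dbl_kind   ! N/m, assumed U-point average strength
      deltaU    = 1.0e-7_dbl_kind   ! 1/s, deformation measure at the U point
      tinyareaU = 1.0e-11_dbl_kind  ! puny*uarea regularization
      ! capping=1 selects the max() form; capping=0 the smooth denominator
      tmpcalc  = capping *(strength/max(deltaU,tinyareaU)) + &
                 (1.0_dbl_kind-capping)*(strength/(deltaU + tinyareaU))
      zetax2U  = (1.0_dbl_kind+Ktens)*tmpcalc          ! 2 x bulk viscosity
      rep_prsU = (1.0_dbl_kind-Ktens)*tmpcalc*deltaU   ! replacement pressure
      etax2U   = epp2i*zetax2U                         ! 2 x shear viscosity
      print *, zetax2U, etax2U, rep_prsU
   end program visc_coeff_sketch
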
total ice area + arean = c0 + areas = c0 arean = global_sum(aice, distrb_info, field_loc_center, tarean) areas = global_sum(aice, distrb_info, field_loc_center, tareas) arean = arean * m2_to_km2 @@ -247,6 +257,8 @@ subroutine runtime_diags (dt) enddo enddo !$OMP END PARALLEL DO + extentn = c0 + extents = c0 extentn = global_sum(work1, distrb_info, field_loc_center, & tarean) extents = global_sum(work1, distrb_info, field_loc_center, & @@ -255,10 +267,14 @@ subroutine runtime_diags (dt) extents = extents * m2_to_km2 ! total ice volume + shmaxn = c0 + shmaxs = c0 shmaxn = global_sum(vice, distrb_info, field_loc_center, tarean) shmaxs = global_sum(vice, distrb_info, field_loc_center, tareas) ! total snow volume + snwmxn = c0 + snwmxs = c0 snwmxn = global_sum(vsno, distrb_info, field_loc_center, tarean) snwmxs = global_sum(vsno, distrb_info, field_loc_center, tareas) @@ -313,6 +329,8 @@ subroutine runtime_diags (dt) ! enddo ! endif ! !$OMP END PARALLEL DO + ketotn = c0 + ketots = c0 ketotn = global_sum(work1, distrb_info, field_loc_center, tarean) ketots = global_sum(work1, distrb_info, field_loc_center, tareas) @@ -389,8 +407,12 @@ subroutine runtime_diags (dt) endif ! maximum ice volume (= mean thickness including open water) + hmaxn = c0 + hmaxs = c0 hmaxn = global_maxval(vice, distrb_info, lmask_n) hmaxs = global_maxval(vice, distrb_info, lmask_s) + if (hmaxn < maxval_spval) hmaxn = c0 + if (hmaxs < maxval_spval) hmaxs = c0 ! maximum ice speed if (grid_ice == 'CD') then @@ -430,8 +452,12 @@ subroutine runtime_diags (dt) !$OMP END PARALLEL DO endif + umaxn = c0 + umaxs = c0 umaxn = global_maxval(work1, distrb_info, lmask_n) umaxs = global_maxval(work1, distrb_info, lmask_s) + if (umaxn < maxval_spval) umaxn = c0 + if (umaxs < maxval_spval) umaxs = c0 ! Write warning message if ice speed is too big ! (Ice speeds of ~1 m/s or more usually indicate instability) @@ -472,8 +498,12 @@ subroutine runtime_diags (dt) ! maximum ice strength + pmaxn = c0 + pmaxs = c0 pmaxn = global_maxval(strength, distrb_info, lmask_n) pmaxs = global_maxval(strength, distrb_info, lmask_s) + if (pmaxn < maxval_spval) pmaxn = c0 + if (pmaxs < maxval_spval) pmaxs = c0 pmaxn = pmaxn / c1000 ! convert to kN/m pmaxs = pmaxs / c1000 @@ -482,7 +512,9 @@ subroutine runtime_diags (dt) ! total ice/snow internal energy call total_energy (work1) - + + etotn = c0 + etots = c0 etotn = global_sum(work1, distrb_info, & field_loc_center, tarean) etots = global_sum(work1, distrb_info, & @@ -497,6 +529,8 @@ subroutine runtime_diags (dt) ! evaporation + evpn = c0 + evps = c0 evpn = global_sum_prod(evap, aice, distrb_info, & field_loc_center, tarean) evps = global_sum_prod(evap, aice, distrb_info, & @@ -515,6 +549,8 @@ subroutine runtime_diags (dt) endif ! salt flux + sfsaltn = c0 + sfsalts = c0 sfsaltn = global_sum(fsalt_ai, distrb_info, & field_loc_center, tarean) sfsalts = global_sum(fsalt_ai, distrb_info, & @@ -523,6 +559,8 @@ subroutine runtime_diags (dt) sfsalts = sfsalts*dt ! fresh water flux + sfreshn = c0 + sfreshs = c0 sfreshn = global_sum(fresh_ai, distrb_info, & field_loc_center, tarean) sfreshs = global_sum(fresh_ai, distrb_info, & @@ -544,6 +582,8 @@ subroutine runtime_diags (dt) ! ocean heat ! Note: fswthru not included because it does not heat ice + fhocnn = c0 + fhocns = c0 fhocnn = global_sum(fhocn_ai, distrb_info, & field_loc_center, tarean) fhocns = global_sum(fhocn_ai, distrb_info, & @@ -593,6 +633,8 @@ subroutine runtime_diags (dt) endif ! 
calc_Tsfc + fhatmn = c0 + fhatms = c0 fhatmn = global_sum(work1, distrb_info, & field_loc_center, tarean) fhatms = global_sum(work1, distrb_info, & @@ -609,6 +651,8 @@ subroutine runtime_diags (dt) enddo !$OMP END PARALLEL DO + fswnetn = c0 + fswnets = c0 fswnetn = global_sum(work1, distrb_info, & field_loc_center, tarean) fswnets = global_sum(work1, distrb_info, & @@ -627,6 +671,8 @@ subroutine runtime_diags (dt) enddo !$OMP END PARALLEL DO + fswdnn = c0 + fswdns = c0 fswdnn = global_sum(work1, distrb_info, & field_loc_center, tarean) fswdns = global_sum(work1, distrb_info, & @@ -642,12 +688,17 @@ subroutine runtime_diags (dt) enddo enddo !$OMP END PARALLEL DO + + fhfrzn = c0 + fhfrzs = c0 fhfrzn = global_sum(work1, distrb_info, & field_loc_center, tarean) fhfrzs = global_sum(work1, distrb_info, & field_loc_center, tareas) ! rain + rnn = c0 + rns = c0 rnn = global_sum_prod(frain, aice_init, distrb_info, & field_loc_center, tarean) rns = global_sum_prod(frain, aice_init, distrb_info, & @@ -656,6 +707,8 @@ subroutine runtime_diags (dt) rns = rns*dt ! snow + snn = c0 + sns = c0 snn = global_sum_prod(fsnow, aice_init, distrb_info, & field_loc_center, tarean) sns = global_sum_prod(fsnow, aice_init, distrb_info, & @@ -668,6 +721,8 @@ subroutine runtime_diags (dt) work1(:,:,:) = frazil(:,:,:)*rhoi/dt if (ktherm == 2 .and. .not.update_ocn_f) & work1(:,:,:) = (frazil(:,:,:)-frazil_diag(:,:,:))*rhoi/dt + frzn = c0 + frzs = c0 frzn = global_sum(work1, distrb_info, & field_loc_center, tarean) frzs = global_sum(work1, distrb_info, & @@ -751,6 +806,16 @@ subroutine runtime_diags (dt) ! isotopes if (tr_iso) then + fisoan = c0 + fisoas = c0 + fisoon = c0 + fisoos = c0 + isototn = c0 + isotots = c0 + isomx1n = c0 + isomx1s = c0 + isorn = c0 + isors = c0 do n = 1, n_iso fisoan(n) = global_sum_prod(fiso_atm(:,:,n,:), aice_init, & distrb_info, field_loc_center, tarean) @@ -783,6 +848,8 @@ subroutine runtime_diags (dt) isotots(n) = global_sum(work1, distrb_info, field_loc_center, tareas) isomx1n(n) = global_maxval(work1, distrb_info, lmask_n) isomx1s(n) = global_maxval(work1, distrb_info, lmask_s) + if (isomx1n(n) < maxval_spval) isomx1n(n) = c0 + if (isomx1s(n) < maxval_spval) isomx1s(n) = c0 isorn(n) = (totison(n)-isototn(n)+fisoan(n)-fisoon(n))/(isototn(n)+c1) isors(n) = (totisos(n)-isotots(n)+fisoas(n)-fisoos(n))/(isotots(n)+c1) enddo ! n_iso @@ -790,6 +857,16 @@ subroutine runtime_diags (dt) ! aerosols if (tr_aero) then + faeran = c0 + faeras = c0 + faeron = c0 + faeros = c0 + aerototn = c0 + aerotots = c0 + aeromx1n = c0 + aeromx1s = c0 + aerrn = c0 + aerrs = c0 do n = 1, n_aero faeran(n) = global_sum_prod(faero_atm(:,:,n,:), aice_init, & distrb_info, field_loc_center, tarean) @@ -821,6 +898,8 @@ subroutine runtime_diags (dt) aerotots(n) = global_sum(work1, distrb_info, field_loc_center, tareas) aeromx1n(n) = global_maxval(work1, distrb_info, lmask_n) aeromx1s(n) = global_maxval(work1, distrb_info, lmask_s) + if (aeromx1n(n) < maxval_spval) aeromx1n(n) = c0 + if (aeromx1s(n) < maxval_spval) aeromx1s(n) = c0 aerrn(n) = (totaeron(n)-aerototn(n)+faeran(n)-faeron(n)) & / (aerototn(n) + c1) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 129da34dc..a588dd435 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -529,7 +529,7 @@ subroutine evp (dt) enddo enddo ! iblk !$TCXOMP END PARALLEL DO - + endif ! 
grid_ice call icepack_warnings_flush(nu_diag) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 4bbc98902..d43701058 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -916,6 +916,7 @@ subroutine step_vel (nx_block, ny_block, & + massdti(i,j)*(brlx*vold + revp*vvel_init(i,j)) uvel(i,j) = (cca*cc1 + ccb*cc2) / ab2 ! m/s vvel(i,j) = (cca*cc2 - ccb*cc1) / ab2 + ! calculate seabed stress component for outputs if (ksub == ndte .and. seabed_stress) then ! on last subcycling iteration taubx(i,j) = -uvel(i,j)*Tb(i,j) / ccc diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 626b9fad9..80ec1d0a9 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -2125,6 +2125,7 @@ subroutine input_data endif if (kmt_type /= 'file' .and. & + kmt_type /= 'channel' .and. & kmt_type /= 'default' .and. & kmt_type /= 'boxislands') then if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown kmt_type=',trim(kmt_type) @@ -2460,13 +2461,10 @@ subroutine init_state if (grid_ice == 'CD' .or. grid_ice == 'C') then - ! move from B-grid to CD-grid for boxslotcyl test - if (trim(ice_data_type) == 'boxslotcyl') then - call grid_average_X2Y('S',uvel,'U',uvelN,'N') - call grid_average_X2Y('S',vvel,'U',vvelN,'N') - call grid_average_X2Y('S',uvel,'U',uvelE,'E') - call grid_average_X2Y('S',vvel,'U',vvelE,'E') - endif + call grid_average_X2Y('S',uvel,'U',uvelN,'N') + call grid_average_X2Y('S',vvel,'U',vvelN,'N') + call grid_average_X2Y('S',uvel,'U',uvelE,'E') + call grid_average_X2Y('S',vvel,'U',vvelE,'E') ! Halo update on North, East faces call ice_HaloUpdate(uvelN, halo_info, & @@ -2690,6 +2688,10 @@ subroutine set_state_var (nx_block, ny_block, & if (trim(ice_ic) == 'default') then + !--------------------------------------------------------- + ! ice concentration/thickness + !--------------------------------------------------------- + if (trim(ice_data_type) == 'box2001' .or. & trim(ice_data_type) == 'smallblock' .or. & trim(ice_data_type) == 'channel' .or. & @@ -2742,6 +2744,10 @@ subroutine set_state_var (nx_block, ny_block, & endif ! ice_data_type + !--------------------------------------------------------- + ! location of ice + !--------------------------------------------------------- + if ((trim(ice_data_type) == 'box2001') .or. & (trim(ice_data_type) == 'boxslotcyl')) then @@ -2758,8 +2764,8 @@ subroutine set_state_var (nx_block, ny_block, & endif ! tmask enddo ! i enddo ! j - - else if (trim(ice_data_type) == 'uniform') then + + elseif (trim(ice_data_type) == 'uniform') then ! all cells not land mask are ice icells = 0 do j = jlo, jhi @@ -2772,7 +2778,20 @@ subroutine set_state_var (nx_block, ny_block, & enddo enddo - else if (trim(ice_data_type) == 'smallblock') then + elseif (trim(ice_data_type) == 'channel') then + ! channel ice in center of domain in i direction + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (jglob(j) > ny_global/4 .and. jglob(j) < 3*nx_global/4) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo + + elseif (trim(ice_data_type) == 'smallblock') then ! 2x2 ice in center of domain icells = 0 do j = jlo, jhi @@ -2786,21 +2805,24 @@ subroutine set_state_var (nx_block, ny_block, & enddo enddo - else if (trim(ice_data_type) == 'channel') then - ! 
channel ice in center of domain in i direction + elseif (trim(ice_data_type) == 'medblock') then + ! ice in 50% of domain, not at edges icells = 0 + iedge = int(real(nx_global,kind=dbl_kind) * 0.25) + 1 + jedge = int(real(ny_global,kind=dbl_kind) * 0.25) + 1 do j = jlo, jhi do i = ilo, ihi - if (jglob(j) > ny_global/4 .and. jglob(j) < 3*nx_global/4) then + if ((iglob(i) > iedge .and. iglob(i) < nx_global-iedge+1) .and. & + (jglob(j) > jedge .and. jglob(j) < ny_global-jedge+1)) then icells = icells + 1 indxi(icells) = i indxj(icells) = j endif enddo enddo - - else if (trim(ice_data_type) == 'bigblock' .or. & - trim(ice_data_type) == 'gauss') then + + elseif (trim(ice_data_type) == 'bigblock' .or. & + trim(ice_data_type) == 'gauss') then ! ice in 90% of domain, not at edges icells = 0 iedge = int(real(nx_global,kind=dbl_kind) * 0.05) + 1 @@ -2843,6 +2865,10 @@ subroutine set_state_var (nx_block, ny_block, & endif ! ice_data_type + !--------------------------------------------------------- + ! ice distribution + !--------------------------------------------------------- + do n = 1, ncat ! ice volume, snow volume @@ -2928,8 +2954,11 @@ subroutine set_state_var (nx_block, ny_block, & enddo ! ij enddo ! ncat - ! velocity initialization for special tests. + !--------------------------------------------------------- + ! ice velocity ! these velocites are defined on B-grid + !--------------------------------------------------------- + if (trim(ice_data_type) == 'boxslotcyl') then do j = 1, ny_block do i = 1, nx_block @@ -2939,6 +2968,9 @@ subroutine set_state_var (nx_block, ny_block, & uvel, vvel) enddo ! j enddo ! i + else + uvel = c0 + vvel = c0 endif endif ! ice_ic diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 0d0a59d98..c5761872d 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -1404,71 +1404,60 @@ subroutine rectgrid if (my_task == master_task) then work_g1(:,:) = c0 ! initialize hm as land - if (trim(ew_boundary_type) == 'cyclic') then + if (trim(kmt_type) == 'boxislands') then - if (trim(kmt_type) == 'boxislands') then + call grid_boxislands_kmt(work_g1) - call grid_boxislands_kmt(work_g1) + elseif (trim(kmt_type) == 'channel') then - else ! default - - do j = 3,ny_global-2 ! closed top and bottom - do i = 1,nx_global ! open sides - work_g1(i,j) = c1 ! NOTE nx_global > 5 - enddo - enddo - - endif ! kmt_type - - elseif (trim(ew_boundary_type) == 'open') then + do j = 3,ny_global-2 ! closed top and bottom + do i = 1,nx_global ! open sides + work_g1(i,j) = c1 ! NOTE nx_global > 5 + enddo + enddo - if (trim(kmt_type) == 'boxislands') then + elseif (trim(kmt_type) == 'default') then - call grid_boxislands_kmt(work_g1) + ! land in the upper left and lower right corners, + ! otherwise open boundaries + imid = nint(aint(real(nx_global)/c2)) + jmid = nint(aint(real(ny_global)/c2)) - else ! default + do j = 3,ny_global-2 + do i = 3,nx_global-2 + work_g1(i,j) = c1 ! open central domain + enddo + enddo - ! land in the upper left and lower right corners, - ! otherwise open boundaries - imid = nint(aint(real(nx_global)/c2)) - jmid = nint(aint(real(ny_global)/c2)) + if (nx_global > 5 .and. ny_global > 5) then - do j = 3,ny_global-2 - do i = 3,nx_global-2 - work_g1(i,j) = c1 ! open central domain + do j = 1, jmid+2 + do i = 1, imid+2 + work_g1(i,j) = c1 ! open lower left corner enddo enddo - if (nx_global > 5 .and. 
ny_global > 5) then - - do j = 1, jmid+2 - do i = 1, imid+2 - work_g1(i,j) = c1 ! open lower left corner - enddo - enddo - - do j = max(jmid-2,1), ny_global - do i = max(imid-2,1), nx_global - work_g1(i,j) = c1 ! open upper right corner - enddo - enddo - - endif ! > 5x5 grid + do j = max(jmid-2,1), ny_global + do i = max(imid-2,1), nx_global + work_g1(i,j) = c1 ! open upper right corner + enddo + enddo - if (close_boundaries) then - work_g1(:, 1:2) = c0 - work_g1(:, ny_global-1:ny_global) = c0 - work_g1(1:2, :) = c0 - work_g1(nx_global-1:nx_global, :) = c0 - endif + endif ! > 5x5 grid - endif ! kmt_type + else - elseif (trim(ew_boundary_type) == 'closed') then + call abort_ice(subname//'ERROR: unknown kmt_type '//trim(kmt_type)) - call abort_ice(subname//'ERROR: closed boundaries not available') + endif ! kmt_type + if (close_boundaries) then + work_g1(:, 1:2) = c0 + work_g1(:, ny_global-1:ny_global) = c0 + work_g1(1:2, :) = c0 + work_g1(nx_global-1:nx_global, :) = c0 endif + endif call scatter_global(hm, work_g1, master_task, distrb_info, & diff --git a/configuration/scripts/options/set_nml.box2001 b/configuration/scripts/options/set_nml.box2001 index 6039335bc..974564b34 100644 --- a/configuration/scripts/options/set_nml.box2001 +++ b/configuration/scripts/options/set_nml.box2001 @@ -7,6 +7,7 @@ ice_ic = 'default' restart_ext = .true. histfreq = 'd','x','x','x','x' grid_type = 'rectangular' +kmt_type = 'default' dxrect = 16.e5 dyrect = 16.e5 close_boundaries = .true. diff --git a/configuration/scripts/options/set_nml.boxchan b/configuration/scripts/options/set_nml.boxchan new file mode 100644 index 000000000..5a6ddabaf --- /dev/null +++ b/configuration/scripts/options/set_nml.boxchan @@ -0,0 +1,56 @@ +days_per_year = 360 +use_leap_years = .false. +npt_unit = 'd' +npt = 5 +ice_ic = 'default' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +kmt_type = 'channel' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .false. +ew_boundary_type = 'cyclic' +ns_boundary_type = 'open' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'zero' +atmbndy = 'constant' +atm_data_type = 'uniform_east' +ocn_data_type = 'calm' +ice_data_type = 'channel' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .false. +f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.boxsyme b/configuration/scripts/options/set_nml.boxsyme index 73eda501d..bcdff7806 100644 --- a/configuration/scripts/options/set_nml.boxsyme +++ b/configuration/scripts/options/set_nml.boxsyme @@ -6,6 +6,7 @@ ice_ic = 'default' restart_ext = .true. histfreq = 'd','1','x','x','x' grid_type = 'rectangular' +kmt_type = 'default' dxrect = 16.e5 dyrect = 16.e5 close_boundaries = .true. diff --git a/configuration/scripts/options/set_nml.boxsymn b/configuration/scripts/options/set_nml.boxsymn index bb1205adb..04b88a997 100644 --- a/configuration/scripts/options/set_nml.boxsymn +++ b/configuration/scripts/options/set_nml.boxsymn @@ -6,6 +6,7 @@ ice_ic = 'default' restart_ext = .true. 
histfreq = 'd','1','x','x','x' grid_type = 'rectangular' +kmt_type = 'default' dxrect = 16.e5 dyrect = 16.e5 close_boundaries = .true. diff --git a/configuration/scripts/options/set_nml.boxsymne b/configuration/scripts/options/set_nml.boxsymne index 54add03b8..927bbf961 100644 --- a/configuration/scripts/options/set_nml.boxsymne +++ b/configuration/scripts/options/set_nml.boxsymne @@ -6,6 +6,7 @@ ice_ic = 'default' restart_ext = .true. histfreq = 'd','1','x','x','x' grid_type = 'rectangular' +kmt_type = 'default' dxrect = 16.e5 dyrect = 16.e5 close_boundaries = .true. diff --git a/configuration/scripts/options/set_nml.boxsyms b/configuration/scripts/options/set_nml.boxsyms index 06d922fe1..d01bb7b99 100644 --- a/configuration/scripts/options/set_nml.boxsyms +++ b/configuration/scripts/options/set_nml.boxsyms @@ -6,6 +6,7 @@ ice_ic = 'default' restart_ext = .true. histfreq = 'd','1','x','x','x' grid_type = 'rectangular' +kmt_type = 'default' dxrect = 16.e5 dyrect = 16.e5 close_boundaries = .true. diff --git a/configuration/scripts/options/set_nml.boxsymw b/configuration/scripts/options/set_nml.boxsymw index 92dc31f6e..a34fa165d 100644 --- a/configuration/scripts/options/set_nml.boxsymw +++ b/configuration/scripts/options/set_nml.boxsymw @@ -6,6 +6,7 @@ ice_ic = 'default' restart_ext = .true. histfreq = 'd','1','x','x','x' grid_type = 'rectangular' +kmt_type = 'default' dxrect = 16.e5 dyrect = 16.e5 close_boundaries = .true. diff --git a/configuration/scripts/options/set_nml.gbox12 b/configuration/scripts/options/set_nml.gbox12 index 8063bee67..e17e57fe2 100644 --- a/configuration/scripts/options/set_nml.gbox12 +++ b/configuration/scripts/options/set_nml.gbox12 @@ -1,5 +1,6 @@ ice_ic = 'default' grid_type = 'rectangular' +kmt_type = 'default' atm_data_type = 'box2001' ocn_data_type = 'calm' ice_data_type = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox128 b/configuration/scripts/options/set_nml.gbox128 index 40e81553f..80860bc8e 100644 --- a/configuration/scripts/options/set_nml.gbox128 +++ b/configuration/scripts/options/set_nml.gbox128 @@ -1,6 +1,7 @@ grid_ocn = 'B' ice_ic = 'default' grid_type = 'rectangular' +kmt_type = 'default' atm_data_type = 'box2001' ocn_data_type = 'box2001' ice_data_type = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox180 b/configuration/scripts/options/set_nml.gbox180 index 8063bee67..e17e57fe2 100644 --- a/configuration/scripts/options/set_nml.gbox180 +++ b/configuration/scripts/options/set_nml.gbox180 @@ -1,5 +1,6 @@ ice_ic = 'default' grid_type = 'rectangular' +kmt_type = 'default' atm_data_type = 'box2001' ocn_data_type = 'calm' ice_data_type = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox80 b/configuration/scripts/options/set_nml.gbox80 index 8063bee67..e17e57fe2 100644 --- a/configuration/scripts/options/set_nml.gbox80 +++ b/configuration/scripts/options/set_nml.gbox80 @@ -1,5 +1,6 @@ ice_ic = 'default' grid_type = 'rectangular' +kmt_type = 'default' atm_data_type = 'box2001' ocn_data_type = 'calm' ice_data_type = 'box2001' diff --git a/configuration/scripts/tests/base_suite.ts b/configuration/scripts/tests/base_suite.ts index e4c376ad4..858961eac 100644 --- a/configuration/scripts/tests/base_suite.ts +++ b/configuration/scripts/tests/base_suite.ts @@ -40,6 +40,7 @@ restart gbox128 4x4 boxrestore,short smoke gbox128 4x4 boxrestore,short,debug restart gbox80 1x1 box2001 smoke gbox80 1x1 boxslotcyl +smoke gbox12 1x1x12x12x1 boxchan,diag1,debug smoke gx3 8x2 bgcz smoke gx3 8x2 bgcz,debug smoke gx3 8x1 
bgcskl,debug diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts index 8d86544fa..00f955746 100644 --- a/configuration/scripts/tests/gridsys_suite.ts +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -1,6 +1,7 @@ # Test Grid PEs Sets BFB-compare smoke gx3 8x2 diag1,run5day restart gx3 4x2 debug,diag1 +smoke gbox12 1x1x12x12x1 boxchan smoke gbox80 1x1 box2001 smoke gbox80 1x1 boxslotcyl smoke gbox80 2x4 boxnodyn @@ -16,6 +17,7 @@ smoke gbox80 2x4 boxislandsne smoke gx3 8x2 diag1,run5day,gridcd restart gx3 4x2 debug,diag1,gridcd +smoke gbox12 1x1x12x12x1 boxchan,gridcd smoke gbox80 1x1 box2001,gridcd smoke gbox80 1x1 boxslotcyl,gridcd smoke gbox80 2x4 boxnodyn,gridcd @@ -31,6 +33,7 @@ smoke gbox80 2x4 boxislandsne,gridcd smoke gx3 8x2 diag1,run5day,gridc restart gx3 4x2 debug,diag1,gridc +smoke gbox12 1x1x12x12x1 boxchan,gridc smoke gbox80 1x1 box2001,gridc smoke gbox80 1x1 boxslotcyl,gridc smoke gbox80 2x4 boxnodyn,gridc diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 9c289f35d..94b24eab0 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -232,7 +232,7 @@ grid_nml "``bathymetry_file``", "string", "name of bathymetry file to be read", "'unknown_bathymetry_file'" "``bathymetry_format``", "``default``", "NetCDF depth field", "'default'" "", "``pop``", "pop thickness file in cm in ascii format", "" - "``close_boundaries``", "logical", "force two gridcell wide land mask on boundaries", "``.false.``" + "``close_boundaries``", "logical", "force two gridcell wide land mask on boundaries for rectangular grids", "``.false.``" "``dxrect``", "real", "x-direction grid spacing for rectangular grid in cm", "0.0" "``dyrect``", "real", "y-direction grid spacing for rectangular grid in cm", "0.0" "``gridcpl_file``", "string", "input file for coupling grid info", "'unknown_gridcpl_file'" @@ -258,8 +258,11 @@ grid_nml "", "``1``", "new formulation with round numbers", "" "", "``2``", "WMO standard categories", "" "", "``3``", "asymptotic scheme", "" - "``kmt_type``", "string", "file, default or boxislands", "file" "``kmt_file``", "string", "name of land mask file to be read", "``unknown_kmt_file``" + "``kmt_type``", "boxislands", "ocean/land mask set internally, complex test geometory", "file" + "", "channel", "ocean/land mask set internally as zonal channel, ", "" + "", "default", "ocean/land mask set internally, land in upper left and lower right of domain, ", "" + "", "file", "ocean/land mask setup read from file, see kmt_file", "" "``nblyr``", "integer", "number of zbgc layers", "0" "``ncat``", "integer", "number of ice thickness categories", "0" "``nfsd``", "integer", "number of floe size categories", "1" @@ -575,12 +578,13 @@ forcing_nml "``formdrag``", "logical", "calculate form drag", "``.false.``" "``fyear_init``", "integer", "first year of atmospheric forcing data", "1900" "``highfreq``", "logical", "high-frequency atmo coupling", "``.false.``" - "``ice_data_type``", "``boxslotcyl``", "initialize ice concentration and velocity for :ref:`boxslotcyl` test (:cite:`Zalesak79`)", "``default``" - "", "``bigblock``", "uniform ice block covering about 90 percent of the area in center of domain", "" + "``ice_data_type``", "``bigblock``", "uniform ice block covering about 90 percent of the area in center of domain", "``default``" + "", "``boxslotcyl``", "initialize ice concentration and velocity for :ref:`boxslotcyl` test 
(:cite:`Zalesak79`)", "" "", "``box2001``", "initialize ice concentration for :ref:`box2001` test (:cite:`Hunke01`)", "" "", "``channel``", "uniform block ice concentration and thickness in i-direction in 50% of domain in j-direction", "" "", "``default``", "ice dependent on latitude and ocean temperature", "" "", "``gauss``", "gauss distributed ice block covering about 90 percent of the area in center of domain", "" + "", "``medblock``", "uniform ice block covering about 50 percent of the area in center of domain", "" "", "``smallblock``", "uniform 2x2 block ice concentration and thickness in center of domain", "" "", "``uniform``", "uniform ice concentration and thickness across domain", "" "``iceruf``", "real", "ice surface roughness at atmosphere interface", "0.0005" From d9affd6971306b4e1c417a1303cd7846a737a7cc Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Wed, 9 Feb 2022 14:56:28 -0700 Subject: [PATCH 072/109] Change averaging for C grid. (#59) * Change averaging for C grid * Fix masking * Use the A averager * Fix halo updates --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 54 +++++++++++++--------- 1 file changed, 33 insertions(+), 21 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index a588dd435..abc0ec4a2 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -539,21 +539,28 @@ subroutine evp (dt) if (grid_ice == 'CD' .or. grid_ice == 'C') then call ice_timer_start(timer_bound) - ! velocities may have changed in dyn_prep2 - call stack_velocity_field(uvelN, vvelN, fld2) - call ice_HaloUpdate (fld2, halo_info, & + call ice_HaloUpdate (uvelE, halo_info, & + field_loc_Eface, field_type_vector) + call ice_HaloUpdate (vvelN, halo_info, & + field_loc_Nface, field_type_vector) + call ice_timer_stop(timer_bound) + + if (grid_ice == 'C') then + call grid_average_X2Y('A',uvelE,'E',uvelN,'N') + call grid_average_X2Y('A',vvelN,'N',vvelE,'E') + uvelN(:,:,:) = uvelN(:,:,:)*npm(:,:,:) + vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) + endif + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (uvelN, halo_info, & field_loc_Nface, field_type_vector) - call unstack_velocity_field(fld2, uvelN, vvelN) - ! 
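On the C grid each face carries only one prognostic velocity component (uvelE on E faces, vvelN on N faces), so the transverse components are rebuilt by unmasked averaging rather than halo-updated as a pair. A sketch of the sequence that both hunks in evp converge on (npm and epm are the N- and E-point land masks; 'A' requests unmasked averaging; timer calls omitted):

      call ice_HaloUpdate (uvelE, halo_info, &
           field_loc_Eface, field_type_vector)
      call ice_HaloUpdate (vvelN, halo_info, &
           field_loc_Nface, field_type_vector)
      if (grid_ice == 'C') then
         ! rebuild the diagnostic components from the prognostic ones
         call grid_average_X2Y('A',uvelE,'E',uvelN,'N')
         call grid_average_X2Y('A',vvelN,'N',vvelE,'E')
         ! unmasked averaging can spread velocity onto land; re-mask it
         uvelN(:,:,:) = uvelN(:,:,:)*npm(:,:,:)
         vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:)
      endif
      call ice_HaloUpdate (uvelN, halo_info, &
           field_loc_Nface, field_type_vector)
      call ice_HaloUpdate (vvelE, halo_info, &
           field_loc_Eface, field_type_vector)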
velocities may have changed in dyn_prep2 - call stack_velocity_field(uvelE, vvelE, fld2) - call ice_HaloUpdate (fld2, halo_info, & + call ice_HaloUpdate (vvelE, halo_info, & field_loc_Eface, field_type_vector) - call unstack_velocity_field(fld2, uvelE, vvelE) call ice_timer_stop(timer_bound) call grid_average_X2Y('S',uvelE,'E',uvel,'U') call grid_average_X2Y('S',vvelN,'N',vvel,'U') - uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) endif @@ -734,11 +741,6 @@ subroutine evp (dt) case('CD','C') - if (grid_ice == 'C') then - call grid_average_X2Y('A',uvelE,'E',uvelN,'N') - call grid_average_X2Y('A',vvelN,'N',vvelE,'E') - endif - !$TCXOMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -835,7 +837,7 @@ subroutine evp (dt) stress12U (:,:,iblk), & strintxN (:,:,iblk), strintyN (:,:,iblk), & 'N') - + enddo !$TCXOMP END PARALLEL DO @@ -910,14 +912,24 @@ subroutine evp (dt) endif call ice_timer_start(timer_bound) - call stack_velocity_field(uvelN, vvelN, fld2) - call ice_HaloUpdate (fld2, halo_info, & + call ice_HaloUpdate (uvelE, halo_info, & + field_loc_Eface, field_type_vector) + call ice_HaloUpdate (vvelN, halo_info, & field_loc_Nface, field_type_vector) - call unstack_velocity_field(fld2, uvelN, vvelN) - call stack_velocity_field(uvelE, vvelE, fld2) - call ice_HaloUpdate (fld2, halo_info, & + call ice_timer_stop(timer_bound) + + if (grid_ice == 'C') then + call grid_average_X2Y('A',uvelE,'E',uvelN,'N') + call grid_average_X2Y('A',vvelN,'N',vvelE,'E') + uvelN(:,:,:) = uvelN(:,:,:)*npm(:,:,:) + vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) + endif + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (uvelN, halo_info, & + field_loc_Nface, field_type_vector) + call ice_HaloUpdate (vvelE, halo_info, & field_loc_Eface, field_type_vector) - call unstack_velocity_field(fld2, uvelE, vvelE) call ice_timer_stop(timer_bound) call grid_average_X2Y('S',uvelE,'E',uvel,'U') From 08d87307fb716ff1d09774ca057909551aa0d05f Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Wed, 9 Feb 2022 20:55:43 -0500 Subject: [PATCH 073/109] Choice of visc coefficient method in namelist (#60) * visc coeff method now defined in ice_in * corrected format in ice_init --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 11 ++++---- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 4 ++- cicecore/cicedynB/general/ice_init.F90 | 26 +++++++++++++++---- configuration/scripts/ice_in | 1 + 4 files changed, 30 insertions(+), 12 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index abc0ec4a2..66829f896 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -1623,7 +1623,8 @@ subroutine stress_U (nx_block, ny_block, & use ice_dyn_shared, only: strain_rates_U, & viscous_coeffs_and_rep_pressure_T2U, & - viscous_coeffs_and_rep_pressure_U + viscous_coeffs_and_rep_pressure_U, & + visc_coeff_method integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1667,7 +1668,7 @@ subroutine stress_U (nx_block, ny_block, & ! local variables integer (kind=int_kind) :: & - i, j, ij, method + i, j, ij real (kind=dbl_kind) :: & divU, tensionU, shearU, DeltaU, & ! strain rates at U point @@ -1684,8 +1685,6 @@ subroutine stress_U (nx_block, ny_block, & call abort_ice(error_message=subname, file=__FILE__, & line=__LINE__) end if - - method=2 do ij = 1, icellu i = indxui(ij) @@ -1713,7 +1712,7 @@ subroutine stress_U (nx_block, ny_block, & ! 
viscous coefficients and replacement pressure at U point !----------------------------------------------------------------- - if (method == 1) then + if (visc_coeff_method == 'avg_zeta') then call viscous_coeffs_and_rep_pressure_T2U (zetax2T(i ,j ), zetax2T(i ,j+1), & zetax2T(i+1,j+1), zetax2T(i+1,j ), & @@ -1725,7 +1724,7 @@ subroutine stress_U (nx_block, ny_block, & tarea (i+1,j+1), tarea (i+1,j ), & DeltaU,zetax2U, etax2U, rep_prsU) - elseif (method == 2) then + elseif (visc_coeff_method == 'avg_strength') then tinyareaU = puny*uarea(i,j) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index d43701058..77f7606d4 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -56,8 +56,10 @@ module ice_dyn_shared character (len=char_len), public :: & yield_curve , & ! 'ellipse' ('teardrop' needs further testing) + visc_coeff_method, & ! method for visc coeff at U points (C, CD grids) seabed_stress_method ! method for seabed stress calculation - ! LKD: Lemieux et al. 2015, probabilistic: Dupont et al. in prep. + ! LKD: Lemieux et al. 2015, probabilistic: Dupont et al. in prep. + real (kind=dbl_kind), parameter, public :: & eyc = 0.36_dbl_kind, & ! coefficient for calculating the parameter E diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 80ec1d0a9..1e7c16743 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -106,9 +106,9 @@ subroutine input_data dxrect, dyrect, & pgl_global_ext use ice_dyn_shared, only: ndte, kdyn, revised_evp, yield_curve, & - evp_algorithm, & - seabed_stress, seabed_stress_method, & - k1, k2, alphab, threshold_hw, Ktens, & + evp_algorithm, visc_coeff_method, & + seabed_stress, seabed_stress_method, & + k1, k2, alphab, threshold_hw, Ktens, & e_yieldcurve, e_plasticpot, coriolis, & ssh_stress, kridge, brlx, arlx @@ -217,7 +217,7 @@ subroutine input_data brlx, arlx, ssh_stress, & advection, coriolis, kridge, ktransport, & kstrength, krdg_partic, krdg_redist, mu_rdg, & - e_yieldcurve, e_plasticpot, Ktens, & + e_yieldcurve, e_plasticpot, visc_coeff_method, & maxits_nonlin, precond, dim_fgmres, & dim_pgmres, maxits_fgmres, maxits_pgmres, monitor_nonlin, & monitor_fgmres, monitor_pgmres, reltol_nonlin, reltol_fgmres, & @@ -225,7 +225,7 @@ subroutine input_data damping_andacc, start_andacc, fpfunc_andacc, use_mean_vrel, & ortho_type, seabed_stress, seabed_stress_method, & k1, k2, alphab, threshold_hw, & - Cf, Pstar, Cstar + Cf, Pstar, Cstar, Ktens namelist /shortwave_nml/ & shortwave, albedo_type, & @@ -378,6 +378,7 @@ subroutine input_data Ktens = 0.0_dbl_kind ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) e_yieldcurve = 2.0_dbl_kind ! VP aspect ratio of elliptical yield curve e_plasticpot = 2.0_dbl_kind ! VP aspect ratio of elliptical plastic potential + visc_coeff_method = 'avg_strength' ! calc visc coeff at U point: avg_strength, avg_zeta maxits_nonlin = 4 ! max nb of iteration for nonlinear solver precond = 'pgmres' ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), 'pgmres' (Jacobi-preconditioned GMRES) dim_fgmres = 50 ! 
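What the two settings buy: 'avg_zeta' interpolates the four already-formed T-cell viscosities to the U point, while 'avg_strength' first averages the ice strength and only then forms the viscous coefficients there. A scalar sketch with hypothetical locals (zt1..zt4, p1..p4, a1..a4 are the viscosities, strengths and areas of the four T cells around the U point; DeltaU is the U-point deformation measure; the real routines also weight by the T-cell land mask):

      if (visc_coeff_method == 'avg_zeta') then
         ! interpolate the T-cell viscosities to U
         zetax2U   = (a1*zt1 + a2*zt2 + a3*zt3 + a4*zt4)/(a1+a2+a3+a4)
      elseif (visc_coeff_method == 'avg_strength') then
         ! average the strength first, then form the viscosity at U
         strengthU = (a1*p1 + a2*p2 + a3*p3 + a4*p4)/(a1+a2+a3+a4)
         zetax2U   = (c1+Ktens)*strengthU/max(DeltaU, tinyareaU)
      endif

The two are not equivalent because the max() limiter and the averaging do not commute, which is why the choice is worth exposing in the namelist.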
size of fgmres Krylov subspace @@ -745,6 +746,7 @@ subroutine input_data call broadcast_scalar(Ktens, master_task) call broadcast_scalar(e_yieldcurve, master_task) call broadcast_scalar(e_plasticpot, master_task) + call broadcast_scalar(visc_coeff_method, master_task) call broadcast_scalar(advection, master_task) call broadcast_scalar(conserv_check, master_task) call broadcast_scalar(shortwave, master_task) @@ -1023,6 +1025,16 @@ subroutine input_data endif endif + if (grid_ice == 'C' .or. grid_ice == 'CD') then + if (visc_coeff_method /= 'avg_zeta' .and. visc_coeff_method /= 'avg_strength') then + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: invalid method for viscous coefficients' + write(nu_diag,*) subname//' ERROR: visc_coeff_method should be avg_zeta or avg_strength' + endif + abort_list = trim(abort_list)//":44" + endif + endif + rpcesm = 0 rplvl = 0 rptopo = 0 @@ -1586,6 +1598,10 @@ subroutine input_data write(nu_diag,1002) ' alphab = ', alphab, ' : factor for landfast ice' endif endif + if (grid_ice == 'C' .or. grid_ice == 'CD') then + write(nu_diag,1030) 'viscous coeff method (U point) = ', trim(visc_coeff_method) + endif + write(nu_diag,1002) ' Ktens = ', Ktens, ' : tensile strength factor' if (kdyn == 3) then diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index 86efbf65e..fcdbce91c 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -147,6 +147,7 @@ Ktens = 0. e_yieldcurve = 2. e_plasticpot = 2. + visc_coeff_method = 'avg_strength' seabed_stress = .false. seabed_stress_method = 'LKD' k1 = 7.5 From 09238c7ceba0160aae6c18068b6b9088d392e4e5 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Fri, 11 Feb 2022 11:00:24 -0800 Subject: [PATCH 074/109] Add new geometries and tests (#61) * Change averaging for C grid * Fix masking * Use the A averager * Fix halo updates * - Add wall kmt_type - Add blockep4, uniformp5, medblocke, blocke to ice_data_type - Add ability to check if C/CD fields are on restart file and skip - Add new tests to gridsys_suite - Update documentation with new namelist * Rename box2000 to boxwallp5 Co-authored-by: David Bailey --- cicecore/cicedynB/general/ice_init.F90 | 38 +++++- cicecore/cicedynB/infrastructure/ice_grid.F90 | 14 ++- .../infrastructure/ice_restart_driver.F90 | 116 ++++++++++-------- .../io/io_netcdf/ice_restart.F90 | 29 ++++- .../infrastructure/io/io_pio2/ice_restart.F90 | 29 ++++- configuration/scripts/options/set_nml.boxchan | 1 + configuration/scripts/options/set_nml.boxwall | 56 +++++++++ .../scripts/options/set_nml.boxwallblock | 56 +++++++++ .../scripts/options/set_nml.boxwallp5 | 56 +++++++++ configuration/scripts/tests/gridsys_suite.ts | 9 ++ doc/source/user_guide/ug_case_settings.rst | 17 ++- 11 files changed, 359 insertions(+), 62 deletions(-) create mode 100644 configuration/scripts/options/set_nml.boxwall create mode 100644 configuration/scripts/options/set_nml.boxwallblock create mode 100644 configuration/scripts/options/set_nml.boxwallp5 diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 1e7c16743..c3e8c582f 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -2142,6 +2142,7 @@ subroutine input_data if (kmt_type /= 'file' .and. & kmt_type /= 'channel' .and. & + kmt_type /= 'wall' .and. & kmt_type /= 'default' .and. 
& kmt_type /= 'boxislands') then if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown kmt_type=',trim(kmt_type) @@ -2712,6 +2713,8 @@ subroutine set_state_var (nx_block, ny_block, & trim(ice_data_type) == 'smallblock' .or. & trim(ice_data_type) == 'channel' .or. & trim(ice_data_type) == 'bigblock' .or. & + trim(ice_data_type) == 'blockep5' .or. & + trim(ice_data_type) == 'uniformp5' .or. & trim(ice_data_type) == 'gauss') then hbar = c2 ! initial ice thickness @@ -2724,7 +2727,9 @@ subroutine set_state_var (nx_block, ny_block, & endif enddo - elseif (trim(ice_data_type) == 'boxslotcyl') then + elseif (trim(ice_data_type) == 'boxslotcyl' .or. & + trim(ice_data_type) == 'medblocke' .or. & + trim(ice_data_type) == 'blocke') then hbar = c1 ! initial ice thickness (1 m) do n = 1, ncat @@ -2781,7 +2786,8 @@ subroutine set_state_var (nx_block, ny_block, & enddo ! i enddo ! j - elseif (trim(ice_data_type) == 'uniform') then + elseif ((trim(ice_data_type) == 'uniform') .or. & + (trim(ice_data_type) == 'uniformp5')) then ! all cells not land mask are ice icells = 0 do j = jlo, jhi @@ -2807,6 +2813,34 @@ subroutine set_state_var (nx_block, ny_block, & enddo enddo + elseif (trim(ice_data_type) == 'blocke' .or. & + trim(ice_data_type) == 'blockep5') then + ! block on east half of domain + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (iglob(i) >= nx_global/2) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo + + elseif (trim(ice_data_type) == 'medblocke') then + ! block on east half of domain in center of domain + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (jglob(j) > ny_global/4 .and. jglob(j) < 3*nx_global/4 .and. & + iglob(i) >= nx_global/2) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + enddo + enddo + elseif (trim(ice_data_type) == 'smallblock') then ! 2x2 ice in center of domain icells = 0 diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index c5761872d..e43a81b9a 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -1410,9 +1410,17 @@ subroutine rectgrid elseif (trim(kmt_type) == 'channel') then - do j = 3,ny_global-2 ! closed top and bottom - do i = 1,nx_global ! open sides - work_g1(i,j) = c1 ! NOTE nx_global > 5 + do j = 3,ny_global-2 ! closed top and bottom + do i = 1,nx_global ! open sides + work_g1(i,j) = c1 ! NOTE nx_global > 5 + enddo + enddo + + elseif (trim(kmt_type) == 'wall') then + + do j = 1,ny_global ! open except + do i = 1,nx_global-2 ! 
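! Aside on the companion set_state_var changes: the 'medblocke' window tests
! jglob(j) < 3*nx_global/4. On the square gbox domains in the test suites
! this equals 3*ny_global/4, but a j-bound written in terms of nx_global
! looks unintended and would misplace the block on non-square domains.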
closed east edge + work_g1(i,j) = c1 enddo enddo diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index 5595d0bf9..b3cd413a9 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -358,21 +358,27 @@ subroutine restartfile (ice_ic) 'vvel',1,diag,field_loc_NEcorner, field_type_vector) if (grid_ice == 'CD') then - call read_restart_field(nu_restart,0,uvelE,'ruf8', & - 'uvelE',1,diag,field_loc_Eface, field_type_vector) - call read_restart_field(nu_restart,0,vvelE,'ruf8', & - 'vvelE',1,diag,field_loc_Eface, field_type_vector) - call read_restart_field(nu_restart,0,uvelN,'ruf8', & - 'uvelN',1,diag,field_loc_Nface, field_type_vector) - call read_restart_field(nu_restart,0,vvelN,'ruf8', & - 'vvelN',1,diag,field_loc_Nface, field_type_vector) + if (query_field(nu_restart,'uvelE')) & + call read_restart_field(nu_restart,0,uvelE,'ruf8', & + 'uvelE',1,diag,field_loc_Eface, field_type_vector) + if (query_field(nu_restart,'vvelE')) & + call read_restart_field(nu_restart,0,vvelE,'ruf8', & + 'vvelE',1,diag,field_loc_Eface, field_type_vector) + if (query_field(nu_restart,'uvelN')) & + call read_restart_field(nu_restart,0,uvelN,'ruf8', & + 'uvelN',1,diag,field_loc_Nface, field_type_vector) + if (query_field(nu_restart,'vvelN')) & + call read_restart_field(nu_restart,0,vvelN,'ruf8', & + 'vvelN',1,diag,field_loc_Nface, field_type_vector) endif if (grid_ice == 'C') then - call read_restart_field(nu_restart,0,uvelE,'ruf8', & - 'uvelE',1,diag,field_loc_Eface, field_type_vector) - call read_restart_field(nu_restart,0,vvelN,'ruf8', & - 'vvelN',1,diag,field_loc_Nface, field_type_vector) + if (query_field(nu_restart,'uvelE')) & + call read_restart_field(nu_restart,0,uvelE,'ruf8', & + 'uvelE',1,diag,field_loc_Eface, field_type_vector) + if (query_field(nu_restart,'vvelN')) & + call read_restart_field(nu_restart,0,vvelN,'ruf8', & + 'vvelN',1,diag,field_loc_Nface, field_type_vector) endif !----------------------------------------------------------------- @@ -443,18 +449,24 @@ subroutine restartfile (ice_ic) 'stress12_4',1,diag,field_loc_center,field_type_scalar) ! stress12_4 if (grid_ice == 'CD' .or. grid_ice == 'C') then - call read_restart_field(nu_restart,0,stresspT,'ruf8', & - 'stresspT' ,1,diag,field_loc_center,field_type_scalar) ! stresspT - call read_restart_field(nu_restart,0,stressmT,'ruf8', & - 'stressmT' ,1,diag,field_loc_center,field_type_scalar) ! stressmT - call read_restart_field(nu_restart,0,stress12T,'ruf8', & - 'stress12T',1,diag,field_loc_center,field_type_scalar) ! stress12T - call read_restart_field(nu_restart,0,stresspU,'ruf8', & - 'stresspU' ,1,diag,field_loc_NEcorner,field_type_scalar) ! stresspU - call read_restart_field(nu_restart,0,stressmU,'ruf8', & - 'stressmU' ,1,diag,field_loc_NEcorner,field_type_scalar) ! stressmU - call read_restart_field(nu_restart,0,stress12U,'ruf8', & - 'stress12U',1,diag,field_loc_NEcorner,field_type_scalar) ! stress12U + if (query_field(nu_restart,'stresspT')) & + call read_restart_field(nu_restart,0,stresspT,'ruf8', & + 'stresspT' ,1,diag,field_loc_center,field_type_scalar) ! stresspT + if (query_field(nu_restart,'stressmT')) & + call read_restart_field(nu_restart,0,stressmT,'ruf8', & + 'stressmT' ,1,diag,field_loc_center,field_type_scalar) ! stressmT + if (query_field(nu_restart,'stress12T')) & + call read_restart_field(nu_restart,0,stress12T,'ruf8', & + 'stress12T',1,diag,field_loc_center,field_type_scalar) ! 
stress12T + if (query_field(nu_restart,'stresspU')) & + call read_restart_field(nu_restart,0,stresspU,'ruf8', & + 'stresspU' ,1,diag,field_loc_NEcorner,field_type_scalar) ! stresspU + if (query_field(nu_restart,'stressmU')) & + call read_restart_field(nu_restart,0,stressmU,'ruf8', & + 'stressmU' ,1,diag,field_loc_NEcorner,field_type_scalar) ! stressmU + if (query_field(nu_restart,'stress12U')) & + call read_restart_field(nu_restart,0,stress12U,'ruf8', & + 'stress12U',1,diag,field_loc_NEcorner,field_type_scalar) ! stress12U endif if (trim(grid_type) == 'tripole') then @@ -509,33 +521,37 @@ subroutine restartfile (ice_ic) if (grid_ice == 'CD' .or. grid_ice == 'C') then - call read_restart_field(nu_restart,0,work1,'ruf8', & - 'icenmask',1,diag,field_loc_center, field_type_scalar) - - icenmask(:,:,:) = .false. - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - if (work1(i,j,iblk) > p5) icenmask(i,j,iblk) = .true. - enddo - enddo - enddo - !$OMP END PARALLEL DO - - call read_restart_field(nu_restart,0,work1,'ruf8', & - 'iceemask',1,diag,field_loc_center, field_type_scalar) + if (query_field(nu_restart,'icenmask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'icenmask',1,diag,field_loc_center, field_type_scalar) + + icenmask(:,:,:) = .false. + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (work1(i,j,iblk) > p5) icenmask(i,j,iblk) = .true. + enddo + enddo + enddo + !$OMP END PARALLEL DO + endif - iceemask(:,:,:) = .false. - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceemask(i,j,iblk) = .true. - enddo - enddo - enddo - !$OMP END PARALLEL DO + if (query_field(nu_restart,'iceemask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'iceemask',1,diag,field_loc_center, field_type_scalar) + + iceemask(:,:,:) = .false. + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (work1(i,j,iblk) > p5) iceemask(i,j,iblk) = .true. + enddo + enddo + enddo + !$OMP END PARALLEL DO + endif endif diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 index 949a17cf8..d49764375 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 @@ -27,7 +27,8 @@ module ice_restart implicit none private public :: init_restart_write, init_restart_read, & - read_restart_field, write_restart_field, final_restart + read_restart_field, write_restart_field, final_restart, & + query_field integer (kind=int_kind) :: ncid @@ -886,6 +887,32 @@ subroutine define_rest_field(ncid, vname, dims) end subroutine define_rest_field +!======================================================================= + +! Inquire field existance +! author T. Craig + + logical function query_field(nu,vname) + + integer (kind=int_kind), intent(in) :: nu ! unit number + character (len=*) , intent(in) :: vname ! variable name + + ! local variables + + integer (kind=int_kind) :: status, varid + character(len=*), parameter :: subname = '(query_field)' + + query_field = .false. +#ifdef USE_NETCDF + status = nf90_inq_varid(ncid,trim(vname),varid) + if (status == nf90_noerr) query_field = .true. 
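! The probe-before-read pattern this enables in the restart driver lets a
! C/CD run start from a B-grid restart file that lacks the new fields:
!
!    if (query_field(nu_restart,'uvelE')) &
!       call read_restart_field(nu_restart,0,uvelE,'ruf8', &
!            'uvelE',1,diag,field_loc_Eface, field_type_vector)
!
! Missing fields are simply skipped and keep their initialized values.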
+#else + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined for '//trim(ice_ic), & + file=__FILE__, line=__LINE__) +#endif + + end function query_field + !======================================================================= end module ice_restart diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 2a7efd65d..68dcd309d 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -22,7 +22,8 @@ module ice_restart implicit none private public :: init_restart_write, init_restart_read, & - read_restart_field, write_restart_field, final_restart + read_restart_field, write_restart_field, final_restart, & + query_field type(file_desc_t) :: File type(var_desc_t) :: vardesc @@ -929,6 +930,32 @@ subroutine define_rest_field(File, vname, dims) end subroutine define_rest_field +!======================================================================= + +! Inquire field existance +! author T. Craig + + logical function query_field(nu,vname) + + integer (kind=int_kind), intent(in) :: nu ! unit number + character (len=*) , intent(in) :: vname ! variable name + + ! local variables + + integer (kind=int_kind) :: status, varid + character(len=*), parameter :: subname = '(query_field)' + + query_field = .false. +#ifdef USE_NETCDF + status = pio_inq_varid(File,trim(vname),vardesc) + if (status == PIO_noerr) query_field = .true. +#else + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined for '//trim(ice_ic), & + file=__FILE__, line=__LINE__) +#endif + + end function query_field + !======================================================================= end module ice_restart diff --git a/configuration/scripts/options/set_nml.boxchan b/configuration/scripts/options/set_nml.boxchan index 5a6ddabaf..6005dfc2d 100644 --- a/configuration/scripts/options/set_nml.boxchan +++ b/configuration/scripts/options/set_nml.boxchan @@ -54,3 +54,4 @@ f_tauby = 'd1' f_divu = 'd1' f_sig1 = 'd1' f_sig2 = 'd1' +f_sigP = 'd1' diff --git a/configuration/scripts/options/set_nml.boxwall b/configuration/scripts/options/set_nml.boxwall new file mode 100644 index 000000000..bf61166dc --- /dev/null +++ b/configuration/scripts/options/set_nml.boxwall @@ -0,0 +1,56 @@ +days_per_year = 360 +use_leap_years = .false. +npt = 240 +ice_ic = 'default' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +kmt_type = 'wall' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .false. +ew_boundary_type = 'cyclic' +ns_boundary_type = 'cyclic' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'zero' +atmbndy = 'constant' +atm_data_type = 'uniform_east' +ocn_data_type = 'calm' +ice_data_type = 'blocke' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .false. 
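! The three 'wall' tests are variants of this file that differ only in the
! initial ice field:
!   boxwall      : ice_data_type = 'blocke'    -> aice = 1   over the east half
!   boxwallblock : ice_data_type = 'medblocke' -> ~25% block on the east side
!   boxwallp5    : ice_data_type = 'blockep5'  -> aice = 0.5 over the east half
! With kmt_type = 'wall' (closed east edge) and atm_data_type = 'uniform_east',
! the wind drives the ice up against the wall.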
+f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' +f_sigP = 'd1' diff --git a/configuration/scripts/options/set_nml.boxwallblock b/configuration/scripts/options/set_nml.boxwallblock new file mode 100644 index 000000000..5b64ff798 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxwallblock @@ -0,0 +1,56 @@ +days_per_year = 360 +use_leap_years = .false. +npt = 240 +ice_ic = 'default' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +kmt_type = 'wall' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .false. +ew_boundary_type = 'cyclic' +ns_boundary_type = 'cyclic' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'zero' +atmbndy = 'constant' +atm_data_type = 'uniform_east' +ocn_data_type = 'calm' +ice_data_type = 'medblocke' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .false. +f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' +f_sigP = 'd1' diff --git a/configuration/scripts/options/set_nml.boxwallp5 b/configuration/scripts/options/set_nml.boxwallp5 new file mode 100644 index 000000000..6f5d3fa96 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxwallp5 @@ -0,0 +1,56 @@ +days_per_year = 360 +use_leap_years = .false. +npt = 240 +ice_ic = 'default' +restart_ext = .true. +histfreq = 'd','1','x','x','x' +grid_type = 'rectangular' +kmt_type = 'wall' +dxrect = 16.e5 +dyrect = 16.e5 +close_boundaries = .false. +ew_boundary_type = 'cyclic' +ns_boundary_type = 'cyclic' +tr_iage = .false. +tr_FY = .false. +tr_lvl = .false. +tr_pond_lvl = .false. +ktherm = -1 +kstrength = 0 +kdyn = 1 +kridge = -1 +ktransport = -1 +coriolis = 'zero' +atmbndy = 'constant' +atm_data_type = 'uniform_east' +ocn_data_type = 'calm' +ice_data_type = 'blockep5' +calc_strair = .false. +rotate_wind = .false. +restore_ice = .false. 
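! Reminder on the history flags used throughout these box configs:
! histfreq = 'd','1','x','x','x' activates a daily stream ('d') and an
! instantaneous every-timestep stream ('1'), with 'x' disabling the rest.
! A flag such as f_uvel = 'd1' writes the field to both active streams,
! while f_hs = 'd' goes to the daily stream only.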
+f_aice = 'd1' +f_hi = 'd1' +f_hs = 'd' +f_Tsfc = 'd' +f_uvel = 'd1' +f_vvel = 'd1' +f_uatm = 'd' +f_vatm = 'd' +f_uocn = 'd' +f_vocn = 'd' +f_strairx = 'd1' +f_strairy = 'd1' +f_strtltx = 'd1' +f_strtlty = 'd1' +f_strcorx = 'd1' +f_strcory = 'd1' +f_strocnx = 'd1' +f_strocny = 'd1' +f_strintx = 'd1' +f_strinty = 'd1' +f_taubx = 'd1' +f_tauby = 'd1' +f_divu = 'd1' +f_sig1 = 'd1' +f_sig2 = 'd1' +f_sigP = 'd1' diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts index 00f955746..30b9d7183 100644 --- a/configuration/scripts/tests/gridsys_suite.ts +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -3,6 +3,9 @@ smoke gx3 8x2 diag1,run5day restart gx3 4x2 debug,diag1 smoke gbox12 1x1x12x12x1 boxchan smoke gbox80 1x1 box2001 +smoke gbox80 2x2 boxwallp5 +smoke gbox80 3x3 boxwall +smoke gbox80 2x2 boxwallblock smoke gbox80 1x1 boxslotcyl smoke gbox80 2x4 boxnodyn smoke gbox80 2x2 boxsymn @@ -19,6 +22,9 @@ smoke gx3 8x2 diag1,run5day,gridcd restart gx3 4x2 debug,diag1,gridcd smoke gbox12 1x1x12x12x1 boxchan,gridcd smoke gbox80 1x1 box2001,gridcd +smoke gbox80 2x2 boxwallp5,gridcd +smoke gbox80 3x3 boxwall,gridcd +smoke gbox80 2x2 boxwallblock,gridcd smoke gbox80 1x1 boxslotcyl,gridcd smoke gbox80 2x4 boxnodyn,gridcd smoke gbox80 2x2 boxsymn,gridcd @@ -35,6 +41,9 @@ smoke gx3 8x2 diag1,run5day,gridc restart gx3 4x2 debug,diag1,gridc smoke gbox12 1x1x12x12x1 boxchan,gridc smoke gbox80 1x1 box2001,gridc +smoke gbox80 2x2 boxwallp5,gridc +smoke gbox80 3x3 boxwall,gridc +smoke gbox80 2x2 boxwallblock,gridc smoke gbox80 1x1 boxslotcyl,gridc smoke gbox80 2x4 boxnodyn,gridc smoke gbox80 2x2 boxsymn,gridc diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 94b24eab0..e31652064 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -260,9 +260,10 @@ grid_nml "", "``3``", "asymptotic scheme", "" "``kmt_file``", "string", "name of land mask file to be read", "``unknown_kmt_file``" "``kmt_type``", "boxislands", "ocean/land mask set internally, complex test geometory", "file" - "", "channel", "ocean/land mask set internally as zonal channel, ", "" + "", "channel", "ocean/land mask set internally as zonal channel", "" "", "default", "ocean/land mask set internally, land in upper left and lower right of domain, ", "" "", "file", "ocean/land mask setup read from file, see kmt_file", "" + "", "wall", "ocean/land mask set at right edge of domain", "" "``nblyr``", "integer", "number of zbgc layers", "0" "``ncat``", "integer", "number of ice thickness categories", "0" "``nfsd``", "integer", "number of floe size categories", "1" @@ -450,8 +451,10 @@ dynamics_nml "``ssh_stress``", "``coupled``", "computed from coupled sea surface height gradient", "``geostrophic``" "", "``geostropic``", "computed from ocean velocity", "" "``threshold_hw``", "real", "Max water depth for grounding (see :cite:`Amundrud04`)", "30." 
- "``yield_curve``", "``ellipse``", "elliptical yield curve", "``ellipse``" "``use_mean_vrel``", "logical", "Use mean of two previous iterations for vrel in VP", "``.true.``" + "``visc_coeff_method``", "``avg_strength``", "average strength for visc coeff on U grid", "``avg_strength``" + "", "``avg_zeta``", "average zeta for visc coeff on U grid", "" + "``yield_curve``", "``ellipse``", "elliptical yield curve", "``ellipse``" "", "", "", "" shortwave_nml @@ -579,14 +582,18 @@ forcing_nml "``fyear_init``", "integer", "first year of atmospheric forcing data", "1900" "``highfreq``", "logical", "high-frequency atmo coupling", "``.false.``" "``ice_data_type``", "``bigblock``", "uniform ice block covering about 90 percent of the area in center of domain", "``default``" + "", "``blocke``", "initialize ice concentration on right side of domain with aice=1", "" + "", "``blockep5``", "initialize ice concentration on right side of domain with aice=0.5", "" "", "``boxslotcyl``", "initialize ice concentration and velocity for :ref:`boxslotcyl` test (:cite:`Zalesak79`)", "" - "", "``box2001``", "initialize ice concentration for :ref:`box2001` test (:cite:`Hunke01`)", "" + "", "``box2001``", "initialize ice concentration for :ref:`box2001` test (:cite:`Hunke01`) aice 0 to 1 zonally", "" "", "``channel``", "uniform block ice concentration and thickness in i-direction in 50% of domain in j-direction", "" "", "``default``", "ice dependent on latitude and ocean temperature", "" "", "``gauss``", "gauss distributed ice block covering about 90 percent of the area in center of domain", "" - "", "``medblock``", "uniform ice block covering about 50 percent of the area in center of domain", "" + "", "``medblock``", "uniform ice block covering about 25 percent of the area in center of domain", "" + "", "``medblocke``", "uniform ice block covering about 25 percent at right side of domain", "" "", "``smallblock``", "uniform 2x2 block ice concentration and thickness in center of domain", "" - "", "``uniform``", "uniform ice concentration and thickness across domain", "" + "", "``uniform``", "uniform ice concentration and thickness across domain distributed in categories", "" + "", "``uniformp5``", "uniform ice concentration and thickness across domain with aice=0.5 q", "" "``iceruf``", "real", "ice surface roughness at atmosphere interface", "0.0005" "``l_mpond_fresh``", "``.false.``", "release pond water immediately to ocean", "``.false.``" "", "``true``", "retain (topo) pond water until ponds drain", "" From 52a7054b870f54e8ca3f9d55ea2b03b71e20daa5 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Tue, 1 Mar 2022 19:05:29 -0800 Subject: [PATCH 075/109] fix bugs in binary and pio2 updates (#62) --- .../io/io_binary/ice_restart.F90 | 21 ++++++++++++++++++- .../infrastructure/io/io_pio2/ice_restart.F90 | 12 +++++------ 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 index a6f42a6a5..5dd35fdf4 100644 --- a/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 @@ -30,7 +30,8 @@ module ice_restart implicit none private public :: init_restart_write, init_restart_read, & - read_restart_field, write_restart_field, final_restart + read_restart_field, write_restart_field, final_restart, & + query_field real(kind=dbl_kind) :: time_forc = -99. ! 
historic now local @@ -892,6 +893,24 @@ subroutine final_restart() end subroutine final_restart +!======================================================================= + +! Inquire field existance, doesn't work in binary files so set to true and return +! author T. Craig + + logical function query_field(nu,vname) + + integer (kind=int_kind), intent(in) :: nu ! unit number + character (len=*) , intent(in) :: vname ! variable name + + ! local variables + + character(len=*), parameter :: subname = '(query_field)' + + query_field = .true. + + end function query_field + !======================================================================= end module ice_restart diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 68dcd309d..24a5b75be 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -255,15 +255,15 @@ subroutine init_restart_write(filename_spec) call define_rest_field(File,'vvel',dims) if (grid_ice == 'CD') then - call define_rest_field(ncid,'uvelE',dims) - call define_rest_field(ncid,'vvelE',dims) - call define_rest_field(ncid,'uvelN',dims) - call define_rest_field(ncid,'vvelN',dims) + call define_rest_field(File,'uvelE',dims) + call define_rest_field(File,'vvelE',dims) + call define_rest_field(File,'uvelN',dims) + call define_rest_field(File,'vvelN',dims) endif if (grid_ice == 'C') then - call define_rest_field(ncid,'uvelE',dims) - call define_rest_field(ncid,'vvelN',dims) + call define_rest_field(File,'uvelE',dims) + call define_rest_field(File,'vvelN',dims) endif From e1394ef70062c511dcce6517c02162c54ee727d7 Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Sat, 5 Mar 2022 15:25:12 -0500 Subject: [PATCH 076/109] deltamin now defined in ice_in (#63) * moved calc of deltamin x tarea in ice_init * Added deltamin in ice_in * Minor modif, BFB (gx3) * Replaced tinyarea by deltaminTarea in ice_dyn_evp * tinyarea not used anymore. 
BFB for kdyn=1 or 3 with gx3 test case * changed deltaminTarea to DminTarea * Now have deltaminEVP and deltaminVP in namelist * Added capping in namelist...BFB * Added new variables in the doc (cice_index.rst) * changed write format 1003 in ice_init for deltamin value * modifications in ug_case_settings.rst * Minor fix to doc --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 72 +++++++----------- cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 | 76 ++++++++----------- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 70 ++++++++++------- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 43 +++-------- cicecore/cicedynB/general/ice_init.F90 | 31 +++++++- cicecore/cicedynB/infrastructure/ice_grid.F90 | 5 +- cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 | 4 +- configuration/scripts/ice_in | 3 + doc/source/cice_index.rst | 6 +- doc/source/user_guide/ug_case_settings.rst | 14 +++- 10 files changed, 161 insertions(+), 163 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 66829f896..0a48783c4 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -103,7 +103,7 @@ subroutine evp (dt) dxe, dxn, dxt, dxu, dye, dyn, dyt, dyu, & ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, earear, narear, tinyarea, grid_average_X2Y, tarea, uarea, & + tarear, uarear, earear, narear, grid_average_X2Y, tarea, uarea, & grid_type, grid_ice, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, uvelN, vvelN, & @@ -113,7 +113,7 @@ subroutine evp (dt) ice_timer_start, ice_timer_stop, timer_evp_1d, timer_evp_2d use ice_dyn_evp_1d, only: ice_dyn_evp_1d_copyin, ice_dyn_evp_1d_kernel, & ice_dyn_evp_1d_copyout - use ice_dyn_shared, only: evp_algorithm, stack_velocity_field, unstack_velocity_field + use ice_dyn_shared, only: evp_algorithm, stack_velocity_field, unstack_velocity_field, DminTarea real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -705,7 +705,7 @@ subroutine evp (dt) dxhy (:,:,iblk), dyhx (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & cxm (:,:,iblk), cym (:,:,iblk), & - tarear (:,:,iblk), tinyarea (:,:,iblk), & + tarear (:,:,iblk), DminTarea(:,:,iblk), & strength (:,:,iblk), & stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & @@ -751,7 +751,7 @@ subroutine evp (dt) uvelN (:,:,iblk), vvelN (:,:,iblk), & dxN (:,:,iblk), dyE (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & - tarear (:,:,iblk), tinyarea (:,:,iblk), & + tarear (:,:,iblk), DminTarea (:,:,iblk), & strength (:,:,iblk), & zetax2T (:,:,iblk), etax2T (:,:,iblk), & stresspT (:,:,iblk), stressmT (:,:,iblk), & @@ -1134,7 +1134,7 @@ subroutine stress (nx_block, ny_block, & dxhy, dyhx, & cxp, cyp, & cxm, cym, & - tarear, tinyarea, & + tarear, DminTarea, & strength, & stressp_1, stressp_2, & stressp_3, stressp_4, & @@ -1146,7 +1146,8 @@ subroutine stress (nx_block, ny_block, & rdg_conv, rdg_shear, & str ) - use ice_dyn_shared, only: strain_rates, deformations, viscous_coeffs_and_rep_pressure_T + use ice_dyn_shared, only: strain_rates, deformations, & + viscous_coeffs_and_rep_pressure_T, capping integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1170,7 +1171,7 @@ subroutine stress (nx_block, ny_block, & cym , & ! 0.5*HTE - 1.5*HTW cxm , & ! 0.5*HTN - 1.5*HTS tarear , & ! 1/tarea - tinyarea ! puny*tarea + DminTarea ! 
deltaminEVP*tarea real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22 @@ -1210,8 +1211,6 @@ subroutine stress (nx_block, ny_block, & str12ew, str12we, str12ns, str12sn , & strp_tmp, strm_tmp, tmp - real(kind=dbl_kind),parameter :: capping = c1 ! of the viscous coef - character(len=*), parameter :: subname = '(stress)' !----------------------------------------------------------------- @@ -1248,24 +1247,24 @@ subroutine stress (nx_block, ny_block, & ! viscous coefficients and replacement pressure !----------------------------------------------------------------- - call viscous_coeffs_and_rep_pressure_T (strength(i,j), tinyarea(i,j),& - Deltane, zetax2ne, & - etax2ne, rep_prsne, & + call viscous_coeffs_and_rep_pressure_T (strength(i,j), DminTarea(i,j),& + Deltane, zetax2ne, & + etax2ne, rep_prsne, & capping) - call viscous_coeffs_and_rep_pressure_T (strength(i,j), tinyarea(i,j),& - Deltanw, zetax2nw, & - etax2nw, rep_prsnw, & + call viscous_coeffs_and_rep_pressure_T (strength(i,j), DminTarea(i,j),& + Deltanw, zetax2nw, & + etax2nw, rep_prsnw, & capping) - call viscous_coeffs_and_rep_pressure_T (strength(i,j), tinyarea(i,j),& - Deltasw, zetax2sw, & - etax2sw, rep_prssw, & + call viscous_coeffs_and_rep_pressure_T (strength(i,j), DminTarea(i,j),& + Deltasw, zetax2sw, & + etax2sw, rep_prssw, & capping) - call viscous_coeffs_and_rep_pressure_T (strength(i,j), tinyarea(i,j),& - Deltase, zetax2se, & - etax2se, rep_prsse, & + call viscous_coeffs_and_rep_pressure_T (strength(i,j), DminTarea(i,j),& + Deltase, zetax2se, & + etax2se, rep_prsse, & capping) @@ -1467,7 +1466,7 @@ subroutine stress_T (nx_block, ny_block, & uvelN, vvelN, & dxN, dyE, & dxT, dyT, & - tarear, tinyarea, & + tarear, DminTarea, & strength, & zetax2T, etax2T, & stresspT, stressmT, & @@ -1476,7 +1475,7 @@ subroutine stress_T (nx_block, ny_block, & rdg_conv, rdg_shear ) use ice_dyn_shared, only: strain_rates_T, deformations_T, & - viscous_coeffs_and_rep_pressure_T + viscous_coeffs_and_rep_pressure_T, capping integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1487,7 +1486,6 @@ subroutine stress_T (nx_block, ny_block, & indxti , & ! compressed index in i-direction indxtj ! compressed index in j-direction - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvelE , & ! x-component of velocity (m/s) at the E point vvelE , & ! y-component of velocity (m/s) at the N point @@ -1499,7 +1497,7 @@ subroutine stress_T (nx_block, ny_block, & dyT , & ! height of T-cell through the middle (m) strength , & ! ice strength (N/m) tarear , & ! 1/tarea - tinyarea ! puny*tarea + DminTarea ! deltaminEVP*tarea real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & zetax2T , & ! zetax2 = 2*zeta (bulk viscous coeff) @@ -1523,8 +1521,6 @@ subroutine stress_T (nx_block, ny_block, & divT, tensionT, shearT, DeltaT, & ! strain rates at T point rep_prsT ! replacement pressure at T point - real(kind=dbl_kind), parameter :: capping = c1 ! 
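With capping promoted from a hard-coded local parameter to a shared, namelist-settable value, one expression covers both limiter styles. A condensed fragment of viscous_coeffs_and_rep_pressure_T, with Dmin standing for DminTarea at T points (DminUarea at U points):

      ! capping = c1 reproduces the Hibler (1979) cap,
      !   zeta = P / (2*max(Delta, Dmin))
      ! capping = c0 gives the smooth Kreyscher et al. (2000) form,
      !   zeta = P / (2*(Delta + Dmin))
      tmpcalc = capping     *(strength/max(Delta, Dmin)) + &
                (c1-capping)*(strength/(Delta + Dmin))
      zetax2  = (c1+Ktens)*tmpcalc
      rep_prs = (c1-Ktens)*tmpcalc*Delta
      etax2   = epp2i*zetax2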
of the viscous coef - character(len=*), parameter :: subname = '(stress_T)' !----------------------------------------------------------------- @@ -1555,8 +1551,7 @@ subroutine stress_T (nx_block, ny_block, & !----------------------------------------------------------------- call viscous_coeffs_and_rep_pressure_T (strength(i,j), & - tinyarea(i,j), & - DeltaT, & + DminTarea(i,j), DeltaT, & zetax2T(i,j),etax2T(i,j),& rep_prsT, capping ) @@ -1624,7 +1619,7 @@ subroutine stress_U (nx_block, ny_block, & use ice_dyn_shared, only: strain_rates_U, & viscous_coeffs_and_rep_pressure_T2U, & viscous_coeffs_and_rep_pressure_U, & - visc_coeff_method + visc_coeff_method, deltaminEVP, capping integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1673,19 +1668,10 @@ subroutine stress_U (nx_block, ny_block, & real (kind=dbl_kind) :: & divU, tensionU, shearU, DeltaU, & ! strain rates at U point zetax2U, etax2U, rep_prsU, & ! replacement pressure at U point - puny, tinyareaU + DminUarea - real(kind=dbl_kind), parameter :: capping = c1 ! of the viscous coef - character(len=*), parameter :: subname = '(stress_U)' - call icepack_query_parameters(puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) then - call abort_ice(error_message=subname, file=__FILE__, & - line=__LINE__) - end if - do ij = 1, icellu i = indxui(ij) j = indxuj(ij) @@ -1726,15 +1712,15 @@ subroutine stress_U (nx_block, ny_block, & elseif (visc_coeff_method == 'avg_strength') then - tinyareaU = puny*uarea(i,j) - + DminUarea = deltaminEVP*uarea(i,j) + call viscous_coeffs_and_rep_pressure_U (strength(i ,j ), strength(i ,j+1), & strength(i+1,j+1), strength(i+1,j ), & hm (i ,j ) , hm (i ,j+1), & hm (i+1,j+1) , hm (i+1,j ), & tarea (i ,j ) , tarea (i ,j+1), & tarea (i+1,j+1) , tarea (i+1,j ), & - tinyareaU, & + DminUarea, & DeltaU , capping, & zetax2U, etax2U, rep_prsU) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 index c691453cb..b896bdfe4 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 @@ -132,7 +132,8 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & use ice_kinds_mod use ice_constants, only : p027, p055, p111, p166, p222, p25, & p333, p5, c1p5, c1 - use ice_dyn_shared, only : ecci, denom1, arlx1i, Ktens, revp + use ice_dyn_shared, only : ecci, denom1, arlx1i, Ktens, revp, & + deltaminEVP implicit none @@ -152,7 +153,7 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & ! 
local variables integer(kind=int_kind) :: iw, il, iu - real(kind=dbl_kind) :: puny, divune, divunw, divuse, divusw, & + real(kind=dbl_kind) :: divune, divunw, divuse, divusw, & tensionne, tensionnw, tensionse, tensionsw, shearne, shearnw, & shearse, shearsw, Deltane, Deltanw, Deltase, Deltasw, c0ne, & c0nw, c0se, c0sw, c1ne, c1nw, c1se, c1sw, ssigpn, ssigps, & @@ -163,17 +164,10 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & csig12se, csig12sw, str12ew, str12we, str12ns, str12sn, & strp_tmp, strm_tmp, tmp_uvel_ee, tmp_vvel_se, tmp_vvel_ee, & tmp_vvel_ne, tmp_uvel_ne, tmp_uvel_se, dxhy, dyhx, cxp, cyp, & - cxm, cym, tinyarea,tmparea + cxm, cym, tmparea, DminTarea character(len=*), parameter :: subname = '(stress_iter)' - call icepack_query_parameters(puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) then - call abort_ice(error_message=subname, file=__FILE__, & - line=__LINE__) - end if - #ifdef _OPENACC !$acc parallel & !$acc present(ee, ne, se, strength, uvel, vvel, dxt, dyt, hte, & @@ -190,14 +184,14 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & if (skiptcell(iw)) cycle - tmparea = dxt(iw) * dyt(iw) ! necessary to split calc of tinyarea. Otherwize not binary identical - tinyarea = puny * tmparea - dxhy = p5 * (hte(iw) - htem1(iw)) - dyhx = p5 * (htn(iw) - htnm1(iw)) - cxp = c1p5 * htn(iw) - p5 * htnm1(iw) - cyp = c1p5 * hte(iw) - p5 * htem1(iw) - cxm = -(c1p5 * htnm1(iw) - p5 * htn(iw)) - cym = -(c1p5 * htem1(iw) - p5 * hte(iw)) + tmparea = dxt(iw) * dyt(iw) ! necessary to split calc of DminTarea. Otherwize not binary identical + DminTarea = deltaminEVP * tmparea + dxhy = p5 * (hte(iw) - htem1(iw)) + dyhx = p5 * (htn(iw) - htnm1(iw)) + cxp = c1p5 * htn(iw) - p5 * htnm1(iw) + cyp = c1p5 * hte(iw) - p5 * htem1(iw) + cxm = -(c1p5 * htnm1(iw) - p5 * htn(iw)) + cym = -(c1p5 * htem1(iw) - p5 * hte(iw)) !-------------------------------------------------------------- ! strain rates @@ -252,10 +246,10 @@ subroutine stress_iter(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & ! save replacement pressure for principal stress calculation !-------------------------------------------------------------- - c0ne = strength(iw) / max(Deltane, tinyarea) - c0nw = strength(iw) / max(Deltanw, tinyarea) - c0sw = strength(iw) / max(Deltasw, tinyarea) - c0se = strength(iw) / max(Deltase, tinyarea) + c0ne = strength(iw) / max(Deltane, DminTarea) + c0nw = strength(iw) / max(Deltanw, DminTarea) + c0sw = strength(iw) / max(Deltasw, DminTarea) + c0se = strength(iw) / max(Deltase, DminTarea) c1ne = c0ne * arlx1i c1nw = c0nw * arlx1i @@ -408,7 +402,8 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & use ice_kinds_mod use ice_constants, only : p027, p055, p111, p166, p222, p25, & p333, p5, c1p5, c1, c0 - use ice_dyn_shared, only : ecci, denom1, arlx1i, Ktens, revp + use ice_dyn_shared, only : ecci, denom1, arlx1i, Ktens, revp,& + deltaminEVP implicit none @@ -429,7 +424,7 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & ! 
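! Why tmparea is formed first in both 1d kernels: the 2D code multiplies
! deltamin by the precomputed tarea = dxt*dyt, so the 1D code must group
! the product the same way,
!    tmparea   = dxt(iw)*dyt(iw)
!    DminTarea = deltaminEVP*tmparea
! A single expression deltaminEVP*dxt(iw)*dyt(iw) associates left-to-right
! and can differ in the last bits, breaking binary-identical results.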
local variables integer(kind=int_kind) :: iw, il, iu - real(kind=dbl_kind) :: puny, divune, divunw, divuse, divusw, & + real(kind=dbl_kind) :: divune, divunw, divuse, divusw, & tensionne, tensionnw, tensionse, tensionsw, shearne, shearnw, & shearse, shearsw, Deltane, Deltanw, Deltase, Deltasw, c0ne, & c0nw, c0se, c0sw, c1ne, c1nw, c1se, c1sw, ssigpn, ssigps, & @@ -440,17 +435,10 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & csig12se, csig12sw, str12ew, str12we, str12ns, str12sn, & strp_tmp, strm_tmp, tmp_uvel_ee, tmp_vvel_se, tmp_vvel_ee, & tmp_vvel_ne, tmp_uvel_ne, tmp_uvel_se, dxhy, dyhx, cxp, cyp, & - cxm, cym, tinyarea, tmparea + cxm, cym, tmparea, DminTarea character(len=*), parameter :: subname = '(stress_last)' - call icepack_query_parameters(puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) then - call abort_ice(error_message=subname, file=__FILE__, & - line=__LINE__) - end if - #ifdef _OPENACC !$acc parallel & !$acc present(ee, ne, se, strength, uvel, vvel, dxt, dyt, hte, & @@ -468,14 +456,14 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & if (skiptcell(iw)) cycle - tmparea = dxt(iw) * dyt(iw) ! necessary to split calc of tinyarea. Otherwize not binary identical - tinyarea = puny * tmparea - dxhy = p5 * (hte(iw) - htem1(iw)) - dyhx = p5 * (htn(iw) - htnm1(iw)) - cxp = c1p5 * htn(iw) - p5 * htnm1(iw) - cyp = c1p5 * hte(iw) - p5 * htem1(iw) - cxm = -(c1p5 * htnm1(iw) - p5 * htn(iw)) - cym = -(c1p5 * htem1(iw) - p5 * hte(iw)) + tmparea = dxt(iw) * dyt(iw) ! necessary to split calc of DminTarea. Otherwize not binary identical + DminTarea = deltaminEVP * tmparea + dxhy = p5 * (hte(iw) - htem1(iw)) + dyhx = p5 * (htn(iw) - htnm1(iw)) + cxp = c1p5 * htn(iw) - p5 * htnm1(iw) + cyp = c1p5 * hte(iw) - p5 * htem1(iw) + cxm = -(c1p5 * htnm1(iw) - p5 * htn(iw)) + cym = -(c1p5 * htem1(iw) - p5 * hte(iw)) !-------------------------------------------------------------- ! strain rates @@ -545,10 +533,10 @@ subroutine stress_last(NA_len, ee, ne, se, lb, ub, uvel, vvel, dxt, & ! save replacement pressure for principal stress calculation !-------------------------------------------------------------- - c0ne = strength(iw) / max(Deltane, tinyarea) - c0nw = strength(iw) / max(Deltanw, tinyarea) - c0sw = strength(iw) / max(Deltasw, tinyarea) - c0se = strength(iw) / max(Deltase, tinyarea) + c0ne = strength(iw) / max(Deltane, DminTarea) + c0nw = strength(iw) / max(Deltanw, DminTarea) + c0sw = strength(iw) / max(Deltasw, DminTarea) + c0se = strength(iw) / max(Deltase, DminTarea) c1ne = c0ne * arlx1i c1nw = c0nw * arlx1i diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 77f7606d4..5fe524b32 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -70,13 +70,16 @@ module ice_dyn_shared m_min = p01 ! minimum ice mass (kg/m^2) real (kind=dbl_kind), public :: & - revp , & ! 0 for classic EVP, 1 for revised EVP + revp , & ! 0 for classic EVP, 1 for revised EVP e_yieldcurve, & ! VP aspect ratio of elliptical yield curve e_plasticpot, & ! VP aspect ratio of elliptical plastic potential - epp2i , & ! 1/(e_plasticpot)^2 - e_factor , & ! (e_yieldcurve)^2/(e_plasticpot)^4 - ecci , & ! temporary for 1d evp - dtei , & ! 1/dte, where dte is subcycling timestep (1/s) + epp2i , & ! 1/(e_plasticpot)^2 + e_factor , & ! (e_yieldcurve)^2/(e_plasticpot)^4 + ecci , & ! temporary for 1d evp + deltaminEVP , & ! 
minimum delta for viscous coefficients (EVP) + deltaminVP , & ! minimum delta for viscous coefficients (VP) + capping , & ! capping of visc coeff (1=Hibler79, 0=Kreyscher2000) + dtei , & ! 1/dte, where dte is subcycling timestep (1/s) ! dte2T , & ! dte/2T denom1 ! constants for stress equation @@ -104,6 +107,9 @@ module ice_dyn_shared uvelE_init, & ! x-component of velocity (m/s), beginning of timestep vvelE_init ! y-component of velocity (m/s), beginning of timestep + real (kind=dbl_kind), allocatable, public :: & + DminTarea(:,:,:) ! deltamin * tarea (m^2/s) + ! ice isotropic tensile strength parameter real (kind=dbl_kind), public :: & Ktens ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) @@ -166,7 +172,7 @@ subroutine init_dyn (dt) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN, divu, shear - use ice_grid, only: ULAT, NLAT, ELAT + use ice_grid, only: ULAT, NLAT, ELAT, tarea real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -188,7 +194,8 @@ subroutine init_dyn (dt) endif allocate(fcor_blk(nx_block,ny_block,max_blocks)) - + allocate(DminTarea(nx_block,ny_block,max_blocks)) + if (grid_ice == 'CD' .or. grid_ice == 'C') then allocate(fcorE_blk(nx_block,ny_block,max_blocks)) allocate(fcorN_blk(nx_block,ny_block,max_blocks)) @@ -262,6 +269,12 @@ subroutine init_dyn (dt) stress12U (i,j,iblk) = c0 endif + if (kdyn == 1) then + DminTarea(i,j,iblk) = deltaminEVP*tarea(i,j,iblk) + elseif (kdyn == 3) then + DminTarea(i,j,iblk) = deltaminVP*tarea(i,j,iblk) + endif + ! ice extent mask on velocity points iceumask(i,j,iblk) = .false. @@ -2054,7 +2067,7 @@ end subroutine strain_rates_U ! by combining tensile strength and a parameterization for grounded ridges. ! J. Geophys. Res. Oceans, 121, 7354-7368. - subroutine viscous_coeffs_and_rep_pressure (strength, tinyarea, & + subroutine viscous_coeffs_and_rep_pressure (strength, DminTarea,& Deltane, Deltanw, & Deltasw, Deltase, & zetax2ne, zetax2nw, & @@ -2066,7 +2079,7 @@ subroutine viscous_coeffs_and_rep_pressure (strength, tinyarea, & capping) real (kind=dbl_kind), intent(in):: & - strength, tinyarea ! at the t-point + strength, DminTarea ! at the t-point real (kind=dbl_kind), intent(in):: & Deltane, Deltanw, Deltasw, Deltase ! Delta at each corner @@ -2084,14 +2097,14 @@ subroutine viscous_coeffs_and_rep_pressure (strength, tinyarea, & ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - tmpcalcne = capping *(strength/max(Deltane, tinyarea))+ & - (c1-capping)* strength/ (Deltane+ tinyarea) - tmpcalcnw = capping *(strength/max(Deltanw, tinyarea))+ & - (c1-capping)* strength/ (Deltanw+ tinyarea) - tmpcalcsw = capping *(strength/max(Deltasw, tinyarea))+ & - (c1-capping)* strength/ (Deltasw+ tinyarea) - tmpcalcse = capping *(strength/max(Deltase, tinyarea))+ & - (c1-capping)* strength/ (Deltase+ tinyarea) + tmpcalcne = capping *(strength/max(Deltane, DminTarea))+ & + (c1-capping)* strength/ (Deltane+ DminTarea) + tmpcalcnw = capping *(strength/max(Deltanw, DminTarea))+ & + (c1-capping)* strength/ (Deltanw+ DminTarea) + tmpcalcsw = capping *(strength/max(Deltasw, DminTarea))+ & + (c1-capping)* strength/ (Deltasw+ DminTarea) + tmpcalcse = capping *(strength/max(Deltase, DminTarea))+ & + (c1-capping)* strength/ (Deltase+ DminTarea) zetax2ne = (c1+Ktens)*tmpcalcne ! northeast rep_prsne = (c1-Ktens)*tmpcalcne*Deltane @@ -2124,15 +2137,14 @@ end subroutine viscous_coeffs_and_rep_pressure ! Lemieux, J. F. et al. (2016). 
Improving the simulation of landfast ice ! by combining tensile strength and a parameterization for grounded ridges. ! J. Geophys. Res. Oceans, 121, 7354-7368. -! capping must be 1 (c1) for evp and 0 for vp solver - subroutine viscous_coeffs_and_rep_pressure_T (strength, tinyarea, & - Delta , zetax2 , & - etax2 , rep_prs , & - capping) + subroutine viscous_coeffs_and_rep_pressure_T (strength, DminTarea, & + Delta , zetax2 , & + etax2 , rep_prs , & + capping) real (kind=dbl_kind), intent(in):: & - strength, tinyarea + strength, DminTarea real (kind=dbl_kind), intent(in):: & Delta, capping @@ -2148,8 +2160,8 @@ subroutine viscous_coeffs_and_rep_pressure_T (strength, tinyarea, & ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - tmpcalc = capping *(strength/max(Delta,tinyarea))+ & - (c1-capping)*(strength/(Delta + tinyarea)) + tmpcalc = capping *(strength/max(Delta,DminTarea))+ & + (c1-capping)*(strength/(Delta + DminTarea)) zetax2 = (c1+Ktens)*tmpcalc rep_prs = (c1-Ktens)*tmpcalc*Delta etax2 = epp2i*zetax2 @@ -2213,7 +2225,7 @@ subroutine viscous_coeffs_and_rep_pressure_U (strength_00, strength_01, & maskT_11, maskT_10, & tarea_00, tarea_01, & tarea_11, tarea_10, & - tinyareaU, & + DminUarea, & deltaU, capping, & zetax2U, etax2U, & rep_prsU) @@ -2223,7 +2235,7 @@ subroutine viscous_coeffs_and_rep_pressure_U (strength_00, strength_01, & strength_00,strength_10,strength_11,strength_01, & maskT_00, maskT_10, maskT_11, maskT_01, & tarea_00, tarea_10, tarea_11, tarea_01, & - tinyareaU, deltaU, capping + DminUarea, deltaU, capping real (kind=dbl_kind), intent(out):: zetax2U, etax2U, rep_prsU @@ -2248,8 +2260,8 @@ subroutine viscous_coeffs_and_rep_pressure_U (strength_00, strength_01, & ! IMPROVE the calc below are the same as in the other viscous coeff...could reduce redundency ! we could have a strength_U subroutine and then calc the visc coeff - tmpcalc = capping *(strength/max(deltaU,tinyareaU))+ & - (c1-capping)*(strength/(deltaU + tinyareaU)) + tmpcalc = capping *(strength/max(deltaU,DminUarea))+ & + (c1-capping)*(strength/(deltaU + DminUarea)) zetax2U = (c1+Ktens)*tmpcalc rep_prsU = (c1-Ktens)*tmpcalc*deltaU etax2U = epp2i*zetax2U diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 5806b0f00..61720d2eb 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -122,7 +122,7 @@ subroutine init_vp use ice_constants, only: c1, & field_loc_center, field_type_scalar use ice_domain, only: blocks_ice, halo_info - use ice_grid, only: tarea, tinyarea +! use ice_grid, only: tarea ! local variables @@ -133,9 +133,6 @@ subroutine init_vp type (block) :: & this_block ! block information for current block - real (kind=dbl_kind) :: & - min_strain_rate = 2e-09_dbl_kind ! used for recomputing tinyarea - ! Initialize module variables allocate(icellt(max_blocks), icellu(max_blocks)) allocate(indxti(nx_block*ny_block, max_blocks), & @@ -144,28 +141,6 @@ subroutine init_vp indxuj(nx_block*ny_block, max_blocks)) allocate(fld2(nx_block,ny_block,2,max_blocks)) - ! Redefine tinyarea using min_strain_rate - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - tinyarea(i,j,iblk) = min_strain_rate*tarea(i,j,iblk) - enddo - enddo - enddo ! 
iblk - !$OMP END PARALLEL DO - - call ice_HaloUpdate (tinyarea, halo_info, & - field_loc_center, field_type_scalar, & - fillValue=c1) - end subroutine init_vp !======================================================================= @@ -725,7 +700,8 @@ subroutine anderson_solver (icellt , icellu, & use ice_domain_size, only: max_blocks use ice_flux, only: fm, Tbu use ice_grid, only: dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, & - uarear, tinyarea + uarear + use ice_dyn_shared, only: DminTarea use ice_state, only: uvel, vvel, strength use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound @@ -875,7 +851,7 @@ subroutine anderson_solver (icellt , icellu, & dxhy (:,:,iblk), dyhx (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & cxm (:,:,iblk), cym (:,:,iblk), & - tinyarea (:,:,iblk), strength (:,:,iblk),& + DminTarea (:,:,iblk),strength (:,:,iblk),& zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:),& rep_prs(:,:,iblk,:), stress_Pr (:,:,:)) @@ -1177,11 +1153,12 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & dxhy , dyhx , & cxp , cyp , & cxm , cym , & - tinyarea, strength, & + DminTarea,strength, & zetax2 , etax2 , & rep_prs , stPr) - use ice_dyn_shared, only: strain_rates, viscous_coeffs_and_rep_pressure + use ice_dyn_shared, only: strain_rates, viscous_coeffs_and_rep_pressure, & + capping integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1203,7 +1180,7 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & cxp , & ! 1.5*HTN - 0.5*HTS cym , & ! 0.5*HTE - 1.5*HTW cxm , & ! 0.5*HTN - 1.5*HTS - tinyarea ! min_strain_rate*tarea + DminTarea ! deltaminVP*tarea real (kind=dbl_kind), dimension(nx_block,ny_block,4), intent(out) :: & zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) @@ -1228,11 +1205,9 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & stressp_1, stressp_2, stressp_3, stressp_4 , & strp_tmp - real(kind=dbl_kind) ,parameter :: capping = c0 ! of the viscous coef character(len=*), parameter :: subname = '(calc_zeta_dPr)' ! Initialize - ! Initialize stPr, zetax2 and etax2 to zero ! (for cells where icetmask is false) @@ -1267,7 +1242,7 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & ! viscous coefficients and replacement pressure !----------------------------------------------------------------- - call viscous_coeffs_and_rep_pressure (strength(i,j), tinyarea(i,j), & + call viscous_coeffs_and_rep_pressure (strength(i,j), DminTarea(i,j), & Deltane, Deltanw, & Deltasw, Deltase, & zetax2(i,j,1), zetax2(i,j,2), & diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index c3e8c582f..e6a27c96f 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -110,7 +110,8 @@ subroutine input_data seabed_stress, seabed_stress_method, & k1, k2, alphab, threshold_hw, Ktens, & e_yieldcurve, e_plasticpot, coriolis, & - ssh_stress, kridge, brlx, arlx + ssh_stress, kridge, brlx, arlx, & + deltaminEVP, deltaminVP, capping use ice_dyn_vp, only: maxits_nonlin, precond, dim_fgmres, dim_pgmres, maxits_fgmres, & maxits_pgmres, monitor_nonlin, monitor_fgmres, & @@ -225,6 +226,7 @@ subroutine input_data damping_andacc, start_andacc, fpfunc_andacc, use_mean_vrel, & ortho_type, seabed_stress, seabed_stress_method, & k1, k2, alphab, threshold_hw, & + deltaminEVP, deltaminVP, capping, & Cf, Pstar, Cstar, Ktens namelist /shortwave_nml/ & @@ -379,6 +381,9 @@ subroutine input_data e_yieldcurve = 2.0_dbl_kind ! VP aspect ratio of elliptical yield curve e_plasticpot = 2.0_dbl_kind ! 
VP aspect ratio of elliptical plastic potential visc_coeff_method = 'avg_strength' ! calc visc coeff at U point: avg_strength, avg_zeta + deltaminEVP = 1e-11_dbl_kind ! minimum delta for viscous coeff (EVP, Hunke 2001) + deltaminVP = 2e-9_dbl_kind ! minimum delta for viscous coeff (VP, Hibler 1979) + capping = 1.0_dbl_kind ! method for capping of visc coeff (1=Hibler 1979,0=Kreyscher2000) maxits_nonlin = 4 ! max nb of iteration for nonlinear solver precond = 'pgmres' ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), 'pgmres' (Jacobi-preconditioned GMRES) dim_fgmres = 50 ! size of fgmres Krylov subspace @@ -747,6 +752,9 @@ subroutine input_data call broadcast_scalar(e_yieldcurve, master_task) call broadcast_scalar(e_plasticpot, master_task) call broadcast_scalar(visc_coeff_method, master_task) + call broadcast_scalar(deltaminEVP, master_task) + call broadcast_scalar(deltaminVP, master_task) + call broadcast_scalar(capping, master_task) call broadcast_scalar(advection, master_task) call broadcast_scalar(conserv_check, master_task) call broadcast_scalar(shortwave, master_task) @@ -1034,6 +1042,16 @@ subroutine input_data abort_list = trim(abort_list)//":44" endif endif + + if (kdyn == 1 .or. kdyn == 3) then + if (capping /= c0 .and. capping /= c1) then + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: invalid method for capping viscous coefficients' + write(nu_diag,*) subname//' ERROR: capping should be equal to 0.0 or 1.0' + endif + abort_list = trim(abort_list)//":45" + endif + endif rpcesm = 0 rplvl = 0 @@ -1549,7 +1567,15 @@ subroutine input_data write(nu_diag,1002) ' e_yieldcurve = ', e_yieldcurve, ' : aspect ratio of yield curve' write(nu_diag,1002) ' e_plasticpot = ', e_plasticpot, ' : aspect ratio of plastic potential' endif - + + if (kdyn == 1) then + write(nu_diag,1003) ' deltamin = ', deltaminEVP, ' : minimum delta for viscous coefficients' + write(nu_diag,1002) ' capping = ', capping, ' : capping method for viscous coefficients' + elseif (kdyn == 3) then + write(nu_diag,1003) ' deltamin = ', deltaminVP, ' : minimum delta for viscous coefficients' + write(nu_diag,1002) ' capping = ', capping, ' : capping method for viscous coefficients' + endif + if (trim(coriolis) == 'latitude') then tmpstr2 = ' : latitude-dependent Coriolis parameter' elseif (trim(coriolis) == 'contant') then @@ -2210,6 +2236,7 @@ subroutine input_data 1000 format (a20,1x,f13.6,1x,a) ! float 1002 format (a20,5x,f9.2,1x,a) + 1003 format (a20,1x,G11.4,1x,a) 1009 format (a20,1x,d13.6,1x,a) 1010 format (a20,8x,l6,1x,a) ! logical 1011 format (a20,1x,l6) diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index e43a81b9a..0b174a408 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -91,7 +91,6 @@ module ice_grid uarear , & ! 1/uarea narear , & ! 1/narea earear , & ! 1/earea - tinyarea,& ! puny*tarea tarean , & ! area of NH T-cells tareas , & ! area of SH T-cells ULON , & ! longitude of velocity pts, NE corner of T pts (radians) @@ -230,7 +229,6 @@ subroutine alloc_grid uarear (nx_block,ny_block,max_blocks), & ! 1/uarea narear (nx_block,ny_block,max_blocks), & ! 1/narea earear (nx_block,ny_block,max_blocks), & ! 1/earea - tinyarea (nx_block,ny_block,max_blocks), & ! puny*tarea tarean (nx_block,ny_block,max_blocks), & ! area of NH T-cells tareas (nx_block,ny_block,max_blocks), & ! area of SH T-cells ULON (nx_block,ny_block,max_blocks), & ! 
longitude of U pts, NE corner (radians) @@ -530,7 +528,7 @@ subroutine init_grid2 else earear(i,j,iblk) = c0 ! possible on boundaries endif - tinyarea(i,j,iblk) = puny*tarea(i,j,iblk) + enddo enddo @@ -1245,7 +1243,6 @@ subroutine latlongrid endif tarear(i,j,iblk) = c1/tarea(i,j,iblk) uarear(i,j,iblk) = c1/uarea(i,j,iblk) - tinyarea(i,j,iblk) = puny*tarea(i,j,iblk) if (single_column) then ULAT (i,j,iblk) = TLAT(i,j,iblk)+(pi/nj) diff --git a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 index 17941435d..a0d18c5fd 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 @@ -436,7 +436,7 @@ end subroutine ice_mesh_create_scolumn subroutine ice_mesh_init_tlon_tlat_area_hm() use ice_grid , only : tlon, tlat, hm, tarea, ULON, ULAT, HTN, HTE, ANGLE, ANGLET - use ice_grid , only : uarea, uarear, tarear, tinyarea + use ice_grid , only : uarea, uarear, tarear!, tinyarea use ice_grid , only : dxt, dyt, dxu, dyu, dyhx, dxhy, cyp, cxp, cym, cxm use ice_grid , only : makemask use ice_boundary , only : ice_HaloUpdate @@ -517,7 +517,7 @@ subroutine ice_mesh_init_tlon_tlat_area_hm() endif tarear(i,j,iblk) = c1/tarea(i,j,iblk) uarear(i,j,iblk) = c1/uarea(i,j,iblk) - tinyarea(i,j,iblk) = puny*tarea(i,j,iblk) +! tinyarea(i,j,iblk) = puny*tarea(i,j,iblk) if (.not. single_column) then if (ny_global == 1) then diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index fcdbce91c..11fa9b5ca 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -148,6 +148,9 @@ e_yieldcurve = 2. e_plasticpot = 2. visc_coeff_method = 'avg_strength' + deltaminEVP = 1e-11 + deltaminVP = 2e-9 + capping = 1. seabed_stress = .false. seabed_stress_method = 'LKD' k1 = 7.5 diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 8ca6e5414..38f38b6b1 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -95,6 +95,7 @@ either Celsius or Kelvin units). "calc_dragio", "if true, calculate ``dragio`` from ``iceruf_ocn`` and ``thickness_ocn_layer1``", "F" "calc_strair", "if true, calculate wind stress", "T" "calc_Tsfc", "if true, calculate surface temperature", "T" + "capping", "parameter for capping method of viscous coefficients", "1.0" "Cdn_atm", "atmospheric drag coefficient", "" "Cdn_ocn", "ocean drag coefficient", "" "Cf", "ratio of ridging work to PE change in ridging", "17." @@ -150,6 +151,8 @@ either Celsius or Kelvin units). "debug_model_task", "Local mpi task value that defines debug_model point output.", "" "debug_model_step", "Initial timestep for output from the debug_model flag.", "" "Delta", "function of strain rates (see Section :ref:`dynam`)", "1/s" + "deltaminEVP", "minimum value of Delta for EVP (see Section :ref:`dynam`)", "1/s" + "deltaminVP", "minimum value of Delta for VP (see Section :ref:`dynam`)", "1/s" "default_season", "Season from which initial values of forcing are set.", "winter" "denom1", "combination of constants for stress equation", "" "depressT", "ratio of freezing temperature to salinity of brine", "0.054 deg/ppt" @@ -165,6 +168,7 @@ either Celsius or Kelvin units). 
"distribution_weight", "weighting method used to compute work per block", "" "divu", "strain rate I component, velocity divergence", "1/s" "divu_adv", "divergence associated with advection", "1/s" + "DminTarea", "deltamin \* tarea", "m\ :math:`^2`/s" "dms", "dimethyl sulfide concentration", "mmol/m\ :math:`^3`" "dmsp", "dimethyl sulfoniopropionate concentration", "mmol/m\ :math:`^3`" "dpscale", "time scale for flushing in permeable ice", ":math:`1\times 10^{-3}`" @@ -677,7 +681,6 @@ either Celsius or Kelvin units). "time_end", "ending time for history averages", "" "time_forc", "time of last forcing update", "s" "Timelt", "melting temperature of ice top surface", "0. C" - "tinyarea", "puny \* tarea", "m\ :math:`^2`" "Tinz", "Internal ice temperature", "C" "TLAT", "latitude of cell center", "radians" "TLON", "longitude of cell center", "radians" @@ -731,6 +734,7 @@ either Celsius or Kelvin units). "vice(n)", "volume per unit area of ice (in category n)", "m" "vicen_init", "ice volume at beginning of timestep", "m" "viscosity_dyn", "dynamic viscosity of brine", ":math:`1.79\times 10^{-3}` kg/m/s" + "visc_coeff_method", "method for calculating viscous coefficients (‘avg_strength’ or ‘avg_zeta’)", "avg_strength" "vocn", "ocean current in the y-direction", "m/s" "vonkar", "von Karman constant", "0.4" "vraftn", "volume of rafted ice", "m" diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index e31652064..aa63facf7 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -399,21 +399,27 @@ dynamics_nml "``alphab``", "real", ":math:`\alpha_{b}` factor in :cite:`Lemieux16`", "20.0" "``arlx``", "real", "revised_evp value", "300.0" "``brlx``", "real", "revised_evp value", "300.0" + "``capping``", "real", "method for capping the viscous coefficients", "1.0" + "", "``0``", "Kreyscher 2000", "" + "", "``1``", "Hibler 1979", "" "``Cf``", "real", "ratio of ridging work to PE change in ridging", "17.0" "``coriolis``", "``constant``", "constant coriolis value = 1.46e-4 s\ :math:`^{-1}`", "``latitude``" "", "``latitude``", "coriolis variable by latitude", "" "", "``zero``", "zero coriolis", "" "``Cstar``", "real", "constant in Hibler strength formula", "20" - "``e_ratio``", "real", "EVP ellipse aspect ratio", "2.0" + "``deltaminEVP``", "real", "minimum delta for viscous coefficients", "1e-11" + "``deltaminVP``", "real", "minimum delta for viscous coefficients", "2e-9" "``dim_fgmres``", "integer", "maximum number of Arnoldi iterations for FGMRES solver", "50" "``dim_pgmres``", "integer", "maximum number of Arnoldi iterations for PGMRES preconditioner", "5" + "``e_plasticpot``", "real", "aspect ratio of elliptical plastic potential", "2.0" + "``e_yieldcurve``", "real", "aspect ratio of elliptical yield curve", "2.0" + "``evp_algorithm``", "``standard_2d``", "standard 2d EVP memory parallel solver", "standard_2d" + "", "``shared_mem_1d``", "1d shared memory solver", "" "``kdyn``", "``-1``", "dynamics algorithm OFF", "1" "", "``0``", "dynamics OFF", "" "", "``1``", "EVP dynamics", "" "", "``2``", "EAP dynamics", "" "", "``3``", "VP dynamics", "" - "``evp_algorithm``", "``standard_2d``", "standard 2d EVP memory parallel solver", "standard_2d" - "", "``shared_mem_1d``", "1d shared memory solver", "" "``kstrength``", "``0``", "ice strength formulation :cite:`Hibler79`", "1" "", "``1``", "ice strength formulation :cite:`Rothrock75`", "" "``krdg_partic``", "``0``", "old ridging participation function", "1" @@ -425,7 
+431,7 @@ dynamics_nml "``ktransport``", "``-1``", "transport disabled", "1" "", "``1``", "transport enabled", "" "``Ktens``", "real", "Tensile strength factor (see :cite:`Konig10`)", "0.0" - "``k1``", "real", "1st free parameter for landfast parameterization", "8.0" + "``k1``", "real", "1st free parameter for landfast parameterization", "7.5" "``k2``", "real", "2nd free parameter (N/m\ :math:`^3`) for landfast parameterization", "15.0" "``maxits_nonlin``", "integer", "maximum number of nonlinear iterations for VP solver", "1000" "``maxits_fgmres``", "integer", "maximum number of restarts for FGMRES solver", "1" From 65340489228b85b83cba4de7d7a8dff75e0d015b Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Mon, 7 Mar 2022 16:41:21 -0800 Subject: [PATCH 077/109] Update cgridDEV to main #8d7314f (CICE6.3.1 release) (#64) * update icepack, rename snwITDrdg to snwitdrdg (#658) * Change max_blocks for rake tests on izumi (nothread). (#665) * Fix some raketests for izumi * fix some rake tests * Makefile: make Fortran object files depend on their dependency files (#667) When 'make' is invoked on the CICE Makefile, the first thing it does is to try to make the included dependency files (*.d) (which are in fact Makefiles themselves) [1], in alphabetical order. The rule to make the dep files has the dependency generator, 'makdep', as a prerequisite, so when processing the first dep file, make notices 'makdep' does not exist and proceeds to build it. If for whatever reason this compilation fails, make will then proceed to the second dep file, notice that it recently tried and failed to build its dependency 'makdep', give up on the second dep file, proceed to the third, and so on. In the end, no dep file is produced. Make then restarts itself and proceeds to build the code, which of course fails catastrophically because the Fortran source files are not compiled in the right order, since the dependency files are missing. To avoid that, add a dependency on the dep file to the rules that make the object file out of the Fortran source files. Since old-fashioned suffix rules cannot have their own prerequisites [2], migrate the rules for the Fortran source files to use pattern rules [3] instead (see the sketch below). While at it, also migrate the rule for the C source files. With this new dependency, the builds abort early, before trying to compile the Fortran sources, making it easier to understand what has gone wrong. Since we do not use suffix rules anymore, remove the '.SUFFIXES' line that indicates which extension to use suffix rules for (but keep the line that eliminates all default suffix rules). [1] https://www.gnu.org/software/make/manual/html_node/Remaking-Makefiles.html [2] https://www.gnu.org/software/make/manual/html_node/Suffix-Rules.html [3] https://www.gnu.org/software/make/manual/html_node/Pattern-Rules.html#Pattern-Rules * Fix multi-pe advection=none bug (#664) * update parsing scripts to improve robustness, fix multi-pe advection=none * Update cice script to improve performance including minor refactoring of parse_namelist and parse_settings to reduce cost and ability to use already setup ice_in file from a prior case in the suite. Added commented out timing ability in cice.setup. Change test default to PEND from FAIL.
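As an illustration of the Makefile change described in the #667 item above, a minimal sketch follows. This is not the verbatim CICE Makefile: the macro names (FC, FFLAGS, INCLDIR) and the makdep invocation are stand-ins, and the rule that builds the makdep executable itself is assumed to be defined elsewhere.

  # Old-style suffix rule: a suffix rule cannot carry extra
  # prerequisites, so nothing ties foo.o to foo.d.
  # .SUFFIXES: .F90 .o
  # .F90.o:
  #         $(FC) -c $(FFLAGS) $(INCLDIR) $<

  # Pattern rules may have their own prerequisites: each object now
  # also depends on its dependency file, so if makdep cannot be built
  # the .d files cannot be remade and the build aborts before any
  # Fortran source is compiled. (Recipe lines must start with a tab.)
  %.d : %.F90 makdep
          ./makdep $< > $@

  %.o : %.F90 %.d
          $(FC) -c $(FFLAGS) $(INCLDIR) $<

With this shape, a failure to build makdep stops the build at the first missing .d file instead of surfacing later as mis-ordered Fortran compiles.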
* fix cice.setup for case * add sedbak implementation to support Mac sed * s/spend/spent * nuopc/cmeps driver updates (#668) * add debug_model feature * add required variables and calls for tr_snow * Main namelist debug (#671) * Adding method to write erroneous namelist options * Remove erroneous comma in abort_ice for namelist check * Added check for zbgc_nml. I missed that namelist in this file. * Added space and colons for namelist error output * Added space and colons for namelist error output Co-authored-by: David A. Hebert * NUOPC/CMEPS cap updates (#670) * updated orbital calculations needed for cesm * fixed problems in updated orbital calculations needed for cesm * update CICE6 to support coupling with UFS * put in changes so that both ufsatm and cesm requirements for potential temperature and density are satisfied * update icepack submodule * Revert "update icepack submodule" This reverts commit e70d1abcbeb4351195a2b81c6ce3f623c936426c. * update comp_ice.backend with temporary ice_timers fix * Fix threading problem in init_bgc * Fix additional OMP problems * changes for coldstart running * Move the forapps directory * remove cesmcoupled ifdefs * Fix logging issues for NUOPC * removal of many cpp-ifdefs * fix compile errors * fixes to get cesm working * fixed white space issue * Add restart_coszen namelist option * Update NUOPC cap to work with latest CICE6 master * nuopc,cmeps or s2s build updates * fixes for dbug_flag * Update nuopc2 to latest CICE master * Fix some merge problems * Fix dbug variable * Manual merge of UFS changes * fixes to get CESM B1850 compset working * refactored ice_prescribed_mod.F90 to work with cdeps rather than the mct data models * Fix use_restart_time * changes for creating masks at runtime * added ice_mesh_mod * implemented area correction factors as option * more cleanup * Fix dragio * Fix mushy bug * updates to nuopc cap to resolve inconsistency between driver inputs and cice namelists * changed error message * added icepack_warnings_flush * updates to get ice categories working * updates to have F compset almost working with cice6 - still problems in polar regions - need to resolve 253K/cice6 versus 273K/cice5 differences * changed tolerance of mesh/grid comparison * added issues raised in PR * Update CESM-CICE sync with new time manager * Add back in latlongrid * Add new advanced snow physics to driver * Fix restart issue with land blocks * Update mesh check in cap * fix scam problems * reintroduced imesh_eps check * Put dragio in the namelist instead * Remove redundant code * Fix some indents Co-authored-by: Mariana Vertenstein Co-authored-by: apcraig Co-authored-by: Denise Worthen * Add CESM1_PIO for fill value check (#675) * Add CESM1_PIO for fill value check * Revert PIO_FILL_DOUBLE change for now * - Update the namelist read to make the group order flexible. (#677) - Remove recent update to namelist read that traps bad lines, it conflicts with flexibility to read groups in random order picked up by NAG. 
- change print* statements to write(nu_diag,*) * Port to Narwhal and add perf_suite (#678) * Add narwhal intel, gnu, cray, aocc Add perf_suite.ts * update narwhal_cray and perf_suite * Update OMP (#680) * Add narwhal intel, gnu, cray, aocc Add perf_suite.ts * update narwhal_cray and perf_suite * Review and update OMP implementation - Fix call to timers in block loops - Fix some OMP Private variables - Test OMP Scheduling, add SCHEDULE(runtime) to some OMP loops - Review column and advection OMP implementation - ADD OMP_TIMERS CPP option (temporary) to time threaded sections - Add timer_tmp timers (temporary) - Add omp_suite.ts test suite - Add ability to set OMP_SCHEDULE via options (ompscheds, ompscheds1, ompschedd1) * - Review diagnostics OMP implementation - Add timer_stats namelist to turn on extra timer output information - Add ICE_BFBTYPE and update bit-for-bit comparison logic in scripts - Update qc and logbfb testing - Remove logbfb and qchkf tests, add cmplog, cmplogrest, cmprest set_env files to set ICE_BFBTYPE - Update documentation * Update EVP OMP implementation * - Refactor puny/pi scalars in eap dynamics to improve performance - Update OMP in evp and eap * Clean up * Comment out temporary timers * Update OMP env variables on Narwhal * Update gaffney OMP_STACKSIZE * update OMP_STACKSIZE on cori * Update Onyx OMP_STACKSIZE Update documentation * Update OMP_STACKSIZE on mustang * - Update Tsfc values on land in various places in the code, was affecting testing. Specifically fix upwind advection. - Comment out OMP in ice_dyn_evp_1d_kernel, was producing non bit-for-bit results with different thread counts * updating LICENSE.pdf for 2022 * seabed stress - remove if statements (#673) * refactor seabed_stress. Bit for bit * Removed if statement from stepu. Results are binary identical, however taubx and tauby are updated on all iterations instead of just the last one. Not used within iteration * changed capping from logical to numeric in order to remove if statement. Moved call to deformation out of loop * clean dyn_finish, correct intent(inout) to intent(in) for Cw, resse Cb in stepu, remove if from seabed_stress_LKD * Resolve conflicts after updating main * modified environment for Freya to accommodate additional OMP commands * Requested changes after review. Only changed in seabed stress and not bit for bit if cor=0.0. Added legacy comment in ice_dyn_finish * move deformation to subcycling * - Update version and copyright. (#691) - Remove gordon and conrad machines. - Add setenv OMP_STACKSIZE commented out in env files - Update Icepack to fc4b809 * add OMP_STACKSIZE for koehr (#693) * Update C/CD deformations calls to be consistent with main B changes Update tauxbx, tauxby calculations on C/CD to be consistent with main B changes * Update OpenMP in C/CD implementation Extend omp_suite to include C/CD tests * reconcile recent merge problem * set default value of capping to 0. in vp cases for backwards compatibility * Set capping to 1.0 in vp consistent with evp, changes answers for vp configurations Co-authored-by: David A. Bailey Co-authored-by: Philippe Blain Co-authored-by: Denise Worthen Co-authored-by: daveh150 Co-authored-by: David A.
Hebert Co-authored-by: Mariana Vertenstein Co-authored-by: Elizabeth Hunke Co-authored-by: TRasmussen <33480590+TillRasmussen@users.noreply.github.com> --- LICENSE.pdf | Bin 80898 -> 113509 bytes .../cicedynB/analysis/ice_diagnostics.F90 | 13 +- cicecore/cicedynB/analysis/ice_history.F90 | 30 +- .../cicedynB/analysis/ice_history_bgc.F90 | 31 +- .../cicedynB/analysis/ice_history_drag.F90 | 30 +- .../cicedynB/analysis/ice_history_fsd.F90 | 29 +- .../cicedynB/analysis/ice_history_mechred.F90 | 29 +- .../cicedynB/analysis/ice_history_pond.F90 | 29 +- .../cicedynB/analysis/ice_history_snow.F90 | 46 ++- cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 212 ++++------ cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 383 ++++++++---------- cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 | 23 +- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 101 ++--- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 64 ++- .../dynamics/ice_transport_driver.F90 | 95 +++-- .../cicedynB/dynamics/ice_transport_remap.F90 | 60 ++- cicecore/cicedynB/general/ice_init.F90 | 196 +++++++-- cicecore/cicedynB/general/ice_step_mod.F90 | 26 +- .../infrastructure/comm/mpi/ice_timers.F90 | 36 +- .../infrastructure/comm/serial/ice_timers.F90 | 34 +- .../cicedynB/infrastructure/ice_domain.F90 | 27 +- cicecore/cicedynB/infrastructure/ice_grid.F90 | 31 ++ .../infrastructure/ice_restart_driver.F90 | 11 + .../cicedynB/infrastructure/ice_restoring.F90 | 6 +- .../infrastructure/io/io_pio2/ice_restart.F90 | 11 +- cicecore/drivers/direct/hadgem3/CICE.F90 | 4 +- cicecore/drivers/mct/cesm1/CICE_RunMod.F90 | 1 - cicecore/drivers/mct/cesm1/CICE_copyright.txt | 4 +- .../drivers/mct/cesm1/ice_prescribed_mod.F90 | 27 +- cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 | 53 ++- cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 | 100 ++++- .../drivers/nuopc/cmeps/CICE_copyright.txt | 4 +- .../drivers/nuopc/cmeps/ice_comp_nuopc.F90 | 203 ++++++---- .../drivers/nuopc/cmeps/ice_import_export.F90 | 6 +- cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 | 7 +- cicecore/drivers/nuopc/dmi/CICE.F90 | 4 +- cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 | 4 +- cicecore/drivers/standalone/cice/CICE.F90 | 4 +- .../drivers/standalone/cice/CICE_FinalMod.F90 | 5 +- .../drivers/standalone/cice/CICE_RunMod.F90 | 28 +- cicecore/shared/ice_init_column.F90 | 27 +- cicecore/version.txt | 2 +- configuration/scripts/cice.batch.csh | 20 +- configuration/scripts/cice.launch.csh | 9 +- configuration/scripts/cice.run.setup.csh | 6 +- configuration/scripts/cice.settings | 2 + configuration/scripts/ice_in | 1 + .../scripts/machines/Macros.conrad_cray | 57 --- .../scripts/machines/Macros.conrad_intel | 56 --- .../scripts/machines/Macros.conrad_pgi | 55 --- .../scripts/machines/Macros.gordon_cray | 57 --- ...{Macros.gordon_pgi => Macros.narwhal_aocc} | 22 +- ...{Macros.conrad_gnu => Macros.narwhal_cray} | 21 +- .../{Macros.gordon_gnu => Macros.narwhal_gnu} | 8 +- ...cros.gordon_intel => Macros.narwhal_intel} | 8 +- .../scripts/machines/env.banting_gnu | 3 + .../scripts/machines/env.banting_intel | 3 + .../scripts/machines/env.cesium_intel | 3 + .../scripts/machines/env.cheyenne_gnu | 6 +- .../scripts/machines/env.cheyenne_intel | 6 +- .../scripts/machines/env.cheyenne_pgi | 6 +- .../scripts/machines/env.compy_intel | 3 + .../scripts/machines/env.conda_linux | 3 + .../scripts/machines/env.conda_macos | 3 + configuration/scripts/machines/env.conrad_gnu | 77 ---- .../scripts/machines/env.conrad_intel | 59 --- configuration/scripts/machines/env.conrad_pgi | 57 --- configuration/scripts/machines/env.cori_intel | 1 + 
configuration/scripts/machines/env.daley_gnu | 3 + .../scripts/machines/env.daley_intel | 3 + configuration/scripts/machines/env.fram_intel | 3 + configuration/scripts/machines/env.freya_gnu | 3 +- .../scripts/machines/env.freya_intel | 1 + configuration/scripts/machines/env.gaea_intel | 3 + .../scripts/machines/env.gaffney_gnu | 1 + .../scripts/machines/env.gaffney_intel | 1 + .../scripts/machines/env.gordon_intel | 59 --- configuration/scripts/machines/env.gordon_pgi | 57 --- configuration/scripts/machines/env.hera_intel | 3 + .../scripts/machines/env.high_Sierra_gnu | 3 + .../scripts/machines/env.hobart_intel | 3 + configuration/scripts/machines/env.hobart_nag | 3 + .../scripts/machines/env.koehr_intel | 3 + .../scripts/machines/env.millikan_intel | 3 + .../scripts/machines/env.mustang_intel18 | 2 +- .../scripts/machines/env.mustang_intel19 | 2 +- .../scripts/machines/env.mustang_intel20 | 2 +- .../scripts/machines/env.narwhal_aocc | 54 +++ .../{env.conrad_cray => env.narwhal_cray} | 43 +- .../{env.gordon_gnu => env.narwhal_gnu} | 43 +- .../{env.gordon_cray => env.narwhal_intel} | 45 +- configuration/scripts/machines/env.onyx_cray | 1 + configuration/scripts/machines/env.onyx_gnu | 1 + configuration/scripts/machines/env.onyx_intel | 1 + .../scripts/machines/env.orion_intel | 3 + .../scripts/machines/env.phase3_intel | 3 + .../scripts/machines/env.testmachine_intel | 3 + .../scripts/machines/env.travisCI_gnu | 3 + configuration/scripts/options/set_env.cmplog | 1 + .../scripts/options/set_env.cmplogrest | 1 + configuration/scripts/options/set_env.cmprest | 1 + .../scripts/options/set_env.ompschedd1 | 1 + .../scripts/options/set_env.ompscheds | 1 + .../scripts/options/set_env.ompscheds1 | 1 + configuration/scripts/options/set_env.qcchk | 1 + configuration/scripts/options/set_env.qcchkf | 1 + configuration/scripts/options/set_nml.dt3456s | 1 + .../scripts/options/set_nml.dynanderson | 2 + .../scripts/options/set_nml.dynpicard | 1 + .../scripts/options/set_nml.qcnonbfb | 16 - .../scripts/options/set_nml.timerstats | 1 + configuration/scripts/tests/baseline.script | 54 ++- configuration/scripts/tests/first_suite.ts | 2 +- configuration/scripts/tests/nothread_suite.ts | 14 +- configuration/scripts/tests/omp_suite.ts | 141 +++++++ configuration/scripts/tests/perf_suite.ts | 30 ++ configuration/scripts/tests/prod_suite.ts | 8 +- configuration/scripts/tests/reprosum_suite.ts | 20 +- .../scripts/tests/test_logbfb.script | 33 -- .../scripts/tests/test_qcchkf.script | 36 -- doc/source/cice_index.rst | 1 + doc/source/conf.py | 6 +- doc/source/intro/copyright.rst | 2 +- doc/source/user_guide/ug_case_settings.rst | 27 +- doc/source/user_guide/ug_implementation.rst | 61 ++- icepack | 2 +- 126 files changed, 1753 insertions(+), 1659 deletions(-) mode change 100755 => 100644 cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 mode change 100755 => 100644 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 delete mode 100644 configuration/scripts/machines/Macros.conrad_cray delete mode 100644 configuration/scripts/machines/Macros.conrad_intel delete mode 100644 configuration/scripts/machines/Macros.conrad_pgi delete mode 100644 configuration/scripts/machines/Macros.gordon_cray rename configuration/scripts/machines/{Macros.gordon_pgi => Macros.narwhal_aocc} (70%) rename configuration/scripts/machines/{Macros.conrad_gnu => Macros.narwhal_cray} (75%) rename configuration/scripts/machines/{Macros.gordon_gnu => Macros.narwhal_gnu} (87%) rename configuration/scripts/machines/{Macros.gordon_intel => Macros.narwhal_intel} 
(82%) delete mode 100755 configuration/scripts/machines/env.conrad_gnu delete mode 100755 configuration/scripts/machines/env.conrad_intel delete mode 100755 configuration/scripts/machines/env.conrad_pgi delete mode 100755 configuration/scripts/machines/env.gordon_intel delete mode 100755 configuration/scripts/machines/env.gordon_pgi create mode 100755 configuration/scripts/machines/env.narwhal_aocc rename configuration/scripts/machines/{env.conrad_cray => env.narwhal_cray} (53%) rename configuration/scripts/machines/{env.gordon_gnu => env.narwhal_gnu} (51%) rename configuration/scripts/machines/{env.gordon_cray => env.narwhal_intel} (50%) create mode 100644 configuration/scripts/options/set_env.cmplog create mode 100644 configuration/scripts/options/set_env.cmplogrest create mode 100644 configuration/scripts/options/set_env.cmprest create mode 100644 configuration/scripts/options/set_env.ompschedd1 create mode 100644 configuration/scripts/options/set_env.ompscheds create mode 100644 configuration/scripts/options/set_env.ompscheds1 create mode 100644 configuration/scripts/options/set_env.qcchk create mode 100644 configuration/scripts/options/set_env.qcchkf create mode 100644 configuration/scripts/options/set_nml.dt3456s delete mode 100644 configuration/scripts/options/set_nml.qcnonbfb create mode 100644 configuration/scripts/options/set_nml.timerstats create mode 100644 configuration/scripts/tests/omp_suite.ts create mode 100644 configuration/scripts/tests/perf_suite.ts delete mode 100644 configuration/scripts/tests/test_logbfb.script delete mode 100644 configuration/scripts/tests/test_qcchkf.script diff --git a/LICENSE.pdf b/LICENSE.pdf index 5d6b29280111f197c9f13fd29e59e3011d268a74..d98d3da80d4be814224a113d781d15426d4bbf6d 100644 GIT binary patch delta 28961 [base85-encoded binary delta data for LICENSE.pdf omitted; the binary data also swallowed the header and opening context of the next hunk, which the diffstat above places in cicecore/cicedynB/analysis/ice_diagnostics.F90; it resumes mid-line below] puny) & -! print*,'afsdn not normal', & +! write(nu_diag,*) 'afsdn not normal', & ! sum(trcrn(i,j,nt_fsd:nt_fsd+nfsd-1,n,iblk)), & ! trcrn(i,j,nt_fsd:nt_fsd+nfsd-1,n,iblk) ! endif diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90 index d1fce0d67..94ee4f956 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -103,6 +103,7 @@ subroutine init_hist (dt) cstr_gat, cstr_gau, cstr_gav, & ! mask area name for t, u, v atm grid (ga) cstr_got, cstr_gou, cstr_gov !
mask area name for t, u, v ocn grid (go) character(len=char_len) :: description + character(len=*), parameter :: subname = '(init_hist)' !----------------------------------------------------------------- @@ -224,25 +225,27 @@ subroutine init_hist (dt) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - call get_fileunit(nu_nml) if (my_task == master_task) then - open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + write(nu_diag,*) subname,' Reading icefields_nml' + + call get_fileunit(nu_nml) + open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - nml_error = -1 - else - nml_error = 1 + call abort_ice(subname//'ERROR: icefields_nml open file '// & + trim(nml_filename), & + file=__FILE__, line=__LINE__) endif + + nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_nml,iostat=nml_error) end do - if (nml_error == 0) close(nu_nml) - endif - call release_fileunit(nu_nml) - - call broadcast_scalar(nml_error, master_task) - if (nml_error /= 0) then - close (nu_nml) - call abort_ice(subname//'ERROR: reading icefields_nml') + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: icefields_nml reading ', & + file=__FILE__, line=__LINE__) + endif + close(nu_nml) + call release_fileunit(nu_nml) endif ! histfreq options ('1','h','d','m','y') @@ -2259,7 +2262,6 @@ subroutine accum_hist (dt) ! increment field !--------------------------------------------------------------- -! MHRI: CHECK THIS OMP ... Maybe ok after "dfresh,dfsalt" added !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & !$OMP k,n,qn,ns,sn,rho_ocn,rho_ice,Tice,Sbr,phi,rhob,dfresh,dfsalt, & !$OMP worka,workb,worka3,Tinz4d,Sinz4d,Tsnz4d) diff --git a/cicecore/cicedynB/analysis/ice_history_bgc.F90 b/cicecore/cicedynB/analysis/ice_history_bgc.F90 index fdb8c4393..8802cf431 100644 --- a/cicecore/cicedynB/analysis/ice_history_bgc.F90 +++ b/cicecore/cicedynB/analysis/ice_history_bgc.F90 @@ -282,7 +282,8 @@ subroutine init_hist_bgc_2D tr_bgc_N, tr_bgc_C, tr_bgc_chl, & tr_bgc_DON, tr_bgc_Fe, tr_bgc_hum, & skl_bgc, solve_zsal, z_tracers - character(len=*), parameter :: subname = '(init_hist_bgc_2D)' + + character(len=*), parameter :: subname = '(init_hist_bgc_2D)' call icepack_query_parameters(skl_bgc_out=skl_bgc, & solve_zsal_out=solve_zsal, z_tracers_out=z_tracers) @@ -303,25 +304,27 @@ subroutine init_hist_bgc_2D ! read namelist !----------------------------------------------------------------- - call get_fileunit(nu_nml) if (my_task == master_task) then - open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + write(nu_diag,*) subname,' Reading icefields_bgc_nml' + + call get_fileunit(nu_nml) + open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - nml_error = -1 - else - nml_error = 1 + call abort_ice(subname//'ERROR: icefields_bgc_nml open file '// & + trim(nml_filename), & + file=__FILE__, line=__LINE__) endif + + nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_bgc_nml,iostat=nml_error) end do - if (nml_error == 0) close(nu_nml) - endif - call release_fileunit(nu_nml) - - call broadcast_scalar(nml_error, master_task) - if (nml_error /= 0) then - close (nu_nml) - call abort_ice(subname//'ERROR: reading icefields_bgc_nml') + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: icefields_bgc_nml reading ', & + file=__FILE__, line=__LINE__) + endif + close(nu_nml) + call release_fileunit(nu_nml) endif if (.not. 
tr_iso) then diff --git a/cicecore/cicedynB/analysis/ice_history_drag.F90 b/cicecore/cicedynB/analysis/ice_history_drag.F90 index 31a92158b..c0a1f99bd 100644 --- a/cicecore/cicedynB/analysis/ice_history_drag.F90 +++ b/cicecore/cicedynB/analysis/ice_history_drag.F90 @@ -68,6 +68,7 @@ subroutine init_hist_drag_2D integer (kind=int_kind) :: ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag logical (kind=log_kind) :: formdrag + character(len=*), parameter :: subname = '(init_hist_drag_2D)' call icepack_query_parameters(formdrag_out=formdrag) @@ -79,26 +80,27 @@ subroutine init_hist_drag_2D ! read namelist !----------------------------------------------------------------- - call get_fileunit(nu_nml) if (my_task == master_task) then - open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + write(nu_diag,*) subname,' Reading icefields_drag_nml' + + call get_fileunit(nu_nml) + open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - nml_error = -1 - else - nml_error = 1 + call abort_ice(subname//'ERROR: icefields_drag_nml open file '// & + trim(nml_filename), & + file=__FILE__, line=__LINE__) endif + + nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_drag_nml,iostat=nml_error) - if (nml_error > 0) read(nu_nml,*) ! for Nagware compiler end do - if (nml_error == 0) close(nu_nml) - endif - call release_fileunit(nu_nml) - - call broadcast_scalar(nml_error, master_task) - if (nml_error /= 0) then - close (nu_nml) - call abort_ice(subname//'ERROR: reading icefields_drag_nml') + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: icefields_drag_nml reading ', & + file=__FILE__, line=__LINE__) + endif + close(nu_nml) + call release_fileunit(nu_nml) endif call broadcast_scalar (f_Cdn_atm, master_task) diff --git a/cicecore/cicedynB/analysis/ice_history_fsd.F90 b/cicecore/cicedynB/analysis/ice_history_fsd.F90 index 7ad81e7d2..c64ecbefa 100644 --- a/cicecore/cicedynB/analysis/ice_history_fsd.F90 +++ b/cicecore/cicedynB/analysis/ice_history_fsd.F90 @@ -81,6 +81,7 @@ subroutine init_hist_fsd_2D integer (kind=int_kind) :: nml_error ! namelist i/o error flag real (kind=dbl_kind) :: secday logical (kind=log_kind) :: tr_fsd, wave_spec + character(len=*), parameter :: subname = '(init_hist_fsd_2D)' call icepack_query_tracer_flags(tr_fsd_out=tr_fsd) @@ -95,25 +96,27 @@ subroutine init_hist_fsd_2D ! 
read namelist !----------------------------------------------------------------- - call get_fileunit(nu_nml) if (my_task == master_task) then - open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + write(nu_diag,*) subname,' Reading icefields_fsd_nml' + + call get_fileunit(nu_nml) + open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - nml_error = -1 - else - nml_error = 1 + call abort_ice(subname//'ERROR: icefields_fsd_nml open file '// & + trim(nml_filename), & + file=__FILE__, line=__LINE__) endif + + nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_fsd_nml,iostat=nml_error) end do - if (nml_error == 0) close(nu_nml) - endif - call release_fileunit(nu_nml) - - call broadcast_scalar(nml_error, master_task) - if (nml_error /= 0) then - close (nu_nml) - call abort_ice(subname//'ERROR: reading icefields_fsd_nml') + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: icefields_fsd_nml reading ', & + file=__FILE__, line=__LINE__) + endif + close(nu_nml) + call release_fileunit(nu_nml) endif call broadcast_scalar (f_afsd, master_task) diff --git a/cicecore/cicedynB/analysis/ice_history_mechred.F90 b/cicecore/cicedynB/analysis/ice_history_mechred.F90 index a20df5fb0..920a83b47 100644 --- a/cicecore/cicedynB/analysis/ice_history_mechred.F90 +++ b/cicecore/cicedynB/analysis/ice_history_mechred.F90 @@ -89,6 +89,7 @@ subroutine init_hist_mechred_2D integer (kind=int_kind) :: nml_error ! namelist i/o error flag real (kind=dbl_kind) :: secday logical (kind=log_kind) :: tr_lvl + character(len=*), parameter :: subname = '(init_hist_mechred_2D)' call icepack_query_parameters(secday_out=secday) @@ -101,25 +102,27 @@ subroutine init_hist_mechred_2D ! read namelist !----------------------------------------------------------------- - call get_fileunit(nu_nml) if (my_task == master_task) then - open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + write(nu_diag,*) subname,' Reading icefields_mechred_nml' + + call get_fileunit(nu_nml) + open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - nml_error = -1 - else - nml_error = 1 + call abort_ice(subname//'ERROR: icefields_mechred_nml open file '// & + trim(nml_filename), & + file=__FILE__, line=__LINE__) endif + + nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_mechred_nml,iostat=nml_error) end do - if (nml_error == 0) close(nu_nml) - endif - call release_fileunit(nu_nml) - - call broadcast_scalar(nml_error, master_task) - if (nml_error /= 0) then - close (nu_nml) - call abort_ice(subname//'ERROR: reading icefields_mechred_nml') + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: icefields_mechred_nml reading ', & + file=__FILE__, line=__LINE__) + endif + close(nu_nml) + call release_fileunit(nu_nml) endif if (.not. tr_lvl) then diff --git a/cicecore/cicedynB/analysis/ice_history_pond.F90 b/cicecore/cicedynB/analysis/ice_history_pond.F90 index 182865fec..365bd4410 100644 --- a/cicecore/cicedynB/analysis/ice_history_pond.F90 +++ b/cicecore/cicedynB/analysis/ice_history_pond.F90 @@ -73,6 +73,7 @@ subroutine init_hist_pond_2D integer (kind=int_kind) :: ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag logical (kind=log_kind) :: tr_pond + character(len=*), parameter :: subname = '(init_hist_pond_2D)' call icepack_query_tracer_flags(tr_pond_out=tr_pond) @@ -84,25 +85,27 @@ subroutine init_hist_pond_2D ! 
read namelist !----------------------------------------------------------------- - call get_fileunit(nu_nml) if (my_task == master_task) then - open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + write(nu_diag,*) subname,' Reading icefields_pond_nml' + + call get_fileunit(nu_nml) + open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - nml_error = -1 - else - nml_error = 1 + call abort_ice(subname//'ERROR: icefields_pond_nml open file '// & + trim(nml_filename), & + file=__FILE__, line=__LINE__) endif + + nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_pond_nml,iostat=nml_error) end do - if (nml_error == 0) close(nu_nml) - endif - call release_fileunit(nu_nml) - - call broadcast_scalar(nml_error, master_task) - if (nml_error /= 0) then - close (nu_nml) - call abort_ice(subname//'ERROR: reading icefields_pond_nml') + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: icefields_pond_nml reading ', & + file=__FILE__, line=__LINE__) + endif + close(nu_nml) + call release_fileunit(nu_nml) endif if (.not. tr_pond) then diff --git a/cicecore/cicedynB/analysis/ice_history_snow.F90 b/cicecore/cicedynB/analysis/ice_history_snow.F90 index 5a590af2b..090759759 100644 --- a/cicecore/cicedynB/analysis/ice_history_snow.F90 +++ b/cicecore/cicedynB/analysis/ice_history_snow.F90 @@ -87,30 +87,32 @@ subroutine init_hist_snow_2D (dt) if (tr_snow) then - !----------------------------------------------------------------- - ! read namelist - !----------------------------------------------------------------- - - call get_fileunit(nu_nml) - if (my_task == master_task) then - open (nu_nml, file=nml_filename, status='old',iostat=nml_error) - if (nml_error /= 0) then - nml_error = -1 - else + !----------------------------------------------------------------- + ! read namelist + !----------------------------------------------------------------- + + if (my_task == master_task) then + write(nu_diag,*) subname,' Reading icefields_snow_nml' + + call get_fileunit(nu_nml) + open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: icefields_snow_nml open file '// & + trim(nml_filename), & + file=__FILE__, line=__LINE__) + endif + nml_error = 1 + do while (nml_error > 0) + read(nu_nml, nml=icefields_snow_nml,iostat=nml_error) + end do + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: icefields_snow_nml reading ', & + file=__FILE__, line=__LINE__) + endif + close(nu_nml) + call release_fileunit(nu_nml) endif - do while (nml_error > 0) - read(nu_nml, nml=icefields_snow_nml,iostat=nml_error) - end do - if (nml_error == 0) close(nu_nml) - endif - call release_fileunit(nu_nml) - - call broadcast_scalar(nml_error, master_task) - if (nml_error /= 0) then - close (nu_nml) - call abort_ice('ice: error reading icefields_snow_nml') - endif else ! .not. tr_snow f_smassice = 'x' diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index 2b735b71c..f3bb7a935 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -25,6 +25,11 @@ module ice_dyn_eap p001, p027, p055, p111, p166, p222, p25, p333 use ice_fileunits, only: nu_diag, nu_dump_eap, nu_restart_eap use ice_exit, only: abort_ice +! use ice_timers, only: & +! ice_timer_start, ice_timer_stop, & +! timer_tmp1, timer_tmp2, timer_tmp3, timer_tmp4, & +! 
timer_tmp5, timer_tmp6, timer_tmp7, timer_tmp8, timer_tmp9 + use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_query_parameters use icepack_intfc, only: icepack_ice_strength @@ -61,6 +66,11 @@ module ice_dyn_eap a11 , & ! components of structure tensor () a12 + ! private for reuse, set in init_eap + + real (kind=dbl_kind) :: & + puny, pi, pi2, piq, pih + !======================================================================= contains @@ -138,9 +148,6 @@ subroutine eap (dt) grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & aice_init, aice0, aicen, vicen, strength -! use ice_timers, only: timer_dynamics, timer_bound, & -! ice_timer_start, ice_timer_stop, & -! timer_tmp1, timer_tmp2, timer_tmp3 use ice_timers, only: timer_dynamics, timer_bound, & ice_timer_start, ice_timer_stop @@ -211,7 +218,7 @@ subroutine eap (dt) ! This call is needed only if dt changes during runtime. ! call set_evp_parameters (dt) - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) SCHEDULE(runtime) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block @@ -285,10 +292,7 @@ subroutine eap (dt) call grid_average_X2Y('F',strairyT,'T',strairy,'U') endif -! tcraig, tcx, turned off this threaded region, in evp, this block and -! the icepack_ice_strength call seems to not be thread safe. more -! debugging needed - !$TCXOMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) do iblk = 1, nblocks !----------------------------------------------------------------- @@ -375,7 +379,7 @@ subroutine eap (dt) strength = strength(i,j, iblk) ) enddo ! ij enddo ! iblk - !$TCXOMP END PARALLEL DO + !$OMP END PARALLEL DO call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -406,30 +410,28 @@ subroutine eap (dt) !----------------------------------------------------------------- if (seabed_stress) then - - ! 
tcraig, evp omp causes abort on cheyenne with pgi, turn off here too - !$TCXOMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - - if ( seabed_stress_method == 'LKD' ) then - - call seabed_stress_factor_LKD (nx_block, ny_block, & - icellu (iblk), & - indxui(:,iblk), indxuj(:,iblk), & - vice(:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), Tbu(:,:,iblk)) - - elseif ( seabed_stress_method == 'probabilistic' ) then - - call seabed_stress_factor_prob (nx_block, ny_block, & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & - aicen(:,:,:,iblk), vicen(:,:,:,iblk), & - hwater(:,:,iblk), Tbu(:,:,iblk)) - endif - - enddo - !$TCXOMP END PARALLEL DO + if ( seabed_stress_method == 'LKD' ) then + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + call seabed_stress_factor_LKD (nx_block, ny_block, & + icellu (iblk), & + indxui(:,iblk), indxuj(:,iblk), & + vice(:,:,iblk), aice(:,:,iblk), & + hwater(:,:,iblk), Tbu(:,:,iblk)) + enddo + !$OMP END PARALLEL DO + + elseif ( seabed_stress_method == 'probabilistic' ) then + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + call seabed_stress_factor_prob (nx_block, ny_block, & + icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & + icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + aicen(:,:,:,iblk), vicen(:,:,:,iblk), & + hwater(:,:,iblk), Tbu(:,:,iblk)) + enddo + !$OMP END PARALLEL DO + endif endif do ksub = 1,ndte ! subcycling @@ -438,10 +440,10 @@ subroutine eap (dt) ! stress tensor equation, total surface stress !----------------------------------------------------------------- - !$TCXOMP PARALLEL DO PRIVATE(iblk,strtmp) + !$OMP PARALLEL DO PRIVATE(iblk,strtmp) SCHEDULE(runtime) do iblk = 1, nblocks -! call ice_timer_start(timer_tmp1) ! dynamics +! call ice_timer_start(timer_tmp1,iblk) call stress_eap (nx_block, ny_block, & ksub, ndte, & icellt(iblk), & @@ -474,16 +476,16 @@ subroutine eap (dt) ! rdg_conv (:,:,iblk), rdg_shear (:,:,iblk), & rdg_conv (:,:,iblk), & strtmp (:,:,:)) -! call ice_timer_stop(timer_tmp1) ! dynamics +! call ice_timer_stop(timer_tmp1,iblk) !----------------------------------------------------------------- ! momentum equation !----------------------------------------------------------------- +! call ice_timer_start(timer_tmp2,iblk) call stepu (nx_block, ny_block, & icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & - ksub, & aiu (:,:,iblk), strtmp (:,:,:), & uocnU (:,:,iblk), vocnU (:,:,iblk), & waterx (:,:,iblk), watery (:,:,iblk), & @@ -495,12 +497,13 @@ subroutine eap (dt) uvel_init(:,:,iblk), vvel_init(:,:,iblk),& uvel (:,:,iblk), vvel (:,:,iblk), & Tbu (:,:,iblk)) +! call ice_timer_stop(timer_tmp2,iblk) !----------------------------------------------------------------- ! evolution of structure tensor A !----------------------------------------------------------------- -! call ice_timer_start(timer_tmp3) ! dynamics +! call ice_timer_start(timer_tmp3,iblk) if (mod(ksub,10) == 1) then ! only called every 10th timestep call stepa (nx_block, ny_block, & dtei, icellt (iblk), & @@ -517,9 +520,9 @@ subroutine eap (dt) stress12_1(:,:,iblk), stress12_2(:,:,iblk), & stress12_3(:,:,iblk), stress12_4(:,:,iblk)) endif -! call ice_timer_stop(timer_tmp3) ! dynamics +! call ice_timer_stop(timer_tmp3,iblk) enddo - !$TCXOMP END PARALLEL DO + !$OMP END PARALLEL DO call stack_velocity_field(uvel, vvel, fld2) call ice_timer_start(timer_bound) @@ -542,7 +545,7 @@ subroutine eap (dt) ! 
ice-ocean stress !----------------------------------------------------------------- - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call dyn_finish & @@ -552,8 +555,6 @@ subroutine eap (dt) uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & aiu (:,:,iblk), fm (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & strocnx (:,:,iblk), strocny (:,:,iblk)) enddo @@ -620,17 +621,19 @@ subroutine init_eap real (kind=dbl_kind) :: & ainit, xinit, yinit, zinit, & da, dx, dy, dz, & - pi, pih, piq, phi + phi character(len=*), parameter :: subname = '(init_eap)' - call icepack_query_parameters(pi_out=pi, pih_out=pih, piq_out=piq) + call icepack_query_parameters(puny_out=puny, & + pi_out=pi, pi2_out=pi2, piq_out=piq, pih_out=pih) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) + phi = pi/c12 ! diamond shaped floe smaller angle (default phi = 30 deg) - !$OMP PARALLEL DO PRIVATE(iblk,i,j) + !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block @@ -784,14 +787,9 @@ FUNCTION s11kr(x,y,z,phi) d11, d12, d22, & IIn1t2, IIn2t1, & ! IIt1t2, & - Hen1t2, Hen2t1, & - pih, puny - character(len=*), parameter :: subname = '(s11kr)' + Hen1t2, Hen2t1 - call icepack_query_parameters(pih_out=pih, puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) + character(len=*), parameter :: subname = '(s11kr)' p = phi @@ -855,14 +853,9 @@ FUNCTION s12kr(x,y,z,phi) d11, d12, d22, & IIn1t2, IIn2t1, & ! IIt1t2, & - Hen1t2, Hen2t1, & - pih, puny - character(len=*), parameter :: subname = '(s12kr)' + Hen1t2, Hen2t1 - call icepack_query_parameters(pih_out=pih, puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) + character(len=*), parameter :: subname = '(s12kr)' p = phi @@ -926,14 +919,9 @@ FUNCTION s22kr(x,y,z,phi) d11, d12, d22, & IIn1t2, IIn2t1, & ! IIt1t2, & - Hen1t2, Hen2t1, & - pih, puny - character(len=*), parameter :: subname = '(s22kr)' + Hen1t2, Hen2t1 - call icepack_query_parameters(pih_out=pih, puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) + character(len=*), parameter :: subname = '(s22kr)' p = phi @@ -996,14 +984,9 @@ FUNCTION s11ks(x,y,z,phi) ! 
t2t1i12, t2t1i21, t2t1i22, & d11, d12, d22, & IIn1t2, IIn2t1, IIt1t2, & - Hen1t2, Hen2t1, & - pih, puny - character(len=*), parameter :: subname = '(s11ks)' + Hen1t2, Hen2t1 - call icepack_query_parameters(pih_out=pih, puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) + character(len=*), parameter :: subname = '(s11ks)' p = phi @@ -1065,14 +1048,9 @@ FUNCTION s12ks(x,y,z,phi) t2t1i12, t2t1i21, & d11, d12, d22, & IIn1t2, IIn2t1, IIt1t2, & - Hen1t2, Hen2t1, & - pih, puny - character(len=*), parameter :: subname = '(s12ks)' + Hen1t2, Hen2t1 - call icepack_query_parameters(pih_out=pih, puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) + character(len=*), parameter :: subname = '(s12ks)' p =phi @@ -1136,14 +1114,9 @@ FUNCTION s22ks(x,y,z,phi) t2t1i22, & d11, d12, d22, & IIn1t2, IIn2t1, IIt1t2, & - Hen1t2, Hen2t1, & - pih, puny - character(len=*), parameter :: subname = '(s22ks)' + Hen1t2, Hen2t1 - call icepack_query_parameters(pih_out=pih, puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) + character(len=*), parameter :: subname = '(s22ks)' p = phi @@ -1225,11 +1198,6 @@ subroutine stress_eap (nx_block, ny_block, & rdg_conv, & strtmp) -!echmod tmp -! use ice_timers, only: & -! ice_timer_start, ice_timer_stop, & -! timer_tmp1, timer_tmp2, timer_tmp3 - integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions ksub , & ! subcycling step @@ -1307,7 +1275,7 @@ subroutine stress_eap (nx_block, ny_block, & csigmne, csigmnw, csigmse, csigmsw , & csig12ne, csig12nw, csig12se, csig12sw , & str12ew, str12we, str12ns, str12sn , & - strp_tmp, strm_tmp, puny + strp_tmp, strm_tmp real (kind=dbl_kind) :: & alpharne, alpharnw, alpharsw, alpharse, & @@ -1319,11 +1287,6 @@ subroutine stress_eap (nx_block, ny_block, & ! Initialize !----------------------------------------------------------------- - call icepack_query_parameters(puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - strtmp(:,:,:) = c0 do ij = 1, icellt @@ -1367,7 +1330,6 @@ subroutine stress_eap (nx_block, ny_block, & !----------------------------------------------------------------- ! Stress updated depending on strain rate and structure tensor !----------------------------------------------------------------- -! call ice_timer_start(timer_tmp2) ! dynamics ! ne call update_stress_rdg (ksub, ndte, divune, tensionne, & @@ -1394,7 +1356,6 @@ subroutine stress_eap (nx_block, ny_block, & stress12tmp_4, strength(i,j), & alpharse, alphasse) -! call ice_timer_stop(timer_tmp2) ! dynamics !----------------------------------------------------------------- ! on last subcycle, save quantities for mechanical redistribution !----------------------------------------------------------------- @@ -1646,10 +1607,14 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & Angle_denom_gamma, Angle_denom_alpha, & Tany_1, Tany_2, & x, y, dx, dy, da, & - invdx, invdy, invda, invsin, & dtemp1, dtemp2, atempprime, & - kxw, kyw, kaw, & - puny, pi, pi2, piq, pih + kxw, kyw, kaw + + real (kind=dbl_kind), save :: & + invdx, invdy, invda, invsin + + logical (kind=log_kind), save :: & + first_call = .true. 
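! For reference: the two SAVE declarations above implement a simple
! memoization.  Constants that depend only on fixed parameters (the
! yield-curve lookup spacing, the EVP stress normalization) are computed
! once under "if (first_call)" and reused on every subsequent call, with
! first_call cleared at the end of the routine.  A minimal sketch of the
! idiom follows; names prefixed demo_ are illustrative only, and the
! kinds/constants (dbl_kind, log_kind, c1, pi2, c12) are assumed to come
! from the usual CICE modules:
!
!     subroutine demo_memoize(x, y)
!        real (kind=dbl_kind), intent(in)  :: x
!        real (kind=dbl_kind), intent(out) :: y
!        real (kind=dbl_kind), save :: inv_const     ! persists across calls
!        logical (kind=log_kind), save :: first_call = .true.
!        if (first_call) then
!           inv_const  = c1/sin(pi2/c12)             ! computed once only
!           first_call = .false.
!        endif
!        y = x*inv_const                             ! cheap reuse thereafter
!     end subroutine demo_memoize
!
! One caveat worth flagging: update_stress_rdg is reached from inside an
! OMP parallel region, so the unguarded write to first_call is formally a
! data race; it is presumably tolerated because every thread computes and
! stores identical values.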
real (kind=dbl_kind), parameter :: & kfriction = 0.45_dbl_kind @@ -1661,17 +1626,13 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & character(len=*), parameter :: subname = '(update_stress_rdg)' - call icepack_query_parameters(puny_out=puny, & - pi_out=pi, pi2_out=pi2, piq_out=piq, pih_out=pih) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - ! Factor to maintain the same stress as in EVP (see Section 3) ! Can be set to 1 otherwise - invstressconviso = c1/(c1+kfriction*kfriction) - invsin = c1/sin(pi2/c12) * invstressconviso + if (first_call) then + invstressconviso = c1/(c1+kfriction*kfriction) + invsin = c1/sin(pi2/c12) * invstressconviso + endif ! compute eigenvalues, eigenvectors and angles for structure tensor, strain rates @@ -1679,7 +1640,7 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & a22 = c1-a11 -! gamma: angle between general coordiantes and principal axis of A +! gamma: angle between general coordinates and principal axis of A ! here Tan2gamma = 2 a12 / (a11 - a22) Q11Q11 = c1 @@ -1770,12 +1731,14 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & if (y < 0) y = y + pi ! Now calculate updated stress tensor - dx = pi/real(nx_yield-1,kind=dbl_kind) - dy = pi/real(ny_yield-1,kind=dbl_kind) - da = p5/real(na_yield-1,kind=dbl_kind) - invdx = c1/dx - invdy = c1/dy - invda = c1/da + if (first_call) then + dx = pi/real(nx_yield-1,kind=dbl_kind) + dy = pi/real(ny_yield-1,kind=dbl_kind) + da = p5/real(na_yield-1,kind=dbl_kind) + invdx = c1/dx + invdy = c1/dy + invda = c1/da + endif if (interpolate_stress_rdg) then @@ -1913,6 +1876,8 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & + rotstemp22s*dtemp22 endif + first_call = .false. + end subroutine update_stress_rdg !======================================================================= @@ -2043,7 +2008,7 @@ subroutine calc_ffrac (stressp, stressm, & real (kind=dbl_kind) :: & sigma11, sigma12, sigma22, & - gamma, sigma_1, sigma_2, pih, & + gamma, sigma_1, sigma_2, & Q11, Q12, Q11Q11, Q11Q12, Q12Q12 real (kind=dbl_kind), parameter :: & @@ -2052,11 +2017,6 @@ subroutine calc_ffrac (stressp, stressm, & character(len=*), parameter :: subname = '(calc_ffrac)' - call icepack_query_parameters(pih_out=pih) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - sigma11 = p5*(stressp+stressm) sigma12 = stress12 sigma22 = p5*(stressp-stressm) @@ -2219,7 +2179,7 @@ subroutine read_restart_eap() ! Ensure unused values in west and south ghost cells are 0 !----------------------------------------------------------------- - !$OMP PARALLEL DO PRIVATE(iblk,i,j) + !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) do iblk = 1, nblocks do j = 1, nghost do i = 1, nx_block diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 0a48783c4..360781a79 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -114,7 +114,7 @@ subroutine evp (dt) use ice_dyn_evp_1d, only: ice_dyn_evp_1d_copyin, ice_dyn_evp_1d_kernel, & ice_dyn_evp_1d_copyout use ice_dyn_shared, only: evp_algorithm, stack_velocity_field, unstack_velocity_field, DminTarea - + use ice_dyn_shared, only: deformations, deformations_T real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -246,7 +246,7 @@ subroutine evp (dt) ! 
field_loc_center, field_type_scalar) ! call ice_timer_stop(timer_bound) - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) SCHEDULE(runtime) do iblk = 1, nblocks do j = 1, ny_block @@ -339,9 +339,7 @@ subroutine evp (dt) endif endif -! tcraig, tcx, threading here leads to some non-reproducbile results and failures in icepack_ice_strength -! need to do more debugging - !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) do iblk = 1, nblocks !----------------------------------------------------------------- @@ -433,11 +431,11 @@ subroutine evp (dt) enddo ! ij enddo ! iblk - !$TCXOMP END PARALLEL DO + !$OMP END PARALLEL DO if (grid_ice == 'CD' .or. grid_ice == 'C') then - !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) do iblk = 1, nblocks !----------------------------------------------------------------- @@ -528,7 +526,7 @@ subroutine evp (dt) enddo enddo enddo ! iblk - !$TCXOMP END PARALLEL DO + !$OMP END PARALLEL DO endif ! grid_ice @@ -592,61 +590,67 @@ subroutine evp (dt) if (seabed_stress) then - ! tcraig, causes abort with pgi compiler on cheyenne - !$TCXOMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - - select case (trim(grid_ice)) - case('B') - - if ( seabed_stress_method == 'LKD' ) then + select case (trim(grid_ice)) + case('B') + if ( seabed_stress_method == 'LKD' ) then + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks call seabed_stress_factor_LKD (nx_block, ny_block, & icellu (iblk), & indxui(:,iblk), indxuj(:,iblk), & vice(:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), Tbu(:,:,iblk)) + enddo + !$OMP END PARALLEL DO - elseif ( seabed_stress_method == 'probabilistic' ) then - + elseif ( seabed_stress_method == 'probabilistic' ) then + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks call seabed_stress_factor_prob (nx_block, ny_block, & icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk), & hwater(:,:,iblk), Tbu(:,:,iblk)) - endif + enddo + !$OMP END PARALLEL DO + endif case('CD','C') if ( seabed_stress_method == 'LKD' ) then - - call seabed_stress_factor_LKD (nx_block, ny_block, & - icelle (iblk), & - indxei(:,iblk), indxej(:,iblk), & - vice(:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), TbE(:,:,iblk)) - call seabed_stress_factor_LKD (nx_block, ny_block, & - icelln (iblk), & - indxni(:,iblk), indxnj(:,iblk), & - vice(:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), TbN(:,:,iblk)) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + call seabed_stress_factor_LKD (nx_block, ny_block, & + icelle (iblk), & + indxei(:,iblk), indxej(:,iblk), & + vice(:,:,iblk), aice(:,:,iblk), & + hwater(:,:,iblk), TbE(:,:,iblk)) + call seabed_stress_factor_LKD (nx_block, ny_block, & + icelln (iblk), & + indxni(:,iblk), indxnj(:,iblk), & + vice(:,:,iblk), aice(:,:,iblk), & + hwater(:,:,iblk), TbN(:,:,iblk)) + enddo + !$OMP END PARALLEL DO elseif ( seabed_stress_method == 'probabilistic' ) then - - call seabed_stress_factor_prob (nx_block, ny_block, & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & - aicen(:,:,:,iblk), vicen(:,:,:,iblk), & - hwater(:,:,iblk), Tbu(:,:,iblk), & - TbE(:,:,iblk), TbN(:,:,iblk), & - 
icelle(iblk), indxei(:,iblk), indxej(:,iblk), & - icelln(iblk), indxni(:,iblk), indxnj(:,iblk) ) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + call seabed_stress_factor_prob (nx_block, ny_block, & + icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & + icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + aicen(:,:,:,iblk), vicen(:,:,:,iblk), & + hwater(:,:,iblk), Tbu(:,:,iblk), & + TbE(:,:,iblk), TbN(:,:,iblk), & + icelle(iblk), indxei(:,iblk), indxej(:,iblk), & + icelln(iblk), indxni(:,iblk), indxnj(:,iblk) ) + enddo + !$OMP END PARALLEL DO endif end select - enddo - !$TCXOMP END PARALLEL DO endif call ice_timer_start(timer_evp_2d) @@ -687,25 +691,24 @@ subroutine evp (dt) do ksub = 1,ndte ! subcycling - !----------------------------------------------------------------- - ! stress tensor equation, total surface stress - !----------------------------------------------------------------- - select case (grid_ice) case('B') - !$TCXOMP PARALLEL DO PRIVATE(iblk,strtmp) - do iblk = 1, nblocks - + !$OMP PARALLEL DO PRIVATE(iblk,strtmp) SCHEDULE(runtime) + do iblk = 1, nblocks + + !----------------------------------------------------------------- + ! stress tensor equation, total surface stress + !----------------------------------------------------------------- call stress (nx_block, ny_block, & - ksub, icellt(iblk), & + icellt(iblk), & indxti (:,iblk), indxtj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & dxt (:,:,iblk), dyt (:,:,iblk), & dxhy (:,:,iblk), dyhx (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & cxm (:,:,iblk), cym (:,:,iblk), & - tarear (:,:,iblk), DminTarea(:,:,iblk), & + DminTarea (:,:,iblk), & strength (:,:,iblk), & stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & @@ -713,17 +716,30 @@ subroutine evp (dt) stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & stress12_1(:,:,iblk), stress12_2(:,:,iblk), & stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv (:,:,iblk), rdg_shear (:,:,iblk), & strtmp (:,:,:) ) - !----------------------------------------------------------------- - ! momentum equation - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! on last subcycle, save quantities for mechanical redistribution + !----------------------------------------------------------------- + if (ksub == ndte) then + call deformations (nx_block , ny_block , & + icellt (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxt (:,:,iblk), dyt (:,:,iblk), & + cxp (:,:,iblk), cyp (:,:,iblk), & + cxm (:,:,iblk), cym (:,:,iblk), & + tarear (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk) ) + endif + + !----------------------------------------------------------------- + ! momentum equation + !----------------------------------------------------------------- call stepu (nx_block, ny_block, & icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & - ksub, & aiu (:,:,iblk), strtmp (:,:,:), & uocnU (:,:,iblk), vocnU (:,:,iblk), & waterx (:,:,iblk), watery (:,:,iblk), & @@ -736,44 +752,58 @@ subroutine evp (dt) uvel (:,:,iblk), vvel (:,:,iblk), & Tbu (:,:,iblk)) - enddo - !$TCXOMP END PARALLEL DO + + + enddo ! 
iblk + !$OMP END PARALLEL DO case('CD','C') - !$TCXOMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stress_T (nx_block, ny_block, & - ksub, icellt(iblk), & + icellt(iblk), & indxti (:,iblk), indxtj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & dxN (:,:,iblk), dyE (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & - tarear (:,:,iblk), DminTarea (:,:,iblk), & + DminTarea (:,:,iblk), & strength (:,:,iblk), & zetax2T (:,:,iblk), etax2T (:,:,iblk), & stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12T (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv (:,:,iblk), rdg_shear (:,:,iblk) ) - + stress12T (:,:,iblk) ) + + !----------------------------------------------------------------- + ! on last subcycle, save quantities for mechanical redistribution + !----------------------------------------------------------------- + if (ksub == ndte) then + call deformations_T (nx_block , ny_block , & + icellt (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + tarear (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) + endif enddo + !$OMP END PARALLEL DO ! Need to update the halos for the stress components call ice_timer_start(timer_bound) call ice_HaloUpdate (zetax2T, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate (etax2T, halo_info, & - field_loc_center, field_type_scalar) + field_loc_center, field_type_scalar) + call ice_HaloUpdate (etax2T, halo_info, & + field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) - !$TCXOMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stress_U (nx_block, ny_block, & - ksub, icellu(iblk), & + icellu(iblk), & indxui (:,iblk), indxuj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & @@ -789,9 +819,8 @@ subroutine evp (dt) strength (:,:,iblk), & stresspU (:,:,iblk), stressmU (:,:,iblk), & stress12U (:,:,iblk)) - enddo - !$TCXOMP END PARALLEL DO + !$OMP END PARALLEL DO ! Need to update the halos for the stress components call ice_timer_start(timer_bound) @@ -809,11 +838,11 @@ subroutine evp (dt) field_loc_NEcorner, field_type_scalar) call ice_timer_stop(timer_bound) - !$TCXOMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call div_stress (nx_block, ny_block, & ! E point - ksub, icelle(iblk), & + icelle(iblk), & indxei (:,iblk), indxej (:,iblk), & dxE (:,:,iblk), dyE (:,:,iblk), & dxU (:,:,iblk), dyT (:,:,iblk), & @@ -825,8 +854,8 @@ subroutine evp (dt) strintxE (:,:,iblk), strintyE (:,:,iblk), & 'E') - call div_stress (nx_block, ny_block, & ! N point - ksub, icelln(iblk), & + call div_stress (nx_block, ny_block, & ! N point + icelln(iblk), & indxni (:,iblk), indxnj (:,iblk), & dxN (:,:,iblk), dyN (:,:,iblk), & dxT (:,:,iblk), dyU (:,:,iblk), & @@ -839,17 +868,17 @@ subroutine evp (dt) 'N') enddo - !$TCXOMP END PARALLEL DO + !$OMP END PARALLEL DO if (grid_ice == 'CD') then - !$TCXOMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call step_vel (nx_block, ny_block, & ! 
E point icelle (iblk), Cdn_ocn (:,:,iblk), & indxei (:,iblk), indxej (:,iblk), & - ksub, aiE (:,:,iblk), & + aiE (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & waterxE (:,:,iblk), wateryE (:,:,iblk), & forcexE (:,:,iblk), forceyE (:,:,iblk), & @@ -863,7 +892,7 @@ subroutine evp (dt) call step_vel (nx_block, ny_block, & ! N point icelln (iblk), Cdn_ocn (:,:,iblk), & indxni (:,iblk), indxnj (:,iblk), & - ksub, aiN (:,:,iblk), & + aiN (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & waterxN (:,:,iblk), wateryN (:,:,iblk), & forcexN (:,:,iblk), forceyN (:,:,iblk), & @@ -875,17 +904,17 @@ subroutine evp (dt) TbN (:,:,iblk)) enddo - !$TCXOMP END PARALLEL DO + !$OMP END PARALLEL DO elseif (grid_ice == 'C') then - !$TCXOMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stepu_Cgrid (nx_block, ny_block, & ! u, E point icelle (iblk), Cdn_ocn (:,:,iblk), & indxei (:,iblk), indxej (:,iblk), & - ksub, aiE (:,:,iblk), & + aiE (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & waterxE (:,:,iblk), forcexE (:,:,iblk), & emassdti (:,:,iblk), fmE (:,:,iblk), & @@ -897,7 +926,7 @@ subroutine evp (dt) call stepv_Cgrid (nx_block, ny_block, & ! v, N point icelln (iblk), Cdn_ocn (:,:,iblk), & indxni (:,iblk), indxnj (:,iblk), & - ksub, aiN (:,:,iblk), & + aiN (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & wateryN (:,:,iblk), forceyN (:,:,iblk), & nmassdti (:,:,iblk), fmN (:,:,iblk), & @@ -907,7 +936,7 @@ subroutine evp (dt) TbN (:,:,iblk)) enddo - !$TCXOMP END PARALLEL DO + !$OMP END PARALLEL DO endif @@ -1037,7 +1066,7 @@ subroutine evp (dt) ! ice-ocean stress !----------------------------------------------------------------- - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call dyn_finish & @@ -1047,8 +1076,6 @@ subroutine evp (dt) uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & aiu (:,:,iblk), fm (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & strocnx (:,:,iblk), strocny (:,:,iblk)) enddo @@ -1056,7 +1083,7 @@ subroutine evp (dt) if (grid_ice == 'CD' .or. grid_ice == 'C') then - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call dyn_finish & @@ -1066,8 +1093,6 @@ subroutine evp (dt) uvelN (:,:,iblk), vvelN (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & aiN (:,:,iblk), fmN (:,:,iblk), & - strintxN(:,:,iblk), strintyN(:,:,iblk), & - strairxN(:,:,iblk), strairyN(:,:,iblk), & strocnxN(:,:,iblk), strocnyN(:,:,iblk)) call dyn_finish & @@ -1077,8 +1102,6 @@ subroutine evp (dt) uvelE (:,:,iblk), vvelE (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & aiE (:,:,iblk), fmE (:,:,iblk), & - strintxE(:,:,iblk), strintyE(:,:,iblk), & - strairxE(:,:,iblk), strairyE(:,:,iblk), & strocnxE(:,:,iblk), strocnyE(:,:,iblk)) enddo @@ -1092,7 +1115,7 @@ subroutine evp (dt) ! conservation requires aiu be divided before averaging work1 = c0 work2 = c0 - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) SCHEDULE(runtime) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij,iblk) @@ -1127,14 +1150,14 @@ end subroutine evp ! author: Elizabeth C. 
Hunke, LANL subroutine stress (nx_block, ny_block, & - ksub, icellt, & + icellt, & indxti, indxtj, & uvel, vvel, & dxt, dyt, & dxhy, dyhx, & cxp, cyp, & cxm, cym, & - tarear, DminTarea, & + DminTarea, & strength, & stressp_1, stressp_2, & stressp_3, stressp_4, & @@ -1142,16 +1165,13 @@ subroutine stress (nx_block, ny_block, & stressm_3, stressm_4, & stress12_1, stress12_2, & stress12_3, stress12_4, & - shear, divu, & - rdg_conv, rdg_shear, & str ) - use ice_dyn_shared, only: strain_rates, deformations, & - viscous_coeffs_and_rep_pressure_T, capping + use ice_dyn_shared, only: strain_rates, viscous_coeffs_and_rep_pressure_T, & + capping integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - ksub , & ! subcycling step icellt ! no. of cells where icetmask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & @@ -1170,7 +1190,6 @@ subroutine stress (nx_block, ny_block, & cxp , & ! 1.5*HTN - 0.5*HTS cym , & ! 0.5*HTE - 1.5*HTW cxm , & ! 0.5*HTN - 1.5*HTS - tarear , & ! 1/tarea DminTarea ! deltaminEVP*tarea real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & @@ -1178,12 +1197,6 @@ subroutine stress (nx_block, ny_block, & stressm_1, stressm_2, stressm_3, stressm_4 , & ! sigma11-sigma22 stress12_1,stress12_2,stress12_3,stress12_4 ! sigma12 - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - shear , & ! strain rate II component (1/s) - divu , & ! strain rate I component, velocity divergence (1/s) - rdg_conv , & ! convergence term for ridging (1/s) - rdg_shear ! shear term for ridging (1/s) - real (kind=dbl_kind), dimension(nx_block,ny_block,8), intent(out) :: & str ! stress combinations @@ -1433,23 +1446,6 @@ subroutine stress (nx_block, ny_block, & enddo ! ij - !----------------------------------------------------------------- - ! on last subcycle, save quantities for mechanical redistribution - !----------------------------------------------------------------- - if (ksub == ndte) then - call deformations (nx_block , ny_block , & - icellt , & - indxti , indxtj , & - uvel , vvel , & - dxt , dyt , & - cxp , cyp , & - cxm , cym , & - tarear , & - shear , divu , & - rdg_conv , rdg_shear ) - - endif - end subroutine stress !======================================================================= @@ -1460,26 +1456,23 @@ end subroutine stress ! Nov 2021 subroutine stress_T (nx_block, ny_block, & - ksub, icellt, & + icellt, & indxti, indxtj, & uvelE, vvelE, & uvelN, vvelN, & dxN, dyE, & dxT, dyT, & - tarear, DminTarea, & + DminTarea, & strength, & zetax2T, etax2T, & stresspT, stressmT, & - stress12T, & - shear, divu, & - rdg_conv, rdg_shear ) + stress12T ) - use ice_dyn_shared, only: strain_rates_T, deformations_T, & - viscous_coeffs_and_rep_pressure_T, capping + use ice_dyn_shared, only: strain_rates_T, capping, & + viscous_coeffs_and_rep_pressure_T integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - ksub , & ! subcycling step icellt ! no. of cells where icetmask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & @@ -1496,7 +1489,6 @@ subroutine stress_T (nx_block, ny_block, & dxT , & ! width of T-cell through the middle (m) dyT , & ! height of T-cell through the middle (m) strength , & ! ice strength (N/m) - tarear , & ! 1/tarea DminTarea ! deltaminEVP*tarea real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & @@ -1506,12 +1498,6 @@ subroutine stress_T (nx_block, ny_block, & stressmT , & ! sigma11-sigma22 stress12T ! 
sigma12 - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - shear , & ! strain rate II component (1/s) - divu , & ! strain rate I component, velocity divergence (1/s) - rdg_conv , & ! convergence term for ridging (1/s) - rdg_shear ! shear term for ridging (1/s) - ! local variables integer (kind=int_kind) :: & @@ -1532,10 +1518,10 @@ subroutine stress_T (nx_block, ny_block, & i = indxti(ij) j = indxtj(ij) - !----------------------------------------------------------------- - ! strain rates at T point - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! strain rates at T point + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- call strain_rates_T (nx_block, ny_block, & i, j, & @@ -1546,20 +1532,20 @@ subroutine stress_T (nx_block, ny_block, & divT, tensionT, & shearT, DeltaT ) - !----------------------------------------------------------------- - ! viscous coefficients and replacement pressure at T point - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! viscous coefficients and replacement pressure at T point + !----------------------------------------------------------------- call viscous_coeffs_and_rep_pressure_T (strength(i,j), & DminTarea(i,j), DeltaT, & zetax2T(i,j),etax2T(i,j),& rep_prsT, capping ) - !----------------------------------------------------------------- - ! the stresses ! kg/s^2 - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! the stresses ! kg/s^2 + !----------------------------------------------------------------- - ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code + ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code stresspT(i,j) = (stresspT(i,j)*(c1-arlx1i*revp) + & arlx1i*(zetax2T(i,j)*divT - rep_prsT)) * denom1 @@ -1572,25 +1558,7 @@ subroutine stress_T (nx_block, ny_block, & enddo ! ij - !----------------------------------------------------------------- - ! on last subcycle, save quantities for mechanical redistribution - !----------------------------------------------------------------- - if (ksub == ndte) then - - call deformations_T (nx_block , ny_block , & - icellt , & - indxti , indxtj , & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - tarear , & - shear , divu , & - rdg_conv , rdg_shear ) - - endif - - end subroutine stress_T + end subroutine stress_T !======================================================================= @@ -1600,7 +1568,7 @@ end subroutine stress_T ! Nov 2021 subroutine stress_U (nx_block, ny_block, & - ksub, icellu, & + icellu, & indxui, indxuj, & uvelE, vvelE, & uvelN, vvelN, & @@ -1623,7 +1591,6 @@ subroutine stress_U (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - ksub , & ! subcycling step icellu ! no. of cells where iceumask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & @@ -1676,10 +1643,10 @@ subroutine stress_U (nx_block, ny_block, & i = indxui(ij) j = indxuj(ij) - !----------------------------------------------------------------- - ! strain rates at U point - ! 
NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! strain rates at U point + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- call strain_rates_U (nx_block, ny_block, & i, j, & @@ -1694,43 +1661,40 @@ subroutine stress_U (nx_block, ny_block, & divU, tensionU, & shearU, DeltaU ) - !----------------------------------------------------------------- - ! viscous coefficients and replacement pressure at U point - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! viscous coefficients and replacement pressure at U point + !----------------------------------------------------------------- if (visc_coeff_method == 'avg_zeta') then - - call viscous_coeffs_and_rep_pressure_T2U (zetax2T(i ,j ), zetax2T(i ,j+1), & - zetax2T(i+1,j+1), zetax2T(i+1,j ), & - etax2T (i ,j ), etax2T (i ,j+1), & - etax2T (i+1,j+1), etax2T (i+1,j ), & - hm (i ,j ), hm (i ,j+1), & - hm (i+1,j+1), hm (i+1,j ), & - tarea (i ,j ), tarea (i ,j+1), & - tarea (i+1,j+1), tarea (i+1,j ), & - DeltaU,zetax2U, etax2U, rep_prsU) - + call viscous_coeffs_and_rep_pressure_T2U (zetax2T(i ,j ), zetax2T(i ,j+1), & + zetax2T(i+1,j+1), zetax2T(i+1,j ), & + etax2T (i ,j ), etax2T (i ,j+1), & + etax2T (i+1,j+1), etax2T (i+1,j ), & + hm (i ,j ), hm (i ,j+1), & + hm (i+1,j+1), hm (i+1,j ), & + tarea (i ,j ), tarea (i ,j+1), & + tarea (i+1,j+1), tarea (i+1,j ), & + DeltaU,zetax2U, etax2U, rep_prsU) elseif (visc_coeff_method == 'avg_strength') then - DminUarea = deltaminEVP*uarea(i,j) - - call viscous_coeffs_and_rep_pressure_U (strength(i ,j ), strength(i ,j+1), & - strength(i+1,j+1), strength(i+1,j ), & - hm (i ,j ) , hm (i ,j+1), & - hm (i+1,j+1) , hm (i+1,j ), & - tarea (i ,j ) , tarea (i ,j+1), & - tarea (i+1,j+1) , tarea (i+1,j ), & - DminUarea, & - DeltaU , capping, & - zetax2U, etax2U, rep_prsU) + DminUarea = deltaminEVP*uarea(i,j) + call viscous_coeffs_and_rep_pressure_U (strength(i ,j ), strength(i ,j+1), & + strength(i+1,j+1), strength(i+1,j ), & + hm (i ,j ) , hm (i ,j+1), & + hm (i+1,j+1) , hm (i+1,j ), & + tarea (i ,j ) , tarea (i ,j+1), & + tarea (i+1,j+1) , tarea (i+1,j ), & + DminUarea, & + DeltaU , capping, & + zetax2U, etax2U, rep_prsU) endif - !----------------------------------------------------------------- - ! the stresses ! kg/s^2 - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! the stresses ! kg/s^2 + !----------------------------------------------------------------- - ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code + ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code stresspU(i,j) = (stresspU(i,j)*(c1-arlx1i*revp) + & arlx1i*(zetax2U*divU - rep_prsU)) * denom1 @@ -1743,7 +1707,7 @@ subroutine stress_U (nx_block, ny_block, & enddo ! ij - end subroutine stress_U + end subroutine stress_U !======================================================================= @@ -1753,7 +1717,7 @@ end subroutine stress_U ! Nov 2021 subroutine div_stress (nx_block, ny_block, & - ksub, icell, & + icell, & indxi, indxj, & dxE_N, dyE_N, & dxT_U, dyT_U, & @@ -1768,7 +1732,6 @@ subroutine div_stress (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - ksub , & ! 
subcycling step icell ! no. of cells where epm (or npm) = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 old mode 100755 new mode 100644 index b896bdfe4..1ca41898d --- a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 @@ -857,10 +857,8 @@ subroutine stepu_last(NA_len, rhow, lb, ub, Cw, aiu, uocn, vocn, & vvel(iw) = (cca * cc2 - ccb * cc1) / ab2 ! calculate seabed stress component for outputs - if (seabed_stress) then - taubx(iw) = -uvel(iw) * Tbu(iw) / (sqrt(uold**2 + vold**2) + u0) - tauby(iw) = -vvel(iw) * Tbu(iw) / (sqrt(uold**2 + vold**2) + u0) - end if + taubx(iw) = -uvel(iw) * Tbu(iw) / (sqrt(uold**2 + vold**2) + u0) + tauby(iw) = -vvel(iw) * Tbu(iw) / (sqrt(uold**2 + vold**2) + u0) end do #ifdef _OPENACC @@ -1294,7 +1292,10 @@ subroutine ice_dyn_evp_1d_kernel if (ndte < 2) call abort_ice(subname & // ' ERROR: ndte must be 2 or higher for this kernel') - !$OMP PARALLEL PRIVATE(ksub) + ! tcraig, turn off the OMP directives here, Jan, 2022 + ! This produces non bit-for-bit results with different thread counts. + ! Seems like there isn't an opportunity for safe threading here ??? + !$XXXOMP PARALLEL PRIVATE(ksub) do ksub = 1, ndte - 1 call evp1d_stress(NA_len, ee, ne, se, 1, NA_len, uvel, & vvel, dxt, dyt, hte, htn, htem1, htnm1, strength, & @@ -1302,15 +1303,15 @@ subroutine ice_dyn_evp_1d_kernel stressm_2, stressm_3, stressm_4, stress12_1, & stress12_2, stress12_3, stress12_4, str1, str2, str3, & str4, str5, str6, str7, str8, skiptcell) - !$OMP BARRIER + !$XXXOMP BARRIER call evp1d_stepu(NA_len, rhow, 1, NA_len, cdn_ocn, aiu, & uocn, vocn, forcex, forcey, umassdti, fm, uarear, Tbu, & uvel_init, vvel_init, uvel, vvel, str1, str2, str3, & str4, str5, str6, str7, str8, nw, sw, sse, skipucell) - !$OMP BARRIER + !$XXXOMP BARRIER call evp1d_halo_update(NAVEL_len, 1, NA_len, uvel, vvel, & halo_parent) - !$OMP BARRIER + !$XXXOMP BARRIER end do call evp1d_stress(NA_len, ee, ne, se, 1, NA_len, uvel, vvel, & @@ -1319,16 +1320,16 @@ subroutine ice_dyn_evp_1d_kernel stressm_3, stressm_4, stress12_1, stress12_2, stress12_3, & stress12_4, str1, str2, str3, str4, str5, str6, str7, & str8, skiptcell, tarear, divu, rdg_conv, rdg_shear, shear) - !$OMP BARRIER + !$XXXOMP BARRIER call evp1d_stepu(NA_len, rhow, 1, NA_len, cdn_ocn, aiu, uocn, & vocn, forcex, forcey, umassdti, fm, uarear, Tbu, & uvel_init, vvel_init, uvel, vvel, str1, str2, str3, str4, & str5, str6, str7, str8, nw, sw, sse, skipucell, strintx, & strinty, taubx, tauby) - !$OMP BARRIER + !$XXXOMP BARRIER call evp1d_halo_update(NAVEL_len, 1, NA_len, uvel, vvel, & halo_parent) - !$OMP END PARALLEL + !$XXXOMP END PARALLEL end if ! 
master task diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 old mode 100755 new mode 100644 index 5fe524b32..8301a967b --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -201,7 +201,7 @@ subroutine init_dyn (dt) allocate(fcorN_blk(nx_block,ny_block,max_blocks)) endif - !$OMP PARALLEL DO PRIVATE(iblk,i,j) + !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block @@ -696,7 +696,6 @@ end subroutine dyn_prep2 subroutine stepu (nx_block, ny_block, & icellu, Cw, & indxui, indxuj, & - ksub, & aiu, str, & uocn, vocn, & waterx, watery, & @@ -711,8 +710,7 @@ subroutine stepu (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu, & ! total count when iceumask is true - ksub ! subcycling iteration + icellu ! total count when iceumask is true integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxui , & ! compressed index in i-direction @@ -746,7 +744,7 @@ subroutine stepu (nx_block, ny_block, & taubx , & ! seabed stress, x-direction (N/m^2) tauby ! seabed stress, y-direction (N/m^2) - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & Cw ! ocean-ice neutral drag coefficient ! local variables @@ -810,14 +808,10 @@ subroutine stepu (nx_block, ny_block, & uvel(i,j) = (cca*cc1 + ccb*cc2) / ab2 ! m/s vvel(i,j) = (cca*cc2 - ccb*cc1) / ab2 - ! calculate seabed stress component for outputs - if (ksub == ndte) then ! on last subcycling iteration - if ( seabed_stress ) then - taubx(i,j) = -uvel(i,j)*Tbu(i,j) / (sqrt(uold**2 + vold**2) + u0) - tauby(i,j) = -vvel(i,j)*Tbu(i,j) / (sqrt(uold**2 + vold**2) + u0) - endif - endif - + ! calculate seabed stress component for outputs + ! only needed on last iteration. + taubx(i,j) = -uvel(i,j)*Cb + tauby(i,j) = -vvel(i,j)*Cb enddo ! ij end subroutine stepu @@ -829,7 +823,7 @@ end subroutine stepu subroutine step_vel (nx_block, ny_block, & icell, Cw, & indxi, indxj, & - ksub, aiu, & + aiu, & uocn, vocn, & waterx, watery, & forcex, forcey, & @@ -842,8 +836,7 @@ subroutine step_vel (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icell, & ! total count when ice[en]mask is true - ksub ! subcycling iteration + icell ! total count when ice[en]mask is true integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxi , & ! compressed index in i-direction @@ -933,10 +926,9 @@ subroutine step_vel (nx_block, ny_block, & vvel(i,j) = (cca*cc2 - ccb*cc1) / ab2 ! calculate seabed stress component for outputs - if (ksub == ndte .and. seabed_stress) then ! on last subcycling iteration - taubx(i,j) = -uvel(i,j)*Tb(i,j) / ccc - tauby(i,j) = -vvel(i,j)*Tb(i,j) / ccc - endif + ! only needed on last iteration. + taubx(i,j) = -uvel(i,j)*Cb + tauby(i,j) = -vvel(i,j)*Cb enddo ! ij @@ -949,7 +941,7 @@ end subroutine step_vel subroutine stepu_Cgrid (nx_block, ny_block, & icell, Cw, & indxi, indxj, & - ksub, aiu, & + aiu, & uocn, vocn, & waterx, forcex, & massdti, fm, & @@ -960,8 +952,7 @@ subroutine stepu_Cgrid (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icell, & ! total count when ice[en]mask is true - ksub ! subcycling iteration + icell ! 
total count when ice[en]mask is true integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxi , & ! compressed index in i-direction @@ -1036,9 +1027,8 @@ subroutine stepu_Cgrid (nx_block, ny_block, & uvel(i,j) = (ccb*vold + cc1) / cca ! m/s ! calculate seabed stress component for outputs - if (ksub == ndte .and. seabed_stress) then ! on last subcycling iteration - taubx(i,j) = -uvel(i,j)*Tb(i,j) / ccc - endif + ! only needed on last iteration. + taubx(i,j) = -uvel(i,j)*Cb enddo ! ij @@ -1051,7 +1041,7 @@ end subroutine stepu_Cgrid subroutine stepv_Cgrid (nx_block, ny_block, & icell, Cw, & indxi, indxj, & - ksub, aiu, & + aiu, & uocn, vocn, & watery, forcey, & massdti, fm, & @@ -1062,8 +1052,7 @@ subroutine stepv_Cgrid (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icell, & ! total count when ice[en]mask is true - ksub ! subcycling iteration + icell ! total count when ice[en]mask is true integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxi , & ! compressed index in i-direction @@ -1138,9 +1127,8 @@ subroutine stepv_Cgrid (nx_block, ny_block, & vvel(i,j) = (-ccb*uold + cc2) / cca ! calculate seabed stress component for outputs - if (ksub == ndte .and. seabed_stress) then ! on last subcycling iteration - tauby(i,j) = -vvel(i,j)*Tb(i,j) / ccc - endif + ! only needed on last iteration. + tauby(i,j) = -vvel(i,j)*Cb enddo ! ij @@ -1159,9 +1147,7 @@ subroutine dyn_finish (nx_block, ny_block, & uvel, vvel, & uocn, vocn, & aiu, fm, & - strintx, strinty, & - strairx, strairy, & - strocnx, strocny) + strocnx, strocny) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1177,24 +1163,21 @@ subroutine dyn_finish (nx_block, ny_block, & uocn , & ! ocean current, x-direction (m/s) vocn , & ! ocean current, y-direction (m/s) aiu , & ! ice fraction on u-grid - fm , & ! Coriolis param. * mass in U-cell (kg/s) - strintx , & ! divergence of internal ice stress, x (N/m^2) - strinty , & ! divergence of internal ice stress, y (N/m^2) - strairx , & ! stress on ice by air, x-direction - strairy ! stress on ice by air, y-direction + fm ! Coriolis param. * mass in U-cell (kg/s) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & strocnx , & ! ice-ocean stress, x-direction strocny ! ice-ocean stress, y-direction + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + Cw ! ocean-ice neutral drag coefficient + ! local variables integer (kind=int_kind) :: & i, j, ij real (kind=dbl_kind) :: vrel, rhow - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - Cw ! ocean-ice neutral drag coefficient character(len=*), parameter :: subname = '(dyn_finish)' @@ -1277,13 +1260,14 @@ subroutine seabed_stress_factor_LKD (nx_block, ny_block, & Tbu ! seabed stress factor at 'grid_location' (N/m^2) character(len=*), optional, intent(inout) :: & - grid_location ! grid location (U, E, N), U assumed if not present + grid_location ! grid location (U, E, N), U assumed if not present real (kind=dbl_kind) :: & - au, & ! concentration of ice at 'grid_location' - hu, & ! volume per unit area of ice at 'grid_location' (mean thickness, m) - hwu, & ! water depth at 'grid_location' (m) - hcu ! critical thickness at 'grid_location' (m) + au , & ! concentration of ice at u location + hu , & ! volume per unit area of ice at u location (mean thickness, m) + hwu , & ! water depth at u location (m) + docalc_tbu, & ! 
logical as real (c0,c1) of whether to calculate Tbu + hcu ! critical thickness at u location (m) integer (kind=int_kind) :: & i, j, ij @@ -1308,18 +1292,17 @@ subroutine seabed_stress_factor_LKD (nx_block, ny_block, & hwu = grid_neighbor_min(hwater, i, j, l_grid_location) - if (hwu < threshold_hw) then + docalc_tbu = merge(c1,c0,hwu < threshold_hw) + - au = grid_neighbor_max(aice, i, j, l_grid_location) - hu = grid_neighbor_max(vice, i, j, l_grid_location) + au = grid_neighbor_max(aice, i, j, l_grid_location) + hu = grid_neighbor_max(vice, i, j, l_grid_location) - ! 1- calculate critical thickness - hcu = au * hwu / k1 + ! 1- calculate critical thickness + hcu = au * hwu / k1 - ! 2- calculate seabed stress factor - Tbu(i,j) = k2 * max(c0,(hu - hcu)) * exp(-alphab * (c1 - au)) - - endif + ! 2- calculate seabed stress factor + Tbu(i,j) = docalc_tbu*k2 * max(c0,(hu - hcu)) * exp(-alphab * (c1 - au)) enddo ! ij @@ -2166,7 +2149,7 @@ subroutine viscous_coeffs_and_rep_pressure_T (strength, DminTarea, & rep_prs = (c1-Ktens)*tmpcalc*Delta etax2 = epp2i*zetax2 - end subroutine viscous_coeffs_and_rep_pressure_T + end subroutine viscous_coeffs_and_rep_pressure_T subroutine viscous_coeffs_and_rep_pressure_T2U (zetax2T_00, zetax2T_01, & @@ -2291,7 +2274,7 @@ subroutine stack_velocity_field(uvel, vvel, fld2) character(len=*), parameter :: subname = '(stack_velocity_field)' ! load velocity into array for boundary updates - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks fld2(:,:,1,iblk) = uvel(:,:,iblk) fld2(:,:,2,iblk) = vvel(:,:,iblk) @@ -2323,7 +2306,7 @@ subroutine unstack_velocity_field(fld2, uvel, vvel) character(len=*), parameter :: subname = '(unstack_velocity_field)' ! Unload velocity from array after boundary updates - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks uvel(:,:,iblk) = fld2(:,:,1,iblk) vvel(:,:,iblk) = fld2(:,:,2,iblk) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 61720d2eb..5d414c204 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -328,7 +328,7 @@ subroutine implicit_solver (dt) ! tcraig, tcx, threading here leads to some non-reproducible results and failures in icepack_ice_strength ! need to do more debugging - !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) + !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) do iblk = 1, nblocks !----------------------------------------------------------------- @@ -425,34 +425,34 @@ subroutine implicit_solver (dt) !----------------------------------------------------------------- ! seabed stress factor Tbu (Tbu is part of Cb coefficient) !----------------------------------------------------------------- - if (seabed_stress) then - - !
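
! A small sketch, under assumed values, of the merge() idiom adopted in
! seabed_stress_factor_LKD above: the logical test becomes a c0/c1
! multiplier, so the ij loop carries no branch and Tbu is simply zero
! wherever the water is deeper than the threshold.
program merge_mask_sketch
   implicit none
   integer, parameter :: dbl_kind = kind(1.0d0)
   real (kind=dbl_kind), parameter :: c0 = 0.0_dbl_kind, c1 = 1.0_dbl_kind
   real (kind=dbl_kind) :: hwu, threshold_hw, docalc_tbu, Tbu

   hwu          = 12.0_dbl_kind   ! water depth (m), assumed
   threshold_hw = 30.0_dbl_kind   ! depth threshold (m), assumed

   docalc_tbu = merge(c1, c0, hwu < threshold_hw)
   Tbu        = docalc_tbu * 42.0_dbl_kind   ! product masked to zero when test is false

   print *, 'Tbu =', Tbu
end program merge_mask_sketch
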
tcraig, evp omp causes abort on cheyenne with pgi, turn off here too - !$TCXOMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - - if ( seabed_stress_method == 'LKD' ) then - + if ( seabed_stress_method == 'LKD' ) then + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks call seabed_stress_factor_LKD (nx_block, ny_block, & icellu (iblk), & indxui(:,iblk), indxuj(:,iblk), & vice(:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), Tbu(:,:,iblk)) + enddo + !$OMP END PARALLEL DO - elseif ( seabed_stress_method == 'probabilistic' ) then + elseif ( seabed_stress_method == 'probabilistic' ) then + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks call seabed_stress_factor_prob (nx_block, ny_block, & icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk), & hwater(:,:,iblk), Tbu(:,:,iblk)) - endif + enddo + !$OMP END PARALLEL DO - enddo - !$TCXOMP END PARALLEL DO + endif endif + !----------------------------------------------------------------- ! calc size of problem (ntot) and allocate solution vector !----------------------------------------------------------------- @@ -627,8 +627,8 @@ subroutine implicit_solver (dt) uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & aiu (:,:,iblk), fm (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & +! strintx (:,:,iblk), strinty (:,:,iblk), & +! strairx (:,:,iblk), strairy (:,:,iblk), & strocnx (:,:,iblk), strocny (:,:,iblk)) enddo @@ -830,9 +830,9 @@ subroutine anderson_solver (icellt , icellu, & !----------------------------------------------------------------- ! Calc zetax2, etax2, dPr/dx, dPr/dy, Cb and vrel = f(uprev_k, vprev_k) !----------------------------------------------------------------- - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk,stress_Pr) do iblk = 1, nblocks - + if (use_mean_vrel) then ulin(:,:,iblk) = p5*uprev_k(:,:,iblk) + p5*uvel(:,:,iblk) vlin(:,:,iblk) = p5*vprev_k(:,:,iblk) + p5*vvel(:,:,iblk) @@ -928,7 +928,7 @@ subroutine anderson_solver (icellt , icellu, & ! Prepare diagonal for preconditioner if (precond == 'diag' .or. precond == 'pgmres') then - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk,diag_rheo) do iblk = 1, nblocks ! first compute diagonal contributions due to rheology term call formDiag_step1 (nx_block , ny_block , & @@ -1207,8 +1207,6 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & character(len=*), parameter :: subname = '(calc_zeta_dPr)' - ! Initialize - ! Initialize stPr, zetax2 and etax2 to zero ! (for cells where icetmask is false) stPr = c0 @@ -2864,7 +2862,7 @@ subroutine fgmres (zetax2 , etax2 , & ! Normalize the first Arnoldi vector inverse_norm = c1 / norm_residual - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -2960,7 +2958,7 @@ subroutine fgmres (zetax2 , etax2 , & if (.not. almost_zero( hessenberg(nextit,initer) ) ) then ! Normalize next Arnoldi vector inverse_norm = c1 / hessenberg(nextit,initer) - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -3026,7 +3024,7 @@ subroutine fgmres (zetax2 , etax2 , & ! 
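
! A compact sketch of why ij, i and j were added to the PRIVATE clauses
! in the VP solver loops: i and j are assigned from compressed index
! arrays rather than used as DO indices, so without PRIVATE every
! thread would race on one shared copy. Sizes and values are assumed.
program omp_private_sketch
   implicit none
   integer, parameter :: nblocks = 4, icell = 3
   integer :: indxi(icell,nblocks), indxj(icell,nblocks)
   real    :: field(8,8,nblocks)
   integer :: iblk, ij, i, j

   indxi = 2 ; indxj = 3 ; field = 0.0

   !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j)
   do iblk = 1, nblocks
      do ij = 1, icell
         i = indxi(ij,iblk)   ! assigned scalars: must be private or they race
         j = indxj(ij,iblk)
         field(i,j,iblk) = real(iblk)
      enddo
   enddo
   !$OMP END PARALLEL DO

   print *, sum(field)
end program omp_private_sketch
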
Form linear combination to get new solution iterate do it = 1, initer t = rhs_hess(it) - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -3070,7 +3068,7 @@ subroutine fgmres (zetax2 , etax2 , & workspace_x = c0 workspace_y = c0 do it = 1, nextit - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -3257,7 +3255,7 @@ subroutine pgmres (zetax2 , etax2 , & ! Normalize the first Arnoldi vector inverse_norm = c1 / norm_residual - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -3342,7 +3340,7 @@ subroutine pgmres (zetax2 , etax2 , & if (.not. almost_zero( hessenberg(nextit,initer) ) ) then ! Normalize next Arnoldi vector inverse_norm = c1 / hessenberg(nextit,initer) - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -3410,7 +3408,7 @@ subroutine pgmres (zetax2 , etax2 , & workspace_y = c0 do it = 1, initer t = rhs_hess(it) - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -3466,7 +3464,7 @@ subroutine pgmres (zetax2 , etax2 , & workspace_x = c0 workspace_y = c0 do it = 1, nextit - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -3547,7 +3545,7 @@ subroutine precondition(zetax2 , etax2, & wx = vx wy = vy elseif (precond_type == 'diag') then ! Jacobi preconditioner (diagonal) - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -3630,7 +3628,7 @@ subroutine orthogonalize(ortho_type , initer , & do it = 1, initer local_dot = c0 - !$OMP PARALLEL DO PRIVATE(iblk, ij, i, j) + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -3650,7 +3648,7 @@ subroutine orthogonalize(ortho_type , initer , & ! Second loop of Gram-Schmidt (orthonormalize) do it = 1, initer - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -3669,7 +3667,7 @@ subroutine orthogonalize(ortho_type , initer , & do it = 1, initer local_dot = c0 - !$OMP PARALLEL DO PRIVATE(iblk, ij, i, j) + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -3684,7 +3682,7 @@ subroutine orthogonalize(ortho_type , initer , & hessenberg(it,initer) = global_sum(sum(local_dot), distrb_info) - !$OMP PARALLEL DO PRIVATE(iblk, ij, i, j) + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) diff --git a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 index dc6425adb..38650459f 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 @@ -17,6 +17,7 @@ module ice_transport_driver use ice_constants, only: c0, c1, p5, & field_loc_center, & field_type_scalar, field_type_vector, & + field_loc_NEcorner, & field_loc_Nface, field_loc_Eface use ice_fileunits, only: nu_diag use ice_exit, only: abort_ice @@ -355,7 +356,7 @@ subroutine transport_remap (dt) ! 
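
! A sketch of the local-sum pattern used in orthogonalize(): each block
! accumulates its own partial dot product and the partials are reduced
! afterwards. CICE reduces across tasks with global_sum over
! distrb_info; a threaded REDUCTION stands in for that here.
program local_dot_sketch
   implicit none
   integer, parameter :: dbl_kind = kind(1.0d0)
   integer, parameter :: nblocks = 8, n = 100
   real (kind=dbl_kind) :: x(n,nblocks), y(n,nblocks), local_dot
   integer :: iblk, ij

   x = 1.0_dbl_kind ; y = 2.0_dbl_kind
   local_dot = 0.0_dbl_kind

   !$OMP PARALLEL DO PRIVATE(iblk,ij) REDUCTION(+:local_dot)
   do iblk = 1, nblocks
      do ij = 1, n
         local_dot = local_dot + x(ij,iblk)*y(ij,iblk)
      enddo
   enddo
   !$OMP END PARALLEL DO

   print *, 'dot =', local_dot   ! expect 2*n*nblocks
end program local_dot_sketch
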
Here we assume that aice0 is up to date. !------------------------------------------------------------------- -! !$OMP PARALLEL DO PRIVATE(i,j,iblk) +! !$OMP PARALLEL DO PRIVATE(i,j,iblk) SCHEDULE(runtime) ! do iblk = 1, nblocks ! do j = 1, ny_block ! do i = 1, nx_block @@ -397,8 +398,7 @@ subroutine transport_remap (dt) ! call ice_timer_stop(timer_bound) -! MHRI: CHECK THIS OMP ... maybe ok: Were trcrn(:,:,1:ntrcr,:,iblk) in my testcode - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks !------------------------------------------------------------------- @@ -471,7 +471,7 @@ subroutine transport_remap (dt) tmin(:,:,:,:,:) = c0 tmax(:,:,:,:,:) = c0 - !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n) + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n) SCHEDULE(runtime) do iblk = 1, nblocks this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo @@ -516,7 +516,7 @@ subroutine transport_remap (dt) field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) - !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n) + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n) SCHEDULE(runtime) do iblk = 1, nblocks this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo @@ -561,8 +561,7 @@ subroutine transport_remap (dt) ! Given new fields, recompute state variables. !------------------------------------------------------------------- -! MHRI: CHECK THIS OMP ... maybe ok: Were trcrn(:,:,1:ntrcr,:,iblk) in my testcode - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call tracers_to_state (nx_block, ny_block, & @@ -666,7 +665,7 @@ subroutine transport_remap (dt) !------------------------------------------------------------------- if (l_monotonicity_check) then - !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n,ckflag,istop,jstop) + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n,ckflag,istop,jstop) SCHEDULE(runtime) do iblk = 1, nblocks this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo @@ -720,7 +719,7 @@ subroutine transport_upwind (dt) use ice_state, only: aice0, aicen, vicen, vsnon, trcrn, & uvel, vvel, trcr_depend, bound_state, trcr_base, & n_trcr_strata, nt_strata, uvelE, vvelN - use ice_grid, only: HTE, HTN, tarea, grid_ice + use ice_grid, only: HTE, HTN, tarea, tmask, grid_ice use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_bound, timer_advect @@ -768,38 +767,46 @@ subroutine transport_upwind (dt) ! vicen, vsnon, & ! ntrcr, trcrn) +! call ice_timer_start(timer_bound) +! call ice_HaloUpdate (uvel, halo_info, & +! field_loc_NEcorner, field_type_vector) +! call ice_HaloUpdate (vvel, halo_info, & +! field_loc_NEcorner, field_type_vector) +! call ice_timer_stop(timer_bound) + !------------------------------------------------------------------- ! Average corner velocities to edges. !------------------------------------------------------------------- if (grid_ice == 'CD' .or. 
grid_ice == 'C') then - uee(:,:,:)=uvelE(:,:,:) - vnn(:,:,:)=vvelN(:,:,:) + uee(:,:,:)=uvelE(:,:,:) + vnn(:,:,:)=vvelN(:,:,:) else - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - uee(i,j,iblk) = p5*(uvel(i,j,iblk) + uvel(i,j-1,iblk)) - vnn(i,j,iblk) = p5*(vvel(i,j,iblk) + vvel(i-1,j,iblk)) - enddo - enddo - enddo - !$OMP END PARALLEL DO - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (uee, halo_info, & - field_loc_Eface, field_type_vector) - call ice_HaloUpdate (vnn, halo_info, & - field_loc_Nface, field_type_vector) - call ice_timer_stop(timer_bound) + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) SCHEDULE(runtime) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + uee(i,j,iblk) = p5*(uvel(i,j,iblk) + uvel(i,j-1,iblk)) + vnn(i,j,iblk) = p5*(vvel(i,j,iblk) + vvel(i-1,j,iblk)) + enddo + enddo + enddo + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (uee, halo_info, & + field_loc_Eface, field_type_vector) + call ice_HaloUpdate (vnn, halo_info, & + field_loc_Nface, field_type_vector) + call ice_timer_stop(timer_bound) endif - !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) + + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) SCHEDULE(runtime) do iblk = 1, nblocks this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo @@ -839,6 +846,7 @@ subroutine transport_upwind (dt) ntrcr, narr, & trcr_depend(:), trcr_base(:,:), & n_trcr_strata(:), nt_strata(:,:), & + tmask(:,:, iblk), & aicen(:,:, :,iblk), trcrn (:,:,:,:,iblk), & vicen(:,:, :,iblk), vsnon (:,:, :,iblk), & aice0(:,:, iblk), works (:,:, :,iblk)) @@ -1643,6 +1651,7 @@ subroutine work_to_state (nx_block, ny_block, & trcr_base, & n_trcr_strata, & nt_strata, & + tmask, & aicen, trcrn, & vicen, vsnon, & aice0, works) @@ -1665,6 +1674,9 @@ subroutine work_to_state (nx_block, ny_block, & integer (kind=int_kind), dimension (ntrcr,2), intent(in) :: & nt_strata ! indices of underlying tracer layers + logical (kind=log_kind), intent (in) :: & + tmask (nx_block,ny_block) + real (kind=dbl_kind), intent (in) :: & works (nx_block,ny_block,narr) @@ -1684,6 +1696,7 @@ subroutine work_to_state (nx_block, ny_block, & integer (kind=int_kind) :: & i, j, ij, n ,&! counting indices narrays ,&! counter for number of state variable arrays + nt_Tsfc ,&! Tsfc tracer number icells ! number of ocean/ice cells integer (kind=int_kind), dimension (nx_block*ny_block) :: & @@ -1694,6 +1707,11 @@ subroutine work_to_state (nx_block, ny_block, & character(len=*), parameter :: subname = '(work_to_state)' + call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + ! for call to compute_tracers icells = 0 do j = 1, ny_block @@ -1736,7 +1754,14 @@ subroutine work_to_state (nx_block, ny_block, & n_trcr_strata = n_trcr_strata(:), & nt_strata = nt_strata(:,:), & trcrn = trcrn(i,j,:,n)) + + ! tcraig, don't let land points get non-zero Tsfc + if (.not.tmask(i,j)) then + trcrn(i,j,nt_Tsfc,n) = c0 + endif + enddo + narrays = narrays + ntrcr enddo ! 
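
! A standalone sketch, with assumed array bounds, of the B-grid fallback
! in transport_upwind above: east-face and north-face velocities are
! recovered by averaging the two NE-corner values that bracket each face.
program corner_to_edge_sketch
   implicit none
   integer, parameter :: dbl_kind = kind(1.0d0)
   real (kind=dbl_kind), parameter :: p5 = 0.5_dbl_kind
   real (kind=dbl_kind) :: uvel(0:3,0:3), vvel(0:3,0:3)
   real (kind=dbl_kind) :: uee(1:3,1:3), vnn(1:3,1:3)
   integer :: i, j

   uvel = 1.0_dbl_kind ; vvel = 2.0_dbl_kind

   do j = 1, 3
      do i = 1, 3
         uee(i,j) = p5*(uvel(i,j) + uvel(i,j-1))  ! east face: corners below/above in j
         vnn(i,j) = p5*(vvel(i,j) + vvel(i-1,j))  ! north face: corners left/right in i
      enddo
   enddo

   print *, uee(1,1), vnn(1,1)
end program corner_to_edge_sketch
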
ncat diff --git a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 index 922b3f06b..6f35b2da8 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 @@ -270,7 +270,7 @@ subroutine init_remap ! Note: On a rectangular grid, the integral of any odd function ! of x or y = 0. - !$OMP PARALLEL DO PRIVATE(iblk,i,j) + !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block @@ -462,10 +462,9 @@ subroutine horizontal_remap (dt, ntrace, & !---! Remap the open water area (without tracers). !---!------------------------------------------------------------------- - !--- tcraig, tcx, this omp loop leads to a seg fault in gnu - !--- need to check private variables and debug further - !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n,m, & - !$TCXOMP indxinc,indxjnc,mmask,tmask,istop,jstop,l_stop) + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n, & + !$OMP indxinc,indxjnc,mmask,tmask,istop,jstop,l_stop) & + !$OMP SCHEDULE(runtime) do iblk = 1, nblocks l_stop = .false. @@ -567,7 +566,7 @@ subroutine horizontal_remap (dt, ntrace, & endif enddo ! iblk - !$TCXOMP END PARALLEL DO + !$OMP END PARALLEL DO !------------------------------------------------------------------- ! Ghost cell updates @@ -595,7 +594,7 @@ subroutine horizontal_remap (dt, ntrace, & ! tracer fields if (maskhalo_remap) then halomask(:,:,:) = 0 - !$OMP PARALLEL DO PRIVATE(iblk,this_block,ilo,ihi,jlo,jhi,n,m,j,i) + !$OMP PARALLEL DO PRIVATE(iblk,this_block,ilo,ihi,jlo,jhi,n,m,j,i) SCHEDULE(runtime) do iblk = 1, nblocks this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo @@ -639,12 +638,11 @@ subroutine horizontal_remap (dt, ntrace, & endif ! nghost - !--- tcraig, tcx, this omp loop leads to a seg fault in gnu - !--- need to check private variables and debug further - !$TCXOMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,n,m, & - !$TCXOMP edgearea_e,edgearea_n,edge,iflux,jflux, & - !$TCXOMP xp,yp,indxing,indxjng,mflxe,mflxn, & - !$TCXOMP mtflxe,mtflxn,triarea,istop,jstop,l_stop) + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,n, & + !$OMP edgearea_e,edgearea_n,edge,iflux,jflux, & + !$OMP xp,yp,indxing,indxjng,mflxe,mflxn, & + !$OMP mtflxe,mtflxn,triarea,istop,jstop,l_stop) & + !$OMP SCHEDULE(runtime) do iblk = 1, nblocks l_stop = .false. @@ -867,7 +865,7 @@ subroutine horizontal_remap (dt, ntrace, & enddo ! n enddo ! iblk - !$TCXOMP END PARALLEL DO + !$OMP END PARALLEL DO end subroutine horizontal_remap @@ -2984,17 +2982,17 @@ subroutine locate_triangles (nx_block, ny_block, & i = indxid(ij) j = indxjd(ij) if (abs(areasum(i,j) - edgearea(i,j)) > eps13*areafac_c(i,j)) then - print*, '' - print*, 'Areas do not add up: m, i, j, edge =', & + write(nu_diag,*) '' + write(nu_diag,*) 'Areas do not add up: m, i, j, edge =', & my_task, i, j, trim(edge) - print*, 'edgearea =', edgearea(i,j) - print*, 'areasum =', areasum(i,j) - print*, 'areafac_c =', areafac_c(i,j) - print*, '' - print*, 'Triangle areas:' + write(nu_diag,*) 'edgearea =', edgearea(i,j) + write(nu_diag,*) 'areasum =', areasum(i,j) + write(nu_diag,*) 'areafac_c =', areafac_c(i,j) + write(nu_diag,*) '' + write(nu_diag,*) 'Triangle areas:' do ng = 1, ngroups ! 
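
! A tiny sketch of the print* -> write(nu_diag,*) conversion applied in
! locate_triangles: diagnostics go to an explicit unit so each task's
! log can be routed to its own file. The file name here is assumed.
program nu_diag_sketch
   implicit none
   integer :: nu_diag

   open (newunit=nu_diag, file='ice_diag.d', status='replace')
   write(nu_diag,*) 'WARNING: xp =', 0.51d0
   write(nu_diag,*) 'm, i, j, ng, nv =', 0, 5, 7, 1, 2
   close(nu_diag)
end program nu_diag_sketch
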
not vector friendly if (abs(triarea(i,j,ng)) > eps16*abs(areafact(i,j,ng))) then - print*, ng, triarea(i,j,ng) + write(nu_diag,*) ng, triarea(i,j,ng) endif enddo endif @@ -3051,18 +3049,18 @@ subroutine locate_triangles (nx_block, ny_block, & do i = ib, ie if (abs(triarea(i,j,ng)) > puny) then if (abs(xp(i,j,nv,ng)) > p5+puny) then - print*, '' - print*, 'WARNING: xp =', xp(i,j,nv,ng) - print*, 'm, i, j, ng, nv =', my_task, i, j, ng, nv -! print*, 'yil,xdl,xcl,ydl=',yil,xdl,xcl,ydl -! print*, 'yir,xdr,xcr,ydr=',yir,xdr,xcr,ydr -! print*, 'ydm=',ydm + write(nu_diag,*) '' + write(nu_diag,*) 'WARNING: xp =', xp(i,j,nv,ng) + write(nu_diag,*) 'm, i, j, ng, nv =', my_task, i, j, ng, nv +! write(nu_diag,*) 'yil,xdl,xcl,ydl=',yil,xdl,xcl,ydl +! write(nu_diag,*) 'yir,xdr,xcr,ydr=',yir,xdr,xcr,ydr +! write(nu_diag,*) 'ydm=',ydm ! stop endif if (abs(yp(i,j,nv,ng)) > p5+puny) then - print*, '' - print*, 'WARNING: yp =', yp(i,j,nv,ng) - print*, 'm, i, j, ng, nv =', my_task, i, j, ng, nv + write(nu_diag,*) '' + write(nu_diag,*) 'WARNING: yp =', yp(i,j,nv,ng) + write(nu_diag,*) 'm, i, j, ng, nv =', my_task, i, j, ng, nv endif endif ! triarea enddo diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index e6a27c96f..4c7817a3d 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -120,6 +120,7 @@ subroutine input_data damping_andacc, start_andacc, use_mean_vrel, ortho_type use ice_transport_driver, only: advection, conserv_check use ice_restoring, only: restore_ice + use ice_timers, only: timer_stats #ifdef CESMCOUPLED use shr_file_mod, only: shr_file_setIO #endif @@ -178,7 +179,7 @@ subroutine input_data print_global, print_points, latpnt, lonpnt, & debug_forcing, histfreq, histfreq_n, hist_avg, & history_dir, history_file, history_precision, cpl_bgc, & - histfreq_base, dumpfreq_base, & + histfreq_base, dumpfreq_base, timer_stats, & conserv_check, debug_model, debug_model_step, & debug_model_i, debug_model_j, debug_model_iblk, debug_model_task, & year_init, month_init, day_init, sec_init, & @@ -298,6 +299,7 @@ subroutine input_data debug_model_task = -1 ! debug model local task number print_points = .false. ! if true, print point data print_global = .true. ! if true, print global diagnostic data + timer_stats = .false. ! if true, print out detailed timer statistics bfbflag = 'off' ! 
off = optimized diag_type = 'stdout' diag_file = 'ice_diag.d' @@ -565,53 +567,154 @@ subroutine input_data nml_filename = 'ice_in'//trim(inst_suffix) #endif - call get_fileunit(nu_nml) - if (my_task == master_task) then - open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + + call get_fileunit(nu_nml) + open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - nml_error = -1 - else - nml_error = 1 - endif + call abort_ice(subname//'ERROR: open file '// & + trim(nml_filename), & + file=__FILE__, line=__LINE__) + endif + write(nu_diag,*) subname,' Reading setup_nml' + rewind(unit=nu_nml, iostat=nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: setup_nml rewind ', & + file=__FILE__, line=__LINE__) + endif + nml_error = 1 do while (nml_error > 0) - print*,'Reading setup_nml' - read(nu_nml, nml=setup_nml,iostat=nml_error) - if (nml_error /= 0) exit - print*,'Reading grid_nml' - read(nu_nml, nml=grid_nml,iostat=nml_error) - if (nml_error /= 0) exit - print*,'Reading tracer_nml' - read(nu_nml, nml=tracer_nml,iostat=nml_error) - if (nml_error /= 0) exit - print*,'Reading thermo_nml' - read(nu_nml, nml=thermo_nml,iostat=nml_error) - if (nml_error /= 0) exit - print*,'Reading dynamics_nml' - read(nu_nml, nml=dynamics_nml,iostat=nml_error) - if (nml_error /= 0) exit - print*,'Reading shortwave_nml' - read(nu_nml, nml=shortwave_nml,iostat=nml_error) - if (nml_error /= 0) exit - print*,'Reading ponds_nml' - read(nu_nml, nml=ponds_nml,iostat=nml_error) - if (nml_error /= 0) exit - print*,'Reading snow_nml' - read(nu_nml, nml=snow_nml,iostat=nml_error) - if (nml_error /= 0) exit - print*,'Reading forcing_nml' - read(nu_nml, nml=forcing_nml,iostat=nml_error) - if (nml_error /= 0) exit + read(nu_nml, nml=setup_nml,iostat=nml_error) end do - if (nml_error == 0) close(nu_nml) - endif - call broadcast_scalar(nml_error, master_task) - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: reading namelist', & - file=__FILE__, line=__LINE__) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: setup_nml reading ', & + file=__FILE__, line=__LINE__) + endif + + write(nu_diag,*) subname,' Reading grid_nml' + rewind(unit=nu_nml, iostat=nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: grid_nml rewind ', & + file=__FILE__, line=__LINE__) + endif + nml_error = 1 + do while (nml_error > 0) + read(nu_nml, nml=grid_nml,iostat=nml_error) + end do + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: grid_nml reading ', & + file=__FILE__, line=__LINE__) + endif + + write(nu_diag,*) subname,' Reading tracer_nml' + rewind(unit=nu_nml, iostat=nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: tracer_nml rewind ', & + file=__FILE__, line=__LINE__) + endif + nml_error = 1 + do while (nml_error > 0) + read(nu_nml, nml=tracer_nml,iostat=nml_error) + end do + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: tracer_nml reading ', & + file=__FILE__, line=__LINE__) + endif + + write(nu_diag,*) subname,' Reading thermo_nml' + rewind(unit=nu_nml, iostat=nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: thermo_nml rewind ', & + file=__FILE__, line=__LINE__) + endif + nml_error = 1 + do while (nml_error > 0) + read(nu_nml, nml=thermo_nml,iostat=nml_error) + end do + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: thermo_nml reading ', & + file=__FILE__, line=__LINE__) + endif + + write(nu_diag,*) subname,' Reading dynamics_nml' + rewind(unit=nu_nml, 
iostat=nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: dynamics_nml rewind ', & + file=__FILE__, line=__LINE__) + endif + nml_error = 1 + do while (nml_error > 0) + read(nu_nml, nml=dynamics_nml,iostat=nml_error) + end do + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: dynamics_nml reading ', & + file=__FILE__, line=__LINE__) + endif + + write(nu_diag,*) subname,' Reading shortwave_nml' + rewind(unit=nu_nml, iostat=nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: shortwave_nml rewind ', & + file=__FILE__, line=__LINE__) + endif + nml_error = 1 + do while (nml_error > 0) + read(nu_nml, nml=shortwave_nml,iostat=nml_error) + end do + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: shortwave_nml reading ', & + file=__FILE__, line=__LINE__) + endif + + write(nu_diag,*) subname,' Reading ponds_nml' + rewind(unit=nu_nml, iostat=nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: ponds_nml rewind ', & + file=__FILE__, line=__LINE__) + endif + nml_error = 1 + do while (nml_error > 0) + read(nu_nml, nml=ponds_nml,iostat=nml_error) + end do + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: ponds_nml reading ', & + file=__FILE__, line=__LINE__) + endif + + write(nu_diag,*) subname,' Reading snow_nml' + rewind(unit=nu_nml, iostat=nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: snow_nml rewind ', & + file=__FILE__, line=__LINE__) + endif + nml_error = 1 + do while (nml_error > 0) + read(nu_nml, nml=snow_nml,iostat=nml_error) + end do + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: snow_nml reading ', & + file=__FILE__, line=__LINE__) + endif + + write(nu_diag,*) subname,' Reading forcing_nml' + rewind(unit=nu_nml, iostat=nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: forcing_nml rewind ', & + file=__FILE__, line=__LINE__) + endif + nml_error = 1 + do while (nml_error > 0) + read(nu_nml, nml=forcing_nml,iostat=nml_error) + end do + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: forcing_nml reading ', & + file=__FILE__, line=__LINE__) + endif + + close(nu_nml) + call release_fileunit(nu_nml) endif - call release_fileunit(nu_nml) !----------------------------------------------------------------- ! set up diagnostics output and resolve conflicts @@ -676,6 +779,7 @@ subroutine input_data call broadcast_scalar(debug_model_task, master_task) call broadcast_scalar(print_points, master_task) call broadcast_scalar(print_global, master_task) + call broadcast_scalar(timer_stats, master_task) call broadcast_scalar(bfbflag, master_task) call broadcast_scalar(diag_type, master_task) call broadcast_scalar(diag_file, master_task) @@ -2037,6 +2141,7 @@ subroutine input_data write(nu_diag,1021) ' debug_model_i = ', debug_model_j write(nu_diag,1021) ' debug_model_iblk = ', debug_model_iblk write(nu_diag,1021) ' debug_model_task = ', debug_model_task + write(nu_diag,1011) ' timer_stats = ', timer_stats write(nu_diag,1031) ' bfbflag = ', trim(bfbflag) write(nu_diag,1021) ' numin = ', numin write(nu_diag,1021) ' numax = ', numax @@ -2466,7 +2571,6 @@ subroutine init_state ! Set state variables !----------------------------------------------------------------- -!MHRI: CHECK THIS OMP !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block, & !$OMP iglob,jglob) do iblk = 1, nblocks @@ -2703,7 +2807,11 @@ subroutine set_state_var (nx_block, ny_block, & aicen(i,j,n) = c0 vicen(i,j,n) = c0 vsnon(i,j,n) = c0 - trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! 
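
! A condensed sketch of the per-group pattern now used for every
! namelist group: rewind, read until the group is found, and abort with
! a file/line reference on failure. The group and variable names here
! are placeholders, not the CICE namelists.
program namelist_read_sketch
   implicit none
   integer :: nu_nml, nml_error
   logical :: demo_flag
   namelist /demo_nml/ demo_flag

   demo_flag = .false.
   open (newunit=nu_nml, file='demo_in', status='old', iostat=nml_error)
   if (nml_error /= 0) stop 'ERROR: open file demo_in'

   rewind(unit=nu_nml, iostat=nml_error)
   if (nml_error /= 0) stop 'ERROR: demo_nml rewind'

   nml_error = 1
   do while (nml_error > 0)
      read(nu_nml, nml=demo_nml, iostat=nml_error)
   end do
   if (nml_error /= 0) stop 'ERROR: demo_nml reading'

   close(nu_nml)
   print *, 'demo_flag =', demo_flag
end program namelist_read_sketch
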
surface temperature + if (tmask(i,j)) then + trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! surface temperature + else + trcrn(i,j,nt_Tsfc,n) = c0 ! at land grid cells (for clean history/restart files) + endif if (ntrcr >= 2) then do it = 2, ntrcr trcrn(i,j,it,n) = c0 diff --git a/cicecore/cicedynB/general/ice_step_mod.F90 b/cicecore/cicedynB/general/ice_step_mod.F90 index 46a9c9389..3b0201cbf 100644 --- a/cicecore/cicedynB/general/ice_step_mod.F90 +++ b/cicecore/cicedynB/general/ice_step_mod.F90 @@ -121,7 +121,7 @@ subroutine prep_radiation (iblk) character(len=*), parameter :: subname = '(prep_radiation)' - call ice_timer_start(timer_sw) ! shortwave + call ice_timer_start(timer_sw,iblk) ! shortwave alvdr_init(:,:,iblk) = c0 alvdf_init(:,:,iblk) = c0 @@ -169,7 +169,7 @@ subroutine prep_radiation (iblk) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - call ice_timer_stop(timer_sw) ! shortwave + call ice_timer_stop(timer_sw,iblk) ! shortwave end subroutine prep_radiation @@ -739,7 +739,7 @@ subroutine update_state (dt, daidt, dvidt, dagedt, offset) use ice_state, only: aicen, trcrn, vicen, vsnon, & aice, trcr, vice, vsno, aice0, trcr_depend, & bound_state, trcr_base, nt_strata, n_trcr_strata - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound, timer_updstate real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -763,6 +763,7 @@ subroutine update_state (dt, daidt, dvidt, dagedt, offset) character(len=*), parameter :: subname='(update_state)' + call ice_timer_start(timer_updstate) call icepack_query_tracer_flags(tr_iage_out=tr_iage) call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_query_tracer_indices(nt_iage_out=nt_iage) @@ -780,7 +781,7 @@ subroutine update_state (dt, daidt, dvidt, dagedt, offset) ntrcr, trcrn) call ice_timer_stop(timer_bound) - !$OMP PARALLEL DO PRIVATE(iblk,i,j) + !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block @@ -834,6 +835,7 @@ subroutine update_state (dt, daidt, dvidt, dagedt, offset) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) + call ice_timer_stop(timer_updstate) end subroutine update_state @@ -1009,8 +1011,8 @@ subroutine step_dyn_ridge (dt, ndtd, iblk) ! Ridging !----------------------------------------------------------------- - call ice_timer_start(timer_column) - call ice_timer_start(timer_ridge) + call ice_timer_start(timer_column,iblk) + call ice_timer_start(timer_ridge,iblk) call icepack_query_tracer_sizes(ntrcr_out=ntrcr, nbtrcr_out=nbtrcr) call icepack_warnings_flush(nu_diag) @@ -1079,8 +1081,8 @@ subroutine step_dyn_ridge (dt, ndtd, iblk) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - call ice_timer_stop(timer_ridge) - call ice_timer_stop(timer_column) + call ice_timer_stop(timer_ridge,iblk) + call ice_timer_stop(timer_column,iblk) end subroutine step_dyn_ridge @@ -1267,7 +1269,7 @@ subroutine step_radiation (dt, iblk) character(len=*), parameter :: subname = '(step_radiation)' - call ice_timer_start(timer_sw) ! shortwave + call ice_timer_start(timer_sw,iblk) ! shortwave call icepack_query_tracer_sizes(ntrcr_out=ntrcr, & nbtrcr_out=nbtrcr, nbtrcr_sw_out=nbtrcr_sw) @@ -1386,7 +1388,7 @@ subroutine step_radiation (dt, iblk) deallocate(ztrcr_sw) deallocate(rsnow) - call ice_timer_stop(timer_sw) ! 
shortwave + call ice_timer_stop(timer_sw,iblk) ! shortwave end subroutine step_radiation @@ -1614,7 +1616,7 @@ subroutine biogeochemistry (dt, iblk) if (tr_brine .or. skl_bgc) then - call ice_timer_start(timer_bgc) ! biogeochemistry + call ice_timer_start(timer_bgc,iblk) ! biogeochemistry this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo @@ -1707,7 +1709,7 @@ subroutine biogeochemistry (dt, iblk) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - call ice_timer_stop(timer_bgc) ! biogeochemistry + call ice_timer_stop(timer_bgc,iblk) ! biogeochemistry endif ! tr_brine .or. skl_bgc diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 index 046cf9336..bc14e30d3 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 @@ -32,6 +32,9 @@ module ice_timers ice_timer_print_all, & ice_timer_check + logical(log_kind), public :: & + timer_stats ! controls printing of timer statistics + !----------------------------------------------------------------------- ! public timers !----------------------------------------------------------------------- @@ -62,8 +65,18 @@ module ice_timers timer_bgc, &! biogeochemistry timer_forcing, &! forcing timer_evp_1d, &! timer only loop - timer_evp_2d ! timer including conversion 1d/2d -! timer_tmp ! for temporary timings + timer_evp_2d, &! timer including conversion 1d/2d + timer_updstate ! update state +! timer_updstate, &! update state +! timer_tmp1, &! for temporary timings +! timer_tmp2, &! for temporary timings +! timer_tmp3, &! for temporary timings +! timer_tmp4, &! for temporary timings +! timer_tmp5, &! for temporary timings +! timer_tmp6, &! for temporary timings +! timer_tmp7, &! for temporary timings +! timer_tmp8, &! for temporary timings +! timer_tmp9 ! for temporary timings !----------------------------------------------------------------------- ! @@ -173,7 +186,7 @@ subroutine init_ice_timers ! call get_ice_timer(timer_ponds, 'Meltponds',nblocks,distrb_info%nprocs) call get_ice_timer(timer_ridge, 'Ridging', nblocks,distrb_info%nprocs) ! call get_ice_timer(timer_catconv, 'Cat Conv', nblocks,distrb_info%nprocs) - call get_ice_timer(timer_fsd, 'Floe size',nblocks,distrb_info%nprocs) + call get_ice_timer(timer_fsd, 'FloeSize', nblocks,distrb_info%nprocs) call get_ice_timer(timer_couple, 'Coupling', nblocks,distrb_info%nprocs) call get_ice_timer(timer_readwrite,'ReadWrite',nblocks,distrb_info%nprocs) call get_ice_timer(timer_diags, 'Diags ',nblocks,distrb_info%nprocs) @@ -187,9 +200,18 @@ subroutine init_ice_timers call get_ice_timer(timer_cplsend, 'Cpl-Send', nblocks,distrb_info%nprocs) call get_ice_timer(timer_sndrcv, 'Snd->Rcv', nblocks,distrb_info%nprocs) #endif - call get_ice_timer(timer_evp_1d, '1d-evp', nblocks,distrb_info%nprocs) - call get_ice_timer(timer_evp_2d, '2d-evp', nblocks,distrb_info%nprocs) -! call get_ice_timer(timer_tmp, ' ',nblocks,distrb_info%nprocs) + call get_ice_timer(timer_evp_1d, '1d-evp', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_evp_2d, '2d-evp', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_updstate, 'UpdState', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp1, 'tmp1', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp2, 'tmp2', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp3, 'tmp3', nblocks,distrb_info%nprocs) +! 
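
! A minimal sketch of what a named timer like the new 'UpdState' timer
! measures. This uses bare system_clock rather than CICE's timer
! infrastructure and is only illustrative.
program timer_sketch
   implicit none
   integer, parameter :: dbl_kind = kind(1.0d0)
   integer (kind=8) :: cstart, cstop, crate
   real (kind=dbl_kind) :: x
   integer :: i

   call system_clock(count=cstart, count_rate=crate)
   x = 0.0_dbl_kind
   do i = 1, 1000000
      x = x + sqrt(real(i,dbl_kind))   ! stand-in for update_state work
   enddo
   call system_clock(count=cstop)

   print *, 'elapsed (s):', real(cstop-cstart,dbl_kind)/real(crate,dbl_kind)
   print *, 'checksum:', x
end program timer_sketch
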
call get_ice_timer(timer_tmp4, 'tmp4', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp5, 'tmp5', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp6, 'tmp6', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp7, 'tmp7', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp8, 'tmp8', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp9, 'tmp9', nblocks,distrb_info%nprocs) !----------------------------------------------------------------------- @@ -333,6 +355,7 @@ subroutine ice_timer_start(timer_id, block_id) character(len=*), parameter :: subname = '(ice_timer_start)' +! if (my_task == master_task) write(nu_diag,*) subname,trim(all_timers(timer_id)%name) !----------------------------------------------------------------------- ! ! if timer is defined, start it up @@ -433,6 +456,7 @@ subroutine ice_timer_stop(timer_id, block_id) character(len=*), parameter :: subname = '(ice_timer_stop)' +! if (my_task == master_task) write(nu_diag,*) subname,trim(all_timers(timer_id)%name) !----------------------------------------------------------------------- ! ! get end cycles diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 index 4599de42e..b18c35040 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 @@ -30,6 +30,9 @@ module ice_timers ice_timer_print_all, & ice_timer_check + logical(log_kind), public :: & + timer_stats ! controls printing of timer statistics + !----------------------------------------------------------------------- ! public timers !----------------------------------------------------------------------- @@ -54,8 +57,18 @@ module ice_timers timer_bgc, &! biogeochemistry timer_forcing, &! forcing timer_evp_1d, &! timer only loop - timer_evp_2d ! timer including conversion 1d/2d -! timer_tmp ! for temporary timings + timer_evp_2d, &! timer including conversion 1d/2d + timer_updstate ! update state +! timer_updstate, &! update state +! timer_tmp1, &! for temporary timings +! timer_tmp2, &! for temporary timings +! timer_tmp3, &! for temporary timings +! timer_tmp4, &! for temporary timings +! timer_tmp5, &! for temporary timings +! timer_tmp6, &! for temporary timings +! timer_tmp7, &! for temporary timings +! timer_tmp8, &! for temporary timings +! timer_tmp9 ! for temporary timings !----------------------------------------------------------------------- ! @@ -187,7 +200,7 @@ subroutine init_ice_timers ! call get_ice_timer(timer_ponds, 'Meltponds',nblocks,distrb_info%nprocs) call get_ice_timer(timer_ridge, 'Ridging', nblocks,distrb_info%nprocs) ! call get_ice_timer(timer_catconv, 'Cat Conv', nblocks,distrb_info%nprocs) - call get_ice_timer(timer_fsd, 'Floe size',nblocks,distrb_info%nprocs) + call get_ice_timer(timer_fsd, 'FloeSize', nblocks,distrb_info%nprocs) call get_ice_timer(timer_couple, 'Coupling', nblocks,distrb_info%nprocs) call get_ice_timer(timer_readwrite,'ReadWrite',nblocks,distrb_info%nprocs) call get_ice_timer(timer_diags, 'Diags ',nblocks,distrb_info%nprocs) @@ -197,7 +210,16 @@ subroutine init_ice_timers call get_ice_timer(timer_forcing, 'Forcing', nblocks,distrb_info%nprocs) call get_ice_timer(timer_evp_1d, '1d-evp', nblocks,distrb_info%nprocs) call get_ice_timer(timer_evp_2d, '2d-evp', nblocks,distrb_info%nprocs) -! call get_ice_timer(timer_tmp, ' ',nblocks,distrb_info%nprocs) + call get_ice_timer(timer_updstate, 'UpdState', nblocks,distrb_info%nprocs) +! 
call get_ice_timer(timer_tmp1, 'tmp1', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp2, 'tmp2', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp3, 'tmp3', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp4, 'tmp4', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp5, 'tmp5', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp6, 'tmp6', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp7, 'tmp7', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp8, 'tmp8', nblocks,distrb_info%nprocs) +! call get_ice_timer(timer_tmp9, 'tmp9', nblocks,distrb_info%nprocs) !----------------------------------------------------------------------- @@ -341,6 +363,8 @@ subroutine ice_timer_start(timer_id, block_id) character(len=*), parameter :: subname = '(ice_timer_start)' +! if (my_task == master_task) write(nu_diag,*) subname,trim(all_timers(timer_id)%name) + !----------------------------------------------------------------------- ! ! if timer is defined, start it up @@ -444,6 +468,8 @@ subroutine ice_timer_stop(timer_id, block_id) character(len=*), parameter :: subname = '(ice_timer_stop)' +! if (my_task == master_task) write(nu_diag,*) subname,trim(all_timers(timer_id)%name) + !----------------------------------------------------------------------- ! ! get end cycles diff --git a/cicecore/cicedynB/infrastructure/ice_domain.F90 b/cicecore/cicedynB/infrastructure/ice_domain.F90 index ee7d98b50..6f8fee49a 100644 --- a/cicecore/cicedynB/infrastructure/ice_domain.F90 +++ b/cicecore/cicedynB/infrastructure/ice_domain.F90 @@ -164,24 +164,27 @@ subroutine init_domain_blocks ny_global = -1 ! NYGLOB, j-axis size landblockelim = .true. ! on by default - call get_fileunit(nu_nml) if (my_task == master_task) then - open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + write(nu_diag,*) subname,' Reading domain_nml' + + call get_fileunit(nu_nml) + open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - nml_error = -1 - else - nml_error = 1 + call abort_ice(subname//'ERROR: domain_nml open file '// & + trim(nml_filename), & + file=__FILE__, line=__LINE__) endif + + nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=domain_nml,iostat=nml_error) end do - if (nml_error == 0) close(nu_nml) - endif - call release_fileunit(nu_nml) - - call broadcast_scalar(nml_error, master_task) - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: error reading domain_nml') + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: domain_nml reading ', & + file=__FILE__, line=__LINE__) + endif + close(nu_nml) + call release_fileunit(nu_nml) endif call broadcast_scalar(nprocs, master_task) diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 0b174a408..80e876571 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -437,6 +437,9 @@ subroutine init_grid2 field_loc_center, field_loc_NEcorner, field_loc_Nface, field_loc_Eface, & field_type_scalar, field_type_vector, field_type_angle use ice_domain_size, only: max_blocks +#if defined (_OPENMP) + use OMP_LIB +#endif integer (kind=int_kind) :: & i, j, iblk, & @@ -455,6 +458,11 @@ subroutine init_grid2 type (block) :: & this_block ! block information for current block +#if defined (_OPENMP) + integer(kind=omp_sched_kind) :: ompsk ! openmp schedule + integer(kind=int_kind) :: ompcs ! 
openmp schedule count +#endif + character(len=*), parameter :: subname = '(init_grid2)' !----------------------------------------------------------------- @@ -485,6 +493,29 @@ subroutine init_grid2 call rectgrid ! regular rectangular grid endif + !----------------------------------------------------------------- + ! Diagnose OpenMP thread schedule, force order in output + !----------------------------------------------------------------- + +#if defined (_OPENMP) + !$OMP PARALLEL DO ORDERED PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + if (my_task == master_task) then + !$OMP ORDERED + if (iblk == 1) then + call omp_get_schedule(ompsk,ompcs) + write(nu_diag,*) '' + write(nu_diag,*) subname,' OpenMP runtime thread schedule:' + write(nu_diag,*) subname,' omp schedule = ',ompsk,ompcs + endif + write(nu_diag,*) subname,' block, thread = ',iblk,OMP_GET_THREAD_NUM() + call flush_fileunit(nu_diag) + !$OMP END ORDERED + endif + enddo + !$OMP END PARALLEL DO +#endif + !----------------------------------------------------------------- ! T-grid cell and U-grid cell quantities ! Fill halo data locally where possible to avoid missing diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index b3cd413a9..474ed892e 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -555,6 +555,17 @@ subroutine restartfile (ice_ic) endif + ! set Tsfcn to c0 on land + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (.not. tmask(i,j,iblk)) trcrn(i,j,nt_Tsfc,:,iblk) = c0 + enddo + enddo + enddo + !$OMP END PARALLEL DO + ! for mixed layer model if (oceanmixed_ice) then diff --git a/cicecore/cicedynB/infrastructure/ice_restoring.F90 b/cicecore/cicedynB/infrastructure/ice_restoring.F90 index c7254cd80..f21e50513 100644 --- a/cicecore/cicedynB/infrastructure/ice_restoring.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restoring.F90 @@ -394,7 +394,11 @@ subroutine set_restore_var (nx_block, ny_block, & aicen(i,j,n) = c0 vicen(i,j,n) = c0 vsnon(i,j,n) = c0 - trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! surface temperature + if (tmask(i,j)) then + trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! surface temperature + else + trcrn(i,j,nt_Tsfc,n) = c0 ! on land gridcells + endif if (ntrcr >= 2) then do it = 2, ntrcr trcrn(i,j,it,n) = c0 diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 24a5b75be..74638e45a 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -745,7 +745,8 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & status = pio_inq_varid(File,trim(vname),vardesc) if (status /= PIO_noerr) then - call abort_ice(subname//"ERROR: CICE restart? Missing variable: "//trim(vname)) + call abort_ice(subname// & + "ERROR: CICE restart? Missing variable: "//trim(vname)) endif status = pio_inq_varndims(File, vardesc, ndims) @@ -755,6 +756,10 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & ! if (ndim3 == ncat .and. ncat>1) then if (ndim3 == ncat .and. ndims == 3) then call pio_read_darray(File, vardesc, iodesc3d_ncat, work, status) +!#ifndef CESM1_PIO +!! This only works for PIO2 +! 
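
! A standalone sketch of the schedule diagnostic added to init_grid2:
! with SCHEDULE(runtime) the actual schedule comes from the OMP_SCHEDULE
! environment variable, and omp_get_schedule reports what was selected.
program omp_schedule_sketch
#if defined (_OPENMP)
   use omp_lib
#endif
   implicit none
#if defined (_OPENMP)
   integer (kind=omp_sched_kind) :: ompsk
   integer :: ompcs
   call omp_get_schedule(ompsk, ompcs)
   print *, 'omp schedule kind, chunk =', ompsk, ompcs
#else
   print *, 'compiled without OpenMP'
#endif
end program omp_schedule_sketch
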
where (work == PIO_FILL_DOUBLE) work = c0 +!#endif if (present(field_loc)) then do n=1,ndim3 call ice_HaloUpdate (work(:,:,n,:), halo_info, & @@ -764,6 +769,10 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & ! elseif (ndim3 == 1) then elseif (ndim3 == 1 .and. ndims == 2) then call pio_read_darray(File, vardesc, iodesc2d, work, status) +!#ifndef CESM1_PIO +!! This only works for PIO2 +! where (work == PIO_FILL_DOUBLE) work = c0 +!#endif if (present(field_loc)) then call ice_HaloUpdate (work(:,:,1,:), halo_info, & field_loc, field_type) diff --git a/cicecore/drivers/direct/hadgem3/CICE.F90 b/cicecore/drivers/direct/hadgem3/CICE.F90 index b2314240c..b0176e801 100644 --- a/cicecore/drivers/direct/hadgem3/CICE.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE.F90 @@ -1,8 +1,8 @@ !======================================================================= -! Copyright (c) 2021, Triad National Security, LLC +! Copyright (c) 2022, Triad National Security, LLC ! All rights reserved. ! -! Copyright 2021. Triad National Security, LLC. This software was +! Copyright 2022. Triad National Security, LLC. This software was ! produced under U.S. Government contract DE-AC52-06NA25396 for Los ! Alamos National Laboratory (LANL), which is operated by Triad ! National Security, LLC for the U.S. Department of Energy. The U.S. diff --git a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 index 365322dde..6ede4411d 100644 --- a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 @@ -287,7 +287,6 @@ subroutine ice_step call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! thermodynamics -!MHRI: CHECK THIS OMP !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks diff --git a/cicecore/drivers/mct/cesm1/CICE_copyright.txt b/cicecore/drivers/mct/cesm1/CICE_copyright.txt index e10da1e77..6eb3c9cca 100644 --- a/cicecore/drivers/mct/cesm1/CICE_copyright.txt +++ b/cicecore/drivers/mct/cesm1/CICE_copyright.txt @@ -1,7 +1,7 @@ -! Copyright (c) 2021, Triad National Security, LLC +! Copyright (c) 2022, Triad National Security, LLC ! All rights reserved. ! -! Copyright 2021. Triad National Security, LLC. This software was +! Copyright 2022. Triad National Security, LLC. This software was ! produced under U.S. Government contract DE-AC52-06NA25396 for Los ! Alamos National Laboratory (LANL), which is operated by Triad ! National Security, LLC for the U.S. Department of Energy. The U.S. diff --git a/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 b/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 index e068a2892..0868ef2fa 100644 --- a/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 +++ b/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 @@ -168,23 +168,28 @@ subroutine ice_prescribed_init(compid, gsmap, dom) prescribed_ice_fill = .false. ! true if pice data fill required ! 
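
! A small sketch, on an assumed 2x2 block, of the land clean-up now
! applied after reading restarts and in set_restore_var: surface
! temperature is zeroed wherever the T-cell land mask is false.
program land_mask_sketch
   implicit none
   integer, parameter :: dbl_kind = kind(1.0d0)
   real (kind=dbl_kind), parameter :: c0 = 0.0_dbl_kind
   real (kind=dbl_kind) :: Tsfcn(2,2)
   logical :: tmask(2,2)
   integer :: i, j

   Tsfcn = -1.8_dbl_kind
   tmask = reshape([.true., .false., .true., .true.], [2,2])

   do j = 1, 2
      do i = 1, 2
         if (.not. tmask(i,j)) Tsfcn(i,j) = c0
      enddo
   enddo

   print *, Tsfcn
end program land_mask_sketch
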
read from input file - call get_fileunit(nu_nml) + if (my_task == master_task) then - open (nu_nml, file=nml_filename, status='old',iostat=nml_error) + write(nu_diag,*) subname,' Reading ice_prescribed_nml' + + call get_fileunit(nu_nml) + open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - nml_error = -1 - else - nml_error = 1 + call abort_ice(subname//'ERROR: ice_prescribed_nml open file '// & + trim(nml_filename), & + file=__FILE__, line=__LINE__) endif + + nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=ice_prescribed_nml,iostat=nml_error) end do - if (nml_error == 0) close(nu_nml) - endif - call release_fileunit(nu_nml) - call broadcast_scalar(nml_error,master_task) - if (nml_error /= 0) then - call abort_ice (subname//' ERROR: Namelist read error in ice_prescribed_mod') + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: ice_prescribed_nml reading ', & + file=__FILE__, line=__LINE__) + endif + close(nu_nml) + call release_fileunit(nu_nml) endif call broadcast_scalar(prescribed_ice,master_task) diff --git a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 index cfca994c3..338b25050 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 @@ -8,6 +8,7 @@ module CICE_InitMod use icepack_intfc, only: icepack_aggregate use icepack_intfc, only: icepack_init_itd, icepack_init_itd_hist use icepack_intfc, only: icepack_init_fsd_bounds, icepack_init_wave + use icepack_intfc, only: icepack_init_snow use icepack_intfc, only: icepack_configure use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_query_parameters, icepack_query_tracer_flags @@ -83,7 +84,7 @@ subroutine cice_init2() use ice_dyn_vp , only: init_vp use ice_flux , only: init_coupler_flux, init_history_therm use ice_flux , only: init_history_dyn, init_flux_atm, init_flux_ocn - use ice_forcing , only: init_forcing_ocn + use ice_forcing , only: init_snowtable use ice_forcing_bgc , only: get_forcing_bgc, get_atm_bgc use ice_forcing_bgc , only: faero_default, faero_optics, alloc_forcing_bgc, fiso_default use ice_history , only: init_hist, accum_hist @@ -95,7 +96,8 @@ subroutine cice_init2() use ice_transport_driver , only: init_transport logical(kind=log_kind) :: tr_aero, tr_zaero, skl_bgc, z_tracers - logical(kind=log_kind) :: tr_iso, tr_fsd, wave_spec + logical(kind=log_kind) :: tr_iso, tr_fsd, wave_spec, tr_snow + character(len=char_len) :: snw_aging_table character(len=*), parameter :: subname = '(cice_init2)' !---------------------------------------------------- @@ -137,15 +139,12 @@ subroutine cice_init2() call calendar() ! determine the initial date - !TODO: - why is this being called when you are using CMEPS? - call init_forcing_ocn(dt) ! initialize sss and sst from data - call init_state ! initialize the ice state call init_transport ! initialize horizontal transport call ice_HaloRestore_init ! restored boundary conditions call icepack_query_parameters(skl_bgc_out=skl_bgc, z_tracers_out=z_tracers, & - wave_spec_out=wave_spec) + wave_spec_out=wave_spec, snw_aging_table_out=snw_aging_table) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(trim(subname), & file=__FILE__,line= __LINE__) @@ -158,7 +157,7 @@ subroutine cice_init2() call init_history_dyn ! 
initialize dynamic history variables call icepack_query_tracer_flags(tr_aero_out=tr_aero, tr_zaero_out=tr_zaero) - call icepack_query_tracer_flags(tr_iso_out=tr_iso) + call icepack_query_tracer_flags(tr_iso_out=tr_iso, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(trim(subname), & file=__FILE__,line= __LINE__) @@ -167,6 +166,17 @@ subroutine cice_init2() call faero_optics !initialize aerosol optical property tables end if + ! snow aging lookup table initialization + if (tr_snow) then ! advanced snow physics + call icepack_init_snow() + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + if (snw_aging_table(1:4) /= 'test') then + call init_snowtable() + endif + endif + ! Initialize shortwave components using swdn from previous timestep ! if restarting. These components will be scaled to current forcing ! in prep_radiation. @@ -199,12 +209,12 @@ subroutine init_restart() use ice_calendar, only: calendar use ice_constants, only: c0 use ice_domain, only: nblocks - use ice_domain_size, only: ncat, n_iso, n_aero, nfsd + use ice_domain_size, only: ncat, n_iso, n_aero, nfsd, nslyr use ice_dyn_eap, only: read_restart_eap use ice_dyn_shared, only: kdyn use ice_grid, only: tmask use ice_init, only: ice_ic - use ice_init_column, only: init_age, init_FY, init_lvl, & + use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & @@ -212,6 +222,7 @@ subroutine init_restart() restart_pond_cesm, read_restart_pond_cesm, & restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & + restart_snow, read_restart_snow, & restart_fsd, read_restart_fsd, & restart_iso, read_restart_iso, & restart_aero, read_restart_aero, & @@ -226,12 +237,13 @@ subroutine init_restart() iblk ! block index logical(kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & - tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, & + tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, tr_snow, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & ntrcr integer(kind=int_kind) :: & nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, & + nt_smice, nt_smliq, nt_rhos, nt_rsnw, & nt_iage, nt_FY, nt_aero, nt_fsd, nt_isosno, nt_isoice character(len=*), parameter :: subname = '(init_restart)' @@ -247,10 +259,12 @@ subroutine init_restart() call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & - tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) + tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd, nt_ipnd_out=nt_ipnd, & nt_iage_out=nt_iage, nt_FY_out=nt_FY, nt_aero_out=nt_aero, nt_fsd_out=nt_fsd, & + nt_smice_out=nt_smice, nt_smliq_out=nt_smliq, & + nt_rhos_out=nt_rhos, nt_rsnw_out=nt_rsnw, & nt_isosno_out=nt_isosno, nt_isoice_out=nt_isoice) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -347,6 +361,21 @@ subroutine init_restart() enddo ! iblk endif ! .not. restart_pond endif + ! 
snow redistribution/metamorphism + if (tr_snow) then + if (trim(runtype) == 'continue') restart_snow = .true. + if (restart_snow) then + call read_restart_snow + else + do iblk = 1, nblocks + call init_snowtracers(trcrn(:,:,nt_smice:nt_smice+nslyr-1,:,iblk), & + trcrn(:,:,nt_smliq:nt_smliq+nslyr-1,:,iblk), & + trcrn(:,:,nt_rhos :nt_rhos +nslyr-1,:,iblk), & + trcrn(:,:,nt_rsnw :nt_rsnw +nslyr-1,:,iblk)) + enddo ! iblk + endif + endif + ! floe size distribution if (tr_fsd) then if (trim(runtype) == 'continue') restart_fsd = .true. @@ -356,7 +385,6 @@ subroutine init_restart() call init_fsd(trcrn(:,:,nt_fsd:nt_fsd+nfsd-1,:,:)) endif endif - ! isotopes if (tr_iso) then if (trim(runtype) == 'continue') restart_iso = .true. @@ -441,7 +469,6 @@ subroutine init_restart() call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - end subroutine init_restart !======================================================================= diff --git a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 index d4b100518..6f145ab0e 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 @@ -56,9 +56,9 @@ subroutine CICE_Run tr_iso, tr_aero, tr_zaero, skl_bgc, z_tracers, wave_spec, tr_fsd character(len=*), parameter :: subname = '(CICE_Run)' - !-------------------------------------------------------------------- - ! initialize error code and step timer - !-------------------------------------------------------------------- + !-------------------------------------------------------------------- + ! initialize error code and step timer + !-------------------------------------------------------------------- call ice_timer_start(timer_step) ! start timing entire run @@ -73,13 +73,13 @@ subroutine CICE_Run if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - !-------------------------------------------------------------------- - ! timestep loop - !-------------------------------------------------------------------- + !-------------------------------------------------------------------- + ! timestep loop + !-------------------------------------------------------------------- call ice_timer_start(timer_couple) ! atm/ocn coupling - call advance_timestep() ! advance timestep and update calendar data + call advance_timestep() ! advance timestep and update calendar data if (z_tracers) call get_atm_bgc ! biogeochemistry @@ -90,9 +90,9 @@ subroutine CICE_Run call ice_step - !-------------------------------------------------------------------- - ! end of timestep loop - !-------------------------------------------------------------------- + !-------------------------------------------------------------------- + ! end of timestep loop + !-------------------------------------------------------------------- call ice_timer_stop(timer_step) ! 
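
! A sketch, with made-up index values, of how the layered snow tracers
! are addressed in the init_snowtracers call above: each field occupies
! a contiguous slice of the flat tracer dimension starting at its nt_*
! index.
program tracer_slice_sketch
   implicit none
   integer, parameter :: nslyr = 3, ntrcr = 16
   integer :: nt_smice, nt_smliq, nt_rhos, nt_rsnw
   real :: trcrn(ntrcr)

   nt_smice = 5                  ! assumed positions in the tracer vector
   nt_smliq = nt_smice + nslyr
   nt_rhos  = nt_smliq + nslyr
   nt_rsnw  = nt_rhos  + nslyr

   trcrn = 0.0
   trcrn(nt_smice:nt_smice+nslyr-1) = 0.9    ! snow ice mass fraction per layer
   trcrn(nt_smliq:nt_smliq+nslyr-1) = 0.1    ! snow liquid fraction per layer
   trcrn(nt_rhos:nt_rhos+nslyr-1)   = 330.0  ! snow density (kg/m^3)
   trcrn(nt_rsnw:nt_rsnw+nslyr-1)   = 100.0  ! grain radius (microns)

   print *, trcrn
end program tracer_slice_sketch
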
end timestepping loop timer @@ -110,7 +110,7 @@ subroutine ice_step use ice_boundary, only: ice_HaloUpdate use ice_calendar, only: dt, dt_dyn, ndtd, diagfreq, write_restart, istep use ice_calendar, only: idate, msec - use ice_diagnostics, only: init_mass_diags, runtime_diags + use ice_diagnostics, only: init_mass_diags, runtime_diags, debug_model, debug_ice use ice_diagnostics_bgc, only: hbrine_diags, zsal_diags, bgc_diags use ice_domain, only: halo_info, nblocks use ice_dyn_eap, only: write_restart_eap @@ -123,12 +123,13 @@ subroutine ice_step use ice_restart_column, only: write_restart_age, write_restart_FY, & write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & write_restart_pond_topo, write_restart_aero, write_restart_fsd, & - write_restart_iso, write_restart_bgc, write_restart_hbrine + write_restart_iso, write_restart_bgc, write_restart_hbrine, & + write_restart_snow use ice_restart_driver, only: dumpfile use ice_restoring, only: restore_ice, ice_HaloRestore use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, step_prep, step_dyn_wave + biogeochemistry, save_init, step_dyn_wave, step_snow use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -144,19 +145,28 @@ subroutine ice_step offset ! d(age)/dt time offset logical (kind=log_kind) :: & - tr_iage, tr_FY, tr_lvl, tr_fsd, & + tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' + character (len=char_len) :: plabeld + + if (debug_model) then + plabeld = 'beginning time step' + do iblk = 1, nblocks + call debug_ice (iblk, plabeld) + enddo + endif + call icepack_query_parameters(calc_Tsfc_out=calc_Tsfc, skl_bgc_out=skl_bgc, & solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & - tr_iso_out=tr_iso, tr_fsd_out=tr_fsd) + tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) @@ -201,15 +211,33 @@ subroutine ice_step !----------------------------------------------------------------- if (calc_Tsfc) call prep_radiation (iblk) + if (debug_model) then + plabeld = 'post prep_radiation' + call debug_ice (iblk, plabeld) + endif !----------------------------------------------------------------- ! thermodynamics and biogeochemistry !----------------------------------------------------------------- call step_therm1 (dt, iblk) ! vertical thermodynamics + if (debug_model) then + plabeld = 'post step_therm1' + call debug_ice (iblk, plabeld) + endif + call biogeochemistry (dt, iblk) ! biogeochemistry + if (debug_model) then + plabeld = 'post biogeochemistry' + call debug_ice (iblk, plabeld) + endif + if (.not.prescribed_ice) & call step_therm2 (dt, iblk) ! ice thickness distribution thermo + if (debug_model) then + plabeld = 'post step_therm2' + call debug_ice (iblk, plabeld) + endif endif ! ktherm > 0 @@ -237,6 +265,12 @@ subroutine ice_step ! 
momentum, stress, transport call step_dyn_horiz (dt_dyn) + if (debug_model) then + plabeld = 'post step_dyn_horiz' + do iblk = 1, nblocks + call debug_ice (iblk, plabeld) + enddo ! iblk + endif ! ridging !$OMP PARALLEL DO PRIVATE(iblk) @@ -244,12 +278,24 @@ subroutine ice_step if (kridge > 0) call step_dyn_ridge (dt_dyn, ndtd, iblk) enddo !$OMP END PARALLEL DO + if (debug_model) then + plabeld = 'post step_dyn_ridge' + do iblk = 1, nblocks + call debug_ice (iblk, plabeld) + enddo ! iblk + endif ! clean up, update tendency diagnostics offset = c0 call update_state (dt_dyn, daidtd, dvidtd, dagedtd, offset) enddo + if (debug_model) then + plabeld = 'post dynamics' + do iblk = 1, nblocks + call debug_ice (iblk, plabeld) + enddo + endif endif ! not prescribed ice @@ -260,18 +306,36 @@ subroutine ice_step call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! thermodynamics + !----------------------------------------------------------------- + ! snow redistribution and metamorphosis + !----------------------------------------------------------------- + + if (tr_snow) then ! advanced snow physics + do iblk = 1, nblocks + call step_snow (dt, iblk) + enddo + call update_state (dt) ! clean up + endif + !MHRI: CHECK THIS OMP !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks if (ktherm >= 0) call step_radiation (dt, iblk) + if (debug_model) then + plabeld = 'post step_radiation' + call debug_ice (iblk, plabeld) + endif !----------------------------------------------------------------- ! get ready for coupling and the next time step !----------------------------------------------------------------- call coupling_prep (iblk) - + if (debug_model) then + plabeld = 'post coupling_prep' + call debug_ice (iblk, plabeld) + endif enddo ! iblk !$OMP END PARALLEL DO @@ -309,6 +373,7 @@ subroutine ice_step if (tr_pond_cesm) call write_restart_pond_cesm if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo + if (tr_snow) call write_restart_snow if (tr_fsd) call write_restart_fsd if (tr_iso) call write_restart_iso if (tr_aero) call write_restart_aero @@ -634,11 +699,12 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & real (kind=dbl_kind) :: & puny, & ! + Lsub, & ! rLsub ! 1/Lsub character(len=*), parameter :: subname = '(sfcflux_to_ocn)' - call icepack_query_parameters(puny_out=puny) + call icepack_query_parameters(puny_out=puny, Lsub_out=Lsub) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) diff --git a/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt b/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt index e10da1e77..6eb3c9cca 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt +++ b/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt @@ -1,7 +1,7 @@ -! Copyright (c) 2021, Triad National Security, LLC +! Copyright (c) 2022, Triad National Security, LLC ! All rights reserved. ! -! Copyright 2021. Triad National Security, LLC. This software was +! Copyright 2022. Triad National Security, LLC. This software was ! produced under U.S. Government contract DE-AC52-06NA25396 for Los ! Alamos National Laboratory (LANL), which is operated by Triad ! National Security, LLC for the U.S. Department of Energy. The U.S. 
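The debug_model additions above all repeat one idiom: set plabeld to name the step just finished, then call debug_ice for each local block so point diagnostics are printed at that checkpoint. A minimal sketch of the idiom, assuming only the debug_ice and nblocks interfaces imported above (the wrapper routine itself is illustrative and not part of the patch):

    subroutine debug_checkpoint(label)
       ! Illustrative wrapper for the inline debug_model blocks above;
       ! not part of the patch.
       use ice_diagnostics, only: debug_model, debug_ice
       use ice_domain, only: nblocks
       character(len=*), intent(in) :: label
       integer :: iblk
       if (debug_model) then
          do iblk = 1, nblocks
             call debug_ice(iblk, label)   ! print watch-point state per block
          enddo
       endif
    end subroutine debug_checkpoint

The patch keeps these blocks inline rather than wrapped so that, inside the threaded block loops, each call can reuse the loop's own iblk index instead of sweeping all blocks.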
diff --git a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 index a832e7bdf..a9d71e479 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 @@ -88,6 +88,7 @@ module ice_comp_nuopc integer :: nthrds ! Number of threads to use in this component integer :: dbug = 0 + logical :: profile_memory = .false. integer , parameter :: debug_import = 0 ! internal debug level integer , parameter :: debug_export = 0 ! internal debug level character(*), parameter :: modName = "(ice_comp_nuopc)" @@ -157,6 +158,10 @@ subroutine InitializeP0(gcomp, importState, exportState, clock, rc) type(ESMF_State) :: importState, exportState type(ESMF_Clock) :: clock integer, intent(out) :: rc + + logical :: isPresent, isSet + character(len=64) :: value + character(len=char_len_long) :: logmsg !-------------------------------- rc = ESMF_SUCCESS @@ -166,6 +171,14 @@ subroutine InitializeP0(gcomp, importState, exportState, clock, rc) acceptStringList=(/"IPDv01p"/), rc=rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return + profile_memory = .false. + call NUOPC_CompAttributeGet(gcomp, name="ProfileMemory", value=value, & + isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) profile_memory=(trim(value)=="true") + write(logmsg,*) profile_memory + call ESMF_LogWrite('CICE_cap:ProfileMemory = '//trim(logmsg), ESMF_LOGMSG_INFO) + end subroutine InitializeP0 !=============================================================================== @@ -224,6 +237,10 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) integer :: ilo, ihi, jlo, jhi ! beginning and end of physical domain character(len=char_len_long) :: diag_filename = 'unset' character(len=char_len_long) :: logmsg + character(len=char_len_long) :: single_column_lnd_domainfile + real(dbl_kind) :: scol_lon + real(dbl_kind) :: scol_lat + real(dbl_kind) :: scol_spval character(len=*), parameter :: subname=trim(modName)//':(InitializeAdvertise) ' !-------------------------------- @@ -363,8 +380,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) depressT_in = 0.054_dbl_kind, & Tocnfrz_in = -34.0_dbl_kind*0.054_dbl_kind, & pi_in = SHR_CONST_PI, & - snowpatch_in = 0.005_dbl_kind, & - dragio_in = 0.00536_dbl_kind) + snowpatch_in = 0.005_dbl_kind) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -493,12 +509,67 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) ! First cice initialization phase - before initializing grid info !---------------------------------------------------------------------------- +#ifdef CESMCOUPLED + ! Determine if single column + + call NUOPC_CompAttributeGet(gcomp, name='scol_lon', value=cvalue, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + read(cvalue,*) scmlon + call NUOPC_CompAttributeGet(gcomp, name='scol_lat', value=cvalue, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + read(cvalue,*) scmlat + call NUOPC_CompAttributeGet(gcomp, name='scol_spval', value=cvalue, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + read(cvalue,*) scol_spval + + if (scmlon > scol_spval .and. scmlat > scol_spval) then + call NUOPC_CompAttributeGet(gcomp, name='single_column_lnd_domainfile', & + value=single_column_lnd_domainfile, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (trim(single_column_lnd_domainfile) /= 'UNSET') then + single_column = .true. 
+      else
+         call abort_ice('single_column_domainfile cannot be null for single column mode')
+      end if
+      call NUOPC_CompAttributeGet(gcomp, name='scol_ocnmask', value=cvalue, rc=rc)
+      if (chkerr(rc,__LINE__,u_FILE_u)) return
+      read(cvalue,*) scol_mask
+      call NUOPC_CompAttributeGet(gcomp, name='scol_ocnfrac', value=cvalue, rc=rc)
+      if (chkerr(rc,__LINE__,u_FILE_u)) return
+      read(cvalue,*) scol_frac
+      call NUOPC_CompAttributeGet(gcomp, name='scol_ni', value=cvalue, rc=rc)
+      if (chkerr(rc,__LINE__,u_FILE_u)) return
+      read(cvalue,*) scol_ni
+      call NUOPC_CompAttributeGet(gcomp, name='scol_nj', value=cvalue, rc=rc)
+      if (chkerr(rc,__LINE__,u_FILE_u)) return
+      read(cvalue,*) scol_nj
+
+      call ice_mesh_create_scolumn(scmlon, scmlat, ice_mesh, rc)
+      if (ChkErr(rc,__LINE__,u_FILE_u)) return
+
+      scol_valid = (scol_mask == 1)
+      if (.not. scol_valid) then
+         ! Advertise fields
+         call ice_advertise_fields(gcomp, importState, exportState, flds_scalar_name, rc)
+         if (ChkErr(rc,__LINE__,u_FILE_u)) return
+
+         call t_stopf ('cice_init_total')
+
+         ! *******************
+         ! *** RETURN HERE ***
+         ! *******************
+         RETURN
+      end if
+   end if
+   ! Read the cice namelist as part of the call to cice_init1
+   ! Note that if single_column is true and scol_valid is not - will never get here
+
    call t_startf ('cice_init1')
    call cice_init1
    call t_stopf ('cice_init1')
 
-#ifdef CESMCOUPLED
    ! Form of ocean freezing temperature
    ! 'minus1p8' = -1.8 C
    ! 'linear_salt' = -depressT * sss
@@ -546,13 +617,20 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc)
            ' must be the same as natmiter from cice namelist ',natmiter
          call abort_ice(trim(errmsg))
       endif
+
+#else
+
+   ! Read the cice namelist as part of the call to cice_init1
+   call t_startf ('cice_init1')
+   call cice_init1
+   call t_stopf ('cice_init1')
+
 #endif
+
    !----------------------------------------------------------------------------
    ! Initialize grid info
    !----------------------------------------------------------------------------
 
-   ! Initialize cice mesh and mask if appropriate
-
    if (single_column .and. scol_valid) then
       call ice_mesh_init_tlon_tlat_area_hm()
    else
@@ -737,82 +815,43 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc)
 
    if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO)
 
 #ifdef CESMCOUPLED
-   call NUOPC_CompAttributeGet(gcomp, name='scol_lon', value=cvalue, rc=rc)
-   if (ChkErr(rc,__LINE__,u_FILE_u)) return
-   read(cvalue,*) scmlon
-   call NUOPC_CompAttributeGet(gcomp, name='scol_lat', value=cvalue, rc=rc)
-   if (ChkErr(rc,__LINE__,u_FILE_u)) return
-   read(cvalue,*) scmlat
-   call NUOPC_CompAttributeGet(gcomp, name='scol_spval', value=cvalue, rc=rc)
-   if (ChkErr(rc,__LINE__,u_FILE_u)) return
-   read(cvalue,*) scol_spval
-
-   if (scmlon > scol_spval .and. scmlat > scol_spval) then
-      call NUOPC_CompAttributeGet(gcomp, name='single_column_lnd_domainfile', &
-           value=single_column_lnd_domainfile, rc=rc)
+   ! if single column is not valid - set all export state fields to zero and return
+   if (single_column .and. .not. scol_valid) then
+      write(nu_diag,'(a)')' (ice_comp_nuopc) single column mode point does not contain any ocn/ice '&
+           //' - setting all export data to 0'
+      call ice_realize_fields(gcomp, mesh=ice_mesh, &
+           flds_scalar_name=flds_scalar_name, flds_scalar_num=flds_scalar_num, rc=rc)
      if (ChkErr(rc,__LINE__,u_FILE_u)) return
-      if (trim(single_column_lnd_domainfile) /= 'UNSET') then
-         single_column = .true.
- else - call abort_ice('single_column_domainfile cannot be null for single column mode') - end if - call NUOPC_CompAttributeGet(gcomp, name='scol_ocnmask', value=cvalue, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) scol_mask - call NUOPC_CompAttributeGet(gcomp, name='scol_ocnfrac', value=cvalue, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) scol_frac - call NUOPC_CompAttributeGet(gcomp, name='scol_ni', value=cvalue, rc=rc) + call ESMF_StateGet(exportState, itemCount=fieldCount, rc=rc) if (chkerr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) scol_ni - call NUOPC_CompAttributeGet(gcomp, name='scol_nj', value=cvalue, rc=rc) + allocate(lfieldnamelist(fieldCount)) + call ESMF_StateGet(exportState, itemNameList=lfieldnamelist, rc=rc) if (chkerr(rc,__LINE__,u_FILE_u)) return - read(cvalue,*) scol_nj - - call ice_mesh_create_scolumn(scmlon, scmlat, ice_mesh, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - scol_valid = (scol_mask == 1) - if (.not. scol_valid) then - ! if single column is not valid - set all export state fields to zero and return - write(nu_diag,'(a)')' (ice_comp_nuopc) single column mode point does not contain any ocn/ice '& - //' - setting all export data to 0' - call ice_realize_fields(gcomp, mesh=ice_mesh, & - flds_scalar_name=flds_scalar_name, flds_scalar_num=flds_scalar_num, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - call ESMF_StateGet(exportState, itemCount=fieldCount, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - allocate(lfieldnamelist(fieldCount)) - call ESMF_StateGet(exportState, itemNameList=lfieldnamelist, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - do n = 1, fieldCount - if (trim(lfieldnamelist(n)) /= flds_scalar_name) then - call ESMF_StateGet(exportState, itemName=trim(lfieldnamelist(n)), field=lfield, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - call ESMF_FieldGet(lfield, rank=rank, rc=rc) - if (chkerr(rc,__LINE__,u_FILE_u)) return - if (rank == 2) then - call ESMF_FieldGet(lfield, farrayPtr=fldptr2d, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - fldptr2d(:,:) = 0._dbl_kind - else - call ESMF_FieldGet(lfield, farrayPtr=fldptr1d, rc=rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - fldptr1d(:) = 0._dbl_kind - end if + do n = 1, fieldCount + if (trim(lfieldnamelist(n)) /= flds_scalar_name) then + call ESMF_StateGet(exportState, itemName=trim(lfieldnamelist(n)), field=lfield, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + call ESMF_FieldGet(lfield, rank=rank, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + if (rank == 2) then + call ESMF_FieldGet(lfield, farrayPtr=fldptr2d, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + fldptr2d(:,:) = 0._dbl_kind + else + call ESMF_FieldGet(lfield, farrayPtr=fldptr1d, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + fldptr1d(:) = 0._dbl_kind end if - enddo - deallocate(lfieldnamelist) - ! ******************* - ! *** RETURN HERE *** - ! ******************* - RETURN - else - write(nu_diag,'(a,3(f10.5,2x))')' (ice_comp_nuopc) single column mode lon/lat/frac is ',& - scmlon,scmlat,scol_frac - end if + end if + enddo + deallocate(lfieldnamelist) + ! ******************* + ! *** RETURN HERE *** + ! ******************* + RETURN else - single_column = .false. 
+ write(nu_diag,'(a,3(f10.5,2x))')' (ice_comp_nuopc) single column mode lon/lat/frac is ',& + scmlon,scmlat,scol_frac end if #endif @@ -902,6 +941,16 @@ subroutine ModelAdvance(gcomp, rc) !-------------------------------- rc = ESMF_SUCCESS + + call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) + + if (single_column .and. .not. scol_valid) then + ! ******************* + ! *** RETURN HERE *** + ! ******************* + RETURN + end if + if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) ! query the Component for its clock, importState and exportState @@ -1049,7 +1098,9 @@ subroutine ModelAdvance(gcomp, rc) ! Advance cice and timestep update !-------------------------------- + if(profile_memory) call ESMF_VMLogMemInfo("Entering CICE_Run : ") call CICE_Run() + if(profile_memory) call ESMF_VMLogMemInfo("Leaving CICE_Run : ") !-------------------------------- ! Create export state diff --git a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 index 0f7f1ebd4..dbdf5c07d 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 @@ -131,7 +131,9 @@ subroutine ice_advertise_fields(gcomp, importState, exportState, flds_scalar_nam write(nu_diag,*)'send_i2x_per_cat = ',send_i2x_per_cat end if if (.not.send_i2x_per_cat) then - deallocate(fswthrun_ai) + if (allocated(fswthrun_ai)) then + deallocate(fswthrun_ai) + end if end if ! Determine if the following attributes are sent by the driver and if so read them in @@ -583,7 +585,7 @@ subroutine ice_import( importState, rc ) rhoa(i,j,iblk) = inst_pres_height_lowest / & (287.058_ESMF_KIND_R8*(1._ESMF_KIND_R8+0.608_ESMF_KIND_R8*Qa(i,j,iblk))*Tair(i,j,iblk)) else - rhoa(i,j,iblk) = 0._ESMF_KIND_R8 + rhoa(i,j,iblk) = 1.2_ESMF_KIND_R8 endif end do !i end do !j diff --git a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 index a0d18c5fd..e7fb5f632 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 @@ -639,16 +639,13 @@ subroutine ice_mesh_check(gcomp, ice_mesh, rc) diff_lon = abs(mod(lonMesh(n) - tmplon,360.0)) if (diff_lon > eps_imesh ) then write(6,100)n,lonMesh(n),tmplon, diff_lon - call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) + !call abort_ice(error_message=subname, file=__FILE__, line=__LINE__) end if diff_lat = abs(latMesh(n) - lat(n)) if (diff_lat > eps_imesh) then write(6,101)n,latMesh(n),lat(n), diff_lat - call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) + !call abort_ice(error_message=subname, file=__FILE__, line=__LINE__) end if - enddo enddo enddo diff --git a/cicecore/drivers/nuopc/dmi/CICE.F90 b/cicecore/drivers/nuopc/dmi/CICE.F90 index 7056e0e5b..d6a28c3ba 100644 --- a/cicecore/drivers/nuopc/dmi/CICE.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE.F90 @@ -1,8 +1,8 @@ !======================================================================= -! Copyright (c) 2021, Triad National Security, LLC +! Copyright (c) 2022, Triad National Security, LLC ! All rights reserved. ! -! Copyright 2021. Triad National Security, LLC. This software was +! Copyright 2022. Triad National Security, LLC. This software was ! produced under U.S. Government contract DE-AC52-06NA25396 for Los ! Alamos National Laboratory (LANL), which is operated by Triad ! National Security, LLC for the U.S. Department of Energy. The U.S. 
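One behavioral change above is easy to miss: ice_import now falls back to a typical surface air density of 1.2 kg/m3 rather than zero when no valid lowest-level pressure is supplied, while the normal path stays the ideal-gas law with a virtual-temperature correction, rhoa = p / (Rd * (1 + 0.608*Qa) * Tair). A self-contained sketch of that arithmetic (the sample inputs and the guard condition are illustrative, not from the patch):

    program rhoa_demo
       implicit none
       real(kind=8), parameter :: rdair = 287.058d0   ! dry-air gas constant (J/kg/K)
       real(kind=8) :: p, qa, tair, rhoa
       p    = 101325.0d0        ! lowest-level pressure (Pa)
       qa   = 0.005d0           ! specific humidity (kg/kg)
       tair = 263.0d0           ! air temperature (K)
       if (p > 0.0d0 .and. tair > 0.0d0) then
          rhoa = p / (rdair*(1.0d0 + 0.608d0*qa)*tair)   ! ideal gas, virtual T
       else
          rhoa = 1.2d0          ! the patch's new fallback density (kg/m3)
       endif
       print *, 'rhoa =', rhoa  ! about 1.34 kg/m3 for these inputs
    end program rhoa_demo

A zero density would zero any turbulent flux scaled by rhoa downstream; a nominal 1.2 kg/m3 keeps those fluxes physically sized even when the import field is missing.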
diff --git a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 index 7da73db1d..1aaee77f4 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 @@ -496,7 +496,7 @@ subroutine coupling_prep (iblk) enddo enddo - call ice_timer_start(timer_couple) ! atm/ocn coupling + call ice_timer_start(timer_couple,iblk) ! atm/ocn coupling if (oceanmixed_ice) & call ocean_mixed_layer (dt,iblk) ! ocean surface fluxes and sst @@ -663,7 +663,7 @@ subroutine coupling_prep (iblk) endif !echmod #endif - call ice_timer_stop(timer_couple) ! atm/ocn coupling + call ice_timer_stop(timer_couple,iblk) ! atm/ocn coupling end subroutine coupling_prep diff --git a/cicecore/drivers/standalone/cice/CICE.F90 b/cicecore/drivers/standalone/cice/CICE.F90 index 7056e0e5b..d6a28c3ba 100644 --- a/cicecore/drivers/standalone/cice/CICE.F90 +++ b/cicecore/drivers/standalone/cice/CICE.F90 @@ -1,8 +1,8 @@ !======================================================================= -! Copyright (c) 2021, Triad National Security, LLC +! Copyright (c) 2022, Triad National Security, LLC ! All rights reserved. ! -! Copyright 2021. Triad National Security, LLC. This software was +! Copyright 2022. Triad National Security, LLC. This software was ! produced under U.S. Government contract DE-AC52-06NA25396 for Los ! Alamos National Laboratory (LANL), which is operated by Triad ! National Security, LLC for the U.S. Department of Energy. The U.S. diff --git a/cicecore/drivers/standalone/cice/CICE_FinalMod.F90 b/cicecore/drivers/standalone/cice/CICE_FinalMod.F90 index a59c210aa..28811c3cd 100644 --- a/cicecore/drivers/standalone/cice/CICE_FinalMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_FinalMod.F90 @@ -31,7 +31,8 @@ module CICE_FinalMod subroutine CICE_Finalize use ice_restart_shared, only: runid - use ice_timers, only: ice_timer_stop, ice_timer_print_all, timer_total + use ice_timers, only: ice_timer_stop, ice_timer_print_all, timer_total, & + timer_stats character(len=*), parameter :: subname = '(CICE_Finalize)' @@ -40,7 +41,7 @@ subroutine CICE_Finalize !------------------------------------------------------------------- call ice_timer_stop(timer_total) ! stop timing entire run - call ice_timer_print_all(stats=.false.) ! print timing information + call ice_timer_print_all(stats=timer_stats) ! print timing information call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & diff --git a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 index 27d61db84..0b4326c0a 100644 --- a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 @@ -218,10 +218,9 @@ subroutine ice_step call step_prep - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - - if (ktherm >= 0) then + if (ktherm >= 0) then + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks !----------------------------------------------------------------- ! scale radiation fields @@ -237,7 +236,7 @@ subroutine ice_step !----------------------------------------------------------------- ! thermodynamics and biogeochemistry !----------------------------------------------------------------- - + call step_therm1 (dt, iblk) ! vertical thermodynamics if (debug_model) then @@ -259,10 +258,9 @@ subroutine ice_step call debug_ice (iblk, plabeld) endif - endif ! ktherm > 0 - - enddo ! 
iblk - !$OMP END PARALLEL DO + enddo + !$OMP END PARALLEL DO + endif ! ktherm > 0 ! clean up, update tendency diagnostics offset = dt @@ -292,7 +290,7 @@ subroutine ice_step endif ! ridging - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks if (kridge > 0) call step_dyn_ridge (dt_dyn, ndtd, iblk) enddo @@ -326,14 +324,15 @@ subroutine ice_step !----------------------------------------------------------------- if (tr_snow) then ! advanced snow physics + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call step_snow (dt, iblk) enddo + !$OMP END PARALLEL DO call update_state (dt) ! clean up endif -!MHRI: CHECK THIS OMP - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks !----------------------------------------------------------------- @@ -405,7 +404,6 @@ subroutine ice_step if (kdyn == 2) call write_restart_eap call final_restart endif - call ice_timer_stop(timer_readwrite) ! reading/writing end subroutine ice_step @@ -488,7 +486,7 @@ subroutine coupling_prep (iblk) enddo enddo - call ice_timer_start(timer_couple) ! atm/ocn coupling + call ice_timer_start(timer_couple,iblk) ! atm/ocn coupling if (oceanmixed_ice) & call ocean_mixed_layer (dt,iblk) ! ocean surface fluxes and sst @@ -655,7 +653,7 @@ subroutine coupling_prep (iblk) endif !echmod #endif - call ice_timer_stop(timer_couple) ! atm/ocn coupling + call ice_timer_stop(timer_couple,iblk) ! atm/ocn coupling end subroutine coupling_prep diff --git a/cicecore/shared/ice_init_column.F90 b/cicecore/shared/ice_init_column.F90 index eff39a464..5643b4277 100644 --- a/cicecore/shared/ice_init_column.F90 +++ b/cicecore/shared/ice_init_column.F90 @@ -1301,27 +1301,28 @@ subroutine input_zbgc ! read from input file !----------------------------------------------------------------- - call get_fileunit(nu_nml) - if (my_task == master_task) then + write(nu_diag,*) subname,' Reading zbgc_nml' + + call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - nml_error = -1 - else - nml_error = 1 - endif + call abort_ice(subname//'ERROR: zbgc_nml open file '// & + trim(nml_filename), & + file=__FILE__, line=__LINE__) + endif - print*,'Reading zbgc_nml' + nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=zbgc_nml,iostat=nml_error) end do - if (nml_error == 0) close(nu_nml) - endif - call broadcast_scalar(nml_error, master_task) - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: reading zbgc namelist') + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: zbgc_nml reading ', & + file=__FILE__, line=__LINE__) + endif + close(nu_nml) + call release_fileunit(nu_nml) endif - call release_fileunit(nu_nml) !----------------------------------------------------------------- ! 
broadcast diff --git a/cicecore/version.txt b/cicecore/version.txt index 04a90ef1a..9e5f9f3e1 100644 --- a/cicecore/version.txt +++ b/cicecore/version.txt @@ -1 +1 @@ -CICE 6.3.0 +CICE 6.3.1 diff --git a/configuration/scripts/cice.batch.csh b/configuration/scripts/cice.batch.csh index 04f397034..7a1334532 100755 --- a/configuration/scripts/cice.batch.csh +++ b/configuration/scripts/cice.batch.csh @@ -91,7 +91,25 @@ cat >> ${jobfile} << EOFB #PBS -l walltime=${batchtime} EOFB -else if (${ICE_MACHINE} =~ gordon* || ${ICE_MACHINE} =~ conrad* || ${ICE_MACHINE} =~ gaffney* || ${ICE_MACHINE} =~ koehr* || ${ICE_MACHINE} =~ mustang) then +else if (${ICE_MACHINE} =~ gaffney* || ${ICE_MACHINE} =~ koehr* || ${ICE_MACHINE} =~ mustang*) then +cat >> ${jobfile} << EOFB +#PBS -N ${shortcase} +#PBS -q ${queue} +#PBS -A ${acct} +#PBS -l select=${nnodes}:ncpus=${maxtpn}:mpiprocs=${taskpernode} +#PBS -l walltime=${batchtime} +#PBS -j oe +#PBS -W umask=022 +###PBS -M username@domain.com +###PBS -m be +EOFB + +else if (${ICE_MACHINE} =~ narwhal*) then +if (${runlength} <= 0) then + set batchtime = "00:29:59" +else + set queue = "standard" +endif cat >> ${jobfile} << EOFB #PBS -N ${shortcase} #PBS -q ${queue} diff --git a/configuration/scripts/cice.launch.csh b/configuration/scripts/cice.launch.csh index 904a0b636..9a557ec44 100755 --- a/configuration/scripts/cice.launch.csh +++ b/configuration/scripts/cice.launch.csh @@ -69,15 +69,8 @@ mpirun -np ${ntasks} ./cice >&! \$ICE_RUNLOG_FILE EOFR endif - -#======= -else if (${ICE_MACHINE} =~ onyx*) then -cat >> ${jobfile} << EOFR -aprun -n ${ntasks} -N ${taskpernodelimit} -d ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE -EOFR - #======= -else if (${ICE_MACHINE} =~ gordon* || ${ICE_MACHINE} =~ conrad*) then +else if (${ICE_MACHINE} =~ onyx* || ${ICE_MACHINE} =~ narwhal) then cat >> ${jobfile} << EOFR aprun -n ${ntasks} -N ${taskpernodelimit} -d ${nthrds} ./cice >&! 
\$ICE_RUNLOG_FILE EOFR diff --git a/configuration/scripts/cice.run.setup.csh b/configuration/scripts/cice.run.setup.csh index aa578b5ca..58c4ebe66 100755 --- a/configuration/scripts/cice.run.setup.csh +++ b/configuration/scripts/cice.run.setup.csh @@ -9,8 +9,6 @@ echo "running cice.run.setup.csh" set jobfile = cice.run set subfile = cice.submit -set nthrds = ${ICE_NTHRDS} - #========================================== # Write the batch code into the job file @@ -43,7 +41,9 @@ set ICE_RUNLOG_FILE = "cice.runlog.\${stamp}" #-------------------------------------------- cd \${ICE_RUNDIR} -setenv OMP_NUM_THREADS ${nthrds} +setenv OMP_NUM_THREADS \${ICE_NTHRDS} +setenv OMP_SCHEDULE "\${ICE_OMPSCHED}" +#setenv OMP_DISPLAY_ENV TRUE cp -f \${ICE_CASEDIR}/ice_in \${ICE_RUNDIR} cp -f \${ICE_CASEDIR}/env.${ICE_MACHCOMP} \${ICE_RUNDIR} diff --git a/configuration/scripts/cice.settings b/configuration/scripts/cice.settings index 1faf2c5be..9b57aab3f 100755 --- a/configuration/scripts/cice.settings +++ b/configuration/scripts/cice.settings @@ -21,6 +21,7 @@ setenv ICE_QUIETMODE false setenv ICE_GRID undefined setenv ICE_NTASKS undefined setenv ICE_NTHRDS undefined +setenv ICE_OMPSCHED "static,1" setenv ICE_TEST undefined setenv ICE_TESTNAME undefined setenv ICE_TESTID undefined @@ -28,6 +29,7 @@ setenv ICE_BASELINE undefined setenv ICE_BASEGEN undefined setenv ICE_BASECOM undefined setenv ICE_BFBCOMP undefined +setenv ICE_BFBTYPE restart setenv ICE_SPVAL undefined setenv ICE_RUNLENGTH -1 setenv ICE_ACCOUNT undefined diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index 11fa9b5ca..095822640 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -38,6 +38,7 @@ debug_forcing = .false. print_global = .true. print_points = .true. + timer_stats = .false. conserv_check = .false. latpnt(1) = 90. lonpnt(1) = 0. 
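The input_zbgc rework a few hunks above replaces the old read-then-broadcast-error-code flow with a master-task read that aborts in place: open the file, loop on the namelist read while iostat stays positive, and abort on any nonzero final status (end-of-file comes back negative if the group is never found). A compact standalone sketch of the same pattern, with an illustrative group name demo_nml standing in for zbgc_nml:

    program nml_read_demo
       implicit none
       integer :: nu_nml, nml_error
       logical :: timer_stats
       namelist /demo_nml/ timer_stats
       timer_stats = .false.
       open (newunit=nu_nml, file='ice_in', status='old', iostat=nml_error)
       if (nml_error /= 0) stop 'ERROR: demo_nml open file'
       nml_error = 1
       do while (nml_error > 0)     ! keep reading past recoverable errors
          read (nu_nml, nml=demo_nml, iostat=nml_error)
       end do
       if (nml_error /= 0) stop 'ERROR: demo_nml reading'
       close (nu_nml)
       print *, 'timer_stats =', timer_stats
    end program nml_read_demo

The demo uses newunit= where the model uses get_fileunit/release_fileunit; the control flow is otherwise the same.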
diff --git a/configuration/scripts/machines/Macros.conrad_cray b/configuration/scripts/machines/Macros.conrad_cray deleted file mode 100644 index 19ddcb8f5..000000000 --- a/configuration/scripts/machines/Macros.conrad_cray +++ /dev/null @@ -1,57 +0,0 @@ -#============================================================================== -# Macros file for NAVYDSRC conrad, cray compiler -#============================================================================== - -CPP := ftn -e P -CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} -CFLAGS := -c -O2 -h fp0 - -FIXEDFLAGS := -132 -FREEFLAGS := -FFLAGS := -h fp0 -h byteswapio -FFLAGS_NOOPT:= -O0 -LDFLAGS := -h byteswapio - -ifeq ($(ICE_BLDDEBUG), true) - FFLAGS += -O0 -g -Rbcdps -# FFLAGS += -O0 -g -Rbcdps -ei -else - FFLAGS += -O2 -endif - -SCC := cc -SFC := ftn -MPICC := cc -MPIFC := ftn - -ifeq ($(ICE_COMMDIR), mpi) - FC := $(MPIFC) - CC := $(MPICC) -else - FC := $(SFC) - CC := $(SCC) -endif -LD:= $(FC) - -# defined by module -#NETCDF_PATH := $(NETCDF) -#PNETCDF_PATH := $(PNETCDF) -#PNETCDF_PATH := /glade/apps/opt/pnetcdf/1.3.0/intel/default -#LAPACK_LIBDIR := /glade/apps/opt/lapack/3.4.2/intel/12.1.5/lib - -#PIO_CONFIG_OPTS:= --enable-filesystem-hints=gpfs - -INCLDIR := $(INCLDIR) -#INCLDIR += -I$(NETCDF_PATH)/include - -#LIB_NETCDF := $(NETCDF_PATH)/lib -#LIB_PNETCDF := $(PNETCDF_PATH)/lib -#LIB_MPI := $(IMPILIBDIR) -#SLIBS := -L$(LIB_NETCDF) -lnetcdf -lnetcdff - -ifeq ($(ICE_THREADED), false) - LDFLAGS += -hnoomp - CFLAGS += -hnoomp - FFLAGS += -hnoomp -endif - diff --git a/configuration/scripts/machines/Macros.conrad_intel b/configuration/scripts/machines/Macros.conrad_intel deleted file mode 100644 index 74a36304d..000000000 --- a/configuration/scripts/machines/Macros.conrad_intel +++ /dev/null @@ -1,56 +0,0 @@ -#============================================================================== -# Macros file for NAVYDSRC conrad, intel compiler -#============================================================================== - -CPP := fpp -CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} -CFLAGS := -c -O2 -fp-model precise -xHost - -FIXEDFLAGS := -132 -FREEFLAGS := -FR -FFLAGS := -fp-model precise -convert big_endian -assume byterecl -ftz -traceback -xHost -FFLAGS_NOOPT:= -O0 - -ifeq ($(ICE_BLDDEBUG), true) - FFLAGS += -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created -# FFLAGS += -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created -init=snan,arrays -else - FFLAGS += -O2 -endif - -SCC := cc -SFC := ftn -MPICC := cc -MPIFC := ftn - -ifeq ($(ICE_COMMDIR), mpi) - FC := $(MPIFC) - CC := $(MPICC) -else - FC := $(SFC) - CC := $(SCC) -endif -LD:= $(FC) - -# defined by module -#NETCDF_PATH := $(NETCDF) -#PNETCDF_PATH := $(PNETCDF) -#PNETCDF_PATH := /glade/apps/opt/pnetcdf/1.3.0/intel/default -#LAPACK_LIBDIR := /glade/apps/opt/lapack/3.4.2/intel/12.1.5/lib - -#PIO_CONFIG_OPTS:= --enable-filesystem-hints=gpfs - -INCLDIR := $(INCLDIR) -#INCLDIR += -I$(NETCDF_PATH)/include - -#LIB_NETCDF := $(NETCDF_PATH)/lib -#LIB_PNETCDF := $(PNETCDF_PATH)/lib -#LIB_MPI := $(IMPILIBDIR) -#SLIBS := -L$(LIB_NETCDF) -lnetcdf -lnetcdff - -ifeq ($(ICE_THREADED), true) - LDFLAGS += -qopenmp - CFLAGS += -qopenmp - FFLAGS += -qopenmp -endif - diff --git a/configuration/scripts/machines/Macros.conrad_pgi b/configuration/scripts/machines/Macros.conrad_pgi deleted file mode 100644 index ef0a25ab4..000000000 --- a/configuration/scripts/machines/Macros.conrad_pgi +++ /dev/null @@ -1,55 +0,0 @@ 
-#============================================================================== -# Macros file for NAVYDSRC conrad, pgi compiler -#============================================================================== - -CPP := pgcc -E -CPPDEFS := -DFORTRANUNDERSCORE -DNO_R16 ${ICE_CPPDEFS} -CFLAGS := -c -O2 -Kieee - -FIXEDFLAGS := -Mextend -FREEFLAGS := -Mfree -FFLAGS := -Kieee -Mbyteswapio -traceback -FFLAGS_NOOPT:= -O0 - -ifeq ($(ICE_BLDDEBUG), true) - FFLAGS += -O0 -g -Mbounds -Mchkptr -else - FFLAGS += -O -g -endif - -SCC := cc -SFC := ftn -MPICC := cc -MPIFC := ftn - -ifeq ($(ICE_COMMDIR), mpi) - FC := $(MPIFC) - CC := $(MPICC) -else - FC := $(SFC) - CC := $(SCC) -endif -LD:= $(FC) - -# defined by module -#NETCDF_PATH := $(NETCDF) -#PNETCDF_PATH := $(PNETCDF) -#PNETCDF_PATH := /glade/apps/opt/pnetcdf/1.3.0/intel/default -#LAPACK_LIBDIR := /glade/apps/opt/lapack/3.4.2/intel/12.1.5/lib - -#PIO_CONFIG_OPTS:= --enable-filesystem-hints=gpfs - -INCLDIR := $(INCLDIR) -#INCLDIR += -I$(NETCDF_PATH)/include - -#LIB_NETCDF := $(NETCDF_PATH)/lib -#LIB_PNETCDF := $(PNETCDF_PATH)/lib -#LIB_MPI := $(IMPILIBDIR) -#SLIBS := -L$(LIB_NETCDF) -lnetcdf -lnetcdff - -ifeq ($(ICE_THREADED), true) - LDFLAGS += -mp - CFLAGS += -mp - FFLAGS += -mp -endif - diff --git a/configuration/scripts/machines/Macros.gordon_cray b/configuration/scripts/machines/Macros.gordon_cray deleted file mode 100644 index 6c5032e0d..000000000 --- a/configuration/scripts/machines/Macros.gordon_cray +++ /dev/null @@ -1,57 +0,0 @@ -#============================================================================== -# Macros file for NAVYDSRC gordon, cray compiler -#============================================================================== - -CPP := ftn -e P -CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} -CFLAGS := -c -O2 -h fp0 - -FIXEDFLAGS := -132 -FREEFLAGS := -FFLAGS := -h fp0 -h byteswapio -FFLAGS_NOOPT:= -O0 -LDFLAGS := -h byteswapio - -ifeq ($(ICE_BLDDEBUG), true) - FFLAGS += -O0 -g -Rbcdps -# FFLAGS += -O0 -g -Rbcdps -ei -else - FFLAGS += -O2 -endif - -SCC := cc -SFC := ftn -MPICC := cc -MPIFC := ftn - -ifeq ($(ICE_COMMDIR), mpi) - FC := $(MPIFC) - CC := $(MPICC) -else - FC := $(SFC) - CC := $(SCC) -endif -LD:= $(FC) - -# defined by module -#NETCDF_PATH := $(NETCDF) -#PNETCDF_PATH := $(PNETCDF) -#PNETCDF_PATH := /glade/apps/opt/pnetcdf/1.3.0/intel/default -#LAPACK_LIBDIR := /glade/apps/opt/lapack/3.4.2/intel/12.1.5/lib - -#PIO_CONFIG_OPTS:= --enable-filesystem-hints=gpfs - -INCLDIR := $(INCLDIR) -#INCLDIR += -I$(NETCDF_PATH)/include - -#LIB_NETCDF := $(NETCDF_PATH)/lib -#LIB_PNETCDF := $(PNETCDF_PATH)/lib -#LIB_MPI := $(IMPILIBDIR) -#SLIBS := -L$(LIB_NETCDF) -lnetcdf -lnetcdff - -ifeq ($(ICE_THREADED), false) - LDFLAGS += -hnoomp - CFLAGS += -hnoomp - FFLAGS += -hnoomp -endif - diff --git a/configuration/scripts/machines/Macros.gordon_pgi b/configuration/scripts/machines/Macros.narwhal_aocc similarity index 70% rename from configuration/scripts/machines/Macros.gordon_pgi rename to configuration/scripts/machines/Macros.narwhal_aocc index 1190f6eca..44b1dc2f6 100644 --- a/configuration/scripts/machines/Macros.gordon_pgi +++ b/configuration/scripts/machines/Macros.narwhal_aocc @@ -1,20 +1,20 @@ #============================================================================== -# Macros file for NAVYDSRC gordon, pgi compiler +# Macros file for NAVYDSRC narwhal, aocc compiler #============================================================================== -CPP := pgcc -Mcpp -CPPDEFS := -DFORTRANUNDERSCORE -DNO_R16 ${ICE_CPPDEFS} -CFLAGS := -c 
-O2 -Kieee +CPP := ftn -E +CPPDEFS := -DNO_R16 -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CFLAGS := -c -O2 -FIXEDFLAGS := -Mextend -FREEFLAGS := -Mfree -FFLAGS := -Kieee -Mbyteswapio -traceback +FIXEDFLAGS := -ffixed-form +FREEFLAGS := -ffree-form +FFLAGS := -byteswapio FFLAGS_NOOPT:= -O0 ifeq ($(ICE_BLDDEBUG), true) - FFLAGS += -O0 -g -Mbounds -Mchkptr + FFLAGS += -O0 -g -fsanitize=integer-divide-by-zero,float-divide-by-zero,bounds else - FFLAGS += -O -g + FFLAGS += -O2 endif SCC := cc @@ -51,5 +51,9 @@ ifeq ($(ICE_THREADED), true) LDFLAGS += -mp CFLAGS += -mp FFLAGS += -mp +else + LDFLAGS += -nomp +# CFLAGS += -nomp + FFLAGS += -nomp endif diff --git a/configuration/scripts/machines/Macros.conrad_gnu b/configuration/scripts/machines/Macros.narwhal_cray similarity index 75% rename from configuration/scripts/machines/Macros.conrad_gnu rename to configuration/scripts/machines/Macros.narwhal_cray index 5459d9b6b..ab0e6378e 100644 --- a/configuration/scripts/machines/Macros.conrad_gnu +++ b/configuration/scripts/machines/Macros.narwhal_cray @@ -1,20 +1,21 @@ #============================================================================== -# Macros file for NAVYDSRC conrad, gnu compiler +# Macros file for NAVYDSRC narwhal, cray compiler #============================================================================== -CPP := ftn -E +CPP := ftn -e P CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} CFLAGS := -c -O2 -FIXEDFLAGS := -ffixed-line-length-132 -FREEFLAGS := -ffree-form -FFLAGS := -fconvert=big-endian -fbacktrace -ffree-line-length-none +FIXEDFLAGS := -132 +FREEFLAGS := +FFLAGS := -hbyteswapio FFLAGS_NOOPT:= -O0 - +LDFLAGS := -hbyteswapio + ifeq ($(ICE_BLDDEBUG), true) - FFLAGS += -O0 -g -fcheck=bounds -finit-real=nan -fimplicit-none -ffpe-trap=invalid,zero,overflow + FFLAGS += -O0 -hfp0 -g -Rbcdps -Ktrap=fp else - FFLAGS += -O2 + FFLAGS += -O2 -hfp0 # -eo endif SCC := cc @@ -51,5 +52,9 @@ ifeq ($(ICE_THREADED), true) LDFLAGS += -fopenmp CFLAGS += -fopenmp FFLAGS += -fopenmp +else + LDFLAGS += -hnoomp +# CFLAGS += -hnoomp + FFLAGS += -hnoomp endif diff --git a/configuration/scripts/machines/Macros.gordon_gnu b/configuration/scripts/machines/Macros.narwhal_gnu similarity index 87% rename from configuration/scripts/machines/Macros.gordon_gnu rename to configuration/scripts/machines/Macros.narwhal_gnu index 8c3e277ab..e980c1e29 100644 --- a/configuration/scripts/machines/Macros.gordon_gnu +++ b/configuration/scripts/machines/Macros.narwhal_gnu @@ -1,5 +1,5 @@ #============================================================================== -# Macros file for NAVYDSRC gordon, gnu compiler +# Macros file for NAVYDSRC narwhal, gnu compiler #============================================================================== CPP := ftn -E @@ -8,12 +8,12 @@ CFLAGS := -c FIXEDFLAGS := -ffixed-line-length-132 FREEFLAGS := -ffree-form -FFLAGS := -fconvert=big-endian -fbacktrace -ffree-line-length-none +FFLAGS := -fconvert=big-endian -fbacktrace -ffree-line-length-none -fallow-argument-mismatch FFLAGS_NOOPT:= -O0 ifeq ($(ICE_BLDDEBUG), true) - FFLAGS += -O0 -g -fcheck=bounds -finit-real=nan -fimplicit-none -ffpe-trap=invalid,zero,overflow - CFLAGS += -O0 + FFLAGS += -O0 -g -fcheck=bounds -finit-real=nan -fimplicit-none -ffpe-trap=invalid,zero,overflow + CFLAGS += -O0 endif ifeq ($(ICE_COVERAGE), true) diff --git a/configuration/scripts/machines/Macros.gordon_intel b/configuration/scripts/machines/Macros.narwhal_intel similarity index 82% rename from configuration/scripts/machines/Macros.gordon_intel rename to 
configuration/scripts/machines/Macros.narwhal_intel index 84659d00a..c7c103b24 100644 --- a/configuration/scripts/machines/Macros.gordon_intel +++ b/configuration/scripts/machines/Macros.narwhal_intel @@ -1,18 +1,20 @@ #============================================================================== -# Macros file for NAVYDSRC gordon, intel compiler +# Macros file for NAVYDSRC narwhal, intel compiler #============================================================================== CPP := fpp CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} -CFLAGS := -c -O2 -fp-model precise -xHost +CFLAGS := -c -O2 -fp-model precise -fcommon FIXEDFLAGS := -132 FREEFLAGS := -FR -FFLAGS := -fp-model precise -convert big_endian -assume byterecl -ftz -traceback -xHost +FFLAGS := -fp-model precise -convert big_endian -assume byterecl -ftz -traceback +# -mcmodel medium -shared-intel FFLAGS_NOOPT:= -O0 ifeq ($(ICE_BLDDEBUG), true) FFLAGS += -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created +# FFLAGS += -O0 -g -check all -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -init=snan,arrays else FFLAGS += -O2 endif diff --git a/configuration/scripts/machines/env.banting_gnu b/configuration/scripts/machines/env.banting_gnu index 0c0a4abce..997816a9d 100755 --- a/configuration/scripts/machines/env.banting_gnu +++ b/configuration/scripts/machines/env.banting_gnu @@ -19,6 +19,9 @@ module load cray-netcdf # NetCDF module load cray-hdf5 # HDF5 setenv HDF5_USE_FILE_LOCKING FALSE # necessary since data is on an NFS filesystem +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + endif setenv ICE_MACHINE_MACHNAME banting diff --git a/configuration/scripts/machines/env.banting_intel b/configuration/scripts/machines/env.banting_intel index ac01e4d72..0beeb2618 100755 --- a/configuration/scripts/machines/env.banting_intel +++ b/configuration/scripts/machines/env.banting_intel @@ -14,6 +14,9 @@ module load cray-netcdf # NetCDF module load cray-hdf5 # HDF5 setenv HDF5_USE_FILE_LOCKING FALSE # necessary since data is on an NFS filesystem +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + endif setenv ICE_MACHINE_MACHNAME banting diff --git a/configuration/scripts/machines/env.cesium_intel b/configuration/scripts/machines/env.cesium_intel index 19209919e..8dabe1645 100755 --- a/configuration/scripts/machines/env.cesium_intel +++ b/configuration/scripts/machines/env.cesium_intel @@ -6,6 +6,9 @@ source /fs/ssm/main/opt/intelcomp/intelcomp-2016.1.156/intelcomp_2016.1.156_mult source $ssmuse -d /fs/ssm/main/opt/openmpi/openmpi-1.6.5/intelcomp-2016.1.156 # openmpi source $ssmuse -d /fs/ssm/hpco/tmp/eccc/201402/04/intel-2016.1.150 # netcdf (and openmpi) +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + setenv ICE_MACHINE_MACHNAME cesium setenv ICE_MACHINE_ENVNAME intel setenv ICE_MACHINE_MAKE colormake-short diff --git a/configuration/scripts/machines/env.cheyenne_gnu b/configuration/scripts/machines/env.cheyenne_gnu index c68a87d5c..f580cc354 100755 --- a/configuration/scripts/machines/env.cheyenne_gnu +++ b/configuration/scripts/machines/env.cheyenne_gnu @@ -29,8 +29,8 @@ if ($ICE_IOTYPE =~ pio*) then endif endif -if ($?ICE_TEST) then -if ($ICE_TEST =~ qcchk*) then +if ($?ICE_BFBTYPE) then +if ($ICE_BFBTYPE =~ qcchk*) then module load python source /glade/u/apps/opt/ncar_pylib/ncar_pylib.csh default endif @@ -40,6 +40,8 @@ endif limit coredumpsize unlimited limit stacksize unlimited +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M setenv 
ICE_MACHINE_MACHNAME cheyenne setenv ICE_MACHINE_MACHINFO "SGI ICE XA Xeon E5-2697V4 Broadwell" diff --git a/configuration/scripts/machines/env.cheyenne_intel b/configuration/scripts/machines/env.cheyenne_intel index d6eeb67ea..ef12df914 100755 --- a/configuration/scripts/machines/env.cheyenne_intel +++ b/configuration/scripts/machines/env.cheyenne_intel @@ -29,8 +29,8 @@ if ($ICE_IOTYPE =~ pio*) then endif endif -if ($?ICE_TEST) then -if ($ICE_TEST =~ qcchk*) then +if ($?ICE_BFBTYPE) then +if ($ICE_BFBTYPE =~ qcchk*) then module load python source /glade/u/apps/opt/ncar_pylib/ncar_pylib.csh default endif @@ -40,6 +40,8 @@ endif limit coredumpsize unlimited limit stacksize unlimited +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M setenv ICE_MACHINE_MACHNAME cheyenne setenv ICE_MACHINE_MACHINFO "SGI ICE XA Xeon E5-2697V4 Broadwell" diff --git a/configuration/scripts/machines/env.cheyenne_pgi b/configuration/scripts/machines/env.cheyenne_pgi index 9c559b90c..cbd486c29 100755 --- a/configuration/scripts/machines/env.cheyenne_pgi +++ b/configuration/scripts/machines/env.cheyenne_pgi @@ -29,8 +29,8 @@ if ($ICE_IOTYPE =~ pio*) then endif endif -if ($?ICE_TEST) then -if ($ICE_TEST =~ qcchk*) then +if ($?ICE_BFBTYPE) then +if ($ICE_BFBTYPE =~ qcchk*) then module load python source /glade/u/apps/opt/ncar_pylib/ncar_pylib.csh default endif @@ -40,6 +40,8 @@ endif limit coredumpsize unlimited limit stacksize unlimited +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M setenv ICE_MACHINE_MACHNAME cheyenne setenv ICE_MACHINE_MACHINFO "SGI ICE XA Xeon E5-2697V4 Broadwell" diff --git a/configuration/scripts/machines/env.compy_intel b/configuration/scripts/machines/env.compy_intel index fe3511aa6..6fc273204 100755 --- a/configuration/scripts/machines/env.compy_intel +++ b/configuration/scripts/machines/env.compy_intel @@ -23,6 +23,9 @@ setenv I_MPI_ADJUST_ALLREDUCE 1 limit coredumpsize unlimited limit stacksize unlimited +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + endif setenv ICE_MACHINE_MACHNAME compy diff --git a/configuration/scripts/machines/env.conda_linux b/configuration/scripts/machines/env.conda_linux index 08cf27724..ae6ea1b79 100755 --- a/configuration/scripts/machines/env.conda_linux +++ b/configuration/scripts/machines/env.conda_linux @@ -24,6 +24,9 @@ if $status then exit 1 endif +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + endif setenv ICE_MACHINE_MACHNAME conda diff --git a/configuration/scripts/machines/env.conda_macos b/configuration/scripts/machines/env.conda_macos index e33eee710..3b3537bf7 100755 --- a/configuration/scripts/machines/env.conda_macos +++ b/configuration/scripts/machines/env.conda_macos @@ -24,6 +24,9 @@ if $status then exit 1 endif +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + endif setenv ICE_MACHINE_MACHNAME conda diff --git a/configuration/scripts/machines/env.conrad_gnu b/configuration/scripts/machines/env.conrad_gnu deleted file mode 100755 index f14ee33a5..000000000 --- a/configuration/scripts/machines/env.conrad_gnu +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/csh -f - -set inp = "undefined" -if ($#argv == 1) then - set inp = $1 -endif - -if ("$inp" != "-nomodules") then - -source /opt/modules/default/init/csh - -module unload PrgEnv-cray -module unload PrgEnv-gnu -module unload PrgEnv-intel -module unload PrgEnv-pgi -module load PrgEnv-gnu/5.2.82 - -module unload gcc -module load gcc/6.3.0 - -module unload cray-mpich -module unload cray-mpich2 -module load cray-mpich/7.5.3 - -module 
unload netcdf -module unload cray-netcdf -module unload cray-hdf5 -module unload cray-hdf5-parallel -module unload cray-netcdf-hdf5parallel -module unload cray-parallel-netcdf -module load cray-netcdf/4.4.1.1 -module load cray-hdf5/1.10.0.1 - -module unload cray-libsci - -module load craype-haswell - -setenv NETCDF_PATH ${NETCDF_DIR} -limit coredumpsize unlimited -limit stacksize unlimited - -endif - -setenv ICE_MACHINE_MACHNAME conrad -setenv ICE_MACHINE_MACHINFO "Cray XC40 Xeon E5-2698v3 Haswell" -setenv ICE_MACHINE_ENVNAME gnu -setenv ICE_MACHINE_ENVINFO "GNU Fortran (GCC) 6.3.0 20161221, mpich 7.5.3, netcdf 4.4.1.1" -setenv ICE_MACHINE_MAKE gmake -setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS -setenv ICE_MACHINE_INPUTDATA /p/work1/RASM_data/cice_consortium -setenv ICE_MACHINE_BASELINE $WORKDIR/CICE_BASELINE -setenv ICE_MACHINE_SUBMIT "qsub " -setenv ICE_MACHINE_ACCT P00000000 -setenv ICE_MACHINE_QUEUE "debug" -setenv ICE_MACHINE_TPNODE 32 # tasks per node -setenv ICE_MACHINE_BLDTHRDS 4 -setenv ICE_MACHINE_QSTAT "qstat " - -# For lcov -set lcovpath = "/p/home/apcraig/bin" -set lcovp5l = "/p/home/apcraig/usr/lib/perl5/site_perl/5.10.0/x86_64-linux-thread-multi" - -if ($?PATH) then - if ("$PATH" !~ "*${lcovpath}*") then - setenv PATH ${PATH}:$lcovpath - endif -else - setenv PATH $lcovpath -endif - -if ($?PERL5LIB) then - if ("$PERL5LIB" !~ "*${lcovp5l}*") then - setenv PERL5LIB ${PERL5LIB}:$lcovp5l - endif -else - setenv PERL5LIB $lcovp5l -endif diff --git a/configuration/scripts/machines/env.conrad_intel b/configuration/scripts/machines/env.conrad_intel deleted file mode 100755 index e37ce4b1f..000000000 --- a/configuration/scripts/machines/env.conrad_intel +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/csh -f - -set inp = "undefined" -if ($#argv == 1) then - set inp = $1 -endif - -if ("$inp" != "-nomodules") then - -source /opt/modules/default/init/csh - -module unload PrgEnv-cray -module unload PrgEnv-gnu -module unload PrgEnv-intel -module unload PrgEnv-pgi -module load PrgEnv-intel/5.2.40 - -module unload intel -module load intel/17.0.2.174 - -module unload cray-mpich -module unload cray-mpich2 -module load cray-mpich/7.3.2 - -module unload netcdf -module unload cray-netcdf -module unload cray-hdf5 -module unload cray-hdf5-parallel -module unload cray-netcdf-hdf5parallel -module unload cray-parallel-netcdf -module load cray-netcdf/4.3.2 -module load cray-hdf5/1.8.13 - -module unload cray-libsci - -module load craype-haswell - -setenv NETCDF_PATH ${NETCDF_DIR} -limit coredumpsize unlimited -limit stacksize unlimited - -endif - -setenv ICE_MACHINE_MACHNAME conrad -setenv ICE_MACHINE_MACHINFO "Cray XC40 Xeon E5-2698v3 Haswell" -setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "ifort 17.0.2 20170213, mpich 7.3.2, netcdf 4.3.2" -setenv ICE_MACHINE_MAKE gmake -setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS -setenv ICE_MACHINE_INPUTDATA /p/work1/RASM_data/cice_consortium -setenv ICE_MACHINE_BASELINE $WORKDIR/CICE_BASELINE -setenv ICE_MACHINE_SUBMIT "qsub " -setenv ICE_MACHINE_ACCT P00000000 -setenv ICE_MACHINE_QUEUE "debug" -setenv ICE_MACHINE_TPNODE 32 # tasks per node -setenv ICE_MACHINE_MAXPES 8000 # maximum total pes (tasks * threads) available -setenv ICE_MACHINE_MAXRUNLENGTH 168 # maximum batch wall time limit in hours (integer) -setenv ICE_MACHINE_BLDTHRDS 4 -setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.conrad_pgi b/configuration/scripts/machines/env.conrad_pgi deleted file mode 100755 index 2e82ea34f..000000000 --- 
a/configuration/scripts/machines/env.conrad_pgi +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/csh -f - -set inp = "undefined" -if ($#argv == 1) then - set inp = $1 -endif - -if ("$inp" != "-nomodules") then - -source /opt/modules/default/init/csh - -module unload PrgEnv-cray -module unload PrgEnv-gnu -module unload PrgEnv-intel -module unload PrgEnv-pgi -module load PrgEnv-pgi/5.2.82 - -module unload pgi -module load pgi/16.10.0 - -module unload cray-mpich -module unload cray-mpich2 -module load cray-mpich/7.5.3 - -module unload netcdf -module unload cray-netcdf -module unload cray-hdf5 -module unload cray-hdf5-parallel -module unload cray-netcdf-hdf5parallel -module unload cray-parallel-netcdf -module load cray-netcdf/4.4.1.1 -module load cray-hdf5/1.10.0.1 - -module unload cray-libsci - -module load craype-haswell - -setenv NETCDF_PATH ${NETCDF_DIR} -limit coredumpsize unlimited -limit stacksize unlimited - -endif - -setenv ICE_MACHINE_MACHNAME conrad -setenv ICE_MACHINE_MACHINFO "Cray XC40 Xeon E5-2698v3 Haswell" -setenv ICE_MACHINE_ENVNAME pgi -setenv ICE_MACHINE_ENVINFO "pgf90 16.10-0, mpich 7.5.3, netcdf 4.4.1.1" -setenv ICE_MACHINE_MAKE gmake -setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS -setenv ICE_MACHINE_INPUTDATA /p/work1/RASM_data/cice_consortium -setenv ICE_MACHINE_BASELINE $WORKDIR/CICE_BASELINE -setenv ICE_MACHINE_SUBMIT "qsub " -setenv ICE_MACHINE_ACCT ARLAP96070PET -setenv ICE_MACHINE_QUEUE "debug" -setenv ICE_MACHINE_TPNODE 32 # tasks per node -setenv ICE_MACHINE_BLDTHRDS 4 -setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.cori_intel b/configuration/scripts/machines/env.cori_intel index ed01928f4..734b2edf3 100755 --- a/configuration/scripts/machines/env.cori_intel +++ b/configuration/scripts/machines/env.cori_intel @@ -39,6 +39,7 @@ module load craype/2.6.2 setenv NETCDF_PATH ${NETCDF_DIR} setenv OMP_PROC_BIND true setenv OMP_PLACES threads +setenv OMP_STACKSIZE 32M limit coredumpsize unlimited limit stacksize unlimited diff --git a/configuration/scripts/machines/env.daley_gnu b/configuration/scripts/machines/env.daley_gnu index b1e379eb0..25b438e8e 100755 --- a/configuration/scripts/machines/env.daley_gnu +++ b/configuration/scripts/machines/env.daley_gnu @@ -19,6 +19,9 @@ module load cray-netcdf # NetCDF module load cray-hdf5 # HDF5 setenv HDF5_USE_FILE_LOCKING FALSE # necessary since data is on an NFS filesystem +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + endif setenv ICE_MACHINE_MACHNAME daley diff --git a/configuration/scripts/machines/env.daley_intel b/configuration/scripts/machines/env.daley_intel index 502c71037..49178365b 100755 --- a/configuration/scripts/machines/env.daley_intel +++ b/configuration/scripts/machines/env.daley_intel @@ -14,6 +14,9 @@ module load cray-netcdf # NetCDF module load cray-hdf5 # HDF5 setenv HDF5_USE_FILE_LOCKING FALSE # necessary since data is on an NFS filesystem +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + endif setenv ICE_MACHINE_MACHNAME daley diff --git a/configuration/scripts/machines/env.fram_intel b/configuration/scripts/machines/env.fram_intel index a7b141479..98edb3a66 100755 --- a/configuration/scripts/machines/env.fram_intel +++ b/configuration/scripts/machines/env.fram_intel @@ -7,6 +7,9 @@ source /fs/ssm/main/opt/intelcomp/intelcomp-2016.1.156/intelcomp_2016.1.156_mult source $ssmuse -d /fs/ssm/main/opt/openmpi/openmpi-1.6.5/intelcomp-2016.1.156 # openmpi source $ssmuse -d /fs/ssm/hpco/tmp/eccc/201402/04/intel-2016.1.150 # netcdf (and openmpi) +# May be 
needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + setenv ICE_MACHINE_MACHNAME fram setenv ICE_MACHINE_ENVNAME intel setenv ICE_MACHINE_MAKE make diff --git a/configuration/scripts/machines/env.freya_gnu b/configuration/scripts/machines/env.freya_gnu index b655d6dd0..2681e1318 100755 --- a/configuration/scripts/machines/env.freya_gnu +++ b/configuration/scripts/machines/env.freya_gnu @@ -8,7 +8,7 @@ endif if ("$inp" != "-nomodules") then source /opt/modules/default/init/csh # Initialize modules for csh - Clear environment +# Clear environment module rm PrgEnv-intel module rm PrgEnv-cray module rm PrgEnv-gnu @@ -37,3 +37,4 @@ setenv ICE_MACHINE_ACCT P0000000 setenv ICE_MACHINE_QUEUE "development" setenv ICE_MACHINE_BLDTHRDS 18 setenv ICE_MACHINE_QSTAT "qstat " +setenv OMP_STACKSIZE 64M diff --git a/configuration/scripts/machines/env.freya_intel b/configuration/scripts/machines/env.freya_intel index dcbc1f8ba..4b45cd9e7 100755 --- a/configuration/scripts/machines/env.freya_intel +++ b/configuration/scripts/machines/env.freya_intel @@ -36,3 +36,4 @@ setenv ICE_MACHINE_ACCT P0000000 setenv ICE_MACHINE_QUEUE "development" setenv ICE_MACHINE_BLDTHRDS 18 setenv ICE_MACHINE_QSTAT "qstat " +setenv OMP_STACKSIZE 64M diff --git a/configuration/scripts/machines/env.gaea_intel b/configuration/scripts/machines/env.gaea_intel index d143270d7..e204c6fff 100755 --- a/configuration/scripts/machines/env.gaea_intel +++ b/configuration/scripts/machines/env.gaea_intel @@ -16,6 +16,9 @@ module load cray-netcdf module load PrgEnv-intel/6.0.5 module list +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + endif setenv ICE_MACHINE_MACHNAME gaea diff --git a/configuration/scripts/machines/env.gaffney_gnu b/configuration/scripts/machines/env.gaffney_gnu index a63ee2ae4..dd889c5af 100755 --- a/configuration/scripts/machines/env.gaffney_gnu +++ b/configuration/scripts/machines/env.gaffney_gnu @@ -24,6 +24,7 @@ setenv MPI_DSM_DISTRIBUTE 0 setenv KMP_AFFINITY disabled limit coredumpsize unlimited limit stacksize unlimited +setenv OMP_STACKSIZE 64M endif diff --git a/configuration/scripts/machines/env.gaffney_intel b/configuration/scripts/machines/env.gaffney_intel index 9fa11d16e..c7fd0f6b3 100755 --- a/configuration/scripts/machines/env.gaffney_intel +++ b/configuration/scripts/machines/env.gaffney_intel @@ -24,6 +24,7 @@ setenv MPI_DSM_DISTRIBUTE 0 setenv KMP_AFFINITY disabled limit coredumpsize unlimited limit stacksize unlimited +setenv OMP_STACKSIZE 64M endif diff --git a/configuration/scripts/machines/env.gordon_intel b/configuration/scripts/machines/env.gordon_intel deleted file mode 100755 index 67aaa9c69..000000000 --- a/configuration/scripts/machines/env.gordon_intel +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/csh -f - -set inp = "undefined" -if ($#argv == 1) then - set inp = $1 -endif - -if ("$inp" != "-nomodules") then - -source /opt/modules/default/init/csh - -module unload PrgEnv-cray -module unload PrgEnv-gnu -module unload PrgEnv-intel -module unload PrgEnv-pgi -module load PrgEnv-intel/5.2.40 - -module unload intel -module load intel/17.0.2.174 - -module unload cray-mpich -module unload cray-mpich2 -module load cray-mpich/7.3.2 - -module unload netcdf -module unload cray-netcdf -module unload cray-hdf5 -module unload cray-hdf5-parallel -module unload cray-netcdf-hdf5parallel -module unload cray-parallel-netcdf -module load cray-netcdf/4.3.2 -module load cray-hdf5/1.8.13 - -module unload cray-libsci - -module load craype-haswell - -setenv NETCDF_PATH ${NETCDF_DIR} -limit coredumpsize unlimited 
-limit stacksize unlimited - -endif - -setenv ICE_MACHINE_MACHNAME gordon -setenv ICE_MACHINE_MACHINFO "Cray XC40 Xeon E5-2698v3 Haswell" -setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "ifort 17.0.2 20170213, mpich 7.3.2, netcdf 4.3.2" -setenv ICE_MACHINE_MAKE gmake -setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS -setenv ICE_MACHINE_INPUTDATA /p/work1/RASM_data/cice_consortium -setenv ICE_MACHINE_BASELINE $WORKDIR/CICE_BASELINE -setenv ICE_MACHINE_SUBMIT "qsub " -setenv ICE_MACHINE_ACCT P00000000 -setenv ICE_MACHINE_QUEUE "debug" -setenv ICE_MACHINE_TPNODE 32 # tasks per node -setenv ICE_MACHINE_MAXPES 8000 # maximum total pes (tasks * threads) available -setenv ICE_MACHINE_MAXRUNLENGTH 168 # maximum batch wall time limit in hours (integer) -setenv ICE_MACHINE_BLDTHRDS 4 -setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.gordon_pgi b/configuration/scripts/machines/env.gordon_pgi deleted file mode 100755 index 5885afb4b..000000000 --- a/configuration/scripts/machines/env.gordon_pgi +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/csh -f - -set inp = "undefined" -if ($#argv == 1) then - set inp = $1 -endif - -if ("$inp" != "-nomodules") then - -source /opt/modules/default/init/csh - -module unload PrgEnv-cray -module unload PrgEnv-gnu -module unload PrgEnv-intel -module unload PrgEnv-pgi -module load PrgEnv-pgi/5.2.82 - -module unload pgi -module load pgi/16.10.0 - -module unload cray-mpich -module unload cray-mpich2 -module load cray-mpich/7.5.3 - -module unload netcdf -module unload cray-netcdf -module unload cray-hdf5 -module unload cray-hdf5-parallel -module unload cray-netcdf-hdf5parallel -module unload cray-parallel-netcdf -module load cray-netcdf/4.4.1.1 -module load cray-hdf5/1.10.0.1 - -module unload cray-libsci - -module load craype-haswell - -setenv NETCDF_PATH ${NETCDF_DIR} -limit coredumpsize unlimited -limit stacksize unlimited - -endif - -setenv ICE_MACHINE_MACHNAME gordon -setenv ICE_MACHINE_MACHINFO "Cray XC40 Xeon E5-2698v3 Haswell" -setenv ICE_MACHINE_ENVNAME pgi -setenv ICE_MACHINE_ENVINFO "pgf90 16.10-0, mpich 7.5.3, netcdf 4.4.1.1" -setenv ICE_MACHINE_MAKE gmake -setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS -setenv ICE_MACHINE_INPUTDATA /p/work1/RASM_data/cice_consortium -setenv ICE_MACHINE_BASELINE $WORKDIR/CICE_BASELINE -setenv ICE_MACHINE_SUBMIT "qsub " -setenv ICE_MACHINE_ACCT ARLAP96070PET -setenv ICE_MACHINE_QUEUE "debug" -setenv ICE_MACHINE_TPNODE 32 # tasks per node -setenv ICE_MACHINE_BLDTHRDS 4 -setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.hera_intel b/configuration/scripts/machines/env.hera_intel index 7330c3937..a9cf59516 100755 --- a/configuration/scripts/machines/env.hera_intel +++ b/configuration/scripts/machines/env.hera_intel @@ -15,6 +15,9 @@ module load impi/2018.0.4 module load netcdf/4.7.0 #module list +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + endif setenv ICE_MACHINE_MACHNAME hera diff --git a/configuration/scripts/machines/env.high_Sierra_gnu b/configuration/scripts/machines/env.high_Sierra_gnu index 3845a91aa..0bd31181b 100755 --- a/configuration/scripts/machines/env.high_Sierra_gnu +++ b/configuration/scripts/machines/env.high_Sierra_gnu @@ -1,5 +1,8 @@ #!/bin/csh -f +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + setenv ICE_MACHINE_MACHNAME high_Sierra setenv ICE_MACHINE_ENVNAME gnu setenv ICE_MACHINE_MAKE make diff --git a/configuration/scripts/machines/env.hobart_intel b/configuration/scripts/machines/env.hobart_intel index 
2ab7a3c53..0b6c5b12c 100755 --- a/configuration/scripts/machines/env.hobart_intel +++ b/configuration/scripts/machines/env.hobart_intel @@ -12,6 +12,9 @@ source /usr/share/Modules/init/csh module purge module load compiler/intel/18.0.3 +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + endif setenv ICE_MACHINE_MACHNAME hobart diff --git a/configuration/scripts/machines/env.hobart_nag b/configuration/scripts/machines/env.hobart_nag index cae8c0fd8..6d22beca9 100755 --- a/configuration/scripts/machines/env.hobart_nag +++ b/configuration/scripts/machines/env.hobart_nag @@ -12,6 +12,9 @@ source /usr/share/Modules/init/csh module purge module load compiler/nag/6.2 +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + endif setenv ICE_MACHINE_MACHNAME hobart diff --git a/configuration/scripts/machines/env.koehr_intel b/configuration/scripts/machines/env.koehr_intel index f4d7cada2..21f124b5f 100755 --- a/configuration/scripts/machines/env.koehr_intel +++ b/configuration/scripts/machines/env.koehr_intel @@ -25,6 +25,9 @@ setenv KMP_AFFINITY disabled limit coredumpsize unlimited limit stacksize unlimited +# May be needed for OpenMP memory +setenv OMP_STACKSIZE 64M + endif setenv ICE_MACHINE_MACHNAME koehr diff --git a/configuration/scripts/machines/env.millikan_intel b/configuration/scripts/machines/env.millikan_intel index 63913166d..c0a7356ad 100755 --- a/configuration/scripts/machines/env.millikan_intel +++ b/configuration/scripts/machines/env.millikan_intel @@ -6,6 +6,9 @@ source /fs/ssm/main/opt/intelcomp/intelcomp-2016.1.156/intelcomp_2016.1.156_mult source $ssmuse -d /fs/ssm/main/opt/openmpi/openmpi-1.6.5/intelcomp-2016.1.156 # openmpi source $ssmuse -d /fs/ssm/hpco/tmp/eccc/201402/04/intel-2016.1.150 # netcdf (and openmpi) +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + setenv ICE_MACHINE_MACHNAME millikan setenv ICE_MACHINE_ENVNAME intel setenv ICE_MACHINE_MAKE make diff --git a/configuration/scripts/machines/env.mustang_intel18 b/configuration/scripts/machines/env.mustang_intel18 index f420ec7ff..45e5b6518 100755 --- a/configuration/scripts/machines/env.mustang_intel18 +++ b/configuration/scripts/machines/env.mustang_intel18 @@ -21,7 +21,7 @@ module load netcdf-fortran/intel/4.4.2 setenv NETCDF_PATH /app/COST/netcdf-fortran/4.4.2/intel -#setenv OMP_STACKSIZE 256M +setenv OMP_STACKSIZE 64M #setenv MP_LABELIO yes #setenv MP_INFOLEVEL 2 #setenv MP_SHARED_MEMORY yes diff --git a/configuration/scripts/machines/env.mustang_intel19 b/configuration/scripts/machines/env.mustang_intel19 index 0fc0458fd..438bc1111 100755 --- a/configuration/scripts/machines/env.mustang_intel19 +++ b/configuration/scripts/machines/env.mustang_intel19 @@ -21,7 +21,7 @@ module load netcdf-fortran/intel/4.4.2 setenv NETCDF_PATH /app/COST/netcdf-fortran/4.4.2/intel -#setenv OMP_STACKSIZE 256M +setenv OMP_STACKSIZE 64M #setenv MP_LABELIO yes #setenv MP_INFOLEVEL 2 #setenv MP_SHARED_MEMORY yes diff --git a/configuration/scripts/machines/env.mustang_intel20 b/configuration/scripts/machines/env.mustang_intel20 index 00c4a250d..cca0b3019 100755 --- a/configuration/scripts/machines/env.mustang_intel20 +++ b/configuration/scripts/machines/env.mustang_intel20 @@ -21,7 +21,7 @@ module load netcdf-fortran/intel/4.4.2 setenv NETCDF_PATH /app/COST/netcdf-fortran/4.4.2/intel -#setenv OMP_STACKSIZE 256M +setenv OMP_STACKSIZE 64M #setenv MP_LABELIO yes #setenv MP_INFOLEVEL 2 #setenv MP_SHARED_MEMORY yes diff --git a/configuration/scripts/machines/env.narwhal_aocc 
b/configuration/scripts/machines/env.narwhal_aocc new file mode 100755 index 000000000..6d6822f46 --- /dev/null +++ b/configuration/scripts/machines/env.narwhal_aocc @@ -0,0 +1,54 @@ +#!/bin/csh -f + +set inp = "undefined" +if ($#argv == 1) then + set inp = $1 +endif + +if ("$inp" != "-nomodules") then + +source ${MODULESHOME}/init/csh + +module unload PrgEnv-aocc +module unload PrgEnv-cray +module unload PrgEnv-gnu +module unload PrgEnv-intel +module unload PrgEnv-nvidia +module load PrgEnv-aocc/8.1.0 +module load cray-pals/1.0.17 +module load bct-env/0.1 +module unload aocc +module load aocc/2.2.0.1 +module unload cray-mpich +module load cray-mpich/8.1.5 + +module unload cray-hdf5 +module unload cray-hdf5-parallel +module unload cray-netcdf-hdf5parallel +module unload cray-parallel-netcdf +module unload netcdf +module load cray-netcdf/4.7.4.4 +module load cray-hdf5/1.12.0.4 + +setenv NETCDF_PATH ${NETCDF_DIR} +limit coredumpsize unlimited +limit stacksize unlimited +setenv OMP_STACKSIZE 128M +setenv OMP_WAIT_POLICY PASSIVE + +endif + +setenv ICE_MACHINE_MACHNAME narwhal +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" +setenv ICE_MACHINE_ENVNAME aocc +setenv ICE_MACHINE_ENVINFO "aocc_3.0.0-Build#78 2020_12_10 clang/flang 12.0.0, cray-mpich/8.1.9, netcdf/4.7.4.4" +setenv ICE_MACHINE_MAKE gmake +setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS +setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium +setenv ICE_MACHINE_BASELINE $WORKDIR/CICE_BASELINE +setenv ICE_MACHINE_SUBMIT "qsub " +setenv ICE_MACHINE_ACCT P00000000 +setenv ICE_MACHINE_QUEUE "debug" +setenv ICE_MACHINE_TPNODE 128 # tasks per node +setenv ICE_MACHINE_BLDTHRDS 12 +setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.conrad_cray b/configuration/scripts/machines/env.narwhal_cray similarity index 53% rename from configuration/scripts/machines/env.conrad_cray rename to configuration/scripts/machines/env.narwhal_cray index 62549a738..d0fcc9ba7 100755 --- a/configuration/scripts/machines/env.conrad_cray +++ b/configuration/scripts/machines/env.narwhal_cray @@ -7,51 +7,48 @@ endif if ("$inp" != "-nomodules") then -source /opt/modules/default/init/csh +source ${MODULESHOME}/init/csh +module unload PrgEnv-aocc module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel -module unload PrgEnv-pgi -module load PrgEnv-cray/5.2.82 - -module unload cce -module load cce/8.5.8 - +module unload PrgEnv-nvidia +module load PrgEnv-cray/8.1.0 +module load cray-pals/1.0.17 +module load bct-env/0.1 +module unload cce +module load cce/12.0.3 module unload cray-mpich -module unload cray-mpich2 -module load cray-mpich/7.5.3 +module load cray-mpich/8.1.9 -module unload netcdf -module unload cray-netcdf module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf -module load cray-netcdf/4.4.1.1 -module load cray-hdf5/1.10.0.1 - -module unload cray-libsci - -module load craype-haswell +module unload netcdf +module load cray-netcdf/4.7.4.4 +module load cray-hdf5/1.12.0.4 setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited limit stacksize unlimited +setenv OMP_STACKSIZE 128M +setenv OMP_WAIT_POLICY PASSIVE endif -setenv ICE_MACHINE_MACHNAME conrad -setenv ICE_MACHINE_MACHINFO "Cray XC40 Xeon E5-2698v3 Haswell" +setenv ICE_MACHINE_MACHNAME narwhal +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" setenv ICE_MACHINE_ENVNAME cray -setenv ICE_MACHINE_ENVINFO "cce 8.5.8, mpich 7.5.3, netcdf 4.4.1.1" +setenv
ICE_MACHINE_ENVINFO "cce 12.0.3, cray-mpich/8.1.9, netcdf/4.7.4.4" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS -setenv ICE_MACHINE_INPUTDATA /p/work1/RASM_data/cice_consortium +setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium setenv ICE_MACHINE_BASELINE $WORKDIR/CICE_BASELINE setenv ICE_MACHINE_SUBMIT "qsub " setenv ICE_MACHINE_ACCT P00000000 setenv ICE_MACHINE_QUEUE "debug" -setenv ICE_MACHINE_TPNODE 32 # tasks per node -setenv ICE_MACHINE_BLDTHRDS 4 +setenv ICE_MACHINE_TPNODE 128 # tasks per node +setenv ICE_MACHINE_BLDTHRDS 12 setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.gordon_gnu b/configuration/scripts/machines/env.narwhal_gnu similarity index 51% rename from configuration/scripts/machines/env.gordon_gnu rename to configuration/scripts/machines/env.narwhal_gnu index d17923bd3..51a272f4e 100755 --- a/configuration/scripts/machines/env.gordon_gnu +++ b/configuration/scripts/machines/env.narwhal_gnu @@ -7,51 +7,48 @@ endif if ("$inp" != "-nomodules") then -source /opt/modules/default/init/csh +source ${MODULESHOME}/init/csh +module unload PrgEnv-aocc module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel -module unload PrgEnv-pgi -module load PrgEnv-gnu/5.2.82 - +module unload PrgEnv-nvidia +module load PrgEnv-gnu/8.1.0 +module load cray-pals/1.0.17 +module load bct-env/0.1 module unload gcc -module load gcc/6.3.0 - +module load gcc/11.2.0 module unload cray-mpich -module unload cray-mpich2 -module load cray-mpich/7.5.3 +module load cray-mpich/8.1.9 -module unload netcdf -module unload cray-netcdf module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf -module load cray-netcdf/4.4.1.1 -module load cray-hdf5/1.10.0.1 - -module unload cray-libsci - -module load craype-haswell +module unload netcdf +module load cray-netcdf/4.7.4.4 +module load cray-hdf5/1.12.0.4 setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited limit stacksize unlimited +setenv OMP_STACKSIZE 128M +setenv OMP_WAIT_POLICY PASSIVE endif -setenv ICE_MACHINE_MACHNAME gordon -setenv ICE_MACHINE_MACHINFO "Cray XC40 Xeon E5-2698v3 Haswell" -setenv ICE_MACHINE_ENVNAME gnu -setenv ICE_MACHINE_ENVINFO "GNU Fortran (GCC) 6.3.0 20161221, mpich 7.5.3, netcdf 4.4.1.1" +setenv ICE_MACHINE_MACHNAME narwhal +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" +setenv ICE_MACHINE_ENVNAME gnu +setenv ICE_MACHINE_ENVINFO "gnu fortran/c 11.2.0, cray-mpich/8.1.9, netcdf/4.7.4.4" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS -setenv ICE_MACHINE_INPUTDATA /p/work1/RASM_data/cice_consortium +setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium setenv ICE_MACHINE_BASELINE $WORKDIR/CICE_BASELINE setenv ICE_MACHINE_SUBMIT "qsub " setenv ICE_MACHINE_ACCT P00000000 setenv ICE_MACHINE_QUEUE "debug" -setenv ICE_MACHINE_TPNODE 32 # tasks per node -setenv ICE_MACHINE_BLDTHRDS 4 +setenv ICE_MACHINE_TPNODE 128 # tasks per node +setenv ICE_MACHINE_BLDTHRDS 12 setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.gordon_cray b/configuration/scripts/machines/env.narwhal_intel similarity index 50% rename from configuration/scripts/machines/env.gordon_cray rename to configuration/scripts/machines/env.narwhal_intel index d8c392d60..f79d962ff 100755 --- a/configuration/scripts/machines/env.gordon_cray +++ b/configuration/scripts/machines/env.narwhal_intel @@ -7,51 +7,48 @@ endif if ("$inp" != 
"-nomodules") then -source /opt/modules/default/init/csh +source ${MODULESHOME}/init/csh +module unload PrgEnv-aocc module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel -module unload PrgEnv-pgi -module load PrgEnv-cray/5.2.82 - -module unload cce -module load cce/8.5.8 - +module unload PrgEnv-nvidia +module load PrgEnv-intel/8.0.0 +module load cray-pals/1.0.17 +module load bct-env/0.1 +module unload intel +module load intel/2021.1 module unload cray-mpich -module unload cray-mpich2 -module load cray-mpich/7.5.3 +module load cray-mpich/8.1.9 -module unload netcdf -module unload cray-netcdf module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf -module load cray-netcdf/4.4.1.1 -module load cray-hdf5/1.10.0.1 - -module unload cray-libsci - -module load craype-haswell +module unload netcdf +module load cray-netcdf/4.7.4.4 +module load cray-hdf5/1.12.0.4 setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited limit stacksize unlimited +setenv OMP_STACKSIZE 128M +setenv OMP_WAIT_POLICY PASSIVE endif -setenv ICE_MACHINE_MACHNAME gordon -setenv ICE_MACHINE_MACHINFO "Cray XC40 Xeon E5-2698v3 Haswell" -setenv ICE_MACHINE_ENVNAME cray -setenv ICE_MACHINE_ENVINFO "cce 8.5.8, mpich 7.5.3, netcdf 4.4.1.1" +setenv ICE_MACHINE_MACHNAME narwhal +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" +setenv ICE_MACHINE_ENVNAME intel +setenv ICE_MACHINE_ENVINFO "ifort 2021.1 Beta 20201112, cray-mpich/8.1.9, netcdf/4.7.4.4" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS -setenv ICE_MACHINE_INPUTDATA /p/work1/RASM_data/cice_consortium +setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium setenv ICE_MACHINE_BASELINE $WORKDIR/CICE_BASELINE setenv ICE_MACHINE_SUBMIT "qsub " setenv ICE_MACHINE_ACCT P00000000 setenv ICE_MACHINE_QUEUE "debug" -setenv ICE_MACHINE_TPNODE 32 # tasks per node -setenv ICE_MACHINE_BLDTHRDS 4 +setenv ICE_MACHINE_TPNODE 128 # tasks per node +setenv ICE_MACHINE_BLDTHRDS 12 setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.onyx_cray b/configuration/scripts/machines/env.onyx_cray index 38785a27d..e696d1b98 100755 --- a/configuration/scripts/machines/env.onyx_cray +++ b/configuration/scripts/machines/env.onyx_cray @@ -39,6 +39,7 @@ module load craype-broadwell setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited limit stacksize unlimited +setenv OMP_STACKSIZE 64M endif diff --git a/configuration/scripts/machines/env.onyx_gnu b/configuration/scripts/machines/env.onyx_gnu index 699c01559..80ebb8e43 100755 --- a/configuration/scripts/machines/env.onyx_gnu +++ b/configuration/scripts/machines/env.onyx_gnu @@ -39,6 +39,7 @@ module load craype-broadwell setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited limit stacksize unlimited +setenv OMP_STACKSIZE 64M endif diff --git a/configuration/scripts/machines/env.onyx_intel b/configuration/scripts/machines/env.onyx_intel index 39f25e8e5..362454dd4 100755 --- a/configuration/scripts/machines/env.onyx_intel +++ b/configuration/scripts/machines/env.onyx_intel @@ -39,6 +39,7 @@ module load craype-broadwell setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited limit stacksize unlimited +setenv OMP_STACKSIZE 64M endif diff --git a/configuration/scripts/machines/env.orion_intel b/configuration/scripts/machines/env.orion_intel index 95850b6bb..bdfccdd60 100755 --- a/configuration/scripts/machines/env.orion_intel +++ 
b/configuration/scripts/machines/env.orion_intel @@ -22,6 +22,9 @@ echo " module load netcdf/4.7.2" #module load netcdf/4.7.2 ##module list +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + endif setenv ICE_MACHINE_MACHNAME orion diff --git a/configuration/scripts/machines/env.phase3_intel b/configuration/scripts/machines/env.phase3_intel index af8dd3e5f..f5e3e4584 100755 --- a/configuration/scripts/machines/env.phase3_intel +++ b/configuration/scripts/machines/env.phase3_intel @@ -13,6 +13,9 @@ module load NetCDF/4.5.0 module load ESMF/7_1_0r module list +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + setenv ICE_MACHINE_MACHNAME phase3 setenv ICE_MACHINE_ENVNAME intel setenv ICE_MACHINE_MAKE gmake diff --git a/configuration/scripts/machines/env.testmachine_intel b/configuration/scripts/machines/env.testmachine_intel index 5b52f1b07..b6f7c329e 100755 --- a/configuration/scripts/machines/env.testmachine_intel +++ b/configuration/scripts/machines/env.testmachine_intel @@ -1,5 +1,8 @@ #!/bin/csh -f +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + setenv ICE_MACHINE_MACHNAME testmachine setenv ICE_MACHINE_MACHINFO "Undefined" setenv ICE_MACHINE_ENVNAME intel diff --git a/configuration/scripts/machines/env.travisCI_gnu b/configuration/scripts/machines/env.travisCI_gnu index b7a1b6176..aa3c1eec7 100755 --- a/configuration/scripts/machines/env.travisCI_gnu +++ b/configuration/scripts/machines/env.travisCI_gnu @@ -1,5 +1,8 @@ #!/bin/csh -f +# May be needed for OpenMP memory +#setenv OMP_STACKSIZE 64M + setenv ICE_MACHINE_MACHNAME travisCI setenv ICE_MACHINE_MACHINFO "Cloud Computing" setenv ICE_MACHINE_ENVNAME gnu diff --git a/configuration/scripts/options/set_env.cmplog b/configuration/scripts/options/set_env.cmplog new file mode 100644 index 000000000..b59c1cb6d --- /dev/null +++ b/configuration/scripts/options/set_env.cmplog @@ -0,0 +1 @@ +setenv ICE_BFBTYPE log diff --git a/configuration/scripts/options/set_env.cmplogrest b/configuration/scripts/options/set_env.cmplogrest new file mode 100644 index 000000000..118986199 --- /dev/null +++ b/configuration/scripts/options/set_env.cmplogrest @@ -0,0 +1 @@ +setenv ICE_BFBTYPE logrest diff --git a/configuration/scripts/options/set_env.cmprest b/configuration/scripts/options/set_env.cmprest new file mode 100644 index 000000000..7258fa058 --- /dev/null +++ b/configuration/scripts/options/set_env.cmprest @@ -0,0 +1 @@ +setenv ICE_BFBTYPE restart diff --git a/configuration/scripts/options/set_env.ompschedd1 b/configuration/scripts/options/set_env.ompschedd1 new file mode 100644 index 000000000..a4d255f48 --- /dev/null +++ b/configuration/scripts/options/set_env.ompschedd1 @@ -0,0 +1 @@ +setenv ICE_OMPSCHED "dynamic,1" diff --git a/configuration/scripts/options/set_env.ompscheds b/configuration/scripts/options/set_env.ompscheds new file mode 100644 index 000000000..b9a4f58b0 --- /dev/null +++ b/configuration/scripts/options/set_env.ompscheds @@ -0,0 +1 @@ +setenv ICE_OMPSCHED "static" diff --git a/configuration/scripts/options/set_env.ompscheds1 b/configuration/scripts/options/set_env.ompscheds1 new file mode 100644 index 000000000..a9ca4874d --- /dev/null +++ b/configuration/scripts/options/set_env.ompscheds1 @@ -0,0 +1 @@ +setenv ICE_OMPSCHED "static,1" diff --git a/configuration/scripts/options/set_env.qcchk b/configuration/scripts/options/set_env.qcchk new file mode 100644 index 000000000..9b9fbbd2e --- /dev/null +++ b/configuration/scripts/options/set_env.qcchk @@ -0,0 +1 @@ +setenv ICE_BFBTYPE qcchk diff 
--git a/configuration/scripts/options/set_env.qcchkf b/configuration/scripts/options/set_env.qcchkf new file mode 100644 index 000000000..589e60772 --- /dev/null +++ b/configuration/scripts/options/set_env.qcchkf @@ -0,0 +1 @@ +setenv ICE_BFBTYPE qcchkf diff --git a/configuration/scripts/options/set_nml.dt3456s b/configuration/scripts/options/set_nml.dt3456s new file mode 100644 index 000000000..74e5482d7 --- /dev/null +++ b/configuration/scripts/options/set_nml.dt3456s @@ -0,0 +1 @@ +dt = 3456.0 diff --git a/configuration/scripts/options/set_nml.dynanderson b/configuration/scripts/options/set_nml.dynanderson index 566c53a09..2e8e13659 100644 --- a/configuration/scripts/options/set_nml.dynanderson +++ b/configuration/scripts/options/set_nml.dynanderson @@ -1,3 +1,5 @@ kdyn = 3 algo_nonlin = 'anderson' use_mean_vrel = .false. +capping = 1. + diff --git a/configuration/scripts/options/set_nml.dynpicard b/configuration/scripts/options/set_nml.dynpicard index b81f4d4e6..05efb3526 100644 --- a/configuration/scripts/options/set_nml.dynpicard +++ b/configuration/scripts/options/set_nml.dynpicard @@ -1,3 +1,4 @@ kdyn = 3 algo_nonlin = 'picard' use_mean_vrel = .true. +capping = 1. diff --git a/configuration/scripts/options/set_nml.qcnonbfb b/configuration/scripts/options/set_nml.qcnonbfb deleted file mode 100644 index a965b863c..000000000 --- a/configuration/scripts/options/set_nml.qcnonbfb +++ /dev/null @@ -1,16 +0,0 @@ -dt = 3456.0 -npt_unit = 'y' -npt = 5 -year_init = 2005 -month_init = 1 -day_init = 1 -sec_init = 0 -use_leap_years = .false. -fyear_init = 2005 -ycycle = 1 -dumpfreq = 'm' -dumpfreq_n = 12 -diagfreq = 24 -histfreq = 'd','x','x','x','x' -f_hi = 'd' -hist_avg = .false. diff --git a/configuration/scripts/options/set_nml.timerstats b/configuration/scripts/options/set_nml.timerstats new file mode 100644 index 000000000..723891b7b --- /dev/null +++ b/configuration/scripts/options/set_nml.timerstats @@ -0,0 +1 @@ +timer_stats = .true. 
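The set_env.* and set_nml.* files above are single-setting option files: cice.setup merges any options named via its -s/--set argument into the generated cice.settings (set_env files) or the ice_in namelist (set_nml files). As a rough sketch of how the new cmplog option composes with existing ones (the machine, compiler env, and testid below are placeholders, not taken from this patch), a reprosum smoke test using the new log-comparison type could be created as:

   # hypothetical invocation; narwhal, intel, and t00 are placeholders
   ./cice.setup --test smoke --grid gx3 --pes 4x2x25x29x4 \
       --set dslenderX2,diag1,reprosum,cmplog \
       --mach narwhal --env intel --testid t00

This sets ICE_BFBTYPE to log in the case's cice.settings, which the updated baseline.script below branches on in place of the retired logbfb test type.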
diff --git a/configuration/scripts/tests/baseline.script b/configuration/scripts/tests/baseline.script index ac69d49a0..bb8f50a1f 100644 --- a/configuration/scripts/tests/baseline.script +++ b/configuration/scripts/tests/baseline.script @@ -36,7 +36,7 @@ if (${ICE_BASECOM} != ${ICE_SPVAL}) then ${ICE_CASEDIR}/casescripts/comparelog.csh ${base_file} ${test_file} notcicefile set bfbstatus = $status - else if (${ICE_TEST} =~ qcchk*) then + else if (${ICE_BFBTYPE} =~ qcchk*) then set test_dir = ${ICE_RUNDIR} set base_dir = ${ICE_BASELINE}/${ICE_BASECOM}/${ICE_TESTNAME} ${ICE_SANDBOX}/configuration/scripts/tests/QC/cice.t-test.py ${base_dir} ${test_dir} @@ -151,7 +151,7 @@ if (${ICE_BFBCOMP} != ${ICE_SPVAL}) then endif endif - if (${ICE_TEST} == "logbfb") then + if (${ICE_BFBTYPE} == "log") then set test_file = `ls -1t ${ICE_RUNDIR}/cice.runlog* | head -1` set base_file = `ls -1t ${ICE_RUNDIR}/../${ICE_BFBCOMP}.${ICE_TESTID}/cice.runlog* | head -1` @@ -163,21 +163,61 @@ if (${ICE_BFBCOMP} != ${ICE_SPVAL}) then ${ICE_CASEDIR}/casescripts/comparelog.csh ${base_file} ${test_file} set bfbstatus = $status - else if (${ICE_TEST} =~ qcchk*) then + else if (${ICE_BFBTYPE} == "logrest") then + set test_file = `ls -1t ${ICE_RUNDIR}/cice.runlog* | head -1` + set base_file = `ls -1t ${ICE_RUNDIR}/../${ICE_BFBCOMP}.${ICE_TESTID}/cice.runlog* | head -1` + + echo "" + echo "bfb Log Compare Mode:" + echo "base_file: ${base_file}" + echo "test_file: ${test_file}" + + ${ICE_CASEDIR}/casescripts/comparelog.csh ${base_file} ${test_file} + set bfbstatusl = $status + + set test_dir = ${ICE_RUNDIR}/restart + set base_dir = ${ICE_RUNDIR}/../${ICE_BFBCOMP}.${ICE_TESTID}/restart + + echo "" + echo "bfb Restart Compare Mode:" + echo "base_dir: ${base_dir}" + echo "test_dir: ${test_dir}" + + ${ICE_CASEDIR}/casescripts/comparebfb.csh ${base_dir} ${test_dir} + set bfbstatusr = $status + + if (${bfbstatusl} == ${bfbstatusr}) then + set bfbstatus = ${bfbstatusl} + else if (${bfbstatusl} == 1 || ${bfbstatusr} == 1) then + set bfbstatus = 1 + else if (${bfbstatusl} > ${bfbstatusr}) then + set bfbstatus = ${bfbstatusl} + else + set bfbstatus = ${bfbstatusr} + endif + + echo "bfb log, rest, combined status = ${bfbstatusl},${bfbstatusr},${bfbstatus}" + + else if (${ICE_BFBTYPE} =~ qcchk*) then set test_dir = ${ICE_RUNDIR} set base_dir = ${ICE_RUNDIR}/../${ICE_BFBCOMP}.${ICE_TESTID} + echo "" + echo "qcchk Compare Mode:" + echo "base_dir: ${base_dir}" + echo "test_dir: ${test_dir}" ${ICE_SANDBOX}/configuration/scripts/tests/QC/cice.t-test.py ${base_dir} ${test_dir} set bfbstatus = $status # expecting failure, so switch value - if (${ICE_TEST} =~ qcchkf*) then + if (${ICE_BFBTYPE} == "qcchkf") then @ bfbstatus = 1 - $bfbstatus endif + else set test_dir = ${ICE_RUNDIR}/restart set base_dir = ${ICE_RUNDIR}/../${ICE_BFBCOMP}.${ICE_TESTID}/restart echo "" - echo "bfb Compare Mode:" + echo "bfb Restart Compare Mode:" echo "base_dir: ${base_dir}" echo "test_dir: ${test_dir}" @@ -190,10 +230,10 @@ if (${ICE_BFBCOMP} != ${ICE_SPVAL}) then rm -f ${ICE_CASEDIR}/test_output.prev if (${bfbstatus} == 0) then echo "PASS ${ICE_TESTNAME} bfbcomp ${ICE_BFBCOMP}" >> ${ICE_CASEDIR}/test_output - echo "bfb baseline and test dataset are identical" + echo "bfbcomp baseline and test dataset pass" else if (${bfbstatus} == 1) then echo "FAIL ${ICE_TESTNAME} bfbcomp ${ICE_BFBCOMP} different-data" >> ${ICE_CASEDIR}/test_output - echo "bfbcomp and test dataset are different" + echo "bfbcomp baseline and test dataset fail" else if (${bfbstatus} == 2) then echo
"MISS ${ICE_TESTNAME} bfbcomp ${ICE_BFBCOMP} missing-data" >> ${ICE_CASEDIR}/test_output echo "Missing data" diff --git a/configuration/scripts/tests/first_suite.ts b/configuration/scripts/tests/first_suite.ts index 31eba9fb7..b42d917ea 100644 --- a/configuration/scripts/tests/first_suite.ts +++ b/configuration/scripts/tests/first_suite.ts @@ -2,5 +2,5 @@ smoke gx3 8x2 diag1,run5day restart gx3 4x2x25x29x4 dslenderX2 smoke gx3 4x2x25x29x4 debug,run2day,dslenderX2 -logbfb gx3 4x2x25x29x4 dslenderX2,diag1,reprosum +smoke gx3 4x2x25x29x4 dslenderX2,diag1,reprosum,cmplog smoke gx3 1x2 run2day diff --git a/configuration/scripts/tests/nothread_suite.ts b/configuration/scripts/tests/nothread_suite.ts index 616741aa2..12fd03662 100644 --- a/configuration/scripts/tests/nothread_suite.ts +++ b/configuration/scripts/tests/nothread_suite.ts @@ -1,7 +1,7 @@ # Test Grid PEs Sets BFB-compare restart gx3 8x1x25x29x2 dslenderX2 -logbfb gx3 8x1x25x29x2 dslenderX2,diag1,reprosum +smoke gx3 8x1x25x29x2 dslenderX2,diag1,reprosum smoke gx3 16x1 diag1,run5day smoke gx3 1x1 debug,diag1,run2day @@ -70,9 +70,9 @@ restart gx3 32x1x5x10x12 drakeX2 restart_gx3_8x1x25x29x2_ restart gx3 16x1x8x10x10 droundrobin,maskhalo restart_gx3_8x1x25x29x2_dslenderX2 restart gx3 4x1x25x29x4 droundrobin restart_gx3_8x1x25x29x2_dslenderX2 -logbfb gx3 1x1x50x58x4 droundrobin,diag1,maskhalo,reprosum logbfb_gx3_8x1x25x29x2_diag1_dslenderX2_reprosum -logbfb gx3 4x1x25x116x1 dslenderX1,diag1,maskhalo,reprosum logbfb_gx3_8x1x25x29x2_diag1_dslenderX2_reprosum -logbfb gx3 20x1x5x29x20 dsectrobin,diag1,short,reprosum logbfb_gx3_8x1x25x29x2_diag1_dslenderX2_reprosum -logbfb gx3 16x1x8x10x10 droundrobin,diag1,reprosum logbfb_gx3_8x1x25x29x2_diag1_dslenderX2_reprosum -logbfb gx3 6x1x50x58x1 droundrobin,diag1,reprosum logbfb_gx3_8x1x25x29x2_diag1_dslenderX2_reprosum -logbfb gx3 12x1x4x29x9 dspacecurve,diag1,maskhalo,reprosum logbfb_gx3_8x1x25x29x2_diag1_dslenderX2_reprosum +smoke gx3 1x1x50x58x4 droundrobin,diag1,maskhalo,reprosum,cmplog smoke_gx3_8x1x25x29x2_diag1_dslenderX2_reprosum +smoke gx3 4x1x25x116x1 dslenderX1,diag1,maskhalo,reprosum,cmplog smoke_gx3_8x1x25x29x2_diag1_dslenderX2_reprosum +smoke gx3 20x1x5x29x20 dsectrobin,diag1,short,reprosum,cmplog smoke_gx3_8x1x25x29x2_diag1_dslenderX2_reprosum +smoke gx3 16x1x8x10x10 droundrobin,diag1,reprosum,cmplog smoke_gx3_8x1x25x29x2_diag1_dslenderX2_reprosum +smoke gx3 6x1x50x58x1 droundrobin,diag1,reprosum,cmplog smoke_gx3_8x1x25x29x2_diag1_dslenderX2_reprosum +smoke gx3 12x1x4x29x9 dspacecurve,diag1,maskhalo,reprosum,cmplog smoke_gx3_8x1x25x29x2_diag1_dslenderX2_reprosum diff --git a/configuration/scripts/tests/omp_suite.ts b/configuration/scripts/tests/omp_suite.ts new file mode 100644 index 000000000..8ac499c2f --- /dev/null +++ b/configuration/scripts/tests/omp_suite.ts @@ -0,0 +1,141 @@ +# Test Grid PEs Sets BFB-compare + +smoke gx3 8x4 diag1,reprosum,run10day +smoke gx3 6x2 alt01,reprosum,run10day +smoke gx3 8x2 alt02,reprosum,run10day +smoke gx3 12x2 alt03,droundrobin,reprosum,run10day +smoke gx3 4x4 alt04,reprosum,run10day +smoke gx3 4x4 alt05,reprosum,run10day +smoke gx3 8x2 alt06,reprosum,run10day +smoke gx3 8x2 bgcz,reprosum,run10day +smoke gx1 15x2 seabedprob,reprosum,run10day +smoke gx3 14x2 fsd12,reprosum,run10day +smoke gx3 11x2 isotope,reprosum,run10day +smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day +smoke gx3 6x4 dynpicard,reprosum,run10day +smoke gx3 8x3 zsal,reprosum,run10day + +smoke gbox128 8x2 reprosum,run10day +smoke gbox128 12x2 boxnodyn,reprosum,run10day 
+smoke gbox128 9x2 boxadv,reprosum,run10day +smoke gbox128 14x2 boxrestore,reprosum,run10day +smoke gbox80 4x5 box2001,reprosum,run10day +smoke gbox80 11x3 boxslotcyl,reprosum,run10day + +smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest smoke_gx3_8x4_diag1_reprosum_run10day +smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread smoke_gx3_8x4_diag1_reprosum_run10day +smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread smoke_gx3_6x2_alt01_reprosum_run10day +smoke gx3 16x1 alt02,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_alt02_reprosum_run10day +smoke gx3 24x1 alt03,reprosum,run10day,cmplogrest,thread smoke_gx3_12x2_alt03_droundrobin_reprosum_run10day +smoke gx3 24x1 alt04,reprosum,run10day,cmplogrest,thread smoke_gx3_4x4_alt04_reprosum_run10day +smoke gx3 14x1 alt05,reprosum,run10day,cmplogrest,thread smoke_gx3_4x4_alt05_reprosum_run10day +smoke gx3 24x1 alt06,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_alt06_reprosum_run10day +smoke gx3 12x1 bgcz,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_bgcz_reprosum_run10day +smoke gx1 28x1 seabedprob,reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day_seabedprob +smoke gx3 30x1 fsd12,reprosum,run10day,cmplogrest,thread smoke_gx3_14x2_fsd12_reprosum_run10day +smoke gx3 16x1 isotope,reprosum,run10day,cmplogrest,thread smoke_gx3_11x2_isotope_reprosum_run10day +smoke gx3 28x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread smoke_gx3_8x4_icdefault_reprosum_run10day_snwitdrdg_snwgrain +smoke gx3 18x1 dynpicard,reprosum,run10day,cmplogrest,thread smoke_gx3_6x4_dynpicard_reprosum_run10day +smoke gx3 20x1 zsal,reprosum,run10day,cmplogrest,thread smoke_gx3_8x3_reprosum_run10day_zsal + +smoke gbox128 20x1 reprosum,run10day,cmplogrest,thread smoke_gbox128_8x2_reprosum_run10day +smoke gbox128 16x1 boxnodyn,reprosum,run10day,cmplogrest,thread smoke_gbox128_12x2_boxnodyn_reprosum_run10day +smoke gbox128 14x1 boxadv,reprosum,run10day,cmplogrest,thread smoke_gbox128_9x2_boxadv_reprosum_run10day +smoke gbox128 24x1 boxrestore,reprosum,run10day,cmplogrest,thread smoke_gbox128_14x2_boxrestore_reprosum_run10day +smoke gbox80 19x1 box2001,reprosum,run10day,cmplogrest,thread smoke_gbox80_4x5_box2001_reprosum_run10day +smoke gbox80 8x4 boxslotcyl,reprosum,run10day,cmplogrest,thread smoke_gbox80_11x3_boxslotcyl_reprosum_run10day + +#gridC + +smoke gx3 8x4 diag1,reprosum,run10day,gridc +smoke gx3 6x2 alt01,reprosum,run10day,gridc +smoke gx3 8x2 alt02,reprosum,run10day,gridc +smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridc +smoke gx3 4x4 alt04,reprosum,run10day,gridc +smoke gx3 4x4 alt05,reprosum,run10day,gridc +smoke gx3 8x2 alt06,reprosum,run10day,gridc +smoke gx3 8x2 bgcz,reprosum,run10day,gridc +smoke gx1 15x2 seabedprob,reprosum,run10day,gridc +smoke gx3 14x2 fsd12,reprosum,run10day,gridc +smoke gx3 11x2 isotope,reprosum,run10day,gridc +smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridc +smoke gx3 6x4 dynpicard,reprosum,run10day,gridc +smoke gx3 8x3 zsal,reprosum,run10day,gridc + +smoke gbox128 8x2 reprosum,run10day,gridc +smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridc +smoke gbox128 9x2 boxadv,reprosum,run10day,gridc +smoke gbox128 14x2 boxrestore,reprosum,run10day,gridc +smoke gbox80 4x5 box2001,reprosum,run10day,gridc +smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridc + +smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest,gridc smoke_gx3_8x4_gridc_diag1_reprosum_run10day +smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_gridc_diag1_reprosum_run10day +smoke 
gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x2_alt01_gridc_reprosum_run10day +smoke gx3 16x1 alt02,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_alt02_gridc_reprosum_run10day +smoke gx3 24x1 alt03,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_12x2_alt03_droundrobin_gridc_reprosum_run10day +smoke gx3 24x1 alt04,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_4x4_alt04_gridc_reprosum_run10day +smoke gx3 14x1 alt05,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_4x4_alt05_gridc_reprosum_run10day +smoke gx3 24x1 alt06,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_alt06_gridc_reprosum_run10day +smoke gx3 12x1 bgcz,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_bgcz_gridc_reprosum_run10day +smoke gx1 28x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridc smoke_gx1_15x2_gridc_reprosum_run10day_seabedprob +smoke gx3 30x1 fsd12,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_14x2_fsd12_gridc_reprosum_run10day +smoke gx3 16x1 isotope,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_11x2_gridc_isotope_reprosum_run10day +smoke gx3 28x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_gridc_icdefault_reprosum_run10day_snwitdrdg_snwgrain +smoke gx3 18x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x4_dynpicard_gridc_reprosum_run10day +smoke gx3 20x1 zsal,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x3_gridc_reprosum_run10day_zsal + +smoke gbox128 20x1 reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_8x2_gridc_reprosum_run10day +smoke gbox128 16x1 boxnodyn,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_12x2_boxnodyn_gridc_reprosum_run10day +smoke gbox128 14x1 boxadv,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_9x2_boxadv_gridc_reprosum_run10day +smoke gbox128 24x1 boxrestore,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_14x2_boxrestore_gridc_reprosum_run10day +smoke gbox80 19x1 box2001,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox80_4x5_box2001_gridc_reprosum_run10day +smoke gbox80 8x4 boxslotcyl,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox80_11x3_boxslotcyl_gridc_reprosum_run10day + +#gridCD + +smoke gx3 8x4 diag1,reprosum,run10day,gridcd +smoke gx3 6x2 alt01,reprosum,run10day,gridcd +smoke gx3 8x2 alt02,reprosum,run10day,gridcd +smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridcd +smoke gx3 4x4 alt04,reprosum,run10day,gridcd +smoke gx3 4x4 alt05,reprosum,run10day,gridcd +smoke gx3 8x2 alt06,reprosum,run10day,gridcd +smoke gx3 8x2 bgcz,reprosum,run10day,gridcd +smoke gx1 15x2 seabedprob,reprosum,run10day,gridcd +smoke gx3 14x2 fsd12,reprosum,run10day,gridcd +smoke gx3 11x2 isotope,reprosum,run10day,gridcd +smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridcd +smoke gx3 6x4 dynpicard,reprosum,run10day,gridcd +smoke gx3 8x3 zsal,reprosum,run10day,gridcd + +smoke gbox128 8x2 reprosum,run10day,gridcd +smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridcd +smoke gbox128 9x2 boxadv,reprosum,run10day,gridcd +smoke gbox128 14x2 boxrestore,reprosum,run10day,gridcd +smoke gbox80 4x5 box2001,reprosum,run10day,gridcd +smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridcd + +smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest,gridcd smoke_gx3_8x4_gridcd_diag1_reprosum_run10day +smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_gridcd_diag1_reprosum_run10day +smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x2_alt01_gridcd_reprosum_run10day +smoke gx3 16x1 
alt02,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_alt02_gridcd_reprosum_run10day +smoke gx3 24x1 alt03,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_12x2_alt03_droundrobin_gridcd_reprosum_run10day +smoke gx3 24x1 alt04,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_4x4_alt04_gridcd_reprosum_run10day +smoke gx3 14x1 alt05,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_4x4_alt05_gridcd_reprosum_run10day +smoke gx3 24x1 alt06,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_alt06_gridcd_reprosum_run10day +smoke gx3 12x1 bgcz,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_bgcz_gridcd_reprosum_run10day +smoke gx1 28x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx1_15x2_gridcd_reprosum_run10day_seabedprob +smoke gx3 30x1 fsd12,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_14x2_fsd12_gridcd_reprosum_run10day +smoke gx3 16x1 isotope,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_11x2_gridcd_isotope_reprosum_run10day +smoke gx3 28x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_gridcd_icdefault_reprosum_run10day_snwitdrdg_snwgrain +smoke gx3 18x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x4_dynpicard_gridcd_reprosum_run10day +smoke gx3 20x1 zsal,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x3_gridcd_reprosum_run10day_zsal + +smoke gbox128 20x1 reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_8x2_gridcd_reprosum_run10day +smoke gbox128 16x1 boxnodyn,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_12x2_boxnodyn_gridcd_reprosum_run10day +smoke gbox128 14x1 boxadv,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_9x2_boxadv_gridcd_reprosum_run10day +smoke gbox128 24x1 boxrestore,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_14x2_boxrestore_gridcd_reprosum_run10day +smoke gbox80 19x1 box2001,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox80_4x5_box2001_gridcd_reprosum_run10day +smoke gbox80 8x4 boxslotcyl,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox80_11x3_boxslotcyl_gridcd_reprosum_run10day + diff --git a/configuration/scripts/tests/perf_suite.ts b/configuration/scripts/tests/perf_suite.ts new file mode 100644 index 000000000..9a17d8a55 --- /dev/null +++ b/configuration/scripts/tests/perf_suite.ts @@ -0,0 +1,30 @@ +# Test Grid PEs Sets BFB-compare +smoke gx1 1x1x320x384x1 run2day,droundrobin +smoke gx1 64x1x16x16x8 run2day,droundrobin,thread +sleep 180 +# +smoke gx1 1x1x320x384x1 run2day,droundrobin +smoke gx1 1x1x160x192x4 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +smoke gx1 1x1x80x96x16 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +smoke gx1 1x1x40x48x64 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +smoke gx1 1x1x20x24x256 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +# +smoke gx1 1x1x16x16x480 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +smoke gx1 2x1x16x16x240 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +smoke gx1 4x1x16x16x120 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +smoke gx1 8x1x16x16x60 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +smoke gx1 16x1x16x16x30 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +smoke gx1 32x1x16x16x15 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +smoke gx1 64x1x16x16x8 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +smoke gx1 128x1x16x16x4 run2day,droundrobin 
smoke_gx1_1x1x320x384x1_droundrobin_run2day +# +smoke gx1 64x1x16x16x8 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +smoke gx1 64x1x16x16x8 run2day,droundrobin,thread +smoke gx1 32x2x16x16x16 run2day,droundrobin smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread +smoke gx1 16x4x16x16x32 run2day,droundrobin smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread +smoke gx1 8x8x16x16x64 run2day,droundrobin smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread +smoke gx1 4x16x16x16x128 run2day,droundrobin smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread +smoke gx1 32x2x16x16x16 run2day,droundrobin,ompscheds smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread +smoke gx1 32x2x16x16x16 run2day,droundrobin,ompschedd1 smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread +smoke gx1 32x2x16x16x16 run2day,droundrobin,ompscheds1 smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread +# diff --git a/configuration/scripts/tests/prod_suite.ts b/configuration/scripts/tests/prod_suite.ts index 04982adb1..877fa1ce6 100644 --- a/configuration/scripts/tests/prod_suite.ts +++ b/configuration/scripts/tests/prod_suite.ts @@ -1,6 +1,6 @@ # Test Grid PEs Sets BFB-compare -qcchk gx3 72x1 qc,medium qcchk_gx3_72x1_medium_qc -qcchk gx1 144x1 qc,medium +qcchk gx3 72x1 qc,qcchk,medium qcchk_gx3_72x1_medium_qc_qcchk +qcchk gx1 144x1 qc,qcchk,medium smoke gx1 144x2 gx1prod,long,run10year -qcchkf gx3 72x1 qc,medium,alt02 qcchk_gx3_72x1_medium_qc -qcchk gx3 72x1 qcnonbfb,medium qcchk_gx3_72x1_medium_qc +qcchk gx3 72x1 qc,qcchkf,medium,alt02 qcchk_gx3_72x1_medium_qc_qcchk +qcchk gx3 72x1 qc,qcchk,dt3456s,medium qcchk_gx3_72x1_medium_qc_qcchk diff --git a/configuration/scripts/tests/reprosum_suite.ts b/configuration/scripts/tests/reprosum_suite.ts index a7f3fe5bc..417a7de2e 100644 --- a/configuration/scripts/tests/reprosum_suite.ts +++ b/configuration/scripts/tests/reprosum_suite.ts @@ -1,11 +1,11 @@ # Test Grid PEs Sets BFB-compare -logbfb gx3 4x2x25x29x4 dslenderX2,diag1,reprosum -#logbfb gx3 4x2x25x29x4 dslenderX2,diag1 -logbfb gx3 1x1x50x58x4 droundrobin,diag1,thread,maskhalo,reprosum logbfb_gx3_4x2x25x29x4_diag1_dslenderX2_reprosum -logbfb gx3 4x1x25x116x1 dslenderX1,diag1,thread,maskhalo,reprosum logbfb_gx3_4x2x25x29x4_diag1_dslenderX2_reprosum -logbfb gx3 1x20x5x29x80 dsectrobin,diag1,short,reprosum logbfb_gx3_4x2x25x29x4_diag1_dslenderX2_reprosum -logbfb gx3 8x2x8x10x20 droundrobin,diag1,reprosum logbfb_gx3_4x2x25x29x4_diag1_dslenderX2_reprosum -logbfb gx3 6x2x50x58x1 droundrobin,diag1,reprosum logbfb_gx3_4x2x25x29x4_diag1_dslenderX2_reprosum -logbfb gx3 6x2x4x29x18 dspacecurve,diag1,maskhalo,reprosum logbfb_gx3_4x2x25x29x4_diag1_dslenderX2_reprosum -logbfb gx3 17x2x1x1x800 droundrobin,diag1,maskhalo,reprosum logbfb_gx3_4x2x25x29x4_diag1_dslenderX2_reprosum -#logbfb gx3 8x2x8x10x20 droundrobin,diag1 logbfb_gx3_4x2x25x29x4_diag1_dslenderX2 +smoke gx3 4x2x25x29x4 dslenderX2,diag1,reprosum +#smoke gx3 4x2x25x29x4 dslenderX2,diag1 +smoke gx3 1x1x50x58x4 droundrobin,diag1,thread,maskhalo,reprosum,cmplog smoke_gx3_4x2x25x29x4_diag1_dslenderX2_reprosum +smoke gx3 4x1x25x116x1 dslenderX1,diag1,thread,maskhalo,reprosum,cmplog smoke_gx3_4x2x25x29x4_diag1_dslenderX2_reprosum +smoke gx3 1x20x5x29x80 dsectrobin,diag1,short,reprosum,cmplog smoke_gx3_4x2x25x29x4_diag1_dslenderX2_reprosum +smoke gx3 8x2x8x10x20 droundrobin,diag1,reprosum,cmplog smoke_gx3_4x2x25x29x4_diag1_dslenderX2_reprosum +smoke gx3 6x2x50x58x1 droundrobin,diag1,reprosum,cmplog smoke_gx3_4x2x25x29x4_diag1_dslenderX2_reprosum +smoke gx3 6x2x4x29x18 
dspacecurve,diag1,maskhalo,reprosum,cmplog smoke_gx3_4x2x25x29x4_diag1_dslenderX2_reprosum +smoke gx3 17x2x1x1x800 droundrobin,diag1,maskhalo,reprosum,cmplog smoke_gx3_4x2x25x29x4_diag1_dslenderX2_reprosum +#smoke gx3 8x2x8x10x20 droundrobin,diag1,cmplog smoke_gx3_4x2x25x29x4_diag1_dslenderX2 diff --git a/configuration/scripts/tests/test_logbfb.script b/configuration/scripts/tests/test_logbfb.script deleted file mode 100644 index 0ac1ed224..000000000 --- a/configuration/scripts/tests/test_logbfb.script +++ /dev/null @@ -1,33 +0,0 @@ -# This is identical to a smoke test, but triggers bfbcompare with log files instead of restarts -#---------------------------------------------------- -# Run the CICE model -# cice.run returns -1 if run did not complete successfully - -./cice.run -set res="$status" - -set log_file = `ls -t1 ${ICE_RUNDIR}/cice.runlog* | head -1` -set ttimeloop = `grep TimeLoop ${log_file} | grep Timer | cut -c 22-32` -set tdynamics = `grep Dynamics ${log_file} | grep Timer | cut -c 22-32` -set tcolumn = `grep Column ${log_file} | grep Timer | cut -c 22-32` -if (${ttimeloop} == "") set ttimeloop = -1 -if (${tdynamics} == "") set tdynamics = -1 -if (${tcolumn} == "") set tcolumn = -1 - -mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev -cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} run" >! ${ICE_CASEDIR}/test_output -mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev -cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} test" >! ${ICE_CASEDIR}/test_output -rm -f ${ICE_CASEDIR}/test_output.prev - -set grade = PASS -if ( $res != 0 ) then - set grade = FAIL - echo "$grade ${ICE_TESTNAME} run ${ttimeloop} ${tdynamics} ${tcolumn}" >> ${ICE_CASEDIR}/test_output - echo "$grade ${ICE_TESTNAME} test " >> ${ICE_CASEDIR}/test_output - exit 99 -endif - -echo "$grade ${ICE_TESTNAME} run ${ttimeloop} ${tdynamics} ${tcolumn}" >> ${ICE_CASEDIR}/test_output -echo "$grade ${ICE_TESTNAME} test " >> ${ICE_CASEDIR}/test_output - diff --git a/configuration/scripts/tests/test_qcchkf.script b/configuration/scripts/tests/test_qcchkf.script deleted file mode 100644 index 81b5f05fc..000000000 --- a/configuration/scripts/tests/test_qcchkf.script +++ /dev/null @@ -1,36 +0,0 @@ - -cp ${ICE_SANDBOX}/configuration/scripts/tests/QC/CICE_t_critical_p0.8.nc . -cp ${ICE_SANDBOX}/configuration/scripts/tests/QC/CICE_Lookup_Table_p0.8_n1825.nc . - -#---------------------------------------------------- -# Run the CICE model -# cice.run returns -1 if run did not complete successfully - -./cice.run -set res="$status" - -set log_file = `ls -t1 ${ICE_RUNDIR}/cice.runlog* | head -1` -set ttimeloop = `grep TimeLoop ${log_file} | grep Timer | cut -c 22-32` -set tdynamics = `grep Dynamics ${log_file} | grep Timer | cut -c 22-32` -set tcolumn = `grep Column ${log_file} | grep Timer | cut -c 22-32` -if (${ttimeloop} == "") set ttimeloop = -1 -if (${tdynamics} == "") set tdynamics = -1 -if (${tcolumn} == "") set tcolumn = -1 - -mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev -cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} run" >! ${ICE_CASEDIR}/test_output -mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev -cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} test" >! 
${ICE_CASEDIR}/test_output -rm -f ${ICE_CASEDIR}/test_output.prev - -set grade = PASS -if ( $res != 0 ) then - set grade = FAIL - echo "$grade ${ICE_TESTNAME} run ${ttimeloop} ${tdynamics} ${tcolumn}" >> ${ICE_CASEDIR}/test_output - echo "$grade ${ICE_TESTNAME} test " >> ${ICE_CASEDIR}/test_output - exit 99 -endif - -echo "$grade ${ICE_TESTNAME} run ${ttimeloop} ${tdynamics} ${tcolumn}" >> ${ICE_CASEDIR}/test_output -echo "$grade ${ICE_TESTNAME} test " >> ${ICE_CASEDIR}/test_output - diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 38f38b6b1..8ec9c8f4a 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -675,6 +675,7 @@ either Celsius or Kelvin units). "Tffresh", "freezing temp of fresh ice", "273.15 K" "tfrz_option", "form of ocean freezing temperature", "" "thinS", "minimum ice thickness for brine tracer", "" + "timer_stats", "logical to turn on extra timer statistics", ".false." "timesecs", "total elapsed time in seconds", "s" "time_beg", "beginning time for history averages", "" "time_bounds", "beginning and ending time for history averages", "" diff --git a/doc/source/conf.py b/doc/source/conf.py index 099f65403..8b9aecaa6 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -54,7 +54,7 @@ # General information about the project. project = u'CICE' -copyright = u'2021, Triad National Security, LLC (code) and National Center for Atmospheric Research (documentation)' +copyright = u'2022, Triad National Security, LLC (code) and National Center for Atmospheric Research (documentation)' author = u'CICE-Consortium' # The version info for the project you're documenting, acts as replacement for @@ -62,9 +62,9 @@ # built documents. # # The short X.Y version. -version = u'6.3.0' +version = u'6.3.1' # The full version, including alpha/beta/rc tags. -version = u'6.3.0' +version = u'6.3.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/intro/copyright.rst b/doc/source/intro/copyright.rst index f09f6c58d..86b15b8d2 100644 --- a/doc/source/intro/copyright.rst +++ b/doc/source/intro/copyright.rst @@ -5,7 +5,7 @@ Copyright ============================= -© Copyright 2021, Triad National Security LLC. All rights reserved. +© Copyright 2022, Triad National Security LLC. All rights reserved. This software was produced under U.S. Government contract 89233218CNA000001 for Los Alamos National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S. Department diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index aa63facf7..51437ae1e 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -81,25 +81,34 @@ can be modified as needed. 
"ICE_TARGET", "string", "build target", "set by cice.setup" "ICE_IOTYPE", "string", "I/O format", "set by cice.setup" " ", "netcdf", "serial netCDF" - " ", "pio", "parallel netCDF" " ", "none", "netCDF library is not available" + " ", "pio", "parallel netCDF" "ICE_CLEANBUILD", "true, false", "automatically clean before building", "true" "ICE_CPPDEFS", "user defined preprocessor macros for build", "null" "ICE_QUIETMODE", "true, false", "reduce build output to the screen", "false" "ICE_GRID", "string (see below)", "grid", "set by cice.setup" - " ", "gx3", "3-deg displace-pole (Greenland) global grid", " " - " ", "gx1", "1-deg displace-pole (Greenland) global grid", " " - " ", "tx1", "1-deg tripole global grid", " " " ", "gbox80", "80x80 box", " " " ", "gbox128", "128x128 box", " " - "ICE_NTASKS", "integer", "number of tasks, must be set to 1", "set by cice.setup" - "ICE_NTHRDS", "integer", "number of threads per task, must be set to 1", "set by cice.setup" + " ", "gbox180", "180x180 box", " " + " ", "gx1", "1-deg displace-pole (Greenland) global grid", " " + " ", "gx3", "3-deg displace-pole (Greenland) global grid", " " + " ", "tx1", "1-deg tripole global grid", " " + "ICE_NTASKS", "integer", "number of MPI tasks", "set by cice.setup" + "ICE_NTHRDS", "integer", "number of threads per task", "set by cice.setup" + "ICE_OMPSCHED", "string", "OpenMP SCHEDULE env setting", "static,1" "ICE_TEST", "string", "test setting if using a test", "set by cice.setup" "ICE_TESTNAME", "string", "test name if using a test", "set by cice.setup" - "ICE_BASELINE", "string", "baseline directory name, associated with cice.setup -bdir ", "set by cice.setup" + "ICE_TESTID", "string", "test name testid", "set by cice.setup" + "ICE_BASELINE", "string", "baseline directory name, associated with cice.setup --bdir ", "set by cice.setup" "ICE_BASEGEN", "string", "baseline directory name for regression generation, associated with cice.setup -bgen ", "set by cice.setup" "ICE_BASECOM", "string", "baseline directory name for regression comparison, associated with cice.setup -bcmp ", "set by cice.setup" - "ICE_BFBCOMP", "string", "location of case for comparison, associated with cice.setup -td", "set by cice.setup" + "ICE_BFBCOMP", "string", "location of case for comparison, associated with cice.setup --bcmp", "set by cice.setup" + "ICE_BFBTYPE", "string", "type and files used in BFBCOMP", "restart" + " ", "log", "log file comparison for bit for bit", " " + " ", "logrest", "log and restart files for bit for bit", " " + " ", "qcchk", "QC test for same climate", " " + " ", "qcchkf", "QC test for different climate", " " + " ", "restart", "restart files for bit for bit", " " "ICE_SPVAL", "string", "special value for cice.settings strings", "set by cice.setup" "ICE_RUNLENGTH", "integer (see below)", "batch run length default", "set by cice.setup" " ", "-1", "15 minutes (default)", " " @@ -111,6 +120,7 @@ can be modified as needed. 
"ICE_ACCOUNT", "string", "batch account number", "set by cice.setup, .cice_proj or by default" "ICE_QUEUE", "string", "batch queue name", "set by cice.setup or by default" "ICE_THREADED", "true, false", "force threading in compile, will always compile threaded if ICE_NTHRDS :math:`> 1`", "false" + "ICE_COMMDIR", "mpi, serial", "specify infrastructure comm version", "set by ICE_NTASKS" "ICE_BLDDEBUG", "true, false", "turn on compile debug flags", "false" "ICE_COVERAGE", "true, false", "turn on code coverage flags", "false" @@ -214,6 +224,7 @@ setup_nml "``runtype``", "``continue``", "restart using ``pointer_file``", "``initial``" "", "``initial``", "start from ``ice_ic``", "" "``sec_init``", "integer", "the initial second if not using restart", "0" + "``timer_stats``", "logical", "controls extra timer output", "``.false.``" "``use_leap_years``", "logical", "include leap days", "``.false.``" "``use_restart_time``", "logical", "set initial date using restart file on initial runtype only", "``.false.``" "``version_name``", "string", "model version", "'unknown_version_name'" diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index a838f887b..624d135c3 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -214,7 +214,8 @@ and chooses a block size ``block_size_x`` :math:`\times`\ ``block_size_y``, and ``distribution_type`` in **ice\_in**. That information is used to determine how the blocks are distributed across the processors, and how the processors are -distributed across the grid domain. Recommended combinations of these +distributed across the grid domain. The model is parallelized over blocks +for both MPI and OpenMP. Some suggested combinations for these parameters for best performance are given in Section :ref:`performance`. The script **cice.setup** computes some default decompositions and layouts but the user can overwrite the defaults by manually changing the values in @@ -553,7 +554,8 @@ The user specifies the total number of tasks and threads in **cice.settings** and the block size and decompostion in the namelist file. The main trades offs are the relative efficiency of large square blocks versus model internal load balance -as CICE computation cost is very small for ice-free blocks. +as CICE computation cost is very small for ice-free blocks. The code +is parallelized over blocks for both MPI and OpenMP. Smaller, more numerous blocks provides an opportunity for better load balance by allocating each processor both ice-covered and ice-free blocks. But smaller, more numerous blocks becomes @@ -564,6 +566,18 @@ volume-to-surface ratio important for communication cost. Often 3 to 8 blocks per processor provide the decompositions flexiblity to create reasonable load balance configurations. +Like MPI, load balance +of blocks across threads is important for efficient performance. Most of the OpenMP +threading is implemented with ``SCHEDULE(runtime)``, so the OMP_SCHEDULE env +variable can be used to set the OpenMPI schedule. The default ``OMP_SCHEDULE`` +setting is defined by the +variable ``ICE_OMPSCHE`` in **cice.settings**. ``OMP_SCHEDULE`` values of "STATIC,1" +and "DYNAMIC,1" are worth testing. The OpenMP implementation in +CICE is constantly under review, but users should validate results and +performance on their machine. CICE should be bit-for-bit with different block sizes, +different decompositions, different MPI task counts, and different OpenMP threads. 
+Finally, we recommend setting the ``OMP_STACKSIZE`` env variable to 32M or greater. + The ``distribution_type`` options allow standard Cartesian distributions of blocks, redistribution via a ‘rake’ algorithm for improved load balancing across processors, and redistribution based on space-filling @@ -1227,15 +1241,18 @@ Timers are declared and initialized in **ice\_timers.F90**, and the code to be timed is wrapped with calls to *ice\_timer\_start* and *ice\_timer\_stop*. Finally, *ice\_timer\_print* writes the results to the log file. The optional "stats" argument (true/false) prints -additional statistics. Calling *ice\_timer\_print\_all* prints all of +additional statistics. The "stats" argument can be set via the ``timer_stats`` +namelist option. Calling *ice\_timer\_print\_all* prints all of the timings at once, rather than having to call each individually. Currently, the timers are set up as in :ref:`timers`. Section :ref:`addtimer` contains instructions for adding timers. The timings provided by these timers are not mutually exclusive. For -example, the column timer (5) includes the timings from 6–10, and -subroutine *bound* (timer 15) is called from many different places in -the code, including the dynamics and advection routines. +example, the Column timer includes the timings from several other +timers, while the Bound timer is called from many different places in +the code, including the dynamics and advection routines. The +Dynamics, Advection, and Column timers do not overlap and represent +most of the overall model work. The timers use *MPI\_WTIME* for parallel runs and the F90 intrinsic *system\_clock* for single-processor runs. @@ -1251,35 +1268,41 @@ The timers use *MPI\_WTIME* for parallel runs and the F90 intrinsic +--------------+-------------+----------------------------------------------------+ | 1 | Total | the entire run | +--------------+-------------+----------------------------------------------------+ - | 2 | Step | total minus initialization and exit | + | 2 | Timeloop | total minus initialization and exit | +--------------+-------------+----------------------------------------------------+ - | 3 | Dynamics | EVP | + | 3 | Dynamics | dynamics | +--------------+-------------+----------------------------------------------------+ | 4 | Advection | horizontal transport | +--------------+-------------+----------------------------------------------------+ | 5 | Column | all vertical (column) processes | +--------------+-------------+----------------------------------------------------+ - | 6 | Thermo | vertical thermodynamics | + | 6 | Thermo | vertical thermodynamics, part of Column timer | + +--------------+-------------+----------------------------------------------------+ + | 7 | Shortwave | SW radiation and albedo, part of Thermo timer | + +--------------+-------------+----------------------------------------------------+ + | 8 | Ridging | mechanical redistribution, part of Column timer | + +--------------+-------------+----------------------------------------------------+ + | 9 | FloeSize | floe size, part of Column timer | +--------------+-------------+----------------------------------------------------+ - | 7 | Shortwave | SW radiation and albedo | + | 10 | Coupling | sending/receiving coupler messages | +--------------+-------------+----------------------------------------------------+ - | 8 | Meltponds | melt ponds | + | 11 | ReadWrite | reading/writing files | +--------------+-------------+----------------------------------------------------+ - | 9 | Ridging | 
mechanical redistribution | + | 12 | Diags | diagnostics (log file) | +--------------+-------------+----------------------------------------------------+ - | 10 | Cat Conv | transport in thickness space | + | 13 | History | history output | +--------------+-------------+----------------------------------------------------+ - | 11 | Coupling | sending/receiving coupler messages | + | 14 | Bound | boundary conditions and subdomain communications | +--------------+-------------+----------------------------------------------------+ - | 12 | ReadWrite | reading/writing files | + | 15 | BGC | biogeochemistry, part of Thermo timer | +--------------+-------------+----------------------------------------------------+ - | 13 | Diags | diagnostics (log file) | + | 16 | Forcing | forcing | +--------------+-------------+----------------------------------------------------+ - | 14 | History | history output | + | 17 | 1d-evp | 1d evp, part of Dynamics timer | +--------------+-------------+----------------------------------------------------+ - | 15 | Bound | boundary conditions and subdomain communications | + | 18 | 2d-evp | 2d evp, part of Dynamics timer | +--------------+-------------+----------------------------------------------------+ - | 16 | BGC | biogeochemistry | + | 19 | UpdState | update state | +--------------+-------------+----------------------------------------------------+ .. _restartfiles: diff --git a/icepack b/icepack index 152bd701e..76ecd418d 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit 152bd701e0cf3ec4385e5ce81918ba94e7a791cb +Subproject commit 76ecd418d2efad7e74fe35c4ec85f0830923bda6 From 21768d81045583162ee3682004ebddec9f373a0e Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Thu, 10 Mar 2022 07:48:37 -0800 Subject: [PATCH 078/109] Fix box2001 forcing, restart fields on land, bathymetry default value, omp_suite (#65) * Fix several problems noted in PR https://github.com/apcraig/CICE/pull/64. This should address the problems with the boxrestore test errors. - Update box2001 so it's bit-for-bit with different blocks/decomps/pe counts. - Initialize bathymetry values at all gridcells when bathymetry_format='default' and use_bathymetry=.false. * update comparelog to exclude extraneous icepack output, help prod comparisons * - Zero out certain fields on land on restart files. Some fields have non-zero values over land by default and this causes problems with land block elimination and comparisons of different decompositions. Does not affect science. - Update omp_suite to use different block sizes in comparisons * use c0 instead of 0. 
* fix OpenMP private variables on new code * update pio restart diagnostics format --- cicecore/cicedynB/general/ice_forcing.F90 | 56 ++++++--- cicecore/cicedynB/infrastructure/ice_grid.F90 | 3 + .../infrastructure/ice_restart_driver.F90 | 16 ++- .../infrastructure/io/io_pio2/ice_restart.F90 | 13 +- cicecore/shared/ice_restart_column.F90 | 38 +++++- configuration/scripts/tests/comparelog.csh | 4 +- configuration/scripts/tests/omp_suite.ts | 114 +++++++++--------- 7 files changed, 160 insertions(+), 84 deletions(-) diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90 index c53f944ac..320042940 100755 --- a/cicecore/cicedynB/general/ice_forcing.F90 +++ b/cicecore/cicedynB/general/ice_forcing.F90 @@ -682,7 +682,7 @@ subroutine get_forcing_atmo !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -2438,7 +2438,7 @@ subroutine LY_data enddo ! AOMIP - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -3480,7 +3480,7 @@ subroutine monthly_data enddo ! AOMIP - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -5313,10 +5313,10 @@ subroutine box2001_data_atm ! these are defined at the u point ! authors: Elizabeth Hunke, LANL - use ice_domain, only: nblocks + use ice_domain, only: nblocks, blocks_ice use ice_domain_size, only: max_blocks use ice_calendar, only: timesecs - use ice_blocks, only: nx_block, ny_block, nghost + use ice_blocks, only: block, get_block, nx_block, ny_block, nghost use ice_flux, only: uatm, vatm, wind, rhoa, strax, stray use ice_state, only: aice @@ -5325,8 +5325,15 @@ subroutine box2001_data_atm integer (kind=int_kind) :: & iblk, i,j ! loop indices + integer (kind=int_kind) :: & + iglob(nx_block), & ! global indices + jglob(ny_block) ! global indices + + type (block) :: & + this_block ! block information for current block + real (kind=dbl_kind) :: & - secday, pi , puny, period, pi2, tau + secday, pi , puny, period, pi2, tau character(len=*), parameter :: subname = '(box2001_data_atm)' @@ -5341,6 +5348,10 @@ subroutine box2001_data_atm do j = 1, ny_block do i = 1, nx_block + this_block = get_block(blocks_ice(iblk),iblk) + iglob = this_block%i_glob + jglob = this_block%j_glob + !tcraig, move to box2001_data_ocn ! ! ocean current ! ! constant in time, could be initialized in ice_flux.F90 @@ -5354,14 +5365,14 @@ subroutine box2001_data_atm ! wind components uatm(i,j,iblk) = c5 + (sin(pi2*timesecs/period)-c3) & - * sin(pi2*real(i-nghost, kind=dbl_kind) & + * sin(pi2*real(iglob(i), kind=dbl_kind) & /real(nx_global,kind=dbl_kind)) & - * sin(pi *real(j-nghost, kind=dbl_kind) & + * sin(pi *real(jglob(j), kind=dbl_kind) & /real(ny_global,kind=dbl_kind)) vatm(i,j,iblk) = c5 + (sin(pi2*timesecs/period)-c3) & - * sin(pi *real(i-nghost, kind=dbl_kind) & + * sin(pi *real(iglob(i), kind=dbl_kind) & /real(nx_global,kind=dbl_kind)) & - * sin(pi2*real(j-nghost, kind=dbl_kind) & + * sin(pi2*real(jglob(j), kind=dbl_kind) & /real(ny_global,kind=dbl_kind)) ! wind stress wind(i,j,iblk) = sqrt(uatm(i,j,iblk)**2 + vatm(i,j,iblk)**2) @@ -5408,10 +5419,10 @@ subroutine box2001_data_ocn ! these are defined at the u point ! 
authors: Elizabeth Hunke, LANL - use ice_domain, only: nblocks + use ice_domain, only: nblocks, blocks_ice use ice_domain_size, only: max_blocks use ice_calendar, only: timesecs - use ice_blocks, only: nx_block, ny_block, nghost + use ice_blocks, only: block, get_block, nx_block, ny_block, nghost use ice_flux, only: uocn, vocn use ice_grid, only: uvm @@ -5420,8 +5431,15 @@ subroutine box2001_data_ocn integer (kind=int_kind) :: & iblk, i,j ! loop indices + integer (kind=int_kind) :: & + iglob(nx_block), & ! global indices + jglob(ny_block) ! global indices + + type (block) :: & + this_block ! block information for current block + real (kind=dbl_kind) :: & - secday, pi , puny, period, pi2, tau + secday, pi , puny, period, pi2, tau character(len=*), parameter :: subname = '(box2001_data_ocn)' @@ -5431,12 +5449,16 @@ subroutine box2001_data_ocn do j = 1, ny_block do i = 1, nx_block + this_block = get_block(blocks_ice(iblk),iblk) + iglob = this_block%i_glob + jglob = this_block%j_glob + ! ocean current ! constant in time, could be initialized in ice_flux.F90 - uocn(i,j,iblk) = p2*real(j-nghost, kind=dbl_kind) & - / real(nx_global,kind=dbl_kind) - p1 - vocn(i,j,iblk) = -p2*real(i-nghost, kind=dbl_kind) & - / real(ny_global,kind=dbl_kind) + p1 + uocn(i,j,iblk) = p2*real(jglob(j), kind=dbl_kind) & + / real(ny_global,kind=dbl_kind) - p1 + vocn(i,j,iblk) = -p2*real(iglob(i), kind=dbl_kind) & + / real(nx_global,kind=dbl_kind) + p1 uocn(i,j,iblk) = uocn(i,j,iblk) * uvm(i,j,iblk) vocn(i,j,iblk) = vocn(i,j,iblk) * uvm(i,j,iblk) diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 80e876571..29853c96b 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -4177,6 +4177,8 @@ end subroutine gridbox_verts subroutine get_bathymetry + use ice_constants, only: c0 + integer (kind=int_kind) :: & i, j, k, iblk ! loop indices @@ -4228,6 +4230,7 @@ subroutine get_bathymetry depth(k) = depth(k-1) + thick(k) enddo + bathymetry = c0 do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index 474ed892e..978ca7f55 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -62,7 +62,7 @@ subroutine dumpfile(filename_spec) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_grid, only: grid_ice + use ice_grid, only: grid_ice, tmask use ice_state, only: aicen, vicen, vsnon, trcrn, uvel, vvel, & uvelE, vvelE, uvelN, vvelN @@ -97,6 +97,20 @@ subroutine dumpfile(filename_spec) diag = .true. + !----------------------------------------------------------------- + ! Zero out tracers over land + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (.not. tmask(i,j,iblk)) trcrn(i,j,:,:,iblk) = c0 + enddo + enddo + enddo + !$OMP END PARALLEL DO + !----------------------------------------------------------------- ! state variables ! Tsfc is the only tracer written to binary files. 
All other diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 74638e45a..ce3946db2 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -788,8 +788,7 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & amax = global_maxval(work(:,:,n,:),distrb_info) asum = global_sum(work(:,:,n,:), distrb_info, field_loc_center) if (my_task == master_task) then - write(nu_diag,*) ' min and max =', amin, amax - write(nu_diag,*) ' sum =',asum + write(nu_diag,*) ' min, max, sum =', amin, amax, asum, trim(vname) endif enddo else @@ -797,9 +796,7 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & amax = global_maxval(work(:,:,1,:),distrb_info) asum = global_sum(work(:,:,1,:), distrb_info, field_loc_center) if (my_task == master_task) then - write(nu_diag,*) ' min and max =', amin, amax - write(nu_diag,*) ' sum =',asum - write(nu_diag,*) '' + write(nu_diag,*) ' min, max, sum =', amin, amax, asum, trim(vname) endif endif @@ -878,8 +875,7 @@ subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag) amax = global_maxval(work(:,:,n,:),distrb_info) asum = global_sum(work(:,:,n,:), distrb_info, field_loc_center) if (my_task == master_task) then - write(nu_diag,*) ' min and max =', amin, amax - write(nu_diag,*) ' sum =',asum + write(nu_diag,*) ' min, max, sum =', amin, amax, asum, trim(vname) endif enddo else @@ -887,8 +883,7 @@ subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag) amax = global_maxval(work(:,:,1,:),distrb_info) asum = global_sum(work(:,:,1,:), distrb_info, field_loc_center) if (my_task == master_task) then - write(nu_diag,*) ' min and max =', amin, amax - write(nu_diag,*) ' sum =',asum + write(nu_diag,*) ' min, max, sum =', amin, amax, asum, trim(vname) endif endif endif diff --git a/cicecore/shared/ice_restart_column.F90 b/cicecore/shared/ice_restart_column.F90 index 074b37dbe..6ce393190 100644 --- a/cicecore/shared/ice_restart_column.F90 +++ b/cicecore/shared/ice_restart_column.F90 @@ -913,6 +913,7 @@ subroutine write_restart_hbrine() use ice_blocks, only: block, get_block use ice_domain, only: nblocks, blocks_ice use ice_fileunits, only: nu_dump_hbrine + use ice_grid, only: tmask use ice_state, only: trcrn use ice_restart,only: write_restart_field @@ -949,7 +950,8 @@ subroutine write_restart_hbrine() do j = jlo, jhi do i = ilo, ihi do n = 1, ncat - if (first_ice (i,j,n,iblk)) then + ! zero out first_ice over land + if (tmask(i,j,iblk) .and. first_ice (i,j,n,iblk)) then first_ice_real(i,j,n,iblk) = c1 else first_ice_real(i,j,n,iblk) = c0 @@ -983,6 +985,7 @@ subroutine write_restart_bgc() use ice_fileunits, only: nu_dump_bgc use ice_flux_bgc, only: nit, amm, sil, dmsp, dms, algalN, & doc, don, dic, fed, fep, zaeros, hum + use ice_grid, only: tmask use ice_state, only: trcrn use ice_flux, only: sss use ice_restart, only: write_restart_field @@ -1058,6 +1061,39 @@ subroutine write_restart_bgc() diag = .true. + !----------------------------------------------------------------- + ! Zero out tracers over land + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + if (.not. 
tmask(i,j,iblk)) then + if (tr_bgc_N ) algalN(i,j,:,iblk) = c0 + if (tr_bgc_C ) doc (i,j,:,iblk) = c0 + if (tr_bgc_C ) dic (i,j,:,iblk) = c0 + if (tr_bgc_Nit) nit (i,j ,iblk) = c0 + if (tr_bgc_Am ) amm (i,j ,iblk) = c0 + if (tr_bgc_Sil) sil (i,j ,iblk) = c0 + if (tr_bgc_hum) hum (i,j ,iblk) = c0 + if (tr_bgc_DMS) dms (i,j ,iblk) = c0 + if (tr_bgc_DMS) dmsp (i,j ,iblk) = c0 + if (tr_bgc_DON) don (i,j,:,iblk) = c0 + if (tr_bgc_Fe ) fed (i,j,:,iblk) = c0 + if (tr_bgc_Fe ) fep (i,j,:,iblk) = c0 + if (solve_zsal) sss (i,j ,iblk) = c0 + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + !----------------------------------------------------------------- ! Salinity and extras !----------------------------------------------------------------- diff --git a/configuration/scripts/tests/comparelog.csh b/configuration/scripts/tests/comparelog.csh index af6b2d76e..576289cd7 100755 --- a/configuration/scripts/tests/comparelog.csh +++ b/configuration/scripts/tests/comparelog.csh @@ -55,8 +55,8 @@ if (${filearg} == 1) then touch ${test_out} if (${cicefile} == 1) then - cat ${base_data} | grep -A 99999999 "total ice area (km^2)" | grep -e istep1: -e = | grep -iv "min, max, sum" >&! ${base_out} - cat ${test_data} | grep -A 99999999 "total ice area (km^2)" | grep -e istep1: -e = | grep -iv "min, max, sum" >&! ${test_out} + cat ${base_data} | grep -A 99999999 "total ice area (km^2)" | grep -e istep1: -e = | grep -iv "min, max, sum" | grep -iv "init_vert" | grep -iv "ridge_ice" >&! ${base_out} + cat ${test_data} | grep -A 99999999 "total ice area (km^2)" | grep -e istep1: -e = | grep -iv "min, max, sum" | grep -iv "init_vert" | grep -iv "ridge_ice" >&! ${test_out} else sed -n '/RunningUnitTest/,$p' ${base_data} >! ${base_out} sed -n '/RunningUnitTest/,$p' ${test_data} >! 
${test_out} diff --git a/configuration/scripts/tests/omp_suite.ts b/configuration/scripts/tests/omp_suite.ts index 8ac499c2f..d814e2b3d 100644 --- a/configuration/scripts/tests/omp_suite.ts +++ b/configuration/scripts/tests/omp_suite.ts @@ -8,6 +8,7 @@ smoke gx3 4x4 alt04,reprosum,run10day smoke gx3 4x4 alt05,reprosum,run10day smoke gx3 8x2 alt06,reprosum,run10day smoke gx3 8x2 bgcz,reprosum,run10day +smoke gx1 15x2 reprosum,run10day smoke gx1 15x2 seabedprob,reprosum,run10day smoke gx3 14x2 fsd12,reprosum,run10day smoke gx3 11x2 isotope,reprosum,run10day @@ -25,25 +26,26 @@ smoke gbox80 11x3 boxslotcyl,reprosum,run10day smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest smoke_gx3_8x4_diag1_reprosum_run10day smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread smoke_gx3_8x4_diag1_reprosum_run10day smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread smoke_gx3_6x2_alt01_reprosum_run10day -smoke gx3 16x1 alt02,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_alt02_reprosum_run10day -smoke gx3 24x1 alt03,reprosum,run10day,cmplogrest,thread smoke_gx3_12x2_alt03_droundrobin_reprosum_run10day -smoke gx3 24x1 alt04,reprosum,run10day,cmplogrest,thread smoke_gx3_4x4_alt04_reprosum_run10day -smoke gx3 14x1 alt05,reprosum,run10day,cmplogrest,thread smoke_gx3_4x4_alt05_reprosum_run10day -smoke gx3 24x1 alt06,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_alt06_reprosum_run10day -smoke gx3 12x1 bgcz,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_bgcz_reprosum_run10day -smoke gx1 28x1 seabedprob,reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day_seabedprob -smoke gx3 30x1 fsd12,reprosum,run10day,cmplogrest,thread smoke_gx3_14x2_fsd12_reprosum_run10day -smoke gx3 16x1 isotope,reprosum,run10day,cmplogrest,thread smoke_gx3_11x2_isotope_reprosum_run10day -smoke gx3 28x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread smoke_gx3_8x4_icdefault_reprosum_run10day_snwitdrdg_snwgrain -smoke gx3 18x1 dynpicard,reprosum,run10day,cmplogrest,thread smoke_gx3_6x4_dynpicard_reprosum_run10day -smoke gx3 20x1 zsal,reprosum,run10day,cmplogrest,thread smoke_gx3_8x3_reprosum_run10day_zsal +smoke gx3 8x1 alt02,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_alt02_reprosum_run10day +smoke gx3 8x1 alt03,reprosum,run10day,cmplogrest,thread smoke_gx3_12x2_alt03_droundrobin_reprosum_run10day +smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread smoke_gx3_4x4_alt04_reprosum_run10day +smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread smoke_gx3_4x4_alt05_reprosum_run10day +smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_alt06_reprosum_run10day +smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_bgcz_reprosum_run10day +smoke gx1 18x1 reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day +smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day_seabedprob +smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread smoke_gx3_14x2_fsd12_reprosum_run10day +smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread smoke_gx3_11x2_isotope_reprosum_run10day +smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread smoke_gx3_8x4_icdefault_reprosum_run10day_snwitdrdg_snwgrain +smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread smoke_gx3_6x4_dynpicard_reprosum_run10day +smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread smoke_gx3_8x3_reprosum_run10day_zsal -smoke gbox128 20x1 reprosum,run10day,cmplogrest,thread smoke_gbox128_8x2_reprosum_run10day -smoke gbox128 16x1 
boxnodyn,reprosum,run10day,cmplogrest,thread smoke_gbox128_12x2_boxnodyn_reprosum_run10day -smoke gbox128 14x1 boxadv,reprosum,run10day,cmplogrest,thread smoke_gbox128_9x2_boxadv_reprosum_run10day -smoke gbox128 24x1 boxrestore,reprosum,run10day,cmplogrest,thread smoke_gbox128_14x2_boxrestore_reprosum_run10day -smoke gbox80 19x1 box2001,reprosum,run10day,cmplogrest,thread smoke_gbox80_4x5_box2001_reprosum_run10day -smoke gbox80 8x4 boxslotcyl,reprosum,run10day,cmplogrest,thread smoke_gbox80_11x3_boxslotcyl_reprosum_run10day +smoke gbox128 8x1 reprosum,run10day,cmplogrest,thread smoke_gbox128_8x2_reprosum_run10day +smoke gbox128 8x1 boxnodyn,reprosum,run10day,cmplogrest,thread smoke_gbox128_12x2_boxnodyn_reprosum_run10day +smoke gbox128 8x1 boxadv,reprosum,run10day,cmplogrest,thread smoke_gbox128_9x2_boxadv_reprosum_run10day +smoke gbox128 8x1 boxrestore,reprosum,run10day,cmplogrest,thread smoke_gbox128_14x2_boxrestore_reprosum_run10day +smoke gbox80 8x1 box2001,reprosum,run10day,cmplogrest,thread smoke_gbox80_4x5_box2001_reprosum_run10day +smoke gbox80 8x1 boxslotcyl,reprosum,run10day,cmplogrest,thread smoke_gbox80_11x3_boxslotcyl_reprosum_run10day #gridC @@ -55,6 +57,7 @@ smoke gx3 4x4 alt04,reprosum,run10day,gridc smoke gx3 4x4 alt05,reprosum,run10day,gridc smoke gx3 8x2 alt06,reprosum,run10day,gridc smoke gx3 8x2 bgcz,reprosum,run10day,gridc +smoke gx1 15x2 reprosum,run10day,gridc smoke gx1 15x2 seabedprob,reprosum,run10day,gridc smoke gx3 14x2 fsd12,reprosum,run10day,gridc smoke gx3 11x2 isotope,reprosum,run10day,gridc @@ -72,25 +75,26 @@ smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridc smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest,gridc smoke_gx3_8x4_gridc_diag1_reprosum_run10day smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_gridc_diag1_reprosum_run10day smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x2_alt01_gridc_reprosum_run10day -smoke gx3 16x1 alt02,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_alt02_gridc_reprosum_run10day -smoke gx3 24x1 alt03,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_12x2_alt03_droundrobin_gridc_reprosum_run10day -smoke gx3 24x1 alt04,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_4x4_alt04_gridc_reprosum_run10day -smoke gx3 14x1 alt05,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_4x4_alt05_gridc_reprosum_run10day -smoke gx3 24x1 alt06,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_alt06_gridc_reprosum_run10day -smoke gx3 12x1 bgcz,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_bgcz_gridc_reprosum_run10day -smoke gx1 28x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridc smoke_gx1_15x2_gridc_reprosum_run10day_seabedprob -smoke gx3 30x1 fsd12,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_14x2_fsd12_gridc_reprosum_run10day -smoke gx3 16x1 isotope,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_11x2_gridc_isotope_reprosum_run10day -smoke gx3 28x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_gridc_icdefault_reprosum_run10day_snwitdrdg_snwgrain -smoke gx3 18x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x4_dynpicard_gridc_reprosum_run10day -smoke gx3 20x1 zsal,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x3_gridc_reprosum_run10day_zsal +smoke gx3 8x1 alt02,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_alt02_gridc_reprosum_run10day +smoke gx3 8x1 alt03,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_12x2_alt03_droundrobin_gridc_reprosum_run10day +smoke gx3 8x1 
alt04,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_4x4_alt04_gridc_reprosum_run10day +smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_4x4_alt05_gridc_reprosum_run10day +smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_alt06_gridc_reprosum_run10day +smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_bgcz_gridc_reprosum_run10day +smoke gx1 18x1 reprosum,run10day,cmplogrest,thread,gridc smoke_gx1_15x2_gridc_reprosum_run10day +smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridc smoke_gx1_15x2_gridc_reprosum_run10day_seabedprob +smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_14x2_fsd12_gridc_reprosum_run10day +smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_11x2_gridc_isotope_reprosum_run10day +smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_gridc_icdefault_reprosum_run10day_snwitdrdg_snwgrain +smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x4_dynpicard_gridc_reprosum_run10day +smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x3_gridc_reprosum_run10day_zsal -smoke gbox128 20x1 reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_8x2_gridc_reprosum_run10day -smoke gbox128 16x1 boxnodyn,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_12x2_boxnodyn_gridc_reprosum_run10day -smoke gbox128 14x1 boxadv,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_9x2_boxadv_gridc_reprosum_run10day -smoke gbox128 24x1 boxrestore,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_14x2_boxrestore_gridc_reprosum_run10day -smoke gbox80 19x1 box2001,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox80_4x5_box2001_gridc_reprosum_run10day -smoke gbox80 8x4 boxslotcyl,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox80_11x3_boxslotcyl_gridc_reprosum_run10day +smoke gbox128 8x1 reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_8x2_gridc_reprosum_run10day +smoke gbox128 8x1 boxnodyn,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_12x2_boxnodyn_gridc_reprosum_run10day +smoke gbox128 8x1 boxadv,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_9x2_boxadv_gridc_reprosum_run10day +smoke gbox128 8x1 boxrestore,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_14x2_boxrestore_gridc_reprosum_run10day +smoke gbox80 8x1 box2001,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox80_4x5_box2001_gridc_reprosum_run10day +smoke gbox80 8x1 boxslotcyl,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox80_11x3_boxslotcyl_gridc_reprosum_run10day #gridCD @@ -102,6 +106,7 @@ smoke gx3 4x4 alt04,reprosum,run10day,gridcd smoke gx3 4x4 alt05,reprosum,run10day,gridcd smoke gx3 8x2 alt06,reprosum,run10day,gridcd smoke gx3 8x2 bgcz,reprosum,run10day,gridcd +smoke gx1 15x2 reprosum,run10day,gridcd smoke gx1 15x2 seabedprob,reprosum,run10day,gridcd smoke gx3 14x2 fsd12,reprosum,run10day,gridcd smoke gx3 11x2 isotope,reprosum,run10day,gridcd @@ -119,23 +124,24 @@ smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridcd smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest,gridcd smoke_gx3_8x4_gridcd_diag1_reprosum_run10day smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_gridcd_diag1_reprosum_run10day smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x2_alt01_gridcd_reprosum_run10day -smoke gx3 16x1 alt02,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_alt02_gridcd_reprosum_run10day -smoke gx3 24x1 
alt03,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_12x2_alt03_droundrobin_gridcd_reprosum_run10day -smoke gx3 24x1 alt04,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_4x4_alt04_gridcd_reprosum_run10day -smoke gx3 14x1 alt05,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_4x4_alt05_gridcd_reprosum_run10day -smoke gx3 24x1 alt06,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_alt06_gridcd_reprosum_run10day -smoke gx3 12x1 bgcz,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_bgcz_gridcd_reprosum_run10day -smoke gx1 28x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx1_15x2_gridcd_reprosum_run10day_seabedprob -smoke gx3 30x1 fsd12,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_14x2_fsd12_gridcd_reprosum_run10day -smoke gx3 16x1 isotope,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_11x2_gridcd_isotope_reprosum_run10day -smoke gx3 28x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_gridcd_icdefault_reprosum_run10day_snwitdrdg_snwgrain -smoke gx3 18x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x4_dynpicard_gridcd_reprosum_run10day -smoke gx3 20x1 zsal,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x3_gridcd_reprosum_run10day_zsal +smoke gx3 8x1 alt02,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_alt02_gridcd_reprosum_run10day +smoke gx3 8x1 alt03,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_12x2_alt03_droundrobin_gridcd_reprosum_run10day +smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_4x4_alt04_gridcd_reprosum_run10day +smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_4x4_alt05_gridcd_reprosum_run10day +smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_alt06_gridcd_reprosum_run10day +smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_bgcz_gridcd_reprosum_run10day +smoke gx1 18x1 reprosum,run10day,cmplogrest,thread,gridcd smoke_gx1_15x2_gridcd_reprosum_run10day +smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx1_15x2_gridcd_reprosum_run10day_seabedprob +smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_14x2_fsd12_gridcd_reprosum_run10day +smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_11x2_gridcd_isotope_reprosum_run10day +smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_gridcd_icdefault_reprosum_run10day_snwitdrdg_snwgrain +smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x4_dynpicard_gridcd_reprosum_run10day +smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x3_gridcd_reprosum_run10day_zsal -smoke gbox128 20x1 reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_8x2_gridcd_reprosum_run10day -smoke gbox128 16x1 boxnodyn,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_12x2_boxnodyn_gridcd_reprosum_run10day -smoke gbox128 14x1 boxadv,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_9x2_boxadv_gridcd_reprosum_run10day -smoke gbox128 24x1 boxrestore,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_14x2_boxrestore_gridcd_reprosum_run10day -smoke gbox80 19x1 box2001,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox80_4x5_box2001_gridcd_reprosum_run10day -smoke gbox80 8x4 boxslotcyl,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox80_11x3_boxslotcyl_gridcd_reprosum_run10day +smoke gbox128 8x1 reprosum,run10day,cmplogrest,thread,gridcd 
smoke_gbox128_8x2_gridcd_reprosum_run10day +smoke gbox128 8x1 boxnodyn,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_12x2_boxnodyn_gridcd_reprosum_run10day +smoke gbox128 8x1 boxadv,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_9x2_boxadv_gridcd_reprosum_run10day +smoke gbox128 8x1 boxrestore,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_14x2_boxrestore_gridcd_reprosum_run10day +smoke gbox80 8x1 box2001,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox80_4x5_box2001_gridcd_reprosum_run10day +smoke gbox80 8x1 boxslotcyl,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox80_11x3_boxslotcyl_gridcd_reprosum_run10day From 4bf2611e788342a40d7a759878b26da6706851c8 Mon Sep 17 00:00:00 2001 From: apcraig Date: Mon, 14 Mar 2022 11:51:17 -0600 Subject: [PATCH 079/109] - Work around a couple of C/CD issues, https://github.com/CICE-Consortium/CICE/issues/700 - modify gridcell + land block elimination for C/CD, using a larger stencil, needs to be investigated further - turn off maskhalo_dyn for C/CD, needs to be investigated further - Update omp_suite and gridsys_suite to extend testing - Add new histdbg option to turn on some output at each timestep to help debugging --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 4 +- .../cicedynB/infrastructure/ice_domain.F90 | 35 ++- cicecore/cicedynB/infrastructure/ice_grid.F90 | 2 +- configuration/scripts/options/set_nml.histdbg | 225 ++++++++++++++++++ configuration/scripts/tests/gridsys_suite.ts | 24 ++ configuration/scripts/tests/omp_suite.ts | 9 + 6 files changed, 288 insertions(+), 11 deletions(-) create mode 100644 configuration/scripts/options/set_nml.histdbg diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 360781a79..42fe1b53d 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -971,7 +971,9 @@ subroutine evp (dt) call ice_timer_start(timer_bound) call stack_velocity_field(uvel, vvel, fld2) - if (maskhalo_dyn) then + ! maskhalo_dyn causes non bit-for-bit results on different decomps + ! with C/CD in some cases + if (grid_ice == 'B' .and. maskhalo_dyn) then call ice_HaloUpdate (fld2, halo_info_mask, & field_loc_NEcorner, field_type_vector) else diff --git a/cicecore/cicedynB/infrastructure/ice_domain.F90 b/cicecore/cicedynB/infrastructure/ice_domain.F90 index 6f8fee49a..0d0502e85 100644 --- a/cicecore/cicedynB/infrastructure/ice_domain.F90 +++ b/cicecore/cicedynB/infrastructure/ice_domain.F90 @@ -287,7 +287,7 @@ end subroutine init_domain_blocks !*********************************************************************** - subroutine init_domain_distribution(KMTG,ULATG) + subroutine init_domain_distribution(KMTG,ULATG,grid_ice) ! This routine calls appropriate setup routines to distribute blocks ! across processors and defines arrays with block ids for any local @@ -302,6 +302,9 @@ subroutine init_domain_distribution(KMTG,ULATG) KMTG ,&! global topography ULATG ! global latitude field (radians) + character(len=*), intent(in) :: & + grid_ice ! grid_ice, B, C, CD, etc + !---------------------------------------------------------------------- ! ! local variables ! !---------------------------------------------------------------------- integer (int_kind) :: & i,j,n ,&! dummy loop indices ig,jg ,&! global indices + igm1,igp1,jgm1,jgp1,&! global indices ninfo ,&! ice_distributionGet check work_unit ,&! 
size of quantized work unit #ifdef USE_NETCDF @@ -509,10 +513,25 @@ subroutine init_domain_distribution(KMTG,ULATG) if (this_block%i_glob(i) > 0) then ig = this_block%i_glob(i) jg = this_block%j_glob(j) - if (KMTG(ig,jg) > puny .and. & - (ULATG(ig,jg) < shlat/rad_to_deg .or. & - ULATG(ig,jg) > nhlat/rad_to_deg) ) & - nocn(n) = nocn(n) + flat(ig,jg) + if (grid_ice == 'C' .or. grid_ice == 'CD') then + ! Have to be careful about block elimination with C/CD + ! Use a bigger stencil + igm1 = mod(ig-2+nx_global,nx_global)+1 + igp1 = mod(ig,nx_global)+1 + jgm1 = max(jg-1,1) + jgp1 = min(jg+1,ny_global) + if ((KMTG(ig ,jg ) > puny .or. & + KMTG(igm1,jg ) > puny .or. KMTG(igp1,jg ) > puny .or. & + KMTG(ig ,jgp1) > puny .or. KMTG(ig ,jgm1) > puny) .and. & + (ULATG(ig,jg) < shlat/rad_to_deg .or. & + ULATG(ig,jg) > nhlat/rad_to_deg) ) & + nocn(n) = nocn(n) + flat(ig,jg) + else + if (KMTG(ig,jg) > puny .and. & + (ULATG(ig,jg) < shlat/rad_to_deg .or. & + ULATG(ig,jg) > nhlat/rad_to_deg) ) & + nocn(n) = nocn(n) + flat(ig,jg) + endif endif end do endif @@ -529,10 +548,8 @@ subroutine init_domain_distribution(KMTG,ULATG) ! Keep all blocks even the ones only containing land points if (distribution_wght == 'block') nocn(n) = nx_block*ny_block #else - if (distribution_wght == 'block' .and. & ! POP style - nocn(n) > 0) nocn(n) = nx_block*ny_block - if (distribution_wght == 'blockall') & - nocn(n) = nx_block*ny_block + if (distribution_wght == 'block' .and. nocn(n) > 0) nocn(n) = nx_block*ny_block + if (.not. landblockelim) nocn(n) = max(nocn(n),1) #endif end do endif ! distribution_wght = file diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 29853c96b..39750cc75 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -400,7 +400,7 @@ subroutine init_grid1 ! distribute blocks among processors !----------------------------------------------------------------- - call init_domain_distribution(work_g2, work_g1) ! KMT, ULAT + call init_domain_distribution(work_g2, work_g1, grid_ice) ! KMT, ULAT deallocate(work_g1) deallocate(work_g2) diff --git a/configuration/scripts/options/set_nml.histdbg b/configuration/scripts/options/set_nml.histdbg new file mode 100644 index 000000000..247d185fd --- /dev/null +++ b/configuration/scripts/options/set_nml.histdbg @@ -0,0 +1,225 @@ + histfreq = 'm','d','1','h','x' + histfreq_n = 1,1,1,1,1 + histfreq_base = 'zero' + write_ic = .true. + f_tmask = .true. + f_blkmask = .true. + f_tarea = .true. + f_uarea = .true. + f_dxt = .true. + f_dyt = .true. + f_dxu = .true. + f_dyu = .true. + f_HTN = .true. + f_HTE = .true. + f_ANGLE = .true. + f_ANGLET = .true. + f_NCAT = .true. + f_VGRDi = .true. + f_VGRDs = .true. + f_VGRDb = .true. + f_VGRDa = .true. + f_bounds = .true. 
+ f_CMIP = 'm' + f_aice = 'md1h' + f_hi = 'h1dm' + f_hs = 'd1m' + f_Tsfc = 'mdh1' + f_sice = 'md' + f_uvel = 'md1' + f_vvel = 'dm1' + f_uatm = 'dm1' + f_vatm = 'dm1' + f_fswdn = 'dm1' + f_flwdn = 'md1' + f_snowfrac = 'md1' + f_snow = 'md1' + f_snow_ai = 'md1' + f_rain = 'md1' + f_rain_ai = 'md1' + f_sst = 'md1' + f_sss = 'md1' + f_uocn = 'md1' + f_vocn = 'md1' + f_frzmlt = 'md' + f_fswfac = 'md' + f_fswint_ai = 'md' + f_fswabs = 'md' + f_fswabs_ai = 'md' + f_albsni = 'md' + f_alvdr = 'md' + f_alidr = 'md' + f_alvdf = 'md' + f_alidf = 'md' + f_alvdr_ai = 'md' + f_alidr_ai = 'md' + f_alvdf_ai = 'md' + f_alidf_ai = 'md' + f_albice = 'md' + f_albsno = 'md' + f_albpnd = 'md' + f_coszen = 'md' + f_flat = 'md' + f_flat_ai = 'md' + f_fsens = 'md' + f_fsens_ai = 'md' + f_fswup = 'md' + f_flwup = 'md' + f_flwup_ai = 'md' + f_evap = 'md' + f_evap_ai = 'md' + f_Tair = 'md' + f_Tref = 'md' + f_Qref = 'md' + f_congel = 'md' + f_frazil = 'md' + f_snoice = 'md' + f_dsnow = 'md' + f_melts = 'md' + f_meltt = 'md' + f_meltb = 'md' + f_meltl = 'md' + f_fresh = 'md' + f_fresh_ai = 'md' + f_fsalt = 'md' + f_fsalt_ai = 'md' + f_fbot = 'md' + f_fhocn = 'md' + f_fhocn_ai = 'md' + f_fswthru = 'md' + f_fswthru_ai = 'md' + f_fsurf_ai = 'md' + f_fcondtop_ai = 'md' + f_fmeltt_ai = 'md' + f_strairx = 'md1' + f_strairy = 'md1' + f_strtltx = 'md1' + f_strtlty = 'md1' + f_strcorx = 'md1' + f_strcory = 'md1' + f_strocnx = 'md1' + f_strocny = 'md1' + f_strintx = 'md1' + f_strinty = 'md1' + f_taubx = 'md1' + f_tauby = 'md1' + f_strength = 'md1' + f_divu = 'md1' + f_shear = 'md1' + f_sig1 = 'md1' + f_sig2 = 'md1' + f_sigP = 'md1' + f_dvidtt = 'md' + f_dvidtd = 'md' + f_daidtt = 'md' + f_daidtd = 'md' + f_dagedtt = 'md' + f_dagedtd = 'md' + f_mlt_onset = 'md' + f_frz_onset = 'md' + f_hisnap = 'md' + f_aisnap = 'md' + f_trsig = 'md' + f_icepresent = 'md' + f_iage = 'md' + f_FY = 'md' + f_aicen = 'md' + f_vicen = 'md' + f_vsnon = 'md' + f_snowfracn = 'md' + f_keffn_top = 'md' + f_Tinz = 'md' + f_Sinz = 'md' + f_Tsnz = 'md' + f_fsurfn_ai = 'md' + f_fcondtopn_ai = 'md' + f_fmelttn_ai = 'md' + f_flatn_ai = 'md' + f_fsensn_ai = 'md' + f_alvl = 'md' + f_vlvl = 'md' + f_ardg = 'md' + f_vrdg = 'md' + f_dardg1dt = 'md' + f_dardg2dt = 'md' + f_dvirdgdt = 'md' + f_opening = 'md' + f_ardgn = 'md' + f_vrdgn = 'md' + f_dardg1ndt = 'md' + f_dardg2ndt = 'md' + f_dvirdgndt = 'md' + f_krdgn = 'md' + f_aparticn = 'md' + f_aredistn = 'md' + f_vredistn = 'md' + f_araftn = 'md' + f_vraftn = 'md' + f_apondn = 'md' + f_apeffn = 'md' + f_hpondn = 'md' + f_apond = 'md' + f_hpond = 'md' + f_ipond = 'md' + f_apeff = 'md' + f_apond_ai = 'md' + f_hpond_ai = 'md' + f_ipond_ai = 'md' + f_apeff_ai = 'md' + f_fiso_atm = 'md' + f_fiso_ocn = 'md' + f_iso = 'md' + f_faero_atm = 'md' + f_faero_ocn = 'md' + f_aero = 'md' + f_fbio = 'md' + f_fbio_ai = 'md' + f_zaero = 'md' + f_bgc_S = 'md' + f_bgc_N = 'md' + f_bgc_C = 'md' + f_bgc_DOC = 'md' + f_bgc_DIC = 'md' + f_bgc_chl = 'md' + f_bgc_Nit = 'md' + f_bgc_Am = 'md' + f_bgc_Sil = 'md' + f_bgc_DMSPp = 'md' + f_bgc_DMSPd = 'md' + f_bgc_DMS = 'md' + f_bgc_DON = 'md' + f_bgc_Fe = 'md' + f_bgc_hum = 'md' + f_bgc_PON = 'md' + f_bgc_ml = 'md' + f_upNO = 'md' + f_upNH = 'md' + f_bTin = 'md' + f_bphi = 'md' + f_iDi = 'md' + f_iki = 'md' + f_fbri = 'md' + f_hbri = 'md' + f_zfswin = 'md' + f_bionet = 'md' + f_biosnow = 'md' + f_grownet = 'md' + f_PPnet = 'md' + f_algalpeak = 'md' + f_zbgc_frac = 'md' + f_drag = 'md' + f_Cdn_atm = 'md' + f_Cdn_ocn = 'md' + f_fsdrad = 'md' + f_fsdperim = 'md' + f_afsd = 'md' + f_afsdn = 'md' + 
f_dafsd_newi = 'md' + f_dafsd_latg = 'md' + f_dafsd_latm = 'md' + f_dafsd_wave = 'md' + f_dafsd_weld = 'md' + f_wave_sig_ht = 'md' + f_aice_ww = 'md' + f_diam_ww = 'md' + f_hice_ww = 'md' diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts index 30b9d7183..aa3150be3 100644 --- a/configuration/scripts/tests/gridsys_suite.ts +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -17,6 +17,14 @@ smoke gbox80 4x2 boxsymne,kmtislands smoke gbox80 8x1 boxislandsn smoke gbox80 4x2 boxislandse smoke gbox80 2x4 boxislandsne +smoke gx3 1x1x100x116x1 reprosum,run10day +smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day +smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day +smoke gx3 1x1x5x4x580 reprosum,run10day smoke_gx3_1x1x100x116x1_reprosum_run10day +smoke gx1 32x1x16x16x32 reprosum,run10day +smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall smoke_gx1_32x1x16x16x32_reprosum_run10day +smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall smoke_gx1_32x1x16x16x32_reprosum_run10day +smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest smoke_gx1_32x1x16x16x32_reprosum_run10day smoke gx3 8x2 diag1,run5day,gridcd restart gx3 4x2 debug,diag1,gridcd @@ -36,6 +44,14 @@ smoke gbox80 4x2 boxsymne,kmtislands,gridcd smoke gbox80 8x1 boxislandsn,gridcd smoke gbox80 4x2 boxislandse,gridcd smoke gbox80 2x4 boxislandsne,gridcd +smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd +smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day +smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day +smoke gx3 1x1x5x4x580 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day +smoke gx1 32x1x16x16x32 reprosum,run10day,gridcd +smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day +smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day +smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day smoke gx3 8x2 diag1,run5day,gridc restart gx3 4x2 debug,diag1,gridc @@ -55,3 +71,11 @@ smoke gbox80 4x2 boxsymne,kmtislands,gridc smoke gbox80 8x1 boxislandsn,gridc smoke gbox80 4x2 boxislandse,gridc smoke gbox80 2x4 boxislandsne,gridc +smoke gx3 1x1x100x116x1 reprosum,run10day,gridc +smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day +smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day +smoke gx3 1x1x5x4x580 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day +smoke gx1 32x1x16x16x32 reprosum,run10day,gridc +smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day +smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day +smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day diff --git a/configuration/scripts/tests/omp_suite.ts b/configuration/scripts/tests/omp_suite.ts index d814e2b3d..fdf27881a 100644 --- a/configuration/scripts/tests/omp_suite.ts +++ b/configuration/scripts/tests/omp_suite.ts @@ -15,6 +15,7 @@ smoke gx3 11x2 isotope,reprosum,run10day smoke gx3 8x4 
snwitdrdg,snwgrain,icdefault,reprosum,run10day smoke gx3 6x4 dynpicard,reprosum,run10day smoke gx3 8x3 zsal,reprosum,run10day +smoke gx3 1x1x100x116x1 reprosum,run10day,thread smoke gbox128 8x2 reprosum,run10day smoke gbox128 12x2 boxnodyn,reprosum,run10day @@ -39,6 +40,8 @@ smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread smoke_gx3_8x4_icdefault_reprosum_run10day_snwitdrdg_snwgrain smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread smoke_gx3_6x4_dynpicard_reprosum_run10day smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread smoke_gx3_8x3_reprosum_run10day_zsal +smoke gx3 4x2x25x29x4 reprosum,run10day smoke_gx3_1x1x100x116x1_reprosum_run10day_thread +smoke gx3 8x4x5x4x80 reprosum,run10day smoke_gx3_1x1x100x116x1_reprosum_run10day_thread smoke gbox128 8x1 reprosum,run10day,cmplogrest,thread smoke_gbox128_8x2_reprosum_run10day smoke gbox128 8x1 boxnodyn,reprosum,run10day,cmplogrest,thread smoke_gbox128_12x2_boxnodyn_reprosum_run10day @@ -64,6 +67,7 @@ smoke gx3 11x2 isotope,reprosum,run10day,gridc smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridc smoke gx3 6x4 dynpicard,reprosum,run10day,gridc smoke gx3 8x3 zsal,reprosum,run10day,gridc +smoke gx3 1x1x100x116x1 reprosum,run10day,gridc,thread smoke gbox128 8x2 reprosum,run10day,gridc smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridc @@ -88,6 +92,8 @@ smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread,gr smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_gridc_icdefault_reprosum_run10day_snwitdrdg_snwgrain smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x4_dynpicard_gridc_reprosum_run10day smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x3_gridc_reprosum_run10day_zsal +smoke gx3 4x2x25x29x4 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day_thread +smoke gx3 8x4x5x4x80 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day_thread smoke gbox128 8x1 reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_8x2_gridc_reprosum_run10day smoke gbox128 8x1 boxnodyn,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_12x2_boxnodyn_gridc_reprosum_run10day @@ -113,6 +119,7 @@ smoke gx3 11x2 isotope,reprosum,run10day,gridcd smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridcd smoke gx3 6x4 dynpicard,reprosum,run10day,gridcd smoke gx3 8x3 zsal,reprosum,run10day,gridcd +smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd,thread smoke gbox128 8x2 reprosum,run10day,gridcd smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridcd @@ -137,6 +144,8 @@ smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread,gr smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_gridcd_icdefault_reprosum_run10day_snwitdrdg_snwgrain smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x4_dynpicard_gridcd_reprosum_run10day smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x3_gridcd_reprosum_run10day_zsal +smoke gx3 4x2x25x29x4 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day_thread +smoke gx3 8x4x5x4x80 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day_thread smoke gbox128 8x1 reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_8x2_gridcd_reprosum_run10day smoke gbox128 8x1 boxnodyn,reprosum,run10day,cmplogrest,thread,gridcd 
smoke_gbox128_12x2_boxnodyn_gridcd_reprosum_run10day From c5d3a71e0fdaa3ae8a35554e0f0b627f97614ca2 Mon Sep 17 00:00:00 2001 From: apcraig Date: Mon, 14 Mar 2022 15:22:11 -0600 Subject: [PATCH 080/109] - Fix query_field error, fixes C/CD restart error - Add 2 day restart test, restart2 --- .../infrastructure/ice_read_write.F90 | 2 +- .../infrastructure/ice_restart_driver.F90 | 45 +++++----- .../io/io_netcdf/ice_restart.F90 | 11 +-- .../infrastructure/io/io_pio2/ice_restart.F90 | 13 ++- .../scripts/options/test_nml.restart21 | 7 ++ .../scripts/options/test_nml.restart22 | 6 ++ configuration/scripts/tests/gridsys_suite.ts | 3 + .../scripts/tests/test_restart2.files | 2 + .../scripts/tests/test_restart2.script | 82 +++++++++++++++++++ 9 files changed, 135 insertions(+), 36 deletions(-) create mode 100644 configuration/scripts/options/test_nml.restart21 create mode 100644 configuration/scripts/options/test_nml.restart22 create mode 100644 configuration/scripts/tests/test_restart2.files create mode 100644 configuration/scripts/tests/test_restart2.script diff --git a/cicecore/cicedynB/infrastructure/ice_read_write.F90 b/cicecore/cicedynB/infrastructure/ice_read_write.F90 index bf0361cf1..2443d75a3 100644 --- a/cicecore/cicedynB/infrastructure/ice_read_write.F90 +++ b/cicecore/cicedynB/infrastructure/ice_read_write.F90 @@ -1670,7 +1670,7 @@ subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & amin = minval(work_g1(:,:,n)) amax = maxval(work_g1(:,:,n), mask = work_g1(:,:,n) /= missingvalue) asum = sum (work_g1(:,:,n), mask = work_g1(:,:,n) /= missingvalue) - write(nu_diag,*) subname,' min, max, sum =', amin, amax, asum + write(nu_diag,*) subname,' min, max, sum =', amin, amax, asum, trim(varname) enddo endif diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index 978ca7f55..07f05af30 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -19,6 +19,7 @@ module ice_restart_driver use ice_kinds_mod use ice_arrays_column, only: oceanmixed_ice + use ice_communicate, only: my_task, master_task use ice_constants, only: c0, c1, p5, & field_loc_center, field_loc_NEcorner, & field_loc_Eface, field_loc_Nface, & @@ -222,29 +223,29 @@ subroutine dumpfile(filename_spec) if (grid_ice == 'CD' .or. 
grid_ice == 'C') then - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = c0 - if (icenmask(i,j,iblk)) work1(i,j,iblk) = c1 - enddo - enddo - enddo - !$OMP END PARALLEL DO - call write_restart_field(nu_dump,0,work1,'ruf8','icenmask',1,diag) - - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = c0 - if (iceemask(i,j,iblk)) work1(i,j,iblk) = c1 + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + if (icenmask(i,j,iblk)) work1(i,j,iblk) = c1 + enddo + enddo enddo + !$OMP END PARALLEL DO + call write_restart_field(nu_dump,0,work1,'ruf8','icenmask',1,diag) + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + if (iceemask(i,j,iblk)) work1(i,j,iblk) = c1 + enddo + enddo enddo - enddo - !$OMP END PARALLEL DO - call write_restart_field(nu_dump,0,work1,'ruf8','iceemask',1,diag) + !$OMP END PARALLEL DO + call write_restart_field(nu_dump,0,work1,'ruf8','iceemask',1,diag) endif @@ -266,7 +267,6 @@ subroutine restartfile (ice_ic) use ice_boundary, only: ice_HaloUpdate_stress use ice_blocks, only: nghost, nx_block, ny_block use ice_calendar, only: istep0, npt, calendar - use ice_communicate, only: my_task, master_task use ice_domain, only: nblocks, halo_info use ice_domain_size, only: nilyr, nslyr, ncat, & max_blocks @@ -697,7 +697,6 @@ subroutine restartfile_v4 (ice_ic) use ice_blocks, only: nghost, nx_block, ny_block use ice_calendar, only: istep0, istep1, timesecs, calendar, npt, & set_date_from_timesecs - use ice_communicate, only: my_task, master_task use ice_domain, only: nblocks, distrb_info use ice_domain_size, only: nilyr, nslyr, ncat, nx_global, ny_global, & max_blocks diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 index d49764375..f117384d9 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 @@ -10,6 +10,7 @@ module ice_restart use ice_broadcast + use ice_communicate, only: my_task, master_task use ice_kinds_mod #ifdef USE_NETCDF use netcdf @@ -45,7 +46,6 @@ subroutine init_restart_read(ice_ic) use ice_calendar, only: msec, mmonth, mday, myear, & istep0, istep1, npt - use ice_communicate, only: my_task, master_task character(len=char_len_long), intent(in), optional :: ice_ic @@ -132,7 +132,6 @@ subroutine init_restart_write(filename_spec) use ice_blocks, only: nghost use ice_calendar, only: msec, mmonth, mday, myear, istep1 - use ice_communicate, only: my_task, master_task use ice_domain_size, only: nx_global, ny_global, ncat, nilyr, nslyr, & n_iso, n_aero, nblyr, n_zaero, n_algae, n_doc, & n_dic, n_don, n_fed, n_fep, nfsd @@ -841,7 +840,6 @@ end subroutine write_restart_field subroutine final_restart() use ice_calendar, only: istep1, idate - use ice_communicate, only: my_task, master_task integer (kind=int_kind) :: status @@ -904,8 +902,11 @@ logical function query_field(nu,vname) query_field = .false. #ifdef USE_NETCDF - status = nf90_inq_varid(ncid,trim(vname),varid) - if (status == nf90_noerr) query_field = .true. + if (my_task == master_task) then + status = nf90_inq_varid(ncid,trim(vname),varid) + if (status == nf90_noerr) query_field = .true. 
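+         ! note: the file inquiry runs on the master task only; the
+         ! broadcast below makes query_field consistent across all tasks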
+ endif + call broadcast_scalar(query_field,master_task) #else call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined for '//trim(ice_ic), & file=__FILE__, line=__LINE__) diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index ce3946db2..e585788b7 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -6,6 +6,7 @@ module ice_restart use ice_broadcast + use ice_communicate, only: my_task, master_task use ice_exit, only: abort_ice use ice_fileunits, only: nu_diag, nu_restart, nu_rst_pointer use ice_kinds_mod @@ -44,7 +45,6 @@ subroutine init_restart_read(ice_ic) use ice_calendar, only: istep0, istep1, myear, mmonth, & mday, msec, npt - use ice_communicate, only: my_task, master_task use ice_domain_size, only: ncat use ice_read_write, only: ice_open @@ -140,7 +140,6 @@ end subroutine init_restart_read subroutine init_restart_write(filename_spec) use ice_calendar, only: msec, mmonth, mday, myear, istep1 - use ice_communicate, only: my_task, master_task use ice_domain_size, only: nx_global, ny_global, ncat, nilyr, nslyr, & n_iso, n_aero, nblyr, n_zaero, n_algae, n_doc, & n_dic, n_don, n_fed, n_fep, nfsd @@ -696,7 +695,6 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & field_loc, field_type) use ice_blocks, only: nx_block, ny_block - use ice_communicate, only: my_task, master_task use ice_constants, only: c0, field_loc_center use ice_boundary, only: ice_HaloUpdate use ice_domain, only: halo_info, distrb_info, nblocks @@ -815,7 +813,6 @@ end subroutine read_restart_field subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag) use ice_blocks, only: nx_block, ny_block - use ice_communicate, only: my_task, master_task use ice_constants, only: c0, field_loc_center use ice_domain, only: distrb_info, nblocks use ice_domain_size, only: max_blocks, ncat @@ -901,7 +898,6 @@ end subroutine write_restart_field subroutine final_restart() use ice_calendar, only: istep1, idate, msec - use ice_communicate, only: my_task, master_task character(len=*), parameter :: subname = '(final_restart)' @@ -951,8 +947,11 @@ logical function query_field(nu,vname) query_field = .false. #ifdef USE_NETCDF - status = pio_inq_varid(File,trim(vname),vardesc) - if (status == PIO_noerr) query_field = .true. + if (my_task == master_task) then + status = pio_inq_varid(File,trim(vname),vardesc) + if (status == PIO_noerr) query_field = .true. + endif + call broadcast_scalar(query_field,master_task) #else call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined for '//trim(ice_ic), & file=__FILE__, line=__LINE__) diff --git a/configuration/scripts/options/test_nml.restart21 b/configuration/scripts/options/test_nml.restart21 new file mode 100644 index 000000000..7e4281ff6 --- /dev/null +++ b/configuration/scripts/options/test_nml.restart21 @@ -0,0 +1,7 @@ +npt = 2 +npt_unit = 'd' +dumpfreq = 'd' +dumpfreq_n = 1 +dumpfreq_base = 'init' +runtype = 'initial' +use_restart_time = .false. 
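Taken together, test_nml.restart21 (above) drives the initial two-day leg of the new restart2 test and test_nml.restart22 (below) drives the continuation leg. A condensed csh sketch of the sequence, following test_restart2.script later in this patch:

   ${ICE_CASEDIR}/casescripts/parse_namelist.sh ice_in ${ICE_CASEDIR}/casescripts/test_nml.restart21
   ./cice.run   # initial 2 day run, restart written each day
   # rewind the date in ${ICE_RUNDIR}/ice.restart_file by one day
   ${ICE_CASEDIR}/casescripts/parse_namelist.sh ice_in ${ICE_CASEDIR}/casescripts/test_nml.restart22
   ./cice.run   # continue run back out to day 2
   ${ICE_CASEDIR}/casescripts/comparebfb.csh ${ICE_RUNDIR}/restart   # day-2 restarts must match bit-for-bit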
diff --git a/configuration/scripts/options/test_nml.restart22 b/configuration/scripts/options/test_nml.restart22 new file mode 100644 index 000000000..edc3e975a --- /dev/null +++ b/configuration/scripts/options/test_nml.restart22 @@ -0,0 +1,6 @@ +npt = 2 +npt_unit = 'd' +dumpfreq = 'd' +dumpfreq_n = 2 +dumpfreq_base = 'init' +runtype = 'continue' diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts index aa3150be3..01132ff83 100644 --- a/configuration/scripts/tests/gridsys_suite.ts +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -1,6 +1,7 @@ # Test Grid PEs Sets BFB-compare smoke gx3 8x2 diag1,run5day restart gx3 4x2 debug,diag1 +restart2 gx1 16x2 debug,diag1 smoke gbox12 1x1x12x12x1 boxchan smoke gbox80 1x1 box2001 smoke gbox80 2x2 boxwallp5 @@ -28,6 +29,7 @@ smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest smoke gx3 8x2 diag1,run5day,gridcd restart gx3 4x2 debug,diag1,gridcd +restart2 gx1 16x2 debug,diag1,gridcd smoke gbox12 1x1x12x12x1 boxchan,gridcd smoke gbox80 1x1 box2001,gridcd smoke gbox80 2x2 boxwallp5,gridcd @@ -55,6 +57,7 @@ smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridcd smoke gx3 8x2 diag1,run5day,gridc restart gx3 4x2 debug,diag1,gridc +restart2 gx1 16x2 debug,diag1,gridc smoke gbox12 1x1x12x12x1 boxchan,gridc smoke gbox80 1x1 box2001,gridc smoke gbox80 2x2 boxwallp5,gridc diff --git a/configuration/scripts/tests/test_restart2.files b/configuration/scripts/tests/test_restart2.files new file mode 100644 index 000000000..7c22abe3a --- /dev/null +++ b/configuration/scripts/tests/test_restart2.files @@ -0,0 +1,2 @@ +test_nml.restart21 +test_nml.restart22 diff --git a/configuration/scripts/tests/test_restart2.script b/configuration/scripts/tests/test_restart2.script new file mode 100644 index 000000000..67760bbf4 --- /dev/null +++ b/configuration/scripts/tests/test_restart2.script @@ -0,0 +1,82 @@ + +# Build around a 2 day run with restart at day 1. +#----------------------------------------------------------- +# Run the CICE model baseline simulation + +cp ice_in ice_in.0 +${ICE_CASEDIR}/casescripts/parse_namelist.sh ice_in ${ICE_CASEDIR}/casescripts/test_nml.restart21 +cp ice_in ice_in.1 + +./cice.run +set res="$status" + +if ( $res != 0 ) then + mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev + cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} run" >! ${ICE_CASEDIR}/test_output + mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev + cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} test " >! ${ICE_CASEDIR}/test_output + rm -f ${ICE_CASEDIR}/test_output.prev + echo "FAIL ${ICE_TESTNAME} run" >> ${ICE_CASEDIR}/test_output + echo "FAIL ${ICE_TESTNAME} test " >> ${ICE_CASEDIR}/test_output + exit 99 +endif + +# Prepend 'base_' to the final restart file to save for comparison +if ( "${ICE_IOTYPE}" == "binary" ) then + set end_date = `ls -t1 ${ICE_RUNDIR}/restart | head -1 | awk -F'.' 
'{print $NF}'`
+  foreach file (${ICE_RUNDIR}/restart/*${end_date})
+    set surname = `echo $file | awk -F'/' '{print $NF}'`
+    mv $file ${ICE_RUNDIR}/restart/base_$surname
+  end
+else
+  set test_file = `ls -t1 ${ICE_RUNDIR}/restart | head -1`
+  set test_data = ${ICE_RUNDIR}/restart/${test_file}
+  set base_data = ${ICE_RUNDIR}/restart/base_${test_file}
+  mv ${test_data} ${base_data}
+endif
+
+#-----------------------------------------------------------
+# Run the CICE model for the restart simulation
+
+# Modify the contents of the pointer file for restart
+perl -i -pe's/(\d{4})-(\d{2})-(\d{2})/sprintf("%04d-%02d-%02d",$1,$2,$3-1)/e' ${ICE_RUNDIR}/ice.restart_file
+
+${ICE_CASEDIR}/casescripts/parse_namelist.sh ice_in ${ICE_CASEDIR}/casescripts/test_nml.restart22
+cp ice_in ice_in.2
+
+./cice.run
+set res="$status"
+
+cp ice_in.0 ice_in
+
+mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev
+cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} run" >! ${ICE_CASEDIR}/test_output
+mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev
+cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} test" >! ${ICE_CASEDIR}/test_output
+rm -f ${ICE_CASEDIR}/test_output.prev
+
+if ( $res != 0 ) then
+  echo "FAIL ${ICE_TESTNAME} run " >> ${ICE_CASEDIR}/test_output
+  echo "FAIL ${ICE_TESTNAME} test " >> ${ICE_CASEDIR}/test_output
+  exit 99
+else
+  set log_file = `ls -t1 ${ICE_RUNDIR}/cice.runlog* | head -1`
+  set ttimeloop = `grep TimeLoop ${log_file} | grep Timer | cut -c 22-32`
+  set tdynamics = `grep Dynamics ${log_file} | grep Timer | cut -c 22-32`
+  set tcolumn = `grep Column ${log_file} | grep Timer | cut -c 22-32`
+  if (${ttimeloop} == "") set ttimeloop = -1
+  if (${tdynamics} == "") set tdynamics = -1
+  if (${tcolumn} == "") set tcolumn = -1
+  echo "PASS ${ICE_TESTNAME} run ${ttimeloop} ${tdynamics} ${tcolumn}" >> ${ICE_CASEDIR}/test_output
+
+  ${ICE_CASEDIR}/casescripts/comparebfb.csh ${ICE_RUNDIR}/restart
+  set bfbstatus = $status
+  if (${bfbstatus} == 0) then
+    echo "PASS ${ICE_TESTNAME} test " >> ${ICE_CASEDIR}/test_output
+  else
+    echo "FAIL ${ICE_TESTNAME} test " >> ${ICE_CASEDIR}/test_output
+  endif
+endif
+
+#-----------------------------------------------------------
+

From 7a2d071fff4c5167e823115c8d9e2fcc954fb35f Mon Sep 17 00:00:00 2001
From: Tony Craig
Date: Tue, 15 Mar 2022 09:28:27 -0700
Subject: [PATCH 081/109] Refactor viscous_coeffs_and_rep_pressure, rename eyc
 to elasticDamp and make namelist (#67)

* - Rename eyc to elasticDamp and add to dynamics namelist
- Update gridsys tests, run sym tests 1 day
- Fix minor formatting issues with namelist output in log file

* update documentation for elasticDamp

* Clean up viscous_coeffs_and_rep_pressure, improve code reuse, rename subroutines

Convert grid_ice "select case" to "if"

* clean up arg list
---
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90    | 111 +++---
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 349 +++++++-----------
 cicecore/cicedynB/dynamics/ice_dyn_vp.F90     |  32 +-
 cicecore/cicedynB/general/ice_init.F90        |  21 +-
 configuration/scripts/tests/gridsys_suite.ts  |  54 +--
 doc/source/cice_index.rst                     |   2 +-
 doc/source/science_guide/sg_dynamics.rst      |   2 +-
 doc/source/user_guide/ug_case_settings.rst    |   1 +
 doc/source/user_guide/ug_implementation.rst   |   4 +-
 9 files changed, 246 insertions(+), 330 deletions(-)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index 42fe1b53d..b58fbc1c7 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++
b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -590,8 +590,7 @@ subroutine evp (dt) if (seabed_stress) then - select case (trim(grid_ice)) - case('B') + if (grid_ice == "B") then if ( seabed_stress_method == 'LKD' ) then !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) @@ -616,7 +615,7 @@ subroutine evp (dt) !$OMP END PARALLEL DO endif - case('CD','C') + elseif (grid_ice == "C" .or. grid_ice == "CD") then if ( seabed_stress_method == 'LKD' ) then !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) @@ -649,7 +648,7 @@ subroutine evp (dt) !$OMP END PARALLEL DO endif - end select + endif endif @@ -691,8 +690,7 @@ subroutine evp (dt) do ksub = 1,ndte ! subcycling - select case (grid_ice) - case('B') + if (grid_ice == "B") then !$OMP PARALLEL DO PRIVATE(iblk,strtmp) SCHEDULE(runtime) do iblk = 1, nblocks @@ -757,7 +755,7 @@ subroutine evp (dt) enddo ! iblk !$OMP END PARALLEL DO - case('CD','C') + elseif (grid_ice == "C" .or. grid_ice == "CD") then !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -967,7 +965,7 @@ subroutine evp (dt) uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) - end select + endif ! grid_ice call ice_timer_start(timer_bound) call stack_velocity_field(uvel, vvel, fld2) @@ -1169,9 +1167,8 @@ subroutine stress (nx_block, ny_block, & stress12_3, stress12_4, & str ) - use ice_dyn_shared, only: strain_rates, viscous_coeffs_and_rep_pressure_T, & - capping - + use ice_dyn_shared, only: strain_rates, visccoeff_replpress, capping + integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions icellt ! no. of cells where icetmask = 1 @@ -1262,27 +1259,18 @@ subroutine stress (nx_block, ny_block, & ! viscous coefficients and replacement pressure !----------------------------------------------------------------- - call viscous_coeffs_and_rep_pressure_T (strength(i,j), DminTarea(i,j),& - Deltane, zetax2ne, & - etax2ne, rep_prsne, & - capping) - - call viscous_coeffs_and_rep_pressure_T (strength(i,j), DminTarea(i,j),& - Deltanw, zetax2nw, & - etax2nw, rep_prsnw, & - capping) + call visccoeff_replpress (strength(i,j), DminTarea(i,j), Deltane, & + zetax2ne, etax2ne, rep_prsne, capping) - call viscous_coeffs_and_rep_pressure_T (strength(i,j), DminTarea(i,j),& - Deltasw, zetax2sw, & - etax2sw, rep_prssw, & - capping) + call visccoeff_replpress (strength(i,j), DminTarea(i,j), Deltanw, & + zetax2nw, etax2nw, rep_prsnw, capping) - call viscous_coeffs_and_rep_pressure_T (strength(i,j), DminTarea(i,j),& - Deltase, zetax2se, & - etax2se, rep_prsse, & - capping) + call visccoeff_replpress (strength(i,j), DminTarea(i,j), Deltasw, & + zetax2sw, etax2sw, rep_prssw, capping) + + call visccoeff_replpress (strength(i,j), DminTarea(i,j), Deltase, & + zetax2se, etax2se, rep_prsse, capping) - !----------------------------------------------------------------- ! the stresses ! kg/s^2 ! (1) northeast, (2) northwest, (3) southwest, (4) southeast @@ -1471,7 +1459,7 @@ subroutine stress_T (nx_block, ny_block, & stress12T ) use ice_dyn_shared, only: strain_rates_T, capping, & - viscous_coeffs_and_rep_pressure_T + visccoeff_replpress integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1538,10 +1526,10 @@ subroutine stress_T (nx_block, ny_block, & ! 
viscous coefficients and replacement pressure at T point !----------------------------------------------------------------- - call viscous_coeffs_and_rep_pressure_T (strength(i,j), & - DminTarea(i,j), DeltaT, & - zetax2T(i,j),etax2T(i,j),& - rep_prsT, capping ) + call visccoeff_replpress (strength(i,j), DminTarea(i,j), & + DeltaT , zetax2T (i,j), & + etax2T (i,j), rep_prsT , & + capping) !----------------------------------------------------------------- ! the stresses ! kg/s^2 @@ -1587,10 +1575,10 @@ subroutine stress_U (nx_block, ny_block, & stress12U ) use ice_dyn_shared, only: strain_rates_U, & - viscous_coeffs_and_rep_pressure_T2U, & - viscous_coeffs_and_rep_pressure_U, & + visccoeff_replpress_avgstr, & + visccoeff_replpress_avgzeta, & visc_coeff_method, deltaminEVP, capping - + integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions icellu ! no. of cells where iceumask = 1 @@ -1637,7 +1625,7 @@ subroutine stress_U (nx_block, ny_block, & real (kind=dbl_kind) :: & divU, tensionU, shearU, DeltaU, & ! strain rates at U point zetax2U, etax2U, rep_prsU, & ! replacement pressure at U point - DminUarea + DminUarea, strtmp, areatmp ! Dmin on U and tmp variables character(len=*), parameter :: subname = '(stress_U)' @@ -1668,28 +1656,26 @@ subroutine stress_U (nx_block, ny_block, & !----------------------------------------------------------------- if (visc_coeff_method == 'avg_zeta') then - call viscous_coeffs_and_rep_pressure_T2U (zetax2T(i ,j ), zetax2T(i ,j+1), & - zetax2T(i+1,j+1), zetax2T(i+1,j ), & - etax2T (i ,j ), etax2T (i ,j+1), & - etax2T (i+1,j+1), etax2T (i+1,j ), & - hm (i ,j ), hm (i ,j+1), & - hm (i+1,j+1), hm (i+1,j ), & - tarea (i ,j ), tarea (i ,j+1), & - tarea (i+1,j+1), tarea (i+1,j ), & - DeltaU,zetax2U, etax2U, rep_prsU) - elseif (visc_coeff_method == 'avg_strength') then + call visccoeff_replpress_avgzeta (zetax2T (i ,j ), zetax2T (i ,j+1), & + zetax2T (i+1,j+1), zetax2T (i+1,j ), & + etax2T (i ,j ), etax2T (i ,j+1), & + etax2T (i+1,j+1), etax2T (i+1,j ), & + hm (i ,j ), hm (i ,j+1), & + hm (i+1,j+1), hm (i+1,j ), & + tarea (i ,j ), tarea (i ,j+1), & + tarea (i+1,j+1), tarea (i+1,j ), & + DeltaU, zetax2U, etax2U, rep_prsU) + elseif (visc_coeff_method == 'avg_strength') then DminUarea = deltaminEVP*uarea(i,j) - call viscous_coeffs_and_rep_pressure_U (strength(i ,j ), strength(i ,j+1), & - strength(i+1,j+1), strength(i+1,j ), & - hm (i ,j ) , hm (i ,j+1), & - hm (i+1,j+1) , hm (i+1,j ), & - tarea (i ,j ) , tarea (i ,j+1), & - tarea (i+1,j+1) , tarea (i+1,j ), & - DminUarea, & - DeltaU , capping, & - zetax2U, etax2U, rep_prsU) - + call visccoeff_replpress_avgstr (strength(i ,j ), strength(i ,j+1), & + strength(i+1,j+1), strength(i+1,j ), & + hm (i ,j ) , hm (i ,j+1), & + hm (i+1,j+1) , hm (i+1,j ), & + tarea (i ,j ) , tarea (i ,j+1), & + tarea (i+1,j+1) , tarea (i+1,j ), & + DminUarea, DeltaU, & + zetax2U, etax2U, rep_prsU, capping) endif !----------------------------------------------------------------- @@ -1779,8 +1765,7 @@ subroutine div_stress (nx_block, ny_block, & ! 
F1,F2 : div of stress tensor for u,v components !----------------------------------------------------------------- - select case (trim(grid_location)) - case('E') + if (grid_location == "E") then F1(i,j) = arear(i,j) * & ( p5 * dyE_N(i,j) * ( stresspF1(i+1,j)-stresspF1(i,j) ) & @@ -1796,7 +1781,7 @@ subroutine div_stress (nx_block, ny_block, & + (c1/dyE_N(i,j)) * ( (dyT_U(i+1,j)**2) * stress12F2(i+1,j) & -(dyT_U(i,j)**2)*stress12F2(i,j) ) ) - case('N') + elseif (grid_location == "N") then F1(i,j) = arear(i,j) * & ( p5 * dyE_N(i,j) * ( stresspF1(i,j)-stresspF1(i-1,j) ) & @@ -1811,9 +1796,9 @@ subroutine div_stress (nx_block, ny_block, & -(dxT_U(i,j)**2)*stressmF2(i,j) ) & + (c1/dyE_N(i,j)) * ( (dyT_U(i,j)**2) * stress12F2(i,j) & -(dyT_U(i-1,j)**2)*stress12F2(i-1,j) ) ) - case default + else call abort_ice(subname // ' unknown grid_location: ' // grid_location) - end select + endif enddo ! ij diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 8301a967b..f9f1b3c92 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -29,10 +29,9 @@ module ice_dyn_shared alloc_dyn_shared, & deformations, deformations_T, & strain_rates, strain_rates_T, strain_rates_U, & - viscous_coeffs_and_rep_pressure, & - viscous_coeffs_and_rep_pressure_T, & - viscous_coeffs_and_rep_pressure_T2U, & - viscous_coeffs_and_rep_pressure_U, & + visccoeff_replpress, & + visccoeff_replpress_avgstr, & + visccoeff_replpress_avgzeta, & stack_velocity_field, unstack_velocity_field ! namelist parameters @@ -52,6 +51,10 @@ module ice_dyn_shared character (len=char_len), public :: & evp_algorithm ! standard_2d = 2D org version (standard) ! shared_mem_1d = 1d without mpi call and refactorization to 1d + + real (kind=dbl_kind), public :: & + elasticDamp ! coefficient for calculating the parameter E, elastic damping parameter + ! other EVP parameters character (len=char_len), public :: & @@ -62,7 +65,6 @@ module ice_dyn_shared real (kind=dbl_kind), parameter, public :: & - eyc = 0.36_dbl_kind, & ! coefficient for calculating the parameter E u0 = 5e-5_dbl_kind, & ! residual velocity for seabed stress (m/s) cosw = c1 , & ! cos(ocean turning angle) ! turning angle = 0 sinw = c0 , & ! sin(ocean turning angle) ! turning angle = 0 @@ -190,7 +192,7 @@ subroutine init_dyn (dt) if (my_task == master_task) then write(nu_diag,*) 'dt = ',dt write(nu_diag,*) 'dte = ',dt/real(ndte,kind=dbl_kind) - write(nu_diag,*) 'tdamp =', eyc*dt + write(nu_diag,*) 'tdamp =', elasticDamp * dt endif allocate(fcor_blk(nx_block,ny_block,max_blocks)) @@ -318,8 +320,8 @@ subroutine set_evp_parameters (dt) ecci = c1/e_yieldcurve**2 ! temporary for 1d evp ! constants for stress equation - !tdamp2 = c2*eyc*dt ! s - !dte2T = dte/tdamp2 or c1/(c2*eyc*real(ndte,kind=dbl_kind)) ! ellipse (unitless) + !tdamp2 = c2 * elasticDamp * dt ! s + !dte2T = dte/tdamp2 or c1/(c2*elasticDamp*real(ndte,kind=dbl_kind)) ! ellipse (unitless) if (revised_evp) then ! Bouillon et al, Ocean Mod 2013 revp = c1 @@ -330,7 +332,7 @@ subroutine set_evp_parameters (dt) !arlx1i = dte2T !arlx = c1/arlx1i !brlx = dt*dtei - arlx = c2*eyc*real(ndte,kind=dbl_kind) + arlx = c2 * elasticDamp * real(ndte,kind=dbl_kind) arlx1i = c1/arlx brlx = real(ndte,kind=dbl_kind) denom1 = c1/(c1+arlx1i) @@ -1032,7 +1034,7 @@ subroutine stepu_Cgrid (nx_block, ny_block, & enddo ! 
ij - end subroutine stepu_Cgrid + end subroutine stepu_Cgrid !======================================================================= @@ -1132,7 +1134,7 @@ subroutine stepv_Cgrid (nx_block, ny_block, & enddo ! ij - end subroutine stepv_Cgrid + end subroutine stepv_Cgrid !======================================================================= @@ -1306,7 +1308,7 @@ subroutine seabed_stress_factor_LKD (nx_block, ny_block, & enddo ! ij - end subroutine seabed_stress_factor_LKD + end subroutine seabed_stress_factor_LKD !======================================================================= ! Computes seabed (basal) stress factor Tbu (landfast ice) based on @@ -1491,39 +1493,38 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & endif enddo - select case (trim(grid_ice)) - case('B') - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) - ! convert quantities to U-location - Tbu(i,j) = grid_neighbor_max(Tbt, i, j, 'U') - enddo ! ij - case('CD','C') - if(present(Tbe) .and. present(TbN) .and. & - present(icelle) .and. present(icelln) .and. & - present(indxei) .and. present(indxej) .and. & - present(indxni) .and. present(indxnj)) then - - do ij = 1, icelle - i = indxei(ij) - j = indxej(ij) - ! convert quantities to E-location - TbE(i,j) = grid_neighbor_max(Tbt, i, j, 'E') - enddo - do ij = 1, icelln - i = indxni(ij) - j = indxnj(ij) - ! convert quantities to N-location - TbN(i,j) = grid_neighbor_max(Tbt, i, j, 'N') - enddo + if (grid_ice == "B") then + do ij = 1, icellu + i = indxui(ij) + j = indxuj(ij) + ! convert quantities to U-location + Tbu(i,j) = grid_neighbor_max(Tbt, i, j, 'U') + enddo ! ij + elseif (grid_ice == "C" .or. grid_ice == "CD") then + if (present(Tbe) .and. present(TbN) .and. & + present(icelle) .and. present(icelln) .and. & + present(indxei) .and. present(indxej) .and. & + present(indxni) .and. present(indxnj)) then + + do ij = 1, icelle + i = indxei(ij) + j = indxej(ij) + ! convert quantities to E-location + TbE(i,j) = grid_neighbor_max(Tbt, i, j, 'E') + enddo + do ij = 1, icelln + i = indxni(ij) + j = indxnj(ij) + ! convert quantities to N-location + TbN(i,j) = grid_neighbor_max(Tbt, i, j, 'N') + enddo - else - call abort_ice(subname // ' insufficient number of arguments for grid_ice:' // grid_ice) - endif - end select + else + call abort_ice(subname // ' insufficient number of arguments for grid_ice:' // grid_ice) + endif + endif - end subroutine seabed_stress_factor_prob + end subroutine seabed_stress_factor_prob !======================================================================= @@ -1688,7 +1689,7 @@ subroutine deformations (nx_block, ny_block, & enddo ! ij - end subroutine deformations + end subroutine deformations !======================================================================= @@ -1780,7 +1781,7 @@ subroutine deformations_T (nx_block, ny_block, & enddo ! ij - end subroutine deformations_T + end subroutine deformations_T !======================================================================= @@ -1929,9 +1930,8 @@ subroutine strain_rates_T (nx_block, ny_block, & ! Delta (in the denominator of zeta, eta) DeltaT = sqrt(divT**2 + e_factor*(tensionT**2 + shearT**2)) - end subroutine strain_rates_T + end subroutine strain_rates_T - !======================================================================= ! Compute strain rates at the U point including boundary conditions @@ -2034,100 +2034,27 @@ subroutine strain_rates_U (nx_block, ny_block, & ! 
Delta (in the denominator of zeta, eta) DeltaU = sqrt(divU**2 + e_factor*(tensionU**2 + shearU**2)) - end subroutine strain_rates_U - - !======================================================================= - ! Computes viscous coefficients and replacement pressure for stress - ! calculations. Note that tensile strength is included here. - ! - ! Hibler, W. D. (1979). A dynamic thermodynamic sea ice model. J. Phys. - ! Oceanogr., 9, 817-846. - ! - ! Konig Beatty, C. and Holland, D. M. (2010). Modeling landfast ice by - ! adding tensile strength. J. Phys. Oceanogr. 40, 185-198. - ! - ! Lemieux, J. F. et al. (2016). Improving the simulation of landfast ice - ! by combining tensile strength and a parameterization for grounded ridges. - ! J. Geophys. Res. Oceans, 121, 7354-7368. - - subroutine viscous_coeffs_and_rep_pressure (strength, DminTarea,& - Deltane, Deltanw, & - Deltasw, Deltase, & - zetax2ne, zetax2nw, & - zetax2sw, zetax2se, & - etax2ne, etax2nw, & - etax2sw, etax2se, & - rep_prsne, rep_prsnw,& - rep_prssw, rep_prsse,& - capping) - - real (kind=dbl_kind), intent(in):: & - strength, DminTarea ! at the t-point - - real (kind=dbl_kind), intent(in):: & - Deltane, Deltanw, Deltasw, Deltase ! Delta at each corner - - real(kind=dbl_kind) , intent(in):: capping - - real (kind=dbl_kind), intent(out):: & - zetax2ne, zetax2nw, zetax2sw, zetax2se, & ! zetax2 at each corner - etax2ne, etax2nw, etax2sw, etax2se, & ! etax2 at each corner - rep_prsne, rep_prsnw, rep_prssw, rep_prsse ! replacement pressure - - ! local variables - real (kind=dbl_kind) :: & - tmpcalcne, tmpcalcnw, tmpcalcsw, tmpcalcse + end subroutine strain_rates_U - ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - - tmpcalcne = capping *(strength/max(Deltane, DminTarea))+ & - (c1-capping)* strength/ (Deltane+ DminTarea) - tmpcalcnw = capping *(strength/max(Deltanw, DminTarea))+ & - (c1-capping)* strength/ (Deltanw+ DminTarea) - tmpcalcsw = capping *(strength/max(Deltasw, DminTarea))+ & - (c1-capping)* strength/ (Deltasw+ DminTarea) - tmpcalcse = capping *(strength/max(Deltase, DminTarea))+ & - (c1-capping)* strength/ (Deltase+ DminTarea) - - zetax2ne = (c1+Ktens)*tmpcalcne ! northeast - rep_prsne = (c1-Ktens)*tmpcalcne*Deltane - etax2ne = epp2i*zetax2ne - - zetax2nw = (c1+Ktens)*tmpcalcnw ! northwest - rep_prsnw = (c1-Ktens)*tmpcalcnw*Deltanw - etax2nw = epp2i*zetax2nw +!======================================================================= +! Computes viscous coefficients and replacement pressure for stress +! calculations. Note that tensile strength is included here. +! +! Hibler, W. D. (1979). A dynamic thermodynamic sea ice model. J. Phys. +! Oceanogr., 9, 817-846. +! +! Konig Beatty, C. and Holland, D. M. (2010). Modeling landfast ice by +! adding tensile strength. J. Phys. Oceanogr. 40, 185-198. +! +! Lemieux, J. F. et al. (2016). Improving the simulation of landfast ice +! by combining tensile strength and a parameterization for grounded ridges. +! J. Geophys. Res. Oceans, 121, 7354-7368. - zetax2sw = (c1+Ktens)*tmpcalcsw ! southwest - rep_prssw = (c1-Ktens)*tmpcalcsw*Deltasw - etax2sw = epp2i*zetax2sw - - zetax2se = (c1+Ktens)*tmpcalcse ! southeast - rep_prsse = (c1-Ktens)*tmpcalcse*Deltase - etax2se = epp2i*zetax2se - - end subroutine viscous_coeffs_and_rep_pressure - - !======================================================================= - ! Computes viscous coefficients and replacement pressure for stress - ! calculations. Note that tensile strength is included here. - ! - ! Hibler, W. D. 
(1979). A dynamic thermodynamic sea ice model. J. Phys. - ! Oceanogr., 9, 817-846. - ! - ! Konig Beatty, C. and Holland, D. M. (2010). Modeling landfast ice by - ! adding tensile strength. J. Phys. Oceanogr. 40, 185-198. - ! - ! Lemieux, J. F. et al. (2016). Improving the simulation of landfast ice - ! by combining tensile strength and a parameterization for grounded ridges. - ! J. Geophys. Res. Oceans, 121, 7354-7368. - - subroutine viscous_coeffs_and_rep_pressure_T (strength, DminTarea, & - Delta , zetax2 , & - etax2 , rep_prs , & - capping) + subroutine visccoeff_replpress(strength, DminArea, Delta, & + zetax2, etax2, rep_prs, capping) real (kind=dbl_kind), intent(in):: & - strength, DminTarea + strength, DminArea real (kind=dbl_kind), intent(in):: & Delta, capping @@ -2139,123 +2066,115 @@ subroutine viscous_coeffs_and_rep_pressure_T (strength, DminTarea, & real (kind=dbl_kind) :: & tmpcalc - character(len=*), parameter :: subname = '(viscous_coeffs_and_rep_pressure_T)' + character(len=*), parameter :: subname = '(visccoeff_replpress)' ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - tmpcalc = capping *(strength/max(Delta,DminTarea))+ & - (c1-capping)*(strength/(Delta + DminTarea)) - zetax2 = (c1+Ktens)*tmpcalc + tmpcalc = capping *(strength/max(Delta,DminArea))+ & + (c1-capping)*(strength/(Delta + DminArea)) + zetax2 = (c1+Ktens)*tmpcalc rep_prs = (c1-Ktens)*tmpcalc*Delta - etax2 = epp2i*zetax2 + etax2 = epp2i*zetax2 - end subroutine viscous_coeffs_and_rep_pressure_T + end subroutine visccoeff_replpress +!======================================================================= - subroutine viscous_coeffs_and_rep_pressure_T2U (zetax2T_00, zetax2T_01, & - zetax2T_11, zetax2T_10, & - etax2T_00, etax2T_01, & - etax2T_11, etax2T_10, & - maskT_00, maskT_01, & - maskT_11, maskT_10, & - tarea_00, tarea_01, & - tarea_11, tarea_10, & - deltaU, & - zetax2U, etax2U, & - rep_prsU) - + subroutine visccoeff_replpress_avgzeta (zetax2T1, zetax2T2, & + zetax2T3, zetax2T4, & + etax2T1, etax2T2, & + etax2T3, etax2T4, & + mask1, mask2, & + mask3, mask4, & + area1, area2, & + area3, area4, & + deltaU, zetax2U, etax2U, rep_prsU) real (kind=dbl_kind), intent(in):: & - zetax2T_00,zetax2T_10,zetax2T_11,zetax2T_01, & - etax2T_00, etax2T_10, etax2T_11, etax2T_01, & ! 2 x viscous coeffs, replacement pressure - maskT_00, maskT_10, maskT_11, maskT_01, & - tarea_00, tarea_10, tarea_11, tarea_01, & + zetax2T1,zetax2T2,zetax2T3,zetax2T4, & + etax2T1, etax2T2, etax2T3, etax2T4, & ! 2 x viscous coeffs, replacement pressure + mask1, mask2, mask3, mask4, & + area1, area2, area3, area4, & deltaU real (kind=dbl_kind), intent(out):: zetax2U, etax2U, rep_prsU ! local variables - + real (kind=dbl_kind) :: & - Totarea + areatmp - character(len=*), parameter :: subname = '(viscous_coeffs_and_rep_pressure_T2U)' + character(len=*), parameter :: subname = '(visccoeff_replpress_avgzeta)' ! NOTE: for comp. 
efficiency 2 x zeta and 2 x eta are used in the code - Totarea = maskT_00*Tarea_00 + & - maskT_10*Tarea_10 + & - maskT_11*Tarea_11 + & - maskT_01*Tarea_01 - zetax2U = (maskT_00*Tarea_00 *zetax2T_00 + & - maskT_10*Tarea_10 *zetax2T_10 + & - maskT_11*Tarea_11 *zetax2T_11 + & - maskT_01*Tarea_01 *zetax2T_01)/Totarea - - etax2U = (maskT_00*Tarea_00 *etax2T_00 + & - maskT_10*Tarea_10 *etax2T_10 + & - maskT_11*Tarea_11 *etax2T_11 + & - maskT_01*Tarea_01 *etax2T_01)/Totarea - + + areatmp = (mask1 * area1 + & + mask4 * area4 + & + mask3 * area3 + & + mask2 * area2) + + zetax2U = (mask1 * area1 * zetax2T1 + & + mask4 * area4 * zetax2T4 + & + mask3 * area3 * zetax2T3 + & + mask2 * area2 * zetax2T2) / areatmp + + etax2U = (mask1 * area1 * etax2T1 + & + mask4 * area4 * etax2T4 + & + mask3 * area3 * etax2T3 + & + mask2 * area2 * etax2T2) / areatmp + rep_prsU = (c1-Ktens)/(c1+Ktens)*zetax2U*deltaU - end subroutine viscous_coeffs_and_rep_pressure_T2U + end subroutine visccoeff_replpress_avgzeta !======================================================================= - subroutine viscous_coeffs_and_rep_pressure_U (strength_00, strength_01, & - strength_11, strength_10, & - maskT_00, maskT_01, & - maskT_11, maskT_10, & - tarea_00, tarea_01, & - tarea_11, tarea_10, & - DminUarea, & - deltaU, capping, & - zetax2U, etax2U, & - rep_prsU) - + subroutine visccoeff_replpress_avgstr (strength1, strength2, & + strength3, strength4, & + mask1, mask2, & + mask3, mask4, & + area1, area2, & + area3, area4, & + DminUarea, deltaU, & + zetax2U, etax2U, rep_prsU, capping) real (kind=dbl_kind), intent(in):: & - strength_00,strength_10,strength_11,strength_01, & - maskT_00, maskT_10, maskT_11, maskT_01, & - tarea_00, tarea_10, tarea_11, tarea_01, & + strength1,strength2,strength3,strength4, & + mask1, mask2, mask3, mask4, & + area1, area2, area3, area4, & DminUarea, deltaU, capping real (kind=dbl_kind), intent(out):: zetax2U, etax2U, rep_prsU ! local variables - + real (kind=dbl_kind) :: & - Totarea, tmpcalc, strength + areatmp, strtmp ! area and strength average - character(len=*), parameter :: subname = '(viscous_coeffs_and_rep_pressure_U)' + character(len=*), parameter :: subname = '(visccoeff_replpress_avgstr)' ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - Totarea = maskT_00*Tarea_00 + & - maskT_10*Tarea_10 + & - maskT_11*Tarea_11 + & - maskT_01*Tarea_01 - strength = (maskT_00*Tarea_00 *strength_00 + & - maskT_10*Tarea_10 *strength_10 + & - maskT_11*Tarea_11 *strength_11 + & - maskT_01*Tarea_01 *strength_01)/Totarea - - ! rep_prsU = (c1-Ktens)/(c1+Ktens)*zetax2U*deltaU - ! IMPROVE the calc below are the same as in the other viscous coeff...could reduce redundency - ! we could have a strength_U subroutine and then calc the visc coeff - - tmpcalc = capping *(strength/max(deltaU,DminUarea))+ & - (c1-capping)*(strength/(deltaU + DminUarea)) - zetax2U = (c1+Ktens)*tmpcalc - rep_prsU = (c1-Ktens)*tmpcalc*deltaU - etax2U = epp2i*zetax2U + + areatmp = (mask1 * area1 + & + mask4 * area4 + & + mask3 * area3 + & + mask2 * area2) + + strtmp = (mask1 * area1 * strength1 + & + mask4 * area4 * strength4 + & + mask3 * area3 * strength3 + & + mask2 * area2 * strength2) / areatmp + + call visccoeff_replpress (strtmp, DminUarea, deltaU, & + zetax2U, etax2U, rep_prsU, capping) - end subroutine viscous_coeffs_and_rep_pressure_U + end subroutine visccoeff_replpress_avgstr !======================================================================= ! 
Load velocity components into array for boundary updates - subroutine stack_velocity_field(uvel, vvel, fld2) + subroutine stack_velocity_field(uvel, vvel, fld2) use ice_domain, only: nblocks @@ -2287,7 +2206,7 @@ end subroutine stack_velocity_field ! Unload velocity components from array after boundary updates - subroutine unstack_velocity_field(fld2, uvel, vvel) + subroutine unstack_velocity_field(fld2, uvel, vvel) use ice_domain, only: nblocks diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 5d414c204..17de3bca8 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -1157,8 +1157,8 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & zetax2 , etax2 , & rep_prs , stPr) - use ice_dyn_shared, only: strain_rates, viscous_coeffs_and_rep_pressure, & - capping + use ice_dyn_shared, only: strain_rates, visccoeff_replpress, & + capping integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1240,17 +1240,23 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & ! viscous coefficients and replacement pressure !----------------------------------------------------------------- - call viscous_coeffs_and_rep_pressure (strength(i,j), DminTarea(i,j), & - Deltane, Deltanw, & - Deltasw, Deltase, & - zetax2(i,j,1), zetax2(i,j,2), & - zetax2(i,j,3), zetax2(i,j,4), & - etax2(i,j,1), etax2(i,j,2), & - etax2(i,j,3), etax2(i,j,4), & - rep_prs(i,j,1), rep_prs(i,j,2), & - rep_prs(i,j,3), rep_prs(i,j,4), & - capping) - + call visccoeff_replpress (strength(i,j) , DminTarea(i,j) , & + Deltane , zetax2 (i,j,1), & + etax2 (i,j,1), rep_prs (i,j,1), & + capping) + call visccoeff_replpress (strength(i,j) , DminTarea(i,j) , & + Deltanw , zetax2 (i,j,2), & + etax2 (i,j,2), rep_prs (i,j,2), & + capping) + call visccoeff_replpress (strength(i,j) , DminTarea(i,j) , & + Deltasw , zetax2 (i,j,3), & + etax2 (i,j,3), rep_prs (i,j,3), & + capping) + call visccoeff_replpress (strength(i,j) , DminTarea(i,j) , & + Deltase , zetax2 (i,j,4), & + etax2 (i,j,4), rep_prs (i,j,4), & + capping) + !----------------------------------------------------------------- ! the stresses ! kg/s^2 ! (1) northeast, (2) northwest, (3) southwest, (4) southeast diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 4c7817a3d..da6478069 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -111,7 +111,8 @@ subroutine input_data k1, k2, alphab, threshold_hw, Ktens, & e_yieldcurve, e_plasticpot, coriolis, & ssh_stress, kridge, brlx, arlx, & - deltaminEVP, deltaminVP, capping + deltaminEVP, deltaminVP, capping, & + elasticDamp use ice_dyn_vp, only: maxits_nonlin, precond, dim_fgmres, dim_pgmres, maxits_fgmres, & maxits_pgmres, monitor_nonlin, monitor_fgmres, & @@ -215,7 +216,7 @@ subroutine input_data namelist /dynamics_nml/ & kdyn, ndte, revised_evp, yield_curve, & - evp_algorithm, & + evp_algorithm, elasticDamp, & brlx, arlx, ssh_stress, & advection, coriolis, kridge, ktransport, & kstrength, krdg_partic, krdg_redist, mu_rdg, & @@ -357,11 +358,12 @@ subroutine input_data ndtd = 1 ! dynamic time steps per thermodynamic time step ndte = 120 ! subcycles per dynamics timestep: ndte=dt_dyn/dte evp_algorithm = 'standard_2d' ! EVP kernel (=standard_2d: standard cice evp; =shared_mem_1d: 1d shared memory and no mpi. if more mpi processors then executed on master + elasticDamp = 0.36_dbl_kind ! 
coefficient for calculating the parameter E pgl_global_ext = .false. ! if true, init primary grid lengths (global ext.) brlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared arlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared revised_evp = .false. ! if true, use revised procedure for evp dynamics - yield_curve = 'ellipse' + yield_curve = 'ellipse' ! yield curve kstrength = 1 ! 1 = Rothrock 75 strength, 0 = Hibler 79 Pstar = 2.75e4_dbl_kind ! constant in Hibler strength formula (kstrength = 0) Cstar = 20._dbl_kind ! constant in Hibler strength formula (kstrength = 0) @@ -833,6 +835,7 @@ subroutine input_data call broadcast_scalar(ndtd, master_task) call broadcast_scalar(ndte, master_task) call broadcast_scalar(evp_algorithm, master_task) + call broadcast_scalar(elasticDamp, master_task) call broadcast_scalar(pgl_global_ext, master_task) call broadcast_scalar(brlx, master_task) call broadcast_scalar(arlx, master_task) @@ -1666,10 +1669,10 @@ subroutine input_data endif if (kdyn == 1 .or. kdyn == 3) then - write(nu_diag,1030) ' yield_curve = ', trim(yield_curve) + write(nu_diag,1030) ' yield_curve = ', trim(yield_curve), ' : yield curve' if (trim(yield_curve) == 'ellipse') & - write(nu_diag,1002) ' e_yieldcurve = ', e_yieldcurve, ' : aspect ratio of yield curve' - write(nu_diag,1002) ' e_plasticpot = ', e_plasticpot, ' : aspect ratio of plastic potential' + write(nu_diag,1002) ' e_yieldcurve = ', e_yieldcurve, ' : aspect ratio of yield curve' + write(nu_diag,1002) ' e_plasticpot = ', e_plasticpot, ' : aspect ratio of plastic potential' endif if (kdyn == 1) then @@ -1680,6 +1683,8 @@ subroutine input_data write(nu_diag,1002) ' capping = ', capping, ' : capping method for viscous coefficients' endif + write(nu_diag,1002) ' elasticDamp = ', elasticDamp, ' : coefficient for calculating the parameter E' + if (trim(coriolis) == 'latitude') then tmpstr2 = ' : latitude-dependent Coriolis parameter' elseif (trim(coriolis) == 'contant') then @@ -1729,7 +1734,7 @@ subroutine input_data endif endif if (grid_ice == 'C' .or. grid_ice == 'CD') then - write(nu_diag,1030) 'viscous coeff method (U point) = ', trim(visc_coeff_method) + write(nu_diag,1030) ' visc_coeff_method= ', trim(visc_coeff_method),' : viscous coeff method (U point)' endif write(nu_diag,1002) ' Ktens = ', Ktens, ' : tensile strength factor' @@ -2341,7 +2346,7 @@ subroutine input_data 1000 format (a20,1x,f13.6,1x,a) ! float 1002 format (a20,5x,f9.2,1x,a) - 1003 format (a20,1x,G11.4,1x,a) + 1003 format (a20,1x,G13.4,1x,a) 1009 format (a20,1x,d13.6,1x,a) 1010 format (a20,8x,l6,1x,a) ! 
logical 1011 format (a20,1x,l6) diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts index 01132ff83..19f512f14 100644 --- a/configuration/scripts/tests/gridsys_suite.ts +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -9,15 +9,15 @@ smoke gbox80 3x3 boxwall smoke gbox80 2x2 boxwallblock smoke gbox80 1x1 boxslotcyl smoke gbox80 2x4 boxnodyn -smoke gbox80 2x2 boxsymn -smoke gbox80 4x2 boxsyme -smoke gbox80 4x1 boxsymne -smoke gbox80 2x2 boxsymn,kmtislands -smoke gbox80 4x1 boxsyme,kmtislands -smoke gbox80 4x2 boxsymne,kmtislands -smoke gbox80 8x1 boxislandsn -smoke gbox80 4x2 boxislandse -smoke gbox80 2x4 boxislandsne +smoke gbox80 2x2 boxsymn,run1day +smoke gbox80 4x2 boxsyme,run1day +smoke gbox80 4x1 boxsymne,run1day +smoke gbox80 2x2 boxsymn,run1day,kmtislands +smoke gbox80 4x1 boxsyme,run1day,kmtislands +smoke gbox80 4x2 boxsymne,run1day,kmtislands +smoke gbox80 8x1 boxislandsn,run1day +smoke gbox80 4x2 boxislandse,run1day +smoke gbox80 2x4 boxislandsne,run1day smoke gx3 1x1x100x116x1 reprosum,run10day smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day @@ -37,15 +37,15 @@ smoke gbox80 3x3 boxwall,gridcd smoke gbox80 2x2 boxwallblock,gridcd smoke gbox80 1x1 boxslotcyl,gridcd smoke gbox80 2x4 boxnodyn,gridcd -smoke gbox80 2x2 boxsymn,gridcd -smoke gbox80 4x2 boxsyme,gridcd -smoke gbox80 4x1 boxsymne,gridcd -smoke gbox80 2x2 boxsymn,kmtislands,gridcd -smoke gbox80 4x1 boxsyme,kmtislands,gridcd -smoke gbox80 4x2 boxsymne,kmtislands,gridcd -smoke gbox80 8x1 boxislandsn,gridcd -smoke gbox80 4x2 boxislandse,gridcd -smoke gbox80 2x4 boxislandsne,gridcd +smoke gbox80 2x2 boxsymn,run1day,gridcd +smoke gbox80 4x2 boxsyme,run1day,gridcd +smoke gbox80 4x1 boxsymne,run1day,gridcd +smoke gbox80 2x2 boxsymn,run1day,kmtislands,gridcd +smoke gbox80 4x1 boxsyme,run1day,kmtislands,gridcd +smoke gbox80 4x2 boxsymne,run1day,kmtislands,gridcd +smoke gbox80 8x1 boxislandsn,run1day,gridcd +smoke gbox80 4x2 boxislandse,run1day,gridcd +smoke gbox80 2x4 boxislandsne,run1day,gridcd smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day @@ -65,15 +65,15 @@ smoke gbox80 3x3 boxwall,gridc smoke gbox80 2x2 boxwallblock,gridc smoke gbox80 1x1 boxslotcyl,gridc smoke gbox80 2x4 boxnodyn,gridc -smoke gbox80 2x2 boxsymn,gridc -smoke gbox80 4x2 boxsyme,gridc -smoke gbox80 4x1 boxsymne,gridc -smoke gbox80 2x2 boxsymn,kmtislands,gridc -smoke gbox80 4x1 boxsyme,kmtislands,gridc -smoke gbox80 4x2 boxsymne,kmtislands,gridc -smoke gbox80 8x1 boxislandsn,gridc -smoke gbox80 4x2 boxislandse,gridc -smoke gbox80 2x4 boxislandsne,gridc +smoke gbox80 2x2 boxsymn,run1day,gridc +smoke gbox80 4x2 boxsyme,run1day,gridc +smoke gbox80 4x1 boxsymne,run1day,gridc +smoke gbox80 2x2 boxsymn,run1day,kmtislands,gridc +smoke gbox80 4x1 boxsyme,run1day,kmtislands,gridc +smoke gbox80 4x2 boxsymne,run1day,kmtislands,gridc +smoke gbox80 8x1 boxislandsn,run1day,gridc +smoke gbox80 4x2 boxislandse,run1day,gridc +smoke gbox80 2x4 boxislandsne,run1day,gridc smoke gx3 1x1x100x116x1 reprosum,run10day,gridc smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day smoke gx3 1x1x5x4x580 
reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 8ec9c8f4a..b058d1503 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -215,7 +215,7 @@ either Celsius or Kelvin units). "etax2", "2 x eta (shear viscous coefficient)", "kg/s" "evap", "evaporative water flux", "kg/m\ :math:`^2`/s" "ew_boundary_type", "type of east-west boundary condition", "" - "eyc", "coefficient for calculating the parameter E, 0\ :math:`<` eyc :math:`<`\ 1", "0.36" + "elasticDamp", "coefficient for calculating the parameter E, 0\ :math:`<` elasticDamp :math:`<`\ 1", "0.36" "e_yieldcurve", "yield curve minor/major axis ratio", "2" "e_plasticpot", "plastic potential minor/major axis ratio", "2" "**F**", "", "" diff --git a/doc/source/science_guide/sg_dynamics.rst b/doc/source/science_guide/sg_dynamics.rst index 9c529b8ec..b627f896d 100644 --- a/doc/source/science_guide/sg_dynamics.rst +++ b/doc/source/science_guide/sg_dynamics.rst @@ -457,7 +457,7 @@ for elastic waves, :math:`\Delta t_e < T < \Delta t`, as .. math:: E = {\zeta\over T}, -where :math:`T=E_\circ\Delta t` and :math:`E_\circ` (eyc) is a tunable +where :math:`T=E_\circ\Delta t` and :math:`E_\circ` (elasticDamp) is a tunable parameter less than one. Including the modification proposed by :cite:`Bouillon13` for equations :eq:`sig2` and :eq:`sig12` in order to improve numerical convergence, the stress equations become .. math:: diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 51437ae1e..fb3806bfd 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -424,6 +424,7 @@ dynamics_nml "``dim_pgmres``", "integer", "maximum number of Arnoldi iterations for PGMRES preconditioner", "5" "``e_plasticpot``", "real", "aspect ratio of elliptical plastic potential", "2.0" "``e_yieldcurve``", "real", "aspect ratio of elliptical yield curve", "2.0" + "``elasticDamp``", "real", "elastic damping parameter", "0.36" "``evp_algorithm``", "``standard_2d``", "standard 2d EVP memory parallel solver", "standard_2d" "", "``shared_mem_1d``", "1d shared memory solver", "" "``kdyn``", "``-1``", "dynamics algorithm OFF", "1" diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 624d135c3..9fc5069d1 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -1026,9 +1026,9 @@ t_e`) is thus .. math:: dte = dt\_dyn/ndte. -A second parameter, :math:`E_\circ` (``eyc``), defines the elastic wave +A second parameter, :math:`E_\circ` (``elasticDamp``), defines the elastic wave damping timescale :math:`T`, described in Section :ref:`dynam`, as -``eyc * dt_dyn``. The forcing terms are not updated during the subcycling. +``elasticDamp * dt_dyn``. The forcing terms are not updated during the subcycling. Given the small step (``dte``) at which the EVP dynamics model is subcycled, the elastic parameter :math:`E` is also limited by stability constraints, as discussed in :cite:`Hunke97`. 
Linear stability From 97806bcbe4b7b5a9a83a064bf0ea61f6dfde59a0 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Tue, 15 Mar 2022 10:06:55 -0700 Subject: [PATCH 082/109] split if block sections for C and CD in evp for further cleanup (#68) --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 272 +++++++++++++++------ 1 file changed, 196 insertions(+), 76 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index b58fbc1c7..464c786bb 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -755,7 +755,7 @@ subroutine evp (dt) enddo ! iblk !$OMP END PARALLEL DO - elseif (grid_ice == "C" .or. grid_ice == "CD") then + elseif (grid_ice == "C") then !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -868,75 +868,34 @@ subroutine evp (dt) enddo !$OMP END PARALLEL DO - if (grid_ice == 'CD') then - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - - call step_vel (nx_block, ny_block, & ! E point - icelle (iblk), Cdn_ocn (:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & - aiE (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - waterxE (:,:,iblk), wateryE (:,:,iblk), & - forcexE (:,:,iblk), forceyE (:,:,iblk), & - emassdti (:,:,iblk), fmE (:,:,iblk), & - strintxE (:,:,iblk), strintyE (:,:,iblk), & - taubxE (:,:,iblk), taubyE (:,:,iblk), & - uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - TbE (:,:,iblk)) - - call step_vel (nx_block, ny_block, & ! N point - icelln (iblk), Cdn_ocn (:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & - aiN (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - waterxN (:,:,iblk), wateryN (:,:,iblk), & - forcexN (:,:,iblk), forceyN (:,:,iblk), & - nmassdti (:,:,iblk), fmN (:,:,iblk), & - strintxN (:,:,iblk), strintyN (:,:,iblk), & - taubxN (:,:,iblk), taubyN (:,:,iblk), & - uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - TbN (:,:,iblk)) - - enddo - !$OMP END PARALLEL DO - - elseif (grid_ice == 'C') then - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - - call stepu_Cgrid (nx_block, ny_block, & ! u, E point - icelle (iblk), Cdn_ocn (:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & - aiE (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - waterxE (:,:,iblk), forcexE (:,:,iblk), & - emassdti (:,:,iblk), fmE (:,:,iblk), & - strintxE (:,:,iblk), taubxE (:,:,iblk), & - uvelE_init(:,:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - TbE (:,:,iblk)) - - call stepv_Cgrid (nx_block, ny_block, & ! v, N point - icelln (iblk), Cdn_ocn (:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & - aiN (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - wateryN (:,:,iblk), forceyN (:,:,iblk), & - nmassdti (:,:,iblk), fmN (:,:,iblk), & - strintyN (:,:,iblk), taubyN (:,:,iblk), & - vvelN_init(:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - TbN (:,:,iblk)) - - enddo - !$OMP END PARALLEL DO - - endif + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call stepu_Cgrid (nx_block, ny_block, & ! u, E point + icelle (iblk), Cdn_ocn (:,:,iblk), & + indxei (:,iblk), indxej (:,iblk), & + aiE (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + waterxE (:,:,iblk), forcexE (:,:,iblk), & + emassdti (:,:,iblk), fmE (:,:,iblk), & + strintxE (:,:,iblk), taubxE (:,:,iblk), & + uvelE_init(:,:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + TbE (:,:,iblk)) + + call stepv_Cgrid (nx_block, ny_block, & ! 
v, N point + icelln (iblk), Cdn_ocn (:,:,iblk), & + indxni (:,iblk), indxnj (:,iblk), & + aiN (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + wateryN (:,:,iblk), forceyN (:,:,iblk), & + nmassdti (:,:,iblk), fmN (:,:,iblk), & + strintyN (:,:,iblk), taubyN (:,:,iblk), & + vvelN_init(:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + TbN (:,:,iblk)) + enddo + !$OMP END PARALLEL DO call ice_timer_start(timer_bound) call ice_HaloUpdate (uvelE, halo_info, & @@ -945,14 +904,175 @@ subroutine evp (dt) field_loc_Nface, field_type_vector) call ice_timer_stop(timer_bound) - if (grid_ice == 'C') then - call grid_average_X2Y('A',uvelE,'E',uvelN,'N') - call grid_average_X2Y('A',vvelN,'N',vvelE,'E') - uvelN(:,:,:) = uvelN(:,:,:)*npm(:,:,:) - vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) - endif + call grid_average_X2Y('A',uvelE,'E',uvelN,'N') + call grid_average_X2Y('A',vvelN,'N',vvelE,'E') + uvelN(:,:,:) = uvelN(:,:,:)*npm(:,:,:) + vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (uvelN, halo_info, & + field_loc_Nface, field_type_vector) + call ice_HaloUpdate (vvelE, halo_info, & + field_loc_Eface, field_type_vector) + call ice_timer_stop(timer_bound) + + call grid_average_X2Y('S',uvelE,'E',uvel,'U') + call grid_average_X2Y('S',vvelN,'N',vvel,'U') + + uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) + vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) + + elseif (grid_ice == "CD") then + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call stress_T (nx_block, ny_block, & + icellt(iblk), & + indxti (:,iblk), indxtj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + DminTarea (:,:,iblk), & + strength (:,:,iblk), & + zetax2T (:,:,iblk), etax2T (:,:,iblk), & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12T (:,:,iblk) ) + + !----------------------------------------------------------------- + ! on last subcycle, save quantities for mechanical redistribution + !----------------------------------------------------------------- + if (ksub == ndte) then + call deformations_T (nx_block , ny_block , & + icellt (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + tarear (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) + endif + enddo + !$OMP END PARALLEL DO + ! Need to update the halos for the stress components call ice_timer_start(timer_bound) + call ice_HaloUpdate (zetax2T, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (etax2T, halo_info, & + field_loc_center, field_type_scalar) + call ice_timer_stop(timer_bound) + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call stress_U (nx_block, ny_block, & + icellu(iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxE (:,:,iblk), dyN (:,:,iblk), & + dxU (:,:,iblk), dyU (:,:,iblk), & + tarea (:,:,iblk), uarea (:,:,iblk), & + ratiodxN (:,:,iblk), ratiodxNr (:,:,iblk), & + ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & + epm (:,:,iblk), npm (:,:,iblk), & + hm (:,:,iblk), uvm (:,:,iblk), & + zetax2T (:,:,iblk), etax2T (:,:,iblk), & + strength (:,:,iblk), & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12U (:,:,iblk)) + enddo + !$OMP END PARALLEL DO + + ! 
Need to update the halos for the stress components + call ice_timer_start(timer_bound) + call ice_HaloUpdate (stresspT, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (stressmT, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (stress12T, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate (stresspU, halo_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloUpdate (stressmU, halo_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloUpdate (stress12U, halo_info, & + field_loc_NEcorner, field_type_scalar) + call ice_timer_stop(timer_bound) + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call div_stress (nx_block, ny_block, & ! E point + icelle(iblk), & + indxei (:,iblk), indxej (:,iblk), & + dxE (:,:,iblk), dyE (:,:,iblk), & + dxU (:,:,iblk), dyT (:,:,iblk), & + earear (:,:,iblk), & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12T (:,:,iblk), & + strintxE (:,:,iblk), strintyE (:,:,iblk), & + 'E') + + call div_stress (nx_block, ny_block, & ! N point + icelln(iblk), & + indxni (:,iblk), indxnj (:,iblk), & + dxN (:,:,iblk), dyN (:,:,iblk), & + dxT (:,:,iblk), dyU (:,:,iblk), & + narear (:,:,iblk), & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12T (:,:,iblk), & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), & + strintxN (:,:,iblk), strintyN (:,:,iblk), & + 'N') + + enddo + !$OMP END PARALLEL DO + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call step_vel (nx_block, ny_block, & ! E point + icelle (iblk), Cdn_ocn (:,:,iblk), & + indxei (:,iblk), indxej (:,iblk), & + aiE (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + waterxE (:,:,iblk), wateryE (:,:,iblk), & + forcexE (:,:,iblk), forceyE (:,:,iblk), & + emassdti (:,:,iblk), fmE (:,:,iblk), & + strintxE (:,:,iblk), strintyE (:,:,iblk), & + taubxE (:,:,iblk), taubyE (:,:,iblk), & + uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + TbE (:,:,iblk)) + + call step_vel (nx_block, ny_block, & ! N point + icelln (iblk), Cdn_ocn (:,:,iblk), & + indxni (:,iblk), indxnj (:,iblk), & + aiN (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + waterxN (:,:,iblk), wateryN (:,:,iblk), & + forcexN (:,:,iblk), forceyN (:,:,iblk), & + nmassdti (:,:,iblk), fmN (:,:,iblk), & + strintxN (:,:,iblk), strintyN (:,:,iblk), & + taubxN (:,:,iblk), taubyN (:,:,iblk), & + uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + TbN (:,:,iblk)) + enddo + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (uvelE, halo_info, & + field_loc_Eface, field_type_vector) + call ice_HaloUpdate (vvelN, halo_info, & + field_loc_Nface, field_type_vector) call ice_HaloUpdate (uvelN, halo_info, & field_loc_Nface, field_type_vector) call ice_HaloUpdate (vvelE, halo_info, & From 5d5a7e0e0132a0da9194d58320e5547f4419bce5 Mon Sep 17 00:00:00 2001 From: "David A. 
Bailey" Date: Tue, 15 Mar 2022 12:35:44 -0600 Subject: [PATCH 083/109] Create new stressC_T and stressC_U routines (#69) * Add new stressC routines * Add calls to new stressC routines --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 261 ++++++++++++++++++++- 1 file changed, 250 insertions(+), 11 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 464c786bb..50e7051c2 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -759,7 +759,7 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stress_T (nx_block, ny_block, & + call stressC_T (nx_block, ny_block, & icellt(iblk), & indxti (:,iblk), indxtj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & @@ -769,8 +769,7 @@ subroutine evp (dt) DminTarea (:,:,iblk), & strength (:,:,iblk), & zetax2T (:,:,iblk), etax2T (:,:,iblk), & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12T (:,:,iblk) ) + stresspT (:,:,iblk), stressmT (:,:,iblk)) !----------------------------------------------------------------- ! on last subcycle, save quantities for mechanical redistribution @@ -800,7 +799,7 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stress_U (nx_block, ny_block, & + call stressC_U (nx_block, ny_block, & icellu(iblk), & indxui (:,iblk), indxuj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & @@ -815,7 +814,6 @@ subroutine evp (dt) hm (:,:,iblk), uvm (:,:,iblk), & zetax2T (:,:,iblk), etax2T (:,:,iblk), & strength (:,:,iblk), & - stresspU (:,:,iblk), stressmU (:,:,iblk), & stress12U (:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -826,12 +824,6 @@ subroutine evp (dt) field_loc_center, field_type_scalar) call ice_HaloUpdate (stressmT, halo_info, & field_loc_center, field_type_scalar) - call ice_HaloUpdate (stress12T, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate (stresspU, halo_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloUpdate (stressmU, halo_info, & - field_loc_NEcorner, field_type_scalar) call ice_HaloUpdate (stress12U, halo_info, & field_loc_NEcorner, field_type_scalar) call ice_timer_stop(timer_bound) @@ -1560,6 +1552,253 @@ end subroutine stress !======================================================================= +! Computes the strain rates and internal stress components for C grid + +! author: JF Lemieux, ECCC +! updated: D. Bailey, NCAR +! Nov 2021 + + subroutine stressC_T (nx_block, ny_block, & + icellt, & + indxti, indxtj, & + uvelE, vvelE, & + uvelN, vvelN, & + dxN, dyE, & + dxT, dyT, & + DminTarea, & + strength, & + zetax2T, etax2T, & + stressp, stressm) + + use ice_dyn_shared, only: strain_rates_T, capping, & + visccoeff_replpress + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icellt ! no. of cells where icetmask = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxti , & ! compressed index in i-direction + indxtj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + uvelE , & ! x-component of velocity (m/s) at the E point + vvelE , & ! y-component of velocity (m/s) at the E point + uvelN , & ! x-component of velocity (m/s) at the N point + vvelN , & ! y-component of velocity (m/s) at the N point + dxN , & ! width of N-cell through the middle (m) + dyE , & ! height of E-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT , & ! 
height of T-cell through the middle (m) + strength , & ! ice strength (N/m) + DminTarea ! deltaminEVP*tarea + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + zetax2T , & ! zetax2 = 2*zeta (bulk viscous coeff) + etax2T , & ! etax2 = 2*eta (shear viscous coeff) + stressp , & ! sigma11+sigma22 + stressm ! sigma11-sigma22 + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + real (kind=dbl_kind) :: & + divT, tensionT, shearT, DeltaT, & ! strain rates at T point + rep_prsT ! replacement pressure at T point + + character(len=*), parameter :: subname = '(stressC_T)' + + !----------------------------------------------------------------- + ! Initialize + !----------------------------------------------------------------- + + + do ij = 1, icellt + i = indxti(ij) + j = indxtj(ij) + + !----------------------------------------------------------------- + ! strain rates at T point + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + + call strain_rates_T (nx_block, ny_block, & + i, j, & + uvelE, vvelE, & + uvelN, vvelN, & + dxN, dyE, & + dxT, dyT, & + divT, tensionT, & + shearT, DeltaT ) + + !----------------------------------------------------------------- + ! viscous coefficients and replacement pressure at T point + !----------------------------------------------------------------- + + call visccoeff_replpress (strength(i,j), DminTarea(i,j), & + DeltaT , zetax2T (i,j), & + etax2T (i,j), rep_prsT , & + capping) + + !----------------------------------------------------------------- + ! the stresses ! kg/s^2 + !----------------------------------------------------------------- + + ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code + + stressp(i,j) = (stressp(i,j)*(c1-arlx1i*revp) + & + arlx1i*(zetax2T(i,j)*divT - rep_prsT)) * denom1 + + stressm(i,j) = (stressm(i,j)*(c1-arlx1i*revp) + & + arlx1i*etax2T(i,j)*tensionT) * denom1 + + enddo ! ij + + end subroutine stressC_T + +!======================================================================= + +! Computes the strain rates and internal stress components for U points + +! author: JF Lemieux, ECCC +! Nov 2021 + + subroutine stressC_U (nx_block, ny_block, & + icellu, & + indxui, indxuj, & + uvelE, vvelE, & + uvelN, vvelN, & + uvelU, vvelU, & + dxE, dyN, & + dxU, dyU, & + tarea, uarea, & + ratiodxN, ratiodxNr, & + ratiodyE, ratiodyEr, & + epm, npm, hm, uvm, & + zetax2T, etax2T, & + strength, & + stress12 ) + + use ice_dyn_shared, only: strain_rates_U, & + visccoeff_replpress_avgstr, & + visccoeff_replpress_avgzeta, & + visc_coeff_method, deltaminEVP, capping + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icellu ! no. of cells where iceumask = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxui , & ! compressed index in i-direction + indxuj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + uvelE , & ! x-component of velocity (m/s) at the E point + vvelE , & ! y-component of velocity (m/s) at the E point + uvelN , & ! x-component of velocity (m/s) at the N point + vvelN , & ! y-component of velocity (m/s) at the N point + uvelU , & ! x-component of velocity (m/s) at the U point + vvelU , & ! y-component of velocity (m/s) at the U point + dxE , & ! width of E-cell through the middle (m) + dyN , & ! height of N-cell through the middle (m) + dxU , & ! 
width of U-cell through the middle (m) + dyU , & ! height of U-cell through the middle (m) + tarea , & ! area of T-cell (m^2) + uarea , & ! area of U-cell (m^2) + ratiodxN , & ! -dxN(i+1,j)/dxN(i,j) factor for BCs across coastline + ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) factor for BCs across coastline + ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) factor for BCs across coastline + ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) factor for BCs across coastline + epm , & ! E-cell mask + npm , & ! N-cell mask + hm , & ! T-cell mask + uvm , & ! U-cell mask + zetax2T , & ! 2*zeta at the T point + etax2T , & ! 2*eta at the T point + strength ! ice strength at the T point + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + stress12 ! sigma12 + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + real (kind=dbl_kind) :: & + divU, tensionU, shearU, DeltaU, & ! strain rates at U point + zetax2U, etax2U, rep_prsU, & ! replacement pressure at U point + DminUarea, strtmp, areatmp ! Dmin on U and tmp variables + + character(len=*), parameter :: subname = '(stressC_U)' + + do ij = 1, icellu + i = indxui(ij) + j = indxuj(ij) + + !----------------------------------------------------------------- + ! strain rates at U point + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + + call strain_rates_U (nx_block, ny_block, & + i, j, & + uvelE, vvelE, & + uvelN, vvelN, & + uvelU, vvelU, & + dxE, dyN, & + dxU, dyU, & + ratiodxN, ratiodxNr, & + ratiodyE, ratiodyEr, & + epm, npm, uvm, & + divU, tensionU, & + shearU, DeltaU ) + + !----------------------------------------------------------------- + ! viscous coefficients and replacement pressure at U point + !----------------------------------------------------------------- + + if (visc_coeff_method == 'avg_zeta') then + call visccoeff_replpress_avgzeta (zetax2T (i ,j ), zetax2T (i ,j+1), & + zetax2T (i+1,j+1), zetax2T (i+1,j ), & + etax2T (i ,j ), etax2T (i ,j+1), & + etax2T (i+1,j+1), etax2T (i+1,j ), & + hm (i ,j ), hm (i ,j+1), & + hm (i+1,j+1), hm (i+1,j ), & + tarea (i ,j ), tarea (i ,j+1), & + tarea (i+1,j+1), tarea (i+1,j ), & + DeltaU, zetax2U, etax2U, rep_prsU) + + elseif (visc_coeff_method == 'avg_strength') then + DminUarea = deltaminEVP*uarea(i,j) + call visccoeff_replpress_avgstr (strength(i ,j ), strength(i ,j+1), & + strength(i+1,j+1), strength(i+1,j ), & + hm (i ,j ) , hm (i ,j+1), & + hm (i+1,j+1) , hm (i+1,j ), & + tarea (i ,j ) , tarea (i ,j+1), & + tarea (i+1,j+1) , tarea (i+1,j ), & + DminUarea, DeltaU, & + zetax2U, etax2U, rep_prsU, capping) + endif + + !----------------------------------------------------------------- + ! the stresses ! kg/s^2 + !----------------------------------------------------------------- + + ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code + + stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) + & + arlx1i*p5*etax2U*shearU) * denom1 + + enddo ! ij + + + end subroutine stressC_U + +!======================================================================= + ! Computes the strain rates and internal stress components for T points ! 
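! A worked check of the coastline factors declared above (ratiodyE is
! defined as -dyE(i,j+1)/dyE(i,j), so it is -1 on a uniform grid): with
! ocean at (i,j) and land at (i,j+1), the masked combination used in the
! strain-rate stencils yields a mirrored, no-slip-like ghost velocity and
! the land value cannot leak through.  Hypothetical standalone driver:
program bc_check
   integer, parameter :: dbl_kind = selected_real_kind(13)
   real (kind=dbl_kind) :: epm_ij, epm_ijp1, ratiodyE, u_ij, u_ijp1, uEijp1
   epm_ij   = 1.0_dbl_kind           ! ocean E point at (i,j)
   epm_ijp1 = 0.0_dbl_kind           ! land E point at (i,j+1)
   ratiodyE = -1.0_dbl_kind          ! -dyE(i,j+1)/dyE(i,j), uniform grid
   u_ij     = 0.3_dbl_kind
   u_ijp1   = 999.0_dbl_kind         ! land value; must not leak through
   uEijp1 = u_ijp1*epm_ijp1 + (epm_ij - epm_ijp1)*epm_ij*ratiodyE*u_ij
   print *, uEijp1                   ! -0.3: the mirrored ghost value
end program bc_check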
author: JF Lemieux, ECCC From edede9b0a91ca8613e9b921621d807b93b5a6867 Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Tue, 15 Mar 2022 20:39:56 +0000 Subject: [PATCH 084/109] Add shear strain rate code (#70) * Added new code for shear at T point calculation * Finished adding new shear code...calls are commented out * Fixed compiling issue --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 32 ++++- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 130 +++++++++++++++++- 2 files changed, 159 insertions(+), 3 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 50e7051c2..9ad299acf 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -114,7 +114,7 @@ subroutine evp (dt) use ice_dyn_evp_1d, only: ice_dyn_evp_1d_copyin, ice_dyn_evp_1d_kernel, & ice_dyn_evp_1d_copyout use ice_dyn_shared, only: evp_algorithm, stack_velocity_field, unstack_velocity_field, DminTarea - use ice_dyn_shared, only: deformations, deformations_T + use ice_dyn_shared, only: deformations, deformations_T, shear_strain_rate_U real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -757,6 +757,28 @@ subroutine evp (dt) elseif (grid_ice == "C") then +! !$OMP PARALLEL DO PRIVATE(iblk) +! do iblk = 1, nblocks +! call shear_strain_rate_U (nx_block, ny_block, & +! icellu (iblk) , & +! indxui (:,iblk) , indxuj (:,iblk), & +! uvelE (:,:,iblk), vvelN (:,:,iblk), & +! uvel (:,:,iblk), vvel (:,:,iblk), & +! dxE (:,:,iblk), dyN (:,:,iblk), & +! dxU (:,:,iblk), dyU (:,:,iblk), & +! ratiodxN (:,:,iblk), ratiodxNr (:,:,iblk), & +! ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & +! epm (:,:,iblk), npm (:,:,iblk), & +! uvm (:,:,iblk), & +! shrU (:,:,iblk)) +! enddo +! !$OMP END PARALLEL DO + +! call ice_timer_start(timer_bound) +! call ice_HaloUpdate (shrU, halo_info, & +! field_loc_NEcorner, field_type_scalar) +! call ice_timer_stop(timer_bound) + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stressC_T (nx_block, ny_block, & @@ -1571,7 +1593,7 @@ subroutine stressC_T (nx_block, ny_block, & stressp, stressm) use ice_dyn_shared, only: strain_rates_T, capping, & - visccoeff_replpress + visccoeff_replpress !, calc_shearT_DeltaT integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1633,6 +1655,12 @@ subroutine stressC_T (nx_block, ny_block, & divT, tensionT, & shearT, DeltaT ) + +! call calc_shearT_DeltaT (shrU(i,j), shrU(i,j-1), & +! shrU(i-1,j-1), shrU(i-1,j), & +! divT, tensionT, & +! shearT, DeltaT ) + !----------------------------------------------------------------- ! 
viscous coefficients and replacement pressure at T point !----------------------------------------------------------------- diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index f9f1b3c92..32c9da480 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -29,6 +29,7 @@ module ice_dyn_shared alloc_dyn_shared, & deformations, deformations_T, & strain_rates, strain_rates_T, strain_rates_U, & + shear_strain_rate_U, calc_shearT_DeltaT, & visccoeff_replpress, & visccoeff_replpress_avgstr, & visccoeff_replpress_avgzeta, & @@ -1932,6 +1933,54 @@ subroutine strain_rates_T (nx_block, ny_block, & end subroutine strain_rates_T + + !======================================================================= + + subroutine calc_shearT_DeltaT (shrUij, shrUijm1, & + shrUim1jm1, shrUim1j, & + divT, tensionT, & + shearT, DeltaT ) + + real (kind=dbl_kind), intent(in) :: & + shrUij , & ! shear strain rate at U point (i,j) + shrUijm1 , & ! shear strain rate at U point (i,j-1) + shrUim1jm1, & ! shear strain rate at U point (i-1,j-1) + shrUim1j, & ! shear strain rate at U point (i-1,j) + divT, & + tensionT + + real (kind=dbl_kind), intent(inout):: & + shearT, DeltaT ! strain rates at the T point + + character(len=*), parameter :: subname = '(calc_shearT_DeltaT)' + + ! local variables + real (kind=dbl_kind) :: shearTsqr + + logical (kind=log_kind) :: B2009 + + !----------------------------------------------------------------- + ! strain rates + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + + B2009 = .false. + + shearT = ( shrUij + shrUijm1 + shrUim1jm1 + shrUim1j ) / 4d0 + + if (B2009) then + + DeltaT = sqrt(divT**2 + e_factor*(tensionT**2 + shearT**2)) + + else + + shearTsqr = (shrUij**2 + shrUijm1**2 + shrUim1jm1**2 + shrUim1j**2)/4d0 + DeltaT = sqrt(divT**2 + e_factor*(tensionT**2 + shearTsqr)) + + endif + + end subroutine calc_shearT_DeltaT + !======================================================================= ! Compute strain rates at the U point including boundary conditions @@ -2034,8 +2083,87 @@ subroutine strain_rates_U (nx_block, ny_block, & ! Delta (in the denominator of zeta, eta) DeltaU = sqrt(divU**2 + e_factor*(tensionU**2 + shearU**2)) - end subroutine strain_rates_U + end subroutine strain_rates_U + +!======================================================================= + +! Computes and stores the shear strain rate at U points based on C-grid +! velocity components (uvelE and vvelN) + + subroutine shear_strain_rate_U (nx_block, ny_block, & + icellu, & + indxui, indxuj, & + uvelE, vvelN, & + uvelU, vvelU, & + dxE, dyN, & + dxU, dyU, & + ratiodxN, ratiodxNr, & + ratiodyE, ratiodyEr, & + epm, npm, uvm, & + shrU) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icellu ! no. of cells where iceumask = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxui , & ! compressed index in i-direction + indxuj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + uvelE , & ! x-component of velocity (m/s) at the E point + vvelN , & ! y-component of velocity (m/s) at the N point + uvelU , & ! x-component of velocity (m/s) at the U point + vvelU , & ! y-component of velocity (m/s) at the U point + dxE , & ! width of E-cell through the middle (m) + dyN , & ! 
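! Away from coastlines (all masks = 1) the discrete shear formed below
! reduces to
!   shrU = dxU*(uvelE(i,j+1)-uvelE(i,j)) - uvelU*(dxE(i,j+1)-dxE(i,j))
!        + dyU*(vvelN(i+1,j)-vvelN(i,j)) - vvelU*(dyN(i+1,j)-dyN(i,j))
! i.e. 2*e_12 times the cell area, with the metric terms of the curvilinear
! grid.  A pointwise sketch (name and interface hypothetical; the masked,
! mirrored velocities of the full routine are assumed already applied):
pure function shrU_point (uE_jp1, uE_j, vN_ip1, vN_j, uU, vU, &
                          dxU, dyU, dxE_jp1, dxE_j, dyN_ip1, dyN_j) result (shr)
   integer, parameter :: dbl_kind = selected_real_kind(13)
   real (kind=dbl_kind), intent(in) :: uE_jp1, uE_j       ! uvelE at (i,j+1), (i,j)
   real (kind=dbl_kind), intent(in) :: vN_ip1, vN_j       ! vvelN at (i+1,j), (i,j)
   real (kind=dbl_kind), intent(in) :: uU, vU             ! U-point velocities
   real (kind=dbl_kind), intent(in) :: dxU, dyU           ! U-cell width/height (m)
   real (kind=dbl_kind), intent(in) :: dxE_jp1, dxE_j     ! dxE at (i,j+1), (i,j)
   real (kind=dbl_kind), intent(in) :: dyN_ip1, dyN_j     ! dyN at (i+1,j), (i,j)
   real (kind=dbl_kind) :: shr
   shr = dxU*(uE_jp1 - uE_j) - uU*(dxE_jp1 - dxE_j) &
       + dyU*(vN_ip1 - vN_j) - vU*(dyN_ip1 - dyN_j)
end function shrU_point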
height of N-cell through the middle (m) + dxU , & ! width of U-cell through the middle (m) + dyU , & ! height of U-cell through the middle (m) + ratiodxN , & ! -dxN(i+1,j)/dxN(i,j) factor for BCs across coastline + ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) factor for BCs across coastline + ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) factor for BCs across coastline + ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) factor for BCs across coastline + epm , & ! E-cell mask + npm , & ! N-cell mask + uvm ! U-cell mask + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + shrU ! shear strain rate at U point (m^2/s) + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + real (kind=dbl_kind) :: & + uEijp1, uEij, vNip1j, vNij + + character(len=*), parameter :: subname = '(shear_strain_rate_U)' + + do ij = 1, icellu + i = indxui(ij) + j = indxuj(ij) + + uEijp1 = uvelE(i,j+1) * epm(i,j+1) & + +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * uvelE(i,j) + uEij = uvelE(i,j) * epm(i,j) & + +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * uvelE(i,j+1) + vNip1j = vvelN(i+1,j) * npm(i+1,j) & + +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * vvelN(i,j) + vNij = vvelN(i,j) * npm(i,j) & + +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * vvelN(i+1,j) + + ! shear strain rate = 2*e_12 + ! NOTE these are actually strain rates * area (m^2/s) + shrU(i,j) = dxU(i,j) * ( uEijp1 - uEij ) & + - uvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & + + dyU(i,j) * ( vNip1j - vNij ) & + - vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) + + enddo ! ij + + end subroutine shear_strain_rate_U + !======================================================================= ! Computes viscous coefficients and replacement pressure for stress ! calculations. Note that tensile strength is included here. From ff8fb699f89ac93b8b73ae4f2ad1d126931878fe Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Tue, 15 Mar 2022 14:40:10 -0600 Subject: [PATCH 085/109] Change restart for C-grid (#71) --- cicecore/cicedynB/infrastructure/ice_restart_driver.F90 | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index 07f05af30..2e236b62a 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -196,7 +196,7 @@ subroutine dumpfile(filename_spec) call write_restart_field(nu_dump,0,stress12_2,'ruf8','stress12_2',1,diag) call write_restart_field(nu_dump,0,stress12_4,'ruf8','stress12_4',1,diag) - if (grid_ice == 'CD' .or. grid_ice == 'C') then + if (grid_ice == 'CD') then call write_restart_field(nu_dump,0,stresspT ,'ruf8','stresspT' ,1,diag) call write_restart_field(nu_dump,0,stressmT ,'ruf8','stressmT' ,1,diag) call write_restart_field(nu_dump,0,stress12T,'ruf8','stress12T',1,diag) @@ -205,6 +205,12 @@ subroutine dumpfile(filename_spec) call write_restart_field(nu_dump,0,stress12U,'ruf8','stress12U',1,diag) endif + if (grid_ice == 'C') then + call write_restart_field(nu_dump,0,stresspT ,'ruf8','stresspT' ,1,diag) + call write_restart_field(nu_dump,0,stressmT ,'ruf8','stressmT' ,1,diag) + call write_restart_field(nu_dump,0,stress12U,'ruf8','stress12U',1,diag) + endif + !----------------------------------------------------------------- ! 
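! The restart split added above reflects how many stress fields each grid
! actually carries: the C grid keeps only stresspT, stressmT and stress12U,
! the CD grid keeps all three at both T and U points, and the B grid keeps
! its four corner values of each.  A small bookkeeping sketch (hypothetical
! helper; the B-grid count follows the stressp_1..4 style restart fields):
pure function n_stress_restart_fields (grid_ice) result (nfields)
   character(len=*), intent(in) :: grid_ice
   integer :: nfields
   select case (trim(grid_ice))
   case ('C')     ! stresspT, stressmT, stress12U
      nfields = 3
   case ('CD')    ! stressp/stressm/stress12 at both T and U points
      nfields = 6
   case default   ! B grid: stressp/stressm/stress12 at corners 1..4
      nfields = 12
   end select
end function n_stress_restart_fields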
ice mask for dynamics !----------------------------------------------------------------- From 8638cb22468ffdc4ef9fb601b514a453be1f14f5 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Tue, 15 Mar 2022 14:49:39 -0700 Subject: [PATCH 086/109] Make some arguments optional in C dyn methods (#72) * Add new stressC routines * Add calls to new stressC routines * make some arguments optional for C grid * fix zetaavg optional arguments Co-authored-by: David Bailey --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 89 +++++++----- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 133 +++++++++++------- 2 files changed, 136 insertions(+), 86 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 9ad299acf..aea1ffc53 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -859,12 +859,11 @@ subroutine evp (dt) dxE (:,:,iblk), dyE (:,:,iblk), & dxU (:,:,iblk), dyT (:,:,iblk), & earear (:,:,iblk), & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12U (:,:,iblk), & - stresspU (:,:,iblk), stressmU (:,:,iblk), & - stress12T (:,:,iblk), & - strintxE (:,:,iblk), strintyE (:,:,iblk), & - 'E') + stresspF1 = stresspT (:,:,iblk), & + stressmF1 = stressmT (:,:,iblk), & + stress12F1 = stress12U (:,:,iblk), & + F1 = strintxE (:,:,iblk), & + grid_location = 'E') call div_stress (nx_block, ny_block, & ! N point icelln(iblk), & @@ -872,12 +871,11 @@ subroutine evp (dt) dxN (:,:,iblk), dyN (:,:,iblk), & dxT (:,:,iblk), dyU (:,:,iblk), & narear (:,:,iblk), & - stresspU (:,:,iblk), stressmU (:,:,iblk), & - stress12T (:,:,iblk), & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12U (:,:,iblk), & - strintxN (:,:,iblk), strintyN (:,:,iblk), & - 'N') + stresspF2 = stresspT (:,:,iblk), & + stressmF2 = stressmT (:,:,iblk), & + stress12F2 = stress12U (:,:,iblk), & + F2 = strintyN (:,:,iblk), & + grid_location = 'N') enddo !$OMP END PARALLEL DO @@ -1593,7 +1591,7 @@ subroutine stressC_T (nx_block, ny_block, & stressp, stressm) use ice_dyn_shared, only: strain_rates_T, capping, & - visccoeff_replpress !, calc_shearT_DeltaT + visccoeff_replpress integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1669,7 +1667,7 @@ subroutine stressC_T (nx_block, ny_block, & DeltaT , zetax2T (i,j), & etax2T (i,j), rep_prsT , & capping) - + !----------------------------------------------------------------- ! the stresses ! kg/s^2 !----------------------------------------------------------------- @@ -1783,7 +1781,7 @@ subroutine stressC_U (nx_block, ny_block, & epm, npm, uvm, & divU, tensionU, & shearU, DeltaU ) - + !----------------------------------------------------------------- ! viscous coefficients and replacement pressure at U point !----------------------------------------------------------------- @@ -1797,7 +1795,7 @@ subroutine stressC_U (nx_block, ny_block, & hm (i+1,j+1), hm (i+1,j ), & tarea (i ,j ), tarea (i ,j+1), & tarea (i+1,j+1), tarea (i+1,j ), & - DeltaU, zetax2U, etax2U, rep_prsU) + DeltaU, etax2U=etax2U) elseif (visc_coeff_method == 'avg_strength') then DminUarea = deltaminEVP*uarea(i,j) @@ -2119,7 +2117,9 @@ subroutine div_stress (nx_block, ny_block, & dyE_N , & ! height of E or N-cell through the middle (m) dxT_U , & ! width of T or U-cell through the middle (m) dyT_U , & ! height of T or U-cell through the middle (m) - arear , & ! earear or narear + arear ! earear or narear + + real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(in) :: & stresspF1 , & ! 
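! The heart of this patch in miniature: outputs become optional and every
! branch is guarded with present(), so callers request only the fields
! they need (div_stress additionally aborts when a requested output is
! missing its companion stress inputs).  Hypothetical standalone demo:
subroutine demo_optional_outputs (a, F1, F2)
   integer, parameter :: dbl_kind = selected_real_kind(13)
   real (kind=dbl_kind), intent(in)            :: a
   real (kind=dbl_kind), intent(out), optional :: F1, F2
   if (present(F1)) F1 = 2.0_dbl_kind*a   ! computed only when requested
   if (present(F2)) F2 = 0.5_dbl_kind*a
end subroutine demo_optional_outputs
! e.g. 'call demo_optional_outputs (x, F2=y)' skips the F1 branch entirely,
! mirroring the F1-only E-point call and F2-only N-point call in evp above.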
stressp (U or T) used for F1 calculation stressmF1 , & ! stressm (U or T) used for F1 calculation stress12F1 , & ! stress12 (U or T) used for F1 calculation @@ -2130,7 +2130,7 @@ subroutine div_stress (nx_block, ny_block, & character(len=*), intent(in) :: & grid_location ! E (East) or N (North) ! TO BE IMPROVED!!!! - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & + real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(out) :: & F1 , & ! div of stress tensor for u component F2 ! div of stress tensor for v component @@ -2143,54 +2143,79 @@ subroutine div_stress (nx_block, ny_block, & !!! Instead of having the if statements below we could define for example ! i+ci, j+cj where ci, cj would change with grid_position - - do ij = 1, icell - i = indxi(ij) - j = indxj(ij) + + if (grid_location /= "E" .and. grid_location /= "N") then + call abort_ice(subname // ' ERROR: unknown grid_location: ' // grid_location) + endif + + if (present(F1) .and. & + (.not.present(stresspF1) .or. .not.present(stressmF1) .or. .not.present(stress12F1))) then + call abort_ice(subname // ' ERROR: F1 passing arguments ') + endif + + if (present(F2) .and. & + (.not.present(stresspF2) .or. .not.present(stressmF2) .or. .not.present(stress12F2))) then + call abort_ice(subname // ' ERROR: F2 passing arguments ') + endif !----------------------------------------------------------------- ! F1,F2 : div of stress tensor for u,v components !----------------------------------------------------------------- - if (grid_location == "E") then - + if (grid_location == "E" .and. present(F1)) then + do ij = 1, icell + i = indxi(ij) + j = indxj(ij) F1(i,j) = arear(i,j) * & ( p5 * dyE_N(i,j) * ( stresspF1(i+1,j)-stresspF1(i,j) ) & + (p5/dyE_N(i,j)) * ( (dyT_U(i+1,j)**2) * stressmF1(i+1,j) & -(dyT_U(i,j)**2)*stressmF1(i,j) ) & + (c1/dxE_N(i,j)) * ( (dxT_U(i,j)**2) * stress12F1(i,j) & -(dxT_U(i,j-1)**2)*stress12F1(i,j-1) ) ) + enddo + endif + if (grid_location == "E" .and. present(F2)) then + do ij = 1, icell + i = indxi(ij) + j = indxj(ij) F2(i,j) = arear(i,j) * & ( p5 * dxE_N(i,j) * ( stresspF2(i,j)-stresspF2(i,j-1) ) & - (p5/dxE_N(i,j)) * ( (dxT_U(i,j)**2) * stressmF2(i,j) & -(dxT_U(i,j-1)**2)*stressmF2(i,j-1) ) & + (c1/dyE_N(i,j)) * ( (dyT_U(i+1,j)**2) * stress12F2(i+1,j) & -(dyT_U(i,j)**2)*stress12F2(i,j) ) ) + enddo + endif - elseif (grid_location == "N") then - + if (grid_location == "N" .and. present(F1)) then + do ij = 1, icell + i = indxi(ij) + j = indxj(ij) F1(i,j) = arear(i,j) * & ( p5 * dyE_N(i,j) * ( stresspF1(i,j)-stresspF1(i-1,j) ) & + (p5/dyE_N(i,j)) * ( (dyT_U(i,j)**2) * stressmF1(i,j) & -(dyT_U(i-1,j)**2)*stressmF1(i-1,j) ) & + (c1/dxE_N(i,j)) * ( (dxT_U(i,j+1)**2) * stress12F1(i,j+1) & -(dxT_U(i,j)**2)*stress12F1(i,j) ) ) + enddo + endif + if (grid_location == "N" .and. present(F2)) then + do ij = 1, icell + i = indxi(ij) + j = indxj(ij) F2(i,j) = arear(i,j) * & ( p5 * dxE_N(i,j) * ( stresspF2(i,j+1)-stresspF2(i,j) ) & - (p5/dxE_N(i,j)) * ( (dxT_U(i,j+1)**2) * stressmF2(i,j+1) & -(dxT_U(i,j)**2)*stressmF2(i,j) ) & + (c1/dyE_N(i,j)) * ( (dyT_U(i,j)**2) * stress12F2(i,j) & -(dyT_U(i-1,j)**2)*stress12F2(i-1,j) ) ) - else - call abort_ice(subname // ' unknown grid_location: ' // grid_location) - endif + enddo + endif - enddo ! 
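! Pointwise form of the E-point F1 branch above, with the metric-weighted
! differences written out (hypothetical name; arear is 1/(E-cell area), so
! the result is the x component of the stress-tensor divergence in N/m^2):
pure function div_stress_F1_E (arear, dyE, dxE,              &
                               sp_ip1, sp_i,                 &
                               dyT_ip1, dyT_i, sm_ip1, sm_i, &
                               dxT_j, dxT_jm1, s12_j, s12_jm1) result (F1)
   integer, parameter :: dbl_kind = selected_real_kind(13)
   real (kind=dbl_kind), intent(in) :: arear, dyE, dxE
   real (kind=dbl_kind), intent(in) :: sp_ip1, sp_i       ! stresspF1 at (i+1,j), (i,j)
   real (kind=dbl_kind), intent(in) :: dyT_ip1, dyT_i     ! dyT_U at (i+1,j), (i,j)
   real (kind=dbl_kind), intent(in) :: sm_ip1, sm_i       ! stressmF1 at (i+1,j), (i,j)
   real (kind=dbl_kind), intent(in) :: dxT_j, dxT_jm1     ! dxT_U at (i,j), (i,j-1)
   real (kind=dbl_kind), intent(in) :: s12_j, s12_jm1     ! stress12F1 at (i,j), (i,j-1)
   real (kind=dbl_kind) :: F1
   F1 = arear * ( 0.5_dbl_kind*dyE*(sp_ip1 - sp_i)                  &
      + (0.5_dbl_kind/dyE)*(dyT_ip1**2*sm_ip1 - dyT_i**2*sm_i)      &
      + (1.0_dbl_kind/dxE)*(dxT_j**2*s12_j - dxT_jm1**2*s12_jm1) )
end function div_stress_F1_E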
ij + end subroutine div_stress - end subroutine div_stress - !======================================================================= end module ice_dyn_evp diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 32c9da480..c67910cd9 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -1767,7 +1767,7 @@ subroutine deformations_T (nx_block, ny_block, & dxT, dyT, & divT, tensionT, & shearT, DeltaT ) - + !----------------------------------------------------------------- ! deformations for mechanical redistribution !----------------------------------------------------------------- @@ -1906,30 +1906,38 @@ subroutine strain_rates_T (nx_block, ny_block, & dxT , & ! width of T-cell through the middle (m) dyT ! height of T-cell through the middle (m) - real (kind=dbl_kind), intent(out):: & + real (kind=dbl_kind), optional, intent(out):: & divT, tensionT, shearT, DeltaT ! strain rates at the T point - + character(len=*), parameter :: subname = '(strain_rates_T)' - + !----------------------------------------------------------------- ! strain rates ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- ! divergence = e_11 + e_22 - divT = dyE(i,j)*uvelE(i ,j ) - dyE(i-1,j)*uvelE(i-1,j ) & - + dxN(i,j)*vvelN(i ,j ) - dxN(i,j-1)*vvelN(i ,j-1) + if (present(deltaT) .or. present(divT)) then + divT = dyE(i,j)*uvelE(i ,j ) - dyE(i-1,j)*uvelE(i-1,j ) & + + dxN(i,j)*vvelN(i ,j ) - dxN(i,j-1)*vvelN(i ,j-1) + endif ! tension strain rate = e_11 - e_22 - tensionT = (dyT(i,j)**2)*(uvelE(i,j)/dyE(i,j) - uvelE(i-1,j)/dyE(i-1,j)) & - - (dxT(i,j)**2)*(vvelN(i,j)/dxN(i,j) - vvelN(i,j-1)/dxN(i,j-1)) + if (present(deltaT) .or. present(tensionT)) then + tensionT = (dyT(i,j)**2)*(uvelE(i,j)/dyE(i,j) - uvelE(i-1,j)/dyE(i-1,j)) & + - (dxT(i,j)**2)*(vvelN(i,j)/dxN(i,j) - vvelN(i,j-1)/dxN(i,j-1)) + endif ! shearing strain rate = 2*e_12 - shearT = (dxT(i,j)**2)*(uvelN(i,j)/dxN(i,j) - uvelN(i,j-1)/dxN(i,j-1)) & - + (dyT(i,j)**2)*(vvelE(i,j)/dyE(i,j) - vvelE(i-1,j)/dyE(i-1,j)) + if (present(deltaT) .or. present(shearT)) then + shearT = (dxT(i,j)**2)*(uvelN(i,j)/dxN(i,j) - uvelN(i,j-1)/dxN(i,j-1)) & + + (dyT(i,j)**2)*(vvelE(i,j)/dyE(i,j) - vvelE(i-1,j)/dyE(i-1,j)) + endif ! Delta (in the denominator of zeta, eta) - DeltaT = sqrt(divT**2 + e_factor*(tensionT**2 + shearT**2)) + if (present(deltaT)) then + DeltaT = sqrt(divT**2 + e_factor*(tensionT**2 + shearT**2)) + endif end subroutine strain_rates_T @@ -2027,7 +2035,7 @@ subroutine strain_rates_U (nx_block, ny_block, & uvm ! U-cell mask - real (kind=dbl_kind), intent(out):: & + real (kind=dbl_kind), optional, intent(out):: & divU, tensionU, shearU, DeltaU ! strain rates at the U point ! local variables @@ -2042,46 +2050,56 @@ subroutine strain_rates_U (nx_block, ny_block, & ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- - uNip1j = uvelN(i+1,j) * npm(i+1,j) & - +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * uvelN(i,j) - uNij = uvelN(i,j) * npm(i,j) & - +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * uvelN(i+1,j) - vEijp1 = vvelE(i,j+1) * epm(i,j+1) & - +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * vvelE(i,j) - vEij = vvelE(i,j) * epm(i,j) & - +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * vvelE(i,j+1) + if (present(divU) .or. present(tensionU) .or. 
present(DeltaU)) then + uNip1j = uvelN(i+1,j) * npm(i+1,j) & + +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * uvelN(i,j) + uNij = uvelN(i,j) * npm(i,j) & + +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * uvelN(i+1,j) + vEijp1 = vvelE(i,j+1) * epm(i,j+1) & + +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * vvelE(i,j) + vEij = vvelE(i,j) * epm(i,j) & + +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * vvelE(i,j+1) + endif ! MIGHT NOT NEED TO mult by uvm...if done before in calc of uvelU... ! divergence = e_11 + e_22 - divU = dyU(i,j) * ( uNip1j - uNij ) & - + uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & - + dxU(i,j) * ( vEijp1 - vEij ) & - + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) + if (present(divU) .or. present(DeltaU)) then + divU = dyU(i,j) * ( uNip1j - uNij ) & + + uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & + + dxU(i,j) * ( vEijp1 - vEij ) & + + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) + endif ! tension strain rate = e_11 - e_22 - tensionU = dyU(i,j) * ( uNip1j - uNij ) & - - uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & - - dxU(i,j) * ( vEijp1 - vEij ) & - + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) - - uEijp1 = uvelE(i,j+1) * epm(i,j+1) & - +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * uvelE(i,j) - uEij = uvelE(i,j) * epm(i,j) & - +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * uvelE(i,j+1) - vNip1j = vvelN(i+1,j) * npm(i+1,j) & - +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * vvelN(i,j) - vNij = vvelN(i,j) * npm(i,j) & - +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * vvelN(i+1,j) + if (present(tensionU) .or. present(DeltaU)) then + tensionU = dyU(i,j) * ( uNip1j - uNij ) & + - uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & + - dxU(i,j) * ( vEijp1 - vEij ) & + + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) + endif + + if (present(shearU) .or. present(DeltaU)) then + uEijp1 = uvelE(i,j+1) * epm(i,j+1) & + +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * uvelE(i,j) + uEij = uvelE(i,j) * epm(i,j) & + +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * uvelE(i,j+1) + vNip1j = vvelN(i+1,j) * npm(i+1,j) & + +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * vvelN(i,j) + vNij = vvelN(i,j) * npm(i,j) & + +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * vvelN(i+1,j) - ! shearing strain rate = 2*e_12 - shearU = dxU(i,j) * ( uEijp1 - uEij ) & - - uvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & - + dyU(i,j) * ( vNip1j - vNij ) & - - vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) - + ! shearing strain rate = 2*e_12 + shearU = dxU(i,j) * ( uEijp1 - uEij ) & + - uvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & + + dyU(i,j) * ( vNip1j - vNij ) & + - vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) + endif + ! Delta (in the denominator of zeta, eta) - DeltaU = sqrt(divU**2 + e_factor*(tensionU**2 + shearU**2)) + if (present(DeltaU)) then + DeltaU = sqrt(divU**2 + e_factor*(tensionU**2 + shearU**2)) + endif end subroutine strain_rates_U @@ -2225,7 +2243,8 @@ subroutine visccoeff_replpress_avgzeta (zetax2T1, zetax2T2, & area1, area2, area3, area4, & deltaU - real (kind=dbl_kind), intent(out):: zetax2U, etax2U, rep_prsU + real (kind=dbl_kind), optional, intent(out):: & + zetax2U, etax2U, rep_prsU ! 
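! The replacement pressure formed at the end of this routine, restated as a
! standalone sketch (hypothetical name; Ktens is the tensile-strength
! fraction and zetax2 = 2*zeta, following the convention used throughout):
pure function repl_press (zetax2, Delta, Ktens) result (rep_prs)
   integer, parameter :: dbl_kind = selected_real_kind(13)
   real (kind=dbl_kind), intent(in) :: zetax2  ! 2*zeta at the point
   real (kind=dbl_kind), intent(in) :: Delta   ! Delta from the strain rates
   real (kind=dbl_kind), intent(in) :: Ktens   ! tensile strength fraction
   real (kind=dbl_kind) :: rep_prs
   rep_prs = (1.0_dbl_kind - Ktens)/(1.0_dbl_kind + Ktens) * zetax2 * Delta
end function repl_press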
local variables @@ -2241,17 +2260,23 @@ subroutine visccoeff_replpress_avgzeta (zetax2T1, zetax2T2, & mask3 * area3 + & mask2 * area2) - zetax2U = (mask1 * area1 * zetax2T1 + & - mask4 * area4 * zetax2T4 + & - mask3 * area3 * zetax2T3 + & - mask2 * area2 * zetax2T2) / areatmp + if (present(rep_prsU) .or. present(zetax2U)) then + zetax2U = (mask1 * area1 * zetax2T1 + & + mask4 * area4 * zetax2T4 + & + mask3 * area3 * zetax2T3 + & + mask2 * area2 * zetax2T2) / areatmp + endif - etax2U = (mask1 * area1 * etax2T1 + & - mask4 * area4 * etax2T4 + & - mask3 * area3 * etax2T3 + & - mask2 * area2 * etax2T2) / areatmp + if (present(etax2U)) then + etax2U = (mask1 * area1 * etax2T1 + & + mask4 * area4 * etax2T4 + & + mask3 * area3 * etax2T3 + & + mask2 * area2 * etax2T2) / areatmp + endif - rep_prsU = (c1-Ktens)/(c1+Ktens)*zetax2U*deltaU + if (present(rep_prsU)) then + rep_prsU = (c1-Ktens)/(c1+Ktens)*zetax2U*deltaU + endif end subroutine visccoeff_replpress_avgzeta From 012e21bff8540efd9b3bf95a8535307115bb7bb4 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Wed, 16 Mar 2022 07:42:01 -0700 Subject: [PATCH 087/109] - Turn on new calls and use shrU (#73) * - Add new calls and use shrU - bit-for-bit except for when using calc_shearT_DeltaT - fix some bit-for-bit issues from earlier PRs in cheyenne+intel non-debug compiler optimization * fix shrU argument --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 166 ++++++++++-------- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 14 +- 2 files changed, 98 insertions(+), 82 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index aea1ffc53..940167433 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -185,8 +185,9 @@ subroutine evp (dt) real (kind=dbl_kind), allocatable :: fld2(:,:,:,:) real (kind=dbl_kind), allocatable :: & - zetax2T(:,:,:), & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2T(:,:,:) ! etax2 = 2*eta (shear viscous coeff) + shrU (:,:,:), & ! shearU array for gridC + zetax2T(:,:,:), & ! zetax2 = 2*zeta (bulk viscous coeff) + etax2T (:,:,:) ! etax2 = 2*eta (shear viscous coeff) real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & strtmp ! stress combinations for momentum equation @@ -221,8 +222,10 @@ subroutine evp (dt) if (grid_ice == 'CD' .or. grid_ice == 'C') then + allocate(shrU (nx_block,ny_block,max_blocks)) allocate(zetax2T(nx_block,ny_block,max_blocks)) - allocate(etax2T(nx_block,ny_block,max_blocks)) + allocate(etax2T (nx_block,ny_block,max_blocks)) + shrU (:,:,:) = c0 zetax2T(:,:,:) = c0 etax2T (:,:,:) = c0 @@ -757,27 +760,26 @@ subroutine evp (dt) elseif (grid_ice == "C") then -! !$OMP PARALLEL DO PRIVATE(iblk) -! do iblk = 1, nblocks -! call shear_strain_rate_U (nx_block, ny_block, & -! icellu (iblk) , & -! indxui (:,iblk) , indxuj (:,iblk), & -! uvelE (:,:,iblk), vvelN (:,:,iblk), & -! uvel (:,:,iblk), vvel (:,:,iblk), & -! dxE (:,:,iblk), dyN (:,:,iblk), & -! dxU (:,:,iblk), dyU (:,:,iblk), & -! ratiodxN (:,:,iblk), ratiodxNr (:,:,iblk), & -! ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & -! epm (:,:,iblk), npm (:,:,iblk), & -! uvm (:,:,iblk), & -! shrU (:,:,iblk)) -! enddo -! !$OMP END PARALLEL DO - -! call ice_timer_start(timer_bound) -! call ice_HaloUpdate (shrU, halo_info, & -! field_loc_NEcorner, field_type_scalar) -! 
call ice_timer_stop(timer_bound) + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call shear_strain_rate_U (nx_block , ny_block , & + icellu (iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uvelE (:,:,iblk), vvelN (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxE (:,:,iblk), dyN (:,:,iblk), & + dxU (:,:,iblk), dyU (:,:,iblk), & + ratiodxN (:,:,iblk), ratiodxNr (:,:,iblk), & + ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & + epm (:,:,iblk), npm (:,:,iblk), & + uvm (:,:,iblk), shrU (:,:,iblk)) + enddo + !$OMP END PARALLEL DO + + call ice_timer_start(timer_bound) + call ice_HaloUpdate (shrU, halo_info, & + field_loc_NEcorner, field_type_scalar) + call ice_timer_stop(timer_bound) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -789,7 +791,7 @@ subroutine evp (dt) dxN (:,:,iblk), dyE (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & DminTarea (:,:,iblk), & - strength (:,:,iblk), & + strength (:,:,iblk), shrU (:,:,iblk), & zetax2T (:,:,iblk), etax2T (:,:,iblk), & stresspT (:,:,iblk), stressmT (:,:,iblk)) @@ -835,7 +837,7 @@ subroutine evp (dt) epm (:,:,iblk), npm (:,:,iblk), & hm (:,:,iblk), uvm (:,:,iblk), & zetax2T (:,:,iblk), etax2T (:,:,iblk), & - strength (:,:,iblk), & + strength (:,:,iblk), shrU (:,:,iblk), & stress12U (:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -1120,7 +1122,7 @@ subroutine evp (dt) deallocate(fld2) if (grid_ice == 'CD' .or. grid_ice == 'C') then - deallocate(zetax2T, etax2T) + deallocate(shrU, zetax2T, etax2T) endif if (maskhalo_dyn) call ice_HaloDestroy(halo_info_mask) @@ -1578,21 +1580,22 @@ end subroutine stress ! updated: D. Bailey, NCAR ! Nov 2021 - subroutine stressC_T (nx_block, ny_block, & - icellt, & - indxti, indxtj, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - DminTarea, & - strength, & - zetax2T, etax2T, & - stressp, stressm) + subroutine stressC_T (nx_block, ny_block , & + icellt , & + indxti , indxtj , & + uvelE , vvelE , & + uvelN , vvelN , & + dxN , dyE , & + dxT , dyT , & + DminTarea, & + strength, shrU , & + zetax2T , etax2T , & + stressp , stressm ) use ice_dyn_shared, only: strain_rates_T, capping, & - visccoeff_replpress - + visccoeff_replpress, & + calc_shearT_DeltaT + integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions icellt ! no. of cells where icetmask = 1 @@ -1611,6 +1614,7 @@ subroutine stressC_T (nx_block, ny_block, & dxT , & ! width of T-cell through the middle (m) dyT , & ! height of T-cell through the middle (m) strength , & ! ice strength (N/m) + shrU , & ! shearU DminTarea ! deltaminEVP*tarea real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & @@ -1644,6 +1648,8 @@ subroutine stressC_T (nx_block, ny_block, & ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- +! tcraig bit-for-bit here, not below +#if (1 == 0) call strain_rates_T (nx_block, ny_block, & i, j, & uvelE, vvelE, & @@ -1653,11 +1659,21 @@ subroutine stressC_T (nx_block, ny_block, & divT, tensionT, & shearT, DeltaT ) - -! call calc_shearT_DeltaT (shrU(i,j), shrU(i,j-1), & -! shrU(i-1,j-1), shrU(i-1,j), & -! divT, tensionT, & -! shearT, DeltaT ) +#else + call strain_rates_T (nx_block, ny_block, & + i, j, & + uvelE, vvelE, & + uvelN, vvelN, & + dxN, dyE, & + dxT, dyT, & + divT = divT, & + tensionT = tensionT ) + + call calc_shearT_DeltaT (shrU(i,j), shrU(i,j-1), & + shrU(i-1,j-1), shrU(i-1,j), & + divT, tensionT, & + shearT, DeltaT ) +#endif !----------------------------------------------------------------- ! 
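! The bit-for-bit remarks above hinge on where the averaging happens:
! calc_shearT_DeltaT squares the mean of the four U-point shears when B2009
! is set, and otherwise takes the mean of the squares.  The two are
! generally different (mean of squares >= square of mean), as this
! hypothetical standalone check illustrates:
program delta_check
   integer, parameter :: dbl_kind = selected_real_kind(13)
   real (kind=dbl_kind) :: s(4), e_factor, divT, tensionT, d_meansq, d_sqmean
   s = [0.10_dbl_kind, -0.20_dbl_kind, 0.05_dbl_kind, 0.15_dbl_kind]
   e_factor = 0.25_dbl_kind ; divT = 0.30_dbl_kind ; tensionT = 0.10_dbl_kind
   d_meansq = sqrt(divT**2 + e_factor*(tensionT**2 + sum(s**2)/4.0_dbl_kind))
   d_sqmean = sqrt(divT**2 + e_factor*(tensionT**2 + (sum(s)/4.0_dbl_kind)**2))
   print *, d_meansq, d_sqmean   ! not equal in general
end program delta_check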
viscous coefficients and replacement pressure at T point @@ -1691,21 +1707,21 @@ end subroutine stressC_T ! author: JF Lemieux, ECCC ! Nov 2021 - subroutine stressC_U (nx_block, ny_block, & - icellu, & - indxui, indxuj, & - uvelE, vvelE, & - uvelN, vvelN, & - uvelU, vvelU, & - dxE, dyN, & - dxU, dyU, & - tarea, uarea, & - ratiodxN, ratiodxNr, & - ratiodyE, ratiodyEr, & - epm, npm, hm, uvm, & - zetax2T, etax2T, & - strength, & - stress12 ) + subroutine stressC_U (nx_block, ny_block, & + icellu, & + indxui , indxuj, & + uvelE , vvelE, & + uvelN , vvelN, & + uvelU , vvelU, & + dxE , dyN, & + dxU , dyU, & + tarea , uarea, & + ratiodxN, ratiodxNr, & + ratiodyE, ratiodyEr, & + epm, npm, hm, uvm, & + zetax2T , etax2T, & + strength, shrU, & + stress12 ) use ice_dyn_shared, only: strain_rates_U, & visccoeff_replpress_avgstr, & @@ -1743,6 +1759,7 @@ subroutine stressC_U (nx_block, ny_block, & uvm , & ! U-cell mask zetax2T , & ! 2*zeta at the T point etax2T , & ! 2*eta at the T point + shrU , & ! shearU array strength ! ice strength at the T point real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & @@ -1769,24 +1786,14 @@ subroutine stressC_U (nx_block, ny_block, & ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- - call strain_rates_U (nx_block, ny_block, & - i, j, & - uvelE, vvelE, & - uvelN, vvelN, & - uvelU, vvelU, & - dxE, dyN, & - dxU, dyU, & - ratiodxN, ratiodxNr, & - ratiodyE, ratiodyEr, & - epm, npm, uvm, & - divU, tensionU, & - shearU, DeltaU ) !----------------------------------------------------------------- ! viscous coefficients and replacement pressure at U point !----------------------------------------------------------------- if (visc_coeff_method == 'avg_zeta') then + DeltaU = c0 ! not needed in avgzeta just computing etax2U + shearU = shrU(i,j) call visccoeff_replpress_avgzeta (zetax2T (i ,j ), zetax2T (i ,j+1), & zetax2T (i+1,j+1), zetax2T (i+1,j ), & etax2T (i ,j ), etax2T (i ,j+1), & @@ -1798,6 +1805,19 @@ subroutine stressC_U (nx_block, ny_block, & DeltaU, etax2U=etax2U) elseif (visc_coeff_method == 'avg_strength') then + call strain_rates_U (nx_block, ny_block, & + i, j, & + uvelE, vvelE, & + uvelN, vvelN, & + uvelU, vvelU, & + dxE, dyN, & + dxU, dyU, & + ratiodxN, ratiodxNr, & + ratiodyE, ratiodyEr, & + epm, npm, uvm, & + divU, tensionU, & + shearU, DeltaU ) + shearU = shrU(i,j) DminUarea = deltaminEVP*uarea(i,j) call visccoeff_replpress_avgstr (strength(i ,j ), strength(i ,j+1), & strength(i+1,j+1), strength(i+1,j ), & @@ -1814,7 +1834,7 @@ subroutine stressC_U (nx_block, ny_block, & !----------------------------------------------------------------- ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - + stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) + & arlx1i*p5*etax2U*shearU) * denom1 diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index c67910cd9..9e9f86743 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -2050,7 +2050,7 @@ subroutine strain_rates_U (nx_block, ny_block, & ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- - if (present(divU) .or. present(tensionU) .or. present(DeltaU)) then + if (present(DeltaU) .or. present(divU) .or. 
present(tensionU)) then uNip1j = uvelN(i+1,j) * npm(i+1,j) & +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * uvelN(i,j) uNij = uvelN(i,j) * npm(i,j) & @@ -2059,27 +2059,23 @@ subroutine strain_rates_U (nx_block, ny_block, & +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * vvelE(i,j) vEij = vvelE(i,j) * epm(i,j) & +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * vvelE(i,j+1) - endif ! MIGHT NOT NEED TO mult by uvm...if done before in calc of uvelU... - ! divergence = e_11 + e_22 - if (present(divU) .or. present(DeltaU)) then + ! divergence = e_11 + e_22 divU = dyU(i,j) * ( uNip1j - uNij ) & + uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & + dxU(i,j) * ( vEijp1 - vEij ) & + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) - endif - ! tension strain rate = e_11 - e_22 - if (present(tensionU) .or. present(DeltaU)) then + ! tension strain rate = e_11 - e_22 tensionU = dyU(i,j) * ( uNip1j - uNij ) & - uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & - dxU(i,j) * ( vEijp1 - vEij ) & + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) endif - if (present(shearU) .or. present(DeltaU)) then + if (present(DeltaU) .or. present(shearU)) then uEijp1 = uvelE(i,j+1) * epm(i,j+1) & +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * uvelE(i,j) uEij = uvelE(i,j) * epm(i,j) & @@ -2096,8 +2092,8 @@ subroutine strain_rates_U (nx_block, ny_block, & - vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) endif - ! Delta (in the denominator of zeta, eta) if (present(DeltaU)) then + ! Delta (in the denominator of zeta, eta) DeltaU = sqrt(divU**2 + e_factor*(tensionU**2 + shearU**2)) endif From 67b25a4901cfdc57008820037e5cc249a924fd53 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Wed, 16 Mar 2022 13:20:20 -0700 Subject: [PATCH 088/109] Rename stress_T and stress_U to stressCD_T and stressCD_U (#74) Rename step[uv]_cgrid to stepuC_[EN], step_vel, stepuCD create strain_rates_T/U version 2 that operates on arrays, replace most calls to use version 2, still working on a few debugging. this still needs to be cleaned up. Add elasticDamp to ice_in Add a few tests --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 361 ++++++++----- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 493 +++++++++++++----- configuration/scripts/ice_in | 1 + configuration/scripts/tests/gridsys_suite.ts | 3 + 4 files changed, 578 insertions(+), 280 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 940167433..7a3aca1ac 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -42,7 +42,7 @@ module ice_dyn_evp field_type_scalar, field_type_vector use ice_constants, only: c0, p027, p055, p111, p166, & p222, p25, p333, p5, c1 - use ice_dyn_shared, only: stepu, step_vel, stepu_Cgrid, stepv_Cgrid, & + use ice_dyn_shared, only: stepu, stepuCD, stepuC_E, stepuC_N, & dyn_prep1, dyn_prep2, dyn_finish, & ndte, yield_curve, ecci, denom1, arlx1i, fcor_blk, fcorE_blk, fcorN_blk, & uvel_init, vvel_init, uvelE_init, vvelE_init, uvelN_init, vvelN_init, & @@ -114,7 +114,7 @@ subroutine evp (dt) use ice_dyn_evp_1d, only: ice_dyn_evp_1d_copyin, ice_dyn_evp_1d_kernel, & ice_dyn_evp_1d_copyout use ice_dyn_shared, only: evp_algorithm, stack_velocity_field, unstack_velocity_field, DminTarea - use ice_dyn_shared, only: deformations, deformations_T, shear_strain_rate_U + use ice_dyn_shared, only: deformations, deformations_T, strain_rates_U, strain_rates_U2 ! tcx, shear_strain_rate_U real (kind=dbl_kind), intent(in) :: & dt ! 
time step @@ -760,8 +760,10 @@ subroutine evp (dt) elseif (grid_ice == "C") then - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks + +#if (1 == 0) call shear_strain_rate_U (nx_block , ny_block , & icellu (iblk), & indxui (:,iblk), indxuj (:,iblk), & @@ -773,7 +775,39 @@ subroutine evp (dt) ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & epm (:,:,iblk), npm (:,:,iblk), & uvm (:,:,iblk), shrU (:,:,iblk)) - enddo +#else +#if (1 == 0) + do ij = 1, icellu(iblk) + i = indxui(ij,iblk) + j = indxuj(ij,iblk) + call strain_rates_U (nx_block, ny_block, i, j, & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxE (:,:,iblk), dyN (:,:,iblk), & + dxU (:,:,iblk), dyU (:,:,iblk), & + ratiodxN(:,:,iblk), ratiodxNr(:,:,iblk), & + ratiodyE(:,:,iblk), ratiodyEr(:,:,iblk), & + epm(:,:,iblk), npm(:,:,iblk), uvm(:,:,iblk), & + shearU=shrU(i,j,iblk) ) + enddo ! ij +#else + call strain_rates_U2 (nx_block , ny_block , & + icellu (iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxE (:,:,iblk), dyN (:,:,iblk), & + dxU (:,:,iblk), dyU (:,:,iblk), & + ratiodxN(:,:,iblk), ratiodxNr(:,:,iblk), & + ratiodyE(:,:,iblk), ratiodyEr(:,:,iblk), & + epm(:,:,iblk), npm(:,:,iblk), uvm(:,:,iblk), & + shearU=shrU(:,:,iblk) ) +#endif +#endif + + enddo ! iblk !$OMP END PARALLEL DO call ice_timer_start(timer_bound) @@ -885,29 +919,29 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stepu_Cgrid (nx_block, ny_block, & ! u, E point - icelle (iblk), Cdn_ocn (:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & - aiE (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - waterxE (:,:,iblk), forcexE (:,:,iblk), & - emassdti (:,:,iblk), fmE (:,:,iblk), & - strintxE (:,:,iblk), taubxE (:,:,iblk), & - uvelE_init(:,:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - TbE (:,:,iblk)) - - call stepv_Cgrid (nx_block, ny_block, & ! v, N point - icelln (iblk), Cdn_ocn (:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & - aiN (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - wateryN (:,:,iblk), forceyN (:,:,iblk), & - nmassdti (:,:,iblk), fmN (:,:,iblk), & - strintyN (:,:,iblk), taubyN (:,:,iblk), & - vvelN_init(:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - TbN (:,:,iblk)) + call stepuC_E (nx_block, ny_block, & ! u, E point + icelle (iblk), Cdn_ocn (:,:,iblk), & + indxei (:,iblk), indxej (:,iblk), & + aiE (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + waterxE (:,:,iblk), forcexE (:,:,iblk), & + emassdti (:,:,iblk), fmE (:,:,iblk), & + strintxE (:,:,iblk), taubxE (:,:,iblk), & + uvelE_init(:,:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + TbE (:,:,iblk)) + + call stepuC_N (nx_block, ny_block, & ! 
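! The momentum steps invoked here reduce, point by point, to a small
! implicit solve: mass/dt, water drag and seabed stress sit on the diagonal
! while the Coriolis term couples u and v.  For the two-component solves
! this is a 2x2 system; a sketch of its closed form, assuming a diagonal
! coefficient cca, coupling ccb and right-hand sides cc1, cc2 as assembled
! in ice_dyn_shared (names here are hypothetical):
pure subroutine solve2x2 (cca, ccb, cc1, cc2, u, v)
   integer, parameter :: dbl_kind = selected_real_kind(13)
   real (kind=dbl_kind), intent(in)  :: cca, ccb, cc1, cc2
   real (kind=dbl_kind), intent(out) :: u, v
   real (kind=dbl_kind) :: ab2
   ab2 = cca**2 + ccb**2          ! determinant of the 2x2 system
   u = (cca*cc1 + ccb*cc2) / ab2
   v = (cca*cc2 - ccb*cc1) / ab2
end subroutine solve2x2
! stepuC_E and stepuC_N instead update a single component each, using the
! transverse velocity interpolated to the E or N point.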
v, N point + icelln (iblk), Cdn_ocn (:,:,iblk), & + indxni (:,iblk), indxnj (:,iblk), & + aiN (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + wateryN (:,:,iblk), forceyN (:,:,iblk), & + nmassdti (:,:,iblk), fmN (:,:,iblk), & + strintyN (:,:,iblk), taubyN (:,:,iblk), & + vvelN_init(:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + TbN (:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -940,18 +974,18 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stress_T (nx_block, ny_block, & - icellt(iblk), & - indxti (:,iblk), indxtj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - dxN (:,:,iblk), dyE (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - DminTarea (:,:,iblk), & - strength (:,:,iblk), & - zetax2T (:,:,iblk), etax2T (:,:,iblk), & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12T (:,:,iblk) ) + call stressCD_T (nx_block, ny_block, & + icellt(iblk), & + indxti (:,iblk), indxtj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + DminTarea (:,:,iblk), & + strength (:,:,iblk), & + zetax2T (:,:,iblk), etax2T (:,:,iblk), & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12T (:,:,iblk) ) !----------------------------------------------------------------- ! on last subcycle, save quantities for mechanical redistribution @@ -981,23 +1015,23 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stress_U (nx_block, ny_block, & - icellu(iblk), & - indxui (:,iblk), indxuj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxE (:,:,iblk), dyN (:,:,iblk), & - dxU (:,:,iblk), dyU (:,:,iblk), & - tarea (:,:,iblk), uarea (:,:,iblk), & - ratiodxN (:,:,iblk), ratiodxNr (:,:,iblk), & - ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & - epm (:,:,iblk), npm (:,:,iblk), & - hm (:,:,iblk), uvm (:,:,iblk), & - zetax2T (:,:,iblk), etax2T (:,:,iblk), & - strength (:,:,iblk), & - stresspU (:,:,iblk), stressmU (:,:,iblk), & - stress12U (:,:,iblk)) + call stressCD_U (nx_block, ny_block, & + icellu(iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxE (:,:,iblk), dyN (:,:,iblk), & + dxU (:,:,iblk), dyU (:,:,iblk), & + tarea (:,:,iblk), uarea (:,:,iblk), & + ratiodxN (:,:,iblk), ratiodxNr (:,:,iblk), & + ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & + epm (:,:,iblk), npm (:,:,iblk), & + hm (:,:,iblk), uvm (:,:,iblk), & + zetax2T (:,:,iblk), etax2T (:,:,iblk), & + strength (:,:,iblk), & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12U (:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -1052,33 +1086,33 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call step_vel (nx_block, ny_block, & ! E point - icelle (iblk), Cdn_ocn (:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & - aiE (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - waterxE (:,:,iblk), wateryE (:,:,iblk), & - forcexE (:,:,iblk), forceyE (:,:,iblk), & - emassdti (:,:,iblk), fmE (:,:,iblk), & - strintxE (:,:,iblk), strintyE (:,:,iblk), & - taubxE (:,:,iblk), taubyE (:,:,iblk), & - uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - TbE (:,:,iblk)) - - call step_vel (nx_block, ny_block, & ! 
N point - icelln (iblk), Cdn_ocn (:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & - aiN (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - waterxN (:,:,iblk), wateryN (:,:,iblk), & - forcexN (:,:,iblk), forceyN (:,:,iblk), & - nmassdti (:,:,iblk), fmN (:,:,iblk), & - strintxN (:,:,iblk), strintyN (:,:,iblk), & - taubxN (:,:,iblk), taubyN (:,:,iblk), & - uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - TbN (:,:,iblk)) + call stepuCD (nx_block, ny_block, & ! E point + icelle (iblk), Cdn_ocn (:,:,iblk), & + indxei (:,iblk), indxej (:,iblk), & + aiE (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + waterxE (:,:,iblk), wateryE (:,:,iblk), & + forcexE (:,:,iblk), forceyE (:,:,iblk), & + emassdti (:,:,iblk), fmE (:,:,iblk), & + strintxE (:,:,iblk), strintyE (:,:,iblk), & + taubxE (:,:,iblk), taubyE (:,:,iblk), & + uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + TbE (:,:,iblk)) + + call stepuCD (nx_block, ny_block, & ! N point + icelln (iblk), Cdn_ocn (:,:,iblk), & + indxni (:,iblk), indxnj (:,iblk), & + aiN (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + waterxN (:,:,iblk), wateryN (:,:,iblk), & + forcexN (:,:,iblk), forceyN (:,:,iblk), & + nmassdti (:,:,iblk), fmN (:,:,iblk), & + strintxN (:,:,iblk), strintyN (:,:,iblk), & + taubxN (:,:,iblk), taubyN (:,:,iblk), & + uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + TbN (:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -1592,9 +1626,8 @@ subroutine stressC_T (nx_block, ny_block , & zetax2T , etax2T , & stressp , stressm ) - use ice_dyn_shared, only: strain_rates_T, capping, & - visccoeff_replpress, & - calc_shearT_DeltaT + use ice_dyn_shared, only: strain_rates_T, strain_rates_T2, capping, & + visccoeff_replpress, e_factor integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1628,9 +1661,12 @@ subroutine stressC_T (nx_block, ny_block , & integer (kind=int_kind) :: & i, j, ij + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + divT, tensionT + real (kind=dbl_kind) :: & - divT, tensionT, shearT, DeltaT, & ! strain rates at T point - rep_prsT ! replacement pressure at T point + shearTsqr, DeltaT, & ! strain rates at T point + rep_prsT ! replacement pressure at T point character(len=*), parameter :: subname = '(stressC_T)' @@ -1638,6 +1674,15 @@ subroutine stressC_T (nx_block, ny_block , & ! Initialize !----------------------------------------------------------------- + call strain_rates_T2 (nx_block , ny_block , & + icellt , & + indxti(:) , indxtj (:) , & + uvelE (:,:), vvelE (:,:), & + uvelN (:,:), vvelN (:,:), & + dxN (:,:), dyE (:,:), & + dxT (:,:), dyT (:,:), & + divT = divT (:,:), & + tensionT = tensionT(:,:) ) do ij = 1, icellt i = indxti(ij) @@ -1660,6 +1705,7 @@ subroutine stressC_T (nx_block, ny_block , & shearT, DeltaT ) #else +#if (1 == 0) call strain_rates_T (nx_block, ny_block, & i, j, & uvelE, vvelE, & @@ -1668,11 +1714,19 @@ subroutine stressC_T (nx_block, ny_block , & dxT, dyT, & divT = divT, & tensionT = tensionT ) - call calc_shearT_DeltaT (shrU(i,j), shrU(i,j-1), & shrU(i-1,j-1), shrU(i-1,j), & - divT, tensionT, & + divT(i,j), tensionT(i,j), & shearT, DeltaT ) +#else + !----------------------------------------------------------------- + ! strain rates + ! 
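! strain_rates_T2, called once above, is the array form of strain_rates_T:
! one pass fills the divT/tensionT work arrays over the compressed index
! list, so the pointwise loop below only combines them.  A minimal sketch
! of that pattern for divT alone (hypothetical name; the stencil matches
! strain_rates_T, assumed-shape arrays used for brevity):
subroutine fill_divT (n, indxi, indxj, dyE, dxN, uvelE, vvelN, divT)
   integer, parameter :: dbl_kind = selected_real_kind(13)
   integer,              intent(in)  :: n, indxi(n), indxj(n)
   real (kind=dbl_kind), intent(in)  :: dyE(:,:), dxN(:,:)
   real (kind=dbl_kind), intent(in)  :: uvelE(:,:), vvelN(:,:)
   real (kind=dbl_kind), intent(out) :: divT(:,:)
   integer :: ij, i, j
   do ij = 1, n
      i = indxi(ij)
      j = indxj(ij)
      ! divergence = e_11 + e_22, times the T-cell area
      divT(i,j) = dyE(i,j)*uvelE(i,j) - dyE(i-1,j)*uvelE(i-1,j) &
                + dxN(i,j)*vvelN(i,j) - dxN(i,j-1)*vvelN(i,j-1)
   enddo
end subroutine fill_divT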
NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + + shearTsqr = (shrU(i,j)**2 + shrU(i,j-1)**2 + shrU(i-1,j-1)**2 + shrU(i-1,j)**2)/4d0 + DeltaT = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearTsqr)) +#endif #endif !----------------------------------------------------------------- @@ -1691,10 +1745,10 @@ subroutine stressC_T (nx_block, ny_block , & ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code stressp(i,j) = (stressp(i,j)*(c1-arlx1i*revp) + & - arlx1i*(zetax2T(i,j)*divT - rep_prsT)) * denom1 + arlx1i*(zetax2T(i,j)*divT(i,j) - rep_prsT)) * denom1 stressm(i,j) = (stressm(i,j)*(c1-arlx1i*revp) + & - arlx1i*etax2T(i,j)*tensionT) * denom1 + arlx1i*etax2T(i,j)*tensionT(i,j)) * denom1 enddo ! ij @@ -1850,7 +1904,7 @@ end subroutine stressC_U ! author: JF Lemieux, ECCC ! Nov 2021 - subroutine stress_T (nx_block, ny_block, & + subroutine stressCD_T (nx_block, ny_block, & icellt, & indxti, indxtj, & uvelE, vvelE, & @@ -1863,7 +1917,7 @@ subroutine stress_T (nx_block, ny_block, & stresspT, stressmT, & stress12T ) - use ice_dyn_shared, only: strain_rates_T, capping, & + use ice_dyn_shared, only: strain_rates_T2, capping, & visccoeff_replpress integer (kind=int_kind), intent(in) :: & @@ -1898,41 +1952,39 @@ subroutine stress_T (nx_block, ny_block, & integer (kind=int_kind) :: & i, j, ij + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + divT, tensionT, shearT, DeltaT ! strain rates at T point + real (kind=dbl_kind) :: & - divT, tensionT, shearT, DeltaT, & ! strain rates at T point rep_prsT ! replacement pressure at T point - character(len=*), parameter :: subname = '(stress_T)' + character(len=*), parameter :: subname = '(stressCD_T)' !----------------------------------------------------------------- - ! Initialize + ! strain rates at T point + ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- + call strain_rates_T2 (nx_block , ny_block , & + icellt , & + indxti(:) , indxtj (:) , & + uvelE (:,:), vvelE (:,:), & + uvelN (:,:), vvelN (:,:), & + dxN (:,:), dyE (:,:), & + dxT (:,:), dyT (:,:), & + divT (:,:), tensionT(:,:), & + shearT(:,:), DeltaT (:,:) ) do ij = 1, icellt i = indxti(ij) j = indxtj(ij) - !----------------------------------------------------------------- - ! strain rates at T point - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - - call strain_rates_T (nx_block, ny_block, & - i, j, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - divT, tensionT, & - shearT, DeltaT ) - !----------------------------------------------------------------- ! viscous coefficients and replacement pressure at T point !----------------------------------------------------------------- call visccoeff_replpress (strength(i,j), DminTarea(i,j), & - DeltaT , zetax2T (i,j), & + DeltaT (i,j), zetax2T (i,j), & etax2T (i,j), rep_prsT , & capping) @@ -1943,17 +1995,17 @@ subroutine stress_T (nx_block, ny_block, & ! NOTE: for comp. 
efficiency 2 x zeta and 2 x eta are used in the code stresspT(i,j) = (stresspT(i,j)*(c1-arlx1i*revp) + & - arlx1i*(zetax2T(i,j)*divT - rep_prsT)) * denom1 + arlx1i*(zetax2T(i,j)*divT(i,j) - rep_prsT)) * denom1 stressmT(i,j) = (stressmT(i,j)*(c1-arlx1i*revp) + & - arlx1i*etax2T(i,j)*tensionT) * denom1 + arlx1i*etax2T(i,j)*tensionT(i,j)) * denom1 stress12T(i,j) = (stress12T(i,j)*(c1-arlx1i*revp) + & - arlx1i*p5*etax2T(i,j)*shearT) * denom1 + arlx1i*p5*etax2T(i,j)*shearT(i,j)) * denom1 enddo ! ij - end subroutine stress_T + end subroutine stressCD_T !======================================================================= @@ -1962,7 +2014,7 @@ end subroutine stress_T ! author: JF Lemieux, ECCC ! Nov 2021 - subroutine stress_U (nx_block, ny_block, & + subroutine stressCD_U (nx_block, ny_block, & icellu, & indxui, indxuj, & uvelE, vvelE, & @@ -1979,7 +2031,7 @@ subroutine stress_U (nx_block, ny_block, & stresspU, stressmU, & stress12U ) - use ice_dyn_shared, only: strain_rates_U, & + use ice_dyn_shared, only: strain_rates_U, strain_rates_U2, & visccoeff_replpress_avgstr, & visccoeff_replpress_avgzeta, & visc_coeff_method, deltaminEVP, capping @@ -2027,35 +2079,55 @@ subroutine stress_U (nx_block, ny_block, & integer (kind=int_kind) :: & i, j, ij + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + divU, tensionU, shearU, DeltaU ! strain rates at U point + real (kind=dbl_kind) :: & - divU, tensionU, shearU, DeltaU, & ! strain rates at U point zetax2U, etax2U, rep_prsU, & ! replacement pressure at U point - DminUarea, strtmp, areatmp ! Dmin on U and tmp variables + DminUarea ! Dmin on U and tmp variables - character(len=*), parameter :: subname = '(stress_U)' + character(len=*), parameter :: subname = '(stressCD_U)' + !----------------------------------------------------------------- + ! strain rates at U point + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + +#if (1 == 0) + call strain_rates_U2 (nx_block , ny_block , & + icellu , & + indxui (:) , indxuj(:) , & + uvelE (:,:), vvelE(:,:) , & + uvelN (:,:), vvelN(:,:) , & + uvelU (:,:), vvelU(:,:) , & + dxE (:,:), dyN(:,:) , & + dxU (:,:), dyU(:,:) , & + ratiodxN(:,:), ratiodxNr(:,:), & + ratiodyE(:,:), ratiodyEr(:,:), & + epm(:,:), npm(:,:), uvm(:,:), & + divU (:,:), tensionU (:,:), & + shearU (:,:), DeltaU (:,:) ) +#endif + do ij = 1, icellu i = indxui(ij) j = indxuj(ij) - !----------------------------------------------------------------- - ! strain rates at U point - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- +#if (1 == 1) + call strain_rates_U (nx_block , ny_block , & + i, j, & + uvelE (:,:), vvelE(:,:) , & + uvelN (:,:), vvelN(:,:) , & + uvelU (:,:), vvelU(:,:) , & + dxE (:,:), dyN(:,:) , & + dxU (:,:), dyU(:,:) , & + ratiodxN(:,:), ratiodxNr(:,:), & + ratiodyE(:,:), ratiodyEr(:,:), & + epm(:,:), npm(:,:), uvm(:,:), & + divU (i,j), tensionU (i,j), & + shearU (i,j), DeltaU (i,j) ) +#endif - call strain_rates_U (nx_block, ny_block, & - i, j, & - uvelE, vvelE, & - uvelN, vvelN, & - uvelU, vvelU, & - dxE, dyN, & - dxU, dyU, & - ratiodxN, ratiodxNr, & - ratiodyE, ratiodyEr, & - epm, npm, uvm, & - divU, tensionU, & - shearU, DeltaU ) - !----------------------------------------------------------------- ! 
viscous coefficients and replacement pressure at U point !----------------------------------------------------------------- @@ -2069,7 +2141,8 @@ subroutine stress_U (nx_block, ny_block, & hm (i+1,j+1), hm (i+1,j ), & tarea (i ,j ), tarea (i ,j+1), & tarea (i+1,j+1), tarea (i+1,j ), & - DeltaU, zetax2U, etax2U, rep_prsU) + DeltaU (i ,j ), & + zetax2U, etax2U, rep_prsU) elseif (visc_coeff_method == 'avg_strength') then DminUarea = deltaminEVP*uarea(i,j) @@ -2079,7 +2152,7 @@ subroutine stress_U (nx_block, ny_block, & hm (i+1,j+1) , hm (i+1,j ), & tarea (i ,j ) , tarea (i ,j+1), & tarea (i+1,j+1) , tarea (i+1,j ), & - DminUarea, DeltaU, & + DminUarea , DeltaU (i ,j ), & zetax2U, etax2U, rep_prsU, capping) endif @@ -2090,17 +2163,17 @@ subroutine stress_U (nx_block, ny_block, & ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code stresspU(i,j) = (stresspU(i,j)*(c1-arlx1i*revp) + & - arlx1i*(zetax2U*divU - rep_prsU)) * denom1 + arlx1i*(zetax2U*divU(i,j) - rep_prsU)) * denom1 stressmU(i,j) = (stressmU(i,j)*(c1-arlx1i*revp) + & - arlx1i*etax2U*tensionU) * denom1 + arlx1i*etax2U*tensionU(i,j)) * denom1 stress12U(i,j) = (stress12U(i,j)*(c1-arlx1i*revp) + & - arlx1i*p5*etax2U*shearU) * denom1 + arlx1i*p5*etax2U*shearU(i,j)) * denom1 enddo ! ij - end subroutine stress_U + end subroutine stressCD_U !======================================================================= diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 9e9f86743..5ead645dc 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -23,13 +23,15 @@ module ice_dyn_shared implicit none private - public :: set_evp_parameters, stepu, step_vel, stepu_Cgrid, stepv_Cgrid, & + public :: set_evp_parameters, stepu, stepuCD, stepuC_E, stepuC_N, & principal_stress, init_dyn, dyn_prep1, dyn_prep2, dyn_finish, & seabed_stress_factor_LKD, seabed_stress_factor_prob, & alloc_dyn_shared, & deformations, deformations_T, & strain_rates, strain_rates_T, strain_rates_U, & - shear_strain_rate_U, calc_shearT_DeltaT, & + strain_rates_T2, strain_rates_U2, & +! shear_strain_rate_U, & +! calc_shearT_DeltaT, & visccoeff_replpress, & visccoeff_replpress_avgstr, & visccoeff_replpress_avgzeta, & @@ -823,19 +825,19 @@ end subroutine stepu ! Integration of the momentum equation to find velocity (u,v) at E and N locations - subroutine step_vel (nx_block, ny_block, & - icell, Cw, & - indxi, indxj, & - aiu, & - uocn, vocn, & - waterx, watery, & - forcex, forcey, & - massdti, fm, & - strintx, strinty, & - taubx, tauby, & - uvel_init, vvel_init,& - uvel, vvel, & - Tb) + subroutine stepuCD (nx_block, ny_block, & + icell, Cw, & + indxi, indxj, & + aiu, & + uocn, vocn, & + waterx, watery, & + forcex, forcey, & + massdti, fm, & + strintx, strinty, & + taubx, tauby, & + uvel_init, vvel_init,& + uvel, vvel, & + Tb) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -886,7 +888,7 @@ subroutine step_vel (nx_block, ny_block, & Cb , & ! complete seabed (basal) stress coeff rhow ! - character(len=*), parameter :: subname = '(step_vel)' + character(len=*), parameter :: subname = '(stepuCD)' !----------------------------------------------------------------- ! integrate the momentum equation @@ -935,23 +937,23 @@ subroutine step_vel (nx_block, ny_block, & enddo ! ij - end subroutine step_vel + end subroutine stepuCD !======================================================================= ! 
Integration of the momentum equation to find velocity u at E location on C grid - subroutine stepu_Cgrid (nx_block, ny_block, & - icell, Cw, & - indxi, indxj, & - aiu, & - uocn, vocn, & - waterx, forcex, & - massdti, fm, & - strintx, taubx, & - uvel_init, & - uvel, vvel, & - Tb) + subroutine stepuC_E (nx_block, ny_block, & + icell, Cw, & + indxi, indxj, & + aiu, & + uocn, vocn, & + waterx, forcex, & + massdti, fm, & + strintx, taubx, & + uvel_init, & + uvel, vvel, & + Tb) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -967,10 +969,10 @@ subroutine stepu_Cgrid (nx_block, ny_block, & aiu , & ! ice fraction on [en]-grid waterx , & ! for ocean stress calculation, x (m/s) forcex , & ! work array: combined atm stress and ocn tilt, x - massdti , & ! mass of [EN]-cell/dt (kg/m^2 s) + massdti , & ! mass of e-cell/dt (kg/m^2 s) uocn , & ! ocean current, x-direction (m/s) vocn , & ! ocean current, y-direction (m/s) - fm , & ! Coriolis param. * mass in [EN]-cell (kg/s) + fm , & ! Coriolis param. * mass in e-cell (kg/s) strintx , & ! divergence of internal ice stress, x (N/m^2) Cw , & ! ocean-ice neutral drag coefficient vvel ! y-component of velocity (m/s) interpolated to E location @@ -992,7 +994,7 @@ subroutine stepu_Cgrid (nx_block, ny_block, & Cb , & ! complete seabed (basal) stress coeff rhow ! - character(len=*), parameter :: subname = '(stepu_Cgrid)' + character(len=*), parameter :: subname = '(stepuC_E)' !----------------------------------------------------------------- ! integrate the momentum equation @@ -1035,23 +1037,23 @@ subroutine stepu_Cgrid (nx_block, ny_block, & enddo ! ij - end subroutine stepu_Cgrid + end subroutine stepuC_E !======================================================================= ! Integration of the momentum equation to find velocity v at N location on C grid - subroutine stepv_Cgrid (nx_block, ny_block, & - icell, Cw, & - indxi, indxj, & - aiu, & - uocn, vocn, & - watery, forcey, & - massdti, fm, & - strinty, tauby, & - vvel_init, & - uvel, vvel, & - Tb) + subroutine stepuC_N (nx_block, ny_block, & + icell, Cw, & + indxi, indxj, & + aiu, & + uocn, vocn, & + watery, forcey, & + massdti, fm, & + strinty, tauby, & + vvel_init, & + uvel, vvel, & + Tb) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1067,10 +1069,10 @@ subroutine stepv_Cgrid (nx_block, ny_block, & aiu , & ! ice fraction on [en]-grid watery , & ! for ocean stress calculation, y (m/s) forcey , & ! work array: combined atm stress and ocn tilt, y - massdti , & ! mass of [EN]-cell/dt (kg/m^2 s) + massdti , & ! mass of n-cell/dt (kg/m^2 s) uocn , & ! ocean current, x-direction (m/s) vocn , & ! ocean current, y-direction (m/s) - fm , & ! Coriolis param. * mass in [EN]-cell (kg/s) + fm , & ! Coriolis param. * mass in n-cell (kg/s) strinty , & ! divergence of internal ice stress, y (N/m^2) Cw , & ! ocean-ice neutral drag coefficient uvel ! x-component of velocity (m/s) interpolated to N location @@ -1092,7 +1094,7 @@ subroutine stepv_Cgrid (nx_block, ny_block, & Cb , & ! complete seabed (basal) stress coeff rhow ! - character(len=*), parameter :: subname = '(stepv_Cgrid)' + character(len=*), parameter :: subname = '(stepuC_N)' !----------------------------------------------------------------- ! integrate the momentum equation @@ -1135,7 +1137,7 @@ subroutine stepv_Cgrid (nx_block, ny_block, & enddo ! 
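stepuCD and its single-component C-grid variants advance the velocity with drag and Coriolis treated implicitly, which at each point reduces to a 2x2 linear system solved in place. The coefficient assembly sits in unchanged lines outside these hunks, so the sketch below uses hypothetical names: cca collects the inertial and drag terms, ccb the Coriolis term, and cc1/cc2 the explicit forcing:

      ! Sketch only: Cramer's-rule solve of
      !    cca*u - ccb*v = cc1
      !    ccb*u + cca*v = cc2
      subroutine solve_momentum_2x2 (cca, ccb, cc1, cc2, u, v)
         use ice_kinds_mod, only: dbl_kind
         real (kind=dbl_kind), intent(in)  :: cca, ccb, cc1, cc2
         real (kind=dbl_kind), intent(out) :: u, v
         real (kind=dbl_kind) :: ab2   ! determinant of the 2x2 system
         ab2 = cca**2 + ccb**2
         u   = (cca*cc1 + ccb*cc2) / ab2
         v   = (cca*cc2 - ccb*cc1) / ab2
      end subroutine solve_momentum_2x2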
ij - end subroutine stepv_Cgrid + end subroutine stepuC_N !======================================================================= @@ -1732,8 +1734,7 @@ subroutine deformations_T (nx_block, ny_block, & dyT , & ! height of T-cell through the middle (m) tarear ! 1/tarea - real (kind=dbl_kind), dimension (nx_block,ny_block), & - intent(inout) :: & + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & shear , & ! strain rate II component (1/s) divu , & ! strain rate I component, velocity divergence (1/s) rdg_conv , & ! convergence term for ridging (1/s) @@ -1744,41 +1745,44 @@ subroutine deformations_T (nx_block, ny_block, & integer (kind=int_kind) :: & i, j, ij + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + divT, tensionT, shearT, DeltaT ! strain rates at T point + real (kind=dbl_kind) :: & - divT, tensionT, shearT, DeltaT, & ! strain rates at T point tmp ! useful combination character(len=*), parameter :: subname = '(deformations_T)' - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) - !----------------------------------------------------------------- ! strain rates ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- - call strain_rates_T (nx_block, ny_block, & - i, j, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - divT, tensionT, & - shearT, DeltaT ) + call strain_rates_T2 (nx_block , ny_block , & + icellt , & + indxti(:) , indxtj (:) , & + uvelE (:,:), vvelE (:,:), & + uvelN (:,:), vvelN (:,:), & + dxN (:,:), dyE (:,:), & + dxT (:,:), dyT (:,:), & + divT (:,:), tensionT(:,:), & + shearT(:,:), DeltaT (:,:) ) - !----------------------------------------------------------------- - ! deformations for mechanical redistribution - !----------------------------------------------------------------- - divu(i,j) = divT * tarear(i,j) - tmp = Deltat * tarear(i,j) + do ij = 1, icellt + i = indxti(ij) + j = indxtj(ij) + + !----------------------------------------------------------------- + ! deformations for mechanical redistribution + !----------------------------------------------------------------- + divu(i,j) = divT(i,j) * tarear(i,j) + tmp = Deltat(i,j) * tarear(i,j) rdg_conv(i,j) = -min(divu(i,j),c0) rdg_shear(i,j) = p5*(tmp-abs(divu(i,j))) ! diagnostic only ! shear = sqrt(tension**2 + shearing**2) - shear(i,j) = tarear(i,j)*sqrt( tensionT**2 + shearT**2 ) + shear(i,j) = tarear(i,j)*sqrt( tensionT(i,j)**2 + shearT(i,j)**2 ) enddo ! ij @@ -1891,7 +1895,7 @@ subroutine strain_rates_T (nx_block, ny_block, & shearT, DeltaT ) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block ! block dimensions + nx_block, ny_block ! block dimensions integer (kind=int_kind) :: & i, j ! indices @@ -1916,34 +1920,124 @@ subroutine strain_rates_T (nx_block, ny_block, & ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- - ! divergence = e_11 + e_22 - if (present(deltaT) .or. present(divT)) then - divT = dyE(i,j)*uvelE(i ,j ) - dyE(i-1,j)*uvelE(i-1,j ) & - + dxN(i,j)*vvelN(i ,j ) - dxN(i,j-1)*vvelN(i ,j-1) - endif + ! divergence = e_11 + e_22 + if (present(deltaT) .or. present(divT)) then + divT = dyE(i,j)*uvelE(i ,j ) - dyE(i-1,j)*uvelE(i-1,j ) & + + dxN(i,j)*vvelN(i ,j ) - dxN(i,j-1)*vvelN(i ,j-1) + endif - ! tension strain rate = e_11 - e_22 - if (present(deltaT) .or. 
present(tensionT)) then - tensionT = (dyT(i,j)**2)*(uvelE(i,j)/dyE(i,j) - uvelE(i-1,j)/dyE(i-1,j)) & - - (dxT(i,j)**2)*(vvelN(i,j)/dxN(i,j) - vvelN(i,j-1)/dxN(i,j-1)) - endif + ! tension strain rate = e_11 - e_22 + if (present(deltaT) .or. present(tensionT)) then + tensionT = (dyT(i,j)**2)*(uvelE(i,j)/dyE(i,j) - uvelE(i-1,j)/dyE(i-1,j)) & + - (dxT(i,j)**2)*(vvelN(i,j)/dxN(i,j) - vvelN(i,j-1)/dxN(i,j-1)) + endif - ! shearing strain rate = 2*e_12 - if (present(deltaT) .or. present(shearT)) then - shearT = (dxT(i,j)**2)*(uvelN(i,j)/dxN(i,j) - uvelN(i,j-1)/dxN(i,j-1)) & - + (dyT(i,j)**2)*(vvelE(i,j)/dyE(i,j) - vvelE(i-1,j)/dyE(i-1,j)) - endif + ! shearing strain rate = 2*e_12 + if (present(deltaT) .or. present(shearT)) then + shearT = (dxT(i,j)**2)*(uvelN(i,j)/dxN(i,j) - uvelN(i,j-1)/dxN(i,j-1)) & + + (dyT(i,j)**2)*(vvelE(i,j)/dyE(i,j) - vvelE(i-1,j)/dyE(i-1,j)) + endif - ! Delta (in the denominator of zeta, eta) - if (present(deltaT)) then - DeltaT = sqrt(divT**2 + e_factor*(tensionT**2 + shearT**2)) - endif + ! Delta (in the denominator of zeta, eta) + if (present(deltaT)) then + DeltaT = sqrt(divT**2 + e_factor*(tensionT**2 + shearT**2)) + endif end subroutine strain_rates_T - !======================================================================= +!======================================================================= + +! Compute strain rates at the T point +! +! author: JF Lemieux, ECCC +! Nov 2021 + + subroutine strain_rates_T2 (nx_block, ny_block, & + icellt, & + indxti, indxtj, & + uvelE, vvelE, & + uvelN, vvelN, & + dxN, dyE, & + dxT, dyT, & + divT, tensionT, & + shearT, DeltaT ) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icellt + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxti , & ! compressed index in i-direction + indxtj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + uvelE , & ! x-component of velocity (m/s) at the E point + vvelE , & ! y-component of velocity (m/s) at the N point + uvelN , & ! x-component of velocity (m/s) at the E point + vvelN , & ! y-component of velocity (m/s) at the N point + dxN , & ! width of N-cell through the middle (m) + dyE , & ! height of E-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT ! height of T-cell through the middle (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block), optional, intent(out):: & + divT , & + tensionT , & + shearT , & + DeltaT ! strain rates at the T point + + ! local variables + + integer (kind=int_kind) :: & + ij, i, j ! indices + + character(len=*), parameter :: subname = '(strain_rates_T2)' + + !----------------------------------------------------------------- + ! strain rates + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + + if (present(divT) ) divT (:,:) = c0 + if (present(tensionT)) tensionT(:,:) = c0 + if (present(shearT) ) shearT (:,:) = c0 + if (present(deltaT) ) deltaT (:,:) = c0 + + do ij = 1, icellt + i = indxti(ij) + j = indxtj(ij) + + ! divergence = e_11 + e_22 + if (present(deltaT) .or. present(divT)) then + divT (i,j) = dyE(i,j)*uvelE(i ,j ) - dyE(i-1,j)*uvelE(i-1,j ) & + + dxN(i,j)*vvelN(i ,j ) - dxN(i,j-1)*vvelN(i ,j-1) + endif + + ! tension strain rate = e_11 - e_22 + if (present(deltaT) .or. 
present(tensionT)) then + tensionT(i,j) = (dyT(i,j)**2)*(uvelE(i,j)/dyE(i,j) - uvelE(i-1,j)/dyE(i-1,j)) & + - (dxT(i,j)**2)*(vvelN(i,j)/dxN(i,j) - vvelN(i,j-1)/dxN(i,j-1)) + endif + + ! shearing strain rate = 2*e_12 + if (present(deltaT) .or. present(shearT)) then + shearT (i,j) = (dxT(i,j)**2)*(uvelN(i,j)/dxN(i,j) - uvelN(i,j-1)/dxN(i,j-1)) & + + (dyT(i,j)**2)*(vvelE(i,j)/dyE(i,j) - vvelE(i-1,j)/dyE(i-1,j)) + endif + + ! Delta (in the denominator of zeta, eta) + if (present(deltaT)) then + DeltaT (i,j) = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearT(i,j)**2)) + endif + + enddo + + end subroutine strain_rates_T2 + +!======================================================================= +#if (1 == 0) subroutine calc_shearT_DeltaT (shrUij, shrUijm1, & shrUim1jm1, shrUim1j, & divT, tensionT, & @@ -1988,7 +2082,7 @@ subroutine calc_shearT_DeltaT (shrUij, shrUijm1, & endif end subroutine calc_shearT_DeltaT - +#endif !======================================================================= ! Compute strain rates at the U point including boundary conditions @@ -2011,7 +2105,7 @@ subroutine strain_rates_U (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block ! block dimensions - + integer (kind=int_kind) :: & i, j ! indices @@ -2034,7 +2128,6 @@ subroutine strain_rates_U (nx_block, ny_block, & npm , & ! N-cell mask uvm ! U-cell mask - real (kind=dbl_kind), optional, intent(out):: & divU, tensionU, shearU, DeltaU ! strain rates at the U point @@ -2050,57 +2143,186 @@ subroutine strain_rates_U (nx_block, ny_block, & ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- - if (present(DeltaU) .or. present(divU) .or. present(tensionU)) then - uNip1j = uvelN(i+1,j) * npm(i+1,j) & - +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * uvelN(i,j) - uNij = uvelN(i,j) * npm(i,j) & - +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * uvelN(i+1,j) - vEijp1 = vvelE(i,j+1) * epm(i,j+1) & - +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * vvelE(i,j) - vEij = vvelE(i,j) * epm(i,j) & - +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * vvelE(i,j+1) + if (present(DeltaU) .or. present(divU) .or. present(tensionU)) then + uNip1j = uvelN(i+1,j) * npm(i+1,j) & + +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * uvelN(i,j) + uNij = uvelN(i,j) * npm(i,j) & + +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * uvelN(i+1,j) + vEijp1 = vvelE(i,j+1) * epm(i,j+1) & + +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * vvelE(i,j) + vEij = vvelE(i,j) * epm(i,j) & + +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * vvelE(i,j+1) ! MIGHT NOT NEED TO mult by uvm...if done before in calc of uvelU... - ! divergence = e_11 + e_22 - divU = dyU(i,j) * ( uNip1j - uNij ) & - + uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & - + dxU(i,j) * ( vEijp1 - vEij ) & - + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) - - ! tension strain rate = e_11 - e_22 - tensionU = dyU(i,j) * ( uNip1j - uNij ) & - - uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & - - dxU(i,j) * ( vEijp1 - vEij ) & - + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) - endif + ! divergence = e_11 + e_22 + divU = dyU(i,j) * ( uNip1j - uNij ) & + + uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & + + dxU(i,j) * ( vEijp1 - vEij ) & + + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) + + ! 
tension strain rate = e_11 - e_22 + tensionU = dyU(i,j) * ( uNip1j - uNij ) & + - uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & + - dxU(i,j) * ( vEijp1 - vEij ) & + + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) + endif - if (present(DeltaU) .or. present(shearU)) then - uEijp1 = uvelE(i,j+1) * epm(i,j+1) & - +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * uvelE(i,j) - uEij = uvelE(i,j) * epm(i,j) & - +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * uvelE(i,j+1) - vNip1j = vvelN(i+1,j) * npm(i+1,j) & - +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * vvelN(i,j) - vNij = vvelN(i,j) * npm(i,j) & - +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * vvelN(i+1,j) + if (present(DeltaU) .or. present(shearU)) then + uEijp1 = uvelE(i,j+1) * epm(i,j+1) & + +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * uvelE(i,j) + uEij = uvelE(i,j) * epm(i,j) & + +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * uvelE(i,j+1) + vNip1j = vvelN(i+1,j) * npm(i+1,j) & + +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * vvelN(i,j) + vNij = vvelN(i,j) * npm(i,j) & + +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * vvelN(i+1,j) - ! shearing strain rate = 2*e_12 - shearU = dxU(i,j) * ( uEijp1 - uEij ) & - - uvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & - + dyU(i,j) * ( vNip1j - vNij ) & - - vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) - endif + ! shearing strain rate = 2*e_12 + shearU = dxU(i,j) * ( uEijp1 - uEij ) & + - uvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & + + dyU(i,j) * ( vNip1j - vNij ) & + - vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) + endif - if (present(DeltaU)) then - ! Delta (in the denominator of zeta, eta) - DeltaU = sqrt(divU**2 + e_factor*(tensionU**2 + shearU**2)) - endif + if (present(DeltaU)) then + ! Delta (in the denominator of zeta, eta) + DeltaU = sqrt(divU**2 + e_factor*(tensionU**2 + shearU**2)) + endif - end subroutine strain_rates_U + end subroutine strain_rates_U !======================================================================= + +! Compute strain rates at the U point including boundary conditions +! +! author: JF Lemieux, ECCC +! Nov 2021 + + subroutine strain_rates_U2 (nx_block, ny_block, & + icellu, & + indxui, indxuj, & + uvelE, vvelE, & + uvelN, vvelN, & + uvelU, vvelU, & + dxE, dyN, & + dxU, dyU, & + ratiodxN, ratiodxNr, & + ratiodyE, ratiodyEr, & + epm, npm, uvm, & + divU, tensionU, & + shearU, DeltaU ) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icellu + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxui , & ! compressed index in i-direction + indxuj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + uvelE , & ! x-component of velocity (m/s) at the E point + vvelE , & ! y-component of velocity (m/s) at the N point + uvelN , & ! x-component of velocity (m/s) at the E point + vvelN , & ! y-component of velocity (m/s) at the N point + uvelU , & ! x-component of velocity (m/s) interp. at U point + vvelU , & ! y-component of velocity (m/s) interp. at U point + dxE , & ! width of E-cell through the middle (m) + dyN , & ! height of N-cell through the middle (m) + dxU , & ! width of U-cell through the middle (m) + dyU , & ! height of U-cell through the middle (m) + ratiodxN , & ! -dxN(i+1,j)/dxN(i,j) for BCs + ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) for BCs + ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) for BCs + ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) for BCs + epm , & ! E-cell mask + npm , & ! 
N-cell mask + uvm ! U-cell mask + + real (kind=dbl_kind), dimension (nx_block,ny_block), optional, intent(out):: & + divU , & + tensionU , & + shearU , & + DeltaU ! strain rates at the U point + + ! local variables + + integer (kind=int_kind) :: & + ij, i, j ! indices + + real (kind=dbl_kind) :: & + uNip1j, uNij, vEijp1, vEij, uEijp1, uEij, vNip1j, vNij + character(len=*), parameter :: subname = '(strain_rates_U2)' + + !----------------------------------------------------------------- + ! strain rates + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + + if (present(divU) ) divU (:,:) = c0 + if (present(tensionU)) tensionU(:,:) = c0 + if (present(shearU) ) shearU (:,:) = c0 + if (present(deltaU) ) deltaU (:,:) = c0 + + do ij = 1, icellu + i = indxui(ij) + j = indxuj(ij) + + if (present(DeltaU) .or. present(divU) .or. present(tensionU)) then + uNip1j = uvelN(i+1,j) * npm(i+1,j) & + +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * uvelN(i,j) + uNij = uvelN(i,j) * npm(i,j) & + +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * uvelN(i+1,j) + vEijp1 = vvelE(i,j+1) * epm(i,j+1) & + +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * vvelE(i,j) + vEij = vvelE(i,j) * epm(i,j) & + +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * vvelE(i,j+1) + + ! MIGHT NOT NEED TO mult by uvm...if done before in calc of uvelU... + + ! divergence = e_11 + e_22 + divU (i,j) = dyU(i,j) * ( uNip1j - uNij ) & + + uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & + + dxU(i,j) * ( vEijp1 - vEij ) & + + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) + + ! tension strain rate = e_11 - e_22 + tensionU(i,j) = dyU(i,j) * ( uNip1j - uNij ) & + - uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & + - dxU(i,j) * ( vEijp1 - vEij ) & + + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) + endif + + if (present(DeltaU) .or. present(shearU)) then + uEijp1 = uvelE(i,j+1) * epm(i,j+1) & + +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * uvelE(i,j) + uEij = uvelE(i,j) * epm(i,j) & + +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * uvelE(i,j+1) + vNip1j = vvelN(i+1,j) * npm(i+1,j) & + +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * vvelN(i,j) + vNij = vvelN(i,j) * npm(i,j) & + +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * vvelN(i+1,j) + + ! shearing strain rate = 2*e_12 + shearU(i,j) = dxU(i,j) * ( uEijp1 - uEij ) & + - uvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & + + dyU(i,j) * ( vNip1j - vNij ) & + - vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) + endif + + if (present(DeltaU)) then + ! Delta (in the denominator of zeta, eta) + DeltaU(i,j) = sqrt(divU(i,j)**2 + e_factor*(tensionU(i,j)**2 + shearU(i,j)**2)) + endif + + enddo + + end subroutine strain_rates_U2 + +!======================================================================= +#if (1 == 0) ! Computes and stores the shear strain rate at U points based on C-grid ! velocity components (uvelE and vvelN) @@ -2157,7 +2379,7 @@ subroutine shear_strain_rate_U (nx_block, ny_block, & do ij = 1, icellu i = indxui(ij) j = indxuj(ij) - + uEijp1 = uvelE(i,j+1) * epm(i,j+1) & +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * uvelE(i,j) uEij = uvelE(i,j) * epm(i,j) & @@ -2173,11 +2395,10 @@ subroutine shear_strain_rate_U (nx_block, ny_block, & - uvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & + dyU(i,j) * ( vNip1j - vNij ) & - vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) - enddo ! 
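The uNip1j/uNij/vEijp1/vEij expressions above encode the coastline boundary condition: where the neighboring N or E point is land-masked, the missing velocity is replaced by the local one scaled by the precomputed grid ratio (ratiodxN = -dxN(i+1,j)/dxN(i,j), etc.), consistent with zero velocity at the coast. Reduced to a single neighbor pair, the pattern is, as a sketch (hypothetical function):

      ! Sketch only: ghost value across a coastline.  m1/m2 are 0/1 masks
      ! at the local and neighbor points; with m2 = 1 the true neighbor
      ! value u2 is used, with m2 = 0 and m1 = 1 the local value u1 is
      ! mirrored through the coast using the (negative) grid ratio.
      function ghost_vel (u1, u2, m1, m2, ratio) result(ug)
         use ice_kinds_mod, only: dbl_kind
         real (kind=dbl_kind), intent(in) :: u1, u2, m1, m2, ratio
         real (kind=dbl_kind) :: ug
         ug = u2*m2 + (m1-m2)*m1*ratio*u1
      end function ghost_vel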
ij end subroutine shear_strain_rate_U - +#endif !======================================================================= ! Computes viscous coefficients and replacement pressure for stress ! calculations. Note that tensile strength is included here. diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index 095822640..598f21aea 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -149,6 +149,7 @@ e_yieldcurve = 2. e_plasticpot = 2. visc_coeff_method = 'avg_strength' + elasticDamp = 0.36d0 deltaminEVP = 1e-11 deltaminVP = 2e-9 capping = 1. diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts index 19f512f14..95ee56f7f 100644 --- a/configuration/scripts/tests/gridsys_suite.ts +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -1,5 +1,6 @@ # Test Grid PEs Sets BFB-compare smoke gx3 8x2 diag1,run5day +smoke gx3 8x4 diag1,run5day,debug restart gx3 4x2 debug,diag1 restart2 gx1 16x2 debug,diag1 smoke gbox12 1x1x12x12x1 boxchan @@ -28,6 +29,7 @@ smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest smoke_gx1_32x1x16x16x32_reprosum_run10day smoke gx3 8x2 diag1,run5day,gridcd +smoke gx3 8x4 diag1,run5day,debug,gridcd restart gx3 4x2 debug,diag1,gridcd restart2 gx1 16x2 debug,diag1,gridcd smoke gbox12 1x1x12x12x1 boxchan,gridcd @@ -56,6 +58,7 @@ smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,g smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day smoke gx3 8x2 diag1,run5day,gridc +smoke gx3 8x4 diag1,run5day,debug,gridc restart gx3 4x2 debug,diag1,gridc restart2 gx1 16x2 debug,diag1,gridc smoke gbox12 1x1x12x12x1 boxchan,gridc From 1e041dbee18bb0fc3bcfc21195b4ee72ccffb8c6 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Thu, 17 Mar 2022 07:23:33 -0700 Subject: [PATCH 089/109] Finish cleanup of strain_rates, rename step and visc subroutines (#76) * Finish clean up of strain_rates, all bit-for-bit Rename visccoeffs_ to visc_ Rename stepu subroutines * clean up --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 377 ++++++-------- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 480 ++++-------------- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 34 +- 3 files changed, 268 insertions(+), 623 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 7a3aca1ac..18396088a 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -42,7 +42,7 @@ module ice_dyn_evp field_type_scalar, field_type_vector use ice_constants, only: c0, p027, p055, p111, p166, & p222, p25, p333, p5, c1 - use ice_dyn_shared, only: stepu, stepuCD, stepuC_E, stepuC_N, & + use ice_dyn_shared, only: stepu, stepuv_CD, stepu_C, stepv_C, & dyn_prep1, dyn_prep2, dyn_finish, & ndte, yield_curve, ecci, denom1, arlx1i, fcor_blk, fcorE_blk, fcorN_blk, & uvel_init, vvel_init, uvelE_init, vvelE_init, uvelN_init, vvelN_init, & @@ -114,7 +114,8 @@ subroutine evp (dt) use ice_dyn_evp_1d, only: ice_dyn_evp_1d_copyin, ice_dyn_evp_1d_kernel, & ice_dyn_evp_1d_copyout use ice_dyn_shared, only: evp_algorithm, stack_velocity_field, unstack_velocity_field, DminTarea - use ice_dyn_shared, only: deformations, deformations_T, strain_rates_U, strain_rates_U2 ! tcx, shear_strain_rate_U + use ice_dyn_shared, only: deformations, deformations_T, strain_rates_U + real (kind=dbl_kind), intent(in) :: & dt ! 
time step @@ -763,36 +764,7 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks -#if (1 == 0) - call shear_strain_rate_U (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & - uvelE (:,:,iblk), vvelN (:,:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxE (:,:,iblk), dyN (:,:,iblk), & - dxU (:,:,iblk), dyU (:,:,iblk), & - ratiodxN (:,:,iblk), ratiodxNr (:,:,iblk), & - ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & - epm (:,:,iblk), npm (:,:,iblk), & - uvm (:,:,iblk), shrU (:,:,iblk)) -#else -#if (1 == 0) - do ij = 1, icellu(iblk) - i = indxui(ij,iblk) - j = indxuj(ij,iblk) - call strain_rates_U (nx_block, ny_block, i, j, & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxE (:,:,iblk), dyN (:,:,iblk), & - dxU (:,:,iblk), dyU (:,:,iblk), & - ratiodxN(:,:,iblk), ratiodxNr(:,:,iblk), & - ratiodyE(:,:,iblk), ratiodyEr(:,:,iblk), & - epm(:,:,iblk), npm(:,:,iblk), uvm(:,:,iblk), & - shearU=shrU(i,j,iblk) ) - enddo ! ij -#else - call strain_rates_U2 (nx_block , ny_block , & + call strain_rates_U (nx_block , ny_block , & icellu (iblk), & indxui (:,iblk), indxuj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & @@ -804,8 +776,6 @@ subroutine evp (dt) ratiodyE(:,:,iblk), ratiodyEr(:,:,iblk), & epm(:,:,iblk), npm(:,:,iblk), uvm(:,:,iblk), & shearU=shrU(:,:,iblk) ) -#endif -#endif enddo ! iblk !$OMP END PARALLEL DO @@ -919,29 +889,29 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stepuC_E (nx_block, ny_block, & ! u, E point - icelle (iblk), Cdn_ocn (:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & - aiE (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - waterxE (:,:,iblk), forcexE (:,:,iblk), & - emassdti (:,:,iblk), fmE (:,:,iblk), & - strintxE (:,:,iblk), taubxE (:,:,iblk), & - uvelE_init(:,:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - TbE (:,:,iblk)) - - call stepuC_N (nx_block, ny_block, & ! v, N point - icelln (iblk), Cdn_ocn (:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & - aiN (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - wateryN (:,:,iblk), forceyN (:,:,iblk), & - nmassdti (:,:,iblk), fmN (:,:,iblk), & - strintyN (:,:,iblk), taubyN (:,:,iblk), & - vvelN_init(:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - TbN (:,:,iblk)) + call stepu_C (nx_block, ny_block, & ! u, E point + icelle (iblk), Cdn_ocn (:,:,iblk), & + indxei (:,iblk), indxej (:,iblk), & + aiE (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + waterxE (:,:,iblk), forcexE (:,:,iblk), & + emassdti (:,:,iblk), fmE (:,:,iblk), & + strintxE (:,:,iblk), taubxE (:,:,iblk), & + uvelE_init(:,:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + TbE (:,:,iblk)) + + call stepv_C (nx_block, ny_block, & ! v, N point + icelln (iblk), Cdn_ocn (:,:,iblk), & + indxni (:,iblk), indxnj (:,iblk), & + aiN (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + wateryN (:,:,iblk), forceyN (:,:,iblk), & + nmassdti (:,:,iblk), fmN (:,:,iblk), & + strintyN (:,:,iblk), taubyN (:,:,iblk), & + vvelN_init(:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + TbN (:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -1086,33 +1056,33 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stepuCD (nx_block, ny_block, & ! 
E point - icelle (iblk), Cdn_ocn (:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & - aiE (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - waterxE (:,:,iblk), wateryE (:,:,iblk), & - forcexE (:,:,iblk), forceyE (:,:,iblk), & - emassdti (:,:,iblk), fmE (:,:,iblk), & - strintxE (:,:,iblk), strintyE (:,:,iblk), & - taubxE (:,:,iblk), taubyE (:,:,iblk), & - uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - TbE (:,:,iblk)) - - call stepuCD (nx_block, ny_block, & ! N point - icelln (iblk), Cdn_ocn (:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & - aiN (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - waterxN (:,:,iblk), wateryN (:,:,iblk), & - forcexN (:,:,iblk), forceyN (:,:,iblk), & - nmassdti (:,:,iblk), fmN (:,:,iblk), & - strintxN (:,:,iblk), strintyN (:,:,iblk), & - taubxN (:,:,iblk), taubyN (:,:,iblk), & - uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - TbN (:,:,iblk)) + call stepuv_CD (nx_block, ny_block, & ! E point + icelle (iblk), Cdn_ocn (:,:,iblk), & + indxei (:,iblk), indxej (:,iblk), & + aiE (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + waterxE (:,:,iblk), wateryE (:,:,iblk), & + forcexE (:,:,iblk), forceyE (:,:,iblk), & + emassdti (:,:,iblk), fmE (:,:,iblk), & + strintxE (:,:,iblk), strintyE (:,:,iblk), & + taubxE (:,:,iblk), taubyE (:,:,iblk), & + uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + TbE (:,:,iblk)) + + call stepuv_CD (nx_block, ny_block, & ! N point + icelln (iblk), Cdn_ocn (:,:,iblk), & + indxni (:,iblk), indxnj (:,iblk), & + aiN (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + waterxN (:,:,iblk), wateryN (:,:,iblk), & + forcexN (:,:,iblk), forceyN (:,:,iblk), & + nmassdti (:,:,iblk), fmN (:,:,iblk), & + strintxN (:,:,iblk), strintyN (:,:,iblk), & + taubxN (:,:,iblk), taubyN (:,:,iblk), & + uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + TbN (:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -1335,7 +1305,7 @@ subroutine stress (nx_block, ny_block, & stress12_3, stress12_4, & str ) - use ice_dyn_shared, only: strain_rates, visccoeff_replpress, capping + use ice_dyn_shared, only: strain_rates, visc_replpress, capping integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1427,17 +1397,17 @@ subroutine stress (nx_block, ny_block, & ! viscous coefficients and replacement pressure !----------------------------------------------------------------- - call visccoeff_replpress (strength(i,j), DminTarea(i,j), Deltane, & - zetax2ne, etax2ne, rep_prsne, capping) + call visc_replpress (strength(i,j), DminTarea(i,j), Deltane, & + zetax2ne, etax2ne, rep_prsne, capping) - call visccoeff_replpress (strength(i,j), DminTarea(i,j), Deltanw, & - zetax2nw, etax2nw, rep_prsnw, capping) + call visc_replpress (strength(i,j), DminTarea(i,j), Deltanw, & + zetax2nw, etax2nw, rep_prsnw, capping) - call visccoeff_replpress (strength(i,j), DminTarea(i,j), Deltasw, & - zetax2sw, etax2sw, rep_prssw, capping) + call visc_replpress (strength(i,j), DminTarea(i,j), Deltasw, & + zetax2sw, etax2sw, rep_prssw, capping) - call visccoeff_replpress (strength(i,j), DminTarea(i,j), Deltase, & - zetax2se, etax2se, rep_prsse, capping) + call visc_replpress (strength(i,j), DminTarea(i,j), Deltase, & + zetax2se, etax2se, rep_prsse, capping) !----------------------------------------------------------------- ! the stresses ! 
kg/s^2 @@ -1626,8 +1596,8 @@ subroutine stressC_T (nx_block, ny_block , & zetax2T , etax2T , & stressp , stressm ) - use ice_dyn_shared, only: strain_rates_T, strain_rates_T2, capping, & - visccoeff_replpress, e_factor + use ice_dyn_shared, only: strain_rates_T, capping, & + visc_replpress, e_factor integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1674,7 +1644,7 @@ subroutine stressC_T (nx_block, ny_block , & ! Initialize !----------------------------------------------------------------- - call strain_rates_T2 (nx_block , ny_block , & + call strain_rates_T (nx_block , ny_block , & icellt , & indxti(:) , indxtj (:) , & uvelE (:,:), vvelE (:,:), & @@ -1688,55 +1658,17 @@ subroutine stressC_T (nx_block, ny_block , & i = indxti(ij) j = indxtj(ij) - !----------------------------------------------------------------- - ! strain rates at T point - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - -! tcraig bit-for-bit here, not below -#if (1 == 0) - call strain_rates_T (nx_block, ny_block, & - i, j, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - divT, tensionT, & - shearT, DeltaT ) - -#else -#if (1 == 0) - call strain_rates_T (nx_block, ny_block, & - i, j, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - divT = divT, & - tensionT = tensionT ) - call calc_shearT_DeltaT (shrU(i,j), shrU(i,j-1), & - shrU(i-1,j-1), shrU(i-1,j), & - divT(i,j), tensionT(i,j), & - shearT, DeltaT ) -#else - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - shearTsqr = (shrU(i,j)**2 + shrU(i,j-1)**2 + shrU(i-1,j-1)**2 + shrU(i-1,j)**2)/4d0 DeltaT = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearTsqr)) -#endif -#endif !----------------------------------------------------------------- ! viscous coefficients and replacement pressure at T point !----------------------------------------------------------------- - call visccoeff_replpress (strength(i,j), DminTarea(i,j), & - DeltaT , zetax2T (i,j), & - etax2T (i,j), rep_prsT , & - capping) + call visc_replpress (strength(i,j), DminTarea(i,j), & + DeltaT , zetax2T (i,j), & + etax2T (i,j), rep_prsT , & + capping) !----------------------------------------------------------------- ! the stresses ! kg/s^2 @@ -1778,8 +1710,8 @@ subroutine stressC_U (nx_block, ny_block, & stress12 ) use ice_dyn_shared, only: strain_rates_U, & - visccoeff_replpress_avgstr, & - visccoeff_replpress_avgzeta, & + visc_replpress_avgstr, & + visc_replpress_avgzeta, & visc_coeff_method, deltaminEVP, capping integer (kind=int_kind), intent(in) :: & @@ -1824,65 +1756,69 @@ subroutine stressC_U (nx_block, ny_block, & integer (kind=int_kind) :: & i, j, ij + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + DeltaU ! strain rates at U point + real (kind=dbl_kind) :: & - divU, tensionU, shearU, DeltaU, & ! strain rates at U point - zetax2U, etax2U, rep_prsU, & ! replacement pressure at U point - DminUarea, strtmp, areatmp ! Dmin on U and tmp variables + zetax2U, etax2U, rep_prsU, & ! replacement pressure at U point + DminUarea, strtmp, areatmp ! Dmin on U and tmp variables character(len=*), parameter :: subname = '(stressC_U)' + !----------------------------------------------------------------- + ! strain rates at U point + ! 
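In stressC_T above, divergence and tension live at the T point while shear lives at the four surrounding U points, and Delta is built from the average of the squared U-point shears rather than the square of their average (the removed calc_shearT_DeltaT carried both variants behind the B2009 flag; averaging the squares is the form retained). Written out:

      DeltaT = sqrt( divT**2 + e_factor * ( tensionT**2
             + ( shrU(i,j)**2   + shrU(i,j-1)**2
               + shrU(i-1,j-1)**2 + shrU(i-1,j)**2 ) / 4 ) )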
NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + + if (visc_coeff_method == 'avg_strength') then + call strain_rates_U (nx_block , ny_block , & + icellu , & + indxui (:) , indxuj (:) , & + uvelE (:,:), vvelE (:,:), & + uvelN (:,:), vvelN (:,:), & + uvelU (:,:), vvelU (:,:), & + dxE (:,:), dyN (:,:), & + dxU (:,:), dyU (:,:), & + ratiodxN(:,:), ratiodxNr(:,:), & + ratiodyE(:,:), ratiodyEr(:,:), & + epm(:,:), npm(:,:), uvm(:,:), & + DeltaU = DeltaU(:,:) ) + endif + do ij = 1, icellu i = indxui(ij) j = indxuj(ij) - !----------------------------------------------------------------- - ! strain rates at U point - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - - !----------------------------------------------------------------- ! viscous coefficients and replacement pressure at U point !----------------------------------------------------------------- if (visc_coeff_method == 'avg_zeta') then - DeltaU = c0 ! not needed in avgzeta just computing etax2U - shearU = shrU(i,j) - call visccoeff_replpress_avgzeta (zetax2T (i ,j ), zetax2T (i ,j+1), & - zetax2T (i+1,j+1), zetax2T (i+1,j ), & - etax2T (i ,j ), etax2T (i ,j+1), & - etax2T (i+1,j+1), etax2T (i+1,j ), & - hm (i ,j ), hm (i ,j+1), & - hm (i+1,j+1), hm (i+1,j ), & - tarea (i ,j ), tarea (i ,j+1), & - tarea (i+1,j+1), tarea (i+1,j ), & - DeltaU, etax2U=etax2U) + DeltaU(i,j) = c0 ! not needed in avgzeta just computing etax2U + call visc_replpress_avgzeta (zetax2T (i ,j ), zetax2T (i ,j+1), & + zetax2T (i+1,j+1), zetax2T (i+1,j ), & + etax2T (i ,j ), etax2T (i ,j+1), & + etax2T (i+1,j+1), etax2T (i+1,j ), & + hm (i ,j ), hm (i ,j+1), & + hm (i+1,j+1), hm (i+1,j ), & + tarea (i ,j ), tarea (i ,j+1), & + tarea (i+1,j+1), tarea (i+1,j ), & + DeltaU (i ,j ), etax2U=etax2U) elseif (visc_coeff_method == 'avg_strength') then - call strain_rates_U (nx_block, ny_block, & - i, j, & - uvelE, vvelE, & - uvelN, vvelN, & - uvelU, vvelU, & - dxE, dyN, & - dxU, dyU, & - ratiodxN, ratiodxNr, & - ratiodyE, ratiodyEr, & - epm, npm, uvm, & - divU, tensionU, & - shearU, DeltaU ) - shearU = shrU(i,j) DminUarea = deltaminEVP*uarea(i,j) - call visccoeff_replpress_avgstr (strength(i ,j ), strength(i ,j+1), & - strength(i+1,j+1), strength(i+1,j ), & - hm (i ,j ) , hm (i ,j+1), & - hm (i+1,j+1) , hm (i+1,j ), & - tarea (i ,j ) , tarea (i ,j+1), & - tarea (i+1,j+1) , tarea (i+1,j ), & - DminUarea, DeltaU, & - zetax2U, etax2U, rep_prsU, capping) + ! only need etax2U here, but other terms are calculated with etax2U + ! minimal extra calculations here even though it seems like there is + call visc_replpress_avgstr (strength(i ,j ), strength(i ,j+1), & + strength(i+1,j+1), strength(i+1,j ), & + hm (i ,j ) , hm (i ,j+1), & + hm (i+1,j+1) , hm (i+1,j ), & + tarea (i ,j ) , tarea (i ,j+1), & + tarea (i+1,j+1) , tarea (i+1,j ), & + DminUarea, DeltaU(i,j), & + zetax2U, etax2U, rep_prsU, capping) endif - + !----------------------------------------------------------------- ! the stresses ! kg/s^2 !----------------------------------------------------------------- @@ -1890,7 +1826,7 @@ subroutine stressC_U (nx_block, ny_block, & ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) + & - arlx1i*p5*etax2U*shearU) * denom1 + arlx1i*p5*etax2U*shrU(i,j)) * denom1 enddo ! 
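visc_replpress_avgzeta and visc_replpress_avgstr, used in stressC_U above, both rest on the same masked, area-weighted average of the four surrounding T-cell values onto the U point (the zetax2U/areatmp expressions in ice_dyn_shared.F90). The kernel of that average as a standalone sketch (hypothetical name; assumes at least one mask is nonzero, which holds at active U points):

      ! Sketch only: masked, area-weighted T -> U average.
      function avg_T2U (q1, q2, q3, q4, m1, m2, m3, m4, &
                        a1, a2, a3, a4) result(qU)
         use ice_kinds_mod, only: dbl_kind
         real (kind=dbl_kind), intent(in) :: &
            q1, q2, q3, q4, & ! T-cell values surrounding the U point
            m1, m2, m3, m4, & ! T-cell masks (0 or 1)
            a1, a2, a3, a4    ! T-cell areas
         real (kind=dbl_kind) :: qU
         qU = (m1*a1*q1 + m2*a2*q2 + m3*a3*q3 + m4*a4*q4) &
            / (m1*a1    + m2*a2    + m3*a3    + m4*a4)
      end function avg_T2U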
ij @@ -1917,8 +1853,8 @@ subroutine stressCD_T (nx_block, ny_block, & stresspT, stressmT, & stress12T ) - use ice_dyn_shared, only: strain_rates_T2, capping, & - visccoeff_replpress + use ice_dyn_shared, only: strain_rates_T, capping, & + visc_replpress integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1965,7 +1901,7 @@ subroutine stressCD_T (nx_block, ny_block, & ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- - call strain_rates_T2 (nx_block , ny_block , & + call strain_rates_T (nx_block , ny_block , & icellt , & indxti(:) , indxtj (:) , & uvelE (:,:), vvelE (:,:), & @@ -1983,10 +1919,10 @@ subroutine stressCD_T (nx_block, ny_block, & ! viscous coefficients and replacement pressure at T point !----------------------------------------------------------------- - call visccoeff_replpress (strength(i,j), DminTarea(i,j), & - DeltaT (i,j), zetax2T (i,j), & - etax2T (i,j), rep_prsT , & - capping) + call visc_replpress (strength(i,j), DminTarea(i,j), & + DeltaT (i,j), zetax2T (i,j), & + etax2T (i,j), rep_prsT , & + capping) !----------------------------------------------------------------- ! the stresses ! kg/s^2 @@ -2031,9 +1967,9 @@ subroutine stressCD_U (nx_block, ny_block, & stresspU, stressmU, & stress12U ) - use ice_dyn_shared, only: strain_rates_U, strain_rates_U2, & - visccoeff_replpress_avgstr, & - visccoeff_replpress_avgzeta, & + use ice_dyn_shared, only: strain_rates_U, & + visc_replpress_avgstr, & + visc_replpress_avgzeta, & visc_coeff_method, deltaminEVP, capping integer (kind=int_kind), intent(in) :: & @@ -2093,8 +2029,7 @@ subroutine stressCD_U (nx_block, ny_block, & ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- -#if (1 == 0) - call strain_rates_U2 (nx_block , ny_block , & + call strain_rates_U (nx_block , ny_block , & icellu , & indxui (:) , indxuj(:) , & uvelE (:,:), vvelE(:,:) , & @@ -2107,53 +2042,37 @@ subroutine stressCD_U (nx_block, ny_block, & epm(:,:), npm(:,:), uvm(:,:), & divU (:,:), tensionU (:,:), & shearU (:,:), DeltaU (:,:) ) -#endif do ij = 1, icellu i = indxui(ij) j = indxuj(ij) -#if (1 == 1) - call strain_rates_U (nx_block , ny_block , & - i, j, & - uvelE (:,:), vvelE(:,:) , & - uvelN (:,:), vvelN(:,:) , & - uvelU (:,:), vvelU(:,:) , & - dxE (:,:), dyN(:,:) , & - dxU (:,:), dyU(:,:) , & - ratiodxN(:,:), ratiodxNr(:,:), & - ratiodyE(:,:), ratiodyEr(:,:), & - epm(:,:), npm(:,:), uvm(:,:), & - divU (i,j), tensionU (i,j), & - shearU (i,j), DeltaU (i,j) ) -#endif - !----------------------------------------------------------------- ! 
viscous coefficients and replacement pressure at U point !----------------------------------------------------------------- if (visc_coeff_method == 'avg_zeta') then - call visccoeff_replpress_avgzeta (zetax2T (i ,j ), zetax2T (i ,j+1), & - zetax2T (i+1,j+1), zetax2T (i+1,j ), & - etax2T (i ,j ), etax2T (i ,j+1), & - etax2T (i+1,j+1), etax2T (i+1,j ), & - hm (i ,j ), hm (i ,j+1), & - hm (i+1,j+1), hm (i+1,j ), & - tarea (i ,j ), tarea (i ,j+1), & - tarea (i+1,j+1), tarea (i+1,j ), & - DeltaU (i ,j ), & - zetax2U, etax2U, rep_prsU) + call visc_replpress_avgzeta (zetax2T (i ,j ), zetax2T (i ,j+1), & + zetax2T (i+1,j+1), zetax2T (i+1,j ), & + etax2T (i ,j ), etax2T (i ,j+1), & + etax2T (i+1,j+1), etax2T (i+1,j ), & + hm (i ,j ), hm (i ,j+1), & + hm (i+1,j+1), hm (i+1,j ), & + tarea (i ,j ), tarea (i ,j+1), & + tarea (i+1,j+1), tarea (i+1,j ), & + DeltaU (i ,j ), & + zetax2U, etax2U, rep_prsU) elseif (visc_coeff_method == 'avg_strength') then DminUarea = deltaminEVP*uarea(i,j) - call visccoeff_replpress_avgstr (strength(i ,j ), strength(i ,j+1), & - strength(i+1,j+1), strength(i+1,j ), & - hm (i ,j ) , hm (i ,j+1), & - hm (i+1,j+1) , hm (i+1,j ), & - tarea (i ,j ) , tarea (i ,j+1), & - tarea (i+1,j+1) , tarea (i+1,j ), & - DminUarea , DeltaU (i ,j ), & - zetax2U, etax2U, rep_prsU, capping) + call visc_replpress_avgstr (strength(i ,j ), strength(i ,j+1), & + strength(i+1,j+1), strength(i+1,j ), & + hm (i ,j ) , hm (i ,j+1), & + hm (i+1,j+1) , hm (i+1,j ), & + tarea (i ,j ) , tarea (i ,j+1), & + tarea (i+1,j+1) , tarea (i+1,j ), & + DminUarea , DeltaU (i ,j ), & + zetax2U, etax2U, rep_prsU, capping) endif !----------------------------------------------------------------- diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 5ead645dc..61631fa95 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -23,18 +23,15 @@ module ice_dyn_shared implicit none private - public :: set_evp_parameters, stepu, stepuCD, stepuC_E, stepuC_N, & + public :: set_evp_parameters, stepu, stepuv_CD, stepu_C, stepv_C, & principal_stress, init_dyn, dyn_prep1, dyn_prep2, dyn_finish, & seabed_stress_factor_LKD, seabed_stress_factor_prob, & alloc_dyn_shared, & deformations, deformations_T, & strain_rates, strain_rates_T, strain_rates_U, & - strain_rates_T2, strain_rates_U2, & -! shear_strain_rate_U, & -! calc_shearT_DeltaT, & - visccoeff_replpress, & - visccoeff_replpress_avgstr, & - visccoeff_replpress_avgzeta, & + visc_replpress, & + visc_replpress_avgstr, & + visc_replpress_avgzeta, & stack_velocity_field, unstack_velocity_field ! namelist parameters @@ -825,7 +822,7 @@ end subroutine stepu ! Integration of the momentum equation to find velocity (u,v) at E and N locations - subroutine stepuCD (nx_block, ny_block, & + subroutine stepuv_CD (nx_block, ny_block, & icell, Cw, & indxi, indxj, & aiu, & @@ -888,7 +885,7 @@ subroutine stepuCD (nx_block, ny_block, & Cb , & ! complete seabed (basal) stress coeff rhow ! - character(len=*), parameter :: subname = '(stepuCD)' + character(len=*), parameter :: subname = '(stepuv_CD)' !----------------------------------------------------------------- ! integrate the momentum equation @@ -937,13 +934,13 @@ subroutine stepuCD (nx_block, ny_block, & enddo ! ij - end subroutine stepuCD + end subroutine stepuv_CD !======================================================================= ! 
Integration of the momentum equation to find velocity u at E location on C grid - subroutine stepuC_E (nx_block, ny_block, & + subroutine stepu_C (nx_block, ny_block, & icell, Cw, & indxi, indxj, & aiu, & @@ -994,7 +991,7 @@ subroutine stepuC_E (nx_block, ny_block, & Cb , & ! complete seabed (basal) stress coeff rhow ! - character(len=*), parameter :: subname = '(stepuC_E)' + character(len=*), parameter :: subname = '(stepu_C)' !----------------------------------------------------------------- ! integrate the momentum equation @@ -1037,13 +1034,13 @@ subroutine stepuC_E (nx_block, ny_block, & enddo ! ij - end subroutine stepuC_E + end subroutine stepu_C !======================================================================= ! Integration of the momentum equation to find velocity v at N location on C grid - subroutine stepuC_N (nx_block, ny_block, & + subroutine stepv_C (nx_block, ny_block, & icell, Cw, & indxi, indxj, & aiu, & @@ -1094,7 +1091,7 @@ subroutine stepuC_N (nx_block, ny_block, & Cb , & ! complete seabed (basal) stress coeff rhow ! - character(len=*), parameter :: subname = '(stepuC_N)' + character(len=*), parameter :: subname = '(stepv_C)' !----------------------------------------------------------------- ! integrate the momentum equation @@ -1137,7 +1134,7 @@ subroutine stepuC_N (nx_block, ny_block, & enddo ! ij - end subroutine stepuC_N + end subroutine stepv_C !======================================================================= @@ -1758,7 +1755,7 @@ subroutine deformations_T (nx_block, ny_block, & ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- - call strain_rates_T2 (nx_block , ny_block , & + call strain_rates_T (nx_block , ny_block , & icellt , & indxti(:) , indxtj (:) , & uvelE (:,:), vvelE (:,:), & @@ -1886,74 +1883,6 @@ end subroutine strain_rates ! Nov 2021 subroutine strain_rates_T (nx_block, ny_block, & - i, j, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - divT, tensionT, & - shearT, DeltaT ) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block ! block dimensions - - integer (kind=int_kind) :: & - i, j ! indices - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvelE , & ! x-component of velocity (m/s) at the E point - vvelE , & ! y-component of velocity (m/s) at the N point - uvelN , & ! x-component of velocity (m/s) at the E point - vvelN , & ! y-component of velocity (m/s) at the N point - dxN , & ! width of N-cell through the middle (m) - dyE , & ! height of E-cell through the middle (m) - dxT , & ! width of T-cell through the middle (m) - dyT ! height of T-cell through the middle (m) - - real (kind=dbl_kind), optional, intent(out):: & - divT, tensionT, shearT, DeltaT ! strain rates at the T point - - character(len=*), parameter :: subname = '(strain_rates_T)' - - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - - ! divergence = e_11 + e_22 - if (present(deltaT) .or. present(divT)) then - divT = dyE(i,j)*uvelE(i ,j ) - dyE(i-1,j)*uvelE(i-1,j ) & - + dxN(i,j)*vvelN(i ,j ) - dxN(i,j-1)*vvelN(i ,j-1) - endif - - ! tension strain rate = e_11 - e_22 - if (present(deltaT) .or. present(tensionT)) then - tensionT = (dyT(i,j)**2)*(uvelE(i,j)/dyE(i,j) - uvelE(i-1,j)/dyE(i-1,j)) & - - (dxT(i,j)**2)*(vvelN(i,j)/dxN(i,j) - vvelN(i,j-1)/dxN(i,j-1)) - endif - - ! 
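All of the reworked strain-rate routines iterate with the compressed-index ("gather") pattern used throughout the dynamics: the active cells are enumerated once into indxti/indxtj (or indxui/indxuj) and loops run over that list instead of over fully masked 2-d arrays. A minimal sketch of the pattern (hypothetical routine; the index lists are assumed precomputed from the ice mask):

      ! Sketch only: counts the active T cells it visits, just to
      ! exhibit the loop structure.
      subroutine demo_gather (nx_block, ny_block, icellt, &
                              indxti, indxtj, nvis)
         use ice_kinds_mod, only: int_kind
         integer (kind=int_kind), intent(in) :: nx_block, ny_block, icellt
         integer (kind=int_kind), dimension(nx_block*ny_block), intent(in) :: &
            indxti, indxtj   ! compressed i/j indices of active T cells
         integer (kind=int_kind), intent(out) :: nvis
         integer (kind=int_kind) :: ij, i, j
         nvis = 0
         do ij = 1, icellt
            i = indxti(ij)
            j = indxtj(ij)
            nvis = nvis + 1  ! real code operates on cell (i,j) here
         enddo
      end subroutine demo_gather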
shearing strain rate = 2*e_12 - if (present(deltaT) .or. present(shearT)) then - shearT = (dxT(i,j)**2)*(uvelN(i,j)/dxN(i,j) - uvelN(i,j-1)/dxN(i,j-1)) & - + (dyT(i,j)**2)*(vvelE(i,j)/dyE(i,j) - vvelE(i-1,j)/dyE(i-1,j)) - endif - - ! Delta (in the denominator of zeta, eta) - if (present(deltaT)) then - DeltaT = sqrt(divT**2 + e_factor*(tensionT**2 + shearT**2)) - endif - - end subroutine strain_rates_T - - -!======================================================================= - -! Compute strain rates at the T point -! -! author: JF Lemieux, ECCC -! Nov 2021 - - subroutine strain_rates_T2 (nx_block, ny_block, & icellt, & indxti, indxtj, & uvelE, vvelE, & @@ -1989,10 +1918,15 @@ subroutine strain_rates_T2 (nx_block, ny_block, & ! local variables + real (kind=dbl_kind) :: & + ldivT , & + ltensionT , & + lshearT ! local values + integer (kind=int_kind) :: & ij, i, j ! indices - character(len=*), parameter :: subname = '(strain_rates_T2)' + character(len=*), parameter :: subname = '(strain_rates_T)' !----------------------------------------------------------------- ! strain rates @@ -2010,79 +1944,40 @@ subroutine strain_rates_T2 (nx_block, ny_block, & ! divergence = e_11 + e_22 if (present(deltaT) .or. present(divT)) then - divT (i,j) = dyE(i,j)*uvelE(i ,j ) - dyE(i-1,j)*uvelE(i-1,j ) & - + dxN(i,j)*vvelN(i ,j ) - dxN(i,j-1)*vvelN(i ,j-1) + ldivT = dyE(i,j)*uvelE(i ,j ) - dyE(i-1,j)*uvelE(i-1,j ) & + + dxN(i,j)*vvelN(i ,j ) - dxN(i,j-1)*vvelN(i ,j-1) + if (present(divT)) then + divT(i,j) = ldivT + endif endif ! tension strain rate = e_11 - e_22 if (present(deltaT) .or. present(tensionT)) then - tensionT(i,j) = (dyT(i,j)**2)*(uvelE(i,j)/dyE(i,j) - uvelE(i-1,j)/dyE(i-1,j)) & - - (dxT(i,j)**2)*(vvelN(i,j)/dxN(i,j) - vvelN(i,j-1)/dxN(i,j-1)) + ltensionT= (dyT(i,j)**2)*(uvelE(i,j)/dyE(i,j) - uvelE(i-1,j)/dyE(i-1,j)) & + - (dxT(i,j)**2)*(vvelN(i,j)/dxN(i,j) - vvelN(i,j-1)/dxN(i,j-1)) + if (present(tensionT)) then + tensionT(i,j) = ltensionT + endif endif ! shearing strain rate = 2*e_12 if (present(deltaT) .or. present(shearT)) then - shearT (i,j) = (dxT(i,j)**2)*(uvelN(i,j)/dxN(i,j) - uvelN(i,j-1)/dxN(i,j-1)) & - + (dyT(i,j)**2)*(vvelE(i,j)/dyE(i,j) - vvelE(i-1,j)/dyE(i-1,j)) + lshearT = (dxT(i,j)**2)*(uvelN(i,j)/dxN(i,j) - uvelN(i,j-1)/dxN(i,j-1)) & + + (dyT(i,j)**2)*(vvelE(i,j)/dyE(i,j) - vvelE(i-1,j)/dyE(i-1,j)) + if (present(shearT)) then + shearT(i,j) = lshearT + endif endif ! Delta (in the denominator of zeta, eta) if (present(deltaT)) then - DeltaT (i,j) = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearT(i,j)**2)) + DeltaT (i,j) = sqrt(ldivT**2 + e_factor*(ltensionT**2 + lshearT**2)) endif enddo - end subroutine strain_rates_T2 - - -!======================================================================= -#if (1 == 0) - subroutine calc_shearT_DeltaT (shrUij, shrUijm1, & - shrUim1jm1, shrUim1j, & - divT, tensionT, & - shearT, DeltaT ) - - real (kind=dbl_kind), intent(in) :: & - shrUij , & ! shear strain rate at U point (i,j) - shrUijm1 , & ! shear strain rate at U point (i,j-1) - shrUim1jm1, & ! shear strain rate at U point (i-1,j-1) - shrUim1j, & ! shear strain rate at U point (i-1,j) - divT, & - tensionT - - real (kind=dbl_kind), intent(inout):: & - shearT, DeltaT ! strain rates at the T point - - character(len=*), parameter :: subname = '(calc_shearT_DeltaT)' - - ! local variables - real (kind=dbl_kind) :: shearTsqr - - logical (kind=log_kind) :: B2009 - - !----------------------------------------------------------------- - ! strain rates - ! 
NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - - B2009 = .false. - - shearT = ( shrUij + shrUijm1 + shrUim1jm1 + shrUim1j ) / 4d0 - - if (B2009) then - - DeltaT = sqrt(divT**2 + e_factor*(tensionT**2 + shearT**2)) - - else - - shearTsqr = (shrUij**2 + shrUijm1**2 + shrUim1jm1**2 + shrUim1j**2)/4d0 - DeltaT = sqrt(divT**2 + e_factor*(tensionT**2 + shearTsqr)) - - endif + end subroutine strain_rates_T - end subroutine calc_shearT_DeltaT -#endif !======================================================================= ! Compute strain rates at the U point including boundary conditions @@ -2091,115 +1986,6 @@ end subroutine calc_shearT_DeltaT ! Nov 2021 subroutine strain_rates_U (nx_block, ny_block, & - i, j, & - uvelE, vvelE, & - uvelN, vvelN, & - uvelU, vvelU, & - dxE, dyN, & - dxU, dyU, & - ratiodxN, ratiodxNr, & - ratiodyE, ratiodyEr, & - epm, npm, uvm, & - divU, tensionU, & - shearU, DeltaU ) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block ! block dimensions - - integer (kind=int_kind) :: & - i, j ! indices - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvelE , & ! x-component of velocity (m/s) at the E point - vvelE , & ! y-component of velocity (m/s) at the N point - uvelN , & ! x-component of velocity (m/s) at the E point - vvelN , & ! y-component of velocity (m/s) at the N point - uvelU , & ! x-component of velocity (m/s) interp. at U point - vvelU , & ! y-component of velocity (m/s) interp. at U point - dxE , & ! width of E-cell through the middle (m) - dyN , & ! height of N-cell through the middle (m) - dxU , & ! width of U-cell through the middle (m) - dyU , & ! height of U-cell through the middle (m) - ratiodxN , & ! -dxN(i+1,j)/dxN(i,j) for BCs - ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) for BCs - ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) for BCs - ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) for BCs - epm , & ! E-cell mask - npm , & ! N-cell mask - uvm ! U-cell mask - - real (kind=dbl_kind), optional, intent(out):: & - divU, tensionU, shearU, DeltaU ! strain rates at the U point - - ! local variables - - real (kind=dbl_kind) :: & - uNip1j, uNij, vEijp1, vEij, uEijp1, uEij, vNip1j, vNij - - character(len=*), parameter :: subname = '(strain_rates_U)' - - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - - if (present(DeltaU) .or. present(divU) .or. present(tensionU)) then - uNip1j = uvelN(i+1,j) * npm(i+1,j) & - +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * uvelN(i,j) - uNij = uvelN(i,j) * npm(i,j) & - +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * uvelN(i+1,j) - vEijp1 = vvelE(i,j+1) * epm(i,j+1) & - +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * vvelE(i,j) - vEij = vvelE(i,j) * epm(i,j) & - +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * vvelE(i,j+1) - - ! MIGHT NOT NEED TO mult by uvm...if done before in calc of uvelU... - - ! divergence = e_11 + e_22 - divU = dyU(i,j) * ( uNip1j - uNij ) & - + uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & - + dxU(i,j) * ( vEijp1 - vEij ) & - + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) - - ! tension strain rate = e_11 - e_22 - tensionU = dyU(i,j) * ( uNip1j - uNij ) & - - uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & - - dxU(i,j) * ( vEijp1 - vEij ) & - + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) - endif - - if (present(DeltaU) .or. 
present(shearU)) then - uEijp1 = uvelE(i,j+1) * epm(i,j+1) & - +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * uvelE(i,j) - uEij = uvelE(i,j) * epm(i,j) & - +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * uvelE(i,j+1) - vNip1j = vvelN(i+1,j) * npm(i+1,j) & - +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * vvelN(i,j) - vNij = vvelN(i,j) * npm(i,j) & - +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * vvelN(i+1,j) - - ! shearing strain rate = 2*e_12 - shearU = dxU(i,j) * ( uEijp1 - uEij ) & - - uvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & - + dyU(i,j) * ( vNip1j - vNij ) & - - vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) - endif - - if (present(DeltaU)) then - ! Delta (in the denominator of zeta, eta) - DeltaU = sqrt(divU**2 + e_factor*(tensionU**2 + shearU**2)) - endif - - end subroutine strain_rates_U - -!======================================================================= - -! Compute strain rates at the U point including boundary conditions -! -! author: JF Lemieux, ECCC -! Nov 2021 - - subroutine strain_rates_U2 (nx_block, ny_block, & icellu, & indxui, indxuj, & uvelE, vvelE, & @@ -2251,10 +2037,15 @@ subroutine strain_rates_U2 (nx_block, ny_block, & integer (kind=int_kind) :: & ij, i, j ! indices + real (kind=dbl_kind) :: & + ldivU , & + ltensionU , & + lshearU ! local values + real (kind=dbl_kind) :: & uNip1j, uNij, vEijp1, vEij, uEijp1, uEij, vNip1j, vNij - character(len=*), parameter :: subname = '(strain_rates_U2)' + character(len=*), parameter :: subname = '(strain_rates_U)' !----------------------------------------------------------------- ! strain rates @@ -2283,16 +2074,22 @@ subroutine strain_rates_U2 (nx_block, ny_block, & ! MIGHT NOT NEED TO mult by uvm...if done before in calc of uvelU... ! divergence = e_11 + e_22 - divU (i,j) = dyU(i,j) * ( uNip1j - uNij ) & - + uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & - + dxU(i,j) * ( vEijp1 - vEij ) & - + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) + ldivU = dyU(i,j) * ( uNip1j - uNij ) & + + uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & + + dxU(i,j) * ( vEijp1 - vEij ) & + + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) + if (present(divU)) then + divU(i,j) = ldivU + endif ! tension strain rate = e_11 - e_22 - tensionU(i,j) = dyU(i,j) * ( uNip1j - uNij ) & - - uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & - - dxU(i,j) * ( vEijp1 - vEij ) & - + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) + ltensionU = dyU(i,j) * ( uNip1j - uNij ) & + - uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & + - dxU(i,j) * ( vEijp1 - vEij ) & + + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) + if (present(tensionU)) then + tensionU(i,j) = ltensionU + endif endif if (present(DeltaU) .or. present(shearU)) then @@ -2306,99 +2103,24 @@ subroutine strain_rates_U2 (nx_block, ny_block, & +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * vvelN(i+1,j) ! shearing strain rate = 2*e_12 - shearU(i,j) = dxU(i,j) * ( uEijp1 - uEij ) & - - uvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & - + dyU(i,j) * ( vNip1j - vNij ) & - - vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) + lshearU = dxU(i,j) * ( uEijp1 - uEij ) & + - uvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & + + dyU(i,j) * ( vNip1j - vNij ) & + - vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) + if (present(shearU)) then + shearU(i,j) = lshearU + endif endif if (present(DeltaU)) then ! 
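The ldivU/ltensionU/lshearU locals introduced here (and ldivT/ltensionT/lshearT in strain_rates_T) are what make the optional-output interface safe: Fortran forbids referencing an optional dummy argument that is not present, so each quantity is computed into a local and copied out only when the corresponding argument was passed. The same idea in a minimal sketch (hypothetical routine):

      ! Sketch only: compute into a local, copy out only if present.
      subroutine demo_optional_out (a, b)
         use ice_kinds_mod, only: dbl_kind
         real (kind=dbl_kind), intent(in)            :: a
         real (kind=dbl_kind), intent(out), optional :: b
         real (kind=dbl_kind) :: lb        ! local result, always safe
         lb = 2.0_dbl_kind * a
         if (present(b)) b = lb            ! touch b only when it exists
      end subroutine demo_optional_out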
Delta (in the denominator of zeta, eta) - DeltaU(i,j) = sqrt(divU(i,j)**2 + e_factor*(tensionU(i,j)**2 + shearU(i,j)**2)) + DeltaU(i,j) = sqrt(ldivU**2 + e_factor*(ltensionU**2 + lshearU**2)) endif enddo - end subroutine strain_rates_U2 - -!======================================================================= -#if (1 == 0) -! Computes and stores the shear strain rate at U points based on C-grid -! velocity components (uvelE and vvelN) - - subroutine shear_strain_rate_U (nx_block, ny_block, & - icellu, & - indxui, indxuj, & - uvelE, vvelN, & - uvelU, vvelU, & - dxE, dyN, & - dxU, dyU, & - ratiodxN, ratiodxNr, & - ratiodyE, ratiodyEr, & - epm, npm, uvm, & - shrU) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellu ! no. of cells where iceumask = 1 - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvelE , & ! x-component of velocity (m/s) at the E point - vvelN , & ! y-component of velocity (m/s) at the N point - uvelU , & ! x-component of velocity (m/s) at the U point - vvelU , & ! y-component of velocity (m/s) at the U point - dxE , & ! width of E-cell through the middle (m) - dyN , & ! height of N-cell through the middle (m) - dxU , & ! width of U-cell through the middle (m) - dyU , & ! height of U-cell through the middle (m) - ratiodxN , & ! -dxN(i+1,j)/dxN(i,j) factor for BCs across coastline - ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) factor for BCs across coastline - ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) factor for BCs across coastline - ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) factor for BCs across coastline - epm , & ! E-cell mask - npm , & ! N-cell mask - uvm ! U-cell mask - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - shrU ! shear strain rate at U point (m^2/s) - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & - uEijp1, uEij, vNip1j, vNij - - character(len=*), parameter :: subname = '(shear_strain_rate_U)' - - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) - - uEijp1 = uvelE(i,j+1) * epm(i,j+1) & - +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * uvelE(i,j) - uEij = uvelE(i,j) * epm(i,j) & - +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * uvelE(i,j+1) - vNip1j = vvelN(i+1,j) * npm(i+1,j) & - +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * vvelN(i,j) - vNij = vvelN(i,j) * npm(i,j) & - +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * vvelN(i+1,j) - - ! shear strain rate = 2*e_12 - ! NOTE these are actually strain rates * area (m^2/s) - shrU(i,j) = dxU(i,j) * ( uEijp1 - uEij ) & - - uvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & - + dyU(i,j) * ( vNip1j - vNij ) & - - vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) - enddo ! ij + end subroutine strain_rates_U - end subroutine shear_strain_rate_U -#endif !======================================================================= ! Computes viscous coefficients and replacement pressure for stress ! calculations. Note that tensile strength is included here. @@ -2413,8 +2135,8 @@ end subroutine shear_strain_rate_U ! by combining tensile strength and a parameterization for grounded ridges. ! J. Geophys. Res. Oceans, 121, 7354-7368. 
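! For orientation, a minimal sketch of the calculation performed by the
! routine renamed below, assuming the module constants c1, Ktens, epp2i
! and the namelist parameter capping are in scope (capping = 1 recovers
! the capped Hibler 1979 form, capping = 0 the smooth Kreyscher 2000
! form); an illustrative sketch only:
!
!      tmpcalc =     capping *(strength/max(Delta,DminArea)) &
!              + (c1-capping)*(strength/(Delta + DminArea))
!      zetax2  = (c1+Ktens)*tmpcalc         ! 2 x bulk viscosity
!      rep_prs = (c1-Ktens)*tmpcalc*Delta   ! replacement pressure
!      etax2   = epp2i*zetax2               ! 2 x shear viscosity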
- subroutine visccoeff_replpress(strength, DminArea, Delta, & - zetax2, etax2, rep_prs, capping) + subroutine visc_replpress(strength, DminArea, Delta, & + zetax2, etax2, rep_prs, capping) real (kind=dbl_kind), intent(in):: & strength, DminArea @@ -2429,7 +2151,7 @@ subroutine visccoeff_replpress(strength, DminArea, Delta, & real (kind=dbl_kind) :: & tmpcalc - character(len=*), parameter :: subname = '(visccoeff_replpress)' + character(len=*), parameter :: subname = '(visc_replpress)' ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code @@ -2439,19 +2161,19 @@ subroutine visccoeff_replpress(strength, DminArea, Delta, & rep_prs = (c1-Ktens)*tmpcalc*Delta etax2 = epp2i*zetax2 - end subroutine visccoeff_replpress + end subroutine visc_replpress !======================================================================= - subroutine visccoeff_replpress_avgzeta (zetax2T1, zetax2T2, & - zetax2T3, zetax2T4, & - etax2T1, etax2T2, & - etax2T3, etax2T4, & - mask1, mask2, & - mask3, mask4, & - area1, area2, & - area3, area4, & - deltaU, zetax2U, etax2U, rep_prsU) + subroutine visc_replpress_avgzeta (zetax2T1, zetax2T2, & + zetax2T3, zetax2T4, & + etax2T1, etax2T2, & + etax2T3, etax2T4, & + mask1, mask2, & + mask3, mask4, & + area1, area2, & + area3, area4, & + deltaU, zetax2U, etax2U, rep_prsU) real (kind=dbl_kind), intent(in):: & zetax2T1,zetax2T2,zetax2T3,zetax2T4, & @@ -2466,9 +2188,10 @@ subroutine visccoeff_replpress_avgzeta (zetax2T1, zetax2T2, & ! local variables real (kind=dbl_kind) :: & - areatmp + lzetax2U, & ! local variable + areatmp - character(len=*), parameter :: subname = '(visccoeff_replpress_avgzeta)' + character(len=*), parameter :: subname = '(visc_replpress_avgzeta)' ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code @@ -2478,10 +2201,13 @@ subroutine visccoeff_replpress_avgzeta (zetax2T1, zetax2T2, & mask2 * area2) if (present(rep_prsU) .or. present(zetax2U)) then - zetax2U = (mask1 * area1 * zetax2T1 + & - mask4 * area4 * zetax2T4 + & - mask3 * area3 * zetax2T3 + & - mask2 * area2 * zetax2T2) / areatmp + lzetax2U = (mask1 * area1 * zetax2T1 + & + mask4 * area4 * zetax2T4 + & + mask3 * area3 * zetax2T3 + & + mask2 * area2 * zetax2T2) / areatmp + if (present(zetax2U)) then + zetax2U = lzetax2U + endif endif if (present(etax2U)) then @@ -2492,21 +2218,21 @@ subroutine visccoeff_replpress_avgzeta (zetax2T1, zetax2T2, & endif if (present(rep_prsU)) then - rep_prsU = (c1-Ktens)/(c1+Ktens)*zetax2U*deltaU + rep_prsU = (c1-Ktens)/(c1+Ktens)*lzetax2U*deltaU endif - end subroutine visccoeff_replpress_avgzeta + end subroutine visc_replpress_avgzeta !======================================================================= - subroutine visccoeff_replpress_avgstr (strength1, strength2, & - strength3, strength4, & - mask1, mask2, & - mask3, mask4, & - area1, area2, & - area3, area4, & - DminUarea, deltaU, & - zetax2U, etax2U, rep_prsU, capping) + subroutine visc_replpress_avgstr (strength1, strength2, & + strength3, strength4, & + mask1, mask2, & + mask3, mask4, & + area1, area2, & + area3, area4, & + DminUarea, deltaU, & + zetax2U, etax2U, rep_prsU, capping) real (kind=dbl_kind), intent(in):: & strength1,strength2,strength3,strength4, & @@ -2521,7 +2247,7 @@ subroutine visccoeff_replpress_avgstr (strength1, strength2, & real (kind=dbl_kind) :: & areatmp, strtmp ! area and strength average - character(len=*), parameter :: subname = '(visccoeff_replpress_avgstr)' + character(len=*), parameter :: subname = '(visc_replpress_avgstr)' ! NOTE: for comp. 
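efficiency 2 x zeta and 2 x eta are used in the code

! For orientation, a sketch of the "avg_strength" averaging this routine
! performs (the C2 approach of Kimmritz et al. 2016, selected with
! visc_coeff_method = 'avg_strength'): the T-cell ice strength is first
! averaged to the U point with land-masked area weights, and the
! viscosities are then computed directly at U. An illustrative sketch,
! assuming cells 1-4 are the four T cells around the U point as in the
! argument list above:
!
!      areatmp = (mask1 * area1 + mask2 * area2 + &
!                 mask3 * area3 + mask4 * area4)
!      strtmp  = (mask1 * area1 * strength1 + &
!                 mask2 * area2 * strength2 + &
!                 mask3 * area3 * strength3 + &
!                 mask4 * area4 * strength4) / areatmp
!      call visc_replpress (strtmp, DminUarea, deltaU, &
!                           zetax2U, etax2U, rep_prsU, capping)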
@@ -2535,10 +2261,10 @@ subroutine visccoeff_replpress_avgstr (strength1, strength2, &
                  mask3 * area3 * strength3 + &
                  mask2 * area2 * strength2) / areatmp
 
-      call visccoeff_replpress (strtmp, DminUarea, deltaU, &
-                                zetax2U, etax2U, rep_prsU, capping)
+      call visc_replpress (strtmp, DminUarea, deltaU, &
+                           zetax2U, etax2U, rep_prsU, capping)
 
-      end subroutine visccoeff_replpress_avgstr
+      end subroutine visc_replpress_avgstr
 
 !=======================================================================
 
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
index 17de3bca8..491ba2b6e 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
@@ -1157,7 +1157,7 @@ subroutine calc_zeta_dPr (nx_block, ny_block, &
                                 zetax2  , etax2   , &
                                 rep_prs , stPr)
 
-      use ice_dyn_shared, only: strain_rates, visccoeff_replpress, &
+      use ice_dyn_shared, only: strain_rates, visc_replpress, &
           capping
 
       integer (kind=int_kind), intent(in) :: &
@@ -1240,22 +1240,22 @@ subroutine calc_zeta_dPr (nx_block, ny_block, &
          ! viscous coefficients and replacement pressure
          !-----------------------------------------------------------------
 
-            call visccoeff_replpress (strength(i,j) , DminTarea(i,j) , &
-                                      Deltane       , zetax2 (i,j,1), &
-                                      etax2 (i,j,1), rep_prs (i,j,1), &
-                                      capping)
-            call visccoeff_replpress (strength(i,j) , DminTarea(i,j) , &
-                                      Deltanw       , zetax2 (i,j,2), &
-                                      etax2 (i,j,2), rep_prs (i,j,2), &
-                                      capping)
-            call visccoeff_replpress (strength(i,j) , DminTarea(i,j) , &
-                                      Deltasw       , zetax2 (i,j,3), &
-                                      etax2 (i,j,3), rep_prs (i,j,3), &
-                                      capping)
-            call visccoeff_replpress (strength(i,j) , DminTarea(i,j) , &
-                                      Deltase       , zetax2 (i,j,4), &
-                                      etax2 (i,j,4), rep_prs (i,j,4), &
-                                      capping)
+            call visc_replpress (strength(i,j) , DminTarea(i,j) , &
+                                 Deltane       , zetax2 (i,j,1), &
+                                 etax2 (i,j,1), rep_prs (i,j,1), &
+                                 capping)
+            call visc_replpress (strength(i,j) , DminTarea(i,j) , &
+                                 Deltanw       , zetax2 (i,j,2), &
+                                 etax2 (i,j,2), rep_prs (i,j,2), &
+                                 capping)
+            call visc_replpress (strength(i,j) , DminTarea(i,j) , &
+                                 Deltasw       , zetax2 (i,j,3), &
+                                 etax2 (i,j,3), rep_prs (i,j,3), &
+                                 capping)
+            call visc_replpress (strength(i,j) , DminTarea(i,j) , &
+                                 Deltase       , zetax2 (i,j,4), &
+                                 etax2 (i,j,4), rep_prs (i,j,4), &
+                                 capping)
 
          !-----------------------------------------------------------------
          ! the stresses                                       ! kg/s^2

From c8644731c4b110bc1f3a36717a52b86935cd1f89 Mon Sep 17 00:00:00 2001
From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com>
Date: Thu, 17 Mar 2022 16:12:27 +0000
Subject: [PATCH 090/109] Added comments: ref of Bouillon 2009, 2013, Hunke 2002 and Kimmritz 2016 (#75)

* Adding ref of Bouillon and Kimmritz

* Still in the process of adding the refs

* Also added Bouillon 2009 and Hunke 2002 to div_stress subroutine

* Done adding these refs

Co-authored-by: Tony Craig
---
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90    | 30 ++++++++++++++++++-
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 13 ++++++++
 2 files changed, 42 insertions(+), 1 deletion(-)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index 18396088a..a447e47e6 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -1584,6 +1584,12 @@ end subroutine stress
 ! updated: D. Bailey, NCAR
 !          Nov 2021
 
+! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The
+! elastic-viscous-plastic method revisited. Ocean Model., 71, 2-12.
+
+! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method
+! for solving the sea ice momentum equation. Ocean Model., 101, 59-67.
+
       subroutine stressC_T (nx_block, ny_block , &
                             icellt  ,            &
                             indxti  , indxtj ,   &
@@ -1658,6 +1664,11 @@ subroutine stressC_T (nx_block, ny_block , &
          i = indxti(ij)
          j = indxtj(ij)
 
+         !-----------------------------------------------------------------
+         ! Square of shear strain rate at T obtained from interpolation of
+         ! U point values (Bouillon et al., 2013, Kimmritz et al., 2016)
+         !-----------------------------------------------------------------
+
          shearTsqr = (shrU(i,j)**2 + shrU(i,j-1)**2 + shrU(i-1,j-1)**2 + shrU(i-1,j)**2)/4d0
          DeltaT = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearTsqr))
 
@@ -1693,6 +1704,12 @@ end subroutine stressC_T
 ! author: JF Lemieux, ECCC
 !         Nov 2021
 
+! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The
+! elastic-viscous-plastic method revisited. Ocean Model., 71, 2-12.
+
+! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method
+! for solving the sea ice momentum equation. Ocean Model., 101, 59-67.
+
       subroutine stressC_U (nx_block, ny_block, &
                             icellu,             &
                             indxui  , indxuj,   &
@@ -1790,7 +1807,9 @@ subroutine stressC_U (nx_block, ny_block, &
          j = indxuj(ij)
 
          !-----------------------------------------------------------------
-         ! viscous coefficients and replacement pressure at U point
+         ! viscosities and replacement pressure at U point
+         ! avg_zeta: Bouillon et al. 2013, C1 method of Kimmritz et al. 2016
+         ! avg_strength: C2 method of Kimmritz et al. 2016
          !-----------------------------------------------------------------
 
          if (visc_coeff_method == 'avg_zeta') then
@@ -2101,6 +2120,15 @@ end subroutine stressCD_U
 ! author: JF Lemieux, ECCC
 !         Nov 2021
 
+! Hunke, E. C., and J. K. Dukowicz (2002). The Elastic-Viscous-Plastic
+! Sea Ice Dynamics Model in General Orthogonal Curvilinear Coordinates
+! on a Sphere - Incorporation of Metric Terms. Mon. Weather Rev.,
+! 130, 1848-1865.
+
+! Bouillon, S., M. Morales Maqueda, V. Legat and T. Fichefet (2009). An
+! elastic-viscous-plastic sea ice model formulated on Arakawa B and C grids.
+! Ocean Model., 27, 174-184.
+
       subroutine div_stress (nx_block, ny_block, &
                              icell,              &
                              indxi, indxj,       &
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
index 61631fa95..8b52159e3 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
@@ -2165,6 +2165,14 @@ end subroutine visc_replpress
 
 !=======================================================================
 
+! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The
+! elastic-viscous-plastic method revisited. Ocean Model., 71, 2-12.
+
+! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method
+! for solving the sea ice momentum equation. Ocean Model., 101, 59-67.
+
+! avg_zeta: Bouillon et al. 2013, C1 method of Kimmritz et al. 2016
+
       subroutine visc_replpress_avgzeta (zetax2T1, zetax2T2, &
                                          zetax2T3, zetax2T4, &
                                          etax2T1, etax2T2, &
@@ -2225,6 +2233,11 @@ end subroutine visc_replpress_avgzeta
 
 !=======================================================================
 
+! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method
+! for solving the sea ice momentum equation. Ocean Model., 101, 59-67.
+
+! avg_strength: C2 method of Kimmritz et al. 
2016 + subroutine visc_replpress_avgstr (strength1, strength2, & strength3, strength4, & mask1, mask2, & From 5f3ea0cec1ac7687b93b3acf1628255cc7b18d41 Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Thu, 17 Mar 2022 10:41:57 -0600 Subject: [PATCH 091/109] Update master_list.bib (#77) --- doc/source/master_list.bib | 240 ++++++++++++++++++++----------------- 1 file changed, 128 insertions(+), 112 deletions(-) diff --git a/doc/source/master_list.bib b/doc/source/master_list.bib index 295f5df9d..9b4a2f672 100644 --- a/doc/source/master_list.bib +++ b/doc/source/master_list.bib @@ -77,7 +77,7 @@ @incollection{Assur58 volume = {598}, pages = {106-138} } -@Article{Schwarzacher59 +@Article{Schwarzacher59, author = "W. Schwarzacher", title = "{Pack ice studies in the Arctic Ocean}", journal = JGR, @@ -86,7 +86,7 @@ @Article{Schwarzacher59 pages = {2357-2367}, url = {http://dx.doi.org/10.1029/JZ064i012p02357} } -@Article{Untersteiner64 +@Article{Untersteiner64, author = "N. Untersteiner", title = "{Calculations of temperature regime and heat budget of sea ice in the Central Arctic}", journal = JGR, @@ -105,7 +105,7 @@ @incollection{Ono67 volume = "I", pages = "599--610" } -@Article{Maykut71 +@Article{Maykut71, author = "G.A. Maykut and N. Untersteiner", title = "{Some results from a time dependent thermodynamic model of sea ice}", journal = JGR, @@ -114,7 +114,7 @@ @Article{Maykut71 pages = {1550-1575}, url = {http://dx.doi.org/10.1029/JC076i006p01550} } -@Book{Stroud71 +@Book{Stroud71, author = "A.H. Stroud", title = "{Approximate Calculation of Multiple Integrals}", publisher = "Prentice-Hall", @@ -122,7 +122,7 @@ @Book{Stroud71 year = {1971}, pages = {431 pp}, } -@Article{Arya75 +@Article{Arya75, author = "S.P.S. Arya", title = "{A drag partition theory for determining the large-scale roughness parameter and wind stress on the Arctic pack ice}", journal = JGR, @@ -131,7 +131,7 @@ @Article{Arya75 pages = {3447-3454}, url = {http://dx.doi.org/10.1029/JC080i024p03447} } -@Article{Rothrock75 +@Article{Rothrock75, author = "D.A. Rothrock", title = "{The energetics of plastic deformation of pack ice by ridging}", journal = JGR, @@ -140,7 +140,7 @@ @Article{Rothrock75 pages = {4514-4519}, url = {http://dx.doi.org/10.1029/JC080i033p04514} } -@Article{Thorndike75 +@Article{Thorndike75, author = "A.S. Thorndike and D.A. Rothrock and G.A. Maykut and R. Colony", title = "{The thickness distribution of sea ice}", journal = JGR, @@ -149,7 +149,7 @@ @Article{Thorndike75 pages = {4501-4513}, url = {http://dx.doi.org/10.1029/JC080i033p04501} } -@Article{Semtner76 +@Article{Semtner76, author = "A.J. Semtner", title = "{A Model for the Thermodynamic Growth of Sea Ice in Numerical Investigations of Climate}", journal = JPO, @@ -158,7 +158,7 @@ @Article{Semtner76 pages = {379-389}, url = {http://dx.doi.org/10.1175/1520-0485(1976)006<0379:AMFTTG>2.0.CO;2} } -@Article{Hibler79 +@Article{Hibler79, author = "W.D. Hibler", title = "{A dynamic thermodynamic sea ice model}", journal = JPO, @@ -167,7 +167,7 @@ @Article{Hibler79 pages = {817-846}, url = {http://dx.doi.org/10.1175/1520-0485(1979)009<0815:ADTSIM>2.0.CO;2} } -@Article{Parkinson79 +@Article{Parkinson79, author = "C.L. Parkinson and W.M. Washington", title = "{A large-scale numerical model of sea ice}", journal = JGRO, @@ -177,7 +177,7 @@ @Article{Parkinson79 pages = {331-337}, url = {http://dx.doi.org/10.1029/JC084iC01p00311} } -@Article{Zalesak79 +@Article{Zalesak79, author = "S. T. 
Zalesak", title = "{Fully multidimensional flux-corrected transport algorithms for fluids}", journal = JCP, @@ -187,7 +187,7 @@ @Article{Zalesak79 pages = {335-362}, url = {http://dx.doi.org/10.1016/0021-9991(79)90051-2} } -@Article{Hibler80 +@Article{Hibler80, author = "W.D. Hibler", title = "{Modeling a variable thickness sea ice cover}", journal = MWR, @@ -196,7 +196,7 @@ @Article{Hibler80 pages = {1943-1973}, url = {http://dx.doi.org/10.1175/1520-0493(1980)108<1943:MAVTSI>2.0.CO;2} } -@Article{Maykut82 +@Article{Maykut82, author = "G.A. Maykut", title = "{Large-scale heat exchange and ice production in the central Arctic}", journal = JGRO, @@ -205,7 +205,7 @@ @Article{Maykut82 pages = {7971-7984}, url = {http://dx.doi.org/10.1029/JC087iC10p07971} } -@incollection{Siedler86 +@incollection{Siedler86, author = "G. Siedler and H. Peters", title = "Physical properties (general) of sea water", booktitle = "Landolt-Börnstein: Numerical data and functional relationships in science and technology, New Series V/3a", @@ -213,7 +213,7 @@ @incollection{Siedler86 year = {1986}, pages = {233-264}, } -@Article{Hibler87 +@Article{Hibler87, author = "W.D. Hibler and K. Bryan", title = "{A diagnostic ice-ocean model}", journal = JPO, @@ -222,7 +222,7 @@ @Article{Hibler87 pages = {987-1015}, url = {http://dx.doi.org/10.1175/1520-0485(1987)017<0987:ADIM>2.0.CO;2} } -@Article{Maykut87 +@Article{Maykut87, author = "G.A. Maykut and D.K. Perovich", title = "{The role of shortwave radiation in the summer decay of a sea ice cover}", journal = JGRO, @@ -231,7 +231,7 @@ @Article{Maykut87 pages = {7032-7044}, url = {http://dx.doi.org/10.1029/JC092iC07p07032} } -@Article{Rosati88 +@Article{Rosati88, author = "A. Rosati and K. Miyakoda", title = "{A general circulation model for upper ocean simulation}", journal = JPO, @@ -240,7 +240,7 @@ @Article{Rosati88 pages = {1601-1626}, url = {http://dx.doi.org/10.1175/1520-0485(1988)018<1601:AGCMFU>2.0.CO;2} } -@Article{Steele92 +@Article{Steele92, author = "M. Steele", title = "{Sea ice melting and floe geometry in a simple ice-ocean model}", journal = JGRO, @@ -249,7 +249,7 @@ @Article{Steele92 pages = {17729-17738}, url = {http://dx.doi.org/10.1029/92JC01755} } -@Article{Smith92 +@Article{Smith92, author = "R.D. Smith and J.K. Dukowicz and R.C. Malone", title = "{Parallel ocean general circulation modeling}", journal = PHYS, @@ -259,7 +259,7 @@ @Article{Smith92 pages = {38-61}, url = {http://dx.doi.org/10.1016/0167-2789(92)90225-C} } -@Article{Arrigo93 +@Article{Arrigo93, author = "K.R. Arrigo and J.N. Kremer and C.W. Sullivan", title = "{A simulated Antarctic fast ice ecosystem}", journal = JGRO, @@ -268,7 +268,7 @@ @Article{Arrigo93 pages = {6929-6946}, url = {http://dx.doi.org/10.1029/93JC00141} } -@Article{Dukowicz93 +@Article{Dukowicz93, author = "J.K. Dukowicz and R.D. Smith and R.C. Malone", title = "{A reformulation and implementation of the Bryan-Cox-Semtner ocean model on the connection machine}", journal = JTECH, @@ -278,7 +278,18 @@ @Article{Dukowicz93 pages = {195-208}, url = {http://dx.doi.org/10.1175/1520-0426(1993)010<0195:ARAIOT>2.0.CO;2} } -@Article{Dukowicz94 +@Article{Saad93, + author = "Y. Saad", + title = "{A Flexible Inner-Outer Preconditioned GMRES Algorithm}", + journal = SIAMJCP, + volume = {14}, + number = {2}, + year = {1993}, + pages = {461-469}, + doi = {10.1137/0914028}, + URL = {https://doi.org/10.1137/0914028} +} +@Article{Dukowicz94, author = "J.K. Dukowicz and R.D. Smith and R.C. 
Malone", title = "{Implicit free-surface method for the Bryan-Cox-Semtner ocean model}", journal = JGRO, @@ -288,7 +299,7 @@ @Article{Dukowicz94 pages = {7991-8014}, url = {http://dx.doi.org/10.1029/93JC03455} } -@Article{Ebert95 +@Article{Ebert95, author = "E.E. Ebert and J.L. Schramm and J.A. Curry", title = "{Disposition of solar radiation in sea ice and the upper ocean}", journal = JGRO, @@ -297,7 +308,7 @@ @Article{Ebert95 pages = {15965-15975}, url = {http://dx.doi.org/10.1029/95JC01672} } -@Article{Flato95 +@Article{Flato95, author = "G.M. Flato and W.D. Hibler", title = "{Ridging and strength in modeling the thickness distribution of Arctic sea ice}", journal = JGRO, @@ -306,7 +317,7 @@ @Article{Flato95 pages = {18611-18626}, url = {http://dx.doi.org/10.1029/95JC02091} } -@Article{Maykut95 +@Article{Maykut95, author = "G.A. Maykut and M.G. McPhee", title = "{Solar heating of the Arctic mixed layer}", journal = JGRO, @@ -315,7 +326,7 @@ @Article{Maykut95 pages = {24691-24703}, url = {http://dx.doi.org/10.1029/95JC02554} } -@Manual{Smith95 +@Manual{Smith95, author = "R.D. Smith and S. Kortas and B. Meltz", title = "{Curvilinear coordinates for global ocean models}", organization = "Technical Report LA-UR-95-1146, Los Alamos National Laboratory", @@ -332,7 +343,7 @@ @Article{Zwiers95 pages = {336-351}, url = {http://dx.doi.org/10.1175/1520-0442(1995)008<0336:TSCIAI>2.0.CO;2} } -@Article{Murray96 +@Article{Murray96, author = "R.J. Murray", title = "{Explicit generation of orthogonal grids for ocean models}", journal = JCT, @@ -341,7 +352,7 @@ @Article{Murray96 pages = {251-273}, url = {http://dx.doi.org/10.1006/jcph.1996.0136} } -@Article{Hunke97 +@Article{Hunke97, author = "E.C. Hunke and J.K. Dukowicz", title = "{An elastic-viscous-plastic model for sea ice dynamics}", journal = JPO, @@ -350,7 +361,7 @@ @Article{Hunke97 pages = {1849-1867}, url = {http://dx.doi.org/10.1175/1520-0485(1997)027<1849:AEVPMF>2.0.CO;2} } -@Article{Steele97 +@Article{Steele97, author = "M. Steele and J. Zhang and D. Rothrock and H. Stern", title = "{The force balance of sea ice in a numerical model of the Arctic Ocean}", journal = JGRO, @@ -360,7 +371,7 @@ @Article{Steele97 pages = {21061-21079}, url = {http://dx.doi.org/10.1029/97JC01454} } -@Article{Geiger98 +@Article{Geiger98, author = "C.A. Geiger and W.D. Hibler and S.F. Ackley", title = "{Large-scale sea ice drift and deformation: Comparison between models and observations in the western Weddell Sea during 1992}", journal = JGRO, @@ -370,7 +381,7 @@ @Article{Geiger98 pages = {21893-21913}, url = {http://dx.doi.org/10.1029/98JC01258} } -@Book{Lipscomb98 +@Book{Lipscomb98, author = "W.H. Lipscomb", title = "{Modeling the Thickness Distribution of Arctic Sea Ice}", publisher = "Dept. of Atmospheric Sciences University of Washington, Seattle", @@ -378,7 +389,7 @@ @Book{Lipscomb98 year = {1998}, url = {http://hdl.handle.net/1773/10081} } -@Article{Bitz99 +@Article{Bitz99, author = "C.M. Bitz and W.H. Lipscomb", title = "{An energy-conserving thermodynamic sea ice model for climate study}", journal = JGRO, @@ -388,7 +399,7 @@ @Article{Bitz99 pages = {15669-15677}, url = {http://dx.doi.org/10.1029/1999JC900100} } -@Article{Hunke99 +@Article{Hunke99, author = "E.C. Hunke and Y. Zhang", title = "{A comparison of sea ice dynamics models at high resolution}", journal = MWR, @@ -397,7 +408,7 @@ @Article{Hunke99 pages = {396-408}, url = {http://dx.doi.org/10.1175/1520-0493(1999)127<0396:ACOSID>2.0.CO;2} } -@Article{Jordan99 +@Article{Jordan99, author = "R.E. 
Jordan and E.L. Andreas and A.P. Makshtas", title = "{Heat budget of snow-covered sea ice at North Pole 4}", journal = JGRO, @@ -407,7 +418,7 @@ @Article{Jordan99 pages = {7785-7806}, url = {http://dx.doi.org/10.1029/1999JC900011} } -@Book{vonstorch99 +@Book{vonstorch99, author = "H. von Storch and F.W. Zwiers", title = "{Statistical Analysis in Climate Research}", publisher = "Cambridge University Press", @@ -415,7 +426,7 @@ @Book{vonstorch99 year = {1999}, pages = {484 pp}, } -@Article{Dukowicz00 +@Article{Dukowicz00, author = "J.K. Dukowicz and J.R. Baumgardner", title = "{Incremental remapping as a transport/advection algorithm}", journal = JCT, @@ -424,7 +435,7 @@ @Article{Dukowicz00 pages = {318-335}, url = {http://dx.doi.org/10.1006/jcph.2000.6465} } -@Article{Bitz01 +@Article{Bitz01, author = "C.M. Bitz and M.M. Holland and M. Eby and A.J. Weaver", title = "{Simulating the ice-thickness distribution in a coupled climate model}", journal = JGRO, @@ -433,7 +444,7 @@ @Article{Bitz01 pages = {2441-2463}, url = {http://dx.doi.org/10.1029/1999JC000113} } -@Article{Hunke01 +@Article{Hunke01, author = "E.C. Hunke", title = "{Viscous-plastic sea ice dynamics with the EVP model: Linearization issues}", journal = JCP, @@ -442,7 +453,7 @@ @Article{Hunke01 pages = {18-38}, url = {http://dx.doi.org/10.1006/jcph.2001.6710} } -@Article{Lipscomb01 +@Article{Lipscomb01, author = "W.H. Lipscomb", title = "{Remapping the thickness distribution in sea ice models}", journal = JGRO, @@ -451,7 +462,7 @@ @Article{Lipscomb01 pages = {13989-14000}, url = {http://dx.doi.org/10.1029/2000JC000518} } -@Article{He01 +@Article{He01, author = "Y. He and C.H.Q. Ding", title = "{Using Accurate Arithmetics to Improve Numerical Reproducibility and Stability in Parallel Applications}", journal = JOS, @@ -461,7 +472,7 @@ @Article{He01 pages = {259-277}, url = {http://dx.doi.org/10.1023/A:1008153532043} } -@Article{Schulson01 +@Article{Schulson01, author = "E.M. Schulson", title = "{Brittle failure of ice}", journal = EFM, @@ -480,7 +491,7 @@ @Article{Taylor01 pages = {7183-7192}, url = {http://dx.doi.org/10.1029/2000JD900719} } -@Article{Trodahl01 +@Article{Trodahl01, author = "H.J. Trodahl and S.O.F. Wilkinson and M.J. McGuinness and T.G. Haskeel", title = "{Thermal conductivity of sea ice: dependence on temperature and depth}", journal = GRL, @@ -489,7 +500,7 @@ @Article{Trodahl01 pages = {1279-1282}, url = {http://dx.doi.org/10.1029/2000GL012088} } -@Article{Hunke02 +@Article{Hunke02, author = "E.C. Hunke and J.K. Dukowicz", title = "{The Elastic-Viscous-Plastic sea ice dynamics model in general orthogonal curvilinear coordinates on a sphere—Effect of metric terms}", journal = MWR, @@ -498,21 +509,21 @@ @Article{Hunke02 pages = {1848-1865}, url = {http://dx.doi.org/10.1175/1520-0493(2002)130<1848:TEVPSI>2.0.CO;2} } -@Manual{Kauffman02 +@Manual{Kauffman02, author = "B.G. Kauffman and W.G. Large", title = "{The CCSM coupler, version 5.0.1}", journal = NTN, year = {2002}, url = {https://github.com/CICE-Consortium/CICE/blob/master/doc/PDF/KL_NCAR2002.pdf} } -@Manual{Hunke03 +@Manual{Hunke03, author = "E.C. Hunke and J.K. Dukowicz", title = "{The sea ice momentum equation in the free drift regime}", organization = "Technical Report LA-UR-03-2219, Los Alamos National Laboratory", year = {2003}, url = {https://github.com/CICE-Consortium/CICE/blob/master/doc/PDF/LAUR-03-2219.pdf} } -@Article{Amundrud04 +@Article{Amundrud04, author = "T.L. Amundrud and H. Malling and R.G. 
Ingram", title = "{Geometrical constraints on the evolution of ridged sea ice}", journal = JGRO, @@ -521,7 +532,7 @@ @Article{Amundrud04 issue = {C6}, url = {http://dx.doi.org/10.1029/2003JC002251} } -@Article{Connolley04 +@Article{Connolley04, author = "W.M. Connolley and J.M. Gregory and E.C. Hunke and A.J. McLaren", title = "{On the consistent scaling of terms in the sea ice dynamics equation}", journal = JPO, @@ -530,7 +541,7 @@ @Article{Connolley04 pages = {1776-1780}, url = {http://dx.doi.org/10.1175/1520-0485(2004)034<1776:OTCSOT>2.0.CO;2} } -@Article{Eicken04 +@Article{Eicken04, author = "H. Eicken and T.C. Grenfell and D.K. Perovich and J.A Richter-Menge and K. Frey", title = "{Hydraulic controls of summer Arctic pack ice albedo}", journal = JGRO, @@ -539,7 +550,7 @@ @Article{Eicken04 issue = {C8}, url = {http://dx.doi.org/10.1029/2003JC001989} } -@Article{Lipscomb04 +@Article{Lipscomb04, author = "W.H. Lipscomb and E.C. Hunke", title = "{Modeling sea ice transport using incremental remapping}", journal = MWR, @@ -548,7 +559,7 @@ @Article{Lipscomb04 pages = {1341-1354}, url = {http://dx.doi.org/10.1175/1520-0493(2004)132<1341:MSITUI>2.0.CO;2} } -@Article{Taylor04 +@Article{Taylor04, author = "P.D. Taylor and D.L. Feltham", title = "{A model of melt pond evolution on sea ice}", journal = JGRO, @@ -557,7 +568,7 @@ @Article{Taylor04 issue = {C12}, url = {http://dx.doi.org/10.1029/2004JC002361} } -@Article{Wilchinsky04 +@Article{Wilchinsky04, author = "A.V. Wilchinsky and D.L. Feltham", title = "{Dependence of sea ice yield-curve shape on ice thickness}", journal = JPO, @@ -575,7 +586,7 @@ @Article{Lavoie05 issue = {C11}, url = {http://dx.doi.org/10.1029/2005JC002922} } -@Book{Notz05 +@Book{Notz05, author = "D. Notz", title = "Thermodynamic and Fluid-Dynamical Processes in Sea Ice", publisher = "University of Cambridge, UK", @@ -583,7 +594,7 @@ @Book{Notz05 year = {2005}, url = {http://ulmss-newton.lib.cam.ac.uk/vwebv/holdingsInfo?bibId=27224} } -@Article{Feltham06 +@Article{Feltham06, author = "D.L. Feltham and N. Untersteiner and J.S. Wettlaufer and M.G. Worster", title = "{Sea ice is a mushy layer}", journal = GRL, @@ -618,7 +629,7 @@ @Article{Jin06 pages = {63-72}, url = {https://github.com/CICE-Consortium/CICE/blob/master/doc/PDF/JDWSTWLG06.pdf} } -@Article{Wilchinsky06 +@Article{Wilchinsky06, author = "A.V. Wilchinsky and D.L. Feltham", title = "{Modelling the rheology of sea ice as a collection of diamond-shaped floes}", journal = JNON, @@ -627,7 +638,7 @@ @Article{Wilchinsky06 pages = {22-32}, url = {http://dx.doi.org/10.1016/j.jnnfm.2006.05.001} } -@Book{Wilks06 +@Book{Wilks06, author = "D.S. Wilks", title = "{Statistical methods in the atmospheric sciences}", publisher = "Academic Press", @@ -635,14 +646,14 @@ @Book{Wilks06 year = {2006}, pages = {627 pp}, } -@Manual{Briegleb07 +@Manual{Briegleb07, author = "B.P. Briegleb and B. Light", title = "{A Delta-Eddington multiple scattering parameterization for solar radiation in the sea ice component of the Community Climate System Model}", organization = "NCAR Technical Note NCAR/TN-472+STR, National Center for Atmospheric Research", year = {2007}, url = {https://github.com/CICE-Consortium/CICE/blob/master/doc/PDF/BL_NCAR2007.pdf} } -@Article{Flocco07 +@Article{Flocco07, author = "D. Flocco and D.L. 
Feltham", title = "{A continuum model of melt pond evolution on Arctic sea ice}", journal = JGRO, @@ -651,7 +662,7 @@ @Article{Flocco07 number = {C8}, url = {http://dx.doi.org/10.1029/2006JC003836} } -@Article{Golden07 +@Article{Golden07, author = "K.M. Golden and H. Eicken and A.L. Heaton and J. Miner and D.J. Pringle and J. Zhu", title = "{Thermal evolution of permeability and microstructure in sea ice}", journal = GRL, @@ -660,7 +671,7 @@ @Article{Golden07 issue = {16}, url = {http://dx.doi.org/10.1029/2007GL030447} } -@Article{Hunke07 +@Article{Hunke07, author = "E. Hunke and M.M. Holland", title = "{Global atmospheric forcing data for Arctic ice-ocean modeling}", journal = JGRO, @@ -669,7 +680,7 @@ @Article{Hunke07 number = {C4}, url = {http://dx.doi.org/10.1029/2006JC003640} } -@Article{Lipscomb07 +@Article{Lipscomb07, author = "W.H. Lipscomb and E.C. Hunke and W. Maslowski and J. Jakacki", title = "{Ridging, strength, and stability in high-resolution sea ice models}", journal = JGRO, @@ -678,7 +689,7 @@ @Article{Lipscomb07 issue = {C3}, url = {http://dx.doi.org/10.1029/2005JC003355} } -@Article{Pringle07 +@Article{Pringle07, author = "D.J. Pringle and H. Eicken and H.J. Trodahl and L.G.E. Backstrom", title = "{Thermal conductivity of landfast Antarctic and Arctic sea ice}", journal = JGRO, @@ -687,7 +698,7 @@ @Article{Pringle07 issue = {C4}, url = {http://dx.doi.org/10.1029/2006JC003641} } -@Article{Stefels07 +@Article{Stefels07, author = "J. Stefels and M. Steinke and S. Turner and G. Malin and S. Belviso", title = "{Environmental constraints on the production and removal of the climatically active gas dimethylsulphide (DMS) and implications for ecosystem modelling}", journal = BGC, @@ -696,7 +707,29 @@ @Article{Stefels07 pages = {245-275}, url = {http://dx.doi.org/10.1007/978-1-4020-6214-8_18} } -@Article{Hunke09 +@Article{Lemieux08, + author = "J.-F. Lemieux and B. Tremblay and S. Thomas and J. Sedláček and L. A. Mysak", + title = "{Using the preconditioned Generalized Minimum RESidual (GMRES) method to solve the sea-ice momentum equation}", + journal = JGRO, + volume = {113}, + number = {C10}, + pages = {}, + keywords = {Sea ice, GMRES, Krylov subspace}, + doi = {10.1029/2007JC004680}, + url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2007JC004680}, + eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2007JC004680}, + year = {2008} +} +@Article{Bouillon09, + author = "S. Bouillon and T. Fichefet and V. Legat and G. Madec", + title = "{An elastic-viscous-plastic sea ice model formulated on Arakawa B and C grids}", + journal = OM, + year = {2009}, + volume = {27}, + pages = {174-184}, + url = {https://doi.org/10.1016/j.ocemod.2009.01.004} +} +@Article{Hunke09, author = "E.C. Hunke and C.M. Bitz", title = "{Age characteristics in a multidecadal Arctic sea ice simulation}", journal = JGRO, @@ -705,7 +738,7 @@ @Article{Hunke09 issue = {CB}, url = {http://dx.doi.org/10.1029/2008JC005186} } -@Article{Large09 +@Article{Large09, author = "W.G. Large and S.G. Yeager", title = "{The global climatology of an interannually varying air-sea flux data set}", journal = OM, @@ -722,7 +755,7 @@ @Article{Tagliabue09 issue = {13}, url = {http://dx.doi.org/10.1029/2009GL038914} } -@Article{Weiss09 +@Article{Weiss09, author = "J. Weiss and E.M. 
Schulson", title = "{Coulombic faulting from the grain scale to the geophysical scale: lessons from ice}", journal = JPD, @@ -731,7 +764,7 @@ @Article{Weiss09 pages = {214017}, url = {http://dx.doi.org/10.1088/0022-3727/42/21/214017} } -@Article{Flocco10 +@Article{Flocco10, author = "D. Flocco and D.L. Feltham and A.K. Turner", title = "{Incorporation of a physically based melt pond scheme into the sea ice component of a climate model}", journal = JGRO, @@ -740,7 +773,7 @@ @Article{Flocco10 number = {C8}, url = {http://dx.doi.org/10.1029/2009JC005568} } -@Article{Konig10 +@Article{Konig10, author = "C. Konig Beatty and D.M. Holland", title = "{Modeling landfast ice by adding tensile strength}", journal = JPO, @@ -749,7 +782,7 @@ @Article{Konig10 pages = {185-198}, url = {http://dx.doi.org/10.1175/2009JPO4105.1} } -@Article{Armour11 +@Article{Armour11, author = "K.C. Armour and C.M. Bitz and L. Thompson and E.C. Hunke", title = "{Controls on Arctic sea ice from first-year and multi-year ice survivability}", journal = JC, @@ -758,7 +791,7 @@ @Article{Armour11 pages = {2378-2390}, url = {http://dx.doi.org/10.1175/2010JCLI3823.1} } -@Article{Deal11 +@Article{Deal11, author = "C. Deal and M. Jin and S. Elliott and E. Hunke and M. Maltrud and N. Jeffery", title = "{Large scale modeling of primary production and ice algal biomass within Arctic sea ice in 1992}", journal = JGRO, @@ -767,7 +800,7 @@ @Article{Deal11 issue = {C7}, url = {http://dx.doi.org/10.1029/2010JC006409} } -@Article{Lu11 +@Article{Lu11, author = "P. Lu and Z. Li and B. Cheng and M. Lepp{\"{a}}ranta", title = "{A parametrization fo the ice-ocean drag coefficient}", journal = JGRO, @@ -776,7 +809,7 @@ @Article{Lu11 number = {C7}, url = {http://dx.doi.org/10.1029/2010JC006878} } -@Article{Elliott12 +@Article{Elliott12, author = "S. Elliott and C. Deal and G. Humphries and E. Hunke and N. Jeffery and M. Jin and M. Levasseur and J. Stefels", title = "{Pan-Arctic simulation of coupled nutrient-sulfur cycling due to sea ice biology: Preliminary results}", journal = JGRB, @@ -785,7 +818,7 @@ @Article{Elliott12 issue = {G1}, url = {http://dx.doi.org/10.1029/2011JG001649} } -@Article{Flocco12 +@Article{Flocco12, author = "D. Flocco and D. Schroeder and D.L. Feltham and E.C. Hunke", title = "{Impact of melt ponds on Arctic sea ice simulations from 1990 to 2007}", journal = JGRO, @@ -794,7 +827,7 @@ @Article{Flocco12 number = {C9}, url = {http://dx.doi.org/10.1029/2012JC008195} } -@Article{Holland12 +@Article{Holland12, author = "M.M. Holland and D.A. Bailey and B.P. Briegleb and B. Light and E. Hunke", title = "{Improved sea ice shortwave radiation physics in CCSM4: The impact of melt ponds and aerosols on Arctic sea ice}", journal = JC, @@ -803,7 +836,7 @@ @Article{Holland12 pages = {1413-1430}, url = {http://dx.doi.org/10.1175/JCLI-D-11-00078.1} } -@Article{Lemieux12 +@Article{Lemieux12, author = "J.F. Lemieux and D.A. Knoll and B. Tremblay and D.M. Holland and M. Losch", title = "{A comparison of the {J}acobian-free {N}ewton {K}rylov method and the {EVP} model for solving the sea ice momentum equation with a viscous-plastic formulation: a serial algorithm study}", @@ -822,7 +855,7 @@ @Article{Lepparanta12 pages = {83-91}, doi = {http://dx.doi.org/10.1016/j.coldregions.2011.12.005} } -@Article{Lupkes12 +@Article{Lupkes12, author = "C. Lüpkes and V.M. Gryanik and J. Hartmann and E.L. 
Andreas", title = "{A parametrization, based on sea ice morphology, of the neutral atmospheric drag coefficients for weather prediction and climate models}", journal = JGRA, @@ -831,7 +864,7 @@ @Article{Lupkes12 number = {D13}, url = {http://dx.doi.org/10.1029/2012JD017630} } -@Article{Mirin12 +@Article{Mirin12, author = "A.A. Mirin and P.H. Worley", title = "{Improving the Performance Scalability of the Community Atmosphere Model}", journal = IJHPCA, @@ -841,7 +874,7 @@ @Article{Mirin12 pages = {17-30}, url = {http://dx.doi.org/10.1177/1094342011412630} } -@Article{Bouillon13 +@Article{Bouillon13, author = "S. Bouillon and T. Fichefet and V. Legat and G. Madec", title = "{The elastic-viscous-plastic method revisited}", journal = OM, @@ -850,7 +883,7 @@ @Article{Bouillon13 pages = {1-12}, url = {http://dx.doi.org/10.1016/j.ocemod.2013.05.013} } -@Article{Hunke13 +@Article{Hunke13, author = "E.C. Hunke and D.A. Hebert and O. Lecomte", title = "{Level-ice melt ponds in the Los Alamos Sea Ice Model, CICE}", journal = OM, @@ -859,7 +892,7 @@ @Article{Hunke13 pages = {26-42}, url = {http://dx.doi.org/10.1016/j.ocemod.2012.11.008} } -@Article{Tsamados13 +@Article{Tsamados13, author = "M. Tsamados and D.L. Feltham and A.V. Wilchinsky", title = "{Impact of a new anisotropic rheology on simulations of Arctic sea ice}", journal = JGRO, @@ -868,7 +901,7 @@ @Article{Tsamados13 pages = {91-107}, url = {http://dx.doi.org/10.1029/2012JC007990} } -@Article{Turner13 +@Article{Turner13, author = "A.K. Turner and E.C. Hunke and C.M. Bitz", title = "{Two modes of sea-ice gravity drainage: a parameterization for large-scale modeling}", journal = JGRO, @@ -887,7 +920,7 @@ @Article{Craig14 pages = {154-165}, url = {http://dx.doi.org/10.1177/1094342014548771} } -@Article{Tsamados14 +@Article{Tsamados14, author = "M. Tsamados and D.L. Feltham and D. Schroeder and D. Flocco and S.L. Farrell and N.T. Kurtz and S.W. Laxon and S. Bacon", title = "{Impact of variable atmospheric and oceanic form drag on simulations of Arctic sea ice}", journal = JPO, @@ -896,7 +929,7 @@ @Article{Tsamados14 pages = {1329-1353}, url = {http://dx.doi.org/10.1175/JPO-D-13-0215.1} } -@Article{Kimmritz15 +@Article{Kimmritz15, author = "M. Kimmritz and S. Danilov and M. Losch", title = "{On the convergence of the modified elastic-viscous-plastic method for solving the sea ice momentum equation}", journal = JCP, @@ -915,7 +948,16 @@ @Article{Roberts15 pages = {211-228}, url = {http://dx.doi.org/10.3189/2015AoG69A760} } -@Article{Lemieux16 +@Article{Kimmritz16, + author = "M. Kimmritz and S. Danilov and M. Losch", + title = "{The adaptive EVP method for solving the sea ice momentum equation}", + journal = OM, + year = {2016}, + volume = {101}, + pages = {59-67}, + url = {https://doi.org/10.1016/j.ocemod.2016.03.004} +} +@Article{Lemieux16, author = "J.F. Lemieux and F. Dupont and P. Blain and F. Roy and G.C. Smith and G.M. Flato", title = "{Improving the simulation of landfast ice by combining tensile strength and a parameterization for grounded ridges}", journal = JGRO, @@ -924,7 +966,7 @@ @Article{Lemieux16 pages = {7354-7368}, url = {http://dx.doi.org/10.1002/2016JC012006} } -@Article{Notz16 +@Article{Notz16, author = "D. Notz and A. Jahn and E. Hunke and F. Massonnet and J. Stroeve and B. Tremblay and M. 
Vancoppenolle", title = "{The CMIP6 Sea-Ice Model Intercomparison Project (SIMIP): understanding sea ice through climate-model simulations}", journal = GMD, @@ -972,7 +1014,6 @@ @incollection{Arakawa77 url = {https://www.sciencedirect.com/science/article/pii/B9780124608177500094}, } -======= @article{Horvat15, author = "C. Horvat and E. Tziperman", journal = {The Cryosphere}, @@ -983,7 +1024,7 @@ @article{Horvat15 volume = {9}, year = {2015} } - @article{Roach18, +@article{Roach18, author = "L. A. Roach and C. Horvat and S. M. Dean and C. M. Bitz", url = {http://dx.doi.org/10.1029/2017JC013692}, journal = JGRO, @@ -994,7 +1035,7 @@ @article{Roach18 year = {2018} } -@Article{Ringeisen21 +@Article{Ringeisen21, author = "D. Ringeisen and L.B. Tremblay and M. Losch", title = "{Non-normal flow rules affect fracture angles in sea ice viscous-plastic rheologies}", journal = TC, @@ -1013,31 +1054,6 @@ @Article{Tsujino18 pages = {79-139}, url = {http://dx.doi.org/10.1016/j.ocemod.2018.07.002} } -@Article{Lemieux08, - author = "J.-F. Lemieux and B. Tremblay and S. Thomas and J. Sedláček and L. A. Mysak", - title = "{Using the preconditioned Generalized Minimum RESidual (GMRES) method to solve the sea-ice momentum equation}", - journal = JGRO, - volume = {113}, - number = {C10}, - pages = {}, - keywords = {Sea ice, GMRES, Krylov subspace}, - doi = {10.1029/2007JC004680}, - url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2007JC004680}, - eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2007JC004680}, - year = {2008} -} -@Article{Saad93, - author = "Y. Saad", - title = "{A Flexible Inner-Outer Preconditioned GMRES Algorithm}", - journal = SIAMJCP, - volume = {14}, - number = {2}, - year = {1993}, - pages = {461-469}, - doi = {10.1137/0914028}, - URL = {https://doi.org/10.1137/0914028} -} - % ********************************************** % For new entries, see example entry in BIB_TEMPLATE.txt % ********************************************** From 6bfde638012917cddc9d6fa4e7ce00d47027a84b Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Thu, 17 Mar 2022 18:06:21 +0000 Subject: [PATCH 092/109] Removed multiplication by uvm (not needed) in strain_rates_U (#78) --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 16 +++++++--------- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 18 ++++++++---------- 2 files changed, 15 insertions(+), 19 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index a447e47e6..2dc150df8 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -774,7 +774,7 @@ subroutine evp (dt) dxU (:,:,iblk), dyU (:,:,iblk), & ratiodxN(:,:,iblk), ratiodxNr(:,:,iblk), & ratiodyE(:,:,iblk), ratiodyEr(:,:,iblk), & - epm(:,:,iblk), npm(:,:,iblk), uvm(:,:,iblk), & + epm(:,:,iblk) , npm(:,:,iblk) , & shearU=shrU(:,:,iblk) ) enddo ! 
iblk @@ -839,7 +839,7 @@ subroutine evp (dt) ratiodxN (:,:,iblk), ratiodxNr (:,:,iblk), & ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & epm (:,:,iblk), npm (:,:,iblk), & - hm (:,:,iblk), uvm (:,:,iblk), & + hm (:,:,iblk), & zetax2T (:,:,iblk), etax2T (:,:,iblk), & strength (:,:,iblk), shrU (:,:,iblk), & stress12U (:,:,iblk)) @@ -997,7 +997,7 @@ subroutine evp (dt) ratiodxN (:,:,iblk), ratiodxNr (:,:,iblk), & ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & epm (:,:,iblk), npm (:,:,iblk), & - hm (:,:,iblk), uvm (:,:,iblk), & + hm (:,:,iblk), & zetax2T (:,:,iblk), etax2T (:,:,iblk), & strength (:,:,iblk), & stresspU (:,:,iblk), stressmU (:,:,iblk), & @@ -1721,7 +1721,7 @@ subroutine stressC_U (nx_block, ny_block, & tarea , uarea, & ratiodxN, ratiodxNr, & ratiodyE, ratiodyEr, & - epm, npm, hm, uvm, & + epm, npm, hm, & zetax2T , etax2T, & strength, shrU, & stress12 ) @@ -1759,7 +1759,6 @@ subroutine stressC_U (nx_block, ny_block, & epm , & ! E-cell mask npm , & ! N-cell mask hm , & ! T-cell mask - uvm , & ! U-cell mask zetax2T , & ! 2*zeta at the T point etax2T , & ! 2*eta at the T point shrU , & ! shearU array @@ -1798,7 +1797,7 @@ subroutine stressC_U (nx_block, ny_block, & dxU (:,:), dyU (:,:), & ratiodxN(:,:), ratiodxNr(:,:), & ratiodyE(:,:), ratiodyEr(:,:), & - epm(:,:), npm(:,:), uvm(:,:), & + epm(:,:) , npm(:,:) , & DeltaU = DeltaU(:,:) ) endif @@ -1980,7 +1979,7 @@ subroutine stressCD_U (nx_block, ny_block, & tarea, uarea, & ratiodxN, ratiodxNr, & ratiodyE, ratiodyEr, & - epm, npm, hm, uvm, & + epm, npm, hm, & zetax2T, etax2T, & strength, & stresspU, stressmU, & @@ -2019,7 +2018,6 @@ subroutine stressCD_U (nx_block, ny_block, & epm , & ! E-cell mask npm , & ! N-cell mask hm , & ! T-cell mask - uvm , & ! U-cell mask zetax2T , & ! 2*zeta at the T point etax2T , & ! 2*eta at the T point strength ! ice strength at the T point @@ -2058,7 +2056,7 @@ subroutine stressCD_U (nx_block, ny_block, & dxU (:,:), dyU(:,:) , & ratiodxN(:,:), ratiodxNr(:,:), & ratiodyE(:,:), ratiodyEr(:,:), & - epm(:,:), npm(:,:), uvm(:,:), & + epm(:,:) , npm(:,:) , & divU (:,:), tensionU (:,:), & shearU (:,:), DeltaU (:,:) ) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 8b52159e3..da34145fd 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -1995,7 +1995,7 @@ subroutine strain_rates_U (nx_block, ny_block, & dxU, dyU, & ratiodxN, ratiodxNr, & ratiodyE, ratiodyEr, & - epm, npm, uvm, & + epm, npm, & divU, tensionU, & shearU, DeltaU ) @@ -2023,8 +2023,7 @@ subroutine strain_rates_U (nx_block, ny_block, & ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) for BCs ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) for BCs epm , & ! E-cell mask - npm , & ! N-cell mask - uvm ! U-cell mask + npm ! N-cell mask real (kind=dbl_kind), dimension (nx_block,ny_block), optional, intent(out):: & divU , & @@ -2071,22 +2070,21 @@ subroutine strain_rates_U (nx_block, ny_block, & vEij = vvelE(i,j) * epm(i,j) & +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * vvelE(i,j+1) - ! MIGHT NOT NEED TO mult by uvm...if done before in calc of uvelU... ! divergence = e_11 + e_22 ldivU = dyU(i,j) * ( uNip1j - uNij ) & - + uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & + + uvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & + dxU(i,j) * ( vEijp1 - vEij ) & - + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) + + vvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) ) if (present(divU)) then divU(i,j) = ldivU endif ! 
tension strain rate = e_11 - e_22 ltensionU = dyU(i,j) * ( uNip1j - uNij ) & - - uvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & + - uvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & - dxU(i,j) * ( vEijp1 - vEij ) & - + vvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) + + vvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) ) if (present(tensionU)) then tensionU(i,j) = ltensionU endif @@ -2104,9 +2102,9 @@ subroutine strain_rates_U (nx_block, ny_block, & ! shearing strain rate = 2*e_12 lshearU = dxU(i,j) * ( uEijp1 - uEij ) & - - uvelU(i,j) * uvm(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & + - uvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & + dyU(i,j) * ( vNip1j - vNij ) & - - vvelU(i,j) * uvm(i,j) * ( dyN(i+1,j) - dyN(i,j) ) + - vvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) if (present(shearU)) then shearU(i,j) = lshearU endif From 3008a247676aa5b041d19eb765018cace72a6050 Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Thu, 17 Mar 2022 18:19:27 +0000 Subject: [PATCH 093/109] Renamed visc_coeff_method to visc_method and visc coeff (comments) to viscosity (#79) --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 38 +++++++++--------- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 16 ++++---- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 40 +++++++++---------- cicecore/cicedynB/general/ice_init.F90 | 32 +++++++-------- configuration/scripts/ice_in | 2 +- doc/source/cice_index.rst | 8 ++-- doc/source/user_guide/ug_case_settings.rst | 10 ++--- 7 files changed, 73 insertions(+), 73 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 2dc150df8..795ed1bd2 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -187,8 +187,8 @@ subroutine evp (dt) real (kind=dbl_kind), allocatable :: & shrU (:,:,:), & ! shearU array for gridC - zetax2T(:,:,:), & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2T (:,:,:) ! etax2 = 2*eta (shear viscous coeff) + zetax2T(:,:,:), & ! zetax2 = 2*zeta (bulk viscosity) + etax2T (:,:,:) ! etax2 = 2*eta (shear viscosity) real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & strtmp ! stress combinations for momentum equation @@ -1347,8 +1347,8 @@ subroutine stress (nx_block, ny_block, & tensionne, tensionnw, tensionse, tensionsw, & ! tension shearne, shearnw, shearse, shearsw , & ! shearing Deltane, Deltanw, Deltase, Deltasw , & ! Delt - zetax2ne, zetax2nw, zetax2se, zetax2sw , & ! 2 x zeta (visc coeff) - etax2ne, etax2nw, etax2se, etax2sw , & ! 2 x eta (visc coeff) + zetax2ne, zetax2nw, zetax2se, zetax2sw , & ! 2 x zeta (bulk visc) + etax2ne, etax2nw, etax2se, etax2sw , & ! 2 x eta (shear visc) rep_prsne, rep_prsnw, rep_prsse, rep_prssw, & ! replacement pressure ! puny , & ! puny ssigpn, ssigps, ssigpe, ssigpw , & @@ -1394,7 +1394,7 @@ subroutine stress (nx_block, ny_block, & Deltase, Deltasw ) !----------------------------------------------------------------- - ! viscous coefficients and replacement pressure + ! viscosities and replacement pressure !----------------------------------------------------------------- call visc_replpress (strength(i,j), DminTarea(i,j), Deltane, & @@ -1627,8 +1627,8 @@ subroutine stressC_T (nx_block, ny_block , & DminTarea ! deltaminEVP*tarea real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - zetax2T , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2T , & ! etax2 = 2*eta (shear viscous coeff) + zetax2T , & ! zetax2 = 2*zeta (bulk viscosity) + etax2T , & ! etax2 = 2*eta (shear viscosity) stressp , & ! 
sigma11+sigma22 stressm ! sigma11-sigma22 @@ -1673,7 +1673,7 @@ subroutine stressC_T (nx_block, ny_block , & DeltaT = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearTsqr)) !----------------------------------------------------------------- - ! viscous coefficients and replacement pressure at T point + ! viscosities and replacement pressure at T point !----------------------------------------------------------------- call visc_replpress (strength(i,j), DminTarea(i,j), & @@ -1729,7 +1729,7 @@ subroutine stressC_U (nx_block, ny_block, & use ice_dyn_shared, only: strain_rates_U, & visc_replpress_avgstr, & visc_replpress_avgzeta, & - visc_coeff_method, deltaminEVP, capping + visc_method, deltaminEVP, capping integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1786,7 +1786,7 @@ subroutine stressC_U (nx_block, ny_block, & ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- - if (visc_coeff_method == 'avg_strength') then + if (visc_method == 'avg_strength') then call strain_rates_U (nx_block , ny_block , & icellu , & indxui (:) , indxuj (:) , & @@ -1811,7 +1811,7 @@ subroutine stressC_U (nx_block, ny_block, & ! avg_strength: C2 method of Kimmritz et al. 2016 !----------------------------------------------------------------- - if (visc_coeff_method == 'avg_zeta') then + if (visc_method == 'avg_zeta') then DeltaU(i,j) = c0 ! not needed in avgzeta just computing etax2U call visc_replpress_avgzeta (zetax2T (i ,j ), zetax2T (i ,j+1), & zetax2T (i+1,j+1), zetax2T (i+1,j ), & @@ -1823,7 +1823,7 @@ subroutine stressC_U (nx_block, ny_block, & tarea (i+1,j+1), tarea (i+1,j ), & DeltaU (i ,j ), etax2U=etax2U) - elseif (visc_coeff_method == 'avg_strength') then + elseif (visc_method == 'avg_strength') then DminUarea = deltaminEVP*uarea(i,j) ! only need etax2U here, but other terms are calculated with etax2U ! minimal extra calculations here even though it seems like there is @@ -1895,8 +1895,8 @@ subroutine stressCD_T (nx_block, ny_block, & DminTarea ! deltaminEVP*tarea real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - zetax2T , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2T , & ! etax2 = 2*eta (shear viscous coeff) + zetax2T , & ! zetax2 = 2*zeta (bulk viscosity) + etax2T , & ! etax2 = 2*eta (shear viscosity) stresspT , & ! sigma11+sigma22 stressmT , & ! sigma11-sigma22 stress12T ! sigma12 @@ -1934,7 +1934,7 @@ subroutine stressCD_T (nx_block, ny_block, & j = indxtj(ij) !----------------------------------------------------------------- - ! viscous coefficients and replacement pressure at T point + ! viscosities and replacement pressure at T point !----------------------------------------------------------------- call visc_replpress (strength(i,j), DminTarea(i,j), & @@ -1988,7 +1988,7 @@ subroutine stressCD_U (nx_block, ny_block, & use ice_dyn_shared, only: strain_rates_U, & visc_replpress_avgstr, & visc_replpress_avgzeta, & - visc_coeff_method, deltaminEVP, capping + visc_method, deltaminEVP, capping integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -2065,10 +2065,10 @@ subroutine stressCD_U (nx_block, ny_block, & j = indxuj(ij) !----------------------------------------------------------------- - ! viscous coefficients and replacement pressure at U point + ! 
viscosities and replacement pressure at U point !----------------------------------------------------------------- - if (visc_coeff_method == 'avg_zeta') then + if (visc_method == 'avg_zeta') then call visc_replpress_avgzeta (zetax2T (i ,j ), zetax2T (i ,j+1), & zetax2T (i+1,j+1), zetax2T (i+1,j ), & etax2T (i ,j ), etax2T (i ,j+1), & @@ -2080,7 +2080,7 @@ subroutine stressCD_U (nx_block, ny_block, & DeltaU (i ,j ), & zetax2U, etax2U, rep_prsU) - elseif (visc_coeff_method == 'avg_strength') then + elseif (visc_method == 'avg_strength') then DminUarea = deltaminEVP*uarea(i,j) call visc_replpress_avgstr (strength(i ,j ), strength(i ,j+1), & strength(i+1,j+1), strength(i+1,j ), & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index da34145fd..e41d11a32 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -59,7 +59,7 @@ module ice_dyn_shared character (len=char_len), public :: & yield_curve , & ! 'ellipse' ('teardrop' needs further testing) - visc_coeff_method, & ! method for visc coeff at U points (C, CD grids) + visc_method, & ! method for viscosity calc at U points (C, CD grids) seabed_stress_method ! method for seabed stress calculation ! LKD: Lemieux et al. 2015, probabilistic: Dupont et al. in prep. @@ -78,9 +78,9 @@ module ice_dyn_shared epp2i , & ! 1/(e_plasticpot)^2 e_factor , & ! (e_yieldcurve)^2/(e_plasticpot)^4 ecci , & ! temporary for 1d evp - deltaminEVP , & ! minimum delta for viscous coefficients (EVP) - deltaminVP , & ! minimum delta for viscous coefficients (VP) - capping , & ! capping of visc coeff (1=Hibler79, 0=Kreyscher2000) + deltaminEVP , & ! minimum delta for viscosities (EVP) + deltaminVP , & ! minimum delta for viscosities (VP) + capping , & ! capping of viscosities (1=Hibler79, 0=Kreyscher2000) dtei , & ! 1/dte, where dte is subcycling timestep (1/s) ! dte2T , & ! dte/2T denom1 ! constants for stress equation @@ -2120,7 +2120,7 @@ subroutine strain_rates_U (nx_block, ny_block, & end subroutine strain_rates_U !======================================================================= -! Computes viscous coefficients and replacement pressure for stress +! Computes viscosities and replacement pressure for stress ! calculations. Note that tensile strength is included here. ! ! Hibler, W. D. (1979). A dynamic thermodynamic sea ice model. J. Phys. @@ -2143,7 +2143,7 @@ subroutine visc_replpress(strength, DminArea, Delta, & Delta, capping real (kind=dbl_kind), intent(out):: & - zetax2, etax2, rep_prs ! 2 x visous coeffs, replacement pressure + zetax2, etax2, rep_prs ! 2 x viscosities, replacement pressure ! local variables real (kind=dbl_kind) :: & @@ -2183,13 +2183,13 @@ subroutine visc_replpress_avgzeta (zetax2T1, zetax2T2, & real (kind=dbl_kind), intent(in):: & zetax2T1,zetax2T2,zetax2T3,zetax2T4, & - etax2T1, etax2T2, etax2T3, etax2T4, & ! 2 x viscous coeffs, replacement pressure + etax2T1, etax2T2, etax2T3, etax2T4, & mask1, mask2, mask3, mask4, & area1, area2, area3, area4, & deltaU real (kind=dbl_kind), optional, intent(out):: & - zetax2U, etax2U, rep_prsU + zetax2U, etax2U, rep_prsU ! 2 x viscosities, replacement pressure ! local variables diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 491ba2b6e..690647f15 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -211,8 +211,8 @@ subroutine implicit_solver (dt) umassdti ! 
mass of U-cell/dte (kg/m^2 s) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4):: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 , & ! etax2 = 2*eta (shear viscous coeff) + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 , & ! etax2 = 2*eta (shear viscosity) rep_prs ! replacement pressure logical (kind=log_kind) :: calc_strair @@ -729,8 +729,8 @@ subroutine anderson_solver (icellt , icellu, & umassdti ! mass of U-cell/dte (kg/m^2 s) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(out) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 , & ! etax2 = 2*eta (shear viscous coeff) + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 , & ! etax2 = 2*eta (shear viscosity) rep_prs ! replacement pressure type (ice_halo), intent(in) :: & @@ -1143,7 +1143,7 @@ end subroutine anderson_solver !======================================================================= -! Computes the viscous coefficients and dPr/dx, dPr/dy +! Computes the viscosities and dPr/dx, dPr/dy subroutine calc_zeta_dPr (nx_block, ny_block, & icellt , & @@ -1183,8 +1183,8 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & DminTarea ! deltaminVP*tarea real (kind=dbl_kind), dimension(nx_block,ny_block,4), intent(out) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 , & ! etax2 = 2*eta (shear viscous coeff) + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 , & ! etax2 = 2*eta (shear viscosity) rep_prs ! replacement pressure real (kind=dbl_kind), dimension(nx_block,ny_block,8), intent(out) :: & @@ -1237,7 +1237,7 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & Deltase , Deltasw) !----------------------------------------------------------------- - ! viscous coefficients and replacement pressure + ! viscosities and replacement pressure !----------------------------------------------------------------- call visc_replpress (strength(i,j) , DminTarea(i,j) , & @@ -1377,8 +1377,8 @@ subroutine stress_vp (nx_block , ny_block , & cxm ! 0.5*HTN - 1.5*HTS real (kind=dbl_kind), dimension(nx_block,ny_block,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 , & ! etax2 = 2*eta (shear viscous coeff) + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 , & ! etax2 = 2*eta (shear viscosity) rep_prs real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & @@ -1610,8 +1610,8 @@ subroutine matvec (nx_block, ny_block, & uarear ! 1/uarea real (kind=dbl_kind), dimension(nx_block,ny_block,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 ! etax2 = 2*eta (shear viscous coeff) + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 ! etax2 = 2*eta (shear viscosity) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & Au , & ! matvec, Fx = bx - Au (N/m^2) @@ -2052,8 +2052,8 @@ subroutine formDiag_step1 (nx_block, ny_block, & cxm ! 0.5*HTN - 1.5*HTS real (kind=dbl_kind), dimension(nx_block,ny_block,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 ! etax2 = 2*eta (shear viscous coeff) + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 ! etax2 = 2*eta (shear viscosity) real (kind=dbl_kind), dimension(nx_block,ny_block,8), intent(out) :: & Drheo ! intermediate value for diagonal components of matrix A associated @@ -2714,8 +2714,8 @@ subroutine fgmres (zetax2 , etax2 , & use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & - zetax2 , & ! 
zetax2 = 2*zeta (bulk viscous coeff) - etax2 ! etax2 = 2*eta (shear viscous coeff) + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 ! etax2 = 2*eta (shear viscosity) real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & vrel , & ! coefficient for tauw @@ -3110,8 +3110,8 @@ subroutine pgmres (zetax2 , etax2 , & nbiter) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 ! etax2 = 2*eta (shear viscous coeff) + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 ! etax2 = 2*eta (shear viscosity) real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & vrel , & ! coefficient for tauw @@ -3503,8 +3503,8 @@ subroutine precondition(zetax2 , etax2, & wx , wy) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscous coeff) - etax2 ! etax2 = 2*eta (shear viscous coeff) + zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) + etax2 ! etax2 = 2*eta (shear viscosity) real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & vrel , & ! coefficient for tauw diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index da6478069..651bf7880 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -106,7 +106,7 @@ subroutine input_data dxrect, dyrect, & pgl_global_ext use ice_dyn_shared, only: ndte, kdyn, revised_evp, yield_curve, & - evp_algorithm, visc_coeff_method, & + evp_algorithm, visc_method, & seabed_stress, seabed_stress_method, & k1, k2, alphab, threshold_hw, Ktens, & e_yieldcurve, e_plasticpot, coriolis, & @@ -220,7 +220,7 @@ subroutine input_data brlx, arlx, ssh_stress, & advection, coriolis, kridge, ktransport, & kstrength, krdg_partic, krdg_redist, mu_rdg, & - e_yieldcurve, e_plasticpot, visc_coeff_method, & + e_yieldcurve, e_plasticpot, visc_method, & maxits_nonlin, precond, dim_fgmres, & dim_pgmres, maxits_fgmres, maxits_pgmres, monitor_nonlin, & monitor_fgmres, monitor_pgmres, reltol_nonlin, reltol_fgmres, & @@ -384,10 +384,10 @@ subroutine input_data Ktens = 0.0_dbl_kind ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) e_yieldcurve = 2.0_dbl_kind ! VP aspect ratio of elliptical yield curve e_plasticpot = 2.0_dbl_kind ! VP aspect ratio of elliptical plastic potential - visc_coeff_method = 'avg_strength' ! calc visc coeff at U point: avg_strength, avg_zeta - deltaminEVP = 1e-11_dbl_kind ! minimum delta for viscous coeff (EVP, Hunke 2001) - deltaminVP = 2e-9_dbl_kind ! minimum delta for viscous coeff (VP, Hibler 1979) - capping = 1.0_dbl_kind ! method for capping of visc coeff (1=Hibler 1979,0=Kreyscher2000) + visc_method = 'avg_strength' ! calc viscosities at U point: avg_strength, avg_zeta + deltaminEVP = 1e-11_dbl_kind ! minimum delta for viscosities (EVP, Hunke 2001) + deltaminVP = 2e-9_dbl_kind ! minimum delta for viscosities (VP, Hibler 1979) + capping = 1.0_dbl_kind ! method for capping of viscosities (1=Hibler 1979,0=Kreyscher2000) maxits_nonlin = 4 ! max nb of iteration for nonlinear solver precond = 'pgmres' ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), 'pgmres' (Jacobi-preconditioned GMRES) dim_fgmres = 50 ! 
size of fgmres Krylov subspace @@ -858,7 +858,7 @@ subroutine input_data call broadcast_scalar(Ktens, master_task) call broadcast_scalar(e_yieldcurve, master_task) call broadcast_scalar(e_plasticpot, master_task) - call broadcast_scalar(visc_coeff_method, master_task) + call broadcast_scalar(visc_method, master_task) call broadcast_scalar(deltaminEVP, master_task) call broadcast_scalar(deltaminVP, master_task) call broadcast_scalar(capping, master_task) @@ -1141,10 +1141,10 @@ subroutine input_data endif if (grid_ice == 'C' .or. grid_ice == 'CD') then - if (visc_coeff_method /= 'avg_zeta' .and. visc_coeff_method /= 'avg_strength') then + if (visc_method /= 'avg_zeta' .and. visc_method /= 'avg_strength') then if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: invalid method for viscous coefficients' - write(nu_diag,*) subname//' ERROR: visc_coeff_method should be avg_zeta or avg_strength' + write(nu_diag,*) subname//' ERROR: invalid method for viscosities' + write(nu_diag,*) subname//' ERROR: visc_method should be avg_zeta or avg_strength' endif abort_list = trim(abort_list)//":44" endif @@ -1153,7 +1153,7 @@ subroutine input_data if (kdyn == 1 .or. kdyn == 3) then if (capping /= c0 .and. capping /= c1) then if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: invalid method for capping viscous coefficients' + write(nu_diag,*) subname//' ERROR: invalid method for capping viscosities' write(nu_diag,*) subname//' ERROR: capping should be equal to 0.0 or 1.0' endif abort_list = trim(abort_list)//":45" @@ -1676,11 +1676,11 @@ subroutine input_data endif if (kdyn == 1) then - write(nu_diag,1003) ' deltamin = ', deltaminEVP, ' : minimum delta for viscous coefficients' - write(nu_diag,1002) ' capping = ', capping, ' : capping method for viscous coefficients' + write(nu_diag,1003) ' deltamin = ', deltaminEVP, ' : minimum delta for viscosities' + write(nu_diag,1002) ' capping = ', capping, ' : capping method for viscosities' elseif (kdyn == 3) then - write(nu_diag,1003) ' deltamin = ', deltaminVP, ' : minimum delta for viscous coefficients' - write(nu_diag,1002) ' capping = ', capping, ' : capping method for viscous coefficients' + write(nu_diag,1003) ' deltamin = ', deltaminVP, ' : minimum delta for viscosities' + write(nu_diag,1002) ' capping = ', capping, ' : capping method for viscosities' endif write(nu_diag,1002) ' elasticDamp = ', elasticDamp, ' : coefficient for calculating the parameter E' @@ -1734,7 +1734,7 @@ subroutine input_data endif endif if (grid_ice == 'C' .or. grid_ice == 'CD') then - write(nu_diag,1030) ' visc_coeff_method= ', trim(visc_coeff_method),' : viscous coeff method (U point)' + write(nu_diag,1030) ' visc_method= ', trim(visc_method),' : viscosities method (U point)' endif write(nu_diag,1002) ' Ktens = ', Ktens, ' : tensile strength factor' diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index 598f21aea..5075ae205 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -148,7 +148,7 @@ Ktens = 0. e_yieldcurve = 2. e_plasticpot = 2. - visc_coeff_method = 'avg_strength' + visc_method = 'avg_strength' elasticDamp = 0.36d0 deltaminEVP = 1e-11 deltaminVP = 2e-9 diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index b058d1503..523b14058 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -95,7 +95,7 @@ either Celsius or Kelvin units). 
"calc_dragio", "if true, calculate ``dragio`` from ``iceruf_ocn`` and ``thickness_ocn_layer1``", "F" "calc_strair", "if true, calculate wind stress", "T" "calc_Tsfc", "if true, calculate surface temperature", "T" - "capping", "parameter for capping method of viscous coefficients", "1.0" + "capping", "parameter for capping method of viscosities", "1.0" "Cdn_atm", "atmospheric drag coefficient", "" "Cdn_ocn", "ocean drag coefficient", "" "Cf", "ratio of ridging work to PE change in ridging", "17." @@ -212,7 +212,7 @@ either Celsius or Kelvin units). "eps13", "a small number", "10\ :math:`^{-13}`" "eps16", "a small number", "10\ :math:`^{-16}`" "esno(n)", "energy of melting of snow per unit area (in category n)", "J/m\ :math:`^2`" - "etax2", "2 x eta (shear viscous coefficient)", "kg/s" + "etax2", "2 x eta (shear viscosity)", "kg/s" "evap", "evaporative water flux", "kg/m\ :math:`^2`/s" "ew_boundary_type", "type of east-west boundary condition", "" "elasticDamp", "coefficient for calculating the parameter E, 0\ :math:`<` elasticDamp :math:`<`\ 1", "0.36" @@ -735,7 +735,7 @@ either Celsius or Kelvin units). "vice(n)", "volume per unit area of ice (in category n)", "m" "vicen_init", "ice volume at beginning of timestep", "m" "viscosity_dyn", "dynamic viscosity of brine", ":math:`1.79\times 10^{-3}` kg/m/s" - "visc_coeff_method", "method for calculating viscous coefficients (‘avg_strength’ or ‘avg_zeta’)", "avg_strength" + "visc_method", "method for calculating viscosities (‘avg_strength’ or ‘avg_zeta’)", "avg_strength" "vocn", "ocean current in the y-direction", "m/s" "vonkar", "von Karman constant", "0.4" "vraftn", "volume of rafted ice", "m" @@ -763,7 +763,7 @@ either Celsius or Kelvin units). "yieldstress11(12, 22)", "yield stress tensor components", "" "year_init", "the initial year", "" "**Z**", "", "" - "zetax2", "2 x zeta (bulk viscous coefficient)", "kg/s" + "zetax2", "2 x zeta (bulk viscosity)", "kg/s" "zlvl", "atmospheric level height (momentum)", "m" "zlvs", "atmospheric level height (scalars)", "m" "zref", "reference height for stability", "10. 
m" diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index fb3806bfd..dcfcb0451 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -410,7 +410,7 @@ dynamics_nml "``alphab``", "real", ":math:`\alpha_{b}` factor in :cite:`Lemieux16`", "20.0" "``arlx``", "real", "revised_evp value", "300.0" "``brlx``", "real", "revised_evp value", "300.0" - "``capping``", "real", "method for capping the viscous coefficients", "1.0" + "``capping``", "real", "method for capping the viscosities", "1.0" "", "``0``", "Kreyscher 2000", "" "", "``1``", "Hibler 1979", "" "``Cf``", "real", "ratio of ridging work to PE change in ridging", "17.0" @@ -418,8 +418,8 @@ dynamics_nml "", "``latitude``", "coriolis variable by latitude", "" "", "``zero``", "zero coriolis", "" "``Cstar``", "real", "constant in Hibler strength formula", "20" - "``deltaminEVP``", "real", "minimum delta for viscous coefficients", "1e-11" - "``deltaminVP``", "real", "minimum delta for viscous coefficients", "2e-9" + "``deltaminEVP``", "real", "minimum delta for viscosities", "1e-11" + "``deltaminVP``", "real", "minimum delta for viscosities", "2e-9" "``dim_fgmres``", "integer", "maximum number of Arnoldi iterations for FGMRES solver", "50" "``dim_pgmres``", "integer", "maximum number of Arnoldi iterations for PGMRES preconditioner", "5" "``e_plasticpot``", "real", "aspect ratio of elliptical plastic potential", "2.0" @@ -470,8 +470,8 @@ dynamics_nml "", "``geostropic``", "computed from ocean velocity", "" "``threshold_hw``", "real", "Max water depth for grounding (see :cite:`Amundrud04`)", "30." "``use_mean_vrel``", "logical", "Use mean of two previous iterations for vrel in VP", "``.true.``" - "``visc_coeff_method``", "``avg_strength``", "average strength for visc coeff on U grid", "``avg_strength``" - "", "``avg_zeta``", "average zeta for visc coeff on U grid", "" + "``visc_method``", "``avg_strength``", "average strength for viscosities on U grid", "``avg_strength``" + "", "``avg_zeta``", "average zeta for viscosities on U grid", "" "``yield_curve``", "``ellipse``", "elliptical yield curve", "``ellipse``" "", "", "", "" From 2fb3310e587fa6e4a1ebf2868b17d8489ea22c39 Mon Sep 17 00:00:00 2001 From: TRasmussen <33480590+TillRasmussen@users.noreply.github.com> Date: Thu, 17 Mar 2022 20:23:56 +0100 Subject: [PATCH 094/109] update to include uarea (#80) --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 795ed1bd2..176238c91 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -794,7 +794,7 @@ subroutine evp (dt) uvelN (:,:,iblk), vvelN (:,:,iblk), & dxN (:,:,iblk), dyE (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & - DminTarea (:,:,iblk), & + uarea (:,:,iblk), DminTarea (:,:,iblk), & strength (:,:,iblk), shrU (:,:,iblk), & zetax2T (:,:,iblk), etax2T (:,:,iblk), & stresspT (:,:,iblk), stressmT (:,:,iblk)) @@ -1597,7 +1597,7 @@ subroutine stressC_T (nx_block, ny_block , & uvelN , vvelN , & dxN , dyE , & dxT , dyT , & - DminTarea, & + uarea , DminTarea, & strength, shrU , & zetax2T , etax2T , & stressp , stressm ) @@ -1624,6 +1624,7 @@ subroutine stressC_T (nx_block, ny_block , & dyT , & ! height of T-cell through the middle (m) strength , & ! ice strength (N/m) shrU , & ! shearU + uarea , & ! area of u cell DminTarea ! 
deltaminEVP*tarea real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & @@ -1668,8 +1669,11 @@ subroutine stressC_T (nx_block, ny_block , & ! Square of shear strain rate at T obtained from interpolation of ! U point values (Bouillon et al., 2013, Kimmritz et al., 2016 !----------------------------------------------------------------- - - shearTsqr = (shrU(i,j)**2 + shrU(i,j-1)**2 + shrU(i-1,j-1)**2 + shrU(i-1,j)**2)/4d0 + + shearTsqr = (shrU(i,j) **2 * uarea(i,j) + shrU(i,j-1)**2*uarea(i,j-1) & + + shrU(i-1,j-1)**2 * uarea(i-1,j-1)+ shrU(i-1,j)**2*uarea(i-1,j)) & + / (uarea(i,j)+uarea(i,j-1)+uarea(i-1,j-1)+uarea(i-1,j)) + DeltaT = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearTsqr)) !----------------------------------------------------------------- From de154bef6c2b602403b27ba44fc305f6e537bfca Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Sat, 19 Mar 2022 12:30:42 -0700 Subject: [PATCH 095/109] Update dyn C/CD halo updates to mask and bundle fields (#81) * Update dyn C/CD halo updates to mask and bundle fields Rename stack_velocity_field to stack_fields and overload * Update dyn halo implementation, improve serial performance, add dyn_haloupdate methods * minor clean up --- cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 10 +- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 186 ++--- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 642 +++++++++++++++++- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 14 +- .../infrastructure/comm/mpi/ice_timers.F90 | 2 + .../infrastructure/comm/serial/ice_timers.F90 | 2 + .../cicedynB/infrastructure/ice_domain.F90 | 2 + doc/source/user_guide/ug_implementation.rst | 12 +- 8 files changed, 731 insertions(+), 139 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index f3bb7a935..121c0f85e 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -134,7 +134,7 @@ subroutine eap (dt) dyn_prep1, dyn_prep2, stepu, dyn_finish, & seabed_stress_factor_LKD, seabed_stress_factor_prob, & seabed_stress_method, seabed_stress, & - stack_velocity_field, unstack_velocity_field + stack_fields, unstack_fields use ice_flux, only: rdg_conv, strairxT, strairyT, & strairx, strairy, uocn, vocn, ss_tltx, ss_tlty, iceumask, fm, & strtltx, strtlty, strocnx, strocny, strintx, strinty, taubx, tauby, & @@ -389,10 +389,10 @@ subroutine eap (dt) call ice_HaloUpdate (strength, halo_info, & field_loc_center, field_type_scalar) ! velocities may have changed in dyn_prep2 - call stack_velocity_field(uvel, vvel, fld2) + call stack_fields(uvel, vvel, fld2) call ice_HaloUpdate (fld2, halo_info, & field_loc_NEcorner, field_type_vector) - call unstack_velocity_field(fld2, uvel, vvel) + call unstack_fields(fld2, uvel, vvel) call ice_timer_stop(timer_bound) if (maskhalo_dyn) then @@ -524,7 +524,7 @@ subroutine eap (dt) enddo !$OMP END PARALLEL DO - call stack_velocity_field(uvel, vvel, fld2) + call stack_fields(uvel, vvel, fld2) call ice_timer_start(timer_bound) if (maskhalo_dyn) then call ice_HaloUpdate (fld2, halo_info_mask, & @@ -534,7 +534,7 @@ subroutine eap (dt) field_loc_NEcorner, field_type_vector) endif call ice_timer_stop(timer_bound) - call unstack_velocity_field(fld2, uvel, vvel) + call unstack_fields(fld2, uvel, vvel) enddo ! 
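The replacement of the plain /4 mean by a uarea-weighted mean in stressC_T above generalizes the interpolation to grids where the four surrounding U cells have unequal areas; when the four areas are equal it reduces to the old simple mean. A minimal self-contained sketch of that weighting (the helper name awavg4 and the toy values are assumptions, not CICE code):

   program demo_awavg
      implicit none
      double precision :: v(4), a(4)
      v = (/1.d0, 2.d0, 3.d0, 4.d0/)   ! point values, e.g. shrU**2
      a = (/1.d0, 1.d0, 2.d0, 2.d0/)   ! cell areas, e.g. uarea (m^2)
      print *, 'area-weighted mean =', awavg4(v, a)
      print *, 'simple mean        =', sum(v)/4.d0
   contains
      pure function awavg4(v, a) result(avg)
         ! hypothetical helper, not part of CICE: weight each value by
         ! its cell area; equals the simple mean when areas are equal
         double precision, intent(in) :: v(4), a(4)
         double precision :: avg
         avg = sum(v*a) / sum(a)
      end function awavg4
   end program demo_awavg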
subcycling diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 176238c91..b97550f76 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -113,8 +113,8 @@ subroutine evp (dt) ice_timer_start, ice_timer_stop, timer_evp_1d, timer_evp_2d use ice_dyn_evp_1d, only: ice_dyn_evp_1d_copyin, ice_dyn_evp_1d_kernel, & ice_dyn_evp_1d_copyout - use ice_dyn_shared, only: evp_algorithm, stack_velocity_field, unstack_velocity_field, DminTarea - use ice_dyn_shared, only: deformations, deformations_T, strain_rates_U + use ice_dyn_shared, only: evp_algorithm, stack_fields, unstack_fields, DminTarea + use ice_dyn_shared, only: deformations, deformations_T, strain_rates_U, dyn_haloUpdate real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -183,7 +183,10 @@ subroutine evp (dt) emass , & ! total mass of ice and snow (E grid) emassdti ! mass of E-cell/dte (kg/m^2 s) - real (kind=dbl_kind), allocatable :: fld2(:,:,:,:) + real (kind=dbl_kind), allocatable :: & + fld2(:,:,:,:), & ! bundled fields size 2 + fld3(:,:,:,:), & ! bundled fields size 3 + fld4(:,:,:,:) ! bundled fields size 4 real (kind=dbl_kind), allocatable :: & shrU (:,:,:), & ! shearU array for gridC @@ -220,6 +223,8 @@ subroutine evp (dt) !----------------------------------------------------------------- allocate(fld2(nx_block,ny_block,2,max_blocks)) + allocate(fld3(nx_block,ny_block,3,max_blocks)) + allocate(fld4(nx_block,ny_block,4,max_blocks)) if (grid_ice == 'CD' .or. grid_ice == 'C') then @@ -572,16 +577,39 @@ subroutine evp (dt) field_loc_center, field_type_scalar) ! velocities may have changed in dyn_prep2 - call stack_velocity_field(uvel, vvel, fld2) + call stack_fields(uvel, vvel, fld2) call ice_HaloUpdate (fld2, halo_info, & field_loc_NEcorner, field_type_vector) - call unstack_velocity_field(fld2, uvel, vvel) + call unstack_fields(fld2, uvel, vvel) call ice_timer_stop(timer_bound) if (maskhalo_dyn) then - call ice_timer_start(timer_bound) halomask = 0 - where (iceumask) halomask = 1 + if (grid_ice == 'B') then + where (iceumask) halomask = 1 + elseif (grid_ice == 'C' .or. grid_ice == 'CD') then + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,i,j) SCHEDULE(runtime) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo,jhi + do i = ilo,ihi + if (icetmask(i ,j ,iblk) /= 0 .or. & + icetmask(i-1,j ,iblk) /= 0 .or. & + icetmask(i+1,j ,iblk) /= 0 .or. & + icetmask(i ,j-1,iblk) /= 0 .or. & + icetmask(i ,j+1,iblk) /= 0) then + halomask(i,j,iblk) = 1 + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + endif + call ice_timer_start(timer_bound) call ice_HaloUpdate (halomask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -656,8 +684,6 @@ subroutine evp (dt) endif - call ice_timer_start(timer_evp_2d) - if (evp_algorithm == "shared_mem_1d" ) then if (first_time .and. my_task == master_task) then @@ -669,6 +695,7 @@ subroutine evp (dt) & Kernel not tested on tripole grid. 
Set evp_algorithm=standard_2d') endif + call ice_timer_start(timer_evp_1d) call ice_dyn_evp_1d_copyin( & nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & icetmask, iceumask, & @@ -678,9 +705,7 @@ subroutine evp (dt) stressp_1 ,stressp_2, stressp_3, stressp_4, & stressm_1 ,stressm_2, stressm_3, stressm_4, & stress12_1,stress12_2,stress12_3,stress12_4 ) - call ice_timer_start(timer_evp_1d) call ice_dyn_evp_1d_kernel() - call ice_timer_stop(timer_evp_1d) call ice_dyn_evp_1d_copyout( & nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost,& !strocn uvel,vvel, strocnx,strocny, strintx,strinty, & @@ -689,9 +714,11 @@ subroutine evp (dt) stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1,stress12_2,stress12_3,stress12_4, & divu,rdg_conv,rdg_shear,shear,taubx,tauby ) + call ice_timer_stop(timer_evp_1d) else ! evp_algorithm == standard_2d (Standard CICE) + call ice_timer_start(timer_evp_2d) do ksub = 1,ndte ! subcycling if (grid_ice == "B") then @@ -780,10 +807,10 @@ subroutine evp (dt) enddo ! iblk !$OMP END PARALLEL DO - call ice_timer_start(timer_bound) - call ice_HaloUpdate (shrU, halo_info, & - field_loc_NEcorner, field_type_scalar) - call ice_timer_stop(timer_bound) + ! calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_scalar, & + shrU) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -817,13 +844,10 @@ subroutine evp (dt) enddo !$OMP END PARALLEL DO - ! Need to update the halos for the stress components - call ice_timer_start(timer_bound) - call ice_HaloUpdate (zetax2T, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate (etax2T, halo_info, & - field_loc_center, field_type_scalar) - call ice_timer_stop(timer_bound) + ! calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_center, field_type_scalar, & + zetax2T, etax2T, stresspT, stressmT) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -846,15 +870,10 @@ subroutine evp (dt) enddo !$OMP END PARALLEL DO - ! Need to update the halos for the stress components - call ice_timer_start(timer_bound) - call ice_HaloUpdate (stresspT, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate (stressmT, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate (stress12U, halo_info, & - field_loc_NEcorner, field_type_scalar) - call ice_timer_stop(timer_bound) + ! calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info , halo_info_mask, & + field_loc_NEcorner, field_type_scalar, & + stress12U) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -914,25 +933,27 @@ subroutine evp (dt) TbN (:,:,iblk)) enddo !$OMP END PARALLEL DO - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (uvelE, halo_info, & - field_loc_Eface, field_type_vector) - call ice_HaloUpdate (vvelN, halo_info, & - field_loc_Nface, field_type_vector) - call ice_timer_stop(timer_bound) + + ! 
calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_Eface, field_type_vector, & + uvelE) + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_Nface, field_type_vector, & + vvelN) call grid_average_X2Y('A',uvelE,'E',uvelN,'N') call grid_average_X2Y('A',vvelN,'N',vvelE,'E') uvelN(:,:,:) = uvelN(:,:,:)*npm(:,:,:) vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) - call ice_timer_start(timer_bound) - call ice_HaloUpdate (uvelN, halo_info, & - field_loc_Nface, field_type_vector) - call ice_HaloUpdate (vvelE, halo_info, & - field_loc_Eface, field_type_vector) - call ice_timer_stop(timer_bound) + ! calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_Nface, field_type_vector, & + uvelN) + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_Eface, field_type_vector, & + vvelE) call grid_average_X2Y('S',uvelE,'E',uvel,'U') call grid_average_X2Y('S',vvelN,'N',vvel,'U') @@ -975,13 +996,10 @@ subroutine evp (dt) enddo !$OMP END PARALLEL DO - ! Need to update the halos for the stress components - call ice_timer_start(timer_bound) - call ice_HaloUpdate (zetax2T, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate (etax2T, halo_info, & - field_loc_center, field_type_scalar) - call ice_timer_stop(timer_bound) + ! calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_center, field_type_scalar, & + zetax2T, etax2T) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -1005,21 +1023,13 @@ subroutine evp (dt) enddo !$OMP END PARALLEL DO - ! Need to update the halos for the stress components - call ice_timer_start(timer_bound) - call ice_HaloUpdate (stresspT, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate (stressmT, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate (stress12T, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate (stresspU, halo_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloUpdate (stressmU, halo_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloUpdate (stress12U, halo_info, & - field_loc_NEcorner, field_type_scalar) - call ice_timer_stop(timer_bound) + ! calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_center, field_type_scalar, & + stresspT, stressmT, stress12T) + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner,field_type_scalar, & + stresspU, stressmU, stress12U) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -1086,16 +1096,13 @@ subroutine evp (dt) enddo !$OMP END PARALLEL DO - call ice_timer_start(timer_bound) - call ice_HaloUpdate (uvelE, halo_info, & - field_loc_Eface, field_type_vector) - call ice_HaloUpdate (vvelN, halo_info, & - field_loc_Nface, field_type_vector) - call ice_HaloUpdate (uvelN, halo_info, & - field_loc_Nface, field_type_vector) - call ice_HaloUpdate (vvelE, halo_info, & - field_loc_Eface, field_type_vector) - call ice_timer_stop(timer_bound) + ! calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_Eface, field_type_vector, & + uvelE, vvelE) + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_Nface, field_type_vector, & + uvelN, vvelN) call grid_average_X2Y('S',uvelE,'E',uvel,'U') call grid_average_X2Y('S',vvelN,'N',vvel,'U') @@ -1105,31 +1112,24 @@ subroutine evp (dt) endif ! 
grid_ice - call ice_timer_start(timer_bound) - call stack_velocity_field(uvel, vvel, fld2) - ! maskhalo_dyn causes non bit-for-bit results on different decomps - ! with C/CD in some cases - if (grid_ice == 'B' .and. maskhalo_dyn) then - call ice_HaloUpdate (fld2, halo_info_mask, & - field_loc_NEcorner, field_type_vector) - else - call ice_HaloUpdate (fld2, halo_info, & - field_loc_NEcorner, field_type_vector) - endif - call unstack_velocity_field(fld2, uvel, vvel) - call ice_timer_stop(timer_bound) + ! U fields at NE corner + ! calls ice_haloUpdate, controls bundles and masks + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_vector, & + uvel, vvel) enddo ! subcycling + call ice_timer_stop(timer_evp_2d) endif ! evp_algorithm - call ice_timer_stop(timer_evp_2d) - - deallocate(fld2) + deallocate(fld2,fld3,fld4) if (grid_ice == 'CD' .or. grid_ice == 'C') then deallocate(shrU, zetax2T, etax2T) endif - if (maskhalo_dyn) call ice_HaloDestroy(halo_info_mask) + if (maskhalo_dyn) then + call ice_HaloDestroy(halo_info_mask) + endif ! Force symmetry across the tripole seam if (trim(grid_type) == 'tripole') then diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index e41d11a32..a379d24ef 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -10,7 +10,7 @@ module ice_dyn_shared use ice_kinds_mod - use ice_communicate, only: my_task, master_task + use ice_communicate, only: my_task, master_task, get_num_procs use ice_constants, only: c0, c1, c2, c3, c4, c6 use ice_constants, only: omega, spval_dbl, p01, p001, p5 use ice_blocks, only: nx_block, ny_block @@ -32,7 +32,8 @@ module ice_dyn_shared visc_replpress, & visc_replpress_avgstr, & visc_replpress_avgzeta, & - stack_velocity_field, unstack_velocity_field + dyn_haloUpdate, & + stack_fields, unstack_fields ! namelist parameters @@ -127,6 +128,28 @@ module ice_dyn_shared threshold_hw ! max water depth for grounding ! see keel data from Amundrud et al. 2004 (JGR) + interface dyn_haloUpdate + module procedure dyn_haloUpdate1 + module procedure dyn_haloUpdate2 + module procedure dyn_haloUpdate3 + module procedure dyn_haloUpdate4 + module procedure dyn_haloUpdate5 + end interface + + interface stack_fields + module procedure stack_fields2 + module procedure stack_fields3 + module procedure stack_fields4 + module procedure stack_fields5 + end interface + + interface unstack_fields + module procedure unstack_fields2 + module procedure unstack_fields3 + module procedure unstack_fields4 + module procedure unstack_fields5 + end interface + !======================================================================= contains @@ -165,7 +188,7 @@ end subroutine alloc_dyn_shared subroutine init_dyn (dt) use ice_blocks, only: nx_block, ny_block - use ice_domain, only: nblocks + use ice_domain, only: nblocks, halo_dynbundle use ice_domain_size, only: max_blocks use ice_flux, only: rdg_conv, rdg_shear, iceumask, & stressp_1, stressp_2, stressp_3, stressp_4, & @@ -182,17 +205,24 @@ subroutine init_dyn (dt) ! local variables integer (kind=int_kind) :: & - i, j, & - iblk ! block index + i, j , & ! indices + nprocs, & ! number of processors + iblk ! block index character(len=*), parameter :: subname = '(init_dyn)' call set_evp_parameters (dt) + ! Set halo_dynbundle, this is empirical at this point, could become namelist + halo_dynbundle = .true. 
+ nprocs = get_num_procs() + if (nx_block*ny_block/nprocs > 100) halo_dynbundle = .false. + if (my_task == master_task) then write(nu_diag,*) 'dt = ',dt write(nu_diag,*) 'dte = ',dt/real(ndte,kind=dbl_kind) write(nu_diag,*) 'tdamp =', elasticDamp * dt + write(nu_diag,*) 'halo_dynbundle =', halo_dynbundle endif allocate(fcor_blk(nx_block,ny_block,max_blocks)) @@ -2278,68 +2308,622 @@ subroutine visc_replpress_avgstr (strength1, strength2, & end subroutine visc_replpress_avgstr !======================================================================= +! Do a halo update on 1 fields + + subroutine dyn_haloUpdate1(halo_info, halo_info_mask, field_loc, field_type, fld1) + + use ice_boundary, only: ice_halo, ice_HaloUpdate + use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + + type (ice_halo), intent(in) :: & + halo_info , & ! standard unmasked halo + halo_info_mask ! masked halo + + integer (kind=int_kind), intent(in) :: & + field_loc, & ! field loc + field_type ! field_type + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & + fld1 ! fields to halo + + ! local variables + + integer (kind=int_kind) :: & + iblk ! iblock + + real (kind=dbl_kind), dimension (nx_block,ny_block,1,max_blocks) :: & + fldbundle ! work array for boundary updates + + character(len=*), parameter :: subname = '(dyn_haloUpdate1)' + + call ice_timer_start(timer_bound) + + if (maskhalo_dyn) then + call ice_HaloUpdate (fld1 , halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fld1 , halo_info , & + field_loc, field_type) + endif + + call ice_timer_stop(timer_bound) + + end subroutine dyn_haloUpdate1 + +!======================================================================= +! Do a halo update on 2 fields + + subroutine dyn_haloUpdate2(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2) + + use ice_boundary, only: ice_halo, ice_HaloUpdate + use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + + type (ice_halo), intent(in) :: & + halo_info , & ! standard unmasked halo + halo_info_mask ! masked halo + + integer (kind=int_kind), intent(in) :: & + field_loc, & ! field loc + field_type ! field_type + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & + fld1 , & ! fields to halo + fld2 ! + + ! local variables + + integer (kind=int_kind) :: & + iblk ! iblock + + real (kind=dbl_kind), dimension (nx_block,ny_block,2,max_blocks) :: & + fldbundle ! work array for boundary updates + + character(len=*), parameter :: subname = '(dyn_haloUpdate2)' + + call ice_timer_start(timer_bound) + ! 
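The halo_dynbundle default set in init_dyn above is an empirical switch: bundling several fields into one halo update saves message latency, but costs an extra pack/unpack copy, so it only pays off when the per-processor workload is small. A self-contained sketch of that decision (the 100-point threshold follows the patch; block sizes and processor count are made-up test values):

   program demo_bundle_heuristic
      implicit none
      integer :: nx_block, ny_block, nprocs
      logical :: halo_dynbundle
      nx_block = 32; ny_block = 32; nprocs = 16   ! assumed test values
      ! bundle by default; with many points per processor the local
      ! pack/unpack copies cost more than the saved halo messages
      halo_dynbundle = .true.
      if (nx_block*ny_block/nprocs > 100) halo_dynbundle = .false.
      print *, 'halo_dynbundle =', halo_dynbundle
   end program demo_bundle_heuristic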
single process performs better without bundling fields + if (halo_dynbundle) then + + call stack_fields(fld1, fld2, fldbundle) + if (maskhalo_dyn) then + call ice_HaloUpdate (fldbundle, halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fldbundle, halo_info , & + field_loc, field_type) + endif + call unstack_fields(fldbundle, fld1, fld2) + + else + + if (maskhalo_dyn) then + call ice_HaloUpdate (fld1 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fld1 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info , & + field_loc, field_type) + endif + + endif + call ice_timer_stop(timer_bound) + + end subroutine dyn_haloUpdate2 + +!======================================================================= +! Do a halo update on 3 fields + + subroutine dyn_haloUpdate3(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2, fld3) + + use ice_boundary, only: ice_halo, ice_HaloUpdate + use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + + type (ice_halo), intent(in) :: & + halo_info , & ! standard unmasked halo + halo_info_mask ! masked halo + + integer (kind=int_kind), intent(in) :: & + field_loc, & ! field loc + field_type ! field_type + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & + fld1 , & ! fields to halo + fld2 , & ! + fld3 ! + + ! local variables + + integer (kind=int_kind) :: & + iblk ! iblock + + real (kind=dbl_kind), dimension (nx_block,ny_block,3,max_blocks) :: & + fldbundle ! work array for boundary updates + + character(len=*), parameter :: subname = '(dyn_haloUpdate3)' + + call ice_timer_start(timer_bound) + ! single process performs better without bundling fields + if (halo_dynbundle) then + + call stack_fields(fld1, fld2, fld3, fldbundle) + if (maskhalo_dyn) then + call ice_HaloUpdate (fldbundle, halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fldbundle, halo_info , & + field_loc, field_type) + endif + call unstack_fields(fldbundle, fld1, fld2, fld3) + + else + + if (maskhalo_dyn) then + call ice_HaloUpdate (fld1 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld3 , halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fld1 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld3 , halo_info , & + field_loc, field_type) + endif + + endif + call ice_timer_stop(timer_bound) + + end subroutine dyn_haloUpdate3 + +!======================================================================= +! Do a halo update on 4 fields + + subroutine dyn_haloUpdate4(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2, fld3, fld4) + + use ice_boundary, only: ice_halo, ice_HaloUpdate + use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + + type (ice_halo), intent(in) :: & + halo_info , & ! standard unmasked halo + halo_info_mask ! masked halo + + integer (kind=int_kind), intent(in) :: & + field_loc, & ! field loc + field_type ! field_type + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & + fld1 , & ! fields to halo + fld2 , & ! + fld3 , & ! + fld4 ! + + ! 
local variables + + integer (kind=int_kind) :: & + iblk ! iblock + + real (kind=dbl_kind), dimension (nx_block,ny_block,4,max_blocks) :: & + fldbundle ! work array for boundary updates + + character(len=*), parameter :: subname = '(dyn_haloUpdate4)' + + call ice_timer_start(timer_bound) + ! single process performs better without bundling fields + if (halo_dynbundle) then + + call stack_fields(fld1, fld2, fld3, fld4, fldbundle) + if (maskhalo_dyn) then + call ice_HaloUpdate (fldbundle, halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fldbundle, halo_info , & + field_loc, field_type) + endif + call unstack_fields(fldbundle, fld1, fld2, fld3, fld4) + + else + + if (maskhalo_dyn) then + call ice_HaloUpdate (fld1 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld3 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld4 , halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fld1 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld3 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld4 , halo_info , & + field_loc, field_type) + endif + + endif + call ice_timer_stop(timer_bound) + + end subroutine dyn_haloUpdate4 + +!======================================================================= +! Do a halo update on 5 fields + + subroutine dyn_haloUpdate5(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2, fld3, fld4, fld5) + + use ice_boundary, only: ice_halo, ice_HaloUpdate + use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + + type (ice_halo), intent(in) :: & + halo_info , & ! standard unmasked halo + halo_info_mask ! masked halo + + integer (kind=int_kind), intent(in) :: & + field_loc, & ! field loc + field_type ! field_type + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & + fld1 , & ! fields to halo + fld2 , & ! + fld3 , & ! + fld4 , & ! + fld5 ! + + ! local variables + + integer (kind=int_kind) :: & + iblk ! iblock + + real (kind=dbl_kind), dimension (nx_block,ny_block,5,max_blocks) :: & + fldbundle ! work array for boundary updates + + character(len=*), parameter :: subname = '(dyn_haloUpdate5)' -! Load velocity components into array for boundary updates + call ice_timer_start(timer_bound) + ! 
single process performs better without bundling fields + if (halo_dynbundle) then - subroutine stack_velocity_field(uvel, vvel, fld2) + call stack_fields(fld1, fld2, fld3, fld4, fld5, fldbundle) + if (maskhalo_dyn) then + call ice_HaloUpdate (fldbundle, halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fldbundle, halo_info , & + field_loc, field_type) + endif + call unstack_fields(fldbundle, fld1, fld2, fld3, fld4, fld5) + + else + + if (maskhalo_dyn) then + call ice_HaloUpdate (fld1 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld3 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld4 , halo_info_mask, & + field_loc, field_type) + call ice_HaloUpdate (fld5 , halo_info_mask, & + field_loc, field_type) + else + call ice_HaloUpdate (fld1 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld2 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld3 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld4 , halo_info , & + field_loc, field_type) + call ice_HaloUpdate (fld5 , halo_info , & + field_loc, field_type) + endif + + endif + call ice_timer_stop(timer_bound) + + end subroutine dyn_haloUpdate5 + +!======================================================================= +! Load fields into array for boundary updates + + subroutine stack_fields2(fld1, fld2, fldbundle) + + use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound + + real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & + fld1 , & ! + fld2 ! + + real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) + + ! local variables + + integer (kind=int_kind) :: & + iblk ! block index + + character(len=*), parameter :: subname = '(stack_fields2)' + + call ice_timer_start(timer_bundbound) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + fldbundle(:,:,1,iblk) = fld1(:,:,iblk) + fldbundle(:,:,2,iblk) = fld2(:,:,iblk) + enddo + !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) + + end subroutine stack_fields2 + +!======================================================================= +! Load fields into array for boundary updates + + subroutine stack_fields3(fld1, fld2, fld3, fldbundle) + + use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound + + real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & + fld1 , & ! + fld2 , & ! + fld3 ! + + real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) + + ! local variables + + integer (kind=int_kind) :: & + iblk ! block index + + character(len=*), parameter :: subname = '(stack_fields3)' + + call ice_timer_start(timer_bundbound) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + fldbundle(:,:,1,iblk) = fld1(:,:,iblk) + fldbundle(:,:,2,iblk) = fld2(:,:,iblk) + fldbundle(:,:,3,iblk) = fld3(:,:,iblk) + enddo + !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) + + end subroutine stack_fields3 + +!======================================================================= +! 
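stack_fields, unstack_fields, and dyn_haloUpdate are each a single generic name backed by count-specific module procedures, so callers never spell out how many fields they are bundling; the compiler resolves the specific routine from the argument list. A self-contained miniature of that pattern (simplified shapes and names, illustrative only, not CICE code):

   module stack_demo
      implicit none
      interface stack            ! generic name, like stack_fields
         module procedure stack2, stack3
      end interface
   contains
      subroutine stack2(f1, f2, bundle)
         real, intent(in)  :: f1(:,:), f2(:,:)
         real, intent(out) :: bundle(:,:,:)   ! 3rd dim counts the fields
         bundle(:,:,1) = f1
         bundle(:,:,2) = f2
      end subroutine stack2
      subroutine stack3(f1, f2, f3, bundle)
         real, intent(in)  :: f1(:,:), f2(:,:), f3(:,:)
         real, intent(out) :: bundle(:,:,:)
         bundle(:,:,1) = f1
         bundle(:,:,2) = f2
         bundle(:,:,3) = f3
      end subroutine stack3
   end module stack_demo

   program demo_generic
      use stack_demo
      implicit none
      real :: a(2,2), b(2,2), c(2,2), w2(2,2,2), w3(2,2,3)
      a = 1.; b = 2.; c = 3.
      call stack(a, b, w2)       ! resolves to stack2 by argument count
      call stack(a, b, c, w3)    ! resolves to stack3
      print *, w2(1,1,:), w3(1,1,:)
   end program demo_generic

This is why adding a 5-field variant in the patch only required one new specific procedure per interface, with no changes at existing call sites.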
Load fields into array for boundary updates + + subroutine stack_fields4(fld1, fld2, fld3, fld4, fldbundle) use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & - uvel , & ! u components of velocity vector - vvel ! v components of velocity vector + real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & + fld1 , & ! + fld2 , & ! + fld3 , & ! + fld4 ! - real (kind=dbl_kind), dimension (nx_block,ny_block,2,max_blocks), intent(out) :: & - fld2 ! work array for boundary updates + real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) ! local variables integer (kind=int_kind) :: & iblk ! block index - character(len=*), parameter :: subname = '(stack_velocity_field)' + character(len=*), parameter :: subname = '(stack_fields4)' - ! load velocity into array for boundary updates + call ice_timer_start(timer_bundbound) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - fld2(:,:,1,iblk) = uvel(:,:,iblk) - fld2(:,:,2,iblk) = vvel(:,:,iblk) + fldbundle(:,:,1,iblk) = fld1(:,:,iblk) + fldbundle(:,:,2,iblk) = fld2(:,:,iblk) + fldbundle(:,:,3,iblk) = fld3(:,:,iblk) + fldbundle(:,:,4,iblk) = fld4(:,:,iblk) enddo !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) - end subroutine stack_velocity_field + end subroutine stack_fields4 !======================================================================= +! Load fields into array for boundary updates + + subroutine stack_fields5(fld1, fld2, fld3, fld4, fld5, fldbundle) + + use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound + + real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & + fld1 , & ! + fld2 , & ! + fld3 , & ! + fld4 , & ! + fld5 ! + + real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) + + ! local variables + + integer (kind=int_kind) :: & + iblk ! block index + + character(len=*), parameter :: subname = '(stack_fields5)' + + call ice_timer_start(timer_bundbound) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + fldbundle(:,:,1,iblk) = fld1(:,:,iblk) + fldbundle(:,:,2,iblk) = fld2(:,:,iblk) + fldbundle(:,:,3,iblk) = fld3(:,:,iblk) + fldbundle(:,:,4,iblk) = fld4(:,:,iblk) + fldbundle(:,:,5,iblk) = fld5(:,:,iblk) + enddo + !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) -! Unload velocity components from array after boundary updates + end subroutine stack_fields5 + +!======================================================================= +! Unload fields from array after boundary updates + + subroutine unstack_fields2(fldbundle, fld1, fld2) + + use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound + + real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) + + real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & + fld1 , & ! + fld2 ! + + ! local variables + + integer (kind=int_kind) :: & + iblk ! 
block index + + character(len=*), parameter :: subname = '(unstack_fields2)' + + call ice_timer_start(timer_bundbound) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + fld1(:,:,iblk) = fldbundle(:,:,1,iblk) + fld2(:,:,iblk) = fldbundle(:,:,2,iblk) + enddo + !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) + + end subroutine unstack_fields2 + +!======================================================================= +! Unload fields from array after boundary updates + + subroutine unstack_fields3(fldbundle, fld1, fld2, fld3) + + use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound + + real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) + + real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & + fld1 , & ! + fld2 , & ! + fld3 ! + + ! local variables + + integer (kind=int_kind) :: & + iblk ! block index + + character(len=*), parameter :: subname = '(unstack_fields3)' + + call ice_timer_start(timer_bundbound) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + fld1(:,:,iblk) = fldbundle(:,:,1,iblk) + fld2(:,:,iblk) = fldbundle(:,:,2,iblk) + fld3(:,:,iblk) = fldbundle(:,:,3,iblk) + enddo + !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) + + end subroutine unstack_fields3 + +!======================================================================= +! Unload fields from array after boundary updates + + subroutine unstack_fields4(fldbundle, fld1, fld2, fld3, fld4) + + use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound + + real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) + + real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & + fld1 , & ! + fld2 , & ! + fld3 , & ! + fld4 ! + + ! local variables + + integer (kind=int_kind) :: & + iblk ! block index + + character(len=*), parameter :: subname = '(unstack_fields4)' + + call ice_timer_start(timer_bundbound) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + fld1(:,:,iblk) = fldbundle(:,:,1,iblk) + fld2(:,:,iblk) = fldbundle(:,:,2,iblk) + fld3(:,:,iblk) = fldbundle(:,:,3,iblk) + fld4(:,:,iblk) = fldbundle(:,:,4,iblk) + enddo + !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) + + end subroutine unstack_fields4 + +!======================================================================= +! Unload fields from array after boundary updates - subroutine unstack_velocity_field(fld2, uvel, vvel) + subroutine unstack_fields5(fldbundle, fld1, fld2, fld3, fld4, fld5) use ice_domain, only: nblocks + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound - real (kind=dbl_kind), dimension (nx_block,ny_block,2,max_blocks), intent(in) :: & - fld2 ! work array for boundary updates + real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & + fldbundle ! work array for boundary updates (i,j,n,iblk) - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(out) :: & - uvel , & ! u components of velocity vector - vvel ! v components of velocity vector + real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & + fld1 , & ! + fld2 , & ! + fld3 , & ! + fld4 , & ! + fld5 ! ! local variables integer (kind=int_kind) :: & iblk ! block index - character(len=*), parameter :: subname = '(unstack_velocity_field)' + character(len=*), parameter :: subname = '(unstack_fields5)' - ! 
Unload velocity from array after boundary updates + call ice_timer_start(timer_bundbound) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - uvel(:,:,iblk) = fld2(:,:,1,iblk) - vvel(:,:,iblk) = fld2(:,:,2,iblk) + fld1(:,:,iblk) = fldbundle(:,:,1,iblk) + fld2(:,:,iblk) = fldbundle(:,:,2,iblk) + fld3(:,:,iblk) = fldbundle(:,:,3,iblk) + fld4(:,:,iblk) = fldbundle(:,:,4,iblk) + fld5(:,:,iblk) = fldbundle(:,:,5,iblk) enddo !$OMP END PARALLEL DO + call ice_timer_stop(timer_bundbound) - end subroutine unstack_velocity_field + end subroutine unstack_fields5 !======================================================================= diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 690647f15..9621db4b1 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -48,7 +48,7 @@ module ice_dyn_vp use ice_dyn_shared, only: dyn_prep1, dyn_prep2, dyn_finish, & cosw, sinw, fcor_blk, uvel_init, vvel_init, & seabed_stress_factor_LKD, seabed_stress_factor_prob, seabed_stress_method, & - seabed_stress, Ktens, stack_velocity_field, unstack_velocity_field + seabed_stress, Ktens, stack_fields, unstack_fields use ice_fileunits, only: nu_diag use ice_flux, only: fm use ice_global_reductions, only: global_sum, global_allreduce_sum @@ -406,10 +406,10 @@ subroutine implicit_solver (dt) call ice_HaloUpdate (strength, halo_info, & field_loc_center, field_type_scalar) ! velocities may have changed in dyn_prep2 - call stack_velocity_field(uvel, vvel, fld2) + call stack_fields(uvel, vvel, fld2) call ice_HaloUpdate (fld2, halo_info, & field_loc_NEcorner, field_type_vector) - call unstack_velocity_field(fld2, uvel, vvel) + call unstack_fields(fld2, uvel, vvel) call ice_timer_stop(timer_bound) if (maskhalo_dyn) then @@ -1107,7 +1107,7 @@ subroutine anderson_solver (icellt , icellu, & uvel (:,:,:), vvel (:,:,:)) ! Do halo update so that halo cells contain up to date info for advection - call stack_velocity_field(uvel, vvel, fld2) + call stack_fields(uvel, vvel, fld2) call ice_timer_start(timer_bound) if (maskhalo_dyn) then call ice_HaloUpdate (fld2, halo_info_mask, & @@ -1117,7 +1117,7 @@ subroutine anderson_solver (icellt , icellu, & field_loc_NEcorner, field_type_vector) endif call ice_timer_stop(timer_bound) - call unstack_velocity_field(fld2, uvel, vvel) + call unstack_fields(fld2, uvel, vvel) ! Compute "progress" residual norm !$OMP PARALLEL DO PRIVATE(iblk) @@ -2909,7 +2909,7 @@ subroutine fgmres (zetax2 , etax2 , & orig_basis_y(:,:,:,initer) = workspace_y ! Update workspace with boundary values - call stack_velocity_field(workspace_x, workspace_y, fld2) + call stack_fields(workspace_x, workspace_y, fld2) call ice_timer_start(timer_bound) if (maskhalo_dyn) then call ice_HaloUpdate (fld2, halo_info_mask, & @@ -2919,7 +2919,7 @@ subroutine fgmres (zetax2 , etax2 , & field_loc_NEcorner, field_type_vector) endif call ice_timer_stop(timer_bound) - call unstack_velocity_field(fld2, workspace_x, workspace_y) + call unstack_fields(fld2, workspace_x, workspace_y) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 index bc14e30d3..abec3758f 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 @@ -62,6 +62,7 @@ module ice_timers timer_sndrcv, &! time between send to receive #endif timer_bound, &! 
boundary updates + timer_bundbound, &! boundary updates bundling timer_bgc, &! biogeochemistry timer_forcing, &! forcing timer_evp_1d, &! timer only loop @@ -192,6 +193,7 @@ subroutine init_ice_timers call get_ice_timer(timer_diags, 'Diags ',nblocks,distrb_info%nprocs) call get_ice_timer(timer_hist, 'History ',nblocks,distrb_info%nprocs) call get_ice_timer(timer_bound, 'Bound', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_bundbound,'Bundbound',nblocks,distrb_info%nprocs) call get_ice_timer(timer_bgc, 'BGC', nblocks,distrb_info%nprocs) call get_ice_timer(timer_forcing, 'Forcing', nblocks,distrb_info%nprocs) #if (defined CESMCOUPLED) diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 index b18c35040..be6e12253 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 @@ -54,6 +54,7 @@ module ice_timers timer_diags, &! diagnostics/history timer_hist, &! diagnostics/history timer_bound, &! boundary updates + timer_bundbound, &! boundary updates timer_bgc, &! biogeochemistry timer_forcing, &! forcing timer_evp_1d, &! timer only loop @@ -206,6 +207,7 @@ subroutine init_ice_timers call get_ice_timer(timer_diags, 'Diags ',nblocks,distrb_info%nprocs) call get_ice_timer(timer_hist, 'History ',nblocks,distrb_info%nprocs) call get_ice_timer(timer_bound, 'Bound', nblocks,distrb_info%nprocs) + call get_ice_timer(timer_bundbound,'Bundbound',nblocks,distrb_info%nprocs) call get_ice_timer(timer_bgc, 'BGC', nblocks,distrb_info%nprocs) call get_ice_timer(timer_forcing, 'Forcing', nblocks,distrb_info%nprocs) call get_ice_timer(timer_evp_1d, '1d-evp', nblocks,distrb_info%nprocs) diff --git a/cicecore/cicedynB/infrastructure/ice_domain.F90 b/cicecore/cicedynB/infrastructure/ice_domain.F90 index 0d0502e85..79f5bcb9a 100644 --- a/cicecore/cicedynB/infrastructure/ice_domain.F90 +++ b/cicecore/cicedynB/infrastructure/ice_domain.F90 @@ -63,6 +63,7 @@ module ice_domain maskhalo_dyn , & ! if true, use masked halo updates for dynamics maskhalo_remap , & ! if true, use masked halo updates for transport maskhalo_bound , & ! if true, use masked halo updates for bound_state + halo_dynbundle , & ! if true, bundle halo update in dynamics landblockelim , & ! if true, land block elimination is on orca_halogrid ! if true, input fields are haloed as defined by orca grid @@ -155,6 +156,7 @@ subroutine init_domain_blocks maskhalo_dyn = .false. ! if true, use masked halos for dynamics maskhalo_remap = .false. ! if true, use masked halos for transport maskhalo_bound = .false. ! if true, use masked halos for bound_state + halo_dynbundle = .true. ! if true, bundle halo updates in dynamics add_mpi_barriers = .false. ! if true, throttle communication debug_blocks = .false. ! if true, print verbose block information max_blocks = -1 ! 
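The new Bundbound timer registered above follows the standard ice_timers usage: register a named timer once with get_ice_timer, then bracket the measured region with ice_timer_start/ice_timer_stop pairs, which accumulate elapsed time across calls. A self-contained miniature of that accumulate-on-stop pattern built on the system_clock intrinsic (illustrative only; this is not the ice_timers implementation):

   module mini_timer
      implicit none
      integer, parameter :: i8 = selected_int_kind(18)
      integer(i8), save :: t0 = 0, accum = 0, rate = 1
   contains
      subroutine timer_start()
         call system_clock(t0, rate)
      end subroutine timer_start
      subroutine timer_stop()
         integer(i8) :: t1
         call system_clock(t1)
         accum = accum + (t1 - t0)   ! accumulate over start/stop pairs
      end subroutine timer_stop
   end module mini_timer

   program demo_timer
      use mini_timer
      implicit none
      integer :: i
      real    :: x
      x = 0.
      call timer_start()
      do i = 1, 1000000
         x = x + sin(real(i))       ! stand-in for the timed work
      end do
      call timer_stop()
      print *, 'seconds =', real(accum)/real(rate), ' checksum =', x
   end program demo_timer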
max number of blocks per processor diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 9fc5069d1..857444bc9 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -1294,15 +1294,17 @@ The timers use *MPI\_WTIME* for parallel runs and the F90 intrinsic +--------------+-------------+----------------------------------------------------+ | 14 | Bound | boundary conditions and subdomain communications | +--------------+-------------+----------------------------------------------------+ - | 15 | BGC | biogeochemistry, part of Thermo timer | + | 15 | BundBound | halo update bundle copy | +--------------+-------------+----------------------------------------------------+ - | 16 | Forcing | forcing | + | 16 | BGC | biogeochemistry, part of Thermo timer | +--------------+-------------+----------------------------------------------------+ - | 17 | 1d-evp | 1d evp, part of Dynamics timer | + | 17 | Forcing | forcing | +--------------+-------------+----------------------------------------------------+ - | 18 | 2d-evp | 2d evp, part of Dynamics timer | + | 18 | 1d-evp | 1d evp, part of Dynamics timer | +--------------+-------------+----------------------------------------------------+ - | 19 | UpdState | update state | + | 19 | 2d-evp | 2d evp, part of Dynamics timer | + +--------------+-------------+----------------------------------------------------+ + | 20 | UpdState | update state | +--------------+-------------+----------------------------------------------------+ .. _restartfiles: From ec00315e86630e692013509e809199e9b8f1bc01 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Tue, 22 Mar 2022 12:26:45 -0700 Subject: [PATCH 096/109] Clean up whitespace, indentation in dynamics files (#84) Disable grid_ice="CD", enable undocumented option "C_override_D" for testing Add check that grid_ice="C" is only chosen with kdyn=1 (evp) or kdyn<=0 (off) Disable 6 tests in omp_suite that will no longer run, C/CD tests with eap or vp Update documentation of grid_ice. --- cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 1491 ++++++------ cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 1170 +++++----- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 643 +++--- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 368 +-- .../dynamics/ice_transport_driver.F90 | 1119 +++++---- .../cicedynB/dynamics/ice_transport_remap.F90 | 2040 +++++++++-------- cicecore/cicedynB/general/ice_flux.F90 | 6 +- cicecore/cicedynB/general/ice_init.F90 | 37 +- cicecore/cicedynB/infrastructure/ice_grid.F90 | 6 +- configuration/scripts/options/set_nml.gridcd | 2 +- configuration/scripts/tests/omp_suite.ts | 24 +- doc/source/cice_index.rst | 2 +- doc/source/user_guide/ug_case_settings.rst | 2 +- doc/source/user_guide/ug_implementation.rst | 8 +- 14 files changed, 3475 insertions(+), 3443 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index 121c0f85e..2b356ace0 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -5,14 +5,14 @@ ! ! See: ! -! Wilchinsky, A.V. and D.L. Feltham (2006). Modelling the rheology of -! sea ice as a collection of diamond-shaped floes. +! Wilchinsky, A.V. and D.L. Feltham (2006). Modelling the rheology of +! sea ice as a collection of diamond-shaped floes. ! Journal of Non-Newtonian Fluid Mechanics, 138(1), 22-32. ! ! Tsamados, M., D.L. Feltham, and A.V. Wilchinsky (2013). Impact on new ! 
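[The consistency check added by this commit rejects unsupported solver/grid pairings at startup instead of letting a C-grid run proceed with eap or vp. A sketch of the guard, assuming it sits with the other namelist checks; placement and message text are illustrative, only the condition comes from the commit message:

      ! grid_ice = 'C' currently requires EVP dynamics or dynamics off
      if (grid_ice == 'C' .and. .not. (kdyn == 1 .or. kdyn <= 0)) then
         if (my_task == master_task) write(nu_diag,*) &
            subname//' ERROR: grid_ice = C requires kdyn = 1 (evp) or kdyn <= 0 (off)'
         call abort_ice (subname//' ERROR: invalid grid_ice/kdyn combination')
      endif
]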
anisotropic rheology on simulations of Arctic sea ice. JGR, 118, 91-107. ! -! authors: Michel Tsamados, CPOM +! authors: Michel Tsamados, CPOM ! David Schroeder, CPOM module ice_dyn_eap @@ -40,30 +40,30 @@ module ice_dyn_eap alloc_dyn_eap ! Look-up table needed for calculating structure tensor - integer (int_kind), parameter :: & - nx_yield = 41, & - ny_yield = 41, & - na_yield = 21 + integer (int_kind), parameter :: & + nx_yield = 41, & + ny_yield = 41, & + na_yield = 21 - real (kind=dbl_kind), dimension (nx_yield,ny_yield,na_yield) :: & - s11r, s12r, s22r, s11s, s12s, s22s + real (kind=dbl_kind), dimension (nx_yield,ny_yield,na_yield) :: & + s11r, s12r, s22r, s11s, s12s, s22s real (kind=dbl_kind), dimension (:,:,:), allocatable :: & - a11_1, a11_2, a11_3, a11_4, & ! components of - a12_1, a12_2, a12_3, a12_4 ! structure tensor + a11_1, a11_2, a11_3, a11_4, & ! components of + a12_1, a12_2, a12_3, a12_4 ! structure tensor ! history real (kind=dbl_kind), dimension(:,:,:), allocatable, public :: & - e11 , & ! components of strain rate tensor (1/s) - e12 , & - e22 , & + e11 , & ! components of strain rate tensor (1/s) + e12 , & + e22 , & yieldstress11, & ! components of yield stress tensor (kg/s^2) yieldstress12, & yieldstress22, & - s11 , & ! components of stress tensor (kg/s^2) - s12 , & - s22 , & - a11 , & ! components of structure tensor () + s11 , & ! components of stress tensor (kg/s^2) + s12 , & + s22 , & + a11 , & ! components of structure tensor () a12 ! private for reuse, set in init_eap @@ -76,13 +76,14 @@ module ice_dyn_eap contains !======================================================================= -! -! Allocate space for all variables +! Allocate space for all variables ! subroutine alloc_dyn_eap integer (int_kind) :: ierr + character(len=*), parameter :: subname = '(alloc_dyn_eap)' + allocate( a11_1 (nx_block,ny_block,max_blocks), & a11_2 (nx_block,ny_block,max_blocks), & a11_3 (nx_block,ny_block,max_blocks), & @@ -103,12 +104,11 @@ subroutine alloc_dyn_eap a11 (nx_block,ny_block,max_blocks), & a12 (nx_block,ny_block,max_blocks), & stat=ierr) - if (ierr/=0) call abort_ice('(alloc_dyn_eap): Out of memory') + if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory') end subroutine alloc_dyn_eap !======================================================================= -! ! Elastic-anisotropic-plastic dynamics driver ! based on subroutine evp @@ -116,9 +116,9 @@ subroutine eap (dt) #ifdef CICE_IN_NEMO ! Wind stress is set during this routine from the values supplied -! via NEMO (unless calc_strair is true). These values are supplied -! rotated on u grid and multiplied by aice. strairxT = 0 in this -! case so operations in dyn_prep1 are pointless but carried out to +! via NEMO (unless calc_strair is true). These values are supplied +! rotated on u grid and multiplied by aice. strairxT = 0 in this +! case so operations in dyn_prep1 are pointless but carried out to ! minimise code changes. #endif @@ -156,54 +156,56 @@ subroutine eap (dt) ! local variables - integer (kind=int_kind) :: & - ksub , & ! subcycle step - iblk , & ! block index + integer (kind=int_kind) :: & + ksub , & ! subcycle step + iblk , & ! block index ilo,ihi,jlo,jhi, & ! beginning and end of physical domain i, j, ij - integer (kind=int_kind), dimension(max_blocks) :: & - icellt , & ! no. of cells where icetmask = 1 - icellu ! no. of cells where iceumask = 1 + integer (kind=int_kind), dimension(max_blocks) :: & + icellt , & ! no. of cells where icetmask = 1 + icellu ! no. 
of cells where iceumask = 1 integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks) :: & - indxti , & ! compressed index in i-direction - indxtj , & ! compressed index in j-direction - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxti , & ! compressed index in i-direction + indxtj , & ! compressed index in j-direction + indxui , & ! compressed index in i-direction + indxuj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - uocnU , & ! i ocean current (m/s) - vocnU , & ! j ocean current (m/s) - tmass , & ! total mass of ice and snow (kg/m^2) - waterx , & ! for ocean stress calculation, x (m/s) - watery , & ! for ocean stress calculation, y (m/s) - forcex , & ! work array: combined atm stress and ocn tilt, x - forcey , & ! work array: combined atm stress and ocn tilt, y - aiu , & ! ice fraction on u-grid - umass , & ! total mass of ice and snow (u grid) - umassdti ! mass of U-cell/dte (kg/m^2 s) - - real (kind=dbl_kind), allocatable :: fld2(:,:,:,:) + uocnU , & ! i ocean current (m/s) + vocnU , & ! j ocean current (m/s) + tmass , & ! total mass of ice and snow (kg/m^2) + waterx , & ! for ocean stress calculation, x (m/s) + watery , & ! for ocean stress calculation, y (m/s) + forcex , & ! work array: combined atm stress and ocn tilt, x + forcey , & ! work array: combined atm stress and ocn tilt, y + aiu , & ! ice fraction on u-grid + umass , & ! total mass of ice and snow (u grid) + umassdti ! mass of U-cell/dte (kg/m^2 s) + + real (kind=dbl_kind), allocatable :: & + fld2(:,:,:,:) ! temporary for stacking fields for halo update real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & - strtmp ! stress combinations for momentum equation + strtmp ! stress combinations for momentum equation - logical (kind=log_kind) :: calc_strair + logical (kind=log_kind) :: & + calc_strair integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & - icetmask, & ! ice extent mask (T-cell) - halomask ! ice mask for halo update + icetmask , & ! ice extent mask (T-cell) + halomask ! ice mask for halo update type (ice_halo) :: & halo_info_mask ! ghost cell update info for masked halo type (block) :: & - this_block ! block information for current block - + this_block ! block information for current block + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1, & ! temporary - work2 ! temporary + work1 , & ! temporary + work2 ! temporary character(len=*), parameter :: subname = '(eap)' @@ -220,12 +222,12 @@ subroutine eap (dt) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) SCHEDULE(runtime) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - rdg_conv (i,j,iblk) = c0 -! rdg_shear(i,j,iblk) = c0 - divu (i,j,iblk) = c0 - shear(i,j,iblk) = c0 + do j = 1, ny_block + do i = 1, nx_block + rdg_conv (i,j,iblk) = c0 +! rdg_shear(i,j,iblk) = c0 + divu (i,j,iblk) = c0 + shear(i,j,iblk) = c0 e11(i,j,iblk) = c0 e12(i,j,iblk) = c0 e22(i,j,iblk) = c0 @@ -238,20 +240,20 @@ subroutine eap (dt) enddo enddo - !----------------------------------------------------------------- - ! preparation for dynamics - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! 
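[The indx* arrays above carry a compressed list of active cells so the stress and momentum loops touch only points that actually hold ice. dyn_prep2 fills them with a standard gather; a minimal sketch of the idiom, using the names declared above:

      ! gather every cell with icetmask set into a 1D index list (sketch)
      icellt(iblk) = 0
      do j = jlo, jhi
      do i = ilo, ihi
         if (icetmask(i,j,iblk) /= 0) then
            icellt(iblk) = icellt(iblk) + 1
            indxti(icellt(iblk),iblk) = i
            indxtj(icellt(iblk),iblk) = j
         endif
      enddo
      enddo
]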
preparation for dynamics + !----------------------------------------------------------------- - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - call dyn_prep1 (nx_block, ny_block, & + call dyn_prep1 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - aice (:,:,iblk), vice (:,:,iblk), & - vsno (:,:,iblk), tmask (:,:,iblk), & + aice (:,:,iblk), vice (:,:,iblk), & + vsno (:,:,iblk), tmask (:,:,iblk), & tmass (:,:,iblk), icetmask(:,:,iblk)) enddo ! iblk @@ -266,15 +268,16 @@ subroutine eap (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('F',tmass,'T',umass,'U') - call grid_average_X2Y('F',aice_init,'T',aiu,'U') - call grid_average_X2Y('S',uocn,grid_ocn_dynu,uocnU,'U') - call grid_average_X2Y('S',vocn,grid_ocn_dynv,vocnU,'U') + call grid_average_X2Y('F', tmass , 'T' , umass, 'U') + call grid_average_X2Y('F', aice_init, 'T' , aiu , 'U') + call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnU, 'U') + call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnU, 'U') !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing ! This wind stress is rotated on u grid and multiplied by aice !---------------------------------------------------------------- + call icepack_query_parameters(calc_strair_out=calc_strair) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -288,61 +291,61 @@ subroutine eap (dt) field_loc_center, field_type_vector) call ice_HaloUpdate (strairyT, halo_info, & field_loc_center, field_type_vector) - call grid_average_X2Y('F',strairxT,'T',strairx,'U') - call grid_average_X2Y('F',strairyT,'T',strairy,'U') + call grid_average_X2Y('F', strairxT, 'T', strairx, 'U') + call grid_average_X2Y('F', strairyT, 'T', strairy, 'U') endif !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) do iblk = 1, nblocks - !----------------------------------------------------------------- - ! more preparation for dynamics - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! 
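[grid_average_X2Y maps T-cell quantities onto the U point at the NE corner of each T cell, which is surrounded by the four T points (i,j), (i+1,j), (i,j+1) and (i+1,j+1). A sketch of one plausible 4-point stencil with area weighting; the real routine also applies masking and distinguishes the 'F' and 'S' flavors used above:

      ! NE-corner average of the four neighboring T cells (sketch)
      uval(i,j) = (tval(i  ,j  )*tarea(i  ,j  )  &
                 + tval(i+1,j  )*tarea(i+1,j  )  &
                 + tval(i  ,j+1)*tarea(i  ,j+1)  &
                 + tval(i+1,j+1)*tarea(i+1,j+1)) &
                / (tarea(i  ,j  ) + tarea(i+1,j  )  &
                 + tarea(i  ,j+1) + tarea(i+1,j+1))
]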
more preparation for dynamics + !----------------------------------------------------------------- - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - call dyn_prep2 (nx_block, ny_block, & + call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt(iblk), icellu(iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & - aiu (:,:,iblk), umass (:,:,iblk), & - umassdti (:,:,iblk), fcor_blk (:,:,iblk), & - umask (:,:,iblk), & + icellt (iblk), icellu (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + aiu (:,:,iblk), umass (:,:,iblk), & + umassdti (:,:,iblk), fcor_blk (:,:,iblk), & + umask (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & - ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & - fm (:,:,iblk), dt, & - strtltx (:,:,iblk), strtlty (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - taubx (:,:,iblk), tauby (:,:,iblk), & - waterx (:,:,iblk), watery (:,:,iblk), & - forcex (:,:,iblk), forcey (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + strairx (:,:,iblk), strairy (:,:,iblk), & + ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & + icetmask (:,:,iblk), iceumask (:,:,iblk), & + fm (:,:,iblk), dt, & + strtltx (:,:,iblk), strtlty (:,:,iblk), & + strocnx (:,:,iblk), strocny (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + taubx (:,:,iblk), tauby (:,:,iblk), & + waterx (:,:,iblk), watery (:,:,iblk), & + forcex (:,:,iblk), forcey (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & uvel_init (:,:,iblk), vvel_init (:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & Tbu (:,:,iblk)) - !----------------------------------------------------------------- - ! Initialize structure tensor - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! Initialize structure tensor + !----------------------------------------------------------------- do j = 1, ny_block do i = 1, nx_block if (icetmask(i,j,iblk)==0) then if (tmask(i,j,iblk)) then - ! structure tensor + ! structure tensor a11_1(i,j,iblk) = p5 a11_2(i,j,iblk) = p5 a11_3(i,j,iblk) = p5 @@ -361,21 +364,21 @@ subroutine eap (dt) enddo ! i enddo ! j - !----------------------------------------------------------------- - ! ice strength - ! New strength used in Ukita Moritz rheology - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! ice strength + ! New strength used in Ukita Moritz rheology + !----------------------------------------------------------------- strength(:,:,iblk) = c0 ! 
initialize do ij = 1, icellt(iblk) i = indxti(ij, iblk) j = indxtj(ij, iblk) call icepack_ice_strength(ncat=ncat, & - aice = aice (i,j, iblk), & - vice = vice (i,j, iblk), & - aice0 = aice0 (i,j, iblk), & - aicen = aicen (i,j,:,iblk), & - vicen = vicen (i,j,:,iblk), & + aice = aice (i,j, iblk), & + vice = vice (i,j, iblk), & + aice0 = aice0 (i,j, iblk), & + aicen = aicen (i,j,:,iblk), & + vicen = vicen (i,j,:,iblk), & strength = strength(i,j, iblk) ) enddo ! ij enddo ! iblk @@ -406,39 +409,39 @@ subroutine eap (dt) endif !----------------------------------------------------------------- - ! seabed stress factor Tbu (Tbu is part of Cb coefficient) + ! seabed stress factor Tbu (Tbu is part of Cb coefficient) !----------------------------------------------------------------- - + if (seabed_stress) then if ( seabed_stress_method == 'LKD' ) then !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - call seabed_stress_factor_LKD (nx_block, ny_block, & - icellu (iblk), & - indxui(:,iblk), indxuj(:,iblk), & - vice(:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), Tbu(:,:,iblk)) + call seabed_stress_factor_LKD (nx_block , ny_block , & + icellu (iblk), & + indxui (:,iblk), indxuj(:,iblk), & + vice (:,:,iblk), aice(:,:,iblk), & + hwater(:,:,iblk), Tbu (:,:,iblk)) enddo !$OMP END PARALLEL DO elseif ( seabed_stress_method == 'probabilistic' ) then !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - call seabed_stress_factor_prob (nx_block, ny_block, & + call seabed_stress_factor_prob (nx_block , ny_block , & icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & - aicen(:,:,:,iblk), vicen(:,:,:,iblk), & - hwater(:,:,iblk), Tbu(:,:,iblk)) + aicen(:,:,:,iblk), vicen(:,:,:,iblk), & + hwater (:,:,iblk), Tbu (:,:,iblk)) enddo !$OMP END PARALLEL DO endif endif - + do ksub = 1,ndte ! subcycling - !----------------------------------------------------------------- - ! stress tensor equation, total surface stress - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! stress tensor equation, total surface stress + !----------------------------------------------------------------- !$OMP PARALLEL DO PRIVATE(iblk,strtmp) SCHEDULE(runtime) do iblk = 1, nblocks @@ -446,19 +449,19 @@ subroutine eap (dt) ! call ice_timer_start(timer_tmp1,iblk) call stress_eap (nx_block, ny_block, & ksub, ndte, & - icellt(iblk), & + icellt (iblk), & indxti (:,iblk), indxtj (:,iblk), & - arlx1i, denom1, & + arlx1i, denom1, & uvel (:,:,iblk), vvel (:,:,iblk), & dxt (:,:,iblk), dyt (:,:,iblk), & dxhy (:,:,iblk), dyhx (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & cxm (:,:,iblk), cym (:,:,iblk), & tarear (:,:,iblk), strength (:,:,iblk), & - a11_1 (:,:,iblk), a11_2 (:,:,iblk), & - a11_3 (:,:,iblk), a11_4 (:,:,iblk), & - a12_1 (:,:,iblk), a12_2 (:,:,iblk), & - a12_3 (:,:,iblk), a12_4 (:,:,iblk), & + a11_1 (:,:,iblk), a11_2 (:,:,iblk), & + a11_3 (:,:,iblk), a11_4 (:,:,iblk), & + a12_1 (:,:,iblk), a12_2 (:,:,iblk), & + a12_3 (:,:,iblk), a12_4 (:,:,iblk), & stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & @@ -470,53 +473,53 @@ subroutine eap (dt) e22 (:,:,iblk), & s11 (:,:,iblk), s12 (:,:,iblk), & s22 (:,:,iblk), & - yieldstress11 (:,:,iblk), & - yieldstress12 (:,:,iblk), & - yieldstress22 (:,:,iblk), & -! 
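[The subcycling loop entered above runs ndte short elastic steps per dynamics timestep dt; dtei, handed to stepa further on, is the inverse elastic step. A sketch of the relationship, assuming the usual CICE convention set in ice_dyn_shared:

      ! elastic (subcycling) timestep, sketch of the convention
      dte  = dt/real(ndte,kind=dbl_kind)   ! s, elastic step
      dtei = c1/dte                        ! 1/s, used by stepa
]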
rdg_conv (:,:,iblk), rdg_shear (:,:,iblk), & + yieldstress11 (:,:,iblk), & + yieldstress12 (:,:,iblk), & + yieldstress22 (:,:,iblk), & +! rdg_conv (:,:,iblk), rdg_shear (:,:,iblk), & rdg_conv (:,:,iblk), & strtmp (:,:,:)) ! call ice_timer_stop(timer_tmp1,iblk) - !----------------------------------------------------------------- - ! momentum equation - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! momentum equation + !----------------------------------------------------------------- ! call ice_timer_start(timer_tmp2,iblk) - call stepu (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & - aiu (:,:,iblk), strtmp (:,:,:), & - uocnU (:,:,iblk), vocnU (:,:,iblk), & - waterx (:,:,iblk), watery (:,:,iblk), & - forcex (:,:,iblk), forcey (:,:,iblk), & - umassdti (:,:,iblk), fm (:,:,iblk), & - uarear (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - taubx (:,:,iblk), tauby (:,:,iblk), & - uvel_init(:,:,iblk), vvel_init(:,:,iblk),& - uvel (:,:,iblk), vvel (:,:,iblk), & + call stepu (nx_block, ny_block, & + icellu (iblk), Cdn_ocn (:,:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + aiu (:,:,iblk), strtmp (:,:,:), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & + waterx (:,:,iblk), watery (:,:,iblk), & + forcex (:,:,iblk), forcey (:,:,iblk), & + umassdti (:,:,iblk), fm (:,:,iblk), & + uarear (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + taubx (:,:,iblk), tauby (:,:,iblk), & + uvel_init(:,:,iblk), vvel_init(:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & Tbu (:,:,iblk)) ! call ice_timer_stop(timer_tmp2,iblk) - !----------------------------------------------------------------- - ! evolution of structure tensor A - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! evolution of structure tensor A + !----------------------------------------------------------------- ! call ice_timer_start(timer_tmp3,iblk) if (mod(ksub,10) == 1) then ! 
only called every 10th timestep - call stepa (nx_block, ny_block, & - dtei, icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - a11 (:,:,iblk), a12 (:,:,iblk), & - a11_1 (:,:,iblk), a11_2 (:,:,iblk), & - a11_3 (:,:,iblk), a11_4 (:,:,iblk), & - a12_1 (:,:,iblk), a12_2 (:,:,iblk), & - a12_3 (:,:,iblk), a12_4 (:,:,iblk), & - stressp_1(:,:,iblk), stressp_2(:,:,iblk), & - stressp_3(:,:,iblk), stressp_4(:,:,iblk), & - stressm_1(:,:,iblk), stressm_2(:,:,iblk), & - stressm_3(:,:,iblk), stressm_4(:,:,iblk), & + call stepa (nx_block , ny_block , & + dtei , icellt (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + a11 (:,:,iblk), a12 (:,:,iblk), & + a11_1 (:,:,iblk), a11_2 (:,:,iblk), & + a11_3 (:,:,iblk), a11_4 (:,:,iblk), & + a12_1 (:,:,iblk), a12_2 (:,:,iblk), & + a12_3 (:,:,iblk), a12_4 (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & stress12_1(:,:,iblk), stress12_2(:,:,iblk), & stress12_3(:,:,iblk), stress12_4(:,:,iblk)) endif @@ -548,11 +551,11 @@ subroutine eap (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - call dyn_finish & - (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & + call dyn_finish & + (nx_block, ny_block, & + icellu (iblk), Cdn_ocn (:,:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & aiu (:,:,iblk), fm (:,:,iblk), & strocnx (:,:,iblk), strocny (:,:,iblk)) @@ -578,23 +581,14 @@ subroutine eap (dt) field_loc_NEcorner, field_type_vector) call ice_HaloUpdate (work2, halo_info, & field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('F',work1,'U',strocnxT,'T') ! shift - call grid_average_X2Y('F',work2,'U',strocnyT,'T') - -! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport -! commented out in order to focus on EVP for now within the cdgrid -! should be used when routine is ready -! if (grid_ice == 'CD' .or. grid_ice == 'C') then -! call grid_average_X2Y('E2US',uvelE,uvel) -! call grid_average_X2Y('N2US',vvelN,vvel) -! endif -!end comment out + call grid_average_X2Y('F', work1, 'U', strocnxT, 'T') ! shift + call grid_average_X2Y('F', work2, 'U', strocnyT, 'T') + call ice_timer_stop(timer_dynamics) ! dynamics end subroutine eap !======================================================================= - ! Initialize parameters and variables needed for the eap dynamics ! (based on init_dyn) @@ -609,16 +603,16 @@ subroutine init_eap i, j, & iblk ! 
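[The structure tensor evolves on the slow ridging timescale, so refreshing it on every elastic subcycle buys nothing; the mod(ksub,10) test above runs stepa on subcycles 1, 11, 21 and so on. The throttle pattern in isolation, as a sketch (the stride 10 is hard-coded in the source, not a namelist option):

      do ksub = 1, ndte
         ! ... stress and momentum updates happen every subcycle ...
         if (mod(ksub,10) == 1) then
            ! ... structure tensor update, every 10th subcycle only ...
         endif
      enddo
]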
block index - real (kind=dbl_kind), parameter :: & + real (kind=dbl_kind), parameter :: & eps6 = 1.0e-6_dbl_kind - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ix, iy, iz, ia - integer (kind=int_kind), parameter :: & + integer (kind=int_kind), parameter :: & nz = 100 - real (kind=dbl_kind) :: & + real (kind=dbl_kind) :: & ainit, xinit, yinit, zinit, & da, dx, dy, dz, & phi @@ -637,23 +631,23 @@ subroutine init_eap do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - e11(i,j,iblk) = c0 - e12(i,j,iblk) = c0 - e22(i,j,iblk) = c0 - s11(i,j,iblk) = c0 - s12(i,j,iblk) = c0 - s22(i,j,iblk) = c0 + e11 (i,j,iblk) = c0 + e12 (i,j,iblk) = c0 + e22 (i,j,iblk) = c0 + s11 (i,j,iblk) = c0 + s12 (i,j,iblk) = c0 + s22 (i,j,iblk) = c0 yieldstress11(i,j,iblk) = c0 yieldstress12(i,j,iblk) = c0 yieldstress22(i,j,iblk) = c0 - a11_1 (i,j,iblk) = p5 - a11_2 (i,j,iblk) = p5 - a11_3 (i,j,iblk) = p5 - a11_4 (i,j,iblk) = p5 - a12_1 (i,j,iblk) = c0 - a12_2 (i,j,iblk) = c0 - a12_3 (i,j,iblk) = c0 - a12_4 (i,j,iblk) = c0 + a11_1 (i,j,iblk) = p5 + a11_2 (i,j,iblk) = p5 + a11_3 (i,j,iblk) = p5 + a11_4 (i,j,iblk) = p5 + a12_1 (i,j,iblk) = c0 + a12_2 (i,j,iblk) = c0 + a12_3 (i,j,iblk) = c0 + a12_4 (i,j,iblk) = c0 enddo ! i enddo ! j enddo ! iblk @@ -673,8 +667,8 @@ subroutine init_eap yinit = -dy do ia=1,na_yield - do ix=1,nx_yield - do iy=1,ny_yield + do ix=1,nx_yield + do iy=1,ny_yield s11r(ix,iy,ia) = c0 s12r(ix,iy,ia) = c0 s22r(ix,iy,ia) = c0 @@ -682,48 +676,48 @@ subroutine init_eap s12s(ix,iy,ia) = c0 s22s(ix,iy,ia) = c0 if (ia <= na_yield-1) then - do iz=1,nz - s11r(ix,iy,ia) = s11r(ix,iy,ia) + 1*w1(ainit+ia*da)* & - exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & - s11kr(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) - s12r(ix,iy,ia) = s12r(ix,iy,ia) + 1*w1(ainit+ia*da)* & - exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & - s12kr(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) - s22r(ix,iy,ia) = s22r(ix,iy,ia) + 1*w1(ainit+ia*da)* & - exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & - s22kr(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) - s11s(ix,iy,ia) = s11s(ix,iy,ia) + 1*w1(ainit+ia*da)* & - exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & - s11ks(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) - s12s(ix,iy,ia) = s12s(ix,iy,ia) + 1*w1(ainit+ia*da)* & - exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & - s12ks(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) - s22s(ix,iy,ia) = s22s(ix,iy,ia) + 1*w1(ainit+ia*da)* & - exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & - s22ks(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) - enddo - if (abs(s11r(ix,iy,ia)) < eps6) s11r(ix,iy,ia) = c0 - if (abs(s12r(ix,iy,ia)) < eps6) s12r(ix,iy,ia) = c0 - if (abs(s22r(ix,iy,ia)) < eps6) s22r(ix,iy,ia) = c0 - if (abs(s11s(ix,iy,ia)) < eps6) s11s(ix,iy,ia) = c0 - if (abs(s12s(ix,iy,ia)) < eps6) s12s(ix,iy,ia) = c0 - if (abs(s22s(ix,iy,ia)) < eps6) s22s(ix,iy,ia) = c0 + do iz=1,nz + s11r(ix,iy,ia) = s11r(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s11kr(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + s12r(ix,iy,ia) = s12r(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s12kr(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + s22r(ix,iy,ia) = s22r(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s22kr(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + s11s(ix,iy,ia) = s11s(ix,iy,ia) + 
1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s11ks(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + s12s(ix,iy,ia) = s12s(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s12ks(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + s22s(ix,iy,ia) = s22s(ix,iy,ia) + 1*w1(ainit+ia*da)* & + exp(-w2(ainit+ia*da)*(zinit+iz*dz)*(zinit+iz*dz))* & + s22ks(xinit+ix*dx,yinit+iy*dy,zinit+iz*dz,phi)*dz/sin(c2*phi) + enddo + if (abs(s11r(ix,iy,ia)) < eps6) s11r(ix,iy,ia) = c0 + if (abs(s12r(ix,iy,ia)) < eps6) s12r(ix,iy,ia) = c0 + if (abs(s22r(ix,iy,ia)) < eps6) s22r(ix,iy,ia) = c0 + if (abs(s11s(ix,iy,ia)) < eps6) s11s(ix,iy,ia) = c0 + if (abs(s12s(ix,iy,ia)) < eps6) s12s(ix,iy,ia) = c0 + if (abs(s22s(ix,iy,ia)) < eps6) s22s(ix,iy,ia) = c0 else - s11r(ix,iy,ia) = p5*s11kr(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) - s12r(ix,iy,ia) = p5*s12kr(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) - s22r(ix,iy,ia) = p5*s22kr(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) - s11s(ix,iy,ia) = p5*s11ks(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) - s12s(ix,iy,ia) = p5*s12ks(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) - s22s(ix,iy,ia) = p5*s22ks(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) - if (abs(s11r(ix,iy,ia)) < eps6) s11r(ix,iy,ia) = c0 - if (abs(s12r(ix,iy,ia)) < eps6) s12r(ix,iy,ia) = c0 - if (abs(s22r(ix,iy,ia)) < eps6) s22r(ix,iy,ia) = c0 - if (abs(s11s(ix,iy,ia)) < eps6) s11s(ix,iy,ia) = c0 - if (abs(s12s(ix,iy,ia)) < eps6) s12s(ix,iy,ia) = c0 - if (abs(s22s(ix,iy,ia)) < eps6) s22s(ix,iy,ia) = c0 + s11r(ix,iy,ia) = p5*s11kr(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + s12r(ix,iy,ia) = p5*s12kr(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + s22r(ix,iy,ia) = p5*s22kr(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + s11s(ix,iy,ia) = p5*s11ks(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + s12s(ix,iy,ia) = p5*s12ks(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + s22s(ix,iy,ia) = p5*s22ks(xinit+ix*dx,yinit+iy*dy,c0,phi)/sin(c2*phi) + if (abs(s11r(ix,iy,ia)) < eps6) s11r(ix,iy,ia) = c0 + if (abs(s12r(ix,iy,ia)) < eps6) s12r(ix,iy,ia) = c0 + if (abs(s22r(ix,iy,ia)) < eps6) s22r(ix,iy,ia) = c0 + if (abs(s11s(ix,iy,ia)) < eps6) s11s(ix,iy,ia) = c0 + if (abs(s12s(ix,iy,ia)) < eps6) s12s(ix,iy,ia) = c0 + if (abs(s22s(ix,iy,ia)) < eps6) s22s(ix,iy,ia) = c0 endif - enddo - enddo + enddo + enddo enddo end subroutine init_eap @@ -771,23 +765,23 @@ end FUNCTION w2 !======================================================================= ! Function : s11kr - FUNCTION s11kr(x,y,z,phi) + FUNCTION s11kr(x,y,z,phi) real (kind=dbl_kind), intent(in) :: & x,y,z,phi real (kind=dbl_kind) :: & - s11kr, p + s11kr, p real (kind=dbl_kind) :: & - n1t2i11, n1t2i12, n1t2i21, n1t2i22, & - n2t1i11, n2t1i12, n2t1i21, n2t1i22, & -! t1t2i11, t1t2i12, t1t2i21, t1t2i22, & -! t2t1i11, t2t1i12, t2t1i21, t2t1i22, & - d11, d12, d22, & - IIn1t2, IIn2t1, & -! IIt1t2, & - Hen1t2, Hen2t1 + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & +! t1t2i11, t1t2i12, t1t2i21, t1t2i22, & +! t2t1i11, t2t1i12, t2t1i21, t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, & +! IIt1t2, & + Hen1t2, Hen2t1 character(len=*), parameter :: subname = '(s11kr)' @@ -819,15 +813,15 @@ FUNCTION s11kr(x,y,z,phi) ! 
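[Each lookup-table entry above is a z-integral of a stress kernel weighted by the orientation distribution w1(a)*exp(-w2(a)*z**2), evaluated as a plain Riemann sum over nz points; entries smaller than eps6 are then flushed to zero so interpolation later does not amplify quadrature noise. The quadrature pattern in isolation, as a sketch with f standing for any of the s11kr..s22ks kernels above:

      ! Riemann-sum sketch of one table entry:
      !   s = integral over z of w1(a) * exp(-w2(a)*z**2) * f(z) dz
      s = c0
      do iz = 1, nz
         z = zinit + real(iz,kind=dbl_kind)*dz
         s = s + w1(a)*exp(-w2(a)*z*z)*f(z)*dz
      enddo
      if (abs(s) < eps6) s = c0     ! flush tiny entries, as in the loop above
]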
IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 if (-IIn1t2>=puny) then - Hen1t2 = c1 + Hen1t2 = c1 else - Hen1t2 = c0 + Hen1t2 = c0 endif if (-IIn2t1>=puny) then - Hen2t1 = c1 + Hen2t1 = c1 else - Hen2t1 = c0 + Hen2t1 = c0 endif s11kr = (- Hen1t2 * n1t2i11 - Hen2t1 * n2t1i11) @@ -840,20 +834,20 @@ end FUNCTION s11kr FUNCTION s12kr(x,y,z,phi) real (kind=dbl_kind), intent(in) :: & - x,y,z,phi + x,y,z,phi real (kind=dbl_kind) :: & - s12kr, s12r0, s21r0, p + s12kr, s12r0, s21r0, p real (kind=dbl_kind) :: & - n1t2i11, n1t2i12, n1t2i21, n1t2i22, & - n2t1i11, n2t1i12, n2t1i21, n2t1i22, & -! t1t2i11, t1t2i12, t1t2i21, t1t2i22, & -! t2t1i11, t2t1i12, t2t1i21, t2t1i22, & - d11, d12, d22, & - IIn1t2, IIn2t1, & -! IIt1t2, & - Hen1t2, Hen2t1 + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & +! t1t2i11, t1t2i12, t1t2i21, t1t2i22, & +! t2t1i11, t2t1i12, t2t1i21, t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, & +! IIt1t2, & + Hen1t2, Hen2t1 character(len=*), parameter :: subname = '(s12kr)' @@ -883,15 +877,15 @@ FUNCTION s12kr(x,y,z,phi) ! IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 if (-IIn1t2>=puny) then - Hen1t2 = c1 + Hen1t2 = c1 else - Hen1t2 = c0 + Hen1t2 = c0 endif if (-IIn2t1>=puny) then - Hen2t1 = c1 + Hen2t1 = c1 else - Hen2t1 = c0 + Hen2t1 = c0 endif s12r0 = (- Hen1t2 * n1t2i12 - Hen2t1 * n2t1i12) @@ -906,20 +900,20 @@ end FUNCTION s12kr FUNCTION s22kr(x,y,z,phi) real (kind=dbl_kind), intent(in) :: & - x,y,z,phi + x,y,z,phi real (kind=dbl_kind) :: & - s22kr, p + s22kr, p real (kind=dbl_kind) :: & - n1t2i11, n1t2i12, n1t2i21, n1t2i22, & - n2t1i11, n2t1i12, n2t1i21, n2t1i22, & -! t1t2i11, t1t2i12, t1t2i21, t1t2i22, & -! t2t1i11, t2t1i12, t2t1i21, t2t1i22, & - d11, d12, d22, & - IIn1t2, IIn2t1, & -! IIt1t2, & - Hen1t2, Hen2t1 + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & +! t1t2i11, t1t2i12, t1t2i21, t1t2i22, & +! t2t1i11, t2t1i12, t2t1i21, t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, & +! IIt1t2, & + Hen1t2, Hen2t1 character(len=*), parameter :: subname = '(s22kr)' @@ -949,15 +943,15 @@ FUNCTION s22kr(x,y,z,phi) ! IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 if (-IIn1t2>=puny) then - Hen1t2 = c1 + Hen1t2 = c1 else - Hen1t2 = c0 + Hen1t2 = c0 endif if (-IIn2t1>=puny) then - Hen2t1 = c1 + Hen2t1 = c1 else - Hen2t1 = c0 + Hen2t1 = c0 endif s22kr = (- Hen1t2 * n1t2i22 - Hen2t1 * n2t1i22) @@ -970,21 +964,21 @@ end FUNCTION s22kr FUNCTION s11ks(x,y,z,phi) real (kind=dbl_kind), intent(in):: & - x,y,z,phi + x,y,z,phi real (kind=dbl_kind) :: & - s11ks, p + s11ks, p real (kind=dbl_kind) :: & - n1t2i11, n1t2i12, n1t2i21, n1t2i22, & - n2t1i11, n2t1i12, n2t1i21, n2t1i22, & - t1t2i11, & - t1t2i12, t1t2i21, t1t2i22, & - t2t1i11, & -! t2t1i12, t2t1i21, t2t1i22, & - d11, d12, d22, & - IIn1t2, IIn2t1, IIt1t2, & - Hen1t2, Hen2t1 + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & + t1t2i11, & + t1t2i12, t1t2i21, t1t2i22, & + t2t1i11, & +! 
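[The if/else pairs above are Heaviside selections: a slip system contributes only when its resolved deformation rate satisfies -II >= puny. The same selection can be written in one line with the standard Fortran merge intrinsic, shown here as an equivalent sketch (the source keeps the explicit branches):

      Hen1t2 = merge(c1, c0, -IIn1t2 >= puny)   ! c1 when the system is active
      Hen2t1 = merge(c1, c0, -IIn2t1 >= puny)
]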
t2t1i12, t2t1i21, t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, IIt1t2, & + Hen1t2, Hen2t1 character(len=*), parameter :: subname = '(s11ks)' @@ -1014,15 +1008,15 @@ FUNCTION s11ks(x,y,z,phi) IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 if (-IIn1t2>=puny) then - Hen1t2 = c1 + Hen1t2 = c1 else - Hen1t2 = c0 + Hen1t2 = c0 endif if (-IIn2t1>=puny) then - Hen2t1 = c1 + Hen2t1 = c1 else - Hen2t1 = c0 + Hen2t1 = c0 endif s11ks = sign(c1,IIt1t2+puny)*(Hen1t2 * t1t2i11 + Hen2t1 * t2t1i11) @@ -1035,20 +1029,20 @@ end FUNCTION s11ks FUNCTION s12ks(x,y,z,phi) real (kind=dbl_kind), intent(in) :: & - x,y,z,phi + x,y,z,phi real (kind=dbl_kind) :: & - s12ks,s12s0,s21s0,p + s12ks,s12s0,s21s0,p real (kind=dbl_kind) :: & - n1t2i11, n1t2i12, n1t2i21, n1t2i22, & - n2t1i11, n2t1i12, n2t1i21, n2t1i22, & - t1t2i11, t1t2i12, t1t2i21, t1t2i22, & -! t2t1i11, t2t1i22, & - t2t1i12, t2t1i21, & - d11, d12, d22, & - IIn1t2, IIn2t1, IIt1t2, & - Hen1t2, Hen2t1 + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & + t1t2i11, t1t2i12, t1t2i21, t1t2i22, & +! t2t1i11, t2t1i22, & + t2t1i12, t2t1i21, & + d11, d12, d22, & + IIn1t2, IIn2t1, IIt1t2, & + Hen1t2, Hen2t1 character(len=*), parameter :: subname = '(s12ks)' @@ -1078,15 +1072,15 @@ FUNCTION s12ks(x,y,z,phi) IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 if (-IIn1t2>=puny) then - Hen1t2 = c1 + Hen1t2 = c1 else - Hen1t2 = c0 + Hen1t2 = c0 endif if (-IIn2t1>=puny) then - Hen2t1 = c1 + Hen2t1 = c1 else - Hen2t1 = c0 + Hen2t1 = c0 endif s12s0 = sign(c1,IIt1t2+puny)*(Hen1t2 * t1t2i12 + Hen2t1 * t2t1i12) @@ -1098,23 +1092,23 @@ end FUNCTION s12ks !======================================================================= ! Function : s22ks - FUNCTION s22ks(x,y,z,phi) + FUNCTION s22ks(x,y,z,phi) real (kind=dbl_kind), intent(in) :: & - x,y,z,phi + x,y,z,phi real (kind=dbl_kind) :: & - s22ks,p + s22ks,p real (kind=dbl_kind) :: & - n1t2i11, n1t2i12, n1t2i21, n1t2i22, & - n2t1i11, n2t1i12, n2t1i21, n2t1i22, & - t1t2i11, t1t2i12, t1t2i21, t1t2i22, & -! t2t1i11, t2t1i12, t2t1i21, & - t2t1i22, & - d11, d12, d22, & - IIn1t2, IIn2t1, IIt1t2, & - Hen1t2, Hen2t1 + n1t2i11, n1t2i12, n1t2i21, n1t2i22, & + n2t1i11, n2t1i12, n2t1i21, n2t1i22, & + t1t2i11, t1t2i12, t1t2i21, t1t2i22, & +! t2t1i11, t2t1i12, t2t1i21, & + t2t1i22, & + d11, d12, d22, & + IIn1t2, IIn2t1, IIt1t2, & + Hen1t2, Hen2t1 character(len=*), parameter :: subname = '(s22ks)' @@ -1144,24 +1138,22 @@ FUNCTION s22ks(x,y,z,phi) IIt1t2 = t1t2i11 * d11 + (t1t2i12 + t1t2i21) * d12 + t1t2i22 * d22 if (-IIn1t2>=puny) then - Hen1t2 = c1 + Hen1t2 = c1 else - Hen1t2 = c0 + Hen1t2 = c0 endif if (-IIn2t1>=puny) then - Hen2t1 = c1 + Hen2t1 = c1 else - Hen2t1 = c0 + Hen2t1 = c0 endif s22ks = sign(c1,IIt1t2+puny)*(Hen1t2 * t1t2i22 + Hen2t1 * t2t1i22) end FUNCTION s22ks - !======================================================================= - ! Computes the rates of strain and internal stress components for ! each of the four corners on each T-grid cell. ! Computes stress terms for the momentum equation @@ -1198,7 +1190,7 @@ subroutine stress_eap (nx_block, ny_block, & rdg_conv, & strtmp) - integer (kind=int_kind), intent(in) :: & + integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions ksub , & ! subcycling step ndte , & ! number of subcycles @@ -1239,11 +1231,11 @@ subroutine stress_eap (nx_block, ny_block, & shear , & ! strain rate II component (1/s) divu , & ! strain rate I component, velocity divergence (1/s) e11 , & ! 
components of strain rate tensor (1/s) - e12 , & ! - e22 , & ! + e12 , & ! + e22 , & ! s11 , & ! components of stress tensor (kg/s^2) - s12 , & ! - s22 , & ! + s12 , & ! + s22 , & ! yieldstress11, & ! components of yield stress tensor (kg/s^2) yieldstress12, & yieldstress22, & @@ -1264,22 +1256,22 @@ subroutine stress_eap (nx_block, ny_block, & stress12tmp_1,stress12tmp_2,stress12tmp_3,stress12tmp_4 ! sigma12 real (kind=dbl_kind) :: & - divune, divunw, divuse, divusw , & ! divergence - tensionne, tensionnw, tensionse, tensionsw, & ! tension - shearne, shearnw, shearse, shearsw , & ! shearing - ssigpn, ssigps, ssigpe, ssigpw , & - ssigmn, ssigms, ssigme, ssigmw , & - ssig12n, ssig12s, ssig12e, ssig12w , & - ssigp1, ssigp2, ssigm1, ssigm2, ssig121, ssig122, & - csigpne, csigpnw, csigpse, csigpsw , & - csigmne, csigmnw, csigmse, csigmsw , & - csig12ne, csig12nw, csig12se, csig12sw , & - str12ew, str12we, str12ns, str12sn , & - strp_tmp, strm_tmp + divune, divunw, divuse, divusw , & ! divergence + tensionne, tensionnw, tensionse, tensionsw, & ! tension + shearne, shearnw, shearse, shearsw , & ! shearing + ssigpn, ssigps, ssigpe, ssigpw , & + ssigmn, ssigms, ssigme, ssigmw , & + ssig12n, ssig12s, ssig12e, ssig12w , & + ssigp1, ssigp2, ssigm1, ssigm2, ssig121, ssig122, & + csigpne, csigpnw, csigpse, csigpsw , & + csigmne, csigmnw, csigmse, csigmsw , & + csig12ne, csig12nw, csig12se, csig12sw , & + str12ew, str12we, str12ns, str12sn , & + strp_tmp, strm_tmp real (kind=dbl_kind) :: & - alpharne, alpharnw, alpharsw, alpharse, & - alphasne, alphasnw, alphassw, alphasse + alpharne, alpharnw, alpharsw, alpharse, & + alphasne, alphasnw, alphassw, alphasse character(len=*), parameter :: subname = '(stress_eap)' @@ -1293,10 +1285,11 @@ subroutine stress_eap (nx_block, ny_block, & i = indxti(ij) j = indxtj(ij) - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! strain rates + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + ! divergence = e_11 + e_22 divune = cyp(i,j)*uvel(i ,j ) - dyt(i,j)*uvel(i-1,j ) & + cxp(i,j)*vvel(i ,j ) - dxt(i,j)*vvel(i ,j-1) @@ -1327,9 +1320,9 @@ subroutine stress_eap (nx_block, ny_block, & shearse = -cym(i,j)*vvel(i ,j-1) - dyt(i,j)*vvel(i-1,j-1) & - cxp(i,j)*uvel(i ,j-1) + dxt(i,j)*uvel(i ,j ) - !----------------------------------------------------------------- - ! Stress updated depending on strain rate and structure tensor - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! Stress updated depending on strain rate and structure tensor + !----------------------------------------------------------------- ! ne call update_stress_rdg (ksub, ndte, divune, tensionne, & @@ -1356,9 +1349,10 @@ subroutine stress_eap (nx_block, ny_block, & stress12tmp_4, strength(i,j), & alpharse, alphasse) - !----------------------------------------------------------------- - ! on last subcycle, save quantities for mechanical redistribution - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! 
on last subcycle, save quantities for mechanical redistribution + !----------------------------------------------------------------- + if (ksub == ndte) then ! diagnostic only @@ -1383,9 +1377,9 @@ subroutine stress_eap (nx_block, ny_block, & e22(i,j) = p5*p25*(divune + divunw + divuse + divusw - & tensionne - tensionnw - tensionse - tensionsw) * tarear(i,j) - !----------------------------------------------------------------- - ! elastic relaxation, see Eq. A12-A14 - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! elastic relaxation, see Eq. A12-A14 + !----------------------------------------------------------------- stressp_1(i,j) = (stressp_1(i,j) + stressptmp_1*arlx1i) & * denom1 @@ -1414,14 +1408,14 @@ subroutine stress_eap (nx_block, ny_block, & stress12_4(i,j) = (stress12_4(i,j) + stress12tmp_4*arlx1i) & * denom1 - s11(i,j) = p5 * p25 * (stressp_1(i,j) + stressp_2(i,j) & - + stressp_3(i,j) + stressp_4(i,j) & - + stressm_1(i,j) + stressm_2(i,j) & - + stressm_3(i,j) + stressm_4(i,j)) - s22(i,j) = p5 * p25 * (stressp_1(i,j) + stressp_2(i,j) & - + stressp_3(i,j) + stressp_4(i,j) & - - stressm_1(i,j) - stressm_2(i,j) & - - stressm_3(i,j) - stressm_4(i,j)) + s11(i,j) = p5 * p25 * (stressp_1 (i,j) + stressp_2 (i,j) & + + stressp_3 (i,j) + stressp_4 (i,j) & + + stressm_1 (i,j) + stressm_2 (i,j) & + + stressm_3 (i,j) + stressm_4 (i,j)) + s22(i,j) = p5 * p25 * (stressp_1 (i,j) + stressp_2 (i,j) & + + stressp_3 (i,j) + stressp_4 (i,j) & + - stressm_1 (i,j) - stressm_2 (i,j) & + - stressm_3 (i,j) - stressm_4 (i,j)) s12(i,j) = p25 * (stress12_1(i,j) + stress12_2(i,j) & + stress12_3(i,j) + stress12_4(i,j)) @@ -1436,34 +1430,34 @@ subroutine stress_eap (nx_block, ny_block, & yieldstress12(i,j) = p25 * (stress12tmp_1 + stress12tmp_2 & + stress12tmp_3 + stress12tmp_4) - !----------------------------------------------------------------- - ! Eliminate underflows. - ! The following code is commented out because it is relatively - ! expensive and most compilers include a flag that accomplishes - ! the same thing more efficiently. This code is cheaper than - ! handling underflows if the compiler lacks a flag; uncomment - ! it in that case. The compiler flag is often described with the - ! phrase "flush to zero". - !----------------------------------------------------------------- - -! stressp_1(i,j) = sign(max(abs(stressp_1(i,j)),puny),stressp_1(i,j)) -! stressp_2(i,j) = sign(max(abs(stressp_2(i,j)),puny),stressp_2(i,j)) -! stressp_3(i,j) = sign(max(abs(stressp_3(i,j)),puny),stressp_3(i,j)) -! stressp_4(i,j) = sign(max(abs(stressp_4(i,j)),puny),stressp_4(i,j)) - -! stressm_1(i,j) = sign(max(abs(stressm_1(i,j)),puny),stressm_1(i,j)) -! stressm_2(i,j) = sign(max(abs(stressm_2(i,j)),puny),stressm_2(i,j)) -! stressm_3(i,j) = sign(max(abs(stressm_3(i,j)),puny),stressm_3(i,j)) -! stressm_4(i,j) = sign(max(abs(stressm_4(i,j)),puny),stressm_4(i,j)) - -! stress12_1(i,j) = sign(max(abs(stress12_1(i,j)),puny),stress12_1(i,j)) -! stress12_2(i,j) = sign(max(abs(stress12_2(i,j)),puny),stress12_2(i,j)) -! stress12_3(i,j) = sign(max(abs(stress12_3(i,j)),puny),stress12_3(i,j)) -! stress12_4(i,j) = sign(max(abs(stress12_4(i,j)),puny),stress12_4(i,j)) - - !----------------------------------------------------------------- - ! combinations of the stresses for the momentum equation ! kg/s^2 - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! 
Eliminate underflows. + ! The following code is commented out because it is relatively + ! expensive and most compilers include a flag that accomplishes + ! the same thing more efficiently. This code is cheaper than + ! handling underflows if the compiler lacks a flag; uncomment + ! it in that case. The compiler flag is often described with the + ! phrase "flush to zero". + !----------------------------------------------------------------- + +! stressp_1(i,j) = sign(max(abs(stressp_1(i,j)),puny),stressp_1(i,j)) +! stressp_2(i,j) = sign(max(abs(stressp_2(i,j)),puny),stressp_2(i,j)) +! stressp_3(i,j) = sign(max(abs(stressp_3(i,j)),puny),stressp_3(i,j)) +! stressp_4(i,j) = sign(max(abs(stressp_4(i,j)),puny),stressp_4(i,j)) + +! stressm_1(i,j) = sign(max(abs(stressm_1(i,j)),puny),stressm_1(i,j)) +! stressm_2(i,j) = sign(max(abs(stressm_2(i,j)),puny),stressm_2(i,j)) +! stressm_3(i,j) = sign(max(abs(stressm_3(i,j)),puny),stressm_3(i,j)) +! stressm_4(i,j) = sign(max(abs(stressm_4(i,j)),puny),stressm_4(i,j)) + +! stress12_1(i,j) = sign(max(abs(stress12_1(i,j)),puny),stress12_1(i,j)) +! stress12_2(i,j) = sign(max(abs(stress12_2(i,j)),puny),stress12_2(i,j)) +! stress12_3(i,j) = sign(max(abs(stress12_3(i,j)),puny),stress12_3(i,j)) +! stress12_4(i,j) = sign(max(abs(stress12_4(i,j)),puny),stress12_4(i,j)) + + !----------------------------------------------------------------- + ! combinations of the stresses for the momentum equation ! kg/s^2 + !----------------------------------------------------------------- ssigpn = stressp_1(i,j) + stressp_2(i,j) ssigps = stressp_3(i,j) + stressp_4(i,j) @@ -1490,12 +1484,12 @@ subroutine stress_eap (nx_block, ny_block, & csigpnw = p111*stressp_2(i,j) + ssigp1 + p027*stressp_4(i,j) csigpsw = p111*stressp_3(i,j) + ssigp2 + p027*stressp_1(i,j) csigpse = p111*stressp_4(i,j) + ssigp1 + p027*stressp_2(i,j) - + csigmne = p111*stressm_1(i,j) + ssigm2 + p027*stressm_3(i,j) csigmnw = p111*stressm_2(i,j) + ssigm1 + p027*stressm_4(i,j) csigmsw = p111*stressm_3(i,j) + ssigm2 + p027*stressm_1(i,j) csigmse = p111*stressm_4(i,j) + ssigm1 + p027*stressm_2(i,j) - + csig12ne = p222*stress12_1(i,j) + ssig122 & + p055*stress12_3(i,j) csig12nw = p222*stress12_2(i,j) + ssig121 & @@ -1510,9 +1504,10 @@ subroutine stress_eap (nx_block, ny_block, & str12ns = p5*dyt(i,j)*(p333*ssig12n + p166*ssig12s) str12sn = p5*dyt(i,j)*(p333*ssig12s + p166*ssig12n) - !----------------------------------------------------------------- - ! for dF/dx (u momentum) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! for dF/dx (u momentum) + !----------------------------------------------------------------- + strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) strm_tmp = p25*dyt(i,j)*(p333*ssigmn + p166*ssigms) @@ -1535,9 +1530,10 @@ subroutine stress_eap (nx_block, ny_block, & strtmp(i,j,4) = strp_tmp + strm_tmp + str12we & + dxhy(i,j)*(-csigpsw + csigmsw) + dyhx(i,j)*csig12sw - !----------------------------------------------------------------- - ! for dF/dy (v momentum) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! 
for dF/dy (v momentum) + !----------------------------------------------------------------- + strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw) strm_tmp = p25*dxt(i,j)*(p333*ssigme + p166*ssigmw) @@ -1565,7 +1561,6 @@ subroutine stress_eap (nx_block, ny_block, & end subroutine stress_eap !======================================================================= - ! Updates the stress depending on values of strain rate and structure ! tensor and for ksub=ndte it computes closing and sliding rate @@ -1586,7 +1581,7 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & real (kind=dbl_kind), intent(out) :: & stressp, stressm, stress12, & - alphar, alphas + alphar, alphas ! local variables @@ -1626,263 +1621,268 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & character(len=*), parameter :: subname = '(update_stress_rdg)' -! Factor to maintain the same stress as in EVP (see Section 3) -! Can be set to 1 otherwise + ! Factor to maintain the same stress as in EVP (see Section 3) + ! Can be set to 1 otherwise - if (first_call) then - invstressconviso = c1/(c1+kfriction*kfriction) - invsin = c1/sin(pi2/c12) * invstressconviso - endif + if (first_call) then + invstressconviso = c1/(c1+kfriction*kfriction) + invsin = c1/sin(pi2/c12) * invstressconviso + endif -! compute eigenvalues, eigenvectors and angles for structure tensor, strain rates + ! compute eigenvalues, eigenvectors and angles for structure tensor, strain rates -! 1) structure tensor + ! 1) structure tensor - a22 = c1-a11 + a22 = c1-a11 -! gamma: angle between general coordinates and principal axis of A -! here Tan2gamma = 2 a12 / (a11 - a22) + ! gamma: angle between general coordinates and principal axis of A + ! here Tan2gamma = 2 a12 / (a11 - a22) - Q11Q11 = c1 - Q12Q12 = puny - Q11Q12 = puny + Q11Q11 = c1 + Q12Q12 = puny + Q11Q12 = puny - if((ABS(a11 - a22) > puny).or.(ABS(a12) > puny)) then - Angle_denom_gamma = sqrt( ( a11 - a22 )*( a11 - a22) + & + if ((ABS(a11 - a22) > puny).or.(ABS(a12) > puny)) then + Angle_denom_gamma = sqrt( ( a11 - a22 )*( a11 - a22) + & c4*a12*a12 ) - Q11Q11 = p5 + ( a11 - a22 )*p5/Angle_denom_gamma !Cos^2 - Q12Q12 = p5 - ( a11 - a22 )*p5/Angle_denom_gamma !Sin^2 - Q11Q12 = a12/Angle_denom_gamma !CosSin - endif + Q11Q11 = p5 + ( a11 - a22 )*p5/Angle_denom_gamma !Cos^2 + Q12Q12 = p5 - ( a11 - a22 )*p5/Angle_denom_gamma !Sin^2 + Q11Q12 = a12/Angle_denom_gamma !CosSin + endif -! rotation Q*atemp*Q^T - atempprime = Q11Q11*a11 + c2*Q11Q12*a12 + Q12Q12*a22 + ! rotation Q*atemp*Q^T + atempprime = Q11Q11*a11 + c2*Q11Q12*a12 + Q12Q12*a22 -! make first principal value the largest - atempprime = max(atempprime, c1 - atempprime) + ! make first principal value the largest + atempprime = max(atempprime, c1 - atempprime) -! 2) strain rate + ! 2) strain rate - dtemp11 = p5*(divu + tension) - dtemp12 = shear*p5 - dtemp22 = p5*(divu - tension) + dtemp11 = p5*(divu + tension) + dtemp12 = shear*p5 + dtemp22 = p5*(divu - tension) -! here Tan2alpha = 2 dtemp12 / (dtemp11 - dtemp22) + ! 
here Tan2alpha = 2 dtemp12 / (dtemp11 - dtemp22) - Qd11Qd11 = c1 - Qd12Qd12 = puny - Qd11Qd12 = puny + Qd11Qd11 = c1 + Qd12Qd12 = puny + Qd11Qd12 = puny - if((ABS( dtemp11 - dtemp22) > puny).or.(ABS(dtemp12) > puny)) then - Angle_denom_alpha = sqrt( ( dtemp11 - dtemp22 )* & - ( dtemp11 - dtemp22 ) + c4*dtemp12*dtemp12) + if ((ABS( dtemp11 - dtemp22) > puny).or.(ABS(dtemp12) > puny)) then + Angle_denom_alpha = sqrt( ( dtemp11 - dtemp22 )* & + ( dtemp11 - dtemp22 ) + c4*dtemp12*dtemp12) - Qd11Qd11 = p5 + ( dtemp11 - dtemp22 )*p5/Angle_denom_alpha !Cos^2 - Qd12Qd12 = p5 - ( dtemp11 - dtemp22 )*p5/Angle_denom_alpha !Sin^2 - Qd11Qd12 = dtemp12/Angle_denom_alpha !CosSin - endif + Qd11Qd11 = p5 + ( dtemp11 - dtemp22 )*p5/Angle_denom_alpha !Cos^2 + Qd12Qd12 = p5 - ( dtemp11 - dtemp22 )*p5/Angle_denom_alpha !Sin^2 + Qd11Qd12 = dtemp12/Angle_denom_alpha !CosSin + endif - dtemp1 = Qd11Qd11*dtemp11 + c2*Qd11Qd12*dtemp12 + Qd12Qd12*dtemp22 - dtemp2 = Qd12Qd12*dtemp11 - c2*Qd11Qd12*dtemp12 + Qd11Qd11*dtemp22 + dtemp1 = Qd11Qd11*dtemp11 + c2*Qd11Qd12*dtemp12 + Qd12Qd12*dtemp22 + dtemp2 = Qd12Qd12*dtemp11 - c2*Qd11Qd12*dtemp12 + Qd11Qd11*dtemp22 -! In cos and sin values - x = c0 + ! In cos and sin values + x = c0 - if ((ABS(dtemp1) > puny).or.(ABS(dtemp2) > puny)) then -! invleng = c1/sqrt(dtemp1*dtemp1 + dtemp2*dtemp2) ! not sure if this is neccessary -! dtemp1 = dtemp1*invleng -! dtemp2 = dtemp2*invleng - if (dtemp1 == c0) then - x = pih - else - x = atan2(dtemp2,dtemp1) - endif + if ((ABS(dtemp1) > puny).or.(ABS(dtemp2) > puny)) then +! invleng = c1/sqrt(dtemp1*dtemp1 + dtemp2*dtemp2) ! not sure if this is neccessary +! dtemp1 = dtemp1*invleng +! dtemp2 = dtemp2*invleng + if (dtemp1 == c0) then + x = pih + else + x = atan2(dtemp2,dtemp1) endif + endif -!echmod to ensure the angle lies between pi/4 and 9 pi/4 - if (x < piq) x = x + pi2 -!echmod require 0 <= x < (nx_yield-1)*dx = 2 pi -! x = mod(x+pi2, pi2) - -! y: angle between major principal axis of strain rate and structure tensor -! y = gamma - alpha -! Expressesed componently with -! Tany = (Singamma*Cosgamma - Sinalpha*Cosgamma)/(Cos^2gamma - Sin^alpha) - - Tany_1 = Q11Q12 - Qd11Qd12 - Tany_2 = Q11Q11 - Qd12Qd12 - - y = c0 - - if ((ABS(Tany_1) > puny).or.(ABS(Tany_2) > puny)) then -! invleng = c1/sqrt(Tany_1*Tany_1 + Tany_2*Tany_2) ! not sure if this is neccessary -! Tany_1 = Tany_1*invleng -! Tany_2 = Tany_2*invleng - if (Tany_2 == c0) then - y = pih - else - y = atan2(Tany_1,Tany_2) - endif + !echmod to ensure the angle lies between pi/4 and 9 pi/4 + if (x < piq) x = x + pi2 + !echmod require 0 <= x < (nx_yield-1)*dx = 2 pi +! x = mod(x+pi2, pi2) + ! y: angle between major principal axis of strain rate and structure tensor + ! y = gamma - alpha + ! Expressesed componently with + ! Tany = (Singamma*Cosgamma - Sinalpha*Cosgamma)/(Cos^2gamma - Sin^alpha) + + Tany_1 = Q11Q12 - Qd11Qd12 + Tany_2 = Q11Q11 - Qd12Qd12 + + y = c0 + + if ((ABS(Tany_1) > puny).or.(ABS(Tany_2) > puny)) then +! invleng = c1/sqrt(Tany_1*Tany_1 + Tany_2*Tany_2) ! not sure if this is neccessary +! Tany_1 = Tany_1*invleng +! Tany_2 = Tany_2*invleng + if (Tany_2 == c0) then + y = pih + else + y = atan2(Tany_1,Tany_2) endif + endif -! to make sure y is between 0 and pi - if (y > pi) y = y - pi - if (y < 0) y = y + pi - -! Now calculate updated stress tensor - if (first_call) then - dx = pi/real(nx_yield-1,kind=dbl_kind) - dy = pi/real(ny_yield-1,kind=dbl_kind) - da = p5/real(na_yield-1,kind=dbl_kind) - invdx = c1/dx - invdy = c1/dy - invda = c1/da - endif + ! 
to make sure y is between 0 and pi - if (interpolate_stress_rdg) then - -! Interpolated lookup - - ! if (x>=9*pi/4) x=9*pi/4-puny; end - ! if (y>=pi/2) y=pi/2-puny; end - ! if (atempprime>=1.0), atempprime=1.0-puny; end - - ! % need 8 coords and 8 weights - ! % range in kx - - kx = int((x-piq-pi)*invdx) + 1 - kxw = c1 - ((x-piq-pi)*invdx - (kx-1)) - - ky = int(y*invdy) + 1 - kyw = c1 - (y*invdy - (ky-1)) - - ka = int((atempprime-p5)*invda) + 1 - kaw = c1 - ((atempprime-p5)*invda - (ka-1)) - -! % Determine sigma_r(A1,Zeta,y) and sigma_s (see Section A1) - - stemp11r = kxw* kyw * kaw * s11r(kx ,ky ,ka ) & - + (c1-kxw) * kyw * kaw * s11r(kx+1,ky ,ka ) & - + kxw * (c1-kyw) * kaw * s11r(kx ,ky+1,ka ) & - + kxw * kyw * (c1-kaw) * s11r(kx ,ky ,ka+1) & - + (c1-kxw) * (c1-kyw) * kaw * s11r(kx+1,ky+1,ka ) & - + (c1-kxw) * kyw * (c1-kaw) * s11r(kx+1,ky ,ka+1) & - + kxw * (c1-kyw) * (c1-kaw) * s11r(kx ,ky+1,ka+1) & - + (c1-kxw) * (c1-kyw) * (c1-kaw) * s11r(kx+1,ky+1,ka+1) - - stemp12r = kxw* kyw * kaw * s12r(kx ,ky ,ka ) & - + (c1-kxw) * kyw * kaw * s12r(kx+1,ky ,ka ) & - + kxw * (c1-kyw) * kaw * s12r(kx ,ky+1,ka ) & - + kxw * kyw * (c1-kaw) * s12r(kx ,ky ,ka+1) & - + (c1-kxw) * (c1-kyw) * kaw * s12r(kx+1,ky+1,ka ) & - + (c1-kxw) * kyw * (c1-kaw) * s12r(kx+1,ky ,ka+1) & - + kxw * (c1-kyw) * (c1-kaw) * s12r(kx ,ky+1,ka+1) & - + (c1-kxw) * (c1-kyw) * (c1-kaw) * s12r(kx+1,ky+1,ka+1) - - stemp22r = kxw * kyw * kaw * s22r(kx ,ky ,ka ) & - + (c1-kxw) * kyw * kaw * s22r(kx+1,ky ,ka ) & - + kxw * (c1-kyw) * kaw * s22r(kx ,ky+1,ka ) & - + kxw * kyw * (c1-kaw) * s22r(kx ,ky ,ka+1) & - + (c1-kxw) * (c1-kyw) * kaw * s22r(kx+1,ky+1,ka ) & - + (c1-kxw) * kyw * (c1-kaw) * s22r(kx+1,ky ,ka+1) & - + kxw * (c1-kyw) * (c1-kaw) * s22r(kx ,ky+1,ka+1) & - + (c1-kxw) * (c1-kyw) * (c1-kaw) * s22r(kx+1,ky+1,ka+1) - - stemp11s = kxw* kyw * kaw * s11s(kx ,ky ,ka ) & - + (c1-kxw) * kyw * kaw * s11s(kx+1,ky ,ka ) & - + kxw * (c1-kyw) * kaw * s11s(kx ,ky+1,ka ) & - + kxw * kyw * (c1-kaw) * s11s(kx ,ky ,ka+1) & - + (c1-kxw) * (c1-kyw) * kaw * s11s(kx+1,ky+1,ka ) & - + (c1-kxw) * kyw * (c1-kaw) * s11s(kx+1,ky ,ka+1) & - + kxw * (c1-kyw) * (c1-kaw) * s11s(kx ,ky+1,ka+1) & - + (c1-kxw) * (c1-kyw) * (c1-kaw) * s11s(kx+1,ky+1,ka+1) - - stemp12s = kxw* kyw * kaw * s12s(kx ,ky ,ka ) & - + (c1-kxw) * kyw * kaw * s12s(kx+1,ky ,ka ) & - + kxw * (c1-kyw) * kaw * s12s(kx ,ky+1,ka ) & - + kxw * kyw * (c1-kaw) * s12s(kx ,ky ,ka+1) & - + (c1-kxw) * (c1-kyw) * kaw * s12s(kx+1,ky+1,ka ) & - + (c1-kxw) * kyw * (c1-kaw) * s12s(kx+1,ky ,ka+1) & - + kxw * (c1-kyw) * (c1-kaw) * s12s(kx ,ky+1,ka+1) & - + (c1-kxw) * (c1-kyw) * (c1-kaw) * s12s(kx+1,ky+1,ka+1) - - stemp22s = kxw* kyw * kaw * s22s(kx ,ky ,ka ) & - + (c1-kxw) * kyw * kaw * s22s(kx+1,ky ,ka ) & - + kxw * (c1-kyw) * kaw * s22s(kx ,ky+1,ka ) & - + kxw * kyw * (c1-kaw) * s22s(kx ,ky ,ka+1) & - + (c1-kxw) * (c1-kyw) * kaw * s22s(kx+1,ky+1,ka ) & - + (c1-kxw) * kyw * (c1-kaw) * s22s(kx+1,ky ,ka+1) & - + kxw * (c1-kyw) * (c1-kaw) * s22s(kx ,ky+1,ka+1) & - + (c1-kxw) * (c1-kyw) * (c1-kaw) * s22s(kx+1,ky+1,ka+1) + if (y > pi) y = y - pi + if (y < 0) y = y + pi - else - kx = int((x-piq-pi)*invdx) + 1 - ky = int(y*invdy) + 1 - ka = int((atempprime-p5)*invda) + 1 - -! Determine sigma_r(A1,Zeta,y) and sigma_s (see Section A1) - stemp11r = s11r(kx,ky,ka) - stemp12r = s12r(kx,ky,ka) - stemp22r = s22r(kx,ky,ka) - - stemp11s = s11s(kx,ky,ka) - stemp12s = s12s(kx,ky,ka) - stemp22s = s22s(kx,ky,ka) - endif + ! Now calculate updated stress tensor -! 
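[The interpolate_stress_rdg branch above is a standard trilinear lookup: kx, ky, ka index the lower corner of the bracketing table cell and kxw, kyw, kaw are the fractional weights toward that corner, so each value is a weighted sum over the 8 surrounding nodes. The same stencil as a compact helper, a sketch only (trilin is an illustrative name, not part of the module; c1 and the kinds are as in ice_constants):

      function trilin (tbl, kx, ky, ka, kxw, kyw, kaw) result (v)
         real (kind=dbl_kind), dimension(:,:,:), intent(in) :: tbl  ! lookup table
         integer (kind=int_kind), intent(in) :: kx, ky, ka   ! lower-corner indices
         real (kind=dbl_kind), intent(in) :: kxw, kyw, kaw   ! weights toward lower corner
         real (kind=dbl_kind) :: v
         v =     kxw *    kyw *    kaw * tbl(kx  ,ky  ,ka  ) &
           + (c1-kxw)*    kyw *    kaw * tbl(kx+1,ky  ,ka  ) &
           +     kxw *(c1-kyw)*    kaw * tbl(kx  ,ky+1,ka  ) &
           +     kxw *    kyw *(c1-kaw)* tbl(kx  ,ky  ,ka+1) &
           + (c1-kxw)*(c1-kyw)*    kaw * tbl(kx+1,ky+1,ka  ) &
           + (c1-kxw)*    kyw *(c1-kaw)* tbl(kx+1,ky  ,ka+1) &
           +     kxw *(c1-kyw)*(c1-kaw)* tbl(kx  ,ky+1,ka+1) &
           + (c1-kxw)*(c1-kyw)*(c1-kaw)* tbl(kx+1,ky+1,ka+1)
      end function trilin

with which the six lookups above would collapse to stemp11r = trilin(s11r, kx, ky, ka, kxw, kyw, kaw) and so on for the other five tables.]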
Calculate mean ice stress over a collection of floes (Equation 3) + if (first_call) then + dx = pi/real(nx_yield-1,kind=dbl_kind) + dy = pi/real(ny_yield-1,kind=dbl_kind) + da = p5/real(na_yield-1,kind=dbl_kind) + invdx = c1/dx + invdy = c1/dy + invda = c1/da + endif - stressp = strength*(stemp11r + kfriction*stemp11s & - + stemp22r + kfriction*stemp22s) * invsin - stress12 = strength*(stemp12r + kfriction*stemp12s) * invsin - stressm = strength*(stemp11r + kfriction*stemp11s & - - stemp22r - kfriction*stemp22s) * invsin + if (interpolate_stress_rdg) then + + ! Interpolated lookup + + ! if (x>=9*pi/4) x=9*pi/4-puny; end + ! if (y>=pi/2) y=pi/2-puny; end + ! if (atempprime>=1.0), atempprime=1.0-puny; end + + ! % need 8 coords and 8 weights + ! % range in kx + + kx = int((x-piq-pi)*invdx) + 1 + kxw = c1 - ((x-piq-pi)*invdx - (kx-1)) + + ky = int(y*invdy) + 1 + kyw = c1 - (y*invdy - (ky-1)) + + ka = int((atempprime-p5)*invda) + 1 + kaw = c1 - ((atempprime-p5)*invda - (ka-1)) + + ! % Determine sigma_r(A1,Zeta,y) and sigma_s (see Section A1) + + stemp11r = kxw* kyw * kaw * s11r(kx ,ky ,ka ) & + + (c1-kxw) * kyw * kaw * s11r(kx+1,ky ,ka ) & + + kxw * (c1-kyw) * kaw * s11r(kx ,ky+1,ka ) & + + kxw * kyw * (c1-kaw) * s11r(kx ,ky ,ka+1) & + + (c1-kxw) * (c1-kyw) * kaw * s11r(kx+1,ky+1,ka ) & + + (c1-kxw) * kyw * (c1-kaw) * s11r(kx+1,ky ,ka+1) & + + kxw * (c1-kyw) * (c1-kaw) * s11r(kx ,ky+1,ka+1) & + + (c1-kxw) * (c1-kyw) * (c1-kaw) * s11r(kx+1,ky+1,ka+1) + + stemp12r = kxw* kyw * kaw * s12r(kx ,ky ,ka ) & + + (c1-kxw) * kyw * kaw * s12r(kx+1,ky ,ka ) & + + kxw * (c1-kyw) * kaw * s12r(kx ,ky+1,ka ) & + + kxw * kyw * (c1-kaw) * s12r(kx ,ky ,ka+1) & + + (c1-kxw) * (c1-kyw) * kaw * s12r(kx+1,ky+1,ka ) & + + (c1-kxw) * kyw * (c1-kaw) * s12r(kx+1,ky ,ka+1) & + + kxw * (c1-kyw) * (c1-kaw) * s12r(kx ,ky+1,ka+1) & + + (c1-kxw) * (c1-kyw) * (c1-kaw) * s12r(kx+1,ky+1,ka+1) + + stemp22r = kxw * kyw * kaw * s22r(kx ,ky ,ka ) & + + (c1-kxw) * kyw * kaw * s22r(kx+1,ky ,ka ) & + + kxw * (c1-kyw) * kaw * s22r(kx ,ky+1,ka ) & + + kxw * kyw * (c1-kaw) * s22r(kx ,ky ,ka+1) & + + (c1-kxw) * (c1-kyw) * kaw * s22r(kx+1,ky+1,ka ) & + + (c1-kxw) * kyw * (c1-kaw) * s22r(kx+1,ky ,ka+1) & + + kxw * (c1-kyw) * (c1-kaw) * s22r(kx ,ky+1,ka+1) & + + (c1-kxw) * (c1-kyw) * (c1-kaw) * s22r(kx+1,ky+1,ka+1) + + stemp11s = kxw* kyw * kaw * s11s(kx ,ky ,ka ) & + + (c1-kxw) * kyw * kaw * s11s(kx+1,ky ,ka ) & + + kxw * (c1-kyw) * kaw * s11s(kx ,ky+1,ka ) & + + kxw * kyw * (c1-kaw) * s11s(kx ,ky ,ka+1) & + + (c1-kxw) * (c1-kyw) * kaw * s11s(kx+1,ky+1,ka ) & + + (c1-kxw) * kyw * (c1-kaw) * s11s(kx+1,ky ,ka+1) & + + kxw * (c1-kyw) * (c1-kaw) * s11s(kx ,ky+1,ka+1) & + + (c1-kxw) * (c1-kyw) * (c1-kaw) * s11s(kx+1,ky+1,ka+1) + + stemp12s = kxw* kyw * kaw * s12s(kx ,ky ,ka ) & + + (c1-kxw) * kyw * kaw * s12s(kx+1,ky ,ka ) & + + kxw * (c1-kyw) * kaw * s12s(kx ,ky+1,ka ) & + + kxw * kyw * (c1-kaw) * s12s(kx ,ky ,ka+1) & + + (c1-kxw) * (c1-kyw) * kaw * s12s(kx+1,ky+1,ka ) & + + (c1-kxw) * kyw * (c1-kaw) * s12s(kx+1,ky ,ka+1) & + + kxw * (c1-kyw) * (c1-kaw) * s12s(kx ,ky+1,ka+1) & + + (c1-kxw) * (c1-kyw) * (c1-kaw) * s12s(kx+1,ky+1,ka+1) + + stemp22s = kxw* kyw * kaw * s22s(kx ,ky ,ka ) & + + (c1-kxw) * kyw * kaw * s22s(kx+1,ky ,ka ) & + + kxw * (c1-kyw) * kaw * s22s(kx ,ky+1,ka ) & + + kxw * kyw * (c1-kaw) * s22s(kx ,ky ,ka+1) & + + (c1-kxw) * (c1-kyw) * kaw * s22s(kx+1,ky+1,ka ) & + + (c1-kxw) * kyw * (c1-kaw) * s22s(kx+1,ky ,ka+1) & + + kxw * (c1-kyw) * (c1-kaw) * s22s(kx ,ky+1,ka+1) & + + (c1-kxw) * (c1-kyw) * (c1-kaw) * s22s(kx+1,ky+1,ka+1) -! 
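The back-rotation block just below expands sigma_gen = Q^T * sigma * Q
componentwise, with Q11 = cos(gamma) and Q12 = sin(gamma). A stand-alone
check of the sgprm expressions, not part of the patch (the angle and the
principal-frame stresses are arbitrary test values):

   program backrotate_sketch
     implicit none
     integer, parameter :: dbl = kind(1.d0)
     real(dbl) :: g, Q11, Q12, s11, s12, s22, r11, r12, r22
     g = 0.6_dbl;  Q11 = cos(g);  Q12 = sin(g)
     s11 = 2._dbl; s12 = 0.4_dbl; s22 = -1._dbl   ! principal-frame stress
     r11 = Q11*Q11*s11 + Q12*Q12*s22 - 2._dbl*Q11*Q12*s12
     r12 = Q11*Q12*(s11 - s22) + (Q11*Q11 - Q12*Q12)*s12
     r22 = Q12*Q12*s11 + Q11*Q11*s22 + 2._dbl*Q11*Q12*s12
     print *, r11 + r22, s11 + s22   ! equal: the trace is rotation-invariant
   end program backrotate_sketch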
Back - rotation of the stress from principal axes into general coordinates + else -! Update stress - sig11 = p5*(stressp + stressm) - sig12 = stress12 - sig22 = p5*(stressp - stressm) + kx = int((x-piq-pi)*invdx) + 1 + ky = int(y*invdy) + 1 + ka = int((atempprime-p5)*invda) + 1 - sgprm11 = Q11Q11*sig11 + Q12Q12*sig22 - c2*Q11Q12 *sig12 - sgprm12 = Q11Q12*sig11 - Q11Q12*sig22 + (Q11Q11 - Q12Q12)*sig12 - sgprm22 = Q12Q12*sig11 + Q11Q11*sig22 + c2*Q11Q12 *sig12 + ! Determine sigma_r(A1,Zeta,y) and sigma_s (see Section A1) - stressp = sgprm11 + sgprm22 - stress12 = sgprm12 - stressm = sgprm11 - sgprm22 + stemp11r = s11r(kx,ky,ka) + stemp12r = s12r(kx,ky,ka) + stemp22r = s22r(kx,ky,ka) -! Compute ridging and sliding functions in general coordinates (Equation 11) - if (ksub == ndte) then - rotstemp11r = Q11Q11*stemp11r - c2*Q11Q12* stemp12r & - + Q12Q12*stemp22r - rotstemp12r = Q11Q11*stemp12r + Q11Q12*(stemp11r-stemp22r) & - - Q12Q12*stemp12r - rotstemp22r = Q12Q12*stemp11r + c2*Q11Q12* stemp12r & - + Q11Q11*stemp22r - - rotstemp11s = Q11Q11*stemp11s - c2*Q11Q12* stemp12s & - + Q12Q12*stemp22s - rotstemp12s = Q11Q11*stemp12s + Q11Q12*(stemp11s-stemp22s) & - - Q12Q12*stemp12s - rotstemp22s = Q12Q12*stemp11s + c2*Q11Q12* stemp12s & - + Q11Q11*stemp22s - - alphar = rotstemp11r*dtemp11 + c2*rotstemp12r*dtemp12 & - + rotstemp22r*dtemp22 - alphas = rotstemp11s*dtemp11 + c2*rotstemp12s*dtemp12 & - + rotstemp22s*dtemp22 - endif + stemp11s = s11s(kx,ky,ka) + stemp12s = s12s(kx,ky,ka) + stemp22s = s22s(kx,ky,ka) - first_call = .false. + endif + + ! Calculate mean ice stress over a collection of floes (Equation 3) + + stressp = strength*(stemp11r + kfriction*stemp11s & + + stemp22r + kfriction*stemp22s) * invsin + stress12 = strength*(stemp12r + kfriction*stemp12s) * invsin + stressm = strength*(stemp11r + kfriction*stemp11s & + - stemp22r - kfriction*stemp22s) * invsin + + ! Back - rotation of the stress from principal axes into general coordinates + + ! Update stress + + sig11 = p5*(stressp + stressm) + sig12 = stress12 + sig22 = p5*(stressp - stressm) + + sgprm11 = Q11Q11*sig11 + Q12Q12*sig22 - c2*Q11Q12 *sig12 + sgprm12 = Q11Q12*sig11 - Q11Q12*sig22 + (Q11Q11 - Q12Q12)*sig12 + sgprm22 = Q12Q12*sig11 + Q11Q11*sig22 + c2*Q11Q12 *sig12 + + stressp = sgprm11 + sgprm22 + stress12 = sgprm12 + stressm = sgprm11 - sgprm22 + + ! Compute ridging and sliding functions in general coordinates (Equation 11) + + if (ksub == ndte) then + rotstemp11r = Q11Q11*stemp11r - c2*Q11Q12* stemp12r & + + Q12Q12*stemp22r + rotstemp12r = Q11Q11*stemp12r + Q11Q12*(stemp11r-stemp22r) & + - Q12Q12*stemp12r + rotstemp22r = Q12Q12*stemp11r + c2*Q11Q12* stemp12r & + + Q11Q11*stemp22r + + rotstemp11s = Q11Q11*stemp11s - c2*Q11Q12* stemp12s & + + Q12Q12*stemp22s + rotstemp12s = Q11Q11*stemp12s + Q11Q12*(stemp11s-stemp22s) & + - Q12Q12*stemp12s + rotstemp22s = Q12Q12*stemp11s + c2*Q11Q12* stemp12s & + + Q11Q11*stemp22s + + alphar = rotstemp11r*dtemp11 + c2*rotstemp12r*dtemp12 & + + rotstemp22r*dtemp22 + alphas = rotstemp11s*dtemp11 + c2*rotstemp12s*dtemp12 & + + rotstemp22s*dtemp22 + endif + + first_call = .false. end subroutine update_stress_rdg !======================================================================= - -! Solves evolution equation for structure tensor (A19, A20) +! Solves evolution equation for structure tensor (A19, A20) subroutine stepa (nx_block, ny_block, & dtei, icellt, & @@ -1905,19 +1905,19 @@ subroutine stepa (nx_block, ny_block, & dtei ! 
1/dte, where dte is subcycling timestep (1/s) integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxti , & ! compressed index in i-direction + indxtj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & ! ice stress tensor (kg/s^2) in each corner of T cell - stressp_1, stressp_2, stressp_3, stressp_4, & ! sigma11+sigma22 - stressm_1, stressm_2, stressm_3, stressm_4, & ! sigma11-sigma22 + stressp_1, stressp_2, stressp_3, stressp_4, & ! sigma11+sigma22 + stressm_1, stressm_2, stressm_3, stressm_4, & ! sigma11-sigma22 stress12_1, stress12_2, stress12_3, stress12_4 ! sigma12 real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & ! structure tensor () in each corner of T cell - a11, a12, a11_1, a11_2, a11_3, a11_4, & ! components of - a12_1, a12_2, a12_3, a12_4 ! structure tensor () + a11, a12, a11_1, a11_2, a11_3, a11_4, & ! components of + a12_1, a12_2, a12_3, a12_4 ! structure tensor () ! local variables @@ -1925,11 +1925,11 @@ subroutine stepa (nx_block, ny_block, & i, j, ij real (kind=dbl_kind) :: & - mresult11, mresult12, & - dteikth, p5kth + mresult11, mresult12, & + dteikth, p5kth real (kind=dbl_kind), parameter :: & - kth = p2*p001 + kth = p2*p001 character(len=*), parameter :: subname = '(stepa)' @@ -1940,62 +1940,61 @@ subroutine stepa (nx_block, ny_block, & i = indxti(ij) j = indxtj(ij) -! ne - call calc_ffrac(stressp_1(i,j), stressm_1(i,j), & - stress12_1(i,j), & - a11_1(i,j), a12_1(i,j), & - mresult11, mresult12) + ! ne + call calc_ffrac(stressp_1(i,j), stressm_1(i,j), & + stress12_1(i,j), & + a11_1(i,j), a12_1(i,j), & + mresult11, mresult12) a11_1(i,j) = (a11_1(i,j)*dtei + p5kth - mresult11) * dteikth ! implicit a12_1(i,j) = (a12_1(i,j)*dtei - mresult12) * dteikth ! implicit - -! nw - call calc_ffrac(stressp_2(i,j), stressm_2(i,j), & - stress12_2(i,j), & - a11_2(i,j), a12_2(i,j), & - mresult11, mresult12) + + ! nw + call calc_ffrac(stressp_2(i,j), stressm_2(i,j), & + stress12_2(i,j), & + a11_2(i,j), a12_2(i,j), & + mresult11, mresult12) a11_2(i,j) = (a11_2(i,j)*dtei + p5kth - mresult11) * dteikth ! implicit a12_2(i,j) = (a12_2(i,j)*dtei - mresult12) * dteikth ! implicit -! sw - call calc_ffrac(stressp_3(i,j), stressm_3(i,j), & - stress12_3(i,j), & - a11_3(i,j), a12_3(i,j), & - mresult11, mresult12) + ! sw + call calc_ffrac(stressp_3(i,j), stressm_3(i,j), & + stress12_3(i,j), & + a11_3(i,j), a12_3(i,j), & + mresult11, mresult12) a11_3(i,j) = (a11_3(i,j)*dtei + p5kth - mresult11) * dteikth ! implicit a12_3(i,j) = (a12_3(i,j)*dtei - mresult12) * dteikth ! implicit - -! se - call calc_ffrac(stressp_4(i,j), stressm_4(i,j), & - stress12_4(i,j), & - a11_4(i,j), a12_4(i,j), & - mresult11, mresult12) + + ! se + call calc_ffrac(stressp_4(i,j), stressm_4(i,j), & + stress12_4(i,j), & + a11_4(i,j), a12_4(i,j), & + mresult11, mresult12) a11_4(i,j) = (a11_4(i,j)*dtei + p5kth - mresult11) * dteikth ! implicit a12_4(i,j) = (a12_4(i,j)*dtei - mresult12) * dteikth ! implicit -! average structure tensor + ! average structure tensor a11(i,j) = p25*(a11_1(i,j) + a11_2(i,j) + a11_3(i,j) + a11_4(i,j)) a12(i,j) = p25*(a12_1(i,j) + a12_2(i,j) + a12_3(i,j) + a12_4(i,j)) - + enddo ! ij - + end subroutine stepa !======================================================================= - ! computes term in evolution equation for structure tensor which determines ! the ice floe re-orientation due to fracture ! Eq. 
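For reference, the a11_*/a12_* assignments marked "implicit" above are one
backward-Euler step of da/dt = -kth*(a - 1/2) - F: solving
(a_new - a_old)*dtei = -kth*a_new + kth/2 - F for a_new gives
a_new = (a_old*dtei + kth/2 - F)/(dtei + kth), which matches the code provided
p5kth = p5*kth and dteikth = c1/(dtei + kth) are set earlier in the subroutine
(an assumption here; those assignments are outside this hunk). A minimal
sketch with illustrative values, not part of the patch:

   program stepa_sketch
     implicit none
     integer, parameter :: dbl = kind(1.d0)
     real(dbl), parameter :: kth  = 0.2e-3_dbl     ! p2*p001 above
     real(dbl), parameter :: dtei = 1._dbl/30._dbl ! 1/dte, illustrative only
     real(dbl) :: a_old, F, a_new
     a_old = 0.7_dbl
     F     = 1.e-4_dbl                             ! stands in for mresult11
     a_new = (a_old*dtei + 0.5_dbl*kth - F) / (dtei + kth)
     print *, a_new   ! relaxes toward 1/2 as F -> 0
   end program stepa_sketch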
7: Ffrac = -kf(A-S) or = 0 depending on sigma_1 and sigma_2

 subroutine calc_ffrac (stressp, stressm, &
-                       stress12, &
-                       a1x, a2x, &
+                       stress12, &
+                       a1x, a2x, &
                        mresult1, mresult2)

       real (kind=dbl_kind), intent(in) :: &
@@ -2017,62 +2016,61 @@ subroutine calc_ffrac (stressp, stressm, &

       character(len=*), parameter :: subname = '(calc_ffrac)'

-      sigma11 = p5*(stressp+stressm)
-      sigma12 = stress12
-      sigma22 = p5*(stressp-stressm)
+       sigma11 = p5*(stressp+stressm)
+       sigma12 = stress12
+       sigma22 = p5*(stressp-stressm)

-! if ((sigma11-sigma22) == c0) then sigma11-sigma22 == 0 => stressn ==0
-      if (stressm == c0) then
+!      if ((sigma11-sigma22) == c0) then  sigma11-sigma22 == 0 => stressm == 0
+       if (stressm == c0) then
          gamma = p5*(pih)
-      else
+       else
          gamma = p5*atan2((c2*sigma12),(sigma11-sigma22))
-      endif
+       endif

-! rotate tensor to get into sigma principal axis
+       ! rotate tensor to get into sigma principal axis

-      Q11 = cos(gamma)
-      Q12 = sin(gamma)
+       Q11 = cos(gamma)
+       Q12 = sin(gamma)

-      Q11Q11 = Q11*Q11
-      Q11Q12 = Q11*Q12
-      Q12Q12 = Q12*Q12
+       Q11Q11 = Q11*Q11
+       Q11Q12 = Q11*Q12
+       Q12Q12 = Q12*Q12

-      sigma_1 = Q11Q11*sigma11 + c2*Q11Q12*sigma12 &
-              + Q12Q12*sigma22 ! S(1,1)
-      sigma_2 = Q12Q12*sigma11 - c2*Q11Q12*sigma12 &
-              + Q11Q11*sigma22 ! S(2,2)
+       sigma_1 = Q11Q11*sigma11 + c2*Q11Q12*sigma12 &
+               + Q12Q12*sigma22 ! S(1,1)
+       sigma_2 = Q12Q12*sigma11 - c2*Q11Q12*sigma12 &
+               + Q11Q11*sigma22 ! S(2,2)

-! Pure divergence
-      if ((sigma_1 >= c0).and.(sigma_2 >= c0)) then
+       ! Pure divergence
+       if ((sigma_1 >= c0).and.(sigma_2 >= c0)) then
          mresult1 = c0
          mresult2 = c0

-! Unconfined compression: cracking of blocks not along the axial splitting direction
-! which leads to the loss of their shape, so we again model it through diffusion
-      elseif ((sigma_1 >= c0).and.(sigma_2 < c0)) then
+       ! Unconfined compression: cracking of blocks not along the axial splitting direction
+       ! which leads to the loss of their shape, so we again model it through diffusion
+       elseif ((sigma_1 >= c0).and.(sigma_2 < c0)) then
          mresult1 = kfrac * (a1x - Q12Q12)
          mresult2 = kfrac * (a2x + Q11Q12)

-! Shear faulting
-      elseif (sigma_2 == c0) then
-         mresult1 = c0
-         mresult2 = c0
-      elseif ((sigma_1 <= c0).and.(sigma_1/sigma_2 <= threshold)) then
+       ! Shear faulting
+       elseif (sigma_2 == c0) then
+          mresult1 = c0
+          mresult2 = c0
+       elseif ((sigma_1 <= c0).and.(sigma_1/sigma_2 <= threshold)) then
          mresult1 = kfrac * (a1x - Q12Q12)
          mresult2 = kfrac * (a2x + Q11Q12)

-! Horizontal spalling
-      else
+       ! Horizontal spalling
+       else
          mresult1 = c0
          mresult2 = c0
-      endif
+       endif

 end subroutine calc_ffrac

 !=======================================================================
 !---! these subroutines write/read Fortran unformatted data files ..
 !=======================================================================
-
 ! Dumps all values needed for a restart

 subroutine write_restart_eap ()
@@ -2088,7 +2086,7 @@ subroutine write_restart_eap ()
       diag = .true.

       !-----------------------------------------------------------------
-     ! structure tensor
+      ! structure tensor
       !-----------------------------------------------------------------

       call write_restart_field(nu_dump_eap,0,a11_1,'ruf8','a11_1',1,diag)
@@ -2104,7 +2102,6 @@ subroutine write_restart_eap ()
 end subroutine write_restart_eap

 !=======================================================================
-
 ! Reads all values needed for elastic anisotropic plastic dynamics restart

 subroutine read_restart_eap()
@@ -2133,9 +2130,9 @@ subroutine read_restart_eap()
      ! 
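For reference, the if/elseif ladder in calc_ffrac above reduces to a small
decision rule on the principal stresses. A sketch of just that rule, not part
of the patch (threshold is passed in here, whereas the real code takes it and
kfrac from module data):

   pure function ffrac_regime(sigma_1, sigma_2, threshold) result(regime)
     ! mirrors the branch order in calc_ffrac; the 'diffusive' regimes are
     ! the ones that produce nonzero mresult1/mresult2
     integer, parameter :: dbl = kind(1.d0)
     real(dbl), intent(in) :: sigma_1, sigma_2, threshold
     character(len=24) :: regime
     if (sigma_1 >= 0._dbl .and. sigma_2 >= 0._dbl) then
        regime = 'pure divergence'         ! no fracture source
     elseif (sigma_1 >= 0._dbl) then       ! sigma_2 < 0 here
        regime = 'unconfined compression'  ! diffusive source
     elseif (sigma_2 == 0._dbl) then
        regime = 'no fracture'             ! guards the ratio below
     elseif (sigma_1/sigma_2 <= threshold) then   ! sigma_1 < 0 here
        regime = 'shear faulting'          ! diffusive source
     else
        regime = 'horizontal spalling'     ! no fracture source
     endif
   end function ffrac_regime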
Structure tensor must be read and scattered in pairs in order ! to properly match corner values across a tripole grid cut. !----------------------------------------------------------------- - if (my_task == master_task) write(nu_diag,*) & - 'structure tensor restart data' - + if (my_task == master_task) write(nu_diag,*) & + 'structure tensor restart data' + call read_restart_field(nu_restart_eap,0,a11_1,'ruf8', & 'a11_1',1,diag,field_loc_center,field_type_scalar) ! a11_1 call read_restart_field(nu_restart_eap,0,a11_3,'ruf8', & @@ -2156,22 +2153,22 @@ subroutine read_restart_eap() if (trim(grid_type) == 'tripole') then - call ice_HaloUpdate_stress(a11_1, a11_3, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(a11_3, a11_1, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(a11_2, a11_4, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(a11_4, a11_2, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(a12_1, a12_3, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(a12_3, a12_1, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(a12_2, a12_4, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(a12_4, a12_2, halo_info, & - field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a11_1, a11_3, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a11_3, a11_1, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a11_2, a11_4, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a11_4, a11_2, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a12_1, a12_3, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a12_3, a12_1, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a12_2, a12_4, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(a12_4, a12_2, halo_info, & + field_loc_center, field_type_scalar) endif @@ -2179,34 +2176,34 @@ subroutine read_restart_eap() ! 
Ensure unused values in west and south ghost cells are 0 !----------------------------------------------------------------- - !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) - do iblk = 1, nblocks - do j = 1, nghost - do i = 1, nx_block - a11_1 (i,j,iblk) = c0 - a11_2 (i,j,iblk) = c0 - a11_3 (i,j,iblk) = c0 - a11_4 (i,j,iblk) = c0 - a12_1 (i,j,iblk) = c0 - a12_2 (i,j,iblk) = c0 - a12_3 (i,j,iblk) = c0 - a12_4 (i,j,iblk) = c0 - enddo - enddo - do j = 1, ny_block - do i = 1, nghost - a11_1 (i,j,iblk) = c0 - a11_2 (i,j,iblk) = c0 - a11_3 (i,j,iblk) = c0 - a11_4 (i,j,iblk) = c0 - a12_1 (i,j,iblk) = c0 - a12_2 (i,j,iblk) = c0 - a12_3 (i,j,iblk) = c0 - a12_4 (i,j,iblk) = c0 - enddo - enddo + !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) + do iblk = 1, nblocks + do j = 1, nghost + do i = 1, nx_block + a11_1 (i,j,iblk) = c0 + a11_2 (i,j,iblk) = c0 + a11_3 (i,j,iblk) = c0 + a11_4 (i,j,iblk) = c0 + a12_1 (i,j,iblk) = c0 + a12_2 (i,j,iblk) = c0 + a12_3 (i,j,iblk) = c0 + a12_4 (i,j,iblk) = c0 enddo - !$OMP END PARALLEL DO + enddo + do j = 1, ny_block + do i = 1, nghost + a11_1 (i,j,iblk) = c0 + a11_2 (i,j,iblk) = c0 + a11_3 (i,j,iblk) = c0 + a11_4 (i,j,iblk) = c0 + a12_1 (i,j,iblk) = c0 + a12_2 (i,j,iblk) = c0 + a12_3 (i,j,iblk) = c0 + a12_4 (i,j,iblk) = c0 + enddo + enddo + enddo + !$OMP END PARALLEL DO end subroutine read_restart_eap diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index b97550f76..45b4c2062 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -22,9 +22,9 @@ ! Hibler, W. D. (1979). A dynamic thermodynamic sea ice model. J. Phys. ! Oceanogr., 9, 817-846. ! -! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The +! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The ! elastic-viscous-plastic method revisited. Ocean Model., 71, 2-12. -! +! ! author: Elizabeth C. Hunke, LANL ! ! 2003: Vectorized by Clifford Chen (Fujitsu) and William Lipscomb (LANL) @@ -62,14 +62,13 @@ module ice_dyn_evp contains !======================================================================= - ! Elastic-viscous-plastic dynamics driver ! #ifdef CICE_IN_NEMO ! Wind stress is set during this routine from the values supplied -! via NEMO (unless calc_strair is true). These values are supplied -! rotated on u grid and multiplied by aice. strairxT = 0 in this -! case so operations in dyn_prep1 are pointless but carried out to +! via NEMO (unless calc_strair is true). These values are supplied +! rotated on u grid and multiplied by aice. strairxT = 0 in this +! case so operations in dyn_prep1 are pointless but carried out to ! minimise code changes. #endif ! @@ -101,7 +100,7 @@ subroutine evp (dt) stresspU, stressmU, stress12U use ice_grid, only: hm, tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, & dxe, dxn, dxt, dxu, dye, dyn, dyt, dyu, & - ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & + ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & dxhy, dyhx, cxp, cyp, cxm, cym, & tarear, uarear, earear, narear, grid_average_X2Y, tarea, uarea, & grid_type, grid_ice, & @@ -121,13 +120,13 @@ subroutine evp (dt) ! local variables - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ksub , & ! subcycle step iblk , & ! block index ilo,ihi,jlo,jhi, & ! beginning and end of physical domain - i, j, ij + i, j, ij ! local indices - integer (kind=int_kind), dimension(max_blocks) :: & + integer (kind=int_kind), dimension(max_blocks) :: & icellt , & ! no. of cells where icetmask = 1 icelln , & ! no. 
of cells where icenmask = .true. icelle , & ! no. of cells where iceemask = .true. @@ -184,19 +183,20 @@ subroutine evp (dt) emassdti ! mass of E-cell/dte (kg/m^2 s) real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:), & ! bundled fields size 2 - fld3(:,:,:,:), & ! bundled fields size 3 - fld4(:,:,:,:) ! bundled fields size 4 + fld2(:,:,:,:) , & ! bundled fields size 2 + fld3(:,:,:,:) , & ! bundled fields size 3 + fld4(:,:,:,:) ! bundled fields size 4 real (kind=dbl_kind), allocatable :: & - shrU (:,:,:), & ! shearU array for gridC - zetax2T(:,:,:), & ! zetax2 = 2*zeta (bulk viscosity) - etax2T (:,:,:) ! etax2 = 2*eta (shear viscosity) - + shrU (:,:,:), & ! shearU array for gridC + zetax2T(:,:,:), & ! zetax2 = 2*zeta (bulk viscosity) + etax2T (:,:,:) ! etax2 = 2*eta (shear viscosity) + real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & strtmp ! stress combinations for momentum equation - logical (kind=log_kind) :: calc_strair + logical (kind=log_kind) :: & + calc_strair ! calculate air/ice stress integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & icetmask, & ! ice extent mask (T-cell) @@ -206,14 +206,15 @@ subroutine evp (dt) halo_info_mask ! ghost cell update info for masked halo type (block) :: & - this_block ! block information for current block + this_block ! block information for current block real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1, & ! temporary - work2 ! temporary + work1, & ! temporary + work2 ! temporary + + logical (kind=log_kind), save :: & + first_time = .true. ! first time logical - logical (kind=log_kind), save :: first_time = .true. - character(len=*), parameter :: subname = '(evp)' call ice_timer_start(timer_dynamics) ! dynamics @@ -234,15 +235,15 @@ subroutine evp (dt) shrU (:,:,:) = c0 zetax2T(:,:,:) = c0 etax2T (:,:,:) = c0 - + endif - + ! This call is needed only if dt changes during runtime. ! call set_evp_parameters (dt) !----------------------------------------------------------------- ! boundary updates - ! commented out because the ghost cells are freshly + ! commented out because the ghost cells are freshly ! updated after cleanup_itd !----------------------------------------------------------------- @@ -258,29 +259,29 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) SCHEDULE(runtime) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - rdg_conv (i,j,iblk) = c0 - rdg_shear(i,j,iblk) = c0 - divu (i,j,iblk) = c0 - shear(i,j,iblk) = c0 + do j = 1, ny_block + do i = 1, nx_block + rdg_conv (i,j,iblk) = c0 + rdg_shear(i,j,iblk) = c0 + divu (i,j,iblk) = c0 + shear(i,j,iblk) = c0 enddo enddo - !----------------------------------------------------------------- - ! preparation for dynamics - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! preparation for dynamics + !----------------------------------------------------------------- - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - call dyn_prep1 (nx_block, ny_block, & + call dyn_prep1 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - aice (:,:,iblk), vice (:,:,iblk), & - vsno (:,:,iblk), tmask (:,:,iblk), & + aice (:,:,iblk), vice (:,:,iblk), & + vsno (:,:,iblk), tmask (:,:,iblk), & tmass (:,:,iblk), icetmask(:,:,iblk)) enddo ! iblk @@ -295,22 +296,22 @@ subroutine evp (dt) ! 
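The block below maps T-grid fields onto the U stagger (and, for grid_ice 'C'
or 'CD', onto the E and N staggers) with grid_average_X2Y. As a sketch of the
stencil shape only, not of the actual CICE routine (which applies land masks
and area weights, with the 'F'/'S' mode argument selecting the averaging
flavor), a T-to-U average gathers the four T cells around each NE corner:

   program t2u_sketch
     implicit none
     integer, parameter :: dbl = kind(1.d0)
     integer :: i, j
     real(dbl) :: workT(4,4), workU(3,3)
     call random_number(workT)   ! stand-in for a T-centered field
     do j = 1, 3
     do i = 1, 3
        ! U point sits at the NE corner of T cell (i,j)
        workU(i,j) = 0.25_dbl*(workT(i,j  ) + workT(i+1,j  ) &
                             + workT(i,j+1) + workT(i+1,j+1))
     enddo
     enddo
     print *, workU(2,2)
   end program t2u_sketch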
convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('F',tmass,'T',umass,'U') - call grid_average_X2Y('F',aice_init,'T',aiu,'U') - call grid_average_X2Y('S',uocn,grid_ocn_dynu,uocnU,'U') - call grid_average_X2Y('S',vocn,grid_ocn_dynv,vocnU,'U') - call grid_average_X2Y('S',ss_tltx,grid_ocn_dynu,ss_tltxU,'U') - call grid_average_X2Y('S',ss_tlty,grid_ocn_dynv,ss_tltyU,'U') + call grid_average_X2Y('F', tmass , 'T' , umass , 'U') + call grid_average_X2Y('F', aice_init, 'T' , aiu , 'U') + call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnU , 'U') + call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnU , 'U') + call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') + call grid_average_X2Y('S', ss_tlty , grid_ocn_dynv, ss_tltyU, 'U') if (grid_ice == 'CD' .or. grid_ice == 'C') then - call grid_average_X2Y('F',tmass,'T',emass,'E') - call grid_average_X2Y('F',aice_init,'T', aie,'E') - call grid_average_X2Y('F',tmass,'T',nmass,'N') - call grid_average_X2Y('F',aice_init,'T', ain,'N') - call grid_average_X2Y('S',uocn,grid_ocn_dynu,uocnN,'N') - call grid_average_X2Y('S',vocn,grid_ocn_dynv,vocnN,'N') - call grid_average_X2Y('S',uocn,grid_ocn_dynu,uocnE,'E') - call grid_average_X2Y('S',vocn,grid_ocn_dynv,vocnE,'E') + call grid_average_X2Y('F', tmass , 'T' , emass, 'E') + call grid_average_X2Y('F', aice_init, 'T' , aie , 'E') + call grid_average_X2Y('F', tmass , 'T' , nmass, 'N') + call grid_average_X2Y('F', aice_init, 'T' , ain , 'N') + call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnN, 'N') + call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnN, 'N') + call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnE, 'E') + call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnE, 'E') endif !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing @@ -330,63 +331,63 @@ subroutine evp (dt) field_loc_center, field_type_vector) call ice_HaloUpdate (strairyT, halo_info, & field_loc_center, field_type_vector) - call grid_average_X2Y('F',strairxT,'T',strairx,'U') - call grid_average_X2Y('F',strairyT,'T',strairy,'U') + call grid_average_X2Y('F', strairxT, 'T', strairx, 'U') + call grid_average_X2Y('F', strairyT, 'T', strairy, 'U') endif if (grid_ice == 'CD' .or. grid_ice == 'C') then if (.not. 
calc_strair) then - call grid_average_X2Y('F', strax, grid_atm_dynu, strairxN, 'N') - call grid_average_X2Y('F', stray, grid_atm_dynv, strairyN, 'N') - call grid_average_X2Y('F', strax, grid_atm_dynu, strairxE, 'E') - call grid_average_X2Y('F', stray, grid_atm_dynv, strairyE, 'E') + call grid_average_X2Y('F', strax , grid_atm_dynu, strairxN, 'N') + call grid_average_X2Y('F', stray , grid_atm_dynv, strairyN, 'N') + call grid_average_X2Y('F', strax , grid_atm_dynu, strairxE, 'E') + call grid_average_X2Y('F', stray , grid_atm_dynv, strairyE, 'E') else - call grid_average_X2Y('F',strairxT,'T',strairxN,'N') - call grid_average_X2Y('F',strairyT,'T',strairyN,'N') - call grid_average_X2Y('F',strairxT,'T',strairxE,'E') - call grid_average_X2Y('F',strairyT,'T',strairyE,'E') - endif + call grid_average_X2Y('F', strairxT, 'T' , strairxN, 'N') + call grid_average_X2Y('F', strairyT, 'T' , strairyN, 'N') + call grid_average_X2Y('F', strairxT, 'T' , strairxE, 'E') + call grid_average_X2Y('F', strairyT, 'T' , strairyE, 'E') + endif endif !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) do iblk = 1, nblocks - !----------------------------------------------------------------- - ! more preparation for dynamics - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! more preparation for dynamics + !----------------------------------------------------------------- - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + if (trim(grid_ice) == 'B') then - call dyn_prep2 (nx_block, ny_block, & + call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt(iblk), icellu(iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & - aiu (:,:,iblk), umass (:,:,iblk), & - umassdti (:,:,iblk), fcor_blk (:,:,iblk), & - umask (:,:,iblk), & + icellt (iblk), icellu (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + aiu (:,:,iblk), umass (:,:,iblk), & + umassdti (:,:,iblk), fcor_blk (:,:,iblk), & + umask (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & - ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & - fm (:,:,iblk), dt, & - strtltx (:,:,iblk), strtlty (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - taubx (:,:,iblk), tauby (:,:,iblk), & - waterx (:,:,iblk), watery (:,:,iblk), & - forcex (:,:,iblk), forcey (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + strairx (:,:,iblk), strairy (:,:,iblk), & + ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & + icetmask (:,:,iblk), iceumask (:,:,iblk), & + fm (:,:,iblk), dt, & + strtltx (:,:,iblk), strtlty (:,:,iblk), & + strocnx (:,:,iblk), strocny (:,:,iblk), & + strintx (:,:,iblk), strinty (:,:,iblk), & + taubx (:,:,iblk), tauby (:,:,iblk), & + waterx (:,:,iblk), watery (:,:,iblk), & + forcex (:,:,iblk), forcey (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 
(:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & uvel_init (:,:,iblk), vvel_init (:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & Tbu (:,:,iblk)) @@ -394,7 +395,7 @@ subroutine evp (dt) elseif (trim(grid_ice) == 'CD' .or. grid_ice == 'C') then call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt(iblk), icellu(iblk), & + icellt (iblk), icellu (iblk), & indxti (:,iblk), indxtj (:,iblk), & indxui (:,iblk), indxuj (:,iblk), & aiu (:,:,iblk), umass (:,:,iblk), & @@ -422,20 +423,20 @@ subroutine evp (dt) Tbu (:,:,iblk)) endif - !----------------------------------------------------------------- - ! ice strength - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! ice strength + !----------------------------------------------------------------- strength(:,:,iblk) = c0 ! initialize do ij = 1, icellt(iblk) i = indxti(ij, iblk) j = indxtj(ij, iblk) call icepack_ice_strength(ncat = ncat, & - aice = aice (i,j, iblk), & - vice = vice (i,j, iblk), & - aice0 = aice0 (i,j, iblk), & - aicen = aicen (i,j,:,iblk), & - vicen = vicen (i,j,:,iblk), & + aice = aice (i,j, iblk), & + vice = vice (i,j, iblk), & + aice0 = aice0 (i,j, iblk), & + aicen = aicen (i,j,:,iblk), & + vicen = vicen (i,j,:,iblk), & strength = strength(i,j, iblk) ) enddo ! ij @@ -447,89 +448,89 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) do iblk = 1, nblocks - !----------------------------------------------------------------- - ! more preparation for dynamics on N grid - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! 
more preparation for dynamics on N grid + !----------------------------------------------------------------- - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - call dyn_prep2 (nx_block, ny_block, & + call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt(iblk), icelln(iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & - aiN (:,:,iblk), nmass (:,:,iblk), & - nmassdti (:,:,iblk), fcorN_blk (:,:,iblk),& + icellt (iblk), icelln (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + indxni (:,iblk), indxnj (:,iblk), & + aiN (:,:,iblk), nmass (:,:,iblk), & + nmassdti (:,:,iblk), fcorN_blk (:,:,iblk), & nmask (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - strairxN (:,:,iblk), strairyN (:,:,iblk), & - ss_tltxN (:,:,iblk), ss_tltyN (:,:,iblk), & - icetmask (:,:,iblk), icenmask (:,:,iblk), & - fmN (:,:,iblk), dt, & - strtltxN (:,:,iblk), strtltyN (:,:,iblk), & - strocnxN (:,:,iblk), strocnyN (:,:,iblk), & - strintxN (:,:,iblk), strintyN (:,:,iblk), & - taubxN (:,:,iblk), taubyN (:,:,iblk), & - waterxN (:,:,iblk), wateryN (:,:,iblk), & - forcexN (:,:,iblk), forceyN (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1 (:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - uvelN_init (:,:,iblk), vvelN_init (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + strairxN (:,:,iblk), strairyN (:,:,iblk), & + ss_tltxN (:,:,iblk), ss_tltyN (:,:,iblk), & + icetmask (:,:,iblk), icenmask (:,:,iblk), & + fmN (:,:,iblk), dt, & + strtltxN (:,:,iblk), strtltyN (:,:,iblk), & + strocnxN (:,:,iblk), strocnyN (:,:,iblk), & + strintxN (:,:,iblk), strintyN (:,:,iblk), & + taubxN (:,:,iblk), taubyN (:,:,iblk), & + waterxN (:,:,iblk), wateryN (:,:,iblk), & + forcexN (:,:,iblk), forceyN (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & TbN (:,:,iblk)) - !----------------------------------------------------------------- - ! more preparation for dynamics on E grid - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! 
more preparation for dynamics on E grid + !----------------------------------------------------------------- - call dyn_prep2 (nx_block, ny_block, & + call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt(iblk), icelle(iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxei (:,iblk), indxej (:,iblk), & - aiE (:,:,iblk), emass (:,:,iblk), & - emassdti (:,:,iblk), fcorE_blk (:,:,iblk),& - emask (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - strairxE (:,:,iblk), strairyE (:,:,iblk), & - ss_tltxE (:,:,iblk), ss_tltyE (:,:,iblk), & - icetmask (:,:,iblk), iceemask (:,:,iblk), & - fmE (:,:,iblk), dt, & - strtltxE (:,:,iblk), strtltyE (:,:,iblk), & - strocnxE (:,:,iblk), strocnyE (:,:,iblk), & - strintxE (:,:,iblk), strintyE (:,:,iblk), & - taubxE (:,:,iblk), taubyE (:,:,iblk), & - waterxE (:,:,iblk), wateryE (:,:,iblk), & - forcexE (:,:,iblk), forceyE (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1 (:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - uvelE_init (:,:,iblk), vvelE_init (:,:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & + icellt (iblk), icelle (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + indxei (:,iblk), indxej (:,iblk), & + aiE (:,:,iblk), emass (:,:,iblk), & + emassdti (:,:,iblk), fcorE_blk (:,:,iblk), & + emask (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + strairxE (:,:,iblk), strairyE (:,:,iblk), & + ss_tltxE (:,:,iblk), ss_tltyE (:,:,iblk), & + icetmask (:,:,iblk), iceemask (:,:,iblk), & + fmE (:,:,iblk), dt, & + strtltxE (:,:,iblk), strtltyE (:,:,iblk), & + strocnxE (:,:,iblk), strocnyE (:,:,iblk), & + strintxE (:,:,iblk), strintyE (:,:,iblk), & + taubxE (:,:,iblk), taubyE (:,:,iblk), & + waterxE (:,:,iblk), wateryE (:,:,iblk), & + forcexE (:,:,iblk), forceyE (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & TbE (:,:,iblk)) do i=1,nx_block do j=1,ny_block if (.not.iceumask(i,j,iblk)) then - stresspU(i,j,iblk) = c0 - stressmU(i,j,iblk) = c0 + stresspU (i,j,iblk) = c0 + stressmU (i,j,iblk) = c0 stress12U(i,j,iblk) = c0 endif if (icetmask(i,j,iblk) == 0) then - stresspT(i,j,iblk) = c0 - stressmT(i,j,iblk) = c0 + stresspT (i,j,iblk) = c0 + stressmT (i,j,iblk) = c0 stress12T(i,j,iblk) = c0 endif enddo @@ -546,35 +547,35 @@ subroutine evp (dt) if (grid_ice == 'CD' .or. 
grid_ice == 'C') then call ice_timer_start(timer_bound) - call ice_HaloUpdate (uvelE, halo_info, & + call ice_HaloUpdate (uvelE, halo_info, & field_loc_Eface, field_type_vector) - call ice_HaloUpdate (vvelN, halo_info, & + call ice_HaloUpdate (vvelN, halo_info, & field_loc_Nface, field_type_vector) call ice_timer_stop(timer_bound) if (grid_ice == 'C') then - call grid_average_X2Y('A',uvelE,'E',uvelN,'N') - call grid_average_X2Y('A',vvelN,'N',vvelE,'E') + call grid_average_X2Y('A', uvelE, 'E', uvelN, 'N') + call grid_average_X2Y('A', vvelN, 'N', vvelE, 'E') uvelN(:,:,:) = uvelN(:,:,:)*npm(:,:,:) vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) endif call ice_timer_start(timer_bound) - call ice_HaloUpdate (uvelN, halo_info, & + call ice_HaloUpdate (uvelN, halo_info, & field_loc_Nface, field_type_vector) - call ice_HaloUpdate (vvelE, halo_info, & + call ice_HaloUpdate (vvelE, halo_info, & field_loc_Eface, field_type_vector) call ice_timer_stop(timer_bound) - call grid_average_X2Y('S',uvelE,'E',uvel,'U') - call grid_average_X2Y('S',vvelN,'N',vvel,'U') + call grid_average_X2Y('S', uvelE, 'E', uvel, 'U') + call grid_average_X2Y('S', vvelN, 'N', vvel, 'U') uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) endif call ice_timer_start(timer_bound) - call ice_HaloUpdate (strength, halo_info, & - field_loc_center, field_type_scalar) + call ice_HaloUpdate (strength, halo_info, & + field_loc_center, field_type_scalar) ! velocities may have changed in dyn_prep2 call stack_fields(uvel, vvel, fld2) @@ -590,7 +591,7 @@ subroutine evp (dt) elseif (grid_ice == 'C' .or. grid_ice == 'CD') then !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,i,j) SCHEDULE(runtime) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -610,8 +611,8 @@ subroutine evp (dt) !$OMP END PARALLEL DO endif call ice_timer_start(timer_bound) - call ice_HaloUpdate (halomask, halo_info, & - field_loc_center, field_type_scalar) + call ice_HaloUpdate (halomask, halo_info, & + field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) call ice_HaloMask(halo_info_mask, halo_info, halomask) endif @@ -619,7 +620,7 @@ subroutine evp (dt) !----------------------------------------------------------------- ! 
seabed stress factor Tbu (Tbu is part of Cb coefficient) !----------------------------------------------------------------- - + if (seabed_stress) then if (grid_ice == "B") then @@ -627,22 +628,22 @@ subroutine evp (dt) if ( seabed_stress_method == 'LKD' ) then !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - call seabed_stress_factor_LKD (nx_block, ny_block, & - icellu (iblk), & - indxui(:,iblk), indxuj(:,iblk), & - vice(:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), Tbu(:,:,iblk)) + call seabed_stress_factor_LKD (nx_block , ny_block , & + icellu (iblk), & + indxui (:,iblk), indxuj(:,iblk), & + vice (:,:,iblk), aice(:,:,iblk), & + hwater(:,:,iblk), Tbu (:,:,iblk)) enddo !$OMP END PARALLEL DO elseif ( seabed_stress_method == 'probabilistic' ) then !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - call seabed_stress_factor_prob (nx_block, ny_block, & + call seabed_stress_factor_prob (nx_block , ny_block , & icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & - aicen(:,:,:,iblk), vicen(:,:,:,iblk), & - hwater(:,:,iblk), Tbu(:,:,iblk)) + aicen(:,:,:,iblk), vicen(:,:,:,iblk) , & + hwater (:,:,iblk), Tbu (:,:,iblk)) enddo !$OMP END PARALLEL DO endif @@ -652,28 +653,28 @@ subroutine evp (dt) if ( seabed_stress_method == 'LKD' ) then !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - call seabed_stress_factor_LKD (nx_block, ny_block, & - icelle (iblk), & - indxei(:,iblk), indxej(:,iblk), & - vice(:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), TbE(:,:,iblk)) - call seabed_stress_factor_LKD (nx_block, ny_block, & - icelln (iblk), & - indxni(:,iblk), indxnj(:,iblk), & - vice(:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), TbN(:,:,iblk)) + call seabed_stress_factor_LKD (nx_block , ny_block, & + icelle (iblk), & + indxei (:,iblk), indxej(:,iblk), & + vice (:,:,iblk), aice(:,:,iblk), & + hwater(:,:,iblk), TbE (:,:,iblk)) + call seabed_stress_factor_LKD (nx_block , ny_block, & + icelln (iblk), & + indxni (:,iblk), indxnj(:,iblk), & + vice (:,:,iblk), aice(:,:,iblk), & + hwater(:,:,iblk), TbN (:,:,iblk)) enddo !$OMP END PARALLEL DO elseif ( seabed_stress_method == 'probabilistic' ) then !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - call seabed_stress_factor_prob (nx_block, ny_block, & + call seabed_stress_factor_prob (nx_block , ny_block , & icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & - aicen(:,:,:,iblk), vicen(:,:,:,iblk), & - hwater(:,:,iblk), Tbu(:,:,iblk), & - TbE(:,:,iblk), TbN(:,:,iblk), & + aicen(:,:,:,iblk), vicen(:,:,:,iblk) , & + hwater (:,:,iblk), Tbu (:,:,iblk) , & + TbE (:,:,iblk), TbN (:,:,iblk) , & icelle(iblk), indxei(:,iblk), indxej(:,iblk), & icelln(iblk), indxni(:,iblk), indxnj(:,iblk) ) enddo @@ -681,7 +682,7 @@ subroutine evp (dt) endif endif - + endif if (evp_algorithm == "shared_mem_1d" ) then @@ -696,19 +697,19 @@ subroutine evp (dt) endif call ice_timer_start(timer_evp_1d) - call ice_dyn_evp_1d_copyin( & + call ice_dyn_evp_1d_copyin( & nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & icetmask, iceumask, & - cdn_ocn,aiu,uocnU,vocnU,forcex,forcey,Tbu, & + cdn_ocn,aiu,uocnU,vocnU,forcex,forcey,Tbu, & umassdti,fm,uarear,tarear,strintx,strinty,uvel_init,vvel_init,& strength,uvel,vvel,dxt,dyt, & stressp_1 ,stressp_2, stressp_3, stressp_4, & stressm_1 ,stressm_2, stressm_3, stressm_4, & stress12_1,stress12_2,stress12_3,stress12_4 ) call ice_dyn_evp_1d_kernel() - call 
ice_dyn_evp_1d_copyout( & - nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost,& -!strocn uvel,vvel, strocnx,strocny, strintx,strinty, & + call ice_dyn_evp_1d_copyout( & + nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & +!strocn uvel,vvel, strocnx,strocny, strintx,strinty, & uvel,vvel, strintx,strinty, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & @@ -725,12 +726,12 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk,strtmp) SCHEDULE(runtime) do iblk = 1, nblocks - + !----------------------------------------------------------------- ! stress tensor equation, total surface stress !----------------------------------------------------------------- - call stress (nx_block, ny_block, & - icellt(iblk), & + call stress (nx_block , ny_block , & + icellt (iblk), & indxti (:,iblk), indxtj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & dxt (:,:,iblk), dyt (:,:,iblk), & @@ -766,7 +767,7 @@ subroutine evp (dt) !----------------------------------------------------------------- ! momentum equation !----------------------------------------------------------------- - call stepu (nx_block, ny_block, & + call stepu (nx_block , ny_block , & icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & aiu (:,:,iblk), strtmp (:,:,:), & @@ -781,7 +782,7 @@ subroutine evp (dt) uvel (:,:,iblk), vvel (:,:,iblk), & Tbu (:,:,iblk)) - + enddo ! iblk !$OMP END PARALLEL DO @@ -801,21 +802,21 @@ subroutine evp (dt) dxU (:,:,iblk), dyU (:,:,iblk), & ratiodxN(:,:,iblk), ratiodxNr(:,:,iblk), & ratiodyE(:,:,iblk), ratiodyEr(:,:,iblk), & - epm(:,:,iblk) , npm(:,:,iblk) , & - shearU=shrU(:,:,iblk) ) + epm (:,:,iblk), npm (:,:,iblk), & + shearU = shrU(:,:,iblk) ) enddo ! iblk !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & - field_loc_NEcorner, field_type_scalar, & + call dyn_HaloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_scalar, & shrU) - + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stressC_T (nx_block, ny_block, & - icellt(iblk), & + call stressC_T (nx_block , ny_block , & + icellt (iblk), & indxti (:,iblk), indxtj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & @@ -845,14 +846,14 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_HaloUpdate (halo_info, halo_info_mask, & field_loc_center, field_type_scalar, & zetax2T, etax2T, stresspT, stressmT) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stressC_U (nx_block, ny_block, & - icellu(iblk), & + call stressC_U (nx_block , ny_block , & + icellu (iblk), & indxui (:,iblk), indxuj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & @@ -866,49 +867,49 @@ subroutine evp (dt) hm (:,:,iblk), & zetax2T (:,:,iblk), etax2T (:,:,iblk), & strength (:,:,iblk), shrU (:,:,iblk), & - stress12U (:,:,iblk)) + stress12U (:,:,iblk)) enddo !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info , halo_info_mask, & - field_loc_NEcorner, field_type_scalar, & + call dyn_HaloUpdate (halo_info , halo_info_mask, & + field_loc_NEcorner, field_type_scalar, & stress12U) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call div_stress (nx_block, ny_block, & ! E point - icelle(iblk), & + call div_stress (nx_block , ny_block , & ! 
E point + icelle (iblk), & indxei (:,iblk), indxej (:,iblk), & dxE (:,:,iblk), dyE (:,:,iblk), & dxU (:,:,iblk), dyT (:,:,iblk), & - earear (:,:,iblk), & - stresspF1 = stresspT (:,:,iblk), & - stressmF1 = stressmT (:,:,iblk), & - stress12F1 = stress12U (:,:,iblk), & - F1 = strintxE (:,:,iblk), & + earear (:,:,iblk) , & + stresspF1 = stresspT (:,:,iblk) , & + stressmF1 = stressmT (:,:,iblk) , & + stress12F1 = stress12U (:,:,iblk) , & + F1 = strintxE (:,:,iblk) , & grid_location = 'E') - call div_stress (nx_block, ny_block, & ! N point - icelln(iblk), & + call div_stress (nx_block , ny_block , & ! N point + icelln (iblk), & indxni (:,iblk), indxnj (:,iblk), & dxN (:,:,iblk), dyN (:,:,iblk), & dxT (:,:,iblk), dyU (:,:,iblk), & - narear (:,:,iblk), & - stresspF2 = stresspT (:,:,iblk), & - stressmF2 = stressmT (:,:,iblk), & - stress12F2 = stress12U (:,:,iblk), & - F2 = strintyN (:,:,iblk), & + narear (:,:,iblk) , & + stresspF2 = stresspT (:,:,iblk) , & + stressmF2 = stressmT (:,:,iblk) , & + stress12F2 = stress12U (:,:,iblk) , & + F2 = strintyN (:,:,iblk) , & grid_location = 'N') - + enddo !$OMP END PARALLEL DO !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stepu_C (nx_block, ny_block, & ! u, E point + call stepu_C (nx_block , ny_block , & ! u, E point icelle (iblk), Cdn_ocn (:,:,iblk), & indxei (:,iblk), indxej (:,iblk), & aiE (:,:,iblk), & @@ -935,28 +936,28 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_HaloUpdate (halo_info, halo_info_mask, & field_loc_Eface, field_type_vector, & uvelE) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_HaloUpdate (halo_info, halo_info_mask, & field_loc_Nface, field_type_vector, & vvelN) - call grid_average_X2Y('A',uvelE,'E',uvelN,'N') - call grid_average_X2Y('A',vvelN,'N',vvelE,'E') + call grid_average_X2Y('A', uvelE, 'E', uvelN, 'N') + call grid_average_X2Y('A', vvelN, 'N', vvelE, 'E') uvelN(:,:,:) = uvelN(:,:,:)*npm(:,:,:) vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_HaloUpdate (halo_info, halo_info_mask, & field_loc_Nface, field_type_vector, & uvelN) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_HaloUpdate (halo_info, halo_info_mask, & field_loc_Eface, field_type_vector, & vvelE) - call grid_average_X2Y('S',uvelE,'E',uvel,'U') - call grid_average_X2Y('S',vvelN,'N',vvel,'U') + call grid_average_X2Y('S', uvelE, 'E', uvel, 'U') + call grid_average_X2Y('S', vvelN, 'N', vvel, 'U') uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) @@ -965,8 +966,8 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stressCD_T (nx_block, ny_block, & - icellt(iblk), & + call stressCD_T (nx_block , ny_block , & + icellt (iblk), & indxti (:,iblk), indxtj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & @@ -997,14 +998,14 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! 
calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_HaloUpdate (halo_info, halo_info_mask, & field_loc_center, field_type_scalar, & zetax2T, etax2T) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stressCD_U (nx_block, ny_block, & - icellu(iblk), & + call stressCD_U (nx_block , ny_block , & + icellu (iblk), & indxui (:,iblk), indxuj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & @@ -1019,23 +1020,23 @@ subroutine evp (dt) zetax2T (:,:,iblk), etax2T (:,:,iblk), & strength (:,:,iblk), & stresspU (:,:,iblk), stressmU (:,:,iblk), & - stress12U (:,:,iblk)) + stress12U (:,:,iblk)) enddo !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_HaloUpdate (halo_info, halo_info_mask, & field_loc_center, field_type_scalar, & stresspT, stressmT, stress12T) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_HaloUpdate (halo_info, halo_info_mask, & field_loc_NEcorner,field_type_scalar, & stresspU, stressmU, stress12U) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call div_stress (nx_block, ny_block, & ! E point - icelle(iblk), & + call div_stress (nx_block , ny_block , & ! E point + icelle (iblk), & indxei (:,iblk), indxej (:,iblk), & dxE (:,:,iblk), dyE (:,:,iblk), & dxU (:,:,iblk), dyT (:,:,iblk), & @@ -1047,8 +1048,8 @@ subroutine evp (dt) strintxE (:,:,iblk), strintyE (:,:,iblk), & 'E') - call div_stress (nx_block, ny_block, & ! N point - icelln(iblk), & + call div_stress (nx_block , ny_block , & ! N point + icelln (iblk), & indxni (:,iblk), indxnj (:,iblk), & dxN (:,:,iblk), dyN (:,:,iblk), & dxT (:,:,iblk), dyU (:,:,iblk), & @@ -1059,14 +1060,14 @@ subroutine evp (dt) stress12U (:,:,iblk), & strintxN (:,:,iblk), strintyN (:,:,iblk), & 'N') - + enddo !$OMP END PARALLEL DO !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stepuv_CD (nx_block, ny_block, & ! E point + call stepuv_CD (nx_block , ny_block , & ! E point icelle (iblk), Cdn_ocn (:,:,iblk), & indxei (:,iblk), indxej (:,iblk), & aiE (:,:,iblk), & @@ -1080,7 +1081,7 @@ subroutine evp (dt) uvelE (:,:,iblk), vvelE (:,:,iblk), & TbE (:,:,iblk)) - call stepuv_CD (nx_block, ny_block, & ! N point + call stepuv_CD (nx_block , ny_block , & ! N point icelln (iblk), Cdn_ocn (:,:,iblk), & indxni (:,iblk), indxnj (:,iblk), & aiN (:,:,iblk), & @@ -1097,15 +1098,15 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_HaloUpdate (halo_info, halo_info_mask, & field_loc_Eface, field_type_vector, & uvelE, vvelE) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_HaloUpdate (halo_info, halo_info_mask, & field_loc_Nface, field_type_vector, & uvelN, vvelN) - call grid_average_X2Y('S',uvelE,'E',uvel,'U') - call grid_average_X2Y('S',vvelN,'N',vvel,'U') + call grid_average_X2Y('S', uvelE, 'E', uvel, 'U') + call grid_average_X2Y('S', vvelN, 'N', vvel, 'U') uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) @@ -1114,10 +1115,10 @@ subroutine evp (dt) ! U fields at NE corner ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_HaloUpdate (halo_info, halo_info_mask, & field_loc_NEcorner, field_type_vector, & uvel, vvel) - + enddo ! subcycling call ice_timer_stop(timer_evp_2d) endif ! 
evp_algorithm @@ -1126,14 +1127,14 @@ subroutine evp (dt) if (grid_ice == 'CD' .or. grid_ice == 'C') then deallocate(shrU, zetax2T, etax2T) endif - + if (maskhalo_dyn) then call ice_HaloDestroy(halo_info_mask) endif ! Force symmetry across the tripole seam if (trim(grid_type) == 'tripole') then - ! TODO: CD-grid + ! TODO: C/CD-grid if (maskhalo_dyn) then !------------------------------------------------------- ! set halomask to zero because ice_HaloMask always keeps @@ -1142,61 +1143,61 @@ subroutine evp (dt) halomask = 0 call ice_HaloMask(halo_info_mask, halo_info, halomask) - call ice_HaloUpdate_stress(stressp_1, stressp_3, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_3, stressp_1, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_2, stressp_4, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_4, stressp_2, halo_info_mask, & - field_loc_center, field_type_scalar) - - call ice_HaloUpdate_stress(stressm_1, stressm_3, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_3, stressm_1, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_2, stressm_4, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_4, stressm_2, halo_info_mask, & - field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_1 , stressp_3 , halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_3 , stressp_1 , halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_2 , stressp_4 , halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_4 , stressp_2 , halo_info_mask, & + field_loc_center, field_type_scalar) + + call ice_HaloUpdate_stress(stressm_1 , stressm_3 , halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_3 , stressm_1 , halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_2 , stressm_4 , halo_info_mask, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_4 , stressm_2 , halo_info_mask, & + field_loc_center, field_type_scalar) call ice_HaloUpdate_stress(stress12_1, stress12_3, halo_info_mask, & - field_loc_center, field_type_scalar) + field_loc_center, field_type_scalar) call ice_HaloUpdate_stress(stress12_3, stress12_1, halo_info_mask, & - field_loc_center, field_type_scalar) + field_loc_center, field_type_scalar) call ice_HaloUpdate_stress(stress12_2, stress12_4, halo_info_mask, & - field_loc_center, field_type_scalar) + field_loc_center, field_type_scalar) call ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info_mask, & - field_loc_center, field_type_scalar) + field_loc_center, field_type_scalar) call ice_HaloDestroy(halo_info_mask) else - call ice_HaloUpdate_stress(stressp_1, stressp_3, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_3, stressp_1, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_2, stressp_4, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_4, stressp_2, halo_info, & - field_loc_center, field_type_scalar) - - call ice_HaloUpdate_stress(stressm_1, stressm_3, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_3, stressm_1, halo_info, & - 
field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_2, stressm_4, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_4, stressm_2, halo_info, & - field_loc_center, field_type_scalar) - - call ice_HaloUpdate_stress(stress12_1, stress12_3, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_3, stress12_1, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_2, stress12_4, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info, & - field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_1 , stressp_3 , halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_3 , stressp_1 , halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_2 , stressp_4 , halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressp_4 , stressp_2 , halo_info, & + field_loc_center, field_type_scalar) + + call ice_HaloUpdate_stress(stressm_1 , stressm_3 , halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_3 , stressm_1 , halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_2 , stressm_4 , halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stressm_4 , stressm_2 , halo_info, & + field_loc_center, field_type_scalar) + + call ice_HaloUpdate_stress(stress12_1, stress12_3, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stress12_3, stress12_1, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stress12_2, stress12_4, halo_info, & + field_loc_center, field_type_scalar) + call ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info, & + field_loc_center, field_type_scalar) endif ! maskhalo endif ! 
tripole @@ -1206,16 +1207,14 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - - call dyn_finish & - (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & + call dyn_finish & + (nx_block , ny_block , & + icellu (iblk), Cdn_ocn (:,:,iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & - aiu (:,:,iblk), fm (:,:,iblk), & + aiu (:,:,iblk), fm (:,:,iblk), & strocnx (:,:,iblk), strocny (:,:,iblk)) - enddo !$OMP END PARALLEL DO @@ -1223,23 +1222,23 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - - call dyn_finish & - (nx_block, ny_block, & - icelln (iblk), Cdn_ocn (:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - aiN (:,:,iblk), fmN (:,:,iblk), & + + call dyn_finish & + (nx_block , ny_block , & + icelln (iblk), Cdn_ocn (:,:,iblk), & + indxni (:,iblk), indxnj (:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + aiN (:,:,iblk), fmN (:,:,iblk), & strocnxN(:,:,iblk), strocnyN(:,:,iblk)) - call dyn_finish & - (nx_block, ny_block, & - icelle (iblk), Cdn_ocn (:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - aiE (:,:,iblk), fmE (:,:,iblk), & + call dyn_finish & + (nx_block , ny_block , & + icelle (iblk), Cdn_ocn (:,:,iblk), & + indxei (:,iblk), indxej (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + aiE (:,:,iblk), fmE (:,:,iblk), & strocnxE(:,:,iblk), strocnyE(:,:,iblk)) enddo @@ -1267,12 +1266,12 @@ subroutine evp (dt) field_loc_NEcorner, field_type_vector) call ice_HaloUpdate (work2, halo_info, & field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('F',work1,'U',strocnxT,'T') ! shift - call grid_average_X2Y('F',work2,'U',strocnyT,'T') + call grid_average_X2Y('F', work1, 'U', strocnxT, 'T') ! shift + call grid_average_X2Y('F', work2, 'U', strocnyT, 'T') if (grid_ice == 'CD' .or. grid_ice == 'C') then - call grid_average_X2Y('S',strintxE,'E',strintx,'U') ! diagnostic - call grid_average_X2Y('S',strintyN,'N',strinty,'U') ! diagnostic + call grid_average_X2Y('S', strintxE, 'E', strintx, 'U') ! diagnostic + call grid_average_X2Y('S', strintyN, 'N', strinty, 'U') ! diagnostic endif call ice_timer_stop(timer_dynamics) ! dynamics @@ -1280,34 +1279,33 @@ subroutine evp (dt) end subroutine evp !======================================================================= - ! Computes the rates of strain and internal stress components for ! each of the four corners on each T-grid cell. ! Computes stress terms for the momentum equation ! ! author: Elizabeth C. 
Hunke, LANL - subroutine stress (nx_block, ny_block, & - icellt, & - indxti, indxtj, & - uvel, vvel, & - dxt, dyt, & - dxhy, dyhx, & - cxp, cyp, & - cxm, cym, & - DminTarea, & - strength, & - stressp_1, stressp_2, & - stressp_3, stressp_4, & - stressm_1, stressm_2, & - stressm_3, stressm_4, & - stress12_1, stress12_2, & - stress12_3, stress12_4, & + subroutine stress (nx_block, ny_block, & + icellt, & + indxti, indxtj, & + uvel, vvel, & + dxt, dyt, & + dxhy, dyhx, & + cxp, cyp, & + cxm, cym, & + DminTarea, & + strength, & + stressp_1, stressp_2, & + stressp_3, stressp_4, & + stressm_1, stressm_2, & + stressm_3, stressm_4, & + stress12_1, stress12_2, & + stress12_3, stress12_4, & str ) use ice_dyn_shared, only: strain_rates, visc_replpress, capping - integer (kind=int_kind), intent(in) :: & + integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions icellt ! no. of cells where icetmask = 1 @@ -1347,7 +1345,7 @@ subroutine stress (nx_block, ny_block, & tensionne, tensionnw, tensionse, tensionsw, & ! tension shearne, shearnw, shearse, shearsw , & ! shearing Deltane, Deltanw, Deltase, Deltasw , & ! Delt - zetax2ne, zetax2nw, zetax2se, zetax2sw , & ! 2 x zeta (bulk visc) + zetax2ne, zetax2nw, zetax2se, zetax2sw , & ! 2 x zeta (bulk visc) etax2ne, etax2nw, etax2se, etax2sw , & ! 2 x eta (shear visc) rep_prsne, rep_prsnw, rep_prsse, rep_prssw, & ! replacement pressure ! puny , & ! puny @@ -1373,10 +1371,10 @@ subroutine stress (nx_block, ny_block, & i = indxti(ij) j = indxtj(ij) - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! strain rates + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- call strain_rates (nx_block, ny_block, & i, j, & @@ -1393,9 +1391,9 @@ subroutine stress (nx_block, ny_block, & Deltane, Deltanw, & Deltase, Deltasw ) - !----------------------------------------------------------------- - ! viscosities and replacement pressure - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! viscosities and replacement pressure + !----------------------------------------------------------------- call visc_replpress (strength(i,j), DminTarea(i,j), Deltane, & zetax2ne, etax2ne, rep_prsne, capping) @@ -1409,73 +1407,73 @@ subroutine stress (nx_block, ny_block, & call visc_replpress (strength(i,j), DminTarea(i,j), Deltase, & zetax2se, etax2se, rep_prsse, capping) - !----------------------------------------------------------------- - ! the stresses ! kg/s^2 - ! (1) northeast, (2) northwest, (3) southwest, (4) southeast - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! the stresses ! kg/s^2 + ! (1) northeast, (2) northwest, (3) southwest, (4) southeast + !----------------------------------------------------------------- - ! NOTE: for comp. 
efficiency 2 x zeta and 2 x eta are used in the code - - stressp_1(i,j) = (stressp_1(i,j)*(c1-arlx1i*revp) + & - arlx1i*(zetax2ne*divune - rep_prsne)) * denom1 - stressp_2(i,j) = (stressp_2(i,j)*(c1-arlx1i*revp) + & - arlx1i*(zetax2nw*divunw - rep_prsnw)) * denom1 - stressp_3(i,j) = (stressp_3(i,j)*(c1-arlx1i*revp) + & - arlx1i*(zetax2sw*divusw - rep_prssw)) * denom1 - stressp_4(i,j) = (stressp_4(i,j)*(c1-arlx1i*revp) + & - arlx1i*(zetax2se*divuse - rep_prsse)) * denom1 - - stressm_1(i,j) = (stressm_1(i,j)*(c1-arlx1i*revp) + & - arlx1i*etax2ne*tensionne) * denom1 - stressm_2(i,j) = (stressm_2(i,j)*(c1-arlx1i*revp) + & - arlx1i*etax2nw*tensionnw) * denom1 - stressm_3(i,j) = (stressm_3(i,j)*(c1-arlx1i*revp) + & - arlx1i*etax2sw*tensionsw) * denom1 - stressm_4(i,j) = (stressm_4(i,j)*(c1-arlx1i*revp) + & - arlx1i*etax2se*tensionse) * denom1 - - stress12_1(i,j) = (stress12_1(i,j)*(c1-arlx1i*revp) + & - arlx1i*p5*etax2ne*shearne) * denom1 - stress12_2(i,j) = (stress12_2(i,j)*(c1-arlx1i*revp) + & - arlx1i*p5*etax2nw*shearnw) * denom1 - stress12_3(i,j) = (stress12_3(i,j)*(c1-arlx1i*revp) + & - arlx1i*p5*etax2sw*shearsw) * denom1 - stress12_4(i,j) = (stress12_4(i,j)*(c1-arlx1i*revp) + & - arlx1i*p5*etax2se*shearse) * denom1 + ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code + + stressp_1 (i,j) = (stressp_1 (i,j)*(c1-arlx1i*revp) & + + arlx1i*(zetax2ne*divune - rep_prsne)) * denom1 + stressp_2 (i,j) = (stressp_2 (i,j)*(c1-arlx1i*revp) & + + arlx1i*(zetax2nw*divunw - rep_prsnw)) * denom1 + stressp_3 (i,j) = (stressp_3 (i,j)*(c1-arlx1i*revp) & + + arlx1i*(zetax2sw*divusw - rep_prssw)) * denom1 + stressp_4 (i,j) = (stressp_4 (i,j)*(c1-arlx1i*revp) & + + arlx1i*(zetax2se*divuse - rep_prsse)) * denom1 + + stressm_1 (i,j) = (stressm_1 (i,j)*(c1-arlx1i*revp) & + + arlx1i*etax2ne*tensionne) * denom1 + stressm_2 (i,j) = (stressm_2 (i,j)*(c1-arlx1i*revp) & + + arlx1i*etax2nw*tensionnw) * denom1 + stressm_3 (i,j) = (stressm_3 (i,j)*(c1-arlx1i*revp) & + + arlx1i*etax2sw*tensionsw) * denom1 + stressm_4 (i,j) = (stressm_4 (i,j)*(c1-arlx1i*revp) & + + arlx1i*etax2se*tensionse) * denom1 + + stress12_1(i,j) = (stress12_1(i,j)*(c1-arlx1i*revp) & + + arlx1i*p5*etax2ne*shearne) * denom1 + stress12_2(i,j) = (stress12_2(i,j)*(c1-arlx1i*revp) & + + arlx1i*p5*etax2nw*shearnw) * denom1 + stress12_3(i,j) = (stress12_3(i,j)*(c1-arlx1i*revp) & + + arlx1i*p5*etax2sw*shearsw) * denom1 + stress12_4(i,j) = (stress12_4(i,j)*(c1-arlx1i*revp) & + + arlx1i*p5*etax2se*shearse) * denom1 - !----------------------------------------------------------------- - ! Eliminate underflows. - ! The following code is commented out because it is relatively - ! expensive and most compilers include a flag that accomplishes - ! the same thing more efficiently. This code is cheaper than - ! handling underflows if the compiler lacks a flag; uncomment - ! it in that case. The compiler flag is often described with the - ! phrase "flush to zero". - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! Eliminate underflows. + ! The following code is commented out because it is relatively + ! expensive and most compilers include a flag that accomplishes + ! the same thing more efficiently. This code is cheaper than + ! handling underflows if the compiler lacks a flag; uncomment + ! it in that case. The compiler flag is often described with the + ! phrase "flush to zero". + !----------------------------------------------------------------- -! 
call icepack_query_parameters(puny_out=puny) -! call icepack_warnings_flush(nu_diag) -! if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & -! file=__FILE__, line=__LINE__) +! call icepack_query_parameters(puny_out=puny) +! call icepack_warnings_flush(nu_diag) +! if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & +! file=__FILE__, line=__LINE__) -! stressp_1(i,j) = sign(max(abs(stressp_1(i,j)),puny),stressp_1(i,j)) -! stressp_2(i,j) = sign(max(abs(stressp_2(i,j)),puny),stressp_2(i,j)) -! stressp_3(i,j) = sign(max(abs(stressp_3(i,j)),puny),stressp_3(i,j)) -! stressp_4(i,j) = sign(max(abs(stressp_4(i,j)),puny),stressp_4(i,j)) +! stressp_1(i,j) = sign(max(abs(stressp_1(i,j)),puny),stressp_1(i,j)) +! stressp_2(i,j) = sign(max(abs(stressp_2(i,j)),puny),stressp_2(i,j)) +! stressp_3(i,j) = sign(max(abs(stressp_3(i,j)),puny),stressp_3(i,j)) +! stressp_4(i,j) = sign(max(abs(stressp_4(i,j)),puny),stressp_4(i,j)) -! stressm_1(i,j) = sign(max(abs(stressm_1(i,j)),puny),stressm_1(i,j)) -! stressm_2(i,j) = sign(max(abs(stressm_2(i,j)),puny),stressm_2(i,j)) -! stressm_3(i,j) = sign(max(abs(stressm_3(i,j)),puny),stressm_3(i,j)) -! stressm_4(i,j) = sign(max(abs(stressm_4(i,j)),puny),stressm_4(i,j)) +! stressm_1(i,j) = sign(max(abs(stressm_1(i,j)),puny),stressm_1(i,j)) +! stressm_2(i,j) = sign(max(abs(stressm_2(i,j)),puny),stressm_2(i,j)) +! stressm_3(i,j) = sign(max(abs(stressm_3(i,j)),puny),stressm_3(i,j)) +! stressm_4(i,j) = sign(max(abs(stressm_4(i,j)),puny),stressm_4(i,j)) -! stress12_1(i,j) = sign(max(abs(stress12_1(i,j)),puny),stress12_1(i,j)) -! stress12_2(i,j) = sign(max(abs(stress12_2(i,j)),puny),stress12_2(i,j)) -! stress12_3(i,j) = sign(max(abs(stress12_3(i,j)),puny),stress12_3(i,j)) -! stress12_4(i,j) = sign(max(abs(stress12_4(i,j)),puny),stress12_4(i,j)) +! stress12_1(i,j) = sign(max(abs(stress12_1(i,j)),puny),stress12_1(i,j)) +! stress12_2(i,j) = sign(max(abs(stress12_2(i,j)),puny),stress12_2(i,j)) +! stress12_3(i,j) = sign(max(abs(stress12_3(i,j)),puny),stress12_3(i,j)) +! stress12_4(i,j) = sign(max(abs(stress12_4(i,j)),puny),stress12_4(i,j)) - !----------------------------------------------------------------- - ! combinations of the stresses for the momentum equation ! kg/s^2 - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! combinations of the stresses for the momentum equation ! kg/s^2 + !----------------------------------------------------------------- ssigpn = stressp_1(i,j) + stressp_2(i,j) ssigps = stressp_3(i,j) + stressp_4(i,j) @@ -1502,12 +1500,12 @@ subroutine stress (nx_block, ny_block, & csigpnw = p111*stressp_2(i,j) + ssigp1 + p027*stressp_4(i,j) csigpsw = p111*stressp_3(i,j) + ssigp2 + p027*stressp_1(i,j) csigpse = p111*stressp_4(i,j) + ssigp1 + p027*stressp_2(i,j) - + csigmne = p111*stressm_1(i,j) + ssigm2 + p027*stressm_3(i,j) csigmnw = p111*stressm_2(i,j) + ssigm1 + p027*stressm_4(i,j) csigmsw = p111*stressm_3(i,j) + ssigm2 + p027*stressm_1(i,j) csigmse = p111*stressm_4(i,j) + ssigm1 + p027*stressm_2(i,j) - + csig12ne = p222*stress12_1(i,j) + ssig122 & + p055*stress12_3(i,j) csig12nw = p222*stress12_2(i,j) + ssig121 & @@ -1522,9 +1520,9 @@ subroutine stress (nx_block, ny_block, & str12ns = p5*dyt(i,j)*(p333*ssig12n + p166*ssig12s) str12sn = p5*dyt(i,j)*(p333*ssig12s + p166*ssig12n) - !----------------------------------------------------------------- - ! 
for dF/dx (u momentum) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! for dF/dx (u momentum) + !----------------------------------------------------------------- strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) strm_tmp = p25*dyt(i,j)*(p333*ssigmn + p166*ssigms) @@ -1547,9 +1545,9 @@ subroutine stress (nx_block, ny_block, & str(i,j,4) = strp_tmp + strm_tmp + str12we & + dxhy(i,j)*(-csigpsw + csigmsw) + dyhx(i,j)*csig12sw - !----------------------------------------------------------------- - ! for dF/dy (v momentum) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! for dF/dy (v momentum) + !----------------------------------------------------------------- strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw) strm_tmp = p25*dxt(i,j)*(p333*ssigme + p166*ssigmw) @@ -1577,27 +1575,26 @@ subroutine stress (nx_block, ny_block, & end subroutine stress !======================================================================= - ! Computes the strain rates and internal stress components for C grid - +! ! author: JF Lemieux, ECCC ! updated: D. Bailey, NCAR -! Nov 2021 - -! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The +! Nov 2021 +! +! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The ! elastic-viscous-plastic method revisited. Ocean Model., 71, 2-12. - +! ! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method ! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. - - subroutine stressC_T (nx_block, ny_block , & - icellt , & + + subroutine stressC_T (nx_block, ny_block , & + icellt , & indxti , indxtj , & uvelE , vvelE , & uvelN , vvelN , & dxN , dyE , & dxT , dyT , & - uarea , DminTarea, & + uarea , DminTarea, & strength, shrU , & zetax2T , etax2T , & stressp , stressm ) @@ -1605,7 +1602,7 @@ subroutine stressC_T (nx_block, ny_block , & use ice_dyn_shared, only: strain_rates_T, capping, & visc_replpress, e_factor - integer (kind=int_kind), intent(in) :: & + integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions icellt ! no. of cells where icetmask = 1 @@ -1639,11 +1636,13 @@ subroutine stressC_T (nx_block, ny_block , & i, j, ij real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - divT, tensionT + divT , & ! divergence at T point + tensionT ! tension at T point real (kind=dbl_kind) :: & - shearTsqr, DeltaT, & ! strain rates at T point - rep_prsT ! replacement pressure at T point + shearTsqr , & ! strain rates squared at T point + DeltaT , & ! delt at T point + rep_prsT ! replacement pressure at T point character(len=*), parameter :: subname = '(stressC_T)' @@ -1653,7 +1652,7 @@ subroutine stressC_T (nx_block, ny_block , & call strain_rates_T (nx_block , ny_block , & icellt , & - indxti(:) , indxtj (:) , & + indxti(:) , indxtj (:) , & uvelE (:,:), vvelE (:,:), & uvelN (:,:), vvelN (:,:), & dxN (:,:), dyE (:,:), & @@ -1670,12 +1669,12 @@ subroutine stressC_T (nx_block, ny_block , & ! 
U point values (Bouillon et al., 2013, Kimmritz et al., 2016 !----------------------------------------------------------------- - shearTsqr = (shrU(i,j) **2 * uarea(i,j) + shrU(i,j-1)**2*uarea(i,j-1) & + shearTsqr = (shrU(i,j) **2 * uarea(i,j) + shrU(i,j-1)**2*uarea(i,j-1) & + shrU(i-1,j-1)**2 * uarea(i-1,j-1)+ shrU(i-1,j)**2*uarea(i-1,j)) & / (uarea(i,j)+uarea(i,j-1)+uarea(i-1,j-1)+uarea(i-1,j)) DeltaT = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearTsqr)) - + !----------------------------------------------------------------- ! viscosities and replacement pressure at T point !----------------------------------------------------------------- @@ -1689,33 +1688,33 @@ subroutine stressC_T (nx_block, ny_block , & ! the stresses ! kg/s^2 !----------------------------------------------------------------- - ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - - stressp(i,j) = (stressp(i,j)*(c1-arlx1i*revp) + & - arlx1i*(zetax2T(i,j)*divT(i,j) - rep_prsT)) * denom1 + ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code + + stressp(i,j) = (stressp(i,j)*(c1-arlx1i*revp) & + + arlx1i*(zetax2T(i,j)*divT(i,j) - rep_prsT)) * denom1 - stressm(i,j) = (stressm(i,j)*(c1-arlx1i*revp) + & - arlx1i*etax2T(i,j)*tensionT(i,j)) * denom1 + stressm(i,j) = (stressm(i,j)*(c1-arlx1i*revp) & + + arlx1i*etax2T(i,j)*tensionT(i,j)) * denom1 enddo ! ij end subroutine stressC_T - -!======================================================================= +!======================================================================= +! ! Computes the strain rates and internal stress components for U points - +! ! author: JF Lemieux, ECCC -! Nov 2021 - -! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The +! Nov 2021 +! +! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The ! elastic-viscous-plastic method revisited. Ocean Model., 71, 2-12. - +! ! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method ! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. - - subroutine stressC_U (nx_block, ny_block, & - icellu, & + + subroutine stressC_U (nx_block, ny_block, & + icellu, & indxui , indxuj, & uvelE , vvelE, & uvelN , vvelN, & @@ -1735,7 +1734,7 @@ subroutine stressC_U (nx_block, ny_block, & visc_replpress_avgzeta, & visc_method, deltaminEVP, capping - integer (kind=int_kind), intent(in) :: & + integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions icellu ! no. of cells where iceumask = 1 @@ -1767,9 +1766,9 @@ subroutine stressC_U (nx_block, ny_block, & etax2T , & ! 2*eta at the T point shrU , & ! shearU array strength ! ice strength at the T point - + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - stress12 ! sigma12 + stress12 ! sigma12 ! local variables @@ -1777,14 +1776,18 @@ subroutine stressC_U (nx_block, ny_block, & i, j, ij real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - DeltaU ! strain rates at U point + DeltaU ! delt at U point real (kind=dbl_kind) :: & - zetax2U, etax2U, rep_prsU, & ! replacement pressure at U point - DminUarea, strtmp, areatmp ! Dmin on U and tmp variables + zetax2U , & ! bulk viscosity at U point + etax2U , & ! shear viscosity at U point + rep_prsU , & ! replacement pressure at U point + DminUarea, & ! Dmin on U + strtmp , & ! tmp variable + areatmp ! tmp variable character(len=*), parameter :: subname = '(stressC_U)' - + !----------------------------------------------------------------- ! strain rates at U point ! 
NOTE these are actually strain rates * area (m^2/s)
@@ -1845,40 +1848,38 @@
 ! the stresses ! kg/s^2
 !-----------------------------------------------------------------

-        ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code
+        ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code

-        stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) + &
-                         arlx1i*p5*etax2U*shrU(i,j)) * denom1
+        stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) &
+                       + arlx1i*p5*etax2U*shrU(i,j)) * denom1

      enddo ! ij
-
      end subroutine stressC_U

!=======================================================================
-
! Computes the strain rates and internal stress components for T points
-
+!
! author: JF Lemieux, ECCC
-! Nov 2021
+! Nov 2021

-      subroutine stressCD_T (nx_block, ny_block, &
-                             icellt, &
+      subroutine stressCD_T (nx_block, ny_block, &
+                             icellt, &
                              indxti, indxtj, &
                              uvelE, vvelE, &
                              uvelN, vvelN, &
                              dxN, dyE, &
                              dxT, dyT, &
-                             DminTarea, &
+                             DminTarea, &
                              strength, &
                              zetax2T, etax2T, &
-                             stresspT, stressmT, &
+                             stresspT, stressmT, &
                              stress12T )

      use ice_dyn_shared, only: strain_rates_T, capping, &
          visc_replpress
-
-      integer (kind=int_kind), intent(in) :: &
+
+      integer (kind=int_kind), intent(in) :: &
         nx_block, ny_block, & ! block dimensions
         icellt ! no. of cells where icetmask = 1
@@ -1911,10 +1912,13 @@ subroutine stressCD_T (nx_block, ny_block, &
        i, j, ij

      real (kind=dbl_kind), dimension (nx_block,ny_block) :: &
-        divT, tensionT, shearT, DeltaT ! strain rates at T point
+        divT      , & ! divergence at T point
+        tensionT  , & ! tension at T point
+        shearT    , & ! shear at T point
+        DeltaT        ! delt at T point

      real (kind=dbl_kind) :: &
-        rep_prsT ! replacement pressure at T point
+        rep_prsT      ! replacement pressure at T point

      character(len=*), parameter :: subname = '(stressCD_T)'
@@ -1945,35 +1949,34 @@ subroutine stressCD_T (nx_block, ny_block, &
                              DeltaT   (i,j), zetax2T  (i,j), &
                              etax2T   (i,j), rep_prsT      , &
                              capping)
-
+
        !-----------------------------------------------------------------
        ! the stresses ! kg/s^2
        !-----------------------------------------------------------------

-        ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code
-
-        stresspT(i,j) = (stresspT(i,j)*(c1-arlx1i*revp) + &
-                         arlx1i*(zetax2T(i,j)*divT(i,j) - rep_prsT)) * denom1
+        ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code

-        stressmT(i,j) = (stressmT(i,j)*(c1-arlx1i*revp) + &
-                         arlx1i*etax2T(i,j)*tensionT(i,j)) * denom1
+        stresspT(i,j) = (stresspT (i,j)*(c1-arlx1i*revp) &
+                      + arlx1i*(zetax2T(i,j)*divT(i,j) - rep_prsT)) * denom1

-        stress12T(i,j) = (stress12T(i,j)*(c1-arlx1i*revp) + &
-                          arlx1i*p5*etax2T(i,j)*shearT(i,j)) * denom1
+        stressmT(i,j) = (stressmT (i,j)*(c1-arlx1i*revp) &
+                      + arlx1i*etax2T(i,j)*tensionT(i,j)) * denom1
+
+        stress12T(i,j) = (stress12T(i,j)*(c1-arlx1i*revp) &
+                       + arlx1i*p5*etax2T(i,j)*shearT(i,j)) * denom1

      enddo ! ij

      end subroutine stressCD_T
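
! [editor's note - illustration only, not part of the patch] The update
! just above relaxes each T-point stress toward its viscous-plastic value
! at a rate of roughly 1/alpha per subcycle. The following minimal,
! self-contained sketch shows that recursion on scalars. All names and
! values below are made up for demonstration; denom1 assumes classic EVP
! (revp = 0), in which case denom1 = 1/(1 + arlx1i).

      program evp_stress_relax_demo
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind), parameter :: arlx   = 300.0_dbl_kind    ! alpha (assumed value)
      real (kind=dbl_kind), parameter :: arlx1i = 1.0_dbl_kind/arlx ! 1/alpha
      real (kind=dbl_kind), parameter :: denom1 = 1.0_dbl_kind/(1.0_dbl_kind + arlx1i)
      real (kind=dbl_kind) :: stresspT, zetax2T, divT, rep_prsT
      integer :: ksub
      stresspT = 0.0_dbl_kind     ! sigma11+sigma22 entering the subcycling
      zetax2T  = 1.0e8_dbl_kind   ! 2 x bulk viscosity (made-up value)
      divT     = -1.0e-7_dbl_kind ! divergence (1/s), convergent flow (made-up)
      rep_prsT = 5.0e3_dbl_kind   ! replacement pressure (made-up value)
      do ksub = 1, 500            ! each pass moves stresspT toward the VP stress
         stresspT = (stresspT + arlx1i*(zetax2T*divT - rep_prsT)) * denom1
      enddo
      print *, 'stresspT =', stresspT, ' VP limit =', zetax2T*divT - rep_prsT
      end program evp_stress_relax_demo

! With revp = 0 the recursion has fixed point zetax2T*divT - rep_prsT,
! which is why more subcycles (larger ndte) pull the stress closer to it.

!=======================================================================
-
! Computes the strain rates and internal stress components for U points
-
+!
! author: JF Lemieux, ECCC
-! Nov 2021
+! 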
Nov 2021 - subroutine stressCD_U (nx_block, ny_block, & - icellu, & + subroutine stressCD_U (nx_block, ny_block, & + icellu, & indxui, indxuj, & uvelE, vvelE, & uvelN, vvelN, & @@ -1986,7 +1989,7 @@ subroutine stressCD_U (nx_block, ny_block, & epm, npm, hm, & zetax2T, etax2T, & strength, & - stresspU, stressmU, & + stresspU, stressmU, & stress12U ) use ice_dyn_shared, only: strain_rates_U, & @@ -1994,7 +1997,7 @@ subroutine stressCD_U (nx_block, ny_block, & visc_replpress_avgzeta, & visc_method, deltaminEVP, capping - integer (kind=int_kind), intent(in) :: & + integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions icellu ! no. of cells where iceumask = 1 @@ -2025,7 +2028,7 @@ subroutine stressCD_U (nx_block, ny_block, & zetax2T , & ! 2*zeta at the T point etax2T , & ! 2*eta at the T point strength ! ice strength at the T point - + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & stresspU , & ! sigma11+sigma22 stressmU , & ! sigma11-sigma22 @@ -2037,14 +2040,19 @@ subroutine stressCD_U (nx_block, ny_block, & i, j, ij real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - divU, tensionU, shearU, DeltaU ! strain rates at U point + divU , & ! divergence at U point + tensionU , & ! tension at U point + shearU , & ! shear at U point + DeltaU ! delt at U point real (kind=dbl_kind) :: & - zetax2U, etax2U, rep_prsU, & ! replacement pressure at U point - DminUarea ! Dmin on U and tmp variables + zetax2U , & ! bulk viscosity at U point + etax2U , & ! shear viscosity at U point + rep_prsU , & ! replacement pressure at U point + DminUarea ! Dmin on U character(len=*), parameter :: subname = '(stressCD_U)' - + !----------------------------------------------------------------- ! strain rates at U point ! NOTE these are actually strain rates * area (m^2/s) @@ -2095,86 +2103,84 @@ subroutine stressCD_U (nx_block, ny_block, & DminUarea , DeltaU (i ,j ), & zetax2U, etax2U, rep_prsU, capping) endif - + !----------------------------------------------------------------- ! the stresses ! kg/s^2 !----------------------------------------------------------------- - ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - - stresspU(i,j) = (stresspU(i,j)*(c1-arlx1i*revp) + & - arlx1i*(zetax2U*divU(i,j) - rep_prsU)) * denom1 + ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code + + stresspU(i,j) = (stresspU (i,j)*(c1-arlx1i*revp) & + + arlx1i*(zetax2U*divU(i,j) - rep_prsU)) * denom1 - stressmU(i,j) = (stressmU(i,j)*(c1-arlx1i*revp) + & - arlx1i*etax2U*tensionU(i,j)) * denom1 + stressmU(i,j) = (stressmU (i,j)*(c1-arlx1i*revp) & + + arlx1i*etax2U*tensionU(i,j)) * denom1 - stress12U(i,j) = (stress12U(i,j)*(c1-arlx1i*revp) + & - arlx1i*p5*etax2U*shearU(i,j)) * denom1 + stress12U(i,j) = (stress12U(i,j)*(c1-arlx1i*revp) & + + arlx1i*p5*etax2U*shearU(i,j)) * denom1 enddo ! ij end subroutine stressCD_U - -!======================================================================= +!======================================================================= ! Computes divergence of stress tensor at the E or N point for the mom equation - +! ! author: JF Lemieux, ECCC -! Nov 2021 - +! Nov 2021 +! ! Hunke, E. C., and J. K. Dukowicz (2002). The Elastic-Viscous-Plastic ! Sea Ice Dynamics Model in General Orthogonal Curvilinear Coordinates ! on a Sphere - Incorporation of Metric Terms. Mon. Weather Rev., ! 130, 1848-1865. - -! Bouillon, S., M. Morales Maqueda, V. Legat and T. Fichefet (2009). An +! +! Bouillon, S., M. Morales Maqueda, V. Legat and T. 
Fichefet (2009). An ! elastic-viscous-plastic sea ice model formulated on Arakawa B and C grids. ! Ocean Model., 27, 174-184. - - subroutine div_stress (nx_block, ny_block, & - icell, & - indxi, indxj, & - dxE_N, dyE_N, & - dxT_U, dyT_U, & - arear, & - stresspF1, stressmF1, & - stress12F1, & - stresspF2, stressmF2, & - stress12F2, & - F1, F2, & + + subroutine div_stress (nx_block , ny_block , & + icell , & + indxi , indxj , & + dxE_N , dyE_N , & + dxT_U , dyT_U , & + arear , & + stresspF1 , stressmF1, & + stress12F1, & + stresspF2 , stressmF2, & + stress12F2, & + F1 , F2 , & grid_location) - - integer (kind=int_kind), intent(in) :: & + + integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icell ! no. of cells where epm (or npm) = 1 + icell ! no. of cells where epm (or npm) = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxi , & ! compressed index in i-direction - indxj ! compressed index in j-direction - + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - dxE_N , & ! width of E or N-cell through the middle (m) - dyE_N , & ! height of E or N-cell through the middle (m) - dxT_U , & ! width of T or U-cell through the middle (m) - dyT_U , & ! height of T or U-cell through the middle (m) - arear ! earear or narear + dxE_N , & ! width of E or N-cell through the middle (m) + dyE_N , & ! height of E or N-cell through the middle (m) + dxT_U , & ! width of T or U-cell through the middle (m) + dyT_U , & ! height of T or U-cell through the middle (m) + arear ! earear or narear real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(in) :: & - stresspF1 , & ! stressp (U or T) used for F1 calculation - stressmF1 , & ! stressm (U or T) used for F1 calculation - stress12F1 , & ! stress12 (U or T) used for F1 calculation - stresspF2 , & ! stressp (U or T) used for F2 calculation - stressmF2 , & ! stressm (U or T) used for F2 calculation - stress12F2 ! stress12 (U or T) used for F2 calculation + stresspF1 , & ! stressp (U or T) used for F1 calculation + stressmF1 , & ! stressm (U or T) used for F1 calculation + stress12F1, & ! stress12 (U or T) used for F1 calculation + stresspF2 , & ! stressp (U or T) used for F2 calculation + stressmF2 , & ! stressm (U or T) used for F2 calculation + stress12F2 ! stress12 (U or T) used for F2 calculation character(len=*), intent(in) :: & grid_location ! E (East) or N (North) ! TO BE IMPROVED!!!! - + real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(out) :: & - F1 , & ! div of stress tensor for u component - F2 ! div of stress tensor for v component + F1 , & ! div of stress tensor for u component + F2 ! div of stress tensor for v component ! 
local variables @@ -2209,11 +2215,11 @@ subroutine div_stress (nx_block, ny_block, & i = indxi(ij) j = indxj(ij) F1(i,j) = arear(i,j) * & - ( p5 * dyE_N(i,j) * ( stresspF1(i+1,j)-stresspF1(i,j) ) & - + (p5/dyE_N(i,j)) * ( (dyT_U(i+1,j)**2) * stressmF1(i+1,j) & - -(dyT_U(i,j)**2)*stressmF1(i,j) ) & - + (c1/dxE_N(i,j)) * ( (dxT_U(i,j)**2) * stress12F1(i,j) & - -(dxT_U(i,j-1)**2)*stress12F1(i,j-1) ) ) + ( p5 * dyE_N(i,j) * ( stresspF1(i+1,j ) - stresspF1 (i ,j ) ) & + + (p5/dyE_N(i,j)) * ( (dyT_U(i+1,j )**2) * stressmF1 (i+1,j ) & + -(dyT_U(i ,j )**2) * stressmF1 (i ,j ) ) & + + (c1/dxE_N(i,j)) * ( (dxT_U(i ,j )**2) * stress12F1(i ,j ) & + -(dxT_U(i ,j-1)**2) * stress12F1(i ,j-1) ) ) enddo endif @@ -2222,11 +2228,11 @@ subroutine div_stress (nx_block, ny_block, & i = indxi(ij) j = indxj(ij) F2(i,j) = arear(i,j) * & - ( p5 * dxE_N(i,j) * ( stresspF2(i,j)-stresspF2(i,j-1) ) & - - (p5/dxE_N(i,j)) * ( (dxT_U(i,j)**2) * stressmF2(i,j) & - -(dxT_U(i,j-1)**2)*stressmF2(i,j-1) ) & - + (c1/dyE_N(i,j)) * ( (dyT_U(i+1,j)**2) * stress12F2(i+1,j) & - -(dyT_U(i,j)**2)*stress12F2(i,j) ) ) + ( p5 * dxE_N(i,j) * ( stresspF2(i ,j ) - stresspF2 (i ,j-1) ) & + - (p5/dxE_N(i,j)) * ( (dxT_U(i ,j )**2) * stressmF2 (i ,j ) & + -(dxT_U(i ,j-1)**2) * stressmF2 (i ,j-1) ) & + + (c1/dyE_N(i,j)) * ( (dyT_U(i+1,j )**2) * stress12F2(i+1,j ) & + -(dyT_U(i ,j )**2) * stress12F2(i ,j ) ) ) enddo endif @@ -2235,11 +2241,11 @@ subroutine div_stress (nx_block, ny_block, & i = indxi(ij) j = indxj(ij) F1(i,j) = arear(i,j) * & - ( p5 * dyE_N(i,j) * ( stresspF1(i,j)-stresspF1(i-1,j) ) & - + (p5/dyE_N(i,j)) * ( (dyT_U(i,j)**2) * stressmF1(i,j) & - -(dyT_U(i-1,j)**2)*stressmF1(i-1,j) ) & - + (c1/dxE_N(i,j)) * ( (dxT_U(i,j+1)**2) * stress12F1(i,j+1) & - -(dxT_U(i,j)**2)*stress12F1(i,j) ) ) + ( p5 * dyE_N(i,j) * ( stresspF1(i ,j ) - stresspF1 (i-1,j ) ) & + + (p5/dyE_N(i,j)) * ( (dyT_U(i ,j )**2) * stressmF1 (i ,j ) & + -(dyT_U(i-1,j )**2) * stressmF1 (i-1,j ) ) & + + (c1/dxE_N(i,j)) * ( (dxT_U(i ,j+1)**2) * stress12F1(i ,j+1) & + -(dxT_U(i ,j )**2) * stress12F1(i ,j ) ) ) enddo endif @@ -2248,11 +2254,11 @@ subroutine div_stress (nx_block, ny_block, & i = indxi(ij) j = indxj(ij) F2(i,j) = arear(i,j) * & - ( p5 * dxE_N(i,j) * ( stresspF2(i,j+1)-stresspF2(i,j) ) & - - (p5/dxE_N(i,j)) * ( (dxT_U(i,j+1)**2) * stressmF2(i,j+1) & - -(dxT_U(i,j)**2)*stressmF2(i,j) ) & - + (c1/dyE_N(i,j)) * ( (dyT_U(i,j)**2) * stress12F2(i,j) & - -(dyT_U(i-1,j)**2)*stress12F2(i-1,j) ) ) + ( p5 * dxE_N(i,j) * ( stresspF2(i ,j+1) - stresspF2 (i ,j ) ) & + - (p5/dxE_N(i,j)) * ( (dxT_U(i ,j+1)**2) * stressmF2 (i ,j+1) & + -(dxT_U(i ,j )**2) * stressmF2 (i ,j ) ) & + + (c1/dyE_N(i,j)) * ( (dyT_U(i ,j )**2) * stress12F2(i ,j ) & + -(dyT_U(i-1,j )**2) * stress12F2(i-1,j ) ) ) enddo endif diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index a379d24ef..78a476f5a 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -40,7 +40,7 @@ module ice_dyn_shared integer (kind=int_kind), public :: & kdyn , & ! type of dynamics ( -1, 0 = off, 1 = evp, 2 = eap ) kridge , & ! set to "-1" to turn off ridging - ndte ! number of subcycles: ndte=dt/dte + ndte ! number of subcycles: ndte=dt/dte character (len=char_len), public :: & coriolis , & ! 'constant', 'zero', or 'latitude' @@ -51,20 +51,19 @@ module ice_dyn_shared character (len=char_len), public :: & evp_algorithm ! standard_2d = 2D org version (standard) - ! 
shared_mem_1d = 1d without mpi call and refactorization to 1d + ! shared_mem_1d = 1d without mpi call and refactorization to 1d real (kind=dbl_kind), public :: & elasticDamp ! coefficient for calculating the parameter E, elastic damping parameter ! other EVP parameters - character (len=char_len), public :: & - yield_curve , & ! 'ellipse' ('teardrop' needs further testing) - visc_method, & ! method for viscosity calc at U points (C, CD grids) + character (len=char_len), public :: & + yield_curve , & ! 'ellipse' ('teardrop' needs further testing) + visc_method , & ! method for viscosity calc at U points (C, CD grids) seabed_stress_method ! method for seabed stress calculation ! LKD: Lemieux et al. 2015, probabilistic: Dupont et al. in prep. - - + real (kind=dbl_kind), parameter, public :: & u0 = 5e-5_dbl_kind, & ! residual velocity for seabed stress (m/s) cosw = c1 , & ! cos(ocean turning angle) ! turning angle = 0 @@ -83,50 +82,50 @@ module ice_dyn_shared deltaminVP , & ! minimum delta for viscosities (VP) capping , & ! capping of viscosities (1=Hibler79, 0=Kreyscher2000) dtei , & ! 1/dte, where dte is subcycling timestep (1/s) -! dte2T , & ! dte/2T - denom1 ! constants for stress equation +! dte2T , & ! dte/2T + denom1 ! constants for stress equation real (kind=dbl_kind), public :: & ! Bouillon et al relaxation constants - arlx , & ! alpha for stressp - arlx1i , & ! (inverse of alpha) for stressp - brlx ! beta for momentum + arlx , & ! alpha for stressp + arlx1i , & ! (inverse of alpha) for stressp + brlx ! beta for momentum - real (kind=dbl_kind), allocatable, public :: & - fcor_blk(:,:,:) ! Coriolis parameter (1/s) + real (kind=dbl_kind), allocatable, public :: & + fcor_blk(:,:,:) ! Coriolis parameter (1/s) - real (kind=dbl_kind), allocatable, public :: & - fcorE_blk(:,:,:), & ! Coriolis parameter at E points (1/s) - fcorN_blk(:,:,:) ! Coriolis parameter at N points (1/s) + real (kind=dbl_kind), allocatable, public :: & + fcorE_blk(:,:,:), & ! Coriolis parameter at E points (1/s) + fcorN_blk(:,:,:) ! Coriolis parameter at N points (1/s) real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - uvel_init, & ! x-component of velocity (m/s), beginning of timestep - vvel_init ! y-component of velocity (m/s), beginning of timestep + uvel_init , & ! x-component of velocity (m/s), beginning of timestep + vvel_init ! y-component of velocity (m/s), beginning of timestep real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - uvelN_init, & ! x-component of velocity (m/s), beginning of timestep - vvelN_init ! y-component of velocity (m/s), beginning of timestep + uvelN_init , & ! x-component of velocity (m/s), beginning of timestep + vvelN_init ! y-component of velocity (m/s), beginning of timestep real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - uvelE_init, & ! x-component of velocity (m/s), beginning of timestep - vvelE_init ! y-component of velocity (m/s), beginning of timestep + uvelE_init , & ! x-component of velocity (m/s), beginning of timestep + vvelE_init ! y-component of velocity (m/s), beginning of timestep + + real (kind=dbl_kind), allocatable, public :: & + DminTarea(:,:,:) ! deltamin * tarea (m^2/s) - real (kind=dbl_kind), allocatable, public :: & - DminTarea(:,:,:) ! deltamin * tarea (m^2/s) - ! ice isotropic tensile strength parameter real (kind=dbl_kind), public :: & - Ktens ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) + Ktens ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) ! 
seabed (basal) stress parameters and settings logical (kind=log_kind), public :: & - seabed_stress ! if true, seabed stress for landfast on + seabed_stress ! if true, seabed stress for landfast on real (kind=dbl_kind), public :: & - k1 , & ! 1st free parameter for seabed1 grounding parameterization - k2 , & ! second free parameter (N/m^3) for seabed1 grounding parametrization - alphab , & ! alphab=Cb factor in Lemieux et al 2015 - threshold_hw ! max water depth for grounding - ! see keel data from Amundrud et al. 2004 (JGR) + k1 , & ! 1st free parameter for seabed1 grounding parameterization + k2 , & ! second free parameter (N/m^3) for seabed1 grounding parametrization + alphab , & ! alphab=Cb factor in Lemieux et al 2015 + threshold_hw ! max water depth for grounding + ! see keel data from Amundrud et al. 2004 (JGR) interface dyn_haloUpdate module procedure dyn_haloUpdate1 @@ -156,17 +155,19 @@ module ice_dyn_shared !======================================================================= ! -! Allocate space for all variables +! Allocate space for all variables ! subroutine alloc_dyn_shared integer (int_kind) :: ierr + character(len=*), parameter :: subname = '(alloc_dyn_shared)' + allocate( & uvel_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep vvel_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep stat=ierr) - if (ierr/=0) call abort_ice('(alloc_dyn_shared): Out of memory') + if (ierr/=0) call abort_ice(subname//': Out of memory') if (grid_ice == 'CD' .or. grid_ice == 'C') then allocate( & @@ -175,13 +176,12 @@ subroutine alloc_dyn_shared uvelN_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep vvelN_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep stat=ierr) - if (ierr/=0) call abort_ice('(alloc_dyn_shared): Out of memory') + if (ierr/=0) call abort_ice(subname//': Out of memory') endif end subroutine alloc_dyn_shared !======================================================================= - ! Initialize parameters and variables needed for the dynamics ! author: Elizabeth C. Hunke, LANL @@ -227,7 +227,7 @@ subroutine init_dyn (dt) allocate(fcor_blk(nx_block,ny_block,max_blocks)) allocate(DminTarea(nx_block,ny_block,max_blocks)) - + if (grid_ice == 'CD' .or. grid_ice == 'C') then allocate(fcorE_blk(nx_block,ny_block,max_blocks)) allocate(fcorN_blk(nx_block,ny_block,max_blocks)) @@ -301,9 +301,9 @@ subroutine init_dyn (dt) stress12U (i,j,iblk) = c0 endif - if (kdyn == 1) then + if (kdyn == 1) then DminTarea(i,j,iblk) = deltaminEVP*tarea(i,j,iblk) - elseif (kdyn == 3) then + elseif (kdyn == 3) then DminTarea(i,j,iblk) = deltaminVP*tarea(i,j,iblk) endif @@ -318,7 +318,6 @@ subroutine init_dyn (dt) end subroutine init_dyn !======================================================================= - ! Set parameters needed for the evp dynamics. ! Note: This subroutine is currently called only during initialization. ! If the dynamics time step can vary during runtime, it should @@ -342,7 +341,7 @@ subroutine set_evp_parameters (dt) ! elastic time step !dte = dt/real(ndte,kind=dbl_kind) ! s !dtei = c1/dte ! 1/s - dtei = real(ndte,kind=dbl_kind)/dt + dtei = real(ndte,kind=dbl_kind)/dt ! 
variables for elliptical yield curve and plastic potential epp2i = c1/e_plasticpot**2 @@ -375,7 +374,6 @@ subroutine set_evp_parameters (dt) end subroutine set_evp_parameters !======================================================================= - ! Computes quantities needed in the stress tensor (sigma) ! and momentum (u) equations, but which do not change during ! the thermodynamics/transport time step: @@ -383,10 +381,10 @@ end subroutine set_evp_parameters ! ! author: Elizabeth C. Hunke, LANL - subroutine dyn_prep1 (nx_block, ny_block, & + subroutine dyn_prep1 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - aice, vice, & - vsno, tmask, & + aice, vice, & + vsno, tmask, & tmass, icetmask) integer (kind=int_kind), intent(in) :: & @@ -428,26 +426,26 @@ subroutine dyn_prep1 (nx_block, ny_block, & do j = 1, ny_block do i = 1, nx_block - !----------------------------------------------------------------- - ! total mass of ice and snow, centered in T-cell - ! NOTE: vice and vsno must be up to date in all grid cells, - ! including ghost cells - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! total mass of ice and snow, centered in T-cell + ! NOTE: vice and vsno must be up to date in all grid cells, + ! including ghost cells + !----------------------------------------------------------------- if (tmask(i,j)) then tmass(i,j) = (rhoi*vice(i,j) + rhos*vsno(i,j)) ! kg/m^2 else tmass(i,j) = c0 endif - !----------------------------------------------------------------- - ! ice extent mask (T-cells) - !----------------------------------------------------------------- - tmphm(i,j) = tmask(i,j) .and. (aice (i,j) > a_min) & + !----------------------------------------------------------------- + ! ice extent mask (T-cells) + !----------------------------------------------------------------- + tmphm(i,j) = tmask(i,j) .and. (aice (i,j) > a_min) & .and. (tmass(i,j) > m_min) - !----------------------------------------------------------------- - ! augmented mask (land + open ocean) - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! augmented mask (land + open ocean) + !----------------------------------------------------------------- icetmask (i,j) = 0 enddo @@ -457,8 +455,8 @@ subroutine dyn_prep1 (nx_block, ny_block, & do i = ilo, ihi ! extend ice extent mask (T-cells) to points around pack - if (tmphm(i-1,j+1) .or. tmphm(i,j+1) .or. tmphm(i+1,j+1) .or. & - tmphm(i-1,j) .or. tmphm(i,j) .or. tmphm(i+1,j) .or. & + if (tmphm(i-1,j+1) .or. tmphm(i,j+1) .or. tmphm(i+1,j+1) .or. & + tmphm(i-1,j) .or. tmphm(i,j) .or. tmphm(i+1,j) .or. & tmphm(i-1,j-1) .or. tmphm(i,j-1) .or. tmphm(i+1,j-1) ) then icetmask(i,j) = 1 endif @@ -480,31 +478,31 @@ end subroutine dyn_prep1 ! ! author: Elizabeth C. 
Hunke, LANL

-      subroutine dyn_prep2 (nx_block, ny_block, &
+      subroutine dyn_prep2 (nx_block, ny_block, &
                             ilo, ihi, jlo, jhi, &
-                            icellt, icellu, &
-                            indxti, indxtj, &
-                            indxui, indxuj, &
-                            aiu, umass, &
-                            umassdti, fcor, &
-                            umask, &
-                            uocn, vocn, &
-                            strairx, strairy, &
-                            ss_tltx, ss_tlty, &
-                            icetmask, iceumask, &
-                            fm, dt, &
-                            strtltx, strtlty, &
+                            icellt, icellu, &
+                            indxti, indxtj, &
+                            indxui, indxuj, &
+                            aiu, umass, &
+                            umassdti, fcor, &
+                            umask, &
+                            uocn, vocn, &
+                            strairx, strairy, &
+                            ss_tltx, ss_tlty, &
+                            icetmask, iceumask, &
+                            fm, dt, &
+                            strtltx, strtlty, &
                             strocnx, strocny, &
                             strintx, strinty, &
                             taubx, tauby, &
-                            waterx, watery, &
-                            forcex, forcey, &
-                            stressp_1, stressp_2, &
-                            stressp_3, stressp_4, &
-                            stressm_1, stressm_2, &
-                            stressm_3, stressm_4, &
-                            stress12_1, stress12_2, &
-                            stress12_3, stress12_4, &
+                            waterx, watery, &
+                            forcex, forcey, &
+                            stressp_1, stressp_2, &
+                            stressp_3, stressp_4, &
+                            stressm_1, stressm_2, &
+                            stressm_3, stressm_4, &
+                            stress12_1, stress12_2, &
+                            stress12_3, stress12_4, &
                             uvel_init, vvel_init, &
                             uvel, vvel, &
                             Tbu)
@@ -514,14 +512,14 @@ subroutine dyn_prep2 (nx_block, ny_block, &
        ilo,ihi,jlo,jhi ! beginning and end of physical domain

      integer (kind=int_kind), intent(out) :: &
-        icellt , & ! no. of cells where icetmask = 1
-        icellu ! no. of cells where iceumask = 1
+        icellt , & ! no. of cells where icetmask = 1
+        icellu     ! no. of cells where iceumask = 1

      integer (kind=int_kind), dimension (nx_block*ny_block), intent(out) :: &
-        indxti , & ! compressed index in i-direction
-        indxtj , & ! compressed index in j-direction
-        indxui , & ! compressed index in i-direction
-        indxuj ! compressed index in j-direction
+        indxti , & ! compressed index in i-direction
+        indxtj , & ! compressed index in j-direction
+        indxui , & ! compressed index in i-direction
+        indxuj     ! compressed index in j-direction

      logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: &
        umask ! land/boundary mask, thickness (U-cell)
@@ -577,7 +575,8 @@ subroutine dyn_prep2 (nx_block, ny_block, &
      integer (kind=int_kind) :: &
        i, j, ij

-      real (kind=dbl_kind) :: gravit
+      real (kind=dbl_kind) :: &
+         gravit

      logical (kind=log_kind), dimension(nx_block,ny_block) :: &
        iceumask_old ! old-time iceumask
@@ -599,7 +598,7 @@ subroutine dyn_prep2 (nx_block, ny_block, &
        taubx (i,j) = c0
        tauby (i,j) = c0

-        if (icetmask(i,j)==0) then
+        if (icetmask(i,j)==0) then
           stressp_1 (i,j) = c0
           stressp_2 (i,j) = c0
           stressp_3 (i,j) = c0
@@ -612,7 +611,7 @@ subroutine dyn_prep2 (nx_block, ny_block, &
           stress12_2(i,j) = c0
           stress12_3(i,j) = c0
           stress12_4(i,j) = c0
-        endif
+        endif
      enddo ! i
      enddo ! j

@@ -640,16 +639,16 @@ subroutine dyn_prep2 (nx_block, ny_block, &
      !-----------------------------------------------------------------

      icellu = 0
-
+
      do j = jlo, jhi
      do i = ilo, ihi
        iceumask_old(i,j) = iceumask(i,j) ! save
!       if (grid_ice == 'B') then ! include ice mask.
        ! ice extent mask (U-cells)
-        iceumask(i,j) = (umask(i,j)) .and. (aiu (i,j) > a_min) &
+        iceumask(i,j) = (umask(i,j)) .and. (aiu (i,j) > a_min) &
                                     .and. (umass(i,j) > m_min)
!       else ! ice mask should be applied to cd grid. For now it is not implemented.
-!         iceumask(i,j) = umask(i,j)
+!         iceumask(i,j) = umask(i,j)
!       endif

        if (iceumask(i,j)) then
@@ -676,6 +675,7 @@ subroutine dyn_prep2 (nx_block, ny_block, &
           vvel_init(i,j) = vvel(i,j)
      enddo
      enddo
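
! [editor's note - illustration only, not part of the patch] The loop
! above gathers the (i,j) of every active U cell into the compressed
! index arrays indxui/indxuj so that later kernels visit only icellu
! points. A minimal, self-contained sketch of the same gather-then-
! iterate pattern, with a made-up mask and tiny dimensions:

      program compressed_index_demo
      implicit none
      integer, parameter :: nx = 4, ny = 3
      logical :: icemask(nx,ny)
      integer :: indxi(nx*ny), indxj(nx*ny)
      integer :: i, j, ij, icell
      icemask      = .false. ! made-up mask: ice in two cells only
      icemask(2,2) = .true.
      icemask(4,1) = .true.
      icell = 0              ! gather, as the loop above does for iceumask
      do j = 1, ny
      do i = 1, nx
         if (icemask(i,j)) then
            icell = icell + 1
            indxi(icell) = i
            indxj(icell) = j
         endif
      enddo
      enddo
      do ij = 1, icell       ! later loops touch only the active cells
         i = indxi(ij)
         j = indxj(ij)
         print *, 'active cell:', i, j
      enddo
      end program compressed_index_demo

+
      !-----------------------------------------------------------------
      ! 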
Define variables for momentum equation !----------------------------------------------------------------- @@ -719,7 +719,6 @@ subroutine dyn_prep2 (nx_block, ny_block, & end subroutine dyn_prep2 !======================================================================= - ! Calculation of the surface stresses ! Integration of the momentum equation to find velocity (u,v) ! @@ -764,7 +763,7 @@ subroutine stepu (nx_block, ny_block, & uarear ! 1/uarea real (kind=dbl_kind), dimension(nx_block,ny_block,8), intent(in) :: & - str + str ! temporary real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & uvel , & ! x-component of velocity (m/s) @@ -777,7 +776,7 @@ subroutine stepu (nx_block, ny_block, & tauby ! seabed stress, y-direction (N/m^2) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - Cw ! ocean-ice neutral drag coefficient + Cw ! ocean-ice neutral drag coefficient ! local variables @@ -816,11 +815,11 @@ subroutine stepu (nx_block, ny_block, & ! ice/ocean stress taux = vrel*waterx(i,j) ! NOTE this is not the entire tauy = vrel*watery(i,j) ! ocn stress term - + Cb = Tbu(i,j) / (sqrt(uold**2 + vold**2) + u0) ! for seabed stress ! revp = 0 for classic evp, 1 for revised evp cca = (brlx + revp)*umassdti(i,j) + vrel * cosw + Cb ! kg/m^2 s - + ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s ab2 = cca**2 + ccb**2 @@ -840,7 +839,7 @@ subroutine stepu (nx_block, ny_block, & uvel(i,j) = (cca*cc1 + ccb*cc2) / ab2 ! m/s vvel(i,j) = (cca*cc2 - ccb*cc1) / ab2 - ! calculate seabed stress component for outputs + ! calculate seabed stress component for outputs ! only needed on last iteration. taubx(i,j) = -uvel(i,j)*Cb tauby(i,j) = -vvel(i,j)*Cb @@ -849,7 +848,6 @@ subroutine stepu (nx_block, ny_block, & end subroutine stepu !======================================================================= - ! Integration of the momentum equation to find velocity (u,v) at E and N locations subroutine stepuv_CD (nx_block, ny_block, & @@ -899,7 +897,7 @@ subroutine stepuv_CD (nx_block, ny_block, & tauby ! seabed stress, y-direction (N/m^2) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - Cw ! ocean-ice neutral drag coefficient + Cw ! ocean-ice neutral drag coefficient ! local variables @@ -947,7 +945,7 @@ subroutine stepuv_CD (nx_block, ny_block, & ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s - ab2 = cca**2 + ccb**2 + ab2 = cca**2 + ccb**2 ! compute the velocity components cc1 = strintx(i,j) + forcex(i,j) + taux & @@ -967,7 +965,6 @@ subroutine stepuv_CD (nx_block, ny_block, & end subroutine stepuv_CD !======================================================================= - ! Integration of the momentum equation to find velocity u at E location on C grid subroutine stepu_C (nx_block, ny_block, & @@ -1003,7 +1000,7 @@ subroutine stepu_C (nx_block, ny_block, & strintx , & ! divergence of internal ice stress, x (N/m^2) Cw , & ! ocean-ice neutral drag coefficient vvel ! y-component of velocity (m/s) interpolated to E location - + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & uvel , & ! x-component of velocity (m/s) taubx ! seabed stress, x-direction (N/m^2) @@ -1067,7 +1064,6 @@ subroutine stepu_C (nx_block, ny_block, & end subroutine stepu_C !======================================================================= - ! 
Integration of the momentum equation to find velocity v at N location on C grid

      subroutine stepv_C (nx_block, ny_block, &
@@ -1157,7 +1153,7 @@ subroutine stepv_C (nx_block, ny_block, &
               + massdti(i,j)*(brlx*vold + revp*vvel_init(i,j))

         vvel(i,j) = (-ccb*uold + cc2) / cca
-
+
         ! calculate seabed stress component for outputs
         ! only needed on last iteration.
         tauby(i,j) = -vvel(i,j)*Cb
@@ -1165,9 +1161,8 @@ subroutine stepv_C (nx_block, ny_block, &
      enddo ! ij

      end subroutine stepv_C
-
-!=======================================================================
+!=======================================================================
! Calculation of the ice-ocean stress.
! ...the sign will be reversed later...
!
@@ -1179,7 +1174,7 @@ subroutine dyn_finish (nx_block, ny_block, &
                             uvel, vvel, &
                             uocn, vocn, &
                             aiu, fm, &
-                            strocnx, strocny)
+                            strocnx, strocny)

      integer (kind=int_kind), intent(in) :: &
         nx_block, ny_block, & ! block dimensions
@@ -1202,14 +1197,16 @@ subroutine dyn_finish (nx_block, ny_block, &
         strocny ! ice-ocean stress, y-direction

      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
-        Cw ! ocean-ice neutral drag coefficient
+        Cw      ! ocean-ice neutral drag coefficient

      ! local variables

      integer (kind=int_kind) :: &
         i, j, ij

-      real (kind=dbl_kind) :: vrel, rhow
+      real (kind=dbl_kind) :: &
+         vrel , & !
+         rhow     !

      character(len=*), parameter :: subname = '(dyn_finish)'

@@ -1231,7 +1228,7 @@ subroutine dyn_finish (nx_block, ny_block, &
!           strocny(i,j) = strocny(i,j) &
!                        - vrel*(vvel(i,j)*cosw + uvel(i,j)*sinw) * aiu(i,j)

-            ! update strocnx to most recent iterate and complete the term
+            ! update strocnx to most recent iterate and complete the term
            vrel = vrel * aiu(i,j)
            strocnx(i,j) = vrel*((uocn(i,j) - uvel(i,j))*cosw &
                               - (vocn(i,j) - vvel(i,j))*sinw*sign(c1,fm(i,j)))
@@ -1250,16 +1247,16 @@ end subroutine dyn_finish
!=======================================================================
! Computes seabed (basal) stress factor Tbu (landfast ice) based on mean
! thickness and bathymetry data. LKD refers to linear keel draft. This
-! parameterization assumes that the largest keel draft varies linearly
+! parameterization assumes that the largest keel draft varies linearly
! with the mean thickness.
!
-! Lemieux, J. F., B. Tremblay, F. Dupont, M. Plante, G.C. Smith, D. Dumont (2015).
-! A basal stress parameterization for modeling landfast ice, J. Geophys. Res.
+! Lemieux, J. F., B. Tremblay, F. Dupont, M. Plante, G.C. Smith, D. Dumont (2015).
+! A basal stress parameterization for modeling landfast ice, J. Geophys. Res.
! Oceans, 120, 3157-3173.
!
-! Lemieux, J. F., F. Dupont, P. Blain, F. Roy, G.C. Smith, G.M. Flato (2016).
+! Lemieux, J. F., F. Dupont, P. Blain, F. Roy, G.C. Smith, G.M. Flato (2016).
! Improving the simulation of landfast ice by combining tensile strength and a
-! parameterization for grounded ridges, J. Geophys. Res. Oceans, 121, 7354-7368.
+! parameterization for grounded ridges, J. Geophys. Res. Oceans, 121, 7354-7368.
!
! author: JF Lemieux, Philippe Blain (ECCC)
!
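! [editor's note - illustration only, not part of the patch] The LKD
! subroutine below computes a critical thickness hcu = au*hwu/k1 and then
! Tbu = k2*max(0, hu - hcu)*exp(-alphab*(1 - au)) wherever the water depth
! is shallower than threshold_hw. A minimal, self-contained sketch of that
! formula on scalars; the parameter values are assumed typical namelist
! defaults, not taken from this patch, and the inputs are made up:

      program seabed_lkd_demo
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind), parameter :: k1 = 8.0_dbl_kind            ! critical thickness parameter (assumed)
      real (kind=dbl_kind), parameter :: k2 = 15.0_dbl_kind           ! stress scale, N/m^3 (assumed)
      real (kind=dbl_kind), parameter :: alphab = 20.0_dbl_kind       ! alphab=Cb factor (assumed)
      real (kind=dbl_kind), parameter :: threshold_hw = 30.0_dbl_kind ! max depth for grounding, m (assumed)
      real (kind=dbl_kind) :: au, hu, hwu, hcu, Tbu
      au  = 0.95_dbl_kind ! ice concentration (made-up)
      hu  = 2.0_dbl_kind  ! mean ice thickness, m (made-up)
      hwu = 10.0_dbl_kind ! water depth, m (made-up)
      hcu = au*hwu/k1     ! critical thickness: keels can ground when hu > hcu
      Tbu = 0.0_dbl_kind
      if (hwu < threshold_hw) then
         Tbu = k2 * max(0.0_dbl_kind, hu - hcu) * exp(-alphab*(1.0_dbl_kind - au))
      endif
      print *, 'hcu =', hcu, ' Tbu =', Tbu
      end program seabed_lkd_demo

! The exponential factor shuts the stress off as the ice concentration
! drops, so only compact, thick ice in shallow water feels the seabed.

@@ -1280,16 +1277,16 @@ subroutine seabed_stress_factor_LKD (nx_block, ny_block, &
         icellu ! no. of cells where ice[uen]mask = 1

      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
-        indxui , & ! compressed index in i-direction
-        indxuj ! compressed index in j-direction
+        indxui , & ! compressed index in i-direction
+        indxuj     ! compressed index in j-direction

      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
-        aice , & ! 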
concentration of ice at tracer location - vice , & ! volume per unit area of ice at tracer location (m) - hwater ! water depth at tracer location (m) + aice , & ! concentration of ice at tracer location + vice , & ! volume per unit area of ice at tracer location (m) + hwater ! water depth at tracer location (m) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - Tbu ! seabed stress factor at 'grid_location' (N/m^2) + Tbu ! seabed stress factor at 'grid_location' (N/m^2) character(len=*), optional, intent(inout) :: & grid_location ! grid location (U, E, N), U assumed if not present @@ -1305,10 +1302,10 @@ subroutine seabed_stress_factor_LKD (nx_block, ny_block, & i, j, ij character(len=char_len) :: & - l_grid_location ! local version of 'grid_location' + l_grid_location ! local version of 'grid_location' character(len=*), parameter :: subname = '(seabed_stress_factor_LKD)' - + ! Assume U location (NE corner) if grid_location not present if (.not. (present(grid_location))) then l_grid_location = 'U' @@ -1321,19 +1318,19 @@ subroutine seabed_stress_factor_LKD (nx_block, ny_block, & j = indxuj(ij) ! convert quantities to grid_location - + hwu = grid_neighbor_min(hwater, i, j, l_grid_location) - docalc_tbu = merge(c1,c0,hwu < threshold_hw) - - + docalc_tbu = merge(c1,c0,hwu < threshold_hw) + + au = grid_neighbor_max(aice, i, j, l_grid_location) hu = grid_neighbor_max(vice, i, j, l_grid_location) ! 1- calculate critical thickness hcu = au * hwu / k1 - ! 2- calculate seabed stress factor + ! 2- calculate seabed stress factor Tbu(i,j) = docalc_tbu*k2 * max(c0,(hu - hcu)) * exp(-alphab * (c1 - au)) enddo ! ij @@ -1341,15 +1338,15 @@ subroutine seabed_stress_factor_LKD (nx_block, ny_block, & end subroutine seabed_stress_factor_LKD !======================================================================= -! Computes seabed (basal) stress factor Tbu (landfast ice) based on -! probability of contact between the ITD and the seabed. The water depth -! could take into account variations of the SSH. In the simplest +! Computes seabed (basal) stress factor Tbu (landfast ice) based on +! probability of contact between the ITD and the seabed. The water depth +! could take into account variations of the SSH. In the simplest ! formulation, hwater is simply the value of the bathymetry. To calculate -! the probability of contact, it is assumed that the bathymetry follows -! a normal distribution with sigma_b = 2.5d0. An improvement would -! be to provide the distribution based on high resolution data. -! -! Dupont, F. Dumont, D., Lemieux, J.F., Dumas-Lefebvre, E., Caya, A. +! the probability of contact, it is assumed that the bathymetry follows +! a normal distribution with sigma_b = 2.5d0. An improvement would +! be to provide the distribution based on high resolution data. +! +! Dupont, F. Dumont, D., Lemieux, J.F., Dumas-Lefebvre, E., Caya, A. ! in prep. ! ! authors: D. Dumont, J.F. Lemieux, E. Dumas-Lefebvre, F. Dupont @@ -1363,7 +1360,7 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & icelle, indxei, indxej, & icelln, indxni, indxnj) ! use modules - + use ice_arrays_column, only: hin_max use ice_domain_size, only: ncat use ice_grid, only: grid_neighbor_min, grid_neighbor_max @@ -1371,21 +1368,20 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions icellt, icellu ! no. 
of cells where ice[tu]mask = 1 - - integer (kind=int_kind), dimension (nx_block*ny_block), & - intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj , & ! compressed index in j-direction - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction - + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxti , & ! compressed index in i-direction + indxtj , & ! compressed index in j-direction + indxui , & ! compressed index in i-direction + indxuj ! compressed index in j-direction + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & hwater ! water depth at tracer location (m) - + real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), intent(in) :: & aicen, & ! partial concentration for last thickness category in ITD vicen ! partial volume for last thickness category in ITD (m) - + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & Tbu ! seabed stress factor at U location (N/m^2) @@ -1394,16 +1390,15 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & TbN ! seabed stress factor at N location (N/m^2) integer (kind=int_kind), intent(in), optional :: & - icelle, icelln ! no. of cells where ice[en]mask = 1 + icelle, icelln ! no. of cells where ice[en]mask = 1 - integer (kind=int_kind), dimension (nx_block*ny_block), & - intent(in), optional :: & + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in), optional :: & indxei , & ! compressed index in i-direction indxej , & ! compressed index in j-direction indxni , & ! compressed index in i-direction indxnj ! compressed index in j-direction -! local variables +! local variables integer (kind=int_kind) :: & i, j, ij, ii, n @@ -1413,7 +1408,7 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & ncat_i = 100 ! number of ice thickness categories (log-normal) real (kind=dbl_kind), parameter :: & - max_depth = 50.0_dbl_kind, & ! initial range of log-normal distribution + max_depth = 50.0_dbl_kind, & ! initial range of log-normal distribution mu_s = 0.1_dbl_kind, & ! friction coefficient sigma_b = 2.5d0 ! Standard deviation of bathymetry @@ -1425,28 +1420,38 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & real (kind=dbl_kind), dimension(ncat_b) :: & ! normal dist for bathymetry y_n, & ! center of bathymetry categories (m) b_n, & ! probability density function (bathymetry, 1/m) - P_y ! probability for each bathymetry category + P_y ! probability for each bathymetry category real (kind=dbl_kind), dimension(ncat) :: & - vcat, acat + vcat, acat ! vice, aice temporary arrays integer, dimension(ncat_b) :: & - tmp ! Temporary vector tmp = merge(1,0,gt) - + tmp ! Temporary vector tmp = merge(1,0,gt) + logical, dimension (ncat_b) :: & - gt + gt ! + + real (kind=dbl_kind) :: & + wid_i, wid_b , & ! parameters for PDFs + mu_i, sigma_i , & ! + mu_b, m_i, v_i, & ! + atot, x_kmax , & ! + cut , & ! + rhoi, rhow , & ! + gravit , & ! + pi, puny ! + + real (kind=dbl_kind), dimension(ncat_i) :: & + tb_tmp - real (kind=dbl_kind) :: wid_i, wid_b, mu_i, sigma_i, mu_b, m_i, v_i ! parameters for PDFs - real (kind=dbl_kind), dimension(ncat_i):: tb_tmp - real (kind=dbl_kind), dimension (nx_block,ny_block):: Tbt ! seabed stress factor at t point (N/m^2) - real (kind=dbl_kind) :: atot, x_kmax - real (kind=dbl_kind) :: cut, rhoi, rhow, gravit, pi, puny + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + Tbt ! 
seabed stress factor at T point (N/m^2)

       character(len=*), parameter :: subname = '(seabed_stress_factor_prob)'

       call icepack_query_parameters(rhow_out=rhow, rhoi_out=rhoi)
       call icepack_query_parameters(gravit_out=gravit)
-      call icepack_query_parameters(pi_out=pi)
+      call icepack_query_parameters(pi_out=pi)
       call icepack_query_parameters(puny_out=puny)

       Tbt=c0
@@ -1463,7 +1468,7 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, &
             wid_i = max_depth/ncat_i ! width of ice categories
             wid_b = c6*sigma_b/ncat_b ! width of bathymetry categories (6 sigma_b = 2x3 sigma_b)

-            x_k = (/( wid_i*( real(i,kind=dbl_kind) - p5 ), i=1, ncat_i )/)
+            x_k = (/( wid_i*( real(i,kind=dbl_kind) - p5 ), i=1, ncat_i )/)
             y_n = (/( ( mu_b-c3*sigma_b )+( real(i,kind=dbl_kind) - p5 )*( c6*sigma_b/ncat_b ), i=1, ncat_b )/)

             vcat(1:ncat) = vicen(i,j,1:ncat)
@@ -1481,12 +1486,12 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, &
             sigma_i  = sqrt(log(c1 + v_i/m_i**2))

             ! max thickness associated with percentile of log-normal PDF
-            ! x_kmax=x997 was obtained from an optimization procedure (Dupont et al.)
+            ! x_kmax=x997 was obtained from an optimization procedure (Dupont et al.)
             x_kmax = exp(mu_i + sqrt(c2*sigma_i)*1.9430d0)

             ! Set x_kmax to hlev of the last category where there is ice
-            ! when there is no ice in the last category
+            ! when there is no ice in the last category
             cut = x_k(ncat_i)
             do n = ncat, 1, -1
                if (acat(n) < puny) then
@@ -1527,9 +1532,9 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, &
          do ij = 1, icellu
             i = indxui(ij)
             j = indxuj(ij)
-            ! convert quantities to U-location
+            ! convert quantities to U-location
            Tbu(i,j) = grid_neighbor_max(Tbt, i, j, 'U')
-         enddo                     ! ij
+         enddo                     ! ij
       elseif (grid_ice == "C" .or. grid_ice == "CD") then
         if (present(Tbe) .and. present(TbN) .and. &
            present(icelle) .and. present(icelln) .and. &
@@ -1553,11 +1558,10 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, &
          call abort_ice(subname // ' insufficient number of arguments for grid_ice:' // grid_ice)
        endif
      endif
-
+
  end subroutine seabed_stress_factor_prob

-
-!=======================================================================
+!=======================================================================
 ! Computes principal stresses for comparison with the theoretical
 ! yield curve
 !
@@ -1585,9 +1589,11 @@ subroutine principal_stress(nx_block, ny_block, &

 ! local variables

-      integer (kind=int_kind) :: i, j
+      integer (kind=int_kind) :: &
+         i, j

-      real (kind=dbl_kind) :: puny
+      real (kind=dbl_kind) :: &
+         puny

       character(len=*), parameter :: subname = '(principal_stress)'

@@ -1599,16 +1605,16 @@ subroutine principal_stress(nx_block, ny_block, &
       do j = 1, ny_block
       do i = 1, nx_block
          if (strength(i,j) > puny) then
-            ! ice internal pressure
-            sigP(i,j) = -p5*stressp(i,j)
-
+            ! ice internal pressure
+            sigP(i,j) = -p5*stressp(i,j)
+
             ! normalized principal stresses
             sig1(i,j) = (p5*(stressp(i,j) &
                        + sqrt(stressm(i,j)**2+c4*stress12(i,j)**2))) &
                        / strength(i,j)
             sig2(i,j) = (p5*(stressp(i,j) &
                        - sqrt(stressm(i,j)**2+c4*stress12(i,j)**2))) &
-                       / strength(i,j)
+                       / strength(i,j)
          else
             sig1(i,j) = spval_dbl
            sig2(i,j) = spval_dbl
@@ -1620,7 +1626,6 @@ subroutine principal_stress(nx_block, ny_block, &
 end subroutine principal_stress

 !=======================================================================
-
 ! Compute deformations for mechanical redistribution
 !
 ! author: Elizabeth C. Hunke, LANL
@@ -1644,8 +1649,7 @@ subroutine deformations (nx_block, ny_block, &
         nx_block, ny_block, & ! block dimensions
         icellt ! no.
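
The log-normal fit used above is standard moment matching. A small self-contained sketch of those relations, with hypothetical mean and variance of the thickness distribution:

program lognormal_fit
   ! Moment matching for a log-normal thickness distribution: given the
   ! mean m_i and variance v_i of h, recover mu and sigma of log(h).
   ! Inputs are hypothetical.
   integer, parameter :: dbl_kind = kind(1.0d0)
   real (kind=dbl_kind) :: m_i, v_i, mu_i, sigma_i

   m_i = 2.0_dbl_kind    ! mean ice thickness (m)
   v_i = 1.0_dbl_kind    ! variance of ice thickness (m^2)

   ! if h is log-normal, E[h] = exp(mu + sigma^2/2) and
   ! Var[h] = (exp(sigma^2) - 1) * exp(2*mu + sigma^2); inverting gives:
   mu_i    = log(m_i / sqrt(1.0_dbl_kind + v_i/m_i**2))
   sigma_i = sqrt(log(1.0_dbl_kind + v_i/m_i**2))

   print *, 'mu =', mu_i, '  sigma =', sigma_i
end program lognormal_fit
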
of cells where icetmask = 1 - integer (kind=int_kind), dimension (nx_block*ny_block), & - intent(in) :: & + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxti , & ! compressed index in i-direction indxtj ! compressed index in j-direction @@ -1659,9 +1663,8 @@ subroutine deformations (nx_block, ny_block, & cym , & ! 0.5*HTE - 1.5*HTW cxm , & ! 0.5*HTN - 1.5*HTS tarear ! 1/tarea - - real (kind=dbl_kind), dimension (nx_block,ny_block), & - intent(inout) :: & + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & shear , & ! strain rate II component (1/s) divu , & ! strain rate I component, velocity divergence (1/s) rdg_conv , & ! convergence term for ridging (1/s) @@ -1680,15 +1683,15 @@ subroutine deformations (nx_block, ny_block, & tmp ! useful combination character(len=*), parameter :: subname = '(deformations)' - + do ij = 1, icellt i = indxti(ij) j = indxtj(ij) - - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- + + !----------------------------------------------------------------- + ! strain rates + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- call strain_rates (nx_block, ny_block, & i, j, & uvel, vvel, & @@ -1703,9 +1706,9 @@ subroutine deformations (nx_block, ny_block, & shearse, shearsw, & Deltane, Deltanw, & Deltase, Deltasw ) - !----------------------------------------------------------------- - ! deformations for mechanical redistribution - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! deformations for mechanical redistribution + !----------------------------------------------------------------- divu(i,j) = p25*(divune + divunw + divuse + divusw) * tarear(i,j) tmp = p25*(Deltane + Deltanw + Deltase + Deltasw) * tarear(i,j) rdg_conv(i,j) = -min(divu(i,j),c0) @@ -1720,9 +1723,8 @@ subroutine deformations (nx_block, ny_block, & enddo ! ij end subroutine deformations - -!======================================================================= +!======================================================================= ! Compute deformations for mechanical redistribution at T point ! ! author: JF Lemieux, ECCC @@ -1745,8 +1747,7 @@ subroutine deformations_T (nx_block, ny_block, & nx_block, ny_block, & ! block dimensions icellt ! no. of cells where icetmask = 1 - integer (kind=int_kind), dimension (nx_block*ny_block), & - intent(in) :: & + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxti , & ! compressed index in i-direction indxtj ! compressed index in j-direction @@ -1760,7 +1761,7 @@ subroutine deformations_T (nx_block, ny_block, & dxT , & ! width of T-cell through the middle (m) dyT , & ! height of T-cell through the middle (m) tarear ! 1/tarea - + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & shear , & ! strain rate II component (1/s) divu , & ! strain rate I component, velocity divergence (1/s) @@ -1772,14 +1773,17 @@ subroutine deformations_T (nx_block, ny_block, & integer (kind=int_kind) :: & i, j, ij - real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - divT, tensionT, shearT, DeltaT ! strain rates at T point + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + divT , & ! divergence at T point + tensionT , & ! 
tension at T point + shearT , & ! shear at T point + DeltaT ! delt at T point - real (kind=dbl_kind) :: & - tmp ! useful combination + real (kind=dbl_kind) :: & + tmp ! useful combination character(len=*), parameter :: subname = '(deformations_T)' - + !----------------------------------------------------------------- ! strain rates ! NOTE these are actually strain rates * area (m^2/s) @@ -1814,9 +1818,8 @@ subroutine deformations_T (nx_block, ny_block, & enddo ! ij end subroutine deformations_T - -!======================================================================= +!======================================================================= ! Compute strain rates ! ! author: Elizabeth C. Hunke, LANL @@ -1840,10 +1843,10 @@ subroutine strain_rates (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block ! block dimensions - - integer (kind=int_kind) :: & + + integer (kind=int_kind), intent(in) :: & i, j ! indices - + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvel , & ! x-component of velocity (m/s) vvel , & ! y-component of velocity (m/s) @@ -1853,15 +1856,15 @@ subroutine strain_rates (nx_block, ny_block, & cxp , & ! 1.5*HTN - 0.5*HTS cym , & ! 0.5*HTE - 1.5*HTW cxm ! 0.5*HTN - 1.5*HTS - + real (kind=dbl_kind), intent(out):: & ! at each corner : divune, divunw, divuse, divusw , & ! divergence tensionne, tensionnw, tensionse, tensionsw, & ! tension shearne, shearnw, shearse, shearsw , & ! shearing Deltane, Deltanw, Deltase, Deltasw ! Delta - + character(len=*), parameter :: subname = '(strain_rates)' - + !----------------------------------------------------------------- ! strain rates ! NOTE these are actually strain rates * area (m^2/s) @@ -1896,7 +1899,7 @@ subroutine strain_rates (nx_block, ny_block, & - cxp(i,j)*uvel(i-1,j-1) + dxt(i,j)*uvel(i-1,j ) shearse = -cym(i,j)*vvel(i ,j-1) - dyt(i,j)*vvel(i-1,j-1) & - cxp(i,j)*uvel(i ,j-1) + dxt(i,j)*uvel(i ,j ) - + ! Delta (in the denominator of zeta, eta) Deltane = sqrt(divune**2 + e_factor*(tensionne**2 + shearne**2)) Deltanw = sqrt(divunw**2 + e_factor*(tensionnw**2 + shearnw**2)) @@ -1906,7 +1909,6 @@ subroutine strain_rates (nx_block, ny_block, & end subroutine strain_rates !======================================================================= - ! Compute strain rates at the T point ! ! author: JF Lemieux, ECCC @@ -1925,7 +1927,7 @@ subroutine strain_rates_T (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions icellt - + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxti , & ! compressed index in i-direction indxtj ! compressed index in j-direction @@ -1939,11 +1941,11 @@ subroutine strain_rates_T (nx_block, ny_block, & dyE , & ! height of E-cell through the middle (m) dxT , & ! width of T-cell through the middle (m) dyT ! height of T-cell through the middle (m) - + real (kind=dbl_kind), dimension (nx_block,ny_block), optional, intent(out):: & - divT , & - tensionT , & - shearT , & + divT , & ! divergence at T point + tensionT , & ! tension at T point + shearT , & ! shear at T point DeltaT ! strain rates at the T point ! local variables @@ -1955,7 +1957,7 @@ subroutine strain_rates_T (nx_block, ny_block, & integer (kind=int_kind) :: & ij, i, j ! indices - + character(len=*), parameter :: subname = '(strain_rates_T)' !----------------------------------------------------------------- @@ -1998,7 +2000,7 @@ subroutine strain_rates_T (nx_block, ny_block, & shearT(i,j) = lshearT endif endif - + ! 
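
The cell-mean deformations computed in these routines are area-weighted averages of corner (or point) values, and Delta follows from the elliptical yield curve. A compact sketch of the reductions, with hypothetical corner values and assuming e_factor = 1/e**2 with ellipse ratio e = 2:

program deformations_sketch
   ! Sketch of the reductions done in deformations/deformations_T:
   ! average the corner (strain rate * area) values over the cell, then
   ! form Delta = sqrt(div**2 + e_factor*(tension**2 + shear**2)).
   ! All numbers are hypothetical.
   integer, parameter :: dbl_kind = kind(1.0d0)
   real (kind=dbl_kind), parameter :: p25 = 0.25_dbl_kind, c0 = 0.0_dbl_kind
   real (kind=dbl_kind), parameter :: e_factor = 0.25_dbl_kind  ! 1/e**2, e = 2
   real (kind=dbl_kind) :: divune, divunw, divuse, divusw   ! corner divergences (m^2/s)
   real (kind=dbl_kind) :: tension, shear, tarear
   real (kind=dbl_kind) :: divu, rdg_conv, Delta

   divune =  1.0e2_dbl_kind;  divunw = -2.0e2_dbl_kind
   divuse =  0.5e2_dbl_kind;  divusw = -1.5e2_dbl_kind
   tension = -0.5e2_dbl_kind; shear  =  0.2e2_dbl_kind  ! cell means (m^2/s)
   tarear  = 1.0e-8_dbl_kind                            ! 1/tarea (1/m^2)

   divu     = p25*(divune + divunw + divuse + divusw)*tarear   ! 1/s
   rdg_conv = -min(divu, c0)        ! only convergence feeds ridging
   Delta    = sqrt(divu**2 + e_factor*((tension*tarear)**2 + (shear*tarear)**2))

   print *, 'divu =', divu, '  rdg_conv =', rdg_conv, '  Delta =', Delta
end program deformations_sketch
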
Delta (in the denominator of zeta, eta) if (present(deltaT)) then DeltaT (i,j) = sqrt(ldivT**2 + e_factor*(ltensionT**2 + lshearT**2)) @@ -2009,7 +2011,6 @@ subroutine strain_rates_T (nx_block, ny_block, & end subroutine strain_rates_T !======================================================================= - ! Compute strain rates at the U point including boundary conditions ! ! author: JF Lemieux, ECCC @@ -2054,28 +2055,28 @@ subroutine strain_rates_U (nx_block, ny_block, & ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) for BCs epm , & ! E-cell mask npm ! N-cell mask - + real (kind=dbl_kind), dimension (nx_block,ny_block), optional, intent(out):: & - divU , & - tensionU , & - shearU , & - DeltaU ! strain rates at the U point + divU , & ! divergence at U point + tensionU , & ! tension at U point + shearU , & ! shear at U point + DeltaU ! delt at the U point ! local variables integer (kind=int_kind) :: & ij, i, j ! indices - + real (kind=dbl_kind) :: & ldivU , & ltensionU , & lshearU ! local values - real (kind=dbl_kind) :: & + real (kind=dbl_kind) :: & uNip1j, uNij, vEijp1, vEij, uEijp1, uEij, vNip1j, vNij - + character(len=*), parameter :: subname = '(strain_rates_U)' - + !----------------------------------------------------------------- ! strain rates ! NOTE these are actually strain rates * area (m^2/s) @@ -2100,7 +2101,7 @@ subroutine strain_rates_U (nx_block, ny_block, & vEij = vvelE(i,j) * epm(i,j) & +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * vvelE(i,j+1) - + ! divergence = e_11 + e_22 ldivU = dyU(i,j) * ( uNip1j - uNij ) & + uvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & @@ -2129,7 +2130,7 @@ subroutine strain_rates_U (nx_block, ny_block, & +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * vvelN(i,j) vNij = vvelN(i,j) * npm(i,j) & +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * vvelN(i+1,j) - + ! shearing strain rate = 2*e_12 lshearU = dxU(i,j) * ( uEijp1 - uEij ) & - uvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & @@ -2150,7 +2151,7 @@ subroutine strain_rates_U (nx_block, ny_block, & end subroutine strain_rates_U !======================================================================= -! Computes viscosities and replacement pressure for stress +! Computes viscosities and replacement pressure for stress ! calculations. Note that tensile strength is included here. ! ! Hibler, W. D. (1979). A dynamic thermodynamic sea ice model. J. Phys. @@ -2167,17 +2168,21 @@ subroutine visc_replpress(strength, DminArea, Delta, & zetax2, etax2, rep_prs, capping) real (kind=dbl_kind), intent(in):: & - strength, DminArea + strength, & ! + DminArea ! real (kind=dbl_kind), intent(in):: & - Delta, capping + Delta , & ! + capping ! real (kind=dbl_kind), intent(out):: & - zetax2, etax2, rep_prs ! 2 x viscosities, replacement pressure + zetax2 , & ! bulk viscosity + etax2 , & ! shear viscosity + rep_prs ! replacement pressure ! local variables real (kind=dbl_kind) :: & - tmpcalc + tmpcalc ! temporary character(len=*), parameter :: subname = '(visc_replpress)' @@ -2192,19 +2197,18 @@ subroutine visc_replpress(strength, DminArea, Delta, & end subroutine visc_replpress !======================================================================= - -! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The +! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The ! elastic-viscous-plastic method revisited. Ocean Model., 71, 2-12. - +! ! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method ! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. - +! ! avg_zeta: Bouillon et al. 
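
Before the averaging variants below, it may help to see the scalar relations that visc_replpress encodes. This is a sketch of one common form only (roughly the capping = 1 branch, without the tensile-strength factor); names and numbers are illustrative, and the exact definitions are in Hibler (1979) and Lemieux and Dupont (2020):

program visc_sketch
   ! Sketch of the viscous-plastic closure behind visc_replpress:
   !   2*zeta = P / max(Delta, Delta_min)        (bulk viscosity, capped)
   !   2*eta  = (2*zeta) / e**2                  (shear viscosity)
   !   Pr     = P * Delta / (Delta + Delta_min)  (replacement pressure)
   ! Values are hypothetical; the actual routine also supports a smooth
   ! (capping = 0) form and includes tensile strength.
   integer, parameter :: dbl_kind = kind(1.0d0)
   real (kind=dbl_kind) :: strength, Delta, Dmin, e2i, zetax2, etax2, rep_prs

   strength = 2.0e4_dbl_kind    ! ice strength P (N/m)
   Delta    = 1.0e-11_dbl_kind  ! nearly rigid flow (1/s)
   Dmin     = 2.0e-9_dbl_kind   ! lower bound for Delta (1/s)
   e2i      = 0.25_dbl_kind     ! 1/e**2, ellipse ratio e = 2

   zetax2  = strength / max(Delta, Dmin)
   etax2   = e2i * zetax2
   rep_prs = strength * Delta / (Delta + Dmin)  ! vanishes as Delta -> 0

   print *, '2*zeta =', zetax2, '  2*eta =', etax2, '  Pr =', rep_prs
end program visc_sketch
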
2013, C1 method of Kimmritz et al. 2016 - + subroutine visc_replpress_avgzeta (zetax2T1, zetax2T2, & zetax2T3, zetax2T4, & etax2T1, etax2T2, & - etax2T3, etax2T4, & + etax2T3, etax2T4, & mask1, mask2, & mask3, mask4, & area1, area2, & @@ -2260,12 +2264,11 @@ subroutine visc_replpress_avgzeta (zetax2T1, zetax2T2, & end subroutine visc_replpress_avgzeta !======================================================================= - ! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method ! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. - +! ! avg_strength: C2 method of Kimmritz et al. 2016 - + subroutine visc_replpress_avgstr (strength1, strength2, & strength3, strength4, & mask1, mask2, & @@ -2304,11 +2307,11 @@ subroutine visc_replpress_avgstr (strength1, strength2, & call visc_replpress (strtmp, DminUarea, deltaU, & zetax2U, etax2U, rep_prsU, capping) - + end subroutine visc_replpress_avgstr !======================================================================= -! Do a halo update on 1 fields +! Do a halo update on 1 field subroutine dyn_haloUpdate1(halo_info, halo_info_mask, field_loc, field_type, fld1) @@ -2317,23 +2320,23 @@ subroutine dyn_haloUpdate1(halo_info, halo_info_mask, field_loc, field_type, fld use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound type (ice_halo), intent(in) :: & - halo_info , & ! standard unmasked halo - halo_info_mask ! masked halo + halo_info , & ! standard unmasked halo + halo_info_mask ! masked halo integer (kind=int_kind), intent(in) :: & - field_loc, & ! field loc - field_type ! field_type + field_loc , & ! field loc + field_type ! field_type real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & - fld1 ! fields to halo + fld1 ! fields to halo ! local variables integer (kind=int_kind) :: & - iblk ! iblock + iblk ! iblock real (kind=dbl_kind), dimension (nx_block,ny_block,1,max_blocks) :: & - fldbundle ! work array for boundary updates + fldbundle ! work array for boundary updates character(len=*), parameter :: subname = '(dyn_haloUpdate1)' @@ -2361,24 +2364,24 @@ subroutine dyn_haloUpdate2(halo_info, halo_info_mask, field_loc, field_type, fld use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound type (ice_halo), intent(in) :: & - halo_info , & ! standard unmasked halo - halo_info_mask ! masked halo + halo_info , & ! standard unmasked halo + halo_info_mask ! masked halo integer (kind=int_kind), intent(in) :: & - field_loc, & ! field loc - field_type ! field_type + field_loc , & ! field loc + field_type ! field_type real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & - fld1 , & ! fields to halo - fld2 ! + fld1 , & ! fields to halo + fld2 ! ! local variables integer (kind=int_kind) :: & - iblk ! iblock + iblk ! iblock real (kind=dbl_kind), dimension (nx_block,ny_block,2,max_blocks) :: & - fldbundle ! work array for boundary updates + fldbundle ! work array for boundary updates character(len=*), parameter :: subname = '(dyn_haloUpdate2)' @@ -2425,25 +2428,25 @@ subroutine dyn_haloUpdate3(halo_info, halo_info_mask, field_loc, field_type, fld use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound type (ice_halo), intent(in) :: & - halo_info , & ! standard unmasked halo - halo_info_mask ! masked halo + halo_info , & ! standard unmasked halo + halo_info_mask ! masked halo integer (kind=int_kind), intent(in) :: & - field_loc, & ! field loc - field_type ! field_type + field_loc , & ! field loc + field_type ! 
field_type real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & - fld1 , & ! fields to halo - fld2 , & ! - fld3 ! + fld1 , & ! fields to halo + fld2 , & ! + fld3 ! ! local variables integer (kind=int_kind) :: & - iblk ! iblock + iblk ! iblock real (kind=dbl_kind), dimension (nx_block,ny_block,3,max_blocks) :: & - fldbundle ! work array for boundary updates + fldbundle ! work array for boundary updates character(len=*), parameter :: subname = '(dyn_haloUpdate3)' @@ -2494,26 +2497,26 @@ subroutine dyn_haloUpdate4(halo_info, halo_info_mask, field_loc, field_type, fld use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound type (ice_halo), intent(in) :: & - halo_info , & ! standard unmasked halo - halo_info_mask ! masked halo + halo_info , & ! standard unmasked halo + halo_info_mask ! masked halo integer (kind=int_kind), intent(in) :: & - field_loc, & ! field loc - field_type ! field_type + field_loc, & ! field loc + field_type ! field_type real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & - fld1 , & ! fields to halo - fld2 , & ! - fld3 , & ! - fld4 ! + fld1 , & ! fields to halo + fld2 , & ! + fld3 , & ! + fld4 ! ! local variables integer (kind=int_kind) :: & - iblk ! iblock + iblk ! iblock real (kind=dbl_kind), dimension (nx_block,ny_block,4,max_blocks) :: & - fldbundle ! work array for boundary updates + fldbundle ! work array for boundary updates character(len=*), parameter :: subname = '(dyn_haloUpdate4)' @@ -2568,27 +2571,27 @@ subroutine dyn_haloUpdate5(halo_info, halo_info_mask, field_loc, field_type, fld use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound type (ice_halo), intent(in) :: & - halo_info , & ! standard unmasked halo - halo_info_mask ! masked halo + halo_info , & ! standard unmasked halo + halo_info_mask ! masked halo integer (kind=int_kind), intent(in) :: & - field_loc, & ! field loc - field_type ! field_type + field_loc , & ! field loc + field_type ! field_type real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & - fld1 , & ! fields to halo - fld2 , & ! - fld3 , & ! - fld4 , & ! - fld5 ! + fld1 , & ! fields to halo + fld2 , & ! + fld3 , & ! + fld4 , & ! + fld5 ! ! local variables integer (kind=int_kind) :: & - iblk ! iblock + iblk ! iblock real (kind=dbl_kind), dimension (nx_block,ny_block,5,max_blocks) :: & - fldbundle ! work array for boundary updates + fldbundle ! work array for boundary updates character(len=*), parameter :: subname = '(dyn_haloUpdate5)' @@ -2646,11 +2649,11 @@ subroutine stack_fields2(fld1, fld2, fldbundle) use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & - fld1 , & ! + fld1 , & ! fields to stack fld2 ! real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & - fldbundle ! work array for boundary updates (i,j,n,iblk) + fldbundle ! work array for boundary updates (i,j,n,iblk) ! local variables @@ -2679,12 +2682,12 @@ subroutine stack_fields3(fld1, fld2, fld3, fldbundle) use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & - fld1 , & ! - fld2 , & ! - fld3 ! + fld1 , & ! fields to stack + fld2 , & ! + fld3 ! real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & - fldbundle ! work array for boundary updates (i,j,n,iblk) + fldbundle ! work array for boundary updates (i,j,n,iblk) ! 
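
The stack routines here and their unstack counterparts below exist so that several 2D-per-block fields ride through a single halo update instead of N separate ones. A toy sketch of the round trip (dimensions are hypothetical, and the actual ice_HaloUpdate call is omitted):

program bundle_sketch
   ! Sketch of the field-bundling idea behind dyn_haloUpdate2..5: copy
   ! several (i,j,iblk) fields into one (i,j,n,iblk) work array so one
   ! halo update moves all of them at once, then copy back.
   integer, parameter :: dbl_kind = kind(1.0d0)
   integer, parameter :: nx = 8, ny = 8, nblk = 2
   real (kind=dbl_kind) :: fld1(nx,ny,nblk), fld2(nx,ny,nblk)
   real (kind=dbl_kind) :: fldbundle(nx,ny,2,nblk)
   integer :: iblk

   call random_number(fld1)
   call random_number(fld2)

   do iblk = 1, nblk                      ! stack
      fldbundle(:,:,1,iblk) = fld1(:,:,iblk)
      fldbundle(:,:,2,iblk) = fld2(:,:,iblk)
   enddo

   ! ... a single halo update on fldbundle would go here ...

   do iblk = 1, nblk                      ! unstack
      fld1(:,:,iblk) = fldbundle(:,:,1,iblk)
      fld2(:,:,iblk) = fldbundle(:,:,2,iblk)
   enddo

   print *, 'round-trip max error:', maxval(abs(fld1 - fldbundle(:,:,1,:)))
end program bundle_sketch
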
local variables @@ -2714,13 +2717,13 @@ subroutine stack_fields4(fld1, fld2, fld3, fld4, fldbundle) use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & - fld1 , & ! - fld2 , & ! - fld3 , & ! - fld4 ! + fld1 , & ! fields to stack + fld2 , & ! + fld3 , & ! + fld4 ! real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & - fldbundle ! work array for boundary updates (i,j,n,iblk) + fldbundle ! work array for boundary updates (i,j,n,iblk) ! local variables @@ -2751,14 +2754,14 @@ subroutine stack_fields5(fld1, fld2, fld3, fld4, fld5, fldbundle) use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & - fld1 , & ! - fld2 , & ! - fld3 , & ! - fld4 , & ! - fld5 ! + fld1 , & ! fields to stack + fld2 , & ! + fld3 , & ! + fld4 , & ! + fld5 ! real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & - fldbundle ! work array for boundary updates (i,j,n,iblk) + fldbundle ! work array for boundary updates (i,j,n,iblk) ! local variables @@ -2790,10 +2793,10 @@ subroutine unstack_fields2(fldbundle, fld1, fld2) use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & - fldbundle ! work array for boundary updates (i,j,n,iblk) + fldbundle ! work array for boundary updates (i,j,n,iblk) real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & - fld1 , & ! + fld1 , & ! fields to unstack fld2 ! ! local variables @@ -2823,10 +2826,10 @@ subroutine unstack_fields3(fldbundle, fld1, fld2, fld3) use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & - fldbundle ! work array for boundary updates (i,j,n,iblk) + fldbundle ! work array for boundary updates (i,j,n,iblk) real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & - fld1 , & ! + fld1 , & ! fields to unstack fld2 , & ! fld3 ! @@ -2858,10 +2861,10 @@ subroutine unstack_fields4(fldbundle, fld1, fld2, fld3, fld4) use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & - fldbundle ! work array for boundary updates (i,j,n,iblk) + fldbundle ! work array for boundary updates (i,j,n,iblk) real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & - fld1 , & ! + fld1 , & ! fields to unstack fld2 , & ! fld3 , & ! fld4 ! @@ -2895,10 +2898,10 @@ subroutine unstack_fields5(fldbundle, fld1, fld2, fld3, fld4, fld5) use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & - fldbundle ! work array for boundary updates (i,j,n,iblk) + fldbundle ! work array for boundary updates (i,j,n,iblk) real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & - fld1 , & ! + fld1 , & ! fields to unstack fld2 , & ! fld3 , & ! fld4 , & ! @@ -2926,7 +2929,7 @@ subroutine unstack_fields5(fldbundle, fld1, fld2, fld3, fld4, fld5) end subroutine unstack_fields5 !======================================================================= - + end module ice_dyn_shared !======================================================================= diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 9621db4b1..4796669e9 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -103,7 +103,7 @@ module ice_dyn_vp indxui(:,:) , & ! compressed index in i-direction indxuj(:,:) ! 
compressed index in j-direction - real (kind=dbl_kind), allocatable :: & + real (kind=dbl_kind), allocatable :: & fld2(:,:,:,:) ! work array for boundary updates !======================================================================= @@ -140,7 +140,7 @@ subroutine init_vp indxui(nx_block*ny_block, max_blocks), & indxuj(nx_block*ny_block, max_blocks)) allocate(fld2(nx_block,ny_block,2,max_blocks)) - + end subroutine init_vp !======================================================================= @@ -214,7 +214,7 @@ subroutine implicit_solver (dt) zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) etax2 , & ! etax2 = 2*eta (shear viscosity) rep_prs ! replacement pressure - + logical (kind=log_kind) :: calc_strair integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & @@ -241,7 +241,7 @@ subroutine implicit_solver (dt) !----------------------------------------------------------------- ! Initialize !----------------------------------------------------------------- - + ! This call is needed only if dt changes during runtime. ! call set_evp_parameters (dt) @@ -377,7 +377,7 @@ subroutine implicit_solver (dt) forcex (:,:,iblk), forcey (:,:,iblk), & uvel_init (:,:,iblk), vvel_init (:,:,iblk), & bxfix (:,:,iblk), byfix (:,:,iblk)) - + !----------------------------------------------------------------- ! ice strength !----------------------------------------------------------------- @@ -451,20 +451,20 @@ subroutine implicit_solver (dt) endif endif - - + + !----------------------------------------------------------------- ! calc size of problem (ntot) and allocate solution vector !----------------------------------------------------------------- - + ntot = 0 do iblk = 1, nblocks ntot = ntot + icellu(iblk) enddo ntot = 2 * ntot ! times 2 because of u and v - + allocate(sol(ntot)) - + !----------------------------------------------------------------- ! Start of nonlinear iteration !----------------------------------------------------------------- @@ -485,7 +485,7 @@ subroutine implicit_solver (dt) !----------------------------------------------------------------- deallocate(sol) - + if (maskhalo_dyn) call ice_HaloDestroy(halo_info_mask) !----------------------------------------------------------------- @@ -528,7 +528,7 @@ subroutine implicit_solver (dt) rdg_conv (:,:,iblk), rdg_shear (:,:,iblk)) enddo !$OMP END PARALLEL DO - + !----------------------------------------------------------------- ! Compute seabed stress (diagnostic) !----------------------------------------------------------------- @@ -544,7 +544,7 @@ subroutine implicit_solver (dt) enddo !$OMP END PARALLEL DO endif - + ! Force symmetry across the tripole seam if (trim(grid_type) == 'tripole') then if (maskhalo_dyn) then @@ -732,7 +732,7 @@ subroutine anderson_solver (icellt , icellu, & zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) etax2 , & ! etax2 = 2*eta (shear viscosity) rep_prs ! replacement pressure - + type (ice_halo), intent(in) :: & halo_info_mask ! ghost cell update info for masked halo @@ -816,14 +816,14 @@ subroutine anderson_solver (icellt , icellu, & ! Initialization res_num = 0 L2norm = c0 - + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks uprev_k(:,:,iblk) = uvel(:,:,iblk) vprev_k(:,:,iblk) = vvel(:,:,iblk) enddo !$OMP END PARALLEL DO - + ! Start iterations do it_nl = 0, maxits_nonlin ! nonlinear iteration loop ! 
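
The sizing of the 1D solution vector used by the implicit solver is worth spelling out: each active velocity point carries a u and a v unknown, so ntot = 2 * sum(icellu). A sketch with hypothetical block counts:

program solsize_sketch
   ! Sketch: size the implicit VP solution vector from the per-block
   ! counts of active velocity cells. Block counts are hypothetical.
   integer, parameter :: dbl_kind = kind(1.0d0)
   integer, parameter :: nblocks = 3
   integer :: icellu(nblocks), ntot, iblk
   real (kind=dbl_kind), allocatable :: sol(:)

   icellu = (/ 120, 87, 0 /)   ! active cells per block (a land block gives 0)

   ntot = 0
   do iblk = 1, nblocks
      ntot = ntot + icellu(iblk)
   enddo
   ntot = 2 * ntot             ! times 2 because of u and v

   allocate(sol(ntot))
   print *, 'ntot =', ntot
end program solsize_sketch
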
Compute quantities needed for computing PDE residual and g(x) (fixed point map) @@ -842,7 +842,7 @@ subroutine anderson_solver (icellt , icellu, & endif uprev_k(:,:,iblk) = uvel(:,:,iblk) vprev_k(:,:,iblk) = vvel(:,:,iblk) - + call calc_zeta_dPr (nx_block , ny_block , & icellt (iblk), & indxti (:,iblk), indxtj (:,iblk), & @@ -854,7 +854,7 @@ subroutine anderson_solver (icellt , icellu, & DminTarea (:,:,iblk),strength (:,:,iblk),& zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:),& rep_prs(:,:,iblk,:), stress_Pr (:,:,:)) - + call calc_vrel_Cb (nx_block , ny_block , & icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & @@ -862,7 +862,7 @@ subroutine anderson_solver (icellt , icellu, & uocn (:,:,iblk), vocn (:,:,iblk), & ulin (:,:,iblk), vlin (:,:,iblk), & vrel (:,:,iblk), Cb (:,:,iblk)) - + ! prepare b vector (RHS) call calc_bvec (nx_block , ny_block , & icellu (iblk), & @@ -872,7 +872,7 @@ subroutine anderson_solver (icellt , icellu, & bxfix (:,:,iblk), byfix (:,:,iblk), & bx (:,:,iblk), by (:,:,iblk), & vrel (:,:,iblk)) - + ! Compute nonlinear residual norm (PDE residual) call matvec (nx_block , ny_block , & icellu (iblk) , icellt (iblk), & @@ -906,12 +906,12 @@ subroutine anderson_solver (icellt , icellu, & if (it_nl == 0) then tol_nl = reltol_nonlin*nlres_norm endif - + ! Check for nonlinear convergence if (nlres_norm < tol_nl) then exit endif - + ! Put initial guess for FGMRES in solx,soly and sol (needed for anderson) solx = uprev_k soly = vprev_k @@ -921,11 +921,11 @@ subroutine anderson_solver (icellt , icellu, & indxui (:,:), indxuj (:,:), & uprev_k (:,:,:), vprev_k (:,:,:), & sol (:)) - + ! Compute fixed point map g(x) if (fpfunc_andacc == 1) then ! g_1(x) = FGMRES(A(x), b(x)) - + ! Prepare diagonal for preconditioner if (precond == 'diag' .or. precond == 'pgmres') then !$OMP PARALLEL DO PRIVATE(iblk,diag_rheo) @@ -951,7 +951,7 @@ subroutine anderson_solver (icellt , icellu, & enddo !$OMP END PARALLEL DO endif - + ! FGMRES linear solver call fgmres (zetax2 , etax2 , & Cb , vrel , & @@ -1002,13 +1002,13 @@ subroutine anderson_solver (icellt , icellu, & write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & " fixed_point_res_L2norm= ", fpres_norm endif - + ! Not used for now (only nonlinear residual is checked) ! ! Store initial residual norm ! if (it_nl == 0) then ! tol = reltol_andacc*fpres_norm ! endif - ! + ! ! ! Check residual ! if (fpres_norm < tol) then ! exit @@ -1066,7 +1066,7 @@ subroutine anderson_solver (icellt , icellu, & endif ! TODO: here, drop more columns to improve conditioning ! if (droptol) then - + ! endif ! Solve least square problem for coefficients ! 1. Compute rhs_tri = Q^T * res @@ -1080,7 +1080,7 @@ subroutine anderson_solver (icellt , icellu, & ! Apply damping if (damping_andacc > 0 .and. damping_andacc /= 1) then ! x = x - (1-beta) (res - Q*R*coeffs) - + ! tmp = R*coeffs call dgemv ('n', res_num, res_num, c1, R(1:res_num,1:res_num), res_num, coeffs, inc, c0, tmp, inc) ! res = res - Q*tmp @@ -1095,7 +1095,7 @@ subroutine anderson_solver (icellt , icellu, & file=__FILE__, line=__LINE__) #endif endif - + !----------------------------------------------------------------------- ! Put vector sol in uvel and vvel arrays !----------------------------------------------------------------------- @@ -1105,7 +1105,7 @@ subroutine anderson_solver (icellt , icellu, & indxui (:,:), indxuj (:,:), & sol (:), & uvel (:,:,:), vvel (:,:,:)) - + ! 
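
Stripped of the physics, the nonlinear loop above is a fixed-point iteration with a relative stopping test anchored at the initial residual norm. A toy version of the same control flow (the map g(x) = cos(x) is purely illustrative and stands in for the FGMRES fixed-point map):

program fixedpoint_sketch
   ! Sketch of the Picard/fixed-point control flow of anderson_solver:
   ! iterate x <- g(x); set the tolerance from the initial residual;
   ! stop when the residual drops below it.
   integer, parameter :: dbl_kind = kind(1.0d0)
   real (kind=dbl_kind) :: x, res, tol
   integer :: it

   x = 1.0_dbl_kind
   do it = 0, 100
      res = abs(cos(x) - x)                      ! nonlinear residual |g(x) - x|
      if (it == 0) tol = 1.0e-8_dbl_kind * res   ! relative tolerance
      if (res < tol) exit
      x = cos(x)                                 ! fixed-point update
   enddo
   print *, 'iterations:', it, '  x =', x
end program fixedpoint_sketch
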
Do halo update so that halo cells contain up to date info for advection call stack_fields(uvel, vvel, fld2) call ice_timer_start(timer_bound) @@ -1118,7 +1118,7 @@ subroutine anderson_solver (icellt , icellu, & endif call ice_timer_stop(timer_bound) call unstack_fields(fld2, uvel, vvel) - + ! Compute "progress" residual norm !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -1136,9 +1136,9 @@ subroutine anderson_solver (icellt , icellu, & write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & " progress_res_L2norm= ", prog_norm endif - + enddo ! nonlinear iteration loop - + end subroutine anderson_solver !======================================================================= @@ -1185,8 +1185,8 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & real (kind=dbl_kind), dimension(nx_block,ny_block,4), intent(out) :: & zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) etax2 , & ! etax2 = 2*eta (shear viscosity) - rep_prs ! replacement pressure - + rep_prs ! replacement pressure + real (kind=dbl_kind), dimension(nx_block,ny_block,8), intent(out) :: & stPr ! stress combinations from replacement pressure @@ -1236,9 +1236,9 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & Deltane , Deltanw , & Deltase , Deltasw) - !----------------------------------------------------------------- - ! viscosities and replacement pressure - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! viscosities and replacement pressure + !----------------------------------------------------------------- call visc_replpress (strength(i,j) , DminTarea(i,j) , & Deltane , zetax2 (i,j,1), & @@ -1266,7 +1266,7 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & stressp_2 = -rep_prs(i,j,2) stressp_3 = -rep_prs(i,j,3) stressp_4 = -rep_prs(i,j,4) - + !----------------------------------------------------------------- ! combinations of the Pr related stresses for the momentum equation ! kg/s^2 !----------------------------------------------------------------- @@ -1282,7 +1282,7 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & csigpnw = p111*stressp_2 + ssigp1 + p027*stressp_4 csigpsw = p111*stressp_3 + ssigp2 + p027*stressp_1 csigpse = p111*stressp_4 + ssigp1 + p027*stressp_2 - + !----------------------------------------------------------------- ! for dF/dx (u momentum) !----------------------------------------------------------------- @@ -1339,7 +1339,7 @@ end subroutine calc_zeta_dPr ! Lemieux, J.-F., and Dupont, F. (2020), On the calculation of normalized ! viscous-plastic sea ice stresses, Geosci. Model Dev., 13, 1763–1769, - + subroutine stress_vp (nx_block , ny_block , & icellt , & indxti , indxtj , & @@ -1380,7 +1380,7 @@ subroutine stress_vp (nx_block , ny_block , & zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) etax2 , & ! etax2 = 2*eta (shear viscosity) rep_prs - + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22 stressm_1, stressm_2, stressm_3, stressm_4 , & ! sigma11-sigma22 @@ -1426,17 +1426,17 @@ subroutine stress_vp (nx_block , ny_block , & ! the stresses ! kg/s^2 ! 
(1) northeast, (2) northwest, (3) southwest, (4) southeast !----------------------------------------------------------------- - + stressp_1(i,j) = zetax2(i,j,1)*divune - rep_prs(i,j,1) stressp_2(i,j) = zetax2(i,j,2)*divunw - rep_prs(i,j,2) stressp_3(i,j) = zetax2(i,j,3)*divusw - rep_prs(i,j,3) stressp_4(i,j) = zetax2(i,j,4)*divuse - rep_prs(i,j,4) - + stressm_1(i,j) = etax2(i,j,1)*tensionne stressm_2(i,j) = etax2(i,j,2)*tensionnw stressm_3(i,j) = etax2(i,j,3)*tensionsw stressm_4(i,j) = etax2(i,j,4)*tensionse - + stress12_1(i,j) = etax2(i,j,1)*shearne*p5 stress12_2(i,j) = etax2(i,j,2)*shearnw*p5 stress12_3(i,j) = etax2(i,j,3)*shearsw*p5 @@ -1505,7 +1505,7 @@ subroutine calc_vrel_Cb (nx_block, ny_block, & ! (magnitude of relative ocean current)*rhow*drag*aice vrel(i,j) = aiu(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uvel(i,j))**2 + & (vocn(i,j) - vvel(i,j))**2) ! m/s - + Cb(i,j) = Tbu(i,j) / (sqrt(uvel(i,j)**2 + vvel(i,j)**2) + u0) ! for seabed stress enddo ! ij @@ -1549,7 +1549,7 @@ subroutine calc_seabed_stress (nx_block, ny_block, & do ij = 1, icellu i = indxui(ij) j = indxuj(ij) - + taubx(i,j) = -uvel(i,j)*Cb(i,j) tauby(i,j) = -vvel(i,j)*Cb(i,j) enddo ! ij @@ -1690,12 +1690,12 @@ subroutine matvec (nx_block, ny_block, & stressp_2 = zetax2(i,j,2)*divunw! - Deltanw*(c1-Ktens)) stressp_3 = zetax2(i,j,3)*divusw! - Deltasw*(c1-Ktens)) stressp_4 = zetax2(i,j,4)*divuse! - Deltase*(c1-Ktens)) - + stressm_1 = etax2(i,j,1)*tensionne stressm_2 = etax2(i,j,2)*tensionnw stressm_3 = etax2(i,j,3)*tensionsw stressm_4 = etax2(i,j,4)*tensionse - + stress12_1 = etax2(i,j,1)*shearne*p5 stress12_2 = etax2(i,j,2)*shearnw*p5 stress12_3 = etax2(i,j,3)*shearsw*p5 @@ -1730,12 +1730,12 @@ subroutine matvec (nx_block, ny_block, & csigpnw = p111*stressp_2 + ssigp1 + p027*stressp_4 csigpsw = p111*stressp_3 + ssigp2 + p027*stressp_1 csigpse = p111*stressp_4 + ssigp1 + p027*stressp_2 - + csigmne = p111*stressm_1 + ssigm2 + p027*stressm_3 csigmnw = p111*stressm_2 + ssigm1 + p027*stressm_4 csigmsw = p111*stressm_3 + ssigm2 + p027*stressm_1 csigmse = p111*stressm_4 + ssigm1 + p027*stressm_2 - + csig12ne = p222*stress12_1 + ssig122 & + p055*stress12_3 csig12nw = p222*stress12_2 + ssig121 & @@ -1801,7 +1801,7 @@ subroutine matvec (nx_block, ny_block, & - dyhx(i,j)*(csigpsw + csigmsw) + dxhy(i,j)*csig12sw enddo ! ij - icellt - + !----------------------------------------------------------------- ! Form Au and Av !----------------------------------------------------------------- @@ -1811,7 +1811,7 @@ subroutine matvec (nx_block, ny_block, & j = indxuj(ij) ccaimp = umassdti(i,j) + vrel(i,j) * cosw + Cb(i,j) ! kg/m^2 s - + ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel(i,j) * sinw ! kg/m^2 s ! divergence of the internal stress tensor @@ -1924,11 +1924,11 @@ subroutine calc_bvec (nx_block, ny_block, & rhow ! character(len=*), parameter :: subname = '(calc_bvec)' - + !----------------------------------------------------------------- ! calc b vector !----------------------------------------------------------------- - + call icepack_query_parameters(rhow_out=rhow) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -1941,7 +1941,7 @@ subroutine calc_bvec (nx_block, ny_block, & ! ice/ocean stress taux = vrel(i,j)*waterx(i,j) ! NOTE this is not the entire tauy = vrel(i,j)*watery(i,j) ! ocn stress term - + ! divergence of the internal stress tensor (only Pr part, i.e. 
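
The two coefficients formed in calc_vrel_Cb linearize the water drag and the seabed stress around the current velocity iterate. A one-point sketch with hypothetical values (u0 is the small residual speed that keeps Cb finite as the ice velocity vanishes):

program drag_sketch
   ! Sketch of the linearized drag coefficients: vrel multiplies
   ! (uocn - u) in the momentum equation; Cb linearizes the seabed
   ! stress so that taubx = -u*Cb. All inputs are hypothetical.
   integer, parameter :: dbl_kind = kind(1.0d0)
   real (kind=dbl_kind), parameter :: u0 = 5.0e-5_dbl_kind  ! residual speed (m/s)
   real (kind=dbl_kind) :: aiu, rhow, Cw, uocn, vocn, uvel, vvel, Tbu
   real (kind=dbl_kind) :: vrel, Cb

   aiu  = 0.95_dbl_kind;  rhow = 1026.0_dbl_kind;  Cw = 0.0055_dbl_kind
   uocn = 0.10_dbl_kind;  vocn = 0.0_dbl_kind      ! ocean current (m/s)
   uvel = 0.05_dbl_kind;  vvel = 0.01_dbl_kind     ! ice velocity (m/s)
   Tbu  = 1.0e-2_dbl_kind                          ! seabed stress factor (N/m^2)

   vrel = aiu*rhow*Cw*sqrt((uocn-uvel)**2 + (vocn-vvel)**2)  ! kg/m^2/s
   Cb   = Tbu / (sqrt(uvel**2 + vvel**2) + u0)               ! kg/m^2/s

   print *, 'vrel =', vrel, '  Cb =', Cb, '  taubx =', -uvel*Cb
end program drag_sketch
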
dPr/dx, dPr/dy) strintx = uarear(i,j)* & (stPr(i,j,1) + stPr(i+1,j,2) + stPr(i,j+1,3) + stPr(i+1,j+1,4)) @@ -2004,7 +2004,7 @@ subroutine residual_vec (nx_block , ny_block, & if (present(sum_squared)) then sum_squared = c0 endif - + do ij = 1, icellu i = indxui(ij) j = indxuj(ij) @@ -2089,11 +2089,11 @@ subroutine formDiag_step1 (nx_block, ny_block, & !----------------------------------------------------------------- Drheo(:,:,:) = c0 - + ! Be careful: Drheo contains 4 terms for u and 4 terms for v. ! These 8 terms come from the surrounding T cells but are all ! refrerenced to the i,j (u point) : - + ! Drheo(i,j,1) corresponds to str(i,j,1) ! Drheo(i,j,2) corresponds to str(i+1,j,2) ! Drheo(i,j,3) corresponds to str(i,j+1,3) @@ -2102,9 +2102,9 @@ subroutine formDiag_step1 (nx_block, ny_block, & ! Drheo(i,j,6) corresponds to str(i,j+1,6) ! Drheo(i,j,7) corresponds to str(i+1,j,7) ! Drheo(i,j,8) corresponds to str(i+1,j+1,8)) - + do cc = 1, 8 ! 4 for u and 4 for v - + if (cc == 1) then ! u comp, T cell i,j uij = c1 ui1j = c0 @@ -2196,12 +2196,12 @@ subroutine formDiag_step1 (nx_block, ny_block, & endif do ij = 1, icellu - + iu = indxui(ij) ju = indxuj(ij) i = iu + di j = ju + dj - + !----------------------------------------------------------------- ! strain rates ! NOTE these are actually strain rates * area (m^2/s) @@ -2235,22 +2235,22 @@ subroutine formDiag_step1 (nx_block, ny_block, & - cxp(i,j)*ui1j1 + dxt(i,j)*ui1j shearse = -cym(i,j)*vij1 - dyt(i,j)*vi1j1 & - cxp(i,j)*uij1 + dxt(i,j)*uij - + !----------------------------------------------------------------- ! the stresses ! kg/s^2 ! (1) northeast, (2) northwest, (3) southwest, (4) southeast !----------------------------------------------------------------- - + stressp_1 = zetax2(i,j,1)*divune stressp_2 = zetax2(i,j,2)*divunw stressp_3 = zetax2(i,j,3)*divusw stressp_4 = zetax2(i,j,4)*divuse - + stressm_1 = etax2(i,j,1)*tensionne stressm_2 = etax2(i,j,2)*tensionnw stressm_3 = etax2(i,j,3)*tensionsw stressm_4 = etax2(i,j,4)*tensionse - + stress12_1 = etax2(i,j,1)*shearne*p5 stress12_2 = etax2(i,j,2)*shearnw*p5 stress12_3 = etax2(i,j,3)*shearsw*p5 @@ -2285,12 +2285,12 @@ subroutine formDiag_step1 (nx_block, ny_block, & csigpnw = p111*stressp_2 + ssigp1 + p027*stressp_4 csigpsw = p111*stressp_3 + ssigp2 + p027*stressp_1 csigpse = p111*stressp_4 + ssigp1 + p027*stressp_2 - + csigmne = p111*stressm_1 + ssigm2 + p027*stressm_3 csigmnw = p111*stressm_2 + ssigm1 + p027*stressm_4 csigmsw = p111*stressm_3 + ssigm2 + p027*stressm_1 csigmse = p111*stressm_4 + ssigm1 + p027*stressm_2 - + csig12ne = p222*stress12_1 + ssig122 & + p055*stress12_3 csig12nw = p222*stress12_2 + ssig121 & @@ -2308,27 +2308,27 @@ subroutine formDiag_step1 (nx_block, ny_block, & !----------------------------------------------------------------- ! for dF/dx (u momentum) !----------------------------------------------------------------- - + if (cc == 1) then ! T cell i,j - + strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) strm_tmp = p25*dyt(i,j)*(p333*ssigmn + p166*ssigms) ! northeast (i,j) Drheo(iu,ju,1) = -strp_tmp - strm_tmp - str12ew & + dxhy(i,j)*(-csigpne + csigmne) + dyhx(i,j)*csig12ne - + elseif (cc == 2) then ! T cell i+1,j - + strp_tmp = p25*dyt(i,j)*(p333*ssigpn + p166*ssigps) strm_tmp = p25*dyt(i,j)*(p333*ssigmn + p166*ssigms) - + ! northwest (i+1,j) Drheo(iu,ju,2) = strp_tmp + strm_tmp - str12we & + dxhy(i,j)*(-csigpnw + csigmnw) + dyhx(i,j)*csig12nw elseif (cc == 3) then ! 
T cell i,j+1 - + strp_tmp = p25*dyt(i,j)*(p333*ssigps + p166*ssigpn) strm_tmp = p25*dyt(i,j)*(p333*ssigms + p166*ssigmn) @@ -2337,10 +2337,10 @@ subroutine formDiag_step1 (nx_block, ny_block, & + dxhy(i,j)*(-csigpse + csigmse) + dyhx(i,j)*csig12se elseif (cc == 4) then ! T cell i+1,j+1 - + strp_tmp = p25*dyt(i,j)*(p333*ssigps + p166*ssigpn) strm_tmp = p25*dyt(i,j)*(p333*ssigms + p166*ssigmn) - + ! southwest (i+1,j+1) Drheo(iu,ju,4) = strp_tmp + strm_tmp + str12we & + dxhy(i,j)*(-csigpsw + csigmsw) + dyhx(i,j)*csig12sw @@ -2348,9 +2348,9 @@ subroutine formDiag_step1 (nx_block, ny_block, & !----------------------------------------------------------------- ! for dF/dy (v momentum) !----------------------------------------------------------------- - + elseif (cc == 5) then ! T cell i,j - + strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw) strm_tmp = p25*dxt(i,j)*(p333*ssigme + p166*ssigmw) @@ -2359,16 +2359,16 @@ subroutine formDiag_step1 (nx_block, ny_block, & - dyhx(i,j)*(csigpne + csigmne) + dxhy(i,j)*csig12ne elseif (cc == 6) then ! T cell i,j+1 - + strp_tmp = p25*dxt(i,j)*(p333*ssigpe + p166*ssigpw) strm_tmp = p25*dxt(i,j)*(p333*ssigme + p166*ssigmw) - + ! southeast (i,j+1) Drheo(iu,ju,6) = strp_tmp - strm_tmp - str12sn & - dyhx(i,j)*(csigpse + csigmse) + dxhy(i,j)*csig12se elseif (cc == 7) then ! T cell i,j+1 - + strp_tmp = p25*dxt(i,j)*(p333*ssigpw + p166*ssigpe) strm_tmp = p25*dxt(i,j)*(p333*ssigmw + p166*ssigme) @@ -2377,14 +2377,14 @@ subroutine formDiag_step1 (nx_block, ny_block, & - dyhx(i,j)*(csigpnw + csigmnw) + dxhy(i,j)*csig12nw elseif (cc == 8) then ! T cell i+1,j+1 - + strp_tmp = p25*dxt(i,j)*(p333*ssigpw + p166*ssigpe) strm_tmp = p25*dxt(i,j)*(p333*ssigmw + p166*ssigme) - + ! southwest (i+1,j+1) Drheo(iu,ju,8) = strp_tmp - strm_tmp + str12sn & - dyhx(i,j)*(csigpsw + csigmsw) + dxhy(i,j)*csig12sw - + endif enddo ! ij @@ -2444,7 +2444,7 @@ subroutine formDiag_step2 (nx_block, ny_block, & strintx = c0 strinty = c0 - + ! Be careful: Drheo contains 4 terms for u and 4 terms for v. ! These 8 terms come from the surrounding T cells but are all ! refrerenced to the i,j (u point) : @@ -2457,13 +2457,13 @@ subroutine formDiag_step2 (nx_block, ny_block, & ! Drheo(i,j,6) corresponds to str(i,j+1,6) ! Drheo(i,j,7) corresponds to str(i+1,j,7) ! Drheo(i,j,8) corresponds to str(i+1,j+1,8)) - + do ij = 1, icellu i = indxui(ij) j = indxuj(ij) ccaimp = umassdti(i,j) + vrel(i,j) * cosw + Cb(i,j) ! kg/m^2 s - + strintx = uarear(i,j)* & (Drheo(i,j,1) + Drheo(i,j,2) + Drheo(i,j,3) + Drheo(i,j,4)) strinty = uarear(i,j)* & @@ -2512,14 +2512,14 @@ subroutine calc_L2norm_squared (nx_block, ny_block, & !----------------------------------------------------------------- L2norm = c0 - + do ij = 1, icellu i = indxui(ij) j = indxuj(ij) - + L2norm = L2norm + tpu(i,j)**2 + tpv(i,j)**2 enddo ! ij - + end subroutine calc_L2norm_squared !======================================================================= @@ -2566,7 +2566,7 @@ subroutine arrays_to_vec (nx_block, ny_block , & outvec(:) = c0 tot = 0 - + do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -2603,7 +2603,7 @@ subroutine vec_to_arrays (nx_block, ny_block , & integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks), intent(in) :: & indxui , & ! compressed index in i-direction indxuj ! compressed index in j-direction - + real (kind=dbl_kind), dimension (ntot), intent(in) :: & invec ! 
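
The long cc = 1..8 branches above implement a single idea: probe the matrix-free stress operator with unit velocities so that each pass recovers one contribution to the matrix diagonal. The same idea demonstrated on a small explicit matrix standing in for the rheology operator:

program diag_probe_sketch
   ! Sketch of the probing idea behind formDiag_step1/2: the diagonal
   ! of a linear operator A can be read off by applying A to unit
   ! vectors, diag(A)(k) = (A e_k)(k).
   integer, parameter :: dbl_kind = kind(1.0d0)
   integer, parameter :: n = 4
   real (kind=dbl_kind) :: A(n,n), e(n), diag(n)
   integer :: k

   call random_number(A)

   do k = 1, n
      e    = 0.0_dbl_kind
      e(k) = 1.0_dbl_kind                ! unit vector e_k
      diag(k) = dot_product(A(k,:), e)   ! k-th entry of A*e_k
   enddo

   print *, 'max diag error:', maxval(abs(diag - (/ (A(k,k), k=1,n) /)))
end program diag_probe_sketch
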
input 1D vector @@ -2625,7 +2625,7 @@ subroutine vec_to_arrays (nx_block, ny_block , & tpu(:,:,:) = c0 tpv(:,:,:) = c0 tot = 0 - + do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) @@ -2654,9 +2654,9 @@ subroutine qr_delete(Q, R) real (kind=dbl_kind), intent(inout) :: & Q(:,:), & ! Q factor R(:,:) ! R factor - + ! local variables - + integer (kind=int_kind) :: & i, j, k, & ! loop indices m, n ! size of Q matrix @@ -2665,7 +2665,7 @@ subroutine qr_delete(Q, R) temp, c, s character(len=*), parameter :: subname = '(qr_delete)' - + n = size(Q, 1) m = size(Q, 2) do i = 1, m-1 @@ -2688,7 +2688,7 @@ subroutine qr_delete(Q, R) enddo enddo R(:, 1:m-1) = R(:, 2:m) - + end subroutine qr_delete !======================================================================= @@ -2716,7 +2716,7 @@ subroutine fgmres (zetax2 , etax2 , & real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) etax2 ! etax2 = 2*eta (shear viscosity) - + real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & vrel , & ! coefficient for tauw Cb , & ! seabed stress coefficient @@ -2802,18 +2802,18 @@ subroutine fgmres (zetax2 , etax2 , & ! Initialize outiter = 0 nbiter = 0 - + norm_squared = c0 precond_type = precond - + ! Cells with no ice should be zero-initialized workspace_x = c0 workspace_y = c0 arnoldi_basis_x = c0 arnoldi_basis_y = c0 - + ! Residual of the initial iterate - + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & @@ -2839,7 +2839,7 @@ subroutine fgmres (zetax2 , etax2 , & arnoldi_basis_y (:,:,iblk, 1)) enddo !$OMP END PARALLEL DO - + ! Start outer (restarts) loop do ! Compute norm of initial residual @@ -2855,17 +2855,17 @@ subroutine fgmres (zetax2 , etax2 , & enddo !$OMP END PARALLEL DO norm_residual = sqrt(global_sum(sum(norm_squared), distrb_info)) - + if (my_task == master_task .and. monitor_fgmres) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_fgmres: iter_fgmres= ", nbiter, & " fgmres_L2norm= ", norm_residual endif - + ! Current guess is a good enough solution TODO: reactivate and test this ! if (norm_residual < tolerance) then ! return ! end if - + ! Normalize the first Arnoldi vector inverse_norm = c1 / norm_residual !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) @@ -2879,20 +2879,20 @@ subroutine fgmres (zetax2 , etax2 , & enddo ! ij enddo !$OMP END PARALLEL DO - + if (outiter == 0) then relative_tolerance = tolerance * norm_residual end if - + ! Initialize 1-st term of RHS of Hessenberg system rhs_hess(1) = norm_residual rhs_hess(2:) = c0 - + initer = 0 - + ! Start of inner (Arnoldi) loop do - + nbiter = nbiter + 1 initer = initer + 1 nextit = initer + 1 @@ -2907,7 +2907,7 @@ subroutine fgmres (zetax2 , etax2 , & workspace_x , workspace_y) orig_basis_x(:,:,:,initer) = workspace_x orig_basis_y(:,:,:,initer) = workspace_y - + ! Update workspace with boundary values call stack_fields(workspace_x, workspace_y, fld2) call ice_timer_start(timer_bound) @@ -2940,13 +2940,13 @@ subroutine fgmres (zetax2 , etax2 , & arnoldi_basis_y(:,:,iblk,nextit)) enddo !$OMP END PARALLEL DO - + ! Orthogonalize the new vector call orthogonalize(ortho_type , initer , & nextit , maxinner , & arnoldi_basis_x, arnoldi_basis_y, & hessenberg) - + ! 
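
qr_delete above restores the QR factors with a sweep of Givens rotations after the oldest residual column is dropped, and the same 2x2 rotation drives the Hessenberg updates in the GMRES routines below. A minimal sketch of one rotation:

program givens_sketch
   ! Sketch of a Givens rotation: choose c, s so that the rotation
   ! zeroes the second component of the pair (a, b).
   integer, parameter :: dbl_kind = kind(1.0d0)
   real (kind=dbl_kind) :: a, b, nu, c, s, r1, r2

   a = 3.0_dbl_kind
   b = 4.0_dbl_kind

   nu = sqrt(a**2 + b**2)
   c  = a / nu
   s  = b / nu

   r1 =  c*a + s*b   ! -> nu (5 here)
   r2 = -s*a + c*b   ! -> 0

   print *, 'rotated:', r1, r2
end program givens_sketch
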
Compute norm of new Arnoldi vector and update Hessenberg matrix !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -2959,7 +2959,7 @@ subroutine fgmres (zetax2 , etax2 , & enddo !$OMP END PARALLEL DO hessenberg(nextit,initer) = sqrt(global_sum(sum(norm_squared), distrb_info)) - + ! Watch out for happy breakdown if (.not. almost_zero( hessenberg(nextit,initer) ) ) then ! Normalize next Arnoldi vector @@ -2969,14 +2969,14 @@ subroutine fgmres (zetax2 , etax2 , & do ij = 1, icellu(iblk) i = indxui(ij, iblk) j = indxuj(ij, iblk) - + arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit)*inverse_norm arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit)*inverse_norm enddo ! ij enddo !$OMP END PARALLEL DO end if - + ! Apply previous Givens rotation to the last column of the Hessenberg matrix if (initer > 1) then do k = 2, initer @@ -2985,33 +2985,33 @@ subroutine fgmres (zetax2 , etax2 , & hessenberg(k, initer) = -rot_sin(k-1)*t + rot_cos(k-1)*hessenberg(k, initer) end do end if - + ! Compute and apply new Givens rotation nu = sqrt(hessenberg(initer,initer)**2 + hessenberg(nextit,initer)**2) if (.not. almost_zero(nu)) then rot_cos(initer) = hessenberg(initer,initer) / nu rot_sin(initer) = hessenberg(nextit,initer) / nu - + rhs_hess(nextit) = -rot_sin(initer) * rhs_hess(initer) rhs_hess(initer) = rot_cos(initer) * rhs_hess(initer) - + hessenberg(initer,initer) = rot_cos(initer) * hessenberg(initer,initer) + rot_sin(initer) * hessenberg(nextit,initer) end if - + ! Check for convergence norm_residual = abs(rhs_hess(nextit)) - + if (my_task == master_task .and. monitor_fgmres) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_fgmres: iter_fgmres= ", nbiter, & " fgmres_L2norm= ", norm_residual endif - + if ((initer >= maxinner) .or. (norm_residual <= relative_tolerance)) then exit endif - + end do ! end of inner (Arnoldi) loop - + ! At this point either the maximum number of inner iterations ! was reached or the absolute residual is below the scaled tolerance. @@ -3026,7 +3026,7 @@ subroutine fgmres (zetax2 , etax2 , & end do rhs_hess(k) = t / hessenberg(k,k) end do - + ! Form linear combination to get new solution iterate do it = 1, initer t = rhs_hess(it) @@ -3042,7 +3042,7 @@ subroutine fgmres (zetax2 , etax2 , & enddo !$OMP END PARALLEL DO end do - + ! Increment outer loop counter and check for convergence outiter = outiter + 1 if (norm_residual <= relative_tolerance .or. outiter >= maxouter) then @@ -3050,7 +3050,7 @@ subroutine fgmres (zetax2 , etax2 , & end if ! Solution is not convergent : compute residual vector and continue. - + ! The residual vector is computed here using (see Saad p. 177) : ! \begin{equation} ! r = V_{m+1} * Q_m^T * (\gamma_{m+1} * e_{m+1}) @@ -3061,7 +3061,7 @@ subroutine fgmres (zetax2 , etax2 , & ! $Q_m$ is the product of the Givens rotation : Q_m = G_m G_{m-1} ... G_1 ! $gamma_{m+1}$ is the last element of rhs_hess ! $e_{m+1})$ is the unit vector (0, 0, ..., 1)^T \in \reals^{m+1} - + ! Apply the Givens rotation in reverse order to g := \gamma_{m+1} * e_{m+1}, ! store the result in rhs_hess do it = 1, initer @@ -3069,7 +3069,7 @@ subroutine fgmres (zetax2 , etax2 , & rhs_hess(jj-1) = -rot_sin(jj-1) * rhs_hess(jj) ! + rot_cos(jj-1) * g(jj-1) (== 0) rhs_hess(jj) = rot_cos(jj-1) * rhs_hess(jj) ! + rot_sin(jj-1) * g(jj-1) (== 0) end do - + ! 
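
Once the rotations have triangularized the Hessenberg matrix, the small least-squares problem reduces to the back-substitution performed just below. A standalone sketch on a 3x3 upper-triangular system (numbers hypothetical):

program backsub_sketch
   ! Sketch of the dense back-substitution used after the Arnoldi loop:
   ! solve R * y = g for upper-triangular R.
   integer, parameter :: dbl_kind = kind(1.0d0)
   integer, parameter :: m = 3
   real (kind=dbl_kind) :: R(m,m), g(m), y(m), t
   integer :: k, it

   R = reshape((/ 2.0_dbl_kind, 0.0_dbl_kind, 0.0_dbl_kind,   &
                  1.0_dbl_kind, 3.0_dbl_kind, 0.0_dbl_kind,   &
                  0.5_dbl_kind, 1.0_dbl_kind, 4.0_dbl_kind /), (/ m, m /))
   g = (/ 1.0_dbl_kind, 2.0_dbl_kind, 3.0_dbl_kind /)

   do k = m, 1, -1
      t = g(k)
      do it = k+1, m
         t = t - R(k,it)*y(it)
      enddo
      y(k) = t / R(k,k)
   enddo

   print *, 'y =', y
end program backsub_sketch
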
Compute the residual by multiplying V_{m+1} and rhs_hess workspace_x = c0 workspace_y = c0 @@ -3089,7 +3089,7 @@ subroutine fgmres (zetax2 , etax2 , & arnoldi_basis_y(:,:,:,1) = workspace_y end do end do ! end of outer (restarts) loop - + end subroutine fgmres !======================================================================= @@ -3112,7 +3112,7 @@ subroutine pgmres (zetax2 , etax2 , & real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) etax2 ! etax2 = 2*eta (shear viscosity) - + real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & vrel , & ! coefficient for tauw Cb , & ! seabed stress coefficient @@ -3188,25 +3188,25 @@ subroutine pgmres (zetax2 , etax2 , & relative_tolerance ! relative_tolerance, i.e. tolerance*norm(initial residual) character(len=*), parameter :: subname = '(pgmres)' - + ! Here we go ! ! Initialize outiter = 0 nbiter = 0 - + norm_squared = c0 precond_type = 'diag' ! Jacobi preconditioner ortho_type = 'cgs' ! classical gram-schmidt TODO: try with MGS - + ! Cells with no ice should be zero-initialized workspace_x = c0 workspace_y = c0 arnoldi_basis_x = c0 arnoldi_basis_y = c0 - + ! Residual of the initial iterate - + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & @@ -3232,7 +3232,7 @@ subroutine pgmres (zetax2 , etax2 , & arnoldi_basis_y (:,:,iblk, 1)) enddo !$OMP END PARALLEL DO - + ! Start outer (restarts) loop do ! Compute norm of initial residual @@ -3248,17 +3248,17 @@ subroutine pgmres (zetax2 , etax2 , & enddo !$OMP END PARALLEL DO norm_residual = sqrt(global_sum(sum(norm_squared), distrb_info)) - + if (my_task == master_task .and. monitor_pgmres) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_pgmres: iter_pgmres= ", nbiter, & " pgmres_L2norm= ", norm_residual endif - + ! Current guess is a good enough solution ! if (norm_residual < tolerance) then ! return ! end if - + ! Normalize the first Arnoldi vector inverse_norm = c1 / norm_residual !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) @@ -3272,24 +3272,24 @@ subroutine pgmres (zetax2 , etax2 , & enddo ! ij enddo !$OMP END PARALLEL DO - + if (outiter == 0) then relative_tolerance = tolerance * norm_residual end if - + ! Initialize 1-st term of RHS of Hessenberg system rhs_hess(1) = norm_residual rhs_hess(2:) = c0 - + initer = 0 - + ! Start of inner (Arnoldi) loop do - + nbiter = nbiter + 1 initer = initer + 1 nextit = initer + 1 - + ! precondition the current Arnoldi vector call precondition(zetax2 , etax2 , & Cb , vrel , & @@ -3299,10 +3299,10 @@ subroutine pgmres (zetax2 , etax2 , & diagx , diagy , & precond_type, & workspace_x , workspace_y) - + ! NOTE: halo updates for (workspace_x, workspace_y) ! are skipped here for efficiency since this is just a preconditioner - + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & @@ -3322,13 +3322,13 @@ subroutine pgmres (zetax2 , etax2 , & arnoldi_basis_y(:,:,iblk,nextit)) enddo !$OMP END PARALLEL DO - + ! Orthogonalize the new vector call orthogonalize(ortho_type , initer , & nextit , maxinner , & arnoldi_basis_x, arnoldi_basis_y, & hessenberg) - + ! Compute norm of new Arnoldi vector and update Hessenberg matrix !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -3341,7 +3341,7 @@ subroutine pgmres (zetax2 , etax2 , & enddo !$OMP END PARALLEL DO hessenberg(nextit,initer) = sqrt(global_sum(sum(norm_squared), distrb_info)) - + ! Watch out for happy breakdown if (.not. 
almost_zero( hessenberg(nextit,initer) ) ) then ! Normalize next Arnoldi vector @@ -3351,14 +3351,14 @@ subroutine pgmres (zetax2 , etax2 , & do ij = 1, icellu(iblk) i = indxui(ij, iblk) j = indxuj(ij, iblk) - + arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit)*inverse_norm arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit)*inverse_norm enddo ! ij enddo !$OMP END PARALLEL DO end if - + ! Apply previous Givens rotation to the last column of the Hessenberg matrix if (initer > 1) then do k = 2, initer @@ -3367,33 +3367,33 @@ subroutine pgmres (zetax2 , etax2 , & hessenberg(k, initer) = -rot_sin(k-1)*t + rot_cos(k-1)*hessenberg(k, initer) end do end if - + ! Compute and apply new Givens rotation nu = sqrt(hessenberg(initer,initer)**2 + hessenberg(nextit,initer)**2) if (.not. almost_zero(nu)) then rot_cos(initer) = hessenberg(initer,initer) / nu rot_sin(initer) = hessenberg(nextit,initer) / nu - + rhs_hess(nextit) = -rot_sin(initer) * rhs_hess(initer) rhs_hess(initer) = rot_cos(initer) * rhs_hess(initer) - + hessenberg(initer,initer) = rot_cos(initer) * hessenberg(initer,initer) + rot_sin(initer) * hessenberg(nextit,initer) end if - + ! Check for convergence norm_residual = abs(rhs_hess(nextit)) - + if (my_task == master_task .and. monitor_pgmres) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_pgmres: iter_pgmres= ", nbiter, & " pgmres_L2norm= ", norm_residual endif - + if ((initer >= maxinner) .or. (norm_residual <= relative_tolerance)) then exit endif - + end do ! end of inner (Arnoldi) loop - + ! At this point either the maximum number of inner iterations ! was reached or the absolute residual is below the scaled tolerance. @@ -3408,7 +3408,7 @@ subroutine pgmres (zetax2 , etax2 , & end do rhs_hess(k) = t / hessenberg(k,k) end do - + ! Form linear combination to get new solution iterate workspace_x = c0 workspace_y = c0 @@ -3426,7 +3426,7 @@ subroutine pgmres (zetax2 , etax2 , & enddo !$OMP END PARALLEL DO end do - + ! Call preconditioner call precondition(zetax2 , etax2 , & Cb , vrel , & @@ -3435,10 +3435,10 @@ subroutine pgmres (zetax2 , etax2 , & diagx , diagy , & precond_type, & workspace_x , workspace_y) - + solx = solx + workspace_x soly = soly + workspace_y - + ! Increment outer loop counter and check for convergence outiter = outiter + 1 if (norm_residual <= relative_tolerance .or. outiter >= maxouter) then @@ -3446,7 +3446,7 @@ subroutine pgmres (zetax2 , etax2 , & end if ! Solution is not convergent : compute residual vector and continue. - + ! The residual vector is computed here using (see Saad p. 177) : ! \begin{equation} ! r = V_{m+1} * Q_m^T * (\gamma_{m+1} * e_{m+1}) @@ -3457,7 +3457,7 @@ subroutine pgmres (zetax2 , etax2 , & ! $Q_m$ is the product of the Givens rotation : Q_m = G_m G_{m-1} ... G_1 ! $gamma_{m+1}$ is the last element of rhs_hess ! $e_{m+1})$ is the unit vector (0, 0, ..., 1)^T \in \reals^{m+1} - + ! Apply the Givens rotation in reverse order to g := \gamma_{m+1} * e_{m+1}, ! store the result in rhs_hess do it = 1, initer @@ -3465,7 +3465,7 @@ subroutine pgmres (zetax2 , etax2 , & rhs_hess(jj-1) = -rot_sin(jj-1) * rhs_hess(jj) ! + rot_cos(jj-1) * g(jj-1) (== 0) rhs_hess(jj) = rot_cos(jj-1) * rhs_hess(jj) ! + rot_sin(jj-1) * g(jj-1) (== 0) end do - + ! Compute the residual by multiplying V_{m+1} and rhs_hess workspace_x = c0 workspace_y = c0 @@ -3485,7 +3485,7 @@ subroutine pgmres (zetax2 , etax2 , & arnoldi_basis_y(:,:,:,1) = workspace_y end do end do ! 
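
With precond_type = 'diag', the preconditioner exercised above is a pointwise Jacobi scaling: each residual component is divided by the corresponding diagonal entry of the momentum operator. A sketch (diagx/diagy stand in for the diagonal assembled by formDiag_step1/2; values are hypothetical):

program jacobi_precond_sketch
   ! Sketch of the 'diag' (Jacobi) preconditioner: w = D^{-1} v applied
   ! pointwise to the u and v components.
   integer, parameter :: dbl_kind = kind(1.0d0)
   integer, parameter :: n = 4
   real (kind=dbl_kind) :: vx(n), vy(n), diagx(n), diagy(n), wx(n), wy(n)

   call random_number(vx)
   call random_number(vy)
   diagx = 2.0_dbl_kind    ! hypothetical diagonal entries
   diagy = 3.0_dbl_kind

   wx = vx / diagx         ! pointwise division, no halo traffic needed
   wy = vy / diagy

   print *, 'wx =', wx
end program jacobi_precond_sketch
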
end of outer (restarts) loop - + end subroutine pgmres !======================================================================= @@ -3505,7 +3505,7 @@ subroutine precondition(zetax2 , etax2, & real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) etax2 ! etax2 = 2*eta (shear viscosity) - + real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & vrel , & ! coefficient for tauw Cb , & ! seabed stress coefficient @@ -3633,20 +3633,20 @@ subroutine orthogonalize(ortho_type , initer , & dotprod_local = c0 do it = 1, initer local_dot = c0 - + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) j = indxuj(ij, iblk) - + local_dot(iblk) = local_dot(iblk) + & (arnoldi_basis_x(i, j, iblk, it) * arnoldi_basis_x(i, j, iblk, nextit)) + & (arnoldi_basis_y(i, j, iblk, it) * arnoldi_basis_y(i, j, iblk, nextit)) enddo ! ij enddo !$OMP END PARALLEL DO - + dotprod_local(it) = sum(local_dot) end do @@ -3659,7 +3659,7 @@ subroutine orthogonalize(ortho_type , initer , & do ij = 1, icellu(iblk) i = indxui(ij, iblk) j = indxuj(ij, iblk) - + arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit) & - hessenberg(it,initer) * arnoldi_basis_x(i, j, iblk, it) arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit) & @@ -3672,28 +3672,28 @@ subroutine orthogonalize(ortho_type , initer , & ! Modified Gram-Schmidt orthogonalisation process do it = 1, initer local_dot = c0 - + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) j = indxuj(ij, iblk) - + local_dot(iblk) = local_dot(iblk) + & (arnoldi_basis_x(i, j, iblk, it) * arnoldi_basis_x(i, j, iblk, nextit)) + & (arnoldi_basis_y(i, j, iblk, it) * arnoldi_basis_y(i, j, iblk, nextit)) enddo ! ij enddo !$OMP END PARALLEL DO - + hessenberg(it,initer) = global_sum(sum(local_dot), distrb_info) - + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks do ij = 1, icellu(iblk) i = indxui(ij, iblk) j = indxuj(ij, iblk) - + arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit) & - hessenberg(it,initer) * arnoldi_basis_x(i, j, iblk, it) arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit) & @@ -3706,7 +3706,7 @@ subroutine orthogonalize(ortho_type , initer , & call abort_ice(error_message='wrong orthonalization in ' // subname, & file=__FILE__, line=__LINE__) endif - + end subroutine orthogonalize !======================================================================= @@ -3737,7 +3737,7 @@ logical function almost_zero(A) result(retval) end if ! lexicographic order test with a tolerance of 1 adjacent float retval = (abs(aBit) <= 1) - + end function almost_zero !======================================================================= diff --git a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 index 38650459f..9c9fa25d4 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 @@ -2,12 +2,12 @@ ! ! Drivers for remapping and upwind ice transport ! -! authors: Elizabeth C. Hunke and William H. Lipscomb, LANL +! authors: Elizabeth C. Hunke and William H. Lipscomb, LANL ! ! 2004: Revised by William Lipscomb from ice_transport_mpdata. ! Stripped out mpdata, retained upwind, and added block structure. ! 2006: Incorporated remap transport driver and renamed from -! ice_transport_upwind. +! 
ice_transport_upwind. ! 2011: ECH moved edgearea arrays into ice_transport_remap.F90 module ice_transport_driver @@ -36,17 +36,17 @@ module ice_transport_driver ! 'upwind' => 1st order donor cell scheme ! 'remap' => remapping scheme - logical, parameter :: & ! if true, prescribe area flux across each edge - l_fixed_area = .false. + logical, parameter :: & + l_fixed_area = .false. ! if true, prescribe area flux across each edge ! NOTE: For remapping, hice and hsno are considered tracers. ! ntrace is not equal to ntrcr! integer (kind=int_kind) :: & ntrace ! number of tracers in use - + integer (kind=int_kind), dimension(:), allocatable, public :: & - tracer_type ,&! = 1, 2, or 3 (depends on 0, 1 or 2 other tracers) + tracer_type , & ! = 1, 2, or 3 (depends on 0, 1 or 2 other tracers) depend ! tracer dependencies (see below) logical (kind=log_kind), dimension (:), allocatable, public :: & @@ -56,13 +56,13 @@ module ice_transport_driver conserv_check ! if true, check conservation integer (kind=int_kind), parameter :: & - integral_order = 3 ! polynomial order of quadrature integrals - ! linear=1, quadratic=2, cubic=3 + integral_order = 3 ! polynomial order of quadrature integrals + ! linear=1, quadratic=2, cubic=3 logical (kind=log_kind), parameter :: & - l_dp_midpt = .true. ! if true, find departure points using - ! corrected midpoint velocity - + l_dp_midpt = .true. ! if true, find departure points using + ! corrected midpoint velocity + !======================================================================= contains @@ -84,158 +84,160 @@ subroutine init_transport integer (kind=int_kind) :: & k, nt, nt1 ! tracer indices - integer (kind=int_kind) :: ntrcr, nt_Tsfc, nt_qice, nt_qsno, & - nt_sice, nt_fbri, nt_iage, nt_FY, nt_alvl, nt_vlvl, & - nt_apnd, nt_hpnd, nt_ipnd, nt_fsd, & - nt_smice, nt_smliq, nt_rhos, nt_rsnw, & - nt_isosno, nt_isoice, nt_bgc_Nit, nt_bgc_S + integer (kind=int_kind) :: & + ntrcr , nt_Tsfc , nt_qice , nt_qsno , & + nt_sice , nt_fbri , nt_iage , nt_FY , & + nt_alvl , nt_vlvl , & + nt_apnd , nt_hpnd , nt_ipnd , nt_fsd , & + nt_smice , nt_smliq , nt_rhos , nt_rsnw , & + nt_isosno, nt_isoice, nt_bgc_Nit, nt_bgc_S character(len=*), parameter :: subname = '(init_transport)' - call ice_timer_start(timer_advect) ! advection + call ice_timer_start(timer_advect) ! advection call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_qice_out=nt_qice, & - nt_qsno_out=nt_qsno, nt_sice_out=nt_sice, nt_fbri_out=nt_fbri, & - nt_iage_out=nt_iage, nt_FY_out=nt_FY, nt_fsd_out=nt_fsd, & - nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & - nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd, nt_ipnd_out=nt_ipnd, & - nt_smice_out=nt_smice, nt_smliq_out=nt_smliq, nt_rhos_out=nt_rhos, & - nt_rsnw_out=nt_rsnw, & - nt_bgc_Nit_out=nt_bgc_Nit, nt_bgc_S_out=nt_bgc_S, & - nt_isosno_out=nt_isosno, nt_isoice_out=nt_isoice) + nt_qsno_out=nt_qsno, nt_sice_out=nt_sice, nt_fbri_out=nt_fbri, & + nt_iage_out=nt_iage, nt_FY_out=nt_FY, nt_fsd_out=nt_fsd, & + nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & + nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd, nt_ipnd_out=nt_ipnd, & + nt_smice_out=nt_smice, nt_smliq_out=nt_smliq, nt_rhos_out=nt_rhos, & + nt_rsnw_out=nt_rsnw, & + nt_bgc_Nit_out=nt_bgc_Nit, nt_bgc_S_out=nt_bgc_S, & + nt_isosno_out=nt_isosno, nt_isoice_out=nt_isoice) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) ntrace = 2 + ntrcr ! 
hice,hsno,trcr - if (allocated(tracer_type)) deallocate(tracer_type) - if (allocated(depend)) deallocate(depend) + if (allocated(tracer_type)) deallocate(tracer_type) + if (allocated(depend)) deallocate(depend) if (allocated(has_dependents)) deallocate(has_dependents) allocate (tracer_type (ntrace), & depend (ntrace), & has_dependents(ntrace)) - ! define tracer dependency arrays - ! see comments in remapping routine - - depend(1:2) = 0 ! hice, hsno - tracer_type(1:2) = 1 ! no dependency - - k = 2 - - do nt = 1, ntrcr - depend(k+nt) = trcr_depend(nt) ! 0 for ice area tracers - ! 1 for ice volume tracers - ! 2 for snow volume tracers - tracer_type(k+nt) = 2 ! depends on 1 other tracer - if (trcr_depend(nt) == 0) then - tracer_type(k+nt) = 1 ! depends on no other tracers - elseif (trcr_depend(nt) > 2) then - if (trcr_depend(trcr_depend(nt)-2) > 0) then - tracer_type(k+nt) = 3 ! depends on 2 other tracers - endif - endif - enddo - - has_dependents = .false. - do nt = 1, ntrace - if (depend(nt) > 0) then - nt1 = depend(nt) - has_dependents(nt1) = .true. - if (nt1 > nt) then - write(nu_diag,*) & - 'Tracer nt2 =',nt,' depends on tracer nt1 =',nt1 - call abort_ice(subname// & - 'ERROR: remap transport: Must have nt2 > nt1') - endif - endif - enddo ! ntrace - - ! diagnostic output - if (my_task == master_task) then - write (nu_diag, *) 'tracer index depend type has_dependents' - nt = 1 - write(nu_diag,1000) 'hi ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - nt = 2 - write(nu_diag,1000) 'hs ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - k=2 - do nt = k+1, k+ntrcr - if (nt-k==nt_Tsfc) & - write(nu_diag,1000) 'nt_Tsfc ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_qice) & - write(nu_diag,1000) 'nt_qice ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_qsno) & - write(nu_diag,1000) 'nt_qsno ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_sice) & - write(nu_diag,1000) 'nt_sice ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_fbri) & - write(nu_diag,1000) 'nt_fbri ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_iage) & - write(nu_diag,1000) 'nt_iage ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_FY) & - write(nu_diag,1000) 'nt_FY ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_alvl) & - write(nu_diag,1000) 'nt_alvl ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_vlvl) & - write(nu_diag,1000) 'nt_vlvl ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_apnd) & - write(nu_diag,1000) 'nt_apnd ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_hpnd) & - write(nu_diag,1000) 'nt_hpnd ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_ipnd) & - write(nu_diag,1000) 'nt_ipnd ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_smice) & - write(nu_diag,1000) 'nt_smice ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_smliq) & - write(nu_diag,1000) 'nt_smliq ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_rhos) & - write(nu_diag,1000) 'nt_rhos ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_rsnw) & - write(nu_diag,1000) 'nt_rsnw ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_fsd) & - write(nu_diag,1000) 'nt_fsd ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_isosno) & - write(nu_diag,1000) 'nt_isosno ',nt,depend(nt),tracer_type(nt),& - 
has_dependents(nt) - if (nt-k==nt_isoice) & - write(nu_diag,1000) 'nt_isoice ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_bgc_Nit) & - write(nu_diag,1000) 'nt_bgc_Nit ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - if (nt-k==nt_bgc_S) & - write(nu_diag,1000) 'nt_bgc_S ',nt,depend(nt),tracer_type(nt),& - has_dependents(nt) - enddo - write(nu_diag,*) ' ' - endif ! master_task - 1000 format (1x,a,2x,i6,2x,i6,2x,i4,4x,l4) - - if (trim(advection)=='remap') call init_remap ! grid quantities - - call ice_timer_stop(timer_advect) ! advection + ! define tracer dependency arrays + ! see comments in remapping routine + + depend(1:2) = 0 ! hice, hsno + tracer_type(1:2) = 1 ! no dependency + + k = 2 + + do nt = 1, ntrcr + depend(k+nt) = trcr_depend(nt) ! 0 for ice area tracers + ! 1 for ice volume tracers + ! 2 for snow volume tracers + tracer_type(k+nt) = 2 ! depends on 1 other tracer + if (trcr_depend(nt) == 0) then + tracer_type(k+nt) = 1 ! depends on no other tracers + elseif (trcr_depend(nt) > 2) then + if (trcr_depend(trcr_depend(nt)-2) > 0) then + tracer_type(k+nt) = 3 ! depends on 2 other tracers + endif + endif + enddo + + has_dependents = .false. + do nt = 1, ntrace + if (depend(nt) > 0) then + nt1 = depend(nt) + has_dependents(nt1) = .true. + if (nt1 > nt) then + write(nu_diag,*) & + 'Tracer nt2 =',nt,' depends on tracer nt1 =',nt1 + call abort_ice(subname// & + 'ERROR: remap transport: Must have nt2 > nt1') + endif + endif + enddo ! ntrace + + ! diagnostic output + if (my_task == master_task) then + write (nu_diag, *) 'tracer index depend type has_dependents' + nt = 1 + write(nu_diag,1000) 'hi ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + nt = 2 + write(nu_diag,1000) 'hs ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + k=2 + do nt = k+1, k+ntrcr + if (nt-k==nt_Tsfc) & + write(nu_diag,1000) 'nt_Tsfc ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_qice) & + write(nu_diag,1000) 'nt_qice ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_qsno) & + write(nu_diag,1000) 'nt_qsno ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_sice) & + write(nu_diag,1000) 'nt_sice ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_fbri) & + write(nu_diag,1000) 'nt_fbri ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_iage) & + write(nu_diag,1000) 'nt_iage ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_FY) & + write(nu_diag,1000) 'nt_FY ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_alvl) & + write(nu_diag,1000) 'nt_alvl ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_vlvl) & + write(nu_diag,1000) 'nt_vlvl ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_apnd) & + write(nu_diag,1000) 'nt_apnd ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_hpnd) & + write(nu_diag,1000) 'nt_hpnd ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_ipnd) & + write(nu_diag,1000) 'nt_ipnd ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_smice) & + write(nu_diag,1000) 'nt_smice ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_smliq) & + write(nu_diag,1000) 'nt_smliq ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_rhos) & + write(nu_diag,1000) 'nt_rhos ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_rsnw) & + write(nu_diag,1000) 'nt_rsnw ',nt,depend(nt),tracer_type(nt),& + 
has_dependents(nt) + if (nt-k==nt_fsd) & + write(nu_diag,1000) 'nt_fsd ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_isosno) & + write(nu_diag,1000) 'nt_isosno ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_isoice) & + write(nu_diag,1000) 'nt_isoice ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_bgc_Nit) & + write(nu_diag,1000) 'nt_bgc_Nit ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + if (nt-k==nt_bgc_S) & + write(nu_diag,1000) 'nt_bgc_S ',nt,depend(nt),tracer_type(nt),& + has_dependents(nt) + enddo + write(nu_diag,*) ' ' + endif ! master_task + 1000 format (1x,a,2x,i6,2x,i6,2x,i4,4x,l4) + + if (trim(advection)=='remap') call init_remap ! grid quantities + + call ice_timer_stop(timer_advect) ! advection end subroutine init_transport @@ -248,7 +250,7 @@ end subroutine init_transport ! ! This scheme preserves monotonicity of ice area and tracers. That is, ! it does not produce new extrema. It is second-order accurate in space, -! except where gradients are limited to preserve monotonicity. +! except where gradients are limited to preserve monotonicity. ! ! authors William H. Lipscomb, LANL @@ -273,88 +275,87 @@ subroutine transport_remap (dt) ! local variables - integer (kind=int_kind) :: & - iblk ,&! block index - ilo,ihi,jlo,jhi,&! beginning and end of physical domain - n ,&! ice category index - nt, nt1, nt2 ! tracer indices - - real (kind=dbl_kind), & - dimension (nx_block,ny_block,0:ncat,max_blocks) :: & - aim ,&! mean ice category areas in each grid cell + integer (kind=int_kind) :: & + iblk , & ! block index + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + n , & ! ice category index + nt, nt1, nt2 ! tracer indices + + real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat,max_blocks) :: & + aim , & ! mean ice category areas in each grid cell aimask ! = 1. if ice is present, = 0. otherwise - real (kind=dbl_kind), & - dimension (nx_block,ny_block,ntrace,ncat,max_blocks) :: & - trm ,&! mean tracer values in each grid cell + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace,ncat,max_blocks) :: & + trm , & ! mean tracer values in each grid cell trmask ! = 1. if tracer is present, = 0. otherwise - logical (kind=log_kind) :: & + logical (kind=log_kind) :: & ckflag ! if true, abort the model - integer (kind=int_kind) :: & - istop, jstop ! indices of grid cell where model aborts + integer (kind=int_kind) :: & + istop, jstop ! indices of grid cell where model aborts - integer (kind=int_kind), dimension(0:ncat,max_blocks) :: & + integer (kind=int_kind), dimension(0:ncat,max_blocks) :: & icellsnc ! number of cells with ice - integer (kind=int_kind), & - dimension(nx_block*ny_block,0:ncat,max_blocks) :: & - indxinc, indxjnc ! compressed i/j indices + integer (kind=int_kind), dimension(nx_block*ny_block,0:ncat,max_blocks) :: & + indxinc, indxjnc ! compressed i/j indices integer (kind=int_kind) :: & - ntrcr + ntrcr ! type (block) :: & - this_block ! block information for current block - + this_block ! block information for current block + ! variables related to optional bug checks - logical (kind=log_kind), parameter :: & + logical (kind=log_kind), parameter :: & l_monotonicity_check = .false. ! if true, check monotonicity - real (kind=dbl_kind), dimension(0:ncat) :: & - asum_init ,&! initial global ice area + real (kind=dbl_kind), dimension(0:ncat) :: & + asum_init , & ! initial global ice area asum_final ! 
final global ice area - real (kind=dbl_kind), dimension(ntrace,ncat) :: & - atsum_init ,&! initial global ice area*tracer + real (kind=dbl_kind), dimension(ntrace,ncat) :: & + atsum_init , & ! initial global ice area*tracer atsum_final ! final global ice area*tracer - real (kind=dbl_kind), dimension (:,:,:,:,:), allocatable :: & - tmin ,&! local min tracer - tmax ! local max tracer + real (kind=dbl_kind), dimension (:,:,:,:,:), allocatable :: & + tmin , & ! local min tracer + tmax ! local max tracer - integer (kind=int_kind) :: alloc_error + integer (kind=int_kind) :: & + alloc_error real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & work1 - character(len=char_len_long) :: fieldid + character(len=char_len_long) :: & + fieldid character(len=*), parameter :: subname = '(transport_remap)' - call ice_timer_start(timer_advect) ! advection + call ice_timer_start(timer_advect) ! advection call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) -!---!------------------------------------------------------------------- -!---! Prepare for remapping. -!---! Initialize, update ghost cells, fill tracer arrays. -!---!------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Prepare for remapping. + ! Initialize, update ghost cells, fill tracer arrays. + !------------------------------------------------------------------- ckflag = .false. istop = 0 jstop = 0 - !------------------------------------------------------------------- - ! Compute open water area in each grid cell. - ! Note: An aggregate_area call is needed only if the open - ! water area has changed since the previous call. - ! Here we assume that aice0 is up to date. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute open water area in each grid cell. + ! Note: An aggregate_area call is needed only if the open + ! water area has changed since the previous call. + ! Here we assume that aice0 is up to date. + !------------------------------------------------------------------- ! !$OMP PARALLEL DO PRIVATE(i,j,iblk) SCHEDULE(runtime) ! do iblk = 1, nblocks @@ -363,16 +364,16 @@ subroutine transport_remap (dt) ! call aggregate_area (ncat, ! aicen(i,j,:,iblk), & ! aice (i,j, iblk), & -! aice0(i,j, iblk)) +! aice0(i,j, iblk)) ! enddo ! enddo ! enddo ! !$OMP END PARALLEL DO - !------------------------------------------------------------------- - ! Ghost cell updates for state variables. - ! Commented out because ghost cells are updated after cleanup_itd. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Ghost cell updates for state variables. + ! Commented out because ghost cells are updated after cleanup_itd. + !------------------------------------------------------------------- ! call ice_timer_start(timer_bound) ! call ice_HaloUpdate (aice0, halo_info, & @@ -384,11 +385,11 @@ subroutine transport_remap (dt) ! call ice_timer_stop(timer_bound) - !------------------------------------------------------------------- - ! Ghost cell updates for ice velocity. - ! Commented out because ghost cell velocities are computed - ! in ice_dyn_evp. 
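As background for the conserv_check machinery declared above: the driver records global sums of ice area (asum_init) and area*tracer (atsum_init) before remapping and compares them against the corresponding post-remap sums in global_conservation further down. A minimal standalone sketch of that relative-error test, with hypothetical field values and a hard-coded tolerance standing in for Icepack's puny parameter:

   program conservation_check_sketch
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind), parameter :: puny = 1.0e-11_dbl_kind ! stand-in for Icepack's puny
      real (kind=dbl_kind) :: asum_init, asum_final, diff
      logical :: ckflag

      ckflag = .false.
      asum_init  = 3.14159265_dbl_kind   ! hypothetical pre-remap global area
      asum_final = 3.14159265_dbl_kind   ! hypothetical post-remap global area

      ! relative conservation error, guarded against division by zero,
      ! mirroring the test in global_conservation
      if (abs(asum_init) > puny) then
         diff = asum_final - asum_init
         if (abs(diff/asum_init) > puny) then
            ckflag = .true.
            write(*,*) 'area conservation error, fractional error =', abs(diff)/asum_init
         endif
      endif

      if (.not. ckflag) write(*,*) 'area conserved to within puny'
   end program conservation_check_sketch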
- !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Ghost cell updates for ice velocity. + ! Commented out because ghost cell velocities are computed + ! in ice_dyn_evp. + !------------------------------------------------------------------- ! call ice_timer_start(timer_bound) ! call ice_HaloUpdate (uvel, halo_info, & @@ -401,29 +402,29 @@ subroutine transport_remap (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - !------------------------------------------------------------------- - ! Fill arrays with fields to be remapped. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Fill arrays with fields to be remapped. + !------------------------------------------------------------------- - call state_to_tracers(nx_block, ny_block, & - ntrcr, ntrace, & - aice0(:,:, iblk), aicen(:,:,:,iblk), & - trcrn(:,:,:,:,iblk), & - vicen(:,:,:,iblk), vsnon(:,:, :,iblk), & - aim (:,:,:,iblk), trm (:,:,:,:,iblk)) + call state_to_tracers(nx_block, ny_block, & + ntrcr, ntrace, & + aice0(:,:, iblk), aicen(:,:,:, iblk), & + trcrn(:,:,:,:,iblk), & + vicen(:,:,:, iblk), vsnon(:,:,:, iblk), & + aim (:,:,:, iblk), trm (:,:,:,:,iblk)) enddo !$OMP END PARALLEL DO -!---!------------------------------------------------------------------- -!---! Optional conservation and monotonicity checks. -!---!------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Optional conservation and monotonicity checks. + !------------------------------------------------------------------- if (conserv_check) then - !------------------------------------------------------------------- - ! Compute initial values of globally conserved quantities. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute initial values of globally conserved quantities. + !------------------------------------------------------------------- do n = 0, ncat asum_init(n) = global_sum(aim(:,:,n,:), distrb_info, & @@ -458,7 +459,7 @@ subroutine transport_remap (dt) enddo ! n endif ! conserv_check - + if (l_monotonicity_check) then allocate(tmin(nx_block,ny_block,ntrace,ncat,max_blocks), & @@ -473,33 +474,33 @@ subroutine transport_remap (dt) !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n) SCHEDULE(runtime) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - !------------------------------------------------------------------- - ! Compute masks. - ! Masks are used to prevent tracer values in cells without ice - ! from being used in the monotonicity check. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute masks. + ! Masks are used to prevent tracer values in cells without ice + ! from being used in the monotonicity check. 
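The masks computed by make_masks feed the monotonicity bounds in local_max_min further down: where a neighbor cell carries no ice, its (possibly unphysical) tracer value is replaced by the home-cell value via a convex blend before the min/max is taken. A sketch of the one-neighbor case with made-up values:

   program mask_blend_sketch
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind), parameter :: c1 = 1.0_dbl_kind
      real (kind=dbl_kind) :: phimask, trm_nbr, trm_home, phi_n

      trm_home = 250.0_dbl_kind    ! hypothetical home-cell tracer value
      trm_nbr  = -9999.0_dbl_kind  ! junk value in an ice-free neighbor
      phimask  = 0.0_dbl_kind      ! mask = 0 : neighbor has no ice

      ! phimask = 1 keeps the neighbor value; phimask = 0 substitutes the
      ! home-cell value, so junk never enters the local max/min
      phi_n = phimask*trm_nbr + (c1 - phimask)*trm_home

      write(*,*) 'effective neighbor value =', phi_n   ! prints 250.0
   end program mask_blend_sketch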
+ !------------------------------------------------------------------- call make_masks (nx_block, ny_block, & ilo, ihi, jlo, jhi, & nghost, ntrace, & has_dependents, & - icellsnc(:,iblk), & - indxinc(:,:,iblk), indxjnc(:,:,iblk), & - aim(:,:,:,iblk), aimask(:,:,:,iblk), & + icellsnc (:,iblk), & + indxinc(:,:,iblk), indxjnc(:,:, iblk), & + aim(:,:,:, iblk), aimask(:,:,:, iblk), & trm(:,:,:,:,iblk), trmask(:,:,:,:,iblk)) - !------------------------------------------------------------------- - ! Compute local max and min of tracer fields. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute local max and min of tracer fields. + !------------------------------------------------------------------- do n = 1, ncat - call local_max_min & + call local_max_min & (nx_block, ny_block, & ilo, ihi, jlo, jhi, & trm (:,:,:,n,iblk), & @@ -518,16 +519,16 @@ subroutine transport_remap (dt) !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n) SCHEDULE(runtime) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi do n = 1, ncat - call quasilocal_max_min (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - tmin(:,:,:,n,iblk), & + call quasilocal_max_min (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + tmin(:,:,:,n,iblk), & tmax(:,:,:,n,iblk)) enddo enddo @@ -535,48 +536,49 @@ subroutine transport_remap (dt) endif ! l_monotonicity_check - !------------------------------------------------------------------- - ! Main remapping routine: Step ice area and tracers forward in time. - !------------------------------------------------------------------- - if (grid_ice == 'CD' .or. grid_ice == 'C') then - call horizontal_remap (dt, ntrace, & - uvel (:,:,:), vvel (:,:,:), & - aim (:,:,:,:), trm (:,:,:,:,:), & - l_fixed_area, & - tracer_type, depend, & - has_dependents, integral_order, & - l_dp_midpt, grid_ice, & - uvelE(:,:,:),vvelN(:,:,:)) - else - call horizontal_remap (dt, ntrace, & - uvel (:,:,:), vvel (:,:,:), & - aim (:,:,:,:), trm (:,:,:,:,:), & - l_fixed_area, & - tracer_type, depend, & - has_dependents, integral_order, & - l_dp_midpt, grid_ice) - endif - - !------------------------------------------------------------------- - ! Given new fields, recompute state variables. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Main remapping routine: Step ice area and tracers forward in time. + !------------------------------------------------------------------- + + if (grid_ice == 'CD' .or. grid_ice == 'C') then + call horizontal_remap (dt, ntrace, & + uvel (:,:,:), vvel (:,:,:), & + aim (:,:,:,:), trm(:,:,:,:,:), & + l_fixed_area, & + tracer_type, depend, & + has_dependents, integral_order, & + l_dp_midpt, grid_ice, & + uvelE (:,:,:), vvelN (:,:,:)) + else + call horizontal_remap (dt, ntrace, & + uvel (:,:,:), vvel (:,:,:), & + aim (:,:,:,:), trm(:,:,:,:,:), & + l_fixed_area, & + tracer_type, depend, & + has_dependents, integral_order, & + l_dp_midpt, grid_ice) + endif + + !------------------------------------------------------------------- + ! Given new fields, recompute state variables. 
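Note the two call forms of horizontal_remap above: the B-grid branch passes corner velocities only, while the C/CD branch appends the edge-velocity arrays uvelE/vvelN. One plausible way such a dual interface can be supported is Fortran optional dummy arguments; the sketch below is illustrative only and is not the actual horizontal_remap signature:

   program optional_arg_sketch
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind) :: u(2,2), uE(2,2)

      u  = 1.0_dbl_kind
      uE = 2.0_dbl_kind

      call remap_sketch (u)        ! B-grid style call
      call remap_sketch (u, uE)    ! C/CD-grid style call

   contains

      subroutine remap_sketch (uvel, uvelE)
         real (kind=dbl_kind), intent(in)           :: uvel (:,:)
         real (kind=dbl_kind), intent(in), optional :: uvelE(:,:)
         ! present() selects the grid-appropriate branch at run time
         if (present(uvelE)) then
            write(*,*) 'using prescribed edge velocities:', uvelE(1,1)
         else
            write(*,*) 'deriving edge values from corner velocities:', uvel(1,1)
         endif
      end subroutine remap_sketch

   end program optional_arg_sketch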
+ !------------------------------------------------------------------- !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - call tracers_to_state (nx_block, ny_block, & - ntrcr, ntrace, & - aim (:,:,:,iblk), trm (:,:,:,:,iblk), & - aice0(:,:, iblk), aicen(:,:,:,iblk), & - trcrn(:,:,:,:,iblk), & - vicen(:,:,:,iblk), vsnon(:,:, :,iblk)) + call tracers_to_state (nx_block, ny_block, & + ntrcr, ntrace, & + aim (:,:,:, iblk), trm (:,:,:,:,iblk), & + aice0(:,:, iblk), aicen(:,:,:, iblk), & + trcrn(:,:,:,:,iblk), & + vicen(:,:,:, iblk), vsnon(:,:, :,iblk)) enddo ! iblk !$OMP END PARALLEL DO - !------------------------------------------------------------------- - ! Ghost cell updates for state variables. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Ghost cell updates for state variables. + !------------------------------------------------------------------- call ice_timer_start(timer_bound) @@ -586,14 +588,14 @@ subroutine transport_remap (dt) call ice_timer_stop(timer_bound) -!---!------------------------------------------------------------------- -!---! Optional conservation and monotonicity checks -!---!------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Optional conservation and monotonicity checks + !------------------------------------------------------------------- - !------------------------------------------------------------------- - ! Compute final values of globally conserved quantities. - ! Check global conservation of area and area*tracers. (Optional) - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute final values of globally conserved quantities. + ! Check global conservation of area and area*tracers. (Optional) + !------------------------------------------------------------------- if (conserv_check) then @@ -660,14 +662,14 @@ subroutine transport_remap (dt) endif ! conserv_check - !------------------------------------------------------------------- - ! Check tracer monotonicity. (Optional) - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Check tracer monotonicity. (Optional) + !------------------------------------------------------------------- if (l_monotonicity_check) then !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n,ckflag,istop,jstop) SCHEDULE(runtime) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -678,13 +680,12 @@ subroutine transport_remap (dt) jstop = 0 do n = 1, ncat - call check_monotonicity & - (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - tmin(:,:,:,n,iblk), tmax(:,:,:,n,iblk), & - aim (:,:, n,iblk), trm (:,:,:,n,iblk), & - ckflag, & - istop, jstop) + call check_monotonicity (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + tmin(:,:,:,n,iblk), tmax(:,:,:,n,iblk), & + aim (:,:, n,iblk), trm (:,:,:,n,iblk), & + ckflag, & + istop, jstop) if (ckflag) then write (nu_diag,*) 'istep1, my_task, iblk, cat =', & @@ -701,8 +702,8 @@ subroutine transport_remap (dt) endif ! l_monotonicity_check - call ice_timer_stop(timer_advect) ! advection - + call ice_timer_stop(timer_advect) ! 
advection + end subroutine transport_remap !======================================================================= @@ -723,32 +724,31 @@ subroutine transport_upwind (dt) use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_bound, timer_advect - real (kind=dbl_kind), intent(in) :: & + real (kind=dbl_kind), intent(in) :: & dt ! time step ! local variables - integer (kind=int_kind) :: & - ntrcr, & ! + integer (kind=int_kind) :: & + ntrcr , & ! narr ! max number of state variable arrays - integer (kind=int_kind) :: & - i, j, iblk ,&! horizontal indices + integer (kind=int_kind) :: & + i, j, iblk , & ! horizontal indices ilo,ihi,jlo,jhi ! beginning and end of physical domain - real (kind=dbl_kind), dimension (nx_block,ny_block,nblocks) :: & + real (kind=dbl_kind), dimension (nx_block,ny_block,nblocks) :: & uee, vnn ! cell edge velocities - real (kind=dbl_kind), & - dimension (:,:,:,:), allocatable :: & + real (kind=dbl_kind), dimension (:,:,:,:), allocatable :: & works ! work array type (block) :: & - this_block ! block information for current block + this_block ! block information for current block character(len=*), parameter :: subname = '(transport_upwind)' - call ice_timer_start(timer_advect) ! advection + call ice_timer_start(timer_advect) ! advection call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_warnings_flush(nu_diag) @@ -759,31 +759,31 @@ subroutine transport_upwind (dt) allocate (works(nx_block,ny_block,narr,max_blocks)) - !------------------------------------------------------------------- - ! Get ghost cell values of state variables. - ! (Assume velocities are already known for ghost cells, also.) - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Get ghost cell values of state variables. + ! (Assume velocities are already known for ghost cells, also.) + !------------------------------------------------------------------- ! call bound_state (aicen, & ! vicen, vsnon, & ! ntrcr, trcrn) ! call ice_timer_start(timer_bound) -! call ice_HaloUpdate (uvel, halo_info, & +! call ice_HaloUpdate (uvel, halo_info, & ! field_loc_NEcorner, field_type_vector) -! call ice_HaloUpdate (vvel, halo_info, & +! call ice_HaloUpdate (vvel, halo_info, & ! field_loc_NEcorner, field_type_vector) ! call ice_timer_stop(timer_bound) - !------------------------------------------------------------------- - ! Average corner velocities to edges. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Average corner velocities to edges. + !------------------------------------------------------------------- if (grid_ice == 'CD' .or. 
grid_ice == 'C') then uee(:,:,:)=uvelE(:,:,:) vnn(:,:,:)=vvelN(:,:,:) else !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) SCHEDULE(runtime) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -791,8 +791,8 @@ subroutine transport_upwind (dt) do j = jlo, jhi do i = ilo, ihi - uee(i,j,iblk) = p5*(uvel(i,j,iblk) + uvel(i,j-1,iblk)) - vnn(i,j,iblk) = p5*(vvel(i,j,iblk) + vvel(i-1,j,iblk)) + uee(i,j,iblk) = p5*(uvel(i,j,iblk) + uvel(i ,j-1,iblk)) + vnn(i,j,iblk) = p5*(vvel(i,j,iblk) + vvel(i-1,j ,iblk)) enddo enddo enddo @@ -808,16 +808,15 @@ subroutine transport_upwind (dt) !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) SCHEDULE(runtime) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - - !----------------------------------------------------------------- - ! fill work arrays with fields to be advected - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! fill work arrays with fields to be advected + !----------------------------------------------------------------- call state_to_work (nx_block, ny_block, & ntrcr, & @@ -826,21 +825,21 @@ subroutine transport_upwind (dt) vicen (:,:, :,iblk), vsnon (:,:, :,iblk), & aice0 (:,:, iblk), works (:,:, :,iblk)) - !----------------------------------------------------------------- - ! advect - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! advect + !----------------------------------------------------------------- call upwind_field (nx_block, ny_block, & ilo, ihi, jlo, jhi, & dt, & narr, works(:,:,:,iblk), & - uee(:,:,iblk), vnn (:,:,iblk), & - HTE(:,:,iblk), HTN (:,:,iblk), & + uee (:,:,iblk), vnn (:,:,iblk), & + HTE (:,:,iblk), HTN (:,:,iblk), & tarea(:,:,iblk)) - !----------------------------------------------------------------- - ! convert work arrays back to state variables - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! convert work arrays back to state variables + !----------------------------------------------------------------- call work_to_state (nx_block, ny_block, & ntrcr, narr, & @@ -849,16 +848,16 @@ subroutine transport_upwind (dt) tmask(:,:, iblk), & aicen(:,:, :,iblk), trcrn (:,:,:,:,iblk), & vicen(:,:, :,iblk), vsnon (:,:, :,iblk), & - aice0(:,:, iblk), works (:,:, :,iblk)) + aice0(:,:, iblk), works (:,:, :,iblk)) enddo ! iblk !$OMP END PARALLEL DO - + deallocate (works) - !------------------------------------------------------------------- - ! Ghost cell updates for state variables. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Ghost cell updates for state variables. + !------------------------------------------------------------------- call ice_timer_start(timer_bound) @@ -868,7 +867,7 @@ subroutine transport_upwind (dt) call ice_timer_stop(timer_bound) - call ice_timer_stop(timer_advect) ! advection + call ice_timer_stop(timer_advect) ! 
advection end subroutine transport_upwind @@ -878,7 +877,7 @@ end subroutine transport_upwind !======================================================================= ! ! Fill ice area and tracer arrays. -! Assume that the advected tracers are hicen, hsnon, trcrn, +! Assume that the advected tracers are hicen, hsnon, trcrn, ! qicen(1:nilyr), and qsnon(1:nslyr). ! This subroutine must be modified if a different set of tracers ! is to be transported. The rule for ordering tracers @@ -897,47 +896,47 @@ subroutine state_to_tracers (nx_block, ny_block, & use ice_domain_size, only: ncat, nslyr integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - ntrcr , & ! number of tracers in use - ntrace ! number of tracers in use incl. hi, hs + nx_block, ny_block, & ! block dimensions + ntrcr , & ! number of tracers in use + ntrace ! number of tracers in use incl. hi, hs real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - aice0 ! fractional open water area + aice0 ! fractional open water area real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), intent(in) :: & - aicen ,&! fractional ice area - vicen ,&! volume per unit area of ice (m) - vsnon ! volume per unit area of snow (m) + aicen , & ! fractional ice area + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), intent(in) :: & - trcrn ! ice area tracers + trcrn ! ice area tracers real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat), intent(out) :: & - aim ! mean ice area in each grid cell + aim ! mean ice area in each grid cell real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace,ncat), intent(out) :: & - trm ! mean tracer values in each grid cell + trm ! mean tracer values in each grid cell ! local variables integer (kind=int_kind) :: & - nt_qsno ,&! - i, j, n ,&! standard indices - it, kt ,&! tracer indices - ij ! combined i/j index + nt_qsno , & ! + i, j, n , & ! standard indices + it, kt , & ! tracer indices + ij ! combined i/j index real (kind=dbl_kind) :: & - puny ,&! - rhos ,&! - Lfresh ,&! - w1 ! work variable + puny , & ! + rhos , & ! + Lfresh , & ! + w1 ! work variable integer (kind=int_kind), dimension(nx_block*ny_block,0:ncat) :: & - indxi ,&! compressed i/j indices - indxj + indxi , & ! compressed i/j indices + indxj integer (kind=int_kind), dimension(0:ncat) :: & - icells ! number of cells with ice + icells ! number of cells with ice character(len=*), parameter :: subname = '(state_to_tracers)' @@ -954,9 +953,9 @@ subroutine state_to_tracers (nx_block, ny_block, & trm(:,:,:,n) = c0 - !------------------------------------------------------------------- - ! Find grid cells where ice is present and fill area array. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Find grid cells where ice is present and fill area array. + !------------------------------------------------------------------- icells(n) = 0 do j = 1, ny_block @@ -970,13 +969,13 @@ subroutine state_to_tracers (nx_block, ny_block, & endif ! aim > puny enddo enddo - - !------------------------------------------------------------------- - ! Fill tracer array - ! Note: If aice > 0, then hice > 0, but we can have hsno = 0. - ! Alse note: We transport qice*nilyr rather than qice, so as to - ! avoid extra operations here and in tracers_to_state. 
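One detail of the tracer packing below deserves a note: snow enthalpy is negative-definite, and tracers_to_state (further down) subtracts rhos*Lfresh when unpacking it, which implies the packing side adds the same offset before transport. Assuming that symmetry, a round-trip sketch with hypothetical constants (the real rhos and Lfresh come from icepack_query_parameters):

   program enthalpy_offset_sketch
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind), parameter :: rhos   = 330.0_dbl_kind  ! assumed snow density (kg/m3)
      real (kind=dbl_kind), parameter :: Lfresh = 3.34e5_dbl_kind ! assumed latent heat (J/kg)
      real (kind=dbl_kind) :: qsno, trm_packed, qsno_back

      qsno = -1.1e8_dbl_kind                ! hypothetical snow enthalpy (J/m3), negative

      trm_packed = qsno + rhos*Lfresh       ! pack: shift toward zero for transport
      qsno_back  = trm_packed - rhos*Lfresh ! unpack: recover the original value

      write(*,*) 'round-trip error =', qsno_back - qsno   ! identically zero here
   end program enthalpy_offset_sketch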
- !-------------------------------------------------------------------
+
+ !-------------------------------------------------------------------
+ ! Fill tracer array
+ ! Note: If aice > 0, then hice > 0, but we can have hsno = 0.
+ ! Also note: We transport qice*nilyr rather than qice, so as to
+ ! avoid extra operations here and in tracers_to_state.
+ !-------------------------------------------------------------------

 do ij = 1, icells(n)
 i = indxi(ij,n)
@@ -1003,7 +1002,7 @@ subroutine state_to_tracers (nx_block, ny_block, &
 endif
 enddo
 enddo ! ncat
-
+
 end subroutine state_to_tracers

 !=======================================================================
@@ -1022,42 +1021,42 @@ subroutine tracers_to_state (nx_block, ny_block, &
 use ice_domain_size, only: ncat, nslyr

 integer (kind=int_kind), intent(in) :: &
- nx_block, ny_block, & ! block dimensions
- ntrcr , & ! number of tracers in use
- ntrace ! number of tracers in use incl. hi, hs
+ nx_block, ny_block, & ! block dimensions
+ ntrcr , & ! number of tracers in use
+ ntrace ! number of tracers in use incl. hi, hs

 real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat), intent(in) :: &
- aim ! fractional ice area
+ aim ! fractional ice area

 real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace,ncat), intent(in) :: &
- trm ! mean tracer values in each grid cell
+ trm ! mean tracer values in each grid cell

 real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: &
- aice0 ! fractional ice area
+ aice0 ! fractional ice area

 real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), intent(inout) :: &
- aicen ,&! fractional ice area
- vicen ,&! volume per unit area of ice (m)
- vsnon ! volume per unit area of snow (m)
+ aicen , & ! fractional ice area
+ vicen , & ! volume per unit area of ice (m)
+ vsnon ! volume per unit area of snow (m)

 real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), intent(inout) :: &
- trcrn ! tracers
+ trcrn ! tracers

 ! local variables

 integer (kind=int_kind) :: &
- nt_qsno ,&!
- i, j, n ,&! standard indices
- it, kt ,&! tracer indices
- icells ,&! number of cells with ice
- ij
+ nt_qsno , & !
+ i, j, n , & ! standard indices
+ it, kt , & ! tracer indices
+ icells , & ! number of cells with ice
+ ij

 real (kind=dbl_kind) :: &
- rhos, &
- Lfresh
+ rhos , & !
+ Lfresh !

 integer (kind=int_kind), dimension (nx_block*ny_block) :: &
- indxi, indxj ! compressed indices
+ indxi, indxj ! compressed indices

 character(len=*), parameter :: subname = '(tracers_to_state)'

@@ -1071,20 +1070,20 @@ subroutine tracers_to_state (nx_block, ny_block, &

 do n = 1, ncat

- icells = 0
- do j = 1, ny_block
- do i = 1, nx_block
- if (aim(i,j,n) > c0) then
- icells = icells + 1
- indxi(icells) = i
- indxj(icells) = j
- endif
- enddo
- enddo
+ icells = 0
+ do j = 1, ny_block
+ do i = 1, nx_block
+ if (aim(i,j,n) > c0) then
+ icells = icells + 1
+ indxi(icells) = i
+ indxj(icells) = j
+ endif
+ enddo
+ enddo

- !-------------------------------------------------------------------
- ! Compute state variables.
- !-------------------------------------------------------------------
+ !-------------------------------------------------------------------
+ ! Compute state variables.
+ !-------------------------------------------------------------------

 do ij = 1, icells
 i = indxi(ij)
@@ -1102,7 +1101,7 @@ subroutine tracers_to_state (nx_block, ny_block, &
 j = indxj(ij)
 trcrn(i,j,it,n) = trm(i,j,kt+it,n) - rhos*Lfresh !
snow enthalpy enddo - else + else do ij = 1, icells i = indxi(ij) j = indxj(ij) @@ -1129,24 +1128,24 @@ subroutine global_conservation (ckflag, fieldid, & fieldid ! field information string real (kind=dbl_kind), intent(in) :: & - asum_init ,&! initial global ice area + asum_init , & ! initial global ice area asum_final ! final global ice area real (kind=dbl_kind), dimension(ntrace), intent(in), optional :: & - atsum_init ,&! initial global ice area*tracer + atsum_init, & ! initial global ice area*tracer atsum_final ! final global ice area*tracer logical (kind=log_kind), intent(inout) :: & - ckflag ! if true, abort on return + ckflag ! if true, abort on return ! local variables integer (kind=int_kind) :: & - nt ! tracer index + nt ! tracer index real (kind=dbl_kind) :: & - puny ,&! - diff ! difference between initial and final values + puny , & ! + diff ! difference between initial and final values character(len=*), parameter :: subname = '(global_conservation)' @@ -1169,21 +1168,21 @@ subroutine global_conservation (ckflag, fieldid, & endif if (present(atsum_init)) then - do nt = 1, ntrace - if (abs(atsum_init(nt)) > puny) then - diff = atsum_final(nt) - atsum_init(nt) - if (abs(diff/atsum_init(nt)) > puny) then - ckflag = .true. - write (nu_diag,*) - write (nu_diag,*) subname,'Ice area*tracer conserv error ', trim(fieldid),nt - write (nu_diag,*) subname,' Tracer index =', nt - write (nu_diag,*) subname,' Initial global area*tracer =', atsum_init(nt) - write (nu_diag,*) subname,' Final global area*tracer =', atsum_final(nt) - write (nu_diag,*) subname,' Fractional error =', abs(diff)/atsum_init(nt) - write (nu_diag,*) subname,' atsum_final-atsum_init =', diff + do nt = 1, ntrace + if (abs(atsum_init(nt)) > puny) then + diff = atsum_final(nt) - atsum_init(nt) + if (abs(diff/atsum_init(nt)) > puny) then + ckflag = .true. + write (nu_diag,*) + write (nu_diag,*) subname,'Ice area*tracer conserv error ', trim(fieldid),nt + write (nu_diag,*) subname,' Tracer index =', nt + write (nu_diag,*) subname,' Initial global area*tracer =', atsum_init(nt) + write (nu_diag,*) subname,' Final global area*tracer =', atsum_final(nt) + write (nu_diag,*) subname,' Fractional error =', abs(diff)/atsum_init(nt) + write (nu_diag,*) subname,' atsum_final-atsum_init =', diff + endif endif - endif - enddo + enddo endif ! present(atsum_init) end subroutine global_conservation @@ -1193,7 +1192,7 @@ end subroutine global_conservation ! At each grid point, compute the local max and min of a scalar ! field phi: i.e., the max and min values in the nine-cell region ! consisting of the home cell and its eight neighbors. -! +! ! To extend to the neighbors of the neighbors (25 cells in all), ! follow this call with a call to quasilocal_max_min. ! @@ -1206,33 +1205,33 @@ subroutine local_max_min (nx_block, ny_block, & aimask, trmask) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - ilo,ihi,jlo,jhi ! beginning and end of physical domain + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain real (kind=dbl_kind), intent(in), dimension(nx_block,ny_block) :: & - aimask ! ice area mask + aimask ! ice area mask real (kind=dbl_kind), intent(in), dimension (nx_block,ny_block,ntrace) :: & - trm ,&! tracer fields - trmask ! tracer mask + trm , & ! tracer fields + trmask ! tracer mask real (kind=dbl_kind), intent(out), dimension (nx_block,ny_block,ntrace) :: & - tmin ,&! local min tracer - tmax ! local max tracer + tmin , & ! local min tracer + tmax ! 
local max tracer ! local variables integer (kind=int_kind) :: & - i, j ,&! horizontal indices - nt, nt1 ! tracer indices + i, j , & ! horizontal indices + nt, nt1 ! tracer indices real (kind=dbl_kind), dimension(nx_block,ny_block) :: & - phimask ! aimask or trmask, as appropriate + phimask ! aimask or trmask, as appropriate real (kind=dbl_kind) :: & - phi_nw, phi_n, phi_ne ,&! field values in 8 neighbor cells - phi_w, phi_e ,& - phi_sw, phi_s, phi_se + phi_nw, phi_n, phi_ne , & ! field values in 8 neighbor cells + phi_w , phi_e , & + phi_sw, phi_s, phi_se character(len=*), parameter :: subname = '(local_max_min)' @@ -1257,46 +1256,46 @@ subroutine local_max_min (nx_block, ny_block, & endif -!----------------------------------------------------------------------- -! Store values of trm in the 8 neighbor cells. -! If aimask = 1, use the true value; otherwise use the home cell value -! so that non-physical values of phi do not contribute to the gradient. -!----------------------------------------------------------------------- + !----------------------------------------------------------------------- + ! Store values of trm in the 8 neighbor cells. + ! If aimask = 1, use the true value; otherwise use the home cell value + ! so that non-physical values of phi do not contribute to the gradient. + !----------------------------------------------------------------------- do j = jlo, jhi - do i = ilo, ihi - - phi_nw = phimask(i-1,j+1) * trm(i-1,j+1,nt) & - + (c1-phimask(i-1,j+1))* trm(i, j, nt) - phi_n = phimask(i, j+1) * trm(i, j+1,nt) & - + (c1-phimask(i, j+1))* trm(i, j, nt) - phi_ne = phimask(i+1,j+1) * trm(i+1,j+1,nt) & - + (c1-phimask(i+1,j+1))* trm(i, j, nt) - phi_w = phimask(i-1,j) * trm(i-1,j, nt) & - + (c1-phimask(i-1,j)) * trm(i, j, nt) - phi_e = phimask(i+1,j) * trm(i+1,j, nt) & - + (c1-phimask(i+1,j)) * trm(i, j, nt) - phi_sw = phimask(i-1,j-1) * trm(i-1,j-1,nt) & - + (c1-phimask(i-1,j-1))* trm(i, j, nt) - phi_s = phimask(i, j-1) * trm(i, j-1,nt) & - + (c1-phimask(i, j-1))* trm(i, j, nt) - phi_se = phimask(i+1,j-1) * trm(i+1,j-1,nt) & - + (c1-phimask(i+1,j-1))* trm(i, j, nt) - -!----------------------------------------------------------------------- -! Compute the minimum and maximum among the nine local cells. -!----------------------------------------------------------------------- - - tmax(i,j,nt) = max (phi_nw, phi_n, phi_ne, phi_w, & - trm(i,j,nt), phi_e, phi_sw, phi_s, phi_se) - - tmin(i,j,nt) = min (phi_nw, phi_n, phi_ne, phi_w, & - trm(i,j,nt), phi_e, phi_sw, phi_s, phi_se) - - enddo ! i - enddo ! j + do i = ilo, ihi - enddo ! nt + phi_nw = phimask(i-1,j+1) * trm(i-1,j+1,nt) & + + (c1-phimask(i-1,j+1))* trm(i, j, nt) + phi_n = phimask(i, j+1) * trm(i, j+1,nt) & + + (c1-phimask(i, j+1))* trm(i, j, nt) + phi_ne = phimask(i+1,j+1) * trm(i+1,j+1,nt) & + + (c1-phimask(i+1,j+1))* trm(i, j, nt) + phi_w = phimask(i-1,j) * trm(i-1,j, nt) & + + (c1-phimask(i-1,j)) * trm(i, j, nt) + phi_e = phimask(i+1,j) * trm(i+1,j, nt) & + + (c1-phimask(i+1,j)) * trm(i, j, nt) + phi_sw = phimask(i-1,j-1) * trm(i-1,j-1,nt) & + + (c1-phimask(i-1,j-1))* trm(i, j, nt) + phi_s = phimask(i, j-1) * trm(i, j-1,nt) & + + (c1-phimask(i, j-1))* trm(i, j, nt) + phi_se = phimask(i+1,j-1) * trm(i+1,j-1,nt) & + + (c1-phimask(i+1,j-1))* trm(i, j, nt) + + !----------------------------------------------------------------------- + ! Compute the minimum and maximum among the nine local cells. 
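The nine-cell stencil assembled here defines the monotonicity bounds: after remapping, each new tracer value must lie within [tmin, tmax] taken over the home cell and its eight neighbors, as check_monotonicity verifies further down. A compact sketch of the bound computation and the later test on a hypothetical 3x3 patch; the actual code enumerates the eight neighbors explicitly, while this sketch uses array intrinsics:

   program ninecell_bounds_sketch
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind), parameter :: puny = 1.0e-11_dbl_kind ! stand-in tolerance
      real (kind=dbl_kind) :: patch(3,3), tmin, tmax, phi_new

      ! hypothetical pre-remap tracer values in a cell and its 8 neighbors
      patch = reshape( (/ 1.0_dbl_kind, 2.0_dbl_kind, 3.0_dbl_kind,   &
                          2.5_dbl_kind, 2.0_dbl_kind, 1.5_dbl_kind,   &
                          0.5_dbl_kind, 1.0_dbl_kind, 2.0_dbl_kind /), (/3,3/) )

      tmin = minval(patch)
      tmax = maxval(patch)

      phi_new = 2.9_dbl_kind   ! hypothetical post-remap value at the center

      ! monotone remapping must not create new extrema
      if (phi_new < tmin - puny .or. phi_new > tmax + puny) then
         write(*,*) 'monotonicity violated:', phi_new, tmin, tmax
      else
         write(*,*) 'value lies within local bounds'
      endif
   end program ninecell_bounds_sketch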
+ !----------------------------------------------------------------------- + + tmax(i,j,nt) = max (phi_nw, phi_n, phi_ne, phi_w, & + trm(i,j,nt), phi_e, phi_sw, phi_s, phi_se) + + tmin(i,j,nt) = min (phi_nw, phi_n, phi_ne, phi_w, & + trm(i,j,nt), phi_e, phi_sw, phi_s, phi_se) + + enddo ! i + enddo ! j + + enddo ! nt end subroutine local_max_min @@ -1313,18 +1312,18 @@ subroutine quasilocal_max_min (nx_block, ny_block, & tmin, tmax) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - ilo,ihi,jlo,jhi ! beginning and end of physical domain + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain real (kind=dbl_kind), intent(inout), dimension (nx_block,ny_block,ntrace) :: & - tmin ,&! local min tracer - tmax ! local max tracer + tmin , & ! local min tracer + tmax ! local max tracer ! local variables integer (kind=int_kind) :: & - i, j ,&! horizontal indices - nt ! tracer index + i, j , & ! horizontal indices + nt ! tracer index character(len=*), parameter :: subname = '(quasilocal_max_min)' @@ -1365,37 +1364,37 @@ subroutine check_monotonicity (nx_block, ny_block, & istop, jstop) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - ilo,ihi,jlo,jhi ! beginning and end of physical domain + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi ! beginning and end of physical domain real (kind=dbl_kind), intent(in), dimension (nx_block,ny_block) :: & - aim ! new ice area + aim ! new ice area real (kind=dbl_kind), intent(in), dimension (nx_block,ny_block,ntrace) :: & - trm ! new tracers + trm ! new tracers real (kind=dbl_kind), intent(in), dimension (nx_block,ny_block,ntrace) :: & - tmin ,&! local min tracer - tmax ! local max tracer + tmin , & ! local min tracer + tmax ! local max tracer logical (kind=log_kind), intent(inout) :: & - ckflag ! if true, abort on return + ckflag ! if true, abort on return integer (kind=int_kind), intent(inout) :: & - istop, jstop ! indices of grid cell where model aborts + istop, jstop ! indices of grid cell where model aborts ! local variables integer (kind=int_kind) :: & - i, j ,&! horizontal indices - nt, nt1, nt2 ! tracer indices + i, j , & ! horizontal indices + nt, nt1, nt2 ! tracer indices real (kind=dbl_kind) :: & - puny ,&! - w1, w2 ! work variables + puny , & ! + w1, w2 ! work variables logical (kind=log_kind), dimension (nx_block, ny_block) :: & - l_check ! if true, check monotonicity + l_check ! if true, check monotonicity character(len=*), parameter :: subname = '(check_monotonicity)' @@ -1406,15 +1405,15 @@ subroutine check_monotonicity (nx_block, ny_block, & do nt = 1, ntrace - !------------------------------------------------------------------- - ! Load logical array to identify tracers that need checking. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Load logical array to identify tracers that need checking. + !------------------------------------------------------------------- if (tracer_type(nt)==1) then ! does not depend on another tracer do j = jlo, jhi do i = ilo, ihi - if (aim(i,j) > puny) then + if (aim(i,j) > puny) then l_check(i,j) = .true. else l_check(i,j) = .false. @@ -1451,9 +1450,9 @@ subroutine check_monotonicity (nx_block, ny_block, & enddo endif - !------------------------------------------------------------------- - ! 
Make sure new values lie between tmin and tmax - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Make sure new values lie between tmin and tmax + !------------------------------------------------------------------- do j = jlo, jhi do i = ilo, ihi @@ -1510,24 +1509,24 @@ subroutine state_to_work (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions ntrcr , & ! number of tracers in use - narr ! number of 2D state variable arrays in works array + narr ! number of 2D state variable arrays in works array integer (kind=int_kind), dimension (ntrcr), intent(in) :: & trcr_depend ! = 0 for aicen tracers, 1 for vicen, 2 for vsnon real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), intent(in) :: & - aicen ,&! concentration of ice - vicen ,&! volume per unit area of ice (m) - vsnon ! volume per unit area of snow (m) + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat), intent(in) :: & - trcrn ! ice tracers + trcrn ! ice tracers real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - aice0 ! concentration of open water + aice0 ! concentration of open water real (kind=dbl_kind), dimension(nx_block,ny_block,narr), intent (out) :: & - works ! work array + works ! work array ! local variables @@ -1538,8 +1537,8 @@ subroutine state_to_work (nx_block, ny_block, & tr_pond_cesm, tr_pond_lvl, tr_pond_topo integer (kind=int_kind) :: & - i, j, n, it ,&! counting indices - narrays ! counter for number of state variable arrays + i, j, n, it, & ! counting indices + narrays ! counter for number of state variable arrays character(len=*), parameter :: subname = '(state_to_work)' @@ -1598,36 +1597,36 @@ subroutine state_to_work (nx_block, ny_block, & elseif (trcr_depend(it) == 2+nt_alvl) then do j = 1, ny_block do i = 1, nx_block - works(i,j,narrays+it) = aicen(i,j,n) & + works(i,j,narrays+it) = aicen(i,j ,n) & * trcrn(i,j,nt_alvl,n) & - * trcrn(i,j,it,n) + * trcrn(i,j,it ,n) enddo enddo elseif (trcr_depend(it) == 2+nt_apnd .and. & tr_pond_cesm .or. tr_pond_topo) then do j = 1, ny_block do i = 1, nx_block - works(i,j,narrays+it) = aicen(i,j,n) & + works(i,j,narrays+it) = aicen(i,j ,n) & * trcrn(i,j,nt_apnd,n) & - * trcrn(i,j,it,n) + * trcrn(i,j,it ,n) enddo enddo elseif (trcr_depend(it) == 2+nt_apnd .and. & tr_pond_lvl) then do j = 1, ny_block do i = 1, nx_block - works(i,j,narrays+it) = aicen(i,j,n) & + works(i,j,narrays+it) = aicen(i,j ,n) & * trcrn(i,j,nt_alvl,n) & * trcrn(i,j,nt_apnd,n) & - * trcrn(i,j,it,n) + * trcrn(i,j,it ,n) enddo enddo elseif (trcr_depend(it) == 2+nt_fbri) then do j = 1, ny_block do i = 1, nx_block - works(i,j,narrays+it) = vicen(i,j,n) & + works(i,j,narrays+it) = vicen(i,j ,n) & * trcrn(i,j,nt_fbri,n) & - * trcrn(i,j,it,n) + * trcrn(i,j,it ,n) enddo enddo endif @@ -1645,23 +1644,23 @@ end subroutine state_to_work ! ! 
Convert work array back to state variables - subroutine work_to_state (nx_block, ny_block, & - ntrcr, narr, & - trcr_depend, & - trcr_base, & - n_trcr_strata, & - nt_strata, & - tmask, & - aicen, trcrn, & - vicen, vsnon, & - aice0, works) + subroutine work_to_state (nx_block, ny_block, & + ntrcr, narr, & + trcr_depend, & + trcr_base, & + n_trcr_strata, & + nt_strata, & + tmask, & + aicen, trcrn, & + vicen, vsnon, & + aice0, works) use ice_domain_size, only: ncat - integer (kind=int_kind), intent (in) :: & + integer (kind=int_kind), intent (in) :: & nx_block, ny_block, & ! block dimensions ntrcr , & ! number of tracers in use - narr ! number of 2D state variable arrays in works array + narr ! number of 2D state variable arrays in works array integer (kind=int_kind), dimension (ntrcr), intent(in) :: & trcr_depend, & ! = 0 for aicen tracers, 1 for vicen, 2 for vsnon @@ -1674,36 +1673,36 @@ subroutine work_to_state (nx_block, ny_block, & integer (kind=int_kind), dimension (ntrcr,2), intent(in) :: & nt_strata ! indices of underlying tracer layers - logical (kind=log_kind), intent (in) :: & + logical (kind=log_kind), intent (in) :: & tmask (nx_block,ny_block) - real (kind=dbl_kind), intent (in) :: & + real (kind=dbl_kind), intent (in) :: & works (nx_block,ny_block,narr) real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), intent(out) :: & - aicen ,&! concentration of ice - vicen ,&! volume per unit area of ice (m) - vsnon ! volume per unit area of snow (m) + aicen , & ! concentration of ice + vicen , & ! volume per unit area of ice (m) + vsnon ! volume per unit area of snow (m) real (kind=dbl_kind), dimension (nx_block,ny_block,ntrcr,ncat),intent(out) :: & - trcrn ! ice tracers + trcrn ! ice tracers real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - aice0 ! concentration of open water + aice0 ! concentration of open water ! local variables - integer (kind=int_kind) :: & - i, j, ij, n ,&! counting indices - narrays ,&! counter for number of state variable arrays - nt_Tsfc ,&! Tsfc tracer number - icells ! number of ocean/ice cells + integer (kind=int_kind) :: & + i, j, ij, n, & ! counting indices + narrays , & ! counter for number of state variable arrays + nt_Tsfc , & ! Tsfc tracer number + icells ! number of ocean/ice cells - integer (kind=int_kind), dimension (nx_block*ny_block) :: & + integer (kind=int_kind), dimension (nx_block*ny_block) :: & indxi, indxj - real (kind=dbl_kind), dimension (nx_block*ny_block,narr) :: & - work + real (kind=dbl_kind), dimension (nx_block*ny_block,narr) :: & + work character(len=*), parameter :: subname = '(work_to_state)' @@ -1745,15 +1744,16 @@ subroutine work_to_state (nx_block, ny_block, & i = indxi(ij) j = indxj(ij) - call icepack_compute_tracers(ntrcr=ntrcr, trcr_depend=trcr_depend(:), & - atrcrn = work (ij,narrays+1:narrays+ntrcr), & - aicen = aicen(i,j,n), & - vicen = vicen(i,j,n), & - vsnon = vsnon(i,j,n), & + call icepack_compute_tracers(ntrcr = ntrcr, & + trcr_depend = trcr_depend(:), & + atrcrn = work (ij,narrays+1:narrays+ntrcr), & + aicen = aicen(i,j,n), & + vicen = vicen(i,j,n), & + vsnon = vsnon(i,j,n), & trcr_base = trcr_base(:,:), & n_trcr_strata = n_trcr_strata(:), & nt_strata = nt_strata(:,:), & - trcrn = trcrn(i,j,:,n)) + trcrn = trcrn(i,j,:,n)) ! tcraig, don't let land points get non-zero Tsfc if (.not.tmask(i,j)) then @@ -1785,53 +1785,53 @@ subroutine upwind_field (nx_block, ny_block, & tarea) integer (kind=int_kind), intent (in) :: & - nx_block, ny_block ,&! block dimensions - ilo,ihi,jlo,jhi ,&! 
beginning and end of physical domain - narrays ! number of 2D arrays to be transported + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain + narrays ! number of 2D arrays to be transported real (kind=dbl_kind), intent(in) :: & - dt ! time step + dt ! time step real (kind=dbl_kind), dimension(nx_block,ny_block,narrays), intent(inout) :: & - phi ! scalar field + phi ! scalar field real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & - uee, vnn ! cell edge velocities + uee, vnn ! cell edge velocities real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & - HTE ,&! length of east cell edge - HTN ,&! length of north cell edge - tarea ! grid cell area + HTE , & ! length of east cell edge + HTN , & ! length of north cell edge + tarea ! grid cell area ! local variables integer (kind=int_kind) :: & - i, j, n ! standard indices + i, j, n ! standard indices real (kind=dbl_kind), dimension (nx_block,ny_block) :: & worka, workb character(len=*), parameter :: subname = '(upwind_field)' - !------------------------------------------------------------------- - ! upwind transport - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! upwind transport + !------------------------------------------------------------------- do n = 1, narrays do j = 1, jhi do i = 1, ihi worka(i,j)= & - upwind(phi(i,j,n),phi(i+1,j,n),uee(i,j),HTE(i,j),dt) + upwind(phi(i,j,n),phi(i+1,j ,n),uee(i,j),HTE(i,j),dt) workb(i,j)= & - upwind(phi(i,j,n),phi(i,j+1,n),vnn(i,j),HTN(i,j),dt) + upwind(phi(i,j,n),phi(i ,j+1,n),vnn(i,j),HTN(i,j),dt) enddo enddo do j = jlo, jhi do i = ilo, ihi - phi(i,j,n) = phi(i,j,n) - ( worka(i,j)-worka(i-1,j) & - + workb(i,j)-workb(i,j-1) ) & + phi(i,j,n) = phi(i,j,n) - ( worka(i,j)-worka(i-1,j ) & + + workb(i,j)-workb(i ,j-1) ) & / tarea(i,j) enddo enddo @@ -1841,10 +1841,9 @@ subroutine upwind_field (nx_block, ny_block, & end subroutine upwind_field !======================================================================= - - !------------------------------------------------------------------- - ! Define upwind function - !------------------------------------------------------------------- +! +! Define upwind function +! real(kind=dbl_kind) function upwind(y1,y2,a,h,dt) diff --git a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 index 6f35b2da8..95ae33613 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 @@ -1,5 +1,4 @@ !======================================================================= -! ! Transports quantities using the second-order conservative remapping ! scheme developed by John Dukowicz and John Baumgardner (DB) and modified ! for sea ice by William Lipscomb and Elizabeth Hunke. @@ -20,11 +19,11 @@ ! 2003: Vectorized by Clifford Chen (Fujitsu) and William Lipscomb ! 2004-05: Block structure added (WHL) ! 2006: Moved remap driver to ice_transport_driver -! Geometry changes: +! Geometry changes: ! (1) Reconstruct fields in stretched logically rectangular coordinates ! (2) Modify geometry so that the area flux across each edge ! can be specified (following an idea of Mats Bentsen) -! 2010: ECH removed unnecessary grid arrays and optional arguments from +! 2010: ECH removed unnecessary grid arrays and optional arguments from ! 
horizontal_remap

 module ice_transport_remap

@@ -52,7 +51,7 @@ module ice_transport_remap
 nvert = 3 ! number of vertices in a triangle

 ! for triangle integral formulas
- real (kind=dbl_kind), parameter :: &
+ real (kind=dbl_kind), parameter :: &
 p5625m = -9._dbl_kind/16._dbl_kind ,&
 p52083 = 25._dbl_kind/48._dbl_kind

@@ -60,141 +59,141 @@ module ice_transport_remap
 !=======================================================================
 ! Here is some information about how the incremental remapping scheme
-! works in CICE and how it can be adapted for use in other models. 
+! works in CICE and how it can be adapted for use in other models.
 !
-! The remapping routine is designed to transport a generic mass-like 
+! The remapping routine is designed to transport a generic mass-like
 ! field (in CICE, the ice fractional area) along with an arbitrary number
-! of tracers in two dimensions. The velocity components are assumed 
-! to lie at grid cell corners and the transported scalars at cell centers. 
-! Incremental remapping has the following desirable properties: 
-! 
-! (1) Tracer monotonicity is preserved. That is, no new local 
-! extrema are produced in fields like ice thickness or internal 
-! energy. 
-! (2) The reconstucted mass and tracer fields vary linearly in x and y. 
-! This means that remapping is 2nd-order accurate in space, 
-! except where horizontal gradients are limited to preserve 
-! monotonicity. 
-! (3) There are economies of scale. Transporting a single field 
-! is rather expensive, but additional fields have a relatively 
-! low marginal cost. 
-! 
-! The following generic conservation equations may be solved: 
-! 
-! dm/dt = del*(u*m) (0) 
-! d(m*T1)/dt = del*(u*m*T1) (1) 
-! d(m*T1*T2)/dt = del*(u*m*T1*T2) (2) 
-! d(m*T1*T2*T3)/dt = del*(u*m*T1*T2*T3) (3) 
+! of tracers in two dimensions. The velocity components are assumed
+! to lie at grid cell corners and the transported scalars at cell centers.
+! Incremental remapping has the following desirable properties:
+!
+! (1) Tracer monotonicity is preserved. That is, no new local
+! extrema are produced in fields like ice thickness or internal
+! energy.
+! (2) The reconstructed mass and tracer fields vary linearly in x and y.
+! This means that remapping is 2nd-order accurate in space,
+! except where horizontal gradients are limited to preserve
+! monotonicity.
+! (3) There are economies of scale. Transporting a single field
+! is rather expensive, but additional fields have a relatively
+! low marginal cost.
+!
+! The following generic conservation equations may be solved:
+!
+! dm/dt = del*(u*m) (0)
+! d(m*T1)/dt = del*(u*m*T1) (1)
+! d(m*T1*T2)/dt = del*(u*m*T1*T2) (2)
+! d(m*T1*T2*T3)/dt = del*(u*m*T1*T2*T3) (3)
 !
 ! where d is a partial derivative, del is the 2D divergence operator,
 ! u is the horizontal velocity, m is the mass density field, and
 ! T1, T2, and T3 are tracers.
 !
 ! In CICE, these equations have the form
-! 
+!
 ! da/dt = del*(u*a) (4)
 ! dv/dt = d(a*h)/dt = del*(u*a*h) (5)
 ! de/dt = d(a*h*q)/dt = del*(u*a*h*q) (6)
 ! d(aT)/dt = del*(u*a*t) (7)
-! 
-! where a = fractional ice area, v = ice/snow volume, h = v/a = thickness, 
-! e = ice/snow internal energy (J/m^2), q = e/v = internal energy per 
-! unit volume (J/m^3), and T is a tracer. These equations express 
+!
+! where a = fractional ice area, v = ice/snow volume, h = v/a = thickness,
+! e = ice/snow internal energy (J/m^2), q = e/v = internal energy per
+! unit volume (J/m^3), and T is a tracer. These equations express
! 
conservation of ice area, volume, internal energy, and area-weighted -! tracer, respectively. +! tracer, respectively. ! ! (Note: In CICE, a, v and e are prognostic quantities from which ! h and q are diagnosed. The remapping routine works with tracers, ! which means that h and q must be derived from a, v, and e before -! calling the remapping routine.) +! calling the remapping routine.) ! -! Earlier versions of CICE assumed fixed ice and snow density. -! Beginning with CICE 4.0, the ice and snow density can be variable. -! In this case, equations (5) and (6) are replaced by -! -! dv/dt = d(a*h)/dt = del*(u*a*h) (8) +! Earlier versions of CICE assumed fixed ice and snow density. +! Beginning with CICE 4.0, the ice and snow density can be variable. +! In this case, equations (5) and (6) are replaced by +! +! dv/dt = d(a*h)/dt = del*(u*a*h) (8) ! dm/dt = d(a*h*rho)/dt = del*(u*a*h*rho) (9) ! de/dt = d(a*h*rho*qm)/dt = del*(u*a*h*rho*qm) (10) -! -! where rho = density and qm = internal energy per unit mass (J/kg). -! Eq. (9) expresses mass conservation, which in the variable-density -! case is no longer equivalent to volume conservation (8). -! -! Tracers satisfying equations of the form (1) are called "type 1." -! In CICE the paradigmatic type 1 tracers are hi and hs. -! -! Tracers satisfying equations of the form (2) are called "type 2". -! The paradigmatic type 2 tracers are qi and qs (or rhoi and rhos -! in the variable-density case). -! +! +! where rho = density and qm = internal energy per unit mass (J/kg). +! Eq. (9) expresses mass conservation, which in the variable-density +! case is no longer equivalent to volume conservation (8). +! +! Tracers satisfying equations of the form (1) are called "type 1." +! In CICE the paradigmatic type 1 tracers are hi and hs. +! +! Tracers satisfying equations of the form (2) are called "type 2". +! The paradigmatic type 2 tracers are qi and qs (or rhoi and rhos +! in the variable-density case). +! ! Tracers satisfying equations of the form (3) are called "type 3." ! The paradigmatic type 3 tracers are qmi and qms in the variable-density -! case. There are no such tracers in the constant-density case. -! -! The fields a, T1, and T2 are reconstructed in each grid cell with -! 2nd-order accuracy. T3 is reconstructed with 1st-order accuracy -! (i.e., it is transported in upwind fashion) in order to avoid -! additional mathematical complexity. -! -! The mass-like field lives in the array "mm" (shorthand for mean -! mass) and the tracers fields in the array "tm" (mean tracers). -! In order to transport tracers correctly, the remapping routine -! needs to know the tracers types and relationships. This is done -! as follows: -! -! Each field in the "tm" array is assigned an index, 1:ntrace. -! (Note: ntrace is not the same as ntrcr, the number of tracers -! in the trcrn state variable array. For remapping purposes we -! have additional tracers hi and hs.) -! -! The tracer types (1,2,3) are contained in the "tracer_type" array. -! For standard CICE: -! -! tracer_type = (1 1 1 2 2 2 2 2) -! -! Type 2 and type 3 tracers are said to depend on type 1 tracers. -! For instance, qi depends on hi, which is to say that -! there is a conservation equation of the form (2) or (6). -! Thus we define a "depend" array. For standard CICE: -! -! depend = (0 0 0 1 1 1 1 2) -! -! which implies that elements 1-3 (hi, hs, Ts) are type 1, -! elements 4-7 (qi) depend on element 1 (hi), and element 8 (qs) -! depends on element 2 (hs). -! -! 
We also define a logical array "has_dependents". In standard CICE:
-! 
-! has_dependents = (T T F F F F F F), 
-! 
-! which means that only elements 1 and 2 (hi and hs) have dependent 
-! tracers. 
-! 
-! For the variable-density case, things are a bit more complicated. 
-! Suppose we have 4 variable-density ice layers and one variable- 
-! density snow layer. Then the indexing is as follows: 
-! 1 = hi 
-! 2 = hs 
-! 3 = Ts 
-! 4-7 = rhoi 
-! 8 = rhos 
-! 9-12 = qmi 
-! 13 = qms 
-! 
-! The key arrays are: 
-! 
-! tracer_type = (1 1 1 2 2 2 2 2 3 3 3 3 3) 
-! 
-! depend = (0 0 0 1 1 1 1 2 4 5 6 7 8) 
-! 
-! has_dependents = (T T F T T T T T F F F F F) 
-! 
-! which imply that hi and hs are type 1 with dependents rhoi and rhos, 
-! while rhoi and rhos are type 2 with dependents qmi and qms. 
-! 
-! Tracers added to the ntrcr array are handled automatically 
-! by the remapping with little extra coding. It is necessary 
-! only to provide the correct type and dependency information. 
+! case. There are no such tracers in the constant-density case.
+!
+! The fields a, T1, and T2 are reconstructed in each grid cell with
+! 2nd-order accuracy. T3 is reconstructed with 1st-order accuracy
+! (i.e., it is transported in upwind fashion) in order to avoid
+! additional mathematical complexity.
+!
+! The mass-like field lives in the array "mm" (shorthand for mean
+! mass) and the tracer fields in the array "tm" (mean tracers).
+! In order to transport tracers correctly, the remapping routine
+! needs to know the tracer types and relationships. This is done
+! as follows:
+!
+! Each field in the "tm" array is assigned an index, 1:ntrace.
+! (Note: ntrace is not the same as ntrcr, the number of tracers
+! in the trcrn state variable array. For remapping purposes we
+! have additional tracers hi and hs.)
+!
+! The tracer types (1,2,3) are contained in the "tracer_type" array.
+! For standard CICE:
+!
+! tracer_type = (1 1 1 2 2 2 2 2)
+!
+! Type 2 and type 3 tracers are said to depend on type 1 tracers.
+! For instance, qi depends on hi, which is to say that
+! there is a conservation equation of the form (2) or (6).
+! Thus we define a "depend" array. For standard CICE:
+!
+! depend = (0 0 0 1 1 1 1 2)
+!
+! which implies that elements 1-3 (hi, hs, Ts) are type 1,
+! elements 4-7 (qi) depend on element 1 (hi), and element 8 (qs)
+! depends on element 2 (hs).
+!
+! We also define a logical array "has_dependents". In standard CICE:
+!
+! has_dependents = (T T F F F F F F),
+!
+! which means that only elements 1 and 2 (hi and hs) have dependent
+! tracers.
+!
+! For the variable-density case, things are a bit more complicated.
+! Suppose we have 4 variable-density ice layers and one variable-
+! density snow layer. Then the indexing is as follows:
+! 1 = hi
+! 2 = hs
+! 3 = Ts
+! 4-7 = rhoi
+! 8 = rhos
+! 9-12 = qmi
+! 13 = qms
+!
+! The key arrays are:
+!
+! tracer_type = (1 1 1 2 2 2 2 2 3 3 3 3 3)
+!
+! depend = (0 0 0 1 1 1 1 2 4 5 6 7 8)
+!
+! has_dependents = (T T F T T T T T F F F F F)
+!
+! which imply that hi and hs are type 1 with dependents rhoi and rhos,
+! while rhoi and rhos are type 2 with dependents qmi and qms.
+!
+! Tracers added to the ntrcr array are handled automatically
+! by the remapping with little extra coding. It is necessary
+! only to provide the correct type and dependency information.
 !
 ! When using this routine in other models, most of the tracer dependency
 ! apparatus may be irrelevant. In a layered ocean model, for example,
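As a concrete illustration of the bookkeeping described above, here is a minimal, hypothetical sketch (not part of the patch itself) of how a host model could fill in the three metadata arrays for the standard CICE case; the values are copied directly from the comments above, and int_kind/log_kind are the usual CICE kind parameters:

      ! hedged sketch: tracer metadata for standard CICE, ntrace = 8
      ! (indices 1:8 = hi, hs, Ts, qi(1:4), qs, as documented above)
      integer (kind=int_kind), parameter :: ntrace = 8
      integer (kind=int_kind), dimension (ntrace) :: &
         tracer_type    , & ! = 1, 2, or 3 (see comments above)
         depend             ! index of the tracer depended on (0 = none)
      logical (kind=log_kind), dimension (ntrace) :: &
         has_dependents     ! true if another tracer depends on this one

      tracer_type    = (/ 1, 1, 1, 2, 2, 2, 2, 2 /)
      depend         = (/ 0, 0, 0, 1, 1, 1, 1, 2 /)   ! qi(1:4) -> hi, qs -> hs
      has_dependents = (/ .true.,  .true.,  .false., .false., &
                          .false., .false., .false., .false. /)

Supplying these three arrays (together with the matching slots of the "tm" array) is the only coupling the remapping requires; the scheme never inspects what the tracers physically represent.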
@@ -237,7 +236,7 @@ module ice_transport_remap
! regions are then tweaked, following an idea by Mats Bentsen, such
! that they have the desired area. If l_fixed_area = F, these regions
! are not tweaked, and the edgearea arrays are output variables.
-! 
+!
!=======================================================================

 contains

@@ -247,7 +246,7 @@ module ice_transport_remap
! Grid quantities used by the remapping transport scheme
!
! Note: the arrays xyav, xxxav, etc are not needed for rectangular grids
-! but may be needed in the future for other nonuniform grids. They have 
+! but may be needed in the future for other nonuniform grids. They have
! been commented out here to save memory and flops.
!
! author William H. Lipscomb, LANL
@@ -277,7 +276,7 @@ subroutine init_remap
 xav(i,j,iblk) = c0
 yav(i,j,iblk) = c0
!!! These formulas would be used on a rectangular grid
-!!! with dimensions (dxt, dyt): 
+!!! with dimensions (dxt, dyt):
!!! xxav(i,j,iblk) = dxt(i,j,iblk)**2 / c12
!!! yyav(i,j,iblk) = dyt(i,j,iblk)**2 / c12
 xxav(i,j,iblk) = c1/c12
@@ -291,7 +290,7 @@ subroutine init_remap
 enddo
 enddo
 !$OMP END PARALLEL DO
- 
+
 end subroutine init_remap

 !=======================================================================
@@ -302,25 +301,25 @@ end subroutine init_remap
 !
 ! This scheme preserves monotonicity of ice area and tracers. That is,
 ! it does not produce new extrema. It is second-order accurate in space,
-! except where gradients are limited to preserve monotonicity. 
+! except where gradients are limited to preserve monotonicity.
 !
 ! This version of the remapping allows the user to specify the areal
 ! flux across each edge, based on an idea developed by Mats Bentsen.
 !
 ! author William H. Lipscomb, LANL
-! 2006: Moved driver (subroutine transport_remap) into separate module. 
+! 2006: Moved driver (subroutine transport_remap) into separate module.
 ! Geometry changes (logically rectangular coordinates, fixed
 ! area fluxes)

-      subroutine horizontal_remap (dt, ntrace, &
-                                   uvel, vvel, &
-                                   mm, tm, &
-                                   l_fixed_area, &
-                                   tracer_type, depend, &
-                                   has_dependents, &
-                                   integral_order, &
-                                   l_dp_midpt, grid_ice, &
-                                   uvelE, vvelN)
+      subroutine horizontal_remap (dt,          ntrace,   &
+                                   uvel,        vvel,     &
+                                   mm,          tm,       &
+                                   l_fixed_area,          &
+                                   tracer_type, depend,   &
+                                   has_dependents,        &
+                                   integral_order,        &
+                                   l_dp_midpt,  grid_ice, &
+                                   uvelE,       vvelN)

 use ice_boundary, only: ice_halo, ice_HaloMask, ice_HaloUpdate, &
 ice_HaloDestroy
 use ice_domain, only: nblocks, blocks_ice, halo_info, maskhalo_remap
@@ -334,133 +333,135 @@ subroutine horizontal_remap (dt, ntrace, &
 use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound

 real (kind=dbl_kind), intent(in) :: &
- dt ! time step
+ dt ! time step

 integer (kind=int_kind), intent(in) :: &
- ntrace ! number of tracers in use
+ ntrace ! number of tracers in use

 real (kind=dbl_kind), intent(in), dimension(nx_block,ny_block,max_blocks) :: &
- uvel ,&! x-component of velocity (m/s) ugrid
- vvel ! y-component of velocity (m/s) ugrid
+ uvel, & ! x-component of velocity (m/s) ugrid
+ vvel ! y-component of velocity (m/s) ugrid

 real (kind=dbl_kind), intent(in), optional, dimension(nx_block,ny_block,max_blocks) :: &
- uvelE ,&! x-component of velocity (m/s) egrid
- vvelN ! y-component of velocity (m/s) ngrid
+ uvelE, & ! x-component of velocity (m/s) egrid
+ vvelN ! y-component of velocity (m/s) ngrid

 real (kind=dbl_kind), intent(inout), dimension (nx_block,ny_block,0:ncat,max_blocks) :: &
- mm ! mean mass values in each grid cell
+ mm ! mean mass values in each grid cell

 real (kind=dbl_kind), intent(inout), dimension (nx_block,ny_block,ntrace,ncat,max_blocks) :: &
- tm ! 
mean tracer values in each grid cell
+ tm ! mean tracer values in each grid cell

- character (len=char_len_long), intent(in) :: grid_ice
+ character (len=char_len_long), intent(in) :: &
+ grid_ice ! ice grid, B, C, etc

- !-------------------------------------------------------------------
- ! If l_fixed_area is true, the area of each departure region is
- ! computed in advance (e.g., by taking the divergence of the
- ! velocity field and passed to locate_triangles. The departure
- ! regions are adjusted to obtain the desired area.
- ! If false, edgearea is computed in locate_triangles and passed out.
- !-------------------------------------------------------------------
+ !-------------------------------------------------------------------
+ ! If l_fixed_area is true, the area of each departure region is
+ ! computed in advance (e.g., by taking the divergence of the
+ ! velocity field) and passed to locate_triangles. The departure
+ ! regions are adjusted to obtain the desired area.
+ ! If false, edgearea is computed in locate_triangles and passed out.
+ !-------------------------------------------------------------------

 logical, intent(in) :: &
- l_fixed_area ! if true, edgearea_e and edgearea_n are prescribed
- ! if false, edgearea is computed here and passed out
+ l_fixed_area ! if true, edgearea_e and edgearea_n are prescribed
+ ! if false, edgearea is computed here and passed out

 integer (kind=int_kind), dimension (ntrace), intent(in) :: &
- tracer_type ,&! = 1, 2, or 3 (see comments above)
- depend ! tracer dependencies (see above)
+ tracer_type , & ! = 1, 2, or 3 (see comments above)
+ depend ! tracer dependencies (see above)

 logical (kind=log_kind), dimension (ntrace), intent(in) :: &
- has_dependents ! true if a tracer has dependent tracers
+ has_dependents ! true if a tracer has dependent tracers

 integer (kind=int_kind), intent(in) :: &
- integral_order ! polynomial order for triangle integrals
+ integral_order ! polynomial order for triangle integrals

 logical (kind=log_kind), intent(in) :: &
- l_dp_midpt ! if true, find departure points using
- ! corrected midpoint velocity
+ l_dp_midpt ! if true, find departure points using
+ ! corrected midpoint velocity

 ! local variables

 integer (kind=int_kind) :: &
- i, j ,&! horizontal indices
- iblk ,&! block index
- ilo,ihi,jlo,jhi,&! beginning and end of physical domain
- n, m ! ice category, tracer indices
+ i, j , & ! horizontal indices
+ iblk , & ! block index
+ ilo,ihi,jlo,jhi, & ! beginning and end of physical domain
+ n, m ! ice category, tracer indices

 integer (kind=int_kind), dimension(0:ncat,max_blocks) :: &
- icellsnc ! number of cells with ice
+ icellsnc ! number of cells with ice

 integer (kind=int_kind), dimension(nx_block*ny_block,0:ncat) :: &
 indxinc, indxjnc ! compressed i/j indices

 real (kind=dbl_kind), dimension(nx_block,ny_block) :: &
- edgearea_e ,&! area of departure regions for east edges
- edgearea_n ! area of departure regions for north edges
+ edgearea_e , & ! area of departure regions for east edges
+ edgearea_n ! area of departure regions for north edges

 real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: &
- dpx ,&! x coordinates of departure points at cell corners
- dpy ! y coordinates of departure points at cell corners
+ dpx , & ! x coordinates of departure points at cell corners
+ dpy ! y coordinates of departure points at cell corners

 real (kind=dbl_kind), dimension(nx_block,ny_block,0:ncat,max_blocks) :: &
- mc ,&! mass at geometric center of cell
- mx, my ! 
limited derivative of mass wrt x and y + mc , & ! mass at geometric center of cell + mx, my ! limited derivative of mass wrt x and y real (kind=dbl_kind), dimension(nx_block,ny_block,0:ncat) :: & - mmask ! = 1. if mass is present, = 0. otherwise + mmask ! = 1. if mass is present, = 0. otherwise real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace,ncat,max_blocks) :: & - tc ,&! tracer values at geometric center of cell - tx, ty ! limited derivative of tracer wrt x and y + tc , & ! tracer values at geometric center of cell + tx, ty ! limited derivative of tracer wrt x and y real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace,ncat) :: & - tmask ! = 1. if tracer is present, = 0. otherwise + tmask ! = 1. if tracer is present, = 0. otherwise real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat) :: & - mflxe, mflxn ! mass transports across E and N cell edges + mflxe, mflxn ! mass transports across E and N cell edges real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace,ncat) :: & - mtflxe, mtflxn ! mass*tracer transports across E and N cell edges + mtflxe, mtflxn ! mass*tracer transports across E and N cell edges real (kind=dbl_kind), dimension (nx_block,ny_block,ngroups) :: & - triarea ! area of east-edge departure triangle + triarea ! area of east-edge departure triangle real (kind=dbl_kind), dimension (nx_block,ny_block,0:nvert,ngroups) :: & - xp, yp ! x and y coordinates of special triangle points - ! (need 4 points for triangle integrals) + xp, yp ! x and y coordinates of special triangle points + ! (need 4 points for triangle integrals) integer (kind=int_kind), dimension (nx_block,ny_block,ngroups) :: & - iflux ,&! i index of cell contributing transport - jflux ! j index of cell contributing transport + iflux , & ! i index of cell contributing transport + jflux ! j index of cell contributing transport integer (kind=int_kind), dimension(ngroups,max_blocks) :: & - icellsng ! number of cells with ice + icellsng ! number of cells with ice integer (kind=int_kind), dimension(nx_block*ny_block,ngroups) :: & - indxing, indxjng ! compressed i/j indices + indxing, indxjng ! compressed i/j indices integer (kind=int_kind), dimension(nx_block,ny_block,max_blocks) :: & - halomask ! temporary mask for fast halo updates + halomask ! temporary mask for fast halo updates logical (kind=log_kind) :: & - l_stop ! if true, abort the model + l_stop ! if true, abort the model integer (kind=int_kind) :: & - istop, jstop ! indices of grid cell where model aborts + istop, jstop ! indices of grid cell where model aborts character (len=char_len) :: & - edge ! 'north' or 'east' + edge ! 'north' or 'east' - type (ice_halo) :: halo_info_tracer + type (ice_halo) :: & + halo_info_tracer ! masked halo type (block) :: & - this_block ! block information for current block + this_block ! block information for current block character(len=*), parameter :: subname = '(horizontal_remap)' -!---!------------------------------------------------------------------- -!---! Remap the ice area and associated tracers. -!---! Remap the open water area (without tracers). -!---!------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Remap the ice area and associated tracers. + ! Remap the open water area (without tracers). 
+ !------------------------------------------------------------------- !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,n, & !$OMP indxinc,indxjnc,mmask,tmask,istop,jstop,l_stop) & @@ -471,48 +472,48 @@ subroutine horizontal_remap (dt, ntrace, & istop = 0 jstop = 0 - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - !------------------------------------------------------------------- - ! Compute masks and count ice cells. - ! Masks are used to prevent tracer values in cells without ice from - ! being used to compute tracer gradients. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute masks and count ice cells. + ! Masks are used to prevent tracer values in cells without ice from + ! being used to compute tracer gradients. + !------------------------------------------------------------------- call make_masks (nx_block, ny_block, & ilo, ihi, jlo, jhi, & nghost, ntrace, & has_dependents, icellsnc(:,iblk), & - indxinc(:,:), indxjnc(:,:), & - mm(:,:,:,iblk), mmask(:,:,:), & + indxinc(:,:), indxjnc(:,:), & + mm (:,:,:,iblk), mmask(:,:,:), & tm(:,:,:,:,iblk), tmask(:,:,:,:)) - !------------------------------------------------------------------- - ! Construct linear fields, limiting gradients to preserve monotonicity. - ! Note: Pass in unit arrays instead of true distances HTE, HTN, etc. - ! The resulting gradients are in scaled coordinates. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Construct linear fields, limiting gradients to preserve monotonicity. + ! Note: Pass in unit arrays instead of true distances HTE, HTN, etc. + ! The resulting gradients are in scaled coordinates. + !------------------------------------------------------------------- ! open water - call construct_fields(nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - nghost, ntrace, & - tracer_type, depend, & - has_dependents, icellsnc (0,iblk), & - indxinc (:,0), indxjnc(:,0), & - hm (:,:,iblk), xav (:,:,iblk), & - yav (:,:,iblk), xxav (:,:,iblk), & - yyav (:,:,iblk), & -! xyav (:,:,iblk), & -! xxxav (:,:,iblk), xxyav(:,:,iblk), & -! xyyav (:,:,iblk), yyyav(:,:,iblk), & - mm (:,:,0,iblk), mc(:,:,0,iblk), & - mx (:,:,0,iblk), my(:,:,0,iblk), & - mmask (:,:,0) ) + call construct_fields(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, ntrace, & + tracer_type, depend, & + has_dependents, icellsnc(0,iblk), & + indxinc(:,0), indxjnc(:,0), & + hm (:,:,iblk), xav (:,:,iblk), & + yav (:,:,iblk), xxav (:,:,iblk), & + yyav (:,:,iblk), & +! xyav (:,:,iblk), & +! xxxav (:,:,iblk), xxyav (:,:,iblk), & +! xyyav (:,:,iblk), yyyav (:,:,iblk), & + mm (:,:,0,iblk), mc (:,:,0,iblk), & + mx (:,:,0,iblk), my (:,:,0,iblk), & + mmask(:,:,0) ) ! ice categories @@ -523,26 +524,26 @@ subroutine horizontal_remap (dt, ntrace, & nghost, ntrace, & tracer_type, depend, & has_dependents, icellsnc (n,iblk), & - indxinc (:,n), indxjnc(:,n), & + indxinc (:,n), indxjnc(:,n), & hm (:,:,iblk), xav (:,:,iblk), & yav (:,:,iblk), xxav (:,:,iblk), & - yyav (:,:,iblk), & -! xyav (:,:,iblk), & + yyav (:,:,iblk), & +! xyav (:,:,iblk), & ! xxxav (:,:,iblk), xxyav (:,:,iblk), & ! 
xyyav (:,:,iblk), yyyav (:,:,iblk), & - mm (:,:,n,iblk), mc (:,:,n,iblk), & - mx (:,:,n,iblk), my (:,:,n,iblk), & - mmask (:,:,n), & - tm (:,:,:,n,iblk), tc(:,:,:,n,iblk), & - tx (:,:,:,n,iblk), ty(:,:,:,n,iblk), & + mm (:,:,n,iblk), mc (:,:,n,iblk), & + mx (:,:,n,iblk), my (:,:,n,iblk), & + mmask (:,:,n), & + tm (:,:,:,n,iblk), tc(:,:,:,n,iblk), & + tx (:,:,:,n,iblk), ty(:,:,:,n,iblk), & tmask(:,:,:,n) ) enddo ! n - - !------------------------------------------------------------------- - ! Given velocity field at cell corners, compute departure points - ! of trajectories. - !------------------------------------------------------------------- + + !------------------------------------------------------------------- + ! Given velocity field at cell corners, compute departure points + ! of trajectories. + !------------------------------------------------------------------- call departure_points(nx_block, ny_block, & ilo, ihi, jlo, jhi, & @@ -551,27 +552,27 @@ subroutine horizontal_remap (dt, ntrace, & dxu (:,:,iblk), dyu (:,:,iblk), & HTN (:,:,iblk), HTE (:,:,iblk), & dpx (:,:,iblk), dpy (:,:,iblk), & - l_dp_midpt, l_stop, & + l_dp_midpt, l_stop, & istop, jstop) if (l_stop) then write(nu_diag,*) 'istep1, my_task, iblk =', & istep1, my_task, iblk write (nu_diag,*) 'Global block:', this_block%block_id - if (istop > 0 .and. jstop > 0) & - write(nu_diag,*) 'Global i and j:', & - this_block%i_glob(istop), & - this_block%j_glob(jstop) + if (istop > 0 .and. jstop > 0) & + write(nu_diag,*) 'Global i and j:', & + this_block%i_glob(istop), & + this_block%j_glob(jstop) call abort_ice(subname//'ERROR: bad departure points') endif enddo ! iblk !$OMP END PARALLEL DO - !------------------------------------------------------------------- - ! Ghost cell updates - ! If nghost >= 2, these calls are not needed - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Ghost cell updates + ! If nghost >= 2, these calls are not needed + !------------------------------------------------------------------- if (nghost==1) then @@ -591,12 +592,12 @@ subroutine horizontal_remap (dt, ntrace, & call ice_HaloUpdate (my, halo_info, & field_loc_center, field_type_vector) - ! tracer fields + ! tracer fields if (maskhalo_remap) then halomask(:,:,:) = 0 !$OMP PARALLEL DO PRIVATE(iblk,this_block,ilo,ihi,jlo,jhi,n,m,j,i) SCHEDULE(runtime) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -615,8 +616,8 @@ subroutine horizontal_remap (dt, ntrace, & enddo enddo !$OMP END PARALLEL DO - call ice_HaloUpdate(halomask, halo_info, & - field_loc_center, field_type_scalar) + call ice_HaloUpdate (halomask, halo_info, & + field_loc_center, field_type_scalar) call ice_HaloMask(halo_info_tracer, halo_info, halomask) call ice_HaloUpdate (tc, halo_info_tracer, & @@ -649,16 +650,16 @@ subroutine horizontal_remap (dt, ntrace, & istop = 0 jstop = 0 - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - !------------------------------------------------------------------- - ! If l_fixed_area is true, compute edgearea by taking the divergence - ! of the velocity field. Otherwise, initialize edgearea. 
- !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! If l_fixed_area is true, compute edgearea by taking the divergence + ! of the velocity field. Otherwise, initialize edgearea. + !------------------------------------------------------------------- do j = 1, ny_block do i = 1, nx_block @@ -669,6 +670,10 @@ subroutine horizontal_remap (dt, ntrace, & if (l_fixed_area) then if (grid_ice == 'CD' .or. grid_ice == 'C') then ! velocities are already on the center + if (.not.present(uvelE).or..not.present(vvelN)) then + call abort_ice (subname//'ERROR: uvelE,vvelN required with C|CD and l_fixed_area') + endif + do j = jlo, jhi do i = ilo-1, ihi edgearea_e(i,j) = uvelE(i,j,iblk) * HTE(i,j,iblk) * dt @@ -681,7 +686,7 @@ subroutine horizontal_remap (dt, ntrace, & enddo enddo - else + else do j = jlo, jhi do i = ilo-1, ihi edgearea_e(i,j) = (uvel(i,j,iblk) + uvel(i,j-1,iblk)) & @@ -696,125 +701,125 @@ subroutine horizontal_remap (dt, ntrace, & enddo enddo endif - endif + endif - !------------------------------------------------------------------- - ! Transports for east cell edges. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Transports for east cell edges. + !------------------------------------------------------------------- - !------------------------------------------------------------------- - ! Compute areas and vertices of departure triangles. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute areas and vertices of departure triangles. + !------------------------------------------------------------------- edge = 'east' call locate_triangles(nx_block, ny_block, & ilo, ihi, jlo, jhi, & nghost, edge, & - icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & - dpx (:,:,iblk), dpy (:,:,iblk), & - dxu (:,:,iblk), dyu (:,:,iblk), & - xp(:,:,:,:), yp(:,:,:,:), & + icellsng(:,iblk), & + indxing(:,:), indxjng(:,:), & + dpx (:,:,iblk), dpy(:,:,iblk), & + dxu (:,:,iblk), dyu(:,:,iblk), & + xp (:,:,:,:), yp (:,:,:,:), & iflux, jflux, & triarea, & l_fixed_area, edgearea_e(:,:)) - !------------------------------------------------------------------- - ! Given triangle vertices, compute coordinates of triangle points - ! needed for transport integrals. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Given triangle vertices, compute coordinates of triangle points + ! needed for transport integrals. + !------------------------------------------------------------------- - call triangle_coordinates (nx_block, ny_block, & - integral_order, icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & - xp, yp) + call triangle_coordinates (nx_block, ny_block, & + integral_order, icellsng(:,iblk), & + indxing(:,:), indxjng(:,:), & + xp, yp) - !------------------------------------------------------------------- - ! Compute the transport across east cell edges by summing contributions - ! from each triangle. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute the transport across east cell edges by summing contributions + ! from each triangle. + !------------------------------------------------------------------- ! 
open water - call transport_integrals(nx_block, ny_block, & - ntrace, icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & - tracer_type, depend, & - integral_order, triarea, & - iflux, jflux, & - xp, yp, & - mc(:,:,0,iblk), mx (:,:,0,iblk), & - my(:,:,0,iblk), mflxe(:,:,0)) + call transport_integrals(nx_block, ny_block, & + ntrace, icellsng (:,iblk), & + indxing(:,:), indxjng(:,:), & + tracer_type, depend, & + integral_order, triarea, & + iflux, jflux, & + xp, yp, & + mc(:,:,0,iblk), mx (:,:,0,iblk), & + my(:,:,0,iblk), mflxe(:,:,0)) ! ice categories do n = 1, ncat call transport_integrals & (nx_block, ny_block, & ntrace, icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & + indxing(:,:), indxjng(:,:), & tracer_type, depend, & integral_order, triarea, & iflux, jflux, & xp, yp, & - mc(:,:, n,iblk), mx (:,:, n,iblk), & - my(:,:, n,iblk), mflxe(:,:, n), & - tc(:,:,:,n,iblk), tx (:,:,:,n,iblk), & + mc(:,:, n,iblk), mx (:,:, n,iblk), & + my(:,:, n,iblk), mflxe (:,:, n), & + tc(:,:,:,n,iblk), tx (:,:,:,n,iblk), & ty(:,:,:,n,iblk), mtflxe(:,:,:,n)) enddo - !------------------------------------------------------------------- - ! Repeat for north edges - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Repeat for north edges + !------------------------------------------------------------------- edge = 'north' call locate_triangles(nx_block, ny_block, & ilo, ihi, jlo, jhi, & nghost, edge, & - icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & - dpx (:,:,iblk), dpy (:,:,iblk), & - dxu (:,:,iblk), dyu (:,:,iblk), & - xp(:,:,:,:), yp(:,:,:,:), & + icellsng(:,iblk), & + indxing(:,:), indxjng(:,:), & + dpx (:,:,iblk), dpy (:,:,iblk), & + dxu (:,:,iblk), dyu (:,:,iblk), & + xp (:,:,:,:), yp(:,:,:,:), & iflux, jflux, & triarea, & l_fixed_area, edgearea_n(:,:)) - call triangle_coordinates (nx_block, ny_block, & - integral_order, icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & - xp, yp) + call triangle_coordinates (nx_block, ny_block, & + integral_order, icellsng(:,iblk), & + indxing(:,:), indxjng(:,:), & + xp, yp) ! open water - call transport_integrals(nx_block, ny_block, & - ntrace, icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & - tracer_type, depend, & - integral_order, triarea, & - iflux, jflux, & - xp, yp, & - mc(:,:,0,iblk), mx(:,:,0,iblk), & - my(:,:,0,iblk), mflxn(:,:,0)) + call transport_integrals(nx_block, ny_block, & + ntrace, icellsng (:,iblk), & + indxing(:,:), indxjng(:,:), & + tracer_type, depend, & + integral_order, triarea, & + iflux, jflux, & + xp, yp, & + mc(:,:,0,iblk), mx (:,:,0,iblk), & + my(:,:,0,iblk), mflxn(:,:,0)) ! ice categories do n = 1, ncat call transport_integrals & (nx_block, ny_block, & ntrace, icellsng (:,iblk), & - indxing(:,:), indxjng(:,:), & + indxing(:,:), indxjng(:,:), & tracer_type, depend, & integral_order, triarea, & iflux, jflux, & xp, yp, & - mc(:,:, n,iblk), mx (:,:, n,iblk), & - my(:,:, n,iblk), mflxn(:,:, n), & - tc(:,:,:,n,iblk), tx (:,:,:,n,iblk), & + mc(:,:, n,iblk), mx (:,:, n,iblk), & + my(:,:, n,iblk), mflxn (:,:, n), & + tc(:,:,:,n,iblk), tx (:,:,:,n,iblk), & ty(:,:,:,n,iblk), mtflxn(:,:,:,n)) enddo ! n - !------------------------------------------------------------------- - ! Update the ice area and tracers. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Update the ice area and tracers. 
+ !------------------------------------------------------------------- ! open water call update_fields (nx_block, ny_block, & @@ -827,14 +832,14 @@ subroutine horizontal_remap (dt, ntrace, & mm (:,:,0,iblk)) if (l_stop) then - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) write (nu_diag,*) 'istep1, my_task, iblk, cat =', & istep1, my_task, iblk, '0' write (nu_diag,*) 'Global block:', this_block%block_id - if (istop > 0 .and. jstop > 0) & + if (istop > 0 .and. jstop > 0) & write(nu_diag,*) 'Global i and j:', & this_block%i_glob(istop), & - this_block%j_glob(jstop) + this_block%j_glob(jstop) call abort_ice (subname//'ERROR: negative area (open water)') endif @@ -845,12 +850,12 @@ subroutine horizontal_remap (dt, ntrace, & ilo, ihi, jlo, jhi, & ntrace, & tracer_type, depend, & - tarear(:,:,iblk), l_stop, & + tarear (:,:,iblk), l_stop, & istop, jstop, & - mflxe(:,:, n), mflxn(:,:, n), & - mm (:,:, n,iblk), & + mflxe (:,:, n), mflxn (:,:, n), & + mm (:,:, n,iblk), & mtflxe(:,:,:,n), mtflxn(:,:,:,n), & - tm (:,:,:,n,iblk)) + tm (:,:,:,n,iblk)) if (l_stop) then write (nu_diag,*) 'istep1, my_task, iblk, cat =', & @@ -859,7 +864,7 @@ subroutine horizontal_remap (dt, ntrace, & if (istop > 0 .and. jstop > 0) & write(nu_diag,*) 'Global i and j:', & this_block%i_glob(istop), & - this_block%j_glob(jstop) + this_block%j_glob(jstop) call abort_ice (subname//'ERROR: negative area (ice)') endif enddo ! n @@ -884,53 +889,53 @@ end subroutine horizontal_remap ! ! author William H. Lipscomb, LANL - subroutine make_masks (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - nghost, ntrace, & - has_dependents, & - icells, & - indxi, indxj, & - mm, mmask, & - tm, tmask) + subroutine make_masks (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, ntrace, & + has_dependents, & + icells, & + indxi, indxj, & + mm, mmask, & + tm, tmask) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block ,&! block dimensions - ilo,ihi,jlo,jhi ,&! beginning and end of physical domain - nghost ,&! number of ghost cells - ntrace ! number of tracers in use + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain + nghost , & ! number of ghost cells + ntrace ! number of tracers in use logical (kind=log_kind), dimension (ntrace), intent(in) :: & - has_dependents ! true if a tracer has dependent tracers + has_dependents ! true if a tracer has dependent tracers integer (kind=int_kind), dimension(0:ncat), intent(out) :: & - icells ! number of cells with ice + icells ! number of cells with ice integer (kind=int_kind), dimension(nx_block*ny_block,0:ncat), intent(out) :: & - indxi ,&! compressed i/j indices - indxj + indxi , & ! compressed i/j indices + indxj real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat), intent(in) :: & - mm ! mean ice area in each grid cell + mm ! mean ice area in each grid cell real (kind=dbl_kind), dimension (nx_block,ny_block,0:ncat), intent(out) :: & - mmask ! = 1. if ice is present, else = 0. + mmask ! = 1. if ice is present, else = 0. real (kind=dbl_kind), dimension (nx_block, ny_block, ntrace, ncat), intent(in), optional :: & - tm ! mean tracer values in each grid cell + tm ! mean tracer values in each grid cell real (kind=dbl_kind), dimension (nx_block, ny_block, ntrace, ncat), intent(out), optional :: & - tmask ! = 1. if tracer is present, else = 0. + tmask ! = 1. if tracer is present, else = 0. ! local variables integer (kind=int_kind) :: & - i, j, ij ,&! horizontal indices - n ,&! 
ice category index - nt ! tracer index + i, j, ij , & ! horizontal indices + n , & ! ice category index + nt ! tracer index real (kind=dbl_kind) :: & - puny ! + puny ! character(len=*), parameter :: subname = '(make_masks)' @@ -946,9 +951,9 @@ subroutine make_masks (nx_block, ny_block, & enddo enddo - !------------------------------------------------------------------- - ! open water mask - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! open water mask + !------------------------------------------------------------------- icells(0) = 0 do j = 1, ny_block @@ -967,9 +972,9 @@ subroutine make_masks (nx_block, ny_block, & do n = 1, ncat - !------------------------------------------------------------------- - ! Find grid cells where ice is present. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Find grid cells where ice is present. + !------------------------------------------------------------------- icells(n) = 0 do j = 1, ny_block @@ -983,9 +988,9 @@ subroutine make_masks (nx_block, ny_block, & enddo enddo - !------------------------------------------------------------------- - ! ice area mask - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! ice area mask + !------------------------------------------------------------------- mmask(:,:,n) = c0 do ij = 1, icells(n) @@ -994,9 +999,9 @@ subroutine make_masks (nx_block, ny_block, & mmask(i,j,n) = c1 enddo - !------------------------------------------------------------------- - ! tracer masks - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! tracer masks + !------------------------------------------------------------------- if (present(tm)) then @@ -1016,11 +1021,11 @@ subroutine make_masks (nx_block, ny_block, & endif ! present(tm) - !------------------------------------------------------------------- - ! Redefine icells - ! For nghost = 1, exclude ghost cells - ! For nghost = 2, include one layer of ghost cells - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Redefine icells + ! For nghost = 1, exclude ghost cells + ! For nghost = 2, include one layer of ghost cells + !------------------------------------------------------------------- icells(n) = 0 do j = jlo-nghost+1, jhi+nghost-1 @@ -1033,7 +1038,7 @@ subroutine make_masks (nx_block, ny_block, & endif ! mm > puny enddo enddo - + enddo ! n end subroutine make_masks @@ -1065,109 +1070,109 @@ subroutine construct_fields (nx_block, ny_block, & tmask) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block ,&! block dimensions - ilo,ihi,jlo,jhi ,&! beginning and end of physical domain - nghost ,&! number of ghost cells - ntrace ,&! number of tracers in use + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain + nghost , & ! number of ghost cells + ntrace , & ! number of tracers in use icells ! number of cells with mass integer (kind=int_kind), dimension (ntrace), intent(in) :: & - tracer_type ,&! = 1, 2, or 3 (see comments above) - depend ! tracer dependencies (see above) + tracer_type , & ! = 1, 2, or 3 (see comments above) + depend ! 
tracer dependencies (see above) logical (kind=log_kind), dimension (ntrace), intent(in) :: & - has_dependents ! true if a tracer has dependent tracers + has_dependents ! true if a tracer has dependent tracers integer (kind=int_kind), dimension(nx_block*ny_block), intent(in) :: & - indxi ,&! compressed i/j indices + indxi , & ! compressed i/j indices indxj real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - hm ,&! land/boundary mask, thickness (T-cell) - xav, yav ,&! mean T-cell values of x, y - xxav, yyav ! mean T-cell values of xx, yy -! xyav, ,&! mean T-cell values of xy + hm , & ! land/boundary mask, thickness (T-cell) + xav, yav , & ! mean T-cell values of x, y + xxav, yyav ! mean T-cell values of xx, yy +! xyav, , & ! mean T-cell values of xy ! xxxav,xxyav,xyyav,yyyav ! mean T-cell values of xxx, xxy, xyy, yyy real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - mm ,&! mean value of mass field - mmask ! = 1. if ice is present, = 0. otherwise + mm , & ! mean value of mass field + mmask ! = 1. if ice is present, = 0. otherwise real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace), intent(in), optional :: & - tm ,&! mean tracer - tmask ! = 1. if tracer is present, = 0. otherwise + tm , & ! mean tracer + tmask ! = 1. if tracer is present, = 0. otherwise real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - mc ,&! mass value at geometric center of cell - mx, my ! limited derivative of mass wrt x and y + mc , & ! mass value at geometric center of cell + mx, my ! limited derivative of mass wrt x and y real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace), intent(out), optional :: & - tc ,&! tracer at geometric center of cell - tx, ty ! limited derivative of tracer wrt x and y + tc , & ! tracer at geometric center of cell + tx, ty ! limited derivative of tracer wrt x and y ! local variables integer (kind=int_kind) :: & - i, j ,&! horizontal indices - nt, nt1 ,&! tracer indices - ij ! combined i/j horizontal index + i, j , & ! horizontal indices + nt, nt1 , & ! tracer indices + ij ! combined i/j horizontal index real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - mxav ,&! x coordinate of center of mass - myav ! y coordinate of center of mass + mxav , & ! x coordinate of center of mass + myav ! y coordinate of center of mass real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace) :: & - mtxav ,&! x coordinate of center of mass*tracer - mtyav ! y coordinate of center of mass*tracer + mtxav , & ! x coordinate of center of mass*tracer + mtyav ! y coordinate of center of mass*tracer real (kind=dbl_kind) :: & puny, & - w1, w2, w3, w7 ! work variables + w1, w2, w3, w7 ! work variables character(len=*), parameter :: subname = '(construct_fields)' - !------------------------------------------------------------------- - ! Compute field values at the geometric center of each grid cell, - ! and compute limited gradients in the x and y directions. - ! - ! For second order accuracy, each state variable is approximated as - ! a field varying linearly over x and y within each cell. For each - ! category, the integrated value of m(x,y) over the cell must - ! equal mm(i,j,n)*tarea(i,j), where tarea(i,j) is the cell area. - ! Similarly, the integrated value of m(x,y)*t(x,y) must equal - ! the total mass*tracer, mm(i,j,n)*tm(i,j,n)*tarea(i,j). - ! - ! These integral conditions are satisfied for linear fields if we - ! stipulate the following: - ! (1) The mean mass, mm, is equal to the mass at the cell centroid. - ! 
(2) The mean value tm1 of type 1 tracers is equal to the value - ! at the center of mass. - ! (3) The mean value tm2 of type 2 tracers is equal to the value - ! at the center of mass*tm1, where tm2 depends on tm1. - ! (See comments at the top of the module.) - ! - ! We want to find the value of each state variable at a standard - ! reference point, which we choose to be the geometric center of - ! the cell. The geometric center is located at the intersection - ! of the line joining the midpoints of the north and south edges - ! with the line joining the midpoints of the east and west edges. - ! To find the value at the geometric center, we must know the - ! location of the cell centroid/center of mass, along with the - ! mean value and the gradients with respect to x and y. - ! - ! The cell gradients are first computed from the difference between - ! values in the neighboring cells, then limited by requiring that - ! no new extrema are created within the cell. - ! - ! For rectangular coordinates the centroid and the geometric - ! center coincide, which means that some of the equations in this - ! subroutine could be simplified. However, the full equations - ! are retained for generality. - !------------------------------------------------------------------- - - !------------------------------------------------------------------- - ! Initialize - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute field values at the geometric center of each grid cell, + ! and compute limited gradients in the x and y directions. + ! + ! For second order accuracy, each state variable is approximated as + ! a field varying linearly over x and y within each cell. For each + ! category, the integrated value of m(x,y) over the cell must + ! equal mm(i,j,n)*tarea(i,j), where tarea(i,j) is the cell area. + ! Similarly, the integrated value of m(x,y)*t(x,y) must equal + ! the total mass*tracer, mm(i,j,n)*tm(i,j,n)*tarea(i,j). + ! + ! These integral conditions are satisfied for linear fields if we + ! stipulate the following: + ! (1) The mean mass, mm, is equal to the mass at the cell centroid. + ! (2) The mean value tm1 of type 1 tracers is equal to the value + ! at the center of mass. + ! (3) The mean value tm2 of type 2 tracers is equal to the value + ! at the center of mass*tm1, where tm2 depends on tm1. + ! (See comments at the top of the module.) + ! + ! We want to find the value of each state variable at a standard + ! reference point, which we choose to be the geometric center of + ! the cell. The geometric center is located at the intersection + ! of the line joining the midpoints of the north and south edges + ! with the line joining the midpoints of the east and west edges. + ! To find the value at the geometric center, we must know the + ! location of the cell centroid/center of mass, along with the + ! mean value and the gradients with respect to x and y. + ! + ! The cell gradients are first computed from the difference between + ! values in the neighboring cells, then limited by requiring that + ! no new extrema are created within the cell. + ! + ! For rectangular coordinates the centroid and the geometric + ! center coincide, which means that some of the equations in this + ! subroutine could be simplified. However, the full equations + ! are retained for generality. 
+ !------------------------------------------------------------------- + + !------------------------------------------------------------------- + ! Initialize + !------------------------------------------------------------------- call icepack_query_parameters(puny_out=puny) call icepack_warnings_flush(nu_diag) @@ -1195,7 +1200,7 @@ subroutine construct_fields (nx_block, ny_block, & enddo enddo endif - + ! limited gradient of mass field in each cell (except masked cells) ! Note: The gradient is computed in scaled coordinates with ! dxt = dyt = hte = htn = 1. @@ -1212,7 +1217,7 @@ subroutine construct_fields (nx_block, ny_block, & j = indxj(ij) ! mass field at geometric center -!echmod: xav = yav = 0 + ! echmod: xav = yav = 0 mc(i,j) = mm(i,j) ! mc(i,j) = mm(i,j) - xav(i,j)*mx(i,j) & @@ -1224,129 +1229,130 @@ subroutine construct_fields (nx_block, ny_block, & if (present(tm)) then - do ij = 1,icells ! cells with mass - i = indxi(ij) - j = indxj(ij) - - ! center of mass (mxav,myav) for each cell -!echmod: xyav = 0 - mxav(i,j) = (mx(i,j)*xxav(i,j) & - + mc(i,j)*xav (i,j)) / mm(i,j) - myav(i,j) = (my(i,j)*yyav(i,j) & - + mc(i,j)*yav(i,j)) / mm(i,j) - -! mxav(i,j) = (mx(i,j)*xxav(i,j) & -! + my(i,j)*xyav(i,j) & -! + mc(i,j)*xav (i,j)) / mm(i,j) -! myav(i,j) = (mx(i,j)*xyav(i,j) & -! + my(i,j)*yyav(i,j) & -! + mc(i,j)*yav(i,j)) / mm(i,j) - enddo - - do nt = 1, ntrace - - if (tracer_type(nt)==1) then ! independent of other tracers - - call limited_gradient(nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - nghost, & - tm(:,:,nt), mmask, & - mxav, myav, & - tx(:,:,nt), ty(:,:,nt)) - - if (has_dependents(nt)) then ! need center of area*tracer - - do j = 1, ny_block - do i = 1, nx_block - mtxav(i,j,nt) = c0 - mtyav(i,j,nt) = c0 - enddo - enddo - - do ij = 1, icells ! Note: no tx or ty in ghost cells - ! (bound calls are later) - i = indxi(ij) - j = indxj(ij) + do ij = 1,icells ! cells with mass + i = indxi(ij) + j = indxj(ij) + + ! center of mass (mxav,myav) for each cell + ! echmod: xyav = 0 + mxav(i,j) = (mx(i,j)*xxav(i,j) & + + mc(i,j)*xav (i,j)) / mm(i,j) + myav(i,j) = (my(i,j)*yyav(i,j) & + + mc(i,j)*yav(i,j)) / mm(i,j) + +! mxav(i,j) = (mx(i,j)*xxav(i,j) & +! + my(i,j)*xyav(i,j) & +! + mc(i,j)*xav (i,j)) / mm(i,j) +! myav(i,j) = (mx(i,j)*xyav(i,j) & +! + my(i,j)*yyav(i,j) & +! + mc(i,j)*yav(i,j)) / mm(i,j) + enddo - ! tracer value at geometric center - tc(i,j,nt) = tm(i,j,nt) - tx(i,j,nt)*mxav(i,j) & - - ty(i,j,nt)*myav(i,j) - - if (tmask(i,j,nt) > puny) then - - ! center of area*tracer - w1 = mc(i,j)*tc(i,j,nt) - w2 = mc(i,j)*tx(i,j,nt) & - + mx(i,j)*tc(i,j,nt) - w3 = mc(i,j)*ty(i,j,nt) & - + my(i,j)*tc(i,j,nt) -! w4 = mx(i,j)*tx(i,j,nt) -! w5 = mx(i,j)*ty(i,j,nt) & -! + my(i,j)*tx(i,j,nt) -! w6 = my(i,j)*ty(i,j,nt) - w7 = c1 / (mm(i,j)*tm(i,j,nt)) -!echmod: grid arrays = 0 - mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j)) & - * w7 - mtyav(i,j,nt) = (w1*yav(i,j) + w3*yyav(i,j)) & - * w7 - -! mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j) & -! + w3*xyav (i,j) + w4*xxxav(i,j) & -! + w5*xxyav(i,j) + w6*xyyav(i,j)) & -! * w7 -! mtyav(i,j,nt) = (w1*yav(i,j) + w2*xyav (i,j) & -! + w3*yyav(i,j) + w4*xxyav(i,j) & -! + w5*xyyav(i,j) + w6*yyyav(i,j)) & -! * w7 - endif ! tmask + do nt = 1, ntrace - enddo ! ij + if (tracer_type(nt)==1) then ! independent of other tracers - else ! no dependents + call limited_gradient(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, & + tm(:,:,nt), mmask, & + mxav, myav, & + tx(:,:,nt), ty(:,:,nt)) - do ij = 1, icells ! 
mass is present - i = indxi(ij) - j = indxj(ij) + if (has_dependents(nt)) then ! need center of area*tracer - ! tracer value at geometric center - tc(i,j,nt) = tm(i,j,nt) - tx(i,j,nt)*mxav(i,j) & - - ty(i,j,nt)*myav(i,j) - enddo ! ij + do j = 1, ny_block + do i = 1, nx_block + mtxav(i,j,nt) = c0 + mtyav(i,j,nt) = c0 + enddo + enddo - endif ! has_dependents + do ij = 1, icells ! Note: no tx or ty in ghost cells + ! (bound calls are later) + i = indxi(ij) + j = indxj(ij) + + ! tracer value at geometric center + tc(i,j,nt) = tm(i,j,nt) - tx(i,j,nt)*mxav(i,j) & + - ty(i,j,nt)*myav(i,j) + + if (tmask(i,j,nt) > puny) then + + ! center of area*tracer + w1 = mc(i,j)*tc(i,j,nt) + w2 = mc(i,j)*tx(i,j,nt) & + + mx(i,j)*tc(i,j,nt) + w3 = mc(i,j)*ty(i,j,nt) & + + my(i,j)*tc(i,j,nt) +! w4 = mx(i,j)*tx(i,j,nt) +! w5 = mx(i,j)*ty(i,j,nt) & +! + my(i,j)*tx(i,j,nt) +! w6 = my(i,j)*ty(i,j,nt) + w7 = c1 / (mm(i,j)*tm(i,j,nt)) + ! echmod: grid arrays = 0 + mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j)) & + * w7 + mtyav(i,j,nt) = (w1*yav(i,j) + w3*yyav(i,j)) & + * w7 + +! mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j) & +! + w3*xyav (i,j) + w4*xxxav(i,j) & +! + w5*xxyav(i,j) + w6*xyyav(i,j)) & +! * w7 +! mtyav(i,j,nt) = (w1*yav(i,j) + w2*xyav (i,j) & +! + w3*yyav(i,j) + w4*xxyav(i,j) & +! + w5*xyyav(i,j) + w6*yyyav(i,j)) & +! * w7 + endif ! tmask + + enddo ! ij + + else ! no dependents + + do ij = 1, icells ! mass is present + i = indxi(ij) + j = indxj(ij) + + ! tracer value at geometric center + tc(i,j,nt) = tm(i,j,nt) - tx(i,j,nt)*mxav(i,j) & + - ty(i,j,nt)*myav(i,j) + enddo ! ij + + endif ! has_dependents + + elseif (tracer_type(nt)==2) then ! tracer nt depends on nt1 + nt1 = depend(nt) - elseif (tracer_type(nt)==2) then ! tracer nt depends on nt1 - nt1 = depend(nt) + call limited_gradient(nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + nghost, & + tm (:,:,nt), tmask(:,:,nt1), & + mtxav(:,:,nt1), mtyav(:,:,nt1), & + tx (:,:,nt), ty (:,:,nt)) - call limited_gradient(nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - nghost, & - tm(:,:,nt), tmask(:,:,nt1), & - mtxav(:,:,nt1), mtyav(:,:,nt1), & - tx(:,:,nt), ty(:,:,nt)) + do ij = 1, icells ! ice is present + i = indxi(ij) + j = indxj(ij) + tc(i,j,nt) = tm(i,j,nt) & + - tx(i,j,nt) * mtxav(i,j,nt1) & + - ty(i,j,nt) * mtyav(i,j,nt1) + enddo ! ij - do ij = 1, icells ! ice is present - i = indxi(ij) - j = indxj(ij) - tc(i,j,nt) = tm(i,j,nt) & - - tx(i,j,nt) * mtxav(i,j,nt1) & - - ty(i,j,nt) * mtyav(i,j,nt1) - enddo ! ij + elseif (tracer_type(nt)==3) then ! upwind approx; gradient = 0 - elseif (tracer_type(nt)==3) then ! upwind approx; gradient = 0 + do ij = 1, icells + i = indxi(ij) + j = indxj(ij) - do ij = 1, icells - i = indxi(ij) - j = indxj(ij) + tc(i,j,nt) = tm(i,j,nt) +! tx(i,j,nt) = c0 ! already initialized to 0. +! ty(i,j,nt) = c0 + enddo ! ij - tc(i,j,nt) = tm(i,j,nt) -! tx(i,j,nt) = c0 ! already initialized to 0. -! ty(i,j,nt) = c0 - enddo ! ij + endif ! tracer_type - endif ! tracer_type - enddo ! ntrace + enddo ! ntrace endif ! present (tm) @@ -1371,43 +1377,42 @@ subroutine limited_gradient (nx_block, ny_block, & gx, gy) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - ilo,ihi,jlo,jhi ,&! beginning and end of physical domain - nghost ! number of ghost cells + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain + nghost ! number of ghost cells real (kind=dbl_kind), dimension (nx_block,ny_block), intent (in) :: & - phi ,&! 
input tracer field (mean values in each grid cell) - cnx ,&! x-coordinate of phi relative to geometric center of cell - cny ,&! y-coordinate of phi relative to geometric center of cell - phimask - ! phimask(i,j) = 1 if phi(i,j) has physical meaning, = 0 otherwise. - ! For instance, aice has no physical meaning in land cells, - ! and hice no physical meaning where aice = 0. + phi , & ! input tracer field (mean values in each grid cell) + cnx , & ! x-coordinate of phi relative to geometric center of cell + cny , & ! y-coordinate of phi relative to geometric center of cell + phimask ! phimask(i,j) = 1 if phi(i,j) has physical meaning, = 0 otherwise. + ! For instance, aice has no physical meaning in land cells, + ! and hice no physical meaning where aice = 0. real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - gx ,&! limited x-direction gradient - gy ! limited y-direction gradient + gx , & ! limited x-direction gradient + gy ! limited y-direction gradient ! local variables integer (kind=int_kind) :: & - i, j, ij ,&! standard indices - icells ! number of cells to limit + i, j, ij , & ! standard indices + icells ! number of cells to limit integer (kind=int_kind), dimension(nx_block*ny_block) :: & - indxi, indxj ! combined i/j horizontal indices + indxi, indxj ! combined i/j horizontal indices real (kind=dbl_kind) :: & - phi_nw, phi_n, phi_ne ,&! values of phi in 8 neighbor cells - phi_w, phi_e ,& - phi_sw, phi_s, phi_se ,& - qmn, qmx ,&! min and max value of phi within grid cell - pmn, pmx ,&! min and max value of phi among neighbor cells - w1, w2, w3, w4 ! work variables + phi_nw, phi_n, phi_ne , & ! values of phi in 8 neighbor cells + phi_w, phi_e , & + phi_sw, phi_s, phi_se , & + qmn, qmx , & ! min and max value of phi within grid cell + pmn, pmx , & ! min and max value of phi among neighbor cells + w1, w2, w3, w4 ! work variables real (kind=dbl_kind) :: & - puny, & ! - gxtmp, gytmp ! temporary term for x- and y- limited gradient + puny , & ! + gxtmp, gytmp ! temporary term for x- and y- limited gradient character(len=*), parameter :: subname = '(limited_gradient)' @@ -1441,22 +1446,22 @@ subroutine limited_gradient (nx_block, ny_block, & ! Note: phimask = 1. or 0. If phimask = 1., use the true value; ! if phimask = 0., use the home cell value so that non-physical ! values of phi do not contribute to the gradient. 
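
(Editorial sketch, not part of the patch: the branch-free masking described in the note above can be exercised with scalar stand-ins. phi_home, phi_nbr, and mask are hypothetical stand-ins for phi(i,j), a neighbor value such as phi(i,j+1), and phimask at that neighbor.)

   program phimask_idiom
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind) :: phi_home, phi_nbr, mask, phi_eff

      phi_home = 2.0_dbl_kind          ! physically meaningful home-cell value
      phi_nbr  = 1.0e30_dbl_kind       ! garbage, e.g. hice where aice = 0
      mask     = 0.0_dbl_kind          ! neighbor flagged as non-physical

      ! branch-free substitution: a masked neighbor falls back to the home
      ! value, so it contributes zero to any difference used in the gradient
      phi_eff = mask*phi_nbr + (1.0_dbl_kind - mask)*phi_home

      print *, phi_eff                 ! prints 2.0; gradient contribution vanishes
   end program phimask_idiom

(With mask = 1.0 the true neighbor value is used, which is exactly the behavior of the phi_nw .. phi_se assignments below.)
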
- phi_nw = phimask(i-1,j+1) * phi(i-1,j+1) & - + (c1-phimask(i-1,j+1))* phi(i,j) - phi_n = phimask(i,j+1) * phi(i,j+1) & - + (c1-phimask(i,j+1)) * phi(i,j) - phi_ne = phimask(i+1,j+1) * phi(i+1,j+1) & - + (c1-phimask(i+1,j+1))* phi(i,j) - phi_w = phimask(i-1,j) * phi(i-1,j) & - + (c1-phimask(i-1,j)) * phi(i,j) - phi_e = phimask(i+1,j) * phi(i+1,j) & - + (c1-phimask(i+1,j)) * phi(i,j) - phi_sw = phimask(i-1,j-1) * phi(i-1,j-1) & - + (c1-phimask(i-1,j-1))* phi(i,j) - phi_s = phimask(i,j-1) * phi(i,j-1) & - + (c1-phimask(i,j-1)) * phi(i,j) - phi_se = phimask(i+1,j-1) * phi(i+1,j-1) & - + (c1-phimask(i+1,j-1))* phi(i,j) + phi_nw = phimask(i-1,j+1) * phi(i-1,j+1) & + + (c1-phimask(i-1,j+1))* phi(i ,j ) + phi_n = phimask(i ,j+1) * phi(i ,j+1) & + + (c1-phimask(i ,j+1))* phi(i ,j ) + phi_ne = phimask(i+1,j+1) * phi(i+1,j+1) & + + (c1-phimask(i+1,j+1))* phi(i ,j ) + phi_w = phimask(i-1,j ) * phi(i-1,j ) & + + (c1-phimask(i-1,j ))* phi(i ,j ) + phi_e = phimask(i+1,j ) * phi(i+1,j ) & + + (c1-phimask(i+1,j ))* phi(i ,j ) + phi_sw = phimask(i-1,j-1) * phi(i-1,j-1) & + + (c1-phimask(i-1,j-1))* phi(i ,j ) + phi_s = phimask(i ,j-1) * phi(i ,j-1) & + + (c1-phimask(i ,j-1))* phi(i ,j ) + phi_se = phimask(i+1,j-1) * phi(i+1,j-1) & + + (c1-phimask(i+1,j-1))* phi(i ,j ) ! unlimited gradient components ! (factors of two cancel out) @@ -1527,34 +1532,34 @@ subroutine departure_points (nx_block, ny_block, & istop, jstop) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - ilo,ihi,jlo,jhi, &! beginning and end of physical domain - nghost ! number of ghost cells + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + nghost ! number of ghost cells real (kind=dbl_kind), intent(in) :: & dt ! time step (s) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvel ,&! x-component of velocity (m/s) - vvel ,&! y-component of velocity (m/s) - dxu ,&! E-W dimensions of U-cell (m) - dyu ,&! N-S dimensions of U-cell (m) - HTN ,&! length of north face of T-cell (m) - HTE ! length of east face of T-cell (m) + uvel , & ! x-component of velocity (m/s) + vvel , & ! y-component of velocity (m/s) + dxu , & ! E-W dimensions of U-cell (m) + dyu , & ! N-S dimensions of U-cell (m) + HTN , & ! length of north face of T-cell (m) + HTE ! length of east face of T-cell (m) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - dpx ,&! coordinates of departure points (m) - dpy ! coordinates of departure points (m) + dpx , & ! coordinates of departure points (m) + dpy ! coordinates of departure points (m) logical (kind=log_kind), intent(in) :: & - l_dp_midpt ! if true, find departure points using - ! corrected midpoint velocity + l_dp_midpt ! if true, find departure points using + ! corrected midpoint velocity logical (kind=log_kind), intent(inout) :: & - l_stop ! if true, abort on return + l_stop ! if true, abort on return integer (kind=int_kind), intent(inout) :: & - istop, jstop ! indices of grid cell where model aborts + istop, jstop ! indices of grid cell where model aborts ! local variables @@ -1562,20 +1567,20 @@ subroutine departure_points (nx_block, ny_block, & i, j, i2, j2 ! horizontal indices real (kind=dbl_kind) :: & - mpx, mpy ,&! coordinates of midpoint of back trajectory, + mpx, mpy , & ! coordinates of midpoint of back trajectory, ! relative to cell corner - mpxt, mpyt ,&! midpoint coordinates relative to cell center + mpxt, mpyt , & ! midpoint coordinates relative to cell center ump, vmp ! 
corrected velocity at midpoint character(len=*), parameter :: subname = '(departure_points)' - !------------------------------------------------------------------- - ! Estimate departure points. - ! This estimate is 1st-order accurate in time; improve accuracy by - ! using midpoint approximation (to add later). - ! For nghost = 1, loop over physical cells and update ghost cells later. - ! For nghost = 2, loop over a layer of ghost cells and skip update. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Estimate departure points. + ! This estimate is 1st-order accurate in time; improve accuracy by + ! using midpoint approximation (to add later). + ! For nghost = 1, loop over physical cells and update ghost cells later. + ! For nghost = 2, loop over a layer of ghost cells and skip update. + !------------------------------------------------------------------- dpx(:,:) = c0 dpy(:,:) = c0 @@ -1610,84 +1615,84 @@ subroutine departure_points (nx_block, ny_block, & return endif - if (l_dp_midpt) then ! find dep pts using corrected midpt velocity - - do j = jlo-nghost+1, jhi+nghost-1 - do i = ilo-nghost+1, ihi+nghost-1 - if (uvel(i,j)/=c0 .or. vvel(i,j)/=c0) then - - !------------------------------------------------------------------- - ! Scale departure points to coordinate system in which grid cells - ! have sides of unit length. - !------------------------------------------------------------------- - - dpx(i,j) = dpx(i,j) / dxu(i,j) - dpy(i,j) = dpy(i,j) / dyu(i,j) - - !------------------------------------------------------------------- - ! Estimate midpoint of backward trajectory relative to corner (i,j). - !------------------------------------------------------------------- - - mpx = p5 * dpx(i,j) - mpy = p5 * dpy(i,j) - - !------------------------------------------------------------------- - ! Determine the indices (i2,j2) of the cell where the trajectory lies. - ! Compute the coordinates of the midpoint of the backward trajectory - ! relative to the cell center in a stretch coordinate system - ! with vertices at (1/2, 1/2), (1/2, -1/2), etc. - !------------------------------------------------------------------- - - if (mpx >= c0 .and. mpy >= c0) then ! cell (i+1,j+1) - i2 = i+1 - j2 = j+1 - mpxt = mpx - p5 - mpyt = mpy - p5 - elseif (mpx < c0 .and. mpy < c0) then ! cell (i,j) - i2 = i - j2 = j - mpxt = mpx + p5 - mpyt = mpy + p5 - elseif (mpx >= c0 .and. mpy < c0) then ! cell (i+1,j) - i2 = i+1 - j2 = j - mpxt = mpx - p5 - mpyt = mpy + p5 - elseif (mpx < c0 .and. mpy >= c0) then ! cell (i,j+1) - i2 = i - j2 = j+1 - mpxt = mpx + p5 - mpyt = mpy - p5 - endif - - !------------------------------------------------------------------- - ! Using a bilinear approximation, estimate the velocity at the - ! trajectory midpoint in the (i2,j2) reference frame. - !------------------------------------------------------------------- - - ump = uvel(i2-1,j2-1)*(mpxt-p5)*(mpyt-p5) & - - uvel(i2, j2-1)*(mpxt+p5)*(mpyt-p5) & - + uvel(i2, j2 )*(mpxt+p5)*(mpyt+p5) & - - uvel(i2-1,j2 )*(mpxt-p5)*(mpyt+p5) - - vmp = vvel(i2-1,j2-1)*(mpxt-p5)*(mpyt-p5) & - - vvel(i2, j2-1)*(mpxt+p5)*(mpyt-p5) & - + vvel(i2, j2 )*(mpxt+p5)*(mpyt+p5) & - - vvel(i2-1,j2 )*(mpxt-p5)*(mpyt+p5) - - !------------------------------------------------------------------- - ! Use the midpoint velocity to estimate the coordinates of the - ! departure point relative to corner (i,j). 
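
(Editorial sketch, not part of the patch: the four-term bilinear form above, on a cell with corners at (+-1/2, +-1/2), has weights that sum to one and reproduce each corner velocity exactly. u11, u21, u22, u12 are hypothetical stand-ins for uvel(i2-1,j2-1), uvel(i2,j2-1), uvel(i2,j2), uvel(i2-1,j2).)

   program bilinear_corner_check
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind), parameter :: p5 = 0.5_dbl_kind
      real (kind=dbl_kind) :: u11, u21, u22, u12, x, y, ump

      u11 = 1.0_dbl_kind   ! corner (i2-1,j2-1)
      u21 = 2.0_dbl_kind   ! corner (i2  ,j2-1)
      u22 = 3.0_dbl_kind   ! corner (i2  ,j2  )
      u12 = 4.0_dbl_kind   ! corner (i2-1,j2  )

      x = -p5              ! evaluate at the lower-left corner
      y = -p5

      ! same sign pattern as the ump/vmp expressions above
      ump =  u11*(x-p5)*(y-p5) &
           - u21*(x+p5)*(y-p5) &
           + u22*(x+p5)*(y+p5) &
           - u12*(x-p5)*(y+p5)

      print *, ump         ! prints 1.0 = u11; the four weights always sum to 1
   end program bilinear_corner_check

(Expanding the weights shows their sum is identically 1, so the interpolant is exact for constant and linear velocity fields.)
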
- !------------------------------------------------------------------- - - dpx(i,j) = -dt * ump - dpy(i,j) = -dt * vmp - - endif ! nonzero velocity - - enddo ! i - enddo ! j - + if (l_dp_midpt) then ! find dep pts using corrected midpt velocity + + do j = jlo-nghost+1, jhi+nghost-1 + do i = ilo-nghost+1, ihi+nghost-1 + if (uvel(i,j)/=c0 .or. vvel(i,j)/=c0) then + + !------------------------------------------------------------------- + ! Scale departure points to coordinate system in which grid cells + ! have sides of unit length. + !------------------------------------------------------------------- + + dpx(i,j) = dpx(i,j) / dxu(i,j) + dpy(i,j) = dpy(i,j) / dyu(i,j) + + !------------------------------------------------------------------- + ! Estimate midpoint of backward trajectory relative to corner (i,j). + !------------------------------------------------------------------- + + mpx = p5 * dpx(i,j) + mpy = p5 * dpy(i,j) + + !------------------------------------------------------------------- + ! Determine the indices (i2,j2) of the cell where the trajectory lies. + ! Compute the coordinates of the midpoint of the backward trajectory + ! relative to the cell center in a stretch coordinate system + ! with vertices at (1/2, 1/2), (1/2, -1/2), etc. + !------------------------------------------------------------------- + + if (mpx >= c0 .and. mpy >= c0) then ! cell (i+1,j+1) + i2 = i+1 + j2 = j+1 + mpxt = mpx - p5 + mpyt = mpy - p5 + elseif (mpx < c0 .and. mpy < c0) then ! cell (i,j) + i2 = i + j2 = j + mpxt = mpx + p5 + mpyt = mpy + p5 + elseif (mpx >= c0 .and. mpy < c0) then ! cell (i+1,j) + i2 = i+1 + j2 = j + mpxt = mpx - p5 + mpyt = mpy + p5 + elseif (mpx < c0 .and. mpy >= c0) then ! cell (i,j+1) + i2 = i + j2 = j+1 + mpxt = mpx + p5 + mpyt = mpy - p5 + endif + + !------------------------------------------------------------------- + ! Using a bilinear approximation, estimate the velocity at the + ! trajectory midpoint in the (i2,j2) reference frame. + !------------------------------------------------------------------- + + ump = uvel(i2-1,j2-1)*(mpxt-p5)*(mpyt-p5) & + - uvel(i2, j2-1)*(mpxt+p5)*(mpyt-p5) & + + uvel(i2, j2 )*(mpxt+p5)*(mpyt+p5) & + - uvel(i2-1,j2 )*(mpxt-p5)*(mpyt+p5) + + vmp = vvel(i2-1,j2-1)*(mpxt-p5)*(mpyt-p5) & + - vvel(i2, j2-1)*(mpxt+p5)*(mpyt-p5) & + + vvel(i2, j2 )*(mpxt+p5)*(mpyt+p5) & + - vvel(i2-1,j2 )*(mpxt-p5)*(mpyt+p5) + + !------------------------------------------------------------------- + ! Use the midpoint velocity to estimate the coordinates of the + ! departure point relative to corner (i,j). + !------------------------------------------------------------------- + + dpx(i,j) = -dt * ump + dpy(i,j) = -dt * vmp + + endif ! nonzero velocity + + enddo ! i + enddo ! j + endif ! l_dp_midpt end subroutine departure_points @@ -1713,17 +1718,17 @@ subroutine locate_triangles (nx_block, ny_block, & l_fixed_area, edgearea) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - ilo,ihi,jlo,jhi ,&! beginning and end of physical domain - nghost ! number of ghost cells + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain + nghost ! number of ghost cells character (len=char_len), intent(in) :: & edge ! 'north' or 'east' real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: & - dpx ,&! x coordinates of departure points at cell corners - dpy ,&! y coordinates of departure points at cell corners - dxu ,&! E-W dimension of U-cell (m) + dpx , & ! 
x coordinates of departure points at cell corners
+      dpy      , & ! y coordinates of departure points at cell corners
+      dxu      , & ! E-W dimension of U-cell (m)
       dyu          ! N-S dimension of U-cell (m)

    real (kind=dbl_kind), dimension (nx_block,ny_block,0:nvert,ngroups), intent(out) :: &
@@ -1733,14 +1738,14 @@ subroutine locate_triangles (nx_block, ny_block, &
       triarea      ! area of departure triangle

    integer (kind=int_kind), dimension (nx_block,ny_block,ngroups), intent(out) :: &
-      iflux    ,&! i index of cell contributing transport
+      iflux    , & ! i index of cell contributing transport
       jflux        ! j index of cell contributing transport

    integer (kind=int_kind), dimension (ngroups), intent(out) :: &
       icells       ! number of cells where triarea > puny

    integer (kind=int_kind), dimension (nx_block*ny_block,ngroups), intent(out) :: &
-      indxi    ,&! compressed index in i-direction
+      indxi    , & ! compressed index in i-direction
       indxj        ! compressed index in j-direction

    logical, intent(in) :: &
       l_fixed_area ! if true, the area of each departure region is
                    ! passed in as edgearea
                    ! if false, edgearea is determined internally
                    ! and is passed out
-
+
    real (kind=dbl_kind), dimension(nx_block,ny_block), intent(inout) :: &
       edgearea     ! area of departure region for each edge
                    ! edgearea > 0 for eastward/northward flow

    ! local variables

    integer (kind=int_kind) :: &
-      i, j, ij, ic          ,&! horizontal indices
-      ib, ie, jb, je        ,&! limits for loops over edges
-      ng, nv                ,&! triangle indices
-      ishift, jshift        ,&! differences between neighbor cells
-      ishift_tl, jshift_tl  ,&! i,j indices of TL cell relative to edge
-      ishift_bl, jshift_bl  ,&! i,j indices of BL cell relative to edge
-      ishift_tr, jshift_tr  ,&! i,j indices of TR cell relative to edge
-      ishift_br, jshift_br  ,&! i,j indices of BR cell relative to edge
-      ishift_tc, jshift_tc  ,&! i,j indices of TC cell relative to edge
-      ishift_bc, jshift_bc    ! i,j indices of BC cell relative to edge
+      i, j, ij, ic          , & ! horizontal indices
+      ib, ie, jb, je        , & ! limits for loops over edges
+      ng, nv                , & ! triangle indices
+      ishift   , jshift     , & ! differences between neighbor cells
+      ishift_tl, jshift_tl  , & ! i,j indices of TL cell relative to edge
+      ishift_bl, jshift_bl  , & ! i,j indices of BL cell relative to edge
+      ishift_tr, jshift_tr  , & ! i,j indices of TR cell relative to edge
+      ishift_br, jshift_br  , & ! i,j indices of BR cell relative to edge
+      ishift_tc, jshift_tc  , & ! i,j indices of TC cell relative to edge
+      ishift_bc, jshift_bc      ! i,j indices of BC cell relative to edge

    integer (kind=int_kind) :: &
       icellsd      ! number of cells where departure area > 0.

    integer (kind=int_kind), dimension (nx_block*ny_block) :: &
-      indxid   ,&! compressed index in i-direction
+      indxid   , & ! compressed index in i-direction
       indxjd       ! compressed index in j-direction

    real (kind=dbl_kind), dimension(nx_block,ny_block) :: &
-      dx, dy       ,&! scaled departure points
-      areafac_c    ,&! area scale factor at center of edge
-      areafac_l    ,&! area scale factor at left corner
+      dx, dy       , & ! scaled departure points
+      areafac_c    , & ! area scale factor at center of edge
+      areafac_l    , & ! area scale factor at left corner
       areafac_r        ! area scale factor at right corner

    real (kind=dbl_kind) :: &
-      xcl, ycl     ,&! coordinates of left corner point
+      xcl, ycl     , & ! coordinates of left corner point
                    ! (relative to midpoint of edge)
-      xdl, ydl     ,&! left departure point
-      xil, yil     ,&! left intersection point
-      xcr, ycr     ,&! right corner point
-      xdr, ydr     ,&! right departure point
-      xir, yir     ,&! 
right intersection point - xic, yic ,&! x-axis intersection point - xicl, yicl ,&! left-hand x-axis intersection point - xicr, yicr ,&! right-hand x-axis intersection point - xdm, ydm ,&! midpoint of segment connecting DL and DR; + xdl, ydl , & ! left departure point + xil, yil , & ! left intersection point + xcr, ycr , & ! right corner point + xdr, ydr , & ! right departure point + xir, yir , & ! right intersection point + xic, yic , & ! x-axis intersection point + xicl, yicl , & ! left-hand x-axis intersection point + xicr, yicr , & ! right-hand x-axis intersection point + xdm, ydm , & ! midpoint of segment connecting DL and DR; ! shifted if l_fixed_area = T - md ,&! slope of line connecting DL and DR - mdl ,&! slope of line connecting DL and DM - mdr ,&! slope of line connecting DR and DM - area1, area2 ,&! temporary triangle areas - area3, area4 ,&! - area_c ,&! center polygon area - puny ,&! + md , & ! slope of line connecting DL and DR + mdl , & ! slope of line connecting DL and DM + mdr , & ! slope of line connecting DR and DM + area1, area2 , & ! temporary triangle areas + area3, area4 , & ! + area_c , & ! center polygon area + puny , & ! w1, w2 ! work variables real (kind=dbl_kind), dimension (nx_block,ny_block,ngroups) :: & @@ -1807,61 +1812,61 @@ subroutine locate_triangles (nx_block, ny_block, & real (kind=dbl_kind), dimension(nx_block,ny_block) :: & areasum ! sum of triangle areas for a given edge - + character(len=*), parameter :: subname = '(locate_triangles)' - !------------------------------------------------------------------- - ! Triangle notation: - ! For each edge, there are 20 triangles that can contribute, - ! but many of these are mutually exclusive. It turns out that - ! at most 5 triangles can contribute to transport integrals at once. - ! - ! See Figure 3 in DB for pictures of these triangles. - ! See Table 1 in DB for logical conditions. - ! - ! For the north edge, DB refer to these triangles as: - ! (1) NW, NW1, W, W2 - ! (2) NE, NE1, E, E2 - ! (3) NW2, W1, NE2, E1 - ! (4) H1a, H1b, N1a, N1b - ! (5) H2a, H2b, N2a, N2b - ! - ! For the east edge, DB refer to these triangles as: - ! (1) NE, NE1, N, N2 - ! (2) SE, SE1, S, S2 - ! (3) NE2, N1, SE2, S1 - ! (4) H1a, H1b, E1a, E2b - ! (5) H2a, H2b, E2a, E2b - ! - ! The code below works for either north or east edges. - ! The respective triangle labels are: - ! (1) TL, TL1, BL, BL2 - ! (2) TR, TR1, BR, BR2 - ! (3) TL2, BL1, TR2, BR1 - ! (4) BC1a, BC1b, TC1a, TC2b - ! (5) BC2a, BC2b, TC2a, TC2b - ! - ! where the cell labels are: - ! - ! | | - ! TL | TC | TR (top left, center, right) - ! | | - ! ------------------------ - ! | | - ! BL | BC | BR (bottom left, center, right) - ! | | - ! - ! and the transport is across the edge between cells TC and TB. - ! - ! Departure points are scaled to a local coordinate system - ! whose origin is at the midpoint of the edge. - ! In this coordinate system, the lefthand corner CL = (-0.5,0) - ! and the righthand corner CR = (0.5, 0). - !------------------------------------------------------------------- - - !------------------------------------------------------------------- - ! Initialize - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Triangle notation: + ! For each edge, there are 20 triangles that can contribute, + ! but many of these are mutually exclusive. It turns out that + ! at most 5 triangles can contribute to transport integrals at once. + ! + ! 
See Figure 3 in DB for pictures of these triangles.
+      ! See Table 1 in DB for logical conditions.
+      !
+      ! For the north edge, DB refer to these triangles as:
+      !    (1) NW, NW1, W, W2
+      !    (2) NE, NE1, E, E2
+      !    (3) NW2, W1, NE2, E1
+      !    (4) H1a, H1b, N1a, N1b
+      !    (5) H2a, H2b, N2a, N2b
+      !
+      ! For the east edge, DB refer to these triangles as:
+      !    (1) NE, NE1, N, N2
+      !    (2) SE, SE1, S, S2
+      !    (3) NE2, N1, SE2, S1
+      !    (4) H1a, H1b, E1a, E1b
+      !    (5) H2a, H2b, E2a, E2b
+      !
+      ! The code below works for either north or east edges.
+      ! The respective triangle labels are:
+      !    (1) TL, TL1, BL, BL2
+      !    (2) TR, TR1, BR, BR2
+      !    (3) TL2, BL1, TR2, BR1
+      !    (4) BC1a, BC1b, TC1a, TC1b
+      !    (5) BC2a, BC2b, TC2a, TC2b
+      !
+      ! where the cell labels are:
+      !
+      !          |         |
+      !    TL    |    TC   |    TR     (top left, center, right)
+      !          |         |
+      !    ------------------------
+      !          |         |
+      !    BL    |    BC   |    BR     (bottom left, center, right)
+      !          |         |
+      !
+      ! and the transport is across the edge between cells TC and BC.
+      !
+      ! Departure points are scaled to a local coordinate system
+      ! whose origin is at the midpoint of the edge.
+      ! In this coordinate system, the lefthand corner CL = (-0.5,0)
+      ! and the righthand corner CR = (0.5, 0).
+      !-------------------------------------------------------------------
+
+      !-------------------------------------------------------------------
+      ! Initialize
+      !-------------------------------------------------------------------

       call icepack_query_parameters(puny_out=puny)
       call icepack_warnings_flush(nu_diag)
@@ -1895,7 +1900,7 @@ subroutine locate_triangles (nx_block, ny_block, &
       ! loop size

       ib = ilo
-      ie = ihi 
+      ie = ihi
       jb = jlo - nghost         ! lowest j index is a ghost cell
       je = jhi
@@ -1918,8 +1923,8 @@
          do j = jb, je
          do i = ib, ie
-            areafac_l(i,j) = dxu(i-1,j)*dyu(i-1,j)
-            areafac_r(i,j) = dxu(i,j)*dyu(i,j)
+            areafac_l(i,j) = dxu(i-1,j)*dyu(i-1,j)
+            areafac_r(i,j) = dxu(i  ,j)*dyu(i  ,j)
             areafac_c(i,j) = p5*(areafac_l(i,j) + areafac_r(i,j))
          enddo
          enddo
@@ -1952,7 +1957,7 @@
          do j = jb, je
          do i = ib, ie
-            areafac_l(i,j) = dxu(i,j)*dyu(i,j)
+            areafac_l(i,j) = dxu(i,j  )*dyu(i,j  )
             areafac_r(i,j) = dxu(i,j-1)*dyu(i,j-1)
             areafac_c(i,j) = p5 * (areafac_l(i,j) + areafac_r(i,j))
          enddo
@@ -1960,9 +1965,9 @@
       endif

-      !-------------------------------------------------------------------
-      ! Compute mask for edges with nonzero departure areas
-      !-------------------------------------------------------------------
+      !-------------------------------------------------------------------
+      ! Compute mask for edges with nonzero departure areas
+      !-------------------------------------------------------------------

       if (l_fixed_area) then
          icellsd = 0
@@ -2004,9 +2009,9 @@
          endif                  ! edge = north/east
       endif                     ! l_fixed_area

-      !-------------------------------------------------------------------
-      ! Scale the departure points
-      !-------------------------------------------------------------------
+      !-------------------------------------------------------------------
+      ! Scale the departure points
+      !-------------------------------------------------------------------

       do j = 1, je
       do i = 1, ie
          dx(i,j) = dpx(i,j) / dxu(i,j)
          dy(i,j) = dpy(i,j) / dyu(i,j)
       enddo
       enddo

-      !-------------------------------------------------------------------
-      ! Compute departure regions, divide into triangles, and locate
-      ! vertices of each triangle.
-      ! 
Work in a nondimensional coordinate system in which lengths are - ! scaled by the local metric coefficients (dxu and dyu). - ! Note: The do loop includes north faces of the j = 1 ghost cells - ! when edge = 'north'. The loop includes east faces of i = 1 - ! ghost cells when edge = 'east'. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute departure regions, divide into triangles, and locate + ! vertices of each triangle. + ! Work in a nondimensional coordinate system in which lengths are + ! scaled by the local metric coefficients (dxu and dyu). + ! Note: The do loop includes north faces of the j = 1 ghost cells + ! when edge = 'north'. The loop includes east faces of i = 1 + ! ghost cells when edge = 'east'. + !------------------------------------------------------------------- do ij = 1, icellsd i = indxid(ij) j = indxjd(ij) - + xcl = -p5 ycl = c0 @@ -2038,15 +2043,15 @@ subroutine locate_triangles (nx_block, ny_block, & ! Departure points if (trim(edge) == 'north') then ! north edge - xdl = xcl + dx(i-1,j) - ydl = ycl + dy(i-1,j) - xdr = xcr + dx(i,j) - ydr = ycr + dy(i,j) + xdl = xcl + dx(i-1,j ) + ydl = ycl + dy(i-1,j ) + xdr = xcr + dx(i ,j ) + ydr = ycr + dy(i ,j ) else ! east edge; rotate trajectory by pi/2 - xdl = xcl - dy(i,j) - ydl = ycl + dx(i,j) - xdr = xcr - dy(i,j-1) - ydr = ycr + dx(i,j-1) + xdl = xcl - dy(i ,j ) + ydl = ycl + dx(i ,j ) + xdr = xcr - dy(i ,j-1) + ydr = ycr + dx(i ,j-1) endif xdm = p5 * (xdr + xdl) @@ -2056,12 +2061,12 @@ subroutine locate_triangles (nx_block, ny_block, & xil = xcl yil = (xcl*(ydm-ydl) + xdm*ydl - xdl*ydm) / (xdm - xdl) - + xir = xcr - yir = (xcr*(ydr-ydm) - xdm*ydr + xdr*ydm) / (xdr - xdm) - + yir = (xcr*(ydr-ydm) - xdm*ydr + xdr*ydm) / (xdr - xdm) + md = (ydr - ydl) / (xdr - xdl) - + if (abs(md) > puny) then xic = xdl - ydl/md else @@ -2074,14 +2079,14 @@ subroutine locate_triangles (nx_block, ny_block, & xicr = xic yicr = yic - !------------------------------------------------------------------- - ! Locate triangles in TL cell (NW for north edge, NE for east edge) - ! and BL cell (W for north edge, N for east edge). - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Locate triangles in TL cell (NW for north edge, NE for east edge) + ! and BL cell (W for north edge, N for east edge). + !------------------------------------------------------------------- if (yil > c0 .and. xdl < xcl .and. ydl >= c0) then - ! TL (group 1) + ! TL (group 1) ng = 1 xp (i,j,1,ng) = xcl @@ -2096,7 +2101,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (yil < c0 .and. xdl < xcl .and. ydl < c0) then - ! BL (group 1) + ! BL (group 1) ng = 1 xp (i,j,1,ng) = xcl @@ -2111,7 +2116,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (yil < c0 .and. xdl < xcl .and. ydl >= c0) then - ! TL1 (group 1) + ! TL1 (group 1) ng = 1 xp (i,j,1,ng) = xcl @@ -2124,7 +2129,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tl areafact(i,j,ng) = areafac_l(i,j) - ! BL1 (group 3) + ! BL1 (group 3) ng = 3 xp (i,j,1,ng) = xcl @@ -2139,7 +2144,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (yil > c0 .and. xdl < xcl .and. ydl < c0) then - ! TL2 (group 3) + ! 
TL2 (group 3) ng = 3 xp (i,j,1,ng) = xcl @@ -2152,7 +2157,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tl areafact(i,j,ng) = -areafac_l(i,j) - ! BL2 (group 1) + ! BL2 (group 1) ng = 1 xp (i,j,1,ng) = xcl @@ -2167,14 +2172,14 @@ subroutine locate_triangles (nx_block, ny_block, & endif ! TL and BL triangles - !------------------------------------------------------------------- - ! Locate triangles in TR cell (NE for north edge, SE for east edge) - ! and in BR cell (E for north edge, S for east edge). - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Locate triangles in TR cell (NE for north edge, SE for east edge) + ! and in BR cell (E for north edge, S for east edge). + !------------------------------------------------------------------- if (yir > c0 .and. xdr >= xcr .and. ydr >= c0) then - ! TR (group 2) + ! TR (group 2) ng = 2 xp (i,j,1,ng) = xcr @@ -2189,7 +2194,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (yir < c0 .and. xdr >= xcr .and. ydr < c0) then - ! BR (group 2) + ! BR (group 2) ng = 2 xp (i,j,1,ng) = xcr @@ -2202,9 +2207,9 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_br areafact(i,j,ng) = areafac_r(i,j) - elseif (yir < c0 .and. xdr >= xcr .and. ydr >= c0) then + elseif (yir < c0 .and. xdr >= xcr .and. ydr >= c0) then - ! TR1 (group 2) + ! TR1 (group 2) ng = 2 xp (i,j,1,ng) = xcr @@ -2217,7 +2222,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tr areafact(i,j,ng) = areafac_r(i,j) - ! BR1 (group 3) + ! BR1 (group 3) ng = 3 xp (i,j,1,ng) = xcr @@ -2230,9 +2235,9 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_br areafact(i,j,ng) = areafac_r(i,j) - elseif (yir > c0 .and. xdr >= xcr .and. ydr < c0) then + elseif (yir > c0 .and. xdr >= xcr .and. ydr < c0) then - ! TR2 (group 3) + ! TR2 (group 3) ng = 3 xp (i,j,1,ng) = xcr @@ -2245,9 +2250,9 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tr areafact(i,j,ng) = -areafac_r(i,j) - ! BR2 (group 2) + ! BR2 (group 2) - ng = 2 + ng = 2 xp (i,j,1,ng) = xcr yp (i,j,1,ng) = ycr xp (i,j,2,ng) = xdr @@ -2260,9 +2265,9 @@ subroutine locate_triangles (nx_block, ny_block, & endif ! TR and BR triangles - !------------------------------------------------------------------- - ! Redefine departure points if not located in central cells (TC or BC) - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Redefine departure points if not located in central cells (TC or BC) + !------------------------------------------------------------------- if (xdl < xcl) then xdl = xil @@ -2274,10 +2279,10 @@ subroutine locate_triangles (nx_block, ny_block, & ydr = yir endif - !------------------------------------------------------------------- - ! For l_fixed_area = T, shift the midpoint so that the departure - ! region has the prescribed area - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! For l_fixed_area = T, shift the midpoint so that the departure + ! 
region has the prescribed area + !------------------------------------------------------------------- if (l_fixed_area) then @@ -2290,21 +2295,21 @@ subroutine locate_triangles (nx_block, ny_block, & yp(i,j,3,ng) & - yp(i,j,2,ng) * & (xp(i,j,3,ng)-xp(i,j,1,ng)) ) & - * areafact(i,j,ng) + * areafact(i,j,ng) ng = 2 area2 = p5 * ( (xp(i,j,2,ng)-xp(i,j,1,ng)) * & yp(i,j,3,ng) & - yp(i,j,2,ng) * & (xp(i,j,3,ng)-xp(i,j,1,ng)) ) & - * areafact(i,j,ng) + * areafact(i,j,ng) ng = 3 area3 = p5 * ( (xp(i,j,2,ng)-xp(i,j,1,ng)) * & yp(i,j,3,ng) & - yp(i,j,2,ng) * & (xp(i,j,3,ng)-xp(i,j,1,ng)) ) & - * areafact(i,j,ng) + * areafact(i,j,ng) !----------------------------------------------------------- ! Check whether the central triangles lie in one grid cell or two. @@ -2359,7 +2364,7 @@ subroutine locate_triangles (nx_block, ny_block, & xdm = p5 * (xdr + xicl) ydm = p5 * ydr - ! compute area of triangle adjacent to left corner + ! compute area of triangle adjacent to left corner area4 = p5 * (xcl - xic) * ydl * areafac_l(i,j) area_c = edgearea(i,j) - area1 - area2 - area3 - area4 @@ -2384,7 +2389,7 @@ subroutine locate_triangles (nx_block, ny_block, & xicr = xic yicr = yic - ! compute midpoint between ICR and DL + ! compute midpoint between ICR and DL xdm = p5 * (xicr + xdl) ydm = p5 * ydl @@ -2412,16 +2417,16 @@ subroutine locate_triangles (nx_block, ny_block, & endif ! l_fixed_area - !------------------------------------------------------------------- - ! Locate triangles in BC cell (H for both north and east edges) - ! and TC cell (N for north edge and E for east edge). - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Locate triangles in BC cell (H for both north and east edges) + ! and TC cell (N for north edge and E for east edge). + !------------------------------------------------------------------- - ! Start with cases where both DPs lie in the same grid cell + ! Start with cases where both DPs lie in the same grid cell if (ydl >= c0 .and. ydr >= c0 .and. ydm >= c0) then - ! TC1a (group 4) + ! TC1a (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2434,7 +2439,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! TC2a (group 5) + ! TC2a (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2447,7 +2452,8 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! TC3a (group 6) + ! TC3a (group 6) + ng = 6 xp (i,j,1,ng) = xdl yp (i,j,1,ng) = ydl @@ -2461,7 +2467,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl >= c0 .and. ydr >= c0 .and. ydm < c0) then ! rare - ! TC1b (group 4) + ! TC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2474,7 +2480,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! TC2b (group 5) + ! TC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2487,7 +2493,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! BC3b (group 6) + ! BC3b (group 6) ng = 6 xp (i,j,1,ng) = xicr @@ -2502,7 +2508,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl < c0 .and. ydr < c0 .and. ydm < c0) then - ! BC1a (group 4) + ! BC1a (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2515,7 +2521,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! BC2a (group 5) + ! 
BC2a (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2528,7 +2534,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! BC3a (group 6) + ! BC3a (group 6) ng = 6 xp (i,j,1,ng) = xdl @@ -2543,7 +2549,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl < c0 .and. ydr < c0 .and. ydm >= c0) then ! rare - ! BC1b (group 4) + ! BC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2556,7 +2562,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! BC2b (group 5) + ! BC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2569,7 +2575,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! TC3b (group 6) + ! TC3b (group 6) ng = 6 xp (i,j,1,ng) = xicl @@ -2582,14 +2588,14 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! Now consider cases where the two DPs lie in different grid cells - ! For these cases, one triangle is given the area factor associated - ! with the adjacent corner, to avoid rare negative masses on curved grids. + ! Now consider cases where the two DPs lie in different grid cells + ! For these cases, one triangle is given the area factor associated + ! with the adjacent corner, to avoid rare negative masses on curved grids. elseif (ydl >= c0 .and. ydr < c0 .and. xic >= c0 & .and. ydm >= c0) then - ! TC1b (group 4) + ! TC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2602,7 +2608,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! BC2b (group 5) + ! BC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2615,7 +2621,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_r(i,j) - ! TC3b (group 6) + ! TC3b (group 6) ng = 6 xp (i,j,1,ng) = xdl @@ -2631,7 +2637,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl >= c0 .and. ydr < c0 .and. xic >= c0 & .and. ydm < c0 ) then ! less common - ! TC1b (group 4) + ! TC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2644,7 +2650,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! BC2b (group 5) + ! BC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2657,7 +2663,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_r(i,j) - ! BC3b (group 6) + ! BC3b (group 6) ng = 6 xp (i,j,1,ng) = xicr @@ -2673,7 +2679,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl >= c0 .and. ydr < c0 .and. xic < c0 & .and. ydm < c0) then - ! TC1b (group 4) + ! TC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2686,7 +2692,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_l(i,j) - ! BC2b (group 5) + ! BC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2699,7 +2705,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! BC3b (group 6) + ! BC3b (group 6) ng = 6 xp (i,j,1,ng) = xdr @@ -2715,7 +2721,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl >= c0 .and. ydr < c0 .and. xic < c0 & .and. ydm >= c0) then ! less common - ! TC1b (group 4) + ! 
TC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2728,7 +2734,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_l(i,j) - ! BC2b (group 5) + ! BC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2741,7 +2747,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! TC3b (group 6) + ! TC3b (group 6) ng = 6 xp (i,j,1,ng) = xicl @@ -2757,7 +2763,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl < c0 .and. ydr >= c0 .and. xic < c0 & .and. ydm >= c0) then - ! BC1b (group 4) + ! BC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2770,7 +2776,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_l(i,j) - ! TC2b (group 5) + ! TC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2783,7 +2789,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! TC3b (group 6) + ! TC3b (group 6) ng = 6 xp (i,j,1,ng) = xicl @@ -2799,7 +2805,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl < c0 .and. ydr >= c0 .and. xic < c0 & .and. ydm < c0) then ! less common - ! BC1b (group 4) + ! BC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2812,7 +2818,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_l(i,j) - ! TC2b (group 5) + ! TC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2825,7 +2831,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_c(i,j) - ! BC3b (group 6) + ! BC3b (group 6) ng = 6 xp (i,j,1,ng) = xicr @@ -2841,7 +2847,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl < c0 .and. ydr >= c0 .and. xic >= c0 & .and. ydm < c0) then - ! BC1b (group 4) + ! BC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2854,7 +2860,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! TC2b (group 5) + ! TC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2867,7 +2873,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_r(i,j) - ! BC3b (group 6) + ! BC3b (group 6) ng = 6 xp (i,j,1,ng) = xicr @@ -2883,7 +2889,7 @@ subroutine locate_triangles (nx_block, ny_block, & elseif (ydl < c0 .and. ydr >= c0 .and. xic >= c0 & .and. ydm >= c0) then ! less common - ! BC1b (group 4) + ! BC1b (group 4) ng = 4 xp (i,j,1,ng) = xcl @@ -2896,7 +2902,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_bc areafact(i,j,ng) = areafac_c(i,j) - ! TC2b (group 5) + ! TC2b (group 5) ng = 5 xp (i,j,1,ng) = xcr @@ -2909,7 +2915,7 @@ subroutine locate_triangles (nx_block, ny_block, & jflux (i,j,ng) = j + jshift_tc areafact(i,j,ng) = -areafac_r(i,j) - ! TC3b (group 6) + ! TC3b (group 6) ng = 6 xp (i,j,1,ng) = xicl @@ -2926,26 +2932,26 @@ subroutine locate_triangles (nx_block, ny_block, & enddo ! ij - !------------------------------------------------------------------- - ! Compute triangle areas with appropriate sign. - ! These are found by computing the area in scaled coordinates and - ! multiplying by a scale factor (areafact). - ! Note that the scale factor is positive for fluxes out of the cell - ! and negative for fluxes into the cell. - ! - ! Note: The triangle area formula below gives A >=0 iff the triangle - ! points x1, x2, and x3 are taken in counterclockwise order. - ! 
These points are defined above in such a way that the - ! order is nearly always CCW. - ! In rare cases, we may compute A < 0. In this case, - ! the quadrilateral departure area is equal to the - ! difference of two triangle areas instead of the sum. - ! The fluxes work out correctly in the end. - ! - ! Also compute the cumulative area transported across each edge. - ! If l_fixed_area = T, this area is compared to edgearea as a bug check. - ! If l_fixed_area = F, this area is passed as an output array. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Compute triangle areas with appropriate sign. + ! These are found by computing the area in scaled coordinates and + ! multiplying by a scale factor (areafact). + ! Note that the scale factor is positive for fluxes out of the cell + ! and negative for fluxes into the cell. + ! + ! Note: The triangle area formula below gives A >=0 iff the triangle + ! points x1, x2, and x3 are taken in counterclockwise order. + ! These points are defined above in such a way that the + ! order is nearly always CCW. + ! In rare cases, we may compute A < 0. In this case, + ! the quadrilateral departure area is equal to the + ! difference of two triangle areas instead of the sum. + ! The fluxes work out correctly in the end. + ! + ! Also compute the cumulative area transported across each edge. + ! If l_fixed_area = T, this area is compared to edgearea as a bug check. + ! If l_fixed_area = F, this area is passed as an output array. + !------------------------------------------------------------------- areasum(:,:) = c0 @@ -2960,12 +2966,12 @@ subroutine locate_triangles (nx_block, ny_block, & (yp(i,j,3,ng)-yp(i,j,1,ng)) & - (yp(i,j,2,ng)-yp(i,j,1,ng)) * & (xp(i,j,3,ng)-xp(i,j,1,ng)) ) & - * areafact(i,j,ng) + * areafact(i,j,ng) if (abs(triarea(i,j,ng)) < eps16*areafac_c(i,j)) then triarea(i,j,ng) = c0 else - icells(ng) = icells(ng) + 1 + icells(ng) = icells(ng) + 1 ic = icells(ng) indxi(ic,ng) = i indxj(ic,ng) = j @@ -2977,27 +2983,27 @@ subroutine locate_triangles (nx_block, ny_block, & enddo ! ng if (l_fixed_area) then - if (bugcheck) then ! set bugcheck = F to speed up code - do ij = 1, icellsd - i = indxid(ij) - j = indxjd(ij) - if (abs(areasum(i,j) - edgearea(i,j)) > eps13*areafac_c(i,j)) then - write(nu_diag,*) '' - write(nu_diag,*) 'Areas do not add up: m, i, j, edge =', & - my_task, i, j, trim(edge) - write(nu_diag,*) 'edgearea =', edgearea(i,j) - write(nu_diag,*) 'areasum =', areasum(i,j) - write(nu_diag,*) 'areafac_c =', areafac_c(i,j) - write(nu_diag,*) '' - write(nu_diag,*) 'Triangle areas:' - do ng = 1, ngroups ! not vector friendly - if (abs(triarea(i,j,ng)) > eps16*abs(areafact(i,j,ng))) then - write(nu_diag,*) ng, triarea(i,j,ng) - endif - enddo - endif - enddo - endif ! bugcheck + if (bugcheck) then ! set bugcheck = F to speed up code + do ij = 1, icellsd + i = indxid(ij) + j = indxjd(ij) + if (abs(areasum(i,j) - edgearea(i,j)) > eps13*areafac_c(i,j)) then + write(nu_diag,*) '' + write(nu_diag,*) 'Areas do not add up: m, i, j, edge =', & + my_task, i, j, trim(edge) + write(nu_diag,*) 'edgearea =', edgearea(i,j) + write(nu_diag,*) 'areasum =', areasum(i,j) + write(nu_diag,*) 'areafac_c =', areafac_c(i,j) + write(nu_diag,*) '' + write(nu_diag,*) 'Triangle areas:' + do ng = 1, ngroups ! not vector friendly + if (abs(triarea(i,j,ng)) > eps16*abs(areafact(i,j,ng))) then + write(nu_diag,*) ng, triarea(i,j,ng) + endif + enddo + endif + enddo + endif ! 
bugcheck else ! l_fixed_area = F do ij = 1, icellsd @@ -3007,10 +3013,10 @@ subroutine locate_triangles (nx_block, ny_block, & enddo endif ! l_fixed_area - !------------------------------------------------------------------- - ! Transform triangle vertices to a scaled coordinate system centered - ! in the cell containing the triangle. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Transform triangle vertices to a scaled coordinate system centered + ! in the cell containing the triangle. + !------------------------------------------------------------------- if (trim(edge) == 'north') then do ng = 1, ngroups @@ -3077,10 +3083,10 @@ end subroutine locate_triangles ! to compute integrals of linear, quadratic, or cubic polynomials, ! using formulas from A.H. Stroud, Approximate Calculation of Multiple ! Integrals, Prentice-Hall, 1971. (Section 8.8, formula 3.1.) -! Linear functions can be integrated exactly by evaluating the function +! Linear functions can be integrated exactly by evaluating the function ! at just one point (the midpoint). Quadratic functions require ! 3 points, and cubics require 4 points. -! The default is cubic, but the code can be sped up slightly using +! The default is cubic, but the code can be sped up slightly using ! linear or quadratic integrals, usually with little loss of accuracy. ! ! The formulas are as follows: @@ -3106,24 +3112,24 @@ subroutine triangle_coordinates (nx_block, ny_block, & xp, yp) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - integral_order ! polynomial order for quadrature integrals + nx_block, ny_block, & ! block dimensions + integral_order ! polynomial order for quadrature integrals integer (kind=int_kind), dimension (ngroups), intent(in) :: & - icells ! number of cells where triarea > puny + icells ! number of cells where triarea > puny integer (kind=int_kind), dimension (nx_block*ny_block,ngroups), intent(in) :: & - indxi ,&! compressed index in i-direction - indxj ! compressed index in j-direction + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction real (kind=dbl_kind), intent(inout), dimension (nx_block, ny_block, 0:nvert, ngroups) :: & - xp, yp ! coordinates of triangle points + xp, yp ! coordinates of triangle points ! local variables integer (kind=int_kind) :: & - i, j, ij ,&! horizontal indices - ng ! triangle index + i, j, ij , & ! horizontal indices + ng ! triangle index character(len=*), parameter :: subname = '(triangle_coordinates)' @@ -3190,10 +3196,10 @@ subroutine triangle_coordinates (nx_block, ny_block, & xp(i,j,2,ng) = p4*xp(i,j,2,ng) + p6*xp(i,j,0,ng) yp(i,j,2,ng) = p4*yp(i,j,2,ng) + p6*yp(i,j,0,ng) - + xp(i,j,3,ng) = p4*xp(i,j,3,ng) + p6*xp(i,j,0,ng) yp(i,j,3,ng) = p4*yp(i,j,3,ng) + p6*yp(i,j,0,ng) - + enddo ! ij enddo ! ng @@ -3224,69 +3230,69 @@ subroutine transport_integrals (nx_block, ny_block, & ty, mtflx) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block ,&! block dimensions - ntrace ,&! number of tracers in use - integral_order ! polynomial order for quadrature integrals + nx_block, ny_block , & ! block dimensions + ntrace , & ! number of tracers in use + integral_order ! polynomial order for quadrature integrals integer (kind=int_kind), dimension (ntrace), intent(in) :: & - tracer_type ,&! = 1, 2, or 3 (see comments above) - depend ! tracer dependencies (see above) + tracer_type , & ! = 1, 2, or 3 (see comments above) + depend ! 
tracer dependencies (see above) integer (kind=int_kind), dimension (ngroups), intent(in) :: & - icells ! number of cells where triarea > puny + icells ! number of cells where triarea > puny integer (kind=int_kind), dimension (nx_block*ny_block,ngroups), intent(in) :: & - indxi ,&! compressed index in i-direction - indxj ! compressed index in j-direction + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction real (kind=dbl_kind), intent(in), dimension (nx_block, ny_block, 0:nvert, ngroups) :: & - xp, yp ! coordinates of triangle points + xp, yp ! coordinates of triangle points real (kind=dbl_kind), intent(in), dimension (nx_block, ny_block, ngroups) :: & - triarea ! triangle area + triarea ! triangle area integer (kind=int_kind), intent(in), dimension (nx_block, ny_block, ngroups) :: & - iflux ,& - jflux + iflux ,& + jflux real (kind=dbl_kind), intent(in), dimension (nx_block, ny_block) :: & - mc, mx, my + mc, mx, my real (kind=dbl_kind), intent(out), dimension (nx_block, ny_block) :: & - mflx + mflx real (kind=dbl_kind), intent(in), dimension (nx_block, ny_block, ntrace), optional :: & - tc, tx, ty + tc, tx, ty real (kind=dbl_kind), intent(out), dimension (nx_block, ny_block, ntrace), optional :: & - mtflx + mtflx ! local variables integer (kind=int_kind) :: & - i, j, ij ,&! horizontal indices of edge - i2, j2 ,&! horizontal indices of cell contributing transport - ng ,&! triangle index - nt, nt1 ! tracer indices + i, j, ij , & ! horizontal indices of edge + i2, j2 , & ! horizontal indices of cell contributing transport + ng , & ! triangle index + nt, nt1 ! tracer indices real (kind=dbl_kind) :: & - m0, m1, m2, m3 ,&! mass field at internal points - w0, w1, w2, w3 ! work variables + m0, m1, m2, m3 , & ! mass field at internal points + w0, w1, w2, w3 ! work variables real (kind=dbl_kind), dimension (nx_block, ny_block) :: & - msum, mxsum, mysum ,&! sum of mass, mass*x, and mass*y - mxxsum, mxysum, myysum ! sum of mass*x*x, mass*x*y, mass*y*y + msum, mxsum, mysum , & ! sum of mass, mass*x, and mass*y + mxxsum, mxysum, myysum ! sum of mass*x*x, mass*x*y, mass*y*y real (kind=dbl_kind), dimension (nx_block, ny_block, ntrace) :: & - mtsum ,&! sum of mass*tracer - mtxsum ,&! sum of mass*tracer*x - mtysum ! sum of mass*tracer*y + mtsum , & ! sum of mass*tracer + mtxsum , & ! sum of mass*tracer*x + mtysum ! sum of mass*tracer*y character(len=*), parameter :: subname = '(transport_integrals)' - !------------------------------------------------------------------- - ! Initialize - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Initialize + !------------------------------------------------------------------- mflx(:,:) = c0 if (present(mtflx)) then @@ -3295,9 +3301,9 @@ subroutine transport_integrals (nx_block, ny_block, & enddo endif - !------------------------------------------------------------------- - ! Main loop - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Main loop + !------------------------------------------------------------------- do ng = 1, ngroups @@ -3319,11 +3325,11 @@ subroutine transport_integrals (nx_block, ny_block, & mflx(i,j) = mflx(i,j) + triarea(i,j,ng)*msum(i,j) ! 
quantities needed for tracer transports - mxsum(i,j) = m0*xp(i,j,0,ng) - mxxsum(i,j) = mxsum(i,j)*xp(i,j,0,ng) - mxysum(i,j) = mxsum(i,j)*yp(i,j,0,ng) - mysum(i,j) = m0*yp(i,j,0,ng) - myysum(i,j) = mysum(i,j)*yp(i,j,0,ng) + mxsum(i,j) = m0*xp(i,j,0,ng) + mxxsum(i,j) = mxsum(i,j)*xp(i,j,0,ng) + mxysum(i,j) = mxsum(i,j)*yp(i,j,0,ng) + mysum(i,j) = m0*yp(i,j,0,ng) + myysum(i,j) = mysum(i,j)*yp(i,j,0,ng) enddo ! ij elseif (integral_order == 2) then ! quadratic (3-point formula) @@ -3355,7 +3361,7 @@ subroutine transport_integrals (nx_block, ny_block, & mxsum(i,j) = w1 + w2 + w3 mxxsum(i,j) = w1*xp(i,j,1,ng) + w2*xp(i,j,2,ng) & - + w3*xp(i,j,3,ng) + + w3*xp(i,j,3,ng) mxysum(i,j) = w1*yp(i,j,1,ng) + w2*yp(i,j,2,ng) & + w3*yp(i,j,3,ng) @@ -3515,16 +3521,16 @@ subroutine update_fields (nx_block, ny_block, & tm) integer (kind=int_kind), intent(in) :: & - nx_block, ny_block,&! block dimensions - ilo,ihi,jlo,jhi ,&! beginning and end of physical domain + nx_block, ny_block, & ! block dimensions + ilo,ihi,jlo,jhi , & ! beginning and end of physical domain ntrace ! number of tracers in use integer (kind=int_kind), dimension (ntrace), intent(in) :: & - tracer_type ,&! = 1, 2, or 3 (see comments above) + tracer_type , & ! = 1, 2, or 3 (see comments above) depend ! tracer dependencies (see above) real (kind=dbl_kind), dimension (nx_block, ny_block), intent(in) :: & - mflxe, mflxn ,&! mass transport across east and north cell edges + mflxe, mflxn , & ! mass transport across east and north cell edges tarear ! 1/tarea real (kind=dbl_kind), dimension (nx_block, ny_block), intent(inout) :: & @@ -3540,12 +3546,12 @@ subroutine update_fields (nx_block, ny_block, & l_stop ! if true, abort on return integer (kind=int_kind), intent(inout) :: & - istop, jstop ! indices of grid cell where model aborts + istop, jstop ! indices of grid cell where model aborts ! local variables integer (kind=int_kind) :: & - i, j ,&! horizontal indices + i, j , & ! horizontal indices nt, nt1, nt2 ! tracer indices real (kind=dbl_kind), dimension(nx_block,ny_block,ntrace) :: & @@ -3556,18 +3562,18 @@ subroutine update_fields (nx_block, ny_block, & w1 ! work variable integer (kind=int_kind), dimension(nx_block*ny_block) :: & - indxi ,&! compressed indices in i and j directions + indxi , & ! compressed indices in i and j directions indxj integer (kind=int_kind) :: & - icells ,&! number of cells with mm > 0. + icells , & ! number of cells with mm > 0. ij ! combined i/j horizontal index character(len=*), parameter :: subname = '(update_fields)' - !------------------------------------------------------------------- - ! Save starting values of mass*tracer - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Save starting values of mass*tracer + !------------------------------------------------------------------- call icepack_query_parameters(puny_out=puny) call icepack_warnings_flush(nu_diag) @@ -3602,15 +3608,15 @@ subroutine update_fields (nx_block, ny_block, & enddo ! nt endif ! present(tm) - !------------------------------------------------------------------- - ! Update mass field - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! 
Update mass field + !------------------------------------------------------------------- do j = jlo, jhi do i = ilo, ihi - w1 = mflxe(i,j) - mflxe(i-1,j) & - + mflxn(i,j) - mflxn(i,j-1) + w1 = mflxe(i,j) - mflxe(i-1,j ) & + + mflxn(i,j) - mflxn(i ,j-1) mm(i,j) = mm(i,j) - w1*tarear(i,j) if (mm(i,j) < -puny) then ! abort with negative value @@ -3627,8 +3633,8 @@ subroutine update_fields (nx_block, ny_block, & if (l_stop) then i = istop j = jstop - w1 = mflxe(i,j) - mflxe(i-1,j) & - + mflxn(i,j) - mflxn(i,j-1) + w1 = mflxe(i,j) - mflxe(i-1,j ) & + + mflxn(i,j) - mflxn(i ,j-1) write (nu_diag,*) ' ' write (nu_diag,*) 'New mass < 0, i, j =', i, j write (nu_diag,*) 'Old mass =', mm(i,j) + w1*tarear(i,j) @@ -3637,9 +3643,9 @@ subroutine update_fields (nx_block, ny_block, & return endif - !------------------------------------------------------------------- - ! Update tracers - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Update tracers + !------------------------------------------------------------------- if (present(tm)) then @@ -3668,8 +3674,8 @@ subroutine update_fields (nx_block, ny_block, & i = indxi(ij) j = indxj(ij) - w1 = mtflxe(i,j,nt) - mtflxe(i-1,j,nt) & - + mtflxn(i,j,nt) - mtflxn(i,j-1,nt) + w1 = mtflxe(i,j,nt) - mtflxe(i-1,j ,nt) & + + mtflxn(i,j,nt) - mtflxn(i ,j-1,nt) tm(i,j,nt) = (mtold(i,j,nt) - w1*tarear(i,j)) & / mm(i,j) enddo ! ij @@ -3682,8 +3688,8 @@ subroutine update_fields (nx_block, ny_block, & j = indxj(ij) if (abs(tm(i,j,nt1)) > c0) then - w1 = mtflxe(i,j,nt) - mtflxe(i-1,j,nt) & - + mtflxn(i,j,nt) - mtflxn(i,j-1,nt) + w1 = mtflxe(i,j,nt) - mtflxe(i-1,j ,nt) & + + mtflxn(i,j,nt) - mtflxn(i ,j-1,nt) tm(i,j,nt) = (mtold(i,j,nt) - w1*tarear(i,j)) & / (mm(i,j) * tm(i,j,nt1)) endif @@ -3700,8 +3706,8 @@ subroutine update_fields (nx_block, ny_block, & if (abs(tm(i,j,nt1)) > c0 .and. & abs(tm(i,j,nt2)) > c0) then - w1 = mtflxe(i,j,nt) - mtflxe(i-1,j,nt) & - + mtflxn(i,j,nt) - mtflxn(i,j-1,nt) + w1 = mtflxe(i,j,nt) - mtflxe(i-1,j ,nt) & + + mtflxn(i,j,nt) - mtflxn(i ,j-1,nt) tm(i,j,nt) = (mtold(i,j,nt) - w1*tarear(i,j)) & / (mm(i,j) * tm(i,j,nt2) * tm(i,j,nt1)) endif diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90 index 72d9ea972..f5289c922 100644 --- a/cicecore/cicedynB/general/ice_flux.F90 +++ b/cicecore/cicedynB/general/ice_flux.F90 @@ -56,8 +56,8 @@ module ice_flux ! out to ocean T-cell (kg/m s^2) ! Note, CICE_IN_NEMO uses strocnx and strocny for coupling - strocnxT, & ! ice-ocean stress at T points, x-direction at T points, mapped from strocnx, per ice fraction - strocnyT ! ice-ocean stress at T points, y-direction at T points, mapped from strocny, per ice fraction + strocnxT, & ! ice-ocean stress, x-direction at T points, mapped from strocnx, per ice fraction + strocnyT ! ice-ocean stress, y-direction at T points, mapped from strocny, per ice fraction ! diagnostic @@ -125,7 +125,7 @@ module ice_flux stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22 stressm_1, stressm_2, stressm_3, stressm_4 , & ! sigma11-sigma22 stress12_1,stress12_2,stress12_3,stress12_4, & ! sigma12 - ! ice stress tensor at U and T locations (grid_ice = 'CD') (kg/s^2) + ! ice stress tensor at U and T locations (grid_ice = 'C|CD') (kg/s^2) stresspT, stressmT, stress12T, & ! sigma11+sigma22, sigma11-sigma22, sigma12 stresspU, stressmU, stress12U ! 
" diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 651bf7880..4234b20da 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -36,7 +36,7 @@ module ice_init implicit none private - character(len=char_len_long),public :: & + character(len=char_len_long), public :: & ice_ic ! method of ice cover initialization ! 'default' => latitude and sst dependent ! 'none' => no ice @@ -1140,7 +1140,26 @@ subroutine input_data endif endif + if (grid_ice == 'CD') then + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: grid_ice = CD not supported yet' + endif + abort_list = trim(abort_list)//":47" + elseif (grid_ice == 'C_override_D') then + if (my_task == master_task) then + write(nu_diag,*) subname//' WARNING: using grid_ice = CD, not supported' + endif + grid_ice = 'CD' + endif + if (grid_ice == 'C' .or. grid_ice == 'CD') then + if (kdyn > 1) then + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: grid_ice = C | CD only supported with kdyn<=1 (evp or off)' + write(nu_diag,*) subname//' ERROR: kdyn and grid_ice inconsistency' + endif + abort_list = trim(abort_list)//":46" + endif if (visc_method /= 'avg_zeta' .and. visc_method /= 'avg_strength') then if (my_task == master_task) then write(nu_diag,*) subname//' ERROR: invalid method for viscosities' @@ -2276,9 +2295,9 @@ subroutine input_data abort_list = trim(abort_list)//":26" endif - if (kmt_type /= 'file' .and. & + if (kmt_type /= 'file' .and. & kmt_type /= 'channel' .and. & - kmt_type /= 'wall' .and. & + kmt_type /= 'wall' .and. & kmt_type /= 'default' .and. & kmt_type /= 'boxislands') then if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown kmt_type=',trim(kmt_type) @@ -2849,12 +2868,12 @@ subroutine set_state_var (nx_block, ny_block, & ! ice concentration/thickness !--------------------------------------------------------- - if (trim(ice_data_type) == 'box2001' .or. & + if (trim(ice_data_type) == 'box2001' .or. & trim(ice_data_type) == 'smallblock' .or. & - trim(ice_data_type) == 'channel' .or. & - trim(ice_data_type) == 'bigblock' .or. & - trim(ice_data_type) == 'blockep5' .or. & - trim(ice_data_type) == 'uniformp5' .or. & + trim(ice_data_type) == 'channel' .or. & + trim(ice_data_type) == 'bigblock' .or. & + trim(ice_data_type) == 'blockep5' .or. & + trim(ice_data_type) == 'uniformp5' .or. & trim(ice_data_type) == 'gauss') then hbar = c2 ! initial ice thickness @@ -2868,7 +2887,7 @@ subroutine set_state_var (nx_block, ny_block, & enddo elseif (trim(ice_data_type) == 'boxslotcyl' .or. & - trim(ice_data_type) == 'medblocke' .or. & + trim(ice_data_type) == 'medblocke' .or. & trim(ice_data_type) == 'blocke') then hbar = c1 ! initial ice thickness (1 m) diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 39750cc75..1892a396e 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -165,7 +165,7 @@ module ice_grid real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & hm , & ! land/boundary mask, thickness (T-cell) bm , & ! task/block id - uvm , & ! land/boundary mask, velocity (U-cell) + uvm , & ! land/boundary mask (U-cell) npm , & ! land/boundary mask (N-cell) epm , & ! land/boundary mask (E-cell) kmt ! ocean topography mask for bathymetry (T-cell) @@ -177,8 +177,8 @@ module ice_grid logical (kind=log_kind), & dimension (:,:,:), allocatable, public :: & tmask , & ! 
land/boundary mask, thickness (T-cell) - umask , & ! land/boundary mask, velocity (U-cell) (1 if all surrounding T cells are ocean) - umaskCD, & ! land/boundary mask, velocity (U-cell) (1 if at least two surrounding T cells are ocean) + umask , & ! land/boundary mask (U-cell) (1 if all surrounding T cells are ocean) + umaskCD, & ! land/boundary mask (U-cell) (1 if at least two surrounding T cells are ocean) nmask , & ! land/boundary mask, (N-cell) emask , & ! land/boundary mask, (E-cell) lmask_n, & ! northern hemisphere mask diff --git a/configuration/scripts/options/set_nml.gridcd b/configuration/scripts/options/set_nml.gridcd index c4198f382..104801879 100644 --- a/configuration/scripts/options/set_nml.gridcd +++ b/configuration/scripts/options/set_nml.gridcd @@ -1,2 +1,2 @@ -grid_ice = 'CD' +grid_ice = 'C_override_D' diff --git a/configuration/scripts/tests/omp_suite.ts b/configuration/scripts/tests/omp_suite.ts index fdf27881a..ea8680170 100644 --- a/configuration/scripts/tests/omp_suite.ts +++ b/configuration/scripts/tests/omp_suite.ts @@ -55,7 +55,7 @@ smoke gbox80 8x1 boxslotcyl,reprosum,run10day,cmplogrest,thread smoke gx3 8x4 diag1,reprosum,run10day,gridc smoke gx3 6x2 alt01,reprosum,run10day,gridc smoke gx3 8x2 alt02,reprosum,run10day,gridc -smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridc +#smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridc smoke gx3 4x4 alt04,reprosum,run10day,gridc smoke gx3 4x4 alt05,reprosum,run10day,gridc smoke gx3 8x2 alt06,reprosum,run10day,gridc @@ -65,13 +65,13 @@ smoke gx1 15x2 seabedprob,reprosum,run10day,gridc smoke gx3 14x2 fsd12,reprosum,run10day,gridc smoke gx3 11x2 isotope,reprosum,run10day,gridc smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridc -smoke gx3 6x4 dynpicard,reprosum,run10day,gridc +#smoke gx3 6x4 dynpicard,reprosum,run10day,gridc smoke gx3 8x3 zsal,reprosum,run10day,gridc smoke gx3 1x1x100x116x1 reprosum,run10day,gridc,thread smoke gbox128 8x2 reprosum,run10day,gridc smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridc -smoke gbox128 9x2 boxadv,reprosum,run10day,gridc +#smoke gbox128 9x2 boxadv,reprosum,run10day,gridc smoke gbox128 14x2 boxrestore,reprosum,run10day,gridc smoke gbox80 4x5 box2001,reprosum,run10day,gridc smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridc @@ -80,7 +80,7 @@ smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest,gridc smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_gridc_diag1_reprosum_run10day smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x2_alt01_gridc_reprosum_run10day smoke gx3 8x1 alt02,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_alt02_gridc_reprosum_run10day -smoke gx3 8x1 alt03,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_12x2_alt03_droundrobin_gridc_reprosum_run10day +#smoke gx3 8x1 alt03,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_12x2_alt03_droundrobin_gridc_reprosum_run10day smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_4x4_alt04_gridc_reprosum_run10day smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_4x4_alt05_gridc_reprosum_run10day smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_alt06_gridc_reprosum_run10day @@ -90,14 +90,14 @@ smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_14x2_fsd12_gridc_reprosum_run10day smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_11x2_gridc_isotope_reprosum_run10day smoke 
gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_gridc_icdefault_reprosum_run10day_snwitdrdg_snwgrain -smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x4_dynpicard_gridc_reprosum_run10day +#smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x4_dynpicard_gridc_reprosum_run10day smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x3_gridc_reprosum_run10day_zsal smoke gx3 4x2x25x29x4 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day_thread smoke gx3 8x4x5x4x80 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day_thread smoke gbox128 8x1 reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_8x2_gridc_reprosum_run10day smoke gbox128 8x1 boxnodyn,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_12x2_boxnodyn_gridc_reprosum_run10day -smoke gbox128 8x1 boxadv,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_9x2_boxadv_gridc_reprosum_run10day +#smoke gbox128 8x1 boxadv,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_9x2_boxadv_gridc_reprosum_run10day smoke gbox128 8x1 boxrestore,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox128_14x2_boxrestore_gridc_reprosum_run10day smoke gbox80 8x1 box2001,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox80_4x5_box2001_gridc_reprosum_run10day smoke gbox80 8x1 boxslotcyl,reprosum,run10day,cmplogrest,thread,gridc smoke_gbox80_11x3_boxslotcyl_gridc_reprosum_run10day @@ -107,7 +107,7 @@ smoke gbox80 8x1 boxslotcyl,reprosum,run10day,cmplogrest,thread smoke gx3 8x4 diag1,reprosum,run10day,gridcd smoke gx3 6x2 alt01,reprosum,run10day,gridcd smoke gx3 8x2 alt02,reprosum,run10day,gridcd -smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridcd +#smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridcd smoke gx3 4x4 alt04,reprosum,run10day,gridcd smoke gx3 4x4 alt05,reprosum,run10day,gridcd smoke gx3 8x2 alt06,reprosum,run10day,gridcd @@ -117,13 +117,13 @@ smoke gx1 15x2 seabedprob,reprosum,run10day,gridcd smoke gx3 14x2 fsd12,reprosum,run10day,gridcd smoke gx3 11x2 isotope,reprosum,run10day,gridcd smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridcd -smoke gx3 6x4 dynpicard,reprosum,run10day,gridcd +#smoke gx3 6x4 dynpicard,reprosum,run10day,gridcd smoke gx3 8x3 zsal,reprosum,run10day,gridcd smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd,thread smoke gbox128 8x2 reprosum,run10day,gridcd smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridcd -smoke gbox128 9x2 boxadv,reprosum,run10day,gridcd +#smoke gbox128 9x2 boxadv,reprosum,run10day,gridcd smoke gbox128 14x2 boxrestore,reprosum,run10day,gridcd smoke gbox80 4x5 box2001,reprosum,run10day,gridcd smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridcd @@ -132,7 +132,7 @@ smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest,gridcd smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_gridcd_diag1_reprosum_run10day smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x2_alt01_gridcd_reprosum_run10day smoke gx3 8x1 alt02,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_alt02_gridcd_reprosum_run10day -smoke gx3 8x1 alt03,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_12x2_alt03_droundrobin_gridcd_reprosum_run10day +#smoke gx3 8x1 alt03,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_12x2_alt03_droundrobin_gridcd_reprosum_run10day smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_4x4_alt04_gridcd_reprosum_run10day smoke gx3 8x1 
alt05,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_4x4_alt05_gridcd_reprosum_run10day smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_alt06_gridcd_reprosum_run10day @@ -142,14 +142,14 @@ smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_14x2_fsd12_gridcd_reprosum_run10day smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_11x2_gridcd_isotope_reprosum_run10day smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_gridcd_icdefault_reprosum_run10day_snwitdrdg_snwgrain -smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x4_dynpicard_gridcd_reprosum_run10day +#smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x4_dynpicard_gridcd_reprosum_run10day smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x3_gridcd_reprosum_run10day_zsal smoke gx3 4x2x25x29x4 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day_thread smoke gx3 8x4x5x4x80 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day_thread smoke gbox128 8x1 reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_8x2_gridcd_reprosum_run10day smoke gbox128 8x1 boxnodyn,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_12x2_boxnodyn_gridcd_reprosum_run10day -smoke gbox128 8x1 boxadv,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_9x2_boxadv_gridcd_reprosum_run10day +#smoke gbox128 8x1 boxadv,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_9x2_boxadv_gridcd_reprosum_run10day smoke gbox128 8x1 boxrestore,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox128_14x2_boxrestore_gridcd_reprosum_run10day smoke gbox80 8x1 box2001,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox80_4x5_box2001_gridcd_reprosum_run10day smoke gbox80 8x1 boxslotcyl,reprosum,run10day,cmplogrest,thread,gridcd smoke_gbox80_11x3_boxslotcyl_gridcd_reprosum_run10day diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 523b14058..54263267a 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -293,7 +293,7 @@ either Celsius or Kelvin units). 
"grid_atm_thrm", "grid for atm thermodynamic forcing/coupling fields, 'T', 'U', 'N', 'E'", "" "grid_file", "input file for grid info", "" "grid_format", "format of grid files", "" - "grid_ice", "structure of the model ice grid, ‘B’, ‘CD’, etc", "" + "grid_ice", "structure of the model ice grid, ‘B’, ‘C’, etc", "" "grid_ice_dynu", "grid for ice dynamic-u model fields, 'T', 'U', 'N', 'E'", "" "grid_ice_dynv", "grid for ice dynamic-v model fields, 'T', 'U', 'N', 'E'", "" "grid_ice_thrm", "grid for ice thermodynamic model fields, 'T', 'U', 'N', 'E'", "" diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index dcfcb0451..fcc1cc8c9 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -255,7 +255,7 @@ grid_nml "``grid_format``", "``bin``", "read direct access grid and kmt files", "``bin``" "", "``nc``", "read grid and kmt files", "" "``grid_ice``", "``B``", "use B grid structure with T at center and U at NE corner", "``B``" - "", "``CD``", "use CD grid structure with T at center and U/V at N and E edge", "" + "", "``C``", "use C grid structure with T at center, U at E edge, V at N edge", "" "``grid_ocn``", "``A``", "ocn forcing/coupling grid, all fields on T grid", "``A``" "", "``B``", "ocn forcing/coupling grid, thermo fields on T grid, dyn fields on U grid", "" "", "``C``", "ocn forcing/coupling grid, thermo fields on T grid, dynu fields on E grid, dynv fields on N grid", "" diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 857444bc9..921d58a29 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -111,9 +111,11 @@ distribution, http://mitgcm.org/viewvc/MITgcm/MITgcm/pkg/seaice/. Schematic of CICE B-grid. -The ability to solve on the CD-grid was added later. With the CD grid, -the u and v velocity points are located on the N and E edges of the T cell -rather than the T cell corners. To support this capability, N and E grids +The ability to solve on the C and CD grids was added later. With the C grid, +the u velocity points are located on the E edges and the v velocity points are +located on the N edges of the T cell rather than at the T cell corners. On +the CD grid, the u and v velocity points are located on both the N and E edges. +To support this capability, N and E grids were added to the existing T and U grids, and the N and E grids are defined at the northern and eastern edge of the T cell. This is shown in Figure :ref:`fig-Cgrid`. From 150182d5afe078e5c51ad4970d76566c6cb78e72 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Wed, 23 Mar 2022 12:34:32 -0700 Subject: [PATCH 097/109] Refactor visc_replpres implementation, get rid of separate avgstr and avgzeta routines, (#85) leverage grid_average_X2Y for averaging fields from T to U. 
Refactor div_stress routines, split into 4 separate methods and get rid of if statements --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 601 ++++++++++-------- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 116 ---- 2 files changed, 338 insertions(+), 379 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 45b4c2062..9248a4ecb 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -112,8 +112,9 @@ subroutine evp (dt) ice_timer_start, ice_timer_stop, timer_evp_1d, timer_evp_2d use ice_dyn_evp_1d, only: ice_dyn_evp_1d_copyin, ice_dyn_evp_1d_kernel, & ice_dyn_evp_1d_copyout - use ice_dyn_shared, only: evp_algorithm, stack_fields, unstack_fields, DminTarea - use ice_dyn_shared, only: deformations, deformations_T, strain_rates_U, dyn_haloUpdate + use ice_dyn_shared, only: evp_algorithm, stack_fields, unstack_fields, & + DminTarea, visc_method, deformations, deformations_T, strain_rates_U, & + dyn_haloUpdate real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -188,9 +189,12 @@ subroutine evp (dt) fld4(:,:,:,:) ! bundled fields size 4 real (kind=dbl_kind), allocatable :: & - shrU (:,:,:), & ! shearU array for gridC - zetax2T(:,:,:), & ! zetax2 = 2*zeta (bulk viscosity) - etax2T (:,:,:) ! etax2 = 2*eta (shear viscosity) + strengthU(:,:,:), & ! strength averaged to U points + shrU (:,:,:), & ! shearU array for gridC + zetax2T (:,:,:), & ! zetax2 = 2*zeta (bulk viscosity) + zetax2U (:,:,:), & ! zetax2T averaged to U points + etax2T (:,:,:), & ! etax2 = 2*eta (shear viscosity) + etax2U (:,:,:) ! etax2T averaged to U points real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & strtmp ! stress combinations for momentum equation @@ -229,12 +233,18 @@ subroutine evp (dt) if (grid_ice == 'CD' .or. grid_ice == 'C') then - allocate(shrU (nx_block,ny_block,max_blocks)) - allocate(zetax2T(nx_block,ny_block,max_blocks)) - allocate(etax2T (nx_block,ny_block,max_blocks)) - shrU (:,:,:) = c0 - zetax2T(:,:,:) = c0 - etax2T (:,:,:) = c0 + allocate(strengthU(nx_block,ny_block,max_blocks)) + allocate(shrU (nx_block,ny_block,max_blocks)) + allocate(zetax2T (nx_block,ny_block,max_blocks)) + allocate(zetax2U (nx_block,ny_block,max_blocks)) + allocate(etax2T (nx_block,ny_block,max_blocks)) + allocate(etax2U (nx_block,ny_block,max_blocks)) + strengthU(:,:,:) = c0 + shrU (:,:,:) = c0 + zetax2T (:,:,:) = c0 + zetax2U (:,:,:) = c0 + etax2T (:,:,:) = c0 + etax2U (:,:,:) = c0 endif @@ -445,7 +455,7 @@ subroutine evp (dt) if (grid_ice == 'CD' .or. grid_ice == 'C') then - !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,i,j) SCHEDULE(runtime) do iblk = 1, nblocks !----------------------------------------------------------------- @@ -782,14 +792,12 @@ subroutine evp (dt) uvel (:,:,iblk), vvel (:,:,iblk), & Tbu (:,:,iblk)) - - enddo ! 
iblk !$OMP END PARALLEL DO elseif (grid_ice == "C") then - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call strain_rates_U (nx_block , ny_block , & @@ -850,6 +858,12 @@ subroutine evp (dt) field_loc_center, field_type_scalar, & zetax2T, etax2T, stresspT, stressmT) + if (visc_method == 'avg_strength') then + call grid_average_X2Y('S', strength, 'T', strengthU, 'U') + elseif (visc_method == 'avg_zeta') then + call grid_average_X2Y('S', etax2T , 'T', etax2U , 'U') + endif + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stressC_U (nx_block , ny_block , & @@ -860,13 +874,12 @@ subroutine evp (dt) uvel (:,:,iblk), vvel (:,:,iblk), & dxE (:,:,iblk), dyN (:,:,iblk), & dxU (:,:,iblk), dyU (:,:,iblk), & - tarea (:,:,iblk), uarea (:,:,iblk), & ratiodxN (:,:,iblk), ratiodxNr (:,:,iblk), & ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & epm (:,:,iblk), npm (:,:,iblk), & - hm (:,:,iblk), & - zetax2T (:,:,iblk), etax2T (:,:,iblk), & - strength (:,:,iblk), shrU (:,:,iblk), & + uarea (:,:,iblk), & + etax2U (:,:,iblk), & + strengthU (:,:,iblk), shrU (:,:,iblk), & stress12U (:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -879,29 +892,23 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call div_stress (nx_block , ny_block , & ! E point - icelle (iblk), & - indxei (:,iblk), indxej (:,iblk), & - dxE (:,:,iblk), dyE (:,:,iblk), & - dxU (:,:,iblk), dyT (:,:,iblk), & - earear (:,:,iblk) , & - stresspF1 = stresspT (:,:,iblk) , & - stressmF1 = stressmT (:,:,iblk) , & - stress12F1 = stress12U (:,:,iblk) , & - F1 = strintxE (:,:,iblk) , & - grid_location = 'E') - - call div_stress (nx_block , ny_block , & ! N point - icelln (iblk), & - indxni (:,iblk), indxnj (:,iblk), & - dxN (:,:,iblk), dyN (:,:,iblk), & - dxT (:,:,iblk), dyU (:,:,iblk), & - narear (:,:,iblk) , & - stresspF2 = stresspT (:,:,iblk) , & - stressmF2 = stressmT (:,:,iblk) , & - stress12F2 = stress12U (:,:,iblk) , & - F2 = strintyN (:,:,iblk) , & - grid_location = 'N') + call div_stress_Ex (nx_block , ny_block , & + icelle (iblk), & + indxei (:,iblk), indxej (:,iblk), & + dxE (:,:,iblk), dyE (:,:,iblk), & + dxU (:,:,iblk), dyT (:,:,iblk), & + earear (:,:,iblk) , & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), strintxE (:,:,iblk) ) + + call div_stress_Ny (nx_block , ny_block , & + icelln (iblk), & + indxni (:,iblk), indxnj (:,iblk), & + dxN (:,:,iblk), dyN (:,:,iblk), & + dxT (:,:,iblk), dyU (:,:,iblk), & + narear (:,:,iblk) , & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), strintyN (:,:,iblk) ) enddo !$OMP END PARALLEL DO @@ -1002,6 +1009,13 @@ subroutine evp (dt) field_loc_center, field_type_scalar, & zetax2T, etax2T) + if (visc_method == 'avg_strength') then + call grid_average_X2Y('S', strength, 'T', strengthU, 'U') + elseif (visc_method == 'avg_zeta') then + call grid_average_X2Y('S', zetax2T , 'T', zetax2U , 'U') + call grid_average_X2Y('S', etax2T , 'T', etax2U , 'U') + endif + !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stressCD_U (nx_block , ny_block , & @@ -1012,13 +1026,12 @@ subroutine evp (dt) uvel (:,:,iblk), vvel (:,:,iblk), & dxE (:,:,iblk), dyN (:,:,iblk), & dxU (:,:,iblk), dyU (:,:,iblk), & - tarea (:,:,iblk), uarea (:,:,iblk), & ratiodxN (:,:,iblk), ratiodxNr (:,:,iblk), & ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & epm (:,:,iblk), npm (:,:,iblk), & - hm (:,:,iblk), & - zetax2T (:,:,iblk), etax2T (:,:,iblk), & - strength (:,:,iblk), & + uarea (:,:,iblk), & + zetax2U (:,:,iblk), etax2U 
(:,:,iblk), & + strengthU (:,:,iblk), & stresspU (:,:,iblk), stressmU (:,:,iblk), & stress12U (:,:,iblk)) enddo @@ -1035,31 +1048,41 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call div_stress (nx_block , ny_block , & ! E point - icelle (iblk), & - indxei (:,iblk), indxej (:,iblk), & - dxE (:,:,iblk), dyE (:,:,iblk), & - dxU (:,:,iblk), dyT (:,:,iblk), & - earear (:,:,iblk), & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12U (:,:,iblk), & - stresspU (:,:,iblk), stressmU (:,:,iblk), & - stress12T (:,:,iblk), & - strintxE (:,:,iblk), strintyE (:,:,iblk), & - 'E') - - call div_stress (nx_block , ny_block , & ! N point - icelln (iblk), & - indxni (:,iblk), indxnj (:,iblk), & - dxN (:,:,iblk), dyN (:,:,iblk), & - dxT (:,:,iblk), dyU (:,:,iblk), & - narear (:,:,iblk), & - stresspU (:,:,iblk), stressmU (:,:,iblk), & - stress12T (:,:,iblk), & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12U (:,:,iblk), & - strintxN (:,:,iblk), strintyN (:,:,iblk), & - 'N') + call div_stress_Ex (nx_block , ny_block , & + icelle (iblk), & + indxei (:,iblk), indxej (:,iblk), & + dxE (:,:,iblk), dyE (:,:,iblk), & + dxU (:,:,iblk), dyT (:,:,iblk), & + earear (:,:,iblk) , & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), strintxE (:,:,iblk) ) + + call div_stress_Ey (nx_block , ny_block , & + icelle (iblk), & + indxei (:,iblk), indxej (:,iblk), & + dxE (:,:,iblk), dyE (:,:,iblk), & + dxU (:,:,iblk), dyT (:,:,iblk), & + earear (:,:,iblk) , & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12T (:,:,iblk), strintyE (:,:,iblk) ) + + call div_stress_Nx (nx_block , ny_block , & + icelln (iblk), & + indxni (:,iblk), indxnj (:,iblk), & + dxN (:,:,iblk), dyN (:,:,iblk), & + dxT (:,:,iblk), dyU (:,:,iblk), & + narear (:,:,iblk) , & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12T (:,:,iblk), strintxN (:,:,iblk) ) + + call div_stress_Ny (nx_block , ny_block , & + icelln (iblk), & + indxni (:,iblk), indxnj (:,iblk), & + dxN (:,:,iblk), dyN (:,:,iblk), & + dxT (:,:,iblk), dyU (:,:,iblk), & + narear (:,:,iblk) , & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), strintyN (:,:,iblk) ) enddo !$OMP END PARALLEL DO @@ -1125,7 +1148,7 @@ subroutine evp (dt) deallocate(fld2,fld3,fld4) if (grid_ice == 'CD' .or. grid_ice == 'C') then - deallocate(shrU, zetax2T, etax2T) + deallocate(strengthU, shrU, zetax2T, zetax2U, etax2T, etax2U) endif if (maskhalo_dyn) then @@ -1679,10 +1702,8 @@ subroutine stressC_T (nx_block, ny_block , & ! viscosities and replacement pressure at T point !----------------------------------------------------------------- - call visc_replpress (strength(i,j), DminTarea(i,j), & - DeltaT , zetax2T (i,j), & - etax2T (i,j), rep_prsT , & - capping) + call visc_replpress (strength(i,j), DminTarea(i,j), DeltaT, & + zetax2T (i,j), etax2T (i,j), rep_prsT, capping) !----------------------------------------------------------------- ! the stresses ! kg/s^2 @@ -1713,25 +1734,24 @@ end subroutine stressC_T ! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method ! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. 
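! Note (assumed behavior, sketched for orientation): the two visc_method
! options differ in what is averaged from T to U.  With 'avg_zeta'
! (C1 method, Bouillon et al. 2013) the viscosities zetax2T/etax2T are
! averaged directly to the U point; with 'avg_strength' (C2 method,
! Kimmritz et al. 2016) the ice strength is averaged to U and the
! viscosities are recomputed there, as in
!    call visc_replpress (strengthU(i,j), DminUarea, DeltaU(i,j), &
!                         zetax2U, etax2U, rep_prsU, capping)
! where the replacement pressure satisfies
!    rep_prsU = (c1-Ktens)/(c1+Ktens) * zetax2U * DeltaU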
- subroutine stressC_U (nx_block, ny_block, & - icellu, & - indxui , indxuj, & - uvelE , vvelE, & - uvelN , vvelN, & - uvelU , vvelU, & - dxE , dyN, & - dxU , dyU, & - tarea , uarea, & - ratiodxN, ratiodxNr, & - ratiodyE, ratiodyEr, & - epm, npm, hm, & - zetax2T , etax2T, & - strength, shrU, & - stress12 ) + subroutine stressC_U (nx_block , ny_block, & + icellu, & + indxui , indxuj, & + uvelE , vvelE, & + uvelN , vvelN, & + uvelU , vvelU, & + dxE , dyN, & + dxU , dyU, & + ratiodxN , ratiodxNr, & + ratiodyE , ratiodyEr, & + epm , npm, & + uarea , & + etax2U , & + strengthU, shrU, & + stress12 ) use ice_dyn_shared, only: strain_rates_U, & - visc_replpress_avgstr, & - visc_replpress_avgzeta, & + visc_replpress, & visc_method, deltaminEVP, capping integer (kind=int_kind), intent(in) :: & @@ -1753,19 +1773,16 @@ subroutine stressC_U (nx_block, ny_block, & dyN , & ! height of N-cell through the middle (m) dxU , & ! width of U-cell through the middle (m) dyU , & ! height of U-cell through the middle (m) - tarea , & ! area of T-cell (m^2) - uarea , & ! area of U-cell (m^2) ratiodxN , & ! -dxN(i+1,j)/dxN(i,j) factor for BCs across coastline ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) factor for BCs across coastline ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) factor for BCs across coastline ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) factor for BCs across coastline epm , & ! E-cell mask npm , & ! N-cell mask - hm , & ! T-cell mask - zetax2T , & ! 2*zeta at the T point - etax2T , & ! 2*eta at the T point + uarea , & ! area of U point + etax2U , & ! 2*eta at the U point shrU , & ! shearU array - strength ! ice strength at the T point + strengthU ! ice strength at the U point real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & stress12 ! sigma12 @@ -1779,12 +1796,10 @@ subroutine stressC_U (nx_block, ny_block, & DeltaU ! delt at U point real (kind=dbl_kind) :: & - zetax2U , & ! bulk viscosity at U point - etax2U , & ! shear viscosity at U point - rep_prsU , & ! replacement pressure at U point - DminUarea, & ! Dmin on U - strtmp , & ! tmp variable - areatmp ! tmp variable + lzetax2U , & ! bulk viscosity at U point + letax2U , & ! shear viscosity at U point + lrep_prsU, & ! replacement pressure at U point + lDminUarea ! Dmin on U character(len=*), parameter :: subname = '(stressC_U)' @@ -1819,39 +1834,23 @@ subroutine stressC_U (nx_block, ny_block, & !----------------------------------------------------------------- if (visc_method == 'avg_zeta') then - DeltaU(i,j) = c0 ! not needed in avgzeta just computing etax2U - call visc_replpress_avgzeta (zetax2T (i ,j ), zetax2T (i ,j+1), & - zetax2T (i+1,j+1), zetax2T (i+1,j ), & - etax2T (i ,j ), etax2T (i ,j+1), & - etax2T (i+1,j+1), etax2T (i+1,j ), & - hm (i ,j ), hm (i ,j+1), & - hm (i+1,j+1), hm (i+1,j ), & - tarea (i ,j ), tarea (i ,j+1), & - tarea (i+1,j+1), tarea (i+1,j ), & - DeltaU (i ,j ), etax2U=etax2U) + letax2U = etax2U(i,j) elseif (visc_method == 'avg_strength') then - DminUarea = deltaminEVP*uarea(i,j) + lDminUarea = deltaminEVP*uarea(i,j) ! only need etax2U here, but other terms are calculated with etax2U ! 
the extra computation is minimal even though it looks like more work
-         call visc_replpress_avgstr (strength(i ,j ), strength(i ,j+1), &
-                                     strength(i+1,j+1), strength(i+1,j ), &
-                                     hm (i ,j ) , hm (i ,j+1), &
-                                     hm (i+1,j+1) , hm (i+1,j ), &
-                                     tarea (i ,j ) , tarea (i ,j+1), &
-                                     tarea (i+1,j+1) , tarea (i+1,j ), &
-                                     DminUarea, DeltaU(i,j), &
-                                     zetax2U, etax2U, rep_prsU, capping)
+         call visc_replpress (strengthU(i,j), lDminUarea, DeltaU(i,j), &
+                              lzetax2U , letax2U , lrep_prsU , capping)
+
       endif

       !-----------------------------------------------------------------
       ! the stresses ! kg/s^2
       !-----------------------------------------------------------------

-      ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code
-
       stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) &
-                        + arlx1i*p5*etax2U*shrU(i,j)) * denom1
+                        + arlx1i*p5*letax2U*shrU(i,j)) * denom1

       enddo                     ! ij

@@ -1945,10 +1944,8 @@ subroutine stressCD_T (nx_block, ny_block, &
       ! viscosities and replacement pressure at T point
       !-----------------------------------------------------------------

-         call visc_replpress (strength(i,j), DminTarea(i,j), &
-                              DeltaT (i,j), zetax2T (i,j), &
-                              etax2T (i,j), rep_prsT , &
-                              capping)
+         call visc_replpress (strength(i,j), DminTarea(i,j), DeltaT(i,j), &
+                              zetax2T (i,j), etax2T (i,j), rep_prsT , capping)

       !-----------------------------------------------------------------
       ! the stresses ! kg/s^2
@@ -1983,18 +1980,17 @@ subroutine stressCD_U (nx_block, ny_block, &
                            uvelU, vvelU, &
                            dxE, dyN, &
                            dxU, dyU, &
-                           tarea, uarea, &
                            ratiodxN, ratiodxNr, &
                            ratiodyE, ratiodyEr, &
-                           epm, npm, hm, &
-                           zetax2T, etax2T, &
-                           strength, &
+                           epm, npm, &
+                           uarea, &
+                           zetax2U, etax2U, &
+                           strengthU, &
                            stresspU, stressmU, &
                            stress12U )

       use ice_dyn_shared, only: strain_rates_U, &
-          visc_replpress_avgstr, &
-          visc_replpress_avgzeta, &
+          visc_replpress, &
           visc_method, deltaminEVP, capping

       integer (kind=int_kind), intent(in) :: &
@@ -2016,18 +2012,16 @@ subroutine stressCD_U (nx_block, ny_block, &
       dyN , & ! height of N-cell through the middle (m)
       dxU , & ! width of U-cell through the middle (m)
       dyU , & ! height of U-cell through the middle (m)
-      tarea , & ! area of T-cell (m^2)
-      uarea , & ! area of U-cell (m^2)
       ratiodxN , & ! -dxN(i+1,j)/dxN(i,j) factor for BCs across coastline
       ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) factor for BCs across coastline
       ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) factor for BCs across coastline
       ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) factor for BCs across coastline
       epm , & ! E-cell mask
       npm , & ! N-cell mask
-      hm , & ! T-cell mask
-      zetax2T , & ! 2*zeta at the T point
-      etax2T , & ! 2*eta at the T point
-      strength ! ice strength at the T point
+      uarea , & ! area of U-cell (m^2)
+      zetax2U , & ! 2*zeta at the U point
+      etax2U , & ! 2*eta at the U point
+      strengthU ! ice strength at the U point

       real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: &
       stresspU , & ! sigma11+sigma22
@@ -2046,10 +2040,10 @@ subroutine stressCD_U (nx_block, ny_block, &
       DeltaU ! delt at U point

       real (kind=dbl_kind) :: &
-      zetax2U , & ! bulk viscosity at U point
-      etax2U , & ! shear viscosity at U point
-      rep_prsU , & ! replacement pressure at U point
-      DminUarea ! Dmin on U
+      lzetax2U , & ! bulk viscosity at U point
+      letax2U , & ! shear viscosity at U point
+      lrep_prsU , & ! replacement pressure at U point
+      lDminUarea ! Dmin on U

       character(len=*), parameter :: subname = '(stressCD_U)'

@@ -2078,30 +2072,21 @@ subroutine stressCD_U (nx_block, ny_block, &

       !-----------------------------------------------------------------
       ! 
viscosities and replacement pressure at U point + ! avg_zeta: Bouillon et al. 2013, C1 method of Kimmritz et al. 2016 + ! avg_strength: C2 method of Kimmritz et al. 2016 !----------------------------------------------------------------- if (visc_method == 'avg_zeta') then - call visc_replpress_avgzeta (zetax2T (i ,j ), zetax2T (i ,j+1), & - zetax2T (i+1,j+1), zetax2T (i+1,j ), & - etax2T (i ,j ), etax2T (i ,j+1), & - etax2T (i+1,j+1), etax2T (i+1,j ), & - hm (i ,j ), hm (i ,j+1), & - hm (i+1,j+1), hm (i+1,j ), & - tarea (i ,j ), tarea (i ,j+1), & - tarea (i+1,j+1), tarea (i+1,j ), & - DeltaU (i ,j ), & - zetax2U, etax2U, rep_prsU) + lzetax2U = zetax2U(i,j) + letax2U = etax2U(i,j) + lrep_prsU = (c1-Ktens)/(c1+Ktens)*lzetax2U*deltaU(i,j) elseif (visc_method == 'avg_strength') then - DminUarea = deltaminEVP*uarea(i,j) - call visc_replpress_avgstr (strength(i ,j ), strength(i ,j+1), & - strength(i+1,j+1), strength(i+1,j ), & - hm (i ,j ) , hm (i ,j+1), & - hm (i+1,j+1) , hm (i+1,j ), & - tarea (i ,j ) , tarea (i ,j+1), & - tarea (i+1,j+1) , tarea (i+1,j ), & - DminUarea , DeltaU (i ,j ), & - zetax2U, etax2U, rep_prsU, capping) + lDminUarea = deltaminEVP*uarea(i,j) + ! only need etax2U here, but other terms are calculated with etax2U + ! minimal extra calculations here even though it seems like there is + call visc_replpress (strengthU(i,j), lDminUarea, DeltaU(i,j), & + lzetax2U , letax2U , lrep_prsU , capping) endif !----------------------------------------------------------------- @@ -2111,13 +2096,13 @@ subroutine stressCD_U (nx_block, ny_block, & ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code stresspU(i,j) = (stresspU (i,j)*(c1-arlx1i*revp) & - + arlx1i*(zetax2U*divU(i,j) - rep_prsU)) * denom1 + + arlx1i*(lzetax2U*divU(i,j) - lrep_prsU)) * denom1 stressmU(i,j) = (stressmU (i,j)*(c1-arlx1i*revp) & - + arlx1i*etax2U*tensionU(i,j)) * denom1 + + arlx1i*letax2U*tensionU(i,j)) * denom1 stress12U(i,j) = (stress12U(i,j)*(c1-arlx1i*revp) & - + arlx1i*p5*etax2U*shearU(i,j)) * denom1 + + arlx1i*p5*letax2U*shearU(i,j)) * denom1 enddo ! ij @@ -2138,18 +2123,15 @@ end subroutine stressCD_U ! elastic-viscous-plastic sea ice model formulated on Arakawa B and C grids. ! Ocean Model., 27, 174-184. - subroutine div_stress (nx_block , ny_block , & - icell , & - indxi , indxj , & - dxE_N , dyE_N , & - dxT_U , dyT_U , & - arear , & - stresspF1 , stressmF1, & - stress12F1, & - stresspF2 , stressmF2, & - stress12F2, & - F1 , F2 , & - grid_location) + subroutine div_stress_Ex(nx_block, ny_block, & + icell , & + indxi , indxj , & + dxE , dyE , & + dxU , dyT , & + arear , & + stressp , stressm , & + stress12, & + strintx ) integer (kind=int_kind), intent(in) :: & @@ -2157,112 +2139,205 @@ subroutine div_stress (nx_block , ny_block , & icell ! no. of cells where epm (or npm) = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxi , & ! compressed index in i-direction - indxj ! compressed index in j-direction + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - dxE_N , & ! width of E or N-cell through the middle (m) - dyE_N , & ! height of E or N-cell through the middle (m) - dxT_U , & ! width of T or U-cell through the middle (m) - dyT_U , & ! height of T or U-cell through the middle (m) + dxE , & ! width of E or N-cell through the middle (m) + dyE , & ! height of E or N-cell through the middle (m) + dxU , & ! 
width of T or U-cell through the middle (m) + dyT , & ! height of T or U-cell through the middle (m) + arear ! earear or narear + + real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(in) :: & + stressp , & ! stressp (U or T) used for strintx calculation + stressm , & ! stressm (U or T) used for strintx calculation + stress12 ! stress12 (U or T) used for strintx calculation + + real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(out) :: & + strintx ! div of stress tensor for u component + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + character(len=*), parameter :: subname = '(div_stress_Ex)' + + do ij = 1, icell + i = indxi(ij) + j = indxj(ij) + strintx(i,j) = arear(i,j) * & + ( p5 * dyE(i,j) * ( stressp(i+1,j ) - stressp (i ,j ) ) & + + (p5/ dyE(i,j)) * ( (dyT(i+1,j )**2) * stressm (i+1,j ) & + -(dyT(i ,j )**2) * stressm (i ,j ) ) & + + (c1/ dxE(i,j)) * ( (dxU(i ,j )**2) * stress12(i ,j ) & + -(dxU(i ,j-1)**2) * stress12(i ,j-1) ) ) + enddo + + end subroutine div_stress_Ex + +!======================================================================= + subroutine div_stress_Ey(nx_block, ny_block, & + icell , & + indxi , indxj , & + dxE , dyE , & + dxU , dyT , & + arear , & + stressp , stressm , & + stress12, & + strinty ) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icell ! no. of cells where epm (or npm) = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + dxE , & ! width of E or N-cell through the middle (m) + dyE , & ! height of E or N-cell through the middle (m) + dxU , & ! width of T or U-cell through the middle (m) + dyT , & ! height of T or U-cell through the middle (m) arear ! earear or narear real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(in) :: & - stresspF1 , & ! stressp (U or T) used for F1 calculation - stressmF1 , & ! stressm (U or T) used for F1 calculation - stress12F1, & ! stress12 (U or T) used for F1 calculation - stresspF2 , & ! stressp (U or T) used for F2 calculation - stressmF2 , & ! stressm (U or T) used for F2 calculation - stress12F2 ! stress12 (U or T) used for F2 calculation + stressp , & ! stressp (U or T) used for strinty calculation + stressm , & ! stressm (U or T) used for strinty calculation + stress12 ! stress12 (U or T) used for strinty calculation + + real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(out) :: & + strinty ! div of stress tensor for v component + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + character(len=*), parameter :: subname = '(div_stress_Ey)' + + do ij = 1, icell + i = indxi(ij) + j = indxj(ij) + strinty(i,j) = arear(i,j) * & + ( p5 * dxE(i,j) * ( stressp(i ,j ) - stressp (i ,j-1) ) & + - (p5/ dxE(i,j)) * ( (dxU(i ,j )**2) * stressm (i ,j ) & + -(dxU(i ,j-1)**2) * stressm (i ,j-1) ) & + + (c1/ dyE(i,j)) * ( (dyT(i+1,j )**2) * stress12(i+1,j ) & + -(dyT(i ,j )**2) * stress12(i ,j ) ) ) + enddo + + end subroutine div_stress_Ey + +!======================================================================= + subroutine div_stress_Nx(nx_block, ny_block, & + icell , & + indxi , indxj , & + dxN , dyN , & + dxT , dyU , & + arear , & + stressp , stressm , & + stress12, & + strintx ) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icell ! no. 
of cells where epm (or npm) = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + dxN , & ! width of E or N-cell through the middle (m) + dyN , & ! height of E or N-cell through the middle (m) + dxT , & ! width of T or U-cell through the middle (m) + dyU , & ! height of T or U-cell through the middle (m) + arear ! earear or narear - character(len=*), intent(in) :: & - grid_location ! E (East) or N (North) ! TO BE IMPROVED!!!! + real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(in) :: & + stressp , & ! stressp (U or T) used for strintx calculation + stressm , & ! stressm (U or T) used for strintx calculation + stress12 ! stress12 (U or T) used for strintx calculation real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(out) :: & - F1 , & ! div of stress tensor for u component - F2 ! div of stress tensor for v component + strintx ! div of stress tensor for u component ! local variables integer (kind=int_kind) :: & i, j, ij - character(len=*), parameter :: subname = '(div_stress)' + character(len=*), parameter :: subname = '(div_stress_Nx)' + + do ij = 1, icell + i = indxi(ij) + j = indxj(ij) + strintx(i,j) = arear(i,j) * & + ( p5 * dyN(i,j) * ( stressp(i ,j ) - stressp (i-1,j ) ) & + + (p5/ dyN(i,j)) * ( (dyU(i ,j )**2) * stressm (i ,j ) & + -(dyU(i-1,j )**2) * stressm (i-1,j ) ) & + + (c1/ dxN(i,j)) * ( (dxT(i ,j+1)**2) * stress12(i ,j+1) & + -(dxT(i ,j )**2) * stress12(i ,j ) ) ) + enddo -!!! Instead of having the if statements below we could define for example -! i+ci, j+cj where ci, cj would change with grid_position + end subroutine div_stress_Nx - if (grid_location /= "E" .and. grid_location /= "N") then - call abort_ice(subname // ' ERROR: unknown grid_location: ' // grid_location) - endif +!======================================================================= + subroutine div_stress_Ny(nx_block, ny_block, & + icell , & + indxi , indxj , & + dxN , dyN , & + dxT , dyU , & + arear , & + stressp , stressm , & + stress12, & + strinty ) - if (present(F1) .and. & - (.not.present(stresspF1) .or. .not.present(stressmF1) .or. .not.present(stress12F1))) then - call abort_ice(subname // ' ERROR: F1 passing arguments ') - endif + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icell ! no. of cells where epm (or npm) = 1 - if (present(F2) .and. & - (.not.present(stresspF2) .or. .not.present(stressmF2) .or. .not.present(stress12F2))) then - call abort_ice(subname // ' ERROR: F2 passing arguments ') - endif + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxi , & ! compressed index in i-direction + indxj ! compressed index in j-direction - !----------------------------------------------------------------- - ! F1,F2 : div of stress tensor for u,v components - !----------------------------------------------------------------- + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + dxN , & ! width of E or N-cell through the middle (m) + dyN , & ! height of E or N-cell through the middle (m) + dxT , & ! width of T or U-cell through the middle (m) + dyU , & ! height of T or U-cell through the middle (m) + arear ! earear or narear - if (grid_location == "E" .and. 
present(F1)) then - do ij = 1, icell - i = indxi(ij) - j = indxj(ij) - F1(i,j) = arear(i,j) * & - ( p5 * dyE_N(i,j) * ( stresspF1(i+1,j ) - stresspF1 (i ,j ) ) & - + (p5/dyE_N(i,j)) * ( (dyT_U(i+1,j )**2) * stressmF1 (i+1,j ) & - -(dyT_U(i ,j )**2) * stressmF1 (i ,j ) ) & - + (c1/dxE_N(i,j)) * ( (dxT_U(i ,j )**2) * stress12F1(i ,j ) & - -(dxT_U(i ,j-1)**2) * stress12F1(i ,j-1) ) ) - enddo - endif + real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(in) :: & + stressp , & ! stressp (U or T) used for strinty calculation + stressm , & ! stressm (U or T) used for strinty calculation + stress12 ! stress12 (U or T) used for strinty calculation - if (grid_location == "E" .and. present(F2)) then - do ij = 1, icell - i = indxi(ij) - j = indxj(ij) - F2(i,j) = arear(i,j) * & - ( p5 * dxE_N(i,j) * ( stresspF2(i ,j ) - stresspF2 (i ,j-1) ) & - - (p5/dxE_N(i,j)) * ( (dxT_U(i ,j )**2) * stressmF2 (i ,j ) & - -(dxT_U(i ,j-1)**2) * stressmF2 (i ,j-1) ) & - + (c1/dyE_N(i,j)) * ( (dyT_U(i+1,j )**2) * stress12F2(i+1,j ) & - -(dyT_U(i ,j )**2) * stress12F2(i ,j ) ) ) - enddo - endif + real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(out) :: & + strinty ! div of stress tensor for v component - if (grid_location == "N" .and. present(F1)) then - do ij = 1, icell - i = indxi(ij) - j = indxj(ij) - F1(i,j) = arear(i,j) * & - ( p5 * dyE_N(i,j) * ( stresspF1(i ,j ) - stresspF1 (i-1,j ) ) & - + (p5/dyE_N(i,j)) * ( (dyT_U(i ,j )**2) * stressmF1 (i ,j ) & - -(dyT_U(i-1,j )**2) * stressmF1 (i-1,j ) ) & - + (c1/dxE_N(i,j)) * ( (dxT_U(i ,j+1)**2) * stress12F1(i ,j+1) & - -(dxT_U(i ,j )**2) * stress12F1(i ,j ) ) ) - enddo - endif + ! local variables - if (grid_location == "N" .and. present(F2)) then - do ij = 1, icell - i = indxi(ij) - j = indxj(ij) - F2(i,j) = arear(i,j) * & - ( p5 * dxE_N(i,j) * ( stresspF2(i ,j+1) - stresspF2 (i ,j ) ) & - - (p5/dxE_N(i,j)) * ( (dxT_U(i ,j+1)**2) * stressmF2 (i ,j+1) & - -(dxT_U(i ,j )**2) * stressmF2 (i ,j ) ) & - + (c1/dyE_N(i,j)) * ( (dyT_U(i ,j )**2) * stress12F2(i ,j ) & - -(dyT_U(i-1,j )**2) * stress12F2(i-1,j ) ) ) - enddo - endif + integer (kind=int_kind) :: & + i, j, ij + + character(len=*), parameter :: subname = '(div_stress_Ny)' + + do ij = 1, icell + i = indxi(ij) + j = indxj(ij) + strinty(i,j) = arear(i,j) * & + ( p5 * dxN(i,j) * ( stressp(i ,j+1) - stressp (i ,j ) ) & + - (p5/ dxN(i,j)) * ( (dxT(i ,j+1)**2) * stressm (i ,j+1) & + -(dxT(i ,j )**2) * stressm (i ,j ) ) & + + (c1/ dyN(i,j)) * ( (dyU(i ,j )**2) * stress12(i ,j ) & + -(dyU(i-1,j )**2) * stress12(i-1,j ) ) ) + enddo - end subroutine div_stress + end subroutine div_stress_Ny !======================================================================= diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 78a476f5a..b955be5d7 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -30,8 +30,6 @@ module ice_dyn_shared deformations, deformations_T, & strain_rates, strain_rates_T, strain_rates_U, & visc_replpress, & - visc_replpress_avgstr, & - visc_replpress_avgzeta, & dyn_haloUpdate, & stack_fields, unstack_fields @@ -2196,120 +2194,6 @@ subroutine visc_replpress(strength, DminArea, Delta, & end subroutine visc_replpress -!======================================================================= -! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The -! elastic-viscous-plastic method revisited. Ocean Model., 71, 2-12. -! -! Kimmritz, M., S. Danilov and M. 
Losch (2016). The adaptive EVP method -! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. -! -! avg_zeta: Bouillon et al. 2013, C1 method of Kimmritz et al. 2016 - - subroutine visc_replpress_avgzeta (zetax2T1, zetax2T2, & - zetax2T3, zetax2T4, & - etax2T1, etax2T2, & - etax2T3, etax2T4, & - mask1, mask2, & - mask3, mask4, & - area1, area2, & - area3, area4, & - deltaU, zetax2U, etax2U, rep_prsU) - - real (kind=dbl_kind), intent(in):: & - zetax2T1,zetax2T2,zetax2T3,zetax2T4, & - etax2T1, etax2T2, etax2T3, etax2T4, & - mask1, mask2, mask3, mask4, & - area1, area2, area3, area4, & - deltaU - - real (kind=dbl_kind), optional, intent(out):: & - zetax2U, etax2U, rep_prsU ! 2 x viscosities, replacement pressure - - ! local variables - - real (kind=dbl_kind) :: & - lzetax2U, & ! local variable - areatmp - - character(len=*), parameter :: subname = '(visc_replpress_avgzeta)' - - ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - - areatmp = (mask1 * area1 + & - mask4 * area4 + & - mask3 * area3 + & - mask2 * area2) - - if (present(rep_prsU) .or. present(zetax2U)) then - lzetax2U = (mask1 * area1 * zetax2T1 + & - mask4 * area4 * zetax2T4 + & - mask3 * area3 * zetax2T3 + & - mask2 * area2 * zetax2T2) / areatmp - if (present(zetax2U)) then - zetax2U = lzetax2U - endif - endif - - if (present(etax2U)) then - etax2U = (mask1 * area1 * etax2T1 + & - mask4 * area4 * etax2T4 + & - mask3 * area3 * etax2T3 + & - mask2 * area2 * etax2T2) / areatmp - endif - - if (present(rep_prsU)) then - rep_prsU = (c1-Ktens)/(c1+Ktens)*lzetax2U*deltaU - endif - - end subroutine visc_replpress_avgzeta - -!======================================================================= -! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method -! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. -! -! avg_strength: C2 method of Kimmritz et al. 2016 - - subroutine visc_replpress_avgstr (strength1, strength2, & - strength3, strength4, & - mask1, mask2, & - mask3, mask4, & - area1, area2, & - area3, area4, & - DminUarea, deltaU, & - zetax2U, etax2U, rep_prsU, capping) - - real (kind=dbl_kind), intent(in):: & - strength1,strength2,strength3,strength4, & - mask1, mask2, mask3, mask4, & - area1, area2, area3, area4, & - DminUarea, deltaU, capping - - real (kind=dbl_kind), intent(out):: zetax2U, etax2U, rep_prsU - - ! local variables - - real (kind=dbl_kind) :: & - areatmp, strtmp ! area and strength average - - character(len=*), parameter :: subname = '(visc_replpress_avgstr)' - - ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - - areatmp = (mask1 * area1 + & - mask4 * area4 + & - mask3 * area3 + & - mask2 * area2) - - strtmp = (mask1 * area1 * strength1 + & - mask4 * area4 * strength4 + & - mask3 * area3 * strength3 + & - mask2 * area2 * strength2) / areatmp - - call visc_replpress (strtmp, DminUarea, deltaU, & - zetax2U, etax2U, rep_prsU, capping) - - end subroutine visc_replpress_avgstr - !======================================================================= ! 
Do a halo update on 1 field From bffe4e8797f4d166d179df8df8ccc44a42295b13 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Sat, 26 Mar 2022 13:11:02 -0700 Subject: [PATCH 098/109] Refactor strain_rates_T, strain_rates_U (#86) * Refactor strain_rates_T, strain_rates_U * Rename divUx to divergU --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 289 +++++++----------- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 238 ++++++++------- 2 files changed, 243 insertions(+), 284 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 9248a4ecb..a3e916619 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -190,7 +190,10 @@ subroutine evp (dt) real (kind=dbl_kind), allocatable :: & strengthU(:,:,:), & ! strength averaged to U points - shrU (:,:,:), & ! shearU array for gridC + divergU (:,:,:), & ! div array on U points, differentiate from divu + tensionU (:,:,:), & ! tension array on U points + shearU (:,:,:), & ! shear array on U points + deltaU (:,:,:), & ! delta array on U points zetax2T (:,:,:), & ! zetax2 = 2*zeta (bulk viscosity) zetax2U (:,:,:), & ! zetax2T averaged to U points etax2T (:,:,:), & ! etax2 = 2*eta (shear viscosity) @@ -234,13 +237,19 @@ subroutine evp (dt) if (grid_ice == 'CD' .or. grid_ice == 'C') then allocate(strengthU(nx_block,ny_block,max_blocks)) - allocate(shrU (nx_block,ny_block,max_blocks)) + allocate(divergU (nx_block,ny_block,max_blocks)) + allocate(tensionU (nx_block,ny_block,max_blocks)) + allocate(shearU (nx_block,ny_block,max_blocks)) + allocate(deltaU (nx_block,ny_block,max_blocks)) allocate(zetax2T (nx_block,ny_block,max_blocks)) allocate(zetax2U (nx_block,ny_block,max_blocks)) allocate(etax2T (nx_block,ny_block,max_blocks)) allocate(etax2U (nx_block,ny_block,max_blocks)) strengthU(:,:,:) = c0 - shrU (:,:,:) = c0 + divergU (:,:,:) = c0 + tensionU (:,:,:) = c0 + shearU (:,:,:) = c0 + deltaU (:,:,:) = c0 zetax2T (:,:,:) = c0 zetax2U (:,:,:) = c0 etax2T (:,:,:) = c0 @@ -800,6 +809,10 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks + !----------------------------------------------------------------- + ! strain rates at U point + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- call strain_rates_U (nx_block , ny_block , & icellu (iblk), & indxui (:,iblk), indxuj (:,iblk), & @@ -811,7 +824,8 @@ subroutine evp (dt) ratiodxN(:,:,iblk), ratiodxNr(:,:,iblk), & ratiodyE(:,:,iblk), ratiodyEr(:,:,iblk), & epm (:,:,iblk), npm (:,:,iblk), & - shearU = shrU(:,:,iblk) ) + divergU (:,:,iblk), tensionU (:,:,iblk), & + shearU (:,:,iblk), deltaU (:,:,iblk) ) enddo ! iblk !$OMP END PARALLEL DO @@ -819,7 +833,7 @@ subroutine evp (dt) ! 
calls ice_haloUpdate, controls bundles and masks call dyn_HaloUpdate (halo_info, halo_info_mask, & field_loc_NEcorner, field_type_scalar, & - shrU) + shearU) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -831,7 +845,7 @@ subroutine evp (dt) dxN (:,:,iblk), dyE (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & uarea (:,:,iblk), DminTarea (:,:,iblk), & - strength (:,:,iblk), shrU (:,:,iblk), & + strength (:,:,iblk), shearU (:,:,iblk), & zetax2T (:,:,iblk), etax2T (:,:,iblk), & stresspT (:,:,iblk), stressmT (:,:,iblk)) @@ -869,17 +883,9 @@ subroutine evp (dt) call stressC_U (nx_block , ny_block , & icellu (iblk), & indxui (:,iblk), indxuj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxE (:,:,iblk), dyN (:,:,iblk), & - dxU (:,:,iblk), dyU (:,:,iblk), & - ratiodxN (:,:,iblk), ratiodxNr (:,:,iblk), & - ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & - epm (:,:,iblk), npm (:,:,iblk), & uarea (:,:,iblk), & - etax2U (:,:,iblk), & - strengthU (:,:,iblk), shrU (:,:,iblk), & + etax2U (:,:,iblk), deltaU (:,:,iblk), & + strengthU (:,:,iblk), shearU (:,:,iblk), & stress12U (:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -1018,22 +1024,34 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - call stressCD_U (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxE (:,:,iblk), dyN (:,:,iblk), & - dxU (:,:,iblk), dyU (:,:,iblk), & - ratiodxN (:,:,iblk), ratiodxNr (:,:,iblk), & - ratiodyE (:,:,iblk), ratiodyEr (:,:,iblk), & - epm (:,:,iblk), npm (:,:,iblk), & - uarea (:,:,iblk), & - zetax2U (:,:,iblk), etax2U (:,:,iblk), & - strengthU (:,:,iblk), & - stresspU (:,:,iblk), stressmU (:,:,iblk), & - stress12U (:,:,iblk)) + !----------------------------------------------------------------- + ! strain rates at U point + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + call strain_rates_U (nx_block , ny_block , & + icellu (iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxE (:,:,iblk), dyN (:,:,iblk), & + dxU (:,:,iblk), dyU (:,:,iblk), & + ratiodxN (:,:,iblk), ratiodxNr(:,:,iblk), & + ratiodyE (:,:,iblk), ratiodyEr(:,:,iblk), & + epm (:,:,iblk), npm (:,:,iblk), & + divergU (:,:,iblk), tensionU (:,:,iblk), & + shearU (:,:,iblk), DeltaU (:,:,iblk) ) + + call stressCD_U (nx_block , ny_block , & + icellu (iblk), & + indxui (:,iblk), indxuj (:,iblk), & + uarea (:,:,iblk), & + zetax2U (:,:,iblk), etax2U (:,:,iblk), & + strengthU(:,:,iblk), & + divergU (:,:,iblk), tensionU (:,:,iblk), & + shearU (:,:,iblk), DeltaU (:,:,iblk), & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12U(:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -1148,7 +1166,8 @@ subroutine evp (dt) deallocate(fld2,fld3,fld4) if (grid_ice == 'CD' .or. 
grid_ice == 'C') then - deallocate(strengthU, shrU, zetax2T, zetax2U, etax2T, etax2U) + deallocate(strengthU, divergU, tensionU, shearU, deltaU) + deallocate(zetax2T, zetax2U, etax2T, etax2U) endif if (maskhalo_dyn) then @@ -1618,7 +1637,7 @@ subroutine stressC_T (nx_block, ny_block , & dxN , dyE , & dxT , dyT , & uarea , DminTarea, & - strength, shrU , & + strength, shearU , & zetax2T , etax2T , & stressp , stressm ) @@ -1643,7 +1662,7 @@ subroutine stressC_T (nx_block, ny_block , & dxT , & ! width of T-cell through the middle (m) dyT , & ! height of T-cell through the middle (m) strength , & ! ice strength (N/m) - shrU , & ! shearU + shearU , & ! shearU uarea , & ! area of u cell DminTarea ! deltaminEVP*tarea @@ -1673,15 +1692,14 @@ subroutine stressC_T (nx_block, ny_block , & ! Initialize !----------------------------------------------------------------- - call strain_rates_T (nx_block , ny_block , & - icellt , & - indxti(:) , indxtj (:) , & - uvelE (:,:), vvelE (:,:), & - uvelN (:,:), vvelN (:,:), & - dxN (:,:), dyE (:,:), & - dxT (:,:), dyT (:,:), & - divT = divT (:,:), & - tensionT = tensionT(:,:) ) + call strain_rates_T (nx_block , ny_block , & + icellt , & + indxti(:) , indxtj (:) , & + uvelE (:,:), vvelE (:,:), & + uvelN (:,:), vvelN (:,:), & + dxN (:,:), dyE (:,:), & + dxT (:,:), dyT (:,:), & + divT (:,:), tensionT(:,:) ) do ij = 1, icellt i = indxti(ij) @@ -1692,9 +1710,11 @@ subroutine stressC_T (nx_block, ny_block , & ! U point values (Bouillon et al., 2013, Kimmritz et al., 2016 !----------------------------------------------------------------- - shearTsqr = (shrU(i,j) **2 * uarea(i,j) + shrU(i,j-1)**2*uarea(i,j-1) & - + shrU(i-1,j-1)**2 * uarea(i-1,j-1)+ shrU(i-1,j)**2*uarea(i-1,j)) & - / (uarea(i,j)+uarea(i,j-1)+uarea(i-1,j-1)+uarea(i-1,j)) + shearTsqr = (shearU(i ,j )**2 * uarea(i ,j ) & + + shearU(i ,j-1)**2 * uarea(i ,j-1) & + + shearU(i-1,j-1)**2 * uarea(i-1,j-1) & + + shearU(i-1,j )**2 * uarea(i-1,j )) & + / (uarea(i,j)+uarea(i,j-1)+uarea(i-1,j-1)+uarea(i-1,j)) DeltaT = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearTsqr)) @@ -1737,21 +1757,12 @@ end subroutine stressC_T subroutine stressC_U (nx_block , ny_block, & icellu, & indxui , indxuj, & - uvelE , vvelE, & - uvelN , vvelN, & - uvelU , vvelU, & - dxE , dyN, & - dxU , dyU, & - ratiodxN , ratiodxNr, & - ratiodyE , ratiodyEr, & - epm , npm, & uarea , & - etax2U , & - strengthU, shrU, & + etax2U , deltaU, & + strengthU, shearU, & stress12 ) - use ice_dyn_shared, only: strain_rates_U, & - visc_replpress, & + use ice_dyn_shared, only: visc_replpress, & visc_method, deltaminEVP, capping integer (kind=int_kind), intent(in) :: & @@ -1763,25 +1774,10 @@ subroutine stressC_U (nx_block , ny_block, & indxuj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvelE , & ! x-component of velocity (m/s) at the E point - vvelE , & ! y-component of velocity (m/s) at the E point - uvelN , & ! x-component of velocity (m/s) at the N point - vvelN , & ! y-component of velocity (m/s) at the N point - uvelU , & ! x-component of velocity (m/s) at the U point - vvelU , & ! y-component of velocity (m/s) at the U point - dxE , & ! width of E-cell through the middle (m) - dyN , & ! height of N-cell through the middle (m) - dxU , & ! width of U-cell through the middle (m) - dyU , & ! height of U-cell through the middle (m) - ratiodxN , & ! -dxN(i+1,j)/dxN(i,j) factor for BCs across coastline - ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) factor for BCs across coastline - ratiodyE , & ! 
-dyE(i,j+1)/dyE(i,j) factor for BCs across coastline - ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) factor for BCs across coastline - epm , & ! E-cell mask - npm , & ! N-cell mask uarea , & ! area of U point etax2U , & ! 2*eta at the U point - shrU , & ! shearU array + shearU , & ! shearU array + deltaU , & ! deltaU array strengthU ! ice strength at the U point real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & @@ -1792,67 +1788,43 @@ subroutine stressC_U (nx_block , ny_block, & integer (kind=int_kind) :: & i, j, ij - real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - DeltaU ! delt at U point - real (kind=dbl_kind) :: & lzetax2U , & ! bulk viscosity at U point letax2U , & ! shear viscosity at U point lrep_prsU, & ! replacement pressure at U point - lDminUarea ! Dmin on U + DminUarea ! Dmin on U character(len=*), parameter :: subname = '(stressC_U)' !----------------------------------------------------------------- - ! strain rates at U point - ! NOTE these are actually strain rates * area (m^2/s) + ! viscosities and replacement pressure at U point + ! avg_zeta: Bouillon et al. 2013, C1 method of Kimmritz et al. 2016 + ! avg_strength: C2 method of Kimmritz et al. 2016 + ! the if test is outside the do loop, so the stress12 equation is repeated in each branch for performance !----------------------------------------------------------------- - if (visc_method == 'avg_strength') then - call strain_rates_U (nx_block , ny_block , & - icellu , & - indxui (:) , indxuj (:) , & - uvelE (:,:), vvelE (:,:), & - uvelN (:,:), vvelN (:,:), & - uvelU (:,:), vvelU (:,:), & - dxE (:,:), dyN (:,:), & - dxU (:,:), dyU (:,:), & - ratiodxN(:,:), ratiodxNr(:,:), & - ratiodyE(:,:), ratiodyEr(:,:), & - epm(:,:) , npm(:,:) , & - DeltaU = DeltaU(:,:) ) - endif - - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) - - !----------------------------------------------------------------- - ! viscosities and replacement pressure at U point - ! avg_zeta: Bouillon et al. 2013, C1 method of Kimmritz et al. 2016 - ! avg_strength: C2 method of Kimmritz et al. 2016 - !----------------------------------------------------------------- - - if (visc_method == 'avg_zeta') then - letax2U = etax2U(i,j) + if (visc_method == 'avg_zeta') then + do ij = 1, icellu + i = indxui(ij) + j = indxuj(ij) + stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) & + + arlx1i*p5*etax2U(i,j)*shearU(i,j)) * denom1 + enddo - elseif (visc_method == 'avg_strength') then - lDminUarea = deltaminEVP*uarea(i,j) + elseif (visc_method == 'avg_strength') then + do ij = 1, icellu + i = indxui(ij) + j = indxuj(ij) + DminUarea = deltaminEVP*uarea(i,j) ! only need etax2U here, but other terms are calculated with etax2U ! minimal extra calculations here even though it seems like there is - call visc_replpress (strengthU(i,j), lDminUarea, DeltaU(i,j), & - lzetax2U , letax2U , lrep_prsU , capping) - - endif - - !----------------------------------------------------------------- - ! the stresses ! kg/s^2 - !----------------------------------------------------------------- - - stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) & - + arlx1i*p5*letax2U*shrU(i,j)) * denom1 + call visc_replpress (strengthU(i,j), DminUarea, DeltaU(i,j), & + lzetax2U , letax2U , lrep_prsU , capping) + stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) & + + arlx1i*p5*letax2U*shearU(i,j)) * denom1 + enddo - enddo ! 
ij + endif end subroutine stressC_U @@ -1975,17 +1947,11 @@ end subroutine stressCD_T subroutine stressCD_U (nx_block, ny_block, & icellu, & indxui, indxuj, & - uvelE, vvelE, & - uvelN, vvelN, & - uvelU, vvelU, & - dxE, dyN, & - dxU, dyU, & - ratiodxN, ratiodxNr, & - ratiodyE, ratiodyEr, & - epm, npm, & uarea, & zetax2U, etax2U, & strengthU, & + divergU, tensionU, & + shearU, DeltaU, & stresspU, stressmU, & stress12U ) @@ -2002,26 +1968,14 @@ subroutine stressCD_U (nx_block, ny_block, & indxuj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvelE , & ! x-component of velocity (m/s) at the E point - vvelE , & ! y-component of velocity (m/s) at the E point - uvelN , & ! x-component of velocity (m/s) at the N point - vvelN , & ! y-component of velocity (m/s) at the N point - uvelU , & ! x-component of velocity (m/s) at the U point - vvelU , & ! y-component of velocity (m/s) at the U point - dxE , & ! width of E-cell through the middle (m) - dyN , & ! height of N-cell through the middle (m) - dxU , & ! width of U-cell through the middle (m) - dyU , & ! height of U-cell through the middle (m) - ratiodxN , & ! -dxN(i+1,j)/dxN(i,j) factor for BCs across coastline - ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) factor for BCs across coastline - ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) factor for BCs across coastline - ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) factor for BCs across coastline - epm , & ! E-cell mask - npm , & ! N-cell mask uarea , & ! area of U-cell (m^2) - zetax2U , & ! 2*zeta at the U point - etax2U , & ! 2*eta at the U point - strengthU ! ice strength at the T point + zetax2U , & ! 2*zeta at U point + etax2U , & ! 2*eta at U point + strengthU, & ! ice strength at U point + divergU , & ! div at U point + tensionU , & ! tension at U point + shearU , & ! shear at U point + deltaU ! delt at U point real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & stresspU , & ! sigma11+sigma22 @@ -2033,39 +1987,14 @@ subroutine stressCD_U (nx_block, ny_block, & integer (kind=int_kind) :: & i, j, ij - real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - divU , & ! divergence at U point - tensionU , & ! tension at U point - shearU , & ! shear at U point - DeltaU ! delt at U point - real (kind=dbl_kind) :: & lzetax2U , & ! bulk viscosity at U point letax2U , & ! shear viscosity at U point lrep_prsU , & ! replacement pressure at U point - lDminUarea ! Dmin on U + DminUarea ! Dmin on U character(len=*), parameter :: subname = '(stressCD_U)' - !----------------------------------------------------------------- - ! strain rates at U point - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - - call strain_rates_U (nx_block , ny_block , & - icellu , & - indxui (:) , indxuj(:) , & - uvelE (:,:), vvelE(:,:) , & - uvelN (:,:), vvelN(:,:) , & - uvelU (:,:), vvelU(:,:) , & - dxE (:,:), dyN(:,:) , & - dxU (:,:), dyU(:,:) , & - ratiodxN(:,:), ratiodxNr(:,:), & - ratiodyE(:,:), ratiodyEr(:,:), & - epm(:,:) , npm(:,:) , & - divU (:,:), tensionU (:,:), & - shearU (:,:), DeltaU (:,:) ) - do ij = 1, icellu i = indxui(ij) j = indxuj(ij) @@ -2082,11 +2011,11 @@ subroutine stressCD_U (nx_block, ny_block, & lrep_prsU = (c1-Ktens)/(c1+Ktens)*lzetax2U*deltaU(i,j) elseif (visc_method == 'avg_strength') then - lDminUarea = deltaminEVP*uarea(i,j) + DminUarea = deltaminEVP*uarea(i,j) ! only need etax2U here, but other terms are calculated with etax2U ! 
minimal extra calculations here even though it seems like there is - call visc_replpress (strengthU(i,j), lDminUarea, DeltaU(i,j), & - lzetax2U , letax2U , lrep_prsU , capping) + call visc_replpress (strengthU(i,j), DminUarea, DeltaU(i,j), & + lzetax2U , letax2U , lrep_prsU , capping) endif !----------------------------------------------------------------- @@ -2096,7 +2025,7 @@ subroutine stressCD_U (nx_block, ny_block, & ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code stresspU(i,j) = (stresspU (i,j)*(c1-arlx1i*revp) & - + arlx1i*(lzetax2U*divU(i,j) - lrep_prsU)) * denom1 + + arlx1i*(lzetax2U*divergU(i,j) - lrep_prsU)) * denom1 stressmU(i,j) = (stressmU (i,j)*(c1-arlx1i*revp) & + arlx1i*letax2U*tensionU(i,j)) * denom1 diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index b955be5d7..ee33eb09e 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -125,6 +125,11 @@ module ice_dyn_shared threshold_hw ! max water depth for grounding ! see keel data from Amundrud et al. 2004 (JGR) + interface strain_rates_T + module procedure strain_rates_Tdt + module procedure strain_rates_Tdtsd + end interface + interface dyn_haloUpdate module procedure dyn_haloUpdate1 module procedure dyn_haloUpdate2 @@ -1907,12 +1912,12 @@ subroutine strain_rates (nx_block, ny_block, & end subroutine strain_rates !======================================================================= -! Compute strain rates at the T point +! Compute dtsd (div, tension, shear, delta) strain rates at the T point ! ! author: JF Lemieux, ECCC ! Nov 2021 - subroutine strain_rates_T (nx_block, ny_block, & + subroutine strain_rates_Tdtsd (nx_block, ny_block, & icellt, & indxti, indxtj, & uvelE, vvelE, & @@ -1940,7 +1945,7 @@ subroutine strain_rates_T (nx_block, ny_block, & dxT , & ! width of T-cell through the middle (m) dyT ! height of T-cell through the middle (m) - real (kind=dbl_kind), dimension (nx_block,ny_block), optional, intent(out):: & + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & divT , & ! divergence at T point tensionT , & ! tension at T point shearT , & ! shear at T point @@ -1948,65 +1953,111 @@ subroutine strain_rates_T (nx_block, ny_block, & ! local variables - real (kind=dbl_kind) :: & - ldivT , & - ltensionT , & - lshearT ! local values - integer (kind=int_kind) :: & ij, i, j ! indices - character(len=*), parameter :: subname = '(strain_rates_T)' + character(len=*), parameter :: subname = '(strain_rates_Tdtsd)' !----------------------------------------------------------------- ! strain rates ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- - if (present(divT) ) divT (:,:) = c0 - if (present(tensionT)) tensionT(:,:) = c0 - if (present(shearT) ) shearT (:,:) = c0 - if (present(deltaT) ) deltaT (:,:) = c0 + ! compute divT, tensionT + call strain_rates_Tdt (nx_block, ny_block, & + icellt, & + indxti, indxtj, & + uvelE, vvelE, & + uvelN, vvelN, & + dxN, dyE, & + dxT, dyT, & + divT, tensionT ) + + shearT (:,:) = c0 + deltaT (:,:) = c0 do ij = 1, icellt i = indxti(ij) j = indxtj(ij) - ! divergence = e_11 + e_22 - if (present(deltaT) .or. present(divT)) then - ldivT = dyE(i,j)*uvelE(i ,j ) - dyE(i-1,j)*uvelE(i-1,j ) & - + dxN(i,j)*vvelN(i ,j ) - dxN(i,j-1)*vvelN(i ,j-1) - if (present(divT)) then - divT(i,j) = ldivT - endif - endif - - ! tension strain rate = e_11 - e_22 - if (present(deltaT) .or. 
present(tensionT)) then - ltensionT= (dyT(i,j)**2)*(uvelE(i,j)/dyE(i,j) - uvelE(i-1,j)/dyE(i-1,j)) & - - (dxT(i,j)**2)*(vvelN(i,j)/dxN(i,j) - vvelN(i,j-1)/dxN(i,j-1)) - if (present(tensionT)) then - tensionT(i,j) = ltensionT - endif - endif ! shearing strain rate = 2*e_12 - if (present(deltaT) .or. present(shearT)) then - lshearT = (dxT(i,j)**2)*(uvelN(i,j)/dxN(i,j) - uvelN(i,j-1)/dxN(i,j-1)) & + shearT(i,j) = (dxT(i,j)**2)*(uvelN(i,j)/dxN(i,j) - uvelN(i,j-1)/dxN(i,j-1)) & + (dyT(i,j)**2)*(vvelE(i,j)/dyE(i,j) - vvelE(i-1,j)/dyE(i-1,j)) - if (present(shearT)) then - shearT(i,j) = lshearT - endif - endif ! Delta (in the denominator of zeta, eta) - if (present(deltaT)) then - DeltaT (i,j) = sqrt(ldivT**2 + e_factor*(ltensionT**2 + lshearT**2)) - endif + DeltaT(i,j) = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearT(i,j)**2)) enddo - end subroutine strain_rates_T + end subroutine strain_rates_Tdtsd + +!======================================================================= +! Compute the dt (div, tension) strain rates at the T point +! +! author: JF Lemieux, ECCC +! Nov 2021 + + subroutine strain_rates_Tdt (nx_block, ny_block, & + icellt, & + indxti, indxtj, & + uvelE, vvelE, & + uvelN, vvelN, & + dxN, dyE, & + dxT, dyT, & + divT, tensionT ) + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icellt + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxti , & ! compressed index in i-direction + indxtj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + uvelE , & ! x-component of velocity (m/s) at the E point + vvelE , & ! y-component of velocity (m/s) at the E point + uvelN , & ! x-component of velocity (m/s) at the N point + vvelN , & ! y-component of velocity (m/s) at the N point + dxN , & ! width of N-cell through the middle (m) + dyE , & ! height of E-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT ! height of T-cell through the middle (m) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & + divT , & ! divergence at T point + tensionT ! tension at T point + + ! local variables + + integer (kind=int_kind) :: & + ij, i, j ! indices + + character(len=*), parameter :: subname = '(strain_rates_Tdt)' + + !----------------------------------------------------------------- + ! strain rates + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + + divT (:,:) = c0 + tensionT(:,:) = c0 + + do ij = 1, icellt + i = indxti(ij) + j = indxtj(ij) + + ! divergence = e_11 + e_22 + divT (i,j)= dyE(i,j)*uvelE(i ,j ) - dyE(i-1,j)*uvelE(i-1,j ) & + + dxN(i,j)*vvelN(i ,j ) - dxN(i,j-1)*vvelN(i ,j-1) + + ! tension strain rate = e_11 - e_22 + tensionT(i,j) = (dyT(i,j)**2)*(uvelE(i,j)/dyE(i,j) - uvelE(i-1,j)/dyE(i-1,j)) & + - (dxT(i,j)**2)*(vvelN(i,j)/dxN(i,j) - vvelN(i,j-1)/dxN(i,j-1)) + + enddo + + end subroutine strain_rates_Tdt !======================================================================= ! Compute strain rates at the U point including boundary conditions @@ -2025,7 +2076,7 @@ subroutine strain_rates_U (nx_block, ny_block, & ratiodxN, ratiodxNr, & ratiodyE, ratiodyEr, & epm, npm, & - divU, tensionU, & + divergU, tensionU, & shearU, DeltaU ) integer (kind=int_kind), intent(in) :: & @@ -2054,8 +2105,8 @@ subroutine strain_rates_U (nx_block, ny_block, & epm , & ! E-cell mask npm ! 
N-cell mask - real (kind=dbl_kind), dimension (nx_block,ny_block), optional, intent(out):: & - divU , & ! divergence at U point + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & + divergU , & ! divergence at U point tensionU , & ! tension at U point shearU , & ! shear at U point DeltaU ! delt at the U point @@ -2065,11 +2116,6 @@ subroutine strain_rates_U (nx_block, ny_block, & integer (kind=int_kind) :: & ij, i, j ! indices - real (kind=dbl_kind) :: & - ldivU , & - ltensionU , & - lshearU ! local values - real (kind=dbl_kind) :: & uNip1j, uNij, vEijp1, vEij, uEijp1, uEij, vNip1j, vNij @@ -2080,69 +2126,53 @@ subroutine strain_rates_U (nx_block, ny_block, & ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- - if (present(divU) ) divU (:,:) = c0 - if (present(tensionU)) tensionU(:,:) = c0 - if (present(shearU) ) shearU (:,:) = c0 - if (present(deltaU) ) deltaU (:,:) = c0 + divergU (:,:) = c0 + tensionU(:,:) = c0 + shearU (:,:) = c0 + deltaU (:,:) = c0 do ij = 1, icellu i = indxui(ij) j = indxuj(ij) - if (present(DeltaU) .or. present(divU) .or. present(tensionU)) then - uNip1j = uvelN(i+1,j) * npm(i+1,j) & - +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * uvelN(i,j) - uNij = uvelN(i,j) * npm(i,j) & - +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * uvelN(i+1,j) - vEijp1 = vvelE(i,j+1) * epm(i,j+1) & - +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * vvelE(i,j) - vEij = vvelE(i,j) * epm(i,j) & - +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * vvelE(i,j+1) - - - ! divergence = e_11 + e_22 - ldivU = dyU(i,j) * ( uNip1j - uNij ) & - + uvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & - + dxU(i,j) * ( vEijp1 - vEij ) & - + vvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) ) - if (present(divU)) then - divU(i,j) = ldivU - endif + uNip1j = uvelN(i+1,j) * npm(i+1,j) & + +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * uvelN(i,j) + uNij = uvelN(i,j) * npm(i,j) & + +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * uvelN(i+1,j) + vEijp1 = vvelE(i,j+1) * epm(i,j+1) & + +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * vvelE(i,j) + vEij = vvelE(i,j) * epm(i,j) & + +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * vvelE(i,j+1) - ! tension strain rate = e_11 - e_22 - ltensionU = dyU(i,j) * ( uNip1j - uNij ) & - - uvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & - - dxU(i,j) * ( vEijp1 - vEij ) & - + vvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) ) - if (present(tensionU)) then - tensionU(i,j) = ltensionU - endif - endif + ! divergence = e_11 + e_22 + divergU (i,j) = dyU(i,j) * ( uNip1j - uNij ) & + + uvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & + + dxU(i,j) * ( vEijp1 - vEij ) & + + vvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) ) - if (present(DeltaU) .or. present(shearU)) then - uEijp1 = uvelE(i,j+1) * epm(i,j+1) & - +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * uvelE(i,j) - uEij = uvelE(i,j) * epm(i,j) & - +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * uvelE(i,j+1) - vNip1j = vvelN(i+1,j) * npm(i+1,j) & - +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * vvelN(i,j) - vNij = vvelN(i,j) * npm(i,j) & - +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * vvelN(i+1,j) - - ! shearing strain rate = 2*e_12 - lshearU = dxU(i,j) * ( uEijp1 - uEij ) & - - uvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & - + dyU(i,j) * ( vNip1j - vNij ) & - - vvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) - if (present(shearU)) then - shearU(i,j) = lshearU - endif - endif + ! 
tension strain rate = e_11 - e_22 + tensionU(i,j) = dyU(i,j) * ( uNip1j - uNij ) & + - uvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & + - dxU(i,j) * ( vEijp1 - vEij ) & + + vvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) ) + + uEijp1 = uvelE(i,j+1) * epm(i,j+1) & + +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * uvelE(i,j) + uEij = uvelE(i,j) * epm(i,j) & + +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * uvelE(i,j+1) + vNip1j = vvelN(i+1,j) * npm(i+1,j) & + +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * vvelN(i,j) + vNij = vvelN(i,j) * npm(i,j) & + +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * vvelN(i+1,j) - if (present(DeltaU)) then - ! Delta (in the denominator of zeta, eta) - DeltaU(i,j) = sqrt(ldivU**2 + e_factor*(ltensionU**2 + lshearU**2)) - endif + ! shearing strain rate = 2*e_12 + shearU(i,j) = dxU(i,j) * ( uEijp1 - uEij ) & + - uvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & + + dyU(i,j) * ( vNip1j - vNij ) & + - vvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) + + ! Delta (in the denominator of zeta, eta) + DeltaU(i,j) = sqrt(divergU(i,j)**2 + e_factor*(tensionU(i,j)**2 + shearU(i,j)**2)) enddo From 501c58248746f36060fb05d5b2478af008fbc0d7 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Wed, 6 Apr 2022 11:22:23 -0700 Subject: [PATCH 099/109] Update ice_data_type implementations for flexibility and transparency (#87) * Update ice_data_type implementations for flexibility and transparency Add ice_data_conc, ice_data_dist namelist to work with ice_data_type. Update namelist setup as needed to provide backwards compatibility. * Move away from 'default' option for ice_ic, ice_data_type, ice_data_conc, ice_data_dist Fix bug in init state related to hin_max comparison (needs <=) --- cicecore/cicedynB/general/ice_forcing.F90 | 4 +- cicecore/cicedynB/general/ice_init.F90 | 318 ++++++++---------- configuration/scripts/ice_in | 11 +- configuration/scripts/options/set_nml.alt01 | 2 +- configuration/scripts/options/set_nml.alt02 | 2 +- configuration/scripts/options/set_nml.alt03 | 2 +- configuration/scripts/options/set_nml.alt04 | 2 +- configuration/scripts/options/set_nml.alt05 | 2 +- configuration/scripts/options/set_nml.alt06 | 2 +- configuration/scripts/options/set_nml.box2001 | 4 +- configuration/scripts/options/set_nml.boxadv | 4 +- configuration/scripts/options/set_nml.boxchan | 4 +- .../scripts/options/set_nml.boxislandse | 4 +- .../scripts/options/set_nml.boxislandsn | 4 +- .../scripts/options/set_nml.boxislandsne | 4 +- .../scripts/options/set_nml.boxnodyn | 6 +- .../scripts/options/set_nml.boxrestore | 4 +- .../scripts/options/set_nml.boxslotcyl | 4 +- configuration/scripts/options/set_nml.boxsyme | 4 +- configuration/scripts/options/set_nml.boxsymn | 4 +- .../scripts/options/set_nml.boxsymne | 4 +- configuration/scripts/options/set_nml.boxsyms | 4 +- configuration/scripts/options/set_nml.boxsymw | 4 +- configuration/scripts/options/set_nml.boxwall | 6 +- .../scripts/options/set_nml.boxwallblock | 6 +- .../scripts/options/set_nml.boxwallp5 | 6 +- configuration/scripts/options/set_nml.gbox12 | 4 +- configuration/scripts/options/set_nml.gbox128 | 4 +- configuration/scripts/options/set_nml.gbox180 | 4 +- configuration/scripts/options/set_nml.gbox80 | 4 +- .../scripts/options/set_nml.icdefault | 2 +- .../scripts/options/set_nml.iobinary | 2 +- .../scripts/options/set_nml.run10year | 2 +- configuration/scripts/options/set_nml.tx1 | 2 +- configuration/scripts/options/set_nml.zsal | 2 +- doc/source/cice_index.rst | 3 + doc/source/user_guide/ug_case_settings.rst | 39 ++- 
doc/source/user_guide/ug_testing.rst | 8 +- 38 files changed, 271 insertions(+), 226 deletions(-) diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90 index 320042940..2d127ecb2 100755 --- a/cicecore/cicedynB/general/ice_forcing.F90 +++ b/cicecore/cicedynB/general/ice_forcing.F90 @@ -125,7 +125,9 @@ module ice_forcing bgc_data_type, & ! 'default', 'clim' ocn_data_type, & ! 'default', 'clim', 'ncar', 'oned', 'calm', 'box2001' ! 'hadgem_sst' or 'hadgem_sst_uvocn', 'uniform' - ice_data_type, & ! 'default', 'box2001', 'boxslotcyl' + ice_data_type, & ! 'latsst', 'box2001', 'boxslotcyl', etc + ice_data_conc, & ! 'p5','p8','p9','c1','parabolic' + ice_data_dist, & ! 'box2001','gauss', 'uniform' precip_units ! 'mm_per_month', 'mm_per_sec', 'mks','m_per_sec' logical (kind=log_kind), public :: & diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 4234b20da..1f695e513 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -14,7 +14,7 @@ module ice_init use ice_kinds_mod use ice_communicate, only: my_task, master_task, ice_barrier - use ice_constants, only: c0, c1, c2, c3, p2, p5 + use ice_constants, only: c0, c1, c2, c3, c5, p2, p3, p5, p75, p166 use ice_exit, only: abort_ice use ice_fileunits, only: nu_nml, nu_diag, nu_diag_set, nml_filename, diag_type, & ice_stdout, get_fileunit, release_fileunit, bfbflag, flush_fileunit, & @@ -38,7 +38,7 @@ module ice_init character(len=char_len_long), public :: & ice_ic ! method of ice cover initialization - ! 'default' => latitude and sst dependent + ! 'internal' => set from ice_data_ namelist ! 'none' => no ice ! filename => read file @@ -89,9 +89,9 @@ subroutine input_data atm_data_type, atm_data_dir, precip_units, rotate_wind, & atm_data_format, ocn_data_format, & bgc_data_type, & - ocn_data_type, ocn_data_dir, wave_spec_file, & - oceanmixed_file, restore_ocn, trestore, & - ice_data_type, & + ocn_data_type, ocn_data_dir, wave_spec_file, & + oceanmixed_file, restore_ocn, trestore, & + ice_data_type, ice_data_conc, ice_data_dist, & snw_filename, & snw_tau_fname, snw_kappa_fname, snw_drdt0_fname, & snw_rhos_fname, snw_Tgrd_fname, snw_T_fname @@ -258,8 +258,8 @@ subroutine input_data oceanmixed_ice, restore_ice, restore_ocn, trestore, & precip_units, default_season, wave_spec_type,nfreq, & atm_data_type, ocn_data_type, bgc_data_type, fe_data_type, & - ice_data_type, wave_spec_file, restart_coszen, & - fyear_init, ycycle, & + ice_data_type, ice_data_conc, ice_data_dist, & + fyear_init, ycycle, wave_spec_file,restart_coszen, & atm_data_dir, ocn_data_dir, bgc_data_dir, & atm_data_format, ocn_data_format, rotate_wind, & oceanmixed_file @@ -491,7 +491,9 @@ subroutine input_data ocn_data_format = 'bin' ! file format ('bin'=binary or 'nc'=netcdf) bgc_data_type = 'default' fe_data_type = 'default' - ice_data_type = 'default' ! used by some tests to initialize ice state (concentration, velocities) + ice_data_type = 'default' ! used by some tests to initialize ice state (overall type and mask) + ice_data_conc = 'default' ! used by some tests to initialize ice state (concentration) + ice_data_dist = 'default' ! used by some tests to initialize ice state (distribution) bgc_data_dir = 'unknown_bgc_data_dir' ocn_data_type = 'default' ocn_data_dir = 'unknown_ocn_data_dir' @@ -749,7 +751,9 @@ subroutine input_data open(nu_diag,file=tmpstr) endif end if - if (trim(ice_ic) /= 'default' .and. 
trim(ice_ic) /= 'none') then + if (trim(ice_ic) /= 'default' .and. & + trim(ice_ic) /= 'none' .and. & + trim(ice_ic) /= 'internal') then restart = .true. end if #else @@ -961,6 +965,8 @@ subroutine input_data call broadcast_scalar(bgc_data_type, master_task) call broadcast_scalar(fe_data_type, master_task) call broadcast_scalar(ice_data_type, master_task) + call broadcast_scalar(ice_data_conc, master_task) + call broadcast_scalar(ice_data_dist, master_task) call broadcast_scalar(bgc_data_dir, master_task) call broadcast_scalar(ocn_data_type, master_task) call broadcast_scalar(ocn_data_dir, master_task) @@ -1026,6 +1032,15 @@ subroutine input_data pointer_file = trim(pointer_file) // trim(inst_suffix) #endif + !----------------------------------------------------------------- + ! update defaults + !----------------------------------------------------------------- + + if (trim(ice_ic) == 'default') ice_ic = 'internal' + if (trim(ice_data_conc) == 'default') ice_data_conc = 'parabolic' + if (trim(ice_data_dist) == 'default') ice_data_dist = 'uniform' + if (trim(ice_data_type) == 'default') ice_data_type = 'latsst' + !----------------------------------------------------------------- ! verify inputs !----------------------------------------------------------------- @@ -1052,11 +1067,11 @@ subroutine input_data restart = .true. use_restart_time = .true. elseif (trim(runtype) == 'initial') then - if (ice_ic == 'none' .or. ice_ic == 'default') then + if (ice_ic == 'none' .or. ice_ic == 'internal') then if (my_task == master_task) then - write(nu_diag,*) subname//'NOTE: ice_ic = none or default, setting restart flags to .false.' + write(nu_diag,*) subname//'NOTE: ice_ic = none or internal, setting restart flags to .false.' if (.not. use_restart_time) & - write(nu_diag,*) subname//'NOTE: ice_ic = none or default, setting use_restart_time=.false.' + write(nu_diag,*) subname//'NOTE: ice_ic = none or internal, setting use_restart_time=.false.' write(nu_diag,*) ' ' endif use_restart_time = .false. @@ -1075,7 +1090,7 @@ subroutine input_data ! restart_ext = .false. else if (my_task == master_task) then - write(nu_diag,*) subname//'NOTE: ice_ic /= none or default, setting restart=.true.' + write(nu_diag,*) subname//'NOTE: ice_ic /= none or internal, setting restart=.true.' write(nu_diag,*) ' ' endif restart = .true. @@ -2233,6 +2248,8 @@ subroutine input_data write(nu_diag,1031) ' bgc_data_type = ', trim(bgc_data_type) write(nu_diag,1031) ' fe_data_type = ', trim(fe_data_type) write(nu_diag,1031) ' ice_data_type = ', trim(ice_data_type) + write(nu_diag,1031) ' ice_data_conc = ', trim(ice_data_conc) + write(nu_diag,1031) ' ice_data_dist = ', trim(ice_data_dist) write(nu_diag,1031) ' bgc_data_dir = ', trim(bgc_data_dir) write(nu_diag,1031) ' ocn_data_type = ', trim(ocn_data_type) if (trim(bgc_data_type) /= 'default' .or. 
& @@ -2395,7 +2412,6 @@ subroutine init_state use ice_flux, only: sst, Tf, Tair, salinz, Tmltz use ice_grid, only: tmask, ULON, TLAT, grid_ice, grid_average_X2Y use ice_boundary, only: ice_HaloUpdate - use ice_forcing, only: ice_data_type use ice_constants, only: field_loc_Nface, field_loc_Eface, field_type_scalar use ice_state, only: trcr_depend, aicen, trcrn, vicen, vsnon, & aice0, aice, vice, vsno, trcr, aice_init, bound_state, & @@ -2722,8 +2738,8 @@ subroutine set_state_var (nx_block, ny_block, & use ice_arrays_column, only: hin_max use ice_domain_size, only: nilyr, nslyr, nx_global, ny_global, ncat - use ice_grid, only: grid_type - use ice_forcing, only: ice_data_type + use ice_grid, only: grid_type, dxrect, dyrect + use ice_forcing, only: ice_data_type, ice_data_conc, ice_data_dist integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -2775,11 +2791,22 @@ subroutine set_state_var (nx_block, ny_block, & jedge , & ! edge around big block icells ! number of cells initialized with ice + logical (kind=log_kind) :: & + in_slot, in_cyl ! boxslotcyl flags + + real (kind=dbl_kind) :: & ! boxslotcyl parameters + diam , & ! cylinder diameter + radius , & ! cylinder radius + center_x, & ! cylinder center + center_y, & + width , & ! slot width + length ! slot height + integer (kind=int_kind), dimension(nx_block*ny_block) :: & indxi, indxj ! compressed indices for cells with aicen > puny real (kind=dbl_kind) :: & - Tsfc, sum, hbar, puny, rhos, Lfresh, rad_to_deg, rsnw_fall, dist_ratio + Tsfc, sum, hbar, abar, puny, rhos, Lfresh, rad_to_deg, rsnw_fall, dist_ratio real (kind=dbl_kind), dimension(ncat) :: & ainit, hinit ! initial area, thickness @@ -2862,45 +2889,41 @@ subroutine set_state_var (nx_block, ny_block, & enddo enddo - if (trim(ice_ic) == 'default') then + if (trim(ice_ic) == 'internal') then !--------------------------------------------------------- ! ice concentration/thickness !--------------------------------------------------------- - if (trim(ice_data_type) == 'box2001' .or. & - trim(ice_data_type) == 'smallblock' .or. & - trim(ice_data_type) == 'channel' .or. & - trim(ice_data_type) == 'bigblock' .or. & - trim(ice_data_type) == 'blockep5' .or. & - trim(ice_data_type) == 'uniformp5' .or. & - trim(ice_data_type) == 'gauss') then + if (trim(ice_data_conc) == 'p5' .or. & + trim(ice_data_conc) == 'p8' .or. & + trim(ice_data_conc) == 'p9' .or. & + trim(ice_data_conc) == 'c1') then + + if (trim(ice_data_conc) == 'p5') then + hbar = c2 ! initial ice thickness + abar = p5 ! initial ice concentration + elseif (trim(ice_data_conc) == 'p8') then + hbar = c1 ! initial ice thickness + abar = 0.8_dbl_kind ! initial ice concentration + elseif (trim(ice_data_conc) == 'p9') then + hbar = c1 ! initial ice thickness + abar = 0.9_dbl_kind ! initial ice concentration + elseif (trim(ice_data_conc) == 'c1') then + hbar = c1 ! initial ice thickness + abar = c1 ! initial ice concentration + endif - hbar = c2 ! initial ice thickness do n = 1, ncat hinit(n) = c0 ainit(n) = c0 - if (hbar > hin_max(n-1) .and. hbar < hin_max(n)) then + if (hbar > hin_max(n-1) .and. hbar <= hin_max(n)) then hinit(n) = hbar - ainit(n) = p5 !echmod symm + ainit(n) = abar endif enddo - elseif (trim(ice_data_type) == 'boxslotcyl' .or. & - trim(ice_data_type) == 'medblocke' .or. & - trim(ice_data_type) == 'blocke') then - - hbar = c1 ! initial ice thickness (1 m) - do n = 1, ncat - hinit(n) = c0 - ainit(n) = c0 - if (hbar > hin_max(n-1) .and. 
hbar < hin_max(n)) then - hinit(n) = hbar - ainit(n) = c1 !echmod symm - endif - enddo - - else + elseif (trim(ice_data_conc) == 'parabolic') then ! initial category areas in cells with ice hbar = c3 ! initial ice thickness with greatest area @@ -2922,14 +2945,18 @@ subroutine set_state_var (nx_block, ny_block, & ainit(n) = ainit(n) / (sum + puny/ncat) ! normalize enddo - endif ! ice_data_type + else + + call abort_ice(subname//'ERROR: ice_data_conc setting = '//trim(ice_data_conc), & + file=__FILE__, line=__LINE__) + + endif ! ice_data_conc !--------------------------------------------------------- ! location of ice !--------------------------------------------------------- - if ((trim(ice_data_type) == 'box2001') .or. & - (trim(ice_data_type) == 'boxslotcyl')) then + if (trim(ice_data_type) == 'box2001') then ! place ice on left side of domain icells = 0 @@ -2945,8 +2972,39 @@ subroutine set_state_var (nx_block, ny_block, & enddo ! i enddo ! j - elseif ((trim(ice_data_type) == 'uniform') .or. & - (trim(ice_data_type) == 'uniformp5')) then + elseif (trim(ice_data_type) == 'boxslotcyl') then + + ! Geometric configuration of the slotted cylinder + diam = p3 *dxrect*(nx_global-1) + center_x = p5 *dxrect*(nx_global-1) + center_y = p75*dyrect*(ny_global-1) + radius = p5*diam + width = p166*diam + length = c5*p166*diam + + icells = 0 + do j = jlo, jhi + do i = ilo, ihi + if (tmask(i,j)) then + ! check if grid point is inside slotted cylinder + in_slot = (dxrect*real(iglob(i)-1, kind=dbl_kind) >= center_x - width/c2) .and. & + (dxrect*real(iglob(i)-1, kind=dbl_kind) <= center_x + width/c2) .and. & + (dyrect*real(jglob(j)-1, kind=dbl_kind) >= center_y - radius) .and. & + (dyrect*real(jglob(j)-1, kind=dbl_kind) <= center_y + (length - radius)) + + in_cyl = sqrt((dxrect*real(iglob(i)-1, kind=dbl_kind) - center_x)**c2 + & + (dyrect*real(jglob(j)-1, kind=dbl_kind) - center_y)**c2) <= radius + + if (in_cyl .and. .not. in_slot) then + icells = icells + 1 + indxi(icells) = i + indxj(icells) = j + endif + endif + enddo + enddo + + elseif (trim(ice_data_type) == 'uniform') then ! all cells not land mask are ice icells = 0 do j = jlo, jhi @@ -2972,13 +3030,13 @@ subroutine set_state_var (nx_block, ny_block, & enddo enddo - elseif (trim(ice_data_type) == 'blocke' .or. & - trim(ice_data_type) == 'blockep5') then - ! block on east half of domain + elseif (trim(ice_data_type) == 'smallblock') then + ! 2x2 ice in center of domain icells = 0 do j = jlo, jhi do i = ilo, ihi - if (iglob(i) >= nx_global/2) then + if ((iglob(i) == nx_global/2 .or. iglob(i) == nx_global/2+1) .and. & + (jglob(j) == ny_global/2 .or. jglob(j) == ny_global/2+1)) then icells = icells + 1 indxi(icells) = i indxj(icells) = j @@ -2986,13 +3044,15 @@ subroutine set_state_var (nx_block, ny_block, & enddo enddo - elseif (trim(ice_data_type) == 'medblocke') then - ! block on east half of domain in center of domain + elseif (trim(ice_data_type) == 'block') then + ! ice in 50% of domain, not at edges icells = 0 + iedge = int(real(nx_global,kind=dbl_kind) * 0.25) + 1 + jedge = int(real(ny_global,kind=dbl_kind) * 0.25) + 1 do j = jlo, jhi do i = ilo, ihi - if (jglob(j) > ny_global/4 .and. jglob(j) < 3*nx_global/4 .and. & - iglob(i) >= nx_global/2) then + if ((iglob(i) > iedge .and. iglob(i) < nx_global-iedge+1) .and. & + (jglob(j) > jedge .and. 
jglob(j) < ny_global-jedge+1)) then icells = icells + 1 indxi(icells) = i indxj(icells) = j @@ -3000,13 +3060,15 @@ subroutine set_state_var (nx_block, ny_block, & enddo enddo - elseif (trim(ice_data_type) == 'smallblock') then - ! 2x2 ice in center of domain + elseif (trim(ice_data_type) == 'bigblock') then + ! ice in 90% of domain, not at edges icells = 0 + iedge = int(real(nx_global,kind=dbl_kind) * 0.05) + 1 + jedge = int(real(ny_global,kind=dbl_kind) * 0.05) + 1 do j = jlo, jhi do i = ilo, ihi - if ((iglob(i) == nx_global/2 .or. iglob(i) == nx_global/2+1) .and. & - (jglob(j) == ny_global/2 .or. jglob(j) == ny_global/2+1)) then + if ((iglob(i) > iedge .and. iglob(i) < nx_global-iedge+1) .and. & + (jglob(j) > jedge .and. jglob(j) < ny_global-jedge+1)) then icells = icells + 1 indxi(icells) = i indxj(icells) = j @@ -3014,15 +3076,12 @@ subroutine set_state_var (nx_block, ny_block, & enddo enddo - elseif (trim(ice_data_type) == 'medblock') then - ! ice in 50% of domain, not at edges + elseif (trim(ice_data_type) == 'easthalf') then + ! block on east half of domain icells = 0 - iedge = int(real(nx_global,kind=dbl_kind) * 0.25) + 1 - jedge = int(real(ny_global,kind=dbl_kind) * 0.25) + 1 do j = jlo, jhi do i = ilo, ihi - if ((iglob(i) > iedge .and. iglob(i) < nx_global-iedge+1) .and. & - (jglob(j) > jedge .and. jglob(j) < ny_global-jedge+1)) then + if (iglob(i) >= nx_global/2) then icells = icells + 1 indxi(icells) = i indxj(icells) = j @@ -3030,16 +3089,13 @@ subroutine set_state_var (nx_block, ny_block, & enddo enddo - elseif (trim(ice_data_type) == 'bigblock' .or. & - trim(ice_data_type) == 'gauss') then - ! ice in 90% of domain, not at edges + elseif (trim(ice_data_type) == 'eastblock') then + ! block on east half of domain in center of domain icells = 0 - iedge = int(real(nx_global,kind=dbl_kind) * 0.05) + 1 - jedge = int(real(ny_global,kind=dbl_kind) * 0.05) + 1 do j = jlo, jhi do i = ilo, ihi - if ((iglob(i) > iedge .and. iglob(i) < nx_global-iedge+1) .and. & - (jglob(j) > jedge .and. jglob(j) < ny_global-jedge+1)) then + if (jglob(j) > ny_global/4 .and. jglob(j) < 3*ny_global/4 .and. & + iglob(i) >= nx_global/2) then icells = icells + 1 indxi(icells) = i indxj(icells) = j @@ -3047,7 +3103,7 @@ subroutine set_state_var (nx_block, ny_block, & enddo enddo - else ! default behavior + elseif (trim(ice_data_type) == 'latsst') then !----------------------------------------------------------------- ! Place ice where ocean surface is cold. @@ -3072,6 +3128,11 @@ subroutine set_state_var (nx_block, ny_block, & enddo ! i enddo ! j + else + + call abort_ice(subname//'ERROR: ice_data_type setting = '//trim(ice_data_type), & + file=__FILE__, line=__LINE__) + endif ! ice_data_type !--------------------------------------------------------- @@ -3087,7 +3148,7 @@ subroutine set_state_var (nx_block, ny_block, & aicen(i,j,n) = ainit(n) - if (trim(ice_data_type) == 'box2001') then + if (trim(ice_data_dist) == 'box2001') then if (hinit(n) > c0) then ! ! constant slope from 0 to 1 in x direction aicen(i,j,n) = (real(iglob(i), kind=dbl_kind)-p5) & @@ -3107,20 +3168,8 @@ subroutine set_state_var (nx_block, ny_block, & ! - real(jglob(j), kind=dbl_kind)-p5) & ! / (real(ny_global,kind=dbl_kind)) * p5) endif - vicen(i,j,n) = hinit(n) * aicen(i,j,n) ! m - - elseif (trim(ice_data_type) == 'boxslotcyl') then - - if (hinit(n) > c0) then - ! slotted cylinder - call boxslotcyl_data_aice(aicen, i, j, & - nx_block, ny_block, & - n, ainit, & - iglob, jglob) - endif - vicen(i,j,n) = hinit(n) * aicen(i,j,n) ! 
m - elseif (trim(ice_data_type) == 'gauss') then + elseif (trim(ice_data_dist) == 'gauss') then if (hinit(n) > c0) then dist_ratio = 8._dbl_kind * & sqrt((real(iglob(i),kind=dbl_kind)-real(nx_global+1,kind=dbl_kind)/c2)**2 + & @@ -3129,14 +3178,19 @@ subroutine set_state_var (nx_block, ny_block, & (real(ny_global,kind=dbl_kind))**2) aicen(i,j,n) = ainit(n) * exp(-dist_ratio) endif - vicen(i,j,n) = hinit(n) * aicen(i,j,n) ! m - else ! default or uniform + elseif (trim(ice_data_dist) == 'uniform') then + + ! nothing extra to do - vicen(i,j,n) = hinit(n) * ainit(n) ! m + else + + call abort_ice(subname//'ERROR: ice_data_dist setting = '//trim(ice_data_dist), & + file=__FILE__, line=__LINE__) - endif ! ice_data_type + endif ! ice_data_dist + vicen(i,j,n) = hinit(n) * aicen(i,j,n) ! m vsnon(i,j,n) = min(aicen(i,j,n)*hsno_init,p2*vicen(i,j,n)) call icepack_init_trcr(Tair = Tair(i,j), Tf = Tf(i,j), & @@ -3181,6 +3235,7 @@ subroutine set_state_var (nx_block, ny_block, & uvel = c0 vvel = c0 endif + endif ! ice_ic call icepack_warnings_flush(nu_diag) @@ -3191,85 +3246,6 @@ end subroutine set_state_var !======================================================================= -! Set ice concentration for slotted cylinder advection test -! -! author: Philippe Blain (ECCC) - - subroutine boxslotcyl_data_aice(aicen, i, j, & - nx_block, ny_block, & - n, ainit, & - iglob, jglob) - - use ice_constants, only: c0, c2, c5, p3, p166, p75, p5 - use ice_domain_size, only: nx_global, ny_global, ncat - use ice_grid, only: dxrect, dyrect - - integer (kind=int_kind), intent(in) :: & - i, j , & ! local indices - nx_block, ny_block, & ! block dimensions - iglob(nx_block) , & ! global indices - jglob(ny_block) , & - n ! thickness category index - - real (kind=dbl_kind), dimension(ncat) :: & - ainit ! initial area - - real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), intent(out) :: & - aicen ! concentration of ice - - ! local variables - - logical :: in_slot, in_cyl, in_slotted_cyl - - real (kind=dbl_kind), dimension (2) :: & - slot_x, & ! geometric limits of the slot - slot_y - - real (kind=dbl_kind) :: & - diam , & ! cylinder diameter - radius , & ! cylinder radius - center_x, & ! cylinder center - center_y, & - width , & ! slot width - length ! slot height - - character(len=*), parameter :: subname = '(boxslotcyl_data_aice)' - - ! Geometric configuration of the slotted cylinder - diam = p3 *dxrect*(nx_global-1) - center_x = p5 *dxrect*(nx_global-1) - center_y = p75*dyrect*(ny_global-1) - radius = p5*diam - width = p166*diam - length = c5*p166*diam - - slot_x(1) = center_x - width/c2 - slot_x(2) = center_x + width/c2 - slot_y(1) = center_y - radius - slot_y(2) = center_y + (length - radius) - - ! check if grid point is inside slotted cylinder - in_slot = (dxrect*real(iglob(i)-1, kind=dbl_kind) >= slot_x(1)) .and. & - (dxrect*real(iglob(i)-1, kind=dbl_kind) <= slot_x(2)) .and. & - (dyrect*real(jglob(j)-1, kind=dbl_kind) >= slot_y(1)) .and. & - (dyrect*real(jglob(j)-1, kind=dbl_kind) <= slot_y(2)) - - in_cyl = sqrt((dxrect*real(iglob(i)-1, kind=dbl_kind) - center_x)**c2 + & - (dyrect*real(jglob(j)-1, kind=dbl_kind) - center_y)**c2) <= radius - - in_slotted_cyl = in_cyl .and. .not. in_slot - - if (in_slotted_cyl) then - aicen(i,j,n) = ainit(n) - else - aicen(i,j,n) = c0 - endif - - - end subroutine boxslotcyl_data_aice - -!======================================================================= - ! Set ice velocity for slotted cylinder advection test ! ! 
author: Philippe Blain (ECCC) diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index 5075ae205..ec19068ce 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -264,7 +264,9 @@ ocn_data_type = 'default' bgc_data_type = 'default' fe_data_type = 'default' - ice_data_type = 'default' + ice_data_type = 'latsst' + ice_data_conc = 'parabolic' + ice_data_dist = 'uniform' fyear_init = 2005 ycycle = 1 atm_data_format = 'bin' @@ -448,6 +450,9 @@ &icefields_nml f_tmask = .true. + f_umask = .false. + f_nmask = .false. + f_emask = .false. f_blkmask = .true. f_tarea = .true. f_uarea = .true. @@ -455,6 +460,10 @@ f_dyt = .false. f_dxu = .false. f_dyu = .false. + f_dxe = .false. + f_dye = .false. + f_dxn = .false. + f_dyn = .false. f_HTN = .false. f_HTE = .false. f_ANGLE = .true. diff --git a/configuration/scripts/options/set_nml.alt01 b/configuration/scripts/options/set_nml.alt01 index afe67691d..24947dcda 100644 --- a/configuration/scripts/options/set_nml.alt01 +++ b/configuration/scripts/options/set_nml.alt01 @@ -1,6 +1,6 @@ nilyr = 1 use_leap_years = .false. -ice_ic = 'default' +ice_ic = 'internal' distribution_type = 'roundrobin' distribution_wght = 'block' tr_iage = .false. diff --git a/configuration/scripts/options/set_nml.alt02 b/configuration/scripts/options/set_nml.alt02 index c4dbb0fea..a478809ca 100644 --- a/configuration/scripts/options/set_nml.alt02 +++ b/configuration/scripts/options/set_nml.alt02 @@ -1,6 +1,6 @@ ncat = 1 kcatbound = -1 -ice_ic = 'default' +ice_ic = 'internal' distribution_type = 'sectrobin' tr_iage = .true. tr_FY = .true. diff --git a/configuration/scripts/options/set_nml.alt03 b/configuration/scripts/options/set_nml.alt03 index 98e794735..c2ca38f32 100644 --- a/configuration/scripts/options/set_nml.alt03 +++ b/configuration/scripts/options/set_nml.alt03 @@ -1,6 +1,6 @@ ncat = 6 kcatbound = 2 -ice_ic = 'default' +ice_ic = 'internal' distribution_type = 'sectcart' conserv_check = .true. tr_iage = .false. diff --git a/configuration/scripts/options/set_nml.alt04 b/configuration/scripts/options/set_nml.alt04 index 98eb311cb..d1bc6ad02 100644 --- a/configuration/scripts/options/set_nml.alt04 +++ b/configuration/scripts/options/set_nml.alt04 @@ -1,4 +1,4 @@ -ice_ic = 'default' +ice_ic = 'internal' bfbflag = 'reprosum' distribution_type = 'rake' processor_shape = 'slenderX2' diff --git a/configuration/scripts/options/set_nml.alt05 b/configuration/scripts/options/set_nml.alt05 index a281bfa23..6793b5954 100644 --- a/configuration/scripts/options/set_nml.alt05 +++ b/configuration/scripts/options/set_nml.alt05 @@ -1,4 +1,4 @@ -ice_ic = 'default' +ice_ic = 'internal' tr_iage = .false. tr_FY = .false. tr_lvl = .false. diff --git a/configuration/scripts/options/set_nml.alt06 b/configuration/scripts/options/set_nml.alt06 index cd3a2222d..911acf8eb 100644 --- a/configuration/scripts/options/set_nml.alt06 +++ b/configuration/scripts/options/set_nml.alt06 @@ -1,5 +1,5 @@ ncat = 7 kcatbound = 3 nslyr = 3 -ice_ic = 'default' +ice_ic = 'internal' diff --git a/configuration/scripts/options/set_nml.box2001 b/configuration/scripts/options/set_nml.box2001 index 974564b34..adce08a74 100644 --- a/configuration/scripts/options/set_nml.box2001 +++ b/configuration/scripts/options/set_nml.box2001 @@ -3,7 +3,7 @@ grid_ocn = 'B' days_per_year = 360 use_leap_years = .false. npt = 240 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .true. 
histfreq = 'd','x','x','x','x' grid_type = 'rectangular' @@ -27,6 +27,8 @@ atmbndy = 'constant' atm_data_type = 'box2001' ocn_data_type = 'box2001' ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' calc_strair = .false. restore_ice = .true. f_aice = 'd' diff --git a/configuration/scripts/options/set_nml.boxadv b/configuration/scripts/options/set_nml.boxadv index f7daef019..815746102 100644 --- a/configuration/scripts/options/set_nml.boxadv +++ b/configuration/scripts/options/set_nml.boxadv @@ -1,6 +1,6 @@ grid_ocn = 'B' nilyr = 1 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .false. kcatbound = 2 ew_boundary_type = 'cyclic' @@ -8,6 +8,8 @@ ns_boundary_type = 'cyclic' atm_data_type = 'box2001' ocn_data_type = 'box2001' ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' tr_iage = .true. tr_FY = .false. tr_lvl = .true. diff --git a/configuration/scripts/options/set_nml.boxchan b/configuration/scripts/options/set_nml.boxchan index 6005dfc2d..a3f0fd191 100644 --- a/configuration/scripts/options/set_nml.boxchan +++ b/configuration/scripts/options/set_nml.boxchan @@ -2,7 +2,7 @@ days_per_year = 360 use_leap_years = .false. npt_unit = 'd' npt = 5 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .true. histfreq = 'd','1','x','x','x' grid_type = 'rectangular' @@ -26,6 +26,8 @@ atmbndy = 'constant' atm_data_type = 'uniform_east' ocn_data_type = 'calm' ice_data_type = 'channel' +ice_data_conc = 'p5' +ice_data_dist = 'uniform' calc_strair = .false. rotate_wind = .false. restore_ice = .false. diff --git a/configuration/scripts/options/set_nml.boxislandse b/configuration/scripts/options/set_nml.boxislandse index 561cdb2b1..4a8a47705 100644 --- a/configuration/scripts/options/set_nml.boxislandse +++ b/configuration/scripts/options/set_nml.boxislandse @@ -1,6 +1,6 @@ npt = 48 kmt_type = 'boxislands' -ice_ic = 'default' +ice_ic = 'internal' use_leap_years = .false. histfreq = 'd','x','x','x','x' grid_type = 'rectangular' @@ -19,6 +19,8 @@ atmbndy = 'constant' atm_data_type = 'uniform_east' ocn_data_type = 'calm' ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' rotate_wind = .false. calc_strair = .false. restore_ice = .false. diff --git a/configuration/scripts/options/set_nml.boxislandsn b/configuration/scripts/options/set_nml.boxislandsn index 35f321ee4..dd386ce5a 100644 --- a/configuration/scripts/options/set_nml.boxislandsn +++ b/configuration/scripts/options/set_nml.boxislandsn @@ -1,6 +1,6 @@ npt = 48 kmt_type = 'boxislands' -ice_ic = 'default' +ice_ic = 'internal' use_leap_years = .false. histfreq = 'd','x','x','x','x' grid_type = 'rectangular' @@ -19,6 +19,8 @@ atmbndy = 'constant' atm_data_type = 'uniform_north' ocn_data_type = 'calm' ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' rotate_wind = .false. calc_strair = .false. restore_ice = .false. diff --git a/configuration/scripts/options/set_nml.boxislandsne b/configuration/scripts/options/set_nml.boxislandsne index c572e7e2c..75db55722 100644 --- a/configuration/scripts/options/set_nml.boxislandsne +++ b/configuration/scripts/options/set_nml.boxislandsne @@ -1,6 +1,6 @@ npt = 48 kmt_type = 'boxislands' -ice_ic = 'default' +ice_ic = 'internal' use_leap_years = .false. 
histfreq = 'd','x','x','x','x' grid_type = 'rectangular' @@ -19,6 +19,8 @@ atmbndy = 'constant' atm_data_type = 'uniform_northeast' ocn_data_type = 'calm' ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' rotate_wind = .false. calc_strair = .false. restore_ice = .false. diff --git a/configuration/scripts/options/set_nml.boxnodyn b/configuration/scripts/options/set_nml.boxnodyn index 25ef3cbd4..deb53cf5a 100644 --- a/configuration/scripts/options/set_nml.boxnodyn +++ b/configuration/scripts/options/set_nml.boxnodyn @@ -1,5 +1,5 @@ nilyr = 1 -ice_ic = 'default' +ice_ic = 'internal' days_per_year = 360 use_leap_years = .false. npt = 72 @@ -52,6 +52,8 @@ krdg_redist = 1 seabed_stress = .true. atm_data_type = 'calm' ocn_data_type = 'calm' -ice_data_type = 'box2001' +ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' shortwave = 'ccsm3' albedo_type = 'constant' diff --git a/configuration/scripts/options/set_nml.boxrestore b/configuration/scripts/options/set_nml.boxrestore index a3bacd6d3..b2078a566 100644 --- a/configuration/scripts/options/set_nml.boxrestore +++ b/configuration/scripts/options/set_nml.boxrestore @@ -1,6 +1,6 @@ grid_ocn = 'B' nilyr = 1 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .true. use_leap_years = .false. ndtd = 2 @@ -10,6 +10,8 @@ ns_boundary_type = 'open' atm_data_type = 'box2001' ocn_data_type = 'box2001' ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' histfreq = 'd','x','x','x','x' histfreq_n = 1,1,1,1,1 f_aice = 'd' diff --git a/configuration/scripts/options/set_nml.boxslotcyl b/configuration/scripts/options/set_nml.boxslotcyl index 9bbe59dd6..3edd33ad8 100644 --- a/configuration/scripts/options/set_nml.boxslotcyl +++ b/configuration/scripts/options/set_nml.boxslotcyl @@ -1,7 +1,7 @@ grid_atm = 'B' grid_ocn = 'B' nilyr = 1 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .false. dt = 3600.0 npt = 288 @@ -22,6 +22,8 @@ ktransport = 1 atm_data_type = 'box2001' ocn_data_type = 'box2001' ice_data_type = 'boxslotcyl' +ice_data_conc = 'c1' +ice_data_dist = 'uniform' histfreq = 'h','x','x','x','x' histfreq_n = 6 , 1 , 1 , 1 , 1 f_aice = 'h' diff --git a/configuration/scripts/options/set_nml.boxsyme b/configuration/scripts/options/set_nml.boxsyme index bcdff7806..3ff31d2c4 100644 --- a/configuration/scripts/options/set_nml.boxsyme +++ b/configuration/scripts/options/set_nml.boxsyme @@ -2,7 +2,7 @@ days_per_year = 360 use_leap_years = .false. npt_unit = 'd' npt = 5 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .true. histfreq = 'd','1','x','x','x' grid_type = 'rectangular' @@ -26,6 +26,8 @@ atmbndy = 'constant' atm_data_type = 'uniform_east' ocn_data_type = 'calm' ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' calc_strair = .false. rotate_wind = .false. restore_ice = .false. diff --git a/configuration/scripts/options/set_nml.boxsymn b/configuration/scripts/options/set_nml.boxsymn index 04b88a997..90ef74813 100644 --- a/configuration/scripts/options/set_nml.boxsymn +++ b/configuration/scripts/options/set_nml.boxsymn @@ -2,7 +2,7 @@ days_per_year = 360 use_leap_years = .false. npt_unit = 'd' npt = 5 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .true. histfreq = 'd','1','x','x','x' grid_type = 'rectangular' @@ -26,6 +26,8 @@ atmbndy = 'constant' atm_data_type = 'uniform_north' ocn_data_type = 'calm' ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' calc_strair = .false. rotate_wind = .false. 
restore_ice = .false. diff --git a/configuration/scripts/options/set_nml.boxsymne b/configuration/scripts/options/set_nml.boxsymne index 927bbf961..5c7374976 100644 --- a/configuration/scripts/options/set_nml.boxsymne +++ b/configuration/scripts/options/set_nml.boxsymne @@ -2,7 +2,7 @@ days_per_year = 360 use_leap_years = .false. npt_unit = 'd' npt = 5 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .true. histfreq = 'd','1','x','x','x' grid_type = 'rectangular' @@ -26,6 +26,8 @@ atmbndy = 'constant' atm_data_type = 'uniform_northeast' ocn_data_type = 'calm' ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' calc_strair = .false. rotate_wind = .false. restore_ice = .false. diff --git a/configuration/scripts/options/set_nml.boxsyms b/configuration/scripts/options/set_nml.boxsyms index d01bb7b99..7fc0fc5a0 100644 --- a/configuration/scripts/options/set_nml.boxsyms +++ b/configuration/scripts/options/set_nml.boxsyms @@ -2,7 +2,7 @@ days_per_year = 360 use_leap_years = .false. npt_unit = 'd' npt = 5 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .true. histfreq = 'd','1','x','x','x' grid_type = 'rectangular' @@ -26,6 +26,8 @@ atmbndy = 'constant' atm_data_type = 'uniform_south' ocn_data_type = 'calm' ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' calc_strair = .false. rotate_wind = .false. restore_ice = .false. diff --git a/configuration/scripts/options/set_nml.boxsymw b/configuration/scripts/options/set_nml.boxsymw index a34fa165d..4be1f5f95 100644 --- a/configuration/scripts/options/set_nml.boxsymw +++ b/configuration/scripts/options/set_nml.boxsymw @@ -2,7 +2,7 @@ days_per_year = 360 use_leap_years = .false. npt_unit = 'd' npt = 5 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .true. histfreq = 'd','1','x','x','x' grid_type = 'rectangular' @@ -26,6 +26,8 @@ atmbndy = 'constant' atm_data_type = 'uniform_west' ocn_data_type = 'calm' ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' calc_strair = .false. rotate_wind = .false. restore_ice = .false. diff --git a/configuration/scripts/options/set_nml.boxwall b/configuration/scripts/options/set_nml.boxwall index bf61166dc..5a99e311b 100644 --- a/configuration/scripts/options/set_nml.boxwall +++ b/configuration/scripts/options/set_nml.boxwall @@ -1,7 +1,7 @@ days_per_year = 360 use_leap_years = .false. npt = 240 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .true. histfreq = 'd','1','x','x','x' grid_type = 'rectangular' @@ -24,7 +24,9 @@ coriolis = 'zero' atmbndy = 'constant' atm_data_type = 'uniform_east' ocn_data_type = 'calm' -ice_data_type = 'blocke' +ice_data_type = 'easthalf' +ice_data_conc = 'c1' +ice_data_dist = 'uniform' calc_strair = .false. rotate_wind = .false. restore_ice = .false. diff --git a/configuration/scripts/options/set_nml.boxwallblock b/configuration/scripts/options/set_nml.boxwallblock index 5b64ff798..5ef4cce7a 100644 --- a/configuration/scripts/options/set_nml.boxwallblock +++ b/configuration/scripts/options/set_nml.boxwallblock @@ -1,7 +1,7 @@ days_per_year = 360 use_leap_years = .false. npt = 240 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .true. histfreq = 'd','1','x','x','x' grid_type = 'rectangular' @@ -24,7 +24,9 @@ coriolis = 'zero' atmbndy = 'constant' atm_data_type = 'uniform_east' ocn_data_type = 'calm' -ice_data_type = 'medblocke' +ice_data_type = 'eastblock' +ice_data_conc = 'c1' +ice_data_dist = 'uniform' calc_strair = .false. rotate_wind = .false. restore_ice = .false. 
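Taken together, the options files above follow a single pattern: ice_data_type selects where ice is placed (the mask), ice_data_conc selects the concentration and thickness assigned in those cells, and ice_data_dist applies an optional spatial distribution on top. A minimal namelist sketch combining the three (values are illustrative, copied from set_nml.box2001 above; ice_ic lives in setup_nml and the ice_data_* settings in forcing_nml):

&setup_nml
  ice_ic        = 'internal'  ! initial state computed internally from the ice_data_* settings
/
&forcing_nml
  ice_data_type = 'box2001'   ! mask: place ice on the left side of the domain
  ice_data_conc = 'p5'        ! concentration: aice = 0.5 with 2 m thick ice
  ice_data_dist = 'box2001'   ! distribution: aice ramps linearly from 0 to 1 in x
/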
diff --git a/configuration/scripts/options/set_nml.boxwallp5 b/configuration/scripts/options/set_nml.boxwallp5 index 6f5d3fa96..229dba456 100644 --- a/configuration/scripts/options/set_nml.boxwallp5 +++ b/configuration/scripts/options/set_nml.boxwallp5 @@ -1,7 +1,7 @@ days_per_year = 360 use_leap_years = .false. npt = 240 -ice_ic = 'default' +ice_ic = 'internal' restart_ext = .true. histfreq = 'd','1','x','x','x' grid_type = 'rectangular' @@ -24,7 +24,9 @@ coriolis = 'zero' atmbndy = 'constant' atm_data_type = 'uniform_east' ocn_data_type = 'calm' -ice_data_type = 'blockep5' +ice_data_type = 'easthalf' +ice_data_conc = 'p5' +ice_data_dist = 'uniform' calc_strair = .false. rotate_wind = .false. restore_ice = .false. diff --git a/configuration/scripts/options/set_nml.gbox12 b/configuration/scripts/options/set_nml.gbox12 index e17e57fe2..6ad5b567b 100644 --- a/configuration/scripts/options/set_nml.gbox12 +++ b/configuration/scripts/options/set_nml.gbox12 @@ -1,6 +1,8 @@ -ice_ic = 'default' +ice_ic = 'internal' grid_type = 'rectangular' kmt_type = 'default' atm_data_type = 'box2001' ocn_data_type = 'calm' ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox128 b/configuration/scripts/options/set_nml.gbox128 index 80860bc8e..f82267c64 100644 --- a/configuration/scripts/options/set_nml.gbox128 +++ b/configuration/scripts/options/set_nml.gbox128 @@ -1,8 +1,10 @@ grid_ocn = 'B' -ice_ic = 'default' +ice_ic = 'internal' grid_type = 'rectangular' kmt_type = 'default' atm_data_type = 'box2001' ocn_data_type = 'box2001' ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox180 b/configuration/scripts/options/set_nml.gbox180 index e17e57fe2..6ad5b567b 100644 --- a/configuration/scripts/options/set_nml.gbox180 +++ b/configuration/scripts/options/set_nml.gbox180 @@ -1,6 +1,8 @@ -ice_ic = 'default' +ice_ic = 'internal' grid_type = 'rectangular' kmt_type = 'default' atm_data_type = 'box2001' ocn_data_type = 'calm' ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox80 b/configuration/scripts/options/set_nml.gbox80 index e17e57fe2..6ad5b567b 100644 --- a/configuration/scripts/options/set_nml.gbox80 +++ b/configuration/scripts/options/set_nml.gbox80 @@ -1,6 +1,8 @@ -ice_ic = 'default' +ice_ic = 'internal' grid_type = 'rectangular' kmt_type = 'default' atm_data_type = 'box2001' ocn_data_type = 'calm' ice_data_type = 'box2001' +ice_data_conc = 'p5' +ice_data_dist = 'box2001' diff --git a/configuration/scripts/options/set_nml.icdefault b/configuration/scripts/options/set_nml.icdefault index 2f3c092d4..7019acf0b 100644 --- a/configuration/scripts/options/set_nml.icdefault +++ b/configuration/scripts/options/set_nml.icdefault @@ -1 +1 @@ -ice_ic = 'default' +ice_ic = 'internal' diff --git a/configuration/scripts/options/set_nml.iobinary b/configuration/scripts/options/set_nml.iobinary index 2f3c092d4..7019acf0b 100644 --- a/configuration/scripts/options/set_nml.iobinary +++ b/configuration/scripts/options/set_nml.iobinary @@ -1 +1 @@ -ice_ic = 'default' +ice_ic = 'internal' diff --git a/configuration/scripts/options/set_nml.run10year b/configuration/scripts/options/set_nml.run10year index cf672e991..0eb95071a 100644 --- a/configuration/scripts/options/set_nml.run10year +++ b/configuration/scripts/options/set_nml.run10year @@ -1,7 +1,7 @@ npt_unit = 'y' npt = 10 dumpfreq = 'y' 
-dumpfreq_n = 12 +dumpfreq_n = 1 diagfreq = 24 histfreq = 'm','x','x','x','x' diff --git a/configuration/scripts/options/set_nml.tx1 b/configuration/scripts/options/set_nml.tx1 index 2ef4edd33..5e66db871 100644 --- a/configuration/scripts/options/set_nml.tx1 +++ b/configuration/scripts/options/set_nml.tx1 @@ -1,6 +1,6 @@ dt = 3600.0 runtype = 'initial' -ice_ic = 'default' +ice_ic = 'internal' grid_format = 'bin' grid_type = 'tripole' ns_boundary_type = 'tripole' diff --git a/configuration/scripts/options/set_nml.zsal b/configuration/scripts/options/set_nml.zsal index 5503e0231..724893ffc 100644 --- a/configuration/scripts/options/set_nml.zsal +++ b/configuration/scripts/options/set_nml.zsal @@ -4,5 +4,5 @@ sw_redist = .true. tfrz_option = 'linear_salt' tr_brine = .true. solve_zsal = .true. -ice_ic = 'default' +ice_ic = 'internal' diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 54263267a..91be4f72f 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -344,6 +344,9 @@ either Celsius or Kelvin units). "i0vis","fraction of penetrating visible solar radiation", "0.70" "iblkp","block on which to write debugging data", "" "i(j)block", "Cartesian i,j position of block", "" + "ice_data_conc", "ice initialization concentration, used mainly for box tests", "" + "ice_data_dist", "ice initialization distribution, used mainly for box tests", "" + "ice_data_type", "ice initialization mask, used mainly for box tests", "" "ice_hist_field", "type for history variables", "" "ice_ic", "choice of initial conditions (see :ref:`tab-ic`)", "" "ice_stdout", "unit number for standard output", "" diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index fcc1cc8c9..549221d37 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -191,7 +191,8 @@ setup_nml "``history_format``", "``default``", "read/write history files in default format", "``default``" "", "``pio_pnetcdf``", "read/write restart files with pnetcdf in pio", "" "``history_precision``", "integer", "history file precision: 4 or 8 byte", "4" - "``ice_ic``", "``default``", "latitude and sst dependent initial condition", "``default``" + "``ice_ic``", "``default``", "equal to internal", "``default``" + "", "``internal``", "initial conditions set based on ice_data_ inputs", "" "", "``none``", "no ice", "" "", "'path/file'", "restart file name", "" "``incond_dir``", "string", "path to initial condition directory", "'./'" @@ -599,19 +600,27 @@ forcing_nml "``formdrag``", "logical", "calculate form drag", "``.false.``" "``fyear_init``", "integer", "first year of atmospheric forcing data", "1900" "``highfreq``", "logical", "high-frequency atmo coupling", "``.false.``" - "``ice_data_type``", "``bigblock``", "uniform ice block covering about 90 percent of the area in center of domain", "``default``" - "", "``blocke``", "initialize ice concentration on right side of domain with aice=1", "" - "", "``blockep5``", "initialize ice concentration on right side of domain with aice=0.5", "" - "", "``boxslotcyl``", "initialize ice concentration and velocity for :ref:`boxslotcyl` test (:cite:`Zalesak79`)", "" - "", "``box2001``", "initialize ice concentration for :ref:`box2001` test (:cite:`Hunke01`) aice 0 to 1 zonally", "" - "", "``channel``", "uniform block ice concentration and thickness in i-direction in 50% of domain in j-direction", "" - "", "``default``", "ice dependent on latitude and ocean temperature", "" - "", 
"``gauss``", "gauss distributed ice block covering about 90 percent of the area in center of domain", "" - "", "``medblock``", "uniform ice block covering about 25 percent of the area in center of domain", "" - "", "``medblocke``", "uniform ice block covering about 25 percent at right side of domain", "" - "", "``smallblock``", "uniform 2x2 block ice concentration and thickness in center of domain", "" - "", "``uniform``", "uniform ice concentration and thickness across domain distributed in categories", "" - "", "``uniformp5``", "uniform ice concentration and thickness across domain with aice=0.5 q", "" + "``ice_data_conc``", "``c1``", "initial ice concentation of 1.0", "``default``" + "", "``default``", "same as parabolic", "" + "", "``p5``", "initial concentration of 0.5", "" + "", "``p8``", "initial concentration of 0.8", "" + "", "``p9``", "initial concentration of 0.9", "" + "", "``parabolic``", "parabolic in ice thickness space with sum of aicen=1.0", "" + "``ice_data_dist``", "``box2001``", "ice distribution ramped from 0 to 1 west to east consistent with :ref:`box2001` test (:cite:`Hunke01`)", "``default``" + "", "``default``", "uniform distribution, equivalent to uniform", "" + "", "``gauss``", "gauss distbution of ice with a peak in the center of the domain", "" + "", "``uniform``", "uniform distribution, equivalent to default", "" + "``ice_data_type``", "``bigblock``", "ice mask covering about 90 percent of the area in center of domain", "``default``" + "", "``block``", "ice block covering about 25 percent of the area in center of domain", "" + "", "``boxslotcyl``", "slot cylinder ice mask associated with :ref:`boxslotcyl` test (:cite:`Zalesak79`)", "" + "", "``box2001``", "box2001 ice mask associate with :ref:`box2001` test (:cite:`Hunke01`)", "" + "", "``channel``", "ice defined on entire grid in i-direction and 50% in j-direction in center of domain", "" + "", "``default``", "same as latsst", "" + "", "``eastblock``", "ice block covering about 25 percent of domain at the east edge of the domain", "" + "", "``easthalf``", "ice defined on east half of the domain","" + "", "``latsst``", "ice dependent on latitude and ocean temperature", "" + "", "``smallblock``", "ice defined on 2x2 gridcells in center of domain", "" + "", "``uniform``", "ice defined at all grid points", "" "``iceruf``", "real", "ice surface roughness at atmosphere interface", "0.0005" "``l_mpond_fresh``", "``.false.``", "release pond water immediately to ocean", "``.false.``" "", "``true``", "retain (topo) pond water until ponds drain", "" @@ -634,7 +643,7 @@ forcing_nml "``restore_ocn``", "logical", "restore sst to data", "``.false.``" "``restore_ice``", "logical", "restore ice state along lateral boundaries", "``.false.``" "``rotate_wind``", "logical", "rotate wind from east/north to computation grid", "``.true.``" - "``tfrz_option``", "``linear_salt``", "linear functino of salinity (ktherm=1)", "``mushy``" + "``tfrz_option``", "``linear_salt``", "linear function of salinity (ktherm=1)", "``mushy``" "", "``minus1p8``", "constant ocean freezing temperature (:math:`-1.8^{\circ} C`)", "" "", "``mushy``", "matches mushy-layer thermo (ktherm=2)", "" "``trestore``", "integer", "sst restoring time scale (days)", "90" diff --git a/doc/source/user_guide/ug_testing.rst b/doc/source/user_guide/ug_testing.rst index 005f4f851..b8d42ad6d 100644 --- a/doc/source/user_guide/ug_testing.rst +++ b/doc/source/user_guide/ug_testing.rst @@ -229,7 +229,9 @@ boundary around the entire domain. 
It includes the following namelist modificat - ``dyrect``: ``16.e5`` cm - ``ktherm``: ``-1`` (disables thermodynamics) - ``coriolis``: ``constant`` (``f=1.46e-4`` s\ :math:`^{-1}`) -- ``ice_data_type`` : ``box2001`` (special ice concentration initialization) +- ``ice_data_type`` : ``box2001`` (special initial ice mask) +- ``ice_data_conc`` : ``p5`` +- ``ice_data_dist`` : ``box2001`` (special ice concentration initialization) - ``atm_data_type`` : ``box2001`` (special atmospheric and ocean forcing) Ocean stresses are computed as in :cite:`Hunke01` where they are circular and centered @@ -257,7 +259,9 @@ boundary around the entire domain. It includes the following namelist modificat - ``ktherm``: ``-1`` (disables thermodynamics) - ``kridge``: ``-1`` (disables ridging) - ``kdyn``: ``-1`` (disables dynamics) -- ``ice_data_type`` : ``boxslotcyl`` (special ice concentration and velocity initialization) +- ``ice_data_type`` : ``boxslotcyl`` (special initial ice mask) +- ``ice_data_conc`` : ``c1`` +- ``ice_data_dist`` : ``uniform`` Dynamics is disabled because we directly impose a constant ice velocity. The ice velocity field is circular and centered in the square domain, and such that the slotted cylinder makes a complete revolution with a period :math:`T=` 12 days : From 014852af273d8885fcb870d3b3bd62a8c8080524 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Sat, 9 Apr 2022 17:49:02 -0700 Subject: [PATCH 100/109] Add memory diagnostics (#90) * Add memory usage methods and output control * update ice_memusage implementation, remove some redundant box tests from gridsys_suite * update documentation * update github actions for gptl * Add gptl copyright --- .github/workflows/test-cice.yml | 1 + cicecore/cicedynB/general/ice_init.F90 | 6 +- .../comm/serial/ice_reprosum.F90 | 32 +-- .../cicedynB/infrastructure/ice_memusage.F90 | 151 +++++++++++ .../infrastructure/ice_memusage_gptl.c | 239 ++++++++++++++++++ .../drivers/standalone/cice/CICE_InitMod.F90 | 11 + .../drivers/standalone/cice/CICE_RunMod.F90 | 5 + configuration/scripts/ice_in | 3 + .../scripts/machines/env.conda_macos | 5 + configuration/scripts/tests/gridsys_suite.ts | 36 +-- doc/source/user_guide/ug_case_settings.rst | 1 + doc/source/user_guide/ug_implementation.rst | 5 + 12 files changed, 460 insertions(+), 35 deletions(-) create mode 100644 cicecore/cicedynB/infrastructure/ice_memusage.F90 create mode 100644 cicecore/cicedynB/infrastructure/ice_memusage_gptl.c diff --git a/.github/workflows/test-cice.yml b/.github/workflows/test-cice.yml index 32e784564..c60a07721 100644 --- a/.github/workflows/test-cice.yml +++ b/.github/workflows/test-cice.yml @@ -42,6 +42,7 @@ jobs: run: | sudo xcode-select -r sudo xcode-select -s /Library/Developer/CommandLineTools + sudo ln -s /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include/* /usr/local/include/ echo "xcrun --show-sdk-path: $(xcrun --show-sdk-path)" echo "xcode-select -p: $(xcode-select -p)" - name: system info diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 1f695e513..915d51640 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -122,6 +122,7 @@ subroutine input_data use ice_transport_driver, only: advection, conserv_check use ice_restoring, only: restore_ice use ice_timers, only: timer_stats + use ice_memusage, only: memory_stats #ifdef CESMCOUPLED use shr_file_mod, only: shr_file_setIO #endif @@ -180,7 +181,7 @@ subroutine input_data print_global, print_points, latpnt, lonpnt, & 
debug_forcing, histfreq, histfreq_n, hist_avg, & history_dir, history_file, history_precision, cpl_bgc, & - histfreq_base, dumpfreq_base, timer_stats, & + histfreq_base, dumpfreq_base, timer_stats, memory_stats, & conserv_check, debug_model, debug_model_step, & debug_model_i, debug_model_j, debug_model_iblk, debug_model_task, & year_init, month_init, day_init, sec_init, & @@ -301,6 +302,7 @@ subroutine input_data print_points = .false. ! if true, print point data print_global = .true. ! if true, print global diagnostic data timer_stats = .false. ! if true, print out detailed timer statistics + memory_stats = .false. ! if true, print out memory information bfbflag = 'off' ! off = optimized diag_type = 'stdout' diag_file = 'ice_diag.d' @@ -786,6 +788,7 @@ subroutine input_data call broadcast_scalar(print_points, master_task) call broadcast_scalar(print_global, master_task) call broadcast_scalar(timer_stats, master_task) + call broadcast_scalar(memory_stats, master_task) call broadcast_scalar(bfbflag, master_task) call broadcast_scalar(diag_type, master_task) call broadcast_scalar(diag_file, master_task) @@ -2181,6 +2184,7 @@ subroutine input_data write(nu_diag,1021) ' debug_model_iblk = ', debug_model_iblk write(nu_diag,1021) ' debug_model_task = ', debug_model_task write(nu_diag,1011) ' timer_stats = ', timer_stats + write(nu_diag,1011) ' memory_stats = ', memory_stats write(nu_diag,1031) ' bfbflag = ', trim(bfbflag) write(nu_diag,1021) ' numin = ', numin write(nu_diag,1021) ' numax = ', numax diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90 index 1e4307535..13ff6fcb8 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90 @@ -101,10 +101,10 @@ MODULE ice_reprosum !----------------------------------------------------------------------- subroutine ice_reprosum_setopts(repro_sum_use_ddpdd_in, & - repro_sum_rel_diff_max_in, & - repro_sum_recompute_in, & - repro_sum_master, & - repro_sum_logunit ) + repro_sum_rel_diff_max_in, & + repro_sum_recompute_in, & + repro_sum_master, & + repro_sum_logunit ) !------------------------------Arguments-------------------------------- logical, intent(in), optional :: repro_sum_use_ddpdd_in @@ -261,12 +261,12 @@ end subroutine ice_reprosum_setopts !---------------------------------------------------------------------- subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & - nflds, ddpdd_sum, & - arr_gbl_max, arr_gbl_max_out, & - arr_max_levels, arr_max_levels_out, & - gbl_max_nsummands, gbl_max_nsummands_out,& - gbl_count, repro_sum_validate, & - repro_sum_stats, rel_diff, commid ) + nflds, ddpdd_sum, & + arr_gbl_max, arr_gbl_max_out, & + arr_max_levels, arr_max_levels_out, & + gbl_max_nsummands, gbl_max_nsummands_out,& + gbl_count, repro_sum_validate, & + repro_sum_stats, rel_diff, commid ) !---------------------------------------------------------------------- ! Arguments @@ -435,7 +435,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & ! if (detailed_timing) call xicex_timer_start('ice_reprosum_ddpdd') call ice_reprosum_ddpdd(arr, arr_gsum, nsummands, dsummands, & - nflds, mpi_comm) + nflds, mpi_comm) repro_sum_fast = 1 ! 
if (detailed_timing) call xicex_timer_stop('ice_reprosum_ddpdd') @@ -775,9 +775,9 @@ end subroutine ice_reprosum_calc !---------------------------------------------------------------------- subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & - arr_max_shift, arr_gmax_exp, max_levels, & - max_level, validate, recompute, & - omp_nthreads, mpi_comm ) + arr_max_shift, arr_gmax_exp, max_levels, & + max_level, validate, recompute, & + omp_nthreads, mpi_comm ) !---------------------------------------------------------------------- @@ -1225,7 +1225,7 @@ end subroutine ice_reprosum_int !---------------------------------------------------------------------- logical function ice_reprosum_tolExceeded (name, nflds, master, & - logunit, rel_diff ) + logunit, rel_diff ) !---------------------------------------------------------------------- ! Arguments @@ -1311,7 +1311,7 @@ end function ice_reprosum_tolExceeded !---------------------------------------------------------------------- subroutine ice_reprosum_ddpdd (arr, arr_gsum, nsummands, dsummands, & - nflds, mpi_comm ) + nflds, mpi_comm ) !---------------------------------------------------------------------- ! Arguments diff --git a/cicecore/cicedynB/infrastructure/ice_memusage.F90 b/cicecore/cicedynB/infrastructure/ice_memusage.F90 new file mode 100644 index 000000000..19e7dfb15 --- /dev/null +++ b/cicecore/cicedynB/infrastructure/ice_memusage.F90 @@ -0,0 +1,151 @@ +! Provides methods for querying memory use + +MODULE ice_memusage + +!------------------------------------------------------------------------------- +! PURPOSE: memory use query methods +! Should call ice_memusage_init once before calling other interfaces +!------------------------------------------------------------------------------- + + use ice_kinds_mod, only : dbl_kind, log_kind + + implicit none + private + +! PUBLIC: Public interfaces + + public :: ice_memusage_getusage, & + ice_memusage_init, & + ice_memusage_print + + logical(log_kind), public :: memory_stats + +! PRIVATE DATA: + + real(dbl_kind) :: mb_blk = 1.0_dbl_kind + logical :: initset = .false. + +!=============================================================================== + +contains + +!=============================================================================== +! Initialize memory conversion to MB + +subroutine ice_memusage_init(iunit) + + implicit none + + !----- arguments ----- + + integer, optional :: iunit !< output unit number for optional writes + + !----- local ----- + + ! --- Memory stats --- + integer :: msize ! memory size (high water) + integer :: mrss ! resident size (current memory use) + integer :: msize0,msize1 ! temporary size + integer :: mrss0,mrss1,mrss2 ! temporary rss + integer :: mshare,mtext,mdatastack + integer :: ierr + + integer :: ice_memusage_gptl + + real(dbl_kind),allocatable :: mem_tmp(:) + character(*),parameter :: subname = '(ice_memusage_init)' + + !--------------------------------------------------- + + ! return if memory_stats are off + if (.not. memory_stats) return + + ierr = ice_memusage_gptl (msize, mrss0, mshare, mtext, mdatastack) + allocate(mem_tmp(1024*1024)) ! 1 MWord, 8 MB + mem_tmp = -1.0 + ierr = ice_memusage_gptl (msize, mrss1, mshare, mtext, mdatastack) + deallocate(mem_tmp) + ierr = ice_memusage_gptl (msize, mrss2, mshare, mtext, mdatastack) + mb_blk = 1.0_dbl_kind + if (mrss1 - mrss0 > 0) then + mb_blk = (8.0_dbl_kind)/((mrss1-mrss0)*1.0_dbl_kind) + initset = .true. 
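+    ! The temporary 8 MB allocation above calibrates mb_blk: ice_memusage_gptl
+    ! reports rss in platform-dependent units (e.g. pages from /proc or KB from
+    ! getrusage), so the measured rss change of a known 8 MB array gives the
+    ! conversion factor from those native units to MB. If no rss change is
+    ! detected, mb_blk stays at 1, initset stays .false., and
+    ! ice_memusage_print reports '(??)' instead of '(MB)' for the units.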
+ endif + + if (present(iunit)) then + write(iunit,'(A,l4)') subname//' Initset conversion flag is ',initset + write(iunit,'(A,f16.2)') subname//' 8 MB memory alloc in MB is ',(mrss1-mrss0)*mb_blk + write(iunit,'(A,f16.2)') subname//' 8 MB memory dealloc in MB is ',(mrss1-mrss2)*mb_blk + write(iunit,'(A,f16.2)') subname//' Memory block size conversion in bytes is ',mb_blk*1024_dbl_kind*1024.0_dbl_kind + endif + +end subroutine ice_memusage_init + +!=============================================================================== +! Determine memory use + +subroutine ice_memusage_getusage(r_msize,r_mrss) + + implicit none + + !----- arguments --- + real(dbl_kind),intent(out) :: r_msize !< memory usage value + real(dbl_kind),intent(out) :: r_mrss !< memory usage value + + !----- local --- + integer :: msize,mrss + integer :: mshare,mtext,mdatastack + integer :: ierr + integer :: ice_memusage_gptl + character(*),parameter :: subname = '(ice_memusage_getusage)' + + !--------------------------------------------------- + + ! return if memory_stats are off + if (.not. memory_stats) return + + ierr = ice_memusage_gptl (msize, mrss, mshare, mtext, mdatastack) + r_msize = msize*mb_blk + r_mrss = mrss*mb_blk + +end subroutine ice_memusage_getusage + +!=============================================================================== +! Print memory use + +subroutine ice_memusage_print(iunit,string) + + implicit none + + !----- arguments --- + integer, intent(in) :: iunit !< unit number to write to + character(len=*),optional, intent(in) :: string !< optional string + + !----- local --- + real(dbl_kind) :: msize,mrss + character(len=128) :: lstring + character(*),parameter :: subname = '(ice_memusage_print)' + + !--------------------------------------------------- + + ! return if memory_stats are off + if (.not. memory_stats) return + + lstring = ' ' + if (present(string)) then + lstring = string + endif + + call ice_memusage_getusage(msize,mrss) + + if (initset) then + write(iunit,'(2a,2f14.4,1x,a)') subname,' memory use (MB) = ',msize,mrss,trim(lstring) + else + write(iunit,'(2a,2f14.4,1x,a)') subname,' memory use (??) = ',msize,mrss,trim(lstring) + endif + +end subroutine ice_memusage_print + +!=============================================================================== + +END MODULE ice_memusage diff --git a/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c b/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c new file mode 100644 index 000000000..ec9c2c1d8 --- /dev/null +++ b/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c @@ -0,0 +1,239 @@ +/* +** This file was downloaded and modified from https://github.com/jmrosinski/GPTL +** with the following Copyright permission, +** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and associated documentation files (the “Software”), to deal +** in the Software for any noncommercial purposes without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Software, and to permit +** persons to whom the Software is furnished to do so, subject to the following +** conditions: The above copyright notice and this permission notice shall be +** included in all copies or substantial portions of the Software. Any +** commercial use (including sale) of the software, and derivative development +** towards commercial use, requires written permission of the copyright +** holder. 
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO +** EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES +** OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +** ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +** DEALINGS IN THE SOFTWARE. +** +*/ + +/* +** $Id: get_memusage.c,v 1.10 2010-11-09 19:08:53 rosinski Exp $ +** +** Author: Jim Rosinski +** Credit to Chuck Bardeen for MACOS section (__APPLE__ ifdef) +** +** get_memusage: +** +** Designed to be called from Fortran, returns information about memory +** usage in each of 5 input int* args. On Linux read from the /proc +** filesystem because getrusage() returns placebos (zeros). Return -1 for +** values which are unavailable or ambiguous on a particular architecture. +** +** Return value: 0 = success +** -1 = failure +*/ + +#define _NO_CHANGE 0 +#define _UPPER_CASE 1 +#define _ADD_UNDERSCORE 2 +#define _ADD_TWO_UNDERSCORES 3 + +#ifdef FORTRANUNDERSCORE +#define NAMING _ADD_UNDERSCORE +#endif + +#ifdef FORTRANDOUBLEUNDERSCORE +#define NAMING _ADD_TWO_UNDERSCORES +#endif + +#ifdef FORTRANCAPS +#define NAMING _UPPER_CASE +#endif + +#ifndef NAMING +#define NAMING _NO_CHANGE +#endif + +#if (NAMING == _ADD_UNDERSCORE) +#define ice_memusage_gptl ice_memusage_gptl_ +#endif + +#if (NAMING == _ADD_TWO_UNDERSCORES) +#define ice_memusage_gptl ice_memusage_gptl__ +#endif + +#if (NAMING == _UPPER_CASE) +#define ice_memusage_gptl ICE_MEMUSAGE_GPTL +#endif + + +#include <sys/resource.h> + +/*#include "gptl.h" */ /* additional cpp defs and function prototypes */ +/* extern int ice_memusage_gptl (int *, int *, int *, int *, int *); */ + +/* _AIX is automatically defined when using the AIX C compilers */ +#ifdef _AIX +#include <sys/times.h> +#endif + +#ifdef IRIX64 +#include <sys/time.h> +#endif + +#ifdef HAVE_SLASHPROC + +#include <sys/time.h> +#include <sys/types.h> +#include <stdio.h> +#include <unistd.h> + +#elif (defined __APPLE__) + +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> + +#endif + +#ifdef BGP + +#include <spi/kernel_interface.h> +#include <common/bgp_personality.h> +#include <common/bgp_personality_inlines.h> +#include <malloc.h> +#define Personality _BGP_Personality_t + +#endif + +#ifdef BGQ + +#include <malloc.h> +#include <spi/include/kernel/memory.h> + +#endif + +int ice_memusage_gptl (int *size, int *rss, int *share, int *text, int *datastack) +{ +#if defined (BGP) || defined(BGQ) + + long long alloc; + struct mallinfo m; +#if defined (BGP) + Personality pers; +#endif +#if defined (BGQ) + uint64_t shared_mem_count; +#endif + long long total; + int node_config; + + /* memory available */ +#if defined(BGP) + Kernel_GetPersonality(&pers, sizeof(pers)); + total = BGP_Personality_DDRSizeMB(&pers); + + node_config = BGP_Personality_processConfig(&pers); + if (node_config == _BGP_PERS_PROCESSCONFIG_VNM) total /= 4; + else if (node_config == _BGP_PERS_PROCESSCONFIG_2x2) total /= 2; + total *= 1024*1024; + + *size = total; +#endif + +#if defined(BGQ) + Kernel_GetMemorySize(KERNEL_MEMSIZE_SHARED, &shared_mem_count); + + shared_mem_count *= 1024*1024; + *size = shared_mem_count; + +#endif + /* total memory used - heap only (not static memory)*/ + + m = mallinfo(); + alloc = m.hblkhd + m.uordblks; + + *rss = alloc; + *share = -1; + *text = -1; + *datastack = -1; + + +#elif (defined HAVE_SLASHPROC) + FILE *fd; /* file descriptor for fopen */ + int pid; /* process id */ + static char *head = "/proc/"; /* part of path */ + static char *tail = "/statm"; /* part of path */ + char file[19]; /* full path to file in /proc */ + int dum; /* placeholder for
unused return arguments */ + int ret; /* function return value */ + + /* + ** The file we want to open is /proc/<pid>/statm + */ + + pid = (int) getpid (); + if (pid > 999999) { + fprintf (stderr, "get_memusage: pid %d is too large\n", pid); + return -1; + } + + sprintf (file, "%s%d%s", head, pid, tail); + if ((fd = fopen (file, "r")) == NULL) { + fprintf (stderr, "get_memusage: bad attempt to open %s\n", file); + return -1; + } + + /* + ** Read the desired data from the /proc filesystem directly into the output + ** arguments, close the file and return. + */ + + ret = fscanf (fd, "%d %d %d %d %d %d %d", + size, rss, share, text, datastack, &dum, &dum); + ret = fclose (fd); + return 0; + +#elif (defined __APPLE__) + + FILE *fd; + char cmd[60]; + int pid = (int) getpid (); + + sprintf (cmd, "ps -o vsz -o rss -o tsiz -p %d | grep -v RSS", pid); + fd = popen (cmd, "r"); + + if (fd) { + fscanf (fd, "%d %d %d", size, rss, text); + *share = -1; + *datastack = -1; + (void) pclose (fd); + } + + return 0; + +#else + + struct rusage usage; /* structure filled in by getrusage */ + + if (getrusage (RUSAGE_SELF, &usage) < 0) + return -1; + + *size = -1; + *rss = usage.ru_maxrss; + *share = -1; + *text = -1; + *datastack = -1; +#ifdef IRIX64 + *datastack = usage.ru_idrss + usage.ru_isrss; +#endif + return 0; + +#endif +} diff --git a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 index 363025b9b..0130d2588 100644 --- a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 @@ -15,6 +15,7 @@ module CICE_InitMod use ice_kinds_mod use ice_exit, only: abort_ice use ice_fileunits, only: init_fileunits, nu_diag + use ice_memusage, only: ice_memusage_init, ice_memusage_print use icepack_intfc, only: icepack_aggregate use icepack_intfc, only: icepack_init_itd, icepack_init_itd_hist use icepack_intfc, only: icepack_init_fsd_bounds, icepack_init_wave @@ -110,6 +111,12 @@ subroutine cice_init call input_zbgc ! vertical biogeochemistry namelist call count_tracers ! count tracers + ! Call this as early as possible, must be after memory_stats is read + if (my_task == master_task) then + call ice_memusage_init(nu_diag) + call ice_memusage_print(nu_diag,subname//':start') + endif + call init_domain_blocks ! set up block decomposition call init_grid1 ! domain distribution call alloc_grid ! allocate grid arrays @@ -238,6 +245,10 @@ subroutine cice_init if (write_ic) call accum_hist(dt) !
write initial conditions + if (my_task == master_task) then + call ice_memusage_print(nu_diag,subname//':end') + endif + end subroutine cice_init !======================================================================= diff --git a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 index 0b4326c0a..78e3b5259 100644 --- a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 @@ -15,11 +15,13 @@ module CICE_RunMod use ice_kinds_mod + use ice_communicate, only: my_task, master_task use ice_fileunits, only: nu_diag use ice_arrays_column, only: oceanmixed_ice use ice_constants, only: c0, c1 use ice_constants, only: field_loc_center, field_type_scalar use ice_exit, only: abort_ice + use ice_memusage, only: ice_memusage_print use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_max_iso, icepack_max_aero use icepack_intfc, only: icepack_query_parameters @@ -378,6 +380,9 @@ subroutine ice_step if (solve_zsal) call zsal_diags if (skl_bgc .or. z_tracers) call bgc_diags if (tr_brine) call hbrine_diags + if (my_task == master_task) then + call ice_memusage_print(nu_diag,subname) + endif endif call ice_timer_stop(timer_diags) ! diagnostics diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index ec19068ce..25130b131 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -39,6 +39,7 @@ print_global = .true. print_points = .true. timer_stats = .false. + memory_stats = .false. conserv_check = .false. latpnt(1) = 90. lonpnt(1) = 0. @@ -456,6 +457,8 @@ f_blkmask = .true. f_tarea = .true. f_uarea = .true. + f_narea = .false. + f_earea = .false. f_dxt = .false. f_dyt = .false. f_dxu = .false. diff --git a/configuration/scripts/machines/env.conda_macos b/configuration/scripts/machines/env.conda_macos index 3b3537bf7..8eaf5b622 100755 --- a/configuration/scripts/machines/env.conda_macos +++ b/configuration/scripts/machines/env.conda_macos @@ -7,6 +7,11 @@ endif if ("$inp" != "-nomodules") then +#On macos, for this to work, you may need to do something like +# sudo xcode-select -r +# sudo xcode-select -s /Library/Developer/CommandLineTools +# sudo ln -s /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include/* /usr/local/include/ + # Init conda if ! 
$?CONDA_EXE then echo "" diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts index 95ee56f7f..6909c1ac9 100644 --- a/configuration/scripts/tests/gridsys_suite.ts +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -10,15 +10,15 @@ smoke gbox80 3x3 boxwall smoke gbox80 2x2 boxwallblock smoke gbox80 1x1 boxslotcyl smoke gbox80 2x4 boxnodyn -smoke gbox80 2x2 boxsymn,run1day +#smoke gbox80 2x2 boxsymn,run1day smoke gbox80 4x2 boxsyme,run1day -smoke gbox80 4x1 boxsymne,run1day -smoke gbox80 2x2 boxsymn,run1day,kmtislands +#smoke gbox80 4x1 boxsymne,run1day +#smoke gbox80 2x2 boxsymn,run1day,kmtislands smoke gbox80 4x1 boxsyme,run1day,kmtislands -smoke gbox80 4x2 boxsymne,run1day,kmtislands -smoke gbox80 8x1 boxislandsn,run1day +#smoke gbox80 4x2 boxsymne,run1day,kmtislands +#smoke gbox80 8x1 boxislandsn,run1day smoke gbox80 4x2 boxislandse,run1day -smoke gbox80 2x4 boxislandsne,run1day +#smoke gbox80 2x4 boxislandsne,run1day smoke gx3 1x1x100x116x1 reprosum,run10day smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day @@ -39,15 +39,15 @@ smoke gbox80 3x3 boxwall,gridcd smoke gbox80 2x2 boxwallblock,gridcd smoke gbox80 1x1 boxslotcyl,gridcd smoke gbox80 2x4 boxnodyn,gridcd -smoke gbox80 2x2 boxsymn,run1day,gridcd +#smoke gbox80 2x2 boxsymn,run1day,gridcd smoke gbox80 4x2 boxsyme,run1day,gridcd -smoke gbox80 4x1 boxsymne,run1day,gridcd -smoke gbox80 2x2 boxsymn,run1day,kmtislands,gridcd +#smoke gbox80 4x1 boxsymne,run1day,gridcd +#smoke gbox80 2x2 boxsymn,run1day,kmtislands,gridcd smoke gbox80 4x1 boxsyme,run1day,kmtislands,gridcd -smoke gbox80 4x2 boxsymne,run1day,kmtislands,gridcd -smoke gbox80 8x1 boxislandsn,run1day,gridcd +#smoke gbox80 4x2 boxsymne,run1day,kmtislands,gridcd +#smoke gbox80 8x1 boxislandsn,run1day,gridcd smoke gbox80 4x2 boxislandse,run1day,gridcd -smoke gbox80 2x4 boxislandsne,run1day,gridcd +#smoke gbox80 2x4 boxislandsne,run1day,gridcd smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day @@ -68,15 +68,15 @@ smoke gbox80 3x3 boxwall,gridc smoke gbox80 2x2 boxwallblock,gridc smoke gbox80 1x1 boxslotcyl,gridc smoke gbox80 2x4 boxnodyn,gridc -smoke gbox80 2x2 boxsymn,run1day,gridc +#smoke gbox80 2x2 boxsymn,run1day,gridc smoke gbox80 4x2 boxsyme,run1day,gridc -smoke gbox80 4x1 boxsymne,run1day,gridc -smoke gbox80 2x2 boxsymn,run1day,kmtislands,gridc +#smoke gbox80 4x1 boxsymne,run1day,gridc +#smoke gbox80 2x2 boxsymn,run1day,kmtislands,gridc smoke gbox80 4x1 boxsyme,run1day,kmtislands,gridc -smoke gbox80 4x2 boxsymne,run1day,kmtislands,gridc -smoke gbox80 8x1 boxislandsn,run1day,gridc +#smoke gbox80 4x2 boxsymne,run1day,kmtislands,gridc +#smoke gbox80 8x1 boxislandsn,run1day,gridc smoke gbox80 4x2 boxislandse,run1day,gridc -smoke gbox80 2x4 boxislandsne,run1day,gridc +#smoke gbox80 2x4 boxislandsne,run1day,gridc smoke gx3 1x1x100x116x1 reprosum,run10day,gridc smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 
549221d37..ff12e3d31 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -201,6 +201,7 @@ setup_nml "``latpnt``", "real", "latitude of (2) diagnostic points", "90.0,-65.0" "``lcdf64``", "logical", "use 64-bit netcdf format", "``.false.``" "``lonpnt``", "real", "longitude of (2) diagnostic points", "0.0,-45.0" + "``memory_stats``", "logical", "turns on memory use diagnostics", "``.false.``" "``month_init``", "integer", "the initial month if not using restart", "1" "``ndtd``", "integer", "number of dynamics/advection/ridging/steps per thermo timestep", "1" "``npt``", "integer", "total number of npt_units to run the model", "99999" diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 921d58a29..ab6c028ea 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -1239,6 +1239,11 @@ namelist, the point associated with ``lonpnt(1)`` and ``latpnt(1)`` is used. ``debug_model`` is normally used when the model aborts and needs to be debugged in detail at a particular (usually failing) grid point. +Memory use diagnostics are controlled by the logical namelist variable ``memory_stats``. +This feature uses an intrinsic query in C defined in **ice\_memusage\_gptl.c**. +Memory diagnostics will be written at the frequency defined by +diagfreq. + Timers are declared and initialized in **ice\_timers.F90**, and the code to be timed is wrapped with calls to *ice\_timer\_start* and *ice\_timer\_stop*. Finally, *ice\_timer\_print* writes the results to From 6ff1a713a55889713077b5125245756f49e74b85 Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Sun, 10 Apr 2022 00:49:31 +0000 Subject: [PATCH 101/109] New deformationsC_T subroutine for more consistent calculation (#89) * Added new subroutine to calc deformations at T using shearTsqr * Renamed deformations_T subroutines and clean up --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 30 ++-- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 135 ++++++++++++++++-- 2 files changed, 140 insertions(+), 25 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index a3e916619..f18e60802 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -113,7 +113,8 @@ subroutine evp (dt) use ice_dyn_evp_1d, only: ice_dyn_evp_1d_copyin, ice_dyn_evp_1d_kernel, & ice_dyn_evp_1d_copyout use ice_dyn_shared, only: evp_algorithm, stack_fields, unstack_fields, & - DminTarea, visc_method, deformations, deformations_T, strain_rates_U, & + DminTarea, visc_method, deformations, deformationsC_T, deformationsCD_T, & + strain_rates_U, & dyn_haloUpdate real (kind=dbl_kind), intent(in) :: &
on last subcycle, save quantities for mechanical redistribution !----------------------------------------------------------------- if (ksub == ndte) then - call deformations_T (nx_block , ny_block , & + + call deformationsC_T (nx_block , ny_block , & icellt (iblk), & indxti (:,iblk), indxtj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & dxN (:,:,iblk), dyE (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & - tarear (:,:,iblk), & + tarear (:,:,iblk), uarea (:,:,iblk), & + shearU (:,:,iblk), & shear (:,:,iblk), divu (:,:,iblk), & rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) + endif enddo !$OMP END PARALLEL DO @@ -996,16 +1000,16 @@ subroutine evp (dt) ! on last subcycle, save quantities for mechanical redistribution !----------------------------------------------------------------- if (ksub == ndte) then - call deformations_T (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - dxN (:,:,iblk), dyE (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - tarear (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) + call deformationsCD_T (nx_block , ny_block , & + icellt (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + tarear (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) endif enddo !$OMP END PARALLEL DO diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index ee33eb09e..bc7f3abb1 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -27,7 +27,7 @@ module ice_dyn_shared principal_stress, init_dyn, dyn_prep1, dyn_prep2, dyn_finish, & seabed_stress_factor_LKD, seabed_stress_factor_prob, & alloc_dyn_shared, & - deformations, deformations_T, & + deformations, deformationsC_T, deformationsCD_T, & strain_rates, strain_rates_T, strain_rates_U, & visc_replpress, & dyn_haloUpdate, & @@ -1733,16 +1733,16 @@ end subroutine deformations ! author: JF Lemieux, ECCC ! Nov 2021 - subroutine deformations_T (nx_block, ny_block, & - icellt, & - indxti, indxtj, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - tarear, & - shear, divu, & - rdg_conv, rdg_shear ) + subroutine deformationsCD_T (nx_block, ny_block, & + icellt, & + indxti, indxtj, & + uvelE, vvelE, & + uvelN, vvelN, & + dxN, dyE, & + dxT, dyT, & + tarear, & + shear, divu, & + rdg_conv, rdg_shear ) use ice_constants, only: p5 @@ -1820,7 +1820,118 @@ subroutine deformations_T (nx_block, ny_block, & enddo ! ij - end subroutine deformations_T + end subroutine deformationsCD_T + + +!======================================================================= +! Compute deformations for mechanical redistribution at T point +! +! author: JF Lemieux, ECCC +! Nov 2021 + + subroutine deformationsC_T (nx_block, ny_block, & + icellt, & + indxti, indxtj, & + uvelE, vvelE, & + uvelN, vvelN, & + dxN, dyE, & + dxT, dyT, & + tarear, uarea, & + shearU, & + shear, divu, & + rdg_conv, rdg_shear ) + + use ice_constants, only: p5 + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block, & ! block dimensions + icellt ! no. of cells where icetmask = 1 + + integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + indxti , & ! 
compressed index in i-direction + indxtj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + uvelE , & ! x-component of velocity (m/s) at the E point + vvelE , & ! y-component of velocity (m/s) at the E point + uvelN , & ! x-component of velocity (m/s) at the N point + vvelN , & ! y-component of velocity (m/s) at the N point + dxN , & ! width of N-cell through the middle (m) + dyE , & ! height of E-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT , & ! height of T-cell through the middle (m) + tarear , & ! 1/tarea + uarea , & ! area of u cell + shearU ! shear at the U point + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & + shear , & ! strain rate II component (1/s) + divu , & ! strain rate I component, velocity divergence (1/s) + rdg_conv , & ! convergence term for ridging (1/s) + rdg_shear ! shear term for ridging (1/s) + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij + + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + divT , & ! divergence at T point + tensionT , & ! tension at T point + shearT , & ! shear at T point + DeltaT ! Delta at T point + + real (kind=dbl_kind) :: & + tmp , & ! useful combination + shearTsqr ! strain rates squared at T point + + character(len=*), parameter :: subname = '(deformationsC_T)' + + !----------------------------------------------------------------- + ! strain rates + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + + call strain_rates_T (nx_block , ny_block , & + icellt , & + indxti(:) , indxtj (:) , & + uvelE (:,:), vvelE (:,:), & + uvelN (:,:), vvelN (:,:), & + dxN (:,:), dyE (:,:), & + dxT (:,:), dyT (:,:), & + divT (:,:), tensionT(:,:), & + shearT(:,:), DeltaT (:,:) ) + + ! DeltaT is calculated by strain_rates_T but replaced by the calculation below. + + do ij = 1, icellt + i = indxti(ij) + j = indxtj(ij) + + !----------------------------------------------------------------- + ! deformations for mechanical redistribution + !----------------------------------------------------------------- + + shearTsqr = (shearU(i ,j )**2 * uarea(i ,j ) & + + shearU(i ,j-1)**2 * uarea(i ,j-1) & + + shearU(i-1,j-1)**2 * uarea(i-1,j-1) & + + shearU(i-1,j )**2 * uarea(i-1,j )) & + / (uarea(i,j)+uarea(i,j-1)+uarea(i-1,j-1)+uarea(i-1,j)) + + DeltaT(i,j) = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearTsqr)) + + divu(i,j) = divT(i,j) * tarear(i,j) + tmp = DeltaT(i,j) * tarear(i,j) + rdg_conv(i,j) = -min(divu(i,j),c0) + rdg_shear(i,j) = p5*(tmp-abs(divu(i,j))) + + ! diagnostic only...maybe we don't want to use shearTsqr here???? + ! shear = sqrt(tension**2 + shearing**2) + shear(i,j) = tarear(i,j)*sqrt( tensionT(i,j)**2 + shearT(i,j)**2 ) + + enddo ! ij + + end subroutine deformationsC_T !======================================================================= !
Compute strain rates From a131940efa8d9a28cc4bc599dd8cffef16833827 Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Thu, 14 Apr 2022 21:03:01 +0000 Subject: [PATCH 102/109] Cgrid documentation (#91) * Started to modify doc and added Kimmritz 2016 and Bouillon 2009 to the references * More modifs to doc and added Kreyscher 2000 reference * Modified also the doc for horizontal transport * Minor modifications following Elizabeth's comments * One more minor modif to documentation --- doc/source/master_list.bib | 31 ++++++ doc/source/science_guide/sg_dynamics.rst | 120 +++++++++++++++------ doc/source/science_guide/sg_horiztrans.rst | 5 +- 3 files changed, 120 insertions(+), 36 deletions(-) diff --git a/doc/source/master_list.bib b/doc/source/master_list.bib index 9b4a2f672..e7363d09f 100644 --- a/doc/source/master_list.bib +++ b/doc/source/master_list.bib @@ -435,6 +435,15 @@ @Article{Dukowicz00 pages = {318-335}, url = {http://dx.doi.org/10.1006/jcph.2000.6465} } +@Article{Kreyscher00, + author = "M. Kreyscher and M. Harder and P. Lemke and G.M. Flato", + title = "{Results of the {S}ea {I}ce {M}odel {I}ntercomparison {P}roject: evaluation of sea ice rheology schemes for use in climate simulations}", + journal = JGR, + year = {2000}, + volume = {105}, + number = {C5}, + pages = {11299-11320} +} @Article{Bitz01, author = "C.M. Bitz and M.M. Holland and M. Eby and A.J. Weaver", title = "{Simulating the ice-thickness distribution in a coupled climate model}", @@ -874,6 +883,17 @@ @Article{Mirin12 pages = {17-30}, url = {http://dx.doi.org/10.1177/1094342011412630} } + +@Article{Bouillon09, + author = "S. Bouillon and M.A Morales Maqueda and V. Legat and T. Fichefet", + title = "{An elastic-viscous-plastic sea ice model formulated on Arakawa B and C grids}", + journal = OM, + year = {2009}, + volume = {27}, + pages = {174-184}, + url = {doi:10.1016/j.ocemod.2009.01.004} +} + @Article{Bouillon13, author = "S. Bouillon and T. Fichefet and V. Legat and G. Madec", title = "{The elastic-viscous-plastic method revisited}", @@ -938,6 +958,17 @@ @Article{Kimmritz15 pages = {90-100}, url = {http://dx.doi.org/10.1016/j.jcp.2015.04.051} } + +@Article{Kimmritz16, + author = "M. Kimmritz and S. Danilov and M. Losch", + title = "{The adaptive EVP method for solving the sea ice momentum equation}", + journal = OM, + year = {2016}, + volume = {101}, + pages = {59-67}, + url = {http://dx.doi.org/10.1016/j.ocemod.2016.03.004} +} + @Article{Roberts15, author = "A.F. Roberts and A.P. Craig and W. Maslowski and R. Osinski and A.K. DuVivier and M. Hughes and B. Nijssen and J.J. Cassano and M. Brunke", title = "{Simulating transient ice-ocean Ekman transport in the Regional Arctic System Model and Community Earth System Model}", diff --git a/doc/source/science_guide/sg_dynamics.rst b/doc/source/science_guide/sg_dynamics.rst index b627f896d..98ddb64cf 100644 --- a/doc/source/science_guide/sg_dynamics.rst +++ b/doc/source/science_guide/sg_dynamics.rst @@ -6,10 +6,9 @@ Dynamics ======== There are different approaches in the CICE code for representing sea ice -rheology and for solving the sea ice momentum equation. The viscous-plastic (VP) originally developed by :cite:`Hibler79`, -the elastic-viscous-plastic (EVP) :cite:`Hunke97` model represents a modification of the -standard viscous-plastic (VP) model for sea ice dynamics. 
The elastic-anisotropic-plastic (EAP) model, -on the other hand, explicitly accounts for the observed sub-continuum +rheology and for solving the sea ice momentum equation: the viscous-plastic (VP) rheology :cite:`Hibler79` with an implicit method, +the elastic-viscous-plastic (EVP) :cite:`Hunke97` model which represents a modification of the +VP model and the elastic-anisotropic-plastic (EAP) model which explicitly accounts for the sub-continuum anisotropy of the sea ice cover :cite:`Wilchinsky06,Weiss09`. If ``kdyn`` = 1 in the namelist then the EVP model is used (module **ice\_dyn\_evp.F90**), while ``kdyn`` = 2 is associated with the EAP @@ -46,6 +45,8 @@ FGMRES :cite:`Saad93` as the linear solver and GMRES as the preconditioner. Note that the VP solver has not yet been tested on the ``tx1`` grid or with threading enabled. +The EVP, EAP and VP approaches are all available with the B grid. However, at the moment, the EVP model is the only possibility with the C grid. + Here we summarize the equations and direct the reader to the above references for details. @@ -93,18 +94,21 @@ For clarity, the two components of Equation :eq:`vpmom` are -C_bv-mfu - mg{\partial H_\circ\over\partial y}. \end{aligned} :label: momsys +On the B grid, the equations above are solved at the U point for the collocated u and v components (see figure :ref:`fig-Bgrid`). On the C grid, however, the two components are not collocated: the u component is at the E point while the v component is at the N point. -A bilinear discretization is used for the stress terms +The B grid spatial discretization is based on a variational method described in :cite:`Hunke97,Hunke02`. A bilinear discretization is used for the stress terms :math:`\partial\sigma_{ij}/\partial x_j`, which enables the discrete equations to be derived from the continuous equations written in curvilinear coordinates. In this manner, metric terms associated with the curvature of the grid are incorporated into the discretization explicitly. Details pertaining to -the spatial discretization are found in :cite:`Hunke02`. +the spatial discretization are found in :cite:`Hunke02`. + +On the C grid, however, a finite difference approach is used for the spatial discretization. The C grid discretization is based on :cite:`Bouillon09, Bouillon13, Kimmritz16`. .. _evp-momentum: -Elastic-Viscous-Plastic +EVP time discretization and solution ~~~~~~~~~~~~~~~~~~~~~~~ The momentum equation is discretized in time as follows, for the classic @@ -118,24 +122,23 @@ variables used in the code. ..
math:: - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{k+1} + \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{l} + \underbrace{\left({m\over\Delta t_e}+{\tt vrel} \cos\theta + C_b \right)}_{\tt cca}v^{k+1} = &\underbrace{{\partial\sigma_{2j}^{k+1}\over\partial x_j}}_{\tt strinty} + \underbrace{\tau_{ay} - mg{\partial H_\circ\over\partial y} }_{\tt forcey} \\ &+ {\tt vrel}\underbrace{\left(U_w\sin\theta+V_w\cos\theta\right)}_{\tt watery} + {m\over\Delta t_e}v^k, :label: vmom -and :math:`{\tt vrel}\ \cdot\ {\tt waterx(y)}= {\tt taux(y)}`. +where :math:`{\tt vrel}\ \cdot\ {\tt waterx(y)}= {\tt taux(y)}` and the definitions of :math:`u^{l}` and :math:`v^{l}` vary depending on the grid. -We solve this system of equations analytically for :math:`u^{k+1}` and -:math:`v^{k+1}`. Define +As :math:`u` and :math:`v` are collocated on the B grid, :math:`u^{l}` and :math:`v^{l}` are respectively :math:`u^{k+1}` and :math:`v^{k+1}` such that this system of equations can be solved as follows. Define .. math:: \hat{u} = F_u + \tau_{ax} - mg{\partial H_\circ\over\partial x} + {\tt vrel} \left(U_w\cos\theta - V_w\sin\theta\right) + {m\over\Delta t_e}u^k @@ -169,9 +172,18 @@ where b = mf + {\tt vrel}\sin\theta. :label: cevpb +Note that the time discretization and solution method for the EAP is exactly the same as for the B grid EVP. More details on the EAP model are given in Section :ref:`stress-eap`. + +However, on the C grid, :math:`u` and :math:`v` are not collocated. When solving the :math:`u` momentum equation for :math:`u^{k+1}` (at the E point), :math:`v^{l}=v^{k}_{int}` where :math:`v^{k}_{int}` is :math:`v^{k}` from the surrounding N points interpolated to the E point. The same approach is used for the :math:`v` momentum equation. With this explicit treatment of the off-diagonal terms :cite:`Kimmritz16`, :math:`u^{k+1}` and :math:`v^{k+1}` are obtained by solving + +.. math:: + \begin{aligned} + u^{k+1} = {\hat{u} + b v^{k}_{int} \over a} \\ + v^{k+1} = {\hat{v} - b u^{k}_{int} \over a}. \end{aligned} + .. _vp-momentum: -Viscous-Plastic +Implicit (VP) time discretization and solution ~~~~~~~~~~~~~~~ In the VP approach, equation :eq:`momsys` is discretized implicitly using a Backward Euler approach, @@ -261,40 +273,57 @@ This parameterization for the seabed stress is described in :cite:`Lemieux16`. It assumes that the largest keel draft varies linearly with the mean thickness in a grid cell (i.e. sea ice volume). The :math:`C_b` coefficients are expressed as .. math:: - C_b= k_2 \max [0,(h_u - h_{cu})] e^{-\alpha_b * (1 - a_u)} (\sqrt{u^2+v^2}+u_0)^{-1}, \\ + C_b= k_2 \max [0,(h - h_{c})] e^{-\alpha_b * (1 - a)} (\sqrt{u^2+v^2}+u_0)^{-1}, \\ :label: Cb where :math:`k_2` determines the maximum seabed stress that can be sustained by the grounded parameterized ridge(s), :math:`u_0` is a small residual velocity and :math:`\alpha_b` is a parameter to ensure that the seabed stress quickly drops when -the ice concentration is smaller than 1. In the code, :math:`k_2 \max [0,(h_u - h_{cu})] e^{-\alpha_b * (1 - a_u)}` is defined as -:math:`T_b`. The quantities :math:`h_u`, :math:`a_{u}` and :math:`h_{cu}` are calculated at -the 'u' point based on local ice conditions (surrounding tracer points). They are respectively given by +the ice concentration is smaller than 1. In the code, :math:`k_2 \max [0,(h - h_{c})] e^{-\alpha_b * (1 - a)}` is defined as +:math:`T_b`. 
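In code form, and anticipating the B grid quantities :math:`h_u`, :math:`a_u` and :math:`h_{cu}` defined just below, the LKD seabed stress factor reduces to a few lines. The following Fortran fragment is only an illustrative sketch with hypothetical local names (``vice``, ``aice``, ``hwater``), not the exact CICE routine::

   hu  = max(vice(i,j), vice(i+1,j), vice(i,j+1), vice(i+1,j+1))
   au  = max(aice(i,j), aice(i+1,j), aice(i,j+1), aice(i+1,j+1))
   hwu = min(hwater(i,j), hwater(i+1,j), hwater(i,j+1), hwater(i+1,j+1))
   hcu = au * hwu / k1
   Tbu = k2 * max(c0, hu - hcu) * exp(-alphab * (c1 - au))
   Cb  = Tbu / (sqrt(uvel(i,j)**2 + vvel(i,j)**2) + u0)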
+ +On the B grid, the quantities :math:`h`, :math:`a` and :math:`h_{c}` are calculated at +the U point and are referred to as :math:`h_u`, :math:`a_{u}` and :math:`h_{cu}`. They are respectively given by .. math:: h_u=\max[v_i(i,j),v_i(i+1,j),v_i(i,j+1),v_i(i+1,j+1)], \\ :label: hu .. math:: - a_u=\max[a_i(i,j),a_i(i+1,j),a_i(i,j+1),a_i(i+1,j+1)]. \\ + a_u=\max[a_i(i,j),a_i(i+1,j),a_i(i,j+1),a_i(i+1,j+1)], \\ :label: au .. math:: h_{cu}=a_u h_{wu} / k_1, \\ :label: hcu -where the :math:`a_i` and :math:`v_i` are the total ice concentrations and ice volumes around the :math:`u` point :math:`i,j` and +where the :math:`a_i` and :math:`v_i` are the total ice concentrations and ice volumes around the U point :math:`i,j` and :math:`k_1` is a parameter that defines the critical ice thickness :math:`h_{cu}` at which the parameterized ridge(s) reaches the seafloor for a water depth :math:`h_{wu}=\min[h_w(i,j),h_w(i+1,j),h_w(i,j+1),h_w(i+1,j+1)]`. Given the formulation of :math:`C_b` in equation :eq:`Cb`, the seabed stress components are non-zero only when :math:`h_u > h_{cu}`. +As :math:`u` and :math:`v` are not collocated on the C grid, :math:`T_b` is calculated at E and N points. For example, at the E point, :math:`h_e`, :math:`a_{e}` and :math:`h_{ce}` are respectively + +.. math:: + h_e=\max[v_i(i,j),v_i(i+1,j)], \\ + :label: he + +.. math:: + a_e=\max[a_i(i,j),a_i(i+1,j)], \\ + :label: ae + +.. math:: + h_{ce}=a_e h_{we} / k_1, \\ + :label: hce + +where :math:`h_{we}=\min[h_w(i,j),h_w(i+1,j)]`. Similar calculations are done at the N points. + +To prevent unrealistic grounding, :math:`T_b` is set to zero when :math:`h_{wu}` +is larger than 30 m (same idea on the C grid depending on :math:`h_{we}` and :math:`h_{wn}`). This maximum value is chosen based on observations of large keels in the Arctic Ocean :cite:`Amundrud04`. + The maximum seabed stress depends on the weight of the ridge above hydrostatic balance and the value of :math:`k_2`. It is, however, the parameter :math:`k_1` that has the most notable impact on the simulated extent of landfast ice. The value of :math:`k_1` can be changed at runtime using the namelist variable ``k1``. -To prevent unrealistic grounding, :math:`T_b` is set to zero when :math:`h_{wu}` -is larger than 30 m. This maximum value is chosen based on observations of large -keels in the Arctic Ocean :cite:`Amundrud04`. - Seabed stress based on probabilistic approach ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -336,22 +365,34 @@ and then obtains :math:`T_{bt}` by multiplying :math:`T_{bt}^*` by :math:`e^{-\a To calculate :math:`T_{bt}^*` in equation :eq:`Tbt`, :math:`f(x)` and :math:`b(y)` are discretized using many small categories (100). :math:`f(x)` is discretized between 0 and 50 m while :math:`b(y)` is truncated at plus and minus three :math:`\sigma_b`. :math:`f(x)` is also modified by setting it to zero after a certain percentile of the log-normal distribution. This percentile, which is currently set to 99.7%, notably affects the simulation of landfast ice and is used as a tuning parameter. Its impact is similar to the one of the parameter :math:`k_1` for the LKD method. -:math:`T_b` at the 'u' point is calculated from the 't' point values around it according to +On the B grid, :math:`T_b` at the U point is calculated from the T point values around it according to .. math:: - T_b=\max[T_{bt}(i,j),T_{bt}(i+1,j),T_{bt}(i,j+1),T_{bt}(i+1,j+1)]. \\ + T_{bu}=\max[T_{bt}(i,j),T_{bt}(i+1,j),T_{bt}(i,j+1),T_{bt}(i+1,j+1)]. 
\\ :label: Tb Following again the LKD method, the seabed stress coefficients are finally expressed as .. math:: - C_b= T_b (\sqrt{u^2+v^2}+u_0)^{-1}, \\ + C_b= T_{bu} (\sqrt{u^2+v^2}+u_0)^{-1}. \\ :label: Cb2 +On the C grid, :math:`T_b` needs to be calculated at the E and N points. :math:`T_{be}` and :math:`T_{bn}` are respectively given by + +.. math:: + T_{be}=\max[T_{bt}(i,j),T_{bt}(i+1,j)], \\ + :label: Tbe + +.. math:: + T_{bn}=\max[T_{bt}(i,j),T_{bt}(i,j+1)]. \\ + :label: Tbn + +The :math:`C_{b}` coefficients are different at the E and N points and are respectively :math:`T_{be} (\sqrt{u^2+v^2_{int}}+u_0)^{-1}` and :math:`T_{bn} (\sqrt{u^2_{int} + v^2}+u_0)^{-1}` where :math:`v_{int}` (:math:`u_{int}`) is :math:`v` (:math:`u`) interpolated to the E (N) point. + .. _internal-stress: ******************************** -Internal stress +Internal stress and strain rate tensors ******************************** For convenience we formulate the stress tensor :math:`\bf \sigma` in @@ -398,19 +439,24 @@ An elliptical yield curve is used, with the viscosities given by .. math:: \zeta = {P(1+k_t)\over 2\Delta}, + :label: zeta .. math:: \eta = e_g^{-2} \zeta, + :label: eta where .. math:: \Delta = \left[D_D^2 + {e_f^2\over e_g^4}\left(D_T^2 + D_S^2\right)\right]^{1/2}. + :label: Delta + +When the deformation :math:`\Delta` tends toward zero, the viscosities tend toward infinity. To avoid this issue, :math:`\Delta` needs to be limited and is replaced by :math:`\Delta^*` in equation :eq:`zeta`. Two methods for limiting :math:`\Delta` (or for capping the viscosities) are available in the code. If the namelist parameter ``capping`` is set to 1., :math:`\Delta^*=\max(\Delta, \Delta_{min})` :cite:`Hibler79` while with ``capping`` set to 0., the smoother formulation :math:`\Delta^*=(\Delta + \Delta_{min})` of :cite:`Kreyscher00` is used. The ice strength :math:`P` is a function of the ice thickness distribution as described in the `Icepack Documentation `_. - -Two modifications to the standard VP rheology of :cite:`Hibler79` are available. + +Two other modifications to the standard VP rheology of :cite:`Hibler79` are available. First, following the approach of :cite:`Konig10` (see also :cite:`Lemieux16`), the elliptical yield curve can be modified such that the ice has isotropic tensile strength. The tensile strength is expressed as a fraction of :math:`P`, that is :math:`k_t P` @@ -424,7 +470,7 @@ can be set in the namelist. The plastic potential can lead to more realistic fra By default, the namelist parameters are set to :math:`e_f=e_g=2` and :math:`k_t=0` which correspond to the standard VP rheology. -There are four options in the code for solving the sea ice momentum equation with a VP formulation: the standard EVP approach, a 1d EVP solver, the revised EVP approach and an implicit Picard solver. The modifications to the yield curve and to the flow rule described above are available for these four different solution methods. +There are four options in the code for solving the sea ice momentum equation with a VP formulation: the standard EVP approach, a 1d EVP solver, the revised EVP approach and an implicit Picard solver. The choice of the capping method for the viscosities and the modifications to the yield curve and to the flow rule described above are available for these four different solution methods. Note that only the EVP and revised EVP methods are currently available if one chooses the C grid. .. _stress-evp: @@ -488,6 +534,10 @@ the viscosity terms in the subcycling.
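The ``capping`` choice described above amounts to a one-line branch. The following is a minimal Fortran sketch, with illustrative names only (not CICE source), of how the limited :math:`\Delta^*` feeds equation :eq:`zeta`:

.. code-block:: fortran

   ! Sketch only (illustrative names, not CICE source): bulk viscosity
   ! with the two Delta-limiting options described in the text.
   real function zeta_capped(P, kt, Delta, Delta_min, capping)
      real, intent(in) :: P, kt              ! ice strength and tensile fraction k_t
      real, intent(in) :: Delta, Delta_min   ! deformation and its lower bound
      real, intent(in) :: capping            ! namelist parameter, 1. or 0.
      real :: Deltastar
      if (capping > 0.5) then
         Deltastar = max(Delta, Delta_min)   ! capping = 1.: max-style limit (Hibler 1979)
      else
         Deltastar = Delta + Delta_min       ! capping = 0.: smoother form (Kreyscher et al. 2000)
      endif
      zeta_capped = 0.5 * P * (1.0 + kt) / Deltastar  ! equation (zeta) with Delta -> Delta*
   end function zeta_capped

Either branch guarantees :math:`\Delta^* \geq \Delta_{min} > 0`, so the viscosities remain bounded as the deformation vanishes; :math:`\eta` then follows from equation :eq:`eta`.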
Choices of the parameters used to define :math:`T` and :math:`\Delta t_e` are discussed in Sections :ref:`revp` and :ref:`parameters`. +On the B grid, the stresses :math:`\sigma_{1}`, :math:`\sigma_{2}` and :math:`\sigma_{12}` are collocated at the U point. To calculate these stresses, the viscosities :math:`\zeta` and :math:`\eta` and the replacement pressure :math:`P_R` are also defined at the U point. + +However, on the C grid, :math:`\sigma_{1}` and :math:`\sigma_{2}` are collocated at the T point while :math:`\sigma_{12}` is defined at the U point. During a subcycling step, :math:`\zeta`, :math:`\eta` and :math:`P_R` are first calculated at the T point. To do so, :math:`\Delta` given by equation :eq:`Delta` is calculated following the approach of :cite:`Bouillon13` (see also :cite:`Kimmritz16` for details). With this approach, :math:`D_S^2` at the T point is obtained by calculating :math:`D_S^2` at the U points and interpolating these values to the T point. As :math:`\sigma_{12}` is calculated at the U point, :math:`\eta` also needs to be computed at these locations. If ``visc_method`` in the namelist is set to ``avg_zeta`` (the default value), :math:`\eta` at the U point is obtained by interpolating T point values to this location. This corresponds to the approach used by :cite:`Bouillon13` and the one associated with the C1 configuration of :cite:`Kimmritz16`. On the other hand, if ``visc_method = avg_strength``, the strength :math:`P` calculated at T points is interpolated to the U point and :math:`\Delta` is calculated at the U point in order to obtain :math:`\eta` following equations :eq:`zeta` and :eq:`eta`. This latter approach is the one used in the C2 configuration of :cite:`Kimmritz16`. + .. _evp1d: 1d EVP solver ~~~~~~~~~~~~~ @@ -508,7 +558,7 @@ implicit solvers and there is an additional term for the pseudo-time iteration. .. math:: {\beta^*(u^{k+1}-u^k)\over\Delta t_e} + {m(u^{k+1}-u^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)} u^{k+1} - - {\left(mf+{\tt vrel}\sin\theta\right)} v^{k+1} + - {\left(mf+{\tt vrel}\sin\theta\right)} v^{l} = {{\partial\sigma_{1j}^{k+1}\over\partial x_j}} + {\tau_{ax} - mg{\partial H_\circ\over\partial x} } + {\tt vrel} {\left(U_w\cos\theta-V_w\sin\theta\right)}, :label: umomr .. math:: {\beta^*(v^{k+1}-v^k)\over\Delta t_e} + {m(v^{k+1}-v^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)}v^{k+1} - + {\left(mf+{\tt vrel}\sin\theta\right)} u^{k+1} + + {\left(mf+{\tt vrel}\sin\theta\right)} u^{l} = {{\partial\sigma_{2j}^{k+1}\over\partial x_j}} + {\tau_{ay} - mg{\partial H_\circ\over\partial y} } + {\tt vrel}{\left(U_w\sin\theta+V_w\cos\theta\right)}, :label: vmomr With :math:`\beta=\beta^* \Delta t \left( m \Delta t_e \right)^{-1}` :cite:`Bouillon13`, these equations can be written as .. math:: \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta\ + C_b \right)}_{\tt cca} u^{k+1} - - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb}v^{k+1} + - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb}v^{l} = \underbrace{{\partial\sigma_{1j}^{k+1}\over\partial x_j}}_{\tt strintx} + \underbrace{\tau_{ax} - mg{\partial H_\circ\over\partial x} }_{\tt forcex} + {\tt vrel}\underbrace{\left(U_w\cos\theta-V_w\sin\theta\right)}_{\tt waterx} + {m\over\Delta t}(\beta u^k + u^n), :label: umomr2 ..
math:: - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{k+1} + \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{l} + \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta + C_b \right)}_{\tt cca}v^{k+1} = \underbrace{{\partial\sigma_{2j}^{k+1}\over\partial x_j}}_{\tt strinty} + \underbrace{\tau_{ay} - mg{\partial H_\circ\over\partial y} }_{\tt forcey} + {\tt vrel}\underbrace{\left(U_w\sin\theta+V_w\cos\theta\right)}_{\tt watery} + {m\over\Delta t}(\beta v^k + v^n), :label: vmomr2 -At this point, the solutions :math:`u^{k+1}` and :math:`v^{k+1}` are obtained in the same manner as for the standard EVP approach (see equations :eq:`cevpuhat` to :eq:`cevpb`). +At this point, the solutions :math:`u^{k+1}` and :math:`v^{k+1}` for the B or the C grids are obtained in the same manner as for the standard EVP approach (see Section :ref:`evp-momentum` for details). Introducing another numerical parameter :math:`\alpha=2T \Delta t_e ^{-1}` :cite:`Bouillon13`, the stress equations in :eq:`sigdisc` become diff --git a/doc/source/science_guide/sg_horiztrans.rst b/doc/source/science_guide/sg_horiztrans.rst index bafb4c72f..f85f13ee5 100644 --- a/doc/source/science_guide/sg_horiztrans.rst +++ b/doc/source/science_guide/sg_horiztrans.rst @@ -35,7 +35,10 @@ versions but have not yet been implemented. Two transport schemes are available: upwind and the incremental remapping scheme of :cite:`Dukowicz00` as modified for sea ice by -:cite:`Lipscomb04`. The remapping scheme has several desirable features: +:cite:`Lipscomb04`. The upwind scheme is naturally suited for a C grid discretization. As such, the C grid velocity components (i.e. :math:`uvelE=u` at the E point and :math:`vvelN=v` at the N point) are directly passed to the upwind transport scheme. On the other hand, if the B grid is used, :math:`uvel` and :math:`vvel` (respectively :math:`u` and :math:`v` at the U point) are interpolated to the E and N points such that the upwind advection can be performed. Conversely, as the remapping scheme was originally developed for B grid applications, :math:`uvel` and :math:`vvel` are directly used for the advection. If the remapping scheme is used for the C grid, :math:`uvelE` and :math:`vvelN` are first interpolated to the U points before performing the advection. + + +The remapping scheme has several desirable features: - It conserves the quantity being transported (area, volume, or energy). 
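Since both transport schemes are available on both grids, the staggering conversions described above deserve a concrete illustration. A hypothetical Fortran sketch follows; the two-point averages are an assumption made here for illustration (the actual CICE interpolation weights may differ), and the array names follow the text (``uvel``/``vvel`` at U points, ``uvelE``/``vvelN`` at E and N points):

.. code-block:: fortran

   ! Sketch only: average B-grid corner (U point, NE corner) velocities to
   ! the edge points used by the upwind scheme.  Equal weights are an
   ! assumption for illustration, not necessarily the CICE choice.
   subroutine corners_to_edges(ilo, ihi, jlo, jhi, uvel, vvel, uvelE, vvelN)
      integer, intent(in) :: ilo, ihi, jlo, jhi
      real, intent(in)  :: uvel(ilo-1:ihi,jlo-1:jhi)  ! u at NE corners
      real, intent(in)  :: vvel(ilo-1:ihi,jlo-1:jhi)  ! v at NE corners
      real, intent(out) :: uvelE(ilo:ihi,jlo:jhi)     ! u at east-face midpoints
      real, intent(out) :: vvelN(ilo:ihi,jlo:jhi)     ! v at north-face midpoints
      integer :: i, j
      do j = jlo, jhi
         do i = ilo, ihi
            uvelE(i,j) = 0.5 * (uvel(i,j) + uvel(i,j-1))  ! corners above/below the E point
            vvelN(i,j) = 0.5 * (vvel(i,j) + vvel(i-1,j))  ! corners either side of the N point
         enddo
      enddo
   end subroutine corners_to_edges

The reverse direction (E and N values brought back to U points for remapping on the C grid) is the analogous two-point average in the opposite sense.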
From 9b7c5de05c019c63f7e8338bfcc3cbbabab2811d Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Thu, 14 Apr 2022 14:03:15 -0700 Subject: [PATCH 103/109] Update Orion port, working again (#92) Fix small bug in ice_init.F90 for error in ice_data_type Review and update MACH/ENV info Fix rsnw_mlt documentation --- cicecore/cicedynB/general/ice_init.F90 | 2 +- .../scripts/machines/env.banting_gnu | 2 ++ .../scripts/machines/env.banting_intel | 2 +- .../scripts/machines/env.cesium_intel | 2 ++ .../scripts/machines/env.cheyenne_gnu | 2 ++ .../scripts/machines/env.cheyenne_intel | 2 ++ .../scripts/machines/env.cheyenne_pgi | 2 ++ configuration/scripts/machines/env.cori_intel | 2 +- configuration/scripts/machines/env.daley_gnu | 2 ++ configuration/scripts/machines/env.fram_intel | 2 ++ configuration/scripts/machines/env.gaea_intel | 2 +- configuration/scripts/machines/env.hera_intel | 2 +- configuration/scripts/machines/env.izumi_gnu | 2 +- .../scripts/machines/env.izumi_intel | 2 +- configuration/scripts/machines/env.izumi_nag | 2 +- configuration/scripts/machines/env.izumi_pgi | 2 +- .../scripts/machines/env.millikan_intel | 2 ++ .../scripts/machines/env.mustang_intel18 | 2 +- .../scripts/machines/env.mustang_intel20 | 2 +- .../scripts/machines/env.narwhal_aocc | 4 +-- .../scripts/machines/env.orion_intel | 31 +++++++++---------- doc/source/cice_index.rst | 2 +- 22 files changed, 45 insertions(+), 30 deletions(-) diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 915d51640..6276770bc 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -3134,7 +3134,7 @@ subroutine set_state_var (nx_block, ny_block, & else - call abort_ice(subname//'ERROR: ice_data_type setting = '//trim(ice_data_conc), & + call abort_ice(subname//'ERROR: ice_data_type setting = '//trim(ice_data_type), & file=__FILE__, line=__LINE__) endif ! ice_data_type diff --git a/configuration/scripts/machines/env.banting_gnu b/configuration/scripts/machines/env.banting_gnu index 997816a9d..fed181121 100755 --- a/configuration/scripts/machines/env.banting_gnu +++ b/configuration/scripts/machines/env.banting_gnu @@ -25,7 +25,9 @@ setenv HDF5_USE_FILE_LOCKING FALSE # necessary since data is on an NFS filesyste endif setenv ICE_MACHINE_MACHNAME banting +setenv ICE_MACHINE_MACHINFO "Cray XC50, Intel Xeon Gold 6148 (Skylake), Aries Interconnect" setenv ICE_MACHINE_ENVNAME gnu +setenv ICE_MACHINE_ENVINFO "gnu??, cray-mpich??, netcdf??" 
setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/banting/cice/runs setenv ICE_MACHINE_INPUTDATA /home/ords/cmdd/cmde/sice500/ diff --git a/configuration/scripts/machines/env.banting_intel b/configuration/scripts/machines/env.banting_intel index 0beeb2618..5273c5d2e 100755 --- a/configuration/scripts/machines/env.banting_intel +++ b/configuration/scripts/machines/env.banting_intel @@ -20,7 +20,7 @@ setenv HDF5_USE_FILE_LOCKING FALSE # necessary since data is on an NFS filesyste endif setenv ICE_MACHINE_MACHNAME banting -setenv ICE_MACHINE_MACHINFO "Cray XC50, Intel Xeon Gold 6148 (Skylake)" +setenv ICE_MACHINE_MACHINFO "Cray XC50, Intel Xeon Gold 6148 (Skylake), Aries Interconnect" setenv ICE_MACHINE_ENVNAME intel setenv ICE_MACHINE_ENVINFO "Intel 19.0.3.199, cray-mpich/7.7.7, cray-netcdf/4.6.1.3" setenv ICE_MACHINE_MAKE make diff --git a/configuration/scripts/machines/env.cesium_intel b/configuration/scripts/machines/env.cesium_intel index 8dabe1645..329bdf32d 100755 --- a/configuration/scripts/machines/env.cesium_intel +++ b/configuration/scripts/machines/env.cesium_intel @@ -10,7 +10,9 @@ source $ssmuse -d /fs/ssm/hpco/tmp/eccc/201402/04/intel-2016.1.150 # netcdf (and #setenv OMP_STACKSIZE 64M setenv ICE_MACHINE_MACHNAME cesium +setenv ICE_MACHINE_MACHINFO "cesium" setenv ICE_MACHINE_ENVNAME intel +setenv ICE_MACHINE_ENVINFO "intel 2016.1.156, openmpi 1.6.5" setenv ICE_MACHINE_MAKE colormake-short setenv ICE_MACHINE_WKDIR /users/dor/afsg/phb/local/CICEDIRS/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /users/dor/afsg/phb/local/FORCING diff --git a/configuration/scripts/machines/env.cheyenne_gnu b/configuration/scripts/machines/env.cheyenne_gnu index f580cc354..c962c35f3 100755 --- a/configuration/scripts/machines/env.cheyenne_gnu +++ b/configuration/scripts/machines/env.cheyenne_gnu @@ -42,6 +42,8 @@ limit coredumpsize unlimited limit stacksize unlimited # May be needed for OpenMP memory #setenv OMP_STACKSIZE 64M +# OMP runtime diagnostics +#setenv OMP_DISPLAY_ENV TRUE setenv ICE_MACHINE_MACHNAME cheyenne setenv ICE_MACHINE_MACHINFO "SGI ICE XA Xeon E5-2697V4 Broadwell" diff --git a/configuration/scripts/machines/env.cheyenne_intel b/configuration/scripts/machines/env.cheyenne_intel index ef12df914..89a8920b6 100755 --- a/configuration/scripts/machines/env.cheyenne_intel +++ b/configuration/scripts/machines/env.cheyenne_intel @@ -42,6 +42,8 @@ limit coredumpsize unlimited limit stacksize unlimited # May be needed for OpenMP memory #setenv OMP_STACKSIZE 64M +# OMP runtime diagnostics +#setenv OMP_DISPLAY_ENV TRUE setenv ICE_MACHINE_MACHNAME cheyenne setenv ICE_MACHINE_MACHINFO "SGI ICE XA Xeon E5-2697V4 Broadwell" diff --git a/configuration/scripts/machines/env.cheyenne_pgi b/configuration/scripts/machines/env.cheyenne_pgi index cbd486c29..5caa9d992 100755 --- a/configuration/scripts/machines/env.cheyenne_pgi +++ b/configuration/scripts/machines/env.cheyenne_pgi @@ -42,6 +42,8 @@ limit coredumpsize unlimited limit stacksize unlimited # May be needed for OpenMP memory #setenv OMP_STACKSIZE 64M +# OMP runtime diagnostics +#setenv OMP_DISPLAY_ENV TRUE setenv ICE_MACHINE_MACHNAME cheyenne setenv ICE_MACHINE_MACHINFO "SGI ICE XA Xeon E5-2697V4 Broadwell" diff --git a/configuration/scripts/machines/env.cori_intel b/configuration/scripts/machines/env.cori_intel index 734b2edf3..cab93246b 100755 --- a/configuration/scripts/machines/env.cori_intel +++ b/configuration/scripts/machines/env.cori_intel @@ -48,7 +48,7 @@ endif setenv ICE_MACHINE_MACHNAME cori setenv ICE_MACHINE_MACHINFO "Cray XC40 
Xeon E5-2698v3 Haswell" setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "ifort 19.0.3.199 20190206, cray-mpich/7.7.6, netcdf/4.6.3.2" +setenv ICE_MACHINE_ENVINFO "ifort 19.0.3.199 20190206, gcc/8.2.0, cray-mpich/7.7.6, netcdf/4.6.3.2" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $SCRATCH/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /project/projectdirs/ccsm1/cice-consortium/ diff --git a/configuration/scripts/machines/env.daley_gnu b/configuration/scripts/machines/env.daley_gnu index 25b438e8e..24c2153e3 100755 --- a/configuration/scripts/machines/env.daley_gnu +++ b/configuration/scripts/machines/env.daley_gnu @@ -25,7 +25,9 @@ setenv HDF5_USE_FILE_LOCKING FALSE # necessary since data is on an NFS filesyste endif setenv ICE_MACHINE_MACHNAME daley +setenv ICE_MACHINE_MACHINFO "Cray XC50, Intel Xeon Gold 6148 (Skylake)" setenv ICE_MACHINE_ENVNAME gnu +setenv ICE_MACHINE_ENVINFO "gnu??, cray-mpich??, netcdf??" setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/daley/cice/runs setenv ICE_MACHINE_INPUTDATA /home/ords/cmdd/cmde/sice500/ diff --git a/configuration/scripts/machines/env.fram_intel b/configuration/scripts/machines/env.fram_intel index 98edb3a66..35de927e7 100755 --- a/configuration/scripts/machines/env.fram_intel +++ b/configuration/scripts/machines/env.fram_intel @@ -11,7 +11,9 @@ source $ssmuse -d /fs/ssm/hpco/tmp/eccc/201402/04/intel-2016.1.150 # netcdf (and #setenv OMP_STACKSIZE 64M setenv ICE_MACHINE_MACHNAME fram +setenv ICE_MACHINE_MACHINFO "fram" setenv ICE_MACHINE_ENVNAME intel +setenv ICE_MACHINE_ENVINFO "intel 2016.1.156, openmpi 1.6.5, netcdf" setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR /home/dormrb01/zephyr4/armn/jfl/local1/CICEDIRS/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /users/dor/armn/jfl/local1/FORCING diff --git a/configuration/scripts/machines/env.gaea_intel b/configuration/scripts/machines/env.gaea_intel index e204c6fff..3601bfcfb 100755 --- a/configuration/scripts/machines/env.gaea_intel +++ b/configuration/scripts/machines/env.gaea_intel @@ -22,7 +22,7 @@ module list endif setenv ICE_MACHINE_MACHNAME gaea -setenv ICE_MACHINE_MACHINFO "Cray Intel SkyLake 6148" +setenv ICE_MACHINE_MACHINFO "Cray XC40 Intel Haswell/Broadwell 2.3GHz, Gemini Interconnect" setenv ICE_MACHINE_ENVNAME intel setenv ICE_MACHINE_ENVINFO "ifort 18.0.5 20180823, cray-mpich, cray-netcdf" setenv ICE_MACHINE_MAKE gmake diff --git a/configuration/scripts/machines/env.hera_intel b/configuration/scripts/machines/env.hera_intel index a9cf59516..937992639 100755 --- a/configuration/scripts/machines/env.hera_intel +++ b/configuration/scripts/machines/env.hera_intel @@ -21,7 +21,7 @@ module load netcdf/4.7.0 endif setenv ICE_MACHINE_MACHNAME hera -setenv ICE_MACHINE_MACHINFO "Cray Intel SkyLake 6148" +setenv ICE_MACHINE_MACHINFO "Cray CS500 Intel SkyLake 2.4GHz, Infiniband HDR" setenv ICE_MACHINE_ENVNAME intel setenv ICE_MACHINE_ENVINFO "ifort 18.0.5 20180823, intelmpi/2018.0.4, netcdf/4.7.0" setenv ICE_MACHINE_MAKE gmake diff --git a/configuration/scripts/machines/env.izumi_gnu b/configuration/scripts/machines/env.izumi_gnu index 873324e5d..5318b5930 100755 --- a/configuration/scripts/machines/env.izumi_gnu +++ b/configuration/scripts/machines/env.izumi_gnu @@ -17,7 +17,7 @@ setenv OMP_STACKSIZE 64M endif setenv ICE_MACHINE_MACHNAME izumi -setenv ICE_MACHINE_MACHINFO "Linux Cluster" +setenv ICE_MACHINE_MACHINFO "Intel Xeon Gold 5118 2.3GHz" setenv ICE_MACHINE_ENVNAME gnu setenv ICE_MACHINE_ENVINFO "GNU Fortran (GCC) 9.3.0, mvapich2-2.3.3, netcdf4.7.4" setenv 
ICE_MACHINE_MAKE gmake diff --git a/configuration/scripts/machines/env.izumi_intel b/configuration/scripts/machines/env.izumi_intel index 33baa096e..773dc0572 100755 --- a/configuration/scripts/machines/env.izumi_intel +++ b/configuration/scripts/machines/env.izumi_intel @@ -17,7 +17,7 @@ setenv OMP_STACKSIZE 64M endif setenv ICE_MACHINE_MACHNAME izumi -setenv ICE_MACHINE_MACHINFO "Linux Cluster" +setenv ICE_MACHINE_MACHINFO "Intel Xeon Gold 5118 2.3GHz" setenv ICE_MACHINE_ENVNAME intel setenv ICE_MACHINE_ENVINFO "ifort 19.1.0.166 20191121, mvapich2-2.3.3, netcdf4.7.4" setenv ICE_MACHINE_MAKE gmake diff --git a/configuration/scripts/machines/env.izumi_nag b/configuration/scripts/machines/env.izumi_nag index d1ce4ba95..873b8e728 100755 --- a/configuration/scripts/machines/env.izumi_nag +++ b/configuration/scripts/machines/env.izumi_nag @@ -18,7 +18,7 @@ setenv OMP_STACKSIZE 64M endif setenv ICE_MACHINE_MACHNAME izumi -setenv ICE_MACHINE_MACHINFO "Linux Cluster" +setenv ICE_MACHINE_MACHINFO "Intel Xeon Gold 5118 2.3GHz" setenv ICE_MACHINE_ENVNAME nag setenv ICE_MACHINE_ENVINFO "NAG Fortran Compiler Release 6.2(Chiyoda) Build 6207, gcc (GCC) 8.1.0, cc (GCC) 4.8.5 20150623, mvapich2-2.3.3, netcdf/c4.6.1-f4.4.4" setenv ICE_MACHINE_MAKE gmake diff --git a/configuration/scripts/machines/env.izumi_pgi b/configuration/scripts/machines/env.izumi_pgi index 8a8c36b8f..ab39eda9e 100755 --- a/configuration/scripts/machines/env.izumi_pgi +++ b/configuration/scripts/machines/env.izumi_pgi @@ -17,7 +17,7 @@ setenv OMP_STACKSIZE 64M endif setenv ICE_MACHINE_MACHNAME izumi -setenv ICE_MACHINE_MACHINFO "Linux Cluster" +setenv ICE_MACHINE_MACHINFO "Intel Xeon Gold 5118 2.3GHz" setenv ICE_MACHINE_ENVNAME pgi setenv ICE_MACHINE_ENVINFO "pgf90 20.1-0, mvapich2-2.3.3, netcdf4.7.4" setenv ICE_MACHINE_MAKE gmake diff --git a/configuration/scripts/machines/env.millikan_intel b/configuration/scripts/machines/env.millikan_intel index c0a7356ad..350ea5b6e 100755 --- a/configuration/scripts/machines/env.millikan_intel +++ b/configuration/scripts/machines/env.millikan_intel @@ -10,7 +10,9 @@ source $ssmuse -d /fs/ssm/hpco/tmp/eccc/201402/04/intel-2016.1.150 # netcdf (and #setenv OMP_STACKSIZE 64M setenv ICE_MACHINE_MACHNAME millikan +setenv ICE_MACHINE_MACHINFO "millikan" setenv ICE_MACHINE_ENVNAME intel +setenv ICE_MACHINE_ENVINFO "intel 2016.1.156, openmpi 1.6.5, netcdf" setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR /users/dor/armn/amb/data/local/runs setenv ICE_MACHINE_INPUTDATA /users/dor/armn/amb/data/local/forcing diff --git a/configuration/scripts/machines/env.mustang_intel18 b/configuration/scripts/machines/env.mustang_intel18 index 45e5b6518..d689d7ae4 100755 --- a/configuration/scripts/machines/env.mustang_intel18 +++ b/configuration/scripts/machines/env.mustang_intel18 @@ -33,7 +33,7 @@ endif setenv ICE_MACHINE_MACHNAME mustang setenv ICE_MACHINE_MACHINFO "HPE SGI 8600 Xeon Platinum 8168" setenv ICE_MACHINE_ENVNAME intel18 -setenv ICE_MACHINE_ENVINFO "ifort 18.0.3 20180410, mpt2.19, netcdf4.4.2" +setenv ICE_MACHINE_ENVINFO "ifort 18.0.3 20180410, mpt2.18, netcdf4.4.2" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice-consortium diff --git a/configuration/scripts/machines/env.mustang_intel20 b/configuration/scripts/machines/env.mustang_intel20 index cca0b3019..785875c29 100755 --- a/configuration/scripts/machines/env.mustang_intel20 +++ b/configuration/scripts/machines/env.mustang_intel20 @@ -33,7 +33,7 @@ endif setenv 
ICE_MACHINE_MACHNAME mustang setenv ICE_MACHINE_MACHINFO "HPE SGI 8600 Xeon Platinum 8168" setenv ICE_MACHINE_ENVNAME intel20 -setenv ICE_MACHINE_ENVINFO "ifort 19.1.1.217 20200306, mpt2.19, netcdf4.4.2" +setenv ICE_MACHINE_ENVINFO "ifort 19.1.1.217 20200306, mpt2.20, netcdf4.4.2" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice-consortium diff --git a/configuration/scripts/machines/env.narwhal_aocc b/configuration/scripts/machines/env.narwhal_aocc index 6d6822f46..4016a1d7d 100755 --- a/configuration/scripts/machines/env.narwhal_aocc +++ b/configuration/scripts/machines/env.narwhal_aocc @@ -39,9 +39,9 @@ setenv OMP_WAIT_POLICY PASSIVE endif setenv ICE_MACHINE_MACHNAME narwhal -setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12 2.6GHz, Slingshot-10 Interconnect" setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "aocc_3.0.0-Build#78 2020_12_10 clang/flang 12.0.0, cray-mpich/8.1.9, netcdf/4.7.4.4" +setenv ICE_MACHINE_ENVINFO "aocc_3.0.0-Build#78 2020_12_10 clang/flang 12.0.0, cray-mpich/8.1.5, netcdf/4.7.4.4" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium diff --git a/configuration/scripts/machines/env.orion_intel b/configuration/scripts/machines/env.orion_intel index bdfccdd60..7a8e47f5d 100755 --- a/configuration/scripts/machines/env.orion_intel +++ b/configuration/scripts/machines/env.orion_intel @@ -7,30 +7,29 @@ endif if ("$inp" != "-nomodules") then -echo "Modules don't currently work with csh on Orion" -echo "Please run the following manually before running cice.setup" -echo " module purge" -echo " module load intel/2020" -echo " module load impi/2020" -echo " module load netcdf/4.7.2" - -##source /etc/profile.d/modules.csh +source /etc/profile.d/z030-HPC2-lmod.csh ##module list -#module purge -#module load intel/2020 -#module load impi/2020 -#module load netcdf/4.7.2 +module purge +module load intel/2020.2 +module load impi/2020.2 +module load netcdf/4.7.4 ##module list -# May be needed for OpenMP memory -#setenv OMP_STACKSIZE 64M - endif +limit coredumpsize unlimited +limit stacksize unlimited + +# May be needed for OpenMP memory +setenv OMP_STACKSIZE 64M +#setenv OMP_PROC_BIND true +#setenv OMP_PLACES threads +#setenv OMP_DISPLAY_ENV TRUE + setenv ICE_MACHINE_MACHNAME orion setenv ICE_MACHINE_MACHINFO "Dell EMC PowerEdge C6420 Xeon Gold 6148" setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "ifort 19.1.0.166 20191121, intelmpi 2019 Update 6 Build 20191024, netcdf/4.7.2" +setenv ICE_MACHINE_ENVINFO "ifort 19.1.2.254 20200623, intelmpi 2019 Update 8 Build 20200624 (id: 4f16ad915), netcdf/4.7.4" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $HOME/scratch/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /home/acraig/scratch/CICE_INPUTDATA diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 91be4f72f..c17938d59 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -602,7 +602,7 @@ either Celsius or Kelvin units). "rside", "fraction of ice that melts laterally", "" "rsnw", "snow grain radius", "10\ :math:`^{-6}` m" "rsnw_fall", "freshly fallen snow grain radius", "100. :math:`\times` 10\ :math:`^{-6}` m" - "rsnw_melt", "melting snow grain radius", "1000. :math:`\times` 10\ :math:`^{-6}` m" + "rsnw_mlt", "melting snow grain radius", "1000. 
:math:`\times` 10\ :math:`^{-6}` m" "rsnw_nonmelt", "nonmelting snow grain radius", "500. :math:`\times` 10\ :math:`^{-6}` m" "rsnw_sig", "standard deviation of snow grain radius", "250. :math:`\times` 10\ :math:`^{-6}` m" "rsnw_tmax", "maximum snow radius", "1500. :math:`\times` 10\ :math:`^{-6}` m" From da68566c65c252aeb5234d71725825885e179197 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Mon, 18 Apr 2022 14:29:50 -0700 Subject: [PATCH 104/109] More machine updates, Cori, Onyx, Narwhal, Hera (#93) * Modify halo allocation for zero local blocks, instead of not allocating, allocate(0). Non-allocated array was creating some problems on some machines/compilers for gx1 32x1x16x16x32 tests where some pes have no blocks due to slenderX2 decomposition. Add -DNO_R16 to narwhal_cray Add OMP_STACKSIZE 64M to hera_intel * Update cori modules including hdf5 env setting for netcdf4 files --- .../infrastructure/comm/mpi/ice_boundary.F90 | 2 +- .../infrastructure/ice_read_write.F90 | 1 + .../scripts/machines/Macros.narwhal_aocc | 2 +- .../scripts/machines/Macros.narwhal_cray | 2 +- configuration/scripts/machines/env.cori_intel | 21 +++++++++++-------- configuration/scripts/machines/env.hera_intel | 6 +++--- 6 files changed, 19 insertions(+), 15 deletions(-) diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 index 3959f12cf..5c9a28f10 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 @@ -225,8 +225,8 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !*** store some block info to fill haloes properly call ice_distributionGet(dist, numLocalBlocks=halo%numLocalBlocks) + allocate(halo%blockGlobalID(halo%numLocalBlocks)) if (halo%numLocalBlocks > 0) then - allocate(halo%blockGlobalID(halo%numLocalBlocks)) call ice_distributionGet(dist, blockGlobalID=halo%blockGlobalID) endif diff --git a/cicecore/cicedynB/infrastructure/ice_read_write.F90 b/cicecore/cicedynB/infrastructure/ice_read_write.F90 index 2443d75a3..d5cbe1768 100644 --- a/cicecore/cicedynB/infrastructure/ice_read_write.F90 +++ b/cicecore/cicedynB/infrastructure/ice_read_write.F90 @@ -1058,6 +1058,7 @@ subroutine ice_open_nc(filename, fid) status = nf90_open(filename, NF90_NOWRITE, fid) if (status /= nf90_noerr) then + !write(nu_diag,*) subname,' NF90_STRERROR = ',trim(nf90_strerror(status)) call abort_ice(subname//' ERROR: Cannot open '//trim(filename), & file=__FILE__, line=__LINE__) endif diff --git a/configuration/scripts/machines/Macros.narwhal_aocc b/configuration/scripts/machines/Macros.narwhal_aocc index 44b1dc2f6..95f301e85 100644 --- a/configuration/scripts/machines/Macros.narwhal_aocc +++ b/configuration/scripts/machines/Macros.narwhal_aocc @@ -3,7 +3,7 @@ #============================================================================== CPP := ftn -E -CPPDEFS := -DNO_R16 -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CPPDEFS := -DFORTRANUNDERSCORE -DNO_R16 ${ICE_CPPDEFS} CFLAGS := -c -O2 FIXEDFLAGS := -ffixed-form diff --git a/configuration/scripts/machines/Macros.narwhal_cray b/configuration/scripts/machines/Macros.narwhal_cray index ab0e6378e..8496f7a9b 100644 --- a/configuration/scripts/machines/Macros.narwhal_cray +++ b/configuration/scripts/machines/Macros.narwhal_cray @@ -3,7 +3,7 @@ #============================================================================== CPP := ftn -e P -CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CPPDEFS := -DFORTRANUNDERSCORE 
-DNO_R16 ${ICE_CPPDEFS} CFLAGS := -c -O2 FIXEDFLAGS := -132 diff --git a/configuration/scripts/machines/env.cori_intel b/configuration/scripts/machines/env.cori_intel index cab93246b..45b900983 100755 --- a/configuration/scripts/machines/env.cori_intel +++ b/configuration/scripts/machines/env.cori_intel @@ -13,42 +13,45 @@ module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel module unload PrgEnv-pgi -module load PrgEnv-intel/6.0.5 +module load PrgEnv-intel/6.0.10 module unload intel -module load intel/19.0.3.199 +module load intel/19.1.2.254 module unload gcc -module load gcc/8.2.0 +module load gcc/11.2.0 module unload cray-mpich module unload cray-mpich-abi -module load cray-mpich/7.7.6 +module load cray-mpich/7.7.19 module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf -module load cray-netcdf/4.6.3.2 +module load cray-netcdf/4.8.1.1 module unload cray-libsci module unload craype -module load craype/2.6.2 +module load craype/2.7.10 setenv NETCDF_PATH ${NETCDF_DIR} + +endif + +setenv HDF5_USE_FILE_LOCKING FALSE + setenv OMP_PROC_BIND true setenv OMP_PLACES threads setenv OMP_STACKSIZE 32M limit coredumpsize unlimited limit stacksize unlimited -endif - setenv ICE_MACHINE_MACHNAME cori setenv ICE_MACHINE_MACHINFO "Cray XC40 Xeon E5-2698v3 Haswell" setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "ifort 19.0.3.199 20190206, gcc/8.2.0, cray-mpich/7.7.6, netcdf/4.6.3.2" +setenv ICE_MACHINE_ENVINFO "ifort 19.1.2.254 20200623, gcc/11.2.0, cray-mpich/7.7.19, netcdf/4.8.1.1" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $SCRATCH/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /project/projectdirs/ccsm1/cice-consortium/ diff --git a/configuration/scripts/machines/env.hera_intel b/configuration/scripts/machines/env.hera_intel index 937992639..6698c0c2c 100755 --- a/configuration/scripts/machines/env.hera_intel +++ b/configuration/scripts/machines/env.hera_intel @@ -15,11 +15,11 @@ module load impi/2018.0.4 module load netcdf/4.7.0 #module list -# May be needed for OpenMP memory -#setenv OMP_STACKSIZE 64M - endif +# May be needed for OpenMP memory +setenv OMP_STACKSIZE 64M + setenv ICE_MACHINE_MACHNAME hera setenv ICE_MACHINE_MACHINFO "Cray CS500 Intel SkyLake 2.4GHz, Infiniband HDR" setenv ICE_MACHINE_ENVNAME intel From ca3a1bef63cb7ebf40849be30b4e6029c423b1cc Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Tue, 19 Apr 2022 21:59:07 +0000 Subject: [PATCH 105/109] Modifications to the doc related to rEVP (#94) * Split rEVP doc section in two * Very minor modif to doc --- doc/source/master_list.bib | 12 +++ doc/source/science_guide/sg_dynamics.rst | 106 ++++++++++++----------- 2 files changed, 68 insertions(+), 50 deletions(-) diff --git a/doc/source/master_list.bib b/doc/source/master_list.bib index e7363d09f..d2e887d75 100644 --- a/doc/source/master_list.bib +++ b/doc/source/master_list.bib @@ -1030,6 +1030,18 @@ @article{Roach19 year={2019} } +@article{Koldunov19, + author = "N.V. Koldunov and S. Danilov and D. Sidorenko and N. Hutter and M. Losch and H. Goessling and N. Rakowsky and P. Scholz and D. Sein and Q. Wang and T. 
Jung", + title = {{Fast EVP solutions in a high-resolution sea ice model}}, + journal = {Journal of Advances in Modeling Earth Systems}, + volume={11}, + number={5}, + pages={1269-1284}, + year={2019}, + url = {http://doi.wiley.com/10.1029/2018MS001485} +} + + @incollection{Arakawa77, author = "A. Arakawa and V.R. Lamb", title = "Computational Design of the Basic Dynamical Processes of the UCLA General Circulation Model", diff --git a/doc/source/science_guide/sg_dynamics.rst b/doc/source/science_guide/sg_dynamics.rst index 98ddb64cf..da93323b7 100644 --- a/doc/source/science_guide/sg_dynamics.rst +++ b/doc/source/science_guide/sg_dynamics.rst @@ -8,12 +8,12 @@ Dynamics There are different approaches in the CICE code for representing sea ice rheology and for solving the sea ice momentum equation: the viscous-plastic (VP) rheology :cite:`Hibler79` with an implicit method, the elastic-viscous-plastic (EVP) :cite:`Hunke97` model which represents a modification of the -VP model and the elastic-anisotropic-plastic (EAP) model which explicitly accounts for the sub-continuum +VP model, the revised EVP (rEVP) approach :cite:`Lemieux12,Bouillon13` and the elastic-anisotropic-plastic (EAP) model which explicitly accounts for the sub-continuum anisotropy of the sea ice cover :cite:`Wilchinsky06,Weiss09`. If ``kdyn`` = 1 in the namelist then the EVP model is used (module **ice\_dyn\_evp.F90**), while ``kdyn`` = 2 is associated with the EAP model (**ice\_dyn\_eap.F90**), and ``kdyn`` = 3 is associated with the -VP model (**ice\_dyn\_vp.F90**). +VP model (**ice\_dyn\_vp.F90**). The rEVP approach can be used by setting ``kdyn`` = 1 and ``revised_evp`` = true in the namelist. At times scales associated with the wind forcing, the EVP model reduces to the VP model while the EAP model @@ -22,7 +22,7 @@ reduces to the anisotropic rheology described in detail in adjustment process takes place in both models by a numerically more efficient elastic wave mechanism. While retaining the essential physics, this elastic wave modification leads to a fully explicit numerical -scheme which greatly improves the model’s computational efficiency. +scheme which greatly improves the model’s computational efficiency. The rEVP is also a fully explicit scheme which by construction should lead to the VP solution. The EVP sea ice dynamics model is thoroughly documented in :cite:`Hunke97`, :cite:`Hunke01`, @@ -36,7 +36,7 @@ respectively in :cite:`Hunke99` and The EVP numerical implementation in this code release is that of :cite:`Hunke02` and :cite:`Hunke03`, with revisions to the numerical solver as -in :cite:`Bouillon13`. The implementation of the EAP sea ice +in :cite:`Bouillon13`. Details about the rEVP solver can be found in :cite:`Lemieux12,Bouillon13,Kimmritz15,Koldunov19`. The implementation of the EAP sea ice dynamics into CICE is described in detail in :cite:`Tsamados13`. @@ -45,7 +45,7 @@ FGMRES :cite:`Saad93` as the linear solver and GMRES as the preconditioner. Note that the VP solver has not yet been tested on the ``tx1`` grid or with threading enabled. -The EVP, EAP and VP approaches are all available with the B grid. However, at the moment, the EVP model is the only possibility with the C grid. +The EVP, rEVP, EAP and VP approaches are all available with the B grid. However, at the moment, only the EVP and rEVP schemes are possible with the C grid. Here we summarize the equations and direct the reader to the above references for details. 
@@ -181,6 +181,53 @@ However, on the C grid, :math:`u` and :math:`v` are not collocated. When solving u^{k+1} = {\hat{u} + b v^{k}_{int} \over a} \\ v^{k+1} = {\hat{v} - b u^{k}_{int} \over a}. \end{aligned} +.. _revp-momentum: + +Revised EVP time discretization and solution +~~~~~~~~~~~~~~~~~~~~~~~ + +The revised EVP approach is based on a pseudo-time iterative scheme :cite:`Lemieux12`, :cite:`Bouillon13`, :cite:`Kimmritz15`. By construction, the revised EVP approach should lead to the VP solution +(given the right numerical parameters and a sufficiently large number of iterations). To do so, the inertial term is formulated such that it matches the backward Euler approach of +implicit solvers and there is an additional term for the pseudo-time iteration. Hence, with the revised approach, the discretized momentum equations :eq:`umom` and :eq:`vmom` become + +.. math:: + {\beta^*(u^{k+1}-u^k)\over\Delta t_e} + {m(u^{k+1}-u^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)} u^{k+1} + - {\left(mf+{\tt vrel}\sin\theta\right)} v^{l} + = & {{\partial\sigma_{1j}^{k+1}\over\partial x_j}} + + {\tau_{ax} - mg{\partial H_\circ\over\partial x} }\\ + & + {\tt vrel} {\left(U_w\cos\theta-V_w\sin\theta\right)}, + :label: umomr + + +.. math:: + {\beta^*(v^{k+1}-v^k)\over\Delta t_e} + {m(v^{k+1}-v^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)}v^{k+1} + + {\left(mf+{\tt vrel}\sin\theta\right)} u^{l} + = & {{\partial\sigma_{2j}^{k+1}\over\partial x_j}} + + {\tau_{ay} - mg{\partial H_\circ\over\partial y} } \\ + & + {\tt vrel}{\left(U_w\sin\theta+V_w\cos\theta\right)}, + :label: vmomr + +where :math:`\beta^*` is a numerical parameter and :math:`u^n, v^n` are the components of the previous time level solution. +With :math:`\beta=\beta^* \Delta t \left( m \Delta t_e \right)^{-1}` :cite:`Bouillon13`, these equations can be written as + +.. math:: + \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta\ + C_b \right)}_{\tt cca} u^{k+1} + - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb}v^{l} + = & \underbrace{{\partial\sigma_{1j}^{k+1}\over\partial x_j}}_{\tt strintx} + + \underbrace{\tau_{ax} - mg{\partial H_\circ\over\partial x} }_{\tt forcex} \\ + & + {\tt vrel}\underbrace{\left(U_w\cos\theta-V_w\sin\theta\right)}_{\tt waterx} + {m\over\Delta t}(\beta u^k + u^n), + :label: umomr2 + +.. math:: + \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{l} + + \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta + C_b \right)}_{\tt cca}v^{k+1} + = & \underbrace{{\partial\sigma_{2j}^{k+1}\over\partial x_j}}_{\tt strinty} + + \underbrace{\tau_{ay} - mg{\partial H_\circ\over\partial y} }_{\tt forcey} \\ + & + {\tt vrel}\underbrace{\left(U_w\sin\theta+V_w\cos\theta\right)}_{\tt watery} + {m\over\Delta t}(\beta v^k + v^n), + :label: vmomr2 + +At this point, the solutions :math:`u^{k+1}` and :math:`v^{k+1}` for the B or the C grids are obtained in the same manner as for the standard EVP approach (see Section :ref:`evp-momentum` for details). + .. _vp-momentum: Implicit (VP) time discretization and solution @@ -354,7 +401,7 @@ ITD and the seabed is given by .. math:: P_c=\int_{0}^{\inf} \int_{0}^{D(x)} g(x)b(y) dy dx \label{prob_contact}. -:math:`T_b` is first calculated at the 't' point (referred to as :math:`T_{bt}`). :math:`T_{bt}` depends on the weight of the ridge in excess of hydrostatic balance. The parameterization first calculates +:math:`T_b` is first calculated at the T point (referred to as :math:`T_{bt}`). 
:math:`T_{bt}` depends on the weight of the ridge in excess of hydrostatic balance. The parameterization first calculates .. math:: T_{bt}^*=\mu_s g \int_{0}^{\inf} \int_{0}^{D(x)} (\rho_i x - \rho_w @@ -552,48 +599,7 @@ The scalability of geophysical models is in general terms limited by the memory Revised EVP approach ~~~~~~~~~~~~~~~~~~~~ -The revised EVP approach is based on a pseudo-time iterative scheme :cite:`Lemieux12`, :cite:`Bouillon13`, :cite:`Kimmritz15`. By construction, the revised EVP approach should lead to the VP solution -(given the right numerical parameters and a sufficiently large number of iterations). To do so, the inertial term is formulated such that it matches the backward Euler approach of -implicit solvers and there is an additional term for the pseudo-time iteration. Hence, with the revised approach, the discretized momentum equations :eq:`umom` and :eq:`vmom` become - -.. math:: - {\beta^*(u^{k+1}-u^k)\over\Delta t_e} + {m(u^{k+1}-u^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)} u^{k+1} - - {\left(mf+{\tt vrel}\sin\theta\right)} v^{l} - = {{\partial\sigma_{1j}^{k+1}\over\partial x_j}} - + {\tau_{ax} - mg{\partial H_\circ\over\partial x} } - + {\tt vrel} {\left(U_w\cos\theta-V_w\sin\theta\right)}, - :label: umomr - -.. math:: - {\beta^*(v^{k+1}-v^k)\over\Delta t_e} + {m(v^{k+1}-v^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)}v^{k+1} - + {\left(mf+{\tt vrel}\sin\theta\right)} u^{l} - = {{\partial\sigma_{2j}^{k+1}\over\partial x_j}} - + {\tau_{ay} - mg{\partial H_\circ\over\partial y} } - + {\tt vrel}{\left(U_w\sin\theta+V_w\cos\theta\right)}, - :label: vmomr - -where :math:`\beta^*` is a numerical parameter and :math:`u^n, v^n` are the components of the previous time level solution. -With :math:`\beta=\beta^* \Delta t \left( m \Delta t_e \right)^{-1}` :cite:`Bouillon13`, these equations can be written as - -.. math:: - \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta\ + C_b \right)}_{\tt cca} u^{k+1} - - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb}v^{l} - = \underbrace{{\partial\sigma_{1j}^{k+1}\over\partial x_j}}_{\tt strintx} - + \underbrace{\tau_{ax} - mg{\partial H_\circ\over\partial x} }_{\tt forcex} - + {\tt vrel}\underbrace{\left(U_w\cos\theta-V_w\sin\theta\right)}_{\tt waterx} + {m\over\Delta t}(\beta u^k + u^n), - :label: umomr2 - -.. math:: - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{l} - + \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta + C_b \right)}_{\tt cca}v^{k+1} - = \underbrace{{\partial\sigma_{2j}^{k+1}\over\partial x_j}}_{\tt strinty} - + \underbrace{\tau_{ay} - mg{\partial H_\circ\over\partial y} }_{\tt forcey} - + {\tt vrel}\underbrace{\left(U_w\sin\theta+V_w\cos\theta\right)}_{\tt watery} + {m\over\Delta t}(\beta v^k + v^n), - :label: vmomr2 - -At this point, the solutions :math:`u^{k+1}` and :math:`v^{k+1}` for the B or the C grids are obtained in the same manner as for the standard EVP approach (see Section :ref:`evp-momentum` for details). - -Introducing another numerical parameter :math:`\alpha=2T \Delta t_e ^{-1}` :cite:`Bouillon13`, the stress equations in :eq:`sigdisc` become +Introducing the numerical parameter :math:`\alpha=2T \Delta t_e ^{-1}` :cite:`Bouillon13`, the stress equations in :eq:`sigdisc` become .. 
math:: \begin{aligned} @@ -603,11 +609,11 @@ Introducing another numerical parameter :math:`\alpha=2T \Delta t_e ^{-1}` :cite {\alpha (\sigma_{12}^{k+1}-\sigma_{12}^{k})} + {\sigma_{12}^{k}} &=& \eta^k D_S^k,\end{aligned} -where as opposed to the classic EVP, the second term in each equation is at iteration :math:`k` :cite:`Bouillon13`. Also, as opposed to the classic EVP, +where as opposed to the classic EVP, the second term in each equation is at iteration :math:`k` :cite:`Bouillon13`. Also, contrary to the classic EVP, :math:`\Delta t_e` times the number of subcycles (or iterations) does not need to be equal to the advective time step :math:`\Delta t`. Finally, as with the classic EVP approach, the stresses are initialized using the previous time level values. The revised EVP is activated by setting the namelist parameter ``revised_evp = true``. -In the code :math:`\alpha` is ``arlx`` and :math:`\beta` is ``brlx``. The values of ``arlx`` and ``brlx`` can be set in the namelist. +In the code :math:`\alpha` is ``arlx`` and :math:`\beta` is ``brlx`` (introduced in Section :ref:`revp-momentum`). The values of ``arlx`` and ``brlx`` can be set in the namelist. It is recommended to use large values of these parameters and to set :math:`\alpha=\beta` :cite:`Kimmritz15`. .. _stress-eap: From 99b64757a0c3a887e11884611b267145ef3e0e4a Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Tue, 19 Apr 2022 15:08:10 -0700 Subject: [PATCH 106/109] update documentation (#95) --- doc/source/science_guide/sg_dynamics.rst | 10 +++++----- doc/source/user_guide/ug_case_settings.rst | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/science_guide/sg_dynamics.rst b/doc/source/science_guide/sg_dynamics.rst index da93323b7..dd556037b 100644 --- a/doc/source/science_guide/sg_dynamics.rst +++ b/doc/source/science_guide/sg_dynamics.rst @@ -109,7 +109,7 @@ On the C grid, however, a finite difference approach is used for the spatial dis .. _evp-momentum: EVP time discretization and solution -~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The momentum equation is discretized in time as follows, for the classic EVP approach. @@ -184,7 +184,7 @@ However, on the C grid, :math:`u` and :math:`v` are not collocated. When solving .. _revp-momentum: Revised EVP time discretization and solution -~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The revised EVP approach is based on a pseudo-time iterative scheme :cite:`Lemieux12`, :cite:`Bouillon13`, :cite:`Kimmritz15`. By construction, the revised EVP approach should lead to the VP solution (given the right numerical parameters and a sufficiently large number of iterations). To do so, the inertial term is formulated such that it matches the backward Euler approach of @@ -231,7 +231,7 @@ At this point, the solutions :math:`u^{k+1}` and :math:`v^{k+1}` for the B or th .. _vp-momentum: Implicit (VP) time discretization and solution -~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the VP approach, equation :eq:`momsys` is discretized implicitly using a Backward Euler approach, and stresses are not computed explicitly: @@ -438,9 +438,9 @@ The :math:`C_{b}` are different at the E and N points and are respectively :math .. 
_internal-stress: -******************************** +**************************************************************** Internal stress and strain rate tensors -******************************** +**************************************************************** For convenience we formulate the stress tensor :math:`\bf \sigma` in terms of :math:`\sigma_1=\sigma_{11}+\sigma_{22}`, diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index ff12e3d31..5bf0ab6dc 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -192,7 +192,7 @@ setup_nml "", "``pio_pnetcdf``", "read/write restart files with pnetcdf in pio", "" "``history_precision``", "integer", "history file precision: 4 or 8 byte", "4" "``ice_ic``", "``default``", "equal to internal", "``default``" - "", "``internal``", "initial conditions set based on ice_data_ inputs", "" + "", "``internal``", "initial conditions set based on ice\_data\_type,conc,dist inputs", "" "", "``none``", "no ice", "" "", "'path/file'", "restart file name", "" "``incond_dir``", "string", "path to initial condition directory", "'./'" From 698d03f5b90ce3bab92b51ac0c7ce013ef5c5969 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Tue, 19 Apr 2022 15:18:33 -0700 Subject: [PATCH 107/109] update documentation (#96) --- doc/source/master_list.bib | 9 --------- 1 file changed, 9 deletions(-) diff --git a/doc/source/master_list.bib b/doc/source/master_list.bib index d2e887d75..6a623c471 100644 --- a/doc/source/master_list.bib +++ b/doc/source/master_list.bib @@ -729,15 +729,6 @@ @Article{Lemieux08 eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2007JC004680}, year = {2008} } -@Article{Bouillon09, - author = "S. Bouillon and T. Fichefet and V. Legat and G. Madec", - title = "{An elastic-viscous-plastic sea ice model formulated on Arakawa B and C grids}", - journal = OM, - year = {2009}, - volume = {27}, - pages = {174-184}, - url = {https://doi.org/10.1016/j.ocemod.2009.01.004} -} @Article{Hunke09, author = "E.C. Hunke and C.M. Bitz", title = "{Age characteristics in a multidecadal Arctic sea ice simulation}", From 5a150aae4eb9b3ebe6082baad41e173037028856 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Tue, 19 Apr 2022 15:25:38 -0700 Subject: [PATCH 108/109] update documentation (#97) --- doc/source/master_list.bib | 9 --------- 1 file changed, 9 deletions(-) diff --git a/doc/source/master_list.bib b/doc/source/master_list.bib index 6a623c471..b563414d7 100644 --- a/doc/source/master_list.bib +++ b/doc/source/master_list.bib @@ -970,15 +970,6 @@ @Article{Roberts15 pages = {211-228}, url = {http://dx.doi.org/10.3189/2015AoG69A760} } -@Article{Kimmritz16, - author = "M. Kimmritz and S. Danilov and M. Losch", - title = "{The adaptive EVP method for solving the sea ice momentum equation}", - journal = OM, - year = {2016}, - volume = {101}, - pages = {59-67}, - url = {https://doi.org/10.1016/j.ocemod.2016.03.004} -} @Article{Lemieux16, author = "J.F. Lemieux and F. Dupont and P. Blain and F. Roy and G.C. Smith and G.M. 
Flato", title = "{Improving the simulation of landfast ice by combining tensile strength and a parameterization for grounded ridges}", From 79bf252f3fa7f54c1eafe6fe456e19b8fe5c55b7 Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Thu, 28 Apr 2022 16:54:03 +0000 Subject: [PATCH 109/109] Rename some sections in the documentation (#98) * Fixed refs + format for some eqs * renamed some sections * Moved mom section to beginning of dynamics --- doc/source/science_guide/sg_dynamics.rst | 157 ++++++++++---------- doc/source/user_guide/ug_implementation.rst | 27 ++-- 2 files changed, 95 insertions(+), 89 deletions(-) diff --git a/doc/source/science_guide/sg_dynamics.rst b/doc/source/science_guide/sg_dynamics.rst index dd556037b..287de001e 100644 --- a/doc/source/science_guide/sg_dynamics.rst +++ b/doc/source/science_guide/sg_dynamics.rst @@ -5,57 +5,6 @@ Dynamics ======== -There are different approaches in the CICE code for representing sea ice -rheology and for solving the sea ice momentum equation: the viscous-plastic (VP) rheology :cite:`Hibler79` with an implicit method, -the elastic-viscous-plastic (EVP) :cite:`Hunke97` model which represents a modification of the -VP model, the revised EVP (rEVP) approach :cite:`Lemieux12,Bouillon13` and the elastic-anisotropic-plastic (EAP) model which explicitly accounts for the sub-continuum -anisotropy of the sea ice cover :cite:`Wilchinsky06,Weiss09`. If -``kdyn`` = 1 in the namelist then the EVP model is used (module -**ice\_dyn\_evp.F90**), while ``kdyn`` = 2 is associated with the EAP -model (**ice\_dyn\_eap.F90**), and ``kdyn`` = 3 is associated with the -VP model (**ice\_dyn\_vp.F90**). The rEVP approach can be used by setting ``kdyn`` = 1 and ``revised_evp`` = true in the namelist. - -At times scales associated with the -wind forcing, the EVP model reduces to the VP model while the EAP model -reduces to the anisotropic rheology described in detail in -:cite:`Wilchinsky06,Tsamados13`. At shorter time scales the -adjustment process takes place in both models by a numerically more -efficient elastic wave mechanism. While retaining the essential physics, -this elastic wave modification leads to a fully explicit numerical -scheme which greatly improves the model’s computational efficiency. The rEVP is also a fully explicit scheme which by construction should lead to the VP solution. - -The EVP sea ice dynamics model is thoroughly documented in -:cite:`Hunke97`, :cite:`Hunke01`, -:cite:`Hunke02` and :cite:`Hunke03` and the EAP -dynamics in :cite:`Tsamados13`. Simulation results and -performance of the EVP and EAP models have been compared with the VP -model and with each other in realistic simulations of the Arctic -respectively in :cite:`Hunke99` and -:cite:`Tsamados13`. - -The EVP numerical -implementation in this code release is that of :cite:`Hunke02` -and :cite:`Hunke03`, with revisions to the numerical solver as -in :cite:`Bouillon13`. Details about the rEVP solver can be found in :cite:`Lemieux12,Bouillon13,Kimmritz15,Koldunov19`. The implementation of the EAP sea ice -dynamics into CICE is described in detail in -:cite:`Tsamados13`. - -The VP solver implementation mostly follows :cite:`Lemieux08`, with -FGMRES :cite:`Saad93` as the linear solver and GMRES as the preconditioner. -Note that the VP solver has not yet been tested on the ``tx1`` grid or with -threading enabled. - -The EVP, rEVP, EAP and VP approaches are all available with the B grid. 
However, at the moment, only the EVP and rEVP schemes are possible with the C grid. - -Here we summarize the equations and -direct the reader to the above references for details. - -.. _momentum: - -******** -Momentum -******** - The force balance per unit area in the ice pack is given by a two-dimensional momentum equation :cite:`Hibler79`, obtained by integrating the 3D equation through the thickness of the ice in the @@ -96,7 +45,7 @@ For clarity, the two components of Equation :eq:`vpmom` are On the B grid, the equations above are solved at the U point for the collocated u and v components (see figure :ref:`fig-Bgrid`). On the C grid, however, the two components are not collocated: the u component is at the E point while the v component is at the N point. -The B grid spatial discretization is based on a variational method described in :cite:`Hunke97,Hunke02`. A bilinear discretization is used for the stress terms +The B grid spatial discretization is based on a variational method described in :cite:`Hunke97` and :cite:`Hunke02`. A bilinear discretization is used for the stress terms :math:`\partial\sigma_{ij}/\partial x_j`, which enables the discrete equations to be derived from the continuous equations written in curvilinear coordinates. In this @@ -104,7 +53,58 @@ manner, metric terms associated with the curvature of the grid are incorporated into the discretization explicitly. Details pertaining to the spatial discretization are found in :cite:`Hunke02` -On the C grid, however, a finite difference approach is used for the spatial discretization. The C grid discretization is based on :cite:`Bouillon09, Bouillon13, Kimmritz16`. +On the C grid, however, a finite difference approach is used for the spatial discretization. The C grid discretization is based on :cite:`Bouillon09`, :cite:`Bouillon13` and :cite:`Kimmritz16`. + +There are different approaches in the CICE code for representing sea ice +rheology and for solving the sea ice momentum equation: the viscous-plastic (VP) rheology :cite:`Hibler79` with an implicit method, +the elastic-viscous-plastic (EVP) :cite:`Hunke97` model which represents a modification of the +VP model, the revised EVP (rEVP) approach :cite:`Lemieux12,Bouillon13` and the elastic-anisotropic-plastic (EAP) model which explicitly accounts for the sub-continuum +anisotropy of the sea ice cover :cite:`Wilchinsky06,Weiss09`. If +``kdyn`` = 1 in the namelist then the EVP model is used (module +**ice\_dyn\_evp.F90**), while ``kdyn`` = 2 is associated with the EAP +model (**ice\_dyn\_eap.F90**), and ``kdyn`` = 3 is associated with the +VP model (**ice\_dyn\_vp.F90**). The rEVP approach can be used by setting ``kdyn`` = 1 and ``revised_evp`` = true in the namelist. + +At times scales associated with the +wind forcing, the EVP model reduces to the VP model while the EAP model +reduces to the anisotropic rheology described in detail in +:cite:`Wilchinsky06,Tsamados13`. At shorter time scales the +adjustment process takes place in both models by a numerically more +efficient elastic wave mechanism. While retaining the essential physics, +this elastic wave modification leads to a fully explicit numerical +scheme which greatly improves the model’s computational efficiency. The rEVP is also a fully explicit scheme which by construction should lead to the VP solution. + +The EVP sea ice dynamics model is thoroughly documented in +:cite:`Hunke97`, :cite:`Hunke01`, +:cite:`Hunke02` and :cite:`Hunke03` and the EAP +dynamics in :cite:`Tsamados13`. 
Simulation results and +performance of the EVP and EAP models have been compared with the VP +model and with each other in realistic simulations of the Arctic +respectively in :cite:`Hunke99` and +:cite:`Tsamados13`. + +The EVP numerical +implementation in this code release is that of :cite:`Hunke02` +and :cite:`Hunke03`, with revisions to the numerical solver as +in :cite:`Bouillon13`. Details about the rEVP solver can be found in :cite:`Lemieux12`, :cite:`Bouillon13`, :cite:`Kimmritz15` and :cite:`Koldunov19`. The implementation of the EAP sea ice +dynamics into CICE is described in detail in +:cite:`Tsamados13`. + +The VP solver implementation mostly follows :cite:`Lemieux08`, with +FGMRES :cite:`Saad93` as the linear solver and GMRES as the preconditioner. +Note that the VP solver has not yet been tested on the ``tx1`` grid or with +threading enabled. + +The EVP, rEVP, EAP and VP approaches are all available with the B grid. However, at the moment, only the EVP and rEVP schemes are possible with the C grid. + +Here we summarize the equations and +direct the reader to the above references for details. + +.. _momentumTS: + +********************** +Momentum time stepping +********************** .. _evp-momentum: @@ -192,19 +192,20 @@ implicit solvers and there is an additional term for the pseudo-time iteration. .. math:: {\beta^*(u^{k+1}-u^k)\over\Delta t_e} + {m(u^{k+1}-u^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)} u^{k+1} - - {\left(mf+{\tt vrel}\sin\theta\right)} v^{l} - = & {{\partial\sigma_{1j}^{k+1}\over\partial x_j}} - + {\tau_{ax} - mg{\partial H_\circ\over\partial x} }\\ - & + {\tt vrel} {\left(U_w\cos\theta-V_w\sin\theta\right)}, + - & {\left(mf+{\tt vrel}\sin\theta\right)} v^{l} + = {{\partial\sigma_{1j}^{k+1}\over\partial x_j}} + + {\tau_{ax}} \\ + & - {mg{\partial H_\circ\over\partial x} } + + {\tt vrel} {\left(U_w\cos\theta-V_w\sin\theta\right)}, :label: umomr - .. math:: {\beta^*(v^{k+1}-v^k)\over\Delta t_e} + {m(v^{k+1}-v^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)}v^{k+1} - + {\left(mf+{\tt vrel}\sin\theta\right)} u^{l} - = & {{\partial\sigma_{2j}^{k+1}\over\partial x_j}} - + {\tau_{ay} - mg{\partial H_\circ\over\partial y} } \\ - & + {\tt vrel}{\left(U_w\sin\theta+V_w\cos\theta\right)}, + + & {\left(mf+{\tt vrel}\sin\theta\right)} u^{l} + = {{\partial\sigma_{2j}^{k+1}\over\partial x_j}} + + {\tau_{ay}} \\ + & - {mg{\partial H_\circ\over\partial y} } + + {\tt vrel}{\left(U_w\sin\theta+V_w\cos\theta\right)}, :label: vmomr where :math:`\beta^*` is a numerical parameter and :math:`u^n, v^n` are the components of the previous time level solution. @@ -212,16 +213,16 @@ With :math:`\beta=\beta^* \Delta t \left( m \Delta t_e \right)^{-1}` :cite:`Bou .. math:: \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta\ + C_b \right)}_{\tt cca} u^{k+1} - - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb}v^{l} - = & \underbrace{{\partial\sigma_{1j}^{k+1}\over\partial x_j}}_{\tt strintx} + - \underbrace{\left(mf+{\tt vrel} \sin\theta\right)}_{\tt ccb} & v^{l} + = \underbrace{{\partial\sigma_{1j}^{k+1}\over\partial x_j}}_{\tt strintx} + \underbrace{\tau_{ax} - mg{\partial H_\circ\over\partial x} }_{\tt forcex} \\ & + {\tt vrel}\underbrace{\left(U_w\cos\theta-V_w\sin\theta\right)}_{\tt waterx} + {m\over\Delta t}(\beta u^k + u^n), :label: umomr2 .. 
@@ -277,6 +278,15 @@ The Picard iterative process stops when :math:`\left\lVert \mathbf{u}_{k} \right
 Parameters for the FGMRES linear solver and the preconditioner can be controlled using additional namelist flags (see :ref:`dynamics_nml`).
+
+.. _surfstress:
+
+********************
+Surface stress terms
+********************
+
+The formulation for the wind stress is described in the `Icepack Documentation `_. Details about the ice-ocean stress and the seabed stress are given below.
+
 Ice-Ocean stress
 ~~~~~~~~~~~~~~~~

@@ -290,9 +300,8 @@ pending further testing.

 .. _seabed-stress:

-***************
 Seabed stress
-***************
+~~~~~~~~~~~~~

 CICE includes two options for calculating the seabed stress,
 i.e. the term in the momentum equation that represents the interaction
@@ -313,8 +322,7 @@ grounding schemes.
 It is suggested to use a bathymetry field with water depths larger than 5 m
 that represents shallow regions (less than 30 m), such as the Laptev Sea
 and the East Siberian Sea, accurately.

-Seabed stress based on linear keel draft (LKD)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+**Seabed stress based on linear keel draft (LKD)**

 This parameterization for the seabed stress is described in :cite:`Lemieux16`. It assumes that the largest keel draft varies linearly with the mean thickness in a grid cell (i.e. with the sea ice volume). The :math:`C_b` coefficients are expressed as
@@ -371,8 +379,7 @@ The maximum seabed stress depends on the weight of the ridge above
 hydrostatic balance and the value of :math:`k_2`. It is, however, the
 parameter :math:`k_1` that has the most notable impact on the simulated
 extent of landfast ice. The value of :math:`k_1` can be changed at runtime
 using the namelist variable ``k1``.
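+
+As a sketch, the LKD grounding scheme could be activated with namelist
+settings along the following lines; the ``seabed_stress`` and
+``seabed_stress_method`` flag names are assumed to match the CICE
+dynamics namelist, and the ``k1`` value shown is illustrative, not a
+recommended default:
+
+.. code-block:: fortran
+
+   &dynamics_nml
+      seabed_stress        = .true.   ! enable the seabed (grounding) stress
+      seabed_stress_method = 'LKD'    ! or 'probabilistic'
+      k1                   = 8.0      ! illustrative; tunes landfast ice extent
+   /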
-Seabed stress based on probabilistic approach
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+**Seabed stress based on probabilistic approach**

 This more sophisticated grounding parameterization computes the seabed stress based
 on the probability of contact between the ice thickness distribution
@@ -438,9 +445,9 @@ The :math:`C_{b}` are different at the E and N points and are respectively :math:

 .. _internal-stress:

-****************************************************************
-Internal stress and strain rate tensors
-****************************************************************
+********
+Rheology
+********

 For convenience we formulate the stress tensor :math:`\bf \sigma` in
 terms of :math:`\sigma_1=\sigma_{11}+\sigma_{22}`,
diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst
index ab6c028ea..36799d68e 100644
--- a/doc/source/user_guide/ug_implementation.rst
+++ b/doc/source/user_guide/ug_implementation.rst
@@ -97,11 +97,10 @@ northeast corner of the corresponding T-cells and have velocity in the
 center of each. The velocity components are aligned along grid lines.

 The internal ice stress tensor takes four different values within a grid
-cell; bilinear approximations are used for the stress tensor and the ice
+cell with the B-grid implementation; bilinear approximations are used for the stress tensor and the ice
 velocity across the cell, as described in :cite:`Hunke02`.
 This tends to avoid the grid decoupling problems associated with the
-B-grid. EVP is available on the C-grid through the MITgcm code
-distribution, http://mitgcm.org/viewvc/MITgcm/MITgcm/pkg/seaice/.
+B-grid.

 .. _fig-Bgrid:

@@ -111,14 +110,13 @@ distribution, http://mitgcm.org/viewvc/MITgcm/MITgcm/pkg/seaice/.

    Schematic of CICE B-grid.

-The ability to solve on the C and CD grids was added later. With the C grid,
-the u velocity points are located on the E edges and the v velocity points are
-located on the N edges of the T cell rather than at the T cell corners. On
-the CD grid, the u and v velocity points are located on both the N and E edges.
-To support this capability, N and E grids
-were added to the existing T and U grids, and the N and E grids are defined
-at the northern and eastern edge of the T cell. This is shown in
-Figure :ref:`fig-Cgrid`.
+The ability to solve on the C and CD grids was added later. With the C-grid,
+the u velocity points are located on the E edges and the v velocity points
+are located on the N edges of the T cell rather than at the T cell corners.
+On the CD-grid, the u and v velocity points are located on both the N and E edges.
+To support this capability, N and E grids were added to the existing T and U grids,
+and the N and E grids are defined at the northern and eastern edges of the T cell.
+This is shown in Figure :ref:`fig-Cgrid`.

 .. _fig-Cgrid:

@@ -398,15 +396,16 @@ respectively) are useful in conditional statements.

 In addition to the land masks, two other masks are implemented in
 *dyn\_prep* in order to reduce the dynamics component’s work on a global
-grid. At each time step the logical masks ``ice_tmask`` and ``ice_umask`` are
+grid. At each time step the logical masks ``icetmask`` and ``iceumask`` are
 determined from the current ice extent, such that they have the value
 “true” wherever ice exists. They also include a border of cells around
 the ice pack for numerical purposes. These masks are used in the
 dynamics component to prevent unnecessary calculations on grid points
 where there is no ice. They are not used in the thermodynamics
 component, so that ice may form in previously ice-free cells. Like the
-land masks ``hm`` and ``uvm``, the ice extent masks ``ice_tmask`` and ``ice_umask``
-are for T-cells and U-cells, respectively.
+land masks ``hm`` and ``uvm``, the ice extent masks ``icetmask`` and ``iceumask``
+are for T-cells and U-cells, respectively. Note that the ice extent masks
+``iceemask`` and ``icenmask`` are also defined when using the C or CD grid.

 Improved parallel performance may result from utilizing halo masks for boundary updates of the full ice state, incremental remapping transport,