LCOV - code coverage report
Current view: top level - cicecore/cicedyn/infrastructure/comm/mpi - ice_global_reductions.F90 (source / functions)
Test: 231018-211459:8916b9ff2c:1:quick
Date: 2023-10-18 15:30:36

                 Hit    Total    Coverage
Lines:           144      561     25.67 %
Functions:         8       24     33.33 %

          Line data    Source code
       1             : !|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
       2             : 
       3             :  module ice_global_reductions
       4             : 
       5             : !  This module contains all the routines for performing global
       6             : !  reductions like global sums, minvals, maxvals, etc.
       7             : !
       8             : ! author: Phil Jones, LANL
       9             : ! Oct. 2004: Adapted from POP version by William H. Lipscomb, LANL
      10             : ! Feb. 2008: Updated from POP version by Elizabeth C. Hunke, LANL
      11             : ! Aug. 2014: Added bit-for-bit reproducible options for global_sum_dbl
      12             : !            and global_sum_prod_dbl by T Craig NCAR
      13             : ! Mar. 2019: Refactored bit-for-bit option, T Craig
      14             : 
      15             : #ifndef SERIAL_REMOVE_MPI
      16             :    use mpi   ! MPI Fortran module
      17             : #endif
      18             :    use ice_kinds_mod
      19             :    use ice_blocks, only: block, get_block, nx_block, ny_block
      20             : #ifdef SERIAL_REMOVE_MPI
      21             :    use ice_communicate, only: my_task, master_task
      22             : #else
      23             :    use ice_communicate, only: my_task, mpiR16, mpiR8, mpiR4, master_task
      24             : #endif
      25             :    use ice_constants, only: field_loc_Nface, field_loc_NEcorner, c0
      26             :    use ice_fileunits, only: bfbflag
      27             :    use ice_exit, only: abort_ice
      28             :    use ice_distribution, only: distrb, ice_distributionGet, &
      29             :        ice_distributionGetBlockID
      30             :    use ice_domain_size, only: nx_global, ny_global, max_blocks
      31             :    use ice_gather_scatter, only: gather_global
      32             :    use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted
      33             :    use ice_reprosum, only: ice_reprosum_calc
      34             : 
      35             :    implicit none
      36             :    private
      37             : 
      38             :    public :: global_sum,      &
      39             :              global_sum_prod, &   ! LCOV_EXCL_LINE
      40             :              global_maxval,   &   ! LCOV_EXCL_LINE
      41             :              global_minval
      42             : 
      43             : !-----------------------------------------------------------------------
      44             : !
      45             : !  generic interfaces for module procedures
      46             : !
      47             : !-----------------------------------------------------------------------
      48             : 
      49             :    interface global_sum
      50             :      module procedure global_sum_dbl,              &
      51             :                       global_sum_real,             &   ! LCOV_EXCL_LINE
      52             :                       global_sum_int,              &   ! LCOV_EXCL_LINE
      53             :                       global_sum_scalar_dbl,       &   ! LCOV_EXCL_LINE
      54             :                       global_sum_scalar_real,      &   ! LCOV_EXCL_LINE
      55             :                       global_sum_scalar_int
      56             :    end interface
      57             : 
      58             :    interface global_sum_prod
      59             :      module procedure global_sum_prod_dbl,         &
      60             :                       global_sum_prod_real,        &   ! LCOV_EXCL_LINE
      61             :                       global_sum_prod_int
      62             :    end interface
      63             : 
      64             :    interface global_maxval
      65             :      module procedure global_maxval_dbl,           &
      66             :                       global_maxval_real,          &   ! LCOV_EXCL_LINE
      67             :                       global_maxval_int,           &   ! LCOV_EXCL_LINE
      68             :                       global_maxval_scalar_dbl,    &   ! LCOV_EXCL_LINE
      69             :                       global_maxval_scalar_real,   &   ! LCOV_EXCL_LINE
      70             :                       global_maxval_scalar_int,    &   ! LCOV_EXCL_LINE
      71             :                       global_maxval_scalar_int_nodist
      72             :    end interface
      73             : 
      74             :    interface global_minval
      75             :      module procedure global_minval_dbl,           &
      76             :                       global_minval_real,          &   ! LCOV_EXCL_LINE
      77             :                       global_minval_int,           &   ! LCOV_EXCL_LINE
      78             :                       global_minval_scalar_dbl,    &   ! LCOV_EXCL_LINE
      79             :                       global_minval_scalar_real,   &   ! LCOV_EXCL_LINE
      80             :                       global_minval_scalar_int,    &   ! LCOV_EXCL_LINE
      81             :                       global_minval_scalar_int_nodist
      82             :    end interface
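The generic names above resolve on the type, kind, and rank of their arguments, so callers reference only global_sum, global_sum_prod, global_maxval, and global_minval. A minimal call is sketched below; the caller-side names (aice, total_conc, distrb_info) and the field_loc_center constant (assumed from ice_constants) are placeholders for this sketch, not entities defined in this module.

   ! a 3-d double precision (i,j,iblock) field dispatches to global_sum_dbl;
   ! real/integer fields and scalars dispatch to the matching specific routines
   total_conc = global_sum(aice, distrb_info, field_loc_center)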
      83             : 
      84             : !***********************************************************************
      85             : 
      86             :  contains
      87             : 
      88             : !***********************************************************************
      89             : 
      90      230400 :  function global_sum_dbl(array, dist, field_loc, mMask, lMask) &
      91       57600 :           result(globalSum)
      92             : 
      93             : !  Computes the global sum of the physical domain of a 2-d array.
      94             : !
      95             : !  This is actually the specific interface for the generic global_sum
      96             : !  function corresponding to double precision arrays.  The generic
      97             : !  interface is identical but will handle real and integer 2-d slabs
      98             : !  and real, integer, and double precision scalars.
      99             : 
     100             :    real (dbl_kind), dimension(:,:,:), intent(in) :: &
     101             :       array                ! array to be summed
     102             : 
     103             :    type (distrb), intent(in) :: &
     104             :       dist                 ! block distribution for array X
     105             : 
     106             :    integer (int_kind), intent(in) :: &
     107             :       field_loc            ! location of field on staggered grid
     108             : 
     109             :    real (dbl_kind), dimension(:,:,:), intent(in), optional :: &
     110             :       mMask                ! optional multiplicative mask
     111             : 
     112             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
     113             :       lMask                ! optional logical mask
     114             : 
     115             :    real (dbl_kind) :: &
     116             :       globalSum            ! resulting global sum
     117             : 
     118             : !-----------------------------------------------------------------------
     119             : !
     120             : !  local variables
     121             : !
     122             : !-----------------------------------------------------------------------
     123             : 
     124             :    integer (int_kind) :: &
     125             :       i,j,iblock,n, &! local counters   ! LCOV_EXCL_LINE
     126             :       ib,ie,jb,je,  &! beg,end of physical domain   ! LCOV_EXCL_LINE
     127             :       blockID,      &! block location   ! LCOV_EXCL_LINE
     128             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     129             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     130             :       communicator, &! communicator for this distribution   ! LCOV_EXCL_LINE
     131             :       maxiglob       ! maximum non-redundant value of i_global
     132             : 
     133             :    logical (log_kind) :: &
     134             :       Nrow           ! this field is on an N row (a velocity row)
     135             : 
     136             :    real (dbl_kind), dimension(:,:), allocatable :: &
     137      230400 :       work           ! temporary local array
     138             : 
     139             :    real (dbl_kind), dimension(:), allocatable :: &
     140      230400 :       sums           ! array of sums
     141             : 
     142             :    type (block) :: &
     143             :       this_block     ! holds local block information
     144             : 
     145             :    character(len=*), parameter :: subname = '(global_sum_dbl)'
     146             : 
     147             : !-----------------------------------------------------------------------
     148             : 
     149             : 
     150             :    call ice_distributionGet(dist,          &
     151             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     152             :                             nprocs = numProcs,       &   ! LCOV_EXCL_LINE
     153      230400 :                             communicator = communicator)
     154             : 
     155      230400 :    if (numBlocks == 0) then
     156           0 :       allocate(work(1,1))
     157             :    else
     158      230400 :       allocate(work(nx_block*ny_block*numBlocks,1))
     159             :    endif
     160      230400 :    allocate(sums(1))
     161   606216000 :    work = 0.0_dbl_kind
     162      460800 :    sums = 0.0_dbl_kind
     163             : 
     164     1147200 :    do iblock=1,numBlocks
     165      916800 :       call ice_distributionGetBlockID(dist, iblock, blockID)
     166             : 
     167      916800 :       this_block = get_block(blockID, blockID)
     168             : 
     169      916800 :       ib = this_block%ilo
     170      916800 :       ie = this_block%ihi
     171      916800 :       jb = this_block%jlo
     172      916800 :       je = this_block%jhi
     173             : 
     174             :       !*** if this row along or beyond tripole boundary
     175             :       !*** must eliminate redundant points from global sum
     176             : 
     177      916800 :       maxiglob = -1
     178      916800 :       if (this_block%tripole) then
     179             :          Nrow=(field_loc == field_loc_Nface .or. &
     180           0 :                field_loc == field_loc_NEcorner)
     181           0 :          if (Nrow .and. this_block%tripoleTFlag) then
     182           0 :             maxiglob = 0 ! entire u-row on T-fold grid
     183           0 :          elseif (Nrow .or. this_block%tripoleTFlag) then
     184           0 :             maxiglob = nx_global/2 ! half T-row on T-fold or half u-row on u-fold
     185             :          else
     186           0 :             maxiglob = -1 ! nothing to do for T-row on u-fold
     187             :          endif
     188             :       endif
     189             : 
     190      916800 :       n = (iblock-1)*nx_block*ny_block
     191             : 
     192      916800 :       if (present(mMask)) then
     193    29112000 :          do j=jb,je
     194   541828800 :          do i=ib,ie
     195   513633600 :             n = n + 1
     196   540912000 :             work(n,1) = array(i,j,iblock)*mMask(i,j,iblock)
     197             :          end do
     198             :          end do
     199           0 :       elseif (present(lMask)) then
     200           0 :          do j=jb,je
     201           0 :          do i=ib,ie
     202           0 :             n = n + 1
     203           0 :             if (lMask(i,j,iblock)) then
     204           0 :                work(n,1) = array(i,j,iblock)
     205             :             endif
     206             :          end do
     207             :          end do
     208             :       else
     209           0 :          do j=jb,je
     210           0 :          do i=ib,ie
     211           0 :             n = n + 1
     212           0 :             work(n,1) = array(i,j,iblock)
     213             :          enddo
     214             :          enddo
     215             :       endif
     216             : 
     217     2064000 :       if (maxiglob >= 0) then
     218             :          ! eliminate redundant points at je
     219             :          ! set n to (ib,je) index
     220           0 :          n = (iblock-1)*nx_block*ny_block
     221           0 :          n = n + (je-1-jb+1)*(ie-ib+1)
     222           0 :          j=je
     223           0 :          do i=ib,ie
     224           0 :             n = n + 1
     225           0 :             if (this_block%i_glob(i) > maxiglob) then
     226           0 :                work(n,1) = 0._dbl_kind
     227             :             endif
     228             :          end do
     229             :       endif
     230             : 
     231             :    end do
     232             : 
     233      230400 :    call compute_sums_dbl(work,sums,communicator,numProcs)
     234             : 
     235      230400 :    globalSum = sums(1)
     236             : 
     237      230400 :    deallocate(work)
     238      230400 :    deallocate(sums)
     239             : 
     240             : !-----------------------------------------------------------------------
     241             : 
     242      460800 :  end function global_sum_dbl
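A hedged note on the two optional masks accepted by the routine above: mMask multiplies every point (typically by a cell area or other weight), while lMask merely selects which points enter the sum unweighted. The names in the sketch below (aice, tarea, tmask, distrb_info, field_loc_center) are placeholders for caller-side variables.

   ! multiplicative mask: each point contributes aice*tarea (area-weighted sum)
   a_weighted = global_sum(aice, distrb_info, field_loc_center, mMask=tarea)
   ! logical mask: masked-out points contribute nothing, others are unweighted
   a_selected = global_sum(aice, distrb_info, field_loc_center, lMask=tmask)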
     243             : 
     244             : !***********************************************************************
     245             : 
     246           0 :  function global_sum_real(array, dist, field_loc, mMask, lMask) &
     247           0 :           result(globalSum)
     248             : 
     249             : !  Computes the global sum of the physical domain of a 2-d array.
     250             : !
     251             : !  This is actually the specific interface for the generic global_sum
     252             : !  function corresponding to single precision arrays.  The generic
     253             : !  interface is identical but will handle double, real and integer 2-d slabs
     254             : !  and real, integer, and double precision scalars.
     255             : 
     256             :    real (real_kind), dimension(:,:,:), intent(in) :: &
     257             :       array                ! array to be summed
     258             : 
     259             :    type (distrb), intent(in) :: &
     260             :       dist                 ! block distribution for array X
     261             : 
     262             :    integer (int_kind), intent(in) :: &
     263             :       field_loc            ! location of field on staggered grid
     264             : 
     265             :    real (real_kind), dimension(:,:,:), intent(in), optional :: &
     266             :       mMask                ! optional multiplicative mask
     267             : 
     268             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
     269             :       lMask                ! optional logical mask
     270             : 
     271             :    real (real_kind) :: &
     272             :       globalSum            ! resulting global sum
     273             : 
     274             : !-----------------------------------------------------------------------
     275             : !
     276             : !  local variables
     277             : !
     278             : !-----------------------------------------------------------------------
     279             : 
     280             :    integer (int_kind) :: &
     281             :       i,j,iblock,n, &! local counters   ! LCOV_EXCL_LINE
     282             :       ib,ie,jb,je,  &! beg,end of physical domain   ! LCOV_EXCL_LINE
     283             :       blockID,      &! block location   ! LCOV_EXCL_LINE
     284             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     285             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     286             :       communicator, &! communicator for this distribution   ! LCOV_EXCL_LINE
     287             :       maxiglob       ! maximum non-redundant value of i_global
     288             : 
     289             :    logical (log_kind) :: &
     290             :       Nrow           ! this field is on an N row (a velocity row)
     291             : 
     292             :    real (dbl_kind), dimension(:,:), allocatable :: &
     293           0 :       work           ! temporary local array
     294             : 
     295             :    real (dbl_kind), dimension(:), allocatable :: &
     296           0 :       sums           ! array of sums
     297             : 
     298             :    type (block) :: &
     299             :       this_block     ! holds local block information
     300             : 
     301             :    character(len=*), parameter :: subname = '(global_sum_real)'
     302             : 
     303             : !-----------------------------------------------------------------------
     304             : 
     305             : 
     306             :    call ice_distributionGet(dist,          &
     307             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     308             :                             nprocs = numProcs,       &   ! LCOV_EXCL_LINE
     309           0 :                             communicator = communicator)
     310             : 
     311           0 :    if (numBlocks == 0) then
     312           0 :       allocate(work(1,1))
     313             :    else
     314           0 :       allocate(work(nx_block*ny_block*numBlocks,1))
     315             :    endif
     316           0 :    allocate(sums(1))
     317           0 :    work = 0.0_dbl_kind
     318           0 :    sums = 0.0_dbl_kind
     319             : 
     320           0 :    do iblock=1,numBlocks
     321           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
     322             : 
     323           0 :       this_block = get_block(blockID, blockID)
     324             : 
     325           0 :       ib = this_block%ilo
     326           0 :       ie = this_block%ihi
     327           0 :       jb = this_block%jlo
     328           0 :       je = this_block%jhi
     329             : 
     330             :       !*** if this row along or beyond tripole boundary
     331             :       !*** must eliminate redundant points from global sum
     332             : 
     333           0 :       maxiglob = -1
     334           0 :       if (this_block%tripole) then
     335             :          Nrow=(field_loc == field_loc_Nface .or. &
     336           0 :                field_loc == field_loc_NEcorner)
     337           0 :          if (Nrow .and. this_block%tripoleTFlag) then
     338           0 :             maxiglob = 0 ! entire u-row on T-fold grid
     339           0 :          elseif (Nrow .or. this_block%tripoleTFlag) then
     340           0 :             maxiglob = nx_global/2 ! half T-row on T-fold or half u-row on u-fold
     341             :          else
     342           0 :             maxiglob = -1 ! nothing to do for T-row on u-fold
     343             :          endif
     344             :       endif
     345             : 
     346           0 :       n = (iblock-1)*nx_block*ny_block
     347             : 
     348           0 :       if (present(mMask)) then
     349           0 :          do j=jb,je
     350           0 :          do i=ib,ie
     351           0 :             n = n + 1
     352           0 :             work(n,1) = real(array(i,j,iblock)*mMask(i,j,iblock),dbl_kind)
     353             :          end do
     354             :          end do
     355           0 :       elseif (present(lMask)) then
     356           0 :          do j=jb,je
     357           0 :          do i=ib,ie
     358           0 :             n = n + 1
     359           0 :             if (lMask(i,j,iblock)) then
     360           0 :                work(n,1) = real(array(i,j,iblock),dbl_kind)
     361             :             endif
     362             :          end do
     363             :          end do
     364             :       else
     365           0 :          do j=jb,je
     366           0 :          do i=ib,ie
     367           0 :             n = n + 1
     368           0 :             work(n,1) = real(array(i,j,iblock),dbl_kind)
     369             :          enddo
     370             :          enddo
     371             :       endif
     372             : 
     373           0 :       if (maxiglob >= 0) then
     374             :          ! eliminate redundant points at je
     375             :          ! set n to (ib,je) index
     376           0 :          n = (iblock-1)*nx_block*ny_block
     377           0 :          n = n + (je-1-jb+1)*(ie-ib+1)
     378           0 :          j=je
     379           0 :          do i=ib,ie
     380           0 :             n = n + 1
     381           0 :             if (this_block%i_glob(i) > maxiglob) then
     382           0 :                work(n,1) = 0._dbl_kind
     383             :             endif
     384             :          end do
     385             :       endif
     386             : 
     387             :    end do
     388             : 
     389           0 :    call compute_sums_dbl(work,sums,communicator,numProcs)
     390             : 
     391           0 :    globalSum = real(sums(1),real_kind)
     392             : 
     393           0 :    deallocate(work)
     394           0 :    deallocate(sums)
     395             : 
     396             : !-----------------------------------------------------------------------
     397             : 
     398           0 :  end function global_sum_real
     399             : 
     400             : !***********************************************************************
     401             : 
     402           0 :  function global_sum_int(array, dist, field_loc, mMask, lMask) &
     403             :           result(globalSum)
     404             : 
     405             : !  Computes the global sum of the physical domain of a 2-d array.
     406             : !
     407             : !  This is actually the specific interface for the generic global_sum
     408             : !  function corresponding to integer arrays.  The generic
     409             : !  interface is identical but will handle real and integer 2-d slabs
     410             : !  and real, integer, and double precision scalars.
     411             : 
     412             :    integer (int_kind), dimension(:,:,:), intent(in) :: &
     413             :       array                ! array to be summed
     414             : 
     415             :    type (distrb), intent(in) :: &
     416             :       dist                 ! block distribution for array X
     417             : 
     418             :    integer (int_kind), intent(in) :: &
     419             :       field_loc            ! location of field on staggered grid
     420             : 
     421             :    integer (int_kind), dimension(:,:,:), intent(in), optional :: &
     422             :       mMask                ! optional multiplicative mask
     423             : 
     424             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
     425             :       lMask                ! optional logical mask
     426             : 
     427             :    integer (int_kind) :: &
     428             :       globalSum            ! resulting global sum
     429             : 
     430             : !-----------------------------------------------------------------------
     431             : !
     432             : !  local variables
     433             : !
     434             : !-----------------------------------------------------------------------
     435             : 
     436             :    integer (int_kind) :: &
     437             :       blockSum,     &! sum of local block domain   ! LCOV_EXCL_LINE
     438             :       localSum       ! sum of all local block domains
     439             : 
     440             :    integer (int_kind) :: &
     441             :       i,j,iblock,   &! local counters   ! LCOV_EXCL_LINE
     442             :       ib,ie,jb,je,  &! beg,end of physical domain   ! LCOV_EXCL_LINE
     443             :       ierr,         &! mpi error flag   ! LCOV_EXCL_LINE
     444             :       blockID,      &! block location   ! LCOV_EXCL_LINE
     445             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     446             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     447             :       communicator, &! communicator for this distribution   ! LCOV_EXCL_LINE
     448             :       maxiglob       ! maximum non-redundant value of i_global
     449             : 
     450             :    logical (log_kind) :: &
     451             :       Nrow           ! this field is on an N row (a velocity row)
     452             : 
     453             :    type (block) :: &
     454             :       this_block     ! holds local block information
     455             : 
     456             :    character(len=*), parameter :: subname = '(global_sum_int)'
     457             : 
     458             : !-----------------------------------------------------------------------
     459             : 
     460           0 :    localSum  = 0_int_kind
     461           0 :    globalSum = 0_int_kind
     462             : 
     463             :    call ice_distributionGet(dist,          &
     464             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     465             :                             nprocs = numProcs,       &   ! LCOV_EXCL_LINE
     466           0 :                             communicator = communicator)
     467             : 
     468           0 :    do iblock=1,numBlocks
     469           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
     470             : 
     471           0 :       this_block = get_block(blockID, blockID)
     472             : 
     473           0 :       ib = this_block%ilo
     474           0 :       ie = this_block%ihi
     475           0 :       jb = this_block%jlo
     476           0 :       je = this_block%jhi
     477             : 
     478             :       !*** if this row along or beyond tripole boundary
     479             :       !*** must eliminate redundant points from global sum
     480             : 
     481           0 :       maxiglob = -1
     482           0 :       if (this_block%tripole) then
     483             :          Nrow=(field_loc == field_loc_Nface .or. &
     484           0 :                field_loc == field_loc_NEcorner)
     485           0 :          if (Nrow .and. this_block%tripoleTFlag) then
     486           0 :             maxiglob = 0 ! entire u-row on T-fold grid
     487           0 :          elseif (Nrow .or. this_block%tripoleTFlag) then
     488           0 :             maxiglob = nx_global/2 ! half T-row on T-fold or half u-row on u-fold
     489             :          else
     490           0 :             maxiglob = -1 ! nothing to do for T-row on u-fold
     491             :          endif
     492             :       endif
     493             : 
     494           0 :       blockSum = 0_int_kind
     495             : 
     496           0 :       do j=jb,je
     497           0 :       do i=ib,ie
     498             :          ! eliminate redundant points
     499           0 :          if (maxiglob >= 0 .and. j == je .and. this_block%i_glob(i) > maxiglob) then
     500             : !            blockSum = blockSum + 0_int_kind
     501             :          else
     502           0 :             if (present(mMask)) then
     503           0 :                blockSum = blockSum + array(i,j,iblock)*mMask(i,j,iblock)
     504           0 :             else if (present(lMask)) then
     505           0 :                if (lMask(i,j,iblock)) then
     506           0 :                   blockSum = blockSum + array(i,j,iblock)
     507             :                endif
     508             :             else
     509           0 :                blockSum = blockSum + array(i,j,iblock)
     510             :             endif
     511             :          endif
     512             :       end do
     513             :       end do
     514             : 
     515             :       !*** now add block sum to global sum
     516             : 
     517           0 :       localSum = localSum + blockSum
     518             : 
     519             :    end do
     520             : 
     521             : !-----------------------------------------------------------------------
     522             : !
     523             : !  now use MPI global reduction to reduce local sum to global sum
     524             : !
     525             : !-----------------------------------------------------------------------
     526             : 
     527             : #ifdef SERIAL_REMOVE_MPI
     528             :    globalSum = localSum
     529             : #else
     530           0 :    if (my_task < numProcs) then
     531             :       call MPI_ALLREDUCE(localSum, globalSum, 1, &
     532           0 :                          MPI_INTEGER, MPI_SUM, communicator, ierr)
     533             :    endif
     534             : #endif
     535             : 
     536             : !-----------------------------------------------------------------------
     537             : 
     538           0 :  end function global_sum_int
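Unlike the double precision routines, which pass all values through compute_sums_dbl so that the bfbflag option imported above can select a reproducible summation, the integer routine can reduce with MPI_ALLREDUCE directly: integer addition gives the same result in any order, while floating point addition does not. A small illustration of that order dependence (values chosen for effect, in this module's dbl_kind):

   real (dbl_kind) :: a, b, c
   a =  1.0e16_dbl_kind
   b = -1.0e16_dbl_kind
   c =  1.0_dbl_kind
   ! (a + b) + c  evaluates to 1.0_dbl_kind
   ! a + (b + c)  evaluates to 0.0_dbl_kind, because b + c rounds back to -1.0e16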
     539             : 
     540             : !***********************************************************************
     541             : 
     542           0 :  function global_sum_scalar_dbl(scalar, dist) &
     543           0 :           result(globalSum)
     544             : 
     545             : !  Computes the global sum of a set of scalars distributed across
     546             : !  a parallel machine.
     547             : !
     548             : !  This is actually the specific interface for the generic global_sum
     549             : !  function corresponding to double precision scalars.  The generic
     550             : !  interface is identical but will handle real and integer 2-d slabs
     551             : !  and real, integer, and double precision scalars.
     552             : 
     553             :    real (dbl_kind), intent(in) :: &
     554             :       scalar               ! scalar to be summed
     555             : 
     556             :    type (distrb), intent(in) :: &
     557             :       dist                 ! block distribution for array X
     558             : 
     559             :    real (dbl_kind) :: &
     560             :       globalSum            ! resulting global sum
     561             : 
     562             : !-----------------------------------------------------------------------
     563             : !
     564             : !  local variables
     565             : !
     566             : !-----------------------------------------------------------------------
     567             : 
     568             :    integer (int_kind) :: &
     569             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     570             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     571             :       communicator   ! communicator for this distribution
     572             : 
     573             :    real (dbl_kind), dimension(:,:), allocatable :: &
     574           0 :       work           ! temporary local array
     575             : 
     576             :    real (dbl_kind), dimension(:), allocatable :: &
     577           0 :       sums           ! array of sums
     578             : 
     579             :    character(len=*), parameter :: subname = '(global_sum_scalar_dbl)'
     580             : 
     581             : !-----------------------------------------------------------------------
     582             : !
     583             : !  get communicator for MPI calls
     584             : !
     585             : !-----------------------------------------------------------------------
     586             : 
     587             :    call ice_distributionGet(dist, &
     588             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     589             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
     590           0 :                             communicator = communicator)
     591             : 
     592             : 
     593           0 :    allocate(work(1,1))
     594           0 :    allocate(sums(1))
     595           0 :    work(1,1) = scalar
     596           0 :    sums = 0.0_dbl_kind
     597             : 
     598           0 :    call compute_sums_dbl(work,sums,communicator,numProcs)
     599             : 
     600           0 :    globalSum = sums(1)
     601             : 
     602           0 :    deallocate(work)
     603           0 :    deallocate(sums)
     604             : 
     605             : !-----------------------------------------------------------------------
     606             : 
     607           0 :  end function global_sum_scalar_dbl
     608             : 
     609             : !***********************************************************************
     610             : 
     611           0 :  function global_sum_scalar_real(scalar, dist) &
     612           0 :           result(globalSum)
     613             : 
     614             : !  Computes the global sum of a set of scalars distributed across
     615             : !  a parallel machine.
     616             : !
     617             : !  This is actually the specific interface for the generic global_sum
     618             : !  function corresponding to real scalars.  The generic
     619             : !  interface is identical but will handle real and integer 2-d slabs
     620             : !  and real, integer, and double precision scalars.
     621             : 
     622             :    real (real_kind), intent(in) :: &
     623             :       scalar               ! scalar to be summed
     624             : 
     625             :    type (distrb), intent(in) :: &
     626             :       dist                 ! block distribution for array X
     627             : 
     628             :    real (real_kind) :: &
     629             :       globalSum            ! resulting global sum
     630             : 
     631             : !-----------------------------------------------------------------------
     632             : !
     633             : !  local variables
     634             : !
     635             : !-----------------------------------------------------------------------
     636             : 
     637             :    integer (int_kind) :: &
     638             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     639             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     640             :       communicator   ! communicator for this distribution
     641             : 
     642             :    real (dbl_kind), dimension(:,:), allocatable :: &
     643           0 :       work           ! temporary local array
     644             : 
     645             :    real (dbl_kind), dimension(:), allocatable :: &
     646           0 :       sums           ! array of sums
     647             : 
     648             :    character(len=*), parameter :: subname = '(global_sum_scalar_real)'
     649             : 
     650             : !-----------------------------------------------------------------------
     651             : !
     652             : !  get communicator for MPI calls
     653             : !
     654             : !-----------------------------------------------------------------------
     655             : 
     656             :    call ice_distributionGet(dist, &
     657             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     658             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
     659           0 :                             communicator = communicator)
     660             : 
     661           0 :    allocate(work(1,1))
     662           0 :    allocate(sums(1))
     663           0 :    work(1,1) = real(scalar,dbl_kind)
     664           0 :    sums = 0.0_dbl_kind
     665             : 
     666           0 :    call compute_sums_dbl(work,sums,communicator,numProcs)
     667             : 
     668           0 :    globalSum = real(sums(1),real_kind)
     669             : 
     670           0 :    deallocate(work)
     671           0 :    deallocate(sums)
     672             : 
     673             : !-----------------------------------------------------------------------
     674             : 
     675           0 :  end function global_sum_scalar_real
     676             : 
     677             : !***********************************************************************
     678             : 
     679           0 :  function global_sum_scalar_int(scalar, dist) &
     680             :           result(globalSum)
     681             : 
     682             : !  Computes the global sum of a set of scalars distributed across
     683             : !  a parallel machine.
     684             : !
     685             : !  This is actually the specific interface for the generic global_sum
     686             : !  function corresponding to integer scalars.  The generic
     687             : !  interface is identical but will handle real and integer 2-d slabs
     688             : !  and real, integer, and double precision scalars.
     689             : 
     690             :    integer (int_kind), intent(in) :: &
     691             :       scalar               ! scalar to be summed
     692             : 
     693             :    type (distrb), intent(in) :: &
     694             :       dist                 ! block distribution for array X
     695             : 
     696             :    integer (int_kind) :: &
     697             :       globalSum            ! resulting global sum
     698             : 
     699             : !-----------------------------------------------------------------------
     700             : !
     701             : !  local variables
     702             : !
     703             : !-----------------------------------------------------------------------
     704             : 
     705             :    integer (int_kind) :: &
     706             :       ierr,         &! mpi error flag   ! LCOV_EXCL_LINE
     707             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     708             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     709             :       communicator   ! communicator for this distribution
     710             : 
     711             :    character(len=*), parameter :: subname = '(global_sum_scalar_int)'
     712             : 
     713             : !-----------------------------------------------------------------------
     714             : !
     715             : !  get communicator for MPI calls
     716             : !
     717             : !-----------------------------------------------------------------------
     718             : 
     719             :    call ice_distributionGet(dist, &
     720             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     721             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
     722           0 :                             communicator = communicator)
     723             : 
     724             : !-----------------------------------------------------------------------
     725             : !
     726             : !  now use MPI global reduction to reduce local sum to global sum
     727             : !
     728             : !-----------------------------------------------------------------------
     729             : 
     730             : #ifdef SERIAL_REMOVE_MPI
     731             :    globalSum = scalar
     732             : #else
     733           0 :    if (my_task < numProcs) then
     734             :       call MPI_ALLREDUCE(scalar, globalSum, 1, &
     735           0 :                          MPI_INTEGER, MPI_SUM, communicator, ierr)
     736             :    endif
     737             : #endif
     738             : 
     739             : !-----------------------------------------------------------------------
     740             : 
     741           0 :  end function global_sum_scalar_int
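A hedged note on the three scalar variants above: the double and single precision versions pack the task-local value into a 1x1 work array so the same compute_sums_dbl path (and hence the bfbflag choice) handles the cross-task sum, while the integer version reduces directly with MPI_ALLREDUCE. A one-line usage sketch, with my_block_count and distrb_info as placeholder caller variables:

   nblocks_total = global_sum(my_block_count, distrb_info)   ! -> global_sum_scalar_int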
     742             : 
     743             : !***********************************************************************
     744             : 
     745       46080 :  function global_sum_prod_dbl (array1, array2, dist, field_loc, &
     746             :                                mMask, lMask) &   ! LCOV_EXCL_LINE
     747       11520 :           result(globalSum)
     748             : 
     749             : !  Computes the global sum of the physical domain of a product of
     750             : !  two 2-d arrays.
     751             : !
     752             : !  This is actually the specific interface for the generic
     753             : !  global_sum_prod function corresponding to double precision arrays.
     754             : !  The generic interface is identical but will handle real and integer
     755             : !  2-d slabs.
     756             : 
     757             :    real (dbl_kind), dimension(:,:,:), intent(in) :: &
     758             :       array1, array2       ! arrays whose product is to be summed
     759             : 
     760             :    type (distrb), intent(in) :: &
     761             :       dist                 ! block distribution for arrays
     762             : 
     763             :    integer (int_kind), intent(in) :: &
     764             :       field_loc            ! location of field on staggered grid
     765             : 
     766             :    real (dbl_kind), dimension(:,:,:), intent(in), optional :: &
     767             :       mMask                ! optional multiplicative mask
     768             : 
     769             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
     770             :       lMask                ! optional logical mask
     771             : 
     772             :    real (dbl_kind) :: &
     773             :       globalSum            ! resulting global sum
     774             : 
     775             : !-----------------------------------------------------------------------
     776             : !
     777             : !  local variables
     778             : !
     779             : !-----------------------------------------------------------------------
     780             : 
     781             :    integer (int_kind) :: &
     782             :       i,j,iblock,n, &! local counters   ! LCOV_EXCL_LINE
     783             :       ib,ie,jb,je,  &! beg,end of physical domain   ! LCOV_EXCL_LINE
     784             :       blockID,      &! block location   ! LCOV_EXCL_LINE
     785             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     786             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     787             :       communicator, &! communicator for this distribution   ! LCOV_EXCL_LINE
     788             :       maxiglob       ! maximum non-redundant value of i_global
     789             : 
     790             :    logical (log_kind) :: &
     791             :       Nrow           ! this field is on an N row (a velocity row)
     792             : 
     793             :    real (dbl_kind), dimension(:,:), allocatable :: &
     794       46080 :       work           ! temporary local array
     795             : 
     796             :    real (dbl_kind), dimension(:), allocatable :: &
     797       46080 :       sums           ! array of sums
     798             : 
     799             :    type (block) :: &
     800             :       this_block     ! holds local block information
     801             : 
     802             :    character(len=*), parameter :: subname = '(global_sum_prod_dbl)'
     803             : 
     804             : !-----------------------------------------------------------------------
     805             : 
     806             : 
     807             :    call ice_distributionGet(dist,          &
     808             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     809             :                             nprocs = numProcs,       &   ! LCOV_EXCL_LINE
     810       46080 :                             communicator = communicator)
     811             : 
     812       46080 :    if (numBlocks == 0) then
     813           0 :       allocate(work(1,1))
     814             :    else
     815       46080 :       allocate(work(nx_block*ny_block*numBlocks,1))
     816             :    endif
     817       46080 :    allocate(sums(1))
     818   121243200 :    work = 0.0_dbl_kind
     819       92160 :    sums = 0.0_dbl_kind
     820             : 
     821      229440 :    do iblock=1,numBlocks
     822      183360 :       call ice_distributionGetBlockID(dist, iblock, blockID)
     823             : 
     824      183360 :       this_block = get_block(blockID, blockID)
     825             : 
     826      183360 :       ib = this_block%ilo
     827      183360 :       ie = this_block%ihi
     828      183360 :       jb = this_block%jlo
     829      183360 :       je = this_block%jhi
     830             : 
     831             :       !*** if this row along or beyond tripole boundary
     832             :       !*** must eliminate redundant points from global sum
     833             : 
     834      183360 :       maxiglob = -1
     835      183360 :       if (this_block%tripole) then
     836             :          Nrow=(field_loc == field_loc_Nface .or. &
     837           0 :                field_loc == field_loc_NEcorner)
     838           0 :          if (Nrow .and. this_block%tripoleTFlag) then
     839           0 :             maxiglob = 0 ! entire u-row on T-fold grid
     840           0 :          elseif (Nrow .or. this_block%tripoleTFlag) then
     841           0 :             maxiglob = nx_global/2 ! half T-row on T-fold or half u-row on u-fold
     842             :          else
     843           0 :             maxiglob = -1 ! nothing to do for T-row on u-fold
     844             :          endif
     845             :       endif
     846             : 
     847      183360 :       n = (iblock-1)*nx_block*ny_block
     848             : 
     849      183360 :       if (present(mMask)) then
     850     5822400 :          do j=jb,je
     851   108365760 :          do i=ib,ie
     852   102726720 :             n = n + 1
     853   108182400 :             work(n,1) = array1(i,j,iblock)*array2(i,j,iblock)*mMask(i,j,iblock)
     854             :          end do
     855             :          end do
     856           0 :       elseif (present(lMask)) then
     857           0 :          do j=jb,je
     858           0 :          do i=ib,ie
     859           0 :             n = n + 1
     860           0 :             if (lMask(i,j,iblock)) then
     861           0 :                work(n,1) = array1(i,j,iblock)*array2(i,j,iblock)
     862             :             endif
     863             :          end do
     864             :          end do
     865             :       else
     866           0 :          do j=jb,je
     867           0 :          do i=ib,ie
     868           0 :             n = n + 1
     869           0 :             work(n,1) = array1(i,j,iblock)*array2(i,j,iblock)
     870             :          enddo
     871             :          enddo
     872             :       endif
     873             : 
     874      412800 :       if (maxiglob >= 0) then
     875             :          ! eliminate redundant points at je
     876             :          ! set n to (ib,je) index
     877           0 :          n = (iblock-1)*nx_block*ny_block
     878           0 :          n = n + (je-1-jb+1)*(ie-ib+1)
     879           0 :          j=je
     880           0 :          do i=ib,ie
     881           0 :             n = n + 1
     882           0 :             if (this_block%i_glob(i) > maxiglob) then
     883           0 :                work(n,1) = 0._dbl_kind
     884             :             endif
     885             :          end do
     886             :       endif
     887             : 
     888             :    end do
     889             : 
     890       46080 :    call compute_sums_dbl(work,sums,communicator,numProcs)
     891             : 
     892       46080 :    globalSum = sums(1)
     893             : 
     894       46080 :    deallocate(work)
     895       46080 :    deallocate(sums)
     896             : 
     897             : !-----------------------------------------------------------------------
     898             : 
     899       92160 :  end function global_sum_prod_dbl
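A hedged example of the product form above, which sums array1*array2 over the physical domain without requiring the caller to build the product array first; aice, hice, tarea, distrb_info, and field_loc_center are placeholder names for this sketch.

   ! area-integrated product of two fields, e.g. concentration times thickness
   total_volume = global_sum_prod(aice, hice, distrb_info, field_loc_center, mMask=tarea)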
     900             : 
     901             : !***********************************************************************
     902             : 
     903           0 :  function global_sum_prod_real (array1, array2, dist, field_loc, &
     904             :                                 mMask, lMask) &   ! LCOV_EXCL_LINE
     905           0 :           result(globalSum)
     906             : 
     907             : !  Computes the global sum of the physical domain of a product of
     908             : !  two 2-d arrays.
     909             : !
     910             : !  This is actually the specific interface for the generic
     911             : !  global_sum_prod function corresponding to single precision arrays.
     912             : !  The generic interface is identical but will handle real and integer
     913             : !  2-d slabs.
     914             : 
     915             :    real (real_kind), dimension(:,:,:), intent(in) :: &
     916             :       array1, array2       ! arrays whose product is to be summed
     917             : 
     918             :    type (distrb), intent(in) :: &
     919             :       dist                 ! block distribution for arrays
     920             : 
     921             :    integer (int_kind), intent(in) :: &
     922             :       field_loc            ! location of field on staggered grid
     923             : 
     924             :    real (real_kind), dimension(:,:,:), intent(in), optional :: &
     925             :       mMask                ! optional multiplicative mask
     926             : 
     927             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
     928             :       lMask                ! optional logical mask
     929             : 
     930             :    real (real_kind) :: &
     931             :       globalSum            ! resulting global sum
     932             : 
     933             : !-----------------------------------------------------------------------
     934             : !
     935             : !  local variables
     936             : !
     937             : !-----------------------------------------------------------------------
     938             : 
     939             :    integer (int_kind) :: &
     940             :       i,j,iblock,n, &! local counters   ! LCOV_EXCL_LINE
     941             :       ib,ie,jb,je,  &! beg,end of physical domain   ! LCOV_EXCL_LINE
     942             :       blockID,      &! block location   ! LCOV_EXCL_LINE
     943             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     944             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     945             :       communicator, &! communicator for this distribution   ! LCOV_EXCL_LINE
     946             :       maxiglob       ! maximum non-redundant value of i_global
     947             : 
     948             :    logical (log_kind) :: &
     949             :       Nrow           ! this field is on an N row (a velocity row)
     950             : 
     951             :    real (dbl_kind), dimension(:,:), allocatable :: &
     952           0 :       work           ! temporary local array
     953             : 
     954             :    real (dbl_kind), dimension(:), allocatable :: &
     955           0 :       sums           ! array of sums
     956             : 
     957             :    type (block) :: &
     958             :       this_block     ! holds local block information
     959             : 
     960             :    character(len=*), parameter :: subname = '(global_sum_prod_real)'
     961             : 
     962             : !-----------------------------------------------------------------------
     963             : 
     964             : 
     965             :    call ice_distributionGet(dist,          &
     966             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     967             :                             nprocs = numProcs,       &   ! LCOV_EXCL_LINE
     968           0 :                             communicator = communicator)
     969             : 
     970           0 :    if (numBlocks == 0) then
     971           0 :       allocate(work(1,1))
     972             :    else
     973           0 :       allocate(work(nx_block*ny_block*numBlocks,1))
     974             :    endif
     975           0 :    allocate(sums(1))
     976           0 :    work = 0.0_dbl_kind
     977           0 :    sums = 0.0_dbl_kind
     978             : 
     979           0 :    do iblock=1,numBlocks
     980           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
     981             : 
     982           0 :       this_block = get_block(blockID, blockID)
     983             : 
     984           0 :       ib = this_block%ilo
     985           0 :       ie = this_block%ihi
     986           0 :       jb = this_block%jlo
     987           0 :       je = this_block%jhi
     988             : 
     989             :       !*** if this row along or beyond tripole boundary
     990             :       !*** must eliminate redundant points from global sum
     991             : 
     992           0 :       maxiglob = -1
     993           0 :       if (this_block%tripole) then
     994             :          Nrow=(field_loc == field_loc_Nface .or. &
     995           0 :                field_loc == field_loc_NEcorner)
     996           0 :          if (Nrow .and. this_block%tripoleTFlag) then
     997           0 :             maxiglob = 0 ! entire u-row on T-fold grid
     998           0 :          elseif (Nrow .or. this_block%tripoleTFlag) then
     999           0 :             maxiglob = nx_global/2 ! half T-row on T-fold or half u-row on u-fold
    1000             :          else
    1001           0 :             maxiglob = -1 ! nothing to do for T-row on u-fold
    1002             :          endif
    1003             :       endif
    1004             : 
    1005           0 :       n = (iblock-1)*nx_block*ny_block
    1006             : 
    1007           0 :       if (present(mMask)) then
    1008           0 :          do j=jb,je
    1009           0 :          do i=ib,ie
    1010           0 :             n = n + 1
    1011           0 :             work(n,1) = real(array1(i,j,iblock)*array2(i,j,iblock)*mMask(i,j,iblock),dbl_kind)
    1012             :          end do
    1013             :          end do
    1014           0 :       elseif (present(lMask)) then
    1015           0 :          do j=jb,je
    1016           0 :          do i=ib,ie
    1017           0 :             n = n + 1
    1018           0 :             if (lMask(i,j,iblock)) then
    1019           0 :                work(n,1) = real(array1(i,j,iblock)*array2(i,j,iblock),dbl_kind)
    1020             :             endif
    1021             :          end do
    1022             :          end do
    1023             :       else
    1024           0 :          do j=jb,je
    1025           0 :          do i=ib,ie
    1026           0 :             n = n + 1
    1027           0 :             work(n,1) = real(array1(i,j,iblock)*array2(i,j,iblock),dbl_kind)
    1028             :          enddo
    1029             :          enddo
    1030             :       endif
    1031             : 
    1032           0 :       if (maxiglob >= 0) then
    1033             :          ! eliminate redundant points at je
    1034             :          ! set n to (ib,je) index
    1035           0 :          n = (iblock-1)*nx_block*ny_block
    1036           0 :          n = n + (je-1-jb+1)*(ie-ib+1)
    1037           0 :          j=je
    1038           0 :          do i=ib,ie
    1039           0 :             n = n + 1
    1040           0 :             if (this_block%i_glob(i) > maxiglob) then
    1041           0 :                work(n,1) = 0._dbl_kind
    1042             :             endif
    1043             :          end do
    1044             :       endif
    1045             : 
    1046             :    end do
    1047             : 
    1048           0 :    call compute_sums_dbl(work,sums,communicator,numProcs)
    1049             : 
    1050           0 :    globalSum = real(sums(1),real_kind)
    1051             : 
    1052           0 :    deallocate(work)
    1053           0 :    deallocate(sums)
    1054             : 
    1055             : !-----------------------------------------------------------------------
    1056             : 
    1057           0 :  end function global_sum_prod_real
    1058             : 
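
For orientation, a minimal calling sketch of the generic global_sum_prod interface whose real-kind specialisation ends above. This is not part of the instrumented source: aice, hice, tarea and distrb_info are hypothetical caller data, and field_loc_center is assumed to be available from ice_constants.

       ! Sketch only: area-weighted sum of aice*hice over the physical
       ! (non-halo, non-redundant) points of every local block.
       ! aice, hice, tarea, distrb_info: hypothetical caller data.
       subroutine example_sum_prod(aice, hice, tarea, distrb_info)
          use ice_kinds_mod,         only: dbl_kind
          use ice_constants,         only: field_loc_center   ! assumed constant
          use ice_distribution,      only: distrb
          use ice_global_reductions, only: global_sum_prod
          real (dbl_kind), dimension(:,:,:), intent(in) :: aice, hice, tarea
          type (distrb),                     intent(in) :: distrb_info
          real (dbl_kind) :: total
          total = global_sum_prod(aice, hice, distrb_info, field_loc_center, &
                                  mMask=tarea)
       end subroutine example_sum_prod
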
    1059             : !***********************************************************************
    1060             : 
    1061           0 :  function global_sum_prod_int (array1, array2, dist, field_loc, &
    1062             :                                mMask, lMask) &   ! LCOV_EXCL_LINE
    1063             :           result(globalSum)
    1064             : 
    1065             : !  Computes the global sum of the physical domain of a product of
    1066             : !  two 2-d arrays.
    1067             : !
    1068             : !  This is actually the specific interface for the generic
    1069             : !  global_sum_prod function corresponding to integer arrays.
    1070             : !  The generic interface is identical but will handle real and integer
    1071             : !  2-d slabs.
    1072             : 
    1073             :    integer (int_kind), dimension(:,:,:), intent(in) :: &
    1074             :       array1, array2       ! arrays whose product is to be summed
    1075             : 
    1076             :    type (distrb), intent(in) :: &
    1077             :       dist                 ! block distribution for arrays
    1078             : 
    1079             :    integer (int_kind), intent(in) :: &
    1080             :       field_loc            ! location of field on staggered grid
    1081             : 
    1082             :    integer (int_kind), dimension(:,:,:), intent(in), optional :: &
    1083             :       mMask                ! optional multiplicative mask
    1084             : 
    1085             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
    1086             :       lMask                ! optional logical mask
    1087             : 
    1088             :    integer (int_kind) :: &
    1089             :       globalSum            ! resulting global sum
    1090             : 
    1091             : !-----------------------------------------------------------------------
    1092             : !
    1093             : !  local variables
    1094             : !
    1095             : !-----------------------------------------------------------------------
    1096             : 
    1097             :    integer (int_kind) :: &
    1098             :       blockSum,     &! sum of local block domain   ! LCOV_EXCL_LINE
    1099             :       localSum       ! sum of all local block domains
    1100             : 
    1101             :    integer (int_kind) :: &
    1102             :       i,j,iblock,      &! local counters   ! LCOV_EXCL_LINE
    1103             :       ib,ie,jb,je,     &! beg,end of physical domain   ! LCOV_EXCL_LINE
    1104             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1105             :       blockID,         &! block location   ! LCOV_EXCL_LINE
    1106             :       numBlocks,       &! number of local blocks   ! LCOV_EXCL_LINE
     1107             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1108             :       communicator,    &! communicator for this distribution   ! LCOV_EXCL_LINE
    1109             :       maxiglob          ! maximum non-redundant value of i_global
    1110             : 
    1111             :    logical (log_kind) :: &
     1112             :       Nrow           ! this field is on an N row (a velocity row)
    1113             : 
    1114             :    type (block) :: &
    1115             :       this_block          ! holds local block information
    1116             : 
    1117             :    character(len=*), parameter :: subname = '(global_sum_prod_int)'
    1118             : 
    1119             : !-----------------------------------------------------------------------
    1120             : 
    1121           0 :    localSum  = 0_int_kind
    1122           0 :    globalSum = 0_int_kind
    1123             : 
    1124             :    call ice_distributionGet(dist, &
    1125             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
    1126             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1127           0 :                             communicator = communicator)
    1128             : 
    1129           0 :    do iblock=1,numBlocks
    1130           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
    1131             : 
    1132           0 :       this_block = get_block(blockID, blockID)
    1133             : 
    1134           0 :       ib = this_block%ilo
    1135           0 :       ie = this_block%ihi
    1136           0 :       jb = this_block%jlo
    1137           0 :       je = this_block%jhi
    1138             : 
     1139             :       !*** if this row lies along or beyond the tripole boundary,
     1140             :       !*** we must eliminate redundant points from the global sum
    1141             : 
    1142           0 :       maxiglob = -1
    1143           0 :       if (this_block%tripole) then
    1144             :          Nrow=(field_loc == field_loc_Nface .or. &
    1145           0 :                field_loc == field_loc_NEcorner)
    1146           0 :          if (Nrow .and. this_block%tripoleTFlag) then
    1147           0 :             maxiglob = 0 ! entire u-row on T-fold grid
    1148           0 :          elseif (Nrow .or. this_block%tripoleTFlag) then
    1149           0 :             maxiglob = nx_global/2 ! half T-row on T-fold or half u-row on u-fold
    1150             :          else
    1151           0 :             maxiglob = -1 ! nothing to do for T-row on u-fold
    1152             :          endif
    1153             :       endif
    1154             : 
    1155           0 :       blockSum = 0_int_kind
    1156             : 
    1157           0 :       do j=jb,je
    1158           0 :       do i=ib,ie
    1159             :          ! eliminate redundant points
    1160           0 :          if (maxiglob >= 0 .and. j == je .and. this_block%i_glob(i) > maxiglob) then
    1161             : !            blockSum = blockSum + 0_int_kind
    1162             :          else
    1163           0 :             if (present(mMask)) then
    1164           0 :                blockSum = blockSum + array1(i,j,iblock)*array2(i,j,iblock)*mMask(i,j,iblock)
    1165           0 :             else if (present(lMask)) then
    1166           0 :                if (lMask(i,j,iblock)) then
    1167           0 :                   blockSum = blockSum + array1(i,j,iblock)*array2(i,j,iblock)
    1168             :                endif
    1169             :             else
    1170           0 :                blockSum = blockSum + array1(i,j,iblock)*array2(i,j,iblock)
    1171             :             endif
    1172             :          endif
    1173             :       end do
    1174             :       end do
    1175             : 
    1176             :       !*** now add block sum to global sum
    1177             : 
    1178           0 :       localSum = localSum + blockSum
    1179             : 
    1180             :    end do
    1181             : 
    1182             : !-----------------------------------------------------------------------
    1183             : !
    1184             : !  now use MPI global reduction to reduce local sum to global sum
    1185             : !
    1186             : !-----------------------------------------------------------------------
    1187             : 
    1188             : #ifdef SERIAL_REMOVE_MPI
    1189             :    globalSum = localSum
    1190             : #else
    1191           0 :    if (my_task < numProcs) then
    1192             :       call MPI_ALLREDUCE(localSum, globalSum, 1, &
    1193           0 :                          MPI_INTEGER, MPI_SUM, communicator, ierr)
    1194             :    endif
    1195             : #endif
    1196             : 
    1197             : !-----------------------------------------------------------------------
    1198             : 
    1199           0 :  end function global_sum_prod_int
    1200             : 
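
The tripole bookkeeping repeated in both sum-of-product routines above can be read as a small decision table. Below is a sketch that restates it as a stand-alone function with hypothetical names: a point on a block's top row (j == je) is dropped from the sum whenever its global i index exceeds the returned cutoff, and -1 means nothing is dropped.

       ! Sketch only: cutoff above which top-row points are redundant.
       pure function redundant_cutoff(is_nrow, tripole_t_fold, nx_global) result(maxiglob)
          implicit none
          logical, intent(in) :: is_nrow         ! field sits on an N (velocity) row
          logical, intent(in) :: tripole_t_fold  ! grid folds across a T row
          integer, intent(in) :: nx_global       ! global domain size in i
          integer :: maxiglob
          if (is_nrow .and. tripole_t_fold) then
             maxiglob = 0               ! entire u-row is redundant on a T-fold grid
          elseif (is_nrow .or. tripole_t_fold) then
             maxiglob = nx_global/2     ! second half of the row duplicates the first
          else
             maxiglob = -1              ! T-row on a u-fold grid: nothing to drop
          endif
       end function redundant_cutoff
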
    1201             : !***********************************************************************
    1202             : 
    1203       34848 :  function global_maxval_dbl (array, dist, lMask) &
    1204        8704 :           result(globalMaxval)
    1205             : 
    1206             : !  Computes the global maximum value of the physical domain of a 2-d field
    1207             : !
    1208             : !  This is actually the specific interface for the generic global_maxval
    1209             : !  function corresponding to double precision arrays.
    1210             : 
    1211             :    real (dbl_kind), dimension(:,:,:), intent(in) :: &
    1212             :       array                ! array for which max value needed
    1213             : 
    1214             :    type (distrb), intent(in) :: &
    1215             :       dist                 ! block distribution for array X
    1216             : 
    1217             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
    1218             :       lMask                ! optional logical mask
    1219             : 
    1220             :    real (dbl_kind) :: &
    1221             :       globalMaxval         ! resulting maximum value of array
    1222             : 
    1223             : !-----------------------------------------------------------------------
    1224             : !
    1225             : !  local variables
    1226             : !
    1227             : !-----------------------------------------------------------------------
    1228             : 
    1229             :    real (dbl_kind) ::    &
     1230             :       blockMaxval,     &! max value of local block domain   ! LCOV_EXCL_LINE
     1231        8704 :       localMaxval       ! max value over all local block domains
    1232             : 
    1233             :    integer (int_kind) :: &
    1234             :       i,j,iblock,      &! local counters   ! LCOV_EXCL_LINE
    1235             :       ib,ie,jb,je,     &! beg,end of physical domain   ! LCOV_EXCL_LINE
    1236             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1237             :       numBlocks,       &! number of local blocks   ! LCOV_EXCL_LINE
     1238             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1239             :       communicator,    &! communicator for this distribution   ! LCOV_EXCL_LINE
    1240             :       blockID           ! block location
    1241             : 
    1242             :    type (block) :: &
    1243             :       this_block          ! holds local block information
    1244             : 
    1245             :    character(len=*), parameter :: subname = '(global_maxval_dbl)'
    1246             : 
    1247             : !-----------------------------------------------------------------------
    1248             : 
    1249       34848 :    localMaxval  = -HUGE(0.0_dbl_kind)
    1250       34848 :    globalMaxval = -HUGE(0.0_dbl_kind)
    1251             : 
    1252             :    call ice_distributionGet(dist, &
    1253             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
    1254             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1255       34848 :                             communicator = communicator)
    1256             : 
    1257      173640 :    do iblock=1,numBlocks
    1258      138792 :       call ice_distributionGetBlockID(dist, iblock, blockID)
    1259             : 
    1260      138792 :       this_block = get_block(blockID, blockID)
    1261             : 
    1262      138792 :       ib = this_block%ilo
    1263      138792 :       ie = this_block%ihi
    1264      138792 :       jb = this_block%jlo
    1265      138792 :       je = this_block%jhi
    1266             : 
    1267      138792 :       blockMaxval = -HUGE(0.0_dbl_kind)
    1268             : 
    1269      138792 :       if (present(lMask)) then
    1270     4407000 :          do j=jb,je
    1271    81945432 :          do i=ib,ie
    1272    81806640 :             if (lMask(i,j,iblock)) then
    1273    39019888 :                blockMaxval = max(blockMaxval,array(i,j,iblock))
    1274             :             endif
    1275             :          end do
    1276             :          end do
    1277             :       else
    1278           0 :          do j=jb,je
    1279           0 :          do i=ib,ie
    1280           0 :             blockMaxval = max(blockMaxval,array(i,j,iblock))
    1281             :          end do
    1282             :          end do
    1283             :       endif
    1284             : 
    1285      312432 :       localMaxval = max(localMaxval,blockMaxval)
    1286             : 
    1287             :    end do
    1288             : 
    1289             : !-----------------------------------------------------------------------
    1290             : !
    1291             : !  now use MPI global reduction to reduce local maxval to global maxval
    1292             : !
    1293             : !-----------------------------------------------------------------------
    1294             : 
    1295             : #ifdef SERIAL_REMOVE_MPI
    1296             :    globalMaxval = localMaxval
    1297             : #else
    1298       34848 :    if (my_task < numProcs) then
    1299             :       call MPI_ALLREDUCE(localMaxval, globalMaxval, 1, &
    1300       34848 :                          mpiR8, MPI_MAX, communicator, ierr)
    1301             :    endif
    1302             : #endif
    1303             : 
    1304             : !-----------------------------------------------------------------------
    1305             : 
    1306       69696 :  end function global_maxval_dbl
    1307             : 
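
A minimal usage sketch for the array form of global_maxval just above; vvel, ocean_mask and distrb_info are hypothetical caller data.

       ! Sketch only: maximum of a masked double-precision field.
       ! vvel, ocean_mask, distrb_info: hypothetical caller data.
       subroutine example_maxval(vvel, ocean_mask, distrb_info)
          use ice_kinds_mod,         only: dbl_kind, log_kind
          use ice_distribution,      only: distrb
          use ice_global_reductions, only: global_maxval
          real (dbl_kind),    dimension(:,:,:), intent(in) :: vvel
          logical (log_kind), dimension(:,:,:), intent(in) :: ocean_mask
          type (distrb),                        intent(in) :: distrb_info
          real (dbl_kind) :: vmax
          ! masked-out points never enter the local max or the MPI_MAX reduction
          vmax = global_maxval(vvel, distrb_info, lMask=ocean_mask)
       end subroutine example_maxval
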
    1308             : !***********************************************************************
    1309             : 
    1310           0 :  function global_maxval_real (array, dist, lMask) &
    1311           0 :           result(globalMaxval)
    1312             : 
    1313             : !  Computes the global maximum value of the physical domain of a 2-d field
    1314             : !
    1315             : !  This is actually the specific interface for the generic global_maxval
    1316             : !  function corresponding to single precision arrays.
    1317             : 
    1318             :    real (real_kind), dimension(:,:,:), intent(in) :: &
    1319             :       array                ! array for which max value needed
    1320             : 
    1321             :    type (distrb), intent(in) :: &
    1322             :       dist                 ! block distribution for array X
    1323             : 
    1324             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
    1325             :       lMask                ! optional logical mask
    1326             : 
    1327             :    real (real_kind) :: &
    1328             :       globalMaxval         ! resulting maximum value of array
    1329             : 
    1330             : !-----------------------------------------------------------------------
    1331             : !
    1332             : !  local variables
    1333             : !
    1334             : !-----------------------------------------------------------------------
    1335             : 
    1336             :    real (real_kind) ::    &
     1337             :       blockMaxval,     &! max value of local block domain   ! LCOV_EXCL_LINE
     1338           0 :       localMaxval       ! max value over all local block domains
    1339             : 
    1340             :    integer (int_kind) :: &
    1341             :       i,j,iblock,      &! local counters   ! LCOV_EXCL_LINE
    1342             :       ib,ie,jb,je,     &! beg,end of physical domain   ! LCOV_EXCL_LINE
    1343             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1344             :       numBlocks,       &! number of local blocks   ! LCOV_EXCL_LINE
     1345             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1346             :       communicator,    &! communicator for this distribution   ! LCOV_EXCL_LINE
    1347             :       blockID           ! block location
    1348             : 
    1349             :    type (block) :: &
    1350             :       this_block          ! holds local block information
    1351             : 
    1352             :    character(len=*), parameter :: subname = '(global_maxval_real)'
    1353             : 
    1354             : !-----------------------------------------------------------------------
    1355             : 
    1356           0 :    localMaxval  = -HUGE(0.0_real_kind)
    1357           0 :    globalMaxval = -HUGE(0.0_real_kind)
    1358             : 
    1359             :    call ice_distributionGet(dist, &
    1360             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
    1361             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1362           0 :                             communicator = communicator)
    1363             : 
    1364           0 :    do iblock=1,numBlocks
    1365           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
    1366             : 
    1367           0 :       this_block = get_block(blockID, blockID)
    1368             : 
    1369           0 :       ib = this_block%ilo
    1370           0 :       ie = this_block%ihi
    1371           0 :       jb = this_block%jlo
    1372           0 :       je = this_block%jhi
    1373             : 
    1374           0 :       blockMaxval = -HUGE(0.0_real_kind)
    1375             : 
    1376           0 :       if (present(lMask)) then
    1377           0 :          do j=jb,je
    1378           0 :          do i=ib,ie
    1379           0 :             if (lMask(i,j,iblock)) then
    1380           0 :                blockMaxval = max(blockMaxval,array(i,j,iblock))
    1381             :             endif
    1382             :          end do
    1383             :          end do
    1384             :       else
    1385           0 :          do j=jb,je
    1386           0 :          do i=ib,ie
    1387           0 :             blockMaxval = max(blockMaxval,array(i,j,iblock))
    1388             :          end do
    1389             :          end do
    1390             :       endif
    1391             : 
    1392           0 :       localMaxval = max(localMaxval,blockMaxval)
    1393             : 
    1394             :    end do
    1395             : 
    1396             : !-----------------------------------------------------------------------
    1397             : !
    1398             : !  now use MPI global reduction to reduce local maxval to global maxval
    1399             : !
    1400             : !-----------------------------------------------------------------------
    1401             : 
    1402             : #ifdef SERIAL_REMOVE_MPI
    1403             :    globalMaxval = localMaxval
    1404             : #else
    1405           0 :    if (my_task < numProcs) then
    1406             :       call MPI_ALLREDUCE(localMaxval, globalMaxval, 1, &
    1407           0 :                          mpiR4, MPI_MAX, communicator, ierr)
    1408             :    endif
    1409             : #endif
    1410             : 
    1411             : !-----------------------------------------------------------------------
    1412             : 
    1413           0 :  end function global_maxval_real
    1414             : 
    1415             : !***********************************************************************
    1416             : 
    1417           0 :  function global_maxval_int (array, dist, lMask) &
    1418             :           result(globalMaxval)
    1419             : 
    1420             : !  Computes the global maximum value of the physical domain of a 2-d field
    1421             : !
    1422             : !  This is actually the specific interface for the generic global_maxval
    1423             : !  function corresponding to integer arrays.
    1424             : 
    1425             :    integer (int_kind), dimension(:,:,:), intent(in) :: &
    1426             :       array                ! array for which max value needed
    1427             : 
    1428             :    type (distrb), intent(in) :: &
    1429             :       dist                 ! block distribution for array X
    1430             : 
    1431             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
    1432             :       lMask                ! optional logical mask
    1433             : 
    1434             :    integer (int_kind) :: &
    1435             :       globalMaxval         ! resulting maximum value of array
    1436             : 
    1437             : !-----------------------------------------------------------------------
    1438             : !
    1439             : !  local variables
    1440             : !
    1441             : !-----------------------------------------------------------------------
    1442             : 
    1443             :    integer (int_kind) ::    &
     1444             :       blockMaxval,     &! max value of local block domain   ! LCOV_EXCL_LINE
     1445             :       localMaxval       ! max value over all local block domains
    1446             : 
    1447             :    integer (int_kind) :: &
    1448             :       i,j,iblock,      &! local counters   ! LCOV_EXCL_LINE
    1449             :       ib,ie,jb,je,     &! beg,end of physical domain   ! LCOV_EXCL_LINE
    1450             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1451             :       numBlocks,       &! number of local blocks   ! LCOV_EXCL_LINE
     1452             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1453             :       communicator,    &! communicator for this distribution   ! LCOV_EXCL_LINE
    1454             :       blockID           ! block location
    1455             : 
    1456             :    type (block) :: &
    1457             :       this_block          ! holds local block information
    1458             : 
    1459             :    character(len=*), parameter :: subname = '(global_maxval_int)'
    1460             : 
    1461             : !-----------------------------------------------------------------------
    1462             : 
    1463           0 :    localMaxval  = -HUGE(0_int_kind)
    1464           0 :    globalMaxval = -HUGE(0_int_kind)
    1465             : 
    1466             :    call ice_distributionGet(dist, &
    1467             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
    1468             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1469           0 :                             communicator = communicator)
    1470             : 
    1471           0 :    do iblock=1,numBlocks
    1472           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
    1473             : 
    1474           0 :       this_block = get_block(blockID, blockID)
    1475             : 
    1476           0 :       ib = this_block%ilo
    1477           0 :       ie = this_block%ihi
    1478           0 :       jb = this_block%jlo
    1479           0 :       je = this_block%jhi
    1480             : 
    1481           0 :       blockMaxval = -HUGE(0_int_kind)
    1482             : 
    1483           0 :       if (present(lMask)) then
    1484           0 :          do j=jb,je
    1485           0 :          do i=ib,ie
    1486           0 :             if (lMask(i,j,iblock)) then
    1487           0 :                blockMaxval = max(blockMaxval,array(i,j,iblock))
    1488             :             endif
    1489             :          end do
    1490             :          end do
    1491             :       else
    1492           0 :          do j=jb,je
    1493           0 :          do i=ib,ie
    1494           0 :             blockMaxval = max(blockMaxval,array(i,j,iblock))
    1495             :          end do
    1496             :          end do
    1497             :       endif
    1498             : 
    1499           0 :       localMaxval = max(localMaxval,blockMaxval)
    1500             : 
    1501             :    end do
    1502             : 
    1503             : !-----------------------------------------------------------------------
    1504             : !
    1505             : !  now use MPI global reduction to reduce local maxval to global maxval
    1506             : !
    1507             : !-----------------------------------------------------------------------
    1508             : 
    1509             : #ifdef SERIAL_REMOVE_MPI
    1510             :    globalMaxval = localMaxval
    1511             : #else
    1512           0 :    if (my_task < numProcs) then
    1513             :       call MPI_ALLREDUCE(localMaxval, globalMaxval, 1, &
    1514           0 :                          MPI_INTEGER, MPI_MAX, communicator, ierr)
    1515             :    endif
    1516             : #endif
    1517             : 
    1518             : !-----------------------------------------------------------------------
    1519             : 
    1520           0 :  end function global_maxval_int
    1521             : 
    1522             : !***********************************************************************
    1523             : 
    1524         864 :  function global_maxval_scalar_dbl (scalar, dist) &
    1525         192 :           result(globalMaxval)
    1526             : 
    1527             : !  Computes the global maximum value of a scalar value across
    1528             : !  a distributed machine.
    1529             : !
    1530             : !  This is actually the specific interface for the generic global_maxval
    1531             : !  function corresponding to double precision scalars.
    1532             : 
    1533             :    real (dbl_kind), intent(in) :: &
    1534             :       scalar               ! scalar for which max value needed
    1535             : 
    1536             :    type (distrb), intent(in) :: &
    1537             :       dist                 ! block distribution
    1538             : 
    1539             :    real (dbl_kind) :: &
    1540             :       globalMaxval         ! resulting maximum value
    1541             : 
    1542             : !-----------------------------------------------------------------------
    1543             : !
    1544             : !  local variables
    1545             : !
    1546             : !-----------------------------------------------------------------------
    1547             : 
    1548             :    integer (int_kind) :: &
    1549             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
     1550             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1551             :       communicator      ! communicator for this distribution
    1552             : 
    1553             :    character(len=*), parameter :: subname = '(global_maxval_scalar_dbl)'
    1554             : 
    1555             : !-----------------------------------------------------------------------
    1556             : 
    1557             :    call ice_distributionGet(dist, &
    1558             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1559         864 :                             communicator = communicator)
    1560             : 
    1561             : !-----------------------------------------------------------------------
    1562             : !
    1563             : !  now use MPI global reduction to reduce local maxval to global maxval
    1564             : !
    1565             : !-----------------------------------------------------------------------
    1566             : 
    1567             : #ifdef SERIAL_REMOVE_MPI
    1568             :    globalMaxval = scalar
    1569             : #else
    1570         864 :    if (my_task < numProcs) then
    1571             :       call MPI_ALLREDUCE(scalar, globalMaxval, 1, &
    1572         864 :                          mpiR8, MPI_MAX, communicator, ierr)
    1573             :    endif
    1574             : #endif
    1575             : 
    1576             : !-----------------------------------------------------------------------
    1577             : 
    1578        1728 :  end function global_maxval_scalar_dbl
    1579             : 
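
The scalar specialisations reduce a single value per task rather than a field. A short fragment with hypothetical names (local_cfl, distrb_info), assuming the generic interface resolves as above:

       real (dbl_kind) :: local_cfl, global_cfl   ! local_cfl: hypothetical per-task diagnostic
       ! largest per-task value across the whole distribution
       global_cfl = global_maxval(local_cfl, distrb_info)
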
    1580             : !***********************************************************************
    1581             : 
    1582           0 :  function global_maxval_scalar_real (scalar, dist) &
    1583           0 :           result(globalMaxval)
    1584             : 
    1585             : !  Computes the global maximum value of a scalar value across
    1586             : !  a distributed machine.
    1587             : !
    1588             : !  This is actually the specific interface for the generic global_maxval
    1589             : !  function corresponding to single precision scalars.
    1590             : 
    1591             :    real (real_kind), intent(in) :: &
    1592             :       scalar               ! scalar for which max value needed
    1593             : 
    1594             :    type (distrb), intent(in) :: &
    1595             :       dist                 ! block distribution
    1596             : 
    1597             :    real (real_kind) :: &
    1598             :       globalMaxval         ! resulting maximum value
    1599             : 
    1600             : !-----------------------------------------------------------------------
    1601             : !
    1602             : !  local variables
    1603             : !
    1604             : !-----------------------------------------------------------------------
    1605             : 
    1606             :    integer (int_kind) :: &
    1607             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
     1608             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1609             :       communicator      ! communicator for this distribution
    1610             : 
    1611             :    character(len=*), parameter :: subname = '(global_maxval_scalar_real)'
    1612             : 
    1613             : !-----------------------------------------------------------------------
    1614             : 
    1615             :    call ice_distributionGet(dist, &
    1616             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1617           0 :                             communicator = communicator)
    1618             : 
    1619             : !-----------------------------------------------------------------------
    1620             : !
    1621             : !  now use MPI global reduction to reduce local maxval to global maxval
    1622             : !
    1623             : !-----------------------------------------------------------------------
    1624             : 
    1625             : #ifdef SERIAL_REMOVE_MPI
    1626             :    globalMaxval = scalar
    1627             : #else
    1628           0 :    if (my_task < numProcs) then
    1629             :       call MPI_ALLREDUCE(scalar, globalMaxval, 1, &
    1630           0 :                          mpiR4, MPI_MAX, communicator, ierr)
    1631             :    endif
    1632             : #endif
    1633             : 
    1634             : !-----------------------------------------------------------------------
    1635             : 
    1636           0 :  end function global_maxval_scalar_real
    1637             : 
    1638             : !***********************************************************************
    1639             : 
    1640         360 :  function global_maxval_scalar_int (scalar, dist) &
    1641             :           result(globalMaxval)
    1642             : 
    1643             : !  Computes the global maximum value of a scalar value across
    1644             : !  a distributed machine.
    1645             : !
    1646             : !  This is actually the specific interface for the generic global_maxval
     1647             : !  function corresponding to integer scalars.
    1648             : 
    1649             :    integer (int_kind), intent(in) :: &
    1650             :       scalar               ! scalar for which max value needed
    1651             : 
    1652             :    type (distrb), intent(in) :: &
    1653             :       dist                 ! block distribution
    1654             : 
    1655             :    integer (int_kind) :: &
    1656             :       globalMaxval         ! resulting maximum value
    1657             : 
    1658             : !-----------------------------------------------------------------------
    1659             : !
    1660             : !  local variables
    1661             : !
    1662             : !-----------------------------------------------------------------------
    1663             : 
    1664             :    integer (int_kind) :: &
    1665             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
     1666             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1667             :       communicator      ! communicator for this distribution
    1668             : 
    1669             :    character(len=*), parameter :: subname = '(global_maxval_scalar_int)'
    1670             : 
    1671             : !-----------------------------------------------------------------------
    1672             : 
    1673             :    call ice_distributionGet(dist, &
    1674             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1675         360 :                             communicator = communicator)
    1676             : 
    1677             : !-----------------------------------------------------------------------
    1678             : !
    1679             : !  now use MPI global reduction to reduce local maxval to global maxval
    1680             : !
    1681             : !-----------------------------------------------------------------------
    1682             : 
    1683             : #ifdef SERIAL_REMOVE_MPI
    1684             :    globalMaxval = scalar
    1685             : #else
    1686         360 :    if (my_task < numProcs) then
    1687             :       call MPI_ALLREDUCE(scalar, globalMaxval, 1, &
    1688         360 :                          MPI_INTEGER, MPI_MAX, communicator, ierr)
    1689             :    endif
    1690             : #endif
    1691             : 
    1692             : !-----------------------------------------------------------------------
    1693             : 
    1694         720 :  end function global_maxval_scalar_int
    1695             : 
    1696             : !***********************************************************************
    1697             : 
    1698           0 :  function global_maxval_scalar_int_nodist (scalar, communicator) &
    1699             :           result(globalMaxval)
    1700             : 
    1701             : !  Computes the global maximum value of a scalar value across
    1702             : !  a communicator.  This method supports testing.
    1703             : !
    1704             : !  This is actually the specific interface for the generic global_maxval
     1705             : !  function corresponding to integer scalars.
    1706             : 
    1707             :    integer (int_kind), intent(in) :: &
    1708             :       scalar               ! scalar for which max value needed
    1709             : 
    1710             :    integer (int_kind), intent(in) :: &
    1711             :       communicator         ! mpi communicator
    1712             : 
    1713             :    integer (int_kind) :: &
    1714             :       globalMaxval         ! resulting maximum value
    1715             : 
    1716             : !-----------------------------------------------------------------------
    1717             : !
    1718             : !  local variables
    1719             : !
    1720             : !-----------------------------------------------------------------------
    1721             : 
    1722             :    integer (int_kind) :: &
    1723             :       ierr             ! mpi error flag
    1724             : 
    1725             :    character(len=*), parameter :: subname = '(global_maxval_scalar_int_nodist)'
    1726             : 
    1727             : !-----------------------------------------------------------------------
    1728             : 
    1729             : !-----------------------------------------------------------------------
    1730             : !
    1731             : !  now use MPI global reduction to reduce local maxval to global maxval
    1732             : !
    1733             : !-----------------------------------------------------------------------
    1734             : 
    1735             : #ifdef SERIAL_REMOVE_MPI
    1736             :    globalMaxval = scalar
    1737             : #else
    1738             :    call MPI_ALLREDUCE(scalar, globalMaxval, 1, &
    1739           0 :                       MPI_INTEGER, MPI_MAX, communicator, ierr)
    1740             : #endif
    1741             : 
    1742             : !-----------------------------------------------------------------------
    1743             : 
    1744           0 :  end function global_maxval_scalar_int_nodist
    1745             : 
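
The _nodist variant above takes a raw MPI communicator instead of a distrb object, which is what standalone reduction tests need before any distribution exists. A fragment, assuming the specific function is reachable through the generic global_maxval interface; nfail_local is a hypothetical test failure counter:

       integer (int_kind) :: nfail_local, nfail_global   ! nfail_local: hypothetical test counter
       ! reduce directly over the communicator, no block distribution required
       nfail_global = global_maxval(nfail_local, MPI_COMM_WORLD)
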
    1746             : !***********************************************************************
    1747             : 
    1748         288 :  function global_minval_dbl (array, dist, lMask) &
    1749          64 :           result(globalMinval)
    1750             : 
    1751             : !  Computes the global minimum value of the physical domain of a 2-d field
    1752             : !
    1753             : !  This is actually the specific interface for the generic global_minval
    1754             : !  function corresponding to double precision arrays.
    1755             : 
    1756             :    real (dbl_kind), dimension(:,:,:), intent(in) :: &
    1757             :       array                ! array for which min value needed
    1758             : 
    1759             :    type (distrb), intent(in) :: &
    1760             :       dist                 ! block distribution for array X
    1761             : 
    1762             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
    1763             :       lMask                ! optional logical mask
    1764             : 
    1765             :    real (dbl_kind) :: &
    1766             :       globalMinval         ! resulting minimum value of array
    1767             : 
    1768             : !-----------------------------------------------------------------------
    1769             : !
    1770             : !  local variables
    1771             : !
    1772             : !-----------------------------------------------------------------------
    1773             : 
    1774             :    real (dbl_kind) ::    &
     1775             :       blockMinval,     &! min value of local block domain   ! LCOV_EXCL_LINE
     1776          64 :       localMinval       ! min value over all local block domains
    1777             : 
    1778             :    integer (int_kind) :: &
    1779             :       i,j,iblock,      &! local counters   ! LCOV_EXCL_LINE
    1780             :       ib,ie,jb,je,     &! beg,end of physical domain   ! LCOV_EXCL_LINE
    1781             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1782             :       numBlocks,       &! number of local blocks   ! LCOV_EXCL_LINE
     1783             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1784             :       communicator,    &! communicator for this distribution   ! LCOV_EXCL_LINE
    1785             :       blockID           ! block location
    1786             : 
    1787             :    type (block) :: &
    1788             :       this_block          ! holds local block information
    1789             : 
    1790             :    character(len=*), parameter :: subname = '(global_minval_dbl)'
    1791             : 
    1792             : !-----------------------------------------------------------------------
    1793             : 
    1794         288 :    localMinval  = HUGE(0.0_dbl_kind)
    1795         288 :    globalMinval = HUGE(0.0_dbl_kind)
    1796             : 
    1797             :    call ice_distributionGet(dist, &
    1798             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
    1799             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1800         288 :                             communicator = communicator)
    1801             : 
    1802        1560 :    do iblock=1,numBlocks
    1803        1272 :       call ice_distributionGetBlockID(dist, iblock, blockID)
    1804             : 
    1805        1272 :       this_block = get_block(blockID, blockID)
    1806             : 
    1807        1272 :       ib = this_block%ilo
    1808        1272 :       ie = this_block%ihi
    1809        1272 :       jb = this_block%jlo
    1810        1272 :       je = this_block%jhi
    1811             : 
    1812        1272 :       blockMinval = HUGE(0.0_dbl_kind)
    1813             : 
    1814        1272 :       if (present(lMask)) then
    1815       40200 :          do j=jb,je
    1816      671112 :          do i=ib,ie
    1817      669840 :             if (lMask(i,j,iblock)) then
    1818      497368 :                blockMinval = min(blockMinval,array(i,j,iblock))
    1819             :             endif
    1820             :          end do
    1821             :          end do
    1822             :       else
    1823           0 :          do j=jb,je
    1824           0 :          do i=ib,ie
    1825           0 :             blockMinval = min(blockMinval,array(i,j,iblock))
    1826             :          end do
    1827             :          end do
    1828             :       endif
    1829             : 
    1830        2832 :       localMinval = min(localMinval,blockMinval)
    1831             : 
    1832             :    end do
    1833             : 
    1834             : !-----------------------------------------------------------------------
    1835             : !
    1836             : !  now use MPI global reduction to reduce local minval to global minval
    1837             : !
    1838             : !-----------------------------------------------------------------------
    1839             : 
    1840             : #ifdef SERIAL_REMOVE_MPI
    1841             :    globalMinval = localMinval
    1842             : #else
    1843         288 :    if (my_task < numProcs) then
    1844             :       call MPI_ALLREDUCE(localMinval, globalMinval, 1, &
    1845         288 :                          mpiR8, MPI_MIN, communicator, ierr)
    1846             :    endif
    1847             : #endif
    1848             : 
    1849             : !-----------------------------------------------------------------------
    1850             : 
    1851         576 :  end function global_minval_dbl
    1852             : 
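
global_minval mirrors global_maxval point for point; in practice the two are usually paired to report a field's global range. A fragment with hypothetical names (vice, ice_mask, distrb_info):

       real (dbl_kind) :: hmin, hmax   ! vice, ice_mask, distrb_info: hypothetical caller data
       hmin = global_minval(vice, distrb_info, lMask=ice_mask)
       hmax = global_maxval(vice, distrb_info, lMask=ice_mask)
       if (my_task == master_task) write(*,*) 'vice range:', hmin, hmax
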
    1853             : !***********************************************************************
    1854             : 
    1855           0 :  function global_minval_real (array, dist, lMask) &
    1856           0 :           result(globalMinval)
    1857             : 
    1858             : !  Computes the global minimum value of the physical domain of a 2-d field
    1859             : !
    1860             : !  This is actually the specific interface for the generic global_minval
    1861             : !  function corresponding to single precision arrays.
    1862             : 
    1863             :    real (real_kind), dimension(:,:,:), intent(in) :: &
    1864             :       array                ! array for which min value needed
    1865             : 
    1866             :    type (distrb), intent(in) :: &
    1867             :       dist                 ! block distribution for array X
    1868             : 
    1869             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
    1870             :       lMask                ! optional logical mask
    1871             : 
    1872             :    real (real_kind) :: &
    1873             :       globalMinval         ! resulting minimum value of array
    1874             : 
    1875             : !-----------------------------------------------------------------------
    1876             : !
    1877             : !  local variables
    1878             : !
    1879             : !-----------------------------------------------------------------------
    1880             : 
    1881             :    real (real_kind) ::    &
     1882             :       blockMinval,     &! min value of local block domain   ! LCOV_EXCL_LINE
     1883           0 :       localMinval       ! min value over all local block domains
    1884             : 
    1885             :    integer (int_kind) :: &
    1886             :       i,j,iblock,      &! local counters   ! LCOV_EXCL_LINE
    1887             :       ib,ie,jb,je,     &! beg,end of physical domain   ! LCOV_EXCL_LINE
    1888             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1889             :       numBlocks,       &! number of local blocks   ! LCOV_EXCL_LINE
     1890             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1891             :       communicator,    &! communicator for this distribution   ! LCOV_EXCL_LINE
    1892             :       blockID           ! block location
    1893             : 
    1894             :    type (block) :: &
    1895             :       this_block          ! holds local block information
    1896             : 
    1897             :    character(len=*), parameter :: subname = '(global_minval_real)'
    1898             : 
    1899             : !-----------------------------------------------------------------------
    1900             : 
    1901           0 :    localMinval  = HUGE(0.0_real_kind)
    1902           0 :    globalMinval = HUGE(0.0_real_kind)
    1903             : 
    1904             :    call ice_distributionGet(dist, &
    1905             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
    1906             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1907           0 :                             communicator = communicator)
    1908             : 
    1909           0 :    do iblock=1,numBlocks
    1910           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
    1911             : 
    1912           0 :       this_block = get_block(blockID, blockID)
    1913             : 
    1914           0 :       ib = this_block%ilo
    1915           0 :       ie = this_block%ihi
    1916           0 :       jb = this_block%jlo
    1917           0 :       je = this_block%jhi
    1918             : 
    1919           0 :       blockMinval = HUGE(0.0_real_kind)
    1920             : 
    1921           0 :       if (present(lMask)) then
    1922           0 :          do j=jb,je
    1923           0 :          do i=ib,ie
    1924           0 :             if (lMask(i,j,iblock)) then
    1925           0 :                blockMinval = min(blockMinval,array(i,j,iblock))
    1926             :             endif
    1927             :          end do
    1928             :          end do
    1929             :       else
    1930           0 :          do j=jb,je
    1931           0 :          do i=ib,ie
    1932           0 :             blockMinval = min(blockMinval,array(i,j,iblock))
    1933             :          end do
    1934             :          end do
    1935             :       endif
    1936             : 
    1937           0 :       localMinval = min(localMinval,blockMinval)
    1938             : 
    1939             :    end do
    1940             : 
    1941             : !-----------------------------------------------------------------------
    1942             : !
    1943             : !  now use MPI global reduction to reduce local minval to global minval
    1944             : !
    1945             : !-----------------------------------------------------------------------
    1946             : 
    1947             : #ifdef SERIAL_REMOVE_MPI
    1948             :    globalMinval = localMinval
    1949             : #else
    1950           0 :    if (my_task < numProcs) then
    1951             :       call MPI_ALLREDUCE(localMinval, globalMinval, 1, &
    1952           0 :                          mpiR4, MPI_MIN, communicator, ierr)
    1953             :    endif
    1954             : #endif
    1955             : 
    1956             : !-----------------------------------------------------------------------
    1957             : 
    1958           0 :  end function global_minval_real
    1959             : 
    1960             : !***********************************************************************
    1961             : 
    1962           0 :  function global_minval_int (array, dist, lMask) &
    1963             :           result(globalMinval)
    1964             : 
    1965             : !  Computes the global minimum value of the physical domain of a 2-d field
    1966             : !
    1967             : !  This is actually the specific interface for the generic global_minval
    1968             : !  function corresponding to integer arrays.
    1969             : 
    1970             :    integer (int_kind), dimension(:,:,:), intent(in) :: &
    1971             :       array                ! array for which min value needed
    1972             : 
    1973             :    type (distrb), intent(in) :: &
    1974             :       dist                 ! block distribution for array X
    1975             : 
    1976             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
    1977             :       lMask                ! optional logical mask
    1978             : 
    1979             :    integer (int_kind) :: &
    1980             :       globalMinval         ! resulting minimum value of array
    1981             : 
    1982             : !-----------------------------------------------------------------------
    1983             : !
    1984             : !  local variables
    1985             : !
    1986             : !-----------------------------------------------------------------------
    1987             : 
    1988             :    integer (int_kind) ::    &
     1989             :       blockMinval,     &! min value of local block domain   ! LCOV_EXCL_LINE
     1990             :       localMinval       ! min value over all local block domains
    1991             : 
    1992             :    integer (int_kind) :: &
    1993             :       i,j,iblock,      &! local counters   ! LCOV_EXCL_LINE
    1994             :       ib,ie,jb,je,     &! beg,end of physical domain   ! LCOV_EXCL_LINE
    1995             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1996             :       numBlocks,       &! number of local blocks   ! LCOV_EXCL_LINE
     1997             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1998             :       communicator,    &! communicator for this distribution   ! LCOV_EXCL_LINE
    1999             :       blockID           ! block location
    2000             : 
    2001             :    type (block) :: &
    2002             :       this_block          ! holds local block information
    2003             : 
    2004             :    character(len=*), parameter :: subname = '(global_minval_int)'
    2005             : 
    2006             : !-----------------------------------------------------------------------
    2007             : 
    2008           0 :    localMinval  = HUGE(0_int_kind)
    2009           0 :    globalMinval = HUGE(0_int_kind)
    2010             : 
    2011             :    call ice_distributionGet(dist, &
    2012             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
    2013             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    2014           0 :                             communicator = communicator)
    2015             : 
    2016           0 :    do iblock=1,numBlocks
    2017           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
    2018             : 
    2019           0 :       this_block = get_block(blockID, blockID)
    2020             : 
    2021           0 :       ib = this_block%ilo
    2022           0 :       ie = this_block%ihi
    2023           0 :       jb = this_block%jlo
    2024           0 :       je = this_block%jhi
    2025             : 
    2026           0 :       blockMinval = HUGE(0_int_kind)
    2027             : 
    2028           0 :       if (present(lMask)) then
    2029           0 :          do j=jb,je
    2030           0 :          do i=ib,ie
    2031           0 :             if (lMask(i,j,iblock)) then
    2032           0 :                blockMinval = min(blockMinval,array(i,j,iblock))
    2033             :             endif
    2034             :          end do
    2035             :          end do
    2036             :       else
    2037           0 :          do j=jb,je
    2038           0 :          do i=ib,ie
    2039           0 :             blockMinval = min(blockMinval,array(i,j,iblock))
    2040             :          end do
    2041             :          end do
    2042             :       endif
    2043             : 
    2044           0 :       localMinval = min(localMinval,blockMinval)
    2045             : 
    2046             :    end do
    2047             : 
    2048             : !-----------------------------------------------------------------------
    2049             : !
    2050             : !  now use MPI global reduction to reduce local minval to global minval
    2051             : !
    2052             : !-----------------------------------------------------------------------
    2053             : 
    2054             : #ifdef SERIAL_REMOVE_MPI
    2055             :    globalMinval = localMinval
    2056             : #else
    2057           0 :    if (my_task < numProcs) then
    2058             :       call MPI_ALLREDUCE(localMinval, globalMinval, 1, &
    2059           0 :                          MPI_INTEGER, MPI_MIN, communicator, ierr)
    2060             :    endif
    2061             : #endif
    2062             : 
    2063             : !-----------------------------------------------------------------------
    2064             : 
    2065           0 :  end function global_minval_int
    2066             : 
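Editor's note: callers normally reach global_minval_int through the generic global_minval interface rather than by its specific name, passing the block-distributed field, the distribution, and, optionally, the logical mask. The fragment below is a hedged sketch of such a call; the subroutine name and the dummy arguments catIndex, iceMask, and distrb_info are hypothetical placeholders, and only global_minval and the lMask keyword come from this module.

   ! Illustrative sketch only -- names ending in _example and the dummy
   ! arguments are hypothetical, not part of ice_global_reductions.
   subroutine report_min_category_example(catIndex, iceMask, distrb_info)
      use ice_kinds_mod,         only: int_kind, log_kind
      use ice_distribution,      only: distrb
      use ice_global_reductions, only: global_minval

      integer (int_kind), dimension(:,:,:), intent(in) :: catIndex    ! per-block integer field
      logical (log_kind), dimension(:,:,:), intent(in) :: iceMask     ! true where a cell should count
      type (distrb),                        intent(in) :: distrb_info ! block distribution

      integer (int_kind) :: minCat

      ! masked form: only cells where iceMask is true contribute to the minimum
      minCat = global_minval(catIndex, distrb_info, lMask=iceMask)
   end subroutine report_min_category_example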
    2067             : !***********************************************************************
    2068             : 
    2069          72 :  function global_minval_scalar_dbl (scalar, dist) &
    2070          16 :           result(globalMinval)
    2071             : 
    2072             : !  Computes the global minimum value of a scalar value across
    2073             : !  a distributed machine.
    2074             : !
    2075             : !  This is actually the specific interface for the generic global_minval
    2076             : !  function corresponding to double precision scalars.
    2077             : 
    2078             :    real (dbl_kind), intent(in) :: &
    2079             :       scalar               ! scalar for which min value needed
    2080             : 
    2081             :    type (distrb), intent(in) :: &
    2082             :       dist                 ! block distribution
    2083             : 
    2084             :    real (dbl_kind) :: &
    2085             :       globalMinval         ! resulting minimum value
    2086             : 
    2087             : !-----------------------------------------------------------------------
    2088             : !
    2089             : !  local variables
    2090             : !
    2091             : !-----------------------------------------------------------------------
    2092             : 
    2093             :    integer (int_kind) :: &
    2094             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    2095             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    2096             :       communicator      ! communicator for this distribution
    2097             : 
    2098             :    character(len=*), parameter :: subname = '(global_minval_scalar_dbl)'
    2099             : 
    2100             : !-----------------------------------------------------------------------
    2101             : 
    2102             :    call ice_distributionGet(dist, &
    2103             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    2104          72 :                             communicator = communicator)
    2105             : 
    2106             : !-----------------------------------------------------------------------
    2107             : !
    2108             : !  now use MPI global reduction to reduce local minval to global minval
    2109             : !
    2110             : !-----------------------------------------------------------------------
    2111             : 
    2112             : #ifdef SERIAL_REMOVE_MPI
    2113             :    globalMinval = scalar
    2114             : #else
    2115          72 :    if (my_task < numProcs) then
    2116             :       call MPI_ALLREDUCE(scalar, globalMinval, 1, &
    2117          72 :                          mpiR8, MPI_MIN, communicator, ierr)
    2118             :    endif
    2119             : #endif
    2120             : 
    2121             : !-----------------------------------------------------------------------
    2122             : 
    2123         144 :  end function global_minval_scalar_dbl
    2124             : 
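Editor's note: for the scalar specifics there is no local loop at all; the value held by each task is handed straight to MPI_ALLREDUCE with MPI_MIN. The hedged fragment below shows a call through the generic interface; dt_local is a hypothetical per-task quantity and distrb_info is assumed to be available in the calling code.

   ! Sketch: reduce one double-precision value per task to its global minimum.
   real (dbl_kind) :: dt_local, dt_global   ! dt_local set elsewhere (hypothetical)
   type (distrb)   :: distrb_info           ! block distribution from the caller

   dt_global = global_minval(dt_local, distrb_info)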
    2125             : !***********************************************************************
    2126             : 
    2127           0 :  function global_minval_scalar_real (scalar, dist) &
    2128           0 :           result(globalMinval)
    2129             : 
    2130             : !  Computes the global minimum value of a scalar value across
    2131             : !  a distributed machine.
    2132             : !
    2133             : !  This is actually the specific interface for the generic global_minval
    2134             : !  function corresponding to single precision scalars.
    2135             : 
    2136             :    real (real_kind), intent(in) :: &
    2137             :       scalar               ! scalar for which min value needed
    2138             : 
    2139             :    type (distrb), intent(in) :: &
    2140             :       dist                 ! block distribution
    2141             : 
    2142             :    real (real_kind) :: &
    2143             :       globalMinval         ! resulting minimum value
    2144             : 
    2145             : !-----------------------------------------------------------------------
    2146             : !
    2147             : !  local variables
    2148             : !
    2149             : !-----------------------------------------------------------------------
    2150             : 
    2151             :    integer (int_kind) :: &
    2152             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    2153             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    2154             :       communicator      ! communicator for this distribution
    2155             : 
    2156             :    character(len=*), parameter :: subname = '(global_minval_scalar_real)'
    2157             : 
    2158             : !-----------------------------------------------------------------------
    2159             : 
    2160             :    call ice_distributionGet(dist, &
    2161             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    2162           0 :                             communicator = communicator)
    2163             : 
    2164             : !-----------------------------------------------------------------------
    2165             : !
    2166             : !  now use MPI global reduction to reduce local minval to global minval
    2167             : !
    2168             : !-----------------------------------------------------------------------
    2169             : 
    2170             : #ifdef SERIAL_REMOVE_MPI
    2171             :    globalMinval = scalar
    2172             : #else
    2173           0 :    if (my_task < numProcs) then
    2174             :       call MPI_ALLREDUCE(scalar, globalMinval, 1, &
    2175           0 :                          mpiR4, MPI_MIN, communicator, ierr)
    2176             :    endif
    2177             : #endif
    2178             : 
    2179             : !-----------------------------------------------------------------------
    2180             : 
    2181           0 :  end function global_minval_scalar_real
    2182             : 
    2183             : !***********************************************************************
    2184             : 
    2185           0 :  function global_minval_scalar_int (scalar, dist) &
    2186             :           result(globalMinval)
    2187             : 
    2188             : !  Computes the global minimum value of a scalar value across
    2189             : !  a distributed machine.
    2190             : !
    2191             : !  This is actually the specific interface for the generic global_minval
    2192             : !  function corresponding to integer scalars.
    2193             : 
    2194             :    integer (int_kind), intent(in) :: &
    2195             :       scalar               ! scalar for which min value needed
    2196             : 
    2197             :    type (distrb), intent(in) :: &
    2198             :       dist                 ! block distribution
    2199             : 
    2200             :    integer (int_kind) :: &
    2201             :       globalMinval         ! resulting minimum value
    2202             : 
    2203             : !-----------------------------------------------------------------------
    2204             : !
    2205             : !  local variables
    2206             : !
    2207             : !-----------------------------------------------------------------------
    2208             : 
    2209             :    integer (int_kind) :: &
    2210             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    2211             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    2212             :       communicator      ! communicator for this distribution
    2213             : 
    2214             :    character(len=*), parameter :: subname = '(global_minval_scalar_int)'
    2215             : 
    2216             : !-----------------------------------------------------------------------
    2217             : 
    2218             :    call ice_distributionGet(dist, &
    2219             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    2220           0 :                             communicator = communicator)
    2221             : 
    2222             : !-----------------------------------------------------------------------
    2223             : !
    2224             : !  now use MPI global reduction to reduce local minval to global minval
    2225             : !
    2226             : !-----------------------------------------------------------------------
    2227             : 
    2228             : #ifdef SERIAL_REMOVE_MPI
    2229             :    globalMinval = scalar
    2230             : #else
    2231           0 :    if (my_task < numProcs) then
    2232             :       call MPI_ALLREDUCE(scalar, globalMinval, 1, &
    2233           0 :                          MPI_INTEGER, MPI_MIN, communicator, ierr)
    2234             :    endif
    2235             : #endif
    2236             : 
    2237             : !-----------------------------------------------------------------------
    2238             : 
    2239           0 :  end function global_minval_scalar_int
    2240             : 
    2241             : !***********************************************************************
    2242             : 
    2243           0 :  function global_minval_scalar_int_nodist (scalar, communicator) &
    2244             :           result(globalMinval)
    2245             : 
    2246             : !  Computes the global minimum value of a scalar value across
    2247             : !  a communicator.  This method supports testing.
    2248             : !
    2249             : !  This is actually the specific interface for the generic global_minval
    2250             : !  function corresponding to integer scalars.
    2251             : 
    2252             :    integer (int_kind), intent(in) :: &
    2253             :       scalar               ! scalar for which min value needed
    2254             : 
    2255             :    integer(int_kind), intent(in) :: &
    2256             :       communicator         ! mpi communicator
    2257             : 
    2258             :    integer (int_kind) :: &
    2259             :       globalMinval         ! resulting minimum value
    2260             : 
    2261             : !-----------------------------------------------------------------------
    2262             : !
    2263             : !  local variables
    2264             : !
    2265             : !-----------------------------------------------------------------------
    2266             : 
    2267             :    integer (int_kind) :: &
    2268             :       ierr             ! mpi error flag
    2269             : 
    2270             :    character(len=*), parameter :: subname = '(global_minval_scalar_int_nodist)'
    2271             : 
    2272             : !-----------------------------------------------------------------------
    2273             : 
    2274             : !-----------------------------------------------------------------------
    2275             : !
    2276             : !  now use MPI global reduction to reduce local minval to global minval
    2277             : !
    2278             : !-----------------------------------------------------------------------
    2279             : 
    2280             : #ifdef SERIAL_REMOVE_MPI
    2281             :    globalMinval = scalar
    2282             : #else
    2283             :    call MPI_ALLREDUCE(scalar, globalMinval, 1, &
    2284           0 :                       MPI_INTEGER, MPI_MIN, communicator, ierr)
    2285             : #endif
    2286             : 
    2287             : !-----------------------------------------------------------------------
    2288             : 
    2289           0 :  end function global_minval_scalar_int_nodist
    2290             : 
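Editor's note: the *_nodist variant above exists so that stand-alone tests can reduce over a bare MPI communicator without first building a distrb object. The hedged test-style fragment below assumes MPI has been initialized and that this specific is bound to the generic global_minval like the other specifics; MPI_COMM_WORLD stands in for whatever communicator a test actually uses.

   ! Sketch: every task contributes its task id and all tasks receive the
   ! smallest one.  my_task comes from ice_communicate; MPI_COMM_WORLD from mpi.
   integer (int_kind) :: lowest_task

   lowest_task = global_minval(my_task, MPI_COMM_WORLD)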
    2291             : !***********************************************************************
    2292             : 
    2293      276480 : subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs)
    2294             : 
    2295             : ! Computes the global sum of a 2-d array over fields
    2296             : ! with first dimension values and second dimension fields
    2297             : !
    2298             : ! Several different options are supported.
    2299             : ! lsum4 = local sum with real*4 and scalar mpi allreduce, unlikely to be bfb
    2300             : ! lsum8 = local sum with real*8 and scalar mpi allreduce, unlikely to be bfb
    2301             : ! lsum16 = local sum with real*16 and scalar mpi allreduce, likely to be bfb
    2302             : !    WARNING: this does not work in several compilers and mpi
    2303             : !    implementations due to incomplete support for quad precision and
    2304             : !    inconsistency between the underlying datatypes in Fortran and C.  The
    2305             : !    r16 code can be disabled with the cpp macro NO_R16.  Otherwise, it is recommended
    2306             : !    that the results be validated on any platform where it might be used.
    2307             : ! reprosum = fixed point method based on ordered double integer sums
    2308             : !    that requires two scalar reductions per global sum.
    2309             : !    This is extremely likely to be bfb.
    2310             : !    (See Mirin and Worley, 2012, IJHPCA, 26, 1730,
    2311             : !    https://journals.sagepub.com/doi/10.1177/1094342011412630)
    2312             : ! ddpdd = parallel double-double algorithm using a single scalar reduction.
    2313             : !    This is very likely to be bfb.  (A sketch of the idea follows this subroutine.)
    2314             : !    (See He and Ding, 2001, Journal of Supercomputing, 18, 259,
    2315             : !    https://link.springer.com/article/10.1023%2FA%3A1008153532043)
    2316             : 
    2317             :    real (dbl_kind), dimension(:,:), intent(in) :: &
    2318             :       array2               ! array to be summed
    2319             : 
    2320             :    real (dbl_kind), dimension(:), intent(inout) :: &
    2321             :       sums8                 ! resulting global sum
    2322             : 
    2323             :    integer(int_kind), intent(in) :: &
    2324             :       mpicomm
    2325             : 
    2326             :    integer(int_kind), intent(in) :: &
    2327             :       numprocs
    2328             : 
    2329             : !-----------------------------------------------------------------------
    2330             : !
    2331             : !  local variables
    2332             : !
    2333             : !-----------------------------------------------------------------------
    2334             : 
    2335      276480 :    real (real_kind), allocatable :: psums4(:)
    2336      276480 :    real (real_kind), allocatable :: sums4(:)
    2337      276480 :    real (dbl_kind) , allocatable :: psums8(:)
    2338             :    ! if r16 is not available (NO_R16), then r16 reverts to double precision (r8)
    2339      276480 :    real (r16_kind) , allocatable :: psums16(:)
    2340      276480 :    real (r16_kind) , allocatable :: sums16(:)
    2341             : 
    2342             :    integer (int_kind) :: ns,nf,i,j, ierr
    2343             : 
    2344             :    character(len=*), parameter :: subname = '(compute_sums_dbl)'
    2345             : 
    2346             : !-----------------------------------------------------------------------
    2347             : 
    2348      552960 :    sums8 = 0._dbl_kind
    2349      276480 :    ns = size(array2,dim=1)
    2350      276480 :    nf = size(array2,dim=2)
    2351             : 
    2352      276480 :    if (bfbflag == 'off' .or. bfbflag == 'lsum8') then
    2353      276480 :       allocate(psums8(nf))
    2354      552960 :       psums8(:) = 0._dbl_kind
    2355             : 
    2356      552960 :       do j = 1, nf
    2357   727459200 :       do i = 1, ns
    2358   727182720 :          psums8(j) = psums8(j) + array2(i,j)
    2359             :       enddo
    2360             :       enddo
    2361             : 
    2362             : #ifdef SERIAL_REMOVE_MPI
    2363             :       sums8 = psums8
    2364             : #else
    2365      276480 :       if (my_task < numProcs) then
    2366      276480 :          call MPI_ALLREDUCE(psums8, sums8, nf, mpiR8, MPI_SUM, mpicomm, ierr)
    2367             :       endif
    2368             : #endif
    2369             : 
    2370      276480 :       deallocate(psums8)
    2371             : 
    2372             :    ! if no_r16 is set, this will revert to a double precision calculation like lsum8
    2373           0 :    elseif (bfbflag == 'lsum16') then
    2374           0 :       allocate(psums16(nf))
    2375           0 :       psums16(:) = 0._r16_kind
    2376           0 :       allocate(sums16(nf))
    2377           0 :       sums16(:) = 0._r16_kind
    2378             : 
    2379           0 :       do j = 1, nf
    2380           0 :       do i = 1, ns
    2381           0 :          psums16(j) = psums16(j) + real(array2(i,j),r16_kind)
    2382             :       enddo
    2383             :       enddo
    2384             : 
    2385             : #ifdef SERIAL_REMOVE_MPI
    2386             :       sums16 = psums16
    2387             : #else
    2388           0 :       if (my_task < numProcs) then
    2389           0 :          call MPI_ALLREDUCE(psums16, sums16, nf, mpiR16, MPI_SUM, mpicomm, ierr)
    2390             :       endif
    2391             : #endif
    2392           0 :       sums8 = real(sums16,dbl_kind)
    2393             : 
    2394           0 :       deallocate(psums16,sums16)
    2395             : 
    2396           0 :    elseif (bfbflag == 'lsum4') then
    2397           0 :       allocate(psums4(nf))
    2398           0 :       psums4(:) = 0._real_kind
    2399           0 :       allocate(sums4(nf))
    2400           0 :       sums4(:) = 0._real_kind
    2401             : 
    2402           0 :       do j = 1, nf
    2403           0 :       do i = 1, ns
    2404           0 :          psums4(j) = psums4(j) + real(array2(i,j),real_kind)
    2405             :       enddo
    2406             :       enddo
    2407             : 
    2408             : #ifdef SERIAL_REMOVE_MPI
    2409             :       sums4 = psums4
    2410             : #else
    2411           0 :       if (my_task < numProcs) then
    2412           0 :          call MPI_ALLREDUCE(psums4, sums4, nf, mpiR4, MPI_SUM, mpicomm, ierr)
    2413             :       endif
    2414             : #endif
    2415           0 :       sums8 = real(sums4,dbl_kind)
    2416             : 
    2417           0 :       deallocate(psums4,sums4)
    2418             : 
    2419           0 :    elseif (bfbflag == 'ddpdd') then
    2420           0 :       if (my_task < numProcs) then
    2421           0 :          call ice_reprosum_calc(array2,sums8,ns,ns,nf,ddpdd_sum=.true.,commid=mpicomm)
    2422             :       endif
    2423             : 
    2424           0 :    elseif (bfbflag == 'reprosum') then
    2425           0 :       if (my_task < numProcs) then
    2426           0 :          call ice_reprosum_calc(array2,sums8,ns,ns,nf,ddpdd_sum=.false.,commid=mpicomm)
    2427             :       endif
    2428             : 
    2429             :    else
    2430           0 :       call abort_ice(subname//'ERROR: bfbflag unknown '//trim(bfbflag))
    2431             :    endif
    2432             : 
    2433      276480 : end subroutine compute_sums_dbl
    2434             : 
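Editor's note: to make the ddpdd option described in the header of compute_sums_dbl a little more concrete, the sketch below shows the compensated, double-double style accumulation idea on which it rests, using Knuth's two-sum to carry the rounding error alongside the running sum. This is illustrative only; the actual implementation, including the custom MPI reduction operator, lives in ice_reprosum and is reached through ice_reprosum_calc. The subroutine name and its arguments are hypothetical.

   ! Illustrative sketch of double-double style local accumulation.
   subroutine dd_local_sum_example(vals, s, e)
      use ice_kinds_mod, only: int_kind, dbl_kind

      real (dbl_kind), dimension(:), intent(in)  :: vals   ! values to be summed
      real (dbl_kind),               intent(out) :: s      ! high part of the sum
      real (dbl_kind),               intent(out) :: e      ! accumulated rounding error

      real (dbl_kind)    :: t, v
      integer (int_kind) :: i

      s = 0._dbl_kind
      e = 0._dbl_kind
      do i = 1, size(vals)
         t = s + vals(i)                            ! new high part
         v = t - s                                  ! portion of vals(i) actually absorbed
         e = e + ((s - (t - v)) + (vals(i) - v))    ! recover what rounding dropped (two-sum)
         s = t
      enddo
      ! A bit-for-bit minded global sum would then combine the (s, e) pairs
      ! across tasks with a reduction that applies the same two-sum logic.
   end subroutine dd_local_sum_example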
    2435             : !***********************************************************************
    2436             : 
    2437             : end module ice_global_reductions
    2438             : 
    2439             : !|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||

Generated by: LCOV version 1.14-6-g40580cd