LCOV - code coverage report
Current view: top level - cicecore/cicedyn/infrastructure/comm/serial - ice_global_reductions.F90 (source / functions)
Test: 231018-211459:8916b9ff2c:1:quick
Date: 2023-10-18 15:30:36

                 Hit    Total  Coverage
Lines:           130      525   24.76 %
Functions:         8       24   33.33 %

          Line data    Source code
       1             : !|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
       2             : #define SERIAL_REMOVE_MPI
       3             : 
       4             :  module ice_global_reductions
       5             : 
       6             : !  This module contains all the routines for performing global
       7             : !  reductions like global sums, minvals, maxvals, etc.
       8             : !
       9             : ! author: Phil Jones, LANL
      10             : ! Oct. 2004: Adapted from POP version by William H. Lipscomb, LANL
      11             : ! Feb. 2008: Updated from POP version by Elizabeth C. Hunke, LANL
      12             : ! Aug. 2014: Added bit-for-bit reproducible options for global_sum_dbl
      13             : !            and global_sum_prod_dbl by T Craig NCAR
      14             : ! Mar. 2019: Refactored bit-for-bit option, T Craig
      15             : 
      16             : #ifndef SERIAL_REMOVE_MPI
      17             :    use mpi   ! MPI Fortran module
      18             : #endif
      19             :    use ice_kinds_mod
      20             :    use ice_blocks, only: block, get_block, nx_block, ny_block
      21             : #ifdef SERIAL_REMOVE_MPI
      22             :    use ice_communicate, only: my_task, master_task
      23             : #else
      24             :    use ice_communicate, only: my_task, mpiR16, mpiR8, mpiR4, master_task
      25             : #endif
      26             :    use ice_constants, only: field_loc_Nface, field_loc_NEcorner, c0
      27             :    use ice_fileunits, only: bfbflag
      28             :    use ice_exit, only: abort_ice
      29             :    use ice_distribution, only: distrb, ice_distributionGet, &
      30             :        ice_distributionGetBlockID
      31             :    use ice_domain_size, only: nx_global, ny_global, max_blocks
      32             :    use ice_gather_scatter, only: gather_global
      33             :    use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted
      34             :    use ice_reprosum, only: ice_reprosum_calc
      35             : 
      36             :    implicit none
      37             :    private
      38             : 
      39             :    public :: global_sum,      &
      40             :              global_sum_prod, &   ! LCOV_EXCL_LINE
      41             :              global_maxval,   &   ! LCOV_EXCL_LINE
      42             :              global_minval
      43             : 
      44             : !-----------------------------------------------------------------------
      45             : !
      46             : !  generic interfaces for module procedures
      47             : !
      48             : !-----------------------------------------------------------------------
      49             : 
      50             :    interface global_sum
      51             :      module procedure global_sum_dbl,              &
      52             :                       global_sum_real,             &   ! LCOV_EXCL_LINE
      53             :                       global_sum_int,              &   ! LCOV_EXCL_LINE
      54             :                       global_sum_scalar_dbl,       &   ! LCOV_EXCL_LINE
      55             :                       global_sum_scalar_real,      &   ! LCOV_EXCL_LINE
      56             :                       global_sum_scalar_int
      57             :    end interface
      58             : 
      59             :    interface global_sum_prod
      60             :      module procedure global_sum_prod_dbl,         &
      61             :                       global_sum_prod_real,        &   ! LCOV_EXCL_LINE
      62             :                       global_sum_prod_int
      63             :    end interface
      64             : 
      65             :    interface global_maxval
      66             :      module procedure global_maxval_dbl,           &
      67             :                       global_maxval_real,          &   ! LCOV_EXCL_LINE
      68             :                       global_maxval_int,           &   ! LCOV_EXCL_LINE
      69             :                       global_maxval_scalar_dbl,    &   ! LCOV_EXCL_LINE
      70             :                       global_maxval_scalar_real,   &   ! LCOV_EXCL_LINE
      71             :                       global_maxval_scalar_int,    &   ! LCOV_EXCL_LINE
      72             :                       global_maxval_scalar_int_nodist
      73             :    end interface
      74             : 
      75             :    interface global_minval
      76             :      module procedure global_minval_dbl,           &
      77             :                       global_minval_real,          &   ! LCOV_EXCL_LINE
      78             :                       global_minval_int,           &   ! LCOV_EXCL_LINE
      79             :                       global_minval_scalar_dbl,    &   ! LCOV_EXCL_LINE
      80             :                       global_minval_scalar_real,   &   ! LCOV_EXCL_LINE
      81             :                       global_minval_scalar_int,    &   ! LCOV_EXCL_LINE
      82             :                       global_minval_scalar_int_nodist
      83             :    end interface
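
As a reading aid (not part of the covered source): a minimal sketch of how callers typically use the generic interfaces above. The names distrb_info, field_loc_center, aice, and tarea are assumptions here (in CICE they would come from ice_domain, ice_constants, and the caller's own state) and are not defined in this module.

      use ice_global_reductions, only: global_sum
      real (dbl_kind) :: total
      ! area-weighted global sum of a 2-d field over the physical domain;
      ! mMask is the optional multiplicative mask argument declared below
      total = global_sum(aice, distrb_info, field_loc_center, mMask=tarea)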
      84             : 
      85             : !***********************************************************************
      86             : 
      87             :  contains
      88             : 
      89             : !***********************************************************************
      90             : 
      91         960 :  function global_sum_dbl(array, dist, field_loc, mMask, lMask) &
      92             :           result(globalSum)
      93             : 
      94             : !  Computes the global sum of the physical domain of a 2-d array.
      95             : !
      96             : !  This is actually the specific interface for the generic global_sum
      97             : !  function corresponding to double precision arrays.  The generic
      98             : !  interface is identical but will handle real and integer 2-d slabs
      99             : !  and real, integer, and double precision scalars.
     100             : 
     101             :    real (dbl_kind), dimension(:,:,:), intent(in) :: &
     102             :       array                ! array to be summed
     103             : 
     104             :    type (distrb), intent(in) :: &
     105             :       dist                 ! block distribution for array X
     106             : 
     107             :    integer (int_kind), intent(in) :: &
     108             :       field_loc            ! location of field on staggered grid
     109             : 
     110             :    real (dbl_kind), dimension(:,:,:), intent(in), optional :: &
     111             :       mMask                ! optional multiplicative mask
     112             : 
     113             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
     114             :       lMask                ! optional logical mask
     115             : 
     116             :    real (dbl_kind) :: &
     117             :       globalSum            ! resulting global sum
     118             : 
     119             : !-----------------------------------------------------------------------
     120             : !
     121             : !  local variables
     122             : !
     123             : !-----------------------------------------------------------------------
     124             : 
     125             :    integer (int_kind) :: &
     126             :       i,j,iblock,n, &! local counters   ! LCOV_EXCL_LINE
     127             :       ib,ie,jb,je,  &! beg,end of physical domain   ! LCOV_EXCL_LINE
     128             :       blockID,      &! block location   ! LCOV_EXCL_LINE
     129             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     130             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     131             :       communicator, &! communicator for this distribution   ! LCOV_EXCL_LINE
     132             :       maxiglob       ! maximum non-redundant value of i_global
     133             : 
     134             :    logical (log_kind) :: &
     135             :       Nrow           ! this field is on a N row (a velocity row)
     136             : 
     137             :    real (dbl_kind), dimension(:,:), allocatable :: &
     138         960 :       work           ! temporary local array
     139             : 
     140             :    real (dbl_kind), dimension(:), allocatable :: &
     141         960 :       sums           ! array of sums
     142             : 
     143             :    type (block) :: &
     144             :       this_block     ! holds local block information
     145             : 
     146             :    character(len=*), parameter :: subname = '(global_sum_dbl)'
     147             : 
     148             : !-----------------------------------------------------------------------
     149             : 
     150             : 
     151             :    call ice_distributionGet(dist,          &
     152             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     153             :                             nprocs = numProcs,       &   ! LCOV_EXCL_LINE
     154         960 :                             communicator = communicator)
     155             : 
     156         960 :    if (numBlocks == 0) then
     157           0 :       allocate(work(1,1))
     158             :    else
     159         960 :       allocate(work(nx_block*ny_block*numBlocks,1))
     160             :    endif
     161         960 :    allocate(sums(1))
     162    11556480 :    work = 0.0_dbl_kind
     163        1920 :    sums = 0.0_dbl_kind
     164             : 
     165        1920 :    do iblock=1,numBlocks
     166         960 :       call ice_distributionGetBlockID(dist, iblock, blockID)
     167             : 
     168         960 :       this_block = get_block(blockID, blockID)
     169             : 
     170         960 :       ib = this_block%ilo
     171         960 :       ie = this_block%ihi
     172         960 :       jb = this_block%jlo
     173         960 :       je = this_block%jhi
     174             : 
     175             :       !*** if this row along or beyond tripole boundary
     176             :       !*** must eliminate redundant points from global sum
     177             : 
     178         960 :       maxiglob = -1
     179         960 :       if (this_block%tripole) then
     180             :          Nrow=(field_loc == field_loc_Nface .or. &
     181           0 :                field_loc == field_loc_NEcorner)
     182           0 :          if (Nrow .and. this_block%tripoleTFlag) then
     183           0 :             maxiglob = 0 ! entire u-row on T-fold grid
     184           0 :          elseif (Nrow .or. this_block%tripoleTFlag) then
     185           0 :             maxiglob = nx_global/2 ! half T-row on T-fold or half u-row on u-fold
     186             :          else
     187           0 :             maxiglob = -1 ! nothing to do for T-row on u-fold
     188             :          endif
     189             :       endif
     190             : 
     191         960 :       n = (iblock-1)*nx_block*ny_block
     192             : 
     193         960 :       if (present(mMask)) then
     194      113280 :          do j=jb,je
     195    11248320 :          do i=ib,ie
     196    11136000 :             n = n + 1
     197    11247360 :             work(n,1) = array(i,j,iblock)*mMask(i,j,iblock)
     198             :          end do
     199             :          end do
     200           0 :       elseif (present(lMask)) then
     201           0 :          do j=jb,je
     202           0 :          do i=ib,ie
     203           0 :             n = n + 1
     204           0 :             if (lMask(i,j,iblock)) then
     205           0 :                work(n,1) = array(i,j,iblock)
     206             :             endif
     207             :          end do
     208             :          end do
     209             :       else
     210           0 :          do j=jb,je
     211           0 :          do i=ib,ie
     212           0 :             n = n + 1
     213           0 :             work(n,1) = array(i,j,iblock)
     214             :          enddo
     215             :          enddo
     216             :       endif
     217             : 
     218        2880 :       if (maxiglob >= 0) then
     219             :          ! eliminate redundant points at je
     220             :          ! set n to (ib,je) index
     221           0 :          n = (iblock-1)*nx_block*ny_block
     222           0 :          n = n + (je-1-jb+1)*(ie-ib+1)
     223           0 :          j=je
     224           0 :          do i=ib,ie
     225           0 :             n = n + 1
     226           0 :             if (this_block%i_glob(i) > maxiglob) then
     227           0 :                work(n,1) = 0._dbl_kind
     228             :             endif
     229             :          end do
     230             :       endif
     231             : 
     232             :    end do
     233             : 
     234         960 :    call compute_sums_dbl(work,sums,communicator,numProcs)
     235             : 
     236         960 :    globalSum = sums(1)
     237             : 
     238         960 :    deallocate(work)
     239         960 :    deallocate(sums)
     240             : 
     241             : !-----------------------------------------------------------------------
     242             : 
     243        1920 :  end function global_sum_dbl
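
A reading aid (not part of the covered source): the packing loops in global_sum_dbl fill the 1-d work array incrementally, and for a point (i,j) of local block iblock the counter n is equivalent to the closed form sketched below; the tripole branch uses the same form with j = je to locate the redundant row it zeroes before the sum.

      ! closed-form equivalent of the incremental index n used above,
      ! in terms of the loop bounds ib:ie, jb:je and the block size
      n = (iblock-1)*nx_block*ny_block + (j-jb)*(ie-ib+1) + (i-ib+1)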
     244             : 
     245             : !***********************************************************************
     246             : 
     247           0 :  function global_sum_real(array, dist, field_loc, mMask, lMask) &
     248             :           result(globalSum)
     249             : 
     250             : !  Computes the global sum of the physical domain of a 2-d array.
     251             : !
     252             : !  This is actually the specific interface for the generic global_sum
     253             : !  function corresponding to single precision arrays.  The generic
     254             : !  interface is identical but will handle double, real and integer 2-d slabs
     255             : !  and real, integer, and double precision scalars.
     256             : 
     257             :    real (real_kind), dimension(:,:,:), intent(in) :: &
     258             :       array                ! array to be summed
     259             : 
     260             :    type (distrb), intent(in) :: &
     261             :       dist                 ! block distribution for array X
     262             : 
     263             :    integer (int_kind), intent(in) :: &
     264             :       field_loc            ! location of field on staggered grid
     265             : 
     266             :    real (real_kind), dimension(:,:,:), intent(in), optional :: &
     267             :       mMask                ! optional multiplicative mask
     268             : 
     269             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
     270             :       lMask                ! optional logical mask
     271             : 
     272             :    real (real_kind) :: &
     273             :       globalSum            ! resulting global sum
     274             : 
     275             : !-----------------------------------------------------------------------
     276             : !
     277             : !  local variables
     278             : !
     279             : !-----------------------------------------------------------------------
     280             : 
     281             :    integer (int_kind) :: &
     282             :       i,j,iblock,n, &! local counters   ! LCOV_EXCL_LINE
     283             :       ib,ie,jb,je,  &! beg,end of physical domain   ! LCOV_EXCL_LINE
     284             :       blockID,      &! block location   ! LCOV_EXCL_LINE
     285             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     286             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     287             :       communicator, &! communicator for this distribution   ! LCOV_EXCL_LINE
     288             :       maxiglob       ! maximum non-redundant value of i_global
     289             : 
     290             :    logical (log_kind) :: &
     291             :       Nrow           ! this field is on a N row (a velocity row)
     292             : 
     293             :    real (dbl_kind), dimension(:,:), allocatable :: &
     294           0 :       work           ! temporary local array
     295             : 
     296             :    real (dbl_kind), dimension(:), allocatable :: &
     297           0 :       sums           ! array of sums
     298             : 
     299             :    type (block) :: &
     300             :       this_block     ! holds local block information
     301             : 
     302             :    character(len=*), parameter :: subname = '(global_sum_real)'
     303             : 
     304             : !-----------------------------------------------------------------------
     305             : 
     306             : 
     307             :    call ice_distributionGet(dist,          &
     308             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     309             :                             nprocs = numProcs,       &   ! LCOV_EXCL_LINE
     310           0 :                             communicator = communicator)
     311             : 
     312           0 :    if (numBlocks == 0) then
     313           0 :       allocate(work(1,1))
     314             :    else
     315           0 :       allocate(work(nx_block*ny_block*numBlocks,1))
     316             :    endif
     317           0 :    allocate(sums(1))
     318           0 :    work = 0.0_dbl_kind
     319           0 :    sums = 0.0_dbl_kind
     320             : 
     321           0 :    do iblock=1,numBlocks
     322           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
     323             : 
     324           0 :       this_block = get_block(blockID, blockID)
     325             : 
     326           0 :       ib = this_block%ilo
     327           0 :       ie = this_block%ihi
     328           0 :       jb = this_block%jlo
     329           0 :       je = this_block%jhi
     330             : 
     331             :       !*** if this row along or beyond tripole boundary
     332             :       !*** must eliminate redundant points from global sum
     333             : 
     334           0 :       maxiglob = -1
     335           0 :       if (this_block%tripole) then
     336             :          Nrow=(field_loc == field_loc_Nface .or. &
     337           0 :                field_loc == field_loc_NEcorner)
     338           0 :          if (Nrow .and. this_block%tripoleTFlag) then
     339           0 :             maxiglob = 0 ! entire u-row on T-fold grid
     340           0 :          elseif (Nrow .or. this_block%tripoleTFlag) then
     341           0 :             maxiglob = nx_global/2 ! half T-row on T-fold or half u-row on u-fold
     342             :          else
     343           0 :             maxiglob = -1 ! nothing to do for T-row on u-fold
     344             :          endif
     345             :       endif
     346             : 
     347           0 :       n = (iblock-1)*nx_block*ny_block
     348             : 
     349           0 :       if (present(mMask)) then
     350           0 :          do j=jb,je
     351           0 :          do i=ib,ie
     352           0 :             n = n + 1
     353           0 :             work(n,1) = real(array(i,j,iblock)*mMask(i,j,iblock),dbl_kind)
     354             :          end do
     355             :          end do
     356           0 :       elseif (present(lMask)) then
     357           0 :          do j=jb,je
     358           0 :          do i=ib,ie
     359           0 :             n = n + 1
     360           0 :             if (lMask(i,j,iblock)) then
     361           0 :                work(n,1) = real(array(i,j,iblock),dbl_kind)
     362             :             endif
     363             :          end do
     364             :          end do
     365             :       else
     366           0 :          do j=jb,je
     367           0 :          do i=ib,ie
     368           0 :             n = n + 1
     369           0 :             work(n,1) = real(array(i,j,iblock),dbl_kind)
     370             :          enddo
     371             :          enddo
     372             :       endif
     373             : 
     374           0 :       if (maxiglob >= 0) then
     375             :          ! eliminate redundant points at je
     376             :          ! set n to (ib,je) index
     377           0 :          n = (iblock-1)*nx_block*ny_block
     378           0 :          n = n + (je-1-jb+1)*(ie-ib+1)
     379           0 :          j=je
     380           0 :          do i=ib,ie
     381           0 :             n = n + 1
     382           0 :             if (this_block%i_glob(i) > maxiglob) then
     383           0 :                work(n,1) = 0._dbl_kind
     384             :             endif
     385             :          end do
     386             :       endif
     387             : 
     388             :    end do
     389             : 
     390           0 :    call compute_sums_dbl(work,sums,communicator,numProcs)
     391             : 
     392           0 :    globalSum = real(sums(1),real_kind)
     393             : 
     394           0 :    deallocate(work)
     395           0 :    deallocate(sums)
     396             : 
     397             : !-----------------------------------------------------------------------
     398             : 
     399           0 :  end function global_sum_real
     400             : 
     401             : !***********************************************************************
     402             : 
     403           0 :  function global_sum_int(array, dist, field_loc, mMask, lMask) &
     404             :           result(globalSum)
     405             : 
     406             : !  Computes the global sum of the physical domain of a 2-d array.
     407             : !
     408             : !  This is actually the specific interface for the generic global_sum
     409             : !  function corresponding to integer arrays.  The generic
     410             : !  interface is identical but will handle real and integer 2-d slabs
     411             : !  and real, integer, and double precision scalars.
     412             : 
     413             :    integer (int_kind), dimension(:,:,:), intent(in) :: &
     414             :       array                ! array to be summed
     415             : 
     416             :    type (distrb), intent(in) :: &
     417             :       dist                 ! block distribution for array X
     418             : 
     419             :    integer (int_kind), intent(in) :: &
     420             :       field_loc            ! location of field on staggered grid
     421             : 
     422             :    integer (int_kind), dimension(:,:,:), intent(in), optional :: &
     423             :       mMask                ! optional multiplicative mask
     424             : 
     425             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
     426             :       lMask                ! optional logical mask
     427             : 
     428             :    integer (int_kind) :: &
     429             :       globalSum            ! resulting global sum
     430             : 
     431             : !-----------------------------------------------------------------------
     432             : !
     433             : !  local variables
     434             : !
     435             : !-----------------------------------------------------------------------
     436             : 
     437             :    integer (int_kind) :: &
     438             :       blockSum,     &! sum of local block domain   ! LCOV_EXCL_LINE
     439             :       localSum       ! sum of all local block domains
     440             : 
     441             :    integer (int_kind) :: &
     442             :       i,j,iblock,   &! local counters   ! LCOV_EXCL_LINE
     443             :       ib,ie,jb,je,  &! beg,end of physical domain   ! LCOV_EXCL_LINE
     444             :       ierr,         &! mpi error flag   ! LCOV_EXCL_LINE
     445             :       blockID,      &! block location   ! LCOV_EXCL_LINE
     446             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     447             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     448             :       communicator, &! communicator for this distribution   ! LCOV_EXCL_LINE
     449             :       maxiglob       ! maximum non-redundant value of i_global
     450             : 
     451             :    logical (log_kind) :: &
     452             :       Nrow           ! this field is on a N row (a velocity row)
     453             : 
     454             :    type (block) :: &
     455             :       this_block     ! holds local block information
     456             : 
     457             :    character(len=*), parameter :: subname = '(global_sum_int)'
     458             : 
     459             : !-----------------------------------------------------------------------
     460             : 
     461           0 :    localSum  = 0_int_kind
     462           0 :    globalSum = 0_int_kind
     463             : 
     464             :    call ice_distributionGet(dist,          &
     465             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     466             :                             nprocs = numProcs,       &   ! LCOV_EXCL_LINE
     467           0 :                             communicator = communicator)
     468             : 
     469           0 :    do iblock=1,numBlocks
     470           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
     471             : 
     472           0 :       this_block = get_block(blockID, blockID)
     473             : 
     474           0 :       ib = this_block%ilo
     475           0 :       ie = this_block%ihi
     476           0 :       jb = this_block%jlo
     477           0 :       je = this_block%jhi
     478             : 
     479             :       !*** if this row along or beyond tripole boundary
     480             :       !*** must eliminate redundant points from global sum
     481             : 
     482           0 :       maxiglob = -1
     483           0 :       if (this_block%tripole) then
     484             :          Nrow=(field_loc == field_loc_Nface .or. &
     485           0 :                field_loc == field_loc_NEcorner)
     486           0 :          if (Nrow .and. this_block%tripoleTFlag) then
     487           0 :             maxiglob = 0 ! entire u-row on T-fold grid
     488           0 :          elseif (Nrow .or. this_block%tripoleTFlag) then
     489           0 :             maxiglob = nx_global/2 ! half T-row on T-fold or half u-row on u-fold
     490             :          else
     491           0 :             maxiglob = -1 ! nothing to do for T-row on u-fold
     492             :          endif
     493             :       endif
     494             : 
     495           0 :       blockSum = 0_int_kind
     496             : 
     497           0 :       do j=jb,je
     498           0 :       do i=ib,ie
     499             :          ! eliminate redundant points
     500           0 :          if (maxiglob >= 0 .and. j == je .and. this_block%i_glob(i) > maxiglob) then
     501             : !            blockSum = blockSum + 0_int_kind
     502             :          else
     503           0 :             if (present(mMask)) then
     504           0 :                blockSum = blockSum + array(i,j,iblock)*mMask(i,j,iblock)
     505           0 :             else if (present(lMask)) then
     506           0 :                if (lMask(i,j,iblock)) then
     507           0 :                   blockSum = blockSum + array(i,j,iblock)
     508             :                endif
     509             :             else
     510           0 :                blockSum = blockSum + array(i,j,iblock)
     511             :             endif
     512             :          endif
     513             :       end do
     514             :       end do
     515             : 
     516             :       !*** now add block sum to global sum
     517             : 
     518           0 :       localSum = localSum + blockSum
     519             : 
     520             :    end do
     521             : 
     522             : !-----------------------------------------------------------------------
     523             : !
     524             : !  now use MPI global reduction to reduce local sum to global sum
     525             : !
     526             : !-----------------------------------------------------------------------
     527             : 
     528             : #ifdef SERIAL_REMOVE_MPI
     529           0 :    globalSum = localSum
     530             : #else
     531             :    if (my_task < numProcs) then
     532             :       call MPI_ALLREDUCE(localSum, globalSum, 1, &
     533             :                          MPI_INTEGER, MPI_SUM, communicator, ierr)
     534             :    endif
     535             : #endif
     536             : 
     537             : !-----------------------------------------------------------------------
     538             : 
     539           0 :  end function global_sum_int
     540             : 
     541             : !***********************************************************************
     542             : 
     543           0 :  function global_sum_scalar_dbl(scalar, dist) &
     544             :           result(globalSum)
     545             : 
     546             : !  Computes the global sum of a set of scalars distributed across
     547             : !  a parallel machine.
     548             : !
     549             : !  This is actually the specific interface for the generic global_sum
     550             : !  function corresponding to double precision scalars.  The generic
     551             : !  interface is identical but will handle real and integer 2-d slabs
     552             : !  and real, integer, and double precision scalars.
     553             : 
     554             :    real (dbl_kind), intent(in) :: &
     555             :       scalar               ! scalar to be summed
     556             : 
     557             :    type (distrb), intent(in) :: &
     558             :       dist                 ! block distribution for array X
     559             : 
     560             :    real (dbl_kind) :: &
     561             :       globalSum            ! resulting global sum
     562             : 
     563             : !-----------------------------------------------------------------------
     564             : !
     565             : !  local variables
     566             : !
     567             : !-----------------------------------------------------------------------
     568             : 
     569             :    integer (int_kind) :: &
     570             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     571             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     572             :       communicator   ! communicator for this distribution
     573             : 
     574             :    real (dbl_kind), dimension(:,:), allocatable :: &
     575           0 :       work           ! temporary local array
     576             : 
     577             :    real (dbl_kind), dimension(:), allocatable :: &
     578           0 :       sums           ! array of sums
     579             : 
     580             :    character(len=*), parameter :: subname = '(global_sum_scalar_dbl)'
     581             : 
     582             : !-----------------------------------------------------------------------
     583             : !
     584             : !  get communicator for MPI calls
     585             : !
     586             : !-----------------------------------------------------------------------
     587             : 
     588             :    call ice_distributionGet(dist, &
     589             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     590             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
     591           0 :                             communicator = communicator)
     592             : 
     593             : 
     594           0 :    allocate(work(1,1))
     595           0 :    allocate(sums(1))
     596           0 :    work(1,1) = scalar
     597           0 :    sums = 0.0_dbl_kind
     598             : 
     599           0 :    call compute_sums_dbl(work,sums,communicator,numProcs)
     600             : 
     601           0 :    globalSum = sums(1)
     602             : 
     603           0 :    deallocate(work)
     604           0 :    deallocate(sums)
     605             : 
     606             : !-----------------------------------------------------------------------
     607             : 
     608           0 :  end function global_sum_scalar_dbl
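
A hedged usage sketch for the scalar interface (illustrative only; distrb_info is an assumed caller-side distribution, as in the earlier sketch). The scalar double-precision variant above routes its single value through the same compute_sums_dbl call used by the array versions.

      real (dbl_kind) :: local_value, total
      ! sum one value per task across the distribution
      total = global_sum(local_value, distrb_info)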
     609             : 
     610             : !***********************************************************************
     611             : 
     612           0 :  function global_sum_scalar_real(scalar, dist) &
     613             :           result(globalSum)
     614             : 
     615             : !  Computes the global sum of a set of scalars distributed across
     616             : !  a parallel machine.
     617             : !
     618             : !  This is actually the specific interface for the generic global_sum
     619             : !  function corresponding to real scalars.  The generic
     620             : !  interface is identical but will handle real and integer 2-d slabs
     621             : !  and real, integer, and double precision scalars.
     622             : 
     623             :    real (real_kind), intent(in) :: &
     624             :       scalar               ! scalar to be summed
     625             : 
     626             :    type (distrb), intent(in) :: &
     627             :       dist                 ! block distribution for array X
     628             : 
     629             :    real (real_kind) :: &
     630             :       globalSum            ! resulting global sum
     631             : 
     632             : !-----------------------------------------------------------------------
     633             : !
     634             : !  local variables
     635             : !
     636             : !-----------------------------------------------------------------------
     637             : 
     638             :    integer (int_kind) :: &
     639             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     640             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     641             :       communicator   ! communicator for this distribution
     642             : 
     643             :    real (dbl_kind), dimension(:,:), allocatable :: &
     644           0 :       work           ! temporary local array
     645             : 
     646             :    real (dbl_kind), dimension(:), allocatable :: &
     647           0 :       sums           ! array of sums
     648             : 
     649             :    character(len=*), parameter :: subname = '(global_sum_scalar_real)'
     650             : 
     651             : !-----------------------------------------------------------------------
     652             : !
     653             : !  get communicator for MPI calls
     654             : !
     655             : !-----------------------------------------------------------------------
     656             : 
     657             :    call ice_distributionGet(dist, &
     658             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     659             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
     660           0 :                             communicator = communicator)
     661             : 
     662           0 :    allocate(work(1,1))
     663           0 :    allocate(sums(1))
     664           0 :    work(1,1) = real(scalar,dbl_kind)
     665           0 :    sums = 0.0_dbl_kind
     666             : 
     667           0 :    call compute_sums_dbl(work,sums,communicator,numProcs)
     668             : 
     669           0 :    globalSum = real(sums(1),real_kind)
     670             : 
     671           0 :    deallocate(work)
     672           0 :    deallocate(sums)
     673             : 
     674             : !-----------------------------------------------------------------------
     675             : 
     676           0 :  end function global_sum_scalar_real
     677             : 
     678             : !***********************************************************************
     679             : 
     680           0 :  function global_sum_scalar_int(scalar, dist) &
     681             :           result(globalSum)
     682             : 
     683             : !  Computes the global sum of a set of scalars distributed across
     684             : !  a parallel machine.
     685             : !
     686             : !  This is actually the specific interface for the generic global_sum
     687             : !  function corresponding to integer scalars.  The generic
     688             : !  interface is identical but will handle real and integer 2-d slabs
     689             : !  and real, integer, and double precision scalars.
     690             : 
     691             :    integer (int_kind), intent(in) :: &
     692             :       scalar               ! scalar to be summed
     693             : 
     694             :    type (distrb), intent(in) :: &
     695             :       dist                 ! block distribution for array X
     696             : 
     697             :    integer (int_kind) :: &
     698             :       globalSum            ! resulting global sum
     699             : 
     700             : !-----------------------------------------------------------------------
     701             : !
     702             : !  local variables
     703             : !
     704             : !-----------------------------------------------------------------------
     705             : 
     706             :    integer (int_kind) :: &
     707             :       ierr,         &! mpi error flag   ! LCOV_EXCL_LINE
     708             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     709             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     710             :       communicator   ! communicator for this distribution
     711             : 
     712             :    character(len=*), parameter :: subname = '(global_sum_scalar_int)'
     713             : 
     714             : !-----------------------------------------------------------------------
     715             : !
     716             : !  get communicator for MPI calls
     717             : !
     718             : !-----------------------------------------------------------------------
     719             : 
     720             :    call ice_distributionGet(dist, &
     721             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     722             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
     723           0 :                             communicator = communicator)
     724             : 
     725             : !-----------------------------------------------------------------------
     726             : !
     727             : !  now use MPI global reduction to reduce local sum to global sum
     728             : !
     729             : !-----------------------------------------------------------------------
     730             : 
     731             : #ifdef SERIAL_REMOVE_MPI
     732           0 :    globalSum = scalar
     733             : #else
     734             :    if (my_task < numProcs) then
     735             :       call MPI_ALLREDUCE(scalar, globalSum, 1, &
     736             :                          MPI_INTEGER, MPI_SUM, communicator, ierr)
     737             :    endif
     738             : #endif
     739             : 
     740             : !-----------------------------------------------------------------------
     741             : 
     742           0 :  end function global_sum_scalar_int
     743             : 
     744             : !***********************************************************************
     745             : 
     746         192 :  function global_sum_prod_dbl (array1, array2, dist, field_loc, &
     747             :                                mMask, lMask) &   ! LCOV_EXCL_LINE
     748             :           result(globalSum)
     749             : 
     750             : !  Computes the global sum of the physical domain of a product of
     751             : !  two 2-d arrays.
     752             : !
     753             : !  This is actually the specific interface for the generic
     754             : !  global_sum_prod function corresponding to double precision arrays.
     755             : !  The generic interface is identical but will handle real and integer
     756             : !  2-d slabs.
     757             : 
     758             :    real (dbl_kind), dimension(:,:,:), intent(in) :: &
     759             :       array1, array2       ! arrays whose product is to be summed
     760             : 
     761             :    type (distrb), intent(in) :: &
     762             :       dist                 ! block distribution for arrays
     763             : 
     764             :    integer (int_kind), intent(in) :: &
     765             :       field_loc            ! location of field on staggered grid
     766             : 
     767             :    real (dbl_kind), dimension(:,:,:), intent(in), optional :: &
     768             :       mMask                ! optional multiplicative mask
     769             : 
     770             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
     771             :       lMask                ! optional logical mask
     772             : 
     773             :    real (dbl_kind) :: &
     774             :       globalSum            ! resulting global sum
     775             : 
     776             : !-----------------------------------------------------------------------
     777             : !
     778             : !  local variables
     779             : !
     780             : !-----------------------------------------------------------------------
     781             : 
     782             :    integer (int_kind) :: &
     783             :       i,j,iblock,n, &! local counters   ! LCOV_EXCL_LINE
     784             :       ib,ie,jb,je,  &! beg,end of physical domain   ! LCOV_EXCL_LINE
     785             :       blockID,      &! block location   ! LCOV_EXCL_LINE
     786             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     787             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     788             :       communicator, &! communicator for this distribution   ! LCOV_EXCL_LINE
     789             :       maxiglob       ! maximum non-redundant value of i_global
     790             : 
     791             :    logical (log_kind) :: &
     792             :       Nrow           ! this field is on a N row (a velocity row)
     793             : 
     794             :    real (dbl_kind), dimension(:,:), allocatable :: &
     795         192 :       work           ! temporary local array
     796             : 
     797             :    real (dbl_kind), dimension(:), allocatable :: &
     798         192 :       sums           ! array of sums
     799             : 
     800             :    type (block) :: &
     801             :       this_block     ! holds local block information
     802             : 
     803             :    character(len=*), parameter :: subname = '(global_sum_prod_dbl)'
     804             : 
     805             : !-----------------------------------------------------------------------
     806             : 
     807             : 
     808             :    call ice_distributionGet(dist,          &
     809             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     810             :                             nprocs = numProcs,       &   ! LCOV_EXCL_LINE
     811         192 :                             communicator = communicator)
     812             : 
     813         192 :    if (numBlocks == 0) then
     814           0 :       allocate(work(1,1))
     815             :    else
     816         192 :       allocate(work(nx_block*ny_block*numBlocks,1))
     817             :    endif
     818         192 :    allocate(sums(1))
     819     2311296 :    work = 0.0_dbl_kind
     820         384 :    sums = 0.0_dbl_kind
     821             : 
     822         384 :    do iblock=1,numBlocks
     823         192 :       call ice_distributionGetBlockID(dist, iblock, blockID)
     824             : 
     825         192 :       this_block = get_block(blockID, blockID)
     826             : 
     827         192 :       ib = this_block%ilo
     828         192 :       ie = this_block%ihi
     829         192 :       jb = this_block%jlo
     830         192 :       je = this_block%jhi
     831             : 
     832             :       !*** if this row along or beyond tripole boundary
     833             :       !*** must eliminate redundant points from global sum
     834             : 
     835         192 :       maxiglob = -1
     836         192 :       if (this_block%tripole) then
     837             :          Nrow=(field_loc == field_loc_Nface .or. &
     838           0 :                field_loc == field_loc_NEcorner)
     839           0 :          if (Nrow .and. this_block%tripoleTFlag) then
     840           0 :             maxiglob = 0 ! entire u-row on T-fold grid
     841           0 :          elseif (Nrow .or. this_block%tripoleTFlag) then
     842           0 :             maxiglob = nx_global/2 ! half T-row on T-fold or half u-row on u-fold
     843             :          else
     844           0 :             maxiglob = -1 ! nothing to do for T-row on u-fold
     845             :          endif
     846             :       endif
     847             : 
     848         192 :       n = (iblock-1)*nx_block*ny_block
     849             : 
     850         192 :       if (present(mMask)) then
     851       22656 :          do j=jb,je
     852     2249664 :          do i=ib,ie
     853     2227200 :             n = n + 1
     854     2249472 :             work(n,1) = array1(i,j,iblock)*array2(i,j,iblock)*mMask(i,j,iblock)
     855             :          end do
     856             :          end do
     857           0 :       elseif (present(lMask)) then
     858           0 :          do j=jb,je
     859           0 :          do i=ib,ie
     860           0 :             n = n + 1
     861           0 :             if (lMask(i,j,iblock)) then
     862           0 :                work(n,1) = array1(i,j,iblock)*array2(i,j,iblock)
     863             :             endif
     864             :          end do
     865             :          end do
     866             :       else
     867           0 :          do j=jb,je
     868           0 :          do i=ib,ie
     869           0 :             n = n + 1
     870           0 :             work(n,1) = array1(i,j,iblock)*array2(i,j,iblock)
     871             :          enddo
     872             :          enddo
     873             :       endif
     874             : 
     875         576 :       if (maxiglob >= 0) then
     876             :          ! eliminate redundant points at je
     877             :          ! set n to (ib,je) index
     878           0 :          n = (iblock-1)*nx_block*ny_block
     879           0 :          n = n + (je-1-jb+1)*(ie-ib+1)
     880           0 :          j=je
     881           0 :          do i=ib,ie
     882           0 :             n = n + 1
     883           0 :             if (this_block%i_glob(i) > maxiglob) then
     884           0 :                work(n,1) = 0._dbl_kind
     885             :             endif
     886             :          end do
     887             :       endif
     888             : 
     889             :    end do
     890             : 
     891         192 :    call compute_sums_dbl(work,sums,communicator,numProcs)
     892             : 
     893         192 :    globalSum = sums(1)
     894             : 
     895         192 :    deallocate(work)
     896         192 :    deallocate(sums)
     897             : 
     898             : !-----------------------------------------------------------------------
     899             : 
     900         384 :  end function global_sum_prod_dbl
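
A hedged usage sketch for the product-sum interface (illustrative only; field1, field2, tarea, distrb_info, and field_loc_center are assumed caller-side names, not defined in this module):

      real (dbl_kind) :: total
      ! global sum of an element-wise product, weighted by an optional
      ! multiplicative mask such as cell area
      total = global_sum_prod(field1, field2, distrb_info, field_loc_center, &
                              mMask=tarea)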
     901             : 
     902             : !***********************************************************************
     903             : 
     904           0 :  function global_sum_prod_real (array1, array2, dist, field_loc, &
     905             :                                 mMask, lMask) &   ! LCOV_EXCL_LINE
     906             :           result(globalSum)
     907             : 
     908             : !  Computes the global sum of the physical domain of a product of
     909             : !  two 2-d arrays.
     910             : !
     911             : !  This is actually the specific interface for the generic
     912             : !  global_sum_prod function corresponding to single precision arrays.
     913             : !  The generic interface is identical but will handle real and integer
     914             : !  2-d slabs.
     915             : 
     916             :    real (real_kind), dimension(:,:,:), intent(in) :: &
     917             :       array1, array2       ! arrays whose product is to be summed
     918             : 
     919             :    type (distrb), intent(in) :: &
     920             :       dist                 ! block distribution for arrays
     921             : 
     922             :    integer (int_kind), intent(in) :: &
     923             :       field_loc            ! location of field on staggered grid
     924             : 
     925             :    real (real_kind), dimension(:,:,:), intent(in), optional :: &
     926             :       mMask                ! optional multiplicative mask
     927             : 
     928             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
     929             :       lMask                ! optional logical mask
     930             : 
     931             :    real (real_kind) :: &
     932             :       globalSum            ! resulting global sum
     933             : 
     934             : !-----------------------------------------------------------------------
     935             : !
     936             : !  local variables
     937             : !
     938             : !-----------------------------------------------------------------------
     939             : 
     940             :    integer (int_kind) :: &
     941             :       i,j,iblock,n, &! local counters   ! LCOV_EXCL_LINE
     942             :       ib,ie,jb,je,  &! beg,end of physical domain   ! LCOV_EXCL_LINE
     943             :       blockID,      &! block location   ! LCOV_EXCL_LINE
     944             :       numProcs,     &! number of processors participating   ! LCOV_EXCL_LINE
     945             :       numBlocks,    &! number of local blocks   ! LCOV_EXCL_LINE
     946             :       communicator, &! communicator for this distribution   ! LCOV_EXCL_LINE
     947             :       maxiglob       ! maximum non-redundant value of i_global
     948             : 
     949             :    logical (log_kind) :: &
     950             :       Nrow           ! this field is on a N row (a velocity row)
     951             : 
     952             :    real (dbl_kind), dimension(:,:), allocatable :: &
     953           0 :       work           ! temporary local array
     954             : 
     955             :    real (dbl_kind), dimension(:), allocatable :: &
     956           0 :       sums           ! array of sums
     957             : 
     958             :    type (block) :: &
     959             :       this_block     ! holds local block information
     960             : 
     961             :    character(len=*), parameter :: subname = '(global_sum_prod_real)'
     962             : 
     963             : !-----------------------------------------------------------------------
     964             : 
     965             : 
     966             :    call ice_distributionGet(dist,          &
     967             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
     968             :                             nprocs = numProcs,       &   ! LCOV_EXCL_LINE
     969           0 :                             communicator = communicator)
     970             : 
     971           0 :    if (numBlocks == 0) then
     972           0 :       allocate(work(1,1))
     973             :    else
     974           0 :       allocate(work(nx_block*ny_block*numBlocks,1))
     975             :    endif
     976           0 :    allocate(sums(1))
     977           0 :    work = 0.0_dbl_kind
     978           0 :    sums = 0.0_dbl_kind
     979             : 
     980           0 :    do iblock=1,numBlocks
     981           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
     982             : 
     983           0 :       this_block = get_block(blockID, blockID)
     984             : 
     985           0 :       ib = this_block%ilo
     986           0 :       ie = this_block%ihi
     987           0 :       jb = this_block%jlo
     988           0 :       je = this_block%jhi
     989             : 
     990             :       !*** if this row along or beyond tripole boundary
     991             :       !*** must eliminate redundant points from global sum
     992             : 
     993           0 :       maxiglob = -1
     994           0 :       if (this_block%tripole) then
     995             :          Nrow=(field_loc == field_loc_Nface .or. &
     996           0 :                field_loc == field_loc_NEcorner)
     997           0 :          if (Nrow .and. this_block%tripoleTFlag) then
     998           0 :             maxiglob = 0 ! entire u-row on T-fold grid
     999           0 :          elseif (Nrow .or. this_block%tripoleTFlag) then
    1000           0 :             maxiglob = nx_global/2 ! half T-row on T-fold or half u-row on u-fold
    1001             :          else
    1002           0 :             maxiglob = -1 ! nothing to do for T-row on u-fold
    1003             :          endif
    1004             :       endif
    1005             : 
    1006           0 :       n = (iblock-1)*nx_block*ny_block
    1007             : 
    1008           0 :       if (present(mMask)) then
    1009           0 :          do j=jb,je
    1010           0 :          do i=ib,ie
    1011           0 :             n = n + 1
    1012           0 :             work(n,1) = real(array1(i,j,iblock)*array2(i,j,iblock)*mMask(i,j,iblock),dbl_kind)
    1013             :          end do
    1014             :          end do
    1015           0 :       elseif (present(lMask)) then
    1016           0 :          do j=jb,je
    1017           0 :          do i=ib,ie
    1018           0 :             n = n + 1
    1019           0 :             if (lMask(i,j,iblock)) then
    1020           0 :                work(n,1) = real(array1(i,j,iblock)*array2(i,j,iblock),dbl_kind)
    1021             :             endif
    1022             :          end do
    1023             :          end do
    1024             :       else
    1025           0 :          do j=jb,je
    1026           0 :          do i=ib,ie
    1027           0 :             n = n + 1
    1028           0 :             work(n,1) = real(array1(i,j,iblock)*array2(i,j,iblock),dbl_kind)
    1029             :          enddo
    1030             :          enddo
    1031             :       endif
    1032             : 
    1033           0 :       if (maxiglob >= 0) then
    1034             :          ! eliminate redundant points at je
    1035             :          ! set n to (ib,je) index
    1036           0 :          n = (iblock-1)*nx_block*ny_block
    1037           0 :          n = n + (je-1-jb+1)*(ie-ib+1)
    1038           0 :          j=je
    1039           0 :          do i=ib,ie
    1040           0 :             n = n + 1
    1041           0 :             if (this_block%i_glob(i) > maxiglob) then
    1042           0 :                work(n,1) = 0._dbl_kind
    1043             :             endif
    1044             :          end do
    1045             :       endif
    1046             : 
    1047             :    end do
    1048             : 
    1049           0 :    call compute_sums_dbl(work,sums,communicator,numProcs)
    1050             : 
    1051           0 :    globalSum = real(sums(1),real_kind)
    1052             : 
    1053           0 :    deallocate(work)
    1054           0 :    deallocate(sums)
    1055             : 
    1056             : !-----------------------------------------------------------------------
    1057             : 
    1058           0 :  end function global_sum_prod_real
    1059             : 
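 As a usage illustration only (not part of the covered source), here is a minimal sketch of calling the generic global_sum_prod interface. The names aice, tarea, tmask, distrb_info (from ice_domain) and field_loc_center (from ice_constants) are assumptions borrowed from typical CICE code, not guaranteed by this module:

   subroutine example_global_sum_prod
      ! Sketch under the assumptions noted above; not part of ice_global_reductions.
      use ice_kinds_mod,         only: dbl_kind, log_kind
      use ice_blocks,            only: nx_block, ny_block
      use ice_domain_size,       only: max_blocks
      use ice_domain,            only: distrb_info      ! assumed name
      use ice_constants,         only: field_loc_center ! assumed name
      use ice_global_reductions, only: global_sum_prod

      real (dbl_kind), dimension(:,:,:), allocatable :: aice, tarea
      logical (log_kind), dimension(:,:,:), allocatable :: tmask
      real (dbl_kind) :: total_area

      allocate(aice (nx_block,ny_block,max_blocks), &
               tarea(nx_block,ny_block,max_blocks), &
               tmask(nx_block,ny_block,max_blocks))
      aice  = 0.5_dbl_kind     ! placeholder data
      tarea = 1.0_dbl_kind
      tmask = .true.

      ! total "ice area" = sum over the physical domain of aice*tarea,
      ! restricted by the logical mask; resolves to global_sum_prod_dbl
      total_area = global_sum_prod(aice, tarea, distrb_info, &
                                   field_loc_center, lMask=tmask)
   end subroutine example_global_sum_prod
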
    1060             : !***********************************************************************
    1061             : 
    1062           0 :  function global_sum_prod_int (array1, array2, dist, field_loc, &
    1063             :                                mMask, lMask) &   ! LCOV_EXCL_LINE
    1064             :           result(globalSum)
    1065             : 
    1066             : !  Computes the global sum of the physical domain of a product of
    1067             : !  two 2-d arrays.
    1068             : !
    1069             : !  This is actually the specific interface for the generic
    1070             : !  global_sum_prod function corresponding to integer arrays.
    1071             : !  The generic interface is identical but will handle real and integer
    1072             : !  2-d slabs.
    1073             : 
    1074             :    integer (int_kind), dimension(:,:,:), intent(in) :: &
    1075             :       array1, array2       ! arrays whose product is to be summed
    1076             : 
    1077             :    type (distrb), intent(in) :: &
    1078             :       dist                 ! block distribution for arrays
    1079             : 
    1080             :    integer (int_kind), intent(in) :: &
    1081             :       field_loc            ! location of field on staggered grid
    1082             : 
    1083             :    integer (int_kind), dimension(:,:,:), intent(in), optional :: &
    1084             :       mMask                ! optional multiplicative mask
    1085             : 
    1086             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
    1087             :       lMask                ! optional logical mask
    1088             : 
    1089             :    integer (int_kind) :: &
    1090             :       globalSum            ! resulting global sum
    1091             : 
    1092             : !-----------------------------------------------------------------------
    1093             : !
    1094             : !  local variables
    1095             : !
    1096             : !-----------------------------------------------------------------------
    1097             : 
    1098             :    integer (int_kind) :: &
    1099             :       blockSum,     &! sum of local block domain   ! LCOV_EXCL_LINE
    1100             :       localSum       ! sum of all local block domains
    1101             : 
    1102             :    integer (int_kind) :: &
    1103             :       i,j,iblock,      &! local counters   ! LCOV_EXCL_LINE
    1104             :       ib,ie,jb,je,     &! beg,end of physical domain   ! LCOV_EXCL_LINE
    1105             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1106             :       blockID,         &! block location   ! LCOV_EXCL_LINE
    1107             :       numBlocks,       &! number of local blocks   ! LCOV_EXCL_LINE
    1108             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1109             :       communicator,    &! communicator for this distribution   ! LCOV_EXCL_LINE
    1110             :       maxiglob          ! maximum non-redundant value of i_global
    1111             : 
    1112             :    logical (log_kind) :: &
    1113             :       Nrow           ! this field is on an N row (a velocity row)
    1114             : 
    1115             :    type (block) :: &
    1116             :       this_block          ! holds local block information
    1117             : 
    1118             :    character(len=*), parameter :: subname = '(global_sum_prod_int)'
    1119             : 
    1120             : !-----------------------------------------------------------------------
    1121             : 
    1122           0 :    localSum  = 0_int_kind
    1123           0 :    globalSum = 0_int_kind
    1124             : 
    1125             :    call ice_distributionGet(dist, &
    1126             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
    1127             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1128           0 :                             communicator = communicator)
    1129             : 
    1130           0 :    do iblock=1,numBlocks
    1131           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
    1132             : 
    1133           0 :       this_block = get_block(blockID, blockID)
    1134             : 
    1135           0 :       ib = this_block%ilo
    1136           0 :       ie = this_block%ihi
    1137           0 :       jb = this_block%jlo
    1138           0 :       je = this_block%jhi
    1139             : 
    1140             :       !*** if this row is along or beyond the tripole boundary,
    1141             :       !*** redundant points must be eliminated from the global sum
    1142             : 
    1143           0 :       maxiglob = -1
    1144           0 :       if (this_block%tripole) then
    1145             :          Nrow=(field_loc == field_loc_Nface .or. &
    1146           0 :                field_loc == field_loc_NEcorner)
    1147           0 :          if (Nrow .and. this_block%tripoleTFlag) then
    1148           0 :             maxiglob = 0 ! entire u-row on T-fold grid
    1149           0 :          elseif (Nrow .or. this_block%tripoleTFlag) then
    1150           0 :             maxiglob = nx_global/2 ! half T-row on T-fold or half u-row on u-fold
    1151             :          else
    1152           0 :             maxiglob = -1 ! nothing to do for T-row on u-fold
    1153             :          endif
    1154             :       endif
    1155             : 
    1156           0 :       blockSum = 0_int_kind
    1157             : 
    1158           0 :       do j=jb,je
    1159           0 :       do i=ib,ie
    1160             :          ! eliminate redundant points
    1161           0 :          if (maxiglob >= 0 .and. j == je .and. this_block%i_glob(i) > maxiglob) then
    1162             : !            blockSum = blockSum + 0_int_kind
    1163             :          else
    1164           0 :             if (present(mMask)) then
    1165           0 :                blockSum = blockSum + array1(i,j,iblock)*array2(i,j,iblock)*mMask(i,j,iblock)
    1166           0 :             else if (present(lMask)) then
    1167           0 :                if (lMask(i,j,iblock)) then
    1168           0 :                   blockSum = blockSum + array1(i,j,iblock)*array2(i,j,iblock)
    1169             :                endif
    1170             :             else
    1171           0 :                blockSum = blockSum + array1(i,j,iblock)*array2(i,j,iblock)
    1172             :             endif
    1173             :          endif
    1174             :       end do
    1175             :       end do
    1176             : 
    1177             :       !*** now add block sum to global sum
    1178             : 
    1179           0 :       localSum = localSum + blockSum
    1180             : 
    1181             :    end do
    1182             : 
    1183             : !-----------------------------------------------------------------------
    1184             : !
    1185             : !  now use MPI global reduction to reduce local sum to global sum
    1186             : !
    1187             : !-----------------------------------------------------------------------
    1188             : 
    1189             : #ifdef SERIAL_REMOVE_MPI
    1190           0 :    globalSum = localSum
    1191             : #else
    1192             :    if (my_task < numProcs) then
    1193             :       call MPI_ALLREDUCE(localSum, globalSum, 1, &
    1194             :                          MPI_INTEGER, MPI_SUM, communicator, ierr)
    1195             :    endif
    1196             : #endif
    1197             : 
    1198             : !-----------------------------------------------------------------------
    1199             : 
    1200           0 :  end function global_sum_prod_int
    1201             : 
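 The tripole branch in the two functions above boils down to a small decision table for maxiglob, the largest non-redundant i_global on the top (je) row of a tripole block. Below is a standalone restatement of just that logic, as a hypothetical helper that is not part of the module:

   integer function tripole_maxiglob(Nrow, tripoleTFlag, nx_global) result(maxiglob)
      ! On a tripole block:
      !   N-located field, T-fold grid  -> whole top row redundant (maxiglob = 0)
      !   N-located field on u-fold, or
      !   T-located field on T-fold     -> half the top row redundant (nx_global/2)
      !   T-located field, u-fold grid  -> nothing to eliminate (maxiglob = -1)
      logical, intent(in) :: Nrow          ! field sits on an N (velocity) row
      logical, intent(in) :: tripoleTFlag  ! grid is folded across a T row
      integer, intent(in) :: nx_global     ! global i dimension

      if (Nrow .and. tripoleTFlag) then
         maxiglob = 0
      elseif (Nrow .or. tripoleTFlag) then
         maxiglob = nx_global/2
      else
         maxiglob = -1
      endif
   end function tripole_maxiglob
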
    1202             : !***********************************************************************
    1203             : 
    1204         152 :  function global_maxval_dbl (array, dist, lMask) &
    1205             :           result(globalMaxval)
    1206             : 
    1207             : !  Computes the global maximum value of the physical domain of a 2-d field
    1208             : !
    1209             : !  This is actually the specific interface for the generic global_maxval
    1210             : !  function corresponding to double precision arrays.
    1211             : 
    1212             :    real (dbl_kind), dimension(:,:,:), intent(in) :: &
    1213             :       array                ! array for which max value needed
    1214             : 
    1215             :    type (distrb), intent(in) :: &
    1216             :       dist                 ! block distribution for array X
    1217             : 
    1218             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
    1219             :       lMask                ! optional logical mask
    1220             : 
    1221             :    real (dbl_kind) :: &
    1222             :       globalMaxval         ! resulting maximum value of array
    1223             : 
    1224             : !-----------------------------------------------------------------------
    1225             : !
    1226             : !  local variables
    1227             : !
    1228             : !-----------------------------------------------------------------------
    1229             : 
    1230             :    real (dbl_kind) ::    &
    1231             :       blockMaxval,     &! max of local block domain   ! LCOV_EXCL_LINE
    1232             :       localMaxval       ! max of all local block domains
    1233             : 
    1234             :    integer (int_kind) :: &
    1235             :       i,j,iblock,      &! local counters   ! LCOV_EXCL_LINE
    1236             :       ib,ie,jb,je,     &! beg,end of physical domain   ! LCOV_EXCL_LINE
    1237             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1238             :       numBlocks,       &! number of local blocks   ! LCOV_EXCL_LINE
    1239             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1240             :       communicator,    &! communicator for this distribution   ! LCOV_EXCL_LINE
    1241             :       blockID           ! block location
    1242             : 
    1243             :    type (block) :: &
    1244             :       this_block          ! holds local block information
    1245             : 
    1246             :    character(len=*), parameter :: subname = '(global_maxval_dbl)'
    1247             : 
    1248             : !-----------------------------------------------------------------------
    1249             : 
    1250         152 :    localMaxval  = -HUGE(0.0_dbl_kind)
    1251         152 :    globalMaxval = -HUGE(0.0_dbl_kind)
    1252             : 
    1253             :    call ice_distributionGet(dist, &
    1254             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
    1255             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1256         152 :                             communicator = communicator)
    1257             : 
    1258         304 :    do iblock=1,numBlocks
    1259         152 :       call ice_distributionGetBlockID(dist, iblock, blockID)
    1260             : 
    1261         152 :       this_block = get_block(blockID, blockID)
    1262             : 
    1263         152 :       ib = this_block%ilo
    1264         152 :       ie = this_block%ihi
    1265         152 :       jb = this_block%jlo
    1266         152 :       je = this_block%jhi
    1267             : 
    1268         152 :       blockMaxval = -HUGE(0.0_dbl_kind)
    1269             : 
    1270         152 :       if (present(lMask)) then
    1271       17936 :          do j=jb,je
    1272     1780984 :          do i=ib,ie
    1273     1780832 :             if (lMask(i,j,iblock)) then
    1274      896444 :                blockMaxval = max(blockMaxval,array(i,j,iblock))
    1275             :             endif
    1276             :          end do
    1277             :          end do
    1278             :       else
    1279           0 :          do j=jb,je
    1280           0 :          do i=ib,ie
    1281           0 :             blockMaxval = max(blockMaxval,array(i,j,iblock))
    1282             :          end do
    1283             :          end do
    1284             :       endif
    1285             : 
    1286         456 :       localMaxval = max(localMaxval,blockMaxval)
    1287             : 
    1288             :    end do
    1289             : 
    1290             : !-----------------------------------------------------------------------
    1291             : !
    1292             : !  now use MPI global reduction to reduce local maxval to global maxval
    1293             : !
    1294             : !-----------------------------------------------------------------------
    1295             : 
    1296             : #ifdef SERIAL_REMOVE_MPI
    1297         152 :    globalMaxval = localMaxval
    1298             : #else
    1299             :    if (my_task < numProcs) then
    1300             :       call MPI_ALLREDUCE(localMaxval, globalMaxval, 1, &
    1301             :                          mpiR8, MPI_MAX, communicator, ierr)
    1302             :    endif
    1303             : #endif
    1304             : 
    1305             : !-----------------------------------------------------------------------
    1306             : 
    1307         304 :  end function global_maxval_dbl
    1308             : 
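 A short usage sketch for the array form of global_maxval; vice, tmask and distrb_info are assumed names, and the fields are left unallocated here for brevity:

   subroutine example_global_maxval
      ! Sketch only; allocation/initialization of the fields is omitted.
      use ice_kinds_mod,         only: dbl_kind, log_kind
      use ice_domain,            only: distrb_info   ! assumed name
      use ice_global_reductions, only: global_maxval

      real (dbl_kind), dimension(:,:,:), allocatable :: vice
      logical (log_kind), dimension(:,:,:), allocatable :: tmask
      real (dbl_kind) :: vmax

      ! maximum of vice over the physical domain of all blocks and tasks;
      ! points where tmask is .false. are ignored (resolves to global_maxval_dbl)
      vmax = global_maxval(vice, distrb_info, lMask=tmask)
   end subroutine example_global_maxval
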
    1309             : !***********************************************************************
    1310             : 
    1311           0 :  function global_maxval_real (array, dist, lMask) &
    1312             :           result(globalMaxval)
    1313             : 
    1314             : !  Computes the global maximum value of the physical domain of a 2-d field
    1315             : !
    1316             : !  This is actually the specific interface for the generic global_maxval
    1317             : !  function corresponding to single precision arrays.
    1318             : 
    1319             :    real (real_kind), dimension(:,:,:), intent(in) :: &
    1320             :       array                ! array for which max value needed
    1321             : 
    1322             :    type (distrb), intent(in) :: &
    1323             :       dist                 ! block distribution for array X
    1324             : 
    1325             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
    1326             :       lMask                ! optional logical mask
    1327             : 
    1328             :    real (real_kind) :: &
    1329             :       globalMaxval         ! resulting maximum value of array
    1330             : 
    1331             : !-----------------------------------------------------------------------
    1332             : !
    1333             : !  local variables
    1334             : !
    1335             : !-----------------------------------------------------------------------
    1336             : 
    1337             :    real (real_kind) ::    &
    1338             :       blockMaxval,     &! max of local block domain   ! LCOV_EXCL_LINE
    1339             :       localMaxval       ! max of all local block domains
    1340             : 
    1341             :    integer (int_kind) :: &
    1342             :       i,j,iblock,      &! local counters   ! LCOV_EXCL_LINE
    1343             :       ib,ie,jb,je,     &! beg,end of physical domain   ! LCOV_EXCL_LINE
    1344             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1345             :       numBlocks,       &! number of local blocks   ! LCOV_EXCL_LINE
    1346             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1347             :       communicator,    &! communicator for this distribution   ! LCOV_EXCL_LINE
    1348             :       blockID           ! block location
    1349             : 
    1350             :    type (block) :: &
    1351             :       this_block          ! holds local block information
    1352             : 
    1353             :    character(len=*), parameter :: subname = '(global_maxval_real)'
    1354             : 
    1355             : !-----------------------------------------------------------------------
    1356             : 
    1357           0 :    localMaxval  = -HUGE(0.0_real_kind)
    1358           0 :    globalMaxval = -HUGE(0.0_real_kind)
    1359             : 
    1360             :    call ice_distributionGet(dist, &
    1361             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
    1362             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1363           0 :                             communicator = communicator)
    1364             : 
    1365           0 :    do iblock=1,numBlocks
    1366           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
    1367             : 
    1368           0 :       this_block = get_block(blockID, blockID)
    1369             : 
    1370           0 :       ib = this_block%ilo
    1371           0 :       ie = this_block%ihi
    1372           0 :       jb = this_block%jlo
    1373           0 :       je = this_block%jhi
    1374             : 
    1375           0 :       blockMaxval = -HUGE(0.0_real_kind)
    1376             : 
    1377           0 :       if (present(lMask)) then
    1378           0 :          do j=jb,je
    1379           0 :          do i=ib,ie
    1380           0 :             if (lMask(i,j,iblock)) then
    1381           0 :                blockMaxval = max(blockMaxval,array(i,j,iblock))
    1382             :             endif
    1383             :          end do
    1384             :          end do
    1385             :       else
    1386           0 :          do j=jb,je
    1387           0 :          do i=ib,ie
    1388           0 :             blockMaxval = max(blockMaxval,array(i,j,iblock))
    1389             :          end do
    1390             :          end do
    1391             :       endif
    1392             : 
    1393           0 :       localMaxval = max(localMaxval,blockMaxval)
    1394             : 
    1395             :    end do
    1396             : 
    1397             : !-----------------------------------------------------------------------
    1398             : !
    1399             : !  now use MPI global reduction to reduce local maxval to global maxval
    1400             : !
    1401             : !-----------------------------------------------------------------------
    1402             : 
    1403             : #ifdef SERIAL_REMOVE_MPI
    1404           0 :    globalMaxval = localMaxval
    1405             : #else
    1406             :    if (my_task < numProcs) then
    1407             :       call MPI_ALLREDUCE(localMaxval, globalMaxval, 1, &
    1408             :                          mpiR4, MPI_MAX, communicator, ierr)
    1409             :    endif
    1410             : #endif
    1411             : 
    1412             : !-----------------------------------------------------------------------
    1413             : 
    1414           0 :  end function global_maxval_real
    1415             : 
    1416             : !***********************************************************************
    1417             : 
    1418           0 :  function global_maxval_int (array, dist, lMask) &
    1419             :           result(globalMaxval)
    1420             : 
    1421             : !  Computes the global maximum value of the physical domain of a 2-d field
    1422             : !
    1423             : !  This is actually the specific interface for the generic global_maxval
    1424             : !  function corresponding to integer arrays.
    1425             : 
    1426             :    integer (int_kind), dimension(:,:,:), intent(in) :: &
    1427             :       array                ! array for which max value needed
    1428             : 
    1429             :    type (distrb), intent(in) :: &
    1430             :       dist                 ! block distribution for array X
    1431             : 
    1432             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
    1433             :       lMask                ! optional logical mask
    1434             : 
    1435             :    integer (int_kind) :: &
    1436             :       globalMaxval         ! resulting maximum value of array
    1437             : 
    1438             : !-----------------------------------------------------------------------
    1439             : !
    1440             : !  local variables
    1441             : !
    1442             : !-----------------------------------------------------------------------
    1443             : 
    1444             :    integer (int_kind) ::    &
    1445             :       blockMaxval,     &! max of local block domain   ! LCOV_EXCL_LINE
    1446             :       localMaxval       ! max of all local block domains
    1447             : 
    1448             :    integer (int_kind) :: &
    1449             :       i,j,iblock,      &! local counters   ! LCOV_EXCL_LINE
    1450             :       ib,ie,jb,je,     &! beg,end of physical domain   ! LCOV_EXCL_LINE
    1451             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1452             :       numBlocks,       &! number of local blocks   ! LCOV_EXCL_LINE
    1453             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1454             :       communicator,    &! communicator for this distribution   ! LCOV_EXCL_LINE
    1455             :       blockID           ! block location
    1456             : 
    1457             :    type (block) :: &
    1458             :       this_block          ! holds local block information
    1459             : 
    1460             :    character(len=*), parameter :: subname = '(global_maxval_int)'
    1461             : 
    1462             : !-----------------------------------------------------------------------
    1463             : 
    1464           0 :    localMaxval  = -HUGE(0_int_kind)
    1465           0 :    globalMaxval = -HUGE(0_int_kind)
    1466             : 
    1467             :    call ice_distributionGet(dist, &
    1468             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
    1469             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1470           0 :                             communicator = communicator)
    1471             : 
    1472           0 :    do iblock=1,numBlocks
    1473           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
    1474             : 
    1475           0 :       this_block = get_block(blockID, blockID)
    1476             : 
    1477           0 :       ib = this_block%ilo
    1478           0 :       ie = this_block%ihi
    1479           0 :       jb = this_block%jlo
    1480           0 :       je = this_block%jhi
    1481             : 
    1482           0 :       blockMaxval = -HUGE(0_int_kind)
    1483             : 
    1484           0 :       if (present(lMask)) then
    1485           0 :          do j=jb,je
    1486           0 :          do i=ib,ie
    1487           0 :             if (lMask(i,j,iblock)) then
    1488           0 :                blockMaxval = max(blockMaxval,array(i,j,iblock))
    1489             :             endif
    1490             :          end do
    1491             :          end do
    1492             :       else
    1493           0 :          do j=jb,je
    1494           0 :          do i=ib,ie
    1495           0 :             blockMaxval = max(blockMaxval,array(i,j,iblock))
    1496             :          end do
    1497             :          end do
    1498             :       endif
    1499             : 
    1500           0 :       localMaxval = max(localMaxval,blockMaxval)
    1501             : 
    1502             :    end do
    1503             : 
    1504             : !-----------------------------------------------------------------------
    1505             : !
    1506             : !  now use MPI global reduction to reduce local maxval to global maxval
    1507             : !
    1508             : !-----------------------------------------------------------------------
    1509             : 
    1510             : #ifdef SERIAL_REMOVE_MPI
    1511           0 :    globalMaxval = localMaxval
    1512             : #else
    1513             :    if (my_task < numProcs) then
    1514             :       call MPI_ALLREDUCE(localMaxval, globalMaxval, 1, &
    1515             :                          MPI_INTEGER, MPI_MAX, communicator, ierr)
    1516             :    endif
    1517             : #endif
    1518             : 
    1519             : !-----------------------------------------------------------------------
    1520             : 
    1521           0 :  end function global_maxval_int
    1522             : 
    1523             : !***********************************************************************
    1524             : 
    1525          24 :  function global_maxval_scalar_dbl (scalar, dist) &
    1526             :           result(globalMaxval)
    1527             : 
    1528             : !  Computes the global maximum value of a scalar value across
    1529             : !  a distributed machine.
    1530             : !
    1531             : !  This is actually the specific interface for the generic global_maxval
    1532             : !  function corresponding to double precision scalars.
    1533             : 
    1534             :    real (dbl_kind), intent(in) :: &
    1535             :       scalar               ! scalar for which max value needed
    1536             : 
    1537             :    type (distrb), intent(in) :: &
    1538             :       dist                 ! block distribution
    1539             : 
    1540             :    real (dbl_kind) :: &
    1541             :       globalMaxval         ! resulting maximum value
    1542             : 
    1543             : !-----------------------------------------------------------------------
    1544             : !
    1545             : !  local variables
    1546             : !
    1547             : !-----------------------------------------------------------------------
    1548             : 
    1549             :    integer (int_kind) :: &
    1550             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1551             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1552             :       communicator      ! communicator for this distribution
    1553             : 
    1554             :    character(len=*), parameter :: subname = '(global_maxval_scalar_dbl)'
    1555             : 
    1556             : !-----------------------------------------------------------------------
    1557             : 
    1558             :    call ice_distributionGet(dist, &
    1559             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1560          24 :                             communicator = communicator)
    1561             : 
    1562             : !-----------------------------------------------------------------------
    1563             : !
    1564             : !  now use MPI global reduction to reduce local maxval to global maxval
    1565             : !
    1566             : !-----------------------------------------------------------------------
    1567             : 
    1568             : #ifdef SERIAL_REMOVE_MPI
    1569          24 :    globalMaxval = scalar
    1570             : #else
    1571             :    if (my_task < numProcs) then
    1572             :       call MPI_ALLREDUCE(scalar, globalMaxval, 1, &
    1573             :                          mpiR8, MPI_MAX, communicator, ierr)
    1574             :    endif
    1575             : #endif
    1576             : 
    1577             : !-----------------------------------------------------------------------
    1578             : 
    1579          24 :  end function global_maxval_scalar_dbl
    1580             : 
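 The scalar variants reduce one value per task; in this SERIAL_REMOVE_MPI build they return the input unchanged, while an MPI build would perform an ALLREDUCE. A minimal sketch with an assumed distrb_info:

   subroutine example_scalar_maxval
      use ice_kinds_mod,         only: dbl_kind
      use ice_domain,            only: distrb_info   ! assumed name
      use ice_global_reductions, only: global_maxval

      real (dbl_kind) :: dt_local, dt_max

      dt_local = 0.5_dbl_kind                 ! some per-task diagnostic value
      dt_max   = global_maxval(dt_local, distrb_info)
   end subroutine example_scalar_maxval
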
    1581             : !***********************************************************************
    1582             : 
    1583           0 :  function global_maxval_scalar_real (scalar, dist) &
    1584             :           result(globalMaxval)
    1585             : 
    1586             : !  Computes the global maximum value of a scalar value across
    1587             : !  a distributed machine.
    1588             : !
    1589             : !  This is actually the specific interface for the generic global_maxval
    1590             : !  function corresponding to single precision scalars.
    1591             : 
    1592             :    real (real_kind), intent(in) :: &
    1593             :       scalar               ! scalar for which max value needed
    1594             : 
    1595             :    type (distrb), intent(in) :: &
    1596             :       dist                 ! block distribution
    1597             : 
    1598             :    real (real_kind) :: &
    1599             :       globalMaxval         ! resulting maximum value
    1600             : 
    1601             : !-----------------------------------------------------------------------
    1602             : !
    1603             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1604             : !
    1605             : !-----------------------------------------------------------------------
    1606             : 
    1607             :    integer (int_kind) :: &
    1608             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1609             :       numProcs,        &! number of processor participating   ! LCOV_EXCL_LINE
    1610             :       communicator      ! communicator for this distribution
    1611             : 
    1612             :    character(len=*), parameter :: subname = '(global_maxval_scalar_real)'
    1613             : 
    1614             : !-----------------------------------------------------------------------
    1615             : 
    1616             :    call ice_distributionGet(dist, &
    1617             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1618           0 :                             communicator = communicator)
    1619             : 
    1620             : !-----------------------------------------------------------------------
    1621             : !
    1622             : !  now use MPI global reduction to reduce local maxval to global maxval
    1623             : !
    1624             : !-----------------------------------------------------------------------
    1625             : 
    1626             : #ifdef SERIAL_REMOVE_MPI
    1627           0 :    globalMaxval = scalar
    1628             : #else
    1629             :    if (my_task < numProcs) then
    1630             :       call MPI_ALLREDUCE(scalar, globalMaxval, 1, &
    1631             :                          mpiR4, MPI_MAX, communicator, ierr)
    1632             :    endif
    1633             : #endif
    1634             : 
    1635             : !-----------------------------------------------------------------------
    1636             : 
    1637           0 :  end function global_maxval_scalar_real
    1638             : 
    1639             : !***********************************************************************
    1640             : 
    1641           8 :  function global_maxval_scalar_int (scalar, dist) &
    1642             :           result(globalMaxval)
    1643             : 
    1644             : !  Computes the global maximum value of a scalar value across
    1645             : !  a distributed machine.
    1646             : !
    1647             : !  This is actually the specific interface for the generic global_maxval
    1648             : !  function corresponding to integer scalars.
    1649             : 
    1650             :    integer (int_kind), intent(in) :: &
    1651             :       scalar               ! scalar for which max value needed
    1652             : 
    1653             :    type (distrb), intent(in) :: &
    1654             :       dist                 ! block distribution
    1655             : 
    1656             :    integer (int_kind) :: &
    1657             :       globalMaxval         ! resulting maximum value
    1658             : 
    1659             : !-----------------------------------------------------------------------
    1660             : !
    1661             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1662             : !
    1663             : !-----------------------------------------------------------------------
    1664             : 
    1665             :    integer (int_kind) :: &
    1666             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1667             :       numProcs,        &! number of processor participating   ! LCOV_EXCL_LINE
    1668             :       communicator      ! communicator for this distribution
    1669             : 
    1670             :    character(len=*), parameter :: subname = '(global_maxval_scalar_int)'
    1671             : 
    1672             : !-----------------------------------------------------------------------
    1673             : 
    1674             :    call ice_distributionGet(dist, &
    1675             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1676           8 :                             communicator = communicator)
    1677             : 
    1678             : !-----------------------------------------------------------------------
    1679             : !
    1680             : !  now use MPI global reduction to reduce local maxval to global maxval
    1681             : !
    1682             : !-----------------------------------------------------------------------
    1683             : 
    1684             : #ifdef SERIAL_REMOVE_MPI
    1685           8 :    globalMaxval = scalar
    1686             : #else
    1687             :    if (my_task < numProcs) then
    1688             :       call MPI_ALLREDUCE(scalar, globalMaxval, 1, &
    1689             :                          MPI_INTEGER, MPI_MAX, communicator, ierr)
    1690             :    endif
    1691             : #endif
    1692             : 
    1693             : !-----------------------------------------------------------------------
    1694             : 
    1695           8 :  end function global_maxval_scalar_int
    1696             : 
    1697             : !***********************************************************************
    1698             : 
    1699           0 :  function global_maxval_scalar_int_nodist (scalar, communicator) &
    1700             :           result(globalMaxval)
    1701             : 
    1702             : !  Computes the global maximum value of a scalar value across
    1703             : !  a communicator.  This method supports testing.
    1704             : !
    1705             : !  This is actually the specific interface for the generic global_maxval
    1706             : !  function corresponding to integer scalars.
    1707             : 
    1708             :    integer (int_kind), intent(in) :: &
    1709             :       scalar               ! scalar for which max value needed
    1710             : 
    1711             :    integer (int_kind), intent(in) :: &
    1712             :       communicator         ! mpi communicator
    1713             : 
    1714             :    integer (int_kind) :: &
    1715             :       globalMaxval         ! resulting maximum value
    1716             : 
    1717             : !-----------------------------------------------------------------------
    1718             : !
    1719             : !  local variables
    1720             : !
    1721             : !-----------------------------------------------------------------------
    1722             : 
    1723             :    integer (int_kind) :: &
    1724             :       ierr             ! mpi error flag
    1725             : 
    1726             :    character(len=*), parameter :: subname = '(global_maxval_scalar_int_nodist)'
    1727             : 
    1728             : !-----------------------------------------------------------------------
    1729             : 
    1730             : !-----------------------------------------------------------------------
    1731             : !
    1732             : !  now use MPI global reduction to reduce local maxval to global maxval
    1733             : !
    1734             : !-----------------------------------------------------------------------
    1735             : 
    1736             : #ifdef SERIAL_REMOVE_MPI
    1737           0 :    globalMaxval = scalar
    1738             : #else
    1739             :    call MPI_ALLREDUCE(scalar, globalMaxval, 1, &
    1740             :                       MPI_INTEGER, MPI_MAX, communicator, ierr)
    1741             : #endif
    1742             : 
    1743             : !-----------------------------------------------------------------------
    1744             : 
    1745           0 :  end function global_maxval_scalar_int_nodist
    1746             : 
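 The _nodist variant takes a bare communicator instead of a distrb, which is convenient in unit tests run before a block distribution exists. A sketch, assuming the generic global_maxval also resolves to this specific (the module's interface block is not shown here) and that the integer handle stands in for an MPI communicator:

   subroutine example_maxval_nodist
      use ice_kinds_mod,         only: int_kind
      use ice_global_reductions, only: global_maxval

      integer (int_kind) :: nfail_local, nfail_global, test_comm

      test_comm    = 0   ! would be an MPI communicator handle in an MPI build
      nfail_local  = 3
      ! integer scalar + integer communicator selects the _nodist specific
      nfail_global = global_maxval(nfail_local, test_comm)
   end subroutine example_maxval_nodist
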
    1747             : !***********************************************************************
    1748             : 
    1749           8 :  function global_minval_dbl (array, dist, lMask) &
    1750             :           result(globalMinval)
    1751             : 
    1752             : !  Computes the global minimum value of the physical domain of a 2-d field
    1753             : !
    1754             : !  This is actually the specific interface for the generic global_minval
    1755             : !  function corresponding to double precision arrays.
    1756             : 
    1757             :    real (dbl_kind), dimension(:,:,:), intent(in) :: &
    1758             :       array                ! array for which min value needed
    1759             : 
    1760             :    type (distrb), intent(in) :: &
    1761             :       dist                 ! block distribution for array X
    1762             : 
    1763             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
    1764             :       lMask                ! optional logical mask
    1765             : 
    1766             :    real (dbl_kind) :: &
    1767             :       globalMinval         ! resulting minimum value of array
    1768             : 
    1769             : !-----------------------------------------------------------------------
    1770             : !
    1771             : !  local variables
    1772             : !
    1773             : !-----------------------------------------------------------------------
    1774             : 
    1775             :    real (dbl_kind) ::    &
    1776             :       blockMinval,     &! min of local block domain   ! LCOV_EXCL_LINE
    1777             :       localMinval       ! min of all local block domains
    1778             : 
    1779             :    integer (int_kind) :: &
    1780             :       i,j,iblock,      &! local counters   ! LCOV_EXCL_LINE
    1781             :       ib,ie,jb,je,     &! beg,end of physical domain   ! LCOV_EXCL_LINE
    1782             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1783             :       numBlocks,       &! number of local blocks   ! LCOV_EXCL_LINE
    1784             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1785             :       communicator,    &! communicator for this distribution   ! LCOV_EXCL_LINE
    1786             :       blockID           ! block location
    1787             : 
    1788             :    type (block) :: &
    1789             :       this_block          ! holds local block information
    1790             : 
    1791             :    character(len=*), parameter :: subname = '(global_minval_dbl)'
    1792             : 
    1793             : !-----------------------------------------------------------------------
    1794             : 
    1795           8 :    localMinval  = HUGE(0.0_dbl_kind)
    1796           8 :    globalMinval = HUGE(0.0_dbl_kind)
    1797             : 
    1798             :    call ice_distributionGet(dist, &
    1799             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
    1800             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1801           8 :                             communicator = communicator)
    1802             : 
    1803          16 :    do iblock=1,numBlocks
    1804           8 :       call ice_distributionGetBlockID(dist, iblock, blockID)
    1805             : 
    1806           8 :       this_block = get_block(blockID, blockID)
    1807             : 
    1808           8 :       ib = this_block%ilo
    1809           8 :       ie = this_block%ihi
    1810           8 :       jb = this_block%jlo
    1811           8 :       je = this_block%jhi
    1812             : 
    1813           8 :       blockMinval = HUGE(0.0_dbl_kind)
    1814             : 
    1815           8 :       if (present(lMask)) then
    1816         944 :          do j=jb,je
    1817       93736 :          do i=ib,ie
    1818       93728 :             if (lMask(i,j,iblock)) then
    1819       61244 :                blockMinval = min(blockMinval,array(i,j,iblock))
    1820             :             endif
    1821             :          end do
    1822             :          end do
    1823             :       else
    1824           0 :          do j=jb,je
    1825           0 :          do i=ib,ie
    1826           0 :             blockMinval = min(blockMinval,array(i,j,iblock))
    1827             :          end do
    1828             :          end do
    1829             :       endif
    1830             : 
    1831          24 :       localMinval = min(localMinval,blockMinval)
    1832             : 
    1833             :    end do
    1834             : 
    1835             : !-----------------------------------------------------------------------
    1836             : !
    1837             : !  now use MPI global reduction to reduce local minval to global minval
    1838             : !
    1839             : !-----------------------------------------------------------------------
    1840             : 
    1841             : #ifdef SERIAL_REMOVE_MPI
    1842           8 :    globalMinval = localMinval
    1843             : #else
    1844             :    if (my_task < numProcs) then
    1845             :       call MPI_ALLREDUCE(localMinval, globalMinval, 1, &
    1846             :                          mpiR8, MPI_MIN, communicator, ierr)
    1847             :    endif
    1848             : #endif
    1849             : 
    1850             : !-----------------------------------------------------------------------
    1851             : 
    1852          16 :  end function global_minval_dbl
    1853             : 
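 global_minval mirrors global_maxval; a common pattern is bounding a field from both sides for diagnostics. A brief sketch with assumed names Tsfc, tmask and distrb_info (fields left unallocated for brevity):

   subroutine example_field_bounds
      use ice_kinds_mod,         only: dbl_kind, log_kind
      use ice_domain,            only: distrb_info   ! assumed name
      use ice_global_reductions, only: global_minval, global_maxval

      real (dbl_kind), dimension(:,:,:), allocatable :: Tsfc
      logical (log_kind), dimension(:,:,:), allocatable :: tmask
      real (dbl_kind) :: tmin, tmax

      ! global bounds of the surface temperature field over masked points
      tmin = global_minval(Tsfc, distrb_info, lMask=tmask)
      tmax = global_maxval(Tsfc, distrb_info, lMask=tmask)
   end subroutine example_field_bounds
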
    1854             : !***********************************************************************
    1855             : 
    1856           0 :  function global_minval_real (array, dist, lMask) &
    1857             :           result(globalMinval)
    1858             : 
    1859             : !  Computes the global minimum value of the physical domain of a 2-d field
    1860             : !
    1861             : !  This is actually the specific interface for the generic global_minval
    1862             : !  function corresponding to single precision arrays.
    1863             : 
    1864             :    real (real_kind), dimension(:,:,:), intent(in) :: &
    1865             :       array                ! array for which min value needed
    1866             : 
    1867             :    type (distrb), intent(in) :: &
    1868             :       dist                 ! block distribution for array X
    1869             : 
    1870             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
    1871             :       lMask                ! optional logical mask
    1872             : 
    1873             :    real (real_kind) :: &
    1874             :       globalMinval         ! resulting minimum value of array
    1875             : 
    1876             : !-----------------------------------------------------------------------
    1877             : !
    1878             : !  local variables
    1879             : !
    1880             : !-----------------------------------------------------------------------
    1881             : 
    1882             :    real (real_kind) ::    &
    1883             :       blockMinval,     &! min of local block domain   ! LCOV_EXCL_LINE
    1884             :       localMinval       ! min of all local block domains
    1885             : 
    1886             :    integer (int_kind) :: &
    1887             :       i,j,iblock,      &! local counters   ! LCOV_EXCL_LINE
    1888             :       ib,ie,jb,je,     &! beg,end of physical domain   ! LCOV_EXCL_LINE
    1889             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1890             :       numBlocks,       &! number of local blocks   ! LCOV_EXCL_LINE
    1891             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1892             :       communicator,    &! communicator for this distribution   ! LCOV_EXCL_LINE
    1893             :       blockID           ! block location
    1894             : 
    1895             :    type (block) :: &
    1896             :       this_block          ! holds local block information
    1897             : 
    1898             :    character(len=*), parameter :: subname = '(global_minval_real)'
    1899             : 
    1900             : !-----------------------------------------------------------------------
    1901             : 
    1902           0 :    localMinval  = HUGE(0.0_real_kind)
    1903           0 :    globalMinval = HUGE(0.0_real_kind)
    1904             : 
    1905             :    call ice_distributionGet(dist, &
    1906             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
    1907             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    1908           0 :                             communicator = communicator)
    1909             : 
    1910           0 :    do iblock=1,numBlocks
    1911           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
    1912             : 
    1913           0 :       this_block = get_block(blockID, blockID)
    1914             : 
    1915           0 :       ib = this_block%ilo
    1916           0 :       ie = this_block%ihi
    1917           0 :       jb = this_block%jlo
    1918           0 :       je = this_block%jhi
    1919             : 
    1920           0 :       blockMinval = HUGE(0.0_real_kind)
    1921             : 
    1922           0 :       if (present(lMask)) then
    1923           0 :          do j=jb,je
    1924           0 :          do i=ib,ie
    1925           0 :             if (lMask(i,j,iblock)) then
    1926           0 :                blockMinval = min(blockMinval,array(i,j,iblock))
    1927             :             endif
    1928             :          end do
    1929             :          end do
    1930             :       else
    1931           0 :          do j=jb,je
    1932           0 :          do i=ib,ie
    1933           0 :             blockMinval = min(blockMinval,array(i,j,iblock))
    1934             :          end do
    1935             :          end do
    1936             :       endif
    1937             : 
    1938           0 :       localMinval = min(localMinval,blockMinval)
    1939             : 
    1940             :    end do
    1941             : 
    1942             : !-----------------------------------------------------------------------
    1943             : !
    1944             : !  now use MPI global reduction to reduce local minval to global minval
    1945             : !
    1946             : !-----------------------------------------------------------------------
    1947             : 
    1948             : #ifdef SERIAL_REMOVE_MPI
    1949           0 :    globalMinval = localMinval
    1950             : #else
    1951             :    if (my_task < numProcs) then
    1952             :       call MPI_ALLREDUCE(localMinval, globalMinval, 1, &
    1953             :                          mpiR4, MPI_MIN, communicator, ierr)
    1954             :    endif
    1955             : #endif
    1956             : 
    1957             : !-----------------------------------------------------------------------
    1958             : 
    1959           0 :  end function global_minval_real
    1960             : 
    1961             : !***********************************************************************
    1962             : 
    1963           0 :  function global_minval_int (array, dist, lMask) &
    1964             :           result(globalMinval)
    1965             : 
    1966             : !  Computes the global minimum value of the physical domain of a 2-d field
    1967             : !
    1968             : !  This is actually the specific interface for the generic global_minval
    1969             : !  function corresponding to integer arrays.
    1970             : 
    1971             :    integer (int_kind), dimension(:,:,:), intent(in) :: &
    1972             :       array                ! array for which min value needed
    1973             : 
    1974             :    type (distrb), intent(in) :: &
    1975             :       dist                 ! block distribution for array X
    1976             : 
    1977             :    logical (log_kind), dimension(:,:,:), intent(in), optional :: &
    1978             :       lMask                ! optional logical mask
    1979             : 
    1980             :    integer (int_kind) :: &
    1981             :       globalMinval         ! resulting minimum value of array
    1982             : 
    1983             : !-----------------------------------------------------------------------
    1984             : !
    1985             : !  local variables
    1986             : !
    1987             : !-----------------------------------------------------------------------
    1988             : 
    1989             :    integer (int_kind) ::    &
    1990             :       blockMinval,     &! min of local block domain   ! LCOV_EXCL_LINE
    1991             :       localMinval       ! min of all local block domains
    1992             : 
    1993             :    integer (int_kind) :: &
    1994             :       i,j,iblock,      &! local counters   ! LCOV_EXCL_LINE
    1995             :       ib,ie,jb,je,     &! beg,end of physical domain   ! LCOV_EXCL_LINE
    1996             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    1997             :       numBlocks,       &! number of local blocks   ! LCOV_EXCL_LINE
    1998             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    1999             :       communicator,    &! communicator for this distribution   ! LCOV_EXCL_LINE
    2000             :       blockID           ! block location
    2001             : 
    2002             :    type (block) :: &
    2003             :       this_block          ! holds local block information
    2004             : 
    2005             :    character(len=*), parameter :: subname = '(global_minval_int)'
    2006             : 
    2007             : !-----------------------------------------------------------------------
    2008             : 
    2009           0 :    localMinval  = HUGE(0_int_kind)
    2010           0 :    globalMinval = HUGE(0_int_kind)
    2011             : 
    2012             :    call ice_distributionGet(dist, &
    2013             :                             numLocalBlocks = numBlocks, &   ! LCOV_EXCL_LINE
    2014             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    2015           0 :                             communicator = communicator)
    2016             : 
    2017           0 :    do iblock=1,numBlocks
    2018           0 :       call ice_distributionGetBlockID(dist, iblock, blockID)
    2019             : 
    2020           0 :       this_block = get_block(blockID, blockID)
    2021             : 
    2022           0 :       ib = this_block%ilo
    2023           0 :       ie = this_block%ihi
    2024           0 :       jb = this_block%jlo
    2025           0 :       je = this_block%jhi
    2026             : 
    2027           0 :       blockMinval = HUGE(0_int_kind)
    2028             : 
    2029           0 :       if (present(lMask)) then
    2030           0 :          do j=jb,je
    2031           0 :          do i=ib,ie
    2032           0 :             if (lMask(i,j,iblock)) then
    2033           0 :                blockMinval = min(blockMinval,array(i,j,iblock))
    2034             :             endif
    2035             :          end do
    2036             :          end do
    2037             :       else
    2038           0 :          do j=jb,je
    2039           0 :          do i=ib,ie
    2040           0 :             blockMinval = min(blockMinval,array(i,j,iblock))
    2041             :          end do
    2042             :          end do
    2043             :       endif
    2044             : 
    2045           0 :       localMinval = min(localMinval,blockMinval)
    2046             : 
    2047             :    end do
    2048             : 
    2049             : !-----------------------------------------------------------------------
    2050             : !
    2051             : !  now use MPI global reduction to reduce local minval to global minval
    2052             : !
    2053             : !-----------------------------------------------------------------------
    2054             : 
    2055             : #ifdef SERIAL_REMOVE_MPI
    2056           0 :    globalMinval = localMinval
    2057             : #else
    2058             :    if (my_task < numProcs) then
    2059             :       call MPI_ALLREDUCE(localMinval, globalMinval, 1, &
    2060             :                          MPI_INTEGER, MPI_MIN, communicator, ierr)
    2061             :    endif
    2062             : #endif
    2063             : 
    2064             : !-----------------------------------------------------------------------
    2065             : 
    2066           0 :  end function global_minval_int
    2067             : 
    2068             : !***********************************************************************
    2069             : 
    2070           2 :  function global_minval_scalar_dbl (scalar, dist) &
    2071             :           result(globalMinval)
    2072             : 
    2073             : !  Computes the global minimum value of a scalar value across
    2074             : !  a distributed machine.
    2075             : !
    2076             : !  This is actually the specific interface for the generic global_minval
    2077             : !  function corresponding to double precision scalars.
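                     : !
                     : !  A minimal usage sketch (variable names here are illustrative, not
                     : !  from this file): callers normally invoke the generic interface,
                     : !  which resolves to this specific routine for double precision
                     : !  scalars, e.g.
                     : !
                     : !     use ice_global_reductions, only: global_minval
                     : !     real (dbl_kind) :: localVal, minVal
                     : !     minVal = global_minval(localVal, distrb_info)
                     : !
                     : !  where distrb_info is assumed to be the block distribution held in
                     : !  ice_domain.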
    2078             : 
    2079             :    real (dbl_kind), intent(in) :: &
    2080             :       scalar               ! scalar for which min value needed
    2081             : 
    2082             :    type (distrb), intent(in) :: &
    2083             :       dist                 ! block distribution
    2084             : 
    2085             :    real (dbl_kind) :: &
    2086             :       globalMinval         ! resulting minimum value
    2087             : 
    2088             : !-----------------------------------------------------------------------
    2089             : !
    2090             : !  local variables
    2091             : !
    2092             : !-----------------------------------------------------------------------
    2093             : 
    2094             :    integer (int_kind) :: &
    2095             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    2096             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    2097             :       communicator      ! communicator for this distribution
    2098             : 
    2099             :    character(len=*), parameter :: subname = '(global_minval_scalar_dbl)'
    2100             : 
    2101             : !-----------------------------------------------------------------------
    2102             : 
    2103             :    call ice_distributionGet(dist, &
    2104             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    2105           2 :                             communicator = communicator)
    2106             : 
    2107             : !-----------------------------------------------------------------------
    2108             : !
    2109             : !  now use MPI global reduction to reduce local minval to global minval
    2110             : !
    2111             : !-----------------------------------------------------------------------
    2112             : 
    2113             : #ifdef SERIAL_REMOVE_MPI
    2114           2 :    globalMinval = scalar
    2115             : #else
    2116             :    if (my_task < numProcs) then
    2117             :       call MPI_ALLREDUCE(scalar, globalMinval, 1, &
    2118             :                          mpiR8, MPI_MIN, communicator, ierr)
    2119             :    endif
    2120             : #endif
    2121             : 
    2122             : !-----------------------------------------------------------------------
    2123             : 
    2124           2 :  end function global_minval_scalar_dbl
    2125             : 
    2126             : !***********************************************************************
    2127             : 
    2128           0 :  function global_minval_scalar_real (scalar, dist) &
    2129             :           result(globalMinval)
    2130             : 
    2131             : !  Computes the global minimum value of a scalar value across
    2132             : !  a distributed machine.
    2133             : !
    2134             : !  This is actually the specific interface for the generic global_minval
    2135             : !  function corresponding to single precision scalars.
    2136             : 
    2137             :    real (real_kind), intent(in) :: &
    2138             :       scalar               ! scalar for which min value needed
    2139             : 
    2140             :    type (distrb), intent(in) :: &
    2141             :       dist                 ! block distribution
    2142             : 
    2143             :    real (real_kind) :: &
    2144             :       globalMinval         ! resulting minimum value
    2145             : 
    2146             : !-----------------------------------------------------------------------
    2147             : !
    2148             : !  local variables
    2149             : !
    2150             : !-----------------------------------------------------------------------
    2151             : 
    2152             :    integer (int_kind) :: &
    2153             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    2154             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    2155             :       communicator      ! communicator for this distribution
    2156             : 
    2157             :    character(len=*), parameter :: subname = '(global_minval_scalar_real)'
    2158             : 
    2159             : !-----------------------------------------------------------------------
    2160             : 
    2161             :    call ice_distributionGet(dist, &
    2162             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    2163           0 :                             communicator = communicator)
    2164             : 
    2165             : !-----------------------------------------------------------------------
    2166             : !
    2167             : !  now use MPI global reduction to reduce local minval to global minval
    2168             : !
    2169             : !-----------------------------------------------------------------------
    2170             : 
    2171             : #ifdef SERIAL_REMOVE_MPI
    2172           0 :    globalMinval = scalar
    2173             : #else
    2174             :    if (my_task < numProcs) then
    2175             :       call MPI_ALLREDUCE(scalar, globalMinval, 1, &
    2176             :                          mpiR4, MPI_MIN, communicator, ierr)
    2177             :    endif
    2178             : #endif
    2179             : 
    2180             : !-----------------------------------------------------------------------
    2181             : 
    2182           0 :  end function global_minval_scalar_real
    2183             : 
    2184             : !***********************************************************************
    2185             : 
    2186           0 :  function global_minval_scalar_int (scalar, dist) &
    2187             :           result(globalMinval)
    2188             : 
    2189             : !  Computes the global minimum value of a scalar value across
    2190             : !  a distributed machine.
    2191             : !
    2192             : !  This is actually the specific interface for the generic global_minval
    2193             : !  function corresponding to integer scalars.
    2194             : 
    2195             :    integer (int_kind), intent(in) :: &
    2196             :       scalar               ! scalar for which min value needed
    2197             : 
    2198             :    type (distrb), intent(in) :: &
    2199             :       dist                 ! block distribution
    2200             : 
    2201             :    integer (int_kind) :: &
    2202             :       globalMinval         ! resulting minimum value
    2203             : 
    2204             : !-----------------------------------------------------------------------
    2205             : !
    2206             : !  local variables
    2207             : !
    2208             : !-----------------------------------------------------------------------
    2209             : 
    2210             :    integer (int_kind) :: &
    2211             :       ierr,            &! mpi error flag   ! LCOV_EXCL_LINE
    2212             :       numProcs,        &! number of processors participating   ! LCOV_EXCL_LINE
    2213             :       communicator      ! communicator for this distribution
    2214             : 
    2215             :    character(len=*), parameter :: subname = '(global_minval_scalar_int)'
    2216             : 
    2217             : !-----------------------------------------------------------------------
    2218             : 
    2219             :    call ice_distributionGet(dist, &
    2220             :                             nprocs = numProcs,        &   ! LCOV_EXCL_LINE
    2221           0 :                             communicator = communicator)
    2222             : 
    2223             : !-----------------------------------------------------------------------
    2224             : !
    2225             : !  now use MPI global reduction to reduce local minval to global minval
    2226             : !
    2227             : !-----------------------------------------------------------------------
    2228             : 
    2229             : #ifdef SERIAL_REMOVE_MPI
    2230           0 :    globalMinval = scalar
    2231             : #else
    2232             :    if (my_task < numProcs) then
    2233             :       call MPI_ALLREDUCE(scalar, globalMinval, 1, &
    2234             :                          MPI_INTEGER, MPI_MIN, communicator, ierr)
    2235             :    endif
    2236             : #endif
    2237             : 
    2238             : !-----------------------------------------------------------------------
    2239             : 
    2240           0 :  end function global_minval_scalar_int
    2241             : 
    2242             : !***********************************************************************
    2243             : 
    2244           0 :  function global_minval_scalar_int_nodist (scalar, communicator) &
    2245             :           result(globalMinval)
    2246             : 
    2247             : !  Computes the global minimum value of a scalar value across
    2248             : !  a communicator.  This method supports testing.
    2249             : !
    2250             : !  This is actually the specific interface for the generic global_minval
    2251             : !  function corresponding to integer scalars.
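                     : !
                     : !  A minimal usage sketch (names illustrative): unlike the variants
                     : !  above, this routine takes a raw MPI communicator rather than a
                     : !  distrb object, e.g. in an MPI build
                     : !
                     : !     gmin = global_minval(myVal, MPI_COMM_WORLD)
                     : !
                     : !  assuming this specific routine is reachable through the generic
                     : !  global_minval interface; that makes it convenient for unit tests
                     : !  that do not construct a block distribution.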
    2252             : 
    2253             :    integer (int_kind), intent(in) :: &
    2254             :       scalar               ! scalar for which min value needed
    2255             : 
    2256             :    integer(int_kind), intent(in) :: &
    2257             :       communicator         ! mpi communicator
    2258             : 
    2259             :    integer (int_kind) :: &
    2260             :       globalMinval         ! resulting minimum value
    2261             : 
    2262             : !-----------------------------------------------------------------------
    2263             : !
    2264             : !  local variables
    2265             : !
    2266             : !-----------------------------------------------------------------------
    2267             : 
    2268             :    integer (int_kind) :: &
    2269             :       ierr             ! mpi error flag
    2270             : 
    2271             :    character(len=*), parameter :: subname = '(global_minval_scalar_int_nodist)'
    2272             : 
    2273             : !-----------------------------------------------------------------------
    2274             : 
    2275             : !-----------------------------------------------------------------------
    2276             : !
    2277             : !  now use MPI global reduction to reduce local minval to global minval
    2278             : !
    2279             : !-----------------------------------------------------------------------
    2280             : 
    2281             : #ifdef SERIAL_REMOVE_MPI
    2282           0 :    globalMinval = scalar
    2283             : #else
    2284             :    call MPI_ALLREDUCE(scalar, globalMinval, 1, &
    2285             :                       MPI_INTEGER, MPI_MIN, communicator, ierr)
    2286             : #endif
    2287             : 
    2288             : !-----------------------------------------------------------------------
    2289             : 
    2290             : ! Computes global sums of a 2-d array in which the first dimension
    2291             : ! holds the values to be summed and the second dimension indexes fields
    2292             : !***********************************************************************
    2293             : 
    2294        1152 : subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs)
    2295             : 
    2296             : ! Computes the global sum of a 2-d array over fields
    2297             : ! with first dimension values and second dimension fields
    2298             : !
    2299             : ! Several different options are supported.
    2300             : ! lsum4 = local sum with real*4 and scalar mpi allreduce, unlikely to be bfb
    2301             : ! lsum8 = local sum with real*8 and scalar mpi allreduce, unlikely to be bfb
    2302             : ! lsum16 = local sum with real*16 and scalar mpi allreduce, likely to be bfb
    2303             : !    WARNING: this does not work with several compilers and mpi
    2304             : !    implementations due to incomplete support for quad precision and
    2305             : !    inconsistent underlying datatypes between fortran and c.  The r16 code
    2306             : !    can be disabled with the cpp macro NO_R16.  Otherwise, it is recommended
    2307             : !    that the results be validated on any platform where it might be used.
    2308             : ! reprosum = fixed point method based on ordered double integer sums
    2309             : !    that requires two scalar reductions per global sum.
    2310             : !    This is extremely likely to be bfb.
    2311             : !    (See Mirin and Worley, 2012, IJHPCA, 26, 17-30,
    2312             : !    https://journals.sagepub.com/doi/10.1177/1094342011412630)
    2313             : ! ddpdd = parallel double-double algorithm using single scalar reduction.
    2314             : !    This is very likely to be bfb.
    2315             : !    (See He and Ding, 2001, Journal of Supercomputing, 18, 259,
    2316             : !    https://link.springer.com/article/10.1023%2FA%3A1008153532043)
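                     : !
                     : !  For orientation only (the actual implementation lives in
                     : !  ice_reprosum): the ddpdd option accumulates each partial sum as a
                     : !  double-double pair (hi,lo) using Knuth's two-sum, roughly
                     : !
                     : !     t1 = hi + x
                     : !     e  = t1 - hi
                     : !     t2 = ((x - e) + (hi - (t1 - e))) + lo
                     : !     hi = t1 + t2
                     : !     lo = t2 - (hi - t1)
                     : !
                     : !  so the rounding error of each addition is carried along in lo,
                     : !  which is why a single custom MPI reduction over (hi,lo) pairs can
                     : !  still be bit-for-bit.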
    2317             : 
    2318             :    real (dbl_kind), dimension(:,:), intent(in) :: &
    2319             :       array2               ! array to be summed
    2320             : 
    2321             :    real (dbl_kind), dimension(:), intent(inout) :: &
    2322             :       sums8                 ! resulting global sums (one per field)
    2323             : 
    2324             :    integer(int_kind), intent(in) :: &
    2325             :       mpicomm
    2326             : 
    2327             :    integer(int_kind), intent(in) :: &
    2328             :       numprocs
    2329             : 
    2330             : !-----------------------------------------------------------------------
    2331             : !
    2332             : !  local variables
    2333             : !
    2334             : !-----------------------------------------------------------------------
    2335             : 
    2336        1152 :    real (real_kind), allocatable :: psums4(:)
    2337        1152 :    real (real_kind), allocatable :: sums4(:)
    2338        1152 :    real (dbl_kind) , allocatable :: psums8(:)
    2339             :    ! if r16 is not available (NO_R16), then r16 reverts to double precision (r8)
    2340        1152 :    real (r16_kind) , allocatable :: psums16(:)
    2341        1152 :    real (r16_kind) , allocatable :: sums16(:)
    2342             : 
    2343             :    integer (int_kind) :: ns,nf,i,j, ierr
    2344             : 
    2345             :    character(len=*), parameter :: subname = '(compute_sums_dbl)'
    2346             : 
    2347             : !-----------------------------------------------------------------------
    2348             : 
    2349        2304 :    sums8 = 0._dbl_kind
    2350        1152 :    ns = size(array2,dim=1)
    2351        1152 :    nf = size(array2,dim=2)
    2352             : 
    2353        1152 :    if (bfbflag == 'off' .or. bfbflag == 'lsum8') then
    2354        1152 :       allocate(psums8(nf))
    2355        2304 :       psums8(:) = 0._dbl_kind
    2356             : 
    2357        2304 :       do j = 1, nf
    2358    13867776 :       do i = 1, ns
    2359    13866624 :          psums8(j) = psums8(j) + array2(i,j)
    2360             :       enddo
    2361             :       enddo
    2362             : 
    2363             : #ifdef SERIAL_REMOVE_MPI
    2364        2304 :       sums8 = psums8
    2365             : #else
    2366             :       if (my_task < numProcs) then
    2367             :          call MPI_ALLREDUCE(psums8, sums8, nf, mpiR8, MPI_SUM, mpicomm, ierr)
    2368             :       endif
    2369             : #endif
    2370             : 
    2371        1152 :       deallocate(psums8)
    2372             : 
    2373             :    ! if no_r16 is set, this will revert to a double precision calculation like lsum8
    2374           0 :    elseif (bfbflag == 'lsum16') then
    2375           0 :       allocate(psums16(nf))
    2376           0 :       psums16(:) = 0._r16_kind
    2377           0 :       allocate(sums16(nf))
    2378           0 :       sums16(:) = 0._r16_kind
    2379             : 
    2380           0 :       do j = 1, nf
    2381           0 :       do i = 1, ns
    2382           0 :          psums16(j) = psums16(j) + real(array2(i,j),r16_kind)
    2383             :       enddo
    2384             :       enddo
    2385             : 
    2386             : #ifdef SERIAL_REMOVE_MPI
    2387           0 :       sums16 = psums16
    2388             : #else
    2389             :       if (my_task < numProcs) then
    2390             :          call MPI_ALLREDUCE(psums16, sums16, nf, mpiR16, MPI_SUM, mpicomm, ierr)
    2391             :       endif
    2392             : #endif
    2393           0 :       sums8 = real(sums16,dbl_kind)
    2394             : 
    2395           0 :       deallocate(psums16,sums16)
    2396             : 
    2397           0 :    elseif (bfbflag == 'lsum4') then
    2398           0 :       allocate(psums4(nf))
    2399           0 :       psums4(:) = 0._real_kind
    2400           0 :       allocate(sums4(nf))
    2401           0 :       sums4(:) = 0._real_kind
    2402             : 
    2403           0 :       do j = 1, nf
    2404           0 :       do i = 1, ns
    2405           0 :          psums4(j) = psums4(j) + real(array2(i,j),real_kind)
    2406             :       enddo
    2407             :       enddo
    2408             : 
    2409             : #ifdef SERIAL_REMOVE_MPI
    2410           0 :       sums4 = psums4
    2411             : #else
    2412             :       if (my_task < numProcs) then
    2413             :          call MPI_ALLREDUCE(psums4, sums4, nf, mpiR4, MPI_SUM, mpicomm, ierr)
    2414             :       endif
    2415             : #endif
    2416           0 :       sums8 = real(sums4,dbl_kind)
    2417             : 
    2418           0 :       deallocate(psums4,sums4)
    2419             : 
    2420           0 :    elseif (bfbflag == 'ddpdd') then
    2421           0 :       if (my_task < numProcs) then
    2422           0 :          call ice_reprosum_calc(array2,sums8,ns,ns,nf,ddpdd_sum=.true.,commid=mpicomm)
    2423             :       endif
    2424             : 
    2425           0 :    elseif (bfbflag == 'reprosum') then
    2426           0 :       if (my_task < numProcs) then
    2427           0 :          call ice_reprosum_calc(array2,sums8,ns,ns,nf,ddpdd_sum=.false.,commid=mpicomm)
    2428             :       endif
    2429             : 
    2430             :    else
    2431           0 :       call abort_ice(subname//'ERROR: bfbflag unknown '//trim(bfbflag))
    2432             :    endif
    2433             : 
    2434        1152 : end subroutine compute_sums_dbl
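                     : !
                     : !  Hedged usage sketch (array names illustrative): the global_sum_*
                     : !  routines in this module are assumed to pack their per-block partial
                     : !  values into a work array shaped (values, fields) and then call
                     : !
                     : !     call compute_sums_dbl(work, gsums, communicator, numProcs)
                     : !
                     : !  with bfbflag (a run-time flag from ice_fileunits, set through the
                     : !  CICE namelist) selecting which branch above is taken.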
    2435             : 
    2436             : !***********************************************************************
    2437             : 
    2438             : end module ice_global_reductions
    2439             : 
    2440             : !|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||

Generated by: LCOV version 1.14-6-g40580cd