Timestamp: Mar 29, 2011 11:39:40 AM
Author: raasch
Message:

New:
---

In the multigrid method, gathered data on coarse grid levels are now processed
identically on all PEs (before, on PE0 only), so that the subsequent
scattering of the data is no longer necessary. (modules, init_pegrid, poismg)
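
The change can be illustrated with a minimal, hypothetical MPI sketch (not taken
from poismg itself; the array sizes and the processing step are made up):
gathering on every PE with MPI_ALLGATHER instead of MPI_GATHER to PE0 lets all
PEs process the complete coarse-grid data identically, so no MPI_SCATTER back
to the PEs is needed afterwards.

! Minimal, hypothetical sketch (not PALM code): gather the coarse-grid data
! on ALL PEs instead of PE0 only, so that every PE processes the complete
! field identically and no scatter step is needed afterwards.
PROGRAM allgather_sketch

   USE mpi

   IMPLICIT NONE

   INTEGER, PARAMETER ::  nlocal = 4            ! coarse-grid points per PE (assumed)
   INTEGER            ::  ierr, myid, numprocs
   REAL, ALLOCATABLE  ::  local(:), global(:)

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )

   ALLOCATE( local(nlocal), global(nlocal*numprocs) )
   local = REAL( myid )                         ! dummy coarse-grid data

!
!-- Old scheme (conceptually): MPI_GATHER to PE0, process on PE0, MPI_SCATTER
!-- back. New scheme: one MPI_ALLGATHER, after which every PE holds and
!-- processes the gathered data identically; the scatter step vanishes.
   CALL MPI_ALLGATHER( local,  nlocal, MPI_REAL,                               &
                       global, nlocal, MPI_REAL, MPI_COMM_WORLD, ierr )

   global = 2.0 * global                        ! identical processing on all PEs

   local = global(myid*nlocal+1:(myid+1)*nlocal)   ! local part without communication

   DEALLOCATE( local, global )
   CALL MPI_FINALIZE( ierr )

END PROGRAM allgather_sketch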

Changed:


Calculation of the weighted average of p is now handled in the same way
regardless of the number of ghost layers (i.e. of the advection scheme). (pres)

The multigrid and SOR methods now use p_loc for the iterative advancement of
the pressure; p_sub has been removed. (init_3d_model, modules, poismg, pres, sor)

For speed optimization, string comparisons against bc_lr and bc_ns are replaced
by the logical flags bc_lr_cyc, bc_lr_dirrad, bc_lr_raddir, bc_ns_cyc,
bc_ns_dirrad, and bc_ns_raddir. (calc_spectra, check_parameters, exchange_horiz,
exchange_horiz_2d, header, init_3d_model, init_grid, init_pegrid, modules,
poismg, pres, sor, time_integration, timestep)
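
The pattern behind this optimization, sketched with hypothetical surrounding
code (only the flag names and the replaced string comparison appear in the
changeset): the CHARACTER comparison is evaluated once during initialization
and stored in a LOGICAL flag, which is then tested inside frequently called
routines such as exchange_horiz.

! Sketch of the optimization (surrounding code is hypothetical): the CHARACTER
! comparison is evaluated once and stored in a LOGICAL flag that is tested in
! the hot path.
PROGRAM bc_flag_sketch

   IMPLICIT NONE

   CHARACTER(LEN=20) ::  bc_lr = 'cyclic', bc_ns = 'cyclic'
   LOGICAL           ::  bc_lr_cyc, bc_ns_cyc
   INTEGER           ::  n
   REAL              ::  work = 0.0

!
!-- Done once during initialization
   bc_lr_cyc = ( bc_lr == 'cyclic' )
   bc_ns_cyc = ( bc_ns == 'cyclic' )

!
!-- Hot path: a cheap LOGICAL test replaces a CHARACTER comparison that would
!-- otherwise be re-evaluated on every call of e.g. exchange_horiz
   DO  n = 1, 1000000
      IF ( bc_lr_cyc )  work = work + 1.0    ! stands for the cyclic treatment along x
      IF ( bc_ns_cyc )  work = work + 1.0    ! stands for the cyclic treatment along y
   ENDDO

   PRINT*, work

END PROGRAM bc_flag_sketch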

grid_level is now used directly as the index into the MPI data type arrays
type_xz and type_yz. (exchange_horiz, poismg)

Initial assignment of zero to array p is now done for the iterative solvers only. (init_3d_model)

Errors:


The localsum calculation is modified so that the OpenMP reduction is performed correctly. (pres)
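
As an illustration only (loop bounds and the summed quantity are hypothetical,
not copied from pres), a proper OpenMP treatment puts localsum into a
REDUCTION clause so that each thread accumulates a private copy that is
combined at the end of the parallel loop:

! Illustration only (loop bounds and the summed quantity are hypothetical):
! localsum is placed in a REDUCTION clause, so each thread accumulates a
! private copy that OpenMP combines after the parallel loop.
PROGRAM localsum_reduction_sketch

   IMPLICIT NONE

   INTEGER, PARAMETER ::  nx = 64, ny = 64, nz = 64
   INTEGER            ::  i, j, k
   REAL               ::  localsum
   REAL               ::  d(nz,ny,nx)

   d = 1.0                                   ! dummy divergence-like field
   localsum = 0.0

   !$OMP PARALLEL DO PRIVATE( j, k ) REDUCTION( +: localsum )
   DO  i = 1, nx
      DO  j = 1, ny
         DO  k = 1, nz
            localsum = localsum + ABS( d(k,j,i) )
         ENDDO
      ENDDO
   ENDDO
   !$OMP END PARALLEL DO

   PRINT*, 'localsum = ', localsum

END PROGRAM localsum_reduction_sketch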

Bugfix: the bottom (nzb) and top (nzt+1) boundary conditions are now set in
routines resid and restrict. They were missing before, which may have led to
unpredictable results. (poismg)
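
A hedged sketch of what setting these planes can look like; the Neumann-type
copy of the adjacent interior plane is an assumption for illustration and not
necessarily the exact condition applied in resid and restrict:

! Hedged sketch of setting the bottom (nzb) and top (nzt+1) planes of a
! multigrid work array; the Neumann-type copy of the adjacent interior plane
! is an assumption for illustration, not necessarily the condition used in
! poismg.
SUBROUTINE set_vertical_bc( f, nzb, nzt, nys, nyn, nxl, nxr )

   IMPLICIT NONE

   INTEGER, INTENT(IN)    ::  nzb, nzt, nys, nyn, nxl, nxr
   REAL,    INTENT(INOUT) ::  f(nzb:nzt+1,nys:nyn,nxl:nxr)

!
!-- Without these assignments the outermost vertical planes keep whatever
!-- values they held before, which is the kind of omission the bugfix removes
   f(nzb,:,:)   = f(nzb+1,:,:)       ! bottom boundary
   f(nzt+1,:,:) = f(nzt,:,:)         ! top boundary

END SUBROUTINE set_vertical_bc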

File: 1 edited

Legend:

  (unprefixed) unmodified
  -            removed
  +            added
  • palm/trunk/SOURCE/exchange_horiz.f90

    r690 → r707

 ! Current revisions:
 ! -----------------
-!
+! grid_level directly used as index for MPI data type arrays,
+! bc_lr/ns replaced by bc_lr/ns_cyc
 !
 ! Former revisions:
...
     INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
 #endif
-    INTEGER ::  i, nbgp_local
+    INTEGER ::  nbgp_local
     REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                     nxl-nbgp_local:nxr+nbgp_local) ::  ar
 
     CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )
-
-!
-!-- In the Poisson multigrid solver arrays with coarser grids are used.
-!-- Set i appropriately, because the coarser grids have different
-!-- MPI datatypes type_xz, type_yz.
-    IF ( exchange_mg )  THEN
-       i = grid_level
-    ELSE
-       i = 0
-    END IF
 
 #if defined( __parallel )
...
 !--    One-dimensional decomposition along y, boundary values can be exchanged
 !--    within the PE memory
-       IF ( bc_lr == 'cyclic' )  THEN
+       IF ( bc_lr_cyc )  THEN
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
...
 !--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV(                                                   &
-                      ar(nzb,nys-nbgp_local,nxl),   1, type_yz(i), pleft,  0, &
-                      ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, 0, &
-                      comm2d, status, ierr )
+              ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), pleft,  0, &
+              ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), pright, 0, &
+              comm2d, status, ierr )
 !
 !--       Send right boundary, receive left one (synchronous)
-          CALL MPI_SENDRECV(                                                   &
-            ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, type_yz(i), pright, 1, &
-            ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_yz(i), pleft,  1, &
-            comm2d, status, ierr )
+          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, &
+                             type_yz(grid_level), pright, 1,             &
+                             ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,   &
+                             type_yz(grid_level), pleft,  1,             &
+                             comm2d, status, ierr )
 
       ELSE
...
 !
 !--       Send left boundary, receive right one (asynchronous)
-          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(i), pleft, &
-                          0, comm2d, req(1), ierr )
-          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, &
-                          0, comm2d, req(2), ierr )
+          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), &
+                          pleft, 0, comm2d, req(1), ierr )
+          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), &
+                          pright, 0, comm2d, req(2), ierr )
 !
 !--       Send right boundary, receive left one (asynchronous)
           CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
-                          type_yz(i), pright, 1, comm2d, req(3), ierr )
+                          type_yz(grid_level), pright, 1, comm2d, req(3), ierr )
           CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
-                          type_yz(i), pleft,  1, comm2d, req(4), ierr )
+                          type_yz(grid_level), pleft,  1, comm2d, req(4), ierr )
 
           CALL MPI_WAITALL( 4, req, wait_stat, ierr )
...
 !--    One-dimensional decomposition along x, boundary values can be exchanged
 !--    within the PE memory
-       IF ( bc_ns == 'cyclic' )  THEN
+       IF ( bc_ns_cyc )  THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
...
 !--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV(                                                   &
-                      ar(nzb,nys,nxl-nbgp_local),   1, type_xz(i), psouth, 0, &
-                      ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 0, &
-                      comm2d, status, ierr )
+              ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), psouth, 0, &
+              ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), pnorth, 0, &
+              comm2d, status, ierr )
 !
 !--       Send rear boundary, receive front one (synchronous)
-          CALL MPI_SENDRECV(                                                   &
-            ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 1, &
-            ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_xz(i), psouth, 1, &
-            comm2d, status, ierr )
+          CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, &
+                             type_xz(grid_level), pnorth, 1,             &
+                             ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, &
+                             type_xz(grid_level), psouth, 1,             &
+                             comm2d, status, ierr )
 
       ELSE
...
 !
 !--       Send front boundary, receive rear one (asynchronous)
-          CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(i), psouth, &
-                          0, comm2d, req(1), ierr )
-          CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, &
-                          0, comm2d, req(2), ierr )
+          CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), &
+                          psouth, 0, comm2d, req(1), ierr )
+          CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), &
+                          pnorth, 0, comm2d, req(2), ierr )
 !
 !--       Send rear boundary, receive front one (asynchronous)
           CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
-                          type_xz(i), pnorth, 1, comm2d, req(3), ierr )
+                          type_xz(grid_level), pnorth, 1, comm2d, req(3), ierr )
           CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
-                          type_xz(i), psouth, 1, comm2d, req(4), ierr )
+                          type_xz(grid_level), psouth, 1, comm2d, req(4), ierr )
 
           CALL MPI_WAITALL( 4, req, wait_stat, ierr )