source: palm/trunk/SOURCE/exchange_horiz.f90 @ 684

 SUBROUTINE exchange_horiz( ar, nbgp_local)

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: exchange_horiz.f90 684 2011-02-09 14:49:31Z raasch $
!
! 683 2011-02-09 14:25:15Z raasch
! optional synchronous exchange (sendrecv) implemented, code partly reformatted
!
[668]15! 667 2010-12-23 12:06:00Z suehring/gryschka
[667]16! Dynamic exchange of ghost points with nbgp_local to ensure that no useless
17! ghost points exchanged in case of multigrid. type_yz(0) and type_xz(0)
18! used for normal grid, the remaining types used for the several grid levels.
19! Exchange is done via MPI-Vectors with a dynamic value of ghost points which
20! depend on the advection scheme. Exchange of left and right PEs is 10% faster
[668]21! with MPI-Vectors than without.
!
! 75 2007-03-22 09:54:05Z raasch
! Special cases for additional grid points along x or y in case of non-cyclic
! boundary conditions are no longer considered
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.16  2006/02/23 12:19:08  raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1  1997/07/24 11:13:29  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral (ghost point) boundary values between neighbouring PEs
! (parallel computers) and application of cyclic lateral boundary conditions,
! respectively.
!------------------------------------------------------------------------------!
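!
! Typical use (a sketch for illustration only; the array name u and the ghost
! point count nbgp are assumptions, named after their usual meaning elsewhere
! in PALM):
!
!    CALL exchange_horiz( u, nbgp )
!
! This exchanges the nbgp outermost ghost point layers of u at all four
! lateral boundaries.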

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

#if defined( __parallel )
    INTEGER, DIMENSION(4)                 ::  req
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
#endif
    INTEGER ::  i, nbgp_local
    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

!
!-- In the Poisson multigrid solver, arrays defined on coarser grids are also
!-- exchanged. Set i appropriately, because the coarser grid levels have their
!-- own MPI datatypes type_xz, type_yz.
    IF ( exchange_mg )  THEN
       i = grid_level
    ELSE
       i = 0
    END IF

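!
!-- type_yz(i) and type_xz(i) are derived MPI datatypes describing nbgp ghost
!-- planes of a yz- and an xz-boundary, respectively. They are assumed to be
!-- created and committed during the PE grid setup; a minimal sketch of such a
!-- definition (ngp_yz, the number of grid points of one yz plane, is an
!-- assumed name) could look like:
!--
!--    CALL MPI_TYPE_VECTOR( nbgp, ngp_yz, ngp_yz, MPI_REAL,                   &
!--                          type_yz(0), ierr )
!--    CALL MPI_TYPE_COMMIT( type_yz(0), ierr )
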
#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr == 'cyclic' )  THEN
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV(                                                   &
                       ar(nzb,nys-nbgp_local,nxl),   1, type_yz(i), pleft,  0, &
                       ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, 0, &
                       comm2d, status, ierr )
!
!--       Send right boundary, receive left one (synchronous)
          CALL MPI_SENDRECV(                                                   &
            ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, type_yz(i), pright, 1, &
            ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_yz(i), pleft,  1, &
                       comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send left boundary, receive right one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(i), pleft,  &
                          0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, &
                          0, comm2d, req(2), ierr )
!
!--       Send right boundary, receive left one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
                          type_yz(i), pright, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_yz(i), pleft,  1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF


    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns == 'cyclic' )  THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV(                                                   &
                       ar(nzb,nys,nxl-nbgp_local),   1, type_xz(i), psouth, 0, &
                       ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 0, &
                       comm2d, status, ierr )
!
!--       Send rear boundary, receive front one (synchronous)
          CALL MPI_SENDRECV(                                                   &
            ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 1, &
            ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_xz(i), psouth, 1, &
            comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send front boundary, receive rear one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(i), psouth, &
                          0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, &
                          0, comm2d, req(2), ierr )
!
!--       Send rear boundary, receive front one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
                          type_xz(i), pnorth, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_xz(i), psouth, 1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr == 'cyclic' )  THEN
        ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
        ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
        ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
        ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
    ENDIF

#endif
    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )


 END SUBROUTINE exchange_horiz