source: palm/trunk/SOURCE/exchange_horiz.f90 @ 688

Last change on this file was revision 688, checked in by gryschka, 13 years ago

Bugfix for some logical expressions (syntax was not compatible with all compilers)

 SUBROUTINE exchange_horiz( ar, nbgp_local)

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
!
! Bugfix for some logical expressions
! (syntax was not compatible with all compilers)
!
! Former revisions:
! -----------------
!
! $Id$
!
! 683 2011-02-09 14:25:15Z raasch
! optional synchronous exchange (sendrecv) implemented, code partly reformatted
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp_local to ensure that no useless
! ghost points are exchanged in case of multigrid. type_yz(0) and type_xz(0)
! are used for the normal grid, the remaining types for the coarser grid levels.
! Exchange is done via MPI vectors with a dynamic number of ghost points, which
! depends on the advection scheme. Exchange between left and right PEs is 10%
! faster with MPI vectors than without.
!
! 75 2007-03-22 09:54:05Z raasch
! Special cases for additional grid points along x or y in case of non-cyclic
! boundary conditions are no longer considered
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.16  2006/02/23 12:19:08  raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1  1997/07/24 11:13:29  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral boundary values (ghost points) between neighbouring PEs
! on parallel computers, or application of the cyclic lateral boundary
! conditions when the exchange can be done within the memory of a single PE.
!------------------------------------------------------------------------------!
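!
!-- Typical usage (illustrative sketch; the array name u and the ghost-layer
!-- count nbgp are examples only, the actual calls are made elsewhere in the
!-- PALM code):
!--
!--    CALL exchange_horiz( u, nbgp )
!--
!-- Here u stands for a prognostic 3d array carrying nbgp ghost layers on each
!-- lateral side, and nbgp for the number of ghost layers required by the
!-- chosen advection scheme.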

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

#if defined( __parallel )
    INTEGER, DIMENSION(4)                 ::  req
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
#endif
    INTEGER ::  i, nbgp_local
    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar
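!
!-- ar is the 3d array whose lateral ghost layers are to be exchanged;
!-- nbgp_local is the number of ghost layers actually required by the caller,
!-- so that no useless ghost points are exchanged (e.g. on the coarser
!-- multigrid levels).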

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

!
!-- In the Poisson multigrid solver, arrays on coarser grids are used.
!-- Set i appropriately, because the coarser grids have different
!-- MPI datatypes type_xz, type_yz.
    IF ( exchange_mg )  THEN
       i = grid_level
    ELSE
       i = 0
    ENDIF
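!
!-- Note: the MPI derived datatypes type_yz(i) and type_xz(i) used below are
!-- assumed to be created and committed during the PE grid setup (presumably
!-- in init_pegrid). A minimal sketch of how such a ghost-layer datatype for
!-- the yz boundary could be built (illustrative only, ngp_yz is an assumed
!-- helper variable):
!--
!--    ngp_yz = ( nzt - nzb + 2 ) * ( nyn - nys + 1 + 2 * nbgp_local )
!--    CALL MPI_TYPE_VECTOR( nbgp_local, ngp_yz, ngp_yz, MPI_REAL,             &
!--                          type_yz(i), ierr )
!--    CALL MPI_TYPE_COMMIT( type_yz(i), ierr )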

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr == 'cyclic' )  THEN
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV(                                                   &
                       ar(nzb,nys-nbgp_local,nxl),   1, type_yz(i), pleft,  0, &
                       ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, 0, &
                       comm2d, status, ierr )
!
!--       Send right boundary, receive left one (synchronous)
          CALL MPI_SENDRECV(                                                   &
            ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, type_yz(i), pright, 1, &
            ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_yz(i), pleft,  1, &
                       comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send left boundary, receive right one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(i), pleft,  &
                          0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, &
                          0, comm2d, req(2), ierr )
!
!--       Send right boundary, receive left one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
                          type_yz(i), pright, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_yz(i), pleft,  1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF


    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns == 'cyclic' )  THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV(                                                   &
                       ar(nzb,nys,nxl-nbgp_local),   1, type_xz(i), psouth, 0, &
                       ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 0, &
                       comm2d, status, ierr )
!
!--       Send rear boundary, receive front one (synchronous)
          CALL MPI_SENDRECV(                                                   &
            ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 1, &
            ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_xz(i), psouth, 1, &
            comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send front boundary, receive rear one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(i), psouth, &
                          0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, &
                          0, comm2d, req(2), ierr )
!
!--       Send rear boundary, receive front one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
                          type_xz(i), pnorth, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_xz(i), psouth, 1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr == 'cyclic' )  THEN
        ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
        ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
        ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
        ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
    ENDIF

#endif
    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )


 END SUBROUTINE exchange_horiz