source: palm/tags/release-3.8/SOURCE/exchange_horiz.f90 @ 4343

 SUBROUTINE exchange_horiz( ar, nbgp_local )

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: exchange_horiz.f90 710 2011-03-30 09:45:27Z oliver.maas $
!
! 709 2011-03-30 09:31:40Z raasch
! formatting adjustments
!
! 707 2011-03-29 11:39:40Z raasch
! grid_level directly used as index for MPI data type arrays,
! bc_lr/ns replaced by bc_lr/ns_cyc
!
! 689 2011-02-20 19:31:12Z gryschka
! Bugfix for some logical expressions
! (syntax was not compatible with all compilers)
!
! 683 2011-02-09 14:25:15Z raasch
! optional synchronous exchange (sendrecv) implemented, code partly reformatted
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp_local to ensure that no useless
! ghost points are exchanged in case of multigrid. type_yz(0) and type_xz(0)
! are used for the normal grid, the remaining types for the several grid
! levels. Exchange is done via MPI vectors with a dynamic number of ghost
! points, which depends on the advection scheme. Exchange between left and
! right PEs is 10% faster with MPI vectors than without.
!
! 75 2007-03-22 09:54:05Z raasch
! Special cases for additional grid points along x or y in case of non-cyclic
! boundary conditions are no longer considered
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.16  2006/02/23 12:19:08  raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1  1997/07/24 11:13:29  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral boundary values between neighbouring PEs (parallel
! computers) and application of cyclic lateral boundary conditions,
! respectively.
!------------------------------------------------------------------------------!

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

#if defined( __parallel )
    INTEGER, DIMENSION(4)                 ::  req        ! request handles for the four non-blocking transfers
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat  ! status array for MPI_WAITALL
#endif
    INTEGER ::  nbgp_local  ! number of ghost point layers to be exchanged

    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar  ! 3d-array to be exchanged

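!
!-- The derived MPI data types type_yz(grid_level) and type_xz(grid_level)
!-- used below each describe one complete set of ghost layers for the x- and
!-- y-direction, respectively; they are created and committed during the grid
!-- setup, outside of this routine. A minimal sketch of such a definition,
!-- assuming ngp_yz grid points per yz cross section including ghost points
!-- (illustration only, not the actual PALM code):
!--
!--    CALL MPI_TYPE_VECTOR( nbgp_local, ngp_yz, ngp_yz, MPI_REAL,  &
!--                          type_yz(0), ierr )
!--    CALL MPI_TYPE_COMMIT( type_yz(0), ierr )
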
    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr_cyc )  THEN
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), pleft,  0, &
              ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), pright, 0, &
              comm2d, status, ierr )
!
!--       Send right boundary, receive left one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, &
                             type_yz(grid_level), pright, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,   &
                             type_yz(grid_level), pleft,  1,             &
                             comm2d, status, ierr )
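!
!--       Note: MPI_SENDRECV couples the send with the matching receive in a
!--       single blocking call, so the exchange cannot deadlock even though
!--       all PEs communicate simultaneously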

       ELSE

          req = 0
!
!--       Send left boundary, receive right one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), &
                          pleft, 0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), &
                          pright, 0, comm2d, req(2), ierr )
!
!--       Send right boundary, receive left one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
                          type_yz(grid_level), pright, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_yz(grid_level), pleft,  1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )
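!
!--       MPI_WAITALL blocks until all four transfers have completed; only
!--       then may the ghost layers of ar be read or overwritten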

       ENDIF

    ENDIF


    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns_cyc )  THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), psouth, 0, &
              ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), pnorth, 0, &
              comm2d, status, ierr )
!
!--       Send rear boundary, receive front one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, &
                             type_xz(grid_level), pnorth, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, &
                             type_xz(grid_level), psouth, 1,             &
                             comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send front boundary, receive rear one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), &
                          psouth, 0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), &
                          pnorth, 0, comm2d, req(2), ierr )
!
!--       Send rear boundary, receive front one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
                          type_xz(grid_level), pnorth, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_xz(grid_level), psouth, 1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr == 'cyclic' )  THEN
       ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
       ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
       ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
       ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
    ENDIF

#endif
    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )


 END SUBROUTINE exchange_horiz