source: palm/trunk/SOURCE/exchange_horiz.f90 @ 1202

Last change on this file since 1202 was 1132, checked in by raasch, 12 years ago

r1028 documented

 SUBROUTINE exchange_horiz( ar, nbgp_local )

!--------------------------------------------------------------------------------!
! This file is part of PALM.
!
! PALM is free software: you can redistribute it and/or modify it under the terms
! of the GNU General Public License as published by the Free Software Foundation,
! either version 3 of the License, or (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2012  Leibniz University Hannover
!--------------------------------------------------------------------------------!
!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: exchange_horiz.f90 1132 2013-04-12 14:35:30Z witha $
!
! 1128 2013-04-12 06:19:32Z raasch
! modifications for asynchronous transfer,
! local variables req, wait_stat are global now, and have been moved to module
! pegrid
!
! 1113 2013-03-10 02:48:14Z raasch
! GPU-porting for single-core (1PE) mode
!
! 1036 2012-10-22 13:43:42Z raasch
! code put under GPL (PALM 3.9)
!
! 841 2012-02-28 12:29:49Z maronga
! Excluded routine from compilation of namelist_file_check
!
! 709 2011-03-30 09:31:40Z raasch
! formatting adjustments
!
! 707 2011-03-29 11:39:40Z raasch
! grid_level directly used as index for MPI data type arrays,
! bc_lr/ns replaced by bc_lr/ns_cyc
!
! 689 2011-02-20 19:31:12Z gryschka
! Bugfix for some logical expressions
! (syntax was not compatible with all compilers)
!
! 683 2011-02-09 14:25:15Z raasch
! optional synchronous exchange (sendrecv) implemented, code partly reformatted
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp_local, to ensure that no useless
! ghost points are exchanged in case of multigrid. type_yz(0) and type_xz(0)
! are used for the normal grid, the remaining types for the several grid
! levels. Exchange is done via MPI vectors with a dynamic number of ghost
! points, which depends on the advection scheme. Exchange between left and
! right PEs is 10% faster with MPI vectors than without.
!
! 75 2007-03-22 09:54:05Z raasch
! Special cases for additional grid points along x or y in case of non-cyclic
! boundary conditions are no longer considered
!
! RCS log replaced by Id keyword, revision history cleaned up
!
! Revision 1.16  2006/02/23 12:19:08  raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1  1997/07/24 11:13:29  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral boundary values between neighboring PEs (parallel
! computers) and application of cyclic lateral boundary conditions,
! respectively.
!------------------------------------------------------------------------------!
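!
!-- Note: the ghost layers are transferred as single elements of the MPI
!-- derived datatypes type_yz and type_xz (one per grid level), which are
!-- created elsewhere (in init_pegrid). An illustrative sketch of how such a
!-- type can be built, with assumed names and not necessarily the actual
!-- PALM code:
!--
!--    ngp_yz = ( nzt - nzb + 2 ) * ( nyn - nys + 1 + 2 * nbgp )
!--    CALL MPI_TYPE_VECTOR( nbgp, ngp_yz, ngp_yz, MPI_REAL, type_yz(0), ierr )
!--    CALL MPI_TYPE_COMMIT( type_yz(0), ierr )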

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE


    INTEGER ::  i, j, k, nbgp_local
    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar
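!
!-- Note: ar is stored in Fortran column-major order, i.e. the k (z) index
!-- varies fastest; each x-plane of ar is therefore contiguous in memory, so
!-- the MPI calls below can address a whole block of boundary planes via its
!-- first element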

#if ! defined( __check )

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

#if defined( __parallel )

!
!-- Exchange in x-direction of lateral boundaries
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr_cyc )  THEN
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), pleft,  0, &
              ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), pright, 0, &
              comm2d, status, ierr )
!
!--       Send right boundary, receive left one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, &
                             type_yz(grid_level), pright, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,   &
                             type_yz(grid_level), pleft,  1,             &
                             comm2d, status, ierr )

       ELSE

!
!--       If background communication is switched on, the exchange is done
!--       either along x only, along y only, or along both directions
!--       (send_receive = 'lr', 'ns', or 'al', respectively)
          IF ( send_receive == 'lr'  .OR.  send_receive == 'al' )  THEN

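!
!--          req, req_count, and wait_stat reside in module pegrid (since
!--          r1128), so that with sendrecv_in_background the pending requests
!--          can be completed later outside this routine. Note that req_count
!--          also serves as the MPI message tag, so that overlapping
!--          background transfers use distinct tags.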
             IF ( .NOT. sendrecv_in_background )  THEN
                req(1:4)  = 0
                req_count = 0
             ENDIF
!
!--          Send left boundary, receive right one (asynchronous)
             CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), &
                             pleft, req_count, comm2d, req(req_count+1), ierr )
             CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), &
                             pright, req_count, comm2d, req(req_count+2), ierr )
!
!--          Send right boundary, receive left one (asynchronous)
             CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
                             type_yz(grid_level), pright, req_count+1, comm2d,    &
                             req(req_count+3), ierr )
             CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                             type_yz(grid_level), pleft,  req_count+1, comm2d,    &
                             req(req_count+4), ierr )

             IF ( .NOT. sendrecv_in_background )  THEN
                CALL MPI_WAITALL( 4, req, wait_stat, ierr )
             ELSE
                req_count = req_count + 4
             ENDIF

          ENDIF

       ENDIF

    ENDIF


    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns_cyc )  THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), psouth, 0, &
              ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), pnorth, 0, &
              comm2d, status, ierr )
!
!--       Send rear boundary, receive front one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, &
                             type_xz(grid_level), pnorth, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, &
                             type_xz(grid_level), psouth, 1,             &
                             comm2d, status, ierr )

       ELSE

!
!--       If background communication is switched on, the exchange is done
!--       either along x only, along y only, or along both directions
!--       (send_receive = 'lr', 'ns', or 'al', respectively)
          IF ( send_receive == 'ns'  .OR.  send_receive == 'al' )  THEN

             IF ( .NOT. sendrecv_in_background )  THEN
                req(1:4)  = 0
                req_count = 0
             ENDIF

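!
!--          (request and tag handling as for the x-direction exchange above)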
!
!--          Send front boundary, receive rear one (asynchronous)
             CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), &
                             psouth, req_count, comm2d, req(req_count+1), ierr )
             CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), &
                             pnorth, req_count, comm2d, req(req_count+2), ierr )
!
!--          Send rear boundary, receive front one (asynchronous)
             CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
                             type_xz(grid_level), pnorth, req_count+1, comm2d,    &
                             req(req_count+3), ierr )
             CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                             type_xz(grid_level), psouth, req_count+1, comm2d,    &
                             req(req_count+4), ierr )

             IF ( .NOT. sendrecv_in_background )  THEN
                CALL MPI_WAITALL( 4, req, wait_stat, ierr )
             ELSE
                req_count = req_count + 4
             ENDIF

          ENDIF

       ENDIF

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case.
!-- Two cases are distinguished, because in GPU mode not all arrays reside on
!-- the device yet; this workaround has to be removed later. Also, because the
!-- PGI 12.5 compiler has problems with array syntax, explicit loops are used.
    IF ( bc_lr == 'cyclic' )  THEN
       IF ( on_device )  THEN
          !$acc kernels present( ar )
          !$acc loop independent
          DO  i = 0, nbgp_local-1
             DO  j = nys-nbgp_local, nyn+nbgp_local
                !$acc loop vector( 32 )
                DO  k = nzb, nzt+1
                   ar(k,j,nxl-nbgp_local+i) = ar(k,j,nxr-nbgp_local+1+i)
                   ar(k,j,nxr+1+i)          = ar(k,j,nxl+i)
                ENDDO
             ENDDO
          ENDDO
          !$acc end kernels
       ELSE
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
       IF ( on_device )  THEN
          !$acc kernels present( ar )
          !$acc loop
          DO  i = nxl-nbgp_local, nxr+nbgp_local
             !$acc loop independent
             DO  j = 0, nbgp_local-1
                !$acc loop vector( 32 )
                DO  k = nzb, nzt+1
                   ar(k,nys-nbgp_local+j,i) = ar(k,nyn-nbgp_local+1+j,i)
                   ar(k,nyn+1+j,i)          = ar(k,nys+j,i)
                ENDDO
             ENDDO
          ENDDO
          !$acc end kernels
       ELSE
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF
    ENDIF

#endif
    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )

#endif
 END SUBROUTINE exchange_horiz
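
!
!-- Typical call (illustrative): after a prognostic array has been updated,
!-- e.g.
!--    CALL exchange_horiz( u, nbgp )
!-- where nbgp is the standard number of ghost layers defined in module
!-- indices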