source: palm/trunk/SOURCE/exchange_horiz.f90 @ 1113

Last change on this file since 1113 was 1113, checked in by raasch, 9 years ago

GPU porting of boundary conditions and routine pres; index bug removed from radiation boundary condition

  • Property svn:keywords set to Id
File size: 8.9 KB
SUBROUTINE exchange_horiz( ar, nbgp_local )

!--------------------------------------------------------------------------------!
! This file is part of PALM.
!
! PALM is free software: you can redistribute it and/or modify it under the terms
! of the GNU General Public License as published by the Free Software Foundation,
! either version 3 of the License, or (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2012  Leibniz University Hannover
!--------------------------------------------------------------------------------!
!
! Current revisions:
! -----------------
! GPU porting for single-core (1 PE) mode
!
! Former revisions:
! -----------------
! $Id: exchange_horiz.f90 1113 2013-03-10 02:48:14Z raasch $
!
! 1036 2012-10-22 13:43:42Z raasch
! code put under GPL (PALM 3.9)
!
! 841 2012-02-28 12:29:49Z maronga
! Excluded routine from compilation of namelist_file_check
!
! 709 2011-03-30 09:31:40Z raasch
! formatting adjustments
!
! 707 2011-03-29 11:39:40Z raasch
! grid_level directly used as index for MPI data type arrays,
! bc_lr/ns replaced by bc_lr/ns_cyc
!
! 689 2011-02-20 19:31:12Z gryschka
! Bugfix for some logical expressions
! (syntax was not compatible with all compilers)
!
! 683 2011-02-09 14:25:15Z raasch
! optional synchronous exchange (sendrecv) implemented, code partly reformatted
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp_local to ensure that no useless
! ghost points are exchanged in case of multigrid. type_yz(0) and type_xz(0)
! are used for the normal grid, the remaining types for the several grid levels.
! Exchange is done via MPI vectors with a dynamic number of ghost points, which
! depends on the advection scheme. Exchange with the left and right PEs is 10%
! faster with MPI vectors than without.
!
! 75 2007-03-22 09:54:05Z raasch
! Special cases for additional gridpoints along x or y in case of non-cyclic
! boundary conditions are no longer considered
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.16  2006/02/23 12:19:08  raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1  1997/07/24 11:13:29  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral boundary values between neighbouring subdomains (on
! parallel computers) and application of cyclic lateral boundary conditions,
! respectively.
!------------------------------------------------------------------------------!
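!
!-- Typical call (an illustrative sketch, not taken from this file; nbgp is
!-- assumed to be PALM's default number of ghost point layers from the
!-- indices module, and u any 3d array allocated with that many ghost layers):
!--
!--    CALL exchange_horiz( u, nbgp )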

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE


    INTEGER ::  i, j, k      !  loop indices (used in the single-core GPU branch)
    INTEGER ::  nbgp_local   !  number of ghost point layers to be exchanged
    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar   !  3d array whose ghost
                                                            !  layers are exchanged

#if ! defined( __check )
#if defined( __parallel )
    INTEGER, DIMENSION(4)                 ::  req        !  handles for non-blocking exchange
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat  !  status array for MPI_WAITALL
#endif


    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values
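!-- (type_yz and type_xz are derived MPI datatypes, each describing one slab
!-- of nbgp_local ghost layers; they are set up elsewhere, presumably in
!-- init_pegrid, roughly along the lines of
!--    CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp*(nzt-nzb+2),               &
!--                          (nyn-nys+1+2*nbgp)*(nzt-nzb+2), MPI_REAL,         &
!--                          type_xz(0), ierr )
!--    CALL MPI_TYPE_COMMIT( type_xz(0), ierr )
!-- i.e. count = number of x columns, blocklength = one contiguous chunk of
!-- nbgp ghost y-layers times the z extent, stride = size of one full yz
!-- cross-section)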
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr_cyc )  THEN
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), pleft,  0, &
              ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), pright, 0, &
              comm2d, status, ierr )
!
!--       Send right boundary, receive left one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, &
                             type_yz(grid_level), pright, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,   &
                             type_yz(grid_level), pleft,  1,             &
                             comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send left boundary, receive right one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), &
                          pleft, 0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), &
                          pright, 0, comm2d, req(2), ierr )
!
!--       Send right boundary, receive left one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
                          type_yz(grid_level), pright, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_yz(grid_level), pleft,  1, comm2d, req(4), ierr )

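!
!--       All four transfers must have completed before the ghost layers of
!--       ar are read or overwritten again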
          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF


    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns_cyc )  THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), psouth, 0, &
              ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), pnorth, 0, &
              comm2d, status, ierr )
!
!--       Send rear boundary, receive front one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, &
                             type_xz(grid_level), pnorth, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, &
                             type_xz(grid_level), psouth, 1,             &
                             comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send front boundary, receive rear one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), &
                          psouth, 0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), &
                          pnorth, 0, comm2d, req(2), ierr )
!
!--       Send rear boundary, receive front one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
                          type_xz(grid_level), pnorth, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_xz(grid_level), psouth, 1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case.
!-- Case dependent, because in GPU mode not all arrays are on the device yet.
!-- This workaround has to be removed later. Also, since the PGI compiler
!-- (version 12.5) has problems with array syntax, explicit loops are used.
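!-- (a note on the directives: "present( ar )" below asserts that ar already
!-- resides on the device, i.e. an enclosing data region is assumed to have
!-- copied it there before this routine is called)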
    IF ( bc_lr == 'cyclic' )  THEN
       IF ( on_device )  THEN
          !$acc kernels present( ar )
          !$acc loop independent
          DO  i = 0, nbgp_local-1
             DO  j = nys-nbgp_local, nyn+nbgp_local
                !$acc loop vector( 32 )
                DO  k = nzb, nzt+1
                   ar(k,j,nxl-nbgp_local+i) = ar(k,j,nxr-nbgp_local+1+i)
                   ar(k,j,nxr+1+i)          = ar(k,j,nxl+i)
                ENDDO
             ENDDO
          ENDDO
          !$acc end kernels
       ELSE
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
       IF ( on_device )  THEN
          !$acc kernels present( ar )
          !$acc loop
          DO  i = nxl-nbgp_local, nxr+nbgp_local
             !$acc loop independent
             DO  j = 0, nbgp_local-1
                !$acc loop vector( 32 )
                DO  k = nzb, nzt+1
                   ar(k,nys-nbgp_local+j,i) = ar(k,nyn-nbgp_local+1+j,i)
                   ar(k,nyn+1+j,i)          = ar(k,nys+j,i)
                ENDDO
             ENDDO
          ENDDO
          !$acc end kernels
       ELSE
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF
    ENDIF

#endif
    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )

#endif
 END SUBROUTINE exchange_horiz