source: palm/trunk/SOURCE/exchange_horiz.f90 @ 1114

Last change on this file was revision 1114, checked in by raasch, 11 years ago

last commit documented

  • Property svn:keywords set to Id
File size: 8.9 KB
 SUBROUTINE exchange_horiz( ar, nbgp_local)

!--------------------------------------------------------------------------------!
! This file is part of PALM.
!
! PALM is free software: you can redistribute it and/or modify it under the terms
! of the GNU General Public License as published by the Free Software Foundation,
! either version 3 of the License, or (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2012  Leibniz University Hannover
!--------------------------------------------------------------------------------!
!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: exchange_horiz.f90 1114 2013-03-10 03:25:57Z raasch $
!
! 1113 2013-03-10 02:48:14Z raasch
! GPU-porting for single-core (1PE) mode
!
! 1036 2012-10-22 13:43:42Z raasch
! code put under GPL (PALM 3.9)
!
! 841 2012-02-28 12:29:49Z maronga
! Excluded routine from compilation of namelist_file_check
!
! 709 2011-03-30 09:31:40Z raasch
! formatting adjustments
!
! 707 2011-03-29 11:39:40Z raasch
! grid_level directly used as index for MPI data type arrays,
! bc_lr/ns replaced by bc_lr/ns_cyc
!
[690]44! 689 2011-02-20 19:31:12z gryschka
! Bugfix for some logical expressions
! (syntax was not compatible with all compilers)
!
! 683 2011-02-09 14:25:15Z raasch
! optional synchronous exchange (sendrecv) implemented, code partly reformatted
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp_local to ensure that no useless
! ghost points are exchanged in case of multigrid. type_yz(0) and type_xz(0)
! are used for the normal grid, the remaining types are used for the several
! grid levels. Exchange is done via MPI vectors with a dynamic number of
! ghost points, which depends on the advection scheme. Exchange between left
! and right PEs is 10% faster with MPI vectors than without.
!
! 75 2007-03-22 09:54:05Z raasch
! Special cases for additional grid points along x or y in case of non-cyclic
! boundary conditions are no longer considered
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.16  2006/02/23 12:19:08  raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1  1997/07/24 11:13:29  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral (ghost point) boundary values between neighbouring PEs
! (parallel computers) and application of cyclic lateral boundary conditions,
! respectively.
!------------------------------------------------------------------------------!
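!
!-- Illustrative call (example only, not part of this routine): a typical
!-- caller passes a prognostic array together with the number of ghost point
!-- layers it carries, e.g.
!--    CALL exchange_horiz( u, nbgp )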

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

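!
!-- Dummy arguments: ar is the 3-d array whose lateral ghost layers are to be
!-- updated, nbgp_local is the number of ghost point layers on each lateral
!-- side (it may be smaller than the standard number of ghost points on
!-- coarser multigrid levels)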
    INTEGER ::  i, j, k, nbgp_local
    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar

#if ! defined( __check )
#if defined( __parallel )
    INTEGER, DIMENSION(4)                 ::  req
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
#endif


    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values
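!-- Data are exchanged first along x (left/right neighbours) and then along y
!-- (south/north neighbours). The MPI derived datatypes type_yz(grid_level) and
!-- type_xz(grid_level) each describe a complete block of nbgp_local ghost
!-- layers, so a single send/receive suffices per direction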
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr_cyc )  THEN
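!
!--       Copy the rightmost nbgp_local interior layers into the left ghost
!--       layers and the leftmost interior layers into the right ghost layers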
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), pleft,  0, &
              ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), pright, 0, &
              comm2d, status, ierr )
!
!--       Send right boundary, receive left one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, &
                             type_yz(grid_level), pright, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,   &
                             type_yz(grid_level), pleft,  1,             &
                             comm2d, status, ierr )

       ELSE

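!
!--       req collects the handles of the four non-blocking calls below, which
!--       are completed by the MPI_WAITALL call further down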
          req = 0
!
!--       Send left boundary, receive right one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), &
                          pleft, 0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), &
                          pright, 0, comm2d, req(2), ierr )
!
!--       Send right boundary, receive left one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
                          type_yz(grid_level), pright, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_yz(grid_level), pleft,  1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF


    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns_cyc )  THEN
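!
!--       Copy the northernmost nbgp_local interior layers into the south
!--       ghost layers and the southernmost interior layers into the north
!--       ghost layers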
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), psouth, 0, &
              ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), pnorth, 0, &
              comm2d, status, ierr )
!
!--       Send rear boundary, receive front one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, &
                             type_xz(grid_level), pnorth, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, &
                             type_xz(grid_level), psouth, 1,             &
                             comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send front boundary, receive rear one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), &
                          psouth, 0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), &
                          pnorth, 0, comm2d, req(2), ierr )
!
!--       Send rear boundary, receive front one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
                          type_xz(grid_level), pnorth, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_xz(grid_level), psouth, 1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case.
!-- Case-dependent, because in GPU mode not all arrays are on the device yet;
!-- this workaround has to be removed later. Also, explicit loops are used
!-- instead of array syntax, because PGI compiler 12.5 has problems with it.
    IF ( bc_lr == 'cyclic' )  THEN
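!
!--    Cyclic shift of the ghost layers along x; in GPU mode this is done on
!--    the device using the explicit loops (see remark above)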
       IF ( on_device )  THEN
          !$acc kernels present( ar )
          !$acc loop independent
          DO  i = 0, nbgp_local-1
             DO  j = nys-nbgp_local, nyn+nbgp_local
                !$acc loop vector( 32 )
                DO  k = nzb, nzt+1
                   ar(k,j,nxl-nbgp_local+i) = ar(k,j,nxr-nbgp_local+1+i)
                   ar(k,j,nxr+1+i)          = ar(k,j,nxl+i)
                ENDDO
             ENDDO
          ENDDO
          !$acc end kernels
       ELSE
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
       IF ( on_device )  THEN
          !$acc kernels present( ar )
          !$acc loop
          DO  i = nxl-nbgp_local, nxr+nbgp_local
             !$acc loop independent
             DO  j = 0, nbgp_local-1
                !$acc loop vector( 32 )
                DO  k = nzb, nzt+1
                   ar(k,nys-nbgp_local+j,i) = ar(k,nyn-nbgp_local+1+j,i)
                   ar(k,nyn+1+j,i)          = ar(k,nys+j,i)
                ENDDO
             ENDDO
          ENDDO
          !$acc end kernels
       ELSE
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF
    ENDIF

#endif
    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )

#endif
 END SUBROUTINE exchange_horiz