source: palm/trunk/SOURCE/exchange_horiz.f90 @ 709

Last change on this file since 709 was 709, checked in by raasch, 13 years ago

formatting adjustments

  • Property svn:keywords set to Id
File size: 6.6 KB
 SUBROUTINE exchange_horiz( ar, nbgp_local)

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
! formatting adjustments
!
! Former revisions:
! -----------------
! $Id: exchange_horiz.f90 709 2011-03-30 09:31:40Z raasch $
!
! 707 2011-03-29 11:39:40Z raasch
! grid_level directly used as index for MPI data type arrays,
! bc_lr/ns replaced by bc_lr/ns_cyc
!
! 689 2011-02-20 19:31:12Z gryschka
! Bugfix for some logical expressions
! (syntax was not compatible with all compilers)
!
! 683 2011-02-09 14:25:15Z raasch
! optional synchronous exchange (sendrecv) implemented, code partly reformatted
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp_local to ensure that no useless
! ghost points are exchanged in case of multigrid. type_yz(0) and type_xz(0)
! are used for the normal grid, the remaining types for the individual grid
! levels. Exchange is done via MPI vectors with a dynamic number of ghost
! points, which depends on the advection scheme. Exchange between left and
! right PEs is 10% faster with MPI vectors than without.
!
! 75 2007-03-22 09:54:05Z raasch
! Special cases for additional gridpoints along x or y in case of non-cyclic
! boundary conditions are no longer considered
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.16  2006/02/23 12:19:08  raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1  1997/07/24 11:13:29  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral (ghost point) boundary values on parallel computers and
! application of cyclic lateral boundary conditions, respectively.
!------------------------------------------------------------------------------!
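!
!-- Note: type_xz(grid_level) and type_yz(grid_level) used below are MPI
!-- derived datatypes, each describing one set of ghost planes of the
!-- respective grid level. They are assumed to be created and committed
!-- elsewhere (presumably during the PE grid setup), roughly along the lines
!-- of
!--    CALL MPI_TYPE_VECTOR( count, blocklength, stride, MPI_REAL,            &
!--                          type_yz(0), ierr )
!--    CALL MPI_TYPE_COMMIT( type_yz(0), ierr )
!-- where count, blocklength and stride are placeholders that have to cover
!-- nbgp_local ghost layers of the subdomain. A typical call of this routine
!-- might look like e.g. CALL exchange_horiz( u, nbgp ).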

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

#if defined( __parallel )
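!
!-- Request handles and status array used for the non-blocking (asynchronous)
!-- exchange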
    INTEGER, DIMENSION(4)                 ::  req
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
#endif
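!
!-- Number of ghost point layers to be exchanged for the given array ar,
!-- which includes these layers in its bounds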
    INTEGER ::  nbgp_local
    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr_cyc )  THEN
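!
!--       Left ghost layers receive the rightmost interior values and right
!--       ghost layers the leftmost interior values (cyclic shift within the
!--       local memory)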
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), pleft,  0, &
              ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), pright, 0, &
              comm2d, status, ierr )
!
!--       Send right boundary, receive left one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, &
                             type_yz(grid_level), pright, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,   &
                             type_yz(grid_level), pleft,  1,             &
                             comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send left boundary, receive right one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), &
                          pleft, 0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), &
                          pright, 0, comm2d, req(2), ierr )
!
!--       Send right boundary, receive left one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
                          type_yz(grid_level), pright, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_yz(grid_level), pleft,  1, comm2d, req(4), ierr )

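!
!--       Wait until all four transfers have been completed before the
!--       exchanged ghost point data are used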
          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF


    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns_cyc )  THEN
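!
!--       South ghost layers receive the northernmost interior values and
!--       north ghost layers the southernmost interior values (cyclic shift
!--       within the local memory)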
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), psouth, 0, &
              ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), pnorth, 0, &
              comm2d, status, ierr )
!
!--       Send rear boundary, receive front one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, &
                             type_xz(grid_level), pnorth, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, &
                             type_xz(grid_level), psouth, 1,             &
                             comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send front boundary, receive rear one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), &
                          psouth, 0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), &
                          pnorth, 0, comm2d, req(2), ierr )
!
!--       Send rear boundary, receive front one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
                          type_xz(grid_level), pnorth, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_xz(grid_level), psouth, 1, comm2d, req(4), ierr )

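!
!--       Wait until all four transfers have been completed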
          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr == 'cyclic' )  THEN
        ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
        ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
        ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
        ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
    ENDIF

#endif
    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )


 END SUBROUTINE exchange_horiz