source: palm/trunk/SOURCE/exchange_horiz.f90 @ 862

 SUBROUTINE exchange_horiz( ar, nbgp_local)

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: exchange_horiz.f90 842 2012-02-28 12:37:31Z suehring $
!
! 841 2012-02-28 12:29:49Z maronga
! Excluded routine from compilation of namelist_file_check
!
! 709 2011-03-30 09:31:40Z raasch
! formatting adjustments
!
! 707 2011-03-29 11:39:40Z raasch
! grid_level directly used as index for MPI data type arrays,
! bc_lr/ns replaced by bc_lr/ns_cyc
!
! 689 2011-02-20 19:31:12Z gryschka
! Bugfix for some logical expressions
! (syntax was not compatible with all compilers)
!
! 683 2011-02-09 14:25:15Z raasch
! optional synchronous exchange (sendrecv) implemented, code partly reformatted
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp_local to ensure that no useless
! ghost points are exchanged in case of multigrid. type_yz(0) and type_xz(0)
! are used for the normal grid, the remaining types for the several grid
! levels. Exchange is done via MPI vectors with a dynamic number of ghost
! points, which depends on the advection scheme. Exchange between left and
! right PEs is 10% faster with MPI vectors than without.
!
! 75 2007-03-22 09:54:05Z raasch
! Special cases for additional gridpoints along x or y in case of non-cyclic
! boundary conditions are no longer considered
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.16  2006/02/23 12:19:08  raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1  1997/07/24 11:13:29  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral boundary values (parallel computers) and cyclic
! lateral boundary conditions, respectively.
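!
! Typical call (illustrative only; pt and nbgp stand for a prognostic 3d array
! and for the default number of ghost layers defined elsewhere in PALM):
!    CALL exchange_horiz( pt, nbgp )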
!------------------------------------------------------------------------------!

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

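!
!-- Dummy arguments: nbgp_local is the number of ghost layers to be exchanged,
!-- ar is the 3d array including these ghost layers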
    INTEGER ::  nbgp_local
    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar

#if ! defined( __check )
#if defined( __parallel )
    INTEGER, DIMENSION(4)                 ::  req
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
#endif


    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr_cyc )  THEN
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

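!
!--    Domain is decomposed along x, i.e. the nbgp_local ghost layers are
!--    exchanged with the left and right neighbour PEs via the MPI derived
!--    data type type_yz(grid_level)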
       IF ( synchronous_exchange )  THEN
!
!--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), pleft,  0, &
              ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), pright, 0, &
              comm2d, status, ierr )
!
!--       Send right boundary, receive left one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, &
                             type_yz(grid_level), pright, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,   &
                             type_yz(grid_level), pleft,  1,             &
                             comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send left boundary, receive right one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), &
                          pleft, 0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), &
                          pright, 0, comm2d, req(2), ierr )
!
!--       Send right boundary, receive left one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
                          type_yz(grid_level), pright, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_yz(grid_level), pleft,  1, comm2d, req(4), ierr )

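!
!--       Wait until all four transfers have been completed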
          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF


    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns_cyc )  THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), psouth, 0, &
              ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), pnorth, 0, &
              comm2d, status, ierr )
!
!--       Send rear boundary, receive front one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, &
                             type_xz(grid_level), pnorth, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, &
                             type_xz(grid_level), psouth, 1,             &
                             comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send front boundary, receive rear one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), &
                          psouth, 0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), &
                          pnorth, 0, comm2d, req(2), ierr )
!
!--       Send rear boundary, receive front one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
                          type_xz(grid_level), pnorth, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_xz(grid_level), psouth, 1, comm2d, req(4), ierr )

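!
!--       Wait until all four transfers have been completed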
          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr == 'cyclic' )  THEN
       ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
       ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
       ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
       ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
    ENDIF

#endif
    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )

#endif
 END SUBROUTINE exchange_horiz