source: palm/trunk/SOURCE/exchange_horiz.f90 @ 841

Last change on this file since 841 was 841, checked in by maronga, 13 years ago

further adjustments and bugfixes for the namelist file check

  • Property svn:keywords set to Id
File size: 6.7 KB
 SUBROUTINE exchange_horiz( ar, nbgp_local )

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
! Excluded routine from compilation of namelist_file_check
!
! Former revisions:
! -----------------
! $Id: exchange_horiz.f90 841 2012-02-28 12:29:49Z maronga $
!
! 709 2011-03-30 09:31:40Z raasch
! formatting adjustments
!
! 707 2011-03-29 11:39:40Z raasch
! grid_level directly used as index for MPI data type arrays,
! bc_lr/ns replaced by bc_lr/ns_cyc
!
! 689 2011-02-20 19:31:12Z gryschka
! Bugfix for some logical expressions
! (syntax was not compatible with all compilers)
!
! 683 2011-02-09 14:25:15Z raasch
! optional synchronous exchange (sendrecv) implemented, code partly reformatted
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp_local to ensure that no useless
! ghost points are exchanged in case of multigrid. type_yz(0) and type_xz(0)
! are used for the normal grid, the remaining types for the several grid
! levels. Exchange is done via MPI vectors with a dynamic number of ghost
! points, which depends on the advection scheme. Exchange between left and
! right PEs is 10% faster with MPI vectors than without.
!
! 75 2007-03-22 09:54:05Z raasch
! Special cases for additional grid points along x or y in case of non-cyclic
! boundary conditions are no longer considered
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.16  2006/02/23 12:19:08  raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1  1997/07/24 11:13:29  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral (ghost point) boundary values between neighbouring PEs
! (parallel computers) and application of cyclic lateral boundary conditions,
! respectively.
!------------------------------------------------------------------------------!

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE


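!
!-- Dummy arguments: nbgp_local is the number of ghost point layers to be
!-- exchanged; ar is the 3d array including exactly these ghost layers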
    INTEGER ::  nbgp_local
    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar

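!
!-- The routine body is excluded from the compilation of the namelist file
!-- check (preprocessor switch __check), cf. the current revisions note above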
#if ! defined( __check )
#if defined( __parallel )
    INTEGER, DIMENSION(4)                 ::  req
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
#endif


    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
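!--    (the nbgp_local ghost layers on the left/right side are filled with the
!--    rightmost/leftmost interior layers of the same PE)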
       IF ( bc_lr_cyc )  THEN
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

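!
!--    Exchange of the nbgp_local ghost layers with the left and right
!--    neighbour PEs, either synchronously via MPI_SENDRECV or asynchronously
!--    via MPI_ISEND/MPI_IRECV followed by MPI_WAITALL, depending on
!--    synchronous_exchange. type_yz(grid_level) is the MPI derived datatype
!--    describing one block of nbgp_local yz ghost planes on the current grid
!--    level (cf. revisions 667 and 707 above)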
       IF ( synchronous_exchange )  THEN
!
!--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), pleft,  0, &
              ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), pright, 0, &
              comm2d, status, ierr )
!
!--       Send right boundary, receive left one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, &
                             type_yz(grid_level), pright, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,   &
                             type_yz(grid_level), pleft,  1,             &
                             comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send left boundary, receive right one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), &
                          pleft, 0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), &
                          pright, 0, comm2d, req(2), ierr )
!
!--       Send right boundary, receive left one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
                          type_yz(grid_level), pright, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_yz(grid_level), pleft,  1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF


    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
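!--    (the nbgp_local ghost layers on the south/north side are filled with the
!--    northernmost/southernmost interior layers of the same PE)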
       IF ( bc_ns_cyc )  THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

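!
!--    Exchange of the nbgp_local ghost layers with the south and north
!--    neighbour PEs, analogous to the left/right exchange above, here using
!--    the xz datatypes type_xz(grid_level)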
       IF ( synchronous_exchange )  THEN
!
!--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), psouth, 0, &
              ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), pnorth, 0, &
              comm2d, status, ierr )
!
!--       Send rear boundary, receive front one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, &
                             type_xz(grid_level), pnorth, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, &
                             type_xz(grid_level), psouth, 1,             &
                             comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send front boundary, receive rear one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), &
                          psouth, 0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), &
                          pnorth, 0, comm2d, req(2), ierr )
!
!--       Send rear boundary, receive front one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
                          type_xz(grid_level), pnorth, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_xz(grid_level), psouth, 1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
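!-- (cyclic conditions are realized by copying the interior layers into the
!-- opposite ghost layers, analogous to the 1d-decomposition cases above)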
    IF ( bc_lr == 'cyclic' )  THEN
       ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
       ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
       ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
       ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
    ENDIF

#endif
    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )

#endif
 END SUBROUTINE exchange_horiz