source: palm/trunk/SOURCE/exchange_horiz.f90 @ 1036

Last change on this file since 1036 was 1036, checked in by raasch, 11 years ago

code has been put under the GNU General Public License (v3)

  • Property svn:keywords set to Id
File size: 7.5 KB
Line 
1 SUBROUTINE exchange_horiz( ar, nbgp_local)
2
3!--------------------------------------------------------------------------------!
4! This file is part of PALM.
5!
6! PALM is free software: you can redistribute it and/or modify it under the terms
7! of the GNU General Public License as published by the Free Software Foundation,
8! either version 3 of the License, or (at your option) any later version.
9!
10! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
11! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
12! A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
13!
14! You should have received a copy of the GNU General Public License along with
15! PALM. If not, see <http://www.gnu.org/licenses/>.
16!
17! Copyright 1997-2012  Leibniz University Hannover
18!--------------------------------------------------------------------------------!
19!
20! Current revisions:
21! -----------------
22!
23!
24! Former revisions:
25! -----------------
26! $Id: exchange_horiz.f90 1036 2012-10-22 13:43:42Z raasch $
27!
28! 841 2012-02-28 12:29:49Z maronga
29! Excluded routine from compilation of namelist_file_check
30!
31! 709 2011-03-30 09:31:40Z raasch
32! formatting adjustments
33!
34! 707 2011-03-29 11:39:40Z raasch
35! grid_level directly used as index for MPI data type arrays,
36! bc_lr/ns replaced by bc_lr/ns_cyc
37!
38! 689 2011-02-20 19:31:12Z gryschka
39! Bugfix for some logical expressions
40! (syntax was not compatible with all compilers)
41!
42! 683 2011-02-09 14:25:15Z raasch
43! optional synchronous exchange (sendrecv) implemented, code partly reformatted
44!
45! 667 2010-12-23 12:06:00Z suehring/gryschka
46! Dynamic exchange of ghost points with nbgp_local to ensure that no useless
47! ghost points exchanged in case of multigrid. type_yz(0) and type_xz(0)
48! used for normal grid, the remaining types used for the several grid levels.
49! Exchange is done via MPI-Vectors with a dynamic value of ghost points which
50! depend on the advection scheme. Exchange of left and right PEs is 10% faster
51! with MPI-Vectors than without.
52!
53! 75 2007-03-22 09:54:05Z raasch
54! Special cases for additional gridpoints along x or y in case of non-cyclic
55! boundary conditions are not regarded any more
56!
57! RCS Log replace by Id keyword, revision history cleaned up
58!
59! Revision 1.16  2006/02/23 12:19:08  raasch
60! anz_yz renamed ngp_yz
61!
62! Revision 1.1  1997/07/24 11:13:29  raasch
63! Initial revision
64!
65!
66! Description:
67! ------------
68! Exchange of lateral boundary values (parallel computers) and cyclic
69! lateral boundary conditions, respectively.
70!------------------------------------------------------------------------------!
71
72    USE control_parameters
73    USE cpulog
74    USE indices
75    USE interfaces
76    USE pegrid
77
78    IMPLICIT NONE
79
80
!
!-- nbgp_local: number of ghost point layers to be exchanged for this call
!-- (allows a reduced exchange width, e.g. on coarse multigrid levels, see
!-- revision 667 above)
81    INTEGER ::  nbgp_local
!
!-- ar: 3d-array (z,y,x) whose lateral ghost layers are to be updated; the
!-- ghost-layer width of its y/x dimensions matches nbgp_local
82    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
83                    nxl-nbgp_local:nxr+nbgp_local) ::  ar
84
!
!-- The entire routine body is excluded when compiling the namelist file
!-- check (__check), see revision 841 above
85#if ! defined( __check )
86#if defined( __parallel )
!
!-- Request handles and status array for the non-blocking (asynchronous)
!-- MPI exchange branch below
87    INTEGER, DIMENSION(4)                 ::  req
88    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
89#endif
90
91
92    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )
93
94#if defined( __parallel )
95
96!
97!-- Exchange of lateral boundary values
98    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
99!
100!--    One-dimensional decomposition along y, boundary values can be exchanged
101!--    within the PE memory
102       IF ( bc_lr_cyc )  THEN
103          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
104          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
105       ENDIF
106
107    ELSE
!
!--    Domain is decomposed along x as well; exchange via MPI, using the
!--    derived datatype for the current grid level (type_yz(grid_level))
108
109       IF ( synchronous_exchange )  THEN
110!
111!--       Send left boundary, receive right one (synchronous)
112          CALL MPI_SENDRECV(                                                   &
113              ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), pleft,  0, &
114              ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), pright, 0, &
115              comm2d, status, ierr )
116!
117!--       Send right boundary, receive left one (synchronous)
118          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, &
119                             type_yz(grid_level), pright, 1,             &
120                             ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,   &
121                             type_yz(grid_level), pleft,  1,             &
122                             comm2d, status, ierr )
123
124       ELSE
125
126          req = 0
127!
128!--       Send left boundary, receive right one (asynchronous)
129          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), &
130                          pleft, 0, comm2d, req(1), ierr )
131          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), &
132                          pright, 0, comm2d, req(2), ierr )
133!
134!--       Send right boundary, receive left one (asynchronous)
135          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
136                          type_yz(grid_level), pright, 1, comm2d, req(3), ierr )
137          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
138                          type_yz(grid_level), pleft,  1, comm2d, req(4), ierr )
139
!
!--       Block until all four non-blocking transfers have completed
140          CALL MPI_WAITALL( 4, req, wait_stat, ierr )
141
142       ENDIF
143
144    ENDIF
145
146
147    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
148!
149!--    One-dimensional decomposition along x, boundary values can be exchanged
150!--    within the PE memory
151       IF ( bc_ns_cyc )  THEN
152          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
153          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
154       ENDIF
155
156    ELSE
!
!--    Domain is decomposed along y as well; exchange via MPI, using the
!--    derived datatype for the current grid level (type_xz(grid_level)).
!--    Note that the x-range of the exchanged slabs includes the ghost
!--    layers just updated above, so corner ghost points are filled too.
157
158       IF ( synchronous_exchange )  THEN
159!
160!--       Send front boundary, receive rear one (synchronous)
161          CALL MPI_SENDRECV(                                                   &
162              ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), psouth, 0, &
163              ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), pnorth, 0, &
164              comm2d, status, ierr )
165!
166!--       Send rear boundary, receive front one (synchronous)
167          CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, &
168                             type_xz(grid_level), pnorth, 1,             &
169                             ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, &
170                             type_xz(grid_level), psouth, 1,             &
171                             comm2d, status, ierr )
172
173       ELSE
174
175          req = 0
176!
177!--       Send front boundary, receive rear one (asynchronous)
178          CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), &
179                          psouth, 0, comm2d, req(1), ierr )
180          CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), &
181                          pnorth, 0, comm2d, req(2), ierr )
182!
183!--       Send rear boundary, receive front one (asynchronous)
184          CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
185                          type_xz(grid_level), pnorth, 1, comm2d, req(3), ierr )
186          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
187                          type_xz(grid_level), psouth, 1, comm2d, req(4), ierr )
188
!
!--       Block until all four non-blocking transfers have completed
189          CALL MPI_WAITALL( 4, req, wait_stat, ierr )
190
191       ENDIF
192
193    ENDIF
194
195#else
196
197!
198!-- Lateral boundary conditions in the non-parallel case
199    IF ( bc_lr == 'cyclic' )  THEN
200        ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
201        ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
202    ENDIF
203
204    IF ( bc_ns == 'cyclic' )  THEN
205        ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
206        ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
207    ENDIF
208
209#endif
210    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )
211
212#endif
213 END SUBROUTINE exchange_horiz
Note: See TracBrowser for help on using the repository browser.