source: palm/trunk/SOURCE/exchange_horiz_2d.f90 @ 1056

 SUBROUTINE exchange_horiz_2d( ar )

!--------------------------------------------------------------------------------!
! This file is part of PALM.
!
! PALM is free software: you can redistribute it and/or modify it under the terms
! of the GNU General Public License as published by the Free Software Foundation,
! either version 3 of the License, or (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2012  Leibniz University Hannover
!--------------------------------------------------------------------------------!
!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: exchange_horiz_2d.f90 1037 2012-10-22 14:10:22Z raasch $
!
! 1036 2012-10-22 13:43:42Z raasch
! code put under GPL (PALM 3.9)
!
! 841 2012-02-28 12:29:49Z maronga
! Excluded routine from compilation of namelist_file_check
!
! 707 2011-03-29 11:39:40Z raasch
! bc_lr/ns replaced by bc_lr/ns_cyc
!
! 702 2011-03-24 19:33:15Z suehring
! Bugfix in declaration of ar in exchange_horiz_2d_int and number of MPI-blocks
! in MPI_SENDRECV().
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp, which depends on the advection
! scheme. Exchange between left and right PEs is now done with MPI-vectors.
!
! 73 2007-03-20 08:33:14Z raasch
! Neumann boundary conditions at inflow/outflow in case of non-cyclic boundary
! conditions
!
! RCS Log replace by Id keyword, revision history cleaned up
!
! Revision 1.9  2006/05/12 19:15:52  letzel
! MPI_REAL replaced by MPI_INTEGER in exchange_horiz_2d_int
!
! Revision 1.1  1998/01/23 09:58:21  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
! boundary conditions, respectively, for 2D-arrays.
!------------------------------------------------------------------------------!
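!
!-- Usage sketch (illustrative only; the array name is hypothetical): any 2D
!-- REAL field that has been allocated with nbgp ghost layers on each side,
!--    REAL ::  my_2d_field(nysg:nyng,nxlg:nxrg)
!-- gets all of its lateral ghost points updated by a single call:
!--    CALL exchange_horiz_2d( my_2d_field )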

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

    REAL ::  ar(nysg:nyng,nxlg:nxrg)
    INTEGER ::  i

#if ! defined( __check )
    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y, pleft,  0,                 &
                          ar(nysg,nxr+1), 1, type_y, pright, 0,               &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y, pright,  1,         &
                          ar(nysg,nxlg), 1, type_y, pleft,   1,               &
                          comm2d, status, ierr )

    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)

    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys,nxlg), 1, type_x, psouth, 0,                 &
                          ar(nyn+1,nxlg), 1, type_x, pnorth, 0,               &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x, pnorth, 1,          &
                          ar(nysg,nxlg), 1, type_x, psouth, 1,                &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)
    ENDIF

#endif

!
!-- Neumann-conditions at inflow/outflow in case of non-cyclic boundary
!-- conditions
    IF ( inflow_l .OR. outflow_l )  THEN
       DO i = nbgp, 1, -1
          ar(:,nxl-i) = ar(:,nxl)
       END DO
    END IF
    IF ( inflow_r .OR. outflow_r )  THEN
       DO i = 1, nbgp
          ar(:,nxr+i) = ar(:,nxr)
       END DO
    END IF
    IF ( inflow_s .OR. outflow_s )  THEN
       DO i = nbgp, 1, -1
          ar(nys-i,:) = ar(nys,:)
       END DO
    END IF
    IF ( inflow_n .OR. outflow_n )  THEN
       DO i = 1, nbgp
          ar(nyn+i,:) = ar(nyn,:)
       END DO
    END IF
    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

#endif
 END SUBROUTINE exchange_horiz_2d



 SUBROUTINE exchange_horiz_2d_int( ar )

!------------------------------------------------------------------------------!
! Description:
! ------------
! Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
! boundary conditions, respectively, for 2D integer arrays.
!------------------------------------------------------------------------------!
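!
!-- Usage sketch (illustrative only; the array name is hypothetical): an
!-- INTEGER field allocated with nbgp ghost layers on each side,
!--    INTEGER ::  my_2d_flags(nysg:nyng,nxlg:nxrg)
!-- gets its lateral ghost points updated via:
!--    CALL exchange_horiz_2d_int( my_2d_flags )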

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

    INTEGER ::  ar(nysg:nyng,nxlg:nxrg)
    INTEGER ::  i

#if ! defined( __check )
    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y_int, pleft,  0,             &
                          ar(nysg,nxr+1), 1, type_y_int, pright, 0,           &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y_int, pright,  1,     &
                          ar(nysg,nxlg), 1, type_y_int, pleft,   1,           &
                          comm2d, status, ierr )

    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn+1-nbgp:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys-1+nbgp,:)

    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys,nxlg), 1, type_x_int, psouth, 0,             &
                          ar(nyn+1,nxlg), 1, type_x_int, pnorth, 0,           &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x_int, pnorth, 1,      &
                          ar(nysg,nxlg), 1, type_x_int, psouth, 1,            &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nysg:nys-1,:) = ar(nyn+1-nbgp:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys-1+nbgp,:)
    ENDIF

#endif
    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

#endif
 END SUBROUTINE exchange_horiz_2d_int