source: palm/trunk/SOURCE/exchange_horiz_2d.f90 @ 1092

Last change on this file since 1092 was 1092, checked in by raasch, 11 years ago

unused variables removed from several routines

  • Property svn:keywords set to Id
File size: 7.7 KB
 SUBROUTINE exchange_horiz_2d( ar )

!--------------------------------------------------------------------------------!
! This file is part of PALM.
!
! PALM is free software: you can redistribute it and/or modify it under the terms
! of the GNU General Public License as published by the Free Software Foundation,
! either version 3 of the License, or (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2012  Leibniz University Hannover
!--------------------------------------------------------------------------------!
!
! Current revisions:
! -----------------
! unused variables removed
!
! Former revisions:
! -----------------
! $Id: exchange_horiz_2d.f90 1092 2013-02-02 11:24:22Z raasch $
!
! 1036 2012-10-22 13:43:42Z raasch
! code put under GPL (PALM 3.9)
!
! 841 2012-02-28 12:29:49Z maronga
! Excluded routine from compilation of namelist_file_check
!
! 707 2011-03-29 11:39:40Z raasch
! bc_lr/ns replaced by bc_lr/ns_cyc
!
! 702 2011-03-24 19:33:15Z suehring
! Bugfix in declaration of ar in exchange_horiz_2d_int and number of MPI-blocks
! in MPI_SENDRECV().
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp, which depends on the advection
! scheme. Exchange between left and right PEs is now done with MPI-vectors.
!
! 73 2007-03-20 08:33:14Z raasch
! Neumann boundary conditions at inflow/outflow in case of non-cyclic boundary
! conditions
!
! RCS Log replace by Id keyword, revision history cleaned up
!
! Revision 1.9  2006/05/12 19:15:52  letzel
! MPI_REAL replaced by MPI_INTEGER in exchange_horiz_2d_int
!
! Revision 1.1  1998/01/23 09:58:21  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
! boundary conditions, respectively, for 2D-arrays.
!------------------------------------------------------------------------------!
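!
!-- Note: the dummy argument is assumed to be declared with the ghost-extended
!-- bounds provided by the indices module, i.e. with nbgp ghost points on each
!-- lateral side (nxlg = nxl-nbgp, nxrg = nxr+nbgp, nysg = nys-nbgp,
!-- nyng = nyn+nbgp). A minimal call sketch (array name purely illustrative):
!--
!--    REAL ::  surface_var(nysg:nyng,nxlg:nxrg)
!--    CALL exchange_horiz_2d( surface_var )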

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

    REAL ::  ar(nysg:nyng,nxlg:nxrg)
    INTEGER ::  i

#if ! defined( __check )
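!
!-- Note: __check is assumed to be defined only when the separate namelist-file
!-- check program is compiled (cf. revision 841 above); in that case the whole
!-- body of this routine is skipped.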
    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ELSE
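!
!--    Exchange along x via MPI: type_y is assumed to be a committed MPI
!--    derived datatype (presumably set up during grid initialization in
!--    init_pegrid) describing one complete strip of nbgp ghost columns over
!--    the full y-extent of the array, so a count of 1 transfers the entire
!--    lateral boundary in a single MPI_SENDRECV.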
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y, pleft,  0,                 &
                          ar(nysg,nxr+1), 1, type_y, pright, 0,               &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y, pright,  1,         &
                          ar(nysg,nxlg), 1, type_y, pleft,   1,               &
                          comm2d, status, ierr )

    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)

    ELSE
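!
!--    Exchange along y via MPI: "front" and "rear" refer to the south (psouth)
!--    and north (pnorth) neighbours, respectively. type_x is assumed to be the
!--    corresponding MPI derived datatype describing a strip of nbgp ghost rows
!--    over the full x-extent of the array.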
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys,nxlg), 1, type_x, psouth, 0,                 &
                          ar(nyn+1,nxlg), 1, type_x, pnorth, 0,               &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x, pnorth, 1,          &
                          ar(nysg,nxlg), 1, type_x, psouth, 1,                &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
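!-- (the nbgp outermost prognostic columns/rows of the opposite side are copied
!-- directly into the ghost cells whenever the respective lateral boundary
!-- conditions are cyclic)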
    IF ( bc_lr_cyc )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)
    ENDIF

#endif

!
!-- Neumann-conditions at inflow/outflow in case of non-cyclic boundary
!-- conditions
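!-- (the value at the outermost prognostic grid point is copied into all nbgp
!-- ghost points of that boundary, i.e. a zero-gradient condition)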
    IF ( inflow_l .OR. outflow_l )  THEN
       DO  i = nbgp, 1, -1
          ar(:,nxl-i) = ar(:,nxl)
       ENDDO
    ENDIF
    IF ( inflow_r .OR. outflow_r )  THEN
       DO  i = 1, nbgp
          ar(:,nxr+i) = ar(:,nxr)
       ENDDO
    ENDIF
    IF ( inflow_s .OR. outflow_s )  THEN
       DO  i = nbgp, 1, -1
          ar(nys-i,:) = ar(nys,:)
       ENDDO
    ENDIF
    IF ( inflow_n .OR. outflow_n )  THEN
       DO  i = 1, nbgp
          ar(nyn+i,:) = ar(nyn,:)
       ENDDO
    ENDIF

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

#endif
 END SUBROUTINE exchange_horiz_2d


 SUBROUTINE exchange_horiz_2d_int( ar )

!------------------------------------------------------------------------------!
! Description:
! ------------
! Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
! boundary conditions, respectively, for 2D integer arrays.
!------------------------------------------------------------------------------!
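!
!-- Note: this routine mirrors exchange_horiz_2d for INTEGER arrays; it uses
!-- the integer MPI derived datatypes type_x_int and type_y_int and applies no
!-- Neumann treatment at non-cyclic boundaries.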

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

    INTEGER ::  ar(nysg:nyng,nxlg:nxrg)

#if ! defined( __check )
    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y_int, pleft,  0,             &
                          ar(nysg,nxr+1), 1, type_y_int, pright, 0,           &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y_int, pright,  1,     &
                          ar(nysg,nxlg), 1, type_y_int, pleft,   1,           &
                          comm2d, status, ierr )

    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn+1-nbgp:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys-1+nbgp,:)

    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys,nxlg), 1, type_x_int, psouth, 0,             &
                          ar(nyn+1,nxlg), 1, type_x_int, pnorth, 0,           &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x_int, pnorth, 1,      &
                          ar(nysg,nxlg), 1, type_x_int, psouth, 1,            &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nysg:nys-1,:) = ar(nyn+1-nbgp:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys-1+nbgp,:)
    ENDIF

#endif

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

#endif
 END SUBROUTINE exchange_horiz_2d_int