source: palm/trunk/SOURCE/exchange_horiz_2d.f90 @ 1318

Last change on this file since 1318 was 1318, checked in by raasch, 10 years ago

former files/routines cpu_log and cpu_statistics combined to one module,
which also includes the former data module cpulog from the modules-file,
module interfaces removed

  • Property svn:keywords set to Id
File size: 7.7 KB
SUBROUTINE exchange_horiz_2d( ar )

!--------------------------------------------------------------------------------!
! This file is part of PALM.
!
! PALM is free software: you can redistribute it and/or modify it under the terms
! of the GNU General Public License as published by the Free Software Foundation,
! either version 3 of the License, or (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2014 Leibniz Universitaet Hannover
!--------------------------------------------------------------------------------!
!
! Current revisions:
! -----------------
! module interfaces removed
!
! Former revisions:
! -----------------
! $Id: exchange_horiz_2d.f90 1318 2014-03-17 13:35:16Z raasch $
!
! 1092 2013-02-02 11:24:22Z raasch
! unused variables removed
!
! 1036 2012-10-22 13:43:42Z raasch
! code put under GPL (PALM 3.9)
!
! 841 2012-02-28 12:29:49Z maronga
! Excluded routine from compilation of namelist_file_check
!
! 707 2011-03-29 11:39:40Z raasch
! bc_lr/ns replaced by bc_lr/ns_cyc
!
! 702 2011-03-24 19:33:15Z suehring
! Bugfix in declaration of ar in exchange_horiz_2d_int and number of MPI-blocks
! in MPI_SENDRECV().
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp, which depends on the advection
! scheme. Exchange between left and right PEs is now done with MPI-vectors.
!
! 73 2007-03-20 08:33:14Z raasch
! Neumann boundary conditions at inflow/outflow in case of non-cyclic boundary
! conditions
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.9  2006/05/12 19:15:52  letzel
! MPI_REAL replaced by MPI_INTEGER in exchange_horiz_2d_int
!
! Revision 1.1  1998/01/23 09:58:21  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral (ghost point) boundaries between neighboring PEs on
! parallel computers and application of cyclic lateral boundary conditions,
! respectively, for 2D arrays.
!------------------------------------------------------------------------------!
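!
!-- Note on indexing (a sketch, assuming the usual PALM conventions): the
!-- subdomain interior covers nxl:nxr and nys:nyn; the ghosted array bounds
!-- are nxlg = nxl-nbgp, nxrg = nxr+nbgp, nysg = nys-nbgp, nyng = nyn+nbgp,
!-- where nbgp is the number of ghost point layers. E.g. for nbgp = 3 the
!-- x-extent of ar is
!--    nxl-3 .. nxl-1 | nxl .. nxr | nxr+1 .. nxr+3
!--     left ghosts      interior     right ghosts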

    USE control_parameters
    USE cpulog
    USE indices
    USE pegrid

    IMPLICIT NONE


    REAL    ::  ar(nysg:nyng,nxlg:nxrg)
    INTEGER ::  i

#if ! defined( __check )
    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
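!--    (the left ghost layers receive the rightmost interior columns and vice
!--    versa, i.e. a cyclic wrap-around without any communication)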
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ELSE
!
!--    Send left boundary, receive right one
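!--    (type_y is an MPI derived datatype, presumably set up during grid
!--    initialization, that covers one block of nbgp ghost layers over the
!--    full y-extent, so a count of 1 transfers all layers at once)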
       CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y, pleft,  0,                 &
                          ar(nysg,nxr+1), 1, type_y, pright, 0,               &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y, pright, 1,          &
                          ar(nysg,nxlg), 1, type_y, pleft,  1,                &
                          comm2d, status, ierr )

    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)

    ELSE
!
!--    Send front boundary, receive rear one
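!--    (type_x is presumably the analogous derived datatype for the
!--    y-direction exchange: nbgp rows over the full x-extent, strided in
!--    memory since y is the first array dimension)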
       CALL MPI_SENDRECV( ar(nys,nxlg), 1, type_x, psouth, 0,                 &
                          ar(nyn+1,nxlg), 1, type_x, pnorth, 0,               &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x, pnorth, 1,          &
                          ar(nysg,nxlg), 1, type_x, psouth, 1,                &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
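!-- (bc_lr_cyc / bc_ns_cyc are true for cyclic conditions along x and y,
!-- respectively; non-cyclic boundaries are handled by the Neumann
!-- conditions below)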
    IF ( bc_lr_cyc )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)
    ENDIF

#endif

!
!-- Neumann conditions at inflow/outflow boundaries in case of non-cyclic
!-- boundary conditions
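!-- (each ghost layer is filled with the value at the respective boundary,
!-- which enforces a zero normal gradient across inflow/outflow boundaries)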
    IF ( inflow_l .OR. outflow_l )  THEN
       DO  i = nbgp, 1, -1
          ar(:,nxl-i) = ar(:,nxl)
       ENDDO
    ENDIF
    IF ( inflow_r .OR. outflow_r )  THEN
       DO  i = 1, nbgp
          ar(:,nxr+i) = ar(:,nxr)
       ENDDO
    ENDIF
    IF ( inflow_s .OR. outflow_s )  THEN
       DO  i = nbgp, 1, -1
          ar(nys-i,:) = ar(nys,:)
       ENDDO
    ENDIF
    IF ( inflow_n .OR. outflow_n )  THEN
       DO  i = 1, nbgp
          ar(nyn+i,:) = ar(nyn,:)
       ENDDO
    ENDIF

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

#endif
 END SUBROUTINE exchange_horiz_2d


 SUBROUTINE exchange_horiz_2d_int( ar )

!------------------------------------------------------------------------------!
! Description:
! ------------
! Exchange of lateral (ghost point) boundaries between neighboring PEs on
! parallel computers and application of cyclic lateral boundary conditions,
! respectively, for 2D integer arrays.
!------------------------------------------------------------------------------!
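!
!-- The routine mirrors exchange_horiz_2d above, but uses the integer MPI
!-- datatypes type_x_int / type_y_int (presumably the MPI_INTEGER-based
!-- counterparts of type_x / type_y). Note that, unlike the REAL version,
!-- no Neumann treatment of non-cyclic boundaries is applied here.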

    USE control_parameters
    USE cpulog
    USE indices
    USE pegrid

    IMPLICIT NONE

    INTEGER ::  ar(nysg:nyng,nxlg:nxrg)

#if ! defined( __check )
    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y_int, pleft,  0,             &
                          ar(nysg,nxr+1), 1, type_y_int, pright, 0,           &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y_int, pright, 1,      &
                          ar(nysg,nxlg), 1, type_y_int, pleft,  1,            &
                          comm2d, status, ierr )

    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn+1-nbgp:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys-1+nbgp,:)

    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys,nxlg), 1, type_x_int, psouth, 0,             &
                          ar(nyn+1,nxlg), 1, type_x_int, pnorth, 0,           &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x_int, pnorth, 1,      &
                          ar(nysg,nxlg), 1, type_x_int, psouth, 1,            &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nysg:nys-1,:) = ar(nyn+1-nbgp:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys-1+nbgp,:)
    ENDIF

#endif
    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

#endif
 END SUBROUTINE exchange_horiz_2d_int