source: palm/trunk/SOURCE/exchange_horiz_2d.f90 @ 689

Last change on this file since 689 was 668, checked in by suehring, 14 years ago

last commit documented

  • Property svn:keywords set to Id
File size: 6.4 KB
 SUBROUTINE exchange_horiz_2d( ar )

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
!
! Former revisions:
! -----------------
! $Id: exchange_horiz_2d.f90 668 2010-12-23 13:22:58Z gryschka $
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp, which depends on the advection
! scheme. Exchange between left and right PEs is now done with MPI-vectors.
!
! 73 2007-03-20 08:33:14Z raasch
! Neumann boundary conditions at inflow/outflow in case of non-cyclic boundary
! conditions
!
! RCS Log replace by Id keyword, revision history cleaned up
!
! Revision 1.9  2006/05/12 19:15:52  letzel
! MPI_REAL replaced by MPI_INTEGER in exchange_horiz_2d_int
!
! Revision 1.1  1998/01/23 09:58:21  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
! boundary conditions, respectively, for 2D-arrays.
!------------------------------------------------------------------------------!

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

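!
!-- ar carries nbgp ghost layers on each lateral boundary, i.e. the index
!-- bounds nysg:nyng and nxlg:nxrg already include the ghost points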
    REAL ::  ar(nysg:nyng,nxlg:nxrg)
    INTEGER :: i


    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
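!-- (type_y and type_x are MPI derived datatypes that transfer all nbgp ghost
!-- layers of a lateral boundary in a single message; they are set up during
!-- the grid/PE initialisation, cf. the r667 note in the header)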
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxl-nbgp:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxr+nbgp) = ar(:,nxl:nxl+nbgp-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y, pleft,  0, &
                          ar(nysg,nxr+1), 1, type_y, pright, 0, &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y, pright,  1, &
                          ar(nysg,nxlg), 1, type_y, pleft,   1, &
                          comm2d, status, ierr )
    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nys-nbgp:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyn+nbgp,:) = ar(nys:nys+nbgp-1,:)

    ELSE
!
!--    Send front boundary, receive rear one
!--    (number of elements passed to MPI_SENDRECV was changed from nbgp to 1)
       CALL MPI_SENDRECV( ar(nys,nxlg), 1, type_x, psouth, 0, &
                          ar(nyn+1,nxlg), 1, type_x, pnorth, 0, &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x, pnorth, 1, &
                          ar(nysg,nxlg), 1, type_x, psouth, 1, &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr == 'cyclic' )  THEN
       ar(:,nxl-nbgp:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxr+nbgp) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
       ar(nys-nbgp:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyn+nbgp,:) = ar(nys:nys+nbgp-1,:)
    ENDIF

#endif

!
!-- Neumann conditions at inflow/outflow in case of non-cyclic boundary
!-- conditions
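!-- (the outermost prognostic value at the respective boundary is copied into
!-- each of the nbgp ghost layers)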
    IF ( inflow_l .OR. outflow_l )  THEN
       DO i = nbgp, 1, -1
          ar(:,nxl-i) = ar(:,nxl)
       ENDDO
    ENDIF
    IF ( inflow_r .OR. outflow_r )  THEN
       DO i = 1, nbgp
          ar(:,nxr+i) = ar(:,nxr)
       ENDDO
    ENDIF
    IF ( inflow_s .OR. outflow_s )  THEN
       DO i = nbgp, 1, -1
          ar(nys-i,:) = ar(nys,:)
       ENDDO
    ENDIF
    IF ( inflow_n .OR. outflow_n )  THEN
       DO i = 1, nbgp
          ar(nyn+i,:) = ar(nyn,:)
       ENDDO
    ENDIF

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

 END SUBROUTINE exchange_horiz_2d



 SUBROUTINE exchange_horiz_2d_int( ar )

!------------------------------------------------------------------------------!
! Description:
! ------------
! Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
! boundary conditions, respectively, for 2D integer arrays.
!------------------------------------------------------------------------------!
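!
!-- (counterpart of exchange_horiz_2d above, using the integer MPI datatypes
!-- type_y_int and type_x_int for the transfers)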

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

    INTEGER ::  ar(nysg:nyng,nxlg:nxrg)
    INTEGER ::  i


    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(nys:nyn,nxl-1) = ar(nys:nyn,nxr)
       ar(nys:nyn,nxr+1) = ar(nys:nyn,nxl)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y_int, pleft,  0, &
                          ar(nysg,nxr+1), 1, type_y_int, pright, 0, &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y_int, pright,  1, &
                          ar(nysg,nxlg), 1, type_y_int, pleft,   1, &
                          comm2d, status, ierr )
    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn+1-nbgp:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys-1+nbgp,:)

    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys,nxlg), 1, type_x_int, psouth, 0, &
                          ar(nyn+1,nxlg), 1, type_x_int, pnorth, 0, &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), nbgp, type_x_int, pnorth, 1, &
                          ar(nysg,nxlg), nbgp, type_x_int, psouth, 1, &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr == 'cyclic' )  THEN
       ar(nys:nyn,nxl-1) = ar(nys:nyn,nxr)
       ar(nys:nyn,nxr+1) = ar(nys:nyn,nxl)
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
       ar(nysg:nys-1,:) = ar(nyn+1-nbgp:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys-1+nbgp,:)
    ENDIF

#endif

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

 END SUBROUTINE exchange_horiz_2d_int
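
For reference, a minimal caller sketch (not part of the repository file; the
names demo_exchange and work_2d are invented for illustration): a 2-D field
must be dimensioned with the ghost bounds nysg:nyng and nxlg:nxrg before it is
passed to exchange_horiz_2d. An integer field would be passed to
exchange_horiz_2d_int in the same way.

 SUBROUTINE demo_exchange

    USE indices   ! provides nys, nyn, nxl, nxr and the ghost bounds

    IMPLICIT NONE

    REAL, DIMENSION(:,:), ALLOCATABLE ::  work_2d   ! hypothetical 2-D field

    ALLOCATE( work_2d(nysg:nyng,nxlg:nxrg) )
    work_2d = 0.0
!
!-- ... compute the interior values work_2d(nys:nyn,nxl:nxr) here ...
!
!-- Fill the lateral ghost layers (cyclic or Neumann conditions, depending on
!-- the boundary settings) before work_2d is used in stencil operations
    CALL exchange_horiz_2d( work_2d )

    DEALLOCATE( work_2d )

 END SUBROUTINE demo_exchange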