source: palm/trunk/SOURCE/exchange_horiz_2d.f90 @ 707

Last change on this file since 707 was 707, checked in by raasch, 13 years ago

New:
---

In case of the multigrid method, on coarse grid levels, gathered data are
identically processed on all PEs (before, on PE0 only), so that the subsequent
scattering of data is no longer necessary. (modules, init_pegrid, poismg)

Changed:


Calculation of weighted average of p is now handled in the same way
regardless of the number of ghost layers (advection scheme). (pres)

The multigrid and SOR methods now use p_loc for the iterative advancement of
pressure; p_sub has been removed. (init_3d_model, modules, poismg, pres, sor)

bc_lr and bc_ns replaced by bc_lr_dirrad, bc_lr_raddir, bc_ns_dirrad, bc_ns_raddir
for speed optimization. (calc_spectra, check_parameters, exchange_horiz,
exchange_horiz_2d, header, init_3d_model, init_grid, init_pegrid, modules,
poismg, pres, sor, time_integration, timestep)
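
For illustration only, a minimal sketch of what this change looks like at a
call site. The "before" form with a character comparison is an assumption
about the pre-707 code; the LOGICAL form matches the bc_lr_cyc/bc_ns_cyc
flags used in the source below:

    ! Before: character comparison evaluated on every pass through a hot path
    IF ( bc_lr == 'cyclic' )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
    ENDIF

    ! After: LOGICAL flag set once during parameter checking, cheap to test
    IF ( bc_lr_cyc )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
    ENDIF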

grid_level directly used as index for MPI data type arrays. (exchange_horiz,
poismg)
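
A hedged sketch of the indexing pattern this describes; type_xz, taken here as
an array of MPI derived datatypes with one entry per grid level, is an
assumption about the names used in exchange_horiz/poismg:

    ! Hypothetical excerpt: select the ghost-layer datatype for the current
    ! multigrid level by direct indexing instead of copying it to a scalar.
    CALL MPI_SENDRECV( ar(nzb,nys,nxl),   1, type_xz(grid_level), psouth, 0, &
                       ar(nzb,nyn+1,nxl), 1, type_xz(grid_level), pnorth, 0, &
                       comm2d, status, ierr )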

Array p is now initialized to zero for the iterative solvers only. (init_3d_model)

Errors:


localsum calculation modified for proper OpenMP reduction. (pres)

Bugfix: bottom (nzb) and top (nzt+1) boundary conditions set in routines
resid and restrict. They were missed before, which may have led to
unpredictable results. (poismg)
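
A heavily hedged sketch of such a fix; f_mg and the Neumann-type copies are
hypothetical, and the actual poismg code applies the configured pressure
boundary conditions:

    ! Hypothetical excerpt: set the bottom (nzb) and top (nzt+1) planes
    ! explicitly so subsequent stencil operations never read undefined values.
    f_mg(nzb,:,:)   = f_mg(nzb+1,:,:)
    f_mg(nzt+1,:,:) = f_mg(nzt,:,:)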

  • Property svn:keywords set to Id
File size: 6.7 KB
 SUBROUTINE exchange_horiz_2d( ar )

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
! bc_lr/ns replaced by bc_lr/ns_cyc
!
! Former revisions:
! -----------------
! $Id: exchange_horiz_2d.f90 707 2011-03-29 11:39:40Z raasch $
!
! 702 2011-03-24 19:33:15Z suehring
! Bugfix in declaration of ar in exchange_horiz_2d_int and number of MPI-blocks
! in MPI_SENDRECV().
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp, which depends on the advection
! scheme. Exchange between left and right PEs is now done with MPI-vectors.
!
! 73 2007-03-20 08:33:14Z raasch
! Neumann boundary conditions at inflow/outflow in case of non-cyclic boundary
! conditions
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.9  2006/05/12 19:15:52  letzel
! MPI_REAL replaced by MPI_INTEGER in exchange_horiz_2d_int
!
! Revision 1.1  1998/01/23 09:58:21  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral (ghost) boundaries of 2D-arrays on parallel computers,
! or application of cyclic lateral boundary conditions, respectively.
!------------------------------------------------------------------------------!

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

    REAL    ::  ar(nysg:nyng,nxlg:nxrg)
    INTEGER ::  i


    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y, pleft,  0,                 &
                          ar(nysg,nxr+1), 1, type_y, pright, 0,               &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y, pright, 1,          &
                          ar(nysg,nxlg), 1, type_y, pleft,  1,                &
                          comm2d, status, ierr )

    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)

    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys,nxlg), 1, type_x, psouth, 0,                 &
                          ar(nyn+1,nxlg), 1, type_x, pnorth, 0,               &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x, pnorth, 1,          &
                          ar(nysg,nxlg), 1, type_x, psouth, 1,                &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)
    ENDIF

#endif

!
!-- Neumann conditions at inflow/outflow in case of non-cyclic boundary
!-- conditions
    IF ( inflow_l .OR. outflow_l )  THEN
       DO  i = nbgp, 1, -1
          ar(:,nxl-i) = ar(:,nxl)
       ENDDO
    ENDIF
    IF ( inflow_r .OR. outflow_r )  THEN
       DO  i = 1, nbgp
          ar(:,nxr+i) = ar(:,nxr)
       ENDDO
    ENDIF
    IF ( inflow_s .OR. outflow_s )  THEN
       DO  i = nbgp, 1, -1
          ar(nys-i,:) = ar(nys,:)
       ENDDO
    ENDIF
    IF ( inflow_n .OR. outflow_n )  THEN
       DO  i = 1, nbgp
          ar(nyn+i,:) = ar(nyn,:)
       ENDDO
    ENDIF

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

 END SUBROUTINE exchange_horiz_2d


 SUBROUTINE exchange_horiz_2d_int( ar )

!------------------------------------------------------------------------------!
! Description:
! ------------
! Exchange of lateral (ghost) boundaries of 2D integer arrays on parallel
! computers, or application of cyclic lateral boundary conditions,
! respectively.
!------------------------------------------------------------------------------!

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

    INTEGER ::  ar(nysg:nyng,nxlg:nxrg)
    INTEGER ::  i


    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y_int, pleft,  0,             &
                          ar(nysg,nxr+1), 1, type_y_int, pright, 0,           &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y_int, pright, 1,      &
                          ar(nysg,nxlg), 1, type_y_int, pleft,  1,            &
                          comm2d, status, ierr )

    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn+1-nbgp:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys-1+nbgp,:)

    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys,nxlg), 1, type_x_int, psouth, 0,             &
                          ar(nyn+1,nxlg), 1, type_x_int, pnorth, 0,           &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x_int, pnorth, 1,      &
                          ar(nysg,nxlg), 1, type_x_int, psouth, 1,            &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nysg:nys-1,:) = ar(nyn+1-nbgp:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys-1+nbgp,:)
    ENDIF

#endif

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

 END SUBROUTINE exchange_horiz_2d_int
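
A minimal usage sketch for the real-valued routine (hypothetical caller; the
index bounds and nbgp come from the indices module, and p_2d is an assumed
name, not part of this file):

    ! Declare a 2D field including nbgp ghost layers on each side, fill the
    ! interior, then update all ghost layers with a single call.
    REAL ::  p_2d(nysg:nyng,nxlg:nxrg)

    p_2d(nys:nyn,nxl:nxr) = 0.0   ! ... compute interior values here ...
    CALL exchange_horiz_2d( p_2d )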