source: palm/trunk/SOURCE/exchange_horiz_2d.f90 @ 707

Last change on this file since 707 was 707, checked in by raasch, 13 years ago

New:
---

In case of the multigrid method, gathered data on coarse grid levels are now
processed identically on all PEs (before, on PE0 only), so that the subsequent
scattering of the data is no longer necessary. (modules, init_pegrid, poismg)

Changed:
---

Calculation of the weighted average of p is now handled in the same way,
regardless of the number of ghost layers (which depends on the advection scheme). (pres)

The multigrid and SOR methods now use p_loc for the iterative advancement of
pressure; p_sub has been removed. (init_3d_model, modules, poismg, pres, sor)

bc_lr and bc_ns replaced by bc_lr_dirrad, bc_lr_raddir, bc_ns_dirrad, bc_ns_raddir
for speed optimization. (calc_spectra, check_parameters, exchange_horiz,
exchange_horiz_2d, header, init_3d_model, init_grid, init_pegrid, modules,
poismg, pres, sor, time_integration, timestep)
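
The speed gain comes from testing precomputed LOGICAL flags instead of repeatedly
comparing the bc_lr/bc_ns character strings inside frequently called routines. A
minimal sketch of the idea, assuming the flags are set once during initialization
(e.g. in check_parameters) from the usual PALM settings 'cyclic',
'dirichlet/radiation' and 'radiation/dirichlet':

    bc_lr_cyc    = ( bc_lr == 'cyclic' )
    bc_lr_dirrad = ( bc_lr == 'dirichlet/radiation' )
    bc_lr_raddir = ( bc_lr == 'radiation/dirichlet' )
    bc_ns_cyc    = ( bc_ns == 'cyclic' )
    bc_ns_dirrad = ( bc_ns == 'dirichlet/radiation' )
    bc_ns_raddir = ( bc_ns == 'radiation/dirichlet' )

Routines such as exchange_horiz_2d then only test a logical, e.g.
IF ( bc_lr_cyc ), instead of a character comparison.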

grid_level is now used directly as the index into the MPI data type arrays.
(exchange_horiz, poismg)
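
As a sketch of what this means at the call sites (the name and shape of the data
type array are assumed here, following the pattern of the 2D exchange in this
file): instead of copying the data type for the current coarse-grid level into a
temporary variable before the exchange, the level indexes the array directly,

    CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y(grid_level), pleft,  0,        &
                       ar(nysg,nxr+1), 1, type_y(grid_level), pright, 0,      &
                       comm2d, status, ierr )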

Initial assignment of zero to array p is now done for iterative solvers only. (init_3d_model)

Errors:
---

localsum calculation modified for proper OpenMP reduction. (pres)

Bugfix: bottom (nzb) and top (nzt+1) boundary conditions set in routines
resid and restrict. They were missed before, which may have led to
unpredictable results. (poismg)

 SUBROUTINE exchange_horiz_2d( ar )

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
! bc_lr/ns replaced by bc_lr/ns_cyc
!
! Former revisions:
! -----------------
! $Id: exchange_horiz_2d.f90 707 2011-03-29 11:39:40Z raasch $
!
! 702 2011-03-24 19:33:15Z suehring
! Bugfix in declaration of ar in exchange_horiz_2d_int and number of MPI-blocks
! in MPI_SENDRECV().
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp, which depends on the advection
! scheme. Exchange between left and right PEs is now done with MPI-vectors.
!
! 73 2007-03-20 08:33:14Z raasch
! Neumann boundary conditions at inflow/outflow in case of non-cyclic boundary
! conditions
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.9  2006/05/12 19:15:52  letzel
! MPI_REAL replaced by MPI_INTEGER in exchange_horiz_2d_int
!
! Revision 1.1  1998/01/23 09:58:21  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
! boundary conditions, respectively, for 2D-arrays.
!------------------------------------------------------------------------------!
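!
!-- Typical use (sketch): the actual argument must carry the full ghost-point
!-- bounds; e.g. for a hypothetical 2D surface field sfc_2d a caller would
!-- declare and call
!--    REAL ::  sfc_2d(nysg:nyng,nxlg:nxrg)
!--    CALL exchange_horiz_2d( sfc_2d )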

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

    REAL ::  ar(nysg:nyng,nxlg:nxrg)
    INTEGER ::  i


    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
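!-- (type_x and type_y are derived MPI data types describing a complete strip of
!-- nbgp ghost rows/columns; they are presumably created once in init_pegrid via
!-- MPI_TYPE_VECTOR / MPI_TYPE_COMMIT, so each MPI_SENDRECV below transfers all
!-- ghost layers of a boundary in a single call.)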
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y, pleft,  0,                 &
                          ar(nysg,nxr+1), 1, type_y, pright, 0,               &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y, pright, 1,          &
                          ar(nysg,nxlg), 1, type_y, pleft,  1,                &
                          comm2d, status, ierr )

    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)

    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys,nxlg), 1, type_x, psouth, 0,                 &
                          ar(nyn+1,nxlg), 1, type_x, pnorth, 0,               &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x, pnorth, 1,          &
                          ar(nysg,nxlg), 1, type_x, psouth, 1,                &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)
    ENDIF

#endif

!
!-- Neumann-conditions at inflow/outflow in case of non-cyclic boundary
!-- conditions
    IF ( inflow_l .OR. outflow_l )  THEN
       DO  i = nbgp, 1, -1
          ar(:,nxl-i) = ar(:,nxl)
       ENDDO
    ENDIF
    IF ( inflow_r .OR. outflow_r )  THEN
       DO  i = 1, nbgp
          ar(:,nxr+i) = ar(:,nxr)
       ENDDO
    ENDIF
    IF ( inflow_s .OR. outflow_s )  THEN
       DO  i = nbgp, 1, -1
          ar(nys-i,:) = ar(nys,:)
       ENDDO
    ENDIF
    IF ( inflow_n .OR. outflow_n )  THEN
       DO  i = 1, nbgp
          ar(nyn+i,:) = ar(nyn,:)
       ENDDO
    ENDIF

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

 END SUBROUTINE exchange_horiz_2d


 SUBROUTINE exchange_horiz_2d_int( ar )

!------------------------------------------------------------------------------!
! Description:
! ------------
! Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
! boundary conditions, respectively, for 2D integer arrays.
!------------------------------------------------------------------------------!
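!
!-- Apart from the INTEGER dummy argument and the integer MPI data types
!-- (type_x_int, type_y_int), the exchange below mirrors exchange_horiz_2d.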

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

    INTEGER ::  ar(nysg:nyng,nxlg:nxrg)
    INTEGER ::  i

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y_int, pleft,  0,             &
                          ar(nysg,nxr+1), 1, type_y_int, pright, 0,           &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y_int, pright, 1,      &
                          ar(nysg,nxlg), 1, type_y_int, pleft,  1,            &
                          comm2d, status, ierr )

    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn+1-nbgp:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys-1+nbgp,:)

    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys,nxlg), 1, type_x_int, psouth, 0,             &
                          ar(nyn+1,nxlg), 1, type_x_int, pnorth, 0,           &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x_int, pnorth, 1,      &
                          ar(nysg,nxlg), 1, type_x_int, psouth, 1,            &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nysg:nys-1,:) = ar(nyn+1-nbgp:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys-1+nbgp,:)
    ENDIF

#endif

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

 END SUBROUTINE exchange_horiz_2d_int