source: palm/trunk/SOURCE/exchange_horiz.f90 @ 707

Last change on this file since 707 was 707, checked in by raasch, 13 years ago

New:
---

In case of the multigrid method, gathered data on coarse grid levels are now
processed identically on all PEs (before: on PE0 only), so that the subsequent
scattering of the data is not necessary any more. (modules, init_pegrid, poismg)
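The idea behind this change, as a minimal self-contained sketch (the variable and
routine names below are illustrative, not the ones used in poismg): instead of
gathering the coarse-grid data on PE0, processing them there, and scattering the
result back, every PE now obtains the complete gathered field, e.g. via
MPI_ALLGATHER, and processes it redundantly, which makes the scatter step obsolete.

 PROGRAM allgather_sketch

    USE mpi

    IMPLICIT NONE

    INTEGER ::  ierr, myid, numprocs
    REAL, DIMENSION(1) ::  work_local
    REAL, ALLOCATABLE, DIMENSION(:) ::  work_global

    CALL MPI_INIT( ierr )
    CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
    CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )

    ALLOCATE( work_global(0:numprocs-1) )
    work_local(1) = REAL( myid )

!
!-- Before: MPI_GATHER to PE0, processing on PE0, MPI_SCATTER of the result.
!-- Now: the gathered data are available and processed on every PE.
    CALL MPI_ALLGATHER( work_local,  1, MPI_REAL,                              &
                        work_global, 1, MPI_REAL, MPI_COMM_WORLD, ierr )

    work_global = 0.5 * work_global    ! identical processing on all PEs

    DEALLOCATE( work_global )
    CALL MPI_FINALIZE( ierr )

 END PROGRAM allgather_sketch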

Changed:


Calculation of the weighted average of p is now handled in the same way
regardless of the number of ghost layers, which depends on the advection
scheme. (pres)

The multigrid and SOR methods now use p_loc for the iterative advancement of the
pressure; p_sub has been removed. (init_3d_model, modules, poismg, pres, sor)

bc_lr and bc_ns are replaced by bc_lr_dirrad, bc_lr_raddir, bc_ns_dirrad, and
bc_ns_raddir for speed optimization. (calc_spectra, check_parameters,
exchange_horiz, exchange_horiz_2d, header, init_3d_model, init_grid,
init_pegrid, modules, poismg, pres, sor, time_integration, timestep)
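The nature of the optimization, shown as a small self-contained sketch (the
string values follow the usual PALM conventions but should be read as
assumptions here): the character comparison is evaluated once during
initialization and stored in LOGICAL flags, so that frequently executed
routines such as exchange_horiz test a LOGICAL instead of repeatedly comparing
character strings.

 PROGRAM bc_flag_sketch

    IMPLICIT NONE

    CHARACTER (LEN=20) ::  bc_lr = 'dirichlet/radiation'
    LOGICAL ::  bc_lr_cyc, bc_lr_dirrad, bc_lr_raddir

!
!-- Evaluate the string once (in PALM this is done during initialization,
!-- e.g. in check_parameters)
    bc_lr_cyc    = ( bc_lr == 'cyclic' )
    bc_lr_dirrad = ( bc_lr == 'dirichlet/radiation' )
    bc_lr_raddir = ( bc_lr == 'radiation/dirichlet' )

!
!-- Inside frequently called code (e.g. exchange_horiz, pres) only the
!-- LOGICAL flags are tested
    IF ( bc_lr_cyc )  THEN
       PRINT*, 'cyclic boundary conditions along x'
    ELSEIF ( bc_lr_dirrad  .OR.  bc_lr_raddir )  THEN
       PRINT*, 'non-cyclic boundary conditions along x'
    ENDIF

 END PROGRAM bc_flag_sketch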

grid_level is now used directly as the index into the MPI data type arrays.
(exchange_horiz, poismg)
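A sketch of what this means in practice (the level count and plane sizes are
made up, and a simple contiguous type is used here just to show the indexing;
PALM builds its types differently in init_pegrid): one MPI derived datatype is
created and committed per grid level, and the communication routines then
select the type directly with type_yz(grid_level) instead of first copying it
into a scratch variable.

 PROGRAM type_index_sketch

    USE mpi

    IMPLICIT NONE

    INTEGER, PARAMETER ::  maximum_grid_level = 3
    INTEGER ::  i, ierr, ngp_yz
    INTEGER, DIMENSION(0:maximum_grid_level) ::  type_yz

    CALL MPI_INIT( ierr )

!
!-- One yz-ghost-plane datatype per grid level (sizes are illustrative only)
    DO  i = 0, maximum_grid_level
       ngp_yz = 34 * 34 / 2**i
       CALL MPI_TYPE_CONTIGUOUS( ngp_yz, MPI_REAL, type_yz(i), ierr )
       CALL MPI_TYPE_COMMIT( type_yz(i), ierr )
    ENDDO

!
!-- A routine working on level grid_level can now pass type_yz(grid_level)
!-- directly to MPI_SENDRECV/MPI_ISEND/MPI_IRECV, as done in exchange_horiz

    DO  i = 0, maximum_grid_level
       CALL MPI_TYPE_FREE( type_yz(i), ierr )
    ENDDO

    CALL MPI_FINALIZE( ierr )

 END PROGRAM type_index_sketch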

Initial assignment of zero to array p is done for the iterative solvers only.
(init_3d_model)

Errors:


The localsum calculation has been modified for a proper OpenMP reduction. (pres)
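The relevant OpenMP pattern, as a self-contained sketch (array name and loop
bounds are invented; the actual code is in pres): the thread-local partial sums
are combined with a REDUCTION clause so that localsum is computed correctly for
any number of threads.

 PROGRAM reduction_sketch

    IMPLICIT NONE

    INTEGER, PARAMETER ::  nxl = 0, nxr = 31, nys = 0, nyn = 31, nzb = 0, nzt = 32
    INTEGER ::  i, j, k
    REAL ::  localsum
    REAL, DIMENSION(nzb:nzt+1,nys:nyn,nxl:nxr) ::  d

    d = 1.0
    localsum = 0.0

!
!-- Each thread accumulates a private partial sum; the REDUCTION clause
!-- combines them into localsum at the end of the parallel loop
    !$OMP PARALLEL DO PRIVATE( i, j, k ) REDUCTION( +: localsum )
    DO  i = nxl, nxr
       DO  j = nys, nyn
          DO  k = nzb+1, nzt
             localsum = localsum + ABS( d(k,j,i) )
          ENDDO
       ENDDO
    ENDDO
    !$OMP END PARALLEL DO

    PRINT*, 'localsum = ', localsum

 END PROGRAM reduction_sketch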

Bugfix: bottom (nzb) and top (nzt+1) boundary conditions are now set in routines
resid and restrict. They were missing before, which may have led to
unpredictable results. (poismg)

  • Property svn:keywords set to Id
File size: 6.6 KB
 SUBROUTINE exchange_horiz( ar, nbgp_local)

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
! grid_level directly used as index for MPI data type arrays,
! bc_lr/ns replaced by bc_lr/ns_cyc
!
! Former revisions:
! -----------------
!
! 689 2011-02-20 19:31:12Z gryschka
! Bugfix for some logical expressions
! (syntax was not compatible with all compilers)
!
! 683 2011-02-09 14:25:15Z raasch
! optional synchronous exchange (sendrecv) implemented, code partly reformatted
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp_local to ensure that no useless
! ghost points exchanged in case of multigrid. type_yz(0) and type_xz(0)
! used for normal grid, the remaining types used for the several grid levels.
! Exchange is done via MPI-Vectors with a dynamic value of ghost points which
! depend on the advection scheme. Exchange of left and right PEs is 10% faster
! with MPI-Vectors than without.
!
! 75 2007-03-22 09:54:05Z raasch
! Special cases for additional gridpoints along x or y in case of non-cyclic
! boundary conditions are not regarded any more
!
! RCS Log replace by Id keyword, revision history cleaned up
!
! Revision 1.16  2006/02/23 12:19:08  raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1  1997/07/24 11:13:29  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral boundary values (parallel computers) and cyclic
! lateral boundary conditions, respectively.
!------------------------------------------------------------------------------!

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

#if defined( __parallel )
    INTEGER, DIMENSION(4)                 ::  req
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
#endif
    INTEGER ::  nbgp_local
    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr_cyc )  THEN
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), pleft,  0, &
              ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), pright, 0, &
              comm2d, status, ierr )
!
!--       Send right boundary, receive left one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, &
                             type_yz(grid_level), pright, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,   &
                             type_yz(grid_level), pleft,  1,             &
                             comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send left boundary, receive right one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), &
                          pleft, 0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), &
                          pright, 0, comm2d, req(2), ierr )
!
!--       Send right boundary, receive left one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
                          type_yz(grid_level), pright, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_yz(grid_level), pleft,  1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF


    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns_cyc )  THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), psouth, 0, &
              ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), pnorth, 0, &
              comm2d, status, ierr )
!
!--       Send rear boundary, receive front one (synchronous)
          CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, &
                             type_xz(grid_level), pnorth, 1,             &
                             ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, &
                             type_xz(grid_level), psouth, 1,             &
                             comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send front boundary, receive rear one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), &
                          psouth, 0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), &
                          pnorth, 0, comm2d, req(2), ierr )
!
!--       Send rear boundary, receive front one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
                          type_xz(grid_level), pnorth, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_xz(grid_level), psouth, 1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr == 'cyclic' )  THEN
        ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
        ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
        ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
        ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
    ENDIF

#endif
    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )


 END SUBROUTINE exchange_horiz
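For context, a call-site sketch (not part of this file; u and nbgp denote a PALM
field array and the standard number of ghost layers, and callers on coarser
multigrid levels pass a smaller ghost-layer count instead):

!
!-- Exchange the ghost layers of u between neighbouring PEs, or apply the
!-- cyclic boundary conditions in the serial case
!    CALL exchange_horiz( u, nbgp )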