source: palm/trunk/SOURCE/exchange_horiz.f90 @ 683

Last change on this file since 683 was 683, checked in by raasch, 13 years ago

New:
---

optional exchange of ghost points in synchronous mode via MPI_SENDRECV,
steered by the d3par parameter synchronous_exchange (see the sketch below)
(cpu_statistics, exchange_horiz, modules, parin)

OpenMP parallelization of the pressure solver (FFT method) for 2D domain
decomposition (poisfft, transpose)
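
A minimal sketch of how the new switch could be activated in the runtime
parameter (d3par) namelist; synchronous_exchange is the parameter introduced
by this changeset, the other entry is only a placeholder to show the namelist
syntax:

    &d3par  end_time = 3600.0,
            synchronous_exchange = .TRUE.,  /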

Changed:
---

Errors:
---
mpt bugfix for netCDF4 usage (mrun)

  • Property svn:keywords set to Id
File size: 6.4 KB
 SUBROUTINE exchange_horiz( ar, nbgp_local )

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
! optional synchronous exchange (sendrecv) implemented, code partly reformatted
!
! Former revisions:
! -----------------
! $Id: exchange_horiz.f90 683 2011-02-09 14:25:15Z raasch $
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp_local to ensure that no useless
! ghost points are exchanged in case of multigrid. type_yz(0) and type_xz(0)
! are used for the normal grid, the remaining types are used for the several
! grid levels. Exchange is done via MPI vectors with a dynamic number of ghost
! points, which depends on the advection scheme. Exchange of left and right PEs
! is 10% faster with MPI vectors than without.
!
! 75 2007-03-22 09:54:05Z raasch
! Special cases for additional grid points along x or y in case of non-cyclic
! boundary conditions are no longer considered
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.16  2006/02/23 12:19:08  raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1  1997/07/24 11:13:29  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral boundary values (parallel computers) and cyclic
! lateral boundary conditions, respectively.
!------------------------------------------------------------------------------!
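!
!-- Typical usage (illustrative sketch only, not part of the original source):
!-- the routine is called with a 3d array and the number of ghost layers to be
!-- exchanged, e.g.
!--    CALL exchange_horiz( u, nbgp )
!-- where u and nbgp are assumed to be a prognostic array and the standard
!-- number of ghost points, respectively.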

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

#if defined( __parallel )
    INTEGER, DIMENSION(4)                 ::  req
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
#endif
    INTEGER ::  i, nbgp_local
    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

!
!-- In the Poisson multigrid solver arrays with coarser grids are used.
!-- Set i appropriately, because the coarser grids have different
!-- MPI datatypes type_xz, type_yz.
    IF ( exchange_mg )  THEN
       i = grid_level
    ELSE
       i = 0
    ENDIF

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr == 'cyclic' )  THEN
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV(                                                   &
                       ar(nzb,nys-nbgp_local,nxl),   1, type_yz(i), pleft,  0, &
                       ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, 0, &
                       comm2d, status, ierr )
!
!--       Send right boundary, receive left one (synchronous)
          CALL MPI_SENDRECV(                                                   &
            ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, type_yz(i), pright, 1, &
            ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_yz(i), pleft,  1, &
                       comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send left boundary, receive right one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(i), pleft,  &
                          0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, &
                          0, comm2d, req(2), ierr )
!
!--       Send right boundary, receive left one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
                          type_yz(i), pright, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_yz(i), pleft,  1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF


    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns == 'cyclic' )  THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV(                                                   &
                       ar(nzb,nys,nxl-nbgp_local),   1, type_xz(i), psouth, 0, &
                       ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 0, &
                       comm2d, status, ierr )
!
!--       Send rear boundary, receive front one (synchronous)
          CALL MPI_SENDRECV(                                                   &
            ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 1, &
            ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_xz(i), psouth, 1, &
            comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send front boundary, receive rear one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(i), psouth, &
                          0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, &
                          0, comm2d, req(2), ierr )
!
!--       Send rear boundary, receive front one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
                          type_xz(i), pnorth, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_xz(i), psouth, 1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr == 'cyclic' )  THEN
        ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
        ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
        ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
        ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
    ENDIF

#endif
    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )


 END SUBROUTINE exchange_horiz