 SUBROUTINE exchange_horiz( ar, nbgp_local )

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
!
! 689 2011-02-20 19:31:12Z gryschka
! Bugfix for some logical expressions
! (syntax was not compatible with all compilers)
!
! 683 2011-02-09 14:25:15Z raasch
! optional synchronous exchange (sendrecv) implemented, code partly reformatted
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp_local to ensure that no useless
! ghost points are exchanged in case of multigrid. type_yz(0) and type_xz(0)
! are used for the normal grid, the remaining types for the coarser grid levels.
! Exchange is done via MPI vectors with a dynamic number of ghost points, which
! depends on the advection scheme. The exchange between left and right PEs is
! 10% faster with MPI vectors than without.
!
! 75 2007-03-22 09:54:05Z raasch
! Special cases for additional grid points along x or y in case of non-cyclic
! boundary conditions are no longer considered
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.16 2006/02/23 12:19:08 raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1 1997/07/24 11:13:29 raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral boundary values: MPI exchange of ghost point layers
! between neighbouring PEs on parallel computers and application of cyclic
! lateral boundary conditions, respectively. A typical call is sketched
! below the header.
!------------------------------------------------------------------------------!
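!
!-- Typical call (illustrative sketch only; the array name u and the use of
!-- the model-wide ghost point number nbgp are assumptions of this example,
!-- not taken from this file):
!--
!--    CALL exchange_horiz( u, nbgp )
!--
!-- The actual argument passed as ar must carry nbgp_local ghost layers on
!-- each lateral side, matching the declaration of the dummy argument below.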

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

#if defined( __parallel )
    INTEGER, DIMENSION(4)                 ::  req
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
#endif
    INTEGER ::  i, nbgp_local
    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

!
!-- In the Poisson multigrid solver arrays with coarser grids are used.
!-- Set i appropriately, because the coarser grids have different
!-- MPI datatypes type_xz, type_yz.
    IF ( exchange_mg )  THEN
       i = grid_level
    ELSE
       i = 0
    ENDIF
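!
!-- Illustrative sketch (not part of this routine): derived datatypes such as
!-- type_yz are set up elsewhere in the model during the grid/PE setup. A yz
!-- ghost layer of nbgp planes could, for example, be described as a
!-- contiguous vector; the helper variable ngp_yz (points per yz plane) is an
!-- assumption of this sketch, not necessarily the model's actual definition:
!--
!--    ngp_yz = ( nzt - nzb + 2 ) * ( nyn - nys + 1 + 2 * nbgp )
!--    CALL MPI_TYPE_VECTOR( nbgp, ngp_yz, ngp_yz, MPI_REAL, type_yz(0), ierr )
!--    CALL MPI_TYPE_COMMIT( type_yz(0), ierr )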

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr == 'cyclic' )  THEN
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send left boundary, receive right one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys-nbgp_local,nxl),   1, type_yz(i), pleft,  0,          &
              ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, 0,          &
              comm2d, status, ierr )
!
!--       Send right boundary, receive left one (synchronous)
          CALL MPI_SENDRECV(                                                      &
              ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, type_yz(i), pright, 1,  &
              ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_yz(i), pleft,  1,  &
              comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send left boundary, receive right one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(i), pleft,  &
                          0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, &
                          0, comm2d, req(2), ierr )
!
!--       Send right boundary, receive left one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
                          type_yz(i), pright, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_yz(i), pleft,  1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF

    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns == 'cyclic' )  THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

       IF ( synchronous_exchange )  THEN
!
!--       Send front boundary, receive rear one (synchronous)
          CALL MPI_SENDRECV(                                                   &
              ar(nzb,nys,nxl-nbgp_local),   1, type_xz(i), psouth, 0,          &
              ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 0,          &
              comm2d, status, ierr )
!
!--       Send rear boundary, receive front one (synchronous)
          CALL MPI_SENDRECV(                                                      &
              ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 1,  &
              ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_xz(i), psouth, 1,  &
              comm2d, status, ierr )

       ELSE

          req = 0
!
!--       Send front boundary, receive rear one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(i), psouth, &
                          0, comm2d, req(1), ierr )
          CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, &
                          0, comm2d, req(2), ierr )
!
!--       Send rear boundary, receive front one (asynchronous)
          CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
                          type_xz(i), pnorth, 1, comm2d, req(3), ierr )
          CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1,          &
                          type_xz(i), psouth, 1, comm2d, req(4), ierr )

          CALL MPI_WAITALL( 4, req, wait_stat, ierr )

       ENDIF

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr == 'cyclic' )  THEN
       ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
       ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
       ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
       ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
    ENDIF

#endif

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )

 END SUBROUTINE exchange_horiz