Changeset 4461
- Timestamp: Mar 12, 2020 4:51:59 PM
- Location: palm/trunk/SOURCE
- Files: 3 edited
palm/trunk/SOURCE/exchange_horiz_mod.f90
(r4457 → r4461)

    ! -----------------
    ! $Id$
  + ! optional communicator added to exchange_horiz
  + !
  + ! 4457 2020-03-11 14:20:43Z raasch
    ! routine has been modularized, file exchange_horiz_2d has been merged
    !
  ...
    ! Description:
    ! ------------
  - !> Exchange of lateral boundary values (parallel computers) and cyclic
  - !> lateral boundary conditions, respectively.
  + !> Exchange of ghost point layers for subdomains (in parallel mode) and setting
  + !> of cyclic lateral boundary conditions for the total domain.
    !------------------------------------------------------------------------------!
    MODULE exchange_horiz_mod
  ...
  - SUBROUTINE exchange_horiz( ar, nbgp_local )
  + !------------------------------------------------------------------------------!
  + ! Description:
  + ! ------------
  + !> Exchange of ghost point layers for subdomains (in parallel mode) and setting
  + !> of cyclic lateral boundary conditions for the total domain.
  + !> This routine is for REAL 3d-arrays.
  + !------------------------------------------------------------------------------!
  + SUBROUTINE exchange_horiz( ar, nbgp_local, alternative_communicator )

    USE control_parameters,  &
  ...
    #endif

  - INTEGER(iwp) ::  nbgp_local  !<
  + INTEGER(iwp), OPTIONAL ::  alternative_communicator  !< alternative MPI communicator to be used
  + INTEGER(iwp) ::  communicator  !< communicator that is used as argument in MPI calls
  + INTEGER(iwp) ::  left_pe       !< id of left pe that is used as argument in MPI calls
  + INTEGER(iwp) ::  nbgp_local    !< number of ghost point layers
  + INTEGER(iwp) ::  north_pe      !< id of north pe that is used as argument in MPI calls
  + INTEGER(iwp) ::  right_pe      !< id of right pe that is used as argument in MPI calls
  + INTEGER(iwp) ::  south_pe      !< id of south pe that is used as argument in MPI calls

    REAL(wp), DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local,  &
  -                     nxl-nbgp_local:nxr+nbgp_local) ::  ar  !<
  +                     nxl-nbgp_local:nxr+nbgp_local) ::  ar  !< 3d-array for which exchange is done
  ...
    #endif

  + !
  + !-- Set the communicator to be used
  +    IF ( PRESENT( alternative_communicator ) )  THEN
  + !
  + !--    Alternative communicator is to be used
  +       communicator = communicator_configurations(alternative_communicator)%mpi_communicator
  +       left_pe  = communicator_configurations(alternative_communicator)%pleft
  +       right_pe = communicator_configurations(alternative_communicator)%pright
  +       south_pe = communicator_configurations(alternative_communicator)%psouth
  +       north_pe = communicator_configurations(alternative_communicator)%pnorth
  +
  +    ELSE
  + !
  + !--    Main communicator is to be used
  +       communicator = comm2d
  +       left_pe  = pleft
  +       right_pe = pright
  +       south_pe = psouth
  +       north_pe = pnorth
  +
  +    ENDIF
  +
    #if defined( __parallel )
  ...
    !
    !-- Send left boundary, receive right one (synchronous)
  - CALL MPI_SENDRECV( &
  -      ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), pleft,  0,  &
  -      ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), pright, 0,  &
  -      comm2d, status, ierr )
  + CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), left_pe,  0,  &
  +                    ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), right_pe, 0,  &
  +                    communicator, status, ierr )
    !
    !-- Send right boundary, receive left one (synchronous)
  - CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,  &
  -                    type_yz(grid_level), pright, 1,              &
  -                    ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,    &
  -                    type_yz(grid_level), pleft,  1,              &
  -                    comm2d, status, ierr )
  + CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,  &
  +                    type_yz(grid_level), right_pe, 1,            &
  +                    ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,    &
  +                    type_yz(grid_level), left_pe,  1,            &
  +                    communicator, status, ierr )

    ELSE
  ...
    !
    !-- Send left boundary, receive right one (asynchronous)
  - CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level),  &
  -                 pleft, req_count, comm2d, req(req_count+1), ierr )
  - CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level),  &
  -                 pright, req_count, comm2d, req(req_count+2), ierr )
  + CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), left_pe,   &
  +                 req_count, communicator, req(req_count+1), ierr )
  + CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), right_pe,  &
  +                 req_count, communicator, req(req_count+2), ierr )
    !
    !-- Send right boundary, receive left one (asynchronous)
  - CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
  -                 type_yz(grid_level), pright, req_count+1, comm2d,    &
  -                 req(req_count+3), ierr )
  - CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,            &
  -                 type_yz(grid_level), pleft, req_count+1, comm2d,     &
  -                 req(req_count+4), ierr )
  + CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, type_yz(grid_level),  &
  +                 right_pe, req_count+1, communicator, req(req_count+3), ierr )
  + CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1, type_yz(grid_level),    &
  +                 left_pe, req_count+1, communicator, req(req_count+4), ierr )

    CALL MPI_WAITALL( 4, req, wait_stat, ierr )
  ...
    !
    !-- Send front boundary, receive rear one (synchronous)
  - CALL MPI_SENDRECV( &
  -      ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), psouth, 0,  &
  -      ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), pnorth, 0,  &
  -      comm2d, status, ierr )
  + CALL MPI_SENDRECV( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), south_pe, 0,  &
  +                    ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), north_pe, 0,  &
  +                    communicator, status, ierr )
    !
    !-- Send rear boundary, receive front one (synchronous)
  - CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,  &
  -                    type_xz(grid_level), pnorth, 1,              &
  -                    ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,    &
  -                    type_xz(grid_level), psouth, 1,              &
  -                    comm2d, status, ierr )
  + CALL MPI_SENDRECV( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,  &
  +                    type_xz(grid_level), north_pe, 1,            &
  +                    ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,    &
  +                    type_xz(grid_level), south_pe, 1,            &
  +                    communicator, status, ierr )

    ELSE
  ...
    !
    !-- Send front boundary, receive rear one (asynchronous)
  - CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level),  &
  -                 psouth, req_count, comm2d, req(req_count+1), ierr )
  - CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level),  &
  -                 pnorth, req_count, comm2d, req(req_count+2), ierr )
  + CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(grid_level), south_pe,  &
  +                 req_count, communicator, req(req_count+1), ierr )
  + CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(grid_level), north_pe,  &
  +                 req_count, communicator, req(req_count+2), ierr )
    !
    !-- Send rear boundary, receive front one (asynchronous)
  - CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,         &
  -                 type_xz(grid_level), pnorth, req_count+1, comm2d,   &
  -                 req(req_count+3), ierr )
  - CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,           &
  -                 type_xz(grid_level), psouth, req_count+1, comm2d,   &
  -                 req(req_count+4), ierr )
  + CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, type_xz(grid_level),  &
  +                 north_pe, req_count+1, communicator, req(req_count+3), ierr )
  + CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1, type_xz(grid_level),    &
  +                 south_pe, req_count+1, communicator, req(req_count+4), ierr )

    CALL MPI_WAITALL( 4, req, wait_stat, ierr )
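For orientation, a minimal sketch of how a caller might use the new optional argument; the array name my_scalar is an illustrative placeholder and not taken from this changeset. The integer passed as alternative_communicator is an index into communicator_configurations(1:4), so a quantity whose lateral boundary conditions differ from the default can be exchanged on a differently configured virtual PE grid:

    ! Default behaviour: ghost-point exchange on the main communicator comm2d
    CALL exchange_horiz( my_scalar, nbgp_local )

    ! With the new optional argument: exchange on configuration 2
    ! (cyclic along x, non-cyclic along y); "my_scalar" is a placeholder array name
    CALL exchange_horiz( my_scalar, nbgp_local, alternative_communicator = 2 )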
palm/trunk/SOURCE/init_pegrid.f90
(r4444 → r4461)

    ! -----------------
    ! $Id$
  + ! communicator configurations for four virtual pe grids defined
  + !
  + ! 4444 2020-03-05 15:59:50Z raasch
    ! bugfix: cpp-directives for serial mode added
    !
  ...
    !
  + !-- Create four default MPI communicators for the 2d virtual PE grid. One of them will be
  + !-- used as the main communicator for this run, while the others might be used for specific
  + !-- quantities (like aerosol, chemical species, or passive scalars), if their horizontal
  + !-- boundary conditions shall be different from those of the other quantities (e.g.
  + !-- non-cyclic conditions for aerosols, and cyclic conditions for all others).
  +    DO  i = 1, 4
  +
  +       IF ( i == 1 )  cyclic = (/  .TRUE., .TRUE.  /)   ! cyclic along x and y
  +       IF ( i == 2 )  cyclic = (/  .TRUE., .FALSE. /)   ! cyclic along x
  +       IF ( i == 3 )  cyclic = (/ .FALSE., .TRUE.  /)   ! cyclic along y
  +       IF ( i == 4 )  cyclic = (/ .FALSE., .FALSE. /)   ! non-cyclic
  +
  +       CALL MPI_CART_CREATE( comm_palm, ndim, pdims, cyclic, reorder,  &
  +                             communicator_configurations(i)%mpi_communicator, ierr )
  +
  +       CALL MPI_CART_SHIFT( communicator_configurations(i)%mpi_communicator, 0, 1,  &
  +                            communicator_configurations(i)%pleft,                   &
  +                            communicator_configurations(i)%pright, ierr )
  +
  +       CALL MPI_CART_SHIFT( communicator_configurations(i)%mpi_communicator, 1, 1,  &
  +                            communicator_configurations(i)%psouth,                  &
  +                            communicator_configurations(i)%pnorth, ierr )
  +
  +    ENDDO
  +
  + !
    !-- If necessary, set horizontal boundary conditions to non-cyclic
    IF ( bc_lr /= 'cyclic' )  cyclic(1) = .FALSE.
  ...
    !
  - !-- Create the virtual processor grid
  - CALL MPI_CART_CREATE( comm_palm, ndim, pdims, cyclic, reorder,  &
  -                       comm2d, ierr )
  + !-- Set the main communicator (virtual pe grid) for this run
  +    IF ( bc_lr == 'cyclic'  .AND.  bc_ns == 'cyclic' )  i = 1
  +    IF ( bc_lr == 'cyclic'  .AND.  bc_ns /= 'cyclic' )  i = 2
  +    IF ( bc_lr /= 'cyclic'  .AND.  bc_ns == 'cyclic' )  i = 3
  +    IF ( bc_lr /= 'cyclic'  .AND.  bc_ns /= 'cyclic' )  i = 4
  +
  +    comm2d = communicator_configurations(i)%mpi_communicator
  +    pleft  = communicator_configurations(i)%pleft
  +    pright = communicator_configurations(i)%pright
  +    psouth = communicator_configurations(i)%psouth
  +    pnorth = communicator_configurations(i)%pnorth
  +
  + !
  + !-- Set rank and coordinates of the main communicator
    CALL MPI_COMM_RANK( comm2d, myid, ierr )
    WRITE (myid_char,'(''_'',I6.6)') myid

    CALL MPI_CART_COORDS( comm2d, myid, ndim, pcoord, ierr )
  - CALL MPI_CART_SHIFT( comm2d, 0, 1, pleft, pright, ierr )
  - CALL MPI_CART_SHIFT( comm2d, 1, 1, psouth, pnorth, ierr )
    !
    !-- In case of cyclic boundary conditions, a y-shift at the boundaries in
  ...
       ENDIF
    ENDIF
  +
    !
    !-- Vertical nesting: store four lists that identify partner ranks to exchange
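The choice of the main communicator is simply a mapping from the pair (bc_lr, bc_ns) onto one of the four prepared configurations. A hypothetical helper expressing the same mapping (the function name and argument names are made up for illustration and are not part of the changeset):

    !-- Hypothetical helper: return the index of the communicator configuration that
    !-- matches a given pair of lateral boundary conditions.
    FUNCTION matching_configuration( bc_x, bc_y )  RESULT( i )

       IMPLICIT NONE

       CHARACTER(LEN=*), INTENT(IN) ::  bc_x  !< boundary condition along x (e.g. bc_lr)
       CHARACTER(LEN=*), INTENT(IN) ::  bc_y  !< boundary condition along y (e.g. bc_ns)
       INTEGER ::  i                          !< index into communicator_configurations(1:4)

       IF ( bc_x == 'cyclic'  .AND.  bc_y == 'cyclic' )  i = 1   ! cyclic along x and y
       IF ( bc_x == 'cyclic'  .AND.  bc_y /= 'cyclic' )  i = 2   ! cyclic along x only
       IF ( bc_x /= 'cyclic'  .AND.  bc_y == 'cyclic' )  i = 3   ! cyclic along y only
       IF ( bc_x /= 'cyclic'  .AND.  bc_y /= 'cyclic' )  i = 4   ! non-cyclic

    END FUNCTION matching_configuration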
palm/trunk/SOURCE/modules.f90
(r4414 → r4461)

    ! -----------------
    ! $Id$
  + ! +virtual_pe_grid, communicator_configurations
  + !
  + ! 4414 2020-02-19 20:16:04Z suehring
    ! - nzb_diff_s_inner, nzb_diff_s_outer, nzb_inner, nzb_outer, nzb_s_inner,
    !   nzb_s_outer, nzb_u_inner, nzb_u_outer, nzb_v_inner, nzb_v_outer,
  ...
    INTEGER(iwp) ::  numprocs = 1                !< total number of appointed processor elements
    INTEGER(iwp) ::  numprocs_previous_run = -1  !< total number of appointed processor elements in previous run (job chain)
  - INTEGER(iwp) ::  pleft   !< MPI-address of the processor left of the current one
  - INTEGER(iwp) ::  pnorth  !< MPI-address of the processor north of the current one
  - INTEGER(iwp) ::  pright  !< MPI-address of the processor right of the current one
  - INTEGER(iwp) ::  psouth  !< MPI-address of the processor south of the current one
  + INTEGER(iwp) ::  pleft   !< MPI id of left neighbour pe
  + INTEGER(iwp) ::  pnorth  !< MPI id of north neighbour pe
  + INTEGER(iwp) ::  pright  !< MPI id of right neighbour pe
  + INTEGER(iwp) ::  psouth  !< MPI id of south neighbour pe
    INTEGER(iwp) ::  req_count = 0     !< MPI return variable - checks if Send-Receive operation is already finished
    INTEGER(iwp) ::  sendrecvcount_xy  !< number of subdomain gridpoints to be exchanged in direct transpositions (y --> x, or x --> y) or second (2d) transposition x --> y
  ...
    LOGICAL ::  collective_wait = .FALSE.  !< switch to set an explicit MPI barrier in front of all collective MPI calls
  +
  + TYPE virtual_pe_grid
  +    INTEGER(iwp) ::  mpi_communicator  !< MPI communicator id
  +    INTEGER(iwp) ::  pleft             !< MPI id of left neighbour pe
  +    INTEGER(iwp) ::  pright            !< MPI id of right neighbour pe
  +    INTEGER(iwp) ::  psouth            !< MPI id of south neighbour pe
  +    INTEGER(iwp) ::  pnorth            !< MPI id of north neighbour pe
  + END TYPE virtual_pe_grid
  +
  + TYPE(virtual_pe_grid) ::  communicator_configurations(4)  !< stores the four possible 2d virtual grids:
  +                                                           !< cyclic, cyclic along x, cyclic along y, non-cyclic

    #if defined( __parallel )
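To illustrate how the new derived type is meant to be read (sketch only; the local variable cc and the PRINT statements are illustrative and not PALM code): each element of communicator_configurations bundles one Cartesian communicator with the ranks of its four neighbours, so a configuration can be picked by index and accessed as a unit:

    TYPE(virtual_pe_grid) ::  cc  !< illustrative local copy of one configuration

    cc = communicator_configurations(3)   ! configuration 3: cyclic along y only
    PRINT *, 'communicator id:        ', cc%mpi_communicator
    PRINT *, 'left/right neighbours:  ', cc%pleft,  cc%pright
    PRINT *, 'south/north neighbours: ', cc%psouth, cc%pnorth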