Changeset 683 for palm/trunk
- Timestamp: Feb 9, 2011 2:25:15 PM
- Location: palm/trunk
- Files: 7 edited
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
palm/trunk/SCRIPTS/mrun
r678 → r683

 # 02/02/10 - Siggi - further adjustments on Tsubame and concerning openMP
 #            usage
+# 09/02/10 - Siggi - mpt bugfix for netCDF4 usage

…

       fi

-      # bugfix for wrong netcdf module
+      # bugfix for wrong netcdf module and for netCDF4 usage in case of mpt
       if [[ $host = lcsgib  ||  $host = lcsgih ]]
       then
…
          then
             export module_calls="$module_calls export LD_LIBRARY_PATH=/sw/dataformats/netcdf/3.6.3-intel/lib:\$LD_LIBRARY_PATH;"
          fi
+         if [[ $(echo $module_calls | grep -c mpt) != 0 ]]
+         then
+            export module_calls="$module_calls export LD_LIBRARY_PATH=/sw/sgi/mpt/2011-02-07/lib:\$LD_LIBRARY_PATH;"
+            echo "*** module_calls = $module_calls"
+         fi
       fi

…

       export MPI_TYPE_DEPTH=20
       echo "*** MPI_TYPE_DEPTH=$MPI_TYPE_DEPTH"
+      export MPI_GROUP_MAX=64
+      echo "*** MPI_GROUP_MAX=$MPI_GROUP_MAX"
       mpiexec_mpt  -np $ii  ./a.out  $ROPTS  < runfile_atmos
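Taken together, these mrun changes set up the runtime environment on the HLRN SGI systems (lcsgib/lcsgih) before PALM is started. The following condensed sketch is illustrative only; the MPT library path and the two MPT limits are taken from the diff above and are site- and version-specific, and $module_calls, $ii and $ROPTS are variables of the surrounding script:

   # condensed sketch of what mrun now arranges before launching PALM
   if [[ $(echo $module_calls | grep -c mpt) != 0 ]]
   then
      # presumably so that the netCDF4 libraries find matching MPT
      # shared objects at load time
      export LD_LIBRARY_PATH=/sw/sgi/mpt/2011-02-07/lib:$LD_LIBRARY_PATH
   fi
   export MPI_TYPE_DEPTH=20    # allow deeply nested derived MPI datatypes
   export MPI_GROUP_MAX=64     # raise MPT's default limit on MPI groups
   mpiexec_mpt  -np $ii  ./a.out  $ROPTS  < runfile_atmos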
palm/trunk/SOURCE/cpu_statistics.f90
r623 → r683

 ! Current revisions:
 ! -----------------
-!
+! output of handling of ghostpoint exchange
 !
 ! Former revisions:

…

 !
-!-- Output handling of collective operations
+!-- Output of handling of MPI operations
    IF ( collective_wait )  THEN
       WRITE ( 18, 103 )
    ELSE
       WRITE ( 18, 104 )
    ENDIF
+   IF ( synchronous_exchange )  THEN
+      WRITE ( 18, 105 )
+   ELSE
+      WRITE ( 18, 106 )
+   ENDIF

 !
 !-- Empty lines in order to create a gap to the results of the model
 !-- continuation runs
-   WRITE ( 18, 105 )
+   WRITE ( 18, 107 )

…

 103 FORMAT (/'Barriers are set in front of collective operations')
 104 FORMAT (/'No barriers are set in front of collective operations')
-105 FORMAT (//)
+105 FORMAT (/'Exchange of ghostpoints via MPI_SENDRCV')
+106 FORMAT (/'Exchange of ghostpoints via MPI_ISEND/MPI_IRECV')
+107 FORMAT (//)

 END SUBROUTINE cpu_statistics
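The renumbering is forced by Fortran's labelled FORMAT statements: the two new messages occupy labels 105 and 106, so the empty-line format moves to 107. A minimal self-contained sketch of the reporting pattern (the program name and the output unit * are illustrative; PALM writes to unit 18):

   PROGRAM report_demo
   !
   !-- A LOGICAL switch selects one of two labelled FORMAT messages,
   !-- mirroring the new synchronous_exchange report in cpu_statistics
      IMPLICIT NONE
      LOGICAL ::  synchronous_exchange = .TRUE.

      IF ( synchronous_exchange )  THEN
         WRITE ( *, 105 )
      ELSE
         WRITE ( *, 106 )
      ENDIF

  105 FORMAT (/'Exchange of ghostpoints via MPI_SENDRCV')
  106 FORMAT (/'Exchange of ghostpoints via MPI_ISEND/MPI_IRECV')

   END PROGRAM report_demo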
palm/trunk/SOURCE/exchange_horiz.f90
r668 → r683

 ! Current revisions:
 ! -----------------
+! optional synchronous exchange (sendrecv) implemented, code partly reformatted
 !
 ! Former revisions:

…

    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
 #endif
-   INTEGER :: i,nbgp_local
+   INTEGER ::  i, nbgp_local
    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar

…

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

-   IF ( exchange_mg == .TRUE. ) THEN
-      i = grid_level
+!
+!-- In the Poisson multigrid solver arrays with coarser grids are used.
+!-- Set i appropriately, because the coarser grids have different
+!-- MPI datatypes type_xz, type_yz.
+   IF ( exchange_mg == .TRUE. )  THEN
+      i = grid_level
    ELSE
-      i = 0
+      i = 0
    END IF
+
 #if defined( __parallel )

…

    ELSE

-      req = 0
+      IF ( synchronous_exchange )  THEN
 !
-!--   Send left boundary, receive right one
-      CALL MPI_ISEND(ar(nzb,nys-nbgp_local,nxl),1,type_yz(i),pleft,0,comm2d,&
-                     req(1),ierr)
-      CALL MPI_IRECV(ar(nzb,nys-nbgp_local,nxr+1),1,type_yz(i),pright,0,&
-                     comm2d,req(2),ierr)
+!--      Send left boundary, receive right one (synchronous)
+         CALL MPI_SENDRECV(                                                 &
+             ar(nzb,nys-nbgp_local,nxl),   1, type_yz(i), pleft,  0,        &
+             ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, 0,        &
+             comm2d, status, ierr )
 !
-!--   Send right boundary, receive left one
-      CALL MPI_ISEND(ar(nzb,nys-nbgp_local,nxr+1-nbgp_local),1,type_yz(i),pright, 1, &
-                     comm2d, req(3), ierr )
-      CALL MPI_IRECV(ar(nzb,nys-nbgp_local,nxl-nbgp_local),1,type_yz(i),pleft,1,&
-                     comm2d,req(4), ierr)
-
-      CALL MPI_WAITALL( 4, req, wait_stat, ierr )
+!--      Send right boundary, receive left one (synchronous)
+         CALL MPI_SENDRECV(                                                 &
+             ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, type_yz(i), pright, 1, &
+             ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_yz(i), pleft,  1, &
+             comm2d, status, ierr )
+
+      ELSE
+
+         req = 0
+!
+!--      Send left boundary, receive right one (asynchronous)
+         CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(i), pleft,  &
+                         0, comm2d, req(1), ierr )
+         CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, &
+                         0, comm2d, req(2), ierr )
+!
+!--      Send right boundary, receive left one (asynchronous)
+         CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1,          &
+                         type_yz(i), pright, 1, comm2d, req(3), ierr )
+         CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,            &
+                         type_yz(i), pleft,  1, comm2d, req(4), ierr )
+
+         CALL MPI_WAITALL( 4, req, wait_stat, ierr )
+
+      ENDIF

    ENDIF

…

    ELSE

-      req = 0
+      IF ( synchronous_exchange )  THEN
 !
-!--   Send front boundary, receive rear one
-!--   MPI_ISEND initial send adress changed, type_xz() is sendet nbgp times
-      CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),1, type_xz(i), psouth, 0, &
-                      comm2d, req(1), ierr )
-      CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local),1, type_xz(i), pnorth, 0, &
-                      comm2d, req(2), ierr )
+!--      Send front boundary, receive rear one (synchronous)
+         CALL MPI_SENDRECV(                                                 &
+             ar(nzb,nys,nxl-nbgp_local),   1, type_xz(i), psouth, 0,        &
+             ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 0,        &
+             comm2d, status, ierr )
 !
-!--   Send rear boundary, receive front one
-      CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local),1, type_xz(i), pnorth, 1, &
-                      comm2d, req(3), ierr )
-      CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local),1, type_xz(i), psouth, 1, &
-                      comm2d, req(4), ierr )
-      call MPI_WAITALL( 4, req, wait_stat, ierr )
+!--      Send rear boundary, receive front one (synchronous)
+         CALL MPI_SENDRECV(                                                 &
+             ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 1, &
+             ar(nzb,nys-nbgp_local,nxl-nbgp_local),   1, type_xz(i), psouth, 1, &
+             comm2d, status, ierr )
+
+      ELSE
+
+         req = 0
+!
+!--      Send front boundary, receive rear one (asynchronous)
+         CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local),   1, type_xz(i), psouth, &
+                         0, comm2d, req(1), ierr )
+         CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, &
+                         0, comm2d, req(2), ierr )
+!
+!--      Send rear boundary, receive front one (asynchronous)
+         CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1,          &
+                         type_xz(i), pnorth, 1, comm2d, req(3), ierr )
+         CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1,            &
+                         type_xz(i), psouth, 1, comm2d, req(4), ierr )
+
+         CALL MPI_WAITALL( 4, req, wait_stat, ierr )
+
+      ENDIF

    ENDIF
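The core of this change is the alternative between one paired MPI_SENDRECV per direction and four non-blocking calls completed by MPI_WAITALL. The toy program below isolates that alternative: each rank exchanges a single REAL ghost value with its periodic neighbours in a 1d chain. It is a sketch under simplified assumptions (scalar buffers instead of PALM's type_yz/type_xz derived datatypes; all names are illustrative):

   PROGRAM halo_demo
   !
   !-- Toy version of the two exchange variants: one REAL ghost value
   !-- travels to each periodic neighbour in a 1d chain of ranks
      USE mpi
      IMPLICIT NONE
      INTEGER ::  comm_size, ierr, my_rank, pleft, pright
      INTEGER ::  req(4), status(MPI_STATUS_SIZE), wait_stat(MPI_STATUS_SIZE,4)
      LOGICAL ::  synchronous_exchange = .TRUE.
      REAL    ::  recv_l, recv_r, send_l, send_r

      CALL MPI_INIT( ierr )
      CALL MPI_COMM_RANK( MPI_COMM_WORLD, my_rank, ierr )
      CALL MPI_COMM_SIZE( MPI_COMM_WORLD, comm_size, ierr )
      pleft  = MODULO( my_rank - 1, comm_size )   ! periodic neighbours
      pright = MODULO( my_rank + 1, comm_size )
      send_l = REAL( my_rank )          ! values destined for the ghost layers
      send_r = REAL( my_rank ) + 0.5

      IF ( synchronous_exchange )  THEN
   !
   !--    Paired send/receive; MPI guarantees progress, no explicit wait
         CALL MPI_SENDRECV( send_l, 1, MPI_REAL, pleft,  0,                   &
                            recv_r, 1, MPI_REAL, pright, 0,                   &
                            MPI_COMM_WORLD, status, ierr )
         CALL MPI_SENDRECV( send_r, 1, MPI_REAL, pright, 1,                   &
                            recv_l, 1, MPI_REAL, pleft,  1,                   &
                            MPI_COMM_WORLD, status, ierr )
      ELSE
   !
   !--    Post all four transfers first, then wait for completion
         CALL MPI_ISEND( send_l, 1, MPI_REAL, pleft,  0, MPI_COMM_WORLD,      &
                         req(1), ierr )
         CALL MPI_IRECV( recv_r, 1, MPI_REAL, pright, 0, MPI_COMM_WORLD,      &
                         req(2), ierr )
         CALL MPI_ISEND( send_r, 1, MPI_REAL, pright, 1, MPI_COMM_WORLD,      &
                         req(3), ierr )
         CALL MPI_IRECV( recv_l, 1, MPI_REAL, pleft,  1, MPI_COMM_WORLD,      &
                         req(4), ierr )
         CALL MPI_WAITALL( 4, req, wait_stat, ierr )
      ENDIF

      PRINT *, 'rank', my_rank, ': ghosts', recv_l, recv_r
      CALL MPI_FINALIZE( ierr )
   END PROGRAM halo_demo

The non-blocking default lets the four transfers overlap; the synchronous variant trades that overlap for simpler semantics and, on some MPI implementations, more predictable behaviour, which is presumably why it was made selectable rather than hard-wired.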
palm/trunk/SOURCE/modules.f90
r674 → r683

 ! Current revisions:
 ! -----------------
+! +synchronous_exchange
 !
 ! Former revisions:

…

    INTEGER, DIMENSION(:), ALLOCATABLE ::  ngp_yz, type_xz, type_yz

-   LOGICAL ::  collective_wait = .FALSE., reorder = .TRUE.
+   LOGICAL ::  collective_wait = .FALSE., reorder = .TRUE., &
+               synchronous_exchange = .FALSE.
    LOGICAL, DIMENSION(2) ::  cyclic = (/ .TRUE. , .TRUE. /), &
                              remain_dims
palm/trunk/SOURCE/parin.f90
r668 → r683

 ! Current revisions:
 ! -----------------
+! +synchronous_exchange in d3par
 !
 ! Former revisions:

…

          skip_time_data_output, skip_time_data_output_av, skip_time_dopr,  &
          skip_time_do2d_xy, skip_time_do2d_xz, skip_time_do2d_yz,          &
-         skip_time_do3d, skip_time_domask, termination_time_needed,        &
-         use_prior_plot1d_parameters, z_max_do1d, z_max_do1d_normalized,   &
-         z_max_do2d
+         skip_time_do3d, skip_time_domask, synchronous_exchange,           &
+         termination_time_needed, use_prior_plot1d_parameters, z_max_do1d, &
+         z_max_do1d_normalized, z_max_do2d
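From the user's side the new switch is now available in the d3par runtime parameter namelist; it defaults to .FALSE., i.e. the non-blocking MPI_ISEND/MPI_IRECV exchange (see modules.f90 above). A usage sketch, with an illustrative end_time value:

   &d3par  end_time = 3600.0,
           synchronous_exchange = .TRUE.,  /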
palm/trunk/SOURCE/poisfft.f90
r668 → r683

 ! Current revisions:
 ! -----------------
+! openMP parallelization for 2d-domain-decomposition
 !
 ! Former revisions:

…

 !
 !-- Define constant elements of the tridiagonal matrix.
+!$OMP PARALLEL PRIVATE ( k, i )
+!$OMP DO
    DO  k = 0, nz-1
       DO  i = nxl_z, nxr_z
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

 #if defined( __parallel )
 !
 !-- Repeat for all y-levels.
+!$OMP PARALLEL FIRSTPRIVATE( tri ) PRIVATE ( ar1, j )
+!$OMP DO
    DO  j = nys_z, nyn_z
       IF ( j <= nnyh )  THEN
…
       CALL substi( ar, ar1, tri, j )
    ENDDO
+!$OMP END PARALLEL
 #else
 !

…

 !
 !-- Performing the fft with one of the methods implemented
+!$OMP PARALLEL PRIVATE ( j, k )
+!$OMP DO
    DO  k = nzb_x, nzt_x
       DO  j = nys_x, nyn_x
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

 END SUBROUTINE fftxp

…

 !
 !-- Performing the fft with one of the methods implemented
+!$OMP PARALLEL PRIVATE ( j, k )
+!$OMP DO
    DO  k = 1, nz
       DO  j = 0, ny
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

 END SUBROUTINE fftx

…

 !
 !-- Performing the fft with one of the methods implemented
+!$OMP PARALLEL PRIVATE ( i, k )
+!$OMP DO
    DO  k = nzb_y, nzt_y
       DO  i = nxl_y, nxr_y
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

 END SUBROUTINE fftyp

…

 !
 !-- Performing the fft with one of the methods implemented
+!$OMP PARALLEL PRIVATE ( i, k )
+!$OMP DO
    DO  k = 1, nz
       DO  i = 0, nx
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

 END SUBROUTINE ffty
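All of these insertions follow one pattern: a PARALLEL region wrapping a worksharing DO, loop indices declared PRIVATE, and FIRSTPRIVATE where every thread needs its own pre-initialized copy of a scratch array, as with tri above. A minimal self-contained sketch of the pattern (the sizes and every name except tri are illustrative):

   PROGRAM omp_demo
   !
   !-- Worksharing pattern used throughout poisfft: FIRSTPRIVATE gives
   !-- each thread a private, initialized copy of the scratch array tri
      IMPLICIT NONE
      INTEGER               ::  j, k
      REAL, DIMENSION(8)    ::  tri = 0.0
      REAL, DIMENSION(64,8) ::  ar  = 1.0

   !$OMP PARALLEL FIRSTPRIVATE( tri ) PRIVATE ( j, k )
   !$OMP DO
      DO  j = 1, 64
         DO  k = 1, 8
            tri(k)  = 2.0 * ar(j,k)       ! thread-local scratch work
            ar(j,k) = ar(j,k) + tri(k)    ! disjoint writes across j
         ENDDO
      ENDDO
   !$OMP END PARALLEL

      PRINT *, 'checksum =', SUM( ar )
   END PROGRAM omp_demo

Compiled with OpenMP enabled (e.g. -fopenmp) the checksum matches a serial run, since every thread works on disjoint columns of ar.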
palm/trunk/SOURCE/transpose.f90
r623 → r683

 ! Current revisions:
 ! -----------------
-!
+! openMP parallelization of transpositions for 2d-domain-decomposition
 !
 ! Former revisions:

…

 !-- Rearrange indices of input array in order to make data to be send
 !-- by MPI contiguous
+!$OMP PARALLEL PRIVATE ( i, j, k )
+!$OMP DO
    DO  i = 0, nxa
       DO  k = nzb_x, nzt_xa
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

…

 !
 !-- Reorder transposed array
-   m = 0
+!$OMP PARALLEL PRIVATE ( i, j, k, l, m, ys )
+!$OMP DO
    DO  l = 0, pdims(2) - 1
+      m  = l * ( nxr_ya - nxl_y + 1 ) * ( nzt_ya - nzb_y + 1 ) * &
+               ( nyn_xa - nys_x + 1 )
       ys = 0 + l * ( nyn_xa - nys_x + 1 )
       DO  i = nxl_y, nxr_ya
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

 #endif

…

 !
 !-- Reorder input array for transposition
-   m = 0
+!$OMP PARALLEL PRIVATE ( i, j, k, l, m, xs )
+!$OMP DO
    DO  l = 0, pdims(1) - 1
+      m  = l * ( nzt_xa - nzb_x + 1 ) * nnx * ( nyn_xa - nys_x + 1 )
       xs = 0 + l * nnx
       DO  k = nzb_x, nzt_xa
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

…

 !
 !-- Reorder transposed array in a way that the z index is in first position
+!$OMP PARALLEL PRIVATE ( i, j, k )
+!$OMP DO
    DO  k = 1, nza
       DO  i = nxl, nxra
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL
    ELSE
 !
 !-- Reorder the array in a way that the z index is in first position
+!$OMP PARALLEL PRIVATE ( i, j, k )
+!$OMP DO
    DO  i = nxl, nxra
       DO  j = nys, nyna
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL
+
+!$OMP PARALLEL PRIVATE ( i, j, k )
+!$OMP DO
    DO  k = 1, nza
       DO  i = nxl, nxra
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

    ENDIF

…

 !
 !-- Reorder input array for transposition
-   m = 0
+!$OMP PARALLEL PRIVATE ( i, j, k, l, m, ys )
+!$OMP DO
    DO  l = 0, pdims(2) - 1
+      m  = l * ( nxr_ya - nxl_y + 1 ) * ( nzt_ya - nzb_y + 1 ) * &
+               ( nyn_xa - nys_x + 1 )
       ys = 0 + l * ( nyn_xa - nys_x + 1 )
       DO  i = nxl_y, nxr_ya
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

…

 !
 !-- Reorder transposed array in a way that the x index is in first position
+!$OMP PARALLEL PRIVATE ( i, j, k )
+!$OMP DO
    DO  i = 0, nxa
       DO  k = nzb_x, nzt_xa
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

 #endif

…

 !-- Rearrange indices of input array in order to make data to be send
 !-- by MPI contiguous
+!$OMP PARALLEL PRIVATE ( i, j, k )
+!$OMP DO
    DO  j = 0, nya
       DO  k = nzb_y, nzt_ya
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

 !
…
 !-- of the data is necessary and no transposition has to be done.
    IF ( pdims(1) == 1 )  THEN
+!$OMP PARALLEL PRIVATE ( i, j, k )
+!$OMP DO
       DO  j = 0, nya
          DO  k = nzb_y, nzt_ya
…
          ENDDO
       ENDDO
+!$OMP END PARALLEL
       RETURN
    ENDIF

…

 !
 !-- Reorder transposed array
-   m = 0
+!$OMP PARALLEL PRIVATE ( i, j, k, l, m, zs )
+!$OMP DO
    DO  l = 0, pdims(1) - 1
+      m  = l * ( nyn_za - nys_z + 1 ) * ( nzt_ya - nzb_y + 1 ) * &
+               ( nxr_za - nxl_z + 1 )
       zs = 1 + l * ( nzt_ya - nzb_y + 1 )
       DO  j = nys_z, nyn_za
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

 #endif

…

 !-- Rearrange indices of input array in order to make data to be send
 !-- by MPI contiguous
+!$OMP PARALLEL PRIVATE ( i, j, k )
+!$OMP DO
    DO  k = 1, nza
       DO  i = nxl, nxra
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

 !
…
 !-- of the data is necessary and no transposition has to be done.
    IF ( pdims(1) == 1 )  THEN
+!$OMP PARALLEL PRIVATE ( i, j, k )
+!$OMP DO
       DO  k = 1, nza
          DO  i = nxl, nxra
…
          ENDDO
       ENDDO
+!$OMP END PARALLEL
       RETURN
    ENDIF

…

 !
 !-- Reorder transposed array
-   m = 0
+!$OMP PARALLEL PRIVATE ( i, j, k, l, m, xs )
+!$OMP DO
    DO  l = 0, pdims(1) - 1
+      m  = l * ( nzt_xa - nzb_x + 1 ) * nnx * ( nyn_xa - nys_x + 1 )
       xs = 0 + l * nnx
       DO  k = nzb_x, nzt_xa
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

 #endif

…

 !
 !-- Reorder input array for transposition
-   m = 0
+!$OMP PARALLEL PRIVATE ( i, j, k, l, m, zs )
+!$OMP DO
    DO  l = 0, pdims(1) - 1
+      m  = l * ( nyn_za - nys_z + 1 ) * ( nzt_ya - nzb_y + 1 ) * &
+               ( nxr_za - nxl_z + 1 )
       zs = 1 + l * ( nzt_ya - nzb_y + 1 )
       DO  j = nys_z, nyn_za
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

 !
…
 !
 !-- Reorder transposed array in a way that the y index is in first position
+!$OMP PARALLEL PRIVATE ( i, j, k )
+!$OMP DO
    DO  j = 0, nya
       DO  k = nzb_y, nzt_ya
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL
    ELSE
 !
 !-- Reorder the array in a way that the y index is in first position
+!$OMP PARALLEL PRIVATE ( i, j, k )
+!$OMP DO
    DO  k = nzb_y, nzt_ya
       DO  j = 0, nya
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL
 !
 !-- Move data to output array
+!$OMP PARALLEL PRIVATE ( i, j, k )
+!$OMP DO
    DO  k = nzb_y, nzt_ya
       DO  i = nxl_y, nxr_ya
…
       ENDDO
    ENDDO
+!$OMP END PARALLEL

    ENDIF
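The only non-mechanical edit in this file is the treatment of the counter m: previously it was set to zero once and incremented serially across all iterations of l, a loop-carried dependence that forbids parallelization; now each iteration computes its start offset in closed form, so the iterations of l become independent. A self-contained sketch of this idea (nblocks, block_size and work are illustrative names):

   PROGRAM offset_demo
   !
   !-- The serial running counter m is replaced by a closed-form start
   !-- offset per block, making the iterations of l independent
      IMPLICIT NONE
      INTEGER, PARAMETER ::  nblocks = 4, block_size = 8
      INTEGER            ::  i, l, m
      REAL               ::  work(nblocks*block_size)

   !$OMP PARALLEL PRIVATE ( i, l, m )
   !$OMP DO
      DO  l = 0, nblocks - 1
         m = l * block_size          ! was: a single m = 0 before the loop,
         DO  i = 1, block_size       ! incremented serially across all l
            m = m + 1
            work(m) = REAL( 100 * l + i )
         ENDDO
      ENDDO
   !$OMP END PARALLEL

      PRINT *, 'work(1) =', work(1), ' work(n) =', work(nblocks*block_size)
   END PROGRAM offset_demo

Each value of l now writes the disjoint slice work(l*block_size+1 : (l+1)*block_size), so the !$OMP DO distribution is race-free.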