Changeset 2599
- Timestamp: Nov 1, 2017 1:18:45 PM
- Location: palm/trunk/SOURCE
- Files: 8 edited
palm/trunk/SOURCE/Makefile
- Diff: r2576 → r2599
- A commented-out array-bounds-checking option is added right after the palm_simple_install include marker:
      # The following line is needed for palm_simple_install, don't remove it!
      #to_be_replaced_by_include
      #BOUNDS="-Rbc"      # Array bounds checking. Compromises performance seriously.
palm/trunk/SOURCE/parin.f90
- Diff: r2575 → r2599
- New changelog entry: "The i/o grouping is updated to work correctly also in nested runs."
- New local variables myworldid and numworldprocs are declared and set with MPI_COMM_RANK and MPI_COMM_SIZE on MPI_COMM_WORLD.
- maximum_parallel_io_streams is now limited by numworldprocs instead of numprocs, and the I/O grouping is computed from the world rank and size:
      io_blocks = numworldprocs / maximum_parallel_io_streams
      io_group  = MOD( myworldid+1, io_blocks )
  The previous numprocs/myid based assignments are kept as comments.
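For orientation, the following standalone sketch shows the pattern the new code follows: the I/O grouping is derived from the rank and size of MPI_COMM_WORLD, so it is identical in all models of a nested run. It is illustrative only; the hardcoded default of maximum_parallel_io_streams = -1 and the PRINT statement are assumptions of the sketch, not PALM code.

   PROGRAM io_grouping_sketch
   !-- Minimal, standalone illustration (not the actual parin.f90 code) of how
   !-- the I/O grouping can be derived from MPI_COMM_WORLD so that it is
   !-- consistent across all models of a nested run.
      USE MPI
      IMPLICIT NONE

      INTEGER ::  ierr                          !< MPI error code
      INTEGER ::  myworldid                     !< rank in MPI_COMM_WORLD
      INTEGER ::  numworldprocs                 !< size of MPI_COMM_WORLD
      INTEGER ::  maximum_parallel_io_streams   !< namelist value, -1 = use all
      INTEGER ::  io_blocks                     !< number of I/O blocks
      INTEGER ::  io_group                      !< I/O group of this process

      CALL MPI_INIT( ierr )
      CALL MPI_COMM_RANK( MPI_COMM_WORLD, myworldid, ierr )
      CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numworldprocs, ierr )

      maximum_parallel_io_streams = -1          ! assumed default for this sketch

   !-- Limit the number of streams by the total number of processes
      IF ( maximum_parallel_io_streams == -1  .OR.                             &
           maximum_parallel_io_streams > numworldprocs )  THEN
         maximum_parallel_io_streams = numworldprocs
      ENDIF

   !-- Split the processes into io_blocks groups; processes with the same
   !-- io_group value perform their I/O at the same time
      io_blocks = numworldprocs / maximum_parallel_io_streams
      io_group  = MOD( myworldid+1, io_blocks )

      PRINT*, 'rank ', myworldid, ' belongs to I/O group ', io_group

      CALL MPI_FINALIZE( ierr )
   END PROGRAM io_grouping_sketch

Because the grouping is based on the world communicator rather than the per-model communicator, every process of a nested run ends up in a well-defined I/O group even before the model communicators are fully set up.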
palm/trunk/SOURCE/pmc_child_mod.f90
- Diff: r2101 → r2599
- New changelog entry: "Some cleanup and commenting improvements only."
- Comment typos are corrected ("Intra-communicater" → "Intra-communicator", "Broadcat" → "Broadcast") and the wording "PE(s)" is replaced by "process(es)" throughout the comments.
- Superfluous blank lines are removed and the indentation of several ENDDO/ENDIF statements is fixed.
- The TODO note on the limit of 1024 outstanding MPI requests is extended ("Outstanding = pending?").
- No functional changes.
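The routines of this module rely on MPI one-sided communication (RMA): a window is created over the intra-communicator, MPI_GET is issued inside a fence epoch, and the fetched data is only valid after the closing MPI_WIN_FENCE. Below is a minimal, self-contained sketch of that fence/get pattern; it is illustrative only, the buffer sizes and names are assumptions of the sketch, and MPI_COMM_WORLD stands in for the merged intra-communicator.

   PROGRAM rma_get_sketch
   !-- Minimal sketch of the one-sided (RMA) pattern used in the pmc_* modules:
   !-- every process exposes a small integer array in an MPI window, and after
   !-- a fence each process reads ("gets") the first element of every other
   !-- process.  Illustrative code only, not taken from PALM.
      USE MPI
      IMPLICIT NONE

      INTEGER, PARAMETER ::  nloc = 4            !< local window size (elements)

      INTEGER                        ::  ierr, rank, npes, win, i, intsize
      INTEGER(KIND=MPI_ADDRESS_KIND) ::  winsize, target_disp
      INTEGER, DIMENSION(nloc)       ::  winbuf    !< memory exposed in the window
      INTEGER, ALLOCATABLE           ::  remote(:) !< values fetched with MPI_GET

      CALL MPI_INIT( ierr )
      CALL MPI_COMM_RANK( MPI_COMM_WORLD, rank, ierr )
      CALL MPI_COMM_SIZE( MPI_COMM_WORLD, npes, ierr )

      winbuf = rank                               ! data exposed by this process
      ALLOCATE( remote(0:npes-1) )
      remote = -1

      CALL MPI_TYPE_SIZE( MPI_INTEGER, intsize, ierr )
      winsize = nloc * intsize
      CALL MPI_WIN_CREATE( winbuf, winsize, intsize, MPI_INFO_NULL,            &
                           MPI_COMM_WORLD, win, ierr )

   !-- Open an access epoch; MPI_GET is non-blocking, the data is only
   !-- guaranteed to be available after the closing fence
      CALL MPI_WIN_FENCE( 0, win, ierr )
      DO  i = 0, npes-1
         target_disp = 0
         CALL MPI_GET( remote(i), 1, MPI_INTEGER, i, target_disp, 1,           &
                       MPI_INTEGER, win, ierr )
      ENDDO
      CALL MPI_WIN_FENCE( 0, win, ierr )

      PRINT*, 'rank ', rank, ' fetched ', remote

      CALL MPI_WIN_FREE( win, ierr )
      CALL MPI_FINALIZE( ierr )
   END PROGRAM rma_get_sketch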
palm/trunk/SOURCE/pmc_general_mod.f90
- Diff: r2101 → r2599
- New changelog entry: "Some cleanup and commenting improvements only."
- Superfluous blank lines in pmc_g_setname and pmc_sort are removed; no functional changes.
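For reference, the pmc_sort loop visible in the diff is a plain exchange sort over the columns of a 2-D integer index array, keyed on row sort_ind. The following standalone sketch shows the same idea; the driver data and the subroutine name sort_columns are assumptions of the sketch, not the PALM routine itself.

   PROGRAM sort_sketch
   !-- Standalone illustration of the exchange sort used in pmc_sort: the
   !-- columns of a 2-D integer array are sorted in ascending order of the
   !-- values in row sort_ind.
      IMPLICIT NONE

      INTEGER, DIMENSION(2,4) ::  array = RESHAPE( (/ 3,30, 1,10, 4,40, 2,20 /), &
                                                   (/ 2,4 /) )

      CALL sort_columns( array, 1 )
      PRINT*, 'sorted keys:   ', array(1,:)     ! 1 2 3 4
      PRINT*, 'sorted values: ', array(2,:)     ! 10 20 30 40

    CONTAINS

       SUBROUTINE sort_columns( array, sort_ind )

          INTEGER, DIMENSION(:,:), INTENT(INOUT) ::  array     !< columns to sort
          INTEGER, INTENT(IN)                    ::  sort_ind  !< key row

          INTEGER                           ::  i, j, n
          INTEGER, DIMENSION(SIZE(array,1)) ::  tmp

          n = SIZE( array, 2 )
          DO  j = 1, n-1
             DO  i = j+1, n
   !--          Swap whole columns whenever the key of column i is smaller
                IF ( array(sort_ind,i) < array(sort_ind,j) )  THEN
                   tmp = array(:,i)
                   array(:,i) = array(:,j)
                   array(:,j) = tmp
                ENDIF
             ENDDO
          ENDDO

       END SUBROUTINE sort_columns

   END PROGRAM sort_sketch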
palm/trunk/SOURCE/pmc_handle_communicator_mod.f90
- Diff: r2516 → r2599
- New changelog entry: "Separate peer communicator peer_comm introduced for MPI_INTERCOMM_CREATE. Some cleanup and commenting improvements."
- A new module variable peer_comm is declared. MPI_COMM_WORLD is duplicated with MPI_COMM_DUP and the duplicate is passed as the peer communicator to both MPI_INTERCOMM_CREATE calls (parent-to-child and child-to-parent) instead of MPI_COMM_WORLD itself.
- Comments are reworded ("PE" → "process", "non-tero" → "non-zero") and the descriptions of the process-to-model assignment and of the inter-communicator creation are clarified.
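The core of this change is the communicator setup: MPI_COMM_WORLD is split into per-model intra-communicators, a duplicate of MPI_COMM_WORLD (peer_comm) is created, and the inter-communicators between parent and child models are then built with MPI_INTERCOMM_CREATE on top of peer_comm. The two-model sketch below shows this pattern; it is illustrative only, and the 50/50 split, the MERGE-based leader selection and the tag value are assumptions of the sketch, not the PALM layout logic.

   PROGRAM intercomm_sketch
   !-- Minimal sketch of the nesting communicator setup: split MPI_COMM_WORLD
   !-- into two models, duplicate MPI_COMM_WORLD as peer communicator, and
   !-- create an inter-communicator between the two models.
   !-- Run with an even number of at least two processes.
      USE MPI
      IMPLICIT NONE

      INTEGER ::  ierr, world_rank, world_npes
      INTEGER ::  color          !< model id: 0 = parent, 1 = child
      INTEGER ::  comm           !< intra-communicator of this model
      INTEGER ::  peer_comm      !< duplicate of MPI_COMM_WORLD
      INTEGER ::  inter_comm     !< inter-communicator parent <-> child
      INTEGER ::  remote_leader  !< world rank of process 0 of the other model
      INTEGER, PARAMETER ::  tag = 500

      CALL MPI_INIT( ierr )
      CALL MPI_COMM_RANK( MPI_COMM_WORLD, world_rank, ierr )
      CALL MPI_COMM_SIZE( MPI_COMM_WORLD, world_npes, ierr )

   !-- First half of the processes forms the parent model, second half the child
      color = MERGE( 0, 1, world_rank < world_npes/2 )
      CALL MPI_COMM_SPLIT( MPI_COMM_WORLD, color, world_rank, comm, ierr )

   !-- Use a duplicate of MPI_COMM_WORLD as peer communicator, so that the
   !-- inter-communicator handshake cannot interfere with other traffic on
   !-- MPI_COMM_WORLD
      CALL MPI_COMM_DUP( MPI_COMM_WORLD, peer_comm, ierr )

      remote_leader = MERGE( world_npes/2, 0, color == 0 )
      CALL MPI_INTERCOMM_CREATE( comm, 0, peer_comm, remote_leader, tag,       &
                                 inter_comm, ierr )

      PRINT*, 'world rank ', world_rank, ' is in model ', color

      CALL MPI_COMM_FREE( inter_comm, ierr )
      CALL MPI_FINALIZE( ierr )
   END PROGRAM intercomm_sketch

Using a duplicate rather than MPI_COMM_WORLD itself as the peer communicator isolates the collective handshake of MPI_INTERCOMM_CREATE from any other communication that may be posted on the world communicator.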
palm/trunk/SOURCE/pmc_interface_mod.f90
- Diff: r2582 → r2599
- New changelog entry: "Some cleanup and commenting improvements only."
palm/trunk/SOURCE/pmc_mpi_wrapper_mod.f90
- Diff: r2101 → r2599
- New changelog entry: "Some cleanup and commenting improvements only."
- A blank line is inserted between the declarations and the executable part of the send/receive wrapper routines, continuation lines are realigned, and the comment "PE 0 on parent broadcast to all child PEs" is reworded to "Process 0 on parent broadcast to all child processes".
- No functional changes.
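This module consists of thin wrappers that hide the stored parent/child communicators from the callers. The reduced sketch below shows that wrapper idea; it is illustrative only, and the module name wrapper_sketch, the routine names and the use of MPI_COMM_WORLD as the stored communicator are assumptions of the sketch, not the PALM interfaces.

   MODULE wrapper_sketch
   !-- Illustrative sketch of the thin-wrapper idea: the communicator is kept
   !-- as module data, so callers only pass buffer, partner rank and tag.
      USE MPI
      IMPLICIT NONE
      INTEGER ::  m_to_partner_comm   !< communicator used by the wrappers
    CONTAINS
       SUBROUTINE sketch_send_real_r1( buf, n, partner_rank, tag, ierr )
          REAL, INTENT(IN)     ::  buf(:)        !< data to send
          INTEGER, INTENT(IN)  ::  n             !< number of elements
          INTEGER, INTENT(IN)  ::  partner_rank  !< destination rank
          INTEGER, INTENT(IN)  ::  tag           !< message tag
          INTEGER, INTENT(OUT) ::  ierr          !< MPI error code
          CALL MPI_SEND( buf, n, MPI_REAL, partner_rank, tag,                  &
                         m_to_partner_comm, ierr )
       END SUBROUTINE sketch_send_real_r1

       SUBROUTINE sketch_recv_real_r1( buf, n, partner_rank, tag, ierr )
          REAL, INTENT(OUT)    ::  buf(:)
          INTEGER, INTENT(IN)  ::  n, partner_rank, tag
          INTEGER, INTENT(OUT) ::  ierr
          CALL MPI_RECV( buf, n, MPI_REAL, partner_rank, tag,                  &
                         m_to_partner_comm, MPI_STATUS_IGNORE, ierr )
       END SUBROUTINE sketch_recv_real_r1
   END MODULE wrapper_sketch

   PROGRAM wrapper_demo
   !-- Two-process demo: rank 1 sends a small array to rank 0 via the wrappers.
      USE MPI
      USE wrapper_sketch
      IMPLICIT NONE
      INTEGER ::  ierr, rank
      REAL    ::  buf(3)

      CALL MPI_INIT( ierr )
      CALL MPI_COMM_RANK( MPI_COMM_WORLD, rank, ierr )
      m_to_partner_comm = MPI_COMM_WORLD        ! the sketch uses MPI_COMM_WORLD

      IF ( rank == 1 )  THEN
         buf = (/ 1.0, 2.0, 3.0 /)
         CALL sketch_send_real_r1( buf, 3, 0, 100, ierr )
      ELSEIF ( rank == 0 )  THEN
         CALL sketch_recv_real_r1( buf, 3, 1, 100, ierr )
         PRINT*, 'received ', buf
      ENDIF

      CALL MPI_FINALIZE( ierr )
   END PROGRAM wrapper_demo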
palm/trunk/SOURCE/pmc_parent_mod.f90
- Diff: r2101 → r2599
- New changelog entry: "Some cleanup and commenting improvements only."
- The comment typo "Intra communicater" is corrected to "Intra communicator" and the wording "PE(s)" is replaced by "process(es)" throughout the comments (e.g. "Sort to ascending parent process order", "Child processes are located behind parent process", "index/number of elements on remote process").
- Superfluous blank lines are removed, the indentation of several ENDDO statements is fixed, and the TODO note on the limit of 1024 outstanding MPI requests is extended ("Does outstanding mean pending?").
- No functional changes.
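Data retrieval from the child buffers uses MPI passive-target RMA: the parent locks the target window with a shared lock, issues MPI_GET and unlocks, without any matching call on the target side. The minimal sketch below shows that access pattern; it is illustrative only, should be run with two processes, and MPI_COMM_WORLD stands in for the merged intra-communicator.

   PROGRAM passive_rma_sketch
   !-- Illustrative sketch of passive-target RMA: the origin process locks the
   !-- target's window with a shared lock, issues MPI_GET and unlocks; the
   !-- fetched data is available after MPI_WIN_UNLOCK.  Not PALM code.
      USE MPI
      IMPLICIT NONE

      INTEGER, PARAMETER             ::  nr = 5
      INTEGER                        ::  ierr, rank, win, realsize
      INTEGER(KIND=MPI_ADDRESS_KIND) ::  winsize, target_disp
      REAL, DIMENSION(nr)            ::  winbuf   !< memory exposed in the window
      REAL, DIMENSION(nr)            ::  buf      !< receive buffer on the origin

      CALL MPI_INIT( ierr )
      CALL MPI_COMM_RANK( MPI_COMM_WORLD, rank, ierr )

      winbuf = REAL( rank + 1 )                   ! data provided by each process

      CALL MPI_TYPE_SIZE( MPI_REAL, realsize, ierr )
      winsize = nr * realsize
      CALL MPI_WIN_CREATE( winbuf, winsize, realsize, MPI_INFO_NULL,           &
                           MPI_COMM_WORLD, win, ierr )
   !-- Make sure the windows are filled before any origin starts to read
      CALL MPI_BARRIER( MPI_COMM_WORLD, ierr )

      IF ( rank == 0 )  THEN
   !--   Passive-target access epoch on the window of process 1
         target_disp = 0
         CALL MPI_WIN_LOCK( MPI_LOCK_SHARED, 1, 0, win, ierr )
         CALL MPI_GET( buf, nr, MPI_REAL, 1, target_disp, nr, MPI_REAL,        &
                       win, ierr )
         CALL MPI_WIN_UNLOCK( 1, win, ierr )     ! data is available after unlock
         PRINT*, 'rank 0 fetched ', buf
      ENDIF

   !-- Do not free the window before all accesses are complete
      CALL MPI_BARRIER( MPI_COMM_WORLD, ierr )
      CALL MPI_WIN_FREE( win, ierr )
      CALL MPI_FINALIZE( ierr )
   END PROGRAM passive_rma_sketch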