- Timestamp: May 8, 2019 7:40:33 PM (6 years ago)
- Location: palm/trunk/SOURCE
- Files: 2 edited
palm/trunk/SOURCE/pmc_child_mod.f90
r3945 → r3962

@@ -22 +22 @@
 ! ------------------
 !
 !
 ! Former revisions:
 ! -----------------
 ! $Id$
+! Bugfixes in initial settings of child and parent communication patterns.
+!
+! 3945 2019-05-02 11:29:27Z raasch
 !
 ! 3932 2019-04-24 17:31:34Z suehring

@@ -583 +586 @@
 !-- recv -> parent to child transfer
 !-- send -> child to parent transfer
 INTEGER(iwp) :: arlen         !<
 INTEGER(iwp) :: myindex       !<
 INTEGER(iwp) :: i             !<
 INTEGER(iwp) :: ierr          !<
 INTEGER(iwp) :: istat         !<
 INTEGER(iwp) :: j             !<
+INTEGER(iwp) :: lo_nr_arrays  !<
 INTEGER(iwp) :: rcount        !<
 INTEGER(iwp) :: tag           !<
+INTEGER(iwp) :: total_npes    !<

 INTEGER(iwp), PARAMETER :: noindex = -1  !<

@@ -596 +601 @@
 INTEGER(idp) :: bufsize  !< size of MPI data window
 INTEGER(KIND=MPI_ADDRESS_KIND) :: winsize  !<
+
+INTEGER(iwp), DIMENSION(:,:), ALLOCATABLE :: myindex_s
+INTEGER(iwp), DIMENSION(:,:), ALLOCATABLE :: myindex_r

 INTEGER(iwp),DIMENSION(1024) :: req  !<

@@ -606 +614 @@
 Type(C_PTR) :: base_ptr  !<

+
+CALL MPI_COMM_SIZE (me%intra_comm, total_npes, ierr)
+
+lo_nr_arrays = me%pes(1)%nr_arrays
+
+ALLOCATE(myindex_s(lo_nr_arrays,0:total_npes-1))
+ALLOCATE(myindex_r(lo_nr_arrays,0:total_npes-1))
+
+myindex_s = 0
+
+!
+!-- Receive indices from child
+CALL MPI_ALLTOALL( myindex_s, lo_nr_arrays, MPI_INTEGER,                      &
+                   myindex_r, lo_nr_arrays, MPI_INTEGER,                      &
+                   me%intra_comm, ierr )

 myindex = 0

@@ -614 +637 @@
 DO i = 1, me%inter_npes
    ape => me%pes(i)
-   tag = 200
    DO j = 1, ape%nr_arrays
       ar => ape%array_list(j)
-      !
-      !-- Receive index from child
-      tag = tag + 1
-      CALL MPI_RECV( myindex, 1, MPI_INTEGER, i-1, tag, me%inter_comm,        &
-                     MPI_STATUS_IGNORE, ierr )
-      ar%recvindex = myindex
+      ar%recvindex = myindex_r(j,i-1)
       !
       !-- Determine max, because child buffer is allocated only once

@@ -637 +654 @@
    ENDDO
 ENDDO
+
 !
 !-- Create RMA (one sided communication) data buffer.

@@ -657 +675 @@
 rcount = 0
 bufsize = 8
+
+myindex_s = 0
+myindex_r = 0

 DO i = 1, me%inter_npes

@@ -668 +689 @@
          arlen = ape%nrele*ar%a_dim(1)
       ENDIF
-      tag = tag + 1
-      rcount = rcount + 1
+
       IF ( ape%nrele > 0 ) THEN
-         CALL MPI_ISEND( myindex, 1, MPI_INTEGER, i-1, tag, me%inter_comm,    &
-                         req(rcount), ierr )
          ar%sendindex = myindex
       ELSE
-         CALL MPI_ISEND( noindex, 1, MPI_INTEGER, i-1, tag, me%inter_comm,    &
-                         req(rcount), ierr )
          ar%sendindex = noindex
       ENDIF
-      !
-      !-- Maximum of 1024 pending requests
-      !   1024 is an arbitrary value just to make sure the number of pending
-      !   requests is getting too large. It is possible that this value has to
-      !   be adjusted in case of running the model on large number of cores.
-
-      IF ( rcount == 1024 ) THEN
-         CALL MPI_WAITALL( rcount, req, MPI_STATUSES_IGNORE, ierr )
-         rcount = 0
-      ENDIF
+
+      myindex_s(j,i-1) = ar%sendindex

       IF ( ape%nrele > 0 ) THEN

@@ -698 +706 @@
    ENDDO

-   IF ( rcount > 0 ) THEN
-      CALL MPI_WAITALL( rcount, req, MPI_STATUSES_IGNORE, ierr )
-   ENDIF
-
 ENDDO
+!
+!-- Send indices to parent
+
+CALL MPI_ALLTOALL( myindex_s, lo_nr_arrays, MPI_INTEGER,                      &
+                   myindex_r, lo_nr_arrays, MPI_INTEGER,                      &
+                   me%intra_comm, ierr)
+
+DEALLOCATE( myindex_s )
+DEALLOCATE( myindex_r )

 !
 !-- Create RMA (one sided communication) window for data buffer child to parent
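On the child side the changeset replaces one tagged MPI_RECV/MPI_ISEND handshake per array and per parent PE with two collective MPI_ALLTOALL calls on the intra communicator, which also removes the bookkeeping for the 1024-entry request window. Below is a minimal, self-contained sketch of that index-exchange pattern; it is not part of the changeset, and the names nr_arrays, index_s and index_r as well as the use of MPI_COMM_WORLD in place of me%intra_comm are illustrative assumptions.

!-- Minimal sketch (not from the changeset): every rank exchanges a fixed
!-- number of integers with every other rank in a single collective call,
!-- the way r3962 exchanges the transfer-buffer indices.
PROGRAM alltoall_index_sketch

    USE MPI

    IMPLICIT NONE

    INTEGER, PARAMETER :: nr_arrays = 3   !< indices exchanged per peer (assumed)

    INTEGER :: ierr        !<
    INTEGER :: myid        !<
    INTEGER :: p           !<
    INTEGER :: total_npes  !<

    INTEGER, DIMENSION(:,:), ALLOCATABLE :: index_s  !< one column per destination rank
    INTEGER, DIMENSION(:,:), ALLOCATABLE :: index_r  !< one column per source rank

    CALL MPI_INIT( ierr )
    CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
    CALL MPI_COMM_SIZE( MPI_COMM_WORLD, total_npes, ierr )

    ALLOCATE( index_s(nr_arrays,0:total_npes-1) )
    ALLOCATE( index_r(nr_arrays,0:total_npes-1) )
!
!-- Column p holds the indices this rank wants rank p to see (dummy values here)
    DO p = 0, total_npes-1
       index_s(:,p) = myid * 1000 + p
    ENDDO
!
!-- After the collective, column p of index_r holds what rank p sent to this rank
    CALL MPI_ALLTOALL( index_s, nr_arrays, MPI_INTEGER,                        &
                       index_r, nr_arrays, MPI_INTEGER,                        &
                       MPI_COMM_WORLD, ierr )

    DEALLOCATE( index_s )
    DEALLOCATE( index_r )

    CALL MPI_FINALIZE( ierr )

END PROGRAM alltoall_index_sketch

Each rank contributes one fixed-size column per peer, so a single collective replaces on the order of total_npes times nr_arrays point-to-point messages and cannot deadlock on mismatched tags.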
palm/trunk/SOURCE/pmc_parent_mod.f90
r3655 → r3962

@@ -26 +26 @@
 ! -----------------
 ! $Id$
+! Bugfixes in initial settings of child and parent communication patterns.
+!
+! 3655 2019-01-07 16:51:22Z knoop
 ! explicit kind settings
 !

@@ -473 +476 @@
 INTEGER(iwp), INTENT(IN) :: childid  !<

 INTEGER(iwp) :: arlen         !<
 INTEGER(iwp) :: i             !<
 INTEGER(iwp) :: ierr          !<
 INTEGER(iwp) :: j             !<
+INTEGER(iwp) :: lo_nr_arrays  !< store number of arrays in local variable
 INTEGER(iwp) :: myindex       !<
-INTEGER(iwp) :: rcount        !< count MPI requests
-INTEGER(iwp) :: tag           !<
+INTEGER(iwp) :: total_npes    !< total number of PEs, parent and child

 INTEGER(idp) :: bufsize  !< size of MPI data window
 INTEGER(KIND=MPI_ADDRESS_KIND) :: winsize  !<

-INTEGER(iwp), DIMENSION(1024) :: req  !<
+INTEGER(iwp), DIMENSION(:,:), ALLOCATABLE :: myindex_s
+INTEGER(iwp), DIMENSION(:,:), ALLOCATABLE :: myindex_r

 TYPE(C_PTR) :: base_ptr  !<

@@ -493 +497 @@
 REAL(wp),DIMENSION(:), POINTER, SAVE :: base_array_cp  !< base array for child to parent transfer

+CALL MPI_COMM_SIZE (children(childid)%intra_comm, total_npes, ierr)
 !
 !-- Parent to child direction
 myindex = 1
-rcount = 0
 bufsize = 8
+!
+!-- All child processes get the same number of arrays.
+!-- Therefore the number of arrays of the first child process can be used for the dimension.
+lo_nr_arrays = children(childid)%pes(1)%nr_arrays
+
+ALLOCATE( myindex_s(lo_nr_arrays,0:total_npes-1) )
+ALLOCATE( myindex_r(lo_nr_arrays,0:total_npes-1) )
+
+myindex_s = 0
+
 !
 !-- First stride: compute size and set index

@@ -503 +517 @@
    ape => children(childid)%pes(i)
-   tag = 200

    DO j = 1, ape%nr_arrays

@@ -516 +529 @@
       ENDIF
       ar%sendindex = myindex
-
-      tag = tag + 1
-      rcount = rcount + 1
-      CALL MPI_ISEND( myindex, 1, MPI_INTEGER, i-1, tag,                      &
-                      children(childid)%inter_comm, req(rcount), ierr )
-      !
-      !-- Maximum of 1024 pending requests
-
-      IF ( rcount == 1024 ) THEN
-         CALL MPI_WAITALL( rcount, req, MPI_STATUSES_IGNORE, ierr )
-         rcount = 0
-      ENDIF
+      !
+      !-- Using the intra communicator for MPI_Alltoall, the numbers of the
+      !-- child processes are after the parent ones.
+
+      myindex_s(j,i-1+children(childid)%model_npes) = myindex

       myindex = myindex + arlen

@@ -534 +539 @@
    ENDDO

-   IF ( rcount > 0 ) THEN
-      CALL MPI_WAITALL( rcount, req, MPI_STATUSES_IGNORE, ierr )
-   ENDIF
-
 ENDDO
+!
+!-- Using MPI_Alltoall to send indices from parent to child.
+!-- The data coming back from the child processes are ignored.
+
+CALL MPI_ALLTOALL( myindex_s, lo_nr_arrays, MPI_INTEGER,                      &
+                   myindex_r, lo_nr_arrays, MPI_INTEGER,                      &
+                   children(childid)%intra_comm, ierr )
+
+!
+!-- Using MPI_Alltoall to receive indices from child
+myindex_s = 0
+myindex_r = 0
+
+CALL MPI_ALLTOALL( myindex_s, lo_nr_arrays, MPI_INTEGER,                      &
+                   myindex_r, lo_nr_arrays, MPI_INTEGER,                      &
+                   children(childid)%intra_comm, ierr )
 !
 !-- Create RMA (One Sided Communication) window for data buffer parent to

@@ -585 +602 @@
 DO i = 1, children(childid)%inter_npes
    ape => children(childid)%pes(i)
-   tag = 300
    DO j = 1, ape%nr_arrays
       ar => ape%array_list(j)
       !
       !-- Receive index from child
-      tag = tag + 1
-      CALL MPI_RECV( myindex, 1, MPI_INTEGER, i-1, tag,                       &
-                     children(childid)%inter_comm, MPI_STATUS_IGNORE, ierr )
       IF ( ar%nrdims == 3 ) THEN
          bufsize = MAX( bufsize,                                              &

@@ -599 +612 @@
          bufsize = MAX( bufsize, INT( ape%nrele, MPI_ADDRESS_KIND ) )
       ENDIF
-      ar%recvindex = myindex
+      ar%recvindex = myindex_r(j,i-1+children(childid)%model_npes)
    ENDDO
 ENDDO
+
+DEALLOCATE( myindex_s )
+DEALLOCATE( myindex_r )

 !
 !-- Create RMA (one sided communication, RMA = Remote Memory Access) data buffer.
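The parent side uses the same pair of collectives, but writes and reads its entry for child peer i at column (i-1) + children(childid)%model_npes: on the intra communicator the parent ranks come first and the child ranks follow, so the child's rank i-1 on the inter communicator maps to that offset column. The stand-alone sketch below illustrates only this rank mapping; model_npes and inter_npes are assumed example values, not taken from the changeset.

!-- Sketch (not from the changeset) of the intra-communicator rank layout
!-- behind the parent-side indexing: parent ranks occupy 0..model_npes-1,
!-- child ranks follow, so child peer i maps to column (i-1) + model_npes.
PROGRAM intra_rank_offset_sketch

    IMPLICIT NONE

    INTEGER, PARAMETER :: model_npes = 4  !< number of parent PEs (assumed)
    INTEGER, PARAMETER :: inter_npes = 2  !< number of child PEs (assumed)

    INTEGER :: i    !<
    INTEGER :: col  !<

    DO i = 1, inter_npes
       col = (i-1) + model_npes
       PRINT '(A,I0,A,I0)', 'child peer ', i, ' -> intra-communicator column ', col
    ENDDO

END PROGRAM intra_rank_offset_sketch

The child module needs no such offset and indexes myindex_r(j,i-1) directly, because its peers on the intra communicator are the parent ranks, which occupy the leading positions.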