Changeset 1896 for palm/trunk/SOURCE

Timestamp: May 3, 2016 8:06:41 AM (9 years ago)
File: 1 edited
palm/trunk/SOURCE/pmc_client_mod.f90
Differences from r1851 to r1896:

The revision-history header of the module (lines 20-24) now documents the
change:

    r1851:
        ! Current revisions:
        ! ------------------
        !
        !
        ! Former revisions:

    r1896:
        ! Current revisions:
        ! ------------------
        ! re-formatted to match PALM style
        !
        ! Former revisions:

The rest of the changeset re-writes the whole module in PALM coding style:
Fortran keywords in upper case, user identifiers in lower case, USE statements
with alphabetically sorted ONLY lists, declarations annotated with "!<"
comments, block comments in the "!--" convention, and redundant RETURN
statements removed.  The module preamble changes as follows (the unchanged
lines of the conditional USE MPI block are elided):

    r1851 (lines 71-96):

        use, intrinsic ::  iso_c_binding
        ...
        USE kinds
        USE PMC_general, ONLY: ClientDef, DA_NameDef, DA_Namelen, PMC_STATUS_OK, PMC_DA_NAME_ERR, PeDef, ArrayDef, &
                               DA_Desclen, DA_Namelen, PMC_G_SetName, PMC_MAX_ARRAY
        USE PMC_handle_communicator, ONLY: m_model_comm,m_model_rank,m_model_npes, m_to_server_comm
        USE PMC_MPI_wrapper, ONLY: PMC_Send_to_Server, PMC_Recv_from_Server, PMC_Time, &
                                   PMC_Bcast, PMC_Inter_Bcast, PMC_Alloc_mem
        IMPLICIT none
        PRIVATE
        SAVE

        Type(ClientDef) :: me

        INTEGER :: next_array_in_list = 0
        INTEGER :: myIndex = 0     !Counter and unique number for Data Arrays

     !  INTERFACE section

        INTERFACE PMC_ClientInit
           MODULE procedure PMC_ClientInit
        END INTERFACE PMC_ClientInit

    r1896 (lines 71-104):

        USE, INTRINSIC ::  iso_c_binding
        ...
        USE kinds

        USE pmc_general,                                                           &
            ONLY:  arraydef, clientdef, da_desclen, da_namedef, da_namelen, pedef, &
                   pmc_da_name_err, pmc_g_setname, pmc_max_array, pmc_status_ok

        USE pmc_handle_communicator,                                               &
            ONLY:  m_model_comm, m_model_npes, m_model_rank, m_to_server_comm

        USE pmc_mpi_wrapper,                                                       &
            ONLY:  pmc_alloc_mem, pmc_bcast, pmc_inter_bcast,                      &
                   pmc_recv_from_server, pmc_send_to_server, pmc_time

        IMPLICIT NONE

        PRIVATE
        SAVE

        TYPE(clientdef) ::  me                      !<

        INTEGER ::  myindex = 0                     !< counter and unique number for data arrays
        INTEGER ::  next_array_in_list = 0          !<

        INTERFACE pmc_clientinit
           MODULE PROCEDURE pmc_clientinit
        END INTERFACE PMC_ClientInit

Both revisions continue with analogous generic-interface blocks and the PUBLIC
statement for pmc_set_dataarray_name, pmc_c_get_2d_index_list,
pmc_c_clear_next_array_list, pmc_c_getnextarray, pmc_c_set_dataarray (2d and
3d module procedures), pmc_c_setind_and_allocmem, pmc_c_getbuffer and
pmc_c_putbuffer (r1851 lines 99-137, r1896 lines 106-143); in r1896 these
blocks are written in lower case and ordered alphabetically.

The removed r1851 implementations of the module procedures follow; the
re-formatted r1896 implementations are appended after them.
The r1851 implementations (removed) begin with PMC_ClientInit (lines 141-172):

        SUBROUTINE PMC_ClientInit
           IMPLICIT none

           INTEGER :: i
           INTEGER :: istat

        !  Tailor MPI environment

           me%model_comm = m_model_comm
           me%inter_comm = m_to_server_comm

        !  Get rank and size
           CALL MPI_Comm_rank (me%model_comm, me%model_rank, istat);
           CALL MPI_Comm_size (me%model_comm, me%model_npes, istat);
           CALL MPI_Comm_remote_size (me%inter_comm, me%inter_npes, istat);

        !  intra communicater is used for MPI_Get
           CALL MPI_Intercomm_merge (me%inter_comm, .true., me%intra_comm, istat);
           CALL MPI_Comm_rank (me%intra_comm, me%intra_rank, istat);
           ALLOCATE (me%PEs(me%inter_npes))

        !
        !-- Allocate for all Server PEs an array of TYPE ArrayDef to store information of transfer array
           do i=1,me%inter_npes
              ALLOCATE(me%PEs(i)%array_list(PMC_MAX_ARRAY))
           end do

        !  if(me%model_rank == 0) write(0,'(a,5i6)') 'PMC_ClientInit ',me%model_rank,me%model_npes,me%inter_npes,me%intra_rank

           return
        END SUBROUTINE PMC_ClientInit
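Not part of the changeset: for readers unfamiliar with the MPI calls used in
PMC_ClientInit, the following minimal sketch shows the same communicator setup
in isolation.  The routine name client_comm_setup and its dummy arguments are
illustrative assumptions, and error handling is omitted.

        !-- Sketch: derive rank/size information and an intra-communicator from
        !-- an existing client <-> server intercommunicator (illustration only)
        SUBROUTINE client_comm_setup( model_comm, inter_comm )

           USE MPI
           IMPLICIT NONE

           INTEGER, INTENT(IN) ::  model_comm  !< communicator of the client model
           INTEGER, INTENT(IN) ::  inter_comm  !< client <-> server intercommunicator

           INTEGER ::  ierr, intra_comm, intra_rank
           INTEGER ::  model_npes, model_rank, server_npes

        !-- Rank and size within the client model
           CALL MPI_COMM_RANK( model_comm, model_rank, ierr )
           CALL MPI_COMM_SIZE( model_comm, model_npes, ierr )
        !-- Number of PEs in the remote (server) group
           CALL MPI_COMM_REMOTE_SIZE( inter_comm, server_npes, ierr )
        !-- Merge both groups into one intra-communicator; PEs passing .TRUE.
        !-- are ordered after those passing .FALSE..  The merged communicator
        !-- can later carry MPI RMA windows spanning client and server PEs.
           CALL MPI_INTERCOMM_MERGE( inter_comm, .TRUE., intra_comm, ierr )
           CALL MPI_COMM_RANK( intra_comm, intra_rank, ierr )

        END SUBROUTINE client_comm_setup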
The removed PMC_Set_DataArray_Name (lines 174-226) checks that the server and
client array names do not exceed DA_Namelen, fills a DA_NameDef record
(couple_index, ServerDesc, NameOnServer, ClientDesc, NameOnClient) on model
rank 0, broadcasts the record to all client PEs with PMC_Bcast on
m_model_comm, and then repeats the broadcast towards the server PEs over the
intercommunicator m_to_server_comm: model rank 0 passes MPI_ROOT as root
argument, all other client PEs pass MPI_PROC_NULL.  Finally the name is
registered with PMC_G_SetName.

The removed PMC_Set_DataArray_Name_LastEntry (lines 228-246) marks the end of
the name list by broadcasting a record with couple_index = -1 to the server
PEs in the same way.

The removed PMC_C_Get_2D_index_list (lines 248 ff.) creates an MPI window on
me%intra_comm (MPI_Win_create), opens and closes an exposure epoch with two
MPI_Win_fence calls, and uses MPI_Get to read, for every server PE, the number
of elements of the horizontal slice into the array NrEle; a comment notes that
the data is only available in NrEle after the closing MPI_Win_fence because
MPI_Get is non-blocking.
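Not part of the changeset: the MPI_ROOT / MPI_PROC_NULL arguments above follow
the standard rules for collective calls on an intercommunicator.  The sketch
below shows the pattern with a direct MPI_BCAST call instead of the pmc_bcast
wrapper; the routine name bcast_to_server and its arguments are illustrative
assumptions.

        !-- Sketch: broadcast one integer from the client root PE to all server
        !-- PEs over an intercommunicator (illustration only).  On the sending
        !-- side the root PE passes MPI_ROOT, all other PEs of that side pass
        !-- MPI_PROC_NULL; the PEs of the remote side would pass the rank of
        !-- the sender.
        SUBROUTINE bcast_to_server( value, model_rank, to_server_comm )

           USE MPI
           IMPLICIT NONE

           INTEGER, INTENT(INOUT) ::  value           !< value to be broadcast
           INTEGER, INTENT(IN)    ::  model_rank      !< rank within the client model
           INTEGER, INTENT(IN)    ::  to_server_comm  !< client <-> server intercommunicator

           INTEGER ::  ierr, myroot

           IF ( model_rank == 0 )  THEN
              myroot = MPI_ROOT        ! this PE provides the data
           ELSE
              myroot = MPI_PROC_NULL   ! other client PEs only take part formally
           ENDIF

           CALL MPI_BCAST( value, 1, MPI_INTEGER, myroot, to_server_comm, ierr )

        END SUBROUTINE bcast_to_server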
PMC_C_Get_2D_index_list then allocates aPE%locInd for every server PE with
NrEle > 0, creates a second window (indWin2) and fetches the (i,j) index pairs
of the horizontal slice with MPI_Win_lock (MPI_LOCK_SHARED) / MPI_Get /
MPI_Win_unlock, stores them in aPE%locInd and aPE%NrEle, and frees both
windows after a barrier on me%intra_comm.

The remaining removed r1851 routines (lines 326-705):

- PMC_C_clear_next_array_list resets next_array_in_list to 0.
- PMC_C_GetNextArray (logical function) steps through the array list of the
  first PE and returns the next array name, or .false. once all arrays have
  been handled.
- PMC_C_Set_DataArray_2d / PMC_C_Set_DataArray_3d store the number of
  dimensions, the dimension sizes and the C address (c_loc) of a 2-d or 3-d
  data array in the array list of every server PE.
- PMC_C_setInd_and_AllocMem receives the buffer indices for the
  server-to-client direction with MPI_Recv (tags 200+), determines the maximum
  required buffer size, allocates the RMA data buffer with PMC_Alloc_mem, then
  sends the client-to-server indices with MPI_Isend (tags 300+, at most 1024
  outstanding requests before MPI_Waitall), allocates the client-to-server
  buffer, creates the RMA window me%win_server_client with MPI_Win_create on
  me%intra_comm and aborts with "Client Buffer too small" if an array does not
  fit into the buffer.
- PMC_C_GetBuffer waits in an MPI_Barrier on me%intra_comm (optionally timed
  into WaitTime) until the server has filled its buffer, reads the data with
  passive-target RMA (MPI_Win_lock / MPI_Get / MPI_Win_unlock on
  me%win_server_client) and unpacks it via c_f_pointer into the 2-d or 3-d
  model arrays using the stored (i,j) index list.
- PMC_C_PutBuffer does the reverse: it packs the model arrays into the send
  buffer and signals the server with a barrier on me%intra_comm.
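Not part of the changeset: the fence-synchronised window access used above can
be illustrated in a self-contained form.  The routine name fetch_remote_values
and all of its arguments are assumptions made for this sketch; every PE of
comm must call it, and error handling is omitted.

        !-- Sketch: fetch one integer from every PE of a communicator with
        !-- fence-synchronised one-sided communication (illustration only)
        SUBROUTINE fetch_remote_values( comm, npes, remote_val, fetched )

           USE MPI
           IMPLICIT NONE

           INTEGER, INTENT(IN)                   ::  comm        !< communicator spanning all PEs
           INTEGER, INTENT(IN)                   ::  npes        !< number of PEs in comm
           INTEGER, INTENT(IN), TARGET           ::  remote_val  !< value exposed to the other PEs
           INTEGER, INTENT(OUT), DIMENSION(npes) ::  fetched     !< values fetched from all PEs

           INTEGER                        ::  i, ierr, win
           INTEGER(KIND=MPI_ADDRESS_KIND) ::  disp, winsize

        !-- Expose the local value in an RMA window (size of one integer in bytes)
           winsize = STORAGE_SIZE( remote_val ) / 8
           CALL MPI_WIN_CREATE( remote_val, winsize, INT( winsize ),            &
                                MPI_INFO_NULL, comm, win, ierr )

        !-- Opening fence separates window creation from the access epoch
           CALL MPI_WIN_FENCE( 0, win, ierr )

           disp = 0
           DO  i = 1, npes
              CALL MPI_GET( fetched(i), 1, MPI_INTEGER, i-1, disp, 1,           &
                            MPI_INTEGER, win, ierr )
           ENDDO

        !-- MPI_GET is non-blocking; the data is guaranteed to be in fetched(:)
        !-- only after the closing fence
           CALL MPI_WIN_FENCE( 0, win, ierr )

           CALL MPI_WIN_FREE( win, ierr )

        END SUBROUTINE fetch_remote_values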
The re-formatted r1896 implementations (added) follow.  As a representative
example, PMC_ClientInit becomes (lines 149-178):

        SUBROUTINE pmc_clientinit

           IMPLICIT NONE

           INTEGER ::  i      !<
           INTEGER ::  istat  !<

        !
        !-- Get / define the MPI environment
           me%model_comm = m_model_comm
           me%inter_comm = m_to_server_comm

           CALL MPI_COMM_RANK( me%model_comm, me%model_rank, istat )
           CALL MPI_COMM_SIZE( me%model_comm, me%model_npes, istat )
           CALL MPI_COMM_REMOTE_SIZE( me%inter_comm, me%inter_npes, istat )
        !
        !-- Intra-communicator is used for MPI_GET
           CALL MPI_INTERCOMM_MERGE( me%inter_comm, .TRUE., me%intra_comm, istat )
           CALL MPI_COMM_RANK( me%intra_comm, me%intra_rank, istat )

           ALLOCATE( me%pes(me%inter_npes) )

        !
        !-- Allocate an array of type arraydef for all server PEs to store
        !-- information of the transfer array
           DO  i = 1, me%inter_npes
              ALLOCATE( me%pes(i)%array_list(pmc_max_array) )
           ENDDO

        END SUBROUTINE pmc_clientinit

The other routines (pmc_set_dataarray_name, pmc_set_dataarray_name_lastentry,
pmc_c_get_2d_index_list, pmc_c_clear_next_array_list, pmc_c_getnextarray,
pmc_c_set_dataarray_2d / _3d, pmc_c_setind_and_allocmem, pmc_c_getbuffer and
pmc_c_putbuffer, lines 182-871) are re-formatted in the same way and with
unchanged functionality: upper-case MPI calls, lower-case identifiers, "!--"
block comments, "!<" declaration comments, the commented-out debug WRITE and
the redundant RETURN statements removed.  In addition, r1896 inserts TODO
comments where the code needs further explanation, for example on the doubled
MPI_WIN_FENCE calls in pmc_c_get_2d_index_list, the broadcasts in
pmc_set_dataarray_name, the 1024-request limit and the "Client Buffer too
small" abort (to be replaced by a standard PALM message) in
pmc_c_setind_and_allocmem, and the extra MPI_BARRIER in pmc_c_getbuffer.

Both revisions end with

        #endif
        END MODULE pmc_client
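Not part of the changeset: the passive-target transfer that pmc_c_getbuffer
performs on the RMA window can be shown in isolation.  In the sketch below the
routine name get_from_server and all arguments are assumptions, DOUBLE
PRECISION stands in for PALM's REAL(wp), and error handling is omitted.

        !-- Sketch: read nr consecutive values from a remote PE's RMA window
        !-- with passive-target synchronisation and access the PE-local receive
        !-- buffer through a Fortran pointer (illustration only)
        SUBROUTINE get_from_server( win, server_rank, recvindex, nr, recvbuf )

           USE, INTRINSIC ::  ISO_C_BINDING
           USE MPI
           IMPLICIT NONE

           INTEGER, INTENT(IN)     ::  win          !< RMA window shared with the server
           INTEGER, INTENT(IN)     ::  server_rank  !< rank of the server PE in the window's communicator
           INTEGER, INTENT(IN)     ::  recvindex    !< 1-based start index inside the server buffer
           INTEGER, INTENT(IN)     ::  nr           !< number of values to fetch
           TYPE(C_PTR), INTENT(IN) ::  recvbuf      !< PE-local receive buffer

           INTEGER                                 ::  ierr
           INTEGER, DIMENSION(1)                   ::  buf_shape
           INTEGER(KIND=MPI_ADDRESS_KIND)          ::  target_disp
           DOUBLE PRECISION, POINTER, DIMENSION(:) ::  buf

        !-- Interpret the C address of the receive buffer as a 1-d Fortran array
           buf_shape(1) = nr
           CALL C_F_POINTER( recvbuf, buf, buf_shape )

           IF ( nr > 0 )  THEN
              target_disp = recvindex - 1
        !
        !--   Passive-target epoch: only the origin (this PE) calls lock/unlock,
        !--   the target (server) does not take part in the synchronisation
              CALL MPI_WIN_LOCK( MPI_LOCK_SHARED, server_rank, 0, win, ierr )
              CALL MPI_GET( buf, nr, MPI_DOUBLE_PRECISION, server_rank,         &
                            target_disp, nr, MPI_DOUBLE_PRECISION, win, ierr )
        !--   The transfer is complete only after the unlock returns
              CALL MPI_WIN_UNLOCK( server_rank, win, ierr )
           ENDIF

        END SUBROUTINE get_from_server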