Changeset 1900 for palm/trunk/SOURCE/pmc_server_mod.f90
- Timestamp: May 4, 2016 3:27:53 PM (8 years ago)
- File: 1 edited
palm/trunk/SOURCE/pmc_server_mod.f90
r1851 r1900 1 MODULE pmc_server1 MODULE pmc_server 2 2 3 3 !--------------------------------------------------------------------------------! … … 20 20 ! Current revisions: 21 21 ! ------------------ 22 ! 22 ! re-formatted to match PALM style 23 23 ! 24 24 ! Former revisions: … … 69 69 70 70 #if defined( __parallel ) 71 use, intrinsic :: iso_c_binding71 USE, INTRINSIC :: ISO_C_BINDING 72 72 73 73 #if defined( __mpifh ) … … 76 76 USE MPI 77 77 #endif 78 USE kinds 79 USE PMC_general, ONLY: ClientDef, PMC_MAX_MODELL,PMC_sort, DA_NameDef, DA_Desclen, DA_Namelen, & 80 PMC_G_SetName, PeDef, ArrayDef, PMC_MAX_ARRAY 81 USE PMC_handle_communicator, ONLY: m_model_comm,m_model_rank,m_model_npes, m_to_client_comm, & 82 PMC_Server_for_Client, m_world_rank 83 USE PMC_MPI_wrapper, ONLY: PMC_Send_to_Client, PMC_Recv_from_Client, PMC_Bcast, PMC_Inter_Bcast, & 84 PMC_Alloc_mem, PMC_Time 85 86 IMPLICIT none 78 USE kinds 79 USE pmc_general, & 80 ONLY: arraydef, clientdef, da_namedef, da_namelen, pedef, & 81 pmc_g_setname, pmc_max_array, pmc_max_models, pmc_sort 82 83 USE pmc_handle_communicator, & 84 ONLY: m_model_comm,m_model_rank,m_model_npes, m_to_client_comm, & 85 m_world_rank, pmc_server_for_client 86 87 USE pmc_mpi_wrapper, & 88 ONLY: pmc_alloc_mem, pmc_bcast, pmc_time 89 90 IMPLICIT NONE 91 87 92 PRIVATE 88 93 SAVE 89 94 90 TYPE ClientIndexDef 91 INTEGER :: NrPoints 92 INTEGER,DIMENSION(:,:),allocatable :: index_list_2d 93 END TYPE ClientIndexDef 94 95 TYPE(ClientDef),DIMENSION(PMC_MAX_MODELL) :: Clients 96 TYPE(ClientIndexDef),DIMENSION(PMC_MAX_MODELL) :: indClients 97 98 INTEGER :: next_array_in_list = 0 99 100 PUBLIC PMC_Server_for_Client 101 102 INTERFACE PMC_ServerInit 103 MODULE procedure PMC_ServerInit 104 END INTERFACE PMC_ServerInit 105 106 INTERFACE PMC_S_Set_2D_index_list 107 MODULE procedure PMC_S_Set_2D_index_list 108 END INTERFACE PMC_S_Set_2D_index_list 109 110 INTERFACE PMC_S_clear_next_array_list 111 MODULE procedure PMC_S_clear_next_array_list 112 END INTERFACE PMC_S_clear_next_array_list 113 114 INTERFACE PMC_S_GetNextArray 115 MODULE procedure PMC_S_GetNextArray 116 END INTERFACE PMC_S_GetNextArray 117 118 INTERFACE PMC_S_Set_DataArray 119 MODULE procedure PMC_S_Set_DataArray_2d 120 MODULE procedure PMC_S_Set_DataArray_3d 121 END INTERFACE PMC_S_Set_DataArray 122 123 INTERFACE PMC_S_setInd_and_AllocMem 124 MODULE procedure PMC_S_setInd_and_AllocMem 125 END INTERFACE PMC_S_setInd_and_AllocMem 126 127 INTERFACE PMC_S_FillBuffer 128 MODULE procedure PMC_S_FillBuffer 129 END INTERFACE PMC_S_FillBuffer 130 131 INTERFACE PMC_S_GetData_from_Buffer 132 MODULE procedure PMC_S_GetData_from_Buffer 133 END INTERFACE PMC_S_GetData_from_Buffer 134 135 INTERFACE PMC_S_Set_Active_data_array 136 MODULE procedure PMC_S_Set_Active_data_array 137 END INTERFACE PMC_S_Set_Active_data_array 138 139 ! PUBLIC section 140 141 PUBLIC PMC_ServerInit, PMC_S_Set_2D_index_list, PMC_S_GetNextArray, PMC_S_Set_DataArray 142 PUBLIC PMC_S_setInd_and_AllocMem, PMC_S_FillBuffer, PMC_S_GetData_from_Buffer, PMC_S_Set_Active_data_array 143 PUBLIC PMC_S_clear_next_array_list 144 145 CONTAINS 146 147 SUBROUTINE PMC_ServerInit 148 IMPLICIT none 149 INTEGER :: i 150 INTEGER :: j 151 INTEGER :: ClientId 152 INTEGER :: istat 153 154 do i=1,size(PMC_Server_for_Client)-1 155 ! 
if(m_model_comm == 0) write(0,*) 'PMC_Server: Initialize client Id',PMC_Server_for_Client(i) 156 157 ClientId = PMC_Server_for_Client(i) 158 159 Clients(ClientId)%model_comm = m_model_comm 160 Clients(ClientId)%inter_comm = m_to_client_comm(ClientId) 161 162 ! Get rank and size 163 CALL MPI_Comm_rank (Clients(ClientId)%model_comm, Clients(ClientId)%model_rank, istat); 164 CALL MPI_Comm_size (Clients(ClientId)%model_comm, Clients(ClientId)%model_npes, istat); 165 CALL MPI_Comm_remote_size (Clients(ClientId)%inter_comm, Clients(ClientId)%inter_npes, istat); 166 167 ! Intra communicater is used for MPI_Get 168 CALL MPI_Intercomm_merge (Clients(ClientId)%inter_comm, .false., Clients(ClientId)%intra_comm, istat); 169 CALL MPI_Comm_rank (Clients(ClientId)%intra_comm, Clients(ClientId)%intra_rank, istat); 170 171 ! write(9,*) 'ClientId ',i,ClientId,m_world_rank, Clients(ClientId)%inter_npes 172 173 ALLOCATE (Clients(ClientId)%PEs(Clients(ClientId)%inter_npes)) 174 ! 175 !-- Allocate for all client PEs an array of TYPE ArrayDef to store information of transfer array 176 do j=1,Clients(ClientId)%inter_npes 177 Allocate(Clients(ClientId)%PEs(j)%array_list(PMC_MAX_ARRAY)) 178 end do 179 180 CALL Get_DA_names_from_client (ClientId) 181 end do 182 183 return 184 END SUBROUTINE PMC_ServerInit 185 186 SUBROUTINE PMC_S_Set_2D_index_list (ClientId, index_list) 187 IMPLICIT none 188 INTEGER,INTENT(IN) :: ClientId 189 INTEGER,DIMENSION(:,:),INTENT(INOUT) :: index_list !Index list will be modified in sort, therefore INOUT 190 191 !-- Local variables 192 INTEGER :: ip,is,ie,ian,ic,n 193 INTEGER :: istat 194 195 if(m_model_rank == 0) then 196 CALL PMC_sort (index_list, 6) ! Sort to ascending Server PE 197 is = 1 198 199 do ip=0,m_model_npes-1 200 201 ! Split into Server PEs 202 ie = is-1 !there may be no entry for this PE 203 if(is <= size(index_list,2) .and. ie >= 0) then 204 do while ( index_list(6,ie+1) == ip) 205 ie = ie+1 206 if( ie == size(index_list,2)) EXIT 207 end do 208 209 ian = ie-is+1 210 else 211 is = -1 212 ie = -2 213 ian = 0 214 end if 215 216 ! Send data to other server PEs 217 218 if(ip == 0) then 219 indClients(ClientId)%NrPoints = ian 220 if(ian > 0) then 221 ALLOCATE (indClients(ClientId)%index_list_2d(6,ian)) 222 indClients(ClientId)%index_list_2d(:,1:ian) = index_list(:,is:ie) 223 end if 224 else 225 CALL MPI_Send (ian, 1, MPI_INTEGER, ip, 1000, m_model_comm, istat) 226 if(ian > 0) then 227 CALL MPI_Send (index_list(1,is), 6*ian, MPI_INTEGER, ip, 1001, & 228 m_model_comm, istat) 229 end if 230 end if 231 is = ie+1 232 end do 233 else 234 CALL MPI_Recv (indClients(ClientId)%NrPoints, 1, MPI_INTEGER, 0, 1000, m_model_comm, & 235 MPI_STATUS_IGNORE, istat) 236 ian = indClients(ClientId)%NrPoints 237 if(ian > 0) then 238 ALLOCATE(indClients(ClientId)%index_list_2d(6,ian)) 239 CALL MPI_RECV (indClients(ClientId)%index_list_2d, 6*ian, MPI_INTEGER, 0, 1001, & 240 m_model_comm, MPI_STATUS_IGNORE, istat) 241 end if 242 end if 243 244 CALL Set_PE_index_list (ClientId,Clients(ClientId),indClients(ClientId)%index_list_2d,indClients(ClientId)%NrPoints) 245 246 return 247 END SUBROUTINE PMC_S_Set_2D_index_list 248 249 SUBROUTINE PMC_S_clear_next_array_list 250 IMPLICIT none 251 252 next_array_in_list = 0 253 254 return 255 END SUBROUTINE PMC_S_clear_next_array_list 256 257 ! 
List handling is still required to get minimal interaction with pmc_interface 258 logical function PMC_S_GetNextArray (ClientId, myName) 259 INTEGER(iwp),INTENT(IN) :: ClientId 260 CHARACTER(len=*),INTENT(OUT) :: myName 261 262 !-- local variables 263 TYPE(PeDef),POINTER :: aPE 264 TYPE(ArrayDef),POINTER :: ar 265 266 next_array_in_list = next_array_in_list+1 267 268 !-- Array Names are the same on all client PE, so take first PE to get the name 269 aPE => Clients(ClientId)%PEs(1) 270 271 if(next_array_in_list > aPE%Nr_arrays) then 272 PMC_S_GetNextArray = .false. ! all arrays done 273 return 274 end if 275 276 ar => aPE%array_list(next_array_in_list) 277 myName = ar%name 278 279 PMC_S_GetNextArray = .true. ! Return true if legal array 280 return 281 END function PMC_S_GetNextArray 282 283 SUBROUTINE PMC_S_Set_DataArray_2d (ClientId, array, array_2 ) 284 285 IMPLICIT none 286 287 INTEGER,INTENT(IN) :: ClientId 288 289 REAL(wp), INTENT(IN), DIMENSION(:,:), POINTER :: array 290 REAL(wp), INTENT(IN), DIMENSION(:,:), POINTER, OPTIONAL :: array_2 291 292 INTEGER :: NrDims 293 INTEGER,DIMENSION (4) :: dims 294 TYPE(c_ptr) :: array_adr 295 TYPE(c_ptr) :: second_adr 296 297 dims = 1 298 299 NrDims = 2 300 dims(1) = size(array,1) 301 dims(2) = size(array,2) 302 array_adr = c_loc(array) 303 304 IF ( PRESENT( array_2 ) ) THEN 305 second_adr = c_loc(array_2) 306 CALL PMC_S_SetArray (ClientId, NrDims, dims, array_adr, second_adr = second_adr) 307 ELSE 308 CALL PMC_S_SetArray (ClientId, NrDims, dims, array_adr) 95 TYPE clientindexdef 96 INTEGER :: nrpoints !< 97 INTEGER, DIMENSION(:,:), ALLOCATABLE :: index_list_2d !< 98 END TYPE clientindexdef 99 100 TYPE(clientdef), DIMENSION(pmc_max_models) :: clients !< 101 TYPE(clientindexdef), DIMENSION(pmc_max_models) :: indclients !< 102 103 INTEGER :: next_array_in_list = 0 !< 104 105 106 PUBLIC pmc_server_for_client 107 108 109 INTERFACE pmc_serverinit 110 MODULE PROCEDURE pmc_serverinit 111 END INTERFACE pmc_serverinit 112 113 INTERFACE pmc_s_set_2d_index_list 114 MODULE PROCEDURE pmc_s_set_2d_index_list 115 END INTERFACE pmc_s_set_2d_index_list 116 117 INTERFACE pmc_s_clear_next_array_list 118 MODULE PROCEDURE pmc_s_clear_next_array_list 119 END INTERFACE pmc_s_clear_next_array_list 120 121 INTERFACE pmc_s_getnextarray 122 MODULE PROCEDURE pmc_s_getnextarray 123 END INTERFACE pmc_s_getnextarray 124 125 INTERFACE pmc_s_set_dataarray 126 MODULE PROCEDURE pmc_s_set_dataarray_2d 127 MODULE PROCEDURE pmc_s_set_dataarray_3d 128 END INTERFACE pmc_s_set_dataarray 129 130 INTERFACE pmc_s_setind_and_allocmem 131 MODULE PROCEDURE pmc_s_setind_and_allocmem 132 END INTERFACE pmc_s_setind_and_allocmem 133 134 INTERFACE pmc_s_fillbuffer 135 MODULE PROCEDURE pmc_s_fillbuffer 136 END INTERFACE pmc_s_fillbuffer 137 138 INTERFACE pmc_s_getdata_from_buffer 139 MODULE PROCEDURE pmc_s_getdata_from_buffer 140 END INTERFACE pmc_s_getdata_from_buffer 141 142 INTERFACE pmc_s_set_active_data_array 143 MODULE PROCEDURE pmc_s_set_active_data_array 144 END INTERFACE pmc_s_set_active_data_array 145 146 PUBLIC pmc_serverinit, pmc_s_clear_next_array_list, pmc_s_fillbuffer, & 147 pmc_s_getdata_from_buffer, pmc_s_getnextarray, & 148 pmc_s_setind_and_allocmem, pmc_s_set_active_data_array, & 149 pmc_s_set_dataarray, pmc_s_set_2d_index_list 150 151 CONTAINS 152 153 154 SUBROUTINE pmc_serverinit 155 156 IMPLICIT NONE 157 158 INTEGER :: clientid !< 159 INTEGER :: i !< 160 INTEGER :: j !< 161 INTEGER :: istat !< 162 163 164 DO i = 1, SIZE( pmc_server_for_client )-1 165 166 clientid = 
pmc_server_for_client( i ) 167 168 clients(clientid)%model_comm = m_model_comm 169 clients(clientid)%inter_comm = m_to_client_comm(clientid) 170 ! 171 !-- Get rank and size 172 CALL MPI_COMM_RANK( clients(clientid)%model_comm, & 173 clients(clientid)%model_rank, istat ) 174 CALL MPI_COMM_SIZE( clients(clientid)%model_comm, & 175 clients(clientid)%model_npes, istat ) 176 CALL MPI_COMM_REMOTE_SIZE( clients(clientid)%inter_comm, & 177 clients(clientid)%inter_npes, istat ) 178 ! 179 !-- Intra communicater is used for MPI_GET 180 CALL MPI_INTERCOMM_MERGE( clients(clientid)%inter_comm, .FALSE., & 181 clients(clientid)%intra_comm, istat ) 182 CALL MPI_COMM_RANK( clients(clientid)%intra_comm, & 183 clients(clientid)%intra_rank, istat ) 184 185 ALLOCATE( clients(clientid)%pes(clients(clientid)%inter_npes)) 186 ! 187 !-- Allocate array of TYPE arraydef for all client PEs to store information 188 !-- of the transfer array 189 DO j = 1, clients(clientid)%inter_npes 190 ALLOCATE( clients(clientid)%pes(j)%array_list(pmc_max_array) ) 191 ENDDO 192 193 CALL get_da_names_from_client (clientid) 194 195 ENDDO 196 197 END SUBROUTINE pmc_serverinit 198 199 200 201 SUBROUTINE pmc_s_set_2d_index_list( clientid, index_list ) 202 203 IMPLICIT NONE 204 205 INTEGER, INTENT(IN) :: clientid !< 206 INTEGER, DIMENSION(:,:), INTENT(INOUT) :: index_list !< 207 208 INTEGER :: ian !< 209 INTEGER :: ic !< 210 INTEGER :: ie !< 211 INTEGER :: ip !< 212 INTEGER :: is !< 213 INTEGER :: istat !< 214 INTEGER :: n !< 215 216 217 IF ( m_model_rank == 0 ) THEN 218 ! 219 !-- Sort to ascending server PE 220 CALL pmc_sort( index_list, 6 ) 221 222 is = 1 223 DO ip = 0, m_model_npes-1 224 ! 225 !-- Split into server PEs 226 ie = is - 1 227 ! 228 !-- There may be no entry for this PE 229 IF ( is <= SIZE( index_list,2 ) .AND. ie >= 0 ) THEN 230 231 DO WHILE ( index_list(6,ie+1 ) == ip ) 232 ie = ie + 1 233 IF ( ie == SIZE( index_list,2 ) ) EXIT 234 ENDDO 235 236 ian = ie - is + 1 237 238 ELSE 239 is = -1 240 ie = -2 241 ian = 0 242 ENDIF 243 ! 
244 !-- Send data to other server PEs 245 IF ( ip == 0 ) THEN 246 indclients(clientid)%nrpoints = ian 247 IF ( ian > 0) THEN 248 ALLOCATE( indclients(clientid)%index_list_2d(6,ian) ) 249 indclients(clientid)%index_list_2d(:,1:ian) = & 250 index_list(:,is:ie) 251 ENDIF 252 ELSE 253 CALL MPI_SEND( ian, 1, MPI_INTEGER, ip, 1000, m_model_comm, & 254 istat ) 255 IF ( ian > 0) THEN 256 CALL MPI_SEND( index_list(1,is), 6*ian, MPI_INTEGER, ip, & 257 1001, m_model_comm, istat ) 258 ENDIF 259 ENDIF 260 is = ie + 1 261 262 ENDDO 263 264 ELSE 265 266 CALL MPI_RECV( indclients(clientid)%nrpoints, 1, MPI_INTEGER, 0, 1000, & 267 m_model_comm, MPI_STATUS_IGNORE, istat ) 268 ian = indclients(clientid)%nrpoints 269 270 IF ( ian > 0 ) THEN 271 ALLOCATE( indclients(clientid)%index_list_2d(6,ian) ) 272 CALL MPI_RECV( indclients(clientid)%index_list_2d, 6*ian, & 273 MPI_INTEGER, 0, 1001, m_model_comm, & 274 MPI_STATUS_IGNORE, istat) 309 275 ENDIF 310 276 311 return 312 END SUBROUTINE PMC_S_Set_DataArray_2d 313 314 SUBROUTINE PMC_S_Set_DataArray_3d (ClientId, array, nz_cl, nz, array_2 ) 315 316 IMPLICIT none 317 318 INTEGER,INTENT(IN) :: ClientId 319 320 REAL(wp), INTENT(IN), DIMENSION(:,:,:), POINTER :: array 321 REAL(wp), INTENT(IN), DIMENSION(:,:,:), POINTER, OPTIONAL :: array_2 322 INTEGER,INTENT(IN) :: nz_cl 323 INTEGER,INTENT(IN) :: nz 324 325 INTEGER :: NrDims 326 INTEGER,DIMENSION (4) :: dims 327 TYPE(c_ptr) :: array_adr 328 TYPE(c_ptr) :: second_adr 329 330 dims = 1 331 332 dims = 0 333 NrDims = 3 334 dims(1) = size(array,1) 335 dims(2) = size(array,2) 336 dims(3) = size(array,3) 337 dims(4) = nz_cl+dims(1)-nz ! works for first dimension 1:nz and 0:nz+1 338 339 array_adr = c_loc(array) 340 341 ! 342 !-- In PALM's pointer version, two indices have to be stored internally. 343 !-- The active address of the data array is set in swap_timelevel 344 IF ( PRESENT( array_2 ) ) THEN 345 second_adr = c_loc(array_2) 346 CALL PMC_S_SetArray (ClientId, NrDims, dims, array_adr, second_adr = second_adr) 347 ELSE 348 CALL PMC_S_SetArray (ClientId, NrDims, dims, array_adr) 349 ENDIF 350 351 return 352 END SUBROUTINE PMC_S_Set_DataArray_3d 353 354 SUBROUTINE PMC_S_setInd_and_AllocMem (ClientId) 355 356 USE control_parameters, & 357 ONLY: message_string 358 359 IMPLICIT none 360 361 ! 362 !-- Naming convention for appendices: _sc -> server to client transfer 363 !-- _cs -> client to server transfer 364 !-- Send -> Server to client transfer 365 !-- Recv -> client to server transfer 366 INTEGER,INTENT(IN) :: ClientId 367 368 INTEGER :: i, istat, ierr, j 369 INTEGER :: arlen, myIndex, tag 370 INTEGER :: rCount ! count MPI requests 371 INTEGER(idp) :: bufsize ! Size of MPI data Window 372 TYPE(PeDef),POINTER :: aPE 373 TYPE(ArrayDef),POINTER :: ar 374 CHARACTER(len=DA_Namelen) :: myName 375 INTEGER,DIMENSION(1024) :: req 376 Type(c_ptr) :: base_ptr 377 REAL(wp),DIMENSION(:),POINTER,SAVE :: base_array_sc !Base array for server to client transfer 378 REAL(wp),DIMENSION(:),POINTER,SAVE :: base_array_cs !Base array for client to server transfer 379 INTEGER(KIND=MPI_ADDRESS_KIND) :: WinSize 380 381 ! 382 !-- Server to client direction 383 myIndex = 1 384 rCount = 0 385 bufsize = 8 386 387 ! 388 !-- First stride: Compute size and set index 389 do i=1,Clients(ClientId)%inter_npes 390 aPE => Clients(ClientId)%PEs(i) 391 tag = 200 392 do j=1,aPE%Nr_arrays 393 ar => aPE%array_list(j) 394 if(ar%NrDims == 2) then 395 arlen = aPE%NrEle ! 2D 396 else if(ar%NrDims == 3) then 397 arlen = aPE%NrEle * ar%A_dim(4); ! 
3D 398 else 399 arlen = -1 400 end if 401 ar%SendIndex = myIndex 402 403 tag = tag+1 404 rCount = rCount+1 405 CALL MPI_Isend (myIndex, 1, MPI_INTEGER, i-1, tag, Clients(ClientId)%inter_comm, req(rCount),ierr) 406 407 if(rCount == 1024) then ! Maximum of 1024 outstanding requests 408 CALL MPI_Waitall (rCount, req, MPI_STATUSES_IGNORE, ierr) 409 rCount = 0; 410 end if 411 412 myIndex = myIndex+arlen 413 bufsize = bufsize+arlen 414 ar%SendSize = arlen 415 416 end do 417 if(rCount > 0) then ! Wait for all send completed 418 CALL MPI_Waitall (rCount, req, MPI_STATUSES_IGNORE, ierr) 419 end if 420 end do 421 422 ! 423 !-- Create RMA (One Sided Communication) window for data buffer server to 424 !-- client transfer. 425 !-- The buffer of MPI_Get (counter part of transfer) can be PE-local, i.e. 426 !-- it can but must not be part of the MPI RMA window. 427 !-- Only one RMA window is required to prepare the data 428 !-- for server -> client transfer on the server side and 429 !-- for client -> server transfer on the client side 430 CALL PMC_Alloc_mem (base_array_sc, bufsize) 431 Clients(ClientId)%TotalBufferSize = bufsize*wp !Total buffer size in Byte 432 433 WinSize = bufsize*wp 434 CALL MPI_Win_create (base_array_sc, WinSize, wp, MPI_INFO_NULL, & 435 Clients(ClientId)%intra_comm, Clients(ClientId)%win_server_client, ierr) 436 CALL MPI_Win_fence (0, Clients(ClientId)%win_server_client, ierr); ! Open Window to set data 437 ! 438 !-- Second stride: Set Buffer pointer 439 do i=1,Clients(ClientId)%inter_npes 440 aPE => Clients(ClientId)%PEs(i) 441 do j=1,aPE%Nr_arrays 442 ar => aPE%array_list(j) 443 ar%SendBuf = c_loc(base_array_sc(ar%SendIndex)) 444 if(ar%SendIndex+ar%SendSize > bufsize) then 445 write(0,'(a,i4,4i7,1x,a)') 'Server Buffer too small ',i, & 446 ar%SendIndex,ar%SendSize,ar%SendIndex+ar%SendSize,bufsize,trim(ar%name) 447 CALL MPI_Abort (MPI_COMM_WORLD, istat, ierr) 448 end if 449 end do 450 end do 451 452 !-- Client to server direction 453 454 bufsize = 8 455 456 !-- First stride, Compute size and set index 457 458 do i=1,Clients(ClientId)%inter_npes 459 aPE => Clients(ClientId)%PEs(i) 460 tag = 300 461 462 do j=1,aPE%Nr_arrays 463 ar => aPE%array_list(j) 464 465 ! Receive Index from client 466 tag = tag+1 467 CALL MPI_Recv (myIndex, 1, MPI_INTEGER, i-1, tag, Clients(ClientId)%inter_comm, MPI_STATUS_IGNORE, ierr) 468 469 if(ar%NrDims == 3) then 470 bufsize = max(bufsize,aPE%NrEle * ar%A_dim(4)) ! 3D 471 else 472 bufsize = max(bufsize,aPE%NrEle) ! 2D 473 end if 474 ar%RecvIndex = myIndex 475 end do 476 477 end do 478 479 !-- Create RMA (One Sided Communication) data buffer 480 !-- The buffer for MPI_Get can be PE local, i.e. 
it can but must not be part of the MPI RMA window 481 482 CALL PMC_Alloc_mem (base_array_cs, bufsize, base_ptr) 483 Clients(ClientId)%TotalBufferSize = bufsize*wp !Total buffer size in Byte 484 485 CALL MPI_Barrier(Clients(ClientId)%intra_comm, ierr) 486 487 !-- Second stride, Set Buffer pointer 488 489 do i=1,Clients(ClientId)%inter_npes 490 aPE => Clients(ClientId)%PEs(i) 491 492 do j=1,aPE%Nr_arrays 493 ar => aPE%array_list(j) 494 ar%RecvBuf = base_ptr 495 end do 496 end do 497 498 return 499 END SUBROUTINE PMC_S_setInd_and_AllocMem 500 501 SUBROUTINE PMC_S_FillBuffer (ClientId, WaitTime) 502 IMPLICIT none 503 INTEGER,INTENT(IN) :: ClientId 504 REAL(wp), INTENT(OUT), OPTIONAL :: WaitTime 505 506 INTEGER :: ip,ij,istat,ierr,j 507 INTEGER :: myIndex 508 REAL(wp) :: t1,t2 509 TYPE(PeDef),POINTER :: aPE 510 TYPE(ArrayDef),POINTER :: ar 511 CHARACTER(len=DA_Namelen) :: myName 512 INTEGER,DIMENSION(1) :: buf_shape 513 REAL(wp), POINTER, DIMENSION(:) :: buf 514 REAL(wp), POINTER, DIMENSION(:,:) :: data_2d 515 REAL(wp), POINTER, DIMENSION(:,:,:) :: data_3d 516 517 !-- Synchronization of the model is done in pmci_client_synchronize and pmci_server_synchronize 518 !-- Therefor the RMA window cann be filled without sychronization at this point and the barrier 519 !-- is not necessary 520 !-- Please note that WaitTime has to be set in PMC_S_FillBuffer AND PMC_C_GetBuffer 521 522 if(present(WaitTime)) then 523 t1 = PMC_Time() 524 CALL MPI_Barrier(Clients(ClientId)%intra_comm, ierr) 525 t2 = PMC_Time() 526 WaitTime = t2-t1 527 end if 528 529 do ip=1,Clients(ClientId)%inter_npes 530 aPE => Clients(ClientId)%PEs(ip) 531 do j=1,aPE%Nr_arrays 532 ar => aPE%array_list(j) 533 myIndex=1 534 if(ar%NrDims == 2) then 535 buf_shape(1) = aPE%NrEle 536 CALL c_f_pointer(ar%SendBuf, buf, buf_shape) 537 CALL c_f_pointer(ar%data, data_2d, ar%A_dim(1:2)) 538 do ij=1,aPE%NrEle 539 buf(myIndex) = data_2d(aPE%locInd(ij)%j,aPE%locInd(ij)%i) 540 myIndex = myIndex+1 541 end do 542 else if(ar%NrDims == 3) then 543 buf_shape(1) = aPE%NrEle*ar%A_dim(4) 544 CALL c_f_pointer(ar%SendBuf, buf, buf_shape) 545 CALL c_f_pointer(ar%data, data_3d, ar%A_dim(1:3)) 546 do ij=1,aPE%NrEle 547 buf(myIndex:myIndex+ar%A_dim(4)-1) = data_3d(1:ar%A_dim(4),aPE%locInd(ij)%j,aPE%locInd(ij)%i) 548 myIndex = myIndex+ar%A_dim(4) 549 end do 550 end if 551 end do 552 end do 553 554 CALL MPI_Barrier(Clients(ClientId)%intra_comm, ierr) ! buffer is filled 555 556 return 557 END SUBROUTINE PMC_S_FillBuffer 558 559 SUBROUTINE PMC_S_GetData_from_Buffer (ClientId, WaitTime) 560 561 IMPLICIT none 562 563 INTEGER,INTENT(IN) :: ClientId 564 REAL(wp), INTENT(OUT), OPTIONAL :: WaitTime 565 566 !-- local variables 567 INTEGER :: ip,ij,istat,ierr,j 568 INTEGER :: myIndex 569 INTEGER :: nr 570 REAL(wp) :: t1,t2 571 TYPE(PeDef),POINTER :: aPE 572 TYPE(ArrayDef),POINTER :: ar 573 CHARACTER(len=DA_Namelen) :: myName 574 INTEGER,DIMENSION(1) :: buf_shape 575 REAL(wp), POINTER, DIMENSION(:) :: buf 576 REAL(wp), POINTER, DIMENSION(:,:) :: data_2d 577 REAL(wp), POINTER, DIMENSION(:,:,:) :: data_3d 578 INTEGER :: target_pe 579 INTEGER(kind=MPI_ADDRESS_KIND) :: target_disp 580 581 t1 = PMC_Time() 582 CALL MPI_Barrier(Clients(ClientId)%intra_comm, ierr) ! Wait for client to fill buffer 583 t2 = PMC_Time()-t1 584 if(present(WaitTime)) WaitTime = t2 585 586 ! CALL MPI_Win_fence (0, Clients(ClientId)%win_server_client, ierr) ! Fence might do it, test later 587 CALL MPI_Barrier(Clients(ClientId)%intra_comm, ierr) ! 
Wait for buffer is filled 588 589 do ip=1,Clients(ClientId)%inter_npes 590 aPE => Clients(ClientId)%PEs(ip) 591 do j=1,aPE%Nr_arrays 592 ar => aPE%array_list(j) 593 594 if(ar%RecvIndex < 0) CYCLE 595 596 if(ar%NrDims == 2) then 597 nr = aPE%NrEle 598 else if(ar%NrDims == 3) then 599 nr = aPE%NrEle*ar%A_dim(4) 600 end if 601 602 buf_shape(1) = nr 603 CALL c_f_pointer(ar%RecvBuf, buf, buf_shape) 604 ! 605 !-- MPI passive target RMA 606 607 if(nr > 0) then 608 target_disp = ar%RecvIndex-1 609 target_pe = ip-1+m_model_npes ! client PEs are located behind server PEs 610 CALL MPI_Win_lock (MPI_LOCK_SHARED , target_pe, 0, Clients(ClientId)%win_server_client, ierr) 611 CALL MPI_Get (buf, nr, MPI_REAL, target_pe, target_disp, nr, MPI_REAL, Clients(ClientId)%win_server_client, ierr) 612 CALL MPI_Win_unlock (target_pe, Clients(ClientId)%win_server_client, ierr) 613 end if 614 615 myIndex=1 616 if(ar%NrDims == 2) then 617 CALL c_f_pointer(ar%data, data_2d, ar%A_dim(1:2)) 618 do ij=1,aPE%NrEle 619 data_2d(aPE%locInd(ij)%j,aPE%locInd(ij)%i) = buf(myIndex) 620 myIndex = myIndex+1 621 end do 622 else if(ar%NrDims == 3) then 623 CALL c_f_pointer(ar%data, data_3d, ar%A_dim(1:3)) 624 do ij=1,aPE%NrEle 625 data_3d(1:ar%A_dim(4),aPE%locInd(ij)%j,aPE%locInd(ij)%i) = buf(myIndex:myIndex+ar%A_dim(4)-1) 626 myIndex = myIndex+ar%A_dim(4) 627 end do 628 end if 629 end do 630 end do 631 632 END SUBROUTINE PMC_S_GetData_from_Buffer 633 634 ! Private SUBROUTINEs 635 636 SUBROUTINE Get_DA_names_from_client (ClientId) 637 IMPLICIT none 638 INTEGER,INTENT(IN) :: ClientId 639 !-- local variables 640 type(DA_NameDef) :: myName 641 642 ! Get Data Array Description and Name from Client 643 644 do 645 CALL PMC_Bcast ( myName%couple_index, 0, comm=m_to_client_comm(ClientId)) 646 if(myName%couple_index == -1) EXIT 647 CALL PMC_Bcast ( myName%ServerDesc, 0, comm=m_to_client_comm(ClientId)) 648 CALL PMC_Bcast ( myName%NameOnServer, 0, comm=m_to_client_comm(ClientId)) 649 CALL PMC_Bcast ( myName%ClientDesc, 0, comm=m_to_client_comm(ClientId)) 650 CALL PMC_Bcast ( myName%NameOnClient, 0, comm=m_to_client_comm(ClientId)) 651 652 CALL PMC_G_SetName (clients(ClientID), myName%couple_index, myName%NameOnServer ) 653 end do 654 655 return 656 END SUBROUTINE Get_DA_names_from_client 657 658 SUBROUTINE PMC_S_SetArray (ClientId, NrDims, dims, array_adr, second_adr) 659 IMPLICIT none 660 661 INTEGER,INTENT(IN) :: ClientId 662 INTEGER,INTENT(IN) :: NrDims 663 INTEGER,INTENT(IN),DIMENSION(:) :: dims 664 TYPE(c_ptr),INTENT(IN) :: array_adr 665 TYPE(c_ptr),INTENT(IN),OPTIONAL :: second_adr 666 667 INTEGER :: i 668 TYPE(PeDef),POINTER :: aPE 669 TYPE(ArrayDef),POINTER :: ar 670 CHARACTER(len=DA_Namelen) :: myName 671 672 ! 
Set Array for Client interPE 0 673 674 do i=1,Clients(ClientId)%inter_npes 675 aPE => Clients(ClientId)%PEs(i) 676 ar => aPE%array_list(next_array_in_list) 677 ar%NrDims = NrDims 678 ar%A_dim = dims 679 ar%data = array_adr 680 if(present(second_adr)) then 681 ar%po_data(1) = array_adr 682 ar%po_data(2) = second_adr 683 else 684 ar%po_data(1) = C_NULL_PTR 685 ar%po_data(2) = C_NULL_PTR 686 end if 687 end do 688 689 return 690 END SUBROUTINE PMC_S_SetArray 691 692 693 SUBROUTINE PMC_S_Set_Active_data_array (ClientId,iactive) 694 IMPLICIT none 695 696 INTEGER,INTENT(IN) :: ClientId 697 INTEGER,INTENT(IN) :: iactive 698 699 !-- local variables 700 INTEGER :: i, ip, j 701 TYPE(PeDef),POINTER :: aPE 702 TYPE(ArrayDef),POINTER :: ar 703 CHARACTER(len=DA_Namelen) :: myName 704 705 do ip=1,Clients(ClientId)%inter_npes 706 aPE => Clients(ClientId)%PEs(ip) 707 do j=1,aPE%Nr_arrays 708 ar => aPE%array_list(j) 709 if(iactive == 1 .OR. iactive == 2) then 710 ar%data = ar%po_data(iactive) 711 end if 712 end do 713 end do 714 715 return 716 END SUBROUTINE PMC_S_Set_Active_data_array 717 718 719 SUBROUTINE Set_PE_index_list (ClientId, myClient,index_list,NrP) 720 IMPLICIT none 721 722 INTEGER,INTENT(IN) :: ClientId 723 TYPE(ClientDef),INTENT(INOUT) :: myClient 724 INTEGER,INTENT(IN),DIMENSION(:,:) :: index_list 725 INTEGER,INTENT(IN) :: NrP 726 727 !-- local variables 728 INTEGER :: i,j,ind,ierr,i2 729 TYPE(PeDef),POINTER :: aPE 730 INTEGER :: RemPE 731 INTEGER,DIMENSION(myClient%inter_npes) :: RemInd 732 INTEGER,DIMENSION(:),POINTER :: RemIndw 733 INTEGER,DIMENSION(:),POINTER :: RLdef 734 INTEGER(KIND=MPI_ADDRESS_KIND) :: WinSize 735 INTEGER :: indWin,indWin2 736 737 ! First, count entries for every remote client PE 738 739 do i=1,myClient%inter_npes 740 aPE => myClient%PEs(i) 741 aPE%NrEle = 0 742 end do 743 744 do j=1,NrP ! loop over number of cells coarse grid 745 RemPE = index_list(5,j)+1 ! Pe number remote PE 746 aPE => myClient%PEs(RemPE) 747 aPE% NrEle = aPE% NrEle+1 ! Increment Number of elements for this client Pe 748 end do 749 750 do i=1,myClient%inter_npes 751 aPE => myClient%PEs(i) 752 ALLOCATE(aPE%locInd(aPE%NrEle)) 753 end do 754 755 RemInd = 0 756 757 ! Second, Create lists 758 759 do j=1,NrP ! loop over number of cells coarse grid 760 RemPE = index_list(5,j)+1 ! Pe number remote PE 761 aPE => myClient%PEs(RemPE) 762 RemInd(RemPE) = RemInd(RemPE)+1 763 ind = RemInd(RemPE) 764 aPE%locInd(ind)%i = index_list(1,j) 765 aPE%locInd(ind)%j = index_list(2,j) 766 end do 767 768 ! Prepare Number of Elements for Client PEs 769 CALL PMC_Alloc_mem (RLdef, myClient%inter_npes*2) 770 WinSize = myClient%inter_npes*c_sizeof(i)*2 ! Number of Client PEs * size of INTEGER (i just arbitrary INTEGER) 771 772 CALL MPI_Win_create (RLdef, WinSize, iwp, MPI_INFO_NULL, myClient%intra_comm, indWin, ierr); 773 CALL MPI_Win_fence (0, indWin, ierr); ! Open Window to set data 774 775 RLdef(1) = 0 ! Index on Remote PE 0 776 RLdef(2) = RemInd(1) ! Number of Elements on Rem PE 0 777 778 do i=2,myClient%inter_npes ! Reserve Buffer for index array 779 i2 = (i-1)*2+1 780 RLdef(i2) = RLdef(i2-2) + RLdef(i2-1)*2 ! Index on Remote PE 781 RLdef(i2+1) = RemInd(i) ! Number of Elements on Remote PE 782 end do 783 784 CALL MPI_Win_fence (0, indWin, ierr); ! Close Window to allow client to access data 785 CALL MPI_Win_fence (0, indWin, ierr); ! Client has retrieved data 786 787 i2 = 2*myClient%inter_npes-1 788 WinSize = (RLdef(i2)+RLdef(i2+1))*2 789 WinSize = max(WinSize,1) ! 
Make sure, MPI_Alloc_mem works 790 791 CALL PMC_Alloc_mem (RemIndw, int(WinSize)) 792 793 CALL MPI_Barrier (m_model_comm, ierr) 794 CALL MPI_Win_create (RemIndw, WinSize*c_sizeof(i), iwp, MPI_INFO_NULL, myClient%intra_comm, indWin2, ierr); 795 796 CALL MPI_Win_fence (0, indWin2, ierr); ! Open Window to set data 797 do j=1,NrP ! this loop creates the 2D index list 798 RemPE = index_list(5,j)+1 ! Pe number remote PE 799 aPE => myClient%PEs(RemPE) 800 i2 = RemPE*2-1 801 ind = RLdef(i2)+1 802 RemIndw(ind) = index_list(3,j) 803 RemIndw(ind+1) = index_list(4,j) 804 RLdef(i2) = RLdef(i2)+2 805 end do 806 CALL MPI_Win_fence (0, indWin2, ierr); !all data set 807 808 CALL MPI_Barrier(myClient%intra_comm, ierr) ! Dont know why, but this barrier is necessary before we can free the windows 809 810 CALL MPI_Win_free(indWin, ierr) 811 CALL MPI_Win_free(indWin2, ierr) 812 813 ! Sollte funktionieren, Problem mit MPI implementation 814 ! https://www.lrz.de/services/software/parallel/mpi/onesided 815 ! CALL MPI_Free_mem (RemIndw, ierr) 816 817 return 818 END SUBROUTINE Set_PE_index_list 277 ENDIF 278 279 CALL set_pe_index_list( clientid, clients(clientid), & 280 indclients(clientid)%index_list_2d, & 281 indclients(clientid)%nrpoints ) 282 283 END SUBROUTINE pmc_s_set_2d_index_list 284 285 286 287 SUBROUTINE pmc_s_clear_next_array_list 288 289 IMPLICIT NONE 290 291 next_array_in_list = 0 292 293 END SUBROUTINE pmc_s_clear_next_array_list 294 295 296 297 LOGICAL FUNCTION pmc_s_getnextarray( clientid, myname ) 298 ! 299 !-- List handling is still required to get minimal interaction with 300 !-- pmc_interface 301 !-- TODO: what does "still" mean? Is there a chance to change this! 302 CHARACTER(LEN=*), INTENT(OUT) :: myname !< 303 INTEGER(iwp), INTENT(IN) :: clientid !< 304 305 TYPE(arraydef), POINTER :: ar 306 TYPE(pedef), POINTER :: ape 307 308 next_array_in_list = next_array_in_list + 1 309 ! 310 !-- Array names are the same on all client PEs, so take first PE to get the name 311 ape => clients(clientid)%pes(1) 312 313 IF ( next_array_in_list > ape%nr_arrays ) THEN 314 ! 315 !-- All arrays are done 316 pmc_s_getnextarray = .FALSE. 317 RETURN 318 ENDIF 319 320 ar => ape%array_list(next_array_in_list) 321 myname = ar%name 322 ! 323 !-- Return true if legal array 324 !-- TODO: what does this comment mean? Can there be non-legal arrays?? 325 pmc_s_getnextarray = .TRUE. 
326 327 END FUNCTION pmc_s_getnextarray 328 329 330 331 SUBROUTINE pmc_s_set_dataarray_2d( clientid, array, array_2 ) 332 333 IMPLICIT NONE 334 335 INTEGER,INTENT(IN) :: clientid !< 336 337 REAL(wp), INTENT(IN), DIMENSION(:,:), POINTER :: array !< 338 REAL(wp), INTENT(IN), DIMENSION(:,:), POINTER, OPTIONAL :: array_2 !< 339 340 INTEGER :: nrdims !< 341 INTEGER, DIMENSION(4) :: dims !< 342 TYPE(C_PTR) :: array_adr !< 343 TYPE(C_PTR) :: second_adr !< 344 345 346 dims = 1 347 nrdims = 2 348 dims(1) = SIZE( array,1 ) 349 dims(2) = SIZE( array,2 ) 350 array_adr = C_LOC( array ) 351 352 IF ( PRESENT( array_2 ) ) THEN 353 second_adr = C_LOC(array_2) 354 CALL pmc_s_setarray( clientid, nrdims, dims, array_adr, & 355 second_adr = second_adr) 356 ELSE 357 CALL pmc_s_setarray( clientid, nrdims, dims, array_adr ) 358 ENDIF 359 360 END SUBROUTINE pmc_s_set_dataarray_2d 361 362 363 364 SUBROUTINE pmc_s_set_dataarray_3d( clientid, array, nz_cl, nz, array_2 ) 365 366 IMPLICIT NONE 367 368 INTEGER, INTENT(IN) :: clientid !< 369 INTEGER, INTENT(IN) :: nz !< 370 INTEGER, INTENT(IN) :: nz_cl !< 371 372 REAL(wp), INTENT(IN), DIMENSION(:,:,:), POINTER :: array !< 373 REAL(wp), INTENT(IN), DIMENSION(:,:,:), POINTER, OPTIONAL :: array_2 !< 374 375 INTEGER :: nrdims !< 376 INTEGER, DIMENSION(4) :: dims !< 377 TYPE(C_PTR) :: array_adr !< 378 TYPE(C_PTR) :: second_adr !< 379 380 !-- TODO: the next assignment seems to be obsolete. Please check! 381 dims = 1 382 dims = 0 383 nrdims = 3 384 dims(1) = SIZE( array,1 ) 385 dims(2) = SIZE( array,2 ) 386 dims(3) = SIZE( array,3 ) 387 dims(4) = nz_cl+dims(1)-nz ! works for first dimension 1:nz and 0:nz+1 388 389 array_adr = C_LOC(array) 390 391 ! 392 !-- In PALM's pointer version, two indices have to be stored internally. 393 !-- The active address of the data array is set in swap_timelevel. 394 IF ( PRESENT( array_2 ) ) THEN 395 second_adr = C_LOC( array_2 ) 396 CALL pmc_s_setarray( clientid, nrdims, dims, array_adr, & 397 second_adr = second_adr) 398 ELSE 399 CALL pmc_s_setarray( clientid, nrdims, dims, array_adr ) 400 ENDIF 401 402 END SUBROUTINE pmc_s_set_dataarray_3d 403 404 405 406 SUBROUTINE pmc_s_setind_and_allocmem( clientid ) 407 408 USE control_parameters, & 409 ONLY: message_string 410 411 IMPLICIT NONE 412 413 ! 414 !-- Naming convention for appendices: _sc -> server to client transfer 415 !-- _cs -> client to server transfer 416 !-- send -> server to client transfer 417 !-- recv -> client to server transfer 418 INTEGER, INTENT(IN) :: clientid !< 419 420 INTEGER :: arlen !< 421 INTEGER :: i !< 422 INTEGER :: ierr !< 423 INTEGER :: istat !< 424 INTEGER :: j !< 425 INTEGER :: myindex !< 426 INTEGER :: rcount !< count MPI requests 427 INTEGER :: tag !< 428 429 INTEGER(idp) :: bufsize !< size of MPI data window 430 INTEGER(KIND=MPI_ADDRESS_KIND) :: winsize !< 431 432 INTEGER, DIMENSION(1024) :: req !< 433 434 TYPE(C_PTR) :: base_ptr !< 435 TYPE(pedef), POINTER :: ape !< 436 TYPE(arraydef), POINTER :: ar !< 437 438 REAL(wp),DIMENSION(:), POINTER, SAVE :: base_array_sc !< base array for server to client transfer 439 REAL(wp),DIMENSION(:), POINTER, SAVE :: base_array_cs !< base array for client to server transfer 440 441 ! 442 !-- Server to client direction 443 myindex = 1 444 rcount = 0 445 bufsize = 8 446 447 ! 
448 !-- First stride: compute size and set index 449 DO i = 1, clients(clientid)%inter_npes 450 451 ape => clients(clientid)%pes(i) 452 tag = 200 453 454 DO j = 1, ape%nr_arrays 455 456 ar => ape%array_list(j) 457 IF ( ar%nrdims == 2 ) THEN 458 arlen = ape%nrele 459 ELSEIF ( ar%nrdims == 3 ) THEN 460 arlen = ape%nrele * ar%a_dim(4) 461 ELSE 462 arlen = -1 463 ENDIF 464 ar%sendindex = myindex 465 466 tag = tag + 1 467 rcount = rcount + 1 468 CALL MPI_ISEND( myindex, 1, MPI_INTEGER, i-1, tag, & 469 clients(clientid)%inter_comm, req(rcount), ierr ) 470 ! 471 !-- Maximum of 1024 outstanding requests 472 !-- TODO: what does this limit means? 473 IF ( rcount == 1024 ) THEN 474 CALL MPI_WAITALL( rcount, req, MPI_STATUSES_IGNORE, ierr ) 475 rcount = 0 476 ENDIF 477 478 myindex = myindex + arlen 479 bufsize = bufsize + arlen 480 ar%sendsize = arlen 481 482 ENDDO 483 484 IF ( rcount > 0 ) THEN 485 CALL MPI_WAITALL( rcount, req, MPI_STATUSES_IGNORE, ierr ) 486 ENDIF 487 488 ENDDO 489 490 ! 491 !-- Create RMA (One Sided Communication) window for data buffer server to 492 !-- client transfer. 493 !-- The buffer of MPI_GET (counterpart of transfer) can be PE-local, i.e. 494 !-- it can but must not be part of the MPI RMA window. Only one RMA window is 495 !-- required to prepare the data for 496 !-- server -> client transfer on the server side 497 !-- and for 498 !-- client -> server transfer on the client side 499 CALL pmc_alloc_mem( base_array_sc, bufsize ) 500 clients(clientid)%totalbuffersize = bufsize * wp 501 502 winsize = bufsize * wp 503 CALL MPI_WIN_CREATE( base_array_sc, winsize, wp, MPI_INFO_NULL, & 504 clients(clientid)%intra_comm, & 505 clients(clientid)%win_server_client, ierr ) 506 ! 507 !-- Open window to set data 508 CALL MPI_WIN_FENCE( 0, clients(clientid)%win_server_client, ierr ) 509 ! 510 !-- Second stride: set buffer pointer 511 DO i = 1, clients(clientid)%inter_npes 512 513 ape => clients(clientid)%pes(i) 514 515 DO j = 1, ape%nr_arrays 516 517 ar => ape%array_list(j) 518 ar%sendbuf = C_LOC( base_array_sc(ar%sendindex) ) 519 520 !-- TODO: replace this by standard PALM error message using the message routine 521 IF ( ar%sendindex + ar%sendsize > bufsize ) THEN 522 write(0,'(a,i4,4i7,1x,a)') 'Server Buffer too small ',i, & 523 ar%sendindex,ar%sendsize,ar%sendindex+ar%sendsize,bufsize,trim(ar%name) 524 CALL MPI_Abort (MPI_COMM_WORLD, istat, ierr) 525 ENDIF 526 ENDDO 527 ENDDO 528 529 ! 530 !-- Client to server direction 531 bufsize = 8 532 ! 533 !-- First stride: compute size and set index 534 DO i = 1, clients(clientid)%inter_npes 535 536 ape => clients(clientid)%pes(i) 537 tag = 300 538 539 DO j = 1, ape%nr_arrays 540 541 ar => ape%array_list(j) 542 ! 543 !-- Receive index from client 544 tag = tag + 1 545 CALL MPI_RECV( myindex, 1, MPI_INTEGER, i-1, tag, & 546 clients(clientid)%inter_comm, MPI_STATUS_IGNORE, ierr ) 547 548 IF ( ar%nrdims == 3 ) THEN 549 bufsize = MAX( bufsize, ape%nrele * ar%a_dim(4) ) 550 ELSE 551 bufsize = MAX( bufsize, ape%nrele ) 552 ENDIF 553 ar%recvindex = myindex 554 555 ENDDO 556 557 ENDDO 558 559 ! 560 !-- Create RMA (one sided communication) data buffer. 561 !-- The buffer for MPI_GET can be PE local, i.e. it can but must not be part of 562 !-- the MPI RMA window 563 CALL pmc_alloc_mem( base_array_cs, bufsize, base_ptr ) 564 clients(clientid)%totalbuffersize = bufsize * wp 565 566 CALL MPI_BARRIER( clients(clientid)%intra_comm, ierr ) 567 ! 
568 !-- Second stride: set buffer pointer 569 DO i = 1, clients(clientid)%inter_npes 570 571 ape => clients(clientid)%pes(i) 572 573 DO j = 1, ape%nr_arrays 574 ar => ape%array_list(j) 575 ar%recvbuf = base_ptr 576 ENDDO 577 578 ENDDO 579 580 END SUBROUTINE pmc_s_setind_and_allocmem 581 582 583 584 SUBROUTINE pmc_s_fillbuffer( clientid, waittime ) 585 586 IMPLICIT NONE 587 588 INTEGER, INTENT(IN) :: clientid !< 589 590 REAL(wp), INTENT(OUT), OPTIONAL :: waittime !< 591 592 INTEGER :: ierr !< 593 INTEGER :: ij !< 594 INTEGER :: ip !< 595 INTEGER :: istat !< 596 INTEGER :: j !< 597 INTEGER :: myindex !< 598 599 INTEGER, DIMENSION(1) :: buf_shape 600 601 REAL(wp) :: t1 !< 602 REAL(wp) :: t2 !< 603 REAL(wp), POINTER, DIMENSION(:) :: buf !< 604 REAL(wp), POINTER, DIMENSION(:,:) :: data_2d !< 605 REAL(wp), POINTER, DIMENSION(:,:,:) :: data_3d !< 606 607 TYPE(pedef), POINTER :: ape !< 608 TYPE(arraydef), POINTER :: ar !< 609 610 ! 611 !-- Synchronization of the model is done in pmci_client_synchronize and 612 !-- pmci_server_synchronize. Therefor the RMA window can be filled without 613 !-- sychronization at this point and a barrier is not necessary. 614 !-- Please note that waittime has to be set in pmc_s_fillbuffer AND 615 !-- pmc_c_getbuffer 616 IF ( PRESENT( waittime) ) THEN 617 t1 = pmc_time() 618 CALL MPI_BARRIER( clients(clientid)%intra_comm, ierr ) 619 t2 = pmc_time() 620 waittime = t2- t1 621 ENDIF 622 623 DO ip = 1, clients(clientid)%inter_npes 624 625 ape => clients(clientid)%pes(ip) 626 627 DO j = 1, ape%nr_arrays 628 629 ar => ape%array_list(j) 630 myindex = 1 631 632 IF ( ar%nrdims == 2 ) THEN 633 634 buf_shape(1) = ape%nrele 635 CALL C_F_POINTER( ar%sendbuf, buf, buf_shape ) 636 CALL C_F_POINTER( ar%data, data_2d, ar%a_dim(1:2) ) 637 DO ij = 1, ape%nrele 638 buf(myindex) = data_2d(ape%locind(ij)%j,ape%locind(ij)%i) 639 myindex = myindex + 1 640 ENDDO 641 642 ELSEIF ( ar%nrdims == 3 ) THEN 643 644 buf_shape(1) = ape%nrele*ar%a_dim(4) 645 CALL C_F_POINTER( ar%sendbuf, buf, buf_shape ) 646 CALL C_F_POINTER( ar%data, data_3d, ar%a_dim(1:3) ) 647 DO ij = 1, ape%nrele 648 buf(myindex:myindex+ar%a_dim(4)-1) = & 649 data_3d(1:ar%a_dim(4),ape%locind(ij)%j,ape%locind(ij)%i) 650 myindex = myindex + ar%a_dim(4) 651 ENDDO 652 653 ENDIF 654 655 ENDDO 656 657 ENDDO 658 ! 659 !-- Buffer is filled 660 CALL MPI_BARRIER( clients(clientid)%intra_comm, ierr ) 661 662 END SUBROUTINE pmc_s_fillbuffer 663 664 665 666 SUBROUTINE pmc_s_getdata_from_buffer( clientid, waittime ) 667 668 IMPLICIT NONE 669 670 INTEGER, INTENT(IN) :: clientid !< 671 REAL(wp), INTENT(OUT), OPTIONAL :: waittime !< 672 673 INTEGER :: ierr !< 674 INTEGER :: ij !< 675 INTEGER :: ip !< 676 INTEGER :: istat !< 677 INTEGER :: j !< 678 INTEGER :: myindex !< 679 INTEGER :: nr !< 680 INTEGER :: target_pe !< 681 INTEGER(kind=MPI_ADDRESS_KIND) :: target_disp !< 682 683 INTEGER, DIMENSION(1) :: buf_shape !< 684 685 REAL(wp) :: t1 !< 686 REAL(wp) :: t2 !< 687 REAL(wp), POINTER, DIMENSION(:) :: buf !< 688 REAL(wp), POINTER, DIMENSION(:,:) :: data_2d !< 689 REAL(wp), POINTER, DIMENSION(:,:,:) :: data_3d !< 690 691 TYPE(pedef), POINTER :: ape !< 692 TYPE(arraydef), POINTER :: ar !< 693 694 695 t1 = pmc_time() 696 ! 697 !-- Wait for client to fill buffer 698 CALL MPI_BARRIER( clients(clientid)%intra_comm, ierr ) 699 t2 = pmc_time() - t1 700 IF ( PRESENT( waittime ) ) waittime = t2 701 ! 
702 !-- TODO: check next statement 703 !-- Fence might do it, test later 704 !-- CALL MPI_WIN_FENCE( 0, clients(clientid)%win_server_client, ierr) 705 CALL MPI_BARRIER( clients(clientid)%intra_comm, ierr ) 706 707 DO ip = 1, clients(clientid)%inter_npes 708 709 ape => clients(clientid)%pes(ip) 710 711 DO j = 1, ape%nr_arrays 712 713 ar => ape%array_list(j) 714 715 IF ( ar%recvindex < 0 ) CYCLE 716 717 IF ( ar%nrdims == 2 ) THEN 718 nr = ape%nrele 719 ELSEIF ( ar%nrdims == 3 ) THEN 720 nr = ape%nrele * ar%a_dim(4) 721 ENDIF 722 723 buf_shape(1) = nr 724 CALL C_F_POINTER( ar%recvbuf, buf, buf_shape ) 725 726 ! 727 !-- MPI passive target RMA 728 IF ( nr > 0 ) THEN 729 target_disp = ar%recvindex - 1 730 ! 731 !-- Client PEs are located behind server PEs 732 target_pe = ip - 1 + m_model_npes 733 CALL MPI_WIN_LOCK( MPI_LOCK_SHARED, target_pe, 0, & 734 clients(clientid)%win_server_client, ierr ) 735 CALL MPI_GET( buf, nr, MPI_REAL, target_pe, target_disp, nr, & 736 MPI_REAL, clients(clientid)%win_server_client, ierr ) 737 CALL MPI_WIN_UNLOCK( target_pe, & 738 clients(clientid)%win_server_client, ierr ) 739 ENDIF 740 741 myindex = 1 742 IF ( ar%nrdims == 2 ) THEN 743 744 CALL C_F_POINTER( ar%data, data_2d, ar%a_dim(1:2) ) 745 DO ij = 1, ape%nrele 746 data_2d(ape%locind(ij)%j,ape%locind(ij)%i) = buf(myindex) 747 myindex = myindex + 1 748 ENDDO 749 750 ELSEIF ( ar%nrdims == 3 ) THEN 751 752 CALL C_F_POINTER( ar%data, data_3d, ar%a_dim(1:3)) 753 DO ij = 1, ape%nrele 754 data_3d(1:ar%a_dim(4),ape%locind(ij)%j,ape%locind(ij)%i) = & 755 buf(myindex:myindex+ar%a_dim(4)-1) 756 myindex = myindex + ar%a_dim(4) 757 ENDDO 758 759 ENDIF 760 761 ENDDO 762 763 ENDDO 764 765 END SUBROUTINE pmc_s_getdata_from_buffer 766 767 768 769 SUBROUTINE get_da_names_from_client( clientid ) 770 ! 771 !-- Get data array description and name from client 772 IMPLICIT NONE 773 774 INTEGER, INTENT(IN) :: clientid !< 775 776 TYPE(da_namedef) :: myname !< 777 778 DO 779 CALL pmc_bcast( myname%couple_index, 0, comm=m_to_client_comm(clientid) ) 780 IF ( myname%couple_index == -1 ) EXIT 781 CALL pmc_bcast( myname%serverdesc, 0, comm=m_to_client_comm(clientid) ) 782 CALL pmc_bcast( myname%nameonserver, 0, comm=m_to_client_comm(clientid) ) 783 CALL pmc_bcast( myname%clientdesc, 0, comm=m_to_client_comm(clientid) ) 784 CALL pmc_bcast( myname%nameonclient, 0, comm=m_to_client_comm(clientid) ) 785 786 CALL pmc_g_setname( clients(clientid), myname%couple_index, & 787 myname%nameonserver ) 788 ENDDO 789 790 END SUBROUTINE get_da_names_from_client 791 792 793 794 SUBROUTINE pmc_s_setarray(clientid, nrdims, dims, array_adr, second_adr ) 795 ! 
796 !-- Set array for client interPE 0 797 IMPLICIT NONE 798 799 INTEGER, INTENT(IN) :: clientid !< 800 INTEGER, INTENT(IN) :: nrdims !< 801 INTEGER, INTENT(IN), DIMENSION(:) :: dims !< 802 803 TYPE(C_PTR), INTENT(IN) :: array_adr !< 804 TYPE(C_PTR), INTENT(IN), OPTIONAL :: second_adr !< 805 806 INTEGER :: i !< local counter 807 808 TYPE(pedef), POINTER :: ape !< 809 TYPE(arraydef), POINTER :: ar !< 810 811 812 DO i = 1, clients(clientid)%inter_npes 813 814 ape => clients(clientid)%pes(i) 815 ar => ape%array_list(next_array_in_list) 816 ar%nrdims = nrdims 817 ar%a_dim = dims 818 ar%data = array_adr 819 820 IF ( PRESENT( second_adr ) ) THEN 821 ar%po_data(1) = array_adr 822 ar%po_data(2) = second_adr 823 ELSE 824 ar%po_data(1) = C_NULL_PTR 825 ar%po_data(2) = C_NULL_PTR 826 ENDIF 827 828 ENDDO 829 830 END SUBROUTINE pmc_s_setarray 831 832 833 834 SUBROUTINE pmc_s_set_active_data_array( clientid, iactive ) 835 836 IMPLICIT NONE 837 838 INTEGER, INTENT(IN) :: clientid !< 839 INTEGER, INTENT(IN) :: iactive !< 840 841 INTEGER :: i !< 842 INTEGER :: ip !< 843 INTEGER :: j !< 844 845 TYPE(pedef), POINTER :: ape !< 846 TYPE(arraydef), POINTER :: ar !< 847 848 DO ip = 1, clients(clientid)%inter_npes 849 850 ape => clients(clientid)%pes(ip) 851 852 DO j = 1, ape%nr_arrays 853 854 ar => ape%array_list(j) 855 IF ( iactive == 1 .OR. iactive == 2 ) THEN 856 ar%data = ar%po_data(iactive) 857 ENDIF 858 859 ENDDO 860 861 ENDDO 862 863 END SUBROUTINE pmc_s_set_active_data_array 864 865 866 867 SUBROUTINE set_pe_index_list( clientid, myclient, index_list, nrp ) 868 869 IMPLICIT NONE 870 871 INTEGER, INTENT(IN) :: clientid !< 872 INTEGER, INTENT(IN), DIMENSION(:,:) :: index_list !< 873 INTEGER, INTENT(IN) :: nrp !< 874 875 TYPE(clientdef), INTENT(INOUT) :: myclient !< 876 877 INTEGER :: i !< 878 INTEGER :: ierr !< 879 INTEGER :: ind !< 880 INTEGER :: indwin !< 881 INTEGER :: indwin2 !< 882 INTEGER :: i2 !< 883 INTEGER :: j !< 884 INTEGER :: rempe !< 885 INTEGER(KIND=MPI_ADDRESS_KIND) :: winsize !< 886 887 INTEGER, DIMENSION(myclient%inter_npes) :: remind !< 888 889 INTEGER, DIMENSION(:), POINTER :: remindw !< 890 INTEGER, DIMENSION(:), POINTER :: rldef !< 891 892 TYPE(pedef), POINTER :: ape !< 893 894 ! 895 !-- First, count entries for every remote client PE 896 DO i = 1, myclient%inter_npes 897 ape => myclient%pes(i) 898 ape%nrele = 0 899 ENDDO 900 ! 901 !-- Loop over number of coarse grid cells 902 DO j = 1, nrp 903 rempe = index_list(5,j) + 1 ! PE number on remote PE 904 ape => myclient%pes(rempe) 905 ape%nrele = ape%nrele + 1 ! Increment number of elements for this client PE 906 ENDDO 907 908 DO i = 1, myclient%inter_npes 909 ape => myclient%pes(i) 910 ALLOCATE( ape%locind(ape%nrele) ) 911 ENDDO 912 913 remind = 0 914 915 ! 916 !-- Second, create lists 917 !-- Loop over number of coarse grid cells 918 DO j = 1, nrp 919 rempe = index_list(5,j) + 1 920 ape => myclient%pes(rempe) 921 remind(rempe) = remind(rempe)+1 922 ind = remind(rempe) 923 ape%locind(ind)%i = index_list(1,j) 924 ape%locind(ind)%j = index_list(2,j) 925 ENDDO 926 ! 927 !-- Prepare number of elements for client PEs 928 CALL pmc_alloc_mem( rldef, myclient%inter_npes*2 ) 929 ! 930 !-- Number of client PEs * size of INTEGER (i just arbitrary INTEGER) 931 winsize = myclient%inter_npes*c_sizeof(i)*2 932 933 CALL MPI_WIN_CREATE( rldef, winsize, iwp, MPI_INFO_NULL, & 934 myclient%intra_comm, indwin, ierr ) 935 ! 936 !-- Open window to set data 937 CALL MPI_WIN_FENCE( 0, indwin, ierr ) 938 939 rldef(1) = 0 ! 
index on remote PE 0 940 rldef(2) = remind(1) ! number of elements on remote PE 0 941 ! 942 !-- Reserve buffer for index array 943 DO i = 2, myclient%inter_npes 944 i2 = (i-1) * 2 + 1 945 rldef(i2) = rldef(i2-2) + rldef(i2-1) * 2 ! index on remote PE 946 rldef(i2+1) = remind(i) ! number of elements on remote PE 947 ENDDO 948 ! 949 !-- Close window to allow client to access data 950 CALL MPI_WIN_FENCE( 0, indwin, ierr ) 951 ! 952 !-- Client has retrieved data 953 CALL MPI_WIN_FENCE( 0, indwin, ierr ) 954 955 i2 = 2 * myclient%inter_npes - 1 956 winsize = ( rldef(i2) + rldef(i2+1) ) * 2 957 ! 958 !-- Make sure, MPI_ALLOC_MEM works 959 winsize = MAX( winsize, 1 ) 960 961 CALL pmc_alloc_mem( remindw, INT( winsize ) ) 962 963 CALL MPI_BARRIER( m_model_comm, ierr ) 964 CALL MPI_WIN_CREATE( remindw, winsize*c_sizeof(i), iwp, MPI_INFO_NULL, & 965 myclient%intra_comm, indwin2, ierr ) 966 ! 967 !-- Open window to set data 968 CALL MPI_WIN_FENCE( 0, indwin2, ierr ) 969 ! 970 !-- Create the 2D index list 971 DO j = 1, nrp 972 rempe = index_list(5,j) + 1 ! PE number on remote PE 973 ape => myclient%pes(rempe) 974 i2 = rempe * 2 - 1 975 ind = rldef(i2) + 1 976 remindw(ind) = index_list(3,j) 977 remindw(ind+1) = index_list(4,j) 978 rldef(i2) = rldef(i2)+2 979 ENDDO 980 ! 981 !-- All data areset 982 CALL MPI_WIN_FENCE( 0, indwin2, ierr ) 983 ! 984 !-- Don't know why, but this barrier is necessary before windows can be freed 985 !-- TODO: find out why this is required 986 CALL MPI_BARRIER( myclient%intra_comm, ierr ) 987 988 CALL MPI_WIN_FREE( indwin, ierr ) 989 CALL MPI_WIN_FREE( indwin2, ierr ) 990 991 !-- TODO: check if the following idea needs to be done 992 !-- Sollte funktionieren, Problem mit MPI implementation 993 !-- https://www.lrz.de/services/software/parallel/mpi/onesided 994 !-- CALL MPI_Free_mem (remindw, ierr) 995 996 END SUBROUTINE set_pe_index_list 819 997 820 998 #endif 821 END MODULE pmc_server999 END MODULE pmc_server
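The reformatted routines keep the original data-transfer mechanism: pmc_s_set_dataarray_2d/3d store only C addresses (TYPE(C_PTR)) of the model arrays, and pmc_s_fillbuffer later re-associates those addresses with typed Fortran pointers via C_F_POINTER before packing the data into the 1-d send buffer. The following stand-alone sketch is not part of the changeset; it uses the default REAL kind and purely illustrative names and array sizes to show that C_LOC / C_F_POINTER round trip in isolation.

! Illustrative only -- not part of pmc_server_mod.f90.  Shows the C_LOC /
! C_F_POINTER round trip that pmc_s_set_dataarray_2d and pmc_s_fillbuffer
! use to pack a 2-d model array into a 1-d transfer buffer.
PROGRAM pack_sketch

   USE, INTRINSIC ::  ISO_C_BINDING

   IMPLICIT NONE

   INTEGER, PARAMETER :: nx = 3  !< illustrative array extents
   INTEGER, PARAMETER :: ny = 2  !<

   INTEGER :: i, j, myindex

   REAL, DIMENSION(ny,nx), TARGET :: field    !< stands for the model array
   REAL, DIMENSION(nx*ny), TARGET :: sendbuf  !< stands for the transfer buffer

   REAL, DIMENSION(:,:), POINTER :: data_2d
   REAL, DIMENSION(:),   POINTER :: buf

   TYPE(C_PTR) :: buf_adr, data_adr


   field = 1.0
!
!-- Only the C addresses are stored (compare ar%data and ar%sendbuf)
   data_adr = C_LOC( field )
   buf_adr  = C_LOC( sendbuf )
!
!-- Recover typed Fortran pointers of known shape from the stored addresses
   CALL C_F_POINTER( data_adr, data_2d, (/ ny, nx /) )
   CALL C_F_POINTER( buf_adr,  buf,     (/ nx*ny /) )
!
!-- Pack element by element, as done per transfer array in pmc_s_fillbuffer
   myindex = 1
   DO  i = 1, nx
      DO  j = 1, ny
         buf(myindex) = data_2d(j,i)
         myindex      = myindex + 1
      ENDDO
   ENDDO

   PRINT*, 'packed elements: ', myindex - 1

END PROGRAM pack_sketch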
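The client-to-server transfer in pmc_s_getdata_from_buffer uses MPI one-sided communication: the buffers are exposed in an RMA window (created in pmc_s_setind_and_allocmem) and read with passive-target MPI_GET. The sketch below is likewise not part of the changeset; it assumes the default REAL kind, and all names (win, base, buf, target_pe) are placeholders chosen for illustration only.

! Illustrative only -- not part of pmc_server_mod.f90.  Minimal sketch of the
! window creation and passive-target lock/get/unlock pattern used by
! pmc_s_setind_and_allocmem and pmc_s_getdata_from_buffer.
PROGRAM rma_get_sketch

   USE MPI

   IMPLICIT NONE

   INTEGER, PARAMETER :: n = 8                  !< elements exposed per PE

   INTEGER :: disp_unit                         !< bytes per buffer element
   INTEGER :: ierr, npes, nr, rank, target_pe, win

   INTEGER(KIND=MPI_ADDRESS_KIND) :: target_disp, winsize

   REAL, DIMENSION(n) :: base                   !< memory exposed in the window
   REAL, DIMENSION(n) :: buf                    !< local copy of remote data


   CALL MPI_INIT( ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, rank, ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, npes, ierr )

   base      = REAL( rank )
   disp_unit = STORAGE_SIZE( base(1) ) / 8
   winsize   = INT( n, MPI_ADDRESS_KIND ) * disp_unit
!
!-- Every PE exposes its base array in one RMA window (compare MPI_WIN_CREATE
!-- on base_array_sc in pmc_s_setind_and_allocmem)
   CALL MPI_WIN_CREATE( base, winsize, disp_unit, MPI_INFO_NULL,              &
                        MPI_COMM_WORLD, win, ierr )
!
!-- Passive-target access: lock the target PE, copy nr elements starting at
!-- target_disp into the local buffer, unlock (compare the MPI_WIN_LOCK /
!-- MPI_GET / MPI_WIN_UNLOCK sequence in pmc_s_getdata_from_buffer)
   nr          = n
   target_disp = 0
   target_pe   = MOD( rank+1, npes )

   CALL MPI_WIN_LOCK( MPI_LOCK_SHARED, target_pe, 0, win, ierr )
   CALL MPI_GET( buf, nr, MPI_REAL, target_pe, target_disp, nr, MPI_REAL,     &
                 win, ierr )
   CALL MPI_WIN_UNLOCK( target_pe, win, ierr )

   CALL MPI_WIN_FREE( win, ierr )
   CALL MPI_FINALIZE( ierr )

END PROGRAM rma_get_sketch

In the module itself the window spans the intra-communicator that merges server and client PEs, which is why the target rank of a client PE is offset by m_model_npes.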