MODULE pmc_server

!--------------------------------------------------------------------------------!
! This file is part of PALM.
!
! PALM is free software: you can redistribute it and/or modify it under the terms
! of the GNU General Public License as published by the Free Software Foundation,
! either version 3 of the License, or (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2015 Leibniz Universitaet Hannover
!--------------------------------------------------------------------------------!
!
! Current revisions:
! ------------------
!
!
! Former revisions:
! -----------------
! $Id: pmc_server.f90 1809 2016-04-05 20:13:28Z knoop $
!
! 1808 2016-04-05 19:44:00Z raasch
! MPI module used by default on all machines
!
! 1797 2016-03-21 16:50:28Z raasch
! introduction of different datatransfer modes
!
! 1791 2016-03-11 10:41:25Z raasch
! Debug write-statements commented out
!
! 1786 2016-03-08 05:49:27Z raasch
! change in client-server data transfer: server now gets data from client
! instead of the client putting it to the server
!
! 1779 2016-03-03 08:01:28Z raasch
! kind=dp replaced by wp,
! error messages removed or changed to PALM style, dim_order removed
! array management changed from linked list to sequential loop
!
! 1766 2016-02-29 08:37:15Z raasch
! modifications to allow for using PALM's pointer version
! +new routine pmc_s_set_active_data_array
!
! 1764 2016-02-28 12:45:19Z raasch
! cpp-statement added (nesting can only be used in parallel mode)
!
! 1762 2016-02-25 12:31:13Z hellstea
! Initial revision by K. Ketelsen
!
! Description:
! ------------
!
! Server part of Palm Model Coupler
!------------------------------------------------------------------------------!

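!
!-- Typical call sequence on the server side, as driven by pmc_interface.
!-- The call sites and argument names are illustrative only; just the routine
!-- names below are defined in this module:
!--
!--    CALL PMC_ServerInit                              ! once, communicator setup
!--    CALL PMC_S_Set_2D_index_list (client_id, list)   ! distribute coarse-grid index list
!--    CALL PMC_S_clear_next_array_list
!--    DO WHILE ( PMC_S_GetNextArray (client_id, name) )
!--       CALL PMC_S_Set_DataArray (client_id, ...)     ! register 2d/3d transfer arrays
!--    ENDDO
!--    CALL PMC_S_setInd_and_AllocMem (client_id)       ! create the RMA windows
!--    ...
!--    CALL PMC_S_FillBuffer (client_id)                ! each coupling step, server -> client
!--    CALL PMC_S_GetData_from_Buffer (client_id)       ! each coupling step, client -> server
!
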
#if defined( __parallel )
   USE, INTRINSIC ::  iso_c_binding

#if defined( __mpifh )
   INCLUDE "mpif.h"
#else
   USE MPI
#endif
   USE kinds
   USE PMC_general,              ONLY:  ClientDef, PMC_MAX_MODELL, PMC_sort,       &
                                        DA_NameDef, DA_Desclen, DA_Namelen,        &
                                        PMC_G_SetName, PeDef, ArrayDef,            &
                                        PMC_MAX_ARRAY
   USE PMC_handle_communicator,  ONLY:  m_model_comm, m_model_rank, m_model_npes,  &
                                        m_to_client_comm,                          &
                                        PMC_Server_for_Client, m_world_rank
   USE PMC_MPI_wrapper,          ONLY:  PMC_Send_to_Client, PMC_Recv_from_Client,  &
                                        PMC_Bcast, PMC_Inter_Bcast,                &
                                        PMC_Alloc_mem, PMC_Time

   IMPLICIT none
   PRIVATE
   SAVE

   TYPE ClientIndexDef
      INTEGER                              ::  NrPoints
      INTEGER, DIMENSION(:,:), ALLOCATABLE ::  index_list_2d
   END TYPE ClientIndexDef

   TYPE(ClientDef), DIMENSION(PMC_MAX_MODELL)      ::  Clients
   TYPE(ClientIndexDef), DIMENSION(PMC_MAX_MODELL) ::  indClients

   INTEGER ::  next_array_in_list = 0

   PUBLIC PMC_Server_for_Client

   INTERFACE PMC_ServerInit
      MODULE procedure  PMC_ServerInit
   END INTERFACE PMC_ServerInit

   INTERFACE PMC_S_Set_2D_index_list
      MODULE procedure PMC_S_Set_2D_index_list
   END INTERFACE PMC_S_Set_2D_index_list

   INTERFACE PMC_S_clear_next_array_list
      MODULE procedure PMC_S_clear_next_array_list
   END INTERFACE PMC_S_clear_next_array_list

   INTERFACE PMC_S_GetNextArray
      MODULE procedure PMC_S_GetNextArray
   END INTERFACE PMC_S_GetNextArray

   INTERFACE PMC_S_Set_DataArray
      MODULE procedure PMC_S_Set_DataArray_2d
      MODULE procedure PMC_S_Set_DataArray_3d
   END INTERFACE PMC_S_Set_DataArray

   INTERFACE PMC_S_setInd_and_AllocMem
      MODULE procedure PMC_S_setInd_and_AllocMem
   END INTERFACE PMC_S_setInd_and_AllocMem

   INTERFACE PMC_S_FillBuffer
      MODULE procedure PMC_S_FillBuffer
   END INTERFACE PMC_S_FillBuffer

   INTERFACE PMC_S_GetData_from_Buffer
      MODULE procedure PMC_S_GetData_from_Buffer
   END INTERFACE PMC_S_GetData_from_Buffer

   INTERFACE PMC_S_Set_Active_data_array
      MODULE procedure PMC_S_Set_Active_data_array
   END INTERFACE PMC_S_Set_Active_data_array

!
!-- PUBLIC section
   PUBLIC PMC_ServerInit, PMC_S_Set_2D_index_list, PMC_S_GetNextArray,             &
          PMC_S_Set_DataArray
   PUBLIC PMC_S_setInd_and_AllocMem, PMC_S_FillBuffer,                             &
          PMC_S_GetData_from_Buffer, PMC_S_Set_Active_data_array
   PUBLIC PMC_S_clear_next_array_list

CONTAINS

SUBROUTINE PMC_ServerInit

   IMPLICIT none

   INTEGER ::  i
   INTEGER ::  j
   INTEGER ::  ClientId
   INTEGER ::  istat

   do i = 1, size( PMC_Server_for_Client ) - 1
!     if(m_model_comm == 0) write(0,*) 'PMC_Server: Initialize client Id', PMC_Server_for_Client(i)

      ClientId = PMC_Server_for_Client(i)

      Clients(ClientId)%model_comm = m_model_comm
      Clients(ClientId)%inter_comm = m_to_client_comm(ClientId)

!
!--   Get rank and size
      CALL MPI_Comm_rank (Clients(ClientId)%model_comm, Clients(ClientId)%model_rank, istat)
      CALL MPI_Comm_size (Clients(ClientId)%model_comm, Clients(ClientId)%model_npes, istat)
      CALL MPI_Comm_remote_size (Clients(ClientId)%inter_comm, Clients(ClientId)%inter_npes, istat)

!
!--   Intra communicator is used for MPI_Get
      CALL MPI_Intercomm_merge (Clients(ClientId)%inter_comm, .false., Clients(ClientId)%intra_comm, istat)
      CALL MPI_Comm_rank (Clients(ClientId)%intra_comm, Clients(ClientId)%intra_rank, istat)

!     write(9,*) 'ClientId ',i,ClientId,m_world_rank, Clients(ClientId)%inter_npes

      ALLOCATE (Clients(ClientId)%PEs(Clients(ClientId)%inter_npes))
!
!--   Allocate for all client PEs an array of TYPE ArrayDef to store information on the transfer arrays
      do j = 1, Clients(ClientId)%inter_npes
         Allocate (Clients(ClientId)%PEs(j)%array_list(PMC_MAX_ARRAY))
      end do

      CALL Get_DA_names_from_client (ClientId)
   end do

   return
END SUBROUTINE PMC_ServerInit

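!
!-- The 2-d index list is assumed to carry six rows per coarse-grid point
!-- (deduced from its use below and in Set_PE_index_list; the exact meaning of
!-- rows 1-4 is defined by the caller in pmc_interface):
!--    rows 1,2 : i, j index on the server (coarse) grid
!--    rows 3,4 : i, j index handed on to the client PE
!--    row  5   : rank of the client PE
!--    row  6   : rank of the server PE (sort key in PMC_S_Set_2D_index_list)
!
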
SUBROUTINE PMC_S_Set_2D_index_list (ClientId, index_list)

   IMPLICIT none

   INTEGER, INTENT(IN)                    ::  ClientId
   INTEGER, DIMENSION(:,:), INTENT(INOUT) ::  index_list   ! Index list is modified in sort, therefore INOUT

!
!-- Local variables
   INTEGER ::  ip, is, ie, ian, ic, n
   INTEGER ::  istat

   if ( m_model_rank == 0 ) then
      CALL PMC_sort (index_list, 6)                     ! Sort to ascending server PE
      is = 1

      do ip = 0, m_model_npes-1                         ! Split into server PEs
         ie = is-1                                      ! There may be no entry for this PE
         if ( is <= size(index_list,2)  .and.  ie >= 0 ) then
            do while ( index_list(6,ie+1) == ip )
               ie = ie+1
               if ( ie == size(index_list,2) ) EXIT
            end do
            ian = ie-is+1
         else
            is  = -1
            ie  = -2
            ian = 0
         end if

!
!--      Send data to other server PEs
         if ( ip == 0 ) then
            indClients(ClientId)%NrPoints = ian
            if ( ian > 0 ) then
               ALLOCATE (indClients(ClientId)%index_list_2d(6,ian))
               indClients(ClientId)%index_list_2d(:,1:ian) = index_list(:,is:ie)
            end if
         else
            CALL MPI_Send (ian, 1, MPI_INTEGER, ip, 1000, m_model_comm, istat)
            if ( ian > 0 ) then
               CALL MPI_Send (index_list(1,is), 6*ian, MPI_INTEGER, ip, 1001,       &
                              m_model_comm, istat)
            end if
         end if
         is = ie+1
      end do
   else
      CALL MPI_Recv (indClients(ClientId)%NrPoints, 1, MPI_INTEGER, 0, 1000,        &
                     m_model_comm, MPI_STATUS_IGNORE, istat)
      ian = indClients(ClientId)%NrPoints
      if ( ian > 0 ) then
         ALLOCATE (indClients(ClientId)%index_list_2d(6,ian))
         CALL MPI_RECV (indClients(ClientId)%index_list_2d, 6*ian, MPI_INTEGER, 0,  &
                        1001, m_model_comm, MPI_STATUS_IGNORE, istat)
      end if
   end if

   CALL Set_PE_index_list (ClientId, Clients(ClientId),                             &
                           indClients(ClientId)%index_list_2d,                      &
                           indClients(ClientId)%NrPoints)

   return
END SUBROUTINE PMC_S_Set_2D_index_list

SUBROUTINE PMC_S_clear_next_array_list

   IMPLICIT none

   next_array_in_list = 0

   return
END SUBROUTINE PMC_S_clear_next_array_list

!
!-- List handling is still required to get minimal interaction with pmc_interface
logical function PMC_S_GetNextArray (ClientId, myName)

   INTEGER(iwp), INTENT(IN)      ::  ClientId
   CHARACTER(len=*), INTENT(OUT) ::  myName

!
!-- Local variables
   TYPE(PeDef), POINTER    ::  aPE
   TYPE(ArrayDef), POINTER ::  ar

   next_array_in_list = next_array_in_list + 1

!
!-- Array names are the same on all client PEs, so take the first PE to get the name
   aPE => Clients(ClientId)%PEs(1)

   if ( next_array_in_list > aPE%Nr_arrays ) then
      PMC_S_GetNextArray = .false.                      ! All arrays done
      return
   end if

   ar => aPE%array_list(next_array_in_list)
   myName = ar%name

   PMC_S_GetNextArray = .true.                          ! Return true if legal array

   return
END function PMC_S_GetNextArray

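!
!-- PMC_S_Set_DataArray is a generic interface (see the INTERFACE block above);
!-- the rank of the actual array argument selects the 2-d or 3-d specific
!-- routine. A call from pmc_interface might look roughly like this (argument
!-- names are illustrative only):
!--
!--    CALL PMC_S_Set_DataArray (client_id, p_2d)                     ! resolves to ..._2d
!--    CALL PMC_S_Set_DataArray (client_id, p_3d, nz_cl, nz, p_3d_2)  ! resolves to ..._3d
!
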
SUBROUTINE PMC_S_Set_DataArray_2d (ClientId, array, array_2)

   IMPLICIT none

   INTEGER, INTENT(IN)                            ::  ClientId
   REAL(wp), INTENT(IN), DIMENSION(:,:)           ::  array
   REAL(wp), INTENT(IN), DIMENSION(:,:), OPTIONAL ::  array_2

   INTEGER               ::  NrDims
   INTEGER, DIMENSION(4) ::  dims
   TYPE(c_ptr)           ::  array_adr
   TYPE(c_ptr)           ::  second_adr

   dims = 1

   NrDims  = 2
   dims(1) = size(array,1)
   dims(2) = size(array,2)
   array_adr = c_loc(array)

   IF ( PRESENT( array_2 ) )  THEN
      second_adr = c_loc(array_2)
      CALL PMC_S_SetArray (ClientId, NrDims, dims, array_adr, second_adr = second_adr)
   ELSE
      CALL PMC_S_SetArray (ClientId, NrDims, dims, array_adr)
   ENDIF

   return
END SUBROUTINE PMC_S_Set_DataArray_2d

SUBROUTINE PMC_S_Set_DataArray_3d (ClientId, array, nz_cl, nz, array_2)

   IMPLICIT none

   INTEGER, INTENT(IN)                              ::  ClientId
   REAL(wp), INTENT(IN), DIMENSION(:,:,:)           ::  array
   REAL(wp), INTENT(IN), DIMENSION(:,:,:), OPTIONAL ::  array_2
   INTEGER, INTENT(IN)                              ::  nz_cl
   INTEGER, INTENT(IN)                              ::  nz

   INTEGER               ::  NrDims
   INTEGER, DIMENSION(4) ::  dims
   TYPE(c_ptr)           ::  array_adr
   TYPE(c_ptr)           ::  second_adr

   dims = 0

   NrDims  = 3
   dims(1) = size(array,1)
   dims(2) = size(array,2)
   dims(3) = size(array,3)
   dims(4) = nz_cl+dims(1)-nz                           ! works for first dimension 1:nz and 0:nz+1

   array_adr = c_loc(array)

!
!-- In PALM's pointer version, two indices have to be stored internally.
!-- The active address of the data array is set in swap_timelevel.
   IF ( PRESENT( array_2 ) )  THEN
      second_adr = c_loc(array_2)
      CALL PMC_S_SetArray (ClientId, NrDims, dims, array_adr, second_adr = second_adr)
   ELSE
      CALL PMC_S_SetArray (ClientId, NrDims, dims, array_adr)
   ENDIF

   return
END SUBROUTINE PMC_S_Set_DataArray_3d

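!
!-- Buffer layout used by PMC_S_setInd_and_AllocMem (deduced from the code
!-- below): all arrays of all client PEs share one RMA window on the server
!-- side. For each (client PE, array) pair a contiguous slice is reserved,
!-- starting at SendIndex (1-based, counted in REAL(wp) words) with SendSize
!-- elements, where SendSize = NrEle for 2-d and NrEle*A_dim(4) for 3-d
!-- arrays, e.g.
!--
!--    base_array_sc:  | PE1/ar1 | PE1/ar2 | ... | PE2/ar1 | ... |
!
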
SUBROUTINE PMC_S_setInd_and_AllocMem (ClientId)

   USE control_parameters,                                                          &
       ONLY:  message_string

   IMPLICIT none

!
!-- Naming convention for appendices:  _sc  -> server to client transfer
!--                                    _cs  -> client to server transfer
!--                                    Send -> server to client transfer
!--                                    Recv -> client to server transfer
   INTEGER, INTENT(IN) ::  ClientId

   INTEGER                   ::  i, istat, ierr, j
   INTEGER                   ::  arlen, myIndex, tag
   INTEGER                   ::  rCount                 ! count MPI requests
   INTEGER(idp)              ::  bufsize                ! size of the MPI data window
   TYPE(PeDef), POINTER      ::  aPE
   TYPE(ArrayDef), POINTER   ::  ar
   CHARACTER(len=DA_Namelen) ::  myName
   INTEGER, DIMENSION(1024)  ::  req
   TYPE(c_ptr)               ::  base_ptr
   REAL(wp), DIMENSION(:), POINTER, SAVE ::  base_array_sc   ! base array for server to client transfer
   REAL(wp), DIMENSION(:), POINTER, SAVE ::  base_array_cs   ! base array for client to server transfer
   INTEGER(KIND=MPI_ADDRESS_KIND)        ::  WinSize

!
!-- Server to client direction
   myIndex = 1
   rCount  = 0
   bufsize = 8

!
!-- First stride: compute size and set index
   do i = 1, Clients(ClientId)%inter_npes
      aPE => Clients(ClientId)%PEs(i)
      tag = 200

      do j = 1, aPE%Nr_arrays
         ar => aPE%array_list(j)
         if ( ar%NrDims == 2 ) then
            arlen = aPE%NrEle                           ! 2D
         else if ( ar%NrDims == 3 ) then
            arlen = aPE%NrEle * ar%A_dim(4)             ! 3D
         else
            arlen = -1
         end if
         ar%SendIndex = myIndex

         tag    = tag+1
         rCount = rCount+1
         CALL MPI_Isend (myIndex, 1, MPI_INTEGER, i-1, tag,                         &
                         Clients(ClientId)%inter_comm, req(rCount), ierr)

         if ( rCount == 1024 ) then                     ! Maximum of 1024 outstanding requests
            CALL MPI_Waitall (rCount, req, MPI_STATUSES_IGNORE, ierr)
            rCount = 0
         end if

         myIndex = myIndex+arlen
         bufsize = bufsize+arlen
         ar%SendSize = arlen
      end do
      if ( rCount > 0 ) then                            ! Wait until all sends are completed
         CALL MPI_Waitall (rCount, req, MPI_STATUSES_IGNORE, ierr)
      end if
   end do

!
!-- Create RMA (One Sided Communication) window for the data buffer of the
!-- server to client transfer.
!-- The buffer of MPI_Get (counterpart of the transfer) can be PE-local, i.e.
!-- it can, but need not, be part of the MPI RMA window.
!-- Only one RMA window is required to prepare the data
!--    for server -> client transfer on the server side and
!--    for client -> server transfer on the client side.
   CALL PMC_Alloc_mem (base_array_sc, bufsize)
   Clients(ClientId)%TotalBufferSize = bufsize*wp       ! total buffer size in byte

   WinSize = bufsize*wp
   CALL MPI_Win_create (base_array_sc, WinSize, wp, MPI_INFO_NULL,                  &
                        Clients(ClientId)%intra_comm,                               &
                        Clients(ClientId)%win_server_client, ierr)
   CALL MPI_Win_fence (0, Clients(ClientId)%win_server_client, ierr)    ! Open window to set data

!
!-- Second stride: set buffer pointers
   do i = 1, Clients(ClientId)%inter_npes
      aPE => Clients(ClientId)%PEs(i)

      do j = 1, aPE%Nr_arrays
         ar => aPE%array_list(j)
         ar%SendBuf = c_loc(base_array_sc(ar%SendIndex))
         if ( ar%SendIndex+ar%SendSize > bufsize ) then
            write(0,'(a,i4,4i7,1x,a)') 'Server buffer too small ', i,               &
                      ar%SendIndex, ar%SendSize, ar%SendIndex+ar%SendSize,          &
                      bufsize, trim(ar%name)
            CALL MPI_Abort (MPI_COMM_WORLD, istat, ierr)
         end if
      end do
   end do

!
!-- Client to server direction
   bufsize = 8

!
!-- First stride: compute size and set index
   do i = 1, Clients(ClientId)%inter_npes
      aPE => Clients(ClientId)%PEs(i)
      tag = 300

      do j = 1, aPE%Nr_arrays
         ar => aPE%array_list(j)

!
!--      Receive index from client
         tag = tag+1
         CALL MPI_Recv (myIndex, 1, MPI_INTEGER, i-1, tag,                          &
                        Clients(ClientId)%inter_comm, MPI_STATUS_IGNORE, ierr)

         if ( ar%NrDims == 3 ) then
            bufsize = max(bufsize, aPE%NrEle * ar%A_dim(4))   ! 3D
         else
            bufsize = max(bufsize, aPE%NrEle)                 ! 2D
         end if
         ar%RecvIndex = myIndex
      end do
   end do

!
!-- Create RMA (One Sided Communication) data buffer.
!-- The buffer for MPI_Get can be PE-local, i.e. it can, but need not, be part
!-- of the MPI RMA window.
   CALL PMC_Alloc_mem (base_array_cs, bufsize, base_ptr)
   Clients(ClientId)%TotalBufferSize = bufsize*wp       ! total buffer size in byte

   CALL MPI_Barrier (Clients(ClientId)%intra_comm, ierr)

!
!-- Second stride: set buffer pointers
   do i = 1, Clients(ClientId)%inter_npes
      aPE => Clients(ClientId)%PEs(i)

      do j = 1, aPE%Nr_arrays
         ar => aPE%array_list(j)
         ar%RecvBuf = base_ptr
      end do
   end do

   return
END SUBROUTINE PMC_S_setInd_and_AllocMem

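!
!-- Packing order used by PMC_S_FillBuffer (and assumed to be matched by
!-- PMC_C_GetBuffer on the client side): for every client PE the arrays are
!-- packed in list order; a 2-d array contributes one value per transfer
!-- point, a 3-d array contributes A_dim(4) values per point with the
!-- vertical index running fastest.
!
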
SUBROUTINE PMC_S_FillBuffer (ClientId, WaitTime)

   IMPLICIT none

   INTEGER, INTENT(IN)             ::  ClientId
   REAL(wp), INTENT(OUT), OPTIONAL ::  WaitTime

!
!-- Local variables
   INTEGER                   ::  ip, ij, istat, ierr, j
   INTEGER                   ::  myIndex
   REAL(wp)                  ::  t1, t2
   TYPE(PeDef), POINTER      ::  aPE
   TYPE(ArrayDef), POINTER   ::  ar
   CHARACTER(len=DA_Namelen) ::  myName
   INTEGER, DIMENSION(1)     ::  buf_shape
   REAL(wp), POINTER, DIMENSION(:)     ::  buf
   REAL(wp), POINTER, DIMENSION(:,:)   ::  data_2d
   REAL(wp), POINTER, DIMENSION(:,:,:) ::  data_3d

!
!-- Synchronization of the model is done in pmci_client_synchronize and
!-- pmci_server_synchronize. Therefore the RMA window can be filled without
!-- synchronization at this point and the barrier is not necessary.
!-- Please note that WaitTime has to be set in PMC_S_FillBuffer AND PMC_C_GetBuffer
   if ( present(WaitTime) ) then
      t1 = PMC_Time()
      CALL MPI_Barrier (Clients(ClientId)%intra_comm, ierr)
      t2 = PMC_Time()
      WaitTime = t2-t1
   end if

   do ip = 1, Clients(ClientId)%inter_npes
      aPE => Clients(ClientId)%PEs(ip)

      do j = 1, aPE%Nr_arrays
         ar => aPE%array_list(j)
         myIndex = 1
         if ( ar%NrDims == 2 ) then
            buf_shape(1) = aPE%NrEle
            CALL c_f_pointer (ar%SendBuf, buf, buf_shape)
            CALL c_f_pointer (ar%data, data_2d, ar%A_dim(1:2))
            do ij = 1, aPE%NrEle
               buf(myIndex) = data_2d(aPE%locInd(ij)%j,aPE%locInd(ij)%i)
               myIndex = myIndex+1
            end do
         else if ( ar%NrDims == 3 ) then
            buf_shape(1) = aPE%NrEle*ar%A_dim(4)
            CALL c_f_pointer (ar%SendBuf, buf, buf_shape)
            CALL c_f_pointer (ar%data, data_3d, ar%A_dim(1:3))
            do ij = 1, aPE%NrEle
               buf(myIndex:myIndex+ar%A_dim(4)-1) =                                 &
                       data_3d(1:ar%A_dim(4),aPE%locInd(ij)%j,aPE%locInd(ij)%i)
               myIndex = myIndex+ar%A_dim(4)
            end do
         end if
      end do
   end do

   CALL MPI_Barrier (Clients(ClientId)%intra_comm, ierr)    ! buffer is filled

   return
END SUBROUTINE PMC_S_FillBuffer

SUBROUTINE PMC_S_GetData_from_Buffer (ClientId, WaitTime)

   IMPLICIT none

   INTEGER, INTENT(IN)             ::  ClientId
   REAL(wp), INTENT(OUT), OPTIONAL ::  WaitTime

!
!-- Local variables
   INTEGER                   ::  ip, ij, istat, ierr, j
   INTEGER                   ::  myIndex
   INTEGER                   ::  nr
   REAL(wp)                  ::  t1, t2
   TYPE(PeDef), POINTER      ::  aPE
   TYPE(ArrayDef), POINTER   ::  ar
   CHARACTER(len=DA_Namelen) ::  myName
   INTEGER, DIMENSION(1)     ::  buf_shape
   REAL(wp), POINTER, DIMENSION(:)     ::  buf
   REAL(wp), POINTER, DIMENSION(:,:)   ::  data_2d
   REAL(wp), POINTER, DIMENSION(:,:,:) ::  data_3d
   INTEGER                        ::  target_pe
   INTEGER(kind=MPI_ADDRESS_KIND) ::  target_disp

   t1 = PMC_Time()
   CALL MPI_Barrier (Clients(ClientId)%intra_comm, ierr)    ! Wait for client to fill buffer
   t2 = PMC_Time()-t1
   if ( present(WaitTime) )  WaitTime = t2

!  CALL MPI_Win_fence (0, Clients(ClientId)%win_server_client, ierr)    ! Fence might do it, test later
   CALL MPI_Barrier (Clients(ClientId)%intra_comm, ierr)    ! Wait until buffer is filled

   do ip = 1, Clients(ClientId)%inter_npes
      aPE => Clients(ClientId)%PEs(ip)

      do j = 1, aPE%Nr_arrays
         ar => aPE%array_list(j)

         if ( ar%RecvIndex < 0 )  CYCLE

         if ( ar%NrDims == 2 ) then
            nr = aPE%NrEle
         else if ( ar%NrDims == 3 ) then
            nr = aPE%NrEle*ar%A_dim(4)
         end if

         buf_shape(1) = nr
         CALL c_f_pointer (ar%RecvBuf, buf, buf_shape)

!
!--      MPI passive target RMA
         if ( nr > 0 ) then
            target_disp = ar%RecvIndex-1
            target_pe   = ip-1+m_model_npes             ! client PEs are located behind server PEs
            CALL MPI_Win_lock (MPI_LOCK_SHARED, target_pe, 0,                       &
                               Clients(ClientId)%win_server_client, ierr)
            CALL MPI_Get (buf, nr, MPI_REAL, target_pe, target_disp, nr, MPI_REAL,  &
                          Clients(ClientId)%win_server_client, ierr)
            CALL MPI_Win_unlock (target_pe, Clients(ClientId)%win_server_client, ierr)
         end if

         myIndex = 1
         if ( ar%NrDims == 2 ) then
            CALL c_f_pointer (ar%data, data_2d, ar%A_dim(1:2))
            do ij = 1, aPE%NrEle
               data_2d(aPE%locInd(ij)%j,aPE%locInd(ij)%i) = buf(myIndex)
               myIndex = myIndex+1
            end do
         else if ( ar%NrDims == 3 ) then
            CALL c_f_pointer (ar%data, data_3d, ar%A_dim(1:3))
            do ij = 1, aPE%NrEle
               data_3d(1:ar%A_dim(4),aPE%locInd(ij)%j,aPE%locInd(ij)%i) =           &
                                            buf(myIndex:myIndex+ar%A_dim(4)-1)
               myIndex = myIndex+ar%A_dim(4)
            end do
         end if
      end do
   end do

END SUBROUTINE PMC_S_GetData_from_Buffer

!
!-- Private SUBROUTINEs

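!
!-- The name exchange below relies on the client broadcasting one DA_NameDef
!-- per coupled array across the inter-communicator; a couple_index of -1
!-- serves as the end-of-list marker (the sending counterpart is presumably
!-- located in pmc_client).
!
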
SUBROUTINE Get_DA_names_from_client (ClientId)

   IMPLICIT none

   INTEGER, INTENT(IN) ::  ClientId

!
!-- Local variables
   type(DA_NameDef) ::  myName

!
!-- Get data array description and name from client
   do
      CALL PMC_Bcast (myName%couple_index, 0, comm=m_to_client_comm(ClientId))
      if ( myName%couple_index == -1 )  EXIT
      CALL PMC_Bcast (myName%ServerDesc,   0, comm=m_to_client_comm(ClientId))
      CALL PMC_Bcast (myName%NameOnServer, 0, comm=m_to_client_comm(ClientId))
      CALL PMC_Bcast (myName%ClientDesc,   0, comm=m_to_client_comm(ClientId))
      CALL PMC_Bcast (myName%NameOnClient, 0, comm=m_to_client_comm(ClientId))

      CALL PMC_G_SetName (clients(ClientID), myName%couple_index, myName%NameOnServer)
   end do

   return
END SUBROUTINE Get_DA_names_from_client

SUBROUTINE PMC_S_SetArray (ClientId, NrDims, dims, array_adr, second_adr)

   IMPLICIT none

   INTEGER, INTENT(IN)               ::  ClientId
   INTEGER, INTENT(IN)               ::  NrDims
   INTEGER, INTENT(IN), DIMENSION(:) ::  dims
   TYPE(c_ptr), INTENT(IN)           ::  array_adr
   TYPE(c_ptr), INTENT(IN), OPTIONAL ::  second_adr

!
!-- Local variables
   INTEGER                   ::  i
   TYPE(PeDef), POINTER      ::  aPE
   TYPE(ArrayDef), POINTER   ::  ar
   CHARACTER(len=DA_Namelen) ::  myName

!
!-- Set array for client interPE 0
   do i = 1, Clients(ClientId)%inter_npes
      aPE => Clients(ClientId)%PEs(i)
      ar  => aPE%array_list(next_array_in_list)
      ar%NrDims = NrDims
      ar%A_dim  = dims
      ar%data   = array_adr
      if ( present(second_adr) ) then
         ar%po_data(1) = array_adr
         ar%po_data(2) = second_adr
      else
         ar%po_data(1) = C_NULL_PTR
         ar%po_data(2) = C_NULL_PTR
      end if
   end do

   return
END SUBROUTINE PMC_S_SetArray

SUBROUTINE PMC_S_Set_Active_data_array (ClientId, iactive)

   IMPLICIT none

   INTEGER, INTENT(IN) ::  ClientId
   INTEGER, INTENT(IN) ::  iactive

!
!-- Local variables
   INTEGER                   ::  i, ip, j
   TYPE(PeDef), POINTER      ::  aPE
   TYPE(ArrayDef), POINTER   ::  ar
   CHARACTER(len=DA_Namelen) ::  myName

   do ip = 1, Clients(ClientId)%inter_npes
      aPE => Clients(ClientId)%PEs(ip)

      do j = 1, aPE%Nr_arrays
         ar => aPE%array_list(j)
         if ( iactive == 1 .OR. iactive == 2 ) then
            ar%data = ar%po_data(iactive)
         end if
      end do
   end do

   return
END SUBROUTINE PMC_S_Set_Active_data_array

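!
!-- Set_PE_index_list hands the index lists over to the client via two RMA
!-- windows (the reading side is presumably implemented in pmc_client):
!--    indWin  exposes RLdef, i.e. per client PE a (start index, number of
!--            elements) pair,
!--    indWin2 exposes RemIndw, the actual (i,j) index pairs, stored
!--            consecutively per client PE.
!-- The client is expected to access the windows between the matching fence
!-- calls below.
!
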
SUBROUTINE Set_PE_index_list (ClientId, myClient, index_list, NrP)

   IMPLICIT none

   INTEGER, INTENT(IN)                 ::  ClientId
   TYPE(ClientDef), INTENT(INOUT)      ::  myClient
   INTEGER, INTENT(IN), DIMENSION(:,:) ::  index_list
   INTEGER, INTENT(IN)                 ::  NrP

!
!-- Local variables
   INTEGER                        ::  i, j, ind, ierr, i2
   TYPE(PeDef), POINTER           ::  aPE
   INTEGER                        ::  RemPE
   INTEGER, DIMENSION(myClient%inter_npes) ::  RemInd
   INTEGER, DIMENSION(:), POINTER ::  RemIndw
   INTEGER, DIMENSION(:), POINTER ::  RLdef
   INTEGER(KIND=MPI_ADDRESS_KIND) ::  WinSize
   INTEGER                        ::  indWin, indWin2

!
!-- First, count entries for every remote client PE
   do i = 1, myClient%inter_npes
      aPE => myClient%PEs(i)
      aPE%NrEle = 0
   end do

   do j = 1, NrP                                        ! loop over number of coarse grid cells
      RemPE = index_list(5,j)+1                         ! PE number of remote PE
      aPE => myClient%PEs(RemPE)
      aPE%NrEle = aPE%NrEle+1                           ! Increment number of elements for this client PE
   end do

   do i = 1, myClient%inter_npes
      aPE => myClient%PEs(i)
      ALLOCATE (aPE%locInd(aPE%NrEle))
   end do

   RemInd = 0

!
!-- Second, create lists
   do j = 1, NrP                                        ! loop over number of coarse grid cells
      RemPE = index_list(5,j)+1                         ! PE number of remote PE
      aPE => myClient%PEs(RemPE)
      RemInd(RemPE)     = RemInd(RemPE)+1
      ind               = RemInd(RemPE)
      aPE%locInd(ind)%i = index_list(1,j)
      aPE%locInd(ind)%j = index_list(2,j)
   end do

!
!-- Prepare number of elements for client PEs
   CALL PMC_Alloc_mem (RLdef, myClient%inter_npes*2)
   WinSize = myClient%inter_npes*c_sizeof(i)*2          ! number of client PEs * size of INTEGER (i is just an arbitrary INTEGER)

   CALL MPI_Win_create (RLdef, WinSize, iwp, MPI_INFO_NULL, myClient%intra_comm, indWin, ierr)
   CALL MPI_Win_fence (0, indWin, ierr)                 ! Open window to set data

   RLdef(1) = 0                                         ! Index on remote PE 0
   RLdef(2) = RemInd(1)                                 ! Number of elements on remote PE 0

   do i = 2, myClient%inter_npes                        ! Reserve buffer for index array
      i2          = (i-1)*2+1
      RLdef(i2)   = RLdef(i2-2) + RLdef(i2-1)*2         ! Index on remote PE
      RLdef(i2+1) = RemInd(i)                           ! Number of elements on remote PE
   end do

   CALL MPI_Win_fence (0, indWin, ierr)                 ! Close window to allow client to access data
   CALL MPI_Win_fence (0, indWin, ierr)                 ! Client has retrieved data

   i2 = 2*myClient%inter_npes-1
   WinSize = (RLdef(i2)+RLdef(i2+1))*2
   WinSize = max(WinSize,1)                             ! Make sure that MPI_Alloc_mem works

   CALL PMC_Alloc_mem (RemIndw, int(WinSize))

   CALL MPI_Barrier (m_model_comm, ierr)
   CALL MPI_Win_create (RemIndw, WinSize*c_sizeof(i), iwp, MPI_INFO_NULL,           &
                        myClient%intra_comm, indWin2, ierr)
   CALL MPI_Win_fence (0, indWin2, ierr)                ! Open window to set data

   do j = 1, NrP                                        ! this loop creates the 2D index list
      RemPE = index_list(5,j)+1                         ! PE number of remote PE
      aPE => myClient%PEs(RemPE)
      i2             = RemPE*2-1
      ind            = RLdef(i2)+1
      RemIndw(ind)   = index_list(3,j)
      RemIndw(ind+1) = index_list(4,j)
      RLdef(i2)      = RLdef(i2)+2
   end do

   CALL MPI_Win_fence (0, indWin2, ierr)                ! all data set

   CALL MPI_Barrier (myClient%intra_comm, ierr)         ! Don't know why, but this barrier is necessary before the windows can be freed

   CALL MPI_Win_free (indWin, ierr)
   CALL MPI_Win_free (indWin2, ierr)

!  Should work, but there is a problem with the MPI implementation
!  https://www.lrz.de/services/software/parallel/mpi/onesided
!  CALL MPI_Free_mem (RemIndw, ierr)

   return
END SUBROUTINE Set_PE_index_list

#endif
END MODULE pmc_server