MODULE pmc_server

!--------------------------------------------------------------------------------!
! This file is part of PALM.
!
! PALM is free software: you can redistribute it and/or modify it under the terms
! of the GNU General Public License as published by the Free Software Foundation,
! either version 3 of the License, or (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE. See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2015 Leibniz Universitaet Hannover
!--------------------------------------------------------------------------------!
!
! Current revisions:
! ------------------
!
!
! Former revisions:
! -----------------
! $Id: pmc_server.f90 1767 2016-02-29 08:47:57Z raasch $
!
! 1766 2016-02-29 08:37:15Z raasch
! modifications to allow for using PALM's pointer version
! +new routine pmc_s_set_active_data_array
!
! 1764 2016-02-28 12:45:19Z raasch
! cpp-statement added (nesting can only be used in parallel mode)
!
! 1762 2016-02-25 12:31:13Z hellstea
! Initial revision by K. Ketelsen
!
! Description:
! ------------
!
! Server part of the Palm Model Coupler
!------------------------------------------------------------------------------!
#if defined( __parallel ) use, intrinsic :: iso_c_binding #if defined( __lc ) USE MPI #else INCLUDE "mpif.h" #endif USE kinds USE PMC_general, ONLY: ClientDef, PMC_MAX_MODELL,PMC_sort, DA_NameDef, DA_Desclen, DA_Namelen, & PMC_G_SetName, PMC_G_GetName, PeDef, ArrayDef USE PMC_handle_communicator, ONLY: m_model_comm,m_model_rank,m_model_npes, m_to_client_comm, & PMC_Server_for_Client, m_world_rank USE PMC_MPI_wrapper, ONLY: PMC_Send_to_Client, PMC_Recv_from_Client, PMC_Bcast, PMC_Inter_Bcast, & PMC_Alloc_mem, PMC_Time IMPLICIT none PRIVATE SAVE TYPE ClientIndexDef INTEGER :: NrPoints INTEGER,DIMENSION(:,:),allocatable :: index_list_2d END TYPE ClientIndexDef TYPE(ClientDef),DIMENSION(PMC_MAX_MODELL) :: Clients TYPE(ClientIndexDef),DIMENSION(PMC_MAX_MODELL) :: indClients PUBLIC PMC_Server_for_Client !-- TO_DO: what is the meaning of this? Could variables declared in this module !-- also have single precision? ! INTEGER, PARAMETER :: dp = wp ! INTERFACE section INTERFACE PMC_ServerInit MODULE procedure PMC_ServerInit END INTERFACE PMC_ServerInit INTERFACE PMC_S_Set_2D_index_list MODULE procedure PMC_S_Set_2D_index_list END INTERFACE PMC_S_Set_2D_index_list INTERFACE PMC_S_GetNextArray MODULE procedure PMC_S_GetNextArray END INTERFACE PMC_S_GetNextArray INTERFACE PMC_S_Set_DataArray MODULE procedure PMC_S_Set_DataArray_2d MODULE procedure PMC_S_Set_DataArray_3d END INTERFACE PMC_S_Set_DataArray INTERFACE PMC_S_setInd_and_AllocMem MODULE procedure PMC_S_setInd_and_AllocMem END INTERFACE PMC_S_setInd_and_AllocMem INTERFACE PMC_S_FillBuffer MODULE procedure PMC_S_FillBuffer END INTERFACE PMC_S_FillBuffer INTERFACE PMC_S_GetData_from_Buffer MODULE procedure PMC_S_GetData_from_Buffer END INTERFACE PMC_S_GetData_from_Buffer INTERFACE PMC_S_Set_Active_data_array MODULE procedure PMC_S_Set_Active_data_array END INTERFACE PMC_S_Set_Active_data_array ! 
PUBLIC section PUBLIC PMC_ServerInit, PMC_S_Set_2D_index_list, PMC_S_GetNextArray, PMC_S_Set_DataArray PUBLIC PMC_S_setInd_and_AllocMem, PMC_S_FillBuffer, PMC_S_GetData_from_Buffer, PMC_S_Set_Active_data_array CONTAINS SUBROUTINE PMC_ServerInit IMPLICIT none INTEGER :: i INTEGER :: j INTEGER :: ClientId INTEGER :: istat do i=1,size(PMC_Server_for_Client)-1 if(m_model_comm == 0) write(0,*) 'PMC_Server: Initialize client Id',PMC_Server_for_Client(i) ClientId = PMC_Server_for_Client(i) Clients(ClientId)%model_comm = m_model_comm Clients(ClientId)%inter_comm = m_to_client_comm(ClientId) ! Get rank and size CALL MPI_Comm_rank (Clients(ClientId)%model_comm, Clients(ClientId)%model_rank, istat); CALL MPI_Comm_size (Clients(ClientId)%model_comm, Clients(ClientId)%model_npes, istat); CALL MPI_Comm_remote_size (Clients(ClientId)%inter_comm, Clients(ClientId)%inter_npes, istat); ! Intra communicater is used for MPI_Get CALL MPI_Intercomm_merge (Clients(ClientId)%inter_comm, .false., Clients(ClientId)%intra_comm, istat); CALL MPI_Comm_rank (Clients(ClientId)%intra_comm, Clients(ClientId)%intra_rank, istat); write(9,*) 'ClientId ',i,ClientId,m_world_rank, Clients(ClientId)%inter_npes ALLOCATE (Clients(ClientId)%PEs(Clients(ClientId)%inter_npes)) do j=1,Clients(ClientId)%inter_npes ! Loop over all client PEs NULLIFY(Clients(ClientId)%PEs(j)%Arrays) end do CALL Get_DA_names_from_client (ClientId) end do return END SUBROUTINE PMC_ServerInit SUBROUTINE PMC_S_Set_2D_index_list (ClientId, index_list) IMPLICIT none INTEGER,INTENT(IN) :: ClientId INTEGER,DIMENSION(:,:),INTENT(INOUT) :: index_list !Index list will be modified in sort, therefore INOUT !-- Local variables INTEGER :: ip,is,ie,ian,ic,n INTEGER :: istat if(m_model_rank == 0) then CALL PMC_sort (index_list, 6) ! Sort to ascending Server PE is = 1 do ip=0,m_model_npes-1 ! Split into Server PEs ie = is-1 !there may be no entry for this PE if(is <= size(index_list,2) .and. 
ie >= 0) then do while ( index_list(6,ie+1) == ip) ie = ie+1 if( ie == size(index_list,2)) EXIT end do ian = ie-is+1 else is = -1 ie = -2 ian = 0 end if ! Send data to other server PEs if(ip == 0) then indClients(ClientId)%NrPoints = ian if(ian > 0) then ALLOCATE (indClients(ClientId)%index_list_2d(6,ian)) indClients(ClientId)%index_list_2d(:,1:ian) = index_list(:,is:ie) end if else CALL MPI_Send (ian, 1, MPI_INTEGER, ip, 1000, m_model_comm, istat) if(ian > 0) then CALL MPI_Send (index_list(1,is), 6*ian, MPI_INTEGER, ip, 1001, & m_model_comm, istat) end if end if is = ie+1 end do else CALL MPI_Recv (indClients(ClientId)%NrPoints, 1, MPI_INTEGER, 0, 1000, m_model_comm, & MPI_STATUS_IGNORE, istat) ian = indClients(ClientId)%NrPoints if(ian > 0) then ALLOCATE(indClients(ClientId)%index_list_2d(6,ian)) CALL MPI_RECV (indClients(ClientId)%index_list_2d, 6*ian, MPI_INTEGER, 0, 1001, & m_model_comm, MPI_STATUS_IGNORE, istat) end if end if CALL Set_PE_index_list (ClientId,Clients(ClientId),indClients(ClientId)%index_list_2d,indClients(ClientId)%NrPoints) return END SUBROUTINE PMC_S_Set_2D_index_list logical function PMC_S_GetNextArray (ClientId, myName,Client_PeIndex) INTEGER,INTENT(IN) :: ClientId CHARACTER(len=*),INTENT(OUT) :: myName !-- local variables INTEGER :: MyCoupleIndex logical :: MyLast CHARACTER(len=DA_Namelen) :: loName INTEGER,INTENT(IN),optional :: Client_PeIndex loName = ' ' CALL PMC_G_GetName (clients(ClientId), MyCoupleIndex, loName, MyLast, Client_PeIndex) myName = loName PMC_S_GetNextArray = .NOT. MyLast ! 
Return true if valid array return END function PMC_S_GetNextArray SUBROUTINE PMC_S_Set_DataArray_2d (ClientId, array, array_2 ) IMPLICIT none INTEGER,INTENT(IN) :: ClientId !-- TO_DO: has array always to be of dp-kind, or can wp used here !-- this effects all respective declarations in this file REAL(kind=dp),INTENT(IN),DIMENSION(:,:) :: array REAL(kind=dp),INTENT(IN),DIMENSION(:,:),OPTIONAL :: array_2 !-- local variables INTEGER :: NrDims INTEGER,DIMENSION (4) :: dims INTEGER :: dim_order TYPE(c_ptr) :: array_adr TYPE(c_ptr) :: second_adr dims = 1 NrDims = 2 dims(1) = size(array,1) dims(2) = size(array,2) dim_order = 2 array_adr = c_loc(array) IF ( PRESENT( array_2 ) ) THEN second_adr = c_loc(array_2) CALL PMC_S_SetArray (ClientId, NrDims, dims, dim_order, array_adr, second_adr = second_adr) ELSE CALL PMC_S_SetArray (ClientId, NrDims, dims, dim_order, array_adr) ENDIF return END SUBROUTINE PMC_S_Set_DataArray_2d SUBROUTINE PMC_S_Set_DataArray_3d (ClientId, array, nz_cl, nz, array_2 ) IMPLICIT none INTEGER,INTENT(IN) :: ClientId REAL(kind=dp),INTENT(IN),DIMENSION(:,:,:) :: array REAL(kind=dp),INTENT(IN),DIMENSION(:,:,:),OPTIONAL :: array_2 INTEGER,INTENT(IN) :: nz_cl INTEGER,INTENT(IN) :: nz !-- local variables INTEGER :: NrDims INTEGER,DIMENSION (4) :: dims INTEGER :: dim_order TYPE(c_ptr) :: array_adr TYPE(c_ptr) :: second_adr dims = 1 dims = 0 NrDims = 3 dims(1) = size(array,1) dims(2) = size(array,2) dims(3) = size(array,3) dim_order = 33 dims(4) = nz_cl+dims(1)-nz ! works for first dimension 1:nz and 0:nz+1 array_adr = c_loc(array) ! !-- In PALM's pointer version, two indices have to be stored internally. 
!-- The active address of the data array is set in swap_timelevel IF ( PRESENT( array_2 ) ) THEN second_adr = c_loc(array_2) CALL PMC_S_SetArray (ClientId, NrDims, dims, dim_order, array_adr, second_adr = second_adr) ELSE CALL PMC_S_SetArray (ClientId, NrDims, dims, dim_order, array_adr) ENDIF return END SUBROUTINE PMC_S_Set_DataArray_3d SUBROUTINE PMC_S_setInd_and_AllocMem (ClientId) IMPLICIT none INTEGER,INTENT(IN) :: ClientId INTEGER :: i, istat, ierr INTEGER :: arlen, myIndex, tag INTEGER :: rCount ! count MPI requests INTEGER(idp) :: bufsize ! Size of MPI data Window TYPE(PeDef),POINTER :: aPE TYPE(ArrayDef),POINTER :: ar CHARACTER(len=DA_Namelen) :: myName INTEGER,DIMENSION(1024) :: req Type(c_ptr) :: base_ptr REAL(kind=wp),DIMENSION(:),POINTER :: base_array INTEGER(KIND=MPI_ADDRESS_KIND) :: WinSize myIndex = 1 rCount = 0 bufsize = 8 ! First stride, Compute size and set index do i=1,Clients(ClientId)%inter_npes aPE => Clients(ClientId)%PEs(i) tag = 200 do while (PMC_S_GetNextArray ( ClientId, myName,i)) ar => aPE%Arrays if(ar%dim_order == 2) then arlen = aPE%NrEle; ! 2D else if(ar%dim_order == 33) then arlen = aPE%NrEle * ar%A_dim(4); ! PALM 3D else arlen = -1 end if ar%BufIndex = myIndex tag = tag+1 rCount = rCount+1 CALL MPI_Isend (myIndex, 1, MPI_INTEGER, i-1, tag, Clients(ClientId)%inter_comm, req(rCount),ierr) if(rCount == 1024) then ! Maximum of 1024 outstanding requests CALL MPI_Waitall (rCount, req, MPI_STATUSES_IGNORE, ierr) rCount = 0; end if myIndex = myIndex+arlen bufsize = bufsize+arlen ar%BufSize = arlen end do if(rCount > 0) then ! Wait for all send completed CALL MPI_Waitall (rCount, req, MPI_STATUSES_IGNORE, ierr) end if end do ! Create RMA (One Sided Communication) window for data buffer CALL PMC_Alloc_mem (base_array, bufsize, base_ptr) Clients(ClientId)%TotalBufferSize = bufsize*wp !Total buffer size in Byte WinSize = bufsize*wp ! 
write(9,*) 'PMC_S_SetInd_and_Mem ',m_model_rank,Clients(ClientId)%inter_npes,WinSize,bufsize CALL MPI_Win_create (base_array, WinSize, wp, MPI_INFO_NULL, Clients(ClientId)%intra_comm, Clients(ClientId)%BufWin, ierr); CALL MPI_Win_fence (0, Clients(ClientId)%BufWin, ierr); ! Open Window to set data CALL MPI_Barrier(Clients(ClientId)%intra_comm, ierr) ! Second stride, Set Buffer pointer do i=1,Clients(ClientId)%inter_npes aPE => Clients(ClientId)%PEs(i) do while (PMC_S_GetNextArray ( ClientId, myName,i)) ar => aPE%Arrays !-- TO_DO: Adressrechnung ueberlegen? ar%SendBuf = c_loc(base_array(ar%BufIndex)) !kk Adressrechnung ueberlegen if(ar%BufIndex+ar%BufSize > bufsize) then !-- TO_DO: can this error really happen, and what can be the reason? write(0,'(a,i4,4i7,1x,a)') 'Buffer too small ',i,ar%BufIndex,ar%BufSize,ar%BufIndex+ar%BufSize,bufsize,trim(myName) CALL MPI_Abort (MPI_COMM_WORLD, istat, ierr) end if end do end do return END SUBROUTINE PMC_S_setInd_and_AllocMem SUBROUTINE PMC_S_FillBuffer (ClientId, WaitTime) IMPLICIT none INTEGER,INTENT(IN) :: ClientId REAL(kind=dp),INTENT(OUT),optional :: WaitTime !-- local variables INTEGER :: ip,ij,istat,ierr INTEGER :: myIndex REAL(kind=dp) :: t1,t2 TYPE(PeDef),POINTER :: aPE TYPE(ArrayDef),POINTER :: ar CHARACTER(len=DA_Namelen) :: myName INTEGER,DIMENSION(1) :: buf_shape REAL(kind=wp),POINTER,DIMENSION(:) :: buf REAL(kind=wp),POINTER,DIMENSION(:,:) :: data_2d REAL(kind=wp),POINTER,DIMENSION(:,:,:) :: data_3d t1 = PMC_Time() CALL MPI_Barrier(Clients(ClientId)%intra_comm, ierr) ! 
Wait for buffer empty t2 = PMC_Time() if(present(WaitTime)) WaitTime = t2-t1 do ip=1,Clients(ClientId)%inter_npes aPE => Clients(ClientId)%PEs(ip) do while (PMC_S_GetNextArray ( ClientId, myName,ip)) ar => aPE%Arrays myIndex=1 if(ar%dim_order == 2) then buf_shape(1) = aPE%NrEle CALL c_f_pointer(ar%SendBuf, buf, buf_shape) CALL c_f_pointer(ar%data, data_2d, ar%A_dim(1:2)) do ij=1,aPE%NrEle buf(myIndex) = data_2d(aPE%locInd(ij)%j,aPE%locInd(ij)%i) myIndex = myIndex+1 end do else if(ar%dim_order == 33) then buf_shape(1) = aPE%NrEle*ar%A_dim(4) CALL c_f_pointer(ar%SendBuf, buf, buf_shape) CALL c_f_pointer(ar%data, data_3d, ar%A_dim(1:3)) do ij=1,aPE%NrEle buf(myIndex:myIndex+ar%A_dim(4)-1) = data_3d(1:ar%A_dim(4),aPE%locInd(ij)%j,aPE%locInd(ij)%i) myIndex = myIndex+ar%A_dim(4) end do else !-- TO_DO: can this error really happen, and what can be the reason? write(0,*) "Illegal Order of Dimension ",ar%dim_order CALL MPI_Abort (MPI_COMM_WORLD, istat, ierr); end if end do end do CALL MPI_Barrier(Clients(ClientId)%intra_comm, ierr) ! buffer is full return END SUBROUTINE PMC_S_FillBuffer SUBROUTINE PMC_S_GetData_from_Buffer (ClientId, WaitTime) IMPLICIT none INTEGER,INTENT(IN) :: ClientId REAL(kind=dp),INTENT(OUT),optional :: WaitTime !-- local variables INTEGER :: ip,ij,istat,ierr INTEGER :: myIndex REAL(kind=dp) :: t1,t2 TYPE(PeDef),POINTER :: aPE TYPE(ArrayDef),POINTER :: ar CHARACTER(len=DA_Namelen) :: myName INTEGER,DIMENSION(1) :: buf_shape REAL(kind=wp),POINTER,DIMENSION(:) :: buf REAL(kind=wp),POINTER,DIMENSION(:,:) :: data_2d REAL(kind=wp),POINTER,DIMENSION(:,:,:) :: data_3d t1 = PMC_Time() CALL MPI_Barrier(Clients(ClientId)%intra_comm, ierr) ! 
Wait for MPI_Put from client t2 = PMC_Time() if(present(WaitTime)) WaitTime = t2-t1 do ip=1,Clients(ClientId)%inter_npes aPE => Clients(ClientId)%PEs(ip) do while (PMC_S_GetNextArray ( ClientId, myName,ip)) ar => aPE%Arrays myIndex=1 if(ar%dim_order == 2) then buf_shape(1) = aPE%NrEle CALL c_f_pointer(ar%SendBuf, buf, buf_shape) CALL c_f_pointer(ar%data, data_2d, ar%A_dim(1:2)) do ij=1,aPE%NrEle data_2d(aPE%locInd(ij)%j,aPE%locInd(ij)%i) = buf(myIndex) myIndex = myIndex+1 end do else if(ar%dim_order == 33) then buf_shape(1) = aPE%NrEle*ar%A_dim(4) CALL c_f_pointer(ar%SendBuf, buf, buf_shape) CALL c_f_pointer(ar%data, data_3d, ar%A_dim(1:3)) do ij=1,aPE%NrEle data_3d(1:ar%A_dim(4),aPE%locInd(ij)%j,aPE%locInd(ij)%i) = buf(myIndex:myIndex+ar%A_dim(4)-1) myIndex = myIndex+ar%A_dim(4) end do else !-- TO_DO: can this error really happen, and what can be the reason? write(0,*) "Illegal Order of Dimension ",ar%dim_order CALL MPI_Abort (MPI_COMM_WORLD, istat, ierr); end if end do end do CALL MPI_Barrier(Clients(ClientId)%intra_comm, ierr) ! data copy finished, buffer is free for use agein return END SUBROUTINE PMC_S_GetData_from_Buffer ! Private SUBROUTINEs SUBROUTINE Get_DA_names_from_client (ClientId) IMPLICIT none INTEGER,INTENT(IN) :: ClientId !-- local variables type(DA_NameDef) :: myName ! 
Get Data Array Description and Name from Client do CALL PMC_Bcast ( myName%couple_index, 0, comm=m_to_client_comm(ClientId)) if(myName%couple_index == -1) EXIT CALL PMC_Bcast ( myName%ServerDesc, 0, comm=m_to_client_comm(ClientId)) CALL PMC_Bcast ( myName%NameOnServer, 0, comm=m_to_client_comm(ClientId)) CALL PMC_Bcast ( myName%ClientDesc, 0, comm=m_to_client_comm(ClientId)) CALL PMC_Bcast ( myName%NameOnClient, 0, comm=m_to_client_comm(ClientId)) CALL PMC_G_SetName (clients(ClientID), myName%couple_index, myName%NameOnServer ) end do return END SUBROUTINE Get_DA_names_from_client SUBROUTINE PMC_S_SetArray (ClientId, NrDims, dims, dim_order, array_adr, second_adr) IMPLICIT none INTEGER,INTENT(IN) :: ClientId INTEGER,INTENT(IN) :: NrDims INTEGER,INTENT(IN),DIMENSION(:) :: dims INTEGER,INTENT(IN) :: dim_order TYPE(c_ptr),INTENT(IN) :: array_adr TYPE(c_ptr),INTENT(IN),OPTIONAL :: second_adr INTEGER :: i TYPE(PeDef),POINTER :: aPE TYPE(ArrayDef),POINTER :: ar CHARACTER(len=DA_Namelen) :: myName ! Set Array for Client interPE 0 do i=1,Clients(ClientId)%inter_npes aPE => Clients(ClientId)%PEs(i) ar => aPE%Arrays ar%NrDims = NrDims ar%A_dim = dims ar%dim_order = dim_order ar%data = array_adr if(present(second_adr)) then ar%po_data(1) = array_adr ar%po_data(2) = second_adr else ar%po_data(1) = C_NULL_PTR ar%po_data(2) = C_NULL_PTR end if end do return END SUBROUTINE PMC_S_SetArray SUBROUTINE PMC_S_Set_Active_data_array (ClientId,iactive) IMPLICIT none INTEGER,INTENT(IN) :: ClientId INTEGER,INTENT(IN) :: iactive !-- local variables INTEGER :: i, ip TYPE(PeDef),POINTER :: aPE TYPE(ArrayDef),POINTER :: ar CHARACTER(len=DA_Namelen) :: myName do ip=1,Clients(ClientId)%inter_npes aPE => Clients(ClientId)%PEs(ip) do while (PMC_S_GetNextArray ( ClientId, myName,ip)) ar => aPE%Arrays if(iactive == 1 .OR. 
iactive == 2) then ar%data = ar%po_data(iactive) end if end do end do return END SUBROUTINE PMC_S_Set_Active_data_array SUBROUTINE Set_PE_index_list (ClientId, myClient,index_list,NrP) IMPLICIT none INTEGER,INTENT(IN) :: ClientId TYPE(ClientDef),INTENT(INOUT) :: myClient INTEGER,INTENT(IN),DIMENSION(:,:) :: index_list INTEGER,INTENT(IN) :: NrP !-- local variables INTEGER :: i,j,ind,ierr,i2 TYPE(PeDef),POINTER :: aPE INTEGER :: RemPE INTEGER,DIMENSION(myClient%inter_npes) :: RemInd INTEGER,DIMENSION(:),POINTER :: RemIndw INTEGER,DIMENSION(:),POINTER :: RLdef INTEGER(KIND=MPI_ADDRESS_KIND) :: WinSize INTEGER :: indWin,indWin2 ! First, count entries for every remote client PE do i=1,myClient%inter_npes aPE => myClient%PEs(i) aPE%NrEle = 0 end do do j=1,NrP ! loop over number of cells coarse grid RemPE = index_list(5,j)+1 ! Pe number remote PE aPE => myClient%PEs(RemPE) aPE% NrEle = aPE% NrEle+1 ! Increment Number of elements for this client Pe end do do i=1,myClient%inter_npes aPE => myClient%PEs(i) ALLOCATE(aPE%locInd(aPE%NrEle)) end do RemInd = 0 ! Second, Create lists do j=1,NrP ! loop over number of cells coarse grid RemPE = index_list(5,j)+1 ! Pe number remote PE aPE => myClient%PEs(RemPE) RemInd(RemPE) = RemInd(RemPE)+1 ind = RemInd(RemPE) aPE%locInd(ind)%i = index_list(1,j) aPE%locInd(ind)%j = index_list(2,j) end do ! Prepare Number of Elements for Client PEs CALL PMC_Alloc_mem (RLdef, myClient%inter_npes*2) WinSize = myClient%inter_npes*c_sizeof(i)*2 ! Number of Client PEs * size of INTEGER (i just arbitrary INTEGER) CALL MPI_Win_create (RLdef, WinSize, iwp, MPI_INFO_NULL, myClient%intra_comm, indWin, ierr); CALL MPI_Win_fence (0, indWin, ierr); ! Open Window to set data RLdef(1) = 0 ! Index on Remote PE 0 RLdef(2) = RemInd(1) ! Number of Elements on Rem PE 0 do i=2,myClient%inter_npes ! Reserve Buffer for index array i2 = (i-1)*2+1 RLdef(i2) = RLdef(i2-2) + RLdef(i2-1)*2 ! Index on Remote PE RLdef(i2+1) = RemInd(i) ! 
Number of Elements on Remote PE end do CALL MPI_Win_fence (0, indWin, ierr); ! Close Window to allow client to access data CALL MPI_Win_fence (0, indWin, ierr); ! Client has retrieved data i2 = 2*myClient%inter_npes-1 WinSize = (RLdef(i2)+RLdef(i2+1))*2 WinSize = max(WinSize,1) ! Make sure, MPI_Alloc_mem works CALL PMC_Alloc_mem (RemIndw, int(WinSize)) CALL MPI_Barrier (m_model_comm, ierr) CALL MPI_Win_create (RemIndw, WinSize*c_sizeof(i), iwp, MPI_INFO_NULL, myClient%intra_comm, indWin2, ierr); CALL MPI_Win_fence (0, indWin2, ierr); ! Open Window to set data do j=1,NrP ! this loop creates the 2D index list RemPE = index_list(5,j)+1 ! Pe number remote PE aPE => myClient%PEs(RemPE) i2 = RemPE*2-1 ind = RLdef(i2)+1 RemIndw(ind) = index_list(3,j) RemIndw(ind+1) = index_list(4,j) RLdef(i2) = RLdef(i2)+2 end do CALL MPI_Win_fence (0, indWin2, ierr); !all data set CALL MPI_Barrier(myClient%intra_comm, ierr) ! Dont know why, but this barrier is necessary before we can free the windows CALL MPI_Win_free(indWin, ierr) CALL MPI_Win_free(indWin2, ierr) ! Sollte funktionieren, Problem mit MPI implementation ! https://www.lrz.de/services/software/parallel/mpi/onesided ! CALL MPI_Free_mem (RemIndw, ierr) return END SUBROUTINE Set_PE_index_list #endif END MODULE pmc_server