source: palm/trunk/SOURCE/exchange_horiz.f90 @ 668

Last change on this file since 668 was 668, checked in by suehring, 13 years ago

last commit documented

  • Property svn:keywords set to Id
File size: 4.5 KB
 SUBROUTINE exchange_horiz( ar, nbgp_local )

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
!
! Former revisions:
! -----------------
! $Id: exchange_horiz.f90 668 2010-12-23 13:22:58Z suehring $
!
! 667 2010-12-23 12:06:00Z suehring/gryschka
! Dynamic exchange of ghost points with nbgp_local to ensure that no useless
! ghost points are exchanged in case of multigrid. type_yz(0) and type_xz(0)
! are used for the normal grid; the remaining types are used for the several
! grid levels. Exchange is done via MPI vectors with a dynamic number of
! ghost points which depends on the advection scheme. Exchange between left
! and right PEs is 10% faster with MPI vectors than without.
!
! 75 2007-03-22 09:54:05Z raasch
! Special cases for additional grid points along x or y in case of non-cyclic
! boundary conditions are no longer considered
!
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.16  2006/02/23 12:19:08  raasch
! anz_yz renamed ngp_yz
!
! Revision 1.1  1997/07/24 11:13:29  raasch
! Initial revision
!
!
! Description:
! ------------
! Exchange of lateral boundary values (parallel computers) and cyclic
! lateral boundary conditions, respectively.
!------------------------------------------------------------------------------!
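!
! Usage sketch (illustrative only; the array name and ghost point count shown
! here are assumptions, not part of this file): callers pass a 3D array
! together with the number of ghost layers to be exchanged, e.g.
!    CALL exchange_horiz( u, nbgp )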

    USE control_parameters
    USE cpulog
    USE indices
    USE interfaces
    USE pegrid

    IMPLICIT NONE

#if defined( __parallel )
    INTEGER, DIMENSION(4)                 ::  req
    INTEGER, DIMENSION(MPI_STATUS_SIZE,4) ::  wait_stat
#endif
    INTEGER ::  i, nbgp_local
    REAL, DIMENSION(nzb:nzt+1,nys-nbgp_local:nyn+nbgp_local, &
                    nxl-nbgp_local:nxr+nbgp_local) ::  ar
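!
!-- ar carries nbgp_local ghost layers in both horizontal directions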

    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'start' )

    IF ( exchange_mg )  THEN
       i = grid_level
    ELSE
       i = 0
    ENDIF
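!
!-- i selects the MPI derived datatype matching the ghost layer width of
!-- the current grid level: type_yz(0) and type_xz(0) describe the normal
!-- grid, the remaining entries the coarser multigrid levels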
#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_lr == 'cyclic' )  THEN
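!
!--       The left ghost layers receive the rightmost interior values and
!--       vice versa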
          ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
          ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
       ENDIF

    ELSE

       req = 0
!
!--    Send left boundary, receive right one
       CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxl), 1, type_yz(i), pleft, 0, &
                       comm2d, req(1), ierr )
       CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, 0, &
                       comm2d, req(2), ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_ISEND( ar(nzb,nys-nbgp_local,nxr+1-nbgp_local), 1, &
                       type_yz(i), pright, 1, comm2d, req(3), ierr )
       CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1, type_yz(i), &
                       pleft, 1, comm2d, req(4), ierr )
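!
!--    Wait until all four non-blocking transfers have completed; message
!--    tags 0 and 1 keep the two directions apart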

       CALL MPI_WAITALL( 4, req, wait_stat, ierr )

    ENDIF


    IF ( pdims(2) == 1  .OR.  mg_switch_to_pe0 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       IF ( bc_ns == 'cyclic' )  THEN
          ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
          ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
       ENDIF

    ELSE

       req = 0
!
!--    Send front boundary, receive rear one
!--    (MPI_ISEND initial send address changed; type_xz() is sent nbgp times)
       CALL MPI_ISEND( ar(nzb,nys,nxl-nbgp_local), 1, type_xz(i), psouth, 0, &
                       comm2d, req(1), ierr )
       CALL MPI_IRECV( ar(nzb,nyn+1,nxl-nbgp_local), 1, type_xz(i), pnorth, 0, &
                       comm2d, req(2), ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_ISEND( ar(nzb,nyn-nbgp_local+1,nxl-nbgp_local), 1, &
                       type_xz(i), pnorth, 1, comm2d, req(3), ierr )
       CALL MPI_IRECV( ar(nzb,nys-nbgp_local,nxl-nbgp_local), 1, type_xz(i), &
                       psouth, 1, comm2d, req(4), ierr )

       CALL MPI_WAITALL( 4, req, wait_stat, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
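!-- (same wrap-around copies as in the one-dimensional decomposition cases
!-- above)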
    IF ( bc_lr == 'cyclic' )  THEN
        ar(:,:,nxl-nbgp_local:nxl-1) = ar(:,:,nxr-nbgp_local+1:nxr)
        ar(:,:,nxr+1:nxr+nbgp_local) = ar(:,:,nxl:nxl+nbgp_local-1)
    ENDIF

    IF ( bc_ns == 'cyclic' )  THEN
        ar(:,nys-nbgp_local:nys-1,:) = ar(:,nyn-nbgp_local+1:nyn,:)
        ar(:,nyn+1:nyn+nbgp_local,:) = ar(:,nys:nys+nbgp_local-1,:)
    ENDIF

#endif
    CALL cpu_log( log_point_s(2), 'exchange_horiz', 'stop' )

 END SUBROUTINE exchange_horiz