source: palm/trunk/SOURCE/exchange_horiz_2d.f90 @ 3182

Last change on this file was revision 3182, checked in by suehring, 6 years ago

New Inifor features: grid stretching; improved command-line interface; support for start dates in both YYYYMMDD and YYYYMMDDHH formats; ability to manually control input file prefixes (--radiation-prefix, --soil-prefix, --flow-prefix, --soilmoisture-prefix) for compatibility with the DWD forecast naming scheme; GNU-style short and long options; prepared output of large-scale forcing profiles (no computation yet); added preprocessor flag netcdf4 to switch output format between netCDF 3 and 4; updated netCDF variable names and attributes to comply with PIDS v1.9. Inifor bugfixes: improved compatibility with older Intel compilers by avoiding implicit array allocation; added origin_lon/_lat values and correct reference time in dynamic driver global attributes. Corresponding PALM changes: adjustments to the revised Inifor; variable names in the dynamic driver adjusted; geostrophic forcing enabled also in offline nested mode; variable names in LES-LES and COSMO offline nesting changed; lateral boundary flags for nesting, inflow and outflow conditions renamed.

  • Property svn:keywords set to Id
File size: 11.5 KB
!> @file exchange_horiz_2d.f90
!------------------------------------------------------------------------------!
! This file is part of the PALM model system.
!
! PALM is free software: you can redistribute it and/or modify it under the
! terms of the GNU General Public License as published by the Free Software
! Foundation, either version 3 of the License, or (at your option) any later
! version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2018 Leibniz Universitaet Hannover
!------------------------------------------------------------------------------!
!
! Current revisions:
! -----------------
! Rename variables in offline nesting mode and flags indicating lateral
! boundary conditions
!
! Former revisions:
! -----------------
! $Id: exchange_horiz_2d.f90 3182 2018-07-27 13:36:03Z suehring $
! Corrected "Former revisions" section
!
! 2696 2017-12-14 17:12:51Z kanani
! Change in file header (GPL part)
! Forcing implemented (MS)
!
! 2101 2017-01-05 16:42:31Z suehring
!
! 2000 2016-08-20 18:09:15Z knoop
! Forced header and separation lines into 80 columns
!
! 1968 2016-07-18 12:01:49Z suehring
! 2D-INTEGER exchange adopted for different multigrid level
!
! 1818 2016-04-06 15:53:27Z maronga
! Initial version of purely vertical nesting introduced.
!
! 1804 2016-04-05 16:30:18Z maronga
! Removed code for parameter file check (__check)
!
! 1762 2016-02-25 12:31:13Z hellstea
! Introduction of nested domain feature
!
! 1682 2015-10-07 23:56:08Z knoop
! Code annotations made doxygen readable
!
! 1348 2014-03-27 18:01:03Z raasch
! bugfix: bc_lr_cyc and bc_ns_cyc added to ONLY-list
!
! 1320 2014-03-20 08:40:49Z raasch
! ONLY-attribute added to USE-statements,
! kind-parameters added to all INTEGER and REAL declaration statements,
! kinds are defined in new module kinds,
! revision history before 2012 removed,
! comment fields (!:) to be used for variable explanations added to
! all variable declaration statements
!
! 1092 2013-02-02 11:24:22Z raasch
! unused variables removed
!
! 1036 2012-10-22 13:43:42Z raasch
! code put under GPL (PALM 3.9)
!
! 841 2012-02-28 12:29:49Z maronga
! Excluded routine from compilation of namelist_file_check
!
! Revision 1.1  1998/01/23 09:58:21  raasch
! Initial revision
!
!
! Description:
! ------------
!> Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
!> boundary conditions, respectively, for 2D-arrays.
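!>
!> A minimal usage sketch (illustrative only; "surface_field" is a hypothetical
!> 2-D field allocated with the standard ghost-point bounds nysg:nyng and
!> nxlg:nxrg, as required by this routine):
!>    REAL(wp) ::  surface_field(nysg:nyng,nxlg:nxrg)
!>    ...
!>    CALL exchange_horiz_2d( surface_field )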
[1]82!------------------------------------------------------------------------------!
[1682]83 SUBROUTINE exchange_horiz_2d( ar )
84 
[1]85
[1320]86    USE control_parameters,                                                    &
[3182]87        ONLY :  bc_dirichlet_l, bc_dirichlet_n, bc_dirichlet_r,                &
88                bc_dirichlet_s, bc_lr_cyc, bc_ns_cyc, bc_radiation_l,          &
89                bc_radiation_n, bc_radiation_r, bc_radiation_s 
[1320]90               
91    USE cpulog,                                                                &
92        ONLY :  cpu_log, log_point_s
93       
94    USE indices,                                                               &
95        ONLY :  nbgp, nxl, nxlg, nxr, nxrg, nyn, nyng, nys, nysg
96       
97    USE kinds
98   
[1]99    USE pegrid
100
[1933]101    USE pmc_interface,                                                         &
102        ONLY : nesting_mode
103
104
[1]105    IMPLICIT NONE
106
[841]107
[1682]108    INTEGER(iwp) :: i  !<
[1320]109   
[1682]110    REAL(wp) ::  ar(nysg:nyng,nxlg:nxrg)  !<
[1320]111   

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)

    ELSE
!
!--    Send left boundary, receive right one

       CALL MPI_SENDRECV( ar(nysg,nxl), 1, type_y, pleft,  0,                 &
                          ar(nysg,nxr+1), 1, type_y, pright, 0,               &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nysg,nxr+1-nbgp), 1, type_y, pright,  1,         &
                          ar(nysg,nxlg), 1, type_y, pleft,   1,               &
                          comm2d, status, ierr )


    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)

    ELSE
!
!--    Send front boundary, receive rear one

       CALL MPI_SENDRECV( ar(nys,nxlg), 1, type_x, psouth, 0,                 &
                          ar(nyn+1,nxlg), 1, type_x, pnorth, 0,               &
                          comm2d, status, ierr )
!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn+1-nbgp,nxlg), 1, type_x, pnorth, 1,          &
                          ar(nysg,nxlg), 1, type_x, psouth, 1,                &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc )  THEN
       ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
       ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
       ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)
    ENDIF


#endif

!
!-- Neumann conditions at inflow/outflow/nested boundaries
    IF ( nesting_mode /= 'vertical' )  THEN
       IF ( bc_dirichlet_l  .OR.  bc_radiation_l )  THEN
          DO  i = nbgp, 1, -1
             ar(:,nxl-i) = ar(:,nxl)
          ENDDO
       ENDIF
       IF ( bc_dirichlet_r  .OR.  bc_radiation_r )  THEN
          DO  i = 1, nbgp
             ar(:,nxr+i) = ar(:,nxr)
          ENDDO
       ENDIF
       IF ( bc_dirichlet_s  .OR.  bc_radiation_s )  THEN
          DO  i = nbgp, 1, -1
             ar(nys-i,:) = ar(nys,:)
          ENDDO
       ENDIF
       IF ( bc_dirichlet_n  .OR.  bc_radiation_n )  THEN
          DO  i = 1, nbgp
             ar(nyn+i,:) = ar(nyn,:)
          ENDDO
       ENDIF
    ENDIF

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

 END SUBROUTINE exchange_horiz_2d



!------------------------------------------------------------------------------!
! Description:
! ------------
!> Exchange of lateral (ghost) boundaries (parallel computers) and cyclic
!> boundary conditions, respectively, for 2D integer arrays.
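!>
!> A minimal usage sketch (illustrative only; "flag_array" is a hypothetical
!> 2-D INTEGER array carrying nbgp ghost layers on the default grid level; the
!> last five arguments pass the local index bounds and the number of ghost
!> layers to be exchanged):
!>    INTEGER(iwp) ::  flag_array(nys-nbgp:nyn+nbgp,nxl-nbgp:nxr+nbgp)
!>    ...
!>    CALL exchange_horiz_2d_int( flag_array, nys, nyn, nxl, nxr, nbgp )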
!------------------------------------------------------------------------------!

 SUBROUTINE exchange_horiz_2d_int( ar, nys_l, nyn_l, nxl_l, nxr_l, nbgp_local )


    USE control_parameters,                                                    &
        ONLY:  bc_dirichlet_l, bc_dirichlet_n, bc_dirichlet_r,                 &
               bc_dirichlet_s, bc_lr_cyc, bc_ns_cyc, bc_radiation_l,           &
               bc_radiation_n, bc_radiation_r, bc_radiation_s, grid_level

    USE cpulog,                                                                &
        ONLY:  cpu_log, log_point_s

    USE kinds

    USE pegrid

    IMPLICIT NONE

    INTEGER(iwp) ::  i           !< dummy index to zero-gradient conditions at in/outflow boundaries
    INTEGER(iwp) ::  nxl_l       !< local index bound at current grid level, left side
    INTEGER(iwp) ::  nxr_l       !< local index bound at current grid level, right side
    INTEGER(iwp) ::  nyn_l       !< local index bound at current grid level, north side
    INTEGER(iwp) ::  nys_l       !< local index bound at current grid level, south side
    INTEGER(iwp) ::  nbgp_local  !< number of ghost layers to be exchanged

    INTEGER(iwp), DIMENSION(nys_l-nbgp_local:nyn_l+nbgp_local,                 &
                            nxl_l-nbgp_local:nxr_l+nbgp_local) ::  ar  !< treated array

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'start' )

#if defined( __parallel )

!
!-- Exchange of lateral boundary values for parallel computers
    IF ( pdims(1) == 1 )  THEN

!
!--    One-dimensional decomposition along y, boundary values can be exchanged
!--    within the PE memory
       ar(:,nxl_l-nbgp_local:nxl_l-1) = ar(:,nxr_l-nbgp_local+1:nxr_l)
       ar(:,nxr_l+1:nxr_l+nbgp_local) = ar(:,nxl_l:nxl_l+nbgp_local-1)

    ELSE
!
!--    Send left boundary, receive right one
       CALL MPI_SENDRECV( ar(nys_l-nbgp_local,nxl_l),   1,                     &
                          type_y_int(grid_level), pleft,  0,                   &
                          ar(nys_l-nbgp_local,nxr_l+1), 1,                     &
                          type_y_int(grid_level), pright, 0,                   &
                          comm2d, status, ierr )
!
!--    Send right boundary, receive left one
       CALL MPI_SENDRECV( ar(nys_l-nbgp_local,nxr_l+1-nbgp_local), 1,          &
                          type_y_int(grid_level), pright, 1,                   &
                          ar(nys_l-nbgp_local,nxl_l-nbgp_local),   1,          &
                          type_y_int(grid_level), pleft,  1,                   &
                          comm2d, status, ierr )

    ENDIF

    IF ( pdims(2) == 1 )  THEN
!
!--    One-dimensional decomposition along x, boundary values can be exchanged
!--    within the PE memory
       ar(nys_l-nbgp_local:nys_l-1,:) = ar(nyn_l+1-nbgp_local:nyn_l,:)
       ar(nyn_l+1:nyn_l+nbgp_local,:) = ar(nys_l:nys_l-1+nbgp_local,:)


    ELSE
!
!--    Send front boundary, receive rear one
       CALL MPI_SENDRECV( ar(nys_l,nxl_l-nbgp_local),   1,                    &
                          type_x_int(grid_level), psouth, 0,                  &
                          ar(nyn_l+1,nxl_l-nbgp_local), 1,                    &
                          type_x_int(grid_level), pnorth, 0,                  &
                          comm2d, status, ierr )

!
!--    Send rear boundary, receive front one
       CALL MPI_SENDRECV( ar(nyn_l+1-nbgp_local,nxl_l-nbgp_local), 1,         &
                          type_x_int(grid_level), pnorth, 1,                  &
                          ar(nys_l-nbgp_local,nxl_l-nbgp_local),   1,         &
                          type_x_int(grid_level), psouth, 1,                  &
                          comm2d, status, ierr )

    ENDIF

#else

!
!-- Lateral boundary conditions in the non-parallel case
    IF ( bc_lr_cyc )  THEN
       ar(:,nxl_l-nbgp_local:nxl_l-1) = ar(:,nxr_l-nbgp_local+1:nxr_l)
       ar(:,nxr_l+1:nxr_l+nbgp_local) = ar(:,nxl_l:nxl_l+nbgp_local-1)
    ENDIF

    IF ( bc_ns_cyc )  THEN
       ar(nys_l-nbgp_local:nys_l-1,:) = ar(nyn_l+1-nbgp_local:nyn_l,:)
       ar(nyn_l+1:nyn_l+nbgp_local,:) = ar(nys_l:nys_l-1+nbgp_local,:)
    ENDIF

#endif
!
!-- Neumann conditions at inflow/outflow/nested boundaries
    IF ( bc_dirichlet_l )  THEN
       DO  i = nbgp_local, 1, -1
          ar(:,nxl_l-i) = ar(:,nxl_l)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_r )  THEN
       DO  i = 1, nbgp_local
          ar(:,nxr_l+i) = ar(:,nxr_l)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_s )  THEN
       DO  i = nbgp_local, 1, -1
          ar(nys_l-i,:) = ar(nys_l,:)
       ENDDO
    ENDIF
    IF ( bc_dirichlet_n )  THEN
       DO  i = 1, nbgp_local
          ar(nyn_l+i,:) = ar(nyn_l,:)
       ENDDO
    ENDIF

    CALL cpu_log( log_point_s(13), 'exchange_horiz_2d', 'stop' )

 END SUBROUTINE exchange_horiz_2d_int