Changeset 707 for palm/trunk
- Timestamp: Mar 29, 2011 11:39:40 AM
- Location: palm/trunk/SOURCE
- Files: 14 edited
palm/trunk/SOURCE/calc_spectra.f90
r668 → r707

Current revisions:
------------------
bc_lr/ns replaced by bc_lr/ns_cyc

Around line 90 (calculation of spectra works for cyclic boundary conditions only):

-    IF ( bc_lr /= 'cyclic' )  THEN
+    IF ( .NOT. bc_lr_cyc )  THEN

         message_string = 'non-cyclic lateral boundaries along x do not' // &

Around line 121 (the analogous check along y):

-    IF ( bc_ns /= 'cyclic' )  THEN
+    IF ( .NOT. bc_ns_cyc )  THEN
         IF ( myid == 0 )  THEN
            message_string = 'non-cyclic lateral boundaries along y do' // &
palm/trunk/SOURCE/check_parameters.f90
r690 → r707

Current revisions:
------------------
setting of bc_lr/ns_dirrad/raddir

Around line 1255 (internal variables used for speed optimization in IF clauses):

-    IF ( bc_lr /= 'cyclic' )  bc_lr_cyc = .FALSE.
-    IF ( bc_ns /= 'cyclic' )  bc_ns_cyc = .FALSE.
+    IF ( bc_lr /= 'cyclic' )               bc_lr_cyc    = .FALSE.
+    IF ( bc_lr == 'dirichlet/radiation' )  bc_lr_dirrad = .TRUE.
+    IF ( bc_lr == 'radiation/dirichlet' )  bc_lr_raddir = .TRUE.
+    IF ( bc_ns /= 'cyclic' )               bc_ns_cyc    = .FALSE.
+    IF ( bc_ns == 'dirichlet/radiation' )  bc_ns_dirrad = .TRUE.
+    IF ( bc_ns == 'radiation/dirichlet' )  bc_ns_raddir = .TRUE.
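The point of these switches is to replace repeated CHARACTER comparisons inside
performance-critical IF clauses by a single LOGICAL test that is set once during
the parameter check. A minimal sketch, not PALM code (variable names chosen to
mirror the changeset):

    ! Hoisting string comparisons into LOGICAL switches evaluated once at setup
    PROGRAM bc_switches
       IMPLICIT NONE
       CHARACTER(LEN=20) :: bc_lr = 'dirichlet/radiation'
       LOGICAL :: bc_lr_cyc = .TRUE., bc_lr_dirrad = .FALSE., bc_lr_raddir = .FALSE.
       INTEGER :: i

    !-- Set the switches once, as check_parameters does
       IF ( bc_lr /= 'cyclic' )               bc_lr_cyc    = .FALSE.
       IF ( bc_lr == 'dirichlet/radiation' )  bc_lr_dirrad = .TRUE.
       IF ( bc_lr == 'radiation/dirichlet' )  bc_lr_raddir = .TRUE.

    !-- Hot loops then test a LOGICAL instead of comparing strings
       DO  i = 1, 3
          IF ( .NOT. bc_lr_cyc )  PRINT*, 'non-cyclic step', i
       ENDDO
    END PROGRAM bc_switches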
palm/trunk/SOURCE/exchange_horiz.f90
r690 → r707

Current revisions:
------------------
grid_level directly used as index for MPI data type arrays,
bc_lr/ns replaced by bc_lr/ns_cyc

Around line 57:

-    INTEGER ::  i, nbgp_local
+    INTEGER ::  nbgp_local

Around line 63, the local copy of the grid level is removed:

-    !-- In the Poisson multigrid solver arrays with coarser grids are used.
-    !-- Set i appropriately, because the coarser grids have different
-    !-- MPI datatypes type_xz, type_yz.
-    IF ( exchange_mg )  THEN
-       i = grid_level
-    ELSE
-       i = 0
-    ENDIF

Around line 72 (one-dimensional decomposition along y, boundary values can be
exchanged within the PE memory):

-    IF ( bc_lr == 'cyclic' )  THEN
+    IF ( bc_lr_cyc )  THEN

Around lines 83-108, all send/receive calls now index the MPI data type arrays
directly with grid_level, e.g.:

-    CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(i), pleft,  0, &
-                       ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(i), pright, 0, &
+    CALL MPI_SENDRECV( ar(nzb,nys-nbgp_local,nxl),   1, type_yz(grid_level), pleft,  0, &
+                       ar(nzb,nys-nbgp_local,nxr+1), 1, type_yz(grid_level), pright, 0, &
                        comm2d, status, ierr )

The same replacement, type_yz(i) -> type_yz(grid_level) and type_xz(i) ->
type_xz(grid_level), is applied to the remaining synchronous MPI_SENDRECV and
asynchronous MPI_ISEND/MPI_IRECV calls in both decomposition branches.

Around line 121 (one-dimensional decomposition along x):

-    IF ( bc_ns == 'cyclic' )  THEN
+    IF ( bc_ns_cyc )  THEN
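The exchange pattern itself is the usual pairwise ghost-layer swap between
neighbouring PEs. A minimal sketch, not PALM code, of a cyclic ghost-column
exchange with MPI_SENDRECV on a 1-d decomposition (run with any number of
ranks; array sizes are illustrative):

    PROGRAM ghost_exchange
       USE mpi
       IMPLICIT NONE
       INTEGER, PARAMETER :: nz = 4, nnx = 3        ! local grid: nz levels, nnx columns
       INTEGER :: comm1d, ierr, myid, npes, pleft, pright
       INTEGER :: status(MPI_STATUS_SIZE)
       REAL    :: ar(nz,0:nnx+1)                    ! one ghost column on each side

       CALL MPI_INIT( ierr )
       CALL MPI_COMM_SIZE( MPI_COMM_WORLD, npes, ierr )
    !-- Periodic 1-d processor grid; MPI_CART_SHIFT yields the neighbour ranks
       CALL MPI_CART_CREATE( MPI_COMM_WORLD, 1, (/ npes /), (/ .TRUE. /), &
                             .TRUE., comm1d, ierr )
       CALL MPI_COMM_RANK( comm1d, myid, ierr )
       CALL MPI_CART_SHIFT( comm1d, 0, 1, pleft, pright, ierr )

       ar = REAL( myid )
    !-- Send first inner column left, receive the right ghost column, and vice versa
       CALL MPI_SENDRECV( ar(1,1),     nz, MPI_REAL, pleft,  0,            &
                          ar(1,nnx+1), nz, MPI_REAL, pright, 0, comm1d,    &
                          status, ierr )
       CALL MPI_SENDRECV( ar(1,nnx),   nz, MPI_REAL, pright, 1,            &
                          ar(1,0),     nz, MPI_REAL, pleft,  1, comm1d,    &
                          status, ierr )

       PRINT*, 'rank', myid, 'left ghost =', ar(1,0), 'right ghost =', ar(1,nnx+1)
       CALL MPI_FINALIZE( ierr )
    END PROGRAM ghost_exchange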
palm/trunk/SOURCE/exchange_horiz_2d.f90
r703 → r707

Current revisions:
------------------
bc_lr/ns replaced by bc_lr/ns_cyc

Around line 106 (lateral boundary conditions in the non-parallel case):

-    IF ( bc_lr == 'cyclic' )  THEN
+    IF ( bc_lr_cyc )  THEN
        ar(:,nxlg:nxl-1) = ar(:,nxr-nbgp+1:nxr)
        ar(:,nxr+1:nxrg) = ar(:,nxl:nxl+nbgp-1)
     ENDIF

-    IF ( bc_ns == 'cyclic' )  THEN
+    IF ( bc_ns_cyc )  THEN
        ar(nysg:nys-1,:) = ar(nyn-nbgp+1:nyn,:)
        ar(nyn+1:nyng,:) = ar(nys:nys+nbgp-1,:)

The same replacements are applied a second time further down in the file
(around line 224).
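In the serial case the cyclic condition is nothing more than array-section
copies into the ghost layers, as the hunks above show. A minimal sketch, not
PALM code, reduced to one dimension:

    PROGRAM cyclic_ghost
       IMPLICIT NONE
       INTEGER, PARAMETER :: nx = 5, nbgp = 2
       REAL :: ar(1-nbgp:nx+nbgp)
       INTEGER :: i

       ar = 0.0
       DO  i = 1, nx
          ar(i) = REAL( i )
       ENDDO
    !-- Left ghost points get the rightmost interior values and vice versa
       ar(1-nbgp:0)     = ar(nx-nbgp+1:nx)
       ar(nx+1:nx+nbgp) = ar(1:nbgp)
       PRINT*, ar
    END PROGRAM cyclic_ghost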
palm/trunk/SOURCE/header.f90
r668 → r707

Current revisions:
------------------
bc_lr/ns replaced by bc_lr/ns_cyc

Around line 688:

     WRITE ( io, 317 )  bc_lr, bc_ns
-    IF ( bc_lr /= 'cyclic'  .OR.  bc_ns /= 'cyclic' )  THEN
+    IF ( .NOT. bc_lr_cyc  .OR.  .NOT. bc_ns_cyc )  THEN
        WRITE ( io, 318 )  outflow_damping_width, km_damp_max
        IF ( turbulent_inflow )  THEN

Around line 1479:

            zu(disturbance_level_ind_b), disturbance_level_ind_b, &
            zu(disturbance_level_ind_t), disturbance_level_ind_t
-    IF ( bc_lr /= 'cyclic'  .OR.  bc_ns /= 'cyclic' )  THEN
+    IF ( .NOT. bc_lr_cyc  .OR.  .NOT. bc_ns_cyc )  THEN
        WRITE ( io, 472 )  inflow_disturbance_begin, inflow_disturbance_end
     ELSE
palm/trunk/SOURCE/init_3d_model.f90
r681 → r707

Current revisions:
------------------
p_sub renamed p_loc and allocated depending on the chosen pressure solver,
initial assignments of zero to array p for iterative solvers only,
bc_lr/ns replaced by bc_lr/ns_dirrad/raddir

Around line 218:

-    !-- Following array is required to buffer the perturbation pressure during
-    !-- Runge-Kutta 3rd order time integration.
-    IF ( psolver == 'multigrid'  .OR.  psolver == 'sor' )  THEN
-       ALLOCATE( p_sub(nzb:nzt+1,nysg:nyng,nxlg:nxrg) )
+    !-- Following array is required for perturbation pressure within the
+    !-- iterative pressure solvers. For the multistep schemes (Runge-Kutta),
+    !-- array p holds the weighted average of the substeps and cannot be used
+    !-- in the Poisson solver.
+    IF ( psolver == 'sor' )  THEN
+       ALLOCATE( p_loc(nzb:nzt+1,nysg:nyng,nxlg:nxrg) )
+    ELSEIF ( psolver == 'multigrid' )  THEN
+    !
+    !-- For performance reasons, multigrid is using one ghost layer only
+       ALLOCATE( p_loc(nzb:nzt+1,nys-1:nyn+1,nxl-1:nxr+1) )
     ENDIF

Around line 843:

-    !-- For the moment, perturbation pressure and vertical velocity are zero
-    p = 0.0;  w = 0.0
+    !-- For the moment, vertical velocity is zero
+    w = 0.0

     !-- Initialize array sums (must be defined in first call of pres)
     sums = 0.0

+    !-- In case of iterative solvers, p must get an initial value
+    IF ( psolver == 'multigrid'  .OR.  psolver == 'sor' )  p = 0.0

Around line 884, removed (p is no longer reset after the random number
generator has been initialized):

-    !-- Once again set the perturbation pressure explicitly to zero in order to
-    !-- assure that it does not generate any divergences in the first time step.
-    !-- At t=0 the velocity field is free of divergence (as constructed above).
-    !-- Divergences being created during a time step are not yet known and thus
-    !-- cannot be corrected during the time step yet.
-    p = 0.0

Around line 1416 (damping layers at non-cyclic lateral boundaries; a linear
increase is assumed over the first half of the width of the damping layer):

-    IF ( bc_lr == 'dirichlet/radiation' )  THEN
+    IF ( bc_lr_dirrad )  THEN
...
-    ELSEIF ( bc_lr == 'radiation/dirichlet' )  THEN
+    ELSEIF ( bc_lr_raddir )  THEN
...
-    IF ( bc_ns == 'dirichlet/radiation' )  THEN
+    IF ( bc_ns_dirrad )  THEN
...
-    ELSEIF ( bc_ns == 'radiation/dirichlet' )  THEN
+    ELSEIF ( bc_ns_raddir )  THEN
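The solver-dependent allocation keeps the multigrid work array one ghost layer
wide while the SOR variant keeps the full nbgp ghost layers. A minimal sketch,
not PALM code, with illustrative grid bounds:

    PROGRAM alloc_by_solver
       IMPLICIT NONE
       CHARACTER(LEN=10) :: psolver = 'multigrid'
       INTEGER, PARAMETER :: nzb = 0, nzt = 8, nys = 0, nyn = 7, &
                             nxl = 0, nxr = 7, nbgp = 3
       REAL, ALLOCATABLE :: p_loc(:,:,:)

       IF ( psolver == 'sor' )  THEN
          ALLOCATE( p_loc(nzb:nzt+1,nys-nbgp:nyn+nbgp,nxl-nbgp:nxr+nbgp) )
       ELSEIF ( psolver == 'multigrid' )  THEN
          ALLOCATE( p_loc(nzb:nzt+1,nys-1:nyn+1,nxl-1:nxr+1) )
       ENDIF
       PRINT*, 'p_loc holds', SIZE( p_loc ), 'elements'
    END PROGRAM alloc_by_solver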
palm/trunk/SOURCE/init_grid.f90
r668 → r707

Current revisions:
------------------
bc_lr/ns replaced by bc_lr/ns_cyc

Around line 564 (consistency checks of the topography grid):

-    IF ( bc_lr == 'cyclic' )  THEN
+    IF ( bc_lr_cyc )  THEN
        IF ( ANY( nzb_local(:,-1) /= nzb_local(:,nx)   )  .OR. &
             ANY( nzb_local(:,0)  /= nzb_local(:,nx+1) ) )  THEN
...
-    IF ( bc_ns == 'cyclic' )  THEN
+    IF ( bc_ns_cyc )  THEN
        IF ( ANY( nzb_local(-1,:) /= nzb_local(ny,:)   )  .OR. &
             ANY( nzb_local(0,:)  /= nzb_local(ny+1,:) ) )  THEN
palm/trunk/SOURCE/init_pegrid.f90
r669 → r707

Current revisions:
------------------
bc_lr/ns replaced by bc_lr/ns_cyc/dirrad/raddir

ATTENTION: nnz_x undefined problem still has to be solved!!!!!!!!

Around line 188 (if necessary, set horizontal boundary conditions to non-cyclic):

-    IF ( bc_lr /= 'cyclic' )  cyclic(1) = .FALSE.
-    IF ( bc_ns /= 'cyclic' )  cyclic(2) = .FALSE.
+    IF ( .NOT. bc_lr_cyc )  cyclic(1) = .FALSE.
+    IF ( .NOT. bc_ns_cyc )  cyclic(2) = .FALSE.

Around line 334, new:

+    !-- Set switches to define if the PE is situated at the border of the
+    !-- virtual processor grid
+    IF ( nxl == 0 )   left_border_pe  = .TRUE.
+    IF ( nxr == nx )  right_border_pe = .TRUE.
+    IF ( nys == 0 )   south_border_pe = .TRUE.
+    IF ( nyn == ny )  north_border_pe = .TRUE.

Around line 1076 (in-/outflow switches at the domain borders, depending on the
horizontal boundary conditions, parallel case):

     IF ( pleft == MPI_PROC_NULL )  THEN
-       IF ( bc_lr == 'dirichlet/radiation' )  THEN
+       IF ( bc_lr_dirrad )  THEN
           inflow_l  = .TRUE.
-       ELSEIF ( bc_lr == 'radiation/dirichlet' )  THEN
+       ELSEIF ( bc_lr_raddir )  THEN
           outflow_l = .TRUE.
        ENDIF
     ENDIF

The analogous replacements are applied for pright (outflow_r/inflow_r), psouth
(outflow_s/inflow_s) and pnorth (inflow_n/outflow_n), and in the serial (#else)
branch, where bc_lr_dirrad sets inflow_l/outflow_r, bc_lr_raddir sets
outflow_l/inflow_r, bc_ns_dirrad sets inflow_n/outflow_s and bc_ns_raddir sets
outflow_n/inflow_s.
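On a non-periodic cartesian processor grid, MPI_CART_SHIFT returns
MPI_PROC_NULL at the domain border, which is what the hunks above test to set
the in-/outflow switches on border PEs only. A minimal sketch, not PALM code
(the dirichlet/radiation assignment is the assumed example case):

    PROGRAM border_pes
       USE mpi
       IMPLICIT NONE
       INTEGER :: comm1d, ierr, myid, npes, pleft, pright
       LOGICAL :: inflow_l = .FALSE., outflow_r = .FALSE.

       CALL MPI_INIT( ierr )
       CALL MPI_COMM_SIZE( MPI_COMM_WORLD, npes, ierr )
    !-- Non-periodic 1-d decomposition (periods = .FALSE.)
       CALL MPI_CART_CREATE( MPI_COMM_WORLD, 1, (/ npes /), (/ .FALSE. /), &
                             .TRUE., comm1d, ierr )
       CALL MPI_COMM_RANK( comm1d, myid, ierr )
       CALL MPI_CART_SHIFT( comm1d, 0, 1, pleft, pright, ierr )

    !-- For a dirichlet/radiation condition: inflow at the left border PE,
    !-- outflow at the right border PE
       IF ( pleft  == MPI_PROC_NULL )  inflow_l  = .TRUE.
       IF ( pright == MPI_PROC_NULL )  outflow_r = .TRUE.
       PRINT*, 'rank', myid, 'inflow_l =', inflow_l, 'outflow_r =', outflow_r
       CALL MPI_FINALIZE( ierr )
    END PROGRAM border_pes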
palm/trunk/SOURCE/modules.f90
r684 → r707

Current revisions:
------------------
+bc_lr_dirrad, bc_lr_raddir, bc_ns_dirrad, bc_ns_raddir, left_border_pe,
north_border_pe, right_border_pe, south_border_pe
p_sub renamed p_loc

Around line 263:

     REAL, DIMENSION(:,:,:), ALLOCATABLE :: &
           canopy_heat_flux, cdc, d, diss, lad_s, lad_u, lad_v, lad_w, lai, &
-          l_wall, p_sub, sec, sls, tend, u_m_l, u_m_n, u_m_r, u_m_s, v_m_l, &
+          l_wall, p_loc, sec, sls, tend, u_m_l, u_m_n, u_m_r, u_m_s, v_m_l, &
           v_m_n, v_m_r, v_m_s, w_m_l, w_m_n, w_m_r, w_m_s

Around line 483:

     LOGICAL :: adjust_mixing_length = .FALSE., avs_output = .FALSE., &
-               bc_lr_cyc = .TRUE., bc_ns_cyc = .TRUE., &
+               bc_lr_cyc = .TRUE., bc_lr_dirrad = .FALSE., &
+               bc_lr_raddir = .FALSE., bc_ns_cyc = .TRUE., &
+               bc_ns_dirrad = .FALSE., bc_ns_raddir = .FALSE., &
                call_psolver_at_all_substeps = .TRUE., &
                cloud_droplets = .FALSE., cloud_physics = .FALSE., &

Around line 1237:

-    LOGICAL :: collective_wait = .FALSE., reorder = .TRUE., &
+    LOGICAL :: collective_wait = .FALSE., left_border_pe = .FALSE., &
+               north_border_pe = .FALSE., reorder = .TRUE., &
+               right_border_pe = .FALSE., south_border_pe = .TRUE., &
                synchronous_exchange = .FALSE.
palm/trunk/SOURCE/poismg.f90
r668 → r707

Attention note updated:

-    ! Attention: Loop unrolling and cache optimization in SOR-Red/Black method
-    !            still does not bring the expected speedup on ibm! Further work
-    !            is required.
+    ! Attention: Loop unrolling and cache optimization in SOR-Red/Black method
+    !            still does not give the expected speedup! Further work required.

Current revisions:
------------------
p_loc is used instead of p in the main routine (poismg).
On coarse grid levels, gathered data are identically processed on all PEs
(before, on PE0 only), so that the subsequent scattering of data is not
necessary any more.
bc_lr/ns replaced by bc_lr/ns_cyc/dirrad/raddir
Bugfix: bottom (nzb) and top (nzt+1) boundary conditions set in routines resid
and restrict. They were missed before, which may have led to unpredictable
results.

Routine poismg, around line 85:

-    !-- Some boundaries have to be added to divergence array
+    !-- Ghost boundaries have to be added to divergence array.
+    !-- Exchange routine needs to know the grid level!
+    grid_level = maximum_grid_level
     CALL exchange_horiz( d, 1 )
     d(nzb,:,:) = d(nzb+1,:,:)

Around line 110, the multigrid cycle and the residual check now work on p_loc
instead of p:

-    CALL next_mg_level( d, p, p3, r )
+    CALL next_mg_level( d, p_loc, p3, r )
...
-    CALL resid( d, p, r )
+    CALL resid( d, p_loc, r )

Around line 142, new (after deallocating p3):

+    !-- Unset the grid level. Variable is used to determine the MPI datatypes
+    !-- for ghost point exchange
+    grid_level = 0

Routine resid, around line 238 (lateral boundary conditions):

-    IF ( bc_lr /= 'cyclic' )  THEN
+    IF ( .NOT. bc_lr_cyc )  THEN
        IF ( inflow_l .OR. outflow_l )  r(:,:,nxl_mg(l)-1) = r(:,:,nxl_mg(l))
        IF ( inflow_r .OR. outflow_r )  r(:,:,nxr_mg(l)+1) = r(:,:,nxr_mg(l))
     ENDIF
-    IF ( bc_ns /= 'cyclic' )  THEN
+    IF ( .NOT. bc_ns_cyc )  THEN
        IF ( inflow_n .OR. outflow_n )  r(:,nyn_mg(l)+1,:) = r(:,nyn_mg(l),:)
        IF ( inflow_s .OR. outflow_s )  r(:,nys_mg(l)-1,:) = r(:,nys_mg(l),:)

Around line 250, bugfix: bottom and top boundary conditions are now set
explicitly:

-    !-- Top boundary condition
-    !-- A Neumann boundary condition for r is implicitly set in routine restrict
+    !-- Boundary conditions at bottom and top of the domain.
+    !-- These points are not handled by the above loop. Points may be within
+    !-- buildings, but that doesn't matter.
+    IF ( ibc_p_b == 1 )  THEN
+       r(nzb,:,:) = r(nzb+1,:,:)
+    ELSE
+       r(nzb,:,:) = 0.0
+    ENDIF
     IF ( ibc_p_t == 1 )  THEN
        r(nzt_mg(l)+1,:,:) = r(nzt_mg(l),:,:)

Routine restrict: the same bc_lr/ns_cyc replacements for f_mg, and the
previously commented-out bottom/top boundary conditions are now active:

-    ! IF ( ibc_p_b == 1 )  THEN
-    !    f_mg(nzb,:,:) = f_mg(nzb+1,:,:)
-    ! ELSE
-    !    f_mg(nzb,:,:) = 0.0
-    ! ENDIF
-    ! IF ( ibc_p_t == 1 )  THEN
-    !    f_mg(nzt_mg(l)+1,:,:) = f_mg(nzt_mg(l),:,:)
-    ! ELSE
-    !    f_mg(nzt_mg(l)+1,:,:) = 0.0
-    ! ENDIF
+    IF ( ibc_p_b == 1 )  THEN
+       f_mg(nzb,:,:) = f_mg(nzb+1,:,:)
+    ELSE
+       f_mg(nzb,:,:) = 0.0
+    ENDIF
+    IF ( ibc_p_t == 1 )  THEN
+       f_mg(nzt_mg(l)+1,:,:) = f_mg(nzt_mg(l),:,:)
+    ELSE
+       f_mg(nzt_mg(l)+1,:,:) = 0.0
+    ENDIF

The same bc_lr/ns_cyc replacements are applied to the boundary settings of temp
(prolongation, around line 518) and p_mg (red/black relaxation, around
line 888).

Routine mg_gather, rewritten: instead of PE0 collecting all subdomain arrays
via MPI_RECV, every PE stores its subdomain in a zero-initialized local copy
f2_l of the total array, and the data are summed up on all PEs:

+    f2_l = 0.0
+    !-- Store the local subdomain array on the total array
+    js = mg_loc_ind(3,myid)
+    IF ( south_border_pe )  js = js - 1
+    jn = mg_loc_ind(4,myid)
+    IF ( north_border_pe )  jn = jn + 1
+    il = mg_loc_ind(1,myid)
+    IF ( left_border_pe )   il = il - 1
+    ir = mg_loc_ind(2,myid)
+    IF ( right_border_pe )  ir = ir + 1
+    DO  i = il, ir
+       DO  j = js, jn
+          DO  k = nzb, nzt_mg(grid_level)+1
+             f2_l(k,j,i) = f2_sub(k,j,i)
+          ENDDO
+       ENDDO
+    ENDDO
+    !-- Gather subdomain data from all PEs
+    IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
+    CALL MPI_ALLREDUCE( f2_l(nzb,nys_mg(grid_level)-1,nxl_mg(grid_level)-1), &
+                        f2(nzb,nys_mg(grid_level)-1,nxl_mg(grid_level)-1),   &
+                        nwords, MPI_REAL, MPI_SUM, comm2d, ierr )

Routine mg_scatter: since all PEs now hold the gathered data, the MPI
send/receive pairs are removed and every PE simply copies its part:

+    p2_sub = p2(:,mg_loc_ind(3,myid)-1:mg_loc_ind(4,myid)+1, &
+                  mg_loc_ind(1,myid)-1:mg_loc_ind(2,myid)+1)

Routine next_mg_level: the PE0-only special handling around the switch level is
removed (the restore_boundary_lr/ns_on_pe0 logicals are deleted, and the
recursive call as well as the grid-level increment are no longer restricted to
PE0). After the switch to the coarse levels, the in-/outflow switches are set
on all PEs from bc_lr/ns_dirrad/raddir, because each PE then holds the total
domain; after returning from the coarse levels they are restored from the
neighbour ranks (pleft/pright/psouth/pnorth == MPI_PROC_NULL), as in
init_pegrid.
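The rewritten mg_gather replaces a receive loop on PE0 by a collective: each
rank writes its slice into a zero-initialized copy of the full array, and one
MPI_ALLREDUCE with MPI_SUM leaves the complete field on every rank, so no
subsequent scatter is needed. A minimal sketch, not PALM code (assumes the
number of ranks divides n):

    PROGRAM allreduce_gather
       USE mpi
       IMPLICIT NONE
       INTEGER, PARAMETER :: n = 8            ! total number of points
       INTEGER :: ierr, myid, npes, nloc, i0, i
       REAL :: f_l(n), f(n)

       CALL MPI_INIT( ierr )
       CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
       CALL MPI_COMM_SIZE( MPI_COMM_WORLD, npes, ierr )

       nloc = n / npes
       i0   = myid * nloc
       f_l  = 0.0
       DO  i = i0+1, i0+nloc                  ! fill the local slice only
          f_l(i) = REAL( i )
       ENDDO

    !-- Summation gathers the slices; every rank ends up with the full field
       CALL MPI_ALLREDUCE( f_l, f, n, MPI_REAL, MPI_SUM, MPI_COMM_WORLD, ierr )
       IF ( myid == 0 )  PRINT*, f
       CALL MPI_FINALIZE( ierr )
    END PROGRAM allreduce_gather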
palm/trunk/SOURCE/pres.f90
r695 → r707

Current revisions:
------------------
Calculation of weighted average of p is now handled in the same way regardless
of the number of ghost layers (advection scheme),
multigrid and sor method are using p_loc for iterative advancements of
pressure,
localsum calculation modified for proper OpenMP reduction,
bc_lr/ns replaced by bc_lr/ns_cyc

Around line 106:

-    !-- Multigrid method expects 1 additional grid point for the arrays
-    !-- d, tend and p
+    !-- Multigrid method expects array d to have one ghost layer.
     IF ( psolver == 'multigrid' )  THEN
        DEALLOCATE( d )
        ALLOCATE( d(nzb:nzt+1,nys-1:nyn+1,nxl-1:nxr+1) )

-       IF ( ws_scheme_mom  .OR.  ws_scheme_sca )  THEN
-          DEALLOCATE( tend )
-          ALLOCATE( tend(nzb:nzt+1,nys-1:nyn+1,nxl-1:nxr+1) )
-          DEALLOCATE( p )
-          ALLOCATE( p(nzb:nzt+1,nys-1:nyn+1,nxl-1:nxr+1) )
-       ENDIF
+       !-- Since p is later used to hold the weighted average of the substeps,
+       !-- it cannot be used in the iterative solver. Therefore, its initial
+       !-- value is stored on p_loc, which is then iteratively advanced in
+       !-- every substep.
+       IF ( intermediate_timestep_count == 1 )  THEN
+          DO  i = nxl-1, nxr+1
+             DO  j = nys-1, nyn+1
+                DO  k = nzb, nzt+1
+                   p_loc(k,j,i) = p(k,j,i)
+                ENDDO
+             ENDDO
+          ENDDO
+       ENDIF
+    ELSEIF ( psolver == 'sor'  .AND.  intermediate_timestep_count == 1 )  THEN
+       !-- Same reasoning as above for the SOR method
+       p_loc = p
     ENDIF

Around lines 317 and 383, the localsum calculation is modified for a proper
OpenMP reduction (in both the __parallel and the serial branch):

-    localsum = ( localsum + threadsum ) * dt_3d * &
-               weight_pres(intermediate_timestep_count)
+    localsum = localsum + threadsum * dt_3d * &
+                          weight_pres(intermediate_timestep_count)

Around line 375, the commented-out test call of global_min_max (absolute
minimum/maximum of the divergence) is removed.

Around line 509, the SOR scheme now advances p_loc:

-    CALL sor( d, ddzu_pres, ddzw, p )
-    tend = p
+    CALL sor( d, ddzu_pres, ddzw, p_loc )
+    tend = p_loc

Around line 520, multigrid: the exchange_mg switch is removed (exchange_horiz
now gets the grid level directly), p_loc is copied back to tend, and the
temporary re-allocation of p for the WS scheme is no longer needed:

-    exchange_mg = .TRUE.
     CALL poismg( tend )
-    exchange_mg = .FALSE.
-    tend = p
-    IF ( ws_scheme_mom  .OR.  ws_scheme_sca )  THEN
-       DEALLOCATE( p )
-       ALLOCATE( p(nzb:nzt+1,nysg:nyng,nxlg:nxrg) )
-    ENDIF
+    !-- Restore perturbation pressure on tend because this array is used
+    !-- further below to correct the velocity fields
+    DO  i = nxl-1, nxr+1
+       DO  j = nys-1, nyn+1
+          DO  k = nzb, nzt+1
+             tend(k,j,i) = p_loc(k,j,i)
+          ENDDO
+       ENDDO
+    ENDDO

Around line 537, the weighted average of the substeps is now stored on p in the
same way for all solvers (the former poisfft-only guard and the p_sub handling
for the iterative solvers at the end of the routine are removed):

+    !-- Store perturbation pressure on array p, used for pressure data output.
+    !-- Ghost layers are added in the output routines (except sor-method: see
+    !-- below)
     IF ( intermediate_timestep_count == 1 )  THEN
        !$OMP loops over i = nxl-1, nxr+1; j = nys-1, nyn+1; k = nzb, nzt+1
        p(k,j,i) = tend(k,j,i) * weight_substep(intermediate_timestep_count)
     ELSE
        p(k,j,i) = p(k,j,i) + tend(k,j,i) * &
                   weight_substep(intermediate_timestep_count)
     ENDIF

+    !-- SOR-method needs ghost layers for the next timestep
+    IF ( psolver == 'sor' )  CALL exchange_horiz( p, nbgp )

The velocity-correction loops are only reformatted (line continuations moved),
and the volume-flow conditions use the new logicals:

-    IF ( conserve_volume_flow  .AND. &
-         ( bc_lr == 'cyclic'  .OR.  bc_ns == 'cyclic' ) )  THEN
+    IF ( conserve_volume_flow  .AND.  ( bc_lr_cyc  .OR.  bc_ns_cyc ) )  THEN
...
-    IF ( conserve_volume_flow  .AND.  bc_lr == 'cyclic'  .AND. &
-         bc_ns == 'cyclic'  .AND.  i == nx )  THEN
+    IF ( conserve_volume_flow  .AND.  bc_lr_cyc  .AND.  bc_ns_cyc  .AND. &
+         i == nx )  THEN

(and analogously for j == ny and for the final volume-flow conservation check
with bc_lr_cyc .AND. bc_ns_cyc). The p_sub accumulation block and the resize of
tend for the WS scheme at the end of the routine are deleted.
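The substep-weighted accumulation that p now carries for all solvers is a
simple running sum over the Runge-Kutta substeps. A minimal sketch, not PALM
code (the weights shown are illustrative values, not the PALM coefficients):

    PROGRAM weighted_average
       IMPLICIT NONE
       INTEGER, PARAMETER :: nsub = 3
       REAL, PARAMETER :: weight_substep(nsub) = (/ 1.0/6.0, 1.0/3.0, 1.0/2.0 /)
       REAL :: p, tend
       INTEGER :: intermediate_timestep_count

       p = 0.0
       DO  intermediate_timestep_count = 1, nsub
          tend = REAL( intermediate_timestep_count )   ! stand-in for solver result
          IF ( intermediate_timestep_count == 1 )  THEN
             p = tend * weight_substep(intermediate_timestep_count)
          ELSE
             p = p + tend * weight_substep(intermediate_timestep_count)
          ENDIF
       ENDDO
       PRINT*, 'weighted average p =', p
    END PROGRAM weighted_average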
palm/trunk/SOURCE/sor.f90
r668 → r707

Current revisions:
------------------
bc_lr/ns replaced by bc_lr/ns_cyc

Around lines 113 and 175 (horizontal Neumann boundary conditions in case of
non-cyclic boundaries, applied after each red/black half-sweep):

-    IF ( bc_lr /= 'cyclic' )  THEN
+    IF ( .NOT. bc_lr_cyc )  THEN
        IF ( inflow_l .OR. outflow_l )  p(:,:,nxl-1) = p(:,:,nxl)
        IF ( inflow_r .OR. outflow_r )  p(:,:,nxr+1) = p(:,:,nxr)
     ENDIF
-    IF ( bc_ns /= 'cyclic' )  THEN
+    IF ( .NOT. bc_ns_cyc )  THEN
        IF ( inflow_n .OR. outflow_n )  p(:,nyn+1,:) = p(:,nyn,:)
        IF ( inflow_s .OR. outflow_s )  p(:,nys-1,:) = p(:,nys,:)
     ENDIF
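For reference, the basic update that this routine applies in a red/black
ordering is ordinary successive over-relaxation of the Poisson equation. A
minimal sketch, not PALM code, for the 1-d problem p'' = 1 on the unit interval
with Dirichlet boundaries (exact midpoint value -0.125):

    PROGRAM sor_demo
       IMPLICIT NONE
       INTEGER, PARAMETER :: n = 32, niter = 500
       REAL, PARAMETER :: omega = 1.8          ! over-relaxation factor
       REAL :: p(0:n), f(0:n), dx, pnew
       INTEGER :: i, it

       dx = 1.0 / REAL( n )
       f  = 1.0                                 ! constant right-hand side
       p  = 0.0                                 ! p(0) = p(n) = 0 stay fixed

       DO  it = 1, niter
          DO  i = 1, n-1
             pnew = 0.5 * ( p(i-1) + p(i+1) - dx*dx * f(i) )
             p(i) = p(i) + omega * ( pnew - p(i) )
          ENDDO
       ENDDO
       PRINT*, 'p at midpoint:', p(n/2), ' (exact: -0.125)'
    END PROGRAM sor_demo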
palm/trunk/SOURCE/time_integration.f90
r668 → r707

Current revisions:
------------------
bc_lr/ns replaced by bc_lr/ns_cyc,
calls of exchange_horiz are modified,
adaption to sloping surface

Around line 236 (offset of the potential temperature when a sloping surface is
used; reformatted with line continuations):

     IF ( sloping_surface )  THEN
-       IF ( nxl == 0 )   pt(:,:,nxlg:nxl-1) = pt(:,:,nxlg:nxl-1) - pt_slope_offset
-       IF ( nxr == nx )  pt(:,:,nxr+1:nxrg) = pt(:,:,nxr+1:nxrg) + pt_slope_offset
+       IF ( nxl == 0 )   pt(:,:,nxlg:nxl-1) = pt(:,:,nxlg:nxl-1) - &
+                                              pt_slope_offset
+       IF ( nxr == nx )  pt(:,:,nxr+1:nxrg) = pt(:,:,nxr+1:nxrg) + &
+                                              pt_slope_offset
     ENDIF

Around line 259 (runs with a non-cyclic lateral wall need perturbations):

        CALL disturb_field( nzb_u_inner, tend, u )
        CALL disturb_field( nzb_v_inner, tend, v )
-    ELSEIF ( bc_lr /= 'cyclic'  .OR.  bc_ns /= 'cyclic' )  THEN
+    ELSEIF ( .NOT. bc_lr_cyc  .OR.  .NOT. bc_ns_cyc )  THEN
palm/trunk/SOURCE/timestep.f90
r668 → r707

Current revisions:
------------------
bc_lr/ns replaced by bc_lr/ns_cyc

Around line 187 (the diffusion time step may be further restricted by the
lateral damping layer, damping only along x and y):

-    IF ( bc_lr /= 'cyclic' )  THEN
+    IF ( .NOT. bc_lr_cyc )  THEN
        dt_diff = MIN( dt_diff, 0.125 * dx2 / ( km_damp_max + 1E-20 ) )
-    ELSEIF ( bc_ns /= 'cyclic' )  THEN
+    ELSEIF ( .NOT. bc_ns_cyc )  THEN
        dt_diff = MIN( dt_diff, 0.125 * dy2 / ( km_damp_max + 1E-20 ) )
     ENDIF
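The criterion itself limits the time step to dt <= 0.125 * dx**2 / km, with a
tiny offset guarding against division by zero when the diffusivity vanishes. A
minimal sketch, not PALM code, with illustrative values:

    PROGRAM dt_diff_demo
       IMPLICIT NONE
       REAL :: dx = 50.0, dy = 50.0, km_damp_max = 10.0, dt_diff

       dt_diff = 1000.0                               ! some large initial value
       dt_diff = MIN( dt_diff, 0.125 * dx*dx / ( km_damp_max + 1E-20 ) )
       dt_diff = MIN( dt_diff, 0.125 * dy*dy / ( km_damp_max + 1E-20 ) )
       PRINT*, 'dt_diff =', dt_diff                   ! 31.25 s for these values
    END PROGRAM dt_diff_demo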