Changeset 709 for palm/trunk/SOURCE
- Timestamp: Mar 30, 2011 9:31:40 AM
- Location: palm/trunk/SOURCE
- Files: 12 edited
palm/trunk/SOURCE/advec_ws.f90
r706 r709 1 1 MODULE advec_ws 2 3 2 4 3 !-----------------------------------------------------------------------------! 5 4 ! Current revisions: 6 5 ! ----------------- 6 ! formatting adjustments 7 7 ! 8 8 ! Former revisions: … … 44 44 !-----------------------------------------------------------------------------! 45 45 46 47 46 PRIVATE 48 47 PUBLIC advec_s_ws, advec_u_ws, advec_v_ws, advec_w_ws, & … … 52 51 MODULE PROCEDURE ws_init 53 52 END INTERFACE ws_init 53 54 54 INTERFACE ws_statistics 55 55 MODULE PROCEDURE ws_statistics 56 56 END INTERFACE ws_statistics 57 57 58 INTERFACE advec_s_ws 58 59 MODULE PROCEDURE advec_s_ws 59 60 MODULE PROCEDURE advec_s_ws_ij 60 61 END INTERFACE advec_s_ws 62 61 63 INTERFACE advec_u_ws 62 64 MODULE PROCEDURE advec_u_ws 63 65 MODULE PROCEDURE advec_u_ws_ij 64 66 END INTERFACE advec_u_ws 67 65 68 INTERFACE advec_v_ws 66 69 MODULE PROCEDURE advec_v_ws 67 70 MODULE PROCEDURE advec_v_ws_ij 68 71 END INTERFACE advec_v_ws 72 69 73 INTERFACE advec_w_ws 70 74 MODULE PROCEDURE advec_w_ws 71 75 MODULE PROCEDURE advec_w_ws_ij 72 76 END INTERFACE advec_w_ws 77 73 78 INTERFACE local_diss 74 79 MODULE PROCEDURE local_diss … … 78 83 CONTAINS 79 84 80 ! 81 !-- Initialisation 82 85 86 !------------------------------------------------------------------------------! 87 ! Initialization of WS-scheme 88 !------------------------------------------------------------------------------! 83 89 SUBROUTINE ws_init 90 84 91 USE arrays_3d 85 92 USE constants … … 91 98 !-- Allocate arrays needed for dissipation control. 92 99 IF ( dissipation_control ) THEN 93 ! ALLOCATE(var_x(nzb+1:nzt,nys:nyn,nxl:nxr),&94 ! var_y(nzb+1:nzt,nys:nyn,nxl:nxr),&95 ! var_z(nzb+1:nzt,nys:nyn,nxl:nxr),&96 !gamma_x(nzb:nzt+1,nysg:nyng,nxlg:nxrg), &97 !gamma_y(nzb:nzt+1,nysg:nyng,nxlg:nxrg), &98 !gamma_z(nzb:nzt+1,nysg:nyng,nxlg:nxrg))100 ! ALLOCATE(var_x(nzb+1:nzt,nys:nyn,nxl:nxr), & 101 ! var_y(nzb+1:nzt,nys:nyn,nxl:nxr), & 102 ! var_z(nzb+1:nzt,nys:nyn,nxl:nxr), & 103 ! gamma_x(nzb:nzt+1,nysg:nyng,nxlg:nxrg), & 104 ! gamma_y(nzb:nzt+1,nysg:nyng,nxlg:nxrg), & 105 ! gamma_z(nzb:nzt+1,nysg:nyng,nxlg:nxrg)) 99 106 ENDIF 100 107 108 ! 101 109 !-- Set the appropriate factors for scalar and momentum advection. 102 110 adv_sca_5 = 0.016666666666666 103 adv_sca_3 = 0.083333333333333 111 adv_sca_3 = 0.083333333333333 104 112 adv_mom_5 = 0.0083333333333333 105 113 adv_mom_3 = 0.041666666666666 … … 108 116 !-- Arrays needed for statical evaluation of fluxes. 109 117 IF ( ws_scheme_mom ) THEN 110 ALLOCATE( sums_wsus_ws_l(nzb:nzt+1,0:statistic_regions), & 111 sums_wsvs_ws_l(nzb:nzt+1,0:statistic_regions), & 112 sums_us2_ws_l(nzb:nzt+1,0:statistic_regions), & 113 sums_vs2_ws_l(nzb:nzt+1,0:statistic_regions), & 114 sums_ws2_ws_l(nzb:nzt+1,0:statistic_regions)) 115 118 119 ALLOCATE( sums_wsus_ws_l(nzb:nzt+1,0:statistic_regions), & 120 sums_wsvs_ws_l(nzb:nzt+1,0:statistic_regions), & 121 sums_us2_ws_l(nzb:nzt+1,0:statistic_regions), & 122 sums_vs2_ws_l(nzb:nzt+1,0:statistic_regions), & 123 sums_ws2_ws_l(nzb:nzt+1,0:statistic_regions)) 124 125 sums_wsus_ws_l = 0.0 126 sums_wsvs_ws_l = 0.0 127 sums_us2_ws_l = 0.0 128 sums_vs2_ws_l = 0.0 129 sums_ws2_ws_l = 0.0 130 131 ENDIF 132 133 IF ( ws_scheme_sca ) THEN 134 135 ALLOCATE( sums_wspts_ws_l(nzb:nzt+1,0:statistic_regions) ) 136 sums_wspts_ws_l = 0.0 137 138 IF ( humidity .OR. 
passive_scalar ) THEN 139 ALLOCATE( sums_wsqs_ws_l(nzb:nzt+1,0:statistic_regions) ) 140 sums_wsqs_ws_l = 0.0 141 ENDIF 142 143 IF ( ocean ) THEN 144 ALLOCATE( sums_wssas_ws_l(nzb:nzt+1,0:statistic_regions) ) 145 sums_wssas_ws_l = 0.0 146 ENDIF 147 148 ENDIF 149 150 ! 151 !-- Arrays needed for reasons of speed optimization for cache and noopt 152 !-- version. For the vector version the buffer arrays are not necessary, 153 !-- because the the fluxes can swapped directly inside the loops of the 154 !-- advection routines. 155 IF ( loop_optimization /= 'vector' ) THEN 156 157 IF ( ws_scheme_mom ) THEN 158 159 ALLOCATE( flux_s_u(nzb+1:nzt), flux_s_v(nzb+1:nzt), & 160 flux_s_w(nzb+1:nzt), diss_s_u(nzb+1:nzt), & 161 diss_s_v(nzb+1:nzt), diss_s_w(nzb+1:nzt) ) 162 ALLOCATE( flux_l_u(nzb+1:nzt,nys:nyn), & 163 flux_l_v(nzb+1:nzt,nys:nyn), & 164 flux_l_w(nzb+1:nzt,nys:nyn), & 165 diss_l_u(nzb+1:nzt,nys:nyn), & 166 diss_l_v(nzb+1:nzt,nys:nyn), & 167 diss_l_w(nzb+1:nzt,nys:nyn) ) 168 169 ENDIF 170 171 IF ( ws_scheme_sca ) THEN 172 173 ALLOCATE( flux_s_pt(nzb+1:nzt), flux_s_e(nzb+1:nzt), & 174 diss_s_pt(nzb+1:nzt), diss_s_e(nzb+1:nzt) ) 175 ALLOCATE( flux_l_pt(nzb+1:nzt,nys:nyn), & 176 flux_l_e(nzb+1:nzt,nys:nyn), & 177 diss_l_pt(nzb+1:nzt,nys:nyn), & 178 diss_l_e(nzb+1:nzt,nys:nyn) ) 179 180 IF ( humidity .OR. passive_scalar ) THEN 181 ALLOCATE( flux_s_q(nzb+1:nzt), diss_s_q(nzb+1:nzt) ) 182 ALLOCATE( flux_l_q(nzb+1:nzt,nys:nyn), & 183 diss_l_q(nzb+1:nzt,nys:nyn) ) 184 ENDIF 185 186 IF ( ocean ) THEN 187 ALLOCATE( flux_s_sa(nzb+1:nzt), diss_s_sa(nzb+1:nzt) ) 188 ALLOCATE( flux_l_sa(nzb+1:nzt,nys:nyn), & 189 diss_l_sa(nzb+1:nzt,nys:nyn) ) 190 ENDIF 191 192 ENDIF 193 194 ENDIF 195 196 ! 197 !-- Determine the flags where the order of the scheme for horizontal 198 !-- advection has to be degraded. 199 ALLOCATE( boundary_flags(nys:nyn,nxl:nxr) ) 200 DO i = nxl, nxr 201 DO j = nys, nyn 202 203 boundary_flags(j,i) = 0 204 IF ( outflow_l ) THEN 205 IF ( i == nxlu ) boundary_flags(j,i) = 5 206 IF ( i == nxl ) boundary_flags(j,i) = 6 207 ELSEIF ( outflow_r ) THEN 208 IF ( i == nxr-1 ) boundary_flags(j,i) = 1 209 IF ( i == nxr ) boundary_flags(j,i) = 2 210 ELSEIF ( outflow_n ) THEN 211 IF ( j == nyn-1 ) boundary_flags(j,i) = 3 212 IF ( j == nyn ) boundary_flags(j,i) = 4 213 ELSEIF ( outflow_s ) THEN 214 IF ( j == nysv ) boundary_flags(j,i) = 7 215 IF ( j == nys ) boundary_flags(j,i) = 8 216 ENDIF 217 218 ENDDO 219 ENDDO 220 221 END SUBROUTINE ws_init 222 223 224 !------------------------------------------------------------------------------! 225 ! Initialize variables used for storing statistic qauntities (fluxes, variances) 226 !------------------------------------------------------------------------------! 227 SUBROUTINE ws_statistics 228 229 USE control_parameters 230 USE statistics 231 232 IMPLICIT NONE 233 234 ! 235 !-- The arrays needed for statistical evaluation are set to to 0 at the 236 !-- begin of prognostic_equations. 237 IF ( ws_scheme_mom ) THEN 116 238 sums_wsus_ws_l = 0.0 117 239 sums_wsvs_ws_l = 0.0 … … 122 244 123 245 IF ( ws_scheme_sca ) THEN 124 ALLOCATE( sums_wspts_ws_l(nzb:nzt+1,0:statistic_regions))125 246 sums_wspts_ws_l = 0.0 126 IF ( humidity .OR. passive_scalar ) THEN 127 ALLOCATE( sums_wsqs_ws_l(nzb:nzt+1,0:statistic_regions)) 128 sums_wsqs_ws_l = 0.0 129 ENDIF 130 IF ( ocean ) THEN 131 ALLOCATE( sums_wssas_ws_l(nzb:nzt+1,0:statistic_regions)) 132 sums_wssas_ws_l = 0.0 133 ENDIF 247 IF ( humidity .OR. 
passive_scalar ) sums_wsqs_ws_l = 0.0 248 IF ( ocean ) sums_wssas_ws_l = 0.0 249 134 250 ENDIF 135 !136 !-- Arrays needed for reasons of speed optimization for cache and noopt137 !-- version. For the vector version the buffer arrays are not necessary,138 !-- because the the fluxes can swapped directly inside the loops of the139 !-- advection routines.140 IF ( loop_optimization /= 'vector' ) THEN141 IF ( ws_scheme_mom ) THEN142 ALLOCATE(flux_s_u(nzb+1:nzt), flux_s_v(nzb+1:nzt), &143 flux_s_w(nzb+1:nzt), diss_s_u(nzb+1:nzt), &144 diss_s_v(nzb+1:nzt), diss_s_w(nzb+1:nzt))145 ALLOCATE(flux_l_u(nzb+1:nzt,nys:nyn), &146 flux_l_v(nzb+1:nzt,nys:nyn), flux_l_w(nzb+1:nzt,nys:nyn), &147 diss_l_u(nzb+1:nzt,nys:nyn), diss_l_v(nzb+1:nzt,nys:nyn), &148 diss_l_w(nzb+1:nzt,nys:nyn))149 ENDIF150 IF ( ws_scheme_sca ) THEN151 ALLOCATE(flux_s_pt(nzb+1:nzt), flux_s_e(nzb+1:nzt), &152 diss_s_pt(nzb+1:nzt), diss_s_e(nzb+1:nzt))153 ALLOCATE(flux_l_pt(nzb+1:nzt,nys:nyn), &154 flux_l_e(nzb+1:nzt,nys:nyn), diss_l_pt(nzb+1:nzt,nys:nyn), &155 diss_l_e(nzb+1:nzt,nys:nyn))156 IF ( humidity .OR. passive_scalar ) THEN157 ALLOCATE(flux_s_q(nzb+1:nzt), diss_s_q(nzb+1:nzt))158 ALLOCATE(flux_l_q(nzb+1:nzt,nys:nyn), &159 diss_l_q(nzb+1:nzt,nys:nyn))160 ENDIF161 IF ( ocean ) THEN162 ALLOCATE(flux_s_sa(nzb+1:nzt), diss_s_sa(nzb+1:nzt))163 ALLOCATE(flux_l_sa(nzb+1:nzt,nys:nyn), &164 diss_l_sa(nzb+1:nzt,nys:nyn))165 ENDIF166 ENDIF167 ENDIF168 !169 !-- Determine the flags where the order of the scheme for horizontal170 !-- advection should be degraded.171 ALLOCATE( boundary_flags(nys:nyn,nxl:nxr) )172 DO i = nxl, nxr173 DO j = nys, nyn174 boundary_flags(j,i) = 0175 IF ( outflow_l ) THEN176 IF ( i == nxlu ) boundary_flags(j,i) = 5177 IF ( i == nxl ) boundary_flags(j,i) = 6178 ELSEIF ( outflow_r ) THEN179 IF ( i == nxr-1 ) boundary_flags(j,i) = 1180 IF ( i == nxr ) boundary_flags(j,i) = 2181 ELSEIF ( outflow_n ) THEN182 IF ( j == nyn-1 ) boundary_flags(j,i) = 3183 IF ( j == nyn ) boundary_flags(j,i) = 4184 ELSEIF ( outflow_s ) THEN185 IF ( j == nysv ) boundary_flags(j,i) = 7186 IF ( j == nys ) boundary_flags(j,i) = 8187 ENDIF188 ENDDO189 ENDDO190 191 END SUBROUTINE ws_init192 193 194 195 SUBROUTINE ws_statistics196 197 USE control_parameters198 USE statistics199 200 !201 !-- The arrays needed for statistical evaluation are set to to 0 at the202 !-- begin of prognostic_equations.203 IF ( ws_scheme_mom ) THEN204 sums_wsus_ws_l = 0.0205 sums_wsvs_ws_l = 0.0206 sums_us2_ws_l = 0.0207 sums_vs2_ws_l = 0.0208 sums_ws2_ws_l = 0.0209 ENDIF210 IF ( ws_scheme_sca ) THEN211 sums_wspts_ws_l = 0.0212 IF ( humidity .OR. passive_scalar ) sums_wsqs_ws_l = 0.0213 IF ( ocean ) sums_wssas_ws_l = 0.0214 ENDIF215 251 216 252 END SUBROUTINE ws_statistics 217 218 253 219 254 … … 222 257 !------------------------------------------------------------------------------! 223 258 SUBROUTINE advec_s_ws_ij( i, j, sk, sk_char,swap_flux_y_local, & 224 swap_diss_y_local, swap_flux_x_local, swap_diss_x_local ) 259 swap_diss_y_local, swap_flux_x_local, & 260 swap_diss_x_local ) 225 261 226 262 USE arrays_3d … … 239 275 REAL, DIMENSION(nzb:nzt+1) :: flux_t, diss_t, flux_r, diss_r, & 240 276 flux_n, diss_n 241 REAL, DIMENSION(nzb+1:nzt) :: swap_flux_y_local, & 277 REAL, DIMENSION(nzb+1:nzt) :: swap_flux_y_local, & 242 278 swap_diss_y_local 243 279 REAL, DIMENSION(nzb+1:nzt,nys:nyn) :: swap_flux_x_local, & 244 280 swap_diss_x_local 245 281 CHARACTER (LEN = *), INTENT(IN) :: sk_char 246 282 283 247 284 degraded_l = .FALSE. 248 285 degraded_s = .FALSE. 
… … 250 287 IF ( boundary_flags(j,i) /= 0 ) THEN 251 288 ! 252 !-- Degrade the order for Dirichlet bc. at the outflow boundary. 253 SELECT CASE ( boundary_flags(j,i) ) 254 255 CASE ( 1 ) 256 DO k = nzb_s_inner(j,i) + 1, nzt 257 u_comp = u(k,j,i+1) - u_gtrans 258 flux_r(k) = u_comp * ( & 259 7. * ( sk(k,j,i+1) + sk(k,j,i) ) & 260 - ( sk(k,j,i+2) + sk(k,j,i-1) ) ) * adv_sca_3 261 diss_r(k) = - abs(u_comp) * ( & 262 3. * ( sk(k,j,i+1)-sk(k,j,i) ) & 263 - ( sk(k,j,i+2) - sk(k,j,i-1) ) ) * adv_sca_3 289 !-- Degrade the order for Dirichlet bc. at the outflow boundary 290 SELECT CASE ( boundary_flags(j,i) ) 291 292 CASE ( 1 ) 293 294 DO k = nzb_s_inner(j,i)+1, nzt 295 u_comp = u(k,j,i+1) - u_gtrans 296 flux_r(k) = u_comp * ( & 297 7.0 * ( sk(k,j,i+1) + sk(k,j,i) ) & 298 - ( sk(k,j,i+2) + sk(k,j,i-1) ) ) * adv_sca_3 299 diss_r(k) = -ABS( u_comp ) * ( & 300 3.0 * ( sk(k,j,i+1) - sk(k,j,i) ) & 301 - ( sk(k,j,i+2) - sk(k,j,i-1) ) ) * adv_sca_3 302 ENDDO 303 304 CASE ( 2 ) 305 306 DO k = nzb_s_inner(j,i)+1, nzt 307 u_comp = u(k,j,i+1) - u_gtrans 308 flux_r(k) = u_comp * ( sk(k,j,i+1) + sk(k,j,i) ) * 0.5 309 diss_r(k) = diss_2nd( sk(k,j,i+1), sk(k,j,i+1), sk(k,j,i), & 310 sk(k,j,i-1), sk(k,j,i-2), u_comp, & 311 0.5, ddx ) 312 ENDDO 313 314 CASE ( 3 ) 315 316 DO k = nzb_s_inner(j,i) + 1, nzt 317 v_comp = v(k,j+1,i) - v_gtrans 318 flux_n(k) = v_comp * ( & 319 7.0 * ( sk(k,j+1,i) + sk(k,j,i) ) & 320 - ( sk(k,j+2,i) + sk(k,j-1,i) ) ) * adv_sca_3 321 diss_n(k) = -ABS( v_comp ) * ( & 322 3.0 * ( sk(k,j+1,i) - sk(k,j,i) ) & 323 - ( sk(k,j+2,i) - sk(k,j-1,i) ) ) * adv_sca_3 264 324 ENDDO 265 266 CASE ( 2 ) 267 DO k = nzb_s_inner(j,i) + 1, nzt 268 u_comp = u(k,j,i+1) - u_gtrans 269 flux_r(k) = u_comp * ( sk(k,j,i+1) + sk(k,j,i) ) * 0.5 270 diss_r(k) = diss_2nd( sk(k,j,i+1), sk(k,j,i+1), sk(k,j,i), & 271 sk(k,j,i-1), sk(k,j,i-2), u_comp, & 272 0.5, ddx ) 273 ENDDO 274 275 CASE ( 3 ) 276 DO k = nzb_s_inner(j,i) + 1, nzt 277 v_comp = v(k,j+1,i) - v_gtrans 278 flux_n(k) = v_comp * ( & 279 7. * ( sk(k,j+1,i) + sk(k,j,i) ) & 280 - ( sk(k,j+2,i) + sk(k,j-1,i) ) ) * adv_sca_3 281 diss_n(k) = - abs(v_comp) * ( & 282 3. * ( sk(k,j+1,i)-sk(k,j,i) ) & 283 - (sk(k,j+2,i) - sk(k,j-1,i) ) ) * adv_sca_3 284 ENDDO 285 CASE ( 4 ) 286 DO k = nzb_s_inner(j,i) + 1, nzt 287 v_comp = v(k,j+1,i) - v_gtrans 288 flux_n(k) = v_comp* ( sk(k,j+1,i) + sk(k,j,i) ) * 0.5 289 diss_n(k) = diss_2nd( sk(k,j+1,i), sk(k,j+1,i), sk(k,j,i), & 325 326 CASE ( 4 ) 327 328 DO k = nzb_s_inner(j,i)+1, nzt 329 v_comp = v(k,j+1,i) - v_gtrans 330 flux_n(k) = v_comp* ( sk(k,j+1,i) + sk(k,j,i) ) * 0.5 331 diss_n(k) = diss_2nd( sk(k,j+1,i), sk(k,j+1,i), sk(k,j,i), & 290 332 sk(k,j-1,i), sk(k,j-2,i), v_comp, & 291 333 0.5, ddy ) 292 ENDDO 334 ENDDO 335 336 CASE ( 5 ) 337 338 DO k = nzb_w_inner(j,i)+1, nzt 339 u_comp = u(k,j,i+1) - u_gtrans 340 flux_r(k) = u_comp * ( & 341 7.0 * ( sk(k,j,i+1) + sk(k,j,i) ) & 342 - ( sk(k,j,i+2) + sk(k,j,i-1) ) ) * adv_sca_3 343 diss_r(k) = -ABS( u_comp ) * ( & 344 3.0 * ( sk(k,j,i+1) - sk(k,j,i) ) & 345 - ( sk(k,j,i+2) - sk(k,j,i-1) ) ) * adv_sca_3 346 ENDDO 347 348 CASE ( 6 ) 349 350 DO k = nzb_s_inner(j,i)+1, nzt 351 u_comp = u(k,j,i+1) - u_gtrans 352 flux_r(k) = u_comp * ( & 353 7.0 * ( sk(k,j,i+1) + sk(k,j,i) ) & 354 - ( sk(k,j,i+2) + sk(k,j,i-1) ) ) * adv_sca_3 355 diss_r(k) = -ABS( u_comp ) * ( & 356 3.0 * ( sk(k,j,i+1) - sk(k,j,i) ) & 357 - ( sk(k,j,i+2) - sk(k,j,i-1) ) ) * adv_sca_3 358 ! 
359 !-- Compute leftside fluxes for the left boundary of PE domain 360 u_comp = u(k,j,i) - u_gtrans 361 swap_flux_x_local(k,j) = u_comp * ( & 362 sk(k,j,i) + sk(k,j,i-1) ) * 0.5 363 swap_diss_x_local(k,j) = diss_2nd( sk(k,j,i+2),sk(k,j,i+1), & 364 sk(k,j,i), sk(k,j,i-1), & 365 sk(k,j,i-1), u_comp, & 366 0.5, ddx ) 367 ENDDO 368 degraded_l = .TRUE. 369 370 CASE ( 7 ) 371 372 DO k = nzb_s_inner(j,i)+1, nzt 373 v_comp = v(k,j+1,i)-v_gtrans 374 flux_n(k) = v_comp * ( & 375 7.0 * ( sk(k,j+1,i) + sk(k,j,i) ) & 376 - ( sk(k,j+2,i) + sk(k,j-1,i) ) ) * adv_sca_3 377 diss_n(k) = -ABS( v_comp ) * ( & 378 3.0 * ( sk(k,j+1,i) - sk(k,j,i) ) & 379 - ( sk(k,j+2,i) - sk(k,j-1,i) ) ) * adv_sca_3 380 ENDDO 381 382 CASE ( 8 ) 383 384 DO k = nzb_s_inner(j,i)+1, nzt 385 v_comp = v(k,j+1,i) - v_gtrans 386 flux_n(k) = v_comp * ( & 387 7.0 * ( sk(k,j+1,i) + sk(k,j,i) ) & 388 - ( sk(k,j+2,i) + sk(k,j-1,i) ) ) * adv_sca_3 389 diss_n(k) = -ABS( v_comp ) * ( & 390 3.0 * ( sk(k,j+1,i) - sk(k,j,i) ) & 391 - ( sk(k,j+2,i) - sk(k,j-1,i) ) ) * adv_sca_3 392 ! 393 !-- Compute southside fluxes for the south boundary of PE domain 394 v_comp = v(k,j,i) - v_gtrans 395 swap_flux_y_local(k) = v_comp * & 396 ( sk(k,j,i) + sk(k,j-1,i) ) * 0.5 397 swap_diss_y_local(k) = diss_2nd( sk(k,j+2,i), sk(k,j+1,i), & 398 sk(k,j,i), sk(k,j-1,i), & 399 sk(k,j-1,i), v_comp, & 400 0.5, ddy ) 401 ENDDO 402 degraded_s = .TRUE. 293 403 294 CASE ( 5 ) 295 DO k = nzb_w_inner(j,i) + 1, nzt 296 u_comp = u(k,j,i+1) - u_gtrans 297 flux_r(k) = u_comp * ( & 298 7. * ( sk(k,j,i+1) + sk(k,j,i) ) & 299 - ( sk(k,j,i+2) + sk(k,j,i-1) ) ) * adv_sca_3 300 diss_r(k) = - abs(u_comp) * ( & 301 3. * ( sk(k,j,i+1)-sk(k,j,i) ) & 302 - ( sk(k,j,i+2) - sk(k,j,i-1) ) ) * adv_sca_3 303 ENDDO 304 305 CASE ( 6 ) 306 DO k = nzb_s_inner(j,i) + 1, nzt 307 u_comp = u(k,j,i+1) - u_gtrans 308 flux_r(k) = u_comp * ( & 309 7. * ( sk(k,j,i+1) + sk(k,j,i) ) & 310 - ( sk(k,j,i+2) + sk(k,j,i-1) ) ) * adv_sca_3 311 diss_r(k) = - abs(u_comp) * ( & 312 3. * ( sk(k,j,i+1)-sk(k,j,i) ) & 313 - ( sk(k,j,i+2) - sk(k,j,i-1) ) ) * adv_sca_3 314 ! 315 !-- Compute leftside fluxes for the left boundary of PE domain. 316 u_comp = u(k,j,i) - u_gtrans 317 swap_flux_x_local(k,j) = u_comp * ( & 318 sk(k,j,i) + sk(k,j,i-1) ) * 0.5 319 swap_diss_x_local(k,j) = diss_2nd( sk(k,j,i+2),sk(k,j,i+1), & 320 sk(k,j,i), sk(k,j,i-1), & 321 sk(k,j,i-1), u_comp, & 322 0.5, ddx ) 323 ENDDO 324 degraded_l = .TRUE. 325 326 CASE ( 7 ) 327 DO k = nzb_s_inner(j,i) + 1, nzt 328 v_comp = v(k,j+1,i)-v_gtrans 329 flux_n(k) = v_comp * ( & 330 7. * ( sk(k,j+1,i) + sk(k,j,i) ) & 331 - ( sk(k,j+2,i) + sk(k,j-1,i) ) ) * adv_sca_3 332 diss_n(k) = - abs(v_comp) * ( & 333 3. * ( sk(k,j+1,i) - sk(k,j,i) ) & 334 - ( sk(k,j+2,i) - sk(k,j-1,i) ) ) * adv_sca_3 335 ENDDO 336 337 CASE ( 8 ) 338 DO k = nzb_s_inner(j,i) + 1, nzt 339 v_comp = v(k,j+1,i) - v_gtrans 340 flux_n(k) = v_comp * ( & 341 7. * ( sk(k,j+1,i) + sk(k,j,i) ) & 342 - ( sk(k,j+2,i) + sk(k,j-1,i) ) ) * adv_sca_3 343 diss_n(k) = - abs(v_comp) * ( & 344 3. * ( sk(k,j+1,i) - sk(k,j,i) ) & 345 - ( sk(k,j+2,i) - sk(k,j-1,i) ) ) * adv_sca_3 346 ! 347 !-- Compute southside fluxes for the south boundary of PE domain 348 v_comp = v(k,j,i) - v_gtrans 349 swap_flux_y_local(k) = v_comp * ( & 350 sk(k,j,i)+ sk(k,j-1,i) ) * 0.5 351 swap_diss_y_local(k) = diss_2nd( sk(k,j+2,i), sk(k,j+1,i), & 352 sk(k,j,i), sk(k,j-1,i), & 353 sk(k,j-1,i), v_comp, & 354 0.5, ddy ) 355 ENDDO 356 degraded_s = .TRUE. 357 358 CASE DEFAULT 404 CASE DEFAULT 359 405 360 END SELECT 406 END SELECT 407 361 408 ! 
362 !-- Compute the crosswise 5th order fluxes at the outflow 363 IF ( boundary_flags(j,i) == 1 .OR. boundary_flags(j,i) == 2 & 364 .OR. boundary_flags(j,i) == 5 .OR. boundary_flags(j,i) == 6 ) THEN 365 DO k = nzb_s_inner(j,i) + 1, nzt 366 v_comp = v(k,j+1,i) - v_gtrans 367 flux_n(k) = v_comp * ( & 368 37. * ( sk(k,j+1,i) + sk(k,j,i) ) & 369 - 8. * ( sk(k,j+2,i) + sk(k,j-1,i) ) & 370 + ( sk(k,j+3,i) + sk(k,j-2,i) ) ) * adv_sca_5 371 diss_n(k) = - abs(v_comp) * ( & 372 10. * ( sk(k,j+1,i) - sk(k,j,i) ) & 373 - 5. * ( sk(k,j+2,i) - sk(k,j-1,i) ) & 374 + ( sk(k,j+3,i) - sk(k,j-2,i) ) ) * adv_sca_5 375 ENDDO 409 !-- Compute the crosswise 5th order fluxes at the outflow 410 IF ( boundary_flags(j,i) == 1 .OR. boundary_flags(j,i) == 2 .OR. & 411 boundary_flags(j,i) == 5 .OR. boundary_flags(j,i) == 6 ) THEN 412 413 DO k = nzb_s_inner(j,i)+1, nzt 414 v_comp = v(k,j+1,i) - v_gtrans 415 flux_n(k) = v_comp * ( & 416 37.0 * ( sk(k,j+1,i) + sk(k,j,i) ) & 417 - 8.0 * ( sk(k,j+2,i) + sk(k,j-1,i) ) & 418 + ( sk(k,j+3,i) + sk(k,j-2,i) ) ) * adv_sca_5 419 diss_n(k) = -ABS( v_comp ) * ( & 420 10.0 * ( sk(k,j+1,i) - sk(k,j,i) ) & 421 - 5.0 * ( sk(k,j+2,i) - sk(k,j-1,i) ) & 422 + ( sk(k,j+3,i) - sk(k,j-2,i) ) ) * adv_sca_5 423 ENDDO 376 424 377 ELSE425 ELSE 378 426 379 DO k = nzb_s_inner(j,i) +1, nzt380 u_comp = u(k,j,i+1) - u_gtrans381 flux_r(k) = u_comp * ( &382 37.* ( sk(k,j,i+1) + sk(k,j,i) ) &383 - 8.* ( sk(k,j,i+2) + sk(k,j,i-1) ) &384 +( sk(k,j,i+3) + sk(k,j,i-2) ) ) * adv_sca_5385 diss_r(k) = - abs(u_comp) * (&386 10.* ( sk(k,j,i+1) - sk(k,j,i) ) &387 - 5.* ( sk(k,j,i+2) - sk(k,j,i-1) ) &388 +( sk(k,j,i+3) - sk(k,j,i-2) ) ) * adv_sca_5389 ENDDO427 DO k = nzb_s_inner(j,i)+1, nzt 428 u_comp = u(k,j,i+1) - u_gtrans 429 flux_r(k) = u_comp * ( & 430 37.0 * ( sk(k,j,i+1) + sk(k,j,i) ) & 431 - 8.0 * ( sk(k,j,i+2) + sk(k,j,i-1) ) & 432 + ( sk(k,j,i+3) + sk(k,j,i-2) ) ) * adv_sca_5 433 diss_r(k) = -ABS( u_comp ) * ( & 434 10.0 * ( sk(k,j,i+1) - sk(k,j,i) ) & 435 - 5.0 * ( sk(k,j,i+2) - sk(k,j,i-1) ) & 436 + ( sk(k,j,i+3) - sk(k,j,i-2) ) ) * adv_sca_5 437 ENDDO 390 438 391 ENDIF 392 ! 393 !-- Compute the fifth order fluxes for the interior of PE domain. 439 ENDIF 440 394 441 ELSE 395 442 396 DO k = nzb_u_inner(j,i) + 1, nzt 443 ! 444 !-- Compute the fifth order fluxes for the interior of PE domain. 445 DO k = nzb_u_inner(j,i)+1, nzt 397 446 u_comp = u(k,j,i+1) - u_gtrans 398 447 flux_r(k) = u_comp * ( & 399 37. 400 - 8.* ( sk(k,j,i+2) + sk(k,j,i-1) ) &401 +( sk(k,j,i+3) + sk(k,j,i-2) ) ) * adv_sca_5402 diss_r(k) = - abs(u_comp) * (&403 10. 404 - 5.* ( sk(k,j,i+2) - sk(k,j,i-1) ) &405 +( sk(k,j,i+3) - sk(k,j,i-2) ) ) * adv_sca_5448 37.0 * ( sk(k,j,i+1) + sk(k,j,i) ) & 449 - 8.0 * ( sk(k,j,i+2) + sk(k,j,i-1) ) & 450 + ( sk(k,j,i+3) + sk(k,j,i-2) ) ) * adv_sca_5 451 diss_r(k) = -ABS( u_comp ) * ( & 452 10.0 * ( sk(k,j,i+1) - sk(k,j,i) ) & 453 - 5.0 * ( sk(k,j,i+2) - sk(k,j,i-1) ) & 454 + ( sk(k,j,i+3) - sk(k,j,i-2) ) ) * adv_sca_5 406 455 407 456 v_comp = v(k,j+1,i) - v_gtrans 408 457 flux_n(k) = v_comp * ( & 409 37. 410 - 8.* ( sk(k,j+2,i) + sk(k,j-1,i) ) &411 +( sk(k,j+3,i) + sk(k,j-2,i) ) ) * adv_sca_5412 diss_n(k) = - abs(v_comp) * (&413 10. 
* ( sk(k,j+1,i) - sk(k,j,i) ) &414 - 5.* ( sk(k,j+2,i) - sk(k,j-1,i) ) &415 + ( sk(k,j+3,i) - sk(k,j-2,i) ) ) * adv_sca_5458 37.0 * ( sk(k,j+1,i) + sk(k,j,i) ) & 459 - 8.0 * ( sk(k,j+2,i) + sk(k,j-1,i) ) & 460 + ( sk(k,j+3,i) + sk(k,j-2,i) ) ) * adv_sca_5 461 diss_n(k) = -ABS( v_comp ) * ( & 462 10.0 * ( sk(k,j+1,i) - sk(k,j,i) ) & 463 - 5.0 * ( sk(k,j+2,i) - sk(k,j-1,i) ) & 464 + ( sk(k,j+3,i) - sk(k,j-2,i) ) ) * adv_sca_5 416 465 ENDDO 417 466 … … 424 473 v_comp = v(k,j,i) - v_gtrans 425 474 swap_flux_y_local(k) = v_comp * ( & 426 37. * ( sk(k,j,i) + sk(k,j-1,i) )&427 - 8. * ( sk(k,j+1,i) + sk(k,j-2,i) )&428 + ( sk(k,j+2,i) + sk(k,j-3,i) )&429 ) * adv_sca_5430 swap_diss_y_local(k) = - abs(v_comp) * (&431 10. * ( sk(k,j,i) - sk(k,j-1,i)) &432 - 5.* ( sk(k,j+1,i) - sk(k,j-2,i) ) &433 +sk(k,j+2,i) - sk(k,j-3,i) &434 ) * adv_sca_5475 37.0 * ( sk(k,j,i) + sk(k,j-1,i) ) & 476 - 8.0 * ( sk(k,j+1,i) + sk(k,j-2,i) ) & 477 + ( sk(k,j+2,i) + sk(k,j-3,i) ) & 478 ) * adv_sca_5 479 swap_diss_y_local(k) = -ABS( v_comp ) * ( & 480 10.0 * ( sk(k,j,i) - sk(k,j-1,i) ) & 481 - 5.0 * ( sk(k,j+1,i) - sk(k,j-2,i) ) & 482 + sk(k,j+2,i) - sk(k,j-3,i) & 483 ) * adv_sca_5 435 484 ENDDO 436 485 437 486 ENDIF 438 487 439 IF ( i == nxl .AND. ( .NOT. degraded_l )) THEN488 IF ( i == nxl .AND. .NOT. degraded_l ) THEN 440 489 441 DO k = nzb_s_inner(j,i) +1, nzt490 DO k = nzb_s_inner(j,i)+1, nzt 442 491 u_comp = u(k,j,i) - u_gtrans 443 492 swap_flux_x_local(k,j) = u_comp * ( & 444 37. * ( sk(k,j,i) + sk(k,j,i-1)) &445 - 8.* ( sk(k,j,i+1) + sk(k,j,i-2) ) &446 +( sk(k,j,i+2) + sk(k,j,i-3) ) &447 ) * adv_sca_5448 swap_diss_x_local(k,j) = - abs(u_comp) * (&449 10. * ( sk(k,j,i) - sk(k,j,i-1)) &450 - 5.* ( sk(k,j,i+1) - sk(k,j,i-2) ) &451 +( sk(k,j,i+2) - sk(k,j,i-3) ) &452 ) * adv_sca_5493 37.0 * ( sk(k,j,i) + sk(k,j,i-1) ) & 494 - 8.0 * ( sk(k,j,i+1) + sk(k,j,i-2) ) & 495 + ( sk(k,j,i+2) + sk(k,j,i-3) ) & 496 ) * adv_sca_5 497 swap_diss_x_local(k,j) = -ABS( u_comp ) * ( & 498 10.0 * ( sk(k,j,i) - sk(k,j,i-1) ) & 499 - 5.0 * ( sk(k,j,i+1) - sk(k,j,i-2) ) & 500 + ( sk(k,j,i+2) - sk(k,j,i-3) ) & 501 ) * adv_sca_5 453 502 ENDDO 454 503 455 504 ENDIF 505 456 506 ! 457 !-- Now compute the tendency terms for the horizontal parts .458 DO k = nzb_s_inner(j,i) +1, nzt507 !-- Now compute the tendency terms for the horizontal parts 508 DO k = nzb_s_inner(j,i)+1, nzt 459 509 460 tend(k,j,i) = tend(k,j,i) - ( & 461 ( flux_r(k) + diss_r(k) & 462 - ( swap_flux_x_local(k,j) + swap_diss_x_local(k,j) ) ) * ddx & 463 + (flux_n(k)+diss_n(k) & 464 - (swap_flux_y_local(k) + swap_diss_y_local(k) ) ) * ddy ) 510 tend(k,j,i) = tend(k,j,i) - ( & 511 ( flux_r(k) + diss_r(k) - swap_flux_x_local(k,j) - & 512 swap_diss_x_local(k,j) ) * ddx & 513 + ( flux_n(k) + diss_n(k) - swap_flux_y_local(k) - & 514 swap_diss_y_local(k) ) * ddy & 515 ) 465 516 466 517 swap_flux_y_local(k) = flux_n(k) … … 470 521 471 522 ENDDO 472 ! 473 !-- Vertical advection, degradation of order near surface and top. 474 !-- The fluxes flux_d and diss_d at the surface are 0. Due to reasons of 475 !-- statistical evaluation the top flux at the surface should be 0 476 477 flux_t(nzb_s_inner(j,i)) = 0. 478 diss_t(nzb_s_inner(j,i)) = 0. 523 524 ! 525 !-- Vertical advection, degradation of order near bottom and top. 526 !-- The fluxes flux_d and diss_d at the surface are 0. Due to later 527 !-- calculation of statistics the top flux at the surface should be 0. 528 flux_t(nzb_s_inner(j,i)) = 0.0 529 diss_t(nzb_s_inner(j,i)) = 0.0 530 479 531 ! 
480 !-- 2nd order scheme532 !-- 2nd-order scheme (bottom) 481 533 k = nzb_s_inner(j,i) + 1 482 534 flux_d = flux_t(k-1) 483 535 diss_d = diss_t(k-1) 484 536 flux_t(k) = w(k,j,i) * ( sk(k+1,j,i) + sk(k,j,i) ) * 0.5 537 485 538 ! 486 !-- sk(k,j,i) is referenced three times to avoid a access below surface.539 !-- sk(k,j,i) is referenced three times to avoid an access below surface 487 540 diss_t(k) = diss_2nd( sk(k+2,j,i), sk(k+1,j,i), sk(k,j,i), sk(k,j,i), & 488 541 sk(k,j,i), w(k,j,i), 0.5, ddzw(k) ) 489 542 490 tend(k,j,i) = tend(k,j,i) - ( flux_t(k) + diss_t(k) 491 - ( flux_d + diss_d ) )* ddzw(k)492 ! 493 !-- WS3 as an intermediate step 543 tend(k,j,i) = tend(k,j,i) - ( flux_t(k) + diss_t(k) - flux_d - diss_d ) & 544 * ddzw(k) 545 ! 546 !-- WS3 as an intermediate step (bottom) 494 547 k = nzb_s_inner(j,i) + 2 495 548 flux_d = flux_t(k-1) 496 549 diss_d = diss_t(k-1) 497 550 flux_t(k) = w(k,j,i) * ( & 498 7. * ( sk(k+1,j,i) + sk(k,j,i) ) & 499 - ( sk(k+2,j,i) + sk(k-1,j,i) ) ) * adv_sca_3 500 diss_t(k) = - abs(w(k,j,i)) * ( & 501 3. * ( sk(k+1,j,i) - sk(k,j,i) ) & 502 - ( sk(k+2,j,i) - sk(k-1,j,i) ) ) * adv_sca_3 503 504 tend(k,j,i) = tend(k,j,i) - ( flux_t(k) + diss_t(k) & 505 - ( flux_d + diss_d ) ) * ddzw(k) 551 7.0 * ( sk(k+1,j,i) + sk(k,j,i) ) & 552 - ( sk(k+2,j,i) + sk(k-1,j,i) ) & 553 ) * adv_sca_3 554 diss_t(k) = -ABS( w(k,j,i) ) * ( & 555 3.0 * ( sk(k+1,j,i) - sk(k,j,i) ) & 556 - ( sk(k+2,j,i) - sk(k-1,j,i) ) & 557 ) * adv_sca_3 558 559 tend(k,j,i) = tend(k,j,i) - ( flux_t(k) + diss_t(k) - flux_d - diss_d ) & 560 * ddzw(k) 506 561 ! 507 562 !-- WS5 508 DO k = nzb_s_inner(j,i) + 3, nzt -2563 DO k = nzb_s_inner(j,i)+3, nzt-2 509 564 510 565 flux_d = flux_t(k-1) 511 diss_d = diss_t(k-1) 512 513 flux_t(k) = w(k,j,i) * ( & 514 37. * ( sk(k+1,j,i) + sk(k,j,i) ) & 515 - 8. * ( sk(k+2,j,i) + sk(k-1,j,i) ) & 516 + ( sk(k+3,j,i) + sk(k-2,j,i) ) ) *adv_sca_5 517 diss_t(k) = - abs(w(k,j,i)) * ( & 518 10. * ( sk(k+1,j,i) - sk(k,j,i) ) & 519 - 5. * ( sk(k+2,j,i) - sk(k-1,j,i) ) & 520 + ( sk(k+3,j,i) - sk(k-2,j,i) ) ) * adv_sca_5 566 diss_d = diss_t(k-1) 567 flux_t(k) = w(k,j,i) * ( & 568 37.0 * ( sk(k+1,j,i) + sk(k,j,i) ) & 569 - 8.0 * ( sk(k+2,j,i) + sk(k-1,j,i) ) & 570 + ( sk(k+3,j,i) + sk(k-2,j,i) ) & 571 ) * adv_sca_5 572 diss_t(k) = -ABS( w(k,j,i) ) * ( & 573 10.0 * ( sk(k+1,j,i) - sk(k,j,i) )& 574 - 5.0 * ( sk(k+2,j,i) - sk(k-1,j,i) )& 575 + ( sk(k+3,j,i) - sk(k-2,j,i) )& 576 ) * adv_sca_5 521 577 522 578 tend(k,j,i) = tend(k,j,i) - ( flux_t(k) + diss_t(k) & 523 579 - ( flux_d + diss_d ) ) * ddzw(k) 580 524 581 ENDDO 525 ! 526 !-- WS3 as an intermediate step 582 583 ! 584 !-- WS3 as an intermediate step (top) 527 585 k = nzt - 1 528 586 flux_d = flux_t(k-1) 529 587 diss_d = diss_t(k-1) 530 flux_t(k) = w(k,j,i) * ( & 531 7. * ( sk(k+1,j,i) + sk(k,j,i) ) & 532 - ( sk(k+2,j,i) + sk(k-1,j,i) ) ) * adv_sca_3 533 diss_t(k) = - abs(w(k,j,i)) * ( & 534 3. * ( sk(k+1,j,i) - sk(k,j,i) ) & 535 - ( sk(k+2,j,i) - sk(k-1,j,i) ) ) * adv_sca_3 536 537 tend(k,j,i) = tend(k,j,i) - ( flux_t(k) + diss_t(k) & 538 - ( flux_d + diss_d ) ) * ddzw(k) 588 flux_t(k) = w(k,j,i) * ( & 589 7.0 * ( sk(k+1,j,i) + sk(k,j,i) ) & 590 - ( sk(k+2,j,i) + sk(k-1,j,i) ) & 591 ) * adv_sca_3 592 diss_t(k) = -ABS( w(k,j,i) ) * ( & 593 3.0 * ( sk(k+1,j,i) - sk(k,j,i) ) & 594 - ( sk(k+2,j,i) - sk(k-1,j,i) ) & 595 ) * adv_sca_3 596 597 tend(k,j,i) = tend(k,j,i) - ( flux_t(k) + diss_t(k) - flux_d - diss_d ) & 598 * ddzw(k) 539 599 ! 
540 !-- 2nd 600 !-- 2nd-order scheme (top) 541 601 k = nzt 542 602 flux_d = flux_t(k-1) 543 603 diss_d = diss_t(k-1) 544 flux_t(k) = w(k,j,i) * ( sk(k+1,j,i) + sk(k,j,i) ) *0.5 604 flux_t(k) = w(k,j,i) * ( sk(k+1,j,i) + sk(k,j,i) ) * 0.5 605 545 606 ! 546 !-- sk(k+1) is referenced two times to avoid a segmentation fault at top .547 diss_t(k) = diss_2nd( sk(k+1,j,i), sk(k+1,j,i), sk(k,j,i), sk(k-1,j,i), &607 !-- sk(k+1) is referenced two times to avoid a segmentation fault at top 608 diss_t(k) = diss_2nd( sk(k+1,j,i), sk(k+1,j,i), sk(k,j,i), sk(k-1,j,i), & 548 609 sk(k-2,j,i), w(k,j,i), 0.5, ddzw(k) ) 549 610 550 tend(k,j,i) = tend(k,j,i) - ( flux_t(k) + diss_t(k) 551 - ( flux_d + diss_d ) )* ddzw(k)611 tend(k,j,i) = tend(k,j,i) - ( flux_t(k) + diss_t(k) - flux_d - diss_d ) & 612 * ddzw(k) 552 613 ! 553 !-- evaluation of statistics614 !-- Evaluation of statistics 554 615 SELECT CASE ( sk_char ) 555 616 556 617 CASE ( 'pt' ) 557 DO k = nzb_s_inner(j,i), nzt 558 sums_wspts_ws_l(k,:) = sums_wspts_ws_l(k,:) & 559 + (flux_t(k)+diss_t(k)) & 560 * weight_substep(intermediate_timestep_count) * rmask(j,i,:) 618 619 DO k = nzb_s_inner(j,i), nzt 620 sums_wspts_ws_l(k,:) = sums_wspts_ws_l(k,:) + & 621 ( flux_t(k) + diss_t(k) ) & 622 * weight_substep(intermediate_timestep_count) & 623 * rmask(j,i,:) 561 624 ENDDO 562 625 563 626 CASE ( 'sa' ) 564 DO k = nzb_s_inner(j,i), nzt 565 sums_wssas_ws_l(k,:) = sums_wssas_ws_l(k,:) & 566 +(flux_t(k)+diss_t(k)) & 567 * weight_substep(intermediate_timestep_count) * rmask(j,i,:) 627 628 DO k = nzb_s_inner(j,i), nzt 629 sums_wssas_ws_l(k,:) = sums_wssas_ws_l(k,:) + & 630 ( flux_t(k) + diss_t(k) ) & 631 * weight_substep(intermediate_timestep_count) & 632 * rmask(j,i,:) 568 633 ENDDO 569 634 570 635 CASE ( 'q' ) 636 571 637 DO k = nzb_s_inner(j,i), nzt 572 sums_wsqs_ws_l(k,:) = sums_wsqs_ws_l(k,:) & 573 + (flux_t(k)+diss_t(k)) & 574 * weight_substep(intermediate_timestep_count) * rmask(j,i,:) 638 sums_wsqs_ws_l(k,:) = sums_wsqs_ws_l(k,:) + & 639 ( flux_t(k) + diss_t(k) ) & 640 * weight_substep(intermediate_timestep_count) & 641 * rmask(j,i,:) 575 642 ENDDO 576 643 -
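Note: every fifth-order flux above uses the same Wicker-Skamarock stencil, with the factor adv_sca_5 = 1/60 for the flux and -|u| times the same factor for the numerical dissipation term. The following stand-alone sketch evaluates that stencil for a single cell face; the 1-D array s, the face velocity u_face and the data values are made up for illustration and are not part of the changeset.

    PROGRAM ws5_flux_sketch
       IMPLICIT NONE
       REAL, DIMENSION(-2:3) :: s = (/ 1.0, 2.0, 4.0, 7.0, 11.0, 16.0 /)
       REAL :: u_face    = 1.5
       REAL :: adv_sca_5 = 0.016666666666666
       REAL :: flux_r, diss_r

    !--ateFlux and dissipation at the face between cells 0 and 1, as in
    !-- advec_s_ws_ij above (indices shifted so that the face sits at 0/1)
       flux_r = u_face * (         37.0 * ( s(1) + s(0)  )                 &
                                 -  8.0 * ( s(2) + s(-1) )                 &
                                 +        ( s(3) + s(-2) ) ) * adv_sca_5
       diss_r = -ABS( u_face ) * ( 10.0 * ( s(1) - s(0)  )                 &
                                 -  5.0 * ( s(2) - s(-1) )                 &
                                 +        ( s(3) - s(-2) ) ) * adv_sca_5

       PRINT*, 'flux_r = ', flux_r, '  diss_r = ', diss_r
    END PROGRAM ws5_flux_sketch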
palm/trunk/SOURCE/exchange_horiz.f90
r708 r709
   4    4   ! Current revisions:
   5    5   ! -----------------
   6        !
        6   ! formatting adjustments
   7    7   !
   8    8   ! Former revisions:
   …    …
  69   69
  70   70   !
  71        !-- Exchange of lateral boundary values for parallel computers
       71   !-- Exchange of lateral boundary values
  72   72   IF ( pdims(1) == 1 .OR. mg_switch_to_pe0 ) THEN
  73   73   !
-
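Note: exchange_horiz fills the lateral ghost layers of each subdomain with values from the neighbouring subdomains. The stand-alone sketch below shows such a cyclic exchange for a single ghost layer and a 1-D decomposition; all names and sizes are illustrative only, PALM itself exchanges nbgp layers using the derived datatypes created in init_pegrid.

    PROGRAM ghost_exchange_sketch
       USE mpi
       IMPLICIT NONE
       INTEGER, PARAMETER :: n = 8
       REAL, DIMENSION(0:n+1) :: a
       INTEGER :: ierr, myid, numprocs, left, right
       INTEGER :: status(MPI_STATUS_SIZE)

       CALL MPI_INIT( ierr )
       CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
       CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )

       left  = MOD( myid - 1 + numprocs, numprocs )
       right = MOD( myid + 1, numprocs )
       a     = REAL( myid )

    !-- Send the rightmost inner point to the right neighbour and receive
    !-- the left ghost point, then the same for the other direction
       CALL MPI_SENDRECV( a(n), 1, MPI_REAL, right, 0,                     &
                          a(0), 1, MPI_REAL, left,  0,                     &
                          MPI_COMM_WORLD, status, ierr )
       CALL MPI_SENDRECV( a(1),   1, MPI_REAL, left,  1,                   &
                          a(n+1), 1, MPI_REAL, right, 1,                   &
                          MPI_COMM_WORLD, status, ierr )

       CALL MPI_FINALIZE( ierr )
    END PROGRAM ghost_exchange_sketch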
palm/trunk/SOURCE/flow_statistics.f90
r700 r709 4 4 ! Current revisions: 5 5 ! ----------------- 6 ! formatting adjustments 6 7 ! 7 8 ! Former revisions: … … 114 115 USE arrays_3d 115 116 USE cloud_parameters 117 USE control_parameters 116 118 USE cpulog 117 119 USE grid_variables … … 120 122 USE pegrid 121 123 USE statistics 122 USE control_parameters123 124 124 125 IMPLICIT NONE … … 738 739 ENDDO 739 740 ENDDO 740 !- for reasons of speed optimization the loop is splitted, to avoid if-else 741 !- statements inside the loop 742 !- Fluxes which have been computed in part directly inside the advection routines 743 !- treated seperatly. 744 !- First treat the momentum fluxes 741 ! 742 !-- For speed optimization fluxes which have been computed in part directly 743 !-- inside the WS advection routines are treated seperatly 744 !-- Momentum fluxes first: 745 745 IF ( .NOT. ws_scheme_mom ) THEN 746 746 !$OMP DO … … 771 771 DO i = nxl, nxr 772 772 DO j = nys, nyn 773 DO k = nzb_diff_s_inner(j,i) - 1, nzt_diff 774 !- vertical heat flux 773 DO k = nzb_diff_s_inner(j,i)-1, nzt_diff 774 ! 775 !-- Vertical heat flux 775 776 sums_l(k,17,tn) = sums_l(k,17,tn) + 0.5 * & 776 777 ( pt(k,j,i) - hom(k,1,4,sr) + & … … 1221 1222 1222 1223 END SUBROUTINE flow_statistics 1223 1224 1225 -
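Note: the term accumulated in sums_l(k,17,tn) above is a resolved vertical heat flux, i.e. the horizontal average of w times the deviation of pt from its horizontal mean profile (hom). The sketch below computes such a profile for a tiny made-up array; the array shapes, the random data and all names are illustrative only.

    PROGRAM heat_flux_sketch
       IMPLICIT NONE
       INTEGER, PARAMETER :: nx = 3, ny = 3, nz = 4
       REAL, DIMENSION(nz,ny,nx) :: w, pt
       REAL, DIMENSION(nz) :: pt_mean, wpt
       INTEGER :: i, j, k

       CALL RANDOM_NUMBER( w )
       CALL RANDOM_NUMBER( pt )

       DO  k = 1, nz
    !--    Horizontal mean of pt at this level
          pt_mean(k) = SUM( pt(k,:,:) ) / REAL( nx * ny )
    !--    Horizontal mean of w times the local pt deviation
          wpt(k) = 0.0
          DO  i = 1, nx
             DO  j = 1, ny
                wpt(k) = wpt(k) + w(k,j,i) * ( pt(k,j,i) - pt_mean(k) )
             ENDDO
          ENDDO
          wpt(k) = wpt(k) / REAL( nx * ny )
       ENDDO

       PRINT*, wpt
    END PROGRAM heat_flux_sketch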
palm/trunk/SOURCE/inflow_turbulence.f90
r668 r709 4 4 ! Current revisions: 5 5 ! ----------------- 6 ! formatting adjustments 6 7 ! 7 8 ! Former revisions: … … 54 55 ! 55 56 !-- First, local averaging within the recycling domain 56 57 57 i = recycling_plane 58 58 … … 79 79 !-- Now, averaging over all PEs 80 80 IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr ) 81 CALL MPI_ALLREDUCE( avpr_l(nzb,1,1), avpr(nzb,1,1), ngp_pr, &82 MPI_REAL,MPI_SUM, comm2d, ierr )81 CALL MPI_ALLREDUCE( avpr_l(nzb,1,1), avpr(nzb,1,1), ngp_pr, MPI_REAL, & 82 MPI_SUM, comm2d, ierr ) 83 83 84 84 #else … … 165 165 DO k = nzb, nzt + 1 166 166 167 u(k,j,-nbgp+1:0) 167 u(k,j,-nbgp+1:0) = mean_inflow_profiles(k,1) + & 168 168 inflow_dist(k,j,1,1:nbgp) * inflow_damping_factor(k) 169 169 v(k,j,-nbgp:-1) = mean_inflow_profiles(k,2) + & 170 170 inflow_dist(k,j,2,1:nbgp) * inflow_damping_factor(k) 171 w(k,j,-nbgp:-1) = inflow_dist(k,j,3,1:nbgp) * inflow_damping_factor(k) 171 w(k,j,-nbgp:-1) = & 172 inflow_dist(k,j,3,1:nbgp) * inflow_damping_factor(k) 172 173 pt(k,j,-nbgp:-1) = mean_inflow_profiles(k,4) + & 173 174 inflow_dist(k,j,4,1:nbgp) * inflow_damping_factor(k) … … 181 182 ENDIF 182 183 183 !184 !-- Conserve the volume flow at the inflow in order to avoid generation of185 !-- waves in the stable layer186 ! IF ( conserve_volume_flow .AND. inflow_l ) THEN187 188 ! volume_flow(1) = 0.0189 ! volume_flow_l(1) = 0.0190 191 ! i = 0192 193 ! DO j = nys, nyn194 !195 !-- Sum up the volume flow through the south/north boundary196 ! DO k = nzb_2d(j,i) + 1, nzt197 ! volume_flow_l(1) = volume_flow_l(1) + u(k,j,i) * dzu(k)198 ! ENDDO199 ! ENDDO200 201 #if defined( __parallel )202 ! IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )203 ! CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 1, MPI_REAL, &204 ! MPI_SUM, comm1dy, ierr )205 #else206 ! volume_flow = volume_flow_l207 #endif208 ! volume_flow_offset(1) = ( volume_flow_initial(1) - volume_flow(1) ) &209 ! / volume_flow_area(1)210 211 ! DO j = nys-1, nyn+1212 ! DO k = nzb_v_inner(j,i) + 1, nzt213 ! u(k,j,i) = u(k,j,i) + volume_flow_offset(1)214 ! ENDDO215 ! ENDDO216 217 ! ENDIF218 219 184 CALL cpu_log( log_point(40), 'inflow_turbulence', 'stop' ) 220 185 -
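Note: the inflow values imposed above are a superposition of the mean inflow profile and the recycled fluctuation (inflow_dist) scaled by the height-dependent inflow_damping_factor. The sketch below shows that superposition for one made-up column; all arrays and values are illustrative only.

    PROGRAM inflow_recycling_sketch
       IMPLICIT NONE
       INTEGER, PARAMETER :: nz = 5
       REAL, DIMENSION(nz) :: mean_profile, fluctuation, damping, u_inflow
       INTEGER :: k

       mean_profile = (/ 1.0,  2.0, 3.0, 4.0,  5.0 /)
       fluctuation  = (/ 0.3, -0.2, 0.1, 0.0, -0.1 /)
       damping      = (/ 1.0,  1.0, 0.5, 0.2,  0.0 /)  ! damped towards the top

       DO  k = 1, nz
          u_inflow(k) = mean_profile(k) + fluctuation(k) * damping(k)
       ENDDO

       PRINT*, u_inflow
    END PROGRAM inflow_recycling_sketch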
palm/trunk/SOURCE/init_3d_model.f90
r708 r709 7 7 ! Current revisions: 8 8 ! ----------------- 9 ! 9 ! formatting adjustments 10 10 ! 11 11 ! Former revisions: … … 29 29 ! nxl-1, nxr+1, nys-1, nyn+1 replaced by nxlg, nxrg, nysg, nyng in loops and 30 30 ! allocation of arrays. Calls of exchange_horiz are modified. 31 ! Call ws_init to initialize arrays needed for statistical evaluation and31 ! Call ws_init to initialize arrays needed for calculating statisticas and for 32 32 ! optimization when ws-scheme is used. 33 33 ! Initial volume flow is now calculated by using the variable hom_sum. … … 35 35 ! removed (removed u_nzb_p1_for_vfc and v_nzb_p1_for_vfc) 36 36 ! Changed surface boundary conditions for u and v in case of ibc_uv_b == 0 from 37 ! mirror bc to dirichlet boundary conditions (u=v=0), so that k=nzb is38 ! representative for the height z0 37 ! mirror to Dirichlet boundary conditions (u=v=0), so that k=nzb is 38 ! representative for the height z0. 39 39 ! Bugfix: type conversion of '1' to 64bit for the MAX function (ngp_3d_inner) 40 40 ! … … 453 453 454 454 ! 455 !-- Allocate arrays containing the RK coefficient for right evaluation of 456 !-- perturbation pressure and turbulent fluxes. At this point it is needed 457 !-- for right pressure correction during initialization. Further below 458 !-- the real values will be set. 459 ALLOCATE (weight_substep(1:intermediate_timestep_count_max), & 455 !-- Allocate arrays containing the RK coefficient for calculation of 456 !-- perturbation pressure and turbulent fluxes. At this point values are 457 !-- set for pressure calculation during initialization (where no timestep 458 !-- is done). Further below the values needed within the timestep scheme 459 !-- will be set. 460 ALLOCATE( weight_substep(1:intermediate_timestep_count_max), & 460 461 weight_pres(1:intermediate_timestep_count_max) ) 461 weight_substep = 1. 462 weight_pres = 1.463 intermediate_timestep_count = 1 ! needed for simulated_time=0462 weight_substep = 1.0 463 weight_pres = 1.0 464 intermediate_timestep_count = 1 ! needed when simulated_time = 0.0 464 465 465 466 ! … … 1011 1012 ! 1012 1013 !-- Read binary data from restart file 1013 1014 1014 CALL read_3d_binary 1015 1015 … … 1119 1119 ! 
1120 1120 !-- Calculate the initial volume flow at the right and north boundary 1121 IF ( conserve_volume_flow ) THEN1121 IF ( conserve_volume_flow ) THEN 1122 1122 1123 1123 volume_flow_initial_l = 0.0 … … 1128 1128 IF ( nxr == nx ) THEN 1129 1129 DO j = nys, nyn 1130 DO k = nzb_2d(j,nx) +1, nzt1130 DO k = nzb_2d(j,nx)+1, nzt 1131 1131 volume_flow_initial_l(1) = volume_flow_initial_l(1) + & 1132 1132 hom_sum(k,1,0) * dzw(k) … … 1138 1138 IF ( nyn == ny ) THEN 1139 1139 DO i = nxl, nxr 1140 DO k = nzb_2d(ny,i) +1, nzt1140 DO k = nzb_2d(ny,i)+1, nzt 1141 1141 volume_flow_initial_l(2) = volume_flow_initial_l(2) + & 1142 1142 hom_sum(k,2,0) * dzw(k) 1143 1143 volume_flow_area_l(2) = volume_flow_area_l(2) + dzw(k) 1144 1144 ENDDO … … 1150 1150 IF ( nxr == nx ) THEN 1151 1151 DO j = nys, nyn 1152 DO k = nzb_2d(j,nx) +1, nzt1152 DO k = nzb_2d(j,nx)+1, nzt 1153 1153 volume_flow_initial_l(1) = volume_flow_initial_l(1) + & 1154 1154 u(k,j,nx) * dzw(k) 1155 1155 volume_flow_area_l(1) = volume_flow_area_l(1) + dzw(k) 1156 1156 ENDDO … … 1160 1160 IF ( nyn == ny ) THEN 1161 1161 DO i = nxl, nxr 1162 DO k = nzb_2d(ny,i) +1, nzt1162 DO k = nzb_2d(ny,i)+1, nzt 1163 1163 volume_flow_initial_l(2) = volume_flow_initial_l(2) + & 1164 1164 v(k,ny,i) * dzw(k) … … 1171 1171 1172 1172 #if defined( __parallel ) 1173 CALL MPI_ALLREDUCE( volume_flow_initial_l(1), volume_flow_initial(1), &1173 CALL MPI_ALLREDUCE( volume_flow_initial_l(1), volume_flow_initial(1), & 1174 1174 2, MPI_REAL, MPI_SUM, comm2d, ierr ) 1175 CALL MPI_ALLREDUCE( volume_flow_area_l(1), volume_flow_area(1), &1175 CALL MPI_ALLREDUCE( volume_flow_area_l(1), volume_flow_area(1), & 1176 1176 2, MPI_REAL, MPI_SUM, comm2d, ierr ) 1177 1177 … … 1182 1182 1183 1183 ! 1184 !-- In case of 'bulk_velocity' mode, volume_flow_initial is overridden1185 !-- and calculated from u|v_bulk instead.1184 !-- In case of 'bulk_velocity' mode, volume_flow_initial is calculated 1185 !-- from u|v_bulk instead 1186 1186 IF ( TRIM( conserve_volume_flow_mode ) == 'bulk_velocity' ) THEN 1187 1187 volume_flow_initial(1) = u_bulk * volume_flow_area(1) … … 1208 1208 ! 1209 1209 !-- Initialization of the leaf area density 1210 IF ( plant_canopy )THEN1210 IF ( plant_canopy ) THEN 1211 1211 1212 1212 SELECT CASE ( TRIM( canopy_mode ) ) … … 1218 1218 lad_s(:,j,i) = lad(:) 1219 1219 cdc(:,j,i) = drag_coefficient 1220 IF ( passive_scalar ) THEN1220 IF ( passive_scalar ) THEN 1221 1221 sls(:,j,i) = leaf_surface_concentration 1222 1222 sec(:,j,i) = scalar_exchange_coefficient … … 1240 1240 CALL exchange_horiz( cdc, nbgp ) 1241 1241 1242 IF ( passive_scalar ) THEN1242 IF ( passive_scalar ) THEN 1243 1243 CALL exchange_horiz( sls, nbgp ) 1244 1244 CALL exchange_horiz( sec, nbgp ) … … 1255 1255 DO j = nys, nyn 1256 1256 DO k = nzb, nzt+1 1257 IF ( lad_s(k,j,i) > 0.0 ) THEN1257 IF ( lad_s(k,j,i) > 0.0 ) THEN 1258 1258 lad_u(k,j,i) = lad_s(k,j,i) 1259 1259 lad_u(k,j,i+1) = lad_s(k,j,i) … … 1277 1277 ! 1278 1278 !-- Initialisation of the canopy heat source distribution 1279 IF ( cthf /= 0.0 ) THEN1279 IF ( cthf /= 0.0 ) THEN 1280 1280 ! 1281 1281 !-- Piecewise evaluation of the leaf area index by … … 1313 1313 shf(:,:) = canopy_heat_flux(0,:,:) 1314 1314 1315 IF ( ASSOCIATED( shf_m ) ) shf_m = shf1315 IF ( ASSOCIATED( shf_m ) ) shf_m = shf 1316 1316 1317 1317 ENDIF … … 1346 1346 1347 1347 ! 1348 !-- Setting weighting factors for right evaluation of perturbation pressure 1349 !-- and turbulent quantities during the RK substeps. 1350 IF ( TRIM(timestep_scheme) == 'runge-kutta-3' ) THEN ! 
RK3 1348 !-- Setting weighting factors for calculation of perturbation pressure 1349 !-- and turbulent quantities from the RK substeps 1350 IF ( TRIM(timestep_scheme) == 'runge-kutta-3' ) THEN ! for RK3-method 1351 1351 1352 weight_substep(1) = 0.166666666666666 1352 1353 weight_substep(2) = 0.3 1353 1354 weight_substep(3) = 0.533333333333333 1354 1355 weight_pres(1) = 0.333333333333333 1356 weight_pres(2) = 0.416666666666666 1357 weight_pres(3) = 0.25 1358 ELSEIF ( TRIM(timestep_scheme) == 'runge-kutta-2' ) THEN ! RK2 1355 1356 weight_pres(1) = 0.333333333333333 1357 weight_pres(2) = 0.416666666666666 1358 weight_pres(3) = 0.25 1359 1360 ELSEIF ( TRIM(timestep_scheme) == 'runge-kutta-2' ) THEN ! for RK2-method 1361 1359 1362 weight_substep(1) = 0.5 1360 1363 weight_substep(2) = 0.5 1361 1364 1362 weight_pres(1) = 0.5 1363 weight_pres(2) = 0.5 1364 ELSE ! Euler and Leapfrog 1365 weight_pres(1) = 0.5 1366 weight_pres(2) = 0.5 1367 1368 ELSE ! for Euler- and leapfrog-method 1369 1365 1370 weight_substep(1) = 1.0 1366 weight_pres(1) = 1.0 1371 weight_pres(1) = 1.0 1372 1367 1373 ENDIF 1368 1374 … … 1476 1482 1477 1483 ! 1478 !-- Initialize local summation arrays for UP flow_statistics. This is necessary1479 !-- because they may not yet have been initialized when they are called from1480 !-- flow_statistics (or - depending on the chosen model run - are never1481 !-- initialized)1484 !-- Initialize local summation arrays for routine flow_statistics. 1485 !-- This is necessary because they may not yet have been initialized when they 1486 !-- are called from flow_statistics (or - depending on the chosen model run - 1487 !-- are never initialized) 1482 1488 sums_divnew_l = 0.0 1483 1489 sums_divold_l = 0.0 … … 1492 1498 ! 1493 1499 !-- User-defined initializing actions. Check afterwards, if maximum number 1494 !-- of allowed timeseries is notexceeded1500 !-- of allowed timeseries is exceeded 1495 1501 CALL user_init 1496 1502 -
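Note: the RK3 factors set above, weight_substep = (1/6, 0.3, 8/15) and weight_pres = (1/3, 5/12, 1/4), each sum to one, so the weighted substep contributions to the statistics and to the perturbation pressure add up to a full timestep. A quick stand-alone check with the values copied from the code above:

    PROGRAM rk3_weights_sketch
       IMPLICIT NONE
       REAL, DIMENSION(3) :: weight_substep, weight_pres

       weight_substep = (/ 0.166666666666666, 0.3, 0.533333333333333 /)
       weight_pres    = (/ 0.333333333333333, 0.416666666666666, 0.25 /)

       PRINT*, 'SUM( weight_substep ) = ', SUM( weight_substep )   ! approx. 1.0
       PRINT*, 'SUM( weight_pres )    = ', SUM( weight_pres )      ! approx. 1.0
    END PROGRAM rk3_weights_sketch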
palm/trunk/SOURCE/init_coupling.f90
r692 r709
   4    4   ! Current revisions:
   5    5   ! -----------------
        6   ! formatting adjustments
   6    7   !
   7    8   ! Former revisions:
-
palm/trunk/SOURCE/init_grid.f90
r708 r709 4 4 ! Current revisions: 5 5 ! ----------------- 6 ! 6 ! formatting adjustments 7 7 ! 8 8 ! Former revisions: … … 89 89 90 90 ! 91 ! Computation of the array bounds.91 !-- Calculation of horizontal array bounds including ghost layers 92 92 nxlg = nxl - nbgp 93 93 nxrg = nxr + nbgp 94 94 nysg = nys - nbgp 95 95 nyng = nyn + nbgp 96 96 97 ! 97 98 !-- Allocate grid arrays … … 196 197 197 198 ! 198 !-- In case of FFT method or SOR swap inverse grid lenght ddzu to ddzu_fft and199 !-- modify the lowest entry. This is necessary for atmosphere runs, because200 !-- zu(0) and so ddzu(1) changed. Accompanied with this modified arrays201 !-- pressure solver uses wrong values and this causes kinks in the profiles202 !-- of turbulent quantities.203 IF ( psolver /= 'multigrid' ) THEN199 !-- The FFT- SOR-pressure solvers assume grid spacings of a staggered grid 200 !-- everywhere. For the actual grid, the grid spacing at the lowest level 201 !-- is only dz/2, but should be dz. Therefore, an additional array 202 !-- containing with appropriate grid information is created for these 203 !-- solvers. 204 IF ( psolver /= 'multigrid' ) THEN 204 205 ALLOCATE( ddzu_pres(1:nzt+1) ) 205 206 ddzu_pres = ddzu 206 IF( .NOT. ocean ) ddzu_pres(1) = ddzu_pres(2)207 IF( .NOT. ocean ) ddzu_pres(1) = ddzu_pres(2) ! change for lowest level 207 208 ENDIF 208 209 … … 221 222 dzu_mg(:,maximum_grid_level) = dzu 222 223 ! 223 !-- To ensure a equally spaced grid. For ocean runs this is not necessary, 224 !-- Next line to ensure an equally spaced grid. For ocean runs this is not 225 !-- necessary, 224 226 !-- because zu(0) does not changed so far. Also this would cause errors 225 227 !-- if a vertical stretching for ocean runs is used. 226 228 IF ( .NOT. ocean ) dzu_mg(1,maximum_grid_level) = dzu(2) 229 227 230 dzw_mg(:,maximum_grid_level) = dzw 228 231 nzt_l = nzt … … 1132 1135 1133 1136 ! 1134 !-- Need to set lateral boundary conditions for l_wall 1135 1137 !-- Set lateral boundary conditions for l_wall 1136 1138 CALL exchange_horiz( l_wall, nbgp ) 1137 1139 -
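Note: ddzu_pres, introduced above for the FFT and SOR pressure solvers, is a copy of ddzu in which the lowest entry (where the actual spacing is only dz/2) is replaced by the value from level 2, so that the solvers see the uniform staggered spacing dz everywhere. A small sketch with made-up zu levels (nzt, zu and the values are illustrative only):

    PROGRAM ddzu_pres_sketch
       IMPLICIT NONE
       INTEGER, PARAMETER :: nzt = 4
       REAL, DIMENSION(0:nzt+1) :: zu
       REAL, DIMENSION(1:nzt+1) :: ddzu, ddzu_pres
       INTEGER :: k

       zu = (/ 0.0, 5.0, 15.0, 25.0, 35.0, 45.0 /)   ! zu(1)-zu(0) = dz/2 = 5

       DO  k = 1, nzt+1
          ddzu(k) = 1.0 / ( zu(k) - zu(k-1) )
       ENDDO

       ddzu_pres    = ddzu
       ddzu_pres(1) = ddzu_pres(2)                   ! change for lowest level

       PRINT*, 'ddzu(1) = ', ddzu(1), '  ddzu_pres(1) = ', ddzu_pres(1)
    END PROGRAM ddzu_pres_sketch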
palm/trunk/SOURCE/init_pegrid.f90
r708 r709 4 4 ! Current revisions: 5 5 ! ----------------- 6 ! 6 ! formatting adjustments 7 7 ! 8 8 ! ATTENTION: nnz_x undefined problem still has to be solved!!!!!!!! … … 577 577 578 578 CALL MPI_OPEN_PORT( MPI_INFO_NULL, port_name, ierr ) 579 !580 !-- TEST OUTPUT (TO BE REMOVED)581 WRITE(9,*) TRIM( coupling_mode ), &582 ', ierr after MPI_OPEN_PORT: ', ierr583 CALL LOCAL_FLUSH( 9 )584 579 585 580 CALL MPI_PUBLISH_NAME( 'palm_coupler', MPI_INFO_NULL, port_name, & 586 581 ierr ) 587 !588 !-- TEST OUTPUT (TO BE REMOVED)589 WRITE(9,*) TRIM( coupling_mode ), &590 ', ierr after MPI_PUBLISH_NAME: ', ierr591 CALL LOCAL_FLUSH( 9 )592 582 593 583 ! … … 614 604 615 605 CALL MPI_LOOKUP_NAME( 'palm_coupler', MPI_INFO_NULL, port_name, ierr ) 616 !617 !-- TEST OUTPUT (TO BE REMOVED)618 WRITE(9,*) TRIM( coupling_mode ), &619 ', ierr after MPI_LOOKUP_NAME: ', ierr620 CALL LOCAL_FLUSH( 9 )621 622 606 623 607 ENDIF … … 631 615 IF ( coupling_mode == 'atmosphere_to_ocean' ) THEN 632 616 633 PRINT*, '... before COMM_ACCEPT'634 617 CALL MPI_COMM_ACCEPT( port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, & 635 618 comm_inter, ierr ) 636 PRINT*, '--- ierr = ', ierr637 PRINT*, '--- comm_inter atmosphere = ', comm_inter638 639 619 coupling_mode_remote = 'ocean_to_atmosphere' 640 620 641 621 ELSEIF ( coupling_mode == 'ocean_to_atmosphere' ) THEN 642 622 643 IF ( myid == 0 ) PRINT*, '*** read: ', port_name, ' ierr = ', ierr644 PRINT*, '... before COMM_CONNECT'645 623 CALL MPI_COMM_CONNECT( port_name, MPI_INFO_NULL, 0, MPI_COMM_WORLD, & 646 624 comm_inter, ierr ) 647 PRINT*, '--- ierr = ', ierr648 PRINT*, '--- comm_inter ocean = ', comm_inter649 650 625 coupling_mode_remote = 'atmosphere_to_ocean' 651 626 … … 654 629 655 630 ! 656 !-- Determine the number of ghost point s657 IF ( scalar_advec == 'ws-scheme' .OR. momentum_advec == 'ws-scheme')THEN631 !-- Determine the number of ghost point layers 632 IF ( scalar_advec == 'ws-scheme' .OR. momentum_advec == 'ws-scheme' ) THEN 658 633 nbgp = 3 659 634 ELSE 660 635 nbgp = 1 661 END IF 662 663 ! 664 !-- In case of coupled runs, create a new MPI derived datatype for the 665 !-- exchange of surface (xy) data . 666 !-- Gridpoint number for the exchange of ghost points (xy-plane) 667 636 ENDIF 637 638 ! 639 !-- Create a new MPI derived datatype for the exchange of surface (xy) data, 640 !-- which is needed for coupled atmosphere-ocean runs. 641 !-- First, calculate number of grid points of an xy-plane. 668 642 ngp_xy = ( nxr - nxl + 1 + 2 * nbgp ) * ( nyn - nys + 1 + 2 * nbgp ) 669 670 !671 !-- Define a new MPI derived datatype for the exchange of ghost points in672 !-- y-direction for 2D-arrays (line)673 643 CALL MPI_TYPE_VECTOR( ngp_xy, 1, nzt-nzb+2, MPI_REAL, type_xy, ierr ) 674 644 CALL MPI_TYPE_COMMIT( type_xy, ierr ) 675 645 676 677 IF ( TRIM( coupling_mode ) .NE. 'uncoupled' ) THEN 646 IF ( TRIM( coupling_mode ) /= 'uncoupled' ) THEN 678 647 679 648 ! 
… … 685 654 ny_a = ny 686 655 687 IF ( myid == 0 ) THEN 688 CALL MPI_SEND( nx_a, 1, MPI_INTEGER, numprocs, 1, & 689 comm_inter, ierr ) 690 CALL MPI_SEND( ny_a, 1, MPI_INTEGER, numprocs, 2, & 691 comm_inter, ierr ) 692 CALL MPI_SEND( pdims, 2, MPI_INTEGER, numprocs, 3, & 693 comm_inter, ierr ) 694 CALL MPI_RECV( nx_o, 1, MPI_INTEGER, numprocs, 4, & 695 comm_inter, status, ierr ) 696 CALL MPI_RECV( ny_o, 1, MPI_INTEGER, numprocs, 5, & 697 comm_inter, status, ierr ) 698 CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, numprocs, 6, & 656 IF ( myid == 0 ) THEN 657 658 CALL MPI_SEND( nx_a, 1, MPI_INTEGER, numprocs, 1, comm_inter, & 659 ierr ) 660 CALL MPI_SEND( ny_a, 1, MPI_INTEGER, numprocs, 2, comm_inter, & 661 ierr ) 662 CALL MPI_SEND( pdims, 2, MPI_INTEGER, numprocs, 3, comm_inter, & 663 ierr ) 664 CALL MPI_RECV( nx_o, 1, MPI_INTEGER, numprocs, 4, comm_inter, & 665 status, ierr ) 666 CALL MPI_RECV( ny_o, 1, MPI_INTEGER, numprocs, 5, comm_inter, & 667 status, ierr ) 668 CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, numprocs, 6, & 699 669 comm_inter, status, ierr ) 700 670 ENDIF 701 671 702 CALL MPI_BCAST( nx_o, 1, MPI_INTEGER, 0, comm2d, ierr )703 CALL MPI_BCAST( ny_o, 1, MPI_INTEGER, 0, comm2d, ierr )704 CALL MPI_BCAST( pdims_remote, 2, MPI_INTEGER, 0, comm2d, ierr )672 CALL MPI_BCAST( nx_o, 1, MPI_INTEGER, 0, comm2d, ierr ) 673 CALL MPI_BCAST( ny_o, 1, MPI_INTEGER, 0, comm2d, ierr ) 674 CALL MPI_BCAST( pdims_remote, 2, MPI_INTEGER, 0, comm2d, ierr ) 705 675 706 676 ELSEIF ( coupling_mode == 'ocean_to_atmosphere' ) THEN … … 710 680 711 681 IF ( myid == 0 ) THEN 712 CALL MPI_RECV( nx_a, 1, MPI_INTEGER, 0, 1, & 713 comm_inter, status, ierr ) 714 CALL MPI_RECV( ny_a, 1, MPI_INTEGER, 0, 2, & 715 comm_inter, status, ierr ) 716 CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, 0, 3, & 717 comm_inter, status, ierr ) 718 CALL MPI_SEND( nx_o, 1, MPI_INTEGER, 0, 4, & 719 comm_inter, ierr ) 720 CALL MPI_SEND( ny_o, 1, MPI_INTEGER, 0, 5, & 721 comm_inter, ierr ) 722 CALL MPI_SEND( pdims, 2, MPI_INTEGER, 0, 6, & 723 comm_inter, ierr ) 682 683 CALL MPI_RECV( nx_a, 1, MPI_INTEGER, 0, 1, comm_inter, status, & 684 ierr ) 685 CALL MPI_RECV( ny_a, 1, MPI_INTEGER, 0, 2, comm_inter, status, & 686 ierr ) 687 CALL MPI_RECV( pdims_remote, 2, MPI_INTEGER, 0, 3, comm_inter, & 688 status, ierr ) 689 CALL MPI_SEND( nx_o, 1, MPI_INTEGER, 0, 4, comm_inter, ierr ) 690 CALL MPI_SEND( ny_o, 1, MPI_INTEGER, 0, 5, comm_inter, ierr ) 691 CALL MPI_SEND( pdims, 2, MPI_INTEGER, 0, 6, comm_inter, ierr ) 724 692 ENDIF 725 693 … … 730 698 ENDIF 731 699 732 ngp_a = (nx_a+1+2*nbgp)*(ny_a+1+2*nbgp) 733 ngp_o = (nx_o+1+2*nbgp)*(ny_o+1+2*nbgp) 734 735 ! 736 !-- determine if the horizontal grid and the number of PEs 737 !-- in ocean and atmosphere is same or not 738 !-- (different number of PEs still not implemented) 739 IF ( nx_o == nx_a .AND. ny_o == ny_a .AND. & 700 ngp_a = ( nx_a+1 + 2 * nbgp ) * ( ny_a+1 + 2 * nbgp ) 701 ngp_o = ( nx_o+1 + 2 * nbgp ) * ( ny_o+1 + 2 * nbgp ) 702 703 ! 704 !-- Determine if the horizontal grid and the number of PEs in ocean and 705 !-- atmosphere is same or not 706 IF ( nx_o == nx_a .AND. ny_o == ny_a .AND. & 740 707 pdims(1) == pdims_remote(1) .AND. pdims(2) == pdims_remote(2) ) & 741 708 THEN … … 748 715 !-- Determine the target PEs for the exchange between ocean and 749 716 !-- atmosphere (comm2d) 750 IF ( coupling_topology == 0) THEN 751 IF ( TRIM( coupling_mode ) .EQ. 'atmosphere_to_ocean' ) THEN 717 IF ( coupling_topology == 0 ) THEN 718 ! 
719 !-- In case of identical topologies, every atmosphere PE has exactly one 720 !-- ocean PE counterpart and vice versa 721 IF ( TRIM( coupling_mode ) == 'atmosphere_to_ocean' ) THEN 752 722 target_id = myid + numprocs 753 723 ELSE … … 756 726 757 727 ELSE 758 759 728 ! 760 729 !-- In case of nonequivalent topology in ocean and atmosphere only for 761 730 !-- PE0 in ocean and PE0 in atmosphere a target_id is needed, since 762 !-- data echxchange between ocean and atmosphere will be done only by 763 !-- those PEs. 764 IF ( myid == 0 ) THEN 765 IF ( TRIM( coupling_mode ) .EQ. 'atmosphere_to_ocean' ) THEN 731 !-- data echxchange between ocean and atmosphere will be done only 732 !-- between these PEs. 733 IF ( myid == 0 ) THEN 734 735 IF ( TRIM( coupling_mode ) == 'atmosphere_to_ocean' ) THEN 766 736 target_id = numprocs 767 737 ELSE 768 738 target_id = 0 769 739 ENDIF 770 print*, coupling_mode, myid, " -> ", target_id, "numprocs: ", numprocs 740 771 741 ENDIF 742 772 743 ENDIF 773 744 … … 861 832 ! 862 833 !-- Find out, if the total domain allows more levels. These additional 863 !-- levels are processed on PE0 only.834 !-- levels are identically processed on all PEs. 864 835 IF ( numprocs > 1 .AND. mg_switch_to_pe0_level /= -1 ) THEN 836 865 837 IF ( mg_levels_z > MIN( mg_levels_x, mg_levels_y ) ) THEN 838 866 839 mg_switch_to_pe0_level_l = maximum_grid_level 867 840 … … 889 862 mg_switch_to_pe0_level_l = 0 890 863 ENDIF 864 891 865 ELSE 866 892 867 mg_switch_to_pe0_level_l = 0 893 868 maximum_grid_level_l = maximum_grid_level 869 894 870 ENDIF 895 871 … … 920 896 921 897 ENDIF 898 922 899 ENDIF 923 900 … … 939 916 !-- Save the grid size of the subdomain at the switch level, because 940 917 !-- it is needed in poismg. 941 !-- Array bounds of the local subdomain grids are gathered on PE0942 918 ind(1) = nxl_l; ind(2) = nxr_l 943 919 ind(3) = nys_l; ind(4) = nyn_l … … 953 929 DEALLOCATE( ind_all ) 954 930 ! 955 !-- Calculate the grid size of the total domain gathered on PE0931 !-- Calculate the grid size of the total domain 956 932 nxr_l = ( nxr_l-nxl_l+1 ) * pdims(1) - 1 957 933 nxl_l = 0 … … 1006 982 1007 983 ! 1008 !-- Define a new MPI derived datatype for the exchange of ghost points in 1009 !-- y-direction for 2D-arrays (line) 1010 CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_REAL, type_x, ierr ) 984 !-- Define new MPI derived datatypes for the exchange of ghost points in 985 !-- x- and y-direction for 2D-arrays (line) 986 CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_REAL, type_x, & 987 ierr ) 1011 988 CALL MPI_TYPE_COMMIT( type_x, ierr ) 1012 CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_INTEGER, type_x_int, ierr ) 989 CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp, ngp_y, MPI_INTEGER, & 990 type_x_int, ierr ) 1013 991 CALL MPI_TYPE_COMMIT( type_x_int, ierr ) 1014 992 … … 1029 1007 1030 1008 nxl_l = nxl; nxr_l = nxr; nys_l = nys; nyn_l = nyn; nzb_l = nzb; nzt_l = nzt 1009 1031 1010 ! 1032 1011 !-- Discern between the model grid, which needs nbgp ghost points and 1033 1012 !-- grid levels for the multigrid scheme. In the latter case only one 1034 1013 !-- ghost point is necessary. 1035 !-- First definition of mpi-vectors for exchange of ghost layers on normal1014 !-- First definition of MPI-datatypes for exchange of ghost layers on normal 1036 1015 !-- grid. The following loop is needed for data exchange in poismg.f90. 1037 1016 ! 
1038 1017 !-- Determine number of grid points of yz-layer for exchange 1039 1018 ngp_yz(0) = (nzt - nzb + 2) * (nyn - nys + 1 + 2 * nbgp) 1040 ! 1041 !-- Define a new mpi datatype for the exchange of left - right boundaries. 1042 !-- Indeed the data are connected in the physical memory and no mpi-vector 1043 !-- is necessary, but the data exchange between left and right PE's using 1044 !-- mpi-vectors is 10% faster than without. 1019 1020 ! 1021 !-- Define an MPI-datatype for the exchange of left/right boundaries. 1022 !-- Although data are contiguous in physical memory (which does not 1023 !-- necessarily require an MPI-derived datatype), the data exchange between 1024 !-- left and right PE's using the MPI-derived type is 10% faster than without. 1045 1025 CALL MPI_TYPE_VECTOR( nxr-nxl+1+2*nbgp, nbgp*(nzt-nzb+2), ngp_yz(0), & 1046 1026 MPI_REAL, type_xz(0), ierr ) 1047 1027 CALL MPI_TYPE_COMMIT( type_xz(0), ierr ) 1048 1028 1049 CALL MPI_TYPE_VECTOR( nbgp, ngp_yz(0), ngp_yz(0), MPI_REAL, type_yz(0), ierr) 1029 CALL MPI_TYPE_VECTOR( nbgp, ngp_yz(0), ngp_yz(0), MPI_REAL, type_yz(0), & 1030 ierr ) 1050 1031 CALL MPI_TYPE_COMMIT( type_yz(0), ierr ) 1051 ! 1052 !-- Definition of mpi-vectors for multigrid 1032 1033 ! 1034 !-- Definition of MPI-datatypes for multigrid method (coarser level grids) 1053 1035 IF ( psolver == 'multigrid' ) THEN 1054 1036 ! 1055 !-- The definition of mpi-vectors as aforementioned, but only 1 ghost point is used. 1056 DO i = maximum_grid_level, 1 , -1 1037 !-- Definition of MPI-datatyoe as above, but only 1 ghost level is used 1038 DO i = maximum_grid_level, 1 , -1 1039 1057 1040 ngp_yz(i) = (nzt_l - nzb_l + 2) * (nyn_l - nys_l + 3) 1058 1041 1059 1042 CALL MPI_TYPE_VECTOR( nxr_l-nxl_l+3, nzt_l-nzb_l+2, ngp_yz(i), & 1060 MPI_REAL, type_xz(i), ierr )1043 MPI_REAL, type_xz(i), ierr ) 1061 1044 CALL MPI_TYPE_COMMIT( type_xz(i), ierr ) 1062 1045 1063 CALL MPI_TYPE_VECTOR( 1, ngp_yz(i), ngp_yz(i), MPI_REAL, type_yz(i), ierr) 1046 CALL MPI_TYPE_VECTOR( 1, ngp_yz(i), ngp_yz(i), MPI_REAL, type_yz(i), & 1047 ierr ) 1064 1048 CALL MPI_TYPE_COMMIT( type_yz(i), ierr ) 1065 1049 … … 1069 1053 nyn_l = nyn_l / 2 1070 1054 nzt_l = nzt_l / 2 1055 1071 1056 ENDDO 1072 END IF 1057 1058 ENDIF 1073 1059 #endif 1074 1060 -
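Note: the ghost-layer exchange above relies on MPI derived datatypes created with MPI_TYPE_VECTOR, e.g. type_x collects nbgp ghost rows out of a 2-D array whose second dimension has ngp_y points. The stand-alone sketch below only illustrates that constructor call; the dimensions are made up and do not correspond to PALM's actual array bounds.

    PROGRAM type_vector_sketch
       USE mpi
       IMPLICIT NONE
       INTEGER, PARAMETER :: nx_blocks = 6, nbgp = 3, ngp_y = 10
       INTEGER :: type_x, ierr

       CALL MPI_INIT( ierr )

    !-- nx_blocks blocks of nbgp contiguous REALs, consecutive blocks
    !-- separated by a stride of ngp_y elements
       CALL MPI_TYPE_VECTOR( nx_blocks, nbgp, ngp_y, MPI_REAL, type_x, ierr )
       CALL MPI_TYPE_COMMIT( type_x, ierr )

    !-- ... the committed type could now be used in send/receive calls ...

       CALL MPI_TYPE_FREE( type_x, ierr )
       CALL MPI_FINALIZE( ierr )
    END PROGRAM type_vector_sketch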
palm/trunk/SOURCE/prandtl_fluxes.f90
r668 r709 4 4 ! Current revisions: 5 5 ! ----------------- 6 ! formatting adjustments 6 7 ! 7 8 ! Former revisions: … … 10 11 ! 11 12 ! 667 2010-12-23 12:06:00Z suehring/gryschka 12 ! Changed surface boundary conditions for u and v from mirror bc to dirichelt bc,13 ! therefore u(uzb,:,:) and v(nzb,:,:) is now representative for the height z013 ! Changed surface boundary conditions for u and v from mirror to Dirichlet. 14 ! Therefore u(uzb,:,:) and v(nzb,:,:) are now representative for height z0. 14 15 ! nxl-1, nxr+1, nys-1, nyn+1 replaced by nxlg, nxrg, nysg, nyng 15 16 ! -
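Note: the change mentioned above replaces the former mirror condition for u and v at the surface by the Dirichlet condition u = v = 0 at k = nzb, so that the nzb level becomes representative for the roughness height z0. The sketch below contrasts the two conditions on a single made-up column.

    PROGRAM surface_bc_sketch
       IMPLICIT NONE
       INTEGER, PARAMETER :: nzb = 0, nzt = 3
       REAL, DIMENSION(nzb:nzt) :: u

       u(nzb+1:nzt) = (/ 2.0, 3.0, 4.0 /)

    !-- Former mirror condition: antisymmetric value below the surface
       u(nzb) = -u(nzb+1)
       PRINT*, 'mirror:    u(nzb) = ', u(nzb)

    !-- New Dirichlet condition: u vanishes at the surface level itself
       u(nzb) = 0.0
       PRINT*, 'dirichlet: u(nzb) = ', u(nzb)
    END PROGRAM surface_bc_sketch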
palm/trunk/SOURCE/pres.f90
r708 r709 4 4 ! Current revisions: 5 5 ! ----------------- 6 ! 6 ! formatting adjustments 7 7 ! 8 8 ! Former revisions: … … 104 104 105 105 ddt_3d = 1.0 / dt_3d 106 d_weight_pres = 1. / weight_pres(intermediate_timestep_count)106 d_weight_pres = 1.0 / weight_pres(intermediate_timestep_count) 107 107 108 108 ! … … 146 146 ! 147 147 !-- Left/right 148 149 IF ( conserve_volume_flow .AND. ( outflow_l .OR. outflow_r ) ) THEN 148 IF ( conserve_volume_flow .AND. ( outflow_l .OR. outflow_r ) ) THEN 150 149 151 150 volume_flow(1) = 0.0 … … 161 160 ! 162 161 !-- Sum up the volume flow through the south/north boundary 163 DO k = nzb_2d(j,i) +1, nzt162 DO k = nzb_2d(j,i)+1, nzt 164 163 volume_flow_l(1) = volume_flow_l(1) + u(k,j,i) * dzw(k) 165 164 ENDDO … … 173 172 volume_flow = volume_flow_l 174 173 #endif 175 volume_flow_offset(1) = ( volume_flow_initial(1) - volume_flow(1) ) 174 volume_flow_offset(1) = ( volume_flow_initial(1) - volume_flow(1) ) & 176 175 / volume_flow_area(1) 177 176 178 177 DO j = nysg, nyng 179 DO k = nzb_2d(j,i) +1, nzt178 DO k = nzb_2d(j,i)+1, nzt 180 179 u(k,j,i) = u(k,j,i) + volume_flow_offset(1) 181 180 ENDDO … … 186 185 ! 187 186 !-- South/north 188 IF ( conserve_volume_flow .AND. ( outflow_n 187 IF ( conserve_volume_flow .AND. ( outflow_n .OR. outflow_s ) ) THEN 189 188 190 189 volume_flow(2) = 0.0 … … 200 199 ! 201 200 !-- Sum up the volume flow through the south/north boundary 202 DO k = nzb_2d(j,i) +1, nzt201 DO k = nzb_2d(j,i)+1, nzt 203 202 volume_flow_l(2) = volume_flow_l(2) + v(k,j,i) * dzw(k) 204 203 ENDDO … … 216 215 217 216 DO i = nxlg, nxrg 218 DO k = nzb_v_inner(j,i) +1, nzt217 DO k = nzb_v_inner(j,i)+1, nzt 219 218 v(k,j,i) = v(k,j,i) + volume_flow_offset(2) 220 219 ENDDO … … 226 225 !-- Remove mean vertical velocity 227 226 IF ( ibc_p_b == 1 .AND. ibc_p_t == 1 ) THEN 228 IF ( simulated_time > 0.0 ) THEN ! otherwise nzb_w_inner isnot yet known227 IF ( simulated_time > 0.0 ) THEN ! otherwise nzb_w_inner not yet known 229 228 w_l = 0.0; w_l_l = 0.0 230 229 DO i = nxl, nxr … … 237 236 #if defined( __parallel ) 238 237 IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr ) 239 CALL MPI_ALLREDUCE( w_l_l(1), w_l(1), nzt, MPI_REAL, MPI_SUM, comm2d,&240 ierr )238 CALL MPI_ALLREDUCE( w_l_l(1), w_l(1), nzt, MPI_REAL, MPI_SUM, & 239 comm2d, ierr ) 241 240 #else 242 241 w_l = w_l_l … … 574 573 !-- Correction of the provisional velocities with the current perturbation 575 574 !-- pressure just computed 576 IF ( conserve_volume_flow .AND. ( bc_lr_cyc .OR.bc_ns_cyc ) ) THEN575 IF ( conserve_volume_flow .AND. ( bc_lr_cyc .OR. bc_ns_cyc ) ) THEN 577 576 volume_flow_l(1) = 0.0 578 577 volume_flow_l(2) = 0.0 -
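Note: the volume-flow conservation above adds a constant offset, ( volume_flow_initial - volume_flow ) / volume_flow_area, to the boundary-normal velocity so that the volume flow through the boundary returns to its initial value. A 1-D sketch with made-up numbers (u, dzw and the initial flow are illustrative only):

    PROGRAM volume_flow_sketch
       IMPLICIT NONE
       INTEGER, PARAMETER :: nzt = 4
       REAL, DIMENSION(1:nzt) :: u, dzw
       REAL :: volume_flow, volume_flow_initial, volume_flow_area, offset

       u   = (/  1.0,  1.2,  1.4,  1.6 /)
       dzw = (/ 10.0, 10.0, 10.0, 10.0 /)

       volume_flow_initial = 55.0
       volume_flow_area    = SUM( dzw )
       volume_flow         = SUM( u * dzw )                 ! = 52.0

       offset = ( volume_flow_initial - volume_flow ) / volume_flow_area
       u      = u + offset

       PRINT*, 'corrected volume flow = ', SUM( u * dzw )   ! = 55.0
    END PROGRAM volume_flow_sketch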
palm/trunk/SOURCE/prognostic_equations.f90
r674 r709 4 4 ! Current revisions: 5 5 ! ----------------- 6 ! formatting adjustments 6 7 ! 7 8 ! Former revisions: … … 152 153 IF ( ocean ) CALL calc_mean_profile( rho, 64 ) 153 154 IF ( humidity ) CALL calc_mean_profile( vpt, 44 ) 154 IF ( ( ws_scheme_mom .OR. ws_scheme_sca ) .AND.&155 intermediate_timestep_count == 1 ) CALL ws_statistics155 IF ( ( ws_scheme_mom .OR. ws_scheme_sca ) .AND. & 156 intermediate_timestep_count == 1 ) CALL ws_statistics 156 157 157 158 ! … … 927 928 IF ( humidity ) CALL calc_mean_profile( vpt, 44 ) 928 929 IF ( .NOT. constant_diffusion ) CALL production_e_init 929 IF ( ( ws_scheme_mom .OR. ws_scheme_sca ) .AND.&930 intermediate_timestep_count == 1 ) CALL ws_statistics930 IF ( ( ws_scheme_mom .OR. ws_scheme_sca ) .AND. & 931 intermediate_timestep_count == 1 ) CALL ws_statistics 931 932 932 933 … … 1459 1460 IF ( ocean ) CALL calc_mean_profile( rho, 64 ) 1460 1461 IF ( humidity ) CALL calc_mean_profile( vpt, 44 ) 1461 IF ( ( ws_scheme_mom .OR. ws_scheme_sca ) .AND.&1462 intermediate_timestep_count == 1 ) CALL ws_statistics1462 IF ( ( ws_scheme_mom .OR. ws_scheme_sca ) .AND. & 1463 intermediate_timestep_count == 1 ) CALL ws_statistics 1463 1464 1464 1465 ! -
palm/trunk/SOURCE/surface_coupler.f90
r668 r709 4 4 ! Current revisions: 5 5 ! ----------------- 6 ! formatting adjustments 6 7 ! 7 8 ! Former revisions: … … 10 11 ! 11 12 ! 667 2010-12-23 12:06:00Z suehring/gryschka 12 ! additional case for nonequivalent processor and grid topopolgy in ocean and13 ! atmosphere added (coupling_topology = 1) 13 ! Additional case for nonequivalent processor and grid topopolgy in ocean and 14 ! atmosphere added (coupling_topology = 1). 14 15 ! Added exchange of u and v from Ocean to Atmosphere 15 16 ! … … 60 61 61 62 IF ( coupling_topology == 0 ) THEN 62 CALL MPI_SENDRECV( terminate_coupled, 1, MPI_INTEGER, target_id, 63 0, 64 terminate_coupled_remote, 1, MPI_INTEGER, target_id, 63 CALL MPI_SENDRECV( terminate_coupled, 1, MPI_INTEGER, target_id, & 64 0, & 65 terminate_coupled_remote, 1, MPI_INTEGER, target_id, & 65 66 0, comm_inter, status, ierr ) 66 67 ELSE … … 72 73 comm_inter, status, ierr ) 73 74 ENDIF 74 CALL MPI_BCAST( terminate_coupled_remote, 1, MPI_INTEGER, 0, comm2d, ierr) 75 CALL MPI_BCAST( terminate_coupled_remote, 1, MPI_INTEGER, 0, comm2d, & 76 ierr ) 75 77 76 78 ALLOCATE( total_2d_a(-nbgp:ny_a+nbgp,-nbgp:nx_a+nbgp), & … … 97 99 !-- Exchange the current simulated time between the models, 98 100 !-- currently just for total_2ding 99 IF ( coupling_topology == 0 ) THEN 100 CALL MPI_SEND( time_since_reference_point, 1, MPI_REAL, & 101 target_id, 11, comm_inter, ierr ) 102 CALL MPI_RECV( time_since_reference_point_rem, 1, MPI_REAL, & 103 target_id, 11, comm_inter, status, ierr ) 101 IF ( coupling_topology == 0 ) THEN 102 103 CALL MPI_SEND( time_since_reference_point, 1, MPI_REAL, target_id, 11, & 104 comm_inter, ierr ) 105 CALL MPI_RECV( time_since_reference_point_rem, 1, MPI_REAL, target_id, & 106 11, comm_inter, status, ierr ) 104 107 ELSE 108 105 109 IF ( myid == 0 ) THEN 106 CALL MPI_SEND( time_since_reference_point, 1, MPI_REAL, & 107 target_id, 11, comm_inter, ierr ) 108 CALL MPI_RECV( time_since_reference_point_rem, 1, MPI_REAL, & 110 111 CALL MPI_SEND( time_since_reference_point, 1, MPI_REAL, target_id, & 112 11, comm_inter, ierr ) 113 CALL MPI_RECV( time_since_reference_point_rem, 1, MPI_REAL, & 109 114 target_id, 11, comm_inter, status, ierr ) 115 110 116 ENDIF 111 CALL MPI_BCAST( time_since_reference_point_rem, 1, MPI_REAL, & 112 0, comm2d, ierr ) 113 ENDIF 114 WRITE ( 9, * ) 'simulated time: ', simulated_time 115 WRITE ( 9, * ) 'time since start of coupling: ', & 116 time_since_reference_point, ' remote: ', & 117 time_since_reference_point_rem 118 CALL local_flush( 9 ) 119 117 118 CALL MPI_BCAST( time_since_reference_point_rem, 1, MPI_REAL, 0, comm2d, & 119 ierr ) 120 121 ENDIF 120 122 121 123 ! … … 124 126 125 127 ! 126 !-- Horizontal grid size and number of processors is equal 127 !-- in ocean and atmosphere 128 IF ( coupling_topology == 0 ) THEN 129 130 ! 131 !-- Send heat flux at bottom surface to the ocean model 132 CALL MPI_SEND( shf(nysg,nxlg), ngp_xy, MPI_REAL, & 133 target_id, 12, comm_inter, ierr ) 134 135 ! 136 !-- Send humidity flux at bottom surface to the ocean model 128 !-- Horizontal grid size and number of processors is equal in ocean and 129 !-- atmosphere 130 IF ( coupling_topology == 0 ) THEN 131 132 ! 133 !-- Send heat flux at bottom surface to the ocean 134 CALL MPI_SEND( shf(nysg,nxlg), ngp_xy, MPI_REAL, target_id, 12, & 135 comm_inter, ierr ) 136 ! 137 !-- Send humidity flux at bottom surface to the ocean 137 138 IF ( humidity ) THEN 138 CALL MPI_SEND( qsws(nysg,nxlg), ngp_xy, MPI_REAL, & 139 target_id, 13, comm_inter, ierr ) 140 ENDIF 141 142 ! 
143 !-- Receive temperature at the bottom surface from the ocean model 144 WRITE ( 9, * ) '*** receive pt from ocean' 145 CALL local_flush( 9 ) 146 CALL MPI_RECV( pt(0,nysg,nxlg), 1, type_xy, & 147 target_id, 14, comm_inter, status, ierr ) 148 149 ! 150 !-- Send the momentum flux (u) at bottom surface to the ocean model 151 CALL MPI_SEND( usws(nysg,nxlg), ngp_xy, MPI_REAL, & 152 target_id, 15, comm_inter, ierr ) 153 154 ! 155 !-- Send the momentum flux (v) at bottom surface to the ocean model 156 CALL MPI_SEND( vsws(nysg,nxlg), ngp_xy, MPI_REAL, & 157 target_id, 16, comm_inter, ierr ) 158 159 ! 160 !-- Receive u at the bottom surface from the ocean model 161 CALL MPI_RECV( u(0,nysg,nxlg), 1, type_xy, & 162 target_id, 17, comm_inter, status, ierr ) 163 164 ! 165 !-- Receive v at the bottom surface from the ocean model 166 CALL MPI_RECV( v(0,nysg,nxlg), 1, type_xy, & 167 target_id, 18, comm_inter, status, ierr ) 168 139 CALL MPI_SEND( qsws(nysg,nxlg), ngp_xy, MPI_REAL, target_id, 13, & 140 comm_inter, ierr ) 141 ENDIF 142 ! 143 !-- Receive temperature at the bottom surface from the ocean 144 CALL MPI_RECV( pt(0,nysg,nxlg), 1, type_xy, target_id, 14, & 145 comm_inter, status, ierr ) 146 ! 147 !-- Send the momentum flux (u) at bottom surface to the ocean 148 CALL MPI_SEND( usws(nysg,nxlg), ngp_xy, MPI_REAL, target_id, 15, & 149 comm_inter, ierr ) 150 ! 151 !-- Send the momentum flux (v) at bottom surface to the ocean 152 CALL MPI_SEND( vsws(nysg,nxlg), ngp_xy, MPI_REAL, target_id, 16, & 153 comm_inter, ierr ) 154 ! 155 !-- Receive u at the bottom surface from the ocean 156 CALL MPI_RECV( u(0,nysg,nxlg), 1, type_xy, target_id, 17, & 157 comm_inter, status, ierr ) 158 ! 159 !-- Receive v at the bottom surface from the ocean 160 CALL MPI_RECV( v(0,nysg,nxlg), 1, type_xy, target_id, 18, & 161 comm_inter, status, ierr ) 169 162 ! 170 163 !-- Horizontal grid size or number of processors differs between … … 173 166 174 167 ! 175 !-- Send heat flux at bottom surface to the ocean model168 !-- Send heat flux at bottom surface to the ocean 176 169 total_2d_a = 0.0 177 total_2d = 0.0170 total_2d = 0.0 178 171 total_2d(nys:nyn,nxl:nxr) = shf(nys:nyn,nxl:nxr) 179 CALL MPI_REDUCE( total_2d, total_2d_a, ngp_a, MPI_REAL, & 180 MPI_SUM, 0, comm2d, ierr )181 CALL interpolate_to_ocean(12)182 183 ! 184 !-- Send humidity flux at bottom surface to the ocean model185 IF ( humidity ) THEN172 173 CALL MPI_REDUCE( total_2d, total_2d_a, ngp_a, MPI_REAL, MPI_SUM, 0, & 174 comm2d, ierr ) 175 CALL interpolate_to_ocean( 12 ) 176 ! 177 !-- Send humidity flux at bottom surface to the ocean 178 IF ( humidity ) THEN 186 179 total_2d_a = 0.0 187 total_2d = 0.0180 total_2d = 0.0 188 181 total_2d(nys:nyn,nxl:nxr) = qsws(nys:nyn,nxl:nxr) 189 CALL MPI_REDUCE( total_2d, total_2d_a, ngp_a, MPI_REAL, & 190 MPI_SUM, 0, comm2d, ierr )191 CALL interpolate_to_ocean(13)192 ENDIF193 194 ! 195 !-- Receive temperature at the bottom surface from the ocean model196 IF ( myid == 0 ) THEN182 183 CALL MPI_REDUCE( total_2d, total_2d_a, ngp_a, MPI_REAL, MPI_SUM, & 184 0, comm2d, ierr ) 185 CALL interpolate_to_ocean( 13 ) 186 ENDIF 187 ! 
188 !-- Receive temperature at the bottom surface from the ocean 189 IF ( myid == 0 ) THEN 197 190 CALL MPI_RECV( total_2d_a(-nbgp,-nbgp), ngp_a, MPI_REAL, & 198 191 target_id, 14, comm_inter, status, ierr ) 199 192 ENDIF 200 193 CALL MPI_BARRIER( comm2d, ierr ) 201 CALL MPI_BCAST( total_2d_a(-nbgp,-nbgp), ngp_a, MPI_REAL, &202 0, comm2d,ierr )194 CALL MPI_BCAST( total_2d_a(-nbgp,-nbgp), ngp_a, MPI_REAL, 0, comm2d, & 195 ierr ) 203 196 pt(0,nysg:nyng,nxlg:nxrg) = total_2d_a(nysg:nyng,nxlg:nxrg) 204 205 ! 206 !-- Send momentum flux (u) at bottom surface to the ocean model 197 ! 198 !-- Send momentum flux (u) at bottom surface to the ocean 207 199 total_2d_a = 0.0 208 total_2d = 0.0200 total_2d = 0.0 209 201 total_2d(nys:nyn,nxl:nxr) = usws(nys:nyn,nxl:nxr) 210 CALL MPI_REDUCE( total_2d, total_2d_a, ngp_a, MPI_REAL, & 211 MPI_SUM, 0, comm2d, ierr ) 212 CALL interpolate_to_ocean(15) 213 214 ! 215 !-- Send momentum flux (v) at bottom surface to the ocean model 202 CALL MPI_REDUCE( total_2d, total_2d_a, ngp_a, MPI_REAL, MPI_SUM, 0, & 203 comm2d, ierr ) 204 CALL interpolate_to_ocean( 15 ) 205 ! 206 !-- Send momentum flux (v) at bottom surface to the ocean 216 207 total_2d_a = 0.0 217 total_2d = 0.0208 total_2d = 0.0 218 209 total_2d(nys:nyn,nxl:nxr) = vsws(nys:nyn,nxl:nxr) 219 CALL MPI_REDUCE( total_2d, total_2d_a, ngp_a, MPI_REAL, & 220 MPI_SUM, 0, comm2d, ierr ) 221 CALL interpolate_to_ocean(16) 222 223 ! 224 !-- Receive u at the bottom surface from the ocean model 225 IF ( myid == 0 ) THEN 210 CALL MPI_REDUCE( total_2d, total_2d_a, ngp_a, MPI_REAL, MPI_SUM, 0, & 211 comm2d, ierr ) 212 CALL interpolate_to_ocean( 16 ) 213 ! 214 !-- Receive u at the bottom surface from the ocean 215 IF ( myid == 0 ) THEN 226 216 CALL MPI_RECV( total_2d_a(-nbgp,-nbgp), ngp_a, MPI_REAL, & 227 target_id, 17, comm_inter, status, ierr ) 217 target_id, 17, comm_inter, status, ierr ) 228 218 ENDIF 229 219 CALL MPI_BARRIER( comm2d, ierr ) 230 CALL MPI_BCAST( total_2d_a(-nbgp,-nbgp), ngp_a, MPI_REAL, &231 0, comm2d,ierr )220 CALL MPI_BCAST( total_2d_a(-nbgp,-nbgp), ngp_a, MPI_REAL, 0, comm2d, & 221 ierr ) 232 222 u(0,nysg:nyng,nxlg:nxrg) = total_2d_a(nysg:nyng,nxlg:nxrg) 233 234 ! 235 !-- Receive v at the bottom surface from the ocean model 236 IF ( myid == 0 ) THEN 223 ! 224 !-- Receive v at the bottom surface from the ocean 225 IF ( myid == 0 ) THEN 237 226 CALL MPI_RECV( total_2d_a(-nbgp,-nbgp), ngp_a, MPI_REAL, & 238 target_id, 18, comm_inter, status, ierr ) 227 target_id, 18, comm_inter, status, ierr ) 239 228 ENDIF 240 229 CALL MPI_BARRIER( comm2d, ierr ) 241 CALL MPI_BCAST( total_2d_a(-nbgp,-nbgp), ngp_a, MPI_REAL, &242 0, comm2d,ierr )230 CALL MPI_BCAST( total_2d_a(-nbgp,-nbgp), ngp_a, MPI_REAL, 0, comm2d, & 231 ierr ) 243 232 v(0,nysg:nyng,nxlg:nxrg) = total_2d_a(nysg:nyng,nxlg:nxrg) 244 233 … … 252 241 IF ( coupling_topology == 0 ) THEN 253 242 ! 254 !-- Receive heat flux at the sea surface (top) from the atmosphere model 255 CALL MPI_RECV( tswst(nysg,nxlg), ngp_xy, MPI_REAL, & 256 target_id, 12, comm_inter, status, ierr ) 257 258 259 ! 260 !-- Receive humidity flux from the atmosphere model (bottom) 243 !-- Receive heat flux at the sea surface (top) from the atmosphere 244 CALL MPI_RECV( tswst(nysg,nxlg), ngp_xy, MPI_REAL, target_id, 12, & 245 comm_inter, status, ierr ) 246 ! 247 !-- Receive humidity flux from the atmosphere (bottom) 261 248 !-- and add it to the heat flux at the sea surface (top)... 
262 249 IF ( humidity_remote ) THEN 263 250 CALL MPI_RECV( qswst_remote(nysg,nxlg), ngp_xy, MPI_REAL, & 264 251 target_id, 13, comm_inter, status, ierr ) 265 266 ENDIF 267 252 ENDIF 268 253 ! 269 254 !-- Send sea surface temperature to the atmosphere model 270 CALL MPI_SEND( pt(nzt,nysg,nxlg), 1, type_xy, & 271 target_id, 14, comm_inter, ierr ) 272 255 CALL MPI_SEND( pt(nzt,nysg,nxlg), 1, type_xy, target_id, 14, & 256 comm_inter, ierr ) 273 257 ! 274 258 !-- Receive momentum flux (u) at the sea surface (top) from the atmosphere 275 !-- model 276 WRITE ( 9, * ) '*** receive uswst from atmosphere' 277 CALL local_flush( 9 ) 278 CALL MPI_RECV( uswst(nysg,nxlg), ngp_xy, MPI_REAL, & 279 target_id, 15, comm_inter, status, ierr ) 280 259 CALL MPI_RECV( uswst(nysg,nxlg), ngp_xy, MPI_REAL, target_id, 15, & 260 comm_inter, status, ierr ) 281 261 ! 282 262 !-- Receive momentum flux (v) at the sea surface (top) from the atmosphere 283 !-- model 284 CALL MPI_RECV( vswst(nysg,nxlg), ngp_xy, MPI_REAL, & 285 target_id, 16, comm_inter, status, ierr ) 286 287 !-- Send u to the atmosphere model 288 CALL MPI_SEND( u(nzt,nysg,nxlg), 1, type_xy, & 289 target_id, 17, comm_inter, ierr ) 290 291 ! 292 !-- Send v to the atmosphere model 293 CALL MPI_SEND( v(nzt,nysg,nxlg), 1, type_xy, & 294 target_id, 18, comm_inter, ierr ) 295 263 CALL MPI_RECV( vswst(nysg,nxlg), ngp_xy, MPI_REAL, target_id, 16, & 264 comm_inter, status, ierr ) 265 ! 266 !-- Send u to the atmosphere 267 CALL MPI_SEND( u(nzt,nysg,nxlg), 1, type_xy, target_id, 17, & 268 comm_inter, ierr ) 269 ! 270 !-- Send v to the atmosphere 271 CALL MPI_SEND( v(nzt,nysg,nxlg), 1, type_xy, target_id, 18, & 272 comm_inter, ierr ) 296 273 ! 297 274 !-- Horizontal gridsize or number of processors differs between 298 275 !-- ocean and atmosphere 299 276 ELSE 300 301 ! 302 !-- Receive heat flux at the sea surface (top) from the atmosphere model 303 IF ( myid == 0 ) THEN 277 ! 278 !-- Receive heat flux at the sea surface (top) from the atmosphere 279 IF ( myid == 0 ) THEN 304 280 CALL MPI_RECV( total_2d_o(-nbgp,-nbgp), ngp_o, MPI_REAL, & 305 target_id, 12, comm_inter, status, ierr ) 281 target_id, 12, comm_inter, status, ierr ) 282 ENDIF 283 CALL MPI_BARRIER( comm2d, ierr ) 284 CALL MPI_BCAST( total_2d_o(-nbgp,-nbgp), ngp_o, MPI_REAL, 0, comm2d, & 285 ierr ) 286 tswst(nysg:nyng,nxlg:nxrg) = total_2d_o(nysg:nyng,nxlg:nxrg) 287 ! 288 !-- Receive humidity flux at the sea surface (top) from the atmosphere 289 IF ( humidity_remote ) THEN 290 IF ( myid == 0 ) THEN 291 CALL MPI_RECV( total_2d_o(-nbgp,-nbgp), ngp_o, MPI_REAL, & 292 target_id, 13, comm_inter, status, ierr ) 293 ENDIF 294 CALL MPI_BARRIER( comm2d, ierr ) 295 CALL MPI_BCAST( total_2d_o(-nbgp,-nbgp), ngp_o, MPI_REAL, 0, & 296 comm2d, ierr) 297 qswst_remote(nysg:nyng,nxlg:nxrg) = total_2d_o(nysg:nyng,nxlg:nxrg) 298 ENDIF 299 ! 300 !-- Send surface temperature to atmosphere 301 total_2d_o = 0.0 302 total_2d = 0.0 303 total_2d(nys:nyn,nxl:nxr) = pt(nzt,nys:nyn,nxl:nxr) 304 305 CALL MPI_REDUCE( total_2d, total_2d_o, ngp_o, MPI_REAL, MPI_SUM, 0, & 306 comm2d, ierr) 307 CALL interpolate_to_atmos( 14 ) 308 ! 
309 !-- Receive momentum flux (u) at the sea surface (top) from the atmosphere 310 IF ( myid == 0 ) THEN 311 CALL MPI_RECV( total_2d_o(-nbgp,-nbgp), ngp_o, MPI_REAL, & 312 target_id, 15, comm_inter, status, ierr ) 306 313 ENDIF 307 314 CALL MPI_BARRIER( comm2d, ierr ) 308 315 CALL MPI_BCAST( total_2d_o(-nbgp,-nbgp), ngp_o, MPI_REAL, & 309 0, comm2d, ierr) 310 tswst(nysg:nyng,nxlg:nxrg) = total_2d_o(nysg:nyng,nxlg:nxrg) 311 312 ! 313 !-- Receive humidity flux at the sea surface (top) from the 314 !-- atmosphere model 315 IF ( humidity_remote ) THEN 316 IF ( myid == 0 ) THEN 317 CALL MPI_RECV( total_2d_o(-nbgp,-nbgp), ngp_o, MPI_REAL, & 318 target_id, 13, comm_inter, status, ierr ) 319 ENDIF 320 CALL MPI_BARRIER( comm2d, ierr ) 321 CALL MPI_BCAST( total_2d_o(-nbgp,-nbgp), ngp_o, MPI_REAL, & 322 0, comm2d, ierr) 323 qswst_remote(nysg:nyng,nxlg:nxrg) = total_2d_o(nysg:nyng,nxlg:nxrg) 324 ENDIF 325 326 ! 327 !-- Send surface temperature to atmosphere 328 total_2d_o = 0.0 329 total_2d = 0.0 330 total_2d(nys:nyn,nxl:nxr) = pt(nzt,nys:nyn,nxl:nxr) 331 332 CALL MPI_REDUCE(total_2d, total_2d_o, ngp_o, & 333 MPI_REAL, MPI_SUM, 0, comm2d, ierr) 334 335 CALL interpolate_to_atmos(14) 336 337 ! 338 !-- Receive momentum flux (u) at the sea surface (top) from the 339 !-- atmosphere model 340 IF ( myid == 0 ) THEN 316 0, comm2d, ierr ) 317 uswst(nysg:nyng,nxlg:nxrg) = total_2d_o(nysg:nyng,nxlg:nxrg) 318 ! 319 !-- Receive momentum flux (v) at the sea surface (top) from the atmosphere 320 IF ( myid == 0 ) THEN 341 321 CALL MPI_RECV( total_2d_o(-nbgp,-nbgp), ngp_o, MPI_REAL, & 342 target_id, 1 5, comm_inter, status, ierr )322 target_id, 16, comm_inter, status, ierr ) 343 323 ENDIF 344 324 CALL MPI_BARRIER( comm2d, ierr ) 345 CALL MPI_BCAST( total_2d_o(-nbgp,-nbgp), ngp_o, MPI_REAL, & 346 0, comm2d, ierr) 347 uswst(nysg:nyng,nxlg:nxrg) = total_2d_o(nysg:nyng,nxlg:nxrg) 348 349 ! 350 !-- Receive momentum flux (v) at the sea surface (top) from the 351 !-- atmosphere model 352 IF ( myid == 0 ) THEN 353 CALL MPI_RECV( total_2d_o(-nbgp,-nbgp), ngp_o, MPI_REAL, & 354 target_id, 16, comm_inter, status, ierr ) 355 ENDIF 356 CALL MPI_BARRIER( comm2d, ierr ) 357 CALL MPI_BCAST( total_2d_o(-nbgp,-nbgp), ngp_o, MPI_REAL, & 358 0, comm2d, ierr) 325 CALL MPI_BCAST( total_2d_o(-nbgp,-nbgp), ngp_o, MPI_REAL, 0, comm2d, & 326 ierr ) 359 327 vswst(nysg:nyng,nxlg:nxrg) = total_2d_o(nysg:nyng,nxlg:nxrg) 360 361 328 ! 362 329 !-- Send u to atmosphere 363 330 total_2d_o = 0.0 364 total_2d = 0.0331 total_2d = 0.0 365 332 total_2d(nys:nyn,nxl:nxr) = u(nzt,nys:nyn,nxl:nxr) 366 CALL MPI_REDUCE(total_2d, total_2d_o, ngp_o, MPI_REAL, & 367 MPI_SUM, 0, comm2d, ierr) 368 CALL interpolate_to_atmos(17) 369 333 CALL MPI_REDUCE( total_2d, total_2d_o, ngp_o, MPI_REAL, MPI_SUM, 0, & 334 comm2d, ierr ) 335 CALL interpolate_to_atmos( 17 ) 370 336 ! 
371 337 !-- Send v to atmosphere 372 338 total_2d_o = 0.0 373 total_2d = 0.0339 total_2d = 0.0 374 340 total_2d(nys:nyn,nxl:nxr) = v(nzt,nys:nyn,nxl:nxr) 375 CALL MPI_REDUCE( total_2d, total_2d_o, ngp_o, MPI_REAL, &376 MPI_SUM, 0, comm2d, ierr)377 CALL interpolate_to_atmos( 18)341 CALL MPI_REDUCE( total_2d, total_2d_o, ngp_o, MPI_REAL, MPI_SUM, 0, & 342 comm2d, ierr ) 343 CALL interpolate_to_atmos( 18 ) 378 344 379 345 ENDIF … … 382 348 !-- Conversions of fluxes received from atmosphere 383 349 IF ( humidity_remote ) THEN 384 !here tswst is still the sum of atmospheric bottom heat fluxes 385 tswst = tswst + qswst_remote * 2.2626108e6 / 1005.0 386 !*latent heat of vaporization in m2/s2, or 540 cal/g, or 40.65 kJ/mol 387 !/(rho_atm(=1.0)*c_p) 350 ! 351 !-- Here tswst is still the sum of atmospheric bottom heat fluxes, 352 !-- * latent heat of vaporization in m2/s2, or 540 cal/g, or 40.65 kJ/mol 353 !-- /(rho_atm(=1.0)*c_p) 354 tswst = tswst + qswst_remote * 2.2626108E6 / 1005.0 388 355 ! 389 356 !-- ...and convert it to a salinity flux at the sea surface (top) … … 405 372 vswst = vswst / rho(nzt,:,:) 406 373 407 408 ENDIF 409 410 IF ( coupling_topology == 1 ) THEN 374 ENDIF 375 376 IF ( coupling_topology == 1 ) THEN 411 377 DEALLOCATE( total_2d_o, total_2d_a ) 412 378 ENDIF … … 420 386 421 387 422 SUBROUTINE interpolate_to_atmos( tag)388 SUBROUTINE interpolate_to_atmos( tag ) 423 389 424 390 USE arrays_3d … … 430 396 IMPLICIT NONE 431 397 432 433 398 INTEGER :: dnx, dnx2, dny, dny2, i, ii, j, jj 434 399 INTEGER, intent(in) :: tag … … 436 401 CALL MPI_BARRIER( comm2d, ierr ) 437 402 438 IF ( myid == 0 ) THEN 439 440 ! 441 !-- cyclic boundary conditions for the total 2D-grid 403 IF ( myid == 0 ) THEN 404 ! 405 !-- Cyclic boundary conditions for the total 2D-grid 442 406 total_2d_o(-nbgp:-1,:) = total_2d_o(ny+1-nbgp:ny,:) 443 407 total_2d_o(:,-nbgp:-1) = total_2d_o(:,nx+1-nbgp:nx) … … 452 416 453 417 ! 454 !-- Distance for interpolation around coarse grid points within the fine grid455 !-- (note: 2*dnx2 must not be equal with dnx)418 !-- Distance for interpolation around coarse grid points within the fine 419 !-- grid (note: 2*dnx2 must not be equal with dnx) 456 420 dnx2 = 2 * ( dnx / 2 ) 457 421 dny2 = 2 * ( dny / 2 ) … … 472 436 ENDDO 473 437 ! 474 !-- cyclic boundary conditions for atmosphere grid438 !-- Cyclic boundary conditions for atmosphere grid 475 439 total_2d_a(-nbgp:-1,:) = total_2d_a(ny_a+1-nbgp:ny_a,:) 476 440 total_2d_a(:,-nbgp:-1) = total_2d_a(:,nx_a+1-nbgp:nx_a) … … 480 444 ! 481 445 !-- Transfer of the atmosphere-grid-layer to the atmosphere 482 CALL MPI_SEND( total_2d_a(-nbgp,-nbgp), ngp_a, MPI_REAL, &483 ta rget_id, tag, comm_inter, ierr )446 CALL MPI_SEND( total_2d_a(-nbgp,-nbgp), ngp_a, MPI_REAL, target_id, & 447 tag, comm_inter, ierr ) 484 448 485 449 ENDIF … … 490 454 491 455 492 SUBROUTINE interpolate_to_ocean( tag)456 SUBROUTINE interpolate_to_ocean( tag ) 493 457 494 458 USE arrays_3d … … 500 464 IMPLICIT NONE 501 465 502 REAL :: fl, fr, myl, myr503 466 INTEGER :: dnx, dny, i, ii, j, jj 504 467 INTEGER, intent(in) :: tag 468 REAL :: fl, fr, myl, myr 469 505 470 506 471 CALL MPI_BARRIER( comm2d, ierr ) 507 472 508 IF ( myid == 0 ) THEN509 510 ! 511 ! 473 IF ( myid == 0 ) THEN 474 475 ! 476 !-- Number of gridpoints of the fine grid within one mesh of the coarse grid 512 477 dnx = ( nx_o + 1 ) / ( nx_a + 1 ) 513 478 dny = ( ny_o + 1 ) / ( ny_a + 1 ) 514 479 515 480 ! 
516 !-- cyclic boundary conditions for atmosphere grid481 !-- Cyclic boundary conditions for atmosphere grid 517 482 total_2d_a(-nbgp:-1,:) = total_2d_a(ny+1-nbgp:ny,:) 518 483 total_2d_a(:,-nbgp:-1) = total_2d_a(:,nx+1-nbgp:nx) … … 521 486 total_2d_a(:,nx+1:nx+nbgp) = total_2d_a(:,0:nbgp-1) 522 487 ! 523 !-- Bilinear Interpolation from atmosphere -grid-layer to ocean-grid-layer488 !-- Bilinear Interpolation from atmosphere grid-layer to ocean grid-layer 524 489 DO j = 0, ny 525 490 DO i = 0, nx … … 527 492 myr = ( total_2d_a(j+1,i+1) - total_2d_a(j,i+1) ) / dny 528 493 DO jj = 0, dny-1 529 fl = myl*jj 530 fr = myr*jj 494 fl = myl*jj + total_2d_a(j,i) 495 fr = myr*jj + total_2d_a(j,i+1) 531 496 DO ii = 0, dnx-1 532 497 total_2d_o(j*dny+jj,i*dnx+ii) = ( fr - fl ) / dnx * ii + fl … … 536 501 ENDDO 537 502 ! 538 !-- cyclic boundary conditions for ocean grid503 !-- Cyclic boundary conditions for ocean grid 539 504 total_2d_o(-nbgp:-1,:) = total_2d_o(ny_o+1-nbgp:ny_o,:) 540 505 total_2d_o(:,-nbgp:-1) = total_2d_o(:,nx_o+1-nbgp:nx_o) … … 542 507 total_2d_o(ny_o+1:ny_o+nbgp,:) = total_2d_o(0:nbgp-1,:) 543 508 total_2d_o(:,nx_o+1:nx_o+nbgp) = total_2d_o(:,0:nbgp-1) 544 545 509 546 510 CALL MPI_SEND( total_2d_o(-nbgp,-nbgp), ngp_o, MPI_REAL, &
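Editor's note: in interpolate_to_ocean above, the coarse atmosphere field (total_2d_a) is mapped onto the finer ocean grid (total_2d_o) by bilinear interpolation inside each coarse mesh; the revision also adds the previously missing total_2d_a(j,i) and total_2d_a(j,i+1) offsets to fl and fr. The stand-alone sketch below reproduces that loop structure on two small serial arrays; coarse, fine and the grid-size parameters are local, illustrative names, and the cyclic-boundary handling and MPI transfer of the original routine are omitted.

    PROGRAM coarse_to_fine_demo

       IMPLICIT NONE

       INTEGER, PARAMETER :: nx_a = 3, ny_a = 3   ! coarse (atmosphere-like) grid
       INTEGER, PARAMETER :: nx_o = 7, ny_o = 7   ! fine (ocean-like) grid

       INTEGER :: dnx, dny, i, ii, j, jj
       REAL    :: fl, fr, myl, myr
       REAL, DIMENSION(0:ny_a+1,0:nx_a+1) :: coarse
       REAL, DIMENSION(0:ny_o,0:nx_o)     :: fine
    !
    !-- Number of fine-grid points per coarse-grid mesh
       dnx = ( nx_o + 1 ) / ( nx_a + 1 )
       dny = ( ny_o + 1 ) / ( ny_a + 1 )
    !
    !-- Smooth test data; the extra row/column at ny_a+1 / nx_a+1 plays the
    !-- role of the cyclic boundary values carried by total_2d_a
       DO  j = 0, ny_a+1
          DO  i = 0, nx_a+1
             coarse(j,i) = REAL( i + j )
          ENDDO
       ENDDO
    !
    !-- Bilinear interpolation from the coarse to the fine grid, following the
    !-- loop structure shown above: linear in j along the left and right edges
    !-- of the coarse mesh (fl, fr), then linear in i between them
       DO  j = 0, ny_a
          DO  i = 0, nx_a
             myl = ( coarse(j+1,i)   - coarse(j,i)   ) / dny
             myr = ( coarse(j+1,i+1) - coarse(j,i+1) ) / dny
             DO  jj = 0, dny-1
                fl = myl * jj + coarse(j,i)
                fr = myr * jj + coarse(j,i+1)
                DO  ii = 0, dnx-1
                   fine(j*dny+jj,i*dnx+ii) = ( fr - fl ) / dnx * ii + fl
                ENDDO
             ENDDO
          ENDDO
       ENDDO

       PRINT*, 'fine(0,0) = ', fine(0,0), '  fine(ny_o,nx_o) = ', fine(ny_o,nx_o)

    END PROGRAM coarse_to_fine_demo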