 SUBROUTINE advec_particles

!------------------------------------------------------------------------------!
! Current revisions:
! -----------------
! Particle attributes are set with new routine set_particle_attributes.
! Output of NetCDF messages with aid of message handling routine.
! Output of messages replaced by message handling routine
! Bugfix: error in check, if particles moved further than one subdomain length.
! This check must not be applied for newly released particles
! Bugfix: several tail counters are initialized, particle_tail_coordinates is
! only written to file if its third index is > 0
! TEST: PRINT statements on unit 9 (commented out)
!
! Former revisions:
! -----------------
! $Id: advec_particles.f90 276 2009-03-29 08:50:26Z letzel $
!
! 212 2008-11-11 09:09:24Z raasch
! Bugfix in calculating k index in case of ocean runs (sort_particles)
!
! 150 2008-02-29 08:19:58Z raasch
! Bottom boundary condition and vertical index calculations adjusted for
! ocean runs.
!
! 119 2007-10-17 10:27:13Z raasch
! Sorting of particles is controlled by dt_sort_particles and moved from
! the SGS timestep loop to after the end of this loop.
! Bugfix: pleft/pright changed to pnorth/psouth in sendrecv of particle tail
! numbers along y
! Small bugfixes in the SGS part
!
! 106 2007-08-16 14:30:26Z raasch
! remaining variables iran changed to iran_part
!
! 95 2007-06-02 16:48:38Z raasch
! hydro_press renamed hyp
!
! 75 2007-03-22 09:54:05Z raasch
! Particle reflection at vertical walls implemented in new subroutine
! particle_boundary_conds,
! vertical walls are regarded in the SGS model,
! + user_advec_particles, particles-package is now part of the default code,
! array arguments in sendrecv calls have to refer to first element (1) due to
! mpich (mpiI) interface requirements,
! 2nd+3rd argument removed from exchange_horiz
!
! 16 2007-02-15 13:16:47Z raasch
! Bugfix: wrong if-clause from revision 1.32
!
! r4 | raasch | 2007-02-13 12:33:16 +0100 (Tue, 13 Feb 2007)
! RCS Log replaced by Id keyword, revision history cleaned up
!
! Revision 1.32 2007/02/11 12:48:20 raasch
! Always the lower level k is used for interpolation
! Bugfix: new particles are released only if end_time_prel > simulated_time
! Bugfix: transfer of particles when x < -0.5*dx (0.0 before), etc.,
! index i,j used instead of cartesian (x,y) coordinate to check for
! transfer because this failed under very rare conditions
! Bugfix: calculation of number of particles with same radius as the current
! particle (cloud droplet code)
!
! Revision 1.31 2006/08/17 09:21:01 raasch
! Two more compilation errors removed from the last revision
!
! Revision 1.30 2006/08/17 09:11:17 raasch
! Two compilation errors removed from the last revision
!
! Revision 1.29 2006/08/04 14:05:01 raasch
! Subgrid scale velocities are (optionally) included for calculating the
! particle advection, new counters trlp_count_sum, etc. for accumulating
! the number of particles exchanged between the subdomains during all
! sub-timesteps (if sgs velocities are included), +3d-arrays de_dx/y/z,
! izuf renamed iran, output of particle time series
!
! Revision 1.1 1999/11/25 16:16:06 raasch
! Initial revision
!
!
! Description:
! ------------
! Particle advection
!------------------------------------------------------------------------------!
USE arrays_3d USE cloud_parameters USE constants USE control_parameters USE cpulog USE grid_variables USE indices USE interfaces USE netcdf_control USE particle_attributes USE pegrid USE random_function_mod USE statistics IMPLICIT NONE INTEGER :: agp, deleted_particles, deleted_tails, i, ie, ii, inc, is, j, & jj, js, k, kk, kw, m, n, nc, nn, num_gp, psi, tlength, & trlp_count, trlp_count_sum, trlp_count_recv, & trlp_count_recv_sum, trlpt_count, trlpt_count_recv, & trnp_count, trnp_count_sum, trnp_count_recv, & trnp_count_recv_sum, trnpt_count, trnpt_count_recv, & trrp_count, trrp_count_sum, trrp_count_recv, & trrp_count_recv_sum, trrpt_count, trrpt_count_recv, & trsp_count, trsp_count_sum, trsp_count_recv, & trsp_count_recv_sum, trspt_count, trspt_count_recv, nd INTEGER :: gp_outside_of_building(1:8) LOGICAL :: dt_3d_reached, dt_3d_reached_l, prt_position REAL :: aa, arg, bb, cc, dd, delta_r, dens_ratio, de_dt, de_dt_min, & de_dx_int, de_dx_int_l, de_dx_int_u, de_dy_int, de_dy_int_l, & de_dy_int_u, de_dz_int, de_dz_int_l, de_dz_int_u, diss_int, & diss_int_l, diss_int_u, distance, dt_gap, dt_particle, & dt_particle_m, d_radius, d_sum, e_a, e_int, e_int_l, e_int_u, & e_mean_int, e_s, exp_arg, exp_term, fs_int, gg, & lagr_timescale, mean_r, new_r, p_int, pt_int, pt_int_l, & pt_int_u, q_int, q_int_l, q_int_u, ql_int, ql_int_l, ql_int_u, & random_gauss, sl_r3, sl_r4, s_r3, s_r4, t_int, u_int, u_int_l, & u_int_u, vv_int, v_int, v_int_l, v_int_u, w_int, w_int_l, & w_int_u, x, y REAL, DIMENSION(1:30) :: de_dxi, de_dyi, de_dzi, dissi, d_gp_pl, ei REAL :: location(1:30,1:3) REAL, DIMENSION(nzb:nzt+1,nys-1:nyn+1,nxl-1:nxr+1) :: de_dx, de_dy, de_dz REAL, DIMENSION(:,:,:), ALLOCATABLE :: trlpt, trnpt, trrpt, trspt TYPE(particle_type) :: tmp_particle TYPE(particle_type), DIMENSION(:), ALLOCATABLE :: trlp, trnp, trrp, trsp CALL cpu_log( log_point(25), 'advec_particles', 'start' ) ! IF ( number_of_particles /= number_of_tails ) THEN ! WRITE (9,*) '--- advec_particles: #1' ! WRITE (9,*) ' #of p=',number_of_particles,' #of t=',number_of_tails ! CALL local_flush( 9 ) ! ENDIF ! !-- Write particle data on file for later analysis. !-- This has to be done here (before particles are advected) in order !-- to allow correct output in case of dt_write_particle_data = dt_prel = !-- particle_maximum_age. Otherwise (if output is done at the end of this !-- subroutine), the relevant particles would have been already deleted. !-- The MOD function allows for changes in the output interval with restart !-- runs. !-- Attention: change version number for unit 85 (in routine check_open) !-- whenever the output format for this unit is changed! time_write_particle_data = time_write_particle_data + dt_3d IF ( time_write_particle_data >= dt_write_particle_data ) THEN CALL cpu_log( log_point_s(40), 'advec_part_io', 'start' ) CALL check_open( 85 ) WRITE ( 85 ) simulated_time, maximum_number_of_particles, & number_of_particles WRITE ( 85 ) particles WRITE ( 85 ) maximum_number_of_tailpoints, maximum_number_of_tails, & number_of_tails IF ( maximum_number_of_tails > 0 ) THEN WRITE ( 85 ) particle_tail_coordinates ENDIF CALL close_file( 85 ) IF ( netcdf_output ) CALL output_particles_netcdf time_write_particle_data = MOD( time_write_particle_data, & MAX( dt_write_particle_data, dt_3d ) ) CALL cpu_log( log_point_s(40), 'advec_part_io', 'stop' ) ENDIF ! 
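!-- Note: taking MOD( time_write_particle_data, MAX( dt_write_particle_data, dt_3d ) )
!-- carries any overshoot of the output counter over into the next interval.
!-- Purely illustrative numbers (not from this setup): with
!-- dt_write_particle_data = 100.0 s and dt_3d = 7.5 s the counter reaches
!-- 105.0 s when output is triggered, and the MOD call keeps the residual
!-- 5.0 s, so later outputs are not successively delayed. Using
!-- MAX( ..., dt_3d ) ensures that the MOD argument is never smaller than the
!-- increment dt_3d.
!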
!-- Calculate exponential term used in case of particle inertia for each !-- of the particle groups CALL cpu_log( log_point_s(41), 'advec_part_exp', 'start' ) DO m = 1, number_of_particle_groups IF ( particle_groups(m)%density_ratio /= 0.0 ) THEN particle_groups(m)%exp_arg = & 4.5 * particle_groups(m)%density_ratio * & molecular_viscosity / ( particle_groups(m)%radius )**2 particle_groups(m)%exp_term = EXP( -particle_groups(m)%exp_arg * & dt_3d ) ENDIF ENDDO CALL cpu_log( log_point_s(41), 'advec_part_exp', 'stop' ) ! WRITE ( 9, * ) '*** advec_particles: ##0.3' ! CALL local_flush( 9 ) ! nd = 0 ! DO n = 1, number_of_particles ! IF ( .NOT. particle_mask(n) ) nd = nd + 1 ! ENDDO ! IF ( nd /= deleted_particles ) THEN ! WRITE (9,*) '*** nd=',nd,' deleted_particles=',deleted_particles ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! !-- Particle (droplet) growth by condensation/evaporation and collision IF ( cloud_droplets ) THEN ! !-- Reset summation arrays ql_c = 0.0; ql_v = 0.0; ql_vp = 0.0 ! !-- Particle growth by condensation/evaporation CALL cpu_log( log_point_s(42), 'advec_part_cond', 'start' ) DO n = 1, number_of_particles ! !-- Interpolate temperature and humidity. !-- First determine left, south, and bottom index of the arrays. i = particles(n)%x * ddx j = particles(n)%y * ddy k = ( particles(n)%z + 0.5 * dz * atmos_ocean_sign ) / dz & + offset_ocean_nzt ! only exact if equidistant x = particles(n)%x - i * dx y = particles(n)%y - j * dy aa = x**2 + y**2 bb = ( dx - x )**2 + y**2 cc = x**2 + ( dy - y )**2 dd = ( dx - x )**2 + ( dy - y )**2 gg = aa + bb + cc + dd pt_int_l = ( ( gg - aa ) * pt(k,j,i) + ( gg - bb ) * pt(k,j,i+1) & + ( gg - cc ) * pt(k,j+1,i) + ( gg - dd ) * pt(k,j+1,i+1) & ) / ( 3.0 * gg ) pt_int_u = ( ( gg-aa ) * pt(k+1,j,i) + ( gg-bb ) * pt(k+1,j,i+1) & + ( gg-cc ) * pt(k+1,j+1,i) + ( gg-dd ) * pt(k+1,j+1,i+1) & ) / ( 3.0 * gg ) pt_int = pt_int_l + ( particles(n)%z - zu(k) ) / dz * & ( pt_int_u - pt_int_l ) q_int_l = ( ( gg - aa ) * q(k,j,i) + ( gg - bb ) * q(k,j,i+1) & + ( gg - cc ) * q(k,j+1,i) + ( gg - dd ) * q(k,j+1,i+1) & ) / ( 3.0 * gg ) q_int_u = ( ( gg-aa ) * q(k+1,j,i) + ( gg-bb ) * q(k+1,j,i+1) & + ( gg-cc ) * q(k+1,j+1,i) + ( gg-dd ) * q(k+1,j+1,i+1) & ) / ( 3.0 * gg ) q_int = q_int_l + ( particles(n)%z - zu(k) ) / dz * & ( q_int_u - q_int_l ) ql_int_l = ( ( gg - aa ) * ql(k,j,i) + ( gg - bb ) * ql(k,j,i+1) & + ( gg - cc ) * ql(k,j+1,i) + ( gg - dd ) * ql(k,j+1,i+1) & ) / ( 3.0 * gg ) ql_int_u = ( ( gg-aa ) * ql(k+1,j,i) + ( gg-bb ) * ql(k+1,j,i+1) & + ( gg-cc ) * ql(k+1,j+1,i) + ( gg-dd ) * ql(k+1,j+1,i+1) & ) / ( 3.0 * gg ) ql_int = ql_int_l + ( particles(n)%z - zu(k) ) / dz * & ( ql_int_u - ql_int_l ) ! !-- Calculate real temperature and saturation vapor pressure p_int = hyp(k) + ( particles(n)%z - zu(k) ) / dz * ( hyp(k+1)-hyp(k) ) t_int = pt_int * ( p_int / 100000.0 )**0.286 e_s = 611.0 * EXP( l_d_rv * ( 3.6609E-3 - 1.0 / t_int ) ) ! !-- Current vapor pressure e_a = q_int * p_int / ( 0.378 * q_int + 0.622 ) ! !-- Change in radius by condensation/evaporation !-- ATTENTION: this is only an approximation for large radii arg = particles(n)%radius**2 + 2.0 * dt_3d * & ( e_a / e_s - 1.0 ) / & ( ( l_d_rv / t_int - 1.0 ) * l_v * rho_l / t_int / & thermal_conductivity_l + & rho_l * r_v * t_int / diff_coeff_l / e_s ) IF ( arg < 1.0E-14 ) THEN new_r = 1.0E-7 ELSE new_r = SQRT( arg ) ENDIF delta_r = new_r - particles(n)%radius ! NOTE: this is the correct formula (indipendent of radius). ! 
nevertheless, it give wrong results for large timesteps ! d_radius = 1.0 / particles(n)%radius ! delta_r = d_radius * ( e_a / e_s - 1.0 - 3.3E-7 / t_int * d_radius + & ! b_cond * d_radius**3 ) / & ! ( ( l_d_rv / t_int - 1.0 ) * l_v * rho_l / t_int / & ! thermal_conductivity_l + & ! rho_l * r_v * t_int / diff_coeff_l / e_s ) * dt_3d ! new_r = particles(n)%radius + delta_r ! IF ( new_r < 1.0E-7 ) new_r = 1.0E-7 ! !-- Sum up the change in volume of liquid water for the respective grid !-- volume (this is needed later on for calculating the release of !-- latent heat) i = ( particles(n)%x + 0.5 * dx ) * ddx j = ( particles(n)%y + 0.5 * dy ) * ddy k = particles(n)%z / dz + 1 + offset_ocean_nzt_m1 ! only exact if equidistant ql_c(k,j,i) = ql_c(k,j,i) + particles(n)%weight_factor * & rho_l * 1.33333333 * pi * & ( new_r**3 - particles(n)%radius**3 ) / & ( rho_surface * dx * dy * dz ) IF ( ql_c(k,j,i) > 100.0 ) THEN WRITE( message_string, * ) 'k=',k,' j=',j,' i=',i, & ' ql_c=',ql_c(k,j,i), ' &part(',n,')%wf=', & particles(n)%weight_factor,' delta_r=',delta_r CALL message( 'advec_particles', 'PA0143', 2, 2, -1, 6, 1 ) ENDIF ! !-- Change the droplet radius IF ( ( new_r - particles(n)%radius ) < 0.0 .AND. new_r < 0.0 ) & THEN WRITE( message_string, * ) '#1 k=',k,' j=',j,' i=',i, & ' e_s=',e_s, ' e_a=',e_a,' t_int=',t_int, & ' &d_radius=',d_radius,' delta_r=',delta_r,& ' particle_radius=',particles(n)%radius CALL message( 'advec_particles', 'PA0144', 2, 2, -1, 6, 1 ) ENDIF particles(n)%radius = new_r ! !-- Sum up the total volume of liquid water (needed below for !-- re-calculating the weighting factors) ql_v(k,j,i) = ql_v(k,j,i) + particles(n)%weight_factor * & particles(n)%radius**3 ENDDO CALL cpu_log( log_point_s(42), 'advec_part_cond', 'stop' ) ! !-- Particle growth by collision CALL cpu_log( log_point_s(43), 'advec_part_coll', 'start' ) DO i = nxl, nxr DO j = nys, nyn DO k = nzb+1, nzt ! !-- Collision requires at least two particles in the box IF ( prt_count(k,j,i) > 1 ) THEN ! !-- First, sort particles within the gridbox by their size, !-- using Shell's method (see Numerical Recipes) !-- NOTE: In case of using particle tails, the re-sorting of !-- ---- tails would have to be included here! psi = prt_start_index(k,j,i) - 1 inc = 1 DO WHILE ( inc <= prt_count(k,j,i) ) inc = 3 * inc + 1 ENDDO DO WHILE ( inc > 1 ) inc = inc / 3 DO is = inc+1, prt_count(k,j,i) tmp_particle = particles(psi+is) js = is DO WHILE ( particles(psi+js-inc)%radius > & tmp_particle%radius ) particles(psi+js) = particles(psi+js-inc) js = js - inc IF ( js <= inc ) EXIT ENDDO particles(psi+js) = tmp_particle ENDDO ENDDO ! !-- Calculate the mean radius of all those particles which !-- are of smaller or equal size than the current particle !-- and use this radius for calculating the collision efficiency psi = prt_start_index(k,j,i) s_r3 = 0.0 s_r4 = 0.0 DO n = psi, psi+prt_count(k,j,i)-1 ! !-- There may be some particles of size equal to the !-- current particle but with larger index sl_r3 = 0.0 sl_r4 = 0.0 DO is = n, psi+prt_count(k,j,i)-2 IF ( particles(is+1)%radius == & particles(is)%radius ) THEN sl_r3 = sl_r3 + particles(is+1)%radius**3 sl_r4 = sl_r4 + particles(is+1)%radius**4 ELSE EXIT ENDIF ENDDO IF ( ( s_r3 + sl_r3 ) > 0.0 ) THEN mean_r = ( s_r4 + sl_r4 ) / ( s_r3 + sl_r3 ) CALL collision_efficiency( mean_r, & particles(n)%radius, & effective_coll_efficiency ) ELSE effective_coll_efficiency = 0.0 ENDIF ! 
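!-- Note on mean_r: since s_r3/sl_r3 and s_r4/sl_r4 accumulate r**3 and r**4
!-- of the droplets that are not larger than the current one, the ratio
!-- mean_r = SUM( r**4 ) / SUM( r**3 ) is their volume-weighted mean radius,
!-- which is then passed together with the radius of the current droplet to
!-- collision_efficiency.
!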
!-- Contribution of the current particle to the next one s_r3 = s_r3 + particles(n)%radius**3 s_r4 = s_r4 + particles(n)%radius**4 IF ( effective_coll_efficiency > 1.0 .OR. & effective_coll_efficiency < 0.0 ) & THEN WRITE( message_string, * ) 'collision_efficiency ' , & 'out of range:' ,effective_coll_efficiency CALL message( 'advec_particles', 'PA0145', 2, 2, -1, & 6, 1 ) END IF ! !-- Interpolation of ... ii = particles(n)%x * ddx jj = particles(n)%y * ddy kk = ( particles(n)%z + 0.5 * dz ) / dz x = particles(n)%x - ii * dx y = particles(n)%y - jj * dy aa = x**2 + y**2 bb = ( dx - x )**2 + y**2 cc = x**2 + ( dy - y )**2 dd = ( dx - x )**2 + ( dy - y )**2 gg = aa + bb + cc + dd ql_int_l = ( ( gg-aa ) * ql(kk,jj,ii) + ( gg-bb ) * & ql(kk,jj,ii+1) & + ( gg-cc ) * ql(kk,jj+1,ii) + ( gg-dd ) * & ql(kk,jj+1,ii+1) & ) / ( 3.0 * gg ) ql_int_u = ( ( gg-aa ) * ql(kk+1,jj,ii) + ( gg-bb ) * & ql(kk+1,jj,ii+1) & + ( gg-cc ) * ql(kk+1,jj+1,ii) + ( gg-dd ) * & ql(kk+1,jj+1,ii+1) & ) / ( 3.0 * gg ) ql_int = ql_int_l + ( particles(n)%z - zu(kk) ) / dz * & ( ql_int_u - ql_int_l ) ! !-- Interpolate u velocity-component ii = ( particles(n)%x + 0.5 * dx ) * ddx jj = particles(n)%y * ddy kk = ( particles(n)%z + 0.5 * dz ) / dz ! only if eq.dist IF ( ( particles(n)%z - zu(kk) ) > ( 0.5*dz ) ) kk = kk+1 x = particles(n)%x + ( 0.5 - ii ) * dx y = particles(n)%y - jj * dy aa = x**2 + y**2 bb = ( dx - x )**2 + y**2 cc = x**2 + ( dy - y )**2 dd = ( dx - x )**2 + ( dy - y )**2 gg = aa + bb + cc + dd u_int_l = ( ( gg-aa ) * u(kk,jj,ii) + ( gg-bb ) * & u(kk,jj,ii+1) & + ( gg-cc ) * u(kk,jj+1,ii) + ( gg-dd ) * & u(kk,jj+1,ii+1) & ) / ( 3.0 * gg ) - u_gtrans IF ( kk+1 == nzt+1 ) THEN u_int = u_int_l ELSE u_int_u = ( ( gg-aa ) * u(kk+1,jj,ii) + ( gg-bb ) * & u(kk+1,jj,ii+1) & + ( gg-cc ) * u(kk+1,jj+1,ii) + ( gg-dd ) * & u(kk+1,jj+1,ii+1) & ) / ( 3.0 * gg ) - u_gtrans u_int = u_int_l + ( particles(n)%z - zu(kk) ) / dz * & ( u_int_u - u_int_l ) ENDIF ! !-- Same procedure for interpolation of the v velocity-compo- !-- nent (adopt index k from u velocity-component) ii = particles(n)%x * ddx jj = ( particles(n)%y + 0.5 * dy ) * ddy x = particles(n)%x - ii * dx y = particles(n)%y + ( 0.5 - jj ) * dy aa = x**2 + y**2 bb = ( dx - x )**2 + y**2 cc = x**2 + ( dy - y )**2 dd = ( dx - x )**2 + ( dy - y )**2 gg = aa + bb + cc + dd v_int_l = ( ( gg-aa ) * v(kk,jj,ii) + ( gg-bb ) * & v(kk,jj,ii+1) & + ( gg-cc ) * v(kk,jj+1,ii) + ( gg-dd ) * & v(kk,jj+1,ii+1) & ) / ( 3.0 * gg ) - v_gtrans IF ( kk+1 == nzt+1 ) THEN v_int = v_int_l ELSE v_int_u = ( ( gg-aa ) * v(kk+1,jj,ii) + ( gg-bb ) * & v(kk+1,jj,ii+1) & + ( gg-cc ) * v(kk+1,jj+1,ii) + ( gg-dd ) * & v(kk+1,jj+1,ii+1) & ) / ( 3.0 * gg ) - v_gtrans v_int = v_int_l + ( particles(n)%z - zu(kk) ) / dz * & ( v_int_u - v_int_l ) ENDIF ! 
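!-- Note on the horizontal interpolation used throughout this routine:
!-- aa, bb, cc and dd are the squared horizontal distances of the particle
!-- from the four surrounding grid points and gg is their sum. The weights
!-- ( gg - aa ) / ( 3.0 * gg ), ... therefore add up to
!-- ( 4*gg - (aa+bb+cc+dd) ) / ( 3*gg ) = 1, i.e. the scheme is a normalized
!-- distance weighting in which the weight of a grid point decreases linearly
!-- with its squared distance from the particle.
!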
!-- Same procedure for interpolation of the w velocity-compo- !-- nent (adopt index i from v velocity-component) jj = particles(n)%y * ddy kk = particles(n)%z / dz x = particles(n)%x - ii * dx y = particles(n)%y - jj * dy aa = x**2 + y**2 bb = ( dx - x )**2 + y**2 cc = x**2 + ( dy - y )**2 dd = ( dx - x )**2 + ( dy - y )**2 gg = aa + bb + cc + dd w_int_l = ( ( gg-aa ) * w(kk,jj,ii) + ( gg-bb ) * & w(kk,jj,ii+1) & + ( gg-cc ) * w(kk,jj+1,ii) + ( gg-dd ) * & w(kk,jj+1,ii+1) & ) / ( 3.0 * gg ) IF ( kk+1 == nzt+1 ) THEN w_int = w_int_l ELSE w_int_u = ( ( gg-aa ) * w(kk+1,jj,ii) + ( gg-bb ) * & w(kk+1,jj,ii+1) & + ( gg-cc ) * w(kk+1,jj+1,ii) + ( gg-dd ) * & w(kk+1,jj+1,ii+1) & ) / ( 3.0 * gg ) w_int = w_int_l + ( particles(n)%z - zw(kk) ) / dz * & ( w_int_u - w_int_l ) ENDIF ! !-- Change in radius due to collision delta_r = effective_coll_efficiency * & ql_int * rho_surface / ( 1.0 - ql_int ) * & 0.25 / rho_l * & SQRT( ( u_int - particles(n)%speed_x )**2 + & ( v_int - particles(n)%speed_y )**2 + & ( w_int - particles(n)%speed_z )**2 & ) * dt_3d particles(n)%radius = particles(n)%radius + delta_r ql_vp(k,j,i) = ql_vp(k,j,i) + particles(n)%radius**3 ENDDO ENDIF ! !-- Re-calculate the weighting factor (total liquid water content !-- must be conserved during collision) IF ( ql_vp(k,j,i) /= 0.0 ) THEN ql_vp(k,j,i) = ql_v(k,j,i) / ql_vp(k,j,i) ! !-- Re-assign this weighting factor to the particles of the !-- current gridbox psi = prt_start_index(k,j,i) DO n = psi, psi + prt_count(k,j,i)-1 particles(n)%weight_factor = ql_vp(k,j,i) ENDDO ENDIF ENDDO ENDDO ENDDO CALL cpu_log( log_point_s(43), 'advec_part_coll', 'stop' ) ENDIF ! !-- Particle advection. !-- In case of including the SGS velocities, the LES timestep has probably !-- to be split into several smaller timesteps because of the Lagrangian !-- timescale condition. Because the number of timesteps to be carried out is !-- not known at the beginning, these steps are carried out in an infinite loop !-- with exit condition. ! !-- If SGS velocities are used, gradients of the TKE have to be calculated and !-- boundary conditions have to be set first. Also, horizontally averaged !-- profiles of the SGS TKE and the resolved-scale velocity variances are !-- needed. IF ( use_sgs_for_particles ) THEN ! !-- TKE gradient along x and y DO i = nxl, nxr DO j = nys, nyn DO k = nzb, nzt+1 IF ( k <= nzb_s_inner(j,i-1) .AND. & k > nzb_s_inner(j,i) .AND. & k > nzb_s_inner(j,i+1) ) THEN de_dx(k,j,i) = 2.0 * sgs_wfu_part * & ( e(k,j,i+1) - e(k,j,i) ) * ddx ELSEIF ( k > nzb_s_inner(j,i-1) .AND. & k > nzb_s_inner(j,i) .AND. & k <= nzb_s_inner(j,i+1) ) THEN de_dx(k,j,i) = 2.0 * sgs_wfu_part * & ( e(k,j,i) - e(k,j,i-1) ) * ddx ELSEIF ( k < nzb_s_inner(j,i) .AND. k < nzb_s_inner(j,i+1) ) & THEN de_dx(k,j,i) = 0.0 ELSEIF ( k < nzb_s_inner(j,i-1) .AND. k < nzb_s_inner(j,i) ) & THEN de_dx(k,j,i) = 0.0 ELSE de_dx(k,j,i) = sgs_wfu_part * & ( e(k,j,i+1) - e(k,j,i-1) ) * ddx ENDIF IF ( k <= nzb_s_inner(j-1,i) .AND. & k > nzb_s_inner(j,i) .AND. & k > nzb_s_inner(j+1,i) ) THEN de_dy(k,j,i) = 2.0 * sgs_wfv_part * & ( e(k,j+1,i) - e(k,j,i) ) * ddy ELSEIF ( k > nzb_s_inner(j-1,i) .AND. & k > nzb_s_inner(j,i) .AND. & k <= nzb_s_inner(j+1,i) ) THEN de_dy(k,j,i) = 2.0 * sgs_wfv_part * & ( e(k,j,i) - e(k,j-1,i) ) * ddy ELSEIF ( k < nzb_s_inner(j,i) .AND. k < nzb_s_inner(j+1,i) ) & THEN de_dy(k,j,i) = 0.0 ELSEIF ( k < nzb_s_inner(j-1,i) .AND. k < nzb_s_inner(j,i) ) & THEN de_dy(k,j,i) = 0.0 ELSE de_dy(k,j,i) = sgs_wfv_part * & ( e(k,j+1,i) - e(k,j-1,i) ) * ddy ENDIF ENDDO ENDDO ENDDO ! 
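!-- Note: with sigma_u**2 = 2.0 * sgs_wfu_part * e (cf. the initialization of
!-- the SGS velocities further below), the expressions above correspond to
!-- centered differences of this component variance, d(sigma_u**2)/dx, with
!-- one-sided differences of the same quantity next to vertical walls and a
!-- zero gradient inside buildings (analogously for de_dy with sgs_wfv_part).
!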
!-- TKE gradient along z, including bottom and top boundary conditions DO i = nxl, nxr DO j = nys, nyn DO k = nzb_s_inner(j,i)+2, nzt-1 de_dz(k,j,i) = 2.0 * sgs_wfw_part * & ( e(k+1,j,i) - e(k-1,j,i) ) / ( zu(k+1)-zu(k-1) ) ENDDO k = nzb_s_inner(j,i) de_dz(nzb:k,j,i) = 0.0 de_dz(k+1,j,i) = 2.0 * sgs_wfw_part * ( e(k+2,j,i) - e(k+1,j,i) ) & / ( zu(k+2) - zu(k+1) ) de_dz(nzt,j,i) = 0.0 de_dz(nzt+1,j,i) = 0.0 ENDDO ENDDO ! !-- Lateral boundary conditions CALL exchange_horiz( de_dx ) CALL exchange_horiz( de_dy ) CALL exchange_horiz( de_dz ) CALL exchange_horiz( diss ) ! !-- Calculate the horizontally averaged profiles of SGS TKE and resolved !-- velocity variances (they may have been already calculated in routine !-- flow_statistics). IF ( .NOT. flow_statistics_called ) THEN ! !-- First calculate horizontally averaged profiles of the horizontal !-- velocities. sums_l(:,1,0) = 0.0 sums_l(:,2,0) = 0.0 DO i = nxl, nxr DO j = nys, nyn DO k = nzb_s_outer(j,i), nzt+1 sums_l(k,1,0) = sums_l(k,1,0) + u(k,j,i) sums_l(k,2,0) = sums_l(k,2,0) + v(k,j,i) ENDDO ENDDO ENDDO #if defined( __parallel ) ! !-- Compute total sum from local sums CALL MPI_ALLREDUCE( sums_l(nzb,1,0), sums(nzb,1), nzt+2-nzb, & MPI_REAL, MPI_SUM, comm2d, ierr ) CALL MPI_ALLREDUCE( sums_l(nzb,2,0), sums(nzb,2), nzt+2-nzb, & MPI_REAL, MPI_SUM, comm2d, ierr ) #else sums(:,1) = sums_l(:,1,0) sums(:,2) = sums_l(:,2,0) #endif ! !-- Final values are obtained by division by the total number of grid !-- points used for the summation. hom(:,1,1,0) = sums(:,1) / ngp_2dh_outer(:,0) ! u hom(:,1,2,0) = sums(:,2) / ngp_2dh_outer(:,0) ! v ! !-- Now calculate the profiles of SGS TKE and the resolved-scale !-- velocity variances sums_l(:,8,0) = 0.0 sums_l(:,30,0) = 0.0 sums_l(:,31,0) = 0.0 sums_l(:,32,0) = 0.0 DO i = nxl, nxr DO j = nys, nyn DO k = nzb_s_outer(j,i), nzt+1 sums_l(k,8,0) = sums_l(k,8,0) + e(k,j,i) sums_l(k,30,0) = sums_l(k,30,0) + & ( u(k,j,i) - hom(k,1,1,0) )**2 sums_l(k,31,0) = sums_l(k,31,0) + & ( v(k,j,i) - hom(k,1,2,0) )**2 sums_l(k,32,0) = sums_l(k,32,0) + w(k,j,i)**2 ENDDO ENDDO ENDDO #if defined( __parallel ) ! !-- Compute total sum from local sums CALL MPI_ALLREDUCE( sums_l(nzb,8,0), sums(nzb,8), nzt+2-nzb, & MPI_REAL, MPI_SUM, comm2d, ierr ) CALL MPI_ALLREDUCE( sums_l(nzb,30,0), sums(nzb,30), nzt+2-nzb, & MPI_REAL, MPI_SUM, comm2d, ierr ) CALL MPI_ALLREDUCE( sums_l(nzb,31,0), sums(nzb,31), nzt+2-nzb, & MPI_REAL, MPI_SUM, comm2d, ierr ) CALL MPI_ALLREDUCE( sums_l(nzb,32,0), sums(nzb,32), nzt+2-nzb, & MPI_REAL, MPI_SUM, comm2d, ierr ) #else sums(:,8) = sums_l(:,8,0) sums(:,30) = sums_l(:,30,0) sums(:,31) = sums_l(:,31,0) sums(:,32) = sums_l(:,32,0) #endif ! !-- Final values are obtained by division by the total number of grid !-- points used for the summation. hom(:,1,8,0) = sums(:,8) / ngp_2dh_outer(:,0) ! e hom(:,1,30,0) = sums(:,30) / ngp_2dh_outer(:,0) ! u*2 hom(:,1,31,0) = sums(:,31) / ngp_2dh_outer(:,0) ! v*2 hom(:,1,32,0) = sums(:,32) / ngp_2dh_outer(:,0) ! w*2 ENDIF ENDIF ! !-- Initialize variables used for accumulating the number of particles !-- exchanged between the subdomains during all sub-timesteps (if sgs !-- velocities are included). These data are output further below on the !-- particle statistics file. trlp_count_sum = 0 trlp_count_recv_sum = 0 trrp_count_sum = 0 trrp_count_recv_sum = 0 trsp_count_sum = 0 trsp_count_recv_sum = 0 trnp_count_sum = 0 trnp_count_recv_sum = 0 ! 
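!-- Naming convention of the transfer counters used from here on: trlp, trrp,
!-- trsp and trnp count particles to be transferred to the left, right, south
!-- and north neighbouring subdomain, respectively, the corresponding *t
!-- variants (trlpt, ...) count the associated particle tails, and the *_recv
!-- counters hold the numbers received from the respective neighbour.
!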
!-- Initialize the variable storing the total time that a particle has advanced !-- within the timestep procedure particles(1:number_of_particles)%dt_sum = 0.0 ! !-- Timestep loop. !-- This loop has to be repeated until the advection time of every particle !-- (in the total domain!) has reached the LES timestep (dt_3d) DO CALL cpu_log( log_point_s(44), 'advec_part_advec', 'start' ) ! !-- Initialize the switch used for the loop exit condition checked at the !-- end of this loop. !-- If at least one particle has failed to reach the LES timestep, this !-- switch will be set false. dt_3d_reached_l = .TRUE. ! !-- Initialize variables for the (sub-) timestep, i.e. for marking those !-- particles to be deleted after the timestep particle_mask = .TRUE. deleted_particles = 0 trlp_count_recv = 0 trnp_count_recv = 0 trrp_count_recv = 0 trsp_count_recv = 0 trlpt_count_recv = 0 trnpt_count_recv = 0 trrpt_count_recv = 0 trspt_count_recv = 0 IF ( use_particle_tails ) THEN tail_mask = .TRUE. ENDIF deleted_tails = 0 DO n = 1, number_of_particles ! !-- Move particles only if the LES timestep has not (approximately) been !-- reached IF ( ( dt_3d - particles(n)%dt_sum ) < 1E-8 ) CYCLE ! !-- Interpolate u velocity-component, determine left, front, bottom !-- index of u-array i = ( particles(n)%x + 0.5 * dx ) * ddx j = particles(n)%y * ddy k = ( particles(n)%z + 0.5 * dz * atmos_ocean_sign ) / dz & + offset_ocean_nzt ! only exact if equidistant ! !-- Interpolation of the velocity components in the xy-plane x = particles(n)%x + ( 0.5 - i ) * dx y = particles(n)%y - j * dy aa = x**2 + y**2 bb = ( dx - x )**2 + y**2 cc = x**2 + ( dy - y )**2 dd = ( dx - x )**2 + ( dy - y )**2 gg = aa + bb + cc + dd u_int_l = ( ( gg - aa ) * u(k,j,i) + ( gg - bb ) * u(k,j,i+1) & + ( gg - cc ) * u(k,j+1,i) + ( gg - dd ) * u(k,j+1,i+1) & ) / ( 3.0 * gg ) - u_gtrans IF ( k+1 == nzt+1 ) THEN u_int = u_int_l ELSE u_int_u = ( ( gg-aa ) * u(k+1,j,i) + ( gg-bb ) * u(k+1,j,i+1) & + ( gg-cc ) * u(k+1,j+1,i) + ( gg-dd ) * u(k+1,j+1,i+1) & ) / ( 3.0 * gg ) - u_gtrans u_int = u_int_l + ( particles(n)%z - zu(k) ) / dz * & ( u_int_u - u_int_l ) ENDIF ! !-- Same procedure for interpolation of the v velocity-component (adopt !-- index k from u velocity-component) i = particles(n)%x * ddx j = ( particles(n)%y + 0.5 * dy ) * ddy x = particles(n)%x - i * dx y = particles(n)%y + ( 0.5 - j ) * dy aa = x**2 + y**2 bb = ( dx - x )**2 + y**2 cc = x**2 + ( dy - y )**2 dd = ( dx - x )**2 + ( dy - y )**2 gg = aa + bb + cc + dd v_int_l = ( ( gg - aa ) * v(k,j,i) + ( gg - bb ) * v(k,j,i+1) & + ( gg - cc ) * v(k,j+1,i) + ( gg - dd ) * v(k,j+1,i+1) & ) / ( 3.0 * gg ) - v_gtrans IF ( k+1 == nzt+1 ) THEN v_int = v_int_l ELSE v_int_u = ( ( gg-aa ) * v(k+1,j,i) + ( gg-bb ) * v(k+1,j,i+1) & + ( gg-cc ) * v(k+1,j+1,i) + ( gg-dd ) * v(k+1,j+1,i+1) & ) / ( 3.0 * gg ) - v_gtrans v_int = v_int_l + ( particles(n)%z - zu(k) ) / dz * & ( v_int_u - v_int_l ) ENDIF ! 
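!-- Note on the index shifts used above: on the staggered grid (Arakawa-C
!-- arrangement) the u grid points are displaced by -0.5*dx relative to the
!-- scalar points used elsewhere in this routine, hence the left neighbour
!-- index follows from ( x + 0.5*dx ) * ddx and the local coordinate from
!-- x + ( 0.5 - i ) * dx; v is shifted by -0.5*dy in y accordingly, and w
!-- (next block) is defined on the zw levels.
!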
!-- Same procedure for interpolation of the w velocity-component (adopt !-- index i from v velocity-component) IF ( vertical_particle_advection ) THEN j = particles(n)%y * ddy k = particles(n)%z / dz + offset_ocean_nzt_m1 x = particles(n)%x - i * dx y = particles(n)%y - j * dy aa = x**2 + y**2 bb = ( dx - x )**2 + y**2 cc = x**2 + ( dy - y )**2 dd = ( dx - x )**2 + ( dy - y )**2 gg = aa + bb + cc + dd w_int_l = ( ( gg - aa ) * w(k,j,i) + ( gg - bb ) * w(k,j,i+1) & + ( gg - cc ) * w(k,j+1,i) + ( gg - dd ) * w(k,j+1,i+1) & ) / ( 3.0 * gg ) IF ( k+1 == nzt+1 ) THEN w_int = w_int_l ELSE w_int_u = ( ( gg-aa ) * w(k+1,j,i) + & ( gg-bb ) * w(k+1,j,i+1) + & ( gg-cc ) * w(k+1,j+1,i) + & ( gg-dd ) * w(k+1,j+1,i+1) & ) / ( 3.0 * gg ) w_int = w_int_l + ( particles(n)%z - zw(k) ) / dz * & ( w_int_u - w_int_l ) ENDIF ELSE w_int = 0.0 ENDIF ! !-- Interpolate and calculate quantities needed for calculating the SGS !-- velocities IF ( use_sgs_for_particles ) THEN ! !-- Interpolate TKE i = particles(n)%x * ddx j = particles(n)%y * ddy k = ( particles(n)%z + 0.5 * dz * atmos_ocean_sign ) / dz & + offset_ocean_nzt ! only exact if eq.dist IF ( topography == 'flat' ) THEN x = particles(n)%x - i * dx y = particles(n)%y - j * dy aa = x**2 + y**2 bb = ( dx - x )**2 + y**2 cc = x**2 + ( dy - y )**2 dd = ( dx - x )**2 + ( dy - y )**2 gg = aa + bb + cc + dd e_int_l = ( ( gg-aa ) * e(k,j,i) + ( gg-bb ) * e(k,j,i+1) & + ( gg-cc ) * e(k,j+1,i) + ( gg-dd ) * e(k,j+1,i+1) & ) / ( 3.0 * gg ) IF ( k+1 == nzt+1 ) THEN e_int = e_int_l ELSE e_int_u = ( ( gg - aa ) * e(k+1,j,i) + & ( gg - bb ) * e(k+1,j,i+1) + & ( gg - cc ) * e(k+1,j+1,i) + & ( gg - dd ) * e(k+1,j+1,i+1) & ) / ( 3.0 * gg ) e_int = e_int_l + ( particles(n)%z - zu(k) ) / dz * & ( e_int_u - e_int_l ) ENDIF ! !-- Interpolate the TKE gradient along x (adopt incides i,j,k and !-- all position variables from above (TKE)) de_dx_int_l = ( ( gg - aa ) * de_dx(k,j,i) + & ( gg - bb ) * de_dx(k,j,i+1) + & ( gg - cc ) * de_dx(k,j+1,i) + & ( gg - dd ) * de_dx(k,j+1,i+1) & ) / ( 3.0 * gg ) IF ( ( k+1 == nzt+1 ) .OR. ( k == nzb ) ) THEN de_dx_int = de_dx_int_l ELSE de_dx_int_u = ( ( gg - aa ) * de_dx(k+1,j,i) + & ( gg - bb ) * de_dx(k+1,j,i+1) + & ( gg - cc ) * de_dx(k+1,j+1,i) + & ( gg - dd ) * de_dx(k+1,j+1,i+1) & ) / ( 3.0 * gg ) de_dx_int = de_dx_int_l + ( particles(n)%z - zu(k) ) / dz * & ( de_dx_int_u - de_dx_int_l ) ENDIF ! !-- Interpolate the TKE gradient along y de_dy_int_l = ( ( gg - aa ) * de_dy(k,j,i) + & ( gg - bb ) * de_dy(k,j,i+1) + & ( gg - cc ) * de_dy(k,j+1,i) + & ( gg - dd ) * de_dy(k,j+1,i+1) & ) / ( 3.0 * gg ) IF ( ( k+1 == nzt+1 ) .OR. ( k == nzb ) ) THEN de_dy_int = de_dy_int_l ELSE de_dy_int_u = ( ( gg - aa ) * de_dy(k+1,j,i) + & ( gg - bb ) * de_dy(k+1,j,i+1) + & ( gg - cc ) * de_dy(k+1,j+1,i) + & ( gg - dd ) * de_dy(k+1,j+1,i+1) & ) / ( 3.0 * gg ) de_dy_int = de_dy_int_l + ( particles(n)%z - zu(k) ) / dz * & ( de_dy_int_u - de_dy_int_l ) ENDIF ! !-- Interpolate the TKE gradient along z IF ( particles(n)%z < 0.5 * dz ) THEN de_dz_int = 0.0 ELSE de_dz_int_l = ( ( gg - aa ) * de_dz(k,j,i) + & ( gg - bb ) * de_dz(k,j,i+1) + & ( gg - cc ) * de_dz(k,j+1,i) + & ( gg - dd ) * de_dz(k,j+1,i+1) & ) / ( 3.0 * gg ) IF ( ( k+1 == nzt+1 ) .OR. ( k == nzb ) ) THEN de_dz_int = de_dz_int_l ELSE de_dz_int_u = ( ( gg - aa ) * de_dz(k+1,j,i) + & ( gg - bb ) * de_dz(k+1,j,i+1) + & ( gg - cc ) * de_dz(k+1,j+1,i) + & ( gg - dd ) * de_dz(k+1,j+1,i+1) & ) / ( 3.0 * gg ) de_dz_int = de_dz_int_l + ( particles(n)%z - zu(k) ) / dz * & ( de_dz_int_u - de_dz_int_l ) ENDIF ENDIF ! 
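!-- Note: for particles below z = 0.5 * dz the vertical TKE gradient is simply
!-- set to zero instead of being interpolated, consistent with the zero de_dz
!-- imposed at the bottom boundary further above; at k == nzb or k+1 == nzt+1
!-- only the value at level k is used, without vertical interpolation.
!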
!-- Interpolate the dissipation of TKE diss_int_l = ( ( gg - aa ) * diss(k,j,i) + & ( gg - bb ) * diss(k,j,i+1) + & ( gg - cc ) * diss(k,j+1,i) + & ( gg - dd ) * diss(k,j+1,i+1) & ) / ( 3.0 * gg ) IF ( k+1 == nzt+1 ) THEN diss_int = diss_int_l ELSE diss_int_u = ( ( gg - aa ) * diss(k+1,j,i) + & ( gg - bb ) * diss(k+1,j,i+1) + & ( gg - cc ) * diss(k+1,j+1,i) + & ( gg - dd ) * diss(k+1,j+1,i+1) & ) / ( 3.0 * gg ) diss_int = diss_int_l + ( particles(n)%z - zu(k) ) / dz * & ( diss_int_u - diss_int_l ) ENDIF ELSE ! !-- In case that there are buildings it has to be determined !-- how many of the gridpoints defining the particle box are !-- situated within a building !-- gp_outside_of_building(1): i,j,k !-- gp_outside_of_building(2): i,j+1,k !-- gp_outside_of_building(3): i,j,k+1 !-- gp_outside_of_building(4): i,j+1,k+1 !-- gp_outside_of_building(5): i+1,j,k !-- gp_outside_of_building(6): i+1,j+1,k !-- gp_outside_of_building(7): i+1,j,k+1 !-- gp_outside_of_building(8): i+1,j+1,k+1 gp_outside_of_building = 0 location = 0.0 num_gp = 0 IF ( k > nzb_s_inner(j,i) .OR. nzb_s_inner(j,i) == 0 ) THEN num_gp = num_gp + 1 gp_outside_of_building(1) = 1 location(num_gp,1) = i * dx location(num_gp,2) = j * dy location(num_gp,3) = k * dz - 0.5 * dz ei(num_gp) = e(k,j,i) dissi(num_gp) = diss(k,j,i) de_dxi(num_gp) = de_dx(k,j,i) de_dyi(num_gp) = de_dy(k,j,i) de_dzi(num_gp) = de_dz(k,j,i) ENDIF IF ( k > nzb_s_inner(j+1,i) .OR. nzb_s_inner(j+1,i) == 0 ) & THEN num_gp = num_gp + 1 gp_outside_of_building(2) = 1 location(num_gp,1) = i * dx location(num_gp,2) = (j+1) * dy location(num_gp,3) = k * dz - 0.5 * dz ei(num_gp) = e(k,j+1,i) dissi(num_gp) = diss(k,j+1,i) de_dxi(num_gp) = de_dx(k,j+1,i) de_dyi(num_gp) = de_dy(k,j+1,i) de_dzi(num_gp) = de_dz(k,j+1,i) ENDIF IF ( k+1 > nzb_s_inner(j,i) .OR. nzb_s_inner(j,i) == 0 ) THEN num_gp = num_gp + 1 gp_outside_of_building(3) = 1 location(num_gp,1) = i * dx location(num_gp,2) = j * dy location(num_gp,3) = (k+1) * dz - 0.5 * dz ei(num_gp) = e(k+1,j,i) dissi(num_gp) = diss(k+1,j,i) de_dxi(num_gp) = de_dx(k+1,j,i) de_dyi(num_gp) = de_dy(k+1,j,i) de_dzi(num_gp) = de_dz(k+1,j,i) ENDIF IF ( k+1 > nzb_s_inner(j+1,i) .OR. nzb_s_inner(j+1,i) == 0 ) & THEN num_gp = num_gp + 1 gp_outside_of_building(4) = 1 location(num_gp,1) = i * dx location(num_gp,2) = (j+1) * dy location(num_gp,3) = (k+1) * dz - 0.5 * dz ei(num_gp) = e(k+1,j+1,i) dissi(num_gp) = diss(k+1,j+1,i) de_dxi(num_gp) = de_dx(k+1,j+1,i) de_dyi(num_gp) = de_dy(k+1,j+1,i) de_dzi(num_gp) = de_dz(k+1,j+1,i) ENDIF IF ( k > nzb_s_inner(j,i+1) .OR. nzb_s_inner(j,i+1) == 0 ) & THEN num_gp = num_gp + 1 gp_outside_of_building(5) = 1 location(num_gp,1) = (i+1) * dx location(num_gp,2) = j * dy location(num_gp,3) = k * dz - 0.5 * dz ei(num_gp) = e(k,j,i+1) dissi(num_gp) = diss(k,j,i+1) de_dxi(num_gp) = de_dx(k,j,i+1) de_dyi(num_gp) = de_dy(k,j,i+1) de_dzi(num_gp) = de_dz(k,j,i+1) ENDIF IF ( k > nzb_s_inner(j+1,i+1) .OR. nzb_s_inner(j+1,i+1) == 0 ) & THEN num_gp = num_gp + 1 gp_outside_of_building(6) = 1 location(num_gp,1) = (i+1) * dx location(num_gp,2) = (j+1) * dy location(num_gp,3) = k * dz - 0.5 * dz ei(num_gp) = e(k,j+1,i+1) dissi(num_gp) = diss(k,j+1,i+1) de_dxi(num_gp) = de_dx(k,j+1,i+1) de_dyi(num_gp) = de_dy(k,j+1,i+1) de_dzi(num_gp) = de_dz(k,j+1,i+1) ENDIF IF ( k+1 > nzb_s_inner(j,i+1) .OR. 
nzb_s_inner(j,i+1) == 0 ) & THEN num_gp = num_gp + 1 gp_outside_of_building(7) = 1 location(num_gp,1) = (i+1) * dx location(num_gp,2) = j * dy location(num_gp,3) = (k+1) * dz - 0.5 * dz ei(num_gp) = e(k+1,j,i+1) dissi(num_gp) = diss(k+1,j,i+1) de_dxi(num_gp) = de_dx(k+1,j,i+1) de_dyi(num_gp) = de_dy(k+1,j,i+1) de_dzi(num_gp) = de_dz(k+1,j,i+1) ENDIF IF ( k+1 > nzb_s_inner(j+1,i+1) .OR. nzb_s_inner(j+1,i+1) == 0)& THEN num_gp = num_gp + 1 gp_outside_of_building(8) = 1 location(num_gp,1) = (i+1) * dx location(num_gp,2) = (j+1) * dy location(num_gp,3) = (k+1) * dz - 0.5 * dz ei(num_gp) = e(k+1,j+1,i+1) dissi(num_gp) = diss(k+1,j+1,i+1) de_dxi(num_gp) = de_dx(k+1,j+1,i+1) de_dyi(num_gp) = de_dy(k+1,j+1,i+1) de_dzi(num_gp) = de_dz(k+1,j+1,i+1) ENDIF ! !-- If all gridpoints are situated outside of a building, then the !-- ordinary interpolation scheme can be used. IF ( num_gp == 8 ) THEN x = particles(n)%x - i * dx y = particles(n)%y - j * dy aa = x**2 + y**2 bb = ( dx - x )**2 + y**2 cc = x**2 + ( dy - y )**2 dd = ( dx - x )**2 + ( dy - y )**2 gg = aa + bb + cc + dd e_int_l = (( gg-aa ) * e(k,j,i) + ( gg-bb ) * e(k,j,i+1) & + ( gg-cc ) * e(k,j+1,i) + ( gg-dd ) * e(k,j+1,i+1)& ) / ( 3.0 * gg ) IF ( k+1 == nzt+1 ) THEN e_int = e_int_l ELSE e_int_u = ( ( gg - aa ) * e(k+1,j,i) + & ( gg - bb ) * e(k+1,j,i+1) + & ( gg - cc ) * e(k+1,j+1,i) + & ( gg - dd ) * e(k+1,j+1,i+1) & ) / ( 3.0 * gg ) e_int = e_int_l + ( particles(n)%z - zu(k) ) / dz * & ( e_int_u - e_int_l ) ENDIF ! !-- Interpolate the TKE gradient along x (adopt incides i,j,k !-- and all position variables from above (TKE)) de_dx_int_l = ( ( gg - aa ) * de_dx(k,j,i) + & ( gg - bb ) * de_dx(k,j,i+1) + & ( gg - cc ) * de_dx(k,j+1,i) + & ( gg - dd ) * de_dx(k,j+1,i+1) & ) / ( 3.0 * gg ) IF ( ( k+1 == nzt+1 ) .OR. ( k == nzb ) ) THEN de_dx_int = de_dx_int_l ELSE de_dx_int_u = ( ( gg - aa ) * de_dx(k+1,j,i) + & ( gg - bb ) * de_dx(k+1,j,i+1) + & ( gg - cc ) * de_dx(k+1,j+1,i) + & ( gg - dd ) * de_dx(k+1,j+1,i+1) & ) / ( 3.0 * gg ) de_dx_int = de_dx_int_l + ( particles(n)%z - zu(k) ) / & dz * ( de_dx_int_u - de_dx_int_l ) ENDIF ! !-- Interpolate the TKE gradient along y de_dy_int_l = ( ( gg - aa ) * de_dy(k,j,i) + & ( gg - bb ) * de_dy(k,j,i+1) + & ( gg - cc ) * de_dy(k,j+1,i) + & ( gg - dd ) * de_dy(k,j+1,i+1) & ) / ( 3.0 * gg ) IF ( ( k+1 == nzt+1 ) .OR. ( k == nzb ) ) THEN de_dy_int = de_dy_int_l ELSE de_dy_int_u = ( ( gg - aa ) * de_dy(k+1,j,i) + & ( gg - bb ) * de_dy(k+1,j,i+1) + & ( gg - cc ) * de_dy(k+1,j+1,i) + & ( gg - dd ) * de_dy(k+1,j+1,i+1) & ) / ( 3.0 * gg ) de_dy_int = de_dy_int_l + ( particles(n)%z - zu(k) ) / & dz * ( de_dy_int_u - de_dy_int_l ) ENDIF ! !-- Interpolate the TKE gradient along z IF ( particles(n)%z < 0.5 * dz ) THEN de_dz_int = 0.0 ELSE de_dz_int_l = ( ( gg - aa ) * de_dz(k,j,i) + & ( gg - bb ) * de_dz(k,j,i+1) + & ( gg - cc ) * de_dz(k,j+1,i) + & ( gg - dd ) * de_dz(k,j+1,i+1) & ) / ( 3.0 * gg ) IF ( ( k+1 == nzt+1 ) .OR. ( k == nzb ) ) THEN de_dz_int = de_dz_int_l ELSE de_dz_int_u = ( ( gg - aa ) * de_dz(k+1,j,i) + & ( gg - bb ) * de_dz(k+1,j,i+1) + & ( gg - cc ) * de_dz(k+1,j+1,i) + & ( gg - dd ) * de_dz(k+1,j+1,i+1) & ) / ( 3.0 * gg ) de_dz_int = de_dz_int_l + ( particles(n)%z - zu(k) ) /& dz * ( de_dz_int_u - de_dz_int_l ) ENDIF ENDIF ! 
!-- Interpolate the dissipation of TKE diss_int_l = ( ( gg - aa ) * diss(k,j,i) + & ( gg - bb ) * diss(k,j,i+1) + & ( gg - cc ) * diss(k,j+1,i) + & ( gg - dd ) * diss(k,j+1,i+1) & ) / ( 3.0 * gg ) IF ( k+1 == nzt+1 ) THEN diss_int = diss_int_l ELSE diss_int_u = ( ( gg - aa ) * diss(k+1,j,i) + & ( gg - bb ) * diss(k+1,j,i+1) + & ( gg - cc ) * diss(k+1,j+1,i) + & ( gg - dd ) * diss(k+1,j+1,i+1) & ) / ( 3.0 * gg ) diss_int = diss_int_l + ( particles(n)%z - zu(k) ) / dz *& ( diss_int_u - diss_int_l ) ENDIF ELSE ! !-- If wall between gridpoint 1 and gridpoint 5, then !-- Neumann boundary condition has to be applied IF ( gp_outside_of_building(1) == 1 .AND. & gp_outside_of_building(5) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = i * dx + 0.5 * dx location(num_gp,2) = j * dy location(num_gp,3) = k * dz - 0.5 * dz ei(num_gp) = e(k,j,i) dissi(num_gp) = diss(k,j,i) de_dxi(num_gp) = 0.0 de_dyi(num_gp) = de_dy(k,j,i) de_dzi(num_gp) = de_dz(k,j,i) ENDIF IF ( gp_outside_of_building(5) == 1 .AND. & gp_outside_of_building(1) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = i * dx + 0.5 * dx location(num_gp,2) = j * dy location(num_gp,3) = k * dz - 0.5 * dz ei(num_gp) = e(k,j,i+1) dissi(num_gp) = diss(k,j,i+1) de_dxi(num_gp) = 0.0 de_dyi(num_gp) = de_dy(k,j,i+1) de_dzi(num_gp) = de_dz(k,j,i+1) ENDIF ! !-- If wall between gridpoint 5 and gridpoint 6, then !-- then Neumann boundary condition has to be applied IF ( gp_outside_of_building(5) == 1 .AND. & gp_outside_of_building(6) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = (i+1) * dx location(num_gp,2) = j * dy + 0.5 * dy location(num_gp,3) = k * dz - 0.5 * dz ei(num_gp) = e(k,j,i+1) dissi(num_gp) = diss(k,j,i+1) de_dxi(num_gp) = de_dx(k,j,i+1) de_dyi(num_gp) = 0.0 de_dzi(num_gp) = de_dz(k,j,i+1) ENDIF IF ( gp_outside_of_building(6) == 1 .AND. & gp_outside_of_building(5) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = (i+1) * dx location(num_gp,2) = j * dy + 0.5 * dy location(num_gp,3) = k * dz - 0.5 * dz ei(num_gp) = e(k,j+1,i+1) dissi(num_gp) = diss(k,j+1,i+1) de_dxi(num_gp) = de_dx(k,j+1,i+1) de_dyi(num_gp) = 0.0 de_dzi(num_gp) = de_dz(k,j+1,i+1) ENDIF ! !-- If wall between gridpoint 2 and gridpoint 6, then !-- Neumann boundary condition has to be applied IF ( gp_outside_of_building(2) == 1 .AND. & gp_outside_of_building(6) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = i * dx + 0.5 * dx location(num_gp,2) = (j+1) * dy location(num_gp,3) = k * dz - 0.5 * dz ei(num_gp) = e(k,j+1,i) dissi(num_gp) = diss(k,j+1,i) de_dxi(num_gp) = 0.0 de_dyi(num_gp) = de_dy(k,j+1,i) de_dzi(num_gp) = de_dz(k,j+1,i) ENDIF IF ( gp_outside_of_building(6) == 1 .AND. & gp_outside_of_building(2) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = i * dx + 0.5 * dx location(num_gp,2) = (j+1) * dy location(num_gp,3) = k * dz - 0.5 * dz ei(num_gp) = e(k,j+1,i+1) dissi(num_gp) = diss(k,j+1,i+1) de_dxi(num_gp) = 0.0 de_dyi(num_gp) = de_dy(k,j+1,i+1) de_dzi(num_gp) = de_dz(k,j+1,i+1) ENDIF ! !-- If wall between gridpoint 1 and gridpoint 2, then !-- Neumann boundary condition has to be applied IF ( gp_outside_of_building(1) == 1 .AND. & gp_outside_of_building(2) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = i * dx location(num_gp,2) = j * dy + 0.5 * dy location(num_gp,3) = k * dz - 0.5 * dz ei(num_gp) = e(k,j,i) dissi(num_gp) = diss(k,j,i) de_dxi(num_gp) = de_dx(k,j,i) de_dyi(num_gp) = 0.0 de_dzi(num_gp) = de_dz(k,j,i) ENDIF IF ( gp_outside_of_building(2) == 1 .AND. 
& gp_outside_of_building(1) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = i * dx location(num_gp,2) = j * dy + 0.5 * dy location(num_gp,3) = k * dz - 0.5 * dz ei(num_gp) = e(k,j+1,i) dissi(num_gp) = diss(k,j+1,i) de_dxi(num_gp) = de_dx(k,j+1,i) de_dyi(num_gp) = 0.0 de_dzi(num_gp) = de_dz(k,j+1,i) ENDIF ! !-- If wall between gridpoint 3 and gridpoint 7, then !-- Neumann boundary condition has to be applied IF ( gp_outside_of_building(3) == 1 .AND. & gp_outside_of_building(7) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = i * dx + 0.5 * dx location(num_gp,2) = j * dy location(num_gp,3) = k * dz + 0.5 * dz ei(num_gp) = e(k+1,j,i) dissi(num_gp) = diss(k+1,j,i) de_dxi(num_gp) = 0.0 de_dyi(num_gp) = de_dy(k+1,j,i) de_dzi(num_gp) = de_dz(k+1,j,i) ENDIF IF ( gp_outside_of_building(7) == 1 .AND. & gp_outside_of_building(3) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = i * dx + 0.5 * dx location(num_gp,2) = j * dy location(num_gp,3) = k * dz + 0.5 * dz ei(num_gp) = e(k+1,j,i+1) dissi(num_gp) = diss(k+1,j,i+1) de_dxi(num_gp) = 0.0 de_dyi(num_gp) = de_dy(k+1,j,i+1) de_dzi(num_gp) = de_dz(k+1,j,i+1) ENDIF ! !-- If wall between gridpoint 7 and gridpoint 8, then !-- Neumann boundary condition has to be applied IF ( gp_outside_of_building(7) == 1 .AND. & gp_outside_of_building(8) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = (i+1) * dx location(num_gp,2) = j * dy + 0.5 * dy location(num_gp,3) = k * dz + 0.5 * dz ei(num_gp) = e(k+1,j,i+1) dissi(num_gp) = diss(k+1,j,i+1) de_dxi(num_gp) = de_dx(k+1,j,i+1) de_dyi(num_gp) = 0.0 de_dzi(num_gp) = de_dz(k+1,j,i+1) ENDIF IF ( gp_outside_of_building(8) == 1 .AND. & gp_outside_of_building(7) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = (i+1) * dx location(num_gp,2) = j * dy + 0.5 * dy location(num_gp,3) = k * dz + 0.5 * dz ei(num_gp) = e(k+1,j+1,i+1) dissi(num_gp) = diss(k+1,j+1,i+1) de_dxi(num_gp) = de_dx(k+1,j+1,i+1) de_dyi(num_gp) = 0.0 de_dzi(num_gp) = de_dz(k+1,j+1,i+1) ENDIF ! !-- If wall between gridpoint 4 and gridpoint 8, then !-- Neumann boundary condition has to be applied IF ( gp_outside_of_building(4) == 1 .AND. & gp_outside_of_building(8) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = i * dx + 0.5 * dx location(num_gp,2) = (j+1) * dy location(num_gp,3) = k * dz + 0.5 * dz ei(num_gp) = e(k+1,j+1,i) dissi(num_gp) = diss(k+1,j+1,i) de_dxi(num_gp) = 0.0 de_dyi(num_gp) = de_dy(k+1,j+1,i) de_dzi(num_gp) = de_dz(k+1,j+1,i) ENDIF IF ( gp_outside_of_building(8) == 1 .AND. & gp_outside_of_building(4) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = i * dx + 0.5 * dx location(num_gp,2) = (j+1) * dy location(num_gp,3) = k * dz + 0.5 * dz ei(num_gp) = e(k+1,j+1,i+1) dissi(num_gp) = diss(k+1,j+1,i+1) de_dxi(num_gp) = 0.0 de_dyi(num_gp) = de_dy(k+1,j+1,i+1) de_dzi(num_gp) = de_dz(k+1,j+1,i+1) ENDIF ! !-- If wall between gridpoint 3 and gridpoint 4, then !-- Neumann boundary condition has to be applied IF ( gp_outside_of_building(3) == 1 .AND. & gp_outside_of_building(4) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = i * dx location(num_gp,2) = j * dy + 0.5 * dy location(num_gp,3) = k * dz + 0.5 * dz ei(num_gp) = e(k+1,j,i) dissi(num_gp) = diss(k+1,j,i) de_dxi(num_gp) = de_dx(k+1,j,i) de_dyi(num_gp) = 0.0 de_dzi(num_gp) = de_dz(k+1,j,i) ENDIF IF ( gp_outside_of_building(4) == 1 .AND. 
& gp_outside_of_building(3) == 0 ) THEN num_gp = num_gp + 1 location(num_gp,1) = i * dx location(num_gp,2) = j * dy + 0.5 * dy location(num_gp,3) = k * dz + 0.5 * dz ei(num_gp) = e(k+1,j+1,i) dissi(num_gp) = diss(k+1,j+1,i) de_dxi(num_gp) = de_dx(k+1,j+1,i) de_dyi(num_gp) = 0.0 de_dzi(num_gp) = de_dz(k+1,j+1,i) ENDIF ! !-- If wall between gridpoint 1 and gridpoint 3, then !-- Neumann boundary condition has to be applied !-- (only one case as only building beneath is possible) IF ( gp_outside_of_building(1) == 0 .AND. & gp_outside_of_building(3) == 1 ) THEN num_gp = num_gp + 1 location(num_gp,1) = i * dx location(num_gp,2) = j * dy location(num_gp,3) = k * dz ei(num_gp) = e(k+1,j,i) dissi(num_gp) = diss(k+1,j,i) de_dxi(num_gp) = de_dx(k+1,j,i) de_dyi(num_gp) = de_dy(k+1,j,i) de_dzi(num_gp) = 0.0 ENDIF ! !-- If wall between gridpoint 5 and gridpoint 7, then !-- Neumann boundary condition has to be applied !-- (only one case as only building beneath is possible) IF ( gp_outside_of_building(5) == 0 .AND. & gp_outside_of_building(7) == 1 ) THEN num_gp = num_gp + 1 location(num_gp,1) = (i+1) * dx location(num_gp,2) = j * dy location(num_gp,3) = k * dz ei(num_gp) = e(k+1,j,i+1) dissi(num_gp) = diss(k+1,j,i+1) de_dxi(num_gp) = de_dx(k+1,j,i+1) de_dyi(num_gp) = de_dy(k+1,j,i+1) de_dzi(num_gp) = 0.0 ENDIF ! !-- If wall between gridpoint 2 and gridpoint 4, then !-- Neumann boundary condition has to be applied !-- (only one case as only building beneath is possible) IF ( gp_outside_of_building(2) == 0 .AND. & gp_outside_of_building(4) == 1 ) THEN num_gp = num_gp + 1 location(num_gp,1) = i * dx location(num_gp,2) = (j+1) * dy location(num_gp,3) = k * dz ei(num_gp) = e(k+1,j+1,i) dissi(num_gp) = diss(k+1,j+1,i) de_dxi(num_gp) = de_dx(k+1,j+1,i) de_dyi(num_gp) = de_dy(k+1,j+1,i) de_dzi(num_gp) = 0.0 ENDIF ! !-- If wall between gridpoint 6 and gridpoint 8, then !-- Neumann boundary condition has to be applied !-- (only one case as only building beneath is possible) IF ( gp_outside_of_building(6) == 0 .AND. & gp_outside_of_building(8) == 1 ) THEN num_gp = num_gp + 1 location(num_gp,1) = (i+1) * dx location(num_gp,2) = (j+1) * dy location(num_gp,3) = k * dz ei(num_gp) = e(k+1,j+1,i+1) dissi(num_gp) = diss(k+1,j+1,i+1) de_dxi(num_gp) = de_dx(k+1,j+1,i+1) de_dyi(num_gp) = de_dy(k+1,j+1,i+1) de_dzi(num_gp) = 0.0 ENDIF ! !-- Carry out the interpolation IF ( num_gp == 1 ) THEN ! !-- If only one of the gridpoints is situated outside of the !-- building, it follows that the values at the particle !-- location are the same as the gridpoint values e_int = ei(num_gp) diss_int = dissi(num_gp) de_dx_int = de_dxi(num_gp) de_dy_int = de_dyi(num_gp) de_dz_int = de_dzi(num_gp) ELSE IF ( num_gp > 1 ) THEN d_sum = 0.0 ! !-- Evaluation of the distances between the gridpoints !-- contributing to the interpolated values, and the particle !-- location DO agp = 1, num_gp d_gp_pl(agp) = ( particles(n)%x-location(agp,1) )**2 & + ( particles(n)%y-location(agp,2) )**2 & + ( particles(n)%z-location(agp,3) )**2 d_sum = d_sum + d_gp_pl(agp) ENDDO ! 
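!-- As in the regular interpolation scheme above, the weights applied below,
!-- ( d_sum - d_gp_pl(agp) ) / ( (num_gp-1) * d_sum ), add up to
!-- ( num_gp * d_sum - d_sum ) / ( (num_gp-1) * d_sum ) = 1, so the value at
!-- the particle position is a normalized distance-weighted average over the
!-- num_gp grid points located outside of buildings.
!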
!-- Finally the interpolation can be carried out e_int = 0.0 diss_int = 0.0 de_dx_int = 0.0 de_dy_int = 0.0 de_dz_int = 0.0 DO agp = 1, num_gp e_int = e_int + ( d_sum - d_gp_pl(agp) ) * & ei(agp) / ( (num_gp-1) * d_sum ) diss_int = diss_int + ( d_sum - d_gp_pl(agp) ) * & dissi(agp) / ( (num_gp-1) * d_sum ) de_dx_int = de_dx_int + ( d_sum - d_gp_pl(agp) ) * & de_dxi(agp) / ( (num_gp-1) * d_sum ) de_dy_int = de_dy_int + ( d_sum - d_gp_pl(agp) ) * & de_dyi(agp) / ( (num_gp-1) * d_sum ) de_dz_int = de_dz_int + ( d_sum - d_gp_pl(agp) ) * & de_dzi(agp) / ( (num_gp-1) * d_sum ) ENDDO ENDIF ENDIF ENDIF ! !-- Vertically interpolate the horizontally averaged SGS TKE and !-- resolved-scale velocity variances and use the interpolated values !-- to calculate the coefficient fs, which is a measure of the ratio !-- of the subgrid-scale turbulent kinetic energy to the total amount !-- of turbulent kinetic energy. IF ( k == 0 ) THEN e_mean_int = hom(0,1,8,0) ELSE e_mean_int = hom(k,1,8,0) + & ( hom(k+1,1,8,0) - hom(k,1,8,0) ) / & ( zu(k+1) - zu(k) ) * & ( particles(n)%z - zu(k) ) ENDIF kw = particles(n)%z / dz IF ( k == 0 ) THEN aa = hom(k+1,1,30,0) * ( particles(n)%z / & ( 0.5 * ( zu(k+1) - zu(k) ) ) ) bb = hom(k+1,1,31,0) * ( particles(n)%z / & ( 0.5 * ( zu(k+1) - zu(k) ) ) ) cc = hom(kw+1,1,32,0) * ( particles(n)%z / & ( 1.0 * ( zw(kw+1) - zw(kw) ) ) ) ELSE aa = hom(k,1,30,0) + ( hom(k+1,1,30,0) - hom(k,1,30,0) ) * & ( ( particles(n)%z - zu(k) ) / ( zu(k+1) - zu(k) ) ) bb = hom(k,1,31,0) + ( hom(k+1,1,31,0) - hom(k,1,31,0) ) * & ( ( particles(n)%z - zu(k) ) / ( zu(k+1) - zu(k) ) ) cc = hom(kw,1,32,0) + ( hom(kw+1,1,32,0)-hom(kw,1,32,0) ) *& ( ( particles(n)%z - zw(kw) ) / ( zw(kw+1)-zw(kw) ) ) ENDIF vv_int = ( 1.0 / 3.0 ) * ( aa + bb + cc ) fs_int = ( 2.0 / 3.0 ) * e_mean_int / & ( vv_int + ( 2.0 / 3.0 ) * e_mean_int ) ! !-- Calculate the Lagrangian timescale according to the suggestion of !-- Weil et al. (2004). lagr_timescale = ( 4.0 * e_int ) / & ( 3.0 * fs_int * c_0 * diss_int ) ! !-- Calculate the next particle timestep. dt_gap is the time needed to !-- complete the current LES timestep. dt_gap = dt_3d - particles(n)%dt_sum dt_particle = MIN( dt_3d, 0.025 * lagr_timescale, dt_gap ) ! !-- The particle timestep should not be too small in order to prevent !-- the number of particle timesteps of getting too large IF ( dt_particle < dt_min_part .AND. dt_min_part < dt_gap ) & THEN dt_particle = dt_min_part ENDIF ! !-- Calculate the SGS velocity components IF ( particles(n)%age == 0.0 ) THEN ! !-- For new particles the SGS components are derived from the SGS !-- TKE. Limit the Gaussian random number to the interval !-- [-5.0*sigma, 5.0*sigma] in order to prevent the SGS velocities !-- from becoming unrealistically large. particles(n)%speed_x_sgs = SQRT( 2.0 * sgs_wfu_part * e_int ) *& ( random_gauss( iran_part, 5.0 ) & - 1.0 ) particles(n)%speed_y_sgs = SQRT( 2.0 * sgs_wfv_part * e_int ) *& ( random_gauss( iran_part, 5.0 ) & - 1.0 ) particles(n)%speed_z_sgs = SQRT( 2.0 * sgs_wfw_part * e_int ) *& ( random_gauss( iran_part, 5.0 ) & - 1.0 ) ELSE ! !-- Restriction of the size of the new timestep: compared to the !-- previous timestep the increase must not exceed 200% dt_particle_m = particles(n)%age - particles(n)%age_m IF ( dt_particle > 2.0 * dt_particle_m ) THEN dt_particle = 2.0 * dt_particle_m ENDIF ! !-- For old particles the SGS components are correlated with the !-- values from the previous timestep. Random numbers have also to !-- be limited (see above). 
!-- As negative values for the subgrid TKE are not allowed, the !-- change of the subgrid TKE with time cannot be smaller than !-- -e_int/dt_particle. This value is used as a lower boundary !-- value for the change of TKE de_dt_min = - e_int / dt_particle de_dt = ( e_int - particles(n)%e_m ) / dt_particle_m IF ( de_dt < de_dt_min ) THEN de_dt = de_dt_min ENDIF particles(n)%speed_x_sgs = particles(n)%speed_x_sgs - & fs_int * c_0 * diss_int * particles(n)%speed_x_sgs * & dt_particle / ( 4.0 * sgs_wfu_part * e_int ) + & ( 2.0 * sgs_wfu_part * de_dt * & particles(n)%speed_x_sgs / & ( 2.0 * sgs_wfu_part * e_int ) + de_dx_int & ) * dt_particle / 2.0 + & SQRT( fs_int * c_0 * diss_int ) * & ( random_gauss( iran_part, 5.0 ) - 1.0 ) * & SQRT( dt_particle ) particles(n)%speed_y_sgs = particles(n)%speed_y_sgs - & fs_int * c_0 * diss_int * particles(n)%speed_y_sgs * & dt_particle / ( 4.0 * sgs_wfv_part * e_int ) + & ( 2.0 * sgs_wfv_part * de_dt * & particles(n)%speed_y_sgs / & ( 2.0 * sgs_wfv_part * e_int ) + de_dy_int & ) * dt_particle / 2.0 + & SQRT( fs_int * c_0 * diss_int ) * & ( random_gauss( iran_part, 5.0 ) - 1.0 ) * & SQRT( dt_particle ) particles(n)%speed_z_sgs = particles(n)%speed_z_sgs - & fs_int * c_0 * diss_int * particles(n)%speed_z_sgs * & dt_particle / ( 4.0 * sgs_wfw_part * e_int ) + & ( 2.0 * sgs_wfw_part * de_dt * & particles(n)%speed_z_sgs / & ( 2.0 * sgs_wfw_part * e_int ) + de_dz_int & ) * dt_particle / 2.0 + & SQRT( fs_int * c_0 * diss_int ) * & ( random_gauss( iran_part, 5.0 ) - 1.0 ) * & SQRT( dt_particle ) ENDIF u_int = u_int + particles(n)%speed_x_sgs v_int = v_int + particles(n)%speed_y_sgs w_int = w_int + particles(n)%speed_z_sgs ! !-- Store the SGS TKE of the current timelevel which is needed for !-- for calculating the SGS particle velocities at the next timestep particles(n)%e_m = e_int ELSE ! !-- If no SGS velocities are used, only the particle timestep has to !-- be set dt_particle = dt_3d ENDIF ! !-- Remember the old age of the particle ( needed to prevent that a !-- particle crosses several PEs during one timestep and for the !-- evaluation of the subgrid particle velocity fluctuations ) particles(n)%age_m = particles(n)%age ! !-- Particle advection IF ( particle_groups(particles(n)%group)%density_ratio == 0.0 ) THEN ! !-- Pure passive transport (without particle inertia) particles(n)%x = particles(n)%x + u_int * dt_particle particles(n)%y = particles(n)%y + v_int * dt_particle particles(n)%z = particles(n)%z + w_int * dt_particle particles(n)%speed_x = u_int particles(n)%speed_y = v_int particles(n)%speed_z = w_int ELSE ! !-- Transport of particles with inertia particles(n)%x = particles(n)%x + particles(n)%speed_x * & dt_particle particles(n)%y = particles(n)%y + particles(n)%speed_y * & dt_particle particles(n)%z = particles(n)%z + particles(n)%speed_z * & dt_particle ! 
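!-- Note on the velocity update below: speed_x/y/z are advanced with the
!-- analytic solution of dv/dt = exp_arg * ( u_int - v ) over dt_particle
!-- (plus a buoyancy-reduced gravity term - ( 1 - dens_ratio ) * g in the
!-- vertical), the interpolated flow velocity being held constant during the
!-- sub-step. exp_arg = 4.5 * dens_ratio * molecular_viscosity / radius**2
!-- (set further above) can be read as the inverse of a Stokes-type relaxation
!-- time; for cloud droplets it is additionally modified by a Reynolds-number
!-- dependent drag factor ( 1 + 0.15 * Re**0.687 ). For
!-- dt_particle >> 1 / exp_arg the vertical component relaxes towards
!-- w_int - ( 1 - dens_ratio ) * g / exp_arg, i.e. a terminal settling
!-- velocity relative to the flow.
!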
!-- Update of the particle velocity dens_ratio = particle_groups(particles(n)%group)%density_ratio IF ( cloud_droplets ) THEN exp_arg = 4.5 * dens_ratio * molecular_viscosity / & ( particles(n)%radius )**2 / & ( 1.0 + 0.15 * ( 2.0 * particles(n)%radius * & SQRT( ( u_int - particles(n)%speed_x )**2 + & ( v_int - particles(n)%speed_y )**2 + & ( w_int - particles(n)%speed_z )**2 ) / & molecular_viscosity )**0.687 & ) exp_term = EXP( -exp_arg * dt_particle ) ELSEIF ( use_sgs_for_particles ) THEN exp_arg = particle_groups(particles(n)%group)%exp_arg exp_term = EXP( -exp_arg * dt_particle ) ELSE exp_arg = particle_groups(particles(n)%group)%exp_arg exp_term = particle_groups(particles(n)%group)%exp_term ENDIF particles(n)%speed_x = particles(n)%speed_x * exp_term + & u_int * ( 1.0 - exp_term ) particles(n)%speed_y = particles(n)%speed_y * exp_term + & v_int * ( 1.0 - exp_term ) particles(n)%speed_z = particles(n)%speed_z * exp_term + & ( w_int - ( 1.0 - dens_ratio ) * g / exp_arg ) & * ( 1.0 - exp_term ) ENDIF ! !-- Increment the particle age and the total time that the particle !-- has advanced within the particle timestep procedure particles(n)%age = particles(n)%age + dt_particle particles(n)%dt_sum = particles(n)%dt_sum + dt_particle ! !-- Check whether there is still a particle that has not yet completed !-- the total LES timestep IF ( ( dt_3d - particles(n)%dt_sum ) > 1E-8 ) THEN dt_3d_reached_l = .FALSE. ENDIF ENDDO ! advection loop ! !-- Particle reflection from walls CALL particle_boundary_conds ! !-- User-defined actions after the calculation of the new particle position CALL user_advec_particles ! !-- Find out, if all particles on every PE have completed the LES timestep !-- and set the switch corespondingly #if defined( __parallel ) CALL MPI_ALLREDUCE( dt_3d_reached_l, dt_3d_reached, 1, MPI_LOGICAL, & MPI_LAND, comm2d, ierr ) #else dt_3d_reached = dt_3d_reached_l #endif CALL cpu_log( log_point_s(44), 'advec_part_advec', 'stop' ) ! !-- Increment time since last release IF ( dt_3d_reached ) time_prel = time_prel + dt_3d ! WRITE ( 9, * ) '*** advec_particles: ##0.4' ! CALL local_flush( 9 ) ! nd = 0 ! DO n = 1, number_of_particles ! IF ( .NOT. particle_mask(n) ) nd = nd + 1 ! ENDDO ! IF ( nd /= deleted_particles ) THEN ! WRITE (9,*) '*** nd=',nd,' deleted_particles=',deleted_particles ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! !-- If necessary, release new set of particles IF ( time_prel >= dt_prel .AND. end_time_prel > simulated_time .AND. & dt_3d_reached ) THEN ! !-- Check, if particle storage must be extended IF ( number_of_particles + number_of_initial_particles > & maximum_number_of_particles ) THEN IF ( netcdf_output ) THEN message_string = 'maximum_number_of_particles ' // & 'needs to be increased ' // & '&but this is not allowed with ' // & 'NetCDF output switched on' CALL message( 'advec_particles', 'PA0146', 2, 2, -1, 6, 1 ) ELSE ! WRITE ( 9, * ) '*** advec_particles: before allocate_prt_memory dt_prel' ! CALL local_flush( 9 ) CALL allocate_prt_memory( number_of_initial_particles ) ! WRITE ( 9, * ) '*** advec_particles: after allocate_prt_memory dt_prel' ! CALL local_flush( 9 ) ENDIF ENDIF ! 
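!-- Presumably the restriction above (and the analogous one for the tails
!-- below) exists because the particle/tail dimensions of the NetCDF output
!-- file are fixed when that file is created, so the corresponding arrays must
!-- not grow while NetCDF particle output is switched on; without NetCDF
!-- output the storage is simply extended via allocate_prt_memory and
!-- allocate_tail_memory.
!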
!-- Check, if tail storage must be extended IF ( use_particle_tails ) THEN IF ( number_of_tails + number_of_initial_tails > & maximum_number_of_tails ) THEN IF ( netcdf_output ) THEN message_string = 'maximum_number_of_tails ' // & 'needs to be increased ' // & '&but this is not allowed wi' // & 'th NetCDF output switched on' CALL message( 'advec_particles', 'PA0147', 2, 2, -1, 6, 1 ) ELSE ! WRITE ( 9, * ) '*** advec_particles: before allocate_tail_memory dt_prel' ! CALL local_flush( 9 ) CALL allocate_tail_memory( number_of_initial_tails ) ! WRITE ( 9, * ) '*** advec_particles: after allocate_tail_memory dt_prel' ! CALL local_flush( 9 ) ENDIF ENDIF ENDIF ! !-- The MOD function allows for changes in the output interval with !-- restart runs. time_prel = MOD( time_prel, MAX( dt_prel, dt_3d ) ) IF ( number_of_initial_particles /= 0 ) THEN is = number_of_particles+1 ie = number_of_particles+number_of_initial_particles particles(is:ie) = initial_particles(1:number_of_initial_particles) ! !-- Add random fluctuation to particle positions. Particles should !-- remain in the subdomain. IF ( random_start_position ) THEN DO n = is, ie IF ( psl(particles(n)%group) /= psr(particles(n)%group) ) & THEN particles(n)%x = particles(n)%x + & ( random_function( iran_part ) - 0.5 ) *& pdx(particles(n)%group) IF ( particles(n)%x <= ( nxl - 0.5 ) * dx ) THEN particles(n)%x = ( nxl - 0.4999999999 ) * dx ELSEIF ( particles(n)%x >= ( nxr + 0.5 ) * dx ) THEN particles(n)%x = ( nxr + 0.4999999999 ) * dx ENDIF ENDIF IF ( pss(particles(n)%group) /= psn(particles(n)%group) ) & THEN particles(n)%y = particles(n)%y + & ( random_function( iran_part ) - 0.5 ) *& pdy(particles(n)%group) IF ( particles(n)%y <= ( nys - 0.5 ) * dy ) THEN particles(n)%y = ( nys - 0.4999999999 ) * dy ELSEIF ( particles(n)%y >= ( nyn + 0.5 ) * dy ) THEN particles(n)%y = ( nyn + 0.4999999999 ) * dy ENDIF ENDIF IF ( psb(particles(n)%group) /= pst(particles(n)%group) ) & THEN particles(n)%z = particles(n)%z + & ( random_function( iran_part ) - 0.5 ) *& pdz(particles(n)%group) ENDIF ENDDO ENDIF ! !-- Set the beginning of the new particle tails and their age IF ( use_particle_tails ) THEN DO n = is, ie ! !-- New particles which should have a tail, already have got a !-- provisional tail id unequal zero (see init_particles) IF ( particles(n)%tail_id /= 0 ) THEN number_of_tails = number_of_tails + 1 nn = number_of_tails particles(n)%tail_id = nn ! set the final tail id particle_tail_coordinates(1,1,nn) = particles(n)%x particle_tail_coordinates(1,2,nn) = particles(n)%y particle_tail_coordinates(1,3,nn) = particles(n)%z particle_tail_coordinates(1,4,nn) = particles(n)%color particles(n)%tailpoints = 1 IF ( minimum_tailpoint_distance /= 0.0 ) THEN particle_tail_coordinates(2,1,nn) = particles(n)%x particle_tail_coordinates(2,2,nn) = particles(n)%y particle_tail_coordinates(2,3,nn) = particles(n)%z particle_tail_coordinates(2,4,nn) = particles(n)%color particle_tail_coordinates(1:2,5,nn) = 0.0 particles(n)%tailpoints = 2 ENDIF ENDIF ENDDO ENDIF ! WRITE ( 9, * ) '*** advec_particles: after setting the beginning of new tails' ! CALL local_flush( 9 ) number_of_particles = number_of_particles + & number_of_initial_particles ENDIF ENDIF ! WRITE ( 9, * ) '*** advec_particles: ##0.5' ! CALL local_flush( 9 ) ! nd = 0 ! DO n = 1, number_of_particles ! IF ( .NOT. particle_mask(n) ) nd = nd + 1 ! ENDDO ! IF ( nd /= deleted_particles ) THEN ! WRITE (9,*) '*** nd=',nd,' deleted_particles=',deleted_particles ! CALL local_flush( 9 ) ! 
CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! IF ( number_of_particles /= number_of_tails ) THEN ! WRITE (9,*) '--- advec_particles: #2' ! WRITE (9,*) ' #of p=',number_of_particles,' #of t=',number_of_tails ! CALL local_flush( 9 ) ! ENDIF ! DO n = 1, number_of_particles ! IF ( particles(n)%tail_id<0 .OR. particles(n)%tail_id>number_of_tails ) & ! THEN ! WRITE (9,*) '+++ n=',n,' (of ',number_of_particles,')' ! WRITE (9,*) ' id=',particles(n)%tail_id,' of (',number_of_tails,')' ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! ENDDO #if defined( __parallel ) ! !-- As soon as one particle has moved beyond the boundary of the domain, it !-- is included in the relevant transfer arrays and marked for subsequent !-- deletion on this PE. !-- First run for crossings in x direction. Find out first the number of !-- particles to be transferred and allocate temporary arrays needed to store !-- them. !-- For a one-dimensional decomposition along y, no transfer is necessary, !-- because the particle remains on the PE. trlp_count = 0 trlpt_count = 0 trrp_count = 0 trrpt_count = 0 IF ( pdims(1) /= 1 ) THEN ! !-- First calculate the storage necessary for sending and receiving the !-- data DO n = 1, number_of_particles i = ( particles(n)%x + 0.5 * dx ) * ddx ! !-- Above calculation does not work for indices less than zero IF ( particles(n)%x < -0.5 * dx ) i = -1 IF ( i < nxl ) THEN trlp_count = trlp_count + 1 IF ( particles(n)%tail_id /= 0 ) trlpt_count = trlpt_count + 1 ELSEIF ( i > nxr ) THEN trrp_count = trrp_count + 1 IF ( particles(n)%tail_id /= 0 ) trrpt_count = trrpt_count + 1 ENDIF ENDDO IF ( trlp_count == 0 ) trlp_count = 1 IF ( trlpt_count == 0 ) trlpt_count = 1 IF ( trrp_count == 0 ) trrp_count = 1 IF ( trrpt_count == 0 ) trrpt_count = 1 ALLOCATE( trlp(trlp_count), trrp(trrp_count) ) trlp = particle_type( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, & 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, & 0.0, 0, 0, 0, 0 ) trrp = particle_type( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, & 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, & 0.0, 0, 0, 0, 0 ) IF ( use_particle_tails ) THEN ALLOCATE( trlpt(maximum_number_of_tailpoints,5,trlpt_count), & trrpt(maximum_number_of_tailpoints,5,trrpt_count) ) tlength = maximum_number_of_tailpoints * 5 ENDIF trlp_count = 0 trlpt_count = 0 trrp_count = 0 trrpt_count = 0 ENDIF ! WRITE ( 9, * ) '*** advec_particles: ##1' ! CALL local_flush( 9 ) ! nd = 0 ! DO n = 1, number_of_particles ! IF ( .NOT. particle_mask(n) ) nd = nd + 1 ! IF ( particles(n)%tail_id<0 .OR. particles(n)%tail_id>number_of_tails ) & ! THEN ! WRITE (9,*) '+++ n=',n,' (of ',number_of_particles,')' ! WRITE (9,*) ' id=',particles(n)%tail_id,' of (',number_of_tails,')' ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! ENDDO ! IF ( nd /= deleted_particles ) THEN ! WRITE (9,*) '*** nd=',nd,' deleted_particles=',deleted_particles ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF DO n = 1, number_of_particles nn = particles(n)%tail_id i = ( particles(n)%x + 0.5 * dx ) * ddx ! !-- Above calculation does not work for indices less than zero IF ( particles(n)%x < - 0.5 * dx ) i = -1 IF ( i < nxl ) THEN IF ( i < 0 ) THEN ! !-- Apply boundary condition along x IF ( ibc_par_lr == 0 ) THEN ! !-- Cyclic condition IF ( pdims(1) == 1 ) THEN particles(n)%x = ( nx + 1 ) * dx + particles(n)%x particles(n)%origin_x = ( nx + 1 ) * dx + & particles(n)%origin_x IF ( use_particle_tails .AND. 
nn /= 0 ) THEN i = particles(n)%tailpoints particle_tail_coordinates(1:i,1,nn) = ( nx + 1 ) * dx & + particle_tail_coordinates(1:i,1,nn) ENDIF ELSE trlp_count = trlp_count + 1 trlp(trlp_count) = particles(n) trlp(trlp_count)%x = ( nx + 1 ) * dx + trlp(trlp_count)%x trlp(trlp_count)%origin_x = trlp(trlp_count)%origin_x + & ( nx + 1 ) * dx particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN trlpt_count = trlpt_count + 1 trlpt(:,:,trlpt_count) = & particle_tail_coordinates(:,:,nn) trlpt(:,1,trlpt_count) = ( nx + 1 ) * dx + & trlpt(:,1,trlpt_count) tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ENDIF ELSEIF ( ibc_par_lr == 1 ) THEN ! !-- Particle absorption particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ELSEIF ( ibc_par_lr == 2 ) THEN ! !-- Particle reflection particles(n)%x = -particles(n)%x particles(n)%speed_x = -particles(n)%speed_x ENDIF ELSE ! !-- Store particle data in the transfer array, which will be send !-- to the neighbouring PE trlp_count = trlp_count + 1 trlp(trlp_count) = particles(n) particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN trlpt_count = trlpt_count + 1 trlpt(:,:,trlpt_count) = particle_tail_coordinates(:,:,nn) tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ENDIF ELSEIF ( i > nxr ) THEN IF ( i > nx ) THEN ! !-- Apply boundary condition along x IF ( ibc_par_lr == 0 ) THEN ! !-- Cyclic condition IF ( pdims(1) == 1 ) THEN particles(n)%x = particles(n)%x - ( nx + 1 ) * dx particles(n)%origin_x = particles(n)%origin_x - & ( nx + 1 ) * dx IF ( use_particle_tails .AND. nn /= 0 ) THEN i = particles(n)%tailpoints particle_tail_coordinates(1:i,1,nn) = - ( nx+1 ) * dx & + particle_tail_coordinates(1:i,1,nn) ENDIF ELSE trrp_count = trrp_count + 1 trrp(trrp_count) = particles(n) trrp(trrp_count)%x = trrp(trrp_count)%x - ( nx + 1 ) * dx trrp(trrp_count)%origin_x = trrp(trrp_count)%origin_x - & ( nx + 1 ) * dx particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN trrpt_count = trrpt_count + 1 trrpt(:,:,trrpt_count) = & particle_tail_coordinates(:,:,nn) trrpt(:,1,trrpt_count) = trrpt(:,1,trrpt_count) - & ( nx + 1 ) * dx tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ENDIF ELSEIF ( ibc_par_lr == 1 ) THEN ! !-- Particle absorption particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ELSEIF ( ibc_par_lr == 2 ) THEN ! !-- Particle reflection particles(n)%x = 2 * ( nx * dx ) - particles(n)%x particles(n)%speed_x = -particles(n)%speed_x ENDIF ELSE ! !-- Store particle data in the transfer array, which will be send !-- to the neighbouring PE trrp_count = trrp_count + 1 trrp(trrp_count) = particles(n) particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN trrpt_count = trrpt_count + 1 trrpt(:,:,trrpt_count) = particle_tail_coordinates(:,:,nn) tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ENDIF ENDIF ENDDO ! WRITE ( 9, * ) '*** advec_particles: ##2' ! CALL local_flush( 9 ) ! nd = 0 ! DO n = 1, number_of_particles ! IF ( .NOT. particle_mask(n) ) nd = nd + 1 ! IF ( particles(n)%tail_id<0 .OR. particles(n)%tail_id>number_of_tails ) & ! 
THEN ! WRITE (9,*) '+++ n=',n,' (of ',number_of_particles,')' ! WRITE (9,*) ' id=',particles(n)%tail_id,' of (',number_of_tails,')' ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! ENDDO ! IF ( nd /= deleted_particles ) THEN ! WRITE (9,*) '*** nd=',nd,' deleted_particles=',deleted_particles ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! !-- Send left boundary, receive right boundary (but first exchange how many !-- and check, if particle storage must be extended) IF ( pdims(1) /= 1 ) THEN CALL cpu_log( log_point_s(23), 'sendrcv_particles', 'start' ) CALL MPI_SENDRECV( trlp_count, 1, MPI_INTEGER, pleft, 0, & trrp_count_recv, 1, MPI_INTEGER, pright, 0, & comm2d, status, ierr ) IF ( number_of_particles + trrp_count_recv > & maximum_number_of_particles ) & THEN IF ( netcdf_output ) THEN message_string = 'maximum_number_of_particles ' // & 'needs to be increased ' // & '&but this is not allowed with ' // & 'NetCDF output switched on' CALL message( 'advec_particles', 'PA0146', 2, 2, -1, 6, 1 ) ELSE ! WRITE ( 9, * ) '*** advec_particles: before allocate_prt_memory trrp' ! CALL local_flush( 9 ) CALL allocate_prt_memory( trrp_count_recv ) ! WRITE ( 9, * ) '*** advec_particles: after allocate_prt_memory trrp' ! CALL local_flush( 9 ) ENDIF ENDIF CALL MPI_SENDRECV( trlp(1)%age, trlp_count, mpi_particle_type, & pleft, 1, particles(number_of_particles+1)%age, & trrp_count_recv, mpi_particle_type, pright, 1, & comm2d, status, ierr ) IF ( use_particle_tails ) THEN CALL MPI_SENDRECV( trlpt_count, 1, MPI_INTEGER, pleft, 0, & trrpt_count_recv, 1, MPI_INTEGER, pright, 0, & comm2d, status, ierr ) IF ( number_of_tails+trrpt_count_recv > maximum_number_of_tails ) & THEN IF ( netcdf_output ) THEN message_string = 'maximum_number_of_tails ' // & 'needs to be increased ' // & '&but this is not allowed wi'// & 'th NetCDF output switched on' CALL message( 'advec_particles', 'PA0147', 2, 2, -1, 6, 1 ) ELSE ! WRITE ( 9, * ) '*** advec_particles: before allocate_tail_memory trrpt' ! CALL local_flush( 9 ) CALL allocate_tail_memory( trrpt_count_recv ) ! WRITE ( 9, * ) '*** advec_particles: after allocate_tail_memory trrpt' ! CALL local_flush( 9 ) ENDIF ENDIF CALL MPI_SENDRECV( trlpt(1,1,1), trlpt_count*tlength, MPI_REAL, & pleft, 1, & particle_tail_coordinates(1,1,number_of_tails+1), & trrpt_count_recv*tlength, MPI_REAL, pright, 1, & comm2d, status, ierr ) ! !-- Update the tail ids for the transferred particles nn = number_of_tails DO n = number_of_particles+1, number_of_particles+trrp_count_recv IF ( particles(n)%tail_id /= 0 ) THEN nn = nn + 1 particles(n)%tail_id = nn ENDIF ENDDO ENDIF number_of_particles = number_of_particles + trrp_count_recv number_of_tails = number_of_tails + trrpt_count_recv ! IF ( number_of_particles /= number_of_tails ) THEN ! WRITE (9,*) '--- advec_particles: #3' ! WRITE (9,*) ' #of p=',number_of_particles,' #of t=',number_of_tails ! CALL local_flush( 9 ) ! ENDIF ! !-- Send right boundary, receive left boundary CALL MPI_SENDRECV( trrp_count, 1, MPI_INTEGER, pright, 0, & trlp_count_recv, 1, MPI_INTEGER, pleft, 0, & comm2d, status, ierr ) IF ( number_of_particles + trlp_count_recv > & maximum_number_of_particles ) & THEN IF ( netcdf_output ) THEN message_string = 'maximum_number_of_particles ' // & 'needs to be increased ' // & '&but this is not allowed with '// & 'NetCDF output switched on' CALL message( 'advec_particles', 'PA0146', 2, 2, -1, 6, 1 ) ELSE ! WRITE ( 9, * ) '*** advec_particles: before allocate_prt_memory trlp' ! 
CALL local_flush( 9 ) CALL allocate_prt_memory( trlp_count_recv ) ! WRITE ( 9, * ) '*** advec_particles: after allocate_prt_memory trlp' ! CALL local_flush( 9 ) ENDIF ENDIF CALL MPI_SENDRECV( trrp(1)%age, trrp_count, mpi_particle_type, & pright, 1, particles(number_of_particles+1)%age, & trlp_count_recv, mpi_particle_type, pleft, 1, & comm2d, status, ierr ) IF ( use_particle_tails ) THEN CALL MPI_SENDRECV( trrpt_count, 1, MPI_INTEGER, pright, 0, & trlpt_count_recv, 1, MPI_INTEGER, pleft, 0, & comm2d, status, ierr ) IF ( number_of_tails+trlpt_count_recv > maximum_number_of_tails ) & THEN IF ( netcdf_output ) THEN message_string = 'maximum_number_of_tails ' // & 'needs to be increased ' // & '&but this is not allowed wi'// & 'th NetCDF output switched on' CALL message( 'advec_particles', 'PA0147', 2, 2, -1, 6, 1 ) ELSE ! WRITE ( 9, * ) '*** advec_particles: before allocate_tail_memory trlpt' ! CALL local_flush( 9 ) CALL allocate_tail_memory( trlpt_count_recv ) ! WRITE ( 9, * ) '*** advec_particles: after allocate_tail_memory trlpt' ! CALL local_flush( 9 ) ENDIF ENDIF CALL MPI_SENDRECV( trrpt(1,1,1), trrpt_count*tlength, MPI_REAL, & pright, 1, & particle_tail_coordinates(1,1,number_of_tails+1), & trlpt_count_recv*tlength, MPI_REAL, pleft, 1, & comm2d, status, ierr ) ! !-- Update the tail ids for the transferred particles nn = number_of_tails DO n = number_of_particles+1, number_of_particles+trlp_count_recv IF ( particles(n)%tail_id /= 0 ) THEN nn = nn + 1 particles(n)%tail_id = nn ENDIF ENDDO ENDIF number_of_particles = number_of_particles + trlp_count_recv number_of_tails = number_of_tails + trlpt_count_recv ! IF ( number_of_particles /= number_of_tails ) THEN ! WRITE (9,*) '--- advec_particles: #4' ! WRITE (9,*) ' #of p=',number_of_particles,' #of t=',number_of_tails ! CALL local_flush( 9 ) ! ENDIF IF ( use_particle_tails ) THEN DEALLOCATE( trlpt, trrpt ) ENDIF DEALLOCATE( trlp, trrp ) CALL cpu_log( log_point_s(23), 'sendrcv_particles', 'pause' ) ENDIF ! WRITE ( 9, * ) '*** advec_particles: ##3' ! CALL local_flush( 9 ) ! nd = 0 ! DO n = 1, number_of_particles ! IF ( .NOT. particle_mask(n) ) nd = nd + 1 ! IF ( particles(n)%tail_id<0 .OR. particles(n)%tail_id>number_of_tails ) & ! THEN ! WRITE (9,*) '+++ n=',n,' (of ',number_of_particles,')' ! WRITE (9,*) ' id=',particles(n)%tail_id,' of (',number_of_tails,')' ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! ENDDO ! IF ( nd /= deleted_particles ) THEN ! WRITE (9,*) '*** nd=',nd,' deleted_particles=',deleted_particles ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! !-- Check whether particles have crossed the boundaries in y direction. Note !-- that this case can also apply to particles that have just been received !-- from the adjacent right or left PE. !-- Find out first the number of particles to be transferred and allocate !-- temporary arrays needed to store them. !-- For a one-dimensional decomposition along x, no transfer is necessary, !-- because the particle remains on the PE. trsp_count = 0 trspt_count = 0 trnp_count = 0 trnpt_count = 0 IF ( pdims(2) /= 1 ) THEN ! !-- First calculate the storage necessary for sending and receiving the !-- data DO n = 1, number_of_particles IF ( particle_mask(n) ) THEN j = ( particles(n)%y + 0.5 * dy ) * ddy ! 
!-- Above calculation does not work for indices less than zero IF ( particles(n)%y < -0.5 * dy ) j = -1 IF ( j < nys ) THEN trsp_count = trsp_count + 1 IF ( particles(n)%tail_id /= 0 ) trspt_count = trspt_count+1 ELSEIF ( j > nyn ) THEN trnp_count = trnp_count + 1 IF ( particles(n)%tail_id /= 0 ) trnpt_count = trnpt_count+1 ENDIF ENDIF ENDDO IF ( trsp_count == 0 ) trsp_count = 1 IF ( trspt_count == 0 ) trspt_count = 1 IF ( trnp_count == 0 ) trnp_count = 1 IF ( trnpt_count == 0 ) trnpt_count = 1 ALLOCATE( trsp(trsp_count), trnp(trnp_count) ) trsp = particle_type( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, & 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, & 0.0, 0, 0, 0, 0 ) trnp = particle_type( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, & 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, & 0.0, 0, 0, 0, 0 ) IF ( use_particle_tails ) THEN ALLOCATE( trspt(maximum_number_of_tailpoints,5,trspt_count), & trnpt(maximum_number_of_tailpoints,5,trnpt_count) ) tlength = maximum_number_of_tailpoints * 5 ENDIF trsp_count = 0 trspt_count = 0 trnp_count = 0 trnpt_count = 0 ENDIF ! WRITE ( 9, * ) '*** advec_particles: ##4' ! CALL local_flush( 9 ) ! nd = 0 ! DO n = 1, number_of_particles ! IF ( .NOT. particle_mask(n) ) nd = nd + 1 ! IF ( particles(n)%tail_id<0 .OR. particles(n)%tail_id>number_of_tails ) & ! THEN ! WRITE (9,*) '+++ n=',n,' (of ',number_of_particles,')' ! WRITE (9,*) ' id=',particles(n)%tail_id,' of (',number_of_tails,')' ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! ENDDO ! IF ( nd /= deleted_particles ) THEN ! WRITE (9,*) '*** nd=',nd,' deleted_particles=',deleted_particles ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF DO n = 1, number_of_particles nn = particles(n)%tail_id ! !-- Only those particles that have not been marked as 'deleted' may be !-- moved. IF ( particle_mask(n) ) THEN j = ( particles(n)%y + 0.5 * dy ) * ddy ! !-- Above calculation does not work for indices less than zero IF ( particles(n)%y < -0.5 * dy ) j = -1 IF ( j < nys ) THEN IF ( j < 0 ) THEN ! !-- Apply boundary condition along y IF ( ibc_par_ns == 0 ) THEN ! !-- Cyclic condition IF ( pdims(2) == 1 ) THEN particles(n)%y = ( ny + 1 ) * dy + particles(n)%y particles(n)%origin_y = ( ny + 1 ) * dy + & particles(n)%origin_y IF ( use_particle_tails .AND. nn /= 0 ) THEN i = particles(n)%tailpoints particle_tail_coordinates(1:i,2,nn) = ( ny+1 ) * dy& + particle_tail_coordinates(1:i,2,nn) ENDIF ELSE trsp_count = trsp_count + 1 trsp(trsp_count) = particles(n) trsp(trsp_count)%y = ( ny + 1 ) * dy + & trsp(trsp_count)%y trsp(trsp_count)%origin_y = trsp(trsp_count)%origin_y & + ( ny + 1 ) * dy particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN trspt_count = trspt_count + 1 trspt(:,:,trspt_count) = & particle_tail_coordinates(:,:,nn) trspt(:,2,trspt_count) = ( ny + 1 ) * dy + & trspt(:,2,trspt_count) tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ENDIF ELSEIF ( ibc_par_ns == 1 ) THEN ! !-- Particle absorption particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ELSEIF ( ibc_par_ns == 2 ) THEN ! !-- Particle reflection particles(n)%y = -particles(n)%y particles(n)%speed_y = -particles(n)%speed_y ENDIF ELSE ! 
!-- Store particle data in the transfer array, which will be sent !-- to the neighbouring PE trsp_count = trsp_count + 1 trsp(trsp_count) = particles(n) particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN trspt_count = trspt_count + 1 trspt(:,:,trspt_count) = particle_tail_coordinates(:,:,nn) tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ENDIF ELSEIF ( j > nyn ) THEN IF ( j > ny ) THEN ! !-- Apply boundary condition along y IF ( ibc_par_ns == 0 ) THEN ! !-- Cyclic condition IF ( pdims(2) == 1 ) THEN particles(n)%y = particles(n)%y - ( ny + 1 ) * dy particles(n)%origin_y = particles(n)%origin_y - & ( ny + 1 ) * dy IF ( use_particle_tails .AND. nn /= 0 ) THEN i = particles(n)%tailpoints particle_tail_coordinates(1:i,2,nn) = - (ny+1) * dy& + particle_tail_coordinates(1:i,2,nn) ENDIF ELSE trnp_count = trnp_count + 1 trnp(trnp_count) = particles(n) trnp(trnp_count)%y = trnp(trnp_count)%y - & ( ny + 1 ) * dy trnp(trnp_count)%origin_y = trnp(trnp_count)%origin_y & - ( ny + 1 ) * dy particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN trnpt_count = trnpt_count + 1 trnpt(:,:,trnpt_count) = & particle_tail_coordinates(:,:,nn) trnpt(:,2,trnpt_count) = trnpt(:,2,trnpt_count) - & ( ny + 1 ) * dy tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ENDIF ELSEIF ( ibc_par_ns == 1 ) THEN ! !-- Particle absorption particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ELSEIF ( ibc_par_ns == 2 ) THEN ! !-- Particle reflection particles(n)%y = 2 * ( ny * dy ) - particles(n)%y particles(n)%speed_y = -particles(n)%speed_y ENDIF ELSE ! !-- Store particle data in the transfer array, which will be sent !-- to the neighbouring PE trnp_count = trnp_count + 1 trnp(trnp_count) = particles(n) particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN trnpt_count = trnpt_count + 1 trnpt(:,:,trnpt_count) = particle_tail_coordinates(:,:,nn) tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ENDIF ENDIF ENDIF ENDDO ! WRITE ( 9, * ) '*** advec_particles: ##5' ! CALL local_flush( 9 ) ! nd = 0 ! DO n = 1, number_of_particles ! IF ( .NOT. particle_mask(n) ) nd = nd + 1 ! IF ( particles(n)%tail_id<0 .OR. particles(n)%tail_id>number_of_tails ) & ! THEN ! WRITE (9,*) '+++ n=',n,' (of ',number_of_particles,')' ! WRITE (9,*) ' id=',particles(n)%tail_id,' of (',number_of_tails,')' ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! ENDDO ! IF ( nd /= deleted_particles ) THEN ! WRITE (9,*) '*** nd=',nd,' deleted_particles=',deleted_particles ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! !-- Send front boundary, receive back boundary (but first exchange how many !-- and check, if particle storage must be extended) IF ( pdims(2) /= 1 ) THEN CALL cpu_log( log_point_s(23), 'sendrcv_particles', 'continue' ) CALL MPI_SENDRECV( trsp_count, 1, MPI_INTEGER, psouth, 0, & trnp_count_recv, 1, MPI_INTEGER, pnorth, 0, & comm2d, status, ierr ) IF ( number_of_particles + trnp_count_recv > & maximum_number_of_particles ) & THEN IF ( netcdf_output ) THEN message_string = 'maximum_number_of_particles ' // & 'needs to be increased ' // & '&but this is not allowed with '// & 'NetCDF output switched on' CALL message( 'advec_particles', 'PA0146', 2, 2, -1, 6, 1 ) ELSE !
WRITE ( 9, * ) '*** advec_particles: before allocate_prt_memory trnp' ! CALL local_flush( 9 ) CALL allocate_prt_memory( trnp_count_recv ) ! WRITE ( 9, * ) '*** advec_particles: after allocate_prt_memory trnp' ! CALL local_flush( 9 ) ENDIF ENDIF CALL MPI_SENDRECV( trsp(1)%age, trsp_count, mpi_particle_type, & psouth, 1, particles(number_of_particles+1)%age, & trnp_count_recv, mpi_particle_type, pnorth, 1, & comm2d, status, ierr ) IF ( use_particle_tails ) THEN CALL MPI_SENDRECV( trspt_count, 1, MPI_INTEGER, psouth, 0, & trnpt_count_recv, 1, MPI_INTEGER, pnorth, 0, & comm2d, status, ierr ) IF ( number_of_tails+trnpt_count_recv > maximum_number_of_tails ) & THEN IF ( netcdf_output ) THEN message_string = 'maximum_number_of_tails ' // & 'needs to be increased ' // & '&but this is not allowed wi' // & 'th NetCDF output switched on' CALL message( 'advec_particles', 'PA0147', 2, 2, -1, 6, 1 ) ELSE ! WRITE ( 9, * ) '*** advec_particles: before allocate_tail_memory trnpt' ! CALL local_flush( 9 ) CALL allocate_tail_memory( trnpt_count_recv ) ! WRITE ( 9, * ) '*** advec_particles: after allocate_tail_memory trnpt' ! CALL local_flush( 9 ) ENDIF ENDIF CALL MPI_SENDRECV( trspt(1,1,1), trspt_count*tlength, MPI_REAL, & psouth, 1, & particle_tail_coordinates(1,1,number_of_tails+1), & trnpt_count_recv*tlength, MPI_REAL, pnorth, 1, & comm2d, status, ierr ) ! !-- Update the tail ids for the transferred particles nn = number_of_tails DO n = number_of_particles+1, number_of_particles+trnp_count_recv IF ( particles(n)%tail_id /= 0 ) THEN nn = nn + 1 particles(n)%tail_id = nn ENDIF ENDDO ENDIF number_of_particles = number_of_particles + trnp_count_recv number_of_tails = number_of_tails + trnpt_count_recv ! IF ( number_of_particles /= number_of_tails ) THEN ! WRITE (9,*) '--- advec_particles: #5' ! WRITE (9,*) ' #of p=',number_of_particles,' #of t=',number_of_tails ! CALL local_flush( 9 ) ! ENDIF ! !-- Send back boundary, receive front boundary CALL MPI_SENDRECV( trnp_count, 1, MPI_INTEGER, pnorth, 0, & trsp_count_recv, 1, MPI_INTEGER, psouth, 0, & comm2d, status, ierr ) IF ( number_of_particles + trsp_count_recv > & maximum_number_of_particles ) & THEN IF ( netcdf_output ) THEN message_string = 'maximum_number_of_particles ' // & 'needs to be increased ' // & '&but this is not allowed with ' // & 'NetCDF output switched on' CALL message( 'advec_particles', 'PA0146', 2, 2, -1, 6, 1 ) ELSE ! WRITE ( 9, * ) '*** advec_particles: before allocate_prt_memory trsp' ! CALL local_flush( 9 ) CALL allocate_prt_memory( trsp_count_recv ) ! WRITE ( 9, * ) '*** advec_particles: after allocate_prt_memory trsp' ! CALL local_flush( 9 ) ENDIF ENDIF CALL MPI_SENDRECV( trnp(1)%age, trnp_count, mpi_particle_type, & pnorth, 1, particles(number_of_particles+1)%age, & trsp_count_recv, mpi_particle_type, psouth, 1, & comm2d, status, ierr ) IF ( use_particle_tails ) THEN CALL MPI_SENDRECV( trnpt_count, 1, MPI_INTEGER, pnorth, 0, & trspt_count_recv, 1, MPI_INTEGER, psouth, 0, & comm2d, status, ierr ) IF ( number_of_tails+trspt_count_recv > maximum_number_of_tails ) & THEN IF ( netcdf_output ) THEN message_string = 'maximum_number_of_tails ' // & 'needs to be increased ' // & '&but this is not allowed wi'// & 'th NetCDF output switched on' CALL message( 'advec_particles', 'PA0147', 2, 2, -1, 6, 1 ) ELSE ! WRITE ( 9, * ) '*** advec_particles: before allocate_tail_memory trspt' ! CALL local_flush( 9 ) CALL allocate_tail_memory( trspt_count_recv ) ! WRITE ( 9, * ) '*** advec_particles: after allocate_tail_memory trspt' ! 
CALL local_flush( 9 ) ENDIF ENDIF CALL MPI_SENDRECV( trnpt(1,1,1), trnpt_count*tlength, MPI_REAL, & pnorth, 1, & particle_tail_coordinates(1,1,number_of_tails+1), & trspt_count_recv*tlength, MPI_REAL, psouth, 1, & comm2d, status, ierr ) ! !-- Update the tail ids for the transferred particles nn = number_of_tails DO n = number_of_particles+1, number_of_particles+trsp_count_recv IF ( particles(n)%tail_id /= 0 ) THEN nn = nn + 1 particles(n)%tail_id = nn ENDIF ENDDO ENDIF number_of_particles = number_of_particles + trsp_count_recv number_of_tails = number_of_tails + trspt_count_recv ! IF ( number_of_particles /= number_of_tails ) THEN ! WRITE (9,*) '--- advec_particles: #6' ! WRITE (9,*) ' #of p=',number_of_particles,' #of t=',number_of_tails ! CALL local_flush( 9 ) ! ENDIF IF ( use_particle_tails ) THEN DEALLOCATE( trspt, trnpt ) ENDIF DEALLOCATE( trsp, trnp ) CALL cpu_log( log_point_s(23), 'sendrcv_particles', 'stop' ) ENDIF ! WRITE ( 9, * ) '*** advec_particles: ##6' ! CALL local_flush( 9 ) ! nd = 0 ! DO n = 1, number_of_particles ! IF ( .NOT. particle_mask(n) ) nd = nd + 1 ! IF ( particles(n)%tail_id<0 .OR. particles(n)%tail_id>number_of_tails ) & ! THEN ! WRITE (9,*) '+++ n=',n,' (of ',number_of_particles,')' ! WRITE (9,*) ' id=',particles(n)%tail_id,' of (',number_of_tails,')' ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! ENDDO ! IF ( nd /= deleted_particles ) THEN ! WRITE (9,*) '*** nd=',nd,' deleted_particles=',deleted_particles ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF #else ! !-- Apply boundary conditions DO n = 1, number_of_particles nn = particles(n)%tail_id IF ( particles(n)%x < -0.5 * dx ) THEN IF ( ibc_par_lr == 0 ) THEN ! !-- Cyclic boundary. Relevant coordinate has to be changed. particles(n)%x = ( nx + 1 ) * dx + particles(n)%x IF ( use_particle_tails .AND. nn /= 0 ) THEN i = particles(n)%tailpoints particle_tail_coordinates(1:i,1,nn) = ( nx + 1 ) * dx + & particle_tail_coordinates(1:i,1,nn) ENDIF ELSEIF ( ibc_par_lr == 1 ) THEN ! !-- Particle absorption particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ELSEIF ( ibc_par_lr == 2 ) THEN ! !-- Particle reflection particles(n)%x = -dx - particles(n)%x particles(n)%speed_x = -particles(n)%speed_x ENDIF ELSEIF ( particles(n)%x >= ( nx + 0.5 ) * dx ) THEN IF ( ibc_par_lr == 0 ) THEN ! !-- Cyclic boundary. Relevant coordinate has to be changed. particles(n)%x = particles(n)%x - ( nx + 1 ) * dx IF ( use_particle_tails .AND. nn /= 0 ) THEN i = particles(n)%tailpoints particle_tail_coordinates(1:i,1,nn) = - ( nx + 1 ) * dx + & particle_tail_coordinates(1:i,1,nn) ENDIF ELSEIF ( ibc_par_lr == 1 ) THEN ! !-- Particle absorption particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ELSEIF ( ibc_par_lr == 2 ) THEN ! !-- Particle reflection particles(n)%x = ( nx + 1 ) * dx - particles(n)%x particles(n)%speed_x = -particles(n)%speed_x ENDIF ENDIF IF ( particles(n)%y < -0.5 * dy ) THEN IF ( ibc_par_ns == 0 ) THEN ! !-- Cyclic boundary. Relevant coordinate has to be changed. particles(n)%y = ( ny + 1 ) * dy + particles(n)%y IF ( use_particle_tails .AND. nn /= 0 ) THEN i = particles(n)%tailpoints particle_tail_coordinates(1:i,2,nn) = ( ny + 1 ) * dy + & particle_tail_coordinates(1:i,2,nn) ENDIF ELSEIF ( ibc_par_ns == 1 ) THEN ! 
!-- Particle absorption particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ELSEIF ( ibc_par_ns == 2 ) THEN ! !-- Particle reflection particles(n)%y = -dy - particles(n)%y particles(n)%speed_y = -particles(n)%speed_y ENDIF ELSEIF ( particles(n)%y >= ( ny + 0.5 ) * dy ) THEN IF ( ibc_par_ns == 0 ) THEN ! !-- Cyclic boundary. Relevant coordinate has to be changed. particles(n)%y = particles(n)%y - ( ny + 1 ) * dy IF ( use_particle_tails .AND. nn /= 0 ) THEN i = particles(n)%tailpoints particle_tail_coordinates(1:i,2,nn) = - ( ny + 1 ) * dy + & particle_tail_coordinates(1:i,2,nn) ENDIF ELSEIF ( ibc_par_ns == 1 ) THEN ! !-- Particle absorption particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ELSEIF ( ibc_par_ns == 2 ) THEN ! !-- Particle reflection particles(n)%y = ( ny + 1 ) * dy - particles(n)%y particles(n)%speed_y = -particles(n)%speed_y ENDIF ENDIF ENDDO #endif ! !-- Apply boundary conditions to those particles that have crossed the top or !-- bottom boundary and delete those particles, which are older than allowed DO n = 1, number_of_particles nn = particles(n)%tail_id ! !-- Stop if particles have moved further than the length of one !-- PE subdomain (newly released particles have age = age_m!) IF ( particles(n)%age /= particles(n)%age_m ) THEN IF ( ABS(particles(n)%speed_x) > & ((nxr-nxl+2)*dx)/(particles(n)%age-particles(n)%age_m) .OR. & ABS(particles(n)%speed_y) > & ((nyn-nys+2)*dy)/(particles(n)%age-particles(n)%age_m) ) THEN WRITE( message_string, * ) 'particle too fast. n = ', n CALL message( 'advec_particles', 'PA0148', 2, 2, -1, 6, 1 ) ENDIF ENDIF IF ( particles(n)%age > particle_maximum_age .AND. & particle_mask(n) ) & THEN particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ENDIF IF ( particles(n)%z >= zu(nz) .AND. particle_mask(n) ) THEN IF ( ibc_par_t == 1 ) THEN ! !-- Particle absorption particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ELSEIF ( ibc_par_t == 2 ) THEN ! !-- Particle reflection particles(n)%z = 2.0 * zu(nz) - particles(n)%z particles(n)%speed_z = -particles(n)%speed_z IF ( use_sgs_for_particles .AND. & particles(n)%speed_z_sgs > 0.0 ) THEN particles(n)%speed_z_sgs = -particles(n)%speed_z_sgs ENDIF IF ( use_particle_tails .AND. nn /= 0 ) THEN particle_tail_coordinates(1,3,nn) = 2.0 * zu(nz) - & particle_tail_coordinates(1,3,nn) ENDIF ENDIF ENDIF IF ( particles(n)%z < zw(0) .AND. particle_mask(n) ) THEN IF ( ibc_par_b == 1 ) THEN ! !-- Particle absorption particle_mask(n) = .FALSE. deleted_particles = deleted_particles + 1 IF ( use_particle_tails .AND. nn /= 0 ) THEN tail_mask(nn) = .FALSE. deleted_tails = deleted_tails + 1 ENDIF ELSEIF ( ibc_par_b == 2 ) THEN ! !-- Particle reflection particles(n)%z = 2.0 * zw(0) - particles(n)%z particles(n)%speed_z = -particles(n)%speed_z IF ( use_sgs_for_particles .AND. & particles(n)%speed_z_sgs < 0.0 ) THEN particles(n)%speed_z_sgs = -particles(n)%speed_z_sgs ENDIF IF ( use_particle_tails .AND. 
nn /= 0 ) THEN particle_tail_coordinates(1,3,nn) = 2.0 * zw(0) - & particle_tail_coordinates(1,3,nn) ENDIF ENDIF ENDIF ENDDO ! WRITE ( 9, * ) '*** advec_particles: ##7' ! CALL local_flush( 9 ) ! nd = 0 ! DO n = 1, number_of_particles ! IF ( .NOT. particle_mask(n) ) nd = nd + 1 ! IF ( particles(n)%tail_id<0 .OR. particles(n)%tail_id>number_of_tails ) & ! THEN ! WRITE (9,*) '+++ n=',n,' (of ',number_of_particles,')' ! WRITE (9,*) ' id=',particles(n)%tail_id,' of (',number_of_tails,')' ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! ENDDO ! IF ( nd /= deleted_particles ) THEN ! WRITE (9,*) '*** nd=',nd,' deleted_particles=',deleted_particles ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! !-- Pack particles (eliminate those marked for deletion), !-- determine new number of particles IF ( number_of_particles > 0 .AND. deleted_particles > 0 ) THEN nn = 0 nd = 0 DO n = 1, number_of_particles IF ( particle_mask(n) ) THEN nn = nn + 1 particles(nn) = particles(n) ELSE nd = nd + 1 ENDIF ENDDO ! IF ( nd /= deleted_particles ) THEN ! WRITE (9,*) '*** advec_part nd=',nd,' deleted_particles=',deleted_particles ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF number_of_particles = number_of_particles - deleted_particles ! !-- Pack the tails, store the new tail ids and re-assign them to the !-- respective particles IF ( use_particle_tails ) THEN nn = 0 nd = 0 DO n = 1, number_of_tails IF ( tail_mask(n) ) THEN nn = nn + 1 particle_tail_coordinates(:,:,nn) = & particle_tail_coordinates(:,:,n) new_tail_id(n) = nn ELSE nd = nd + 1 ! WRITE (9,*) '+++ n=',n,' (of ',number_of_tails,' #oftails)' ! WRITE (9,*) ' id=',new_tail_id(n) ! CALL local_flush( 9 ) ENDIF ENDDO ENDIF ! IF ( nd /= deleted_tails .AND. use_particle_tails ) THEN ! WRITE (9,*) '*** advec_part nd=',nd,' deleted_tails=',deleted_tails ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF number_of_tails = number_of_tails - deleted_tails ! nn = 0 DO n = 1, number_of_particles IF ( particles(n)%tail_id /= 0 ) THEN ! nn = nn + 1 ! IF ( particles(n)%tail_id<0 .OR. particles(n)%tail_id > number_of_tails ) THEN ! WRITE (9,*) '+++ n=',n,' (of ',number_of_particles,')' ! WRITE (9,*) ' tail_id=',particles(n)%tail_id ! WRITE (9,*) ' new_tail_id=', new_tail_id(particles(n)%tail_id), & ! ' of (',number_of_tails,')' ! CALL local_flush( 9 ) ! ENDIF particles(n)%tail_id = new_tail_id(particles(n)%tail_id) ENDIF ENDDO ! IF ( nn /= number_of_tails .AND. use_particle_tails ) THEN ! WRITE (9,*) '*** advec_part #of_tails=',number_of_tails,' nn=',nn ! CALL local_flush( 9 ) ! DO n = 1, number_of_particles ! WRITE (9,*) 'prt# ',n,' tail_id=',particles(n)%tail_id, & ! ' x=',particles(n)%x, ' y=',particles(n)%y, & ! ' z=',particles(n)%z ! ENDDO ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ENDIF ! IF ( number_of_particles /= number_of_tails ) THEN ! WRITE (9,*) '--- advec_particles: #7' ! WRITE (9,*) ' #of p=',number_of_particles,' #of t=',number_of_tails ! CALL local_flush( 9 ) ! ENDIF ! WRITE ( 9, * ) '*** advec_particles: ##8' ! CALL local_flush( 9 ) ! DO n = 1, number_of_particles ! IF ( particles(n)%tail_id<0 .OR. particles(n)%tail_id>number_of_tails ) & ! THEN ! WRITE (9,*) '+++ n=',n,' (of ',number_of_particles,')' ! WRITE (9,*) ' id=',particles(n)%tail_id,' of (',number_of_tails,')' ! CALL local_flush( 9 ) !
CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! ENDDO ! WRITE ( 9, * ) '*** advec_particles: ##9' ! CALL local_flush( 9 ) ! DO n = 1, number_of_particles ! IF ( particles(n)%tail_id<0 .OR. particles(n)%tail_id>number_of_tails ) & ! THEN ! WRITE (9,*) '+++ n=',n,' (of ',number_of_particles,')' ! WRITE (9,*) ' id=',particles(n)%tail_id,' of (',number_of_tails,')' ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! ENDDO ! !-- Accumulate the number of particles transferred between the subdomains #if defined( __parallel ) trlp_count_sum = trlp_count_sum + trlp_count trlp_count_recv_sum = trlp_count_recv_sum + trlp_count_recv trrp_count_sum = trrp_count_sum + trrp_count trrp_count_recv_sum = trrp_count_recv_sum + trrp_count_recv trsp_count_sum = trsp_count_sum + trsp_count trsp_count_recv_sum = trsp_count_recv_sum + trsp_count_recv trnp_count_sum = trnp_count_sum + trnp_count trnp_count_recv_sum = trnp_count_recv_sum + trnp_count_recv #endif IF ( dt_3d_reached ) EXIT ENDDO ! timestep loop ! !-- Sort particles in the sequence the gridboxes are stored in the memory time_sort_particles = time_sort_particles + dt_3d IF ( time_sort_particles >= dt_sort_particles ) THEN CALL sort_particles time_sort_particles = MOD( time_sort_particles, & MAX( dt_sort_particles, dt_3d ) ) ENDIF ! !-- Re-evaluate the weighting factors. After advection, particles within a !-- grid box may have different weighting factors if some have been advected !-- from a neighbouring box. The factors are re-evaluated so that they are !-- the same for all particles of one box. This procedure must conserve the !-- liquid water content within one box. IF ( cloud_droplets ) THEN CALL cpu_log( log_point_s(45), 'advec_part_reeval_we', 'start' ) ql = 0.0; ql_v = 0.0; ql_vp = 0.0 ! !-- Re-calculate the weighting factors and calculate the liquid water content DO i = nxl, nxr DO j = nys, nyn DO k = nzb, nzt+1 ! !-- Calculate the total volume of particles in the boxes (ql_vp) as !-- well as the real volume (ql_v, weighting factor has to be !-- included) psi = prt_start_index(k,j,i) DO n = psi, psi+prt_count(k,j,i)-1 ql_vp(k,j,i) = ql_vp(k,j,i) + particles(n)%radius**3 ql_v(k,j,i) = ql_v(k,j,i) + particles(n)%weight_factor * & particles(n)%radius**3 ENDDO ! !-- Re-calculate the weighting factors and calculate the liquid !-- water content IF ( ql_vp(k,j,i) /= 0.0 ) THEN ql_vp(k,j,i) = ql_v(k,j,i) / ql_vp(k,j,i) ql(k,j,i) = ql(k,j,i) + rho_l * 1.33333333 * pi * & ql_v(k,j,i) / & ( rho_surface * dx * dy * dz ) ELSE ql(k,j,i) = 0.0 ENDIF ! !-- Re-assign the weighting factor to the particles DO n = psi, psi+prt_count(k,j,i)-1 particles(n)%weight_factor = ql_vp(k,j,i) ENDDO ENDDO ENDDO ENDDO CALL cpu_log( log_point_s(45), 'advec_part_reeval_we', 'stop' ) ENDIF ! !-- Set particle attributes CALL set_particle_attributes ! !-- Set particle attributes defined by the user CALL user_particle_attributes ! WRITE ( 9, * ) '*** advec_particles: ##10' ! CALL local_flush( 9 ) ! DO n = 1, number_of_particles ! IF ( particles(n)%tail_id<0 .OR. particles(n)%tail_id>number_of_tails ) & ! THEN ! WRITE (9,*) '+++ n=',n,' (of ',number_of_particles,')' ! WRITE (9,*) ' id=',particles(n)%tail_id,' of (',number_of_tails,')' ! CALL local_flush( 9 ) ! CALL MPI_ABORT( comm2d, 9999, ierr ) ! ENDIF ! ENDDO ! !-- If necessary, add the actual particle positions to the particle tails IF ( use_particle_tails ) THEN distance = 0.0 DO n = 1, number_of_particles nn = particles(n)%tail_id IF ( nn /= 0 ) THEN ! 
!-- Calculate the distance between the actual particle position and the !-- next tailpoint ! WRITE ( 9, * ) '*** advec_particles: ##10.1 nn=',nn ! CALL local_flush( 9 ) IF ( minimum_tailpoint_distance /= 0.0 ) THEN distance = ( particle_tail_coordinates(1,1,nn) - & particle_tail_coordinates(2,1,nn) )**2 + & ( particle_tail_coordinates(1,2,nn) - & particle_tail_coordinates(2,2,nn) )**2 + & ( particle_tail_coordinates(1,3,nn) - & particle_tail_coordinates(2,3,nn) )**2 ENDIF ! WRITE ( 9, * ) '*** advec_particles: ##10.2' ! CALL local_flush( 9 ) ! !-- First, increase the index of all existings tailpoints by one IF ( distance >= minimum_tailpoint_distance ) THEN DO i = particles(n)%tailpoints, 1, -1 particle_tail_coordinates(i+1,:,nn) = & particle_tail_coordinates(i,:,nn) ENDDO ! !-- Increase the counter which contains the number of tailpoints. !-- This must always be smaller than the given maximum number of !-- tailpoints because otherwise the index bounds of !-- particle_tail_coordinates would be exceeded IF ( particles(n)%tailpoints < maximum_number_of_tailpoints-1 )& THEN particles(n)%tailpoints = particles(n)%tailpoints + 1 ENDIF ENDIF ! WRITE ( 9, * ) '*** advec_particles: ##10.3' ! CALL local_flush( 9 ) ! !-- In any case, store the new point at the beginning of the tail particle_tail_coordinates(1,1,nn) = particles(n)%x particle_tail_coordinates(1,2,nn) = particles(n)%y particle_tail_coordinates(1,3,nn) = particles(n)%z particle_tail_coordinates(1,4,nn) = particles(n)%color ! WRITE ( 9, * ) '*** advec_particles: ##10.4' ! CALL local_flush( 9 ) ! !-- Increase the age of the tailpoints IF ( minimum_tailpoint_distance /= 0.0 ) THEN particle_tail_coordinates(2:particles(n)%tailpoints,5,nn) = & particle_tail_coordinates(2:particles(n)%tailpoints,5,nn) + & dt_3d ! !-- Delete the last tailpoint, if it has exceeded its maximum age IF ( particle_tail_coordinates(particles(n)%tailpoints,5,nn) > & maximum_tailpoint_age ) THEN particles(n)%tailpoints = particles(n)%tailpoints - 1 ENDIF ENDIF ! WRITE ( 9, * ) '*** advec_particles: ##10.5' ! CALL local_flush( 9 ) ENDIF ENDDO ENDIF ! WRITE ( 9, * ) '*** advec_particles: ##11' ! CALL local_flush( 9 ) ! !-- Write particle statistics on file IF ( write_particle_statistics ) THEN CALL check_open( 80 ) #if defined( __parallel ) WRITE ( 80, 8000 ) current_timestep_number+1, simulated_time+dt_3d, & number_of_particles, pleft, trlp_count_sum, & trlp_count_recv_sum, pright, trrp_count_sum, & trrp_count_recv_sum, psouth, trsp_count_sum, & trsp_count_recv_sum, pnorth, trnp_count_sum, & trnp_count_recv_sum, maximum_number_of_particles CALL close_file( 80 ) #else WRITE ( 80, 8000 ) current_timestep_number+1, simulated_time+dt_3d, & number_of_particles, maximum_number_of_particles #endif ENDIF CALL cpu_log( log_point(25), 'advec_particles', 'stop' ) ! !-- Formats 8000 FORMAT (I6,1X,F7.2,4X,I6,5X,4(I3,1X,I4,'/',I4,2X),6X,I6) END SUBROUTINE advec_particles SUBROUTINE allocate_prt_memory( number_of_new_particles ) !------------------------------------------------------------------------------! ! Description: ! ------------ ! Extend particle memory !------------------------------------------------------------------------------! 
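! !-- Note: the particle array is not extended by the exact number of new !-- particles but to the old maximum plus !-- MAX( 5*number_of_new_particles, number_of_initial_particles ), !-- presumably in order to avoid a re-allocation for every small extension !-- request.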
USE particle_attributes IMPLICIT NONE INTEGER :: new_maximum_number, number_of_new_particles LOGICAL, DIMENSION(:), ALLOCATABLE :: tmp_particle_mask TYPE(particle_type), DIMENSION(:), ALLOCATABLE :: tmp_particles new_maximum_number = maximum_number_of_particles + & MAX( 5*number_of_new_particles, number_of_initial_particles ) IF ( write_particle_statistics ) THEN CALL check_open( 80 ) WRITE ( 80, '(''*** Request: '', I7, '' new_maximum_number(prt)'')' ) & new_maximum_number CALL close_file( 80 ) ENDIF ALLOCATE( tmp_particles(maximum_number_of_particles), & tmp_particle_mask(maximum_number_of_particles) ) tmp_particles = particles tmp_particle_mask = particle_mask DEALLOCATE( particles, particle_mask ) ALLOCATE( particles(new_maximum_number), & particle_mask(new_maximum_number) ) maximum_number_of_particles = new_maximum_number particles(1:number_of_particles) = tmp_particles(1:number_of_particles) particle_mask(1:number_of_particles) = & tmp_particle_mask(1:number_of_particles) particle_mask(number_of_particles+1:maximum_number_of_particles) = .TRUE. DEALLOCATE( tmp_particles, tmp_particle_mask ) END SUBROUTINE allocate_prt_memory SUBROUTINE allocate_tail_memory( number_of_new_tails ) !------------------------------------------------------------------------------! ! Description: ! ------------ ! Extend tail memory !------------------------------------------------------------------------------! USE particle_attributes IMPLICIT NONE INTEGER :: new_maximum_number, number_of_new_tails LOGICAL, DIMENSION(maximum_number_of_tails) :: tmp_tail_mask REAL, DIMENSION(maximum_number_of_tailpoints,5,maximum_number_of_tails) :: & tmp_tail new_maximum_number = maximum_number_of_tails + & MAX( 5*number_of_new_tails, number_of_initial_tails ) IF ( write_particle_statistics ) THEN CALL check_open( 80 ) WRITE ( 80, '(''*** Request: '', I5, '' new_maximum_number(tails)'')' ) & new_maximum_number CALL close_file( 80 ) ENDIF WRITE (9,*) '*** Request: ',new_maximum_number,' new_maximum_number(tails)' ! CALL local_flush( 9 ) tmp_tail(:,:,1:number_of_tails) = & particle_tail_coordinates(:,:,1:number_of_tails) tmp_tail_mask(1:number_of_tails) = tail_mask(1:number_of_tails) DEALLOCATE( new_tail_id, particle_tail_coordinates, tail_mask ) ALLOCATE( new_tail_id(new_maximum_number), & particle_tail_coordinates(maximum_number_of_tailpoints,5, & new_maximum_number), & tail_mask(new_maximum_number) ) maximum_number_of_tails = new_maximum_number particle_tail_coordinates = 0.0 particle_tail_coordinates(:,:,1:number_of_tails) = & tmp_tail(:,:,1:number_of_tails) tail_mask(1:number_of_tails) = tmp_tail_mask(1:number_of_tails) tail_mask(number_of_tails+1:maximum_number_of_tails) = .TRUE. END SUBROUTINE allocate_tail_memory SUBROUTINE output_particles_netcdf #if defined( __netcdf ) USE control_parameters USE netcdf_control USE particle_attributes IMPLICIT NONE CALL check_open( 108 ) ! !-- Update the NetCDF time axis prt_time_count = prt_time_count + 1 nc_stat = NF90_PUT_VAR( id_set_prt, id_var_time_prt, (/ simulated_time /), & start = (/ prt_time_count /), count = (/ 1 /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 1 ) ! !-- Output the real number of particles used nc_stat = NF90_PUT_VAR( id_set_prt, id_var_rnop_prt, & (/ number_of_particles /), & start = (/ prt_time_count /), count = (/ 1 /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 2 ) ! 
!-- Output all particle attributes nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(1), particles%age, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 3 ) nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(2), particles%dvrp_psize, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 4 ) nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(3), particles%origin_x, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 5 ) nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(4), particles%origin_y, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 6 ) nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(5), particles%origin_z, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 7 ) nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(6), particles%radius, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 8 ) nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(7), particles%speed_x, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 9 ) nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(8), particles%speed_y, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 10 ) nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(9), particles%speed_z, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 11 ) nc_stat = NF90_PUT_VAR( id_set_prt,id_var_prt(10),particles%weight_factor,& start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 12 ) nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(11), particles%x, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 13 ) nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(12), particles%y, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 14 ) nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(13), particles%z, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 15 ) nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(14), particles%color, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 16 ) nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(15), particles%group, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 17 ) nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(16), particles%tailpoints, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 'output_particles_netcdf', 18 ) nc_stat = NF90_PUT_VAR( id_set_prt, id_var_prt(17), particles%tail_id, & start = (/ 1, prt_time_count /), & count = (/ maximum_number_of_particles /) ) CALL handle_netcdf_error( 
'output_particles_netcdf', 19 ) #endif END SUBROUTINE output_particles_netcdf SUBROUTINE write_particles !------------------------------------------------------------------------------! ! Description: ! ------------ ! Write particle data on restart file !------------------------------------------------------------------------------! USE control_parameters USE particle_attributes USE pegrid IMPLICIT NONE CHARACTER (LEN=10) :: particle_binary_version ! !-- First open the output unit. IF ( myid_char == '' ) THEN OPEN ( 90, FILE='PARTICLE_RESTART_DATA_OUT'//myid_char, & FORM='UNFORMATTED') ELSE IF ( myid == 0 ) CALL local_system( 'mkdir PARTICLE_RESTART_DATA_OUT' ) #if defined( __parallel ) ! !-- Set a barrier in order to allow that thereafter all other processors !-- in the directory created by PE0 can open their file CALL MPI_BARRIER( comm2d, ierr ) #endif OPEN ( 90, FILE='PARTICLE_RESTART_DATA_OUT/'//myid_char, & FORM='UNFORMATTED' ) ENDIF ! !-- Write the version number of the binary format. !-- Attention: After changes to the following output commands the version !-- --------- number of the variable particle_binary_version must be changed! !-- Also, the version number and the list of arrays to be read in !-- init_particles must be adjusted accordingly. particle_binary_version = '3.0' WRITE ( 90 ) particle_binary_version ! !-- Write some particle parameters, the size of the particle arrays as well as !-- other dvrp-plot variables. WRITE ( 90 ) bc_par_b, bc_par_lr, bc_par_ns, bc_par_t, & maximum_number_of_particles, maximum_number_of_tailpoints, & maximum_number_of_tails, number_of_initial_particles, & number_of_particles, number_of_particle_groups, & number_of_tails, particle_groups, time_prel, & time_write_particle_data, uniform_particles IF ( number_of_initial_particles /= 0 ) WRITE ( 90 ) initial_particles WRITE ( 90 ) prt_count, prt_start_index WRITE ( 90 ) particles IF ( use_particle_tails ) THEN WRITE ( 90 ) particle_tail_coordinates ENDIF CLOSE ( 90 ) END SUBROUTINE write_particles SUBROUTINE collision_efficiency( mean_r, r, e) !------------------------------------------------------------------------------! ! Description: ! ------------ ! Interpolate collision efficiency from table !------------------------------------------------------------------------------! IMPLICIT NONE INTEGER :: i, j, k LOGICAL, SAVE :: first = .TRUE. 
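! !-- collected_r and collector_r hold droplet radii in micrometres, ef the !-- corresponding tabulated collision efficiencies. Intermediate values are !-- obtained below by a distance-weighted interpolation between the four !-- surrounding table entries.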
REAL :: aa, bb, cc, dd, dx, dy, e, gg, mean_r, mean_rm, r, rm, & x, y REAL, DIMENSION(1:9), SAVE :: collected_r = 0.0 REAL, DIMENSION(1:19), SAVE :: collector_r = 0.0 REAL, DIMENSION(1:9,1:19), SAVE :: ef = 0.0 mean_rm = mean_r * 1.0E06 rm = r * 1.0E06 IF ( first ) THEN collected_r = (/ 2.0, 3.0, 4.0, 6.0, 8.0, 10.0, 15.0, 20.0, 25.0 /) collector_r = (/ 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 80.0, 100.0, 150.0,& 200.0, 300.0, 400.0, 500.0, 600.0, 1000.0, 1400.0, & 1800.0, 2400.0, 3000.0 /) ef(:,1) = (/0.017, 0.027, 0.037, 0.052, 0.052, 0.052, 0.052, 0.0, 0.0 /) ef(:,2) = (/0.001, 0.016, 0.027, 0.060, 0.12, 0.17, 0.17, 0.17, 0.0 /) ef(:,3) = (/0.001, 0.001, 0.02, 0.13, 0.28, 0.37, 0.54, 0.55, 0.47/) ef(:,4) = (/0.001, 0.001, 0.02, 0.23, 0.4, 0.55, 0.7, 0.75, 0.75/) ef(:,5) = (/0.01, 0.01, 0.03, 0.3, 0.4, 0.58, 0.73, 0.75, 0.79/) ef(:,6) = (/0.01, 0.01, 0.13, 0.38, 0.57, 0.68, 0.80, 0.86, 0.91/) ef(:,7) = (/0.01, 0.085, 0.23, 0.52, 0.68, 0.76, 0.86, 0.92, 0.95/) ef(:,8) = (/0.01, 0.14, 0.32, 0.60, 0.73, 0.81, 0.90, 0.94, 0.96/) ef(:,9) = (/0.025, 0.25, 0.43, 0.66, 0.78, 0.83, 0.92, 0.95, 0.96/) ef(:,10)= (/0.039, 0.3, 0.46, 0.69, 0.81, 0.87, 0.93, 0.95, 0.96/) ef(:,11)= (/0.095, 0.33, 0.51, 0.72, 0.82, 0.87, 0.93, 0.96, 0.97/) ef(:,12)= (/0.098, 0.36, 0.51, 0.73, 0.83, 0.88, 0.93, 0.96, 0.97/) ef(:,13)= (/0.1, 0.36, 0.52, 0.74, 0.83, 0.88, 0.93, 0.96, 0.97/) ef(:,14)= (/0.17, 0.4, 0.54, 0.72, 0.83, 0.88, 0.94, 0.98, 1.0 /) ef(:,15)= (/0.15, 0.37, 0.52, 0.74, 0.82, 0.88, 0.94, 0.98, 1.0 /) ef(:,16)= (/0.11, 0.34, 0.49, 0.71, 0.83, 0.88, 0.94, 0.95, 1.0 /) ef(:,17)= (/0.08, 0.29, 0.45, 0.68, 0.8, 0.86, 0.96, 0.94, 1.0 /) ef(:,18)= (/0.04, 0.22, 0.39, 0.62, 0.75, 0.83, 0.92, 0.96, 1.0 /) ef(:,19)= (/0.02, 0.16, 0.33, 0.55, 0.71, 0.81, 0.90, 0.94, 1.0 /) ENDIF DO k = 1, 8 IF ( collected_r(k) <= mean_rm ) i = k ENDDO DO k = 1, 18 IF ( collector_r(k) <= rm ) j = k ENDDO IF ( rm < 10.0 ) THEN e = 0.0 ELSEIF ( mean_rm < 2.0 ) THEN e = 0.001 ELSEIF ( mean_rm >= 25.0 ) THEN IF( j <= 3 ) e = 0.55 IF( j == 4 ) e = 0.8 IF( j == 5 ) e = 0.9 IF( j >=6 ) e = 1.0 ELSEIF ( rm >= 3000.0 ) THEN e = 1.0 ELSE x = mean_rm - collected_r(i) y = rm - collected_r(j) dx = collected_r(i+1) - collected_r(i) dy = collector_r(j+1) - collector_r(j) aa = x**2 + y**2 bb = ( dx - x )**2 + y**2 cc = x**2 + ( dy - y )**2 dd = ( dx - x )**2 + ( dy - y )**2 gg = aa + bb + cc + dd e = ( (gg-aa)*ef(i,j) + (gg-bb)*ef(i+1,j) + (gg-cc)*ef(i,j+1) + & (gg-dd)*ef(i+1,j+1) ) / (3.0*gg) ENDIF END SUBROUTINE collision_efficiency SUBROUTINE sort_particles !------------------------------------------------------------------------------! ! Description: ! ------------ ! Sort particles in the sequence the grid boxes are stored in memory !------------------------------------------------------------------------------! USE arrays_3d USE control_parameters USE cpulog USE grid_variables USE indices USE interfaces USE particle_attributes IMPLICIT NONE INTEGER :: i, ilow, j, k, n TYPE(particle_type), DIMENSION(1:number_of_particles) :: particles_temp CALL cpu_log( log_point_s(47), 'sort_particles', 'start' ) ! !-- Initialize the array used for counting and indexing the particles prt_count = 0 ! !-- Count the particles per gridbox DO n = 1, number_of_particles i = ( particles(n)%x + 0.5 * dx ) * ddx j = ( particles(n)%y + 0.5 * dy ) * ddy k = particles(n)%z / dz + 1 + offset_ocean_nzt ! only exact if equidistant prt_count(k,j,i) = prt_count(k,j,i) + 1 IF ( i < nxl .OR. i > nxr .OR. j < nys .OR. j > nyn .OR. k < nzb+1 .OR. 
& k > nzt ) THEN WRITE( message_string, * ) ' particle out of range: i=', i, ' j=', & j, ' k=', k, & ' &nxl=', nxl, ' nxr=', nxr, & ' nys=', nys, ' nyn=', nyn, & ' nzb=', nzb, ' nzt=', nzt CALL message( 'sort_particles', 'PA0149', 1, 2, 0, 6, 0 ) ENDIF ENDDO ! !-- Calculate the lower indices of those ranges of the particles-array !-- containing particles which belong to the same gridbox i,j,k ilow = 1 DO i = nxl, nxr DO j = nys, nyn DO k = nzb+1, nzt prt_start_index(k,j,i) = ilow ilow = ilow + prt_count(k,j,i) ENDDO ENDDO ENDDO ! !-- Sort the particles: each particle is copied to the next free position of !-- its gridbox, using prt_start_index as a moving insertion pointer DO n = 1, number_of_particles i = ( particles(n)%x + 0.5 * dx ) * ddx j = ( particles(n)%y + 0.5 * dy ) * ddy k = particles(n)%z / dz + 1 + offset_ocean_nzt ! only exact if equidistant particles_temp(prt_start_index(k,j,i)) = particles(n) prt_start_index(k,j,i) = prt_start_index(k,j,i) + 1 ENDDO particles(1:number_of_particles) = particles_temp ! !-- Reset the index array to the actual start position DO i = nxl, nxr DO j = nys, nyn DO k = nzb+1, nzt prt_start_index(k,j,i) = prt_start_index(k,j,i) - prt_count(k,j,i) ENDDO ENDDO ENDDO CALL cpu_log( log_point_s(47), 'sort_particles', 'stop' ) END SUBROUTINE sort_particles