Changeset 4309
- Timestamp: Nov 26, 2019 6:49:59 PM (5 years ago)
- Location: palm/trunk/SOURCE
- Files: 5 edited
palm/trunk/SOURCE/Makefile
r4286 → r4309

@@ -25 +25 @@ file-header changelog
  # -----------------
  # $Id$
+ # Add dependency to parallel random generator for synthetic turbulence generator
+ #
+ # 4286 2019-10-30 16:01:14Z resler
  # delete boundary_conds, added missing dependencies
  #

@@ -1077 +1080 @@ dependency list
      nesting_offl_mod.o \
      pmc_interface_mod.o \
+     random_generator_parallel_mod.o \
      surface_mod.o
  temperton_fft_mod.o: \
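For context, the added prerequisite reflects the USE association introduced in the synthetic turbulence generator (shown in full in the synthetic_turbulence_generator_mod.f90 diff below). The following is only an illustrative sketch, not part of the changeset: the module name demo_rg_consumer is hypothetical, and the imported names are taken from the ONLY list in the diff below.

    ! Sketch only - not part of the changeset. A module that USE-associates
    ! random_generator_parallel can only be compiled after
    ! random_generator_parallel_mod.o exists, which is the compile-order
    ! dependency the added Makefile prerequisite expresses.
    MODULE demo_rg_consumer
       USE random_generator_parallel,  ONLY:  random_dummy, random_number_parallel
       IMPLICIT NONE
    CONTAINS
       SUBROUTINE draw_one_random_number
          !-- random_number_parallel fills the imported module variable random_dummy
          CALL random_number_parallel( random_dummy )
       END SUBROUTINE draw_one_random_number
    END MODULE demo_rg_consumer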
palm/trunk/SOURCE/header.f90
r4301 → r4309

@@ -20 +20 @@ move changelog entry from "Current revisions" to "Former revisions"
  ! Current revisions:
  ! -----------------
- ! replaced recycling_yshift by y_shift
+ !
  !
  ! Former revisions:
  ! -----------------
  ! $Id$
+ ! replaced recycling_yshift by y_shift
+ !
+ ! 4301 2019-11-22 12:09:09Z oliver.maas
  !
  ! 4297 2019-11-21 10:37:50Z oliver.maas
palm/trunk/SOURCE/plant_canopy_model_mod.f90
r4302 → r4309

@@ -27 +27 @@ file-header changelog
  ! -----------------
  ! $Id$
+ ! Typo
+ !
+ ! 4302 2019-11-22 13:15:56Z suehring
  ! Omit tall canopy mapped on top of buildings
  !

@@ -1115 +1118 @@ fix duplicated "is" in the warning message
  WRITE( message_string, * )                                   &
     'Resolved plant-canopy is ' //                            &
-    ' is defined on top of a building ' //                    &
+    'defined on top of a building ' //                        &
     '- lad is omitted at this grid ' //                       &
     'point: (i,j) = ', i, j
palm/trunk/SOURCE/synthetic_turbulence_generator_mod.f90
r4182 → r4309

@@ -25 +25 @@ file-header changelog
  ! -----------------
  ! $Id$
+ ! Computation of velocity seeds optimized. This implies that random numbers
+ ! are computed now using the parallel random number generator. Random numbers
+ ! are now only computed and normalized locally, while distributed over all
+ ! mpi ranks afterwards, instead of computing random numbers on a global array.
+ ! Further, the number of calls for the time-consuming velocity-seed generation
+ ! is reduced - now the left and right, as well as the north and south boundary
+ ! share the same velocity-seed matrices.
+ !
+ ! 4182 2019-08-22 15:20:23Z scharf
  ! Corrected "Former revisions" section
  !

@@ -154 +163 @@ imported names
      neutral,                                                               &
      num_mean_inflow_profiles,                                              &
+     random_generator,                                                      &
      rans_mode,                                                             &
      restart_string,                                                        &

@@ -206 +216 @@ new module dependency
  USE pmc_interface,                                                         &
      ONLY : rans_mode_parent
+
+ USE random_generator_parallel,                                             &
+     ONLY: random_dummy,                                                    &
+           random_number_parallel,                                          &
+           random_seed_parallel,                                            &
+           seq_random_array

  USE transpose_indices,                                                     &

@@ -227 +243, -237 +249, -256 +272 @@ module declarations
  [The declarations of stg_type_xz, stg_type_xz_small, stg_type_yz and
   stg_type_yz_small (MPI types for the full and small z range) are moved,
   unchanged, from their old position after id_stg_south to their alphabetical
   position after nzt_y_stg.]
- INTEGER(iwp), DIMENSION(:), ALLOCATABLE :: seed  !< seed of random number for rn-generator

@@ -476 +491 @@ stg_check_parameters
     CALL message( 'stg_check_parameters', 'PA0039', 1, 2, 0, 6, 0 )
  ENDIF
+ !
+ !-- Synthetic turbulence generator requires the parallel random generator
+ IF ( random_generator /= 'random-parallel' )  THEN
+    message_string = 'Using synthetic turbulence generator ' //             &
+                     'requires random_generator = random-parallel.'
+    CALL message( 'stg_check_parameters', 'PA0421', 1, 2, 0, 6, 0 )
+ ENDIF

  ENDIF

@@ -540 +562 @@ stg_init declarations
  INTEGER(iwp) :: newtype   !< dummy MPI type
  INTEGER(iwp) :: realsize  !< size of REAL variables
- INTEGER(iwp) :: nseed                   !< dimension of random number seed
- INTEGER(iwp) :: startseed = 1234567890  !< start seed for random number generator

  INTEGER(iwp), DIMENSION(3) :: nr_non_topo_xz_l = 0  !< number of non-topography grid points at xz-cross-section on subdomain
  INTEGER(iwp), DIMENSION(3) :: nr_non_topo_yz_l = 0  !< number of non-topography grid points at yz-cross-section on subdomain

@@ -648 +668 @@ stg_init: seeding of the Fortran intrinsic generator removed
  #endif
- !
- !-- Define seed of random number
- CALL RANDOM_SEED()
- CALL RANDOM_SEED( size=nseed )
- ALLOCATE( seed(1:nseed) )
- DO  j = 1, nseed
-    seed(j) = startseed + j
- ENDDO
- CALL RANDOM_SEED( put=seed )
  !
  !-- Allocate required arrays.

@@ -1209 +1220 @@ generation of velocity seeds
  !-- Initial value of fu, fv, fw
  IF ( time_since_reference_point == 0.0_wp .AND. .NOT. velocity_seed_initialized ) THEN
- [Removed: the previous calls, which generated seeds unconditionally at the
-  left boundary and, for offline nesting or a nested RANS-LES setup,
-  additionally at the right, north and south boundary, each with a single
-  boundary id per call, e.g. stg_generate_seed_yz( nuy, nuz, buy, buz, fu_yz,
-  id_stg_left ) and stg_generate_seed_xz( nux, nuz, bux, buz, fu_xz,
-  id_stg_north ).]
+    !
+    !-- Generate turbulence at the left and right boundary. Random numbers
+    !-- for the yz-planes at the left/right boundary are generated by the
+    !-- left-sided mpi ranks only. After random numbers are calculated, they
+    !-- are distributed to all other mpi ranks in the model, so that the
+    !-- velocity seed matrices are available on all mpi ranks (i.e. also on the
+    !-- right-sided boundary mpi ranks). In case of offline nesting, this implies,
+    !-- that the left- and the right-sided lateral boundary have the same initial
+    !-- seeds.
+    !-- Note, in case of inflow from the right only, only turbulence at the left
+    !-- boundary is required.
+    IF ( .NOT. ( nesting_offline  .OR.                                      &
+                 ( child_domain .AND. rans_mode_parent                      &
+                                .AND. .NOT. rans_mode ) ) )  THEN
+       CALL stg_generate_seed_yz( nuy, nuz, buy, buz, fu_yz, id_stg_left )
+       CALL stg_generate_seed_yz( nvy, nvz, bvy, bvz, fv_yz, id_stg_left )
+       CALL stg_generate_seed_yz( nwy, nwz, bwy, bwz, fw_yz, id_stg_left )
+    ELSE
+       CALL stg_generate_seed_yz( nuy, nuz, buy, buz, fu_yz,                &
+                                  id_stg_left, id_stg_right )
+       CALL stg_generate_seed_yz( nvy, nvz, bvy, bvz, fv_yz,                &
+                                  id_stg_left, id_stg_right )
+       CALL stg_generate_seed_yz( nwy, nwz, bwy, bwz, fw_yz,                &
+                                  id_stg_left, id_stg_right )
+       !
+       !-- Generate turbulence at the south and north boundary. Random numbers
+       !-- for the xz-planes at the south/north boundary are generated by the
+       !-- south-sided mpi ranks only. Please see also comment above.
+       CALL stg_generate_seed_xz( nux, nuz, bux, buz, fu_xz,                &
+                                  id_stg_south, id_stg_north )
+       CALL stg_generate_seed_xz( nvx, nvz, bvx, bvz, fv_xz,                &
+                                  id_stg_south, id_stg_north )
+       CALL stg_generate_seed_xz( nwx, nwz, bwx, bwz, fw_xz,                &
+                                  id_stg_south, id_stg_north )
     ENDIF
     velocity_seed_initialized = .TRUE.
  ENDIF
  !
- !-- New set of fu, fv, fw
+ !-- New set of fu, fv, fw. Note, for inflow from the left side only, velocity
+ !-- seeds are only required at the left boundary, while in case of offline
+ !-- nesting or RANS-LES nesting velocity seeds are required also at the
+ !-- right, south and north boundaries.
  IF ( stg_call ) THEN
+ [The call sequence inside this IF block is restructured in the same way as
+  above: stg_generate_seed_yz with id_stg_left only for pure turbulent inflow,
+  otherwise stg_generate_seed_yz with (id_stg_left, id_stg_right) and
+  stg_generate_seed_xz with (id_stg_south, id_stg_north), here acting on
+  fuo_yz/fvo_yz/fwo_yz and fuo_xz/fvo_xz/fwo_xz.]
  ENDIF

  !
- !-- Turbulence generation at left and
+ !-- Turbulence generation at left and/or right boundary
  IF ( myidx == id_stg_left  .OR.  myidx == id_stg_right )  THEN

  [Around old lines 1315, 1343 and 1516 only continuation-line alignment of
   BTEST( wall_flags_0(k,j,i), ... ) and a blank line before the mass-flux
   correction following Kim et al. (2013) are changed.]

@@ -1650 +1673 @@ stg_generate_seed_yz
- SUBROUTINE stg_generate_seed_yz( n_y, n_z, b_y, b_z, f_n, id )
+ SUBROUTINE stg_generate_seed_yz( n_y, n_z, b_y, b_z, f_n, id_left, id_right )
+
+    USE pegrid

     IMPLICIT NONE

-    INTEGER(iwp) :: id          !< core ids at respective boundaries
+    INTEGER(iwp)           :: i         !< grid index x-direction
+    INTEGER(iwp)           :: id_left   !< core ids at respective boundaries
+    INTEGER(iwp), OPTIONAL :: id_right  !< core ids at respective boundaries
     INTEGER(iwp) :: j           !< loop index in y-direction
     INTEGER(iwp) :: jj          !< loop index in y-direction
     INTEGER(iwp) :: k           !< loop index in z-direction
     INTEGER(iwp) :: kk          !< loop index in z-direction
     INTEGER(iwp) :: send_count  !< send count for MPI_GATHERV

     INTEGER(iwp), DIMENSION(nzb:nzt+1) :: n_y  !< length scale in y-direction
...
     REAL(wp), DIMENSION(-merg:merg,nzb:nzt+1) :: b_y  !< filter function in y-direction
     REAL(wp), DIMENSION(-merg:merg,nzb:nzt+1) :: b_z  !< filter function in z-direction

     REAL(wp), DIMENSION(nzb_x_stg:nzt_x_stg+1,nysg:nyng) :: f_n_l  !< local velocity seed
     REAL(wp), DIMENSION(nzb:nzt+1,nysg:nyng)             :: f_n    !< velocity seed
-    REAL(wp), DIMENSION(:,:), ALLOCATABLE :: rand_it    !< random number
+    REAL(wp), DIMENSION(:,:), ALLOCATABLE :: rand_it    !< global array of random numbers
+    REAL(wp), DIMENSION(:,:), ALLOCATABLE :: rand_it_l  !< local array of random numbers

     !
-    !-- Generate random numbers using a seed generated in stg_init.
+    !-- Generate random numbers using the parallel random generator.
     !-- The set of random numbers are modified to have an average of 0 and
-    !-- unit variance.
+    !-- unit variance. Note, at the end the random number array must be defined
+    !-- globally in order to compute the correlation matrices. However,
+    !-- the calculation and normalization of random numbers is done locally,
+    !-- while the result is later distributed to all processes. Further,
+    !-- please note, a set of random numbers is only calculated for the
+    !-- left boundary, while the right boundary uses the same random numbers
+    !-- and thus also computes the same correlation matrix.
     ALLOCATE( rand_it(nzb-mergp:nzt+1+mergp,-mergp:ny+mergp) )
+    rand_it = 0.0_wp

     rand_av        = 0.0_wp
     rand_sigma_inv = 0.0_wp
     nyz_inv        = 1.0_wp / REAL( ( nzt+1 - nzb+1 ) * ( ny+1 ), KIND=wp )
- [Removed: the previous serial version, which called RANDOM_NUMBER for every
-  grid point of the global yz-plane (DO j = 0, ny / DO k = nzb, nzt+1) and
-  computed mean, variance and the normalization on the global rand_it array
-  on every rank.]
+    !
+    !-- Compute and normalize random numbers only on left-boundary ranks.
+    IF ( myidx == id_stg_left )  THEN
+       !
+       !-- Allocate array for local set of random numbers
+       ALLOCATE( rand_it_l(nzb:nzt+1,nys:nyn) )
+       rand_it_l = 0.0_wp
+
+       i = nxl
+       DO  j = nys, nyn
+          !
+          !-- Put the random seeds at grid point j,i
+          CALL random_seed_parallel( put=seq_random_array(:,j,i) )
+          DO  k = nzb, nzt+1
+             CALL random_number_parallel( random_dummy )
+             rand_it_l(k,j) = random_dummy
+             rand_av        = rand_av + rand_it_l(k,j)
+          ENDDO
+          !
+          !-- Get the new random seeds from last call at grid point j,i
+          CALL random_seed_parallel( get=seq_random_array(:,j,i) )
+       ENDDO
+#if defined( __parallel )
+       !
+       !-- Sum-up the local averages of the random numbers
+       CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_av, 1, MPI_REAL,               &
+                           MPI_SUM, comm1dy, ierr )
+#endif
+       rand_av = rand_av * nyz_inv
+       !
+       !-- Obtain zero mean
+       rand_it_l = rand_it_l - rand_av
+       !
+       !-- Now, compute the variance
+       DO  j = nys, nyn
+          DO  k = nzb, nzt+1
+             rand_sigma_inv = rand_sigma_inv + rand_it_l(k,j)**2
+          ENDDO
+       ENDDO
+
+#if defined( __parallel )
+       !
+       !-- Sum-up the local quadratic averages of the random numbers
+       CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_sigma_inv, 1, MPI_REAL,        &
+                           MPI_SUM, comm1dy, ierr )
+#endif
+       !
+       !-- Compute standard deviation
+       IF ( rand_sigma_inv /= 0.0_wp )  THEN
+          rand_sigma_inv = 1.0_wp / SQRT( rand_sigma_inv * nyz_inv )
+       ELSE
+          rand_sigma_inv = 1.0_wp
+       ENDIF
+       !
+       !-- Normalize with standard deviation to obtain unit variance
+       rand_it_l = rand_it_l * rand_sigma_inv
+       !
+       !-- Copy local random numbers on the global array
+       rand_it(nzb:nzt+1,nys:nyn) = rand_it_l(nzb:nzt+1,nys:nyn)
+       !
+       !-- Deallocate local array
+       DEALLOCATE( rand_it_l )
+       !
+       !-- Now, distribute the final set of random numbers to all mpi ranks located
+       !-- on the left boundary. Here, an allreduce with sum reduction is sufficient,
+       !-- or, in the non-parallel case, nothing need to be done at all.
+#if defined( __parallel )
+       CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_it, SIZE( rand_it ), MPI_REAL, &
+                           MPI_SUM, comm1dy, ierr )
+#endif
+    ENDIF
+    !
+    !-- Finally, distribute the set of random numbers (defined on the leftmost-
+    !-- located mpi ranks) to all other mpi ranks. Here, a allreduce with sum
+    !-- option is sufficient, because rand_it is zero on all other mpi_ranks.
+    !-- Note, the reduce operation is only performed with communicator comm1dx,
+    !-- where only 1 rank within the communicator has non-zero random numbers.
+#if defined( __parallel )
+    CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_it, SIZE( rand_it ), MPI_REAL,    &
+                        MPI_SUM, comm1dx, ierr )
+#endif
     !
-    !-- Periodic fill of random number in space
+    !-- Periodic fill of random numbers in space
     DO  j = 0, ny
        DO  k = 1, mergp

@@ -1749 +1841 @@ stg_generate_seed_yz: gather of the velocity seed
#if defined( __parallel )
-    CALL MPI_GATHERV( f_n_l(nzb_x_stg,nysg), send_count, stg_type_yz_small,   &
-                      f_n(nzb+1,nysg), recv_count_yz, displs_yz, stg_type_yz, &
-                      id, comm1dx, ierr )
+    !
+    !-- Gather the velocity seed matrix on left boundary mpi ranks.
+    CALL MPI_GATHERV( f_n_l(nzb_x_stg,nysg), send_count, stg_type_yz_small,   &
+                      f_n(nzb+1,nysg), recv_count_yz, displs_yz, stg_type_yz, &
+                      id_left, comm1dx, ierr )
+    !
+    !-- If required, gather the same velocity seed matrix on right boundary
+    !-- mpi ranks (in offline nesting for example).
+    IF ( PRESENT( id_right ) )  THEN
+       CALL MPI_GATHERV( f_n_l(nzb_x_stg,nysg), send_count, stg_type_yz_small,   &
+                         f_n(nzb+1,nysg), recv_count_yz, displs_yz, stg_type_yz, &
+                         id_right, comm1dx, ierr )
+    ENDIF
#else
     f_n(nzb+1:nzt+1,nysg:nyng) = f_n_l(nzb_x_stg:nzt_x_stg+1,nysg:nyng)
#endif

  END SUBROUTINE stg_generate_seed_yz

@@ -1772 +1872 @@ stg_generate_seed_xz
- SUBROUTINE stg_generate_seed_xz( n_x, n_z, b_x, b_z, f_n, id )
+ SUBROUTINE stg_generate_seed_xz( n_x, n_z, b_x, b_z, f_n, id_south, id_north )

  [The xz routine is reworked analogously to stg_generate_seed_yz: the single
   dummy argument id is replaced by id_south and id_north (here both
   non-optional), a grid index j and a local array rand_it_l are added, and the
   global rand_it array is initialized to zero. Random numbers are computed
   with random_seed_parallel/random_number_parallel only on the south-boundary
   ranks (myidy == id_stg_south, j = nys, loop over i = nxl, nxr), normalized
   to zero mean and unit variance using MPI_ALLREDUCE sums over comm1dx,
   distributed first along the south boundary (comm1dx) and then to all other
   mpi ranks (comm1dy), followed by the periodic fill in space. The processed
   velocity seed is finally gathered with MPI_GATHERV on both the south
   (id_south) and the north (id_north) boundary ranks.]

  END SUBROUTINE stg_generate_seed_xz
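As an illustration of the normalization described in the changelog entry above, the following sketch is not part of the changeset: it reproduces only the zero-mean / unit-variance step that stg_generate_seed_yz and stg_generate_seed_xz now apply to the locally generated random numbers. The intrinsic RANDOM_NUMBER stands in for PALM's random_number_parallel, and the MPI_ALLREDUCE partial-sum reductions over comm1dy/comm1dx are omitted, so this is a serial, single-rank analogue only.

    ! Illustrative sketch (not from the changeset): normalize a local block of
    ! random numbers to zero mean and unit variance, mirroring the arithmetic
    ! used for the velocity-seed random numbers.
    PROGRAM normalize_random_seed_sketch
       IMPLICIT NONE
       INTEGER, PARAMETER :: wp = KIND( 1.0d0 )
       INTEGER, PARAMETER :: nz = 64, ny = 32
       REAL(wp), DIMENSION(nz,ny) :: rand_it_l
       REAL(wp) :: rand_av, rand_sigma_inv, nyz_inv

       nyz_inv = 1.0_wp / REAL( nz * ny, KIND=wp )

       CALL RANDOM_NUMBER( rand_it_l )            ! local random numbers in [0,1)
       rand_av = SUM( rand_it_l ) * nyz_inv       ! in PALM: partial sums combined via MPI_ALLREDUCE
       rand_it_l = rand_it_l - rand_av            ! obtain zero mean

       rand_sigma_inv = SUM( rand_it_l**2 ) * nyz_inv
       IF ( rand_sigma_inv /= 0.0_wp )  THEN      ! guard against zero variance
          rand_sigma_inv = 1.0_wp / SQRT( rand_sigma_inv )
       ELSE
          rand_sigma_inv = 1.0_wp
       ENDIF
       rand_it_l = rand_it_l * rand_sigma_inv     ! unit variance

       PRINT*, 'mean     =', SUM( rand_it_l ) * nyz_inv
       PRINT*, 'variance =', SUM( rand_it_l**2 ) * nyz_inv
    END PROGRAM normalize_random_seed_sketch

On the user side, the new check in stg_check_parameters (error PA0421) means that runs using the synthetic turbulence generator must also select the parallel random number generator, i.e. set random_generator = 'random-parallel' (in PALM typically given in the initialization parameter namelist); otherwise the run is aborted.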
palm/trunk/SOURCE/urban_surface_mod.f90
r4305 → r4309

@@ -23 +23 @@ move changelog entries from "Current revisions" to "Former revisions"
  ! Current revisions:
  ! ------------------
+ !
+ !
+ ! Former revisions:
+ ! -----------------
+ ! $Id$
  ! - Bugfix, include m_liq into restarts
  ! - Remove unused arrays for liquid water and saturation moisture at vertical
  !   walls
  !
- ! Former revisions:
- ! -----------------
- ! $Id$
+ ! 4305 2019-11-25 11:15:40Z suehring
  ! Revision of some indoor-model parameters
  !