Changeset 4438
- Timestamp: Mar 3, 2020 8:49:28 PM
- Location: palm/trunk/SOURCE
- Files: 4 edited
palm/trunk/SOURCE/random_generator_parallel_mod.f90
r4360 r4438 25 25 ! ----------------- 26 26 ! $Id$ 27 ! - Rename variables to avoid confusion with subdomain grid indices 28 ! - Some formatting adjustments 29 ! - New routine to initialize spatial 1D-arrays independent on the 2D random- 30 ! number array 31 ! 32 ! 4360 2020-01-07 11:25:50Z suehring 27 33 ! Corrected "Former revisions" section 28 34 ! … … 50 56 !------------------------------------------------------------------------------! 51 57 MODULE random_generator_parallel 52 53 58 54 59 USE kinds 55 60 56 61 IMPLICIT NONE 57 58 PRIVATE 59 PUBLIC random_number_parallel, random_seed_parallel, random_dummy, & 60 id_random_array, seq_random_array, init_parallel_random_generator 61 62 62 63 INTEGER(isp), SAVE :: lenran=0 !< 63 64 INTEGER(isp), SAVE :: seq=0 !< … … 68 69 INTEGER(isp), SAVE :: nran0 !< 69 70 INTEGER(isp), SAVE :: rans !< 70 71 71 72 INTEGER(isp), DIMENSION(:, :), POINTER, SAVE :: ranseeds !< 72 73 73 74 INTEGER(isp), DIMENSION(:), POINTER, SAVE :: iran !< 74 75 INTEGER(isp), DIMENSION(:), POINTER, SAVE :: jran !< … … 77 78 INTEGER(isp), DIMENSION(:), POINTER, SAVE :: nran !< 78 79 INTEGER(isp), DIMENSION(:), POINTER, SAVE :: ranv !< 79 80 81 80 82 81 INTEGER(isp), DIMENSION(:,:), ALLOCATABLE :: id_random_array !< 83 82 INTEGER(isp), DIMENSION(:,:,:), ALLOCATABLE :: seq_random_array !< 84 83 85 84 REAL(wp), SAVE :: amm !< 86 85 87 86 REAL(wp) :: random_dummy=0.0 !< 88 87 89 88 INTERFACE init_parallel_random_generator 90 MODULE PROCEDURE init_parallel_random_generator 89 MODULE PROCEDURE init_parallel_random_generator_1d 90 MODULE PROCEDURE init_parallel_random_generator_2d 91 91 END INTERFACE 92 92 93 93 INTERFACE random_number_parallel 94 94 MODULE PROCEDURE ran0_s 95 95 END INTERFACE 96 96 97 97 INTERFACE random_seed_parallel 98 98 MODULE PROCEDURE random_seed_parallel 99 99 END INTERFACE 100 100 101 101 INTERFACE ran_hash 102 102 MODULE PROCEDURE ran_hash_v 103 103 END INTERFACE 104 104 105 105 INTERFACE reallocate 106 106 MODULE PROCEDURE reallocate_iv,reallocate_im 107 107 END INTERFACE 108 108 109 109 INTERFACE arth 110 110 MODULE PROCEDURE arth_i 111 111 END INTERFACE 112 112 113 PRIVATE 114 115 PUBLIC random_number_parallel, random_seed_parallel, random_dummy, & 116 id_random_array, seq_random_array, init_parallel_random_generator 117 113 118 CONTAINS 114 115 !------------------------------------------------------------------------------! 116 ! Description: 117 ! ------------ 118 !> Initialize the parallel random number generator for a specific subdomain 119 !------------------------------------------------------------------------------! 120 SUBROUTINE init_parallel_random_generator( nx, nys, nyn, nxl, nxr ) 121 122 USE kinds 119 120 !------------------------------------------------------------------------------! 121 ! Description: 122 ! ------------ 123 !> Initialize the parallel random number generator for a 1-dimensional array. 124 !------------------------------------------------------------------------------! 125 SUBROUTINE init_parallel_random_generator_1d( nxy, ns, ne, id_rand, seq_rand ) 123 126 124 127 USE control_parameters, & 125 128 ONLY: ensemble_member_nr 126 129 130 INTEGER(isp), INTENT(IN) :: nxy !< constant scaling with grid dimensions 131 INTEGER(isp), INTENT(IN) :: ns !< start index on subdomain 132 INTEGER(isp), INTENT(IN) :: ne !< end index on subdomain 133 134 INTEGER(iwp) :: j !< loop index 135 136 INTEGER(isp), DIMENSION(ns:ne) :: id_rand !< initial IDs 137 INTEGER(isp), DIMENSION(5,ns:ne) :: seq_rand !< initial random seeds 138 139 ! 
140 !-- Asigning an ID to every vertical gridpoint column 141 !-- dependig on the ensemble run number. 142 DO j = ns, ne 143 id_rand(j) = j * ( nxy + 1.0_wp ) + 1.0_wp + 1E6 * & 144 ( ensemble_member_nr - 1000 ) 145 ENDDO 146 ! 147 !-- Initializing with random_seed_parallel for every vertical 148 !-- gridpoint column. 149 random_dummy=0 150 DO j = ns, ne 151 CALL random_seed_parallel( random_sequence=id_rand(j) ) 152 CALL random_number_parallel( random_dummy ) 153 CALL random_seed_parallel( get=seq_rand(:,j) ) 154 ENDDO 155 156 END SUBROUTINE init_parallel_random_generator_1d 157 158 !------------------------------------------------------------------------------! 159 ! Description: 160 ! ------------ 161 !> Initialize the parallel random number generator for a specific subdomain 162 !------------------------------------------------------------------------------! 163 SUBROUTINE init_parallel_random_generator_2d( nx_l, nys_l, nyn_l, nxl_l, nxr_l ) 164 165 USE kinds 166 167 USE control_parameters, & 168 ONLY: ensemble_member_nr 169 127 170 IMPLICIT NONE 128 171 129 INTEGER(isp), INTENT(IN) :: nx !<130 INTEGER(isp), INTENT(IN) :: nys !<131 INTEGER(isp), INTENT(IN) :: nyn !<132 INTEGER(isp), INTENT(IN) :: nxl !<133 INTEGER(isp), INTENT(IN) :: nxr !<134 135 INTEGER(iwp) :: i !< 136 INTEGER(iwp) :: j !< 172 INTEGER(isp), INTENT(IN) :: nx_l !< constant 173 INTEGER(isp), INTENT(IN) :: nys_l !< local lower subdomain bound index in y-direction 174 INTEGER(isp), INTENT(IN) :: nyn_l !< local upper subdomain bound index in y-direction 175 INTEGER(isp), INTENT(IN) :: nxl_l !< local lower subdomain bound index in x-direction 176 INTEGER(isp), INTENT(IN) :: nxr_l !< local upper subdomain bound index in x-direction 177 178 INTEGER(iwp) :: i !< grid index x-direction 179 INTEGER(iwp) :: j !< grid index y-direction 137 180 138 181 !-- Allocate ID-array and state-space-array 139 ALLOCATE ( seq_random_array(5,nys :nyn,nxl:nxr) )140 ALLOCATE ( id_random_array(nys :nyn,nxl:nxr) )182 ALLOCATE ( seq_random_array(5,nys_l:nyn_l,nxl_l:nxr_l) ) 183 ALLOCATE ( id_random_array(nys_l:nyn_l,nxl_l:nxr_l) ) 141 184 seq_random_array = 0 142 185 id_random_array = 0 … … 144 187 !-- Asigning an ID to every vertical gridpoint column 145 188 !-- dependig on the ensemble run number. 146 DO i=nxl, nxr147 DO j=nys, nyn148 id_random_array(j,i) = j *(nx+1.0_wp) + i + 1.0_wp + 1E6 *&189 DO i = nxl_l, nxr_l 190 DO j = nys_l, nyn_l 191 id_random_array(j,i) = j * ( nx_l + 1.0_wp ) + i + 1.0_wp + 1E6 * & 149 192 ( ensemble_member_nr - 1000 ) 150 193 ENDDO … … 153 196 !-- gridpoint column. 154 197 random_dummy=0 155 DO i = nxl, nxr156 DO j = nys, nyn198 DO i = nxl_l, nxr_l 199 DO j = nys_l, nyn_l 157 200 CALL random_seed_parallel (random_sequence=id_random_array(j, i)) 158 201 CALL random_number_parallel (random_dummy) … … 161 204 ENDDO 162 205 163 END SUBROUTINE init_parallel_random_generator 206 END SUBROUTINE init_parallel_random_generator_2d 164 207 165 208 !------------------------------------------------------------------------------! -
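For illustration only (not part of the changeset): a minimal sketch of how the new 1-d initializer is meant to be used. One call fills the per-column ID and seed arrays; afterwards every column can be advanced independently and reproducibly via put/get of its own seed state. The bounds ns, ne and the scaling constant nxy are hypothetical values, and the sketch assumes the PALM modules kinds and random_generator_parallel are compiled alongside (ensemble_member_nr comes from control_parameters inside the initializer).

 PROGRAM init_1d_seeds_sketch

    USE kinds

    USE random_generator_parallel,                                           &
        ONLY:  init_parallel_random_generator, random_number_parallel,       &
               random_seed_parallel, random_dummy

    IMPLICIT NONE

    INTEGER(isp), PARAMETER ::  ns  = 0   !< hypothetical start index
    INTEGER(isp), PARAMETER ::  ne  = 31  !< hypothetical end index
    INTEGER(isp), PARAMETER ::  nxy = 31  !< hypothetical scaling constant

    INTEGER(isp) ::  j                             !< loop index
    INTEGER(isp), DIMENSION(ns:ne)   ::  id_rand   !< per-column random IDs
    INTEGER(isp), DIMENSION(5,ns:ne) ::  seq_rand  !< per-column seed state

 !
 !-- One call assigns an ID and an initial seed state to every column;
 !-- the array arguments select the new 1-d specific routine of the
 !-- generic interface.
    CALL init_parallel_random_generator( nxy, ns, ne, id_rand, seq_rand )
 !
 !-- Each column can now be advanced independently of all others
    DO  j = ns, ne
       CALL random_seed_parallel( put=seq_rand(:,j) )
       CALL random_number_parallel( random_dummy )
       CALL random_seed_parallel( get=seq_rand(:,j) )
    ENDDO

 END PROGRAM init_1d_seeds_sketch

This mirrors the calling pattern the changeset introduces in the synthetic turbulence generator for id_rand_yz/seq_rand_yz.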
palm/trunk/SOURCE/synthetic_turbulence_generator_mod.f90
r4346 r4438 25 25 ! ----------------- 26 26 ! $Id$ 27 ! Performance optimizations in velocity-seed calculation: 28 ! - random number array is only defined and computed locally (except for 29 ! normalization to zero mean and unit variance) 30 ! - parallel random number generator is applied independent on the 2D random 31 ! numbers in other routines 32 ! - option to decide wheter velocity seeds are computed locally without any 33 ! further communication or are computed by all processes along the 34 ! communicator 35 ! 36 ! 4346 2019-12-18 11:55:56Z motisi 27 37 ! Introduction of wall_flags_total_0, which currently sets bits based on static 28 38 ! topography information used in wall_flags_static_0 … … 184 194 time_since_reference_point, & 185 195 turbulent_inflow 186 196 197 USE cpulog, & 198 ONLY: cpu_log, & 199 log_point_s 200 187 201 USE grid_variables, & 188 202 ONLY: ddx, & … … 199 213 nxl, & 200 214 nxlu, & 201 nxlg, &202 215 nxr, & 203 nxrg, &204 216 ny, & 205 217 nys, & 206 218 nysv, & 207 219 nyn, & 208 nyng, &209 nysg, &210 220 wall_flags_total_0 211 221 … … 228 238 myidy, & 229 239 pdims 230 240 231 241 USE pmc_interface, & 232 242 ONLY : rans_mode_parent 233 234 USE random_generator_parallel, & 235 ONLY: random_dummy, & 243 244 USE random_generator_parallel, & 245 ONLY: init_parallel_random_generator, & 246 random_dummy, & 236 247 random_number_parallel, & 237 random_seed_parallel, & 238 seq_random_array 248 random_seed_parallel 239 249 240 250 USE transpose_indices, & … … 242 252 nzt_x 243 253 254 USE surface_mod, & 255 ONLY: surf_def_h, & 256 surf_lsm_h, & 257 surf_usm_h 244 258 245 259 IMPLICIT NONE … … 253 267 LOGICAL :: parametrize_inflow_turbulence = .FALSE. !< flag indicating that inflow turbulence is either read from file (.FALSE.) or if it parametrized 254 268 LOGICAL :: use_syn_turb_gen = .FALSE. !< switch to use synthetic turbulence generator 269 LOGICAL :: compute_velocity_seeds_local = .TRUE. 
!< switch to decide whether velocity seeds are computed locally or if computation 270 !< is distributed over several processes 255 271 256 272 INTEGER(iwp) :: id_stg_left !< left lateral boundary core id in case of turbulence generator … … 258 274 INTEGER(iwp) :: id_stg_right !< right lateral boundary core id in case of turbulence generator 259 275 INTEGER(iwp) :: id_stg_south !< south lateral boundary core id in case of turbulence generator 260 INTEGER(iwp) :: merg !< maximum length scale (in gp) 261 INTEGER(iwp) :: mergp !< merg + nbgp 276 INTEGER(iwp) :: mergp !< maximum length scale (in gp) 262 277 INTEGER(iwp) :: nzb_x_stg !< lower bound of z coordinate (required for transposing z on PEs along x) 263 278 INTEGER(iwp) :: nzt_x_stg !< upper bound of z coordinate (required for transposing z on PEs along x) … … 288 303 INTEGER(iwp), DIMENSION(:), ALLOCATABLE :: nwz !< length scale of w in z direction (in gp) 289 304 305 INTEGER(isp), DIMENSION(:), ALLOCATABLE :: id_rand_xz !< initial random IDs at xz inflow boundary 306 INTEGER(isp), DIMENSION(:), ALLOCATABLE :: id_rand_yz !< initial random IDs at yz inflow boundary 307 INTEGER(isp), DIMENSION(:,:), ALLOCATABLE :: seq_rand_xz !< initial random seeds at xz inflow boundary 308 INTEGER(isp), DIMENSION(:,:), ALLOCATABLE :: seq_rand_yz !< initial random seeds at yz inflow boundary 309 290 310 REAL(wp) :: blend !< value to create gradually and smooth blending of Reynolds stress and length 291 311 !< scales above the boundary layer … … 295 315 REAL(wp) :: length_scale !< length scale, default is 8 x minimum grid spacing 296 316 REAL(wp) :: dt_stg_adjust = 300.0_wp !< time interval for adjusting turbulence statistics 297 REAL(wp) :: dt_stg_call = 5.0_wp !< time interval for calling synthetic turbulence generator317 REAL(wp) :: dt_stg_call = 0.0_wp !< time interval for calling synthetic turbulence generator 298 318 REAL(wp) :: scale_l !< scaling parameter used for turbulence parametrization - Obukhov length 299 319 REAL(wp) :: scale_us !< scaling parameter used for turbulence parametrization - friction velocity … … 438 458 SUBROUTINE stg_check_parameters 439 459 440 IMPLICIT NONE441 442 460 IF ( .NOT. use_syn_turb_gen .AND. .NOT. rans_mode .AND. & 443 461 nesting_offline ) THEN … … 526 544 SUBROUTINE stg_header ( io ) 527 545 528 529 IMPLICIT NONE530 531 546 INTEGER(iwp), INTENT(IN) :: io !< Unit of the output file 532 547 … … 562 577 !------------------------------------------------------------------------------! 563 578 SUBROUTINE stg_init 564 565 IMPLICIT NONE566 579 567 580 LOGICAL :: file_stg_exist = .FALSE. !< flag indicating whether parameter file for Reynolds stress and length scales exist … … 599 612 CALL MPI_BARRIER( comm2d, ierr ) 600 613 #endif 601 614 ! 615 !-- Create mpi-datatypes for exchange in case of non-local but distributed 616 !-- computation of the velocity seeds. This option is useful in 617 !-- case large turbulent length scales are presentm, where the computational 618 !-- effort becomes large and need to be parallelized. For parametrized 619 !-- turbulence the length scales are small and computing the velocity seeds 620 !-- locally is faster (no overhead by communication). 621 IF ( .NOT. compute_velocity_seeds_local ) THEN 602 622 #if defined( __parallel ) 603 ! 604 !-- Determine processor decomposition of z-axis along x- and y-direction 605 nnz = nz / pdims(1) 606 nzb_x_stg = 1 + myidx * INT( nnz ) 607 nzt_x_stg = ( myidx + 1 ) * INT( nnz ) 608 609 IF ( MOD( nz , pdims(1) ) /= 0 .AND. 
myidx == id_stg_right ) & 610 nzt_x_stg = nzt_x_stg + myidx * ( nnz - INT( nnz ) ) 611 612 IF ( nesting_offline .OR. ( child_domain .AND. rans_mode_parent & 613 .AND. .NOT. rans_mode ) ) THEN 614 nnz = nz / pdims(2) 615 nzb_y_stg = 1 + myidy * INT( nnz ) 616 nzt_y_stg = ( myidy + 1 ) * INT( nnz ) 617 618 IF ( MOD( nz , pdims(2) ) /= 0 .AND. myidy == id_stg_north ) & 619 nzt_y_stg = nzt_y_stg + myidy * ( nnz - INT( nnz ) ) 620 ENDIF 621 622 ! 623 !-- Define MPI type used in stg_generate_seed_yz to gather vertical splitted 624 !-- velocity seeds 625 CALL MPI_TYPE_SIZE( MPI_REAL, realsize, ierr ) 626 extent = 1 * realsize 627 ! 628 !-- Set-up MPI datatyp to involve all cores for turbulence generation at yz 629 !-- layer 630 !-- stg_type_yz: yz-slice with vertical bounds nzb:nzt+1 631 CALL MPI_TYPE_CREATE_SUBARRAY( 2, [nzt-nzb+2,nyng-nysg+1], & 632 [1,nyng-nysg+1], [0,0], MPI_ORDER_FORTRAN, MPI_REAL, newtype, ierr ) 633 CALL MPI_TYPE_CREATE_RESIZED( newtype, tob, extent, stg_type_yz, ierr ) 634 CALL MPI_TYPE_COMMIT( stg_type_yz, ierr ) 635 CALL MPI_TYPE_FREE( newtype, ierr ) 636 637 ! stg_type_yz_small: yz-slice with vertical bounds nzb_x_stg:nzt_x_stg+1 638 CALL MPI_TYPE_CREATE_SUBARRAY( 2, [nzt_x_stg-nzb_x_stg+2,nyng-nysg+1], & 639 [1,nyng-nysg+1], [0,0], MPI_ORDER_FORTRAN, MPI_REAL, newtype, ierr ) 640 CALL MPI_TYPE_CREATE_RESIZED( newtype, tob, extent, stg_type_yz_small, ierr ) 641 CALL MPI_TYPE_COMMIT( stg_type_yz_small, ierr ) 642 CALL MPI_TYPE_FREE( newtype, ierr ) 643 644 ! receive count and displacement for MPI_GATHERV in stg_generate_seed_yz 645 ALLOCATE( recv_count_yz(pdims(1)), displs_yz(pdims(1)) ) 646 647 recv_count_yz = nzt_x_stg-nzb_x_stg + 1 648 recv_count_yz(pdims(1)) = recv_count_yz(pdims(1)) + 1 649 650 DO j = 1, pdims(1) 651 displs_yz(j) = 0 + (nzt_x_stg-nzb_x_stg+1) * (j-1) 652 ENDDO 653 ! 654 !-- Set-up MPI datatyp to involve all cores for turbulence generation at xz 655 !-- layer 656 !-- stg_type_xz: xz-slice with vertical bounds nzb:nzt+1 657 IF ( nesting_offline .OR. ( child_domain .AND. rans_mode_parent & 658 .AND. .NOT. rans_mode ) ) THEN 659 CALL MPI_TYPE_CREATE_SUBARRAY( 2, [nzt-nzb+2,nxrg-nxlg+1], & 660 [1,nxrg-nxlg+1], [0,0], MPI_ORDER_FORTRAN, MPI_REAL, newtype, ierr ) 661 CALL MPI_TYPE_CREATE_RESIZED( newtype, tob, extent, stg_type_xz, ierr ) 662 CALL MPI_TYPE_COMMIT( stg_type_xz, ierr ) 623 ! 624 !-- Determine processor decomposition of z-axis along x- and y-direction 625 nnz = nz / pdims(1) 626 nzb_x_stg = 1 + myidx * INT( nnz ) 627 nzt_x_stg = ( myidx + 1 ) * INT( nnz ) 628 629 IF ( MOD( nz , pdims(1) ) /= 0 .AND. myidx == id_stg_right ) & 630 nzt_x_stg = nzt_x_stg + myidx * ( nnz - INT( nnz ) ) 631 632 IF ( nesting_offline .OR. ( child_domain .AND. rans_mode_parent & 633 .AND. .NOT. rans_mode ) ) THEN 634 nnz = nz / pdims(2) 635 nzb_y_stg = 1 + myidy * INT( nnz ) 636 nzt_y_stg = ( myidy + 1 ) * INT( nnz ) 637 638 IF ( MOD( nz , pdims(2) ) /= 0 .AND. myidy == id_stg_north ) & 639 nzt_y_stg = nzt_y_stg + myidy * ( nnz - INT( nnz ) ) 640 ENDIF 641 642 ! 643 !-- Define MPI type used in stg_generate_seed_yz to gather vertical splitted 644 !-- velocity seeds 645 CALL MPI_TYPE_SIZE( MPI_REAL, realsize, ierr ) 646 extent = 1 * realsize 647 ! 
648 !-- Set-up MPI datatyp to involve all cores for turbulence generation at yz 649 !-- layer 650 !-- stg_type_yz: yz-slice with vertical bounds nzb:nzt+1 651 CALL MPI_TYPE_CREATE_SUBARRAY( 2, [nzt-nzb+2,nyn-nys+1], & 652 [1,nyn-nys+1], [0,0], MPI_ORDER_FORTRAN, MPI_REAL, newtype, ierr ) 653 CALL MPI_TYPE_CREATE_RESIZED( newtype, tob, extent, stg_type_yz, ierr ) 654 CALL MPI_TYPE_COMMIT( stg_type_yz, ierr ) 663 655 CALL MPI_TYPE_FREE( newtype, ierr ) 664 665 ! stg_type_yz_small: xz-slice with vertical bounds nzb_x_stg:nzt_x_stg+1666 CALL MPI_TYPE_CREATE_SUBARRAY( 2, [nzt_ y_stg-nzb_y_stg+2,nxrg-nxlg+1],&667 [1,n xrg-nxlg+1], [0,0], MPI_ORDER_FORTRAN, MPI_REAL, newtype, ierr )668 CALL MPI_TYPE_CREATE_RESIZED( newtype, tob, extent, stg_type_ xz_small, ierr )669 CALL MPI_TYPE_COMMIT( stg_type_ xz_small, ierr )656 657 ! stg_type_yz_small: yz-slice with vertical bounds nzb_x_stg:nzt_x_stg+1 658 CALL MPI_TYPE_CREATE_SUBARRAY( 2, [nzt_x_stg-nzb_x_stg+2,nyn-nys+1], & 659 [1,nyn-nys+1], [0,0], MPI_ORDER_FORTRAN, MPI_REAL, newtype, ierr ) 660 CALL MPI_TYPE_CREATE_RESIZED( newtype, tob, extent, stg_type_yz_small, ierr ) 661 CALL MPI_TYPE_COMMIT( stg_type_yz_small, ierr ) 670 662 CALL MPI_TYPE_FREE( newtype, ierr ) 671 663 672 664 ! receive count and displacement for MPI_GATHERV in stg_generate_seed_yz 673 ALLOCATE( recv_count_ xz(pdims(2)), displs_xz(pdims(2)) )674 675 recv_count_ xz = nzt_y_stg-nzb_y_stg + 1676 recv_count_ xz(pdims(2)) = recv_count_xz(pdims(2)) + 1677 678 DO j = 1, pdims( 2)679 displs_ xz(j) = 0 + (nzt_y_stg-nzb_y_stg+1) * (j-1)665 ALLOCATE( recv_count_yz(pdims(1)), displs_yz(pdims(1)) ) 666 667 recv_count_yz = nzt_x_stg-nzb_x_stg + 1 668 recv_count_yz(pdims(1)) = recv_count_yz(pdims(1)) + 1 669 670 DO j = 1, pdims(1) 671 displs_yz(j) = 0 + (nzt_x_stg-nzb_x_stg+1) * (j-1) 680 672 ENDDO 681 673 ! 674 !-- Set-up MPI datatyp to involve all cores for turbulence generation at xz 675 !-- layer 676 !-- stg_type_xz: xz-slice with vertical bounds nzb:nzt+1 677 IF ( nesting_offline .OR. ( child_domain .AND. rans_mode_parent & 678 .AND. .NOT. rans_mode ) ) THEN 679 CALL MPI_TYPE_CREATE_SUBARRAY( 2, [nzt-nzb+2,nxr-nxl+1], & 680 [1,nxr-nxl+1], [0,0], MPI_ORDER_FORTRAN, MPI_REAL, newtype, ierr ) 681 CALL MPI_TYPE_CREATE_RESIZED( newtype, tob, extent, stg_type_xz, ierr ) 682 CALL MPI_TYPE_COMMIT( stg_type_xz, ierr ) 683 CALL MPI_TYPE_FREE( newtype, ierr ) 684 685 ! stg_type_yz_small: xz-slice with vertical bounds nzb_x_stg:nzt_x_stg+1 686 CALL MPI_TYPE_CREATE_SUBARRAY( 2, [nzt_y_stg-nzb_y_stg+2,nxr-nxl+1], & 687 [1,nxr-nxl+1], [0,0], MPI_ORDER_FORTRAN, MPI_REAL, newtype, ierr ) 688 CALL MPI_TYPE_CREATE_RESIZED( newtype, tob, extent, stg_type_xz_small, ierr ) 689 CALL MPI_TYPE_COMMIT( stg_type_xz_small, ierr ) 690 CALL MPI_TYPE_FREE( newtype, ierr ) 691 692 ! 
receive count and displacement for MPI_GATHERV in stg_generate_seed_yz 693 ALLOCATE( recv_count_xz(pdims(2)), displs_xz(pdims(2)) ) 694 695 recv_count_xz = nzt_y_stg-nzb_y_stg + 1 696 recv_count_xz(pdims(2)) = recv_count_xz(pdims(2)) + 1 697 698 DO j = 1, pdims(2) 699 displs_xz(j) = 0 + (nzt_y_stg-nzb_y_stg+1) * (j-1) 700 ENDDO 701 702 ENDIF 682 703 ENDIF 683 704 … … 715 736 tu(nzb:nzt+1), tv(nzb:nzt+1), tw(nzb:nzt+1) ) 716 737 717 ALLOCATE ( dist_xz(nzb:nzt+1,nxl g:nxrg,3) )718 ALLOCATE ( dist_yz(nzb:nzt+1,nys g:nyng,3) )738 ALLOCATE ( dist_xz(nzb:nzt+1,nxl:nxr,3) ) 739 ALLOCATE ( dist_yz(nzb:nzt+1,nys:nyn,3) ) 719 740 dist_xz = 0.0_wp 720 741 dist_yz = 0.0_wp … … 788 809 !-- Define length scale for the imposed turbulence, which is defined as 789 810 !-- 8 times the minimum grid spacing 790 length_scale = 8.0_wp * MIN( dx, dy, MINVAL( dzw ) )811 length_scale = 30.0_wp * MIN( dx, dy, MINVAL( dzw ) ) !8.0_wp * MIN( dx, dy, MINVAL( dzw ) ) 791 812 ! 792 813 !-- Define constant to gradually decrease length scales and Reynolds stress … … 837 858 ! 838 859 !-- Define the size of the filter functions and allocate them. 839 merg = 0860 mergp = 0 840 861 841 862 ! arrays must be large enough to cover the largest length scale … … 844 865 ABS(nvx(k)), ABS(nvy(k)), ABS(nvz(k)), & 845 866 ABS(nwx(k)), ABS(nwy(k)), ABS(nwz(k)) ) 846 IF ( j > merg ) merg= j867 IF ( j > mergp ) mergp = j 847 868 ENDDO 848 869 849 merg = 2 * merg 850 mergp = merg + nbgp851 852 ALLOCATE ( bux(-merg :merg,nzb:nzt+1), &853 buy(-merg :merg,nzb:nzt+1), &854 buz(-merg :merg,nzb:nzt+1), &855 bvx(-merg :merg,nzb:nzt+1), &856 bvy(-merg :merg,nzb:nzt+1), &857 bvz(-merg :merg,nzb:nzt+1), &858 bwx(-merg :merg,nzb:nzt+1), &859 bwy(-merg :merg,nzb:nzt+1), &860 bwz(-merg :merg,nzb:nzt+1) )870 ! mergp = 2 * mergp 871 ! mergp = mergp 872 873 ALLOCATE ( bux(-mergp:mergp,nzb:nzt+1), & 874 buy(-mergp:mergp,nzb:nzt+1), & 875 buz(-mergp:mergp,nzb:nzt+1), & 876 bvx(-mergp:mergp,nzb:nzt+1), & 877 bvy(-mergp:mergp,nzb:nzt+1), & 878 bvz(-mergp:mergp,nzb:nzt+1), & 879 bwx(-mergp:mergp,nzb:nzt+1), & 880 bwy(-mergp:mergp,nzb:nzt+1), & 881 bwz(-mergp:mergp,nzb:nzt+1) ) 861 882 862 883 ! 863 884 !-- Allocate velocity seeds for turbulence at xz-layer 864 ALLOCATE ( fu_xz( nzb:nzt+1,nxl g:nxrg), fuo_xz(nzb:nzt+1,nxlg:nxrg), &865 fv_xz( nzb:nzt+1,nxl g:nxrg), fvo_xz(nzb:nzt+1,nxlg:nxrg), &866 fw_xz( nzb:nzt+1,nxl g:nxrg), fwo_xz(nzb:nzt+1,nxlg:nxrg) )885 ALLOCATE ( fu_xz( nzb:nzt+1,nxl:nxr), fuo_xz(nzb:nzt+1,nxl:nxr), & 886 fv_xz( nzb:nzt+1,nxl:nxr), fvo_xz(nzb:nzt+1,nxl:nxr), & 887 fw_xz( nzb:nzt+1,nxl:nxr), fwo_xz(nzb:nzt+1,nxl:nxr) ) 867 888 868 889 ! 869 890 !-- Allocate velocity seeds for turbulence at yz-layer 870 ALLOCATE ( fu_yz( nzb:nzt+1,nys g:nyng), fuo_yz(nzb:nzt+1,nysg:nyng), &871 fv_yz( nzb:nzt+1,nys g:nyng), fvo_yz(nzb:nzt+1,nysg:nyng), &872 fw_yz( nzb:nzt+1,nys g:nyng), fwo_yz(nzb:nzt+1,nysg:nyng) )891 ALLOCATE ( fu_yz( nzb:nzt+1,nys:nyn), fuo_yz(nzb:nzt+1,nys:nyn), & 892 fv_yz( nzb:nzt+1,nys:nyn), fvo_yz(nzb:nzt+1,nys:nyn), & 893 fw_yz( nzb:nzt+1,nys:nyn), fwo_yz(nzb:nzt+1,nys:nyn) ) 873 894 874 895 fu_xz = 0.0_wp … … 913 934 IF ( myidx == id_stg_right ) i = nxr+1 914 935 915 DO j = nys g, nyng936 DO j = nys, nyn 916 937 DO k = nzb, nzt+1 917 938 IF ( a11(k) > 10E-8_wp ) THEN … … 944 965 IF ( myidy == id_stg_north ) j = nyn+1 945 966 946 DO i = nxl g, nxrg967 DO i = nxl, nxr 947 968 DO k = nzb, nzt+1 948 969 ! … … 1039 1060 #endif 1040 1061 ENDIF 1062 ! 1063 !-- Initialize random number generator at xz- and yz-layers. 
Random numbers 1064 !-- are initialized at each core. In case there is only inflow from the left, 1065 !-- it is sufficient to generate only random numbers for the yz-layer, else 1066 !-- random numbers for the xz-layer are also required. 1067 ALLOCATE ( id_rand_yz(-mergp+nys:nyn+mergp) ) 1068 ALLOCATE ( seq_rand_yz(5,-mergp+nys:nyn+mergp) ) 1069 id_rand_yz = 0 1070 seq_rand_yz = 0 1071 1072 CALL init_parallel_random_generator( ny, -mergp+nys, nyn+mergp, & 1073 id_rand_yz, seq_rand_yz ) 1074 1075 IF ( nesting_offline .OR. ( child_domain .AND. rans_mode_parent & 1076 .AND. .NOT. rans_mode ) ) THEN 1077 ALLOCATE ( id_rand_xz(-mergp+nxl:nxr+mergp) ) 1078 ALLOCATE ( seq_rand_xz(5,-mergp+nxl:nxr+mergp) ) 1079 id_rand_xz = 0 1080 seq_rand_xz = 0 1081 1082 CALL init_parallel_random_generator( nx, -mergp+nxl, nxr+mergp, & 1083 id_rand_xz, seq_rand_xz ) 1084 ENDIF 1085 1041 1086 1042 1087 … … 1052 1097 SUBROUTINE stg_filter_func( nxx, bxx ) 1053 1098 1054 1055 IMPLICIT NONE1056 1057 1099 INTEGER(iwp) :: k !< loop index 1058 1100 INTEGER(iwp) :: n_k !< length scale nXX in height k 1059 INTEGER(iwp) :: n_k2 !< n_k * 21060 1101 INTEGER(iwp) :: nf !< index for length scales 1061 1102 … … 1063 1104 REAL(wp) :: qsi = 1.0_wp !< minimization factor 1064 1105 1065 INTEGER(iwp), DIMENSION( :) :: nxx(nzb:nzt+1)!< length scale (in gp)1066 1067 REAL(wp), DIMENSION( :,:) :: bxx(-merg:merg,nzb:nzt+1)!< filter function1106 INTEGER(iwp), DIMENSION(nzb:nzt+1) :: nxx !< length scale (in gp) 1107 1108 REAL(wp), DIMENSION(-mergp:mergp,nzb:nzt+1) :: bxx !< filter function 1068 1109 1069 1110 … … 1074 1115 n_k = nxx(k) 1075 1116 IF ( n_k /= 0 ) THEN 1076 n_k2 = n_k * 21077 1117 1078 1118 ! 1079 1119 !-- ( Eq.10 )^2 1080 DO nf = -n_k 2, n_k21120 DO nf = -n_k, n_k 1081 1121 bdenom = bdenom + EXP( -qsi * pi * ABS(nf) / n_k )**2 1082 1122 ENDDO … … 1085 1125 !-- ( Eq.9 ) 1086 1126 bdenom = SQRT( bdenom ) 1087 DO nf = -n_k 2, n_k21127 DO nf = -n_k, n_k 1088 1128 bxx(nf,k) = EXP( -qsi * pi * ABS(nf) / n_k ) / bdenom 1089 1129 ENDDO … … 1101 1141 SUBROUTINE stg_parin 1102 1142 1103 1104 IMPLICIT NONE1105 1106 1143 CHARACTER (LEN=80) :: line !< dummy string that contains the current line of the parameter file 1107 1144 1108 1145 1109 NAMELIST /stg_par/ dt_stg_adjust, dt_stg_call, use_syn_turb_gen 1146 NAMELIST /stg_par/ dt_stg_adjust, & 1147 dt_stg_call, & 1148 use_syn_turb_gen, & 1149 compute_velocity_seeds_local 1110 1150 1111 1151 line = ' ' 1112 1113 1152 ! 1114 1153 !-- Try to find stg package … … 1146 1185 SUBROUTINE stg_rrd_global( found ) 1147 1186 1148 1149 IMPLICIT NONE1150 1151 1187 LOGICAL, INTENT(OUT) :: found !< flag indicating if variable was found 1152 1153 1188 1154 1189 found = .TRUE. … … 1183 1218 SUBROUTINE stg_wrd_global 1184 1219 1185 1186 IMPLICIT NONE1187 1188 1220 CALL wrd_write_string( 'time_stg_adjust' ) 1189 1221 WRITE ( 14 ) time_stg_adjust … … 1208 1240 !------------------------------------------------------------------------------! 1209 1241 SUBROUTINE stg_main 1210 1211 IMPLICIT NONE1212 1242 1213 1243 INTEGER(iwp) :: i !< grid index in x-direction … … 1252 1282 ( child_domain .AND. rans_mode_parent & 1253 1283 .AND. .NOT. rans_mode ) ) ) THEN 1254 1255 1284 CALL stg_generate_seed_yz( nuy, nuz, buy, buz, fu_yz, id_stg_left ) 1256 1285 CALL stg_generate_seed_yz( nvy, nvz, bvy, bvz, fv_yz, id_stg_left ) … … 1315 1344 IF ( stg_call ) THEN 1316 1345 1317 DO j = nys g, nyng1346 DO j = nys, nyn 1318 1347 DO k = nzb, nzt + 1 1319 1348 ! 
… … 1348 1377 IF ( myidx == id_stg_left ) i = nxl 1349 1378 IF ( myidx == id_stg_right ) i = nxr+1 1350 DO j = nys g, nyng1379 DO j = nys, nyn 1351 1380 DO k = nzb+1, nzt + 1 1352 1381 ! … … 1361 1390 IF ( myidx == id_stg_left ) i = nxl-1 1362 1391 IF ( myidx == id_stg_right ) i = nxr+1 1363 DO j = nys g, nyng1392 DO j = nys, nyn 1364 1393 DO k = nzb+1, nzt + 1 1365 1394 ! … … 1427 1456 IF ( myidx == id_stg_right ) i = nxr+1 1428 1457 1429 dist_yz(:, :,1) = ( dist_yz(:,:,1) - mc_factor(1) ) &1458 dist_yz(:,nys:nyn,1) = ( dist_yz(:,nys:nyn,1) - mc_factor(1) ) & 1430 1459 * MERGE( 1.0_wp, 0.0_wp, & 1431 BTEST( wall_flags_total_0(:, :,i), 1 ) )1460 BTEST( wall_flags_total_0(:,nys:nyn,i), 1 ) ) 1432 1461 1433 1462 … … 1435 1464 IF ( myidx == id_stg_right ) i = nxr+1 1436 1465 1437 dist_yz(:, :,2) = ( dist_yz(:,:,2) - mc_factor(2) ) &1466 dist_yz(:,nys:nyn,2) = ( dist_yz(:,nys:nyn,2) - mc_factor(2) ) & 1438 1467 * MERGE( 1.0_wp, 0.0_wp, & 1439 BTEST( wall_flags_total_0(:, :,i), 2 ) )1468 BTEST( wall_flags_total_0(:,nys:nyn,i), 2 ) ) 1440 1469 1441 dist_yz(:, :,3) = ( dist_yz(:,:,3) - mc_factor(3) ) &1470 dist_yz(:,nys:nyn,3) = ( dist_yz(:,nys:nyn,3) - mc_factor(3) ) & 1442 1471 * MERGE( 1.0_wp, 0.0_wp, & 1443 BTEST( wall_flags_total_0(:, :,i), 3 ) )1472 BTEST( wall_flags_total_0(:,nys:nyn,i), 3 ) ) 1444 1473 ! 1445 1474 !-- Add disturbances … … 1452 1481 ! 1453 1482 !-- Add disturbance at the inflow 1454 DO j = nys g, nyng1483 DO j = nys, nyn 1455 1484 DO k = nzb, nzt+1 1456 1485 u(k,j,-nbgp+1:0) = ( mean_inflow_profiles(k,1) + & … … 1510 1539 !-- imposed 1511 1540 IF ( stg_call ) THEN 1512 DO i = nxl g, nxrg1541 DO i = nxl, nxr 1513 1542 DO k = nzb, nzt + 1 1514 1543 ! … … 1544 1573 IF ( myidy == id_stg_south ) j = nys 1545 1574 IF ( myidy == id_stg_north ) j = nyn+1 1546 DO i = nxl g, nxrg1575 DO i = nxl, nxr 1547 1576 DO k = nzb+1, nzt + 1 1548 1577 ! … … 1561 1590 IF ( myidy == id_stg_south ) j = nys-1 1562 1591 IF ( myidy == id_stg_north ) j = nyn+1 1563 DO i = nxl g, nxrg1592 DO i = nxl, nxr 1564 1593 DO k = nzb+1, nzt + 1 1565 1594 ! … … 1621 1650 IF ( myidy == id_stg_north ) j = nyn+1 1622 1651 1623 dist_xz(:, :,2) = ( dist_xz(:,:,2) - mc_factor(2) ) &1652 dist_xz(:,nxl:nxr,2) = ( dist_xz(:,nxl:nxr,2) - mc_factor(2) ) & 1624 1653 * MERGE( 1.0_wp, 0.0_wp, & 1625 BTEST( wall_flags_total_0(:,j, :), 2 ) )1654 BTEST( wall_flags_total_0(:,j,nxl:nxr), 2 ) ) 1626 1655 1627 1656 … … 1629 1658 IF ( myidy == id_stg_north ) j = nyn+1 1630 1659 1631 dist_xz(:, :,1) = ( dist_xz(:,:,1) - mc_factor(1) ) &1660 dist_xz(:,nxl:nxr,1) = ( dist_xz(:,nxl:nxr,1) - mc_factor(1) ) & 1632 1661 * MERGE( 1.0_wp, 0.0_wp, & 1633 BTEST( wall_flags_total_0(:,j, :), 1 ) )1662 BTEST( wall_flags_total_0(:,j,nxl:nxr), 1 ) ) 1634 1663 1635 dist_xz(:, :,3) = ( dist_xz(:,:,3) - mc_factor(3) ) &1664 dist_xz(:,nxl:nxr,3) = ( dist_xz(:,nxl:nxr,3) - mc_factor(3) ) & 1636 1665 * MERGE( 1.0_wp, 0.0_wp, & 1637 BTEST( wall_flags_total_0(:,j, :), 3 ) )1666 BTEST( wall_flags_total_0(:,j,nxl:nxr), 3 ) ) 1638 1667 ! 1639 1668 !-- Add disturbances … … 1673 1702 ENDIF 1674 1703 ! 1704 !-- Exchange ghost points. 1705 CALL exchange_horiz( u, nbgp ) 1706 CALL exchange_horiz( v, nbgp ) 1707 CALL exchange_horiz( w, nbgp ) 1708 ! 1675 1709 !-- Finally, set time counter for calling STG to zero 1676 1710 IF ( stg_call ) time_stg_call = 0.0_wp … … 1690 1724 !------------------------------------------------------------------------------! 
1691 1725 SUBROUTINE stg_generate_seed_yz( n_y, n_z, b_y, b_z, f_n, id_left, id_right ) 1692 1693 USE pegrid 1694 1695 IMPLICIT NONE 1696 1697 INTEGER(iwp) :: i !< grid index x-direction 1726 1698 1727 INTEGER(iwp) :: id_left !< core ids at respective boundaries 1699 1728 INTEGER(iwp), OPTIONAL :: id_right !< core ids at respective boundaries … … 1706 1735 INTEGER(iwp), DIMENSION(nzb:nzt+1) :: n_y !< length scale in y-direction 1707 1736 INTEGER(iwp), DIMENSION(nzb:nzt+1) :: n_z !< length scale in z-direction 1708 INTEGER(iwp), DIMENSION(nzb:nzt+1) :: n_y2 !< n_y*21709 INTEGER(iwp), DIMENSION(nzb:nzt+1) :: n_z2 !< n_z*21710 1737 1711 1738 REAL(wp) :: nyz_inv !< inverse of number of grid points in yz-slice … … 1713 1740 REAL(wp) :: rand_sigma_inv !< inverse of stdev of random number 1714 1741 1715 REAL(wp), DIMENSION(-merg :merg,nzb:nzt+1) :: b_y !< filter function in y-direction1716 REAL(wp), DIMENSION(-merg :merg,nzb:nzt+1) :: b_z !< filter function in z-direction1717 1718 REAL(wp), DIMENSION(nzb_x_stg:nzt_x_stg+1,nys g:nyng) :: f_n_l !< local velocity seed1719 REAL(wp), DIMENSION(nzb:nzt+1,nys g:nyng) :: f_n !< velocity seed1742 REAL(wp), DIMENSION(-mergp:mergp,nzb:nzt+1) :: b_y !< filter function in y-direction 1743 REAL(wp), DIMENSION(-mergp:mergp,nzb:nzt+1) :: b_z !< filter function in z-direction 1744 1745 REAL(wp), DIMENSION(nzb_x_stg:nzt_x_stg+1,nys:nyn) :: f_n_l !< local velocity seed 1746 REAL(wp), DIMENSION(nzb:nzt+1,nys:nyn) :: f_n !< velocity seed 1720 1747 1721 1748 REAL(wp), DIMENSION(:,:), ALLOCATABLE :: rand_it !< global array of random numbers 1722 REAL(wp), DIMENSION(:,:), ALLOCATABLE :: rand_it_l !< local array of random numbers1723 1724 1749 ! 1725 1750 !-- Generate random numbers using the parallel random generator. … … 1732 1757 !-- left boundary, while the right boundary uses the same random numbers 1733 1758 !-- and thus also computes the same correlation matrix. 1734 ALLOCATE( rand_it(nzb-mergp:nzt+1+mergp,-mergp :ny+mergp) )1735 rand_it 1759 ALLOCATE( rand_it(nzb-mergp:nzt+1+mergp,-mergp+nys:nyn+mergp) ) 1760 rand_it = 0.0_wp 1736 1761 1737 1762 rand_av = 0.0_wp 1738 1763 rand_sigma_inv = 0.0_wp 1739 nyz_inv = 1.0_wp / REAL( ( nzt+1 - nzb+1 ) * ( ny+1 ), KIND=wp ) 1740 ! 1741 !-- Compute and normalize random numbers only on left-boundary ranks. 1742 IF ( myidx == id_stg_left ) THEN 1743 ! 1744 !-- Allocate array for local set of random numbers 1745 ALLOCATE( rand_it_l(nzb:nzt+1,nys:nyn) ) 1746 rand_it_l = 0.0_wp 1747 1748 i = nxl 1749 DO j = nys, nyn 1750 ! 1751 !-- Put the random seeds at grid point j,i 1752 CALL random_seed_parallel( put=seq_random_array(:, j, i) ) 1753 DO k = nzb, nzt+1 1754 CALL random_number_parallel( random_dummy ) 1755 rand_it_l(k,j) = random_dummy 1756 rand_av = rand_av + rand_it_l(k,j) 1757 ENDDO 1758 ! 1759 !-- Get the new random seeds from last call at grid point j,i 1760 CALL random_seed_parallel( get=seq_random_array(:, j, i) ) 1764 nyz_inv = 1.0_wp / REAL( ( nzt + 1 + mergp - ( nzb - mergp ) + 1 ) & 1765 * ( ny + mergp - ( 0 - mergp ) + 1 ), & 1766 KIND=wp ) 1767 ! 1768 !-- Compute and normalize random numbers. 1769 DO j = nys - mergp, nyn + mergp 1770 ! 1771 !-- Put the random seeds at grid point j 1772 CALL random_seed_parallel( put=seq_rand_yz(:,j) ) 1773 DO k = nzb - mergp, nzt + 1 + mergp 1774 CALL random_number_parallel( random_dummy ) 1775 rand_it(k,j) = random_dummy 1761 1776 ENDDO 1777 ! 1778 !-- Get the new random seeds from last call at grid point j 1779 CALL random_seed_parallel( get=seq_rand_yz(:,j) ) 1780 ENDDO 1781 ! 
1782 !-- For normalization to zero mean, sum-up the global random numers. 1783 !-- To normalize the global set of random numbers, 1784 !-- the inner ghost layers mergp must not be summed-up, else 1785 !-- the random numbers on the ghost layers will be stronger weighted as they 1786 !-- also occur on the inner subdomains. 1787 DO j = MERGE( nys, nys - mergp, nys /= 0 ), & 1788 MERGE( nyn, nyn + mergp, nyn /= ny ) 1789 DO k = nzb - mergp, nzt + 1 + mergp 1790 rand_av = rand_av + rand_it(k,j) 1791 ENDDO 1792 ENDDO 1793 1762 1794 #if defined( __parallel ) 1763 1795 ! 1764 !-- 1765 CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_av, 1, MPI_REAL,&1766 1796 !-- Sum-up the local averages of the random numbers 1797 CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_av, 1, MPI_REAL, & 1798 MPI_SUM, comm1dy, ierr ) 1767 1799 #endif 1768 rand_av = rand_av * nyz_inv 1769 ! 1770 !-- Obtain zero mean 1771 rand_it_l = rand_it_l - rand_av 1772 ! 1773 !-- Now, compute the variance 1800 rand_av = rand_av * nyz_inv 1801 ! 1802 !-- Obtain zero mean 1803 rand_it= rand_it - rand_av 1804 ! 1805 !-- Now, compute the variance 1806 DO j = MERGE( nys, nys - mergp, nys /= 0 ), & 1807 MERGE( nyn, nyn + mergp, nyn /= ny ) 1808 DO k = nzb - mergp, nzt + 1 + mergp 1809 rand_sigma_inv = rand_sigma_inv + rand_it(k,j)**2 1810 ENDDO 1811 ENDDO 1812 1813 #if defined( __parallel ) 1814 ! 1815 !-- Sum-up the local quadratic averages of the random numbers 1816 CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_sigma_inv, 1, MPI_REAL, & 1817 MPI_SUM, comm1dy, ierr ) 1818 #endif 1819 ! 1820 !-- Compute standard deviation 1821 IF ( rand_sigma_inv /= 0.0_wp ) THEN 1822 rand_sigma_inv = 1.0_wp / SQRT( rand_sigma_inv * nyz_inv ) 1823 ELSE 1824 rand_sigma_inv = 1.0_wp 1825 ENDIF 1826 ! 1827 !-- Normalize with standard deviation to obtain unit variance 1828 rand_it = rand_it * rand_sigma_inv 1829 1830 CALL cpu_log( log_point_s(31), 'STG f_n factors', 'start' ) 1831 ! 1832 !-- Generate velocity seed following Eq.6 of Xie and Castro (2008). There 1833 !-- are two options. In the first one, the computation of the seeds is 1834 !-- distributed to all processes along the communicator comm1dy and 1835 !-- gathered on the leftmost and, if necessary, on the rightmost process. 1836 !-- For huge length scales the computational effort can become quite huge 1837 !-- (it scales with the turbulent length scales), so that gain by parallelization 1838 !-- exceeds the costs by the subsequent communication. 1839 !-- In the second option, which performs better when the turbulent length scales 1840 !-- are parametrized and thus the loops are smaller, the seeds are computed 1841 !-- locally and no communication is necessary. 1842 IF ( compute_velocity_seeds_local ) THEN 1843 1844 f_n = 0.0_wp 1774 1845 DO j = nys, nyn 1775 1846 DO k = nzb, nzt+1 1776 rand_sigma_inv = rand_sigma_inv + rand_it_l(k,j)**2 1777 ENDDO 1778 ENDDO 1779 1780 #if defined( __parallel ) 1781 ! 1782 !-- Sum-up the local quadratic averages of the random numbers 1783 CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_sigma_inv, 1, MPI_REAL, & 1784 MPI_SUM, comm1dy, ierr ) 1785 #endif 1786 ! 1787 !-- Compute standard deviation 1788 IF ( rand_sigma_inv /= 0.0_wp ) THEN 1789 rand_sigma_inv = 1.0_wp / SQRT( rand_sigma_inv * nyz_inv ) 1790 ELSE 1791 rand_sigma_inv = 1.0_wp 1792 ENDIF 1793 ! 1794 !-- Normalize with standard deviation to obtain unit variance 1795 rand_it_l = rand_it_l * rand_sigma_inv 1796 ! 1797 !-- Copy local random numbers on the global array 1798 rand_it(nzb:nzt+1,nys:nyn) = rand_it_l(nzb:nzt+1,nys:nyn) 1799 ! 
1800 !-- Deallocate local array 1801 DEALLOCATE( rand_it_l ) 1802 ! 1803 !-- Now, distribute the final set of random numbers to all mpi ranks located 1804 !-- on the left boundary. Here, an allreduce with sum reduction is sufficient, 1805 !-- or, in the non-parallel case, nothing need to be done at all. 1806 #if defined( __parallel ) 1807 CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_it, SIZE( rand_it ), MPI_REAL, & 1808 MPI_SUM, comm1dy, ierr ) 1809 #endif 1810 ENDIF 1811 ! 1812 !-- Finally, distribute the set of random numbers (defined on the leftmost- 1813 !-- located mpi ranks) to all other mpi ranks. Here, a allreduce with sum 1814 !-- option is sufficient, because rand_it is zero on all other mpi_ranks. 1815 !-- Note, the reduce operation is only performed with communicator comm1dx, 1816 !-- where only 1 rank within the communicator has non-zero random numbers. 1817 #if defined( __parallel ) 1818 CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_it, SIZE( rand_it ), MPI_REAL, & 1819 MPI_SUM, comm1dx, ierr ) 1820 #endif 1821 ! 1822 !-- Periodic fill of random numbers in space 1823 DO j = 0, ny 1824 DO k = 1, mergp 1825 rand_it(nzb -k,j) = rand_it(nzt+2-k,j) ! bottom margin 1826 rand_it(nzt+1+k,j) = rand_it(nzb+k-1,j) ! top margin 1827 ENDDO 1828 ENDDO 1829 DO j = 1, mergp 1830 DO k = nzb-mergp, nzt+1+mergp 1831 rand_it(k, -j) = rand_it(k,ny-j+1) ! south margin 1832 rand_it(k,ny+j) = rand_it(k, j-1) ! north margin 1833 ENDDO 1834 ENDDO 1835 1836 ! 1837 !-- Generate velocity seed following Eq.6 of Xie and Castro (2008) 1838 n_y2 = n_y * 2 1839 n_z2 = n_z * 2 1840 f_n_l = 0.0_wp 1841 1842 DO j = nysg, nyng 1843 DO k = nzb_x_stg, nzt_x_stg+1 1844 DO jj = -n_y2(k), n_y2(k) 1845 DO kk = -n_z2(k), n_z2(k) 1846 f_n_l(k,j) = f_n_l(k,j) & 1847 + b_y(jj,k) * b_z(kk,k) * rand_it(k+kk,j+jj) 1847 DO jj = -n_y(k), n_y(k) 1848 DO kk = -n_z(k), n_z(k) 1849 f_n(k,j) = f_n(k,j) + b_y(jj,k) * b_z(kk,k) * rand_it(k+kk,j+jj) 1850 ENDDO 1848 1851 ENDDO 1849 1852 ENDDO 1850 1853 ENDDO 1851 ENDDO 1854 1855 ELSE 1856 1857 f_n_l = 0.0_wp 1858 DO j = nys, nyn 1859 DO k = nzb_x_stg, nzt_x_stg+1 1860 DO jj = -n_y(k), n_y(k) 1861 DO kk = -n_z(k), n_z(k) 1862 f_n_l(k,j) = f_n_l(k,j) + b_y(jj,k) * b_z(kk,k) * rand_it(k+kk,j+jj) 1863 ENDDO 1864 ENDDO 1865 ENDDO 1866 ENDDO 1867 ! 1868 !-- Gather velocity seeds of full subdomain 1869 send_count = nzt_x_stg - nzb_x_stg + 1 1870 IF ( nzt_x_stg == nzt ) send_count = send_count + 1 1871 1872 #if defined( __parallel ) 1873 ! 1874 !-- Gather the velocity seed matrix on left boundary mpi ranks. 1875 CALL MPI_GATHERV( f_n_l(nzb_x_stg,nys), send_count, stg_type_yz_small, & 1876 f_n(nzb+1,nys), recv_count_yz, displs_yz, stg_type_yz,& 1877 id_left, comm1dx, ierr ) 1878 ! 1879 !-- If required, gather the same velocity seed matrix on right boundary 1880 !-- mpi ranks (in offline nesting for example). 1881 IF ( PRESENT( id_right ) ) THEN 1882 CALL MPI_GATHERV( f_n_l(nzb_x_stg,nys), send_count, stg_type_yz_small, & 1883 f_n(nzb+1,nys), recv_count_yz, displs_yz, stg_type_yz,& 1884 id_right, comm1dx, ierr ) 1885 ENDIF 1886 #else 1887 f_n(nzb+1:nzt+1,nys:nyn) = f_n_l(nzb_x_stg:nzt_x_stg+1,nys:nyn) 1888 #endif 1889 1890 ENDIF 1852 1891 1853 1892 DEALLOCATE( rand_it ) 1854 ! 1855 !-- Gather velocity seeds of full subdomain 1856 send_count = nzt_x_stg - nzb_x_stg + 1 1857 IF ( nzt_x_stg == nzt ) send_count = send_count + 1 1858 1859 #if defined( __parallel ) 1860 ! 1861 !-- Gather the velocity seed matrix on left boundary mpi ranks. 
1862 CALL MPI_GATHERV( f_n_l(nzb_x_stg,nysg), send_count, stg_type_yz_small, & 1863 f_n(nzb+1,nysg), recv_count_yz, displs_yz, stg_type_yz, & 1864 id_left, comm1dx, ierr ) 1865 ! 1866 !-- If required, gather the same velocity seed matrix on right boundary 1867 !-- mpi ranks (in offline nesting for example). 1868 IF ( PRESENT( id_right ) ) THEN 1869 CALL MPI_GATHERV( f_n_l(nzb_x_stg,nysg), send_count, stg_type_yz_small, & 1870 f_n(nzb+1,nysg), recv_count_yz, displs_yz, stg_type_yz,& 1871 id_right, comm1dx, ierr ) 1872 ENDIF 1873 #else 1874 f_n(nzb+1:nzt+1,nysg:nyng) = f_n_l(nzb_x_stg:nzt_x_stg+1,nysg:nyng) 1875 #endif 1876 1893 1894 CALL cpu_log( log_point_s(31), 'STG f_n factors', 'stop' ) 1877 1895 1878 1896 END SUBROUTINE stg_generate_seed_yz … … 1890 1908 SUBROUTINE stg_generate_seed_xz( n_x, n_z, b_x, b_z, f_n, id_south, id_north ) 1891 1909 1892 IMPLICIT NONE1893 1894 1910 INTEGER(iwp) :: i !< loop index in x-direction 1895 1911 INTEGER(iwp) :: id_north !< core ids at respective boundaries 1896 1912 INTEGER(iwp) :: id_south !< core ids at respective boundaries 1897 1913 INTEGER(iwp) :: ii !< loop index in x-direction 1898 INTEGER(iwp) :: j !< grid index y-direction1899 1914 INTEGER(iwp) :: k !< loop index in z-direction 1900 1915 INTEGER(iwp) :: kk !< loop index in z-direction … … 1903 1918 INTEGER(iwp), DIMENSION(nzb:nzt+1) :: n_x !< length scale in x-direction 1904 1919 INTEGER(iwp), DIMENSION(nzb:nzt+1) :: n_z !< length scale in z-direction 1905 INTEGER(iwp), DIMENSION(nzb:nzt+1) :: n_x2 !< n_x*21906 INTEGER(iwp), DIMENSION(nzb:nzt+1) :: n_z2 !< n_z*21907 1920 1908 1921 REAL(wp) :: nxz_inv !< inverse of number of grid points in xz-slice … … 1910 1923 REAL(wp) :: rand_sigma_inv !< inverse of stdev of random number 1911 1924 1912 REAL(wp), DIMENSION(-merg :merg,nzb:nzt+1) :: b_x !< filter function in x-direction1913 REAL(wp), DIMENSION(-merg :merg,nzb:nzt+1) :: b_z !< filter function in z-direction1914 1915 REAL(wp), DIMENSION(nzb_y_stg:nzt_y_stg+1,nxl g:nxrg) :: f_n_l !< local velocity seed1916 REAL(wp), DIMENSION(nzb:nzt+1,nxl g:nxrg) :: f_n !< velocity seed1925 REAL(wp), DIMENSION(-mergp:mergp,nzb:nzt+1) :: b_x !< filter function in x-direction 1926 REAL(wp), DIMENSION(-mergp:mergp,nzb:nzt+1) :: b_z !< filter function in z-direction 1927 1928 REAL(wp), DIMENSION(nzb_y_stg:nzt_y_stg+1,nxl:nxr) :: f_n_l !< local velocity seed 1929 REAL(wp), DIMENSION(nzb:nzt+1,nxl:nxr) :: f_n !< velocity seed 1917 1930 1918 1931 REAL(wp), DIMENSION(:,:), ALLOCATABLE :: rand_it !< global array of random numbers 1919 REAL(wp), DIMENSION(:,:), ALLOCATABLE :: rand_it_l !< local array of random numbers1920 1932 1921 1933 ! … … 1929 1941 !-- left boundary, while the right boundary uses the same random numbers 1930 1942 !-- and thus also computes the same correlation matrix. 1931 ALLOCATE( rand_it(nzb-mergp:nzt+1+mergp,-mergp :nx+mergp) )1932 rand_it 1943 ALLOCATE( rand_it(nzb-mergp:nzt+1+mergp,-mergp+nxl:nxr+mergp) ) 1944 rand_it = 0.0_wp 1933 1945 1934 1946 rand_av = 0.0_wp 1935 1947 rand_sigma_inv = 0.0_wp 1936 nxz_inv = 1.0_wp / REAL( ( nzt+1 - nzb+1 ) * ( nx+1 ), KIND=wp ) 1937 ! 1938 !-- Compute and normalize random numbers only on south-boundary ranks. 1939 IF ( myidy == id_stg_south ) THEN 1940 ! 1941 !-- Allocate array for local set of random numbers 1942 ALLOCATE( rand_it_l(nzb:nzt+1,nxl:nxr) ) 1943 rand_it_l = 0.0_wp 1944 1945 j = nys 1946 DO i = nxl, nxr 1947 ! 
1948 !-- Put the random seeds at grid point j,i 1949 CALL random_seed_parallel( put=seq_random_array(:, j, i) ) 1950 DO k = nzb, nzt+1 1951 CALL random_number_parallel( random_dummy ) 1952 rand_it_l(k,i) = random_dummy 1953 rand_av = rand_av + rand_it_l(k,i) 1954 ENDDO 1955 ! 1956 !-- Get the new random seeds from last call at grid point j,i 1957 CALL random_seed_parallel( get=seq_random_array(:, j, i) ) 1948 nxz_inv = 1.0_wp / REAL( ( nzt + 1 + mergp - ( nzb - mergp ) + 1 ) & 1949 * ( nx + mergp - ( 0 - mergp ) +1 ), & 1950 KIND=wp ) 1951 ! 1952 !-- Compute and normalize random numbers. 1953 DO i = nxl - mergp, nxr + mergp 1954 ! 1955 !-- Put the random seeds at grid point ii 1956 CALL random_seed_parallel( put=seq_rand_xz(:,i) ) 1957 DO k = nzb - mergp, nzt + 1 + mergp 1958 CALL random_number_parallel( random_dummy ) 1959 rand_it(k,i) = random_dummy 1958 1960 ENDDO 1961 ! 1962 !-- Get the new random seeds from last call at grid point ii 1963 CALL random_seed_parallel( get=seq_rand_xz(:,i) ) 1964 ENDDO 1965 ! 1966 !-- For normalization to zero mean, sum-up the global random numers. 1967 !-- To normalize the global set of random numbers, 1968 !-- the inner ghost layers mergp must not be summed-up, else 1969 !-- the random numbers on the ghost layers will be stronger weighted as they 1970 !-- also occur on the inner subdomains. 1971 DO i = MERGE( nxl, nxl - mergp, nxl /= 0 ), & 1972 MERGE( nxr, nxr + mergp, nxr /= nx ) 1973 DO k = nzb - mergp, nzt + 1 + mergp 1974 rand_av = rand_av + rand_it(k,i) 1975 ENDDO 1976 ENDDO 1977 1959 1978 #if defined( __parallel ) 1960 CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_av, 1, MPI_REAL, & 1961 MPI_SUM, comm1dx, ierr ) 1979 ! 1980 !-- Sum-up the local averages of the random numbers 1981 CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_av, 1, MPI_REAL, & 1982 MPI_SUM, comm1dx, ierr ) 1962 1983 #endif 1963 rand_av = rand_av * nxz_inv 1964 ! 1965 !-- Obtain zero mean 1966 rand_it_l = rand_it_l - rand_av 1967 ! 1968 !-- Now, compute the variance 1984 rand_av = rand_av * nxz_inv 1985 ! 1986 !-- Obtain zero mean 1987 rand_it= rand_it - rand_av 1988 ! 1989 !-- Now, compute the variance 1990 DO i = MERGE( nxl, nxl - mergp, nxl /= 0 ), & 1991 MERGE( nxr, nxr + mergp, nxr /= nx ) 1992 DO k = nzb - mergp, nzt + 1 + mergp 1993 rand_sigma_inv = rand_sigma_inv + rand_it(k,i)**2 1994 ENDDO 1995 ENDDO 1996 1997 #if defined( __parallel ) 1998 ! 1999 !-- Sum-up the local quadratic averages of the random numbers 2000 CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_sigma_inv, 1, MPI_REAL, & 2001 MPI_SUM, comm1dx, ierr ) 2002 #endif 2003 ! 2004 !-- Compute standard deviation 2005 IF ( rand_sigma_inv /= 0.0_wp ) THEN 2006 rand_sigma_inv = 1.0_wp / SQRT( rand_sigma_inv * nxz_inv ) 2007 ELSE 2008 rand_sigma_inv = 1.0_wp 2009 ENDIF 2010 ! 2011 !-- Normalize with standard deviation to obtain unit variance 2012 rand_it = rand_it * rand_sigma_inv 2013 2014 CALL cpu_log( log_point_s(31), 'STG f_n factors', 'start' ) 2015 ! 2016 !-- Generate velocity seed following Eq.6 of Xie and Castro (2008). There 2017 !-- are two options. In the first one, the computation of the seeds is 2018 !-- distributed to all processes along the communicator comm1dx and 2019 !-- gathered on the southmost and, if necessary, on the northmost process. 2020 !-- For huge length scales the computational effort can become quite huge 2021 !-- (it scales with the turbulent length scales), so that gain by parallelization 2022 !-- exceeds the costs by the subsequent communication. 
2023 !-- In the second option, which performs better when the turbulent length scales 2024 !-- are parametrized and thus the loops are smaller, the seeds are computed 2025 !-- locally and no communication is necessary. 2026 IF ( compute_velocity_seeds_local ) THEN 2027 2028 f_n = 0.0_wp 1969 2029 DO i = nxl, nxr 1970 2030 DO k = nzb, nzt+1 1971 rand_sigma_inv = rand_sigma_inv + rand_it_l(k,i)**2 1972 ENDDO 1973 ENDDO 1974 1975 #if defined( __parallel ) 1976 CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_sigma_inv, 1, MPI_REAL, & 1977 MPI_SUM, comm1dx, ierr ) 1978 #endif 1979 ! 1980 !-- Compute standard deviation 1981 IF ( rand_sigma_inv /= 0.0_wp ) THEN 1982 rand_sigma_inv = 1.0_wp / SQRT( rand_sigma_inv * nxz_inv ) 1983 ELSE 1984 rand_sigma_inv = 1.0_wp 1985 ENDIF 1986 ! 1987 !-- Normalize with standard deviation to obtain unit variance 1988 rand_it_l = rand_it_l * rand_sigma_inv 1989 ! 1990 !-- Copy local random numbers on the global array 1991 rand_it(nzb:nzt+1,nxl:nxr) = rand_it_l(nzb:nzt+1,nxl:nxr) 1992 ! 1993 !-- Deallocate local array 1994 DEALLOCATE( rand_it_l ) 1995 ! 1996 !-- Now, distribute the final set of random numbers to all mpi ranks located 1997 !-- on the south boundary. Here, an allreduce with sum reduction is sufficient. 1998 #if defined( __parallel ) 1999 CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_it, SIZE( rand_it ), MPI_REAL, & 2000 MPI_SUM, comm1dx, ierr ) 2001 #endif 2002 ENDIF 2003 ! 2004 !-- Finally, distribute the set of random numbers (defined on the 2005 !-- southmost-located mpi ranks) to all other mpi ranks. Here, a allreduce 2006 !-- with sum option is sufficient, because rand_it is zero on all other 2007 !-- mpi_ranks. Note, the reduce operation is only performed with communicator 2008 !-- comm1dy, where only 1 rank within the communicator has non zero random numbers. 2009 #if defined( __parallel ) 2010 CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_it, SIZE( rand_it ), MPI_REAL, & 2011 MPI_SUM, comm1dy, ierr ) 2012 #endif 2013 ! 2014 !-- Periodic fill of random number in space 2015 DO i = 0, nx 2016 DO k = 1, mergp 2017 rand_it(nzb-k,i) = rand_it(nzt+2-k,i) ! bottom margin 2018 rand_it(nzt+1+k,i) = rand_it(nzb+k-1,i) ! top margin 2019 ENDDO 2020 ENDDO 2021 DO i = 1, mergp 2022 DO k = nzb-mergp, nzt+1+mergp 2023 rand_it(k,-i) = rand_it(k,nx-i+1) ! left margin 2024 rand_it(k,nx+i) = rand_it(k,i-1) ! right margin 2025 ENDDO 2026 ENDDO 2027 ! 2028 !-- Generate velocity seed following Eq.6 of Xie and Castro (2008) 2029 n_x2 = n_x * 2 2030 n_z2 = n_z * 2 2031 f_n_l = 0.0_wp 2032 2033 DO i = nxlg, nxrg 2034 DO k = nzb_y_stg, nzt_y_stg+1 2035 DO ii = -n_x2(k), n_x2(k) 2036 DO kk = -n_z2(k), n_z2(k) 2037 f_n_l(k,i) = f_n_l(k,i) & 2038 + b_x(ii,k) * b_z(kk,k) * rand_it(k+kk,i+ii) 2031 DO ii = -n_x(k), n_x(k) 2032 DO kk = -n_z(k), n_z(k) 2033 f_n(k,i) = f_n(k,i) + b_x(ii,k) * b_z(kk,k) * rand_it(k+kk,i+ii) 2034 ENDDO 2039 2035 ENDDO 2040 2036 ENDDO 2041 2037 ENDDO 2042 ENDDO 2038 2039 ELSE 2040 2041 f_n_l = 0.0_wp 2042 DO i = nxl, nxr 2043 DO k = nzb_y_stg, nzt_y_stg+1 2044 DO ii = -n_x(k), n_x(k) 2045 DO kk = -n_z(k), n_z(k) 2046 f_n_l(k,i) = f_n_l(k,i) + b_x(ii,k) * b_z(kk,k) * rand_it(k+kk,i+ii) 2047 ENDDO 2048 ENDDO 2049 ENDDO 2050 ENDDO 2051 ! 2052 !-- Gather velocity seeds of full subdomain 2053 send_count = nzt_y_stg - nzb_y_stg + 1 2054 IF ( nzt_y_stg == nzt ) send_count = send_count + 1 2055 2056 #if defined( __parallel ) 2057 ! 2058 !-- Gather the processed velocity seed on south boundary mpi ranks. 
2059 CALL MPI_GATHERV( f_n_l(nzb_y_stg,nxl), send_count, stg_type_xz_small, & 2060 f_n(nzb+1,nxl), recv_count_xz, displs_xz, stg_type_xz, & 2061 id_south, comm1dy, ierr ) 2062 ! 2063 !-- Gather the processed velocity seed on north boundary mpi ranks. 2064 CALL MPI_GATHERV( f_n_l(nzb_y_stg,nxl), send_count, stg_type_xz_small, & 2065 f_n(nzb+1,nxl), recv_count_xz, displs_xz, stg_type_xz, & 2066 id_north, comm1dy, ierr ) 2067 #else 2068 f_n(nzb+1:nzt+1,nxl:nxr) = f_n_l(nzb_y_stg:nzt_y_stg+1,nxl:nxr) 2069 #endif 2070 2071 ENDIF 2043 2072 2044 2073 DEALLOCATE( rand_it ) 2045 2074 2046 ! 2047 !-- Gather velocity seeds of full subdomain 2048 send_count = nzt_y_stg - nzb_y_stg + 1 2049 IF ( nzt_y_stg == nzt ) send_count = send_count + 1 2050 2051 #if defined( __parallel ) 2052 ! 2053 !-- Gather the processed velocity seed on south boundary mpi ranks. 2054 CALL MPI_GATHERV( f_n_l(nzb_y_stg,nxlg), send_count, stg_type_xz_small, & 2055 f_n(nzb+1,nxlg), recv_count_xz, displs_xz, stg_type_xz, & 2056 id_south, comm1dy, ierr ) 2057 ! 2058 !-- Gather the processed velocity seed on north boundary mpi ranks. 2059 CALL MPI_GATHERV( f_n_l(nzb_y_stg,nxlg), send_count, stg_type_xz_small, & 2060 f_n(nzb+1,nxlg), recv_count_xz, displs_xz, stg_type_xz, & 2061 id_north, comm1dy, ierr ) 2062 #else 2063 f_n(nzb+1:nzt+1,nxlg:nxrg) = f_n_l(nzb_y_stg:nzt_y_stg+1,nxlg:nxrg) 2064 #endif 2075 CALL cpu_log( log_point_s(31), 'STG f_n factors', 'stop' ) 2065 2076 2066 2077 END SUBROUTINE stg_generate_seed_xz … … 2076 2087 SUBROUTINE parametrize_reynolds_stress 2077 2088 2078 IMPLICIT NONE2079 2080 2089 INTEGER(iwp) :: k !< loop index in z-direction 2081 2090 … … 2093 2102 ! 2094 2103 !-- u'u' and v'v'. Assume isotropy. Note, add a small negative number 2095 !-- to the denominator, else the merg e-function can crash if scale_l is2104 !-- to the denominator, else the mergpe-function can crash if scale_l is 2096 2105 !-- zero. 2097 2106 r11(k) = scale_us**2 * ( & … … 2140 2149 !------------------------------------------------------------------------------! 2141 2150 SUBROUTINE calc_coeff_matrix 2142 2143 IMPLICIT NONE2144 2151 2145 2152 INTEGER(iwp) :: k !< loop index in z-direction … … 2195 2202 SUBROUTINE stg_adjust 2196 2203 2197 IMPLICIT NONE2198 2199 2200 2204 IF ( debug_output_timestep ) CALL debug_message( 'stg_adjust', 'start' ) 2201 2205 ! … … 2251 2255 SUBROUTINE calc_length_and_time_scale 2252 2256 2253 IMPLICIT NONE2254 2255 2256 2257 INTEGER(iwp) :: k !< loop index in z-direction 2257 2258 … … 2285 2286 !-- Assume isotropic turbulence length scales 2286 2287 nux(k) = MAX( INT( length_scale * ddx ), 1 ) * blend 2287 nuy(k) = MAX( INT( length_scale * ddy ), 1 ) * blend 2288 nuy(k) = MAX( INT( length_scale * ddy ), 1 ) * blend 2288 2289 nvx(k) = MAX( INT( length_scale * ddx ), 1 ) * blend 2289 nvy(k) = MAX( INT( length_scale * ddy ), 1 ) * blend 2290 nvy(k) = MAX( INT( length_scale * ddy ), 1 ) * blend 2290 2291 nwx(k) = MAX( INT( length_scale * ddx ), 1 ) * blend 2291 nwy(k) = MAX( INT( length_scale * ddy ), 1 ) * blend 2292 nwy(k) = MAX( INT( length_scale * ddy ), 1 ) * blend 2292 2293 ! 2293 2294 !-- Along the vertical direction limit the length scale further by the … … 2341 2342 !------------------------------------------------------------------------------! 2342 2343 SUBROUTINE calc_scaling_variables 2343 2344 USE surface_mod, &2345 ONLY: surf_def_h, surf_lsm_h, surf_usm_h2346 2347 IMPLICIT NONE2348 2344 2349 2345 INTEGER(iwp) :: i !< loop index in x-direction -
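The velocity-seed rework above repeatedly normalizes the distributed random-number field to zero mean and unit variance by summing local contributions and reducing them over the boundary communicator. A self-contained sketch of that pattern follows (not PALM code: communicator, index bounds, and the kind parameter are placeholders standing in for comm1dy/comm1dx, nys/nyn, and wp):

 SUBROUTINE normalize_random_field( comm, npoints_total, js, je, rand_it )

    USE MPI

    IMPLICIT NONE

    INTEGER, PARAMETER ::  wp = KIND( 1.0d0 )  !< stand-in for PALM's working precision

    INTEGER, INTENT(IN) ::  comm           !< communicator spanning the boundary ranks
    INTEGER, INTENT(IN) ::  npoints_total  !< global number of non-overlapping points
    INTEGER, INTENT(IN) ::  js             !< local start index
    INTEGER, INTENT(IN) ::  je             !< local end index

    REAL(wp), INTENT(INOUT), DIMENSION(js:je) ::  rand_it  !< local part of the field

    INTEGER  ::  ierr            !< MPI error code
    REAL(wp) ::  rand_av         !< global mean
    REAL(wp) ::  rand_sigma_inv  !< inverse of the global standard deviation

 !
 !-- Global mean: local sum first, then reduce over all participating ranks
    rand_av = SUM( rand_it )
    CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_av, 1, MPI_DOUBLE_PRECISION,       &
                        MPI_SUM, comm, ierr )
    rand_av = rand_av / REAL( npoints_total, KIND=wp )
    rand_it = rand_it - rand_av
 !
 !-- Global variance: the same reduction applied to the squared values
    rand_sigma_inv = SUM( rand_it**2 )
    CALL MPI_ALLREDUCE( MPI_IN_PLACE, rand_sigma_inv, 1, MPI_DOUBLE_PRECISION,&
                        MPI_SUM, comm, ierr )
    IF ( rand_sigma_inv /= 0.0_wp )  THEN
       rand_sigma_inv = 1.0_wp / SQRT( rand_sigma_inv                         &
                                       / REAL( npoints_total, KIND=wp ) )
    ELSE
       rand_sigma_inv = 1.0_wp
    ENDIF
    rand_it = rand_it * rand_sigma_inv

 END SUBROUTINE normalize_random_field

As in the changeset, points that appear on more than one rank (the mergp ghost columns) must be excluded from npoints_total and from the local sums, otherwise they are weighted more strongly than interior points.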
palm/trunk/SOURCE/virtual_measurement_mod.f90
r4422 r4438 25 25 ! ----------------- 26 26 ! $Id$ 27 ! Add cpu-log points 28 ! 29 ! 4422 2020-02-24 22:45:13Z suehring 27 30 ! Missing trim() 28 31 ! … … 165 168 USE cpulog, & 166 169 ONLY: cpu_log, & 167 log_point 170 log_point_s 168 171 169 172 USE data_output_module … … 1449 1452 1450 1453 ! 1451 !-- Determine the number of output timesteps. Set a maximum value of 80000 1452 !-- timesteps. 1454 !-- Determine the number of output timesteps. 1453 1455 ntimesteps = CEILING( & 1454 1456 ( end_time - MAX( vm_time_start, time_since_reference_point )& … … 1468 1470 TRIM( coupling_char ) // '/' //& 1469 1471 'site', l 1470 1471 1472 1472 1473 return_value = dom_def_file( vmea(l)%nc_filename, 'netcdf4-parallel' ) … … 2281 2282 REAL(wp), DIMENSION(:,:), ALLOCATABLE, TARGET :: output_values_2d_target !< target for 2d output array 2282 2283 2284 CALL cpu_log( log_point_s(26), 'VM output', 'start' ) 2283 2285 ! 2284 2286 !-- At the first call of this routine write the spatial coordinates. … … 2548 2550 ENDDO ! loop over sites 2549 2551 2552 CALL cpu_log( log_point_s(26), 'VM output', 'stop' ) 2553 2550 2554 2551 2555 END SUBROUTINE vm_data_output … … 2582 2586 REAL(wp) :: q_s !< saturation mixing ratio 2583 2587 REAL(wp) :: q_wv !< mixing ratio 2588 2589 CALL cpu_log( log_point_s(27), 'VM sampling', 'start' ) 2584 2590 ! 2585 2591 !-- Loop over all sites. … … 3294 3300 ENDDO 3295 3301 3302 CALL cpu_log( log_point_s(27), 'VM sampling', 'stop' ) 3303 3296 3304 END SUBROUTINE vm_sampling 3297 3305 -
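The virtual-measurement changes wrap sampling and output in cpu-log points. A minimal sketch of that instrumentation pattern (the log index 27 and the label are taken from the diff; the work in between is a placeholder):

 SUBROUTINE vm_sampling_timed_sketch

    USE cpulog,                                                               &
        ONLY:  cpu_log, log_point_s

    IMPLICIT NONE

    CALL cpu_log( log_point_s(27), 'VM sampling', 'start' )
 !
 !-- ... sampling of prognostic quantities at the virtual-measurement
 !-- locations would go here ...
    CALL cpu_log( log_point_s(27), 'VM sampling', 'stop' )

 END SUBROUTINE vm_sampling_timed_sketch

The elapsed time between the matching 'start' and 'stop' calls then appears under the given label in PALM's CPU-time statistics.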
palm/trunk/SOURCE/wind_turbine_model_mod.f90
r4436 r4438 21 21 ! Current revisions: 22 22 ! ----------------- 23 ! Bugfix: shifted netcdf preprocessor directive to correct position23 ! 24 24 ! 25 25 ! Former revisions: 26 26 ! ----------------- 27 27 ! $Id$ 28 ! Bugfix: shifted netcdf preprocessor directive to correct position 29 ! 30 ! 4436 2020-03-03 12:47:02Z oliver.maas 28 31 ! added optional netcdf data input for wtm array input parameters 29 32 !
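The wind-turbine-model fix only moves a netCDF preprocessor directive, so that netCDF symbols are referenced exclusively inside the __netcdf guard and builds without the library still compile. A hypothetical sketch of the guarded pattern (file, variable name, and routine are placeholders, not the actual wind_turbine_model_mod code):

 SUBROUTINE read_wtm_input_sketch( ncid )

 #if defined( __netcdf )
    USE NETCDF
 #endif

    IMPLICIT NONE

    INTEGER, INTENT(IN) ::  ncid  !< id of an already opened netCDF file

    INTEGER ::  varid   !< netCDF variable id
    INTEGER ::  status  !< netCDF return code

 #if defined( __netcdf )
 !
 !-- Reference the netCDF API only when the library is available
    status = NF90_INQ_VARID( ncid, 'rotor_speed', varid )
 #endif

 END SUBROUTINE read_wtm_input_sketch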