Changeset 4628 for palm/trunk/SOURCE
- Timestamp: Jul 29, 2020 7:23:03 AM
- Location: palm/trunk/SOURCE
- Files: 6 edited
palm/trunk/SOURCE/Makefile (r4564 → r4628)

  # Current revisions:
  # ------------------
  #
  #
  # Former revisions:
  # -----------------
  # $Id$
+ # dependencies for particle data MPI-I/O added
+ #
+ # 4564 2020-06-12 14:03:36Z raasch
  # Vertical nesting method of Huq et al. (2019) removed
  #
…
  mod_particle_attributes.o: \
+     modules.o \
      mod_kinds.o
  nesting_offl_mod.o: \
…
      virtual_measurement_mod.o
  restart_data_mpi_io_mod.o: \
-     modules.o \
-     mod_kinds.o \
-     exchange_horiz_mod.o \
+     exchange_horiz_mod.o \
+     modules.o \
+     mod_kinds.o \
+     mod_particle_attributes.o \
      posix_interface_mod.o \
      shared_memory_io_mod.o
palm/trunk/SOURCE/lagrangian_particle_model_mod.f90 (r4616 → r4628)

  ! -----------------
  ! $Id$
+ ! extensions required for MPI-I/O of particle data to restart files
+ !
+ ! 4616 2020-07-21 10:09:46Z schwenkel
  ! Bugfix in case of strechting: k-calculation limited lower bound of 1
  !
…
         child_domain, &
         cloud_droplets, constant_flux_layer, current_timestep_number, &
-        dt_3d, dt_3d_reached, first_call_lpm, humidity,&
+        dt_3d, dt_3d_reached, debug_output, first_call_lpm, humidity, &
         dt_3d_reached_l, dt_dopts, dz, initializing_actions, &
         intermediate_timestep_count, intermediate_timestep_count_max, &
         message_string, molecular_viscosity, ocean_mode, &
-        particle_maximum_age, restart_data_format_output, &
+        particle_maximum_age, restart_data_format_input, &
+        restart_data_format_output, &
         simulated_time, topography, dopts_time_count, &
         time_since_reference_point, rho_surface, u_gtrans, v_gtrans, &
…
         id_random_array

-    USE restart_data_mpi_io_mod, &
-        ONLY: rd_mpi_io_check_array, rrd_mpi_io, wrd_mpi_io
+    USE restart_data_mpi_io_mod, &
+        ONLY: rd_mpi_io_check_array, &
+              rd_mpi_io_check_open, &
+              rd_mpi_io_close, &
+              rd_mpi_io_open, &
+              rd_mpi_io_particle_filetypes, &
+              rrd_mpi_io, &
+              rrd_mpi_io_global_array, &
+              rrd_mpi_io_particles, &
+              wrd_mpi_io, &
+              wrd_mpi_io_global_array, &
+              wrd_mpi_io_particles

     USE statistics, &
…
         surf_lsm_h, &
         surf_usm_h
+
+!-- Next lines are in preparation for the output of particle data
+!    USE data_output_particle_mod, &
+!        ONLY: dop_active, dop_init, dop_output_tseries

 #if defined( __parallel ) && !defined( __mpifh )
…
         write_particle_statistics

     NAMELIST /particle_parameters/ &
        aero_species, &
        aero_type, &
…
        na, &
        number_concentration, &
+       number_of_output_particles, &
        number_of_particle_groups, &
        number_particles_per_gridbox, &
+       oversize, &
        particles_per_point, &
        particle_advection_start, &
        particle_advection_interpolation, &
        particle_maximum_age, &
+       part_output, &
+       part_inc, &
+       part_percent, &
        pdx, &
        pdy, &
…
        splitting_mode, &
        step_dealloc, &
+       unlimited_dimension, &
        use_sgs_for_particles, &
        vertical_particle_advection, &
…
 !-- Initialize collision kernels
     IF ( collision_kernel /= 'none' )  CALL lpm_init_kernels
+
 !
 !-- For the first model run of a possible job chain initialize the
…
                                        0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, &
                                        0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, &
-                                       0, 0, 0_idp, .FALSE., -1 )
+                                       0, 0, 0_idp, .FALSE., -1, -1 )

     particle_groups = particle_groups_type( 0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp )
…
     IF ( nested_run )  CALL pmcp_g_init
 #endif
+
+!-- next line is in preparation for particle data output
+!    CALL dop_init

 !
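Note: the new output-related switches above extend the existing &particle_parameters namelist. A minimal sketch of a runtime namelist using them — the values are purely illustrative and the part_output entries are assumed example strings, not documented defaults:

 &particle_parameters
    particle_advection_start   = 0.0,
    number_of_particle_groups  = 1,
    number_of_output_particles = 1000,       ! number of particles in the output file
    oversize                   = 100.0,      ! spare particles reserved in output file (% of initial number)
    part_output                = 'pt', 'u',  ! particle quantities to output (assumed example values)
    part_inc                   = 1,          ! write every part_inc-th particle
    part_percent               = 100.0,      ! percentage of particles written to the output file
    unlimited_dimension        = .TRUE.,     ! use an unlimited (time) dimension for particle output
 /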
…
 SUBROUTINE lpm_rrd_local_particles

-    CHARACTER (LEN=10) :: particle_binary_version   !<
-    CHARACTER (LEN=10) :: version_on_file           !<
+    CHARACTER(LEN=10) :: particle_binary_version   !<
+    CHARACTER(LEN=10) :: version_on_file           !<
+
+    CHARACTER(LEN=20) :: save_restart_data_format_input   !<

     INTEGER(iwp) :: alloc_size   !<
…
     INTEGER(iwp) :: kp   !<

+    INTEGER(idp), ALLOCATABLE, DIMENSION(:,:,:) :: prt_global_index   !<
+
+    LOGICAL :: save_debug_output   !<
+
     TYPE(particle_type), DIMENSION(:), ALLOCATABLE :: tmp_particles   !<

-!
-!-- Read particle data from previous model run.
-!-- First open the input unit.
-    IF ( myid_char == '' )  THEN
-       OPEN ( 90, FILE='PARTICLE_RESTART_DATA_IN'//myid_char, &
-                  FORM='UNFORMATTED' )
-    ELSE
-       OPEN ( 90, FILE='PARTICLE_RESTART_DATA_IN/'//myid_char, &
-                  FORM='UNFORMATTED' )
-    ENDIF
-
-!
-!-- First compare the version numbers
-    READ ( 90 )  version_on_file
-    particle_binary_version = '4.0'
-    IF ( TRIM( version_on_file ) /= TRIM( particle_binary_version ) )  THEN
-       message_string = 'version mismatch concerning data from prior ' // &
-                        'run &version on file = "' // &
-                        TRIM( version_on_file ) // &
-                        '&version in program = "' // &
-                        TRIM( particle_binary_version ) // '"'
-       CALL message( 'lpm_read_restart_file', 'PA0214', 1, 2, 0, 6, 0 )
-    ENDIF
-
-!
-!-- If less particles are stored on the restart file than prescribed by
-!-- 1, the remainder is initialized by zero_particle to avoid
-!-- errors.
-    zero_particle = particle_type( 0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, &
-                                   0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, &
-                                   0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, &
-                                   0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, &
-                                   0, 0, 0_idp, .FALSE., -1 )
-!
-!-- Read some particle parameters and the size of the particle arrays,
-!-- allocate them and read their contents.
-    READ ( 90 )  bc_par_b, bc_par_lr, bc_par_ns, bc_par_t, &
-                 last_particle_release_time, number_of_particle_groups, &
-                 particle_groups, time_write_particle_data
-
-    ALLOCATE( prt_count(nzb:nzt+1,nysg:nyng,nxlg:nxrg), &
-              grid_particles(nzb:nzt+1,nysg:nyng,nxlg:nxrg) )
-
-    READ ( 90 )  prt_count
-
-    DO  ip = nxl, nxr
-       DO  jp = nys, nyn
-          DO  kp = nzb+1, nzt
-
-             number_of_particles = prt_count(kp,jp,ip)
-             IF ( number_of_particles > 0 )  THEN
-                alloc_size = MAX( INT( number_of_particles * &
-                             ( 1.0_wp + alloc_factor / 100.0_wp ) ), &
-                             1 )
-             ELSE
-                alloc_size = 1
-             ENDIF
-
-             ALLOCATE( grid_particles(kp,jp,ip)%particles(1:alloc_size) )
-
-             IF ( number_of_particles > 0 )  THEN
-                ALLOCATE( tmp_particles(1:number_of_particles) )
-                READ ( 90 )  tmp_particles
-                grid_particles(kp,jp,ip)%particles(1:number_of_particles) = tmp_particles
-                DEALLOCATE( tmp_particles )
-                IF ( number_of_particles < alloc_size )  THEN
-                   grid_particles(kp,jp,ip)%particles(number_of_particles+1:alloc_size) &
-                      = zero_particle
-                ENDIF
-             ELSE
-                grid_particles(kp,jp,ip)%particles(1:alloc_size) = zero_particle
-             ENDIF
-
-          ENDDO
-       ENDDO
-    ENDDO
-
-    CLOSE ( 90 )
+    IF ( TRIM( restart_data_format_input ) == 'fortran_binary' )  THEN
+
+!
+!--    Read particle data from previous model run.
+!--    First open the input unit.
+       IF ( myid_char == '' )  THEN
+          OPEN ( 90, FILE='PARTICLE_RESTART_DATA_IN'//myid_char, &
+                     FORM='UNFORMATTED' )
+       ELSE
+          OPEN ( 90, FILE='PARTICLE_RESTART_DATA_IN/'//myid_char, &
+                     FORM='UNFORMATTED' )
+       ENDIF
+
+!
+!--    First compare the version numbers
+       READ ( 90 )  version_on_file
+       particle_binary_version = '4.0'
+       IF ( TRIM( version_on_file ) /= TRIM( particle_binary_version ) )  THEN
+          message_string = 'version mismatch concerning data from prior ' // &
+                           'run &version on file = "' // &
+                           TRIM( version_on_file ) // &
+                           '&version in program = "' // &
+                           TRIM( particle_binary_version ) // '"'
+          CALL message( 'lpm_read_restart_file', 'PA0214', 1, 2, 0, 6, 0 )
+       ENDIF
+
+!
+!--    If less particles are stored on the restart file than prescribed by
+!--    1, the remainder is initialized by zero_particle to avoid
+!--    errors.
+       zero_particle = particle_type( 0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, &
+                                      0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, &
+                                      0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, &
+                                      0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, &
+                                      0, 0, 0_idp, .FALSE., -1, -1 )
+!
+!--    Read some particle parameters and the size of the particle arrays,
+!--    allocate them and read their contents.
+       READ ( 90 )  bc_par_b, bc_par_lr, bc_par_ns, bc_par_t, &
+                    last_particle_release_time, number_of_particle_groups, &
+                    particle_groups, time_write_particle_data
+
+       ALLOCATE( prt_count(nzb:nzt+1,nysg:nyng,nxlg:nxrg), &
+                 grid_particles(nzb:nzt+1,nysg:nyng,nxlg:nxrg) )
+
+       READ ( 90 )  prt_count
+
+       DO  ip = nxl, nxr
+          DO  jp = nys, nyn
+             DO  kp = nzb+1, nzt
+
+                number_of_particles = prt_count(kp,jp,ip)
+                IF ( number_of_particles > 0 )  THEN
+                   alloc_size = MAX( INT( number_of_particles * &
+                                ( 1.0_wp + alloc_factor / 100.0_wp ) ), &
+                                1 )
+                ELSE
+                   alloc_size = 1
+                ENDIF
+
+                ALLOCATE( grid_particles(kp,jp,ip)%particles(1:alloc_size) )
+
+                IF ( number_of_particles > 0 )  THEN
+                   ALLOCATE( tmp_particles(1:number_of_particles) )
+                   READ ( 90 )  tmp_particles
+                   grid_particles(kp,jp,ip)%particles(1:number_of_particles) = tmp_particles
+                   DEALLOCATE( tmp_particles )
+                   IF ( number_of_particles < alloc_size )  THEN
+                      grid_particles(kp,jp,ip)%particles(number_of_particles+1:alloc_size) &
+                         = zero_particle
+                   ENDIF
+                ELSE
+                   grid_particles(kp,jp,ip)%particles(1:alloc_size) = zero_particle
+                ENDIF
+
+             ENDDO
+          ENDDO
+       ENDDO
+
+       CLOSE ( 90 )
+
+    ELSEIF ( restart_data_format_input(1:3) == 'mpi' )  THEN
+
+       WRITE ( 9, * )  'Here is MPI-IO praticle input ', rd_mpi_io_check_open()
+       FLUSH(9)
+
+       ALLOCATE( prt_count(nzb:nzt+1,nysg:nyng,nxlg:nxrg), &
+                 grid_particles(nzb:nzt+1,nysg:nyng,nxlg:nxrg) )
+
+       ALLOCATE( prt_global_index(nzb:nzt+1,nysg:nyng,nxlg:nxrg) )
+!
+!--    Open restart file for read, if not already open, and do not allow usage of
+!--    shared-memory I/O
+       IF ( .NOT. rd_mpi_io_check_open() )  THEN
+          save_restart_data_format_input = restart_data_format_input
+          restart_data_format_input = 'mpi'
+          CALL rd_mpi_io_open( 'READ', 'BININ' )
+          restart_data_format_input = save_restart_data_format_input
+       ENDIF
+
+       CALL rd_mpi_io_particle_filetypes
+
+       prt_count = 0
+       CALL rrd_mpi_io( 'prt_count', prt_count )
+       CALL rrd_mpi_io( 'prt_global_index', prt_global_index )
+
+!
+!--    Allocate particles arrays
+       DO  ip = nxl, nxr
+          DO  jp = nys, nyn
+             DO  kp = nzb+1, nzt
+
+                number_of_particles = prt_count(kp,jp,ip)
+                IF ( number_of_particles > 0 )  THEN
+                   alloc_size = MAX( INT( number_of_particles * &
+                                ( 1.0_wp + alloc_factor / 100.0_wp ) ), &
+                                1 )
+                ELSE
+                   alloc_size = 1
+                ENDIF
+
+                ALLOCATE( grid_particles(kp,jp,ip)%particles(1:alloc_size) )
+
+             ENDDO
+          ENDDO
+       ENDDO
+
+!
+!--    Now read particle data from restart file
+       CALL rrd_mpi_io_particles( 'particles', prt_global_index )
+
+       IF ( .NOT. rd_mpi_io_check_open() )  THEN
+!
+!--       Do not print header a second time to the debug file
+          save_debug_output = debug_output
+          debug_output      = .FALSE.
+          CALL rd_mpi_io_close()
+          debug_output      = save_debug_output
+       ENDIF
+
+       DEALLOCATE( prt_global_index )
+
+    ENDIF
 !
 !-- Must be called to sort particles into blocks, which is needed for a fast
 !-- interpolation of the LES fields on the particle position.
     CALL lpm_sort_and_delete

 END SUBROUTINE lpm_rrd_local_particles
…

     CHARACTER (LEN=10) :: particle_binary_version   !<
-    CHARACTER (LEN=20) :: tmp_name   !< temporary variable
+    CHARACTER (LEN=32) :: tmp_name   !< temporary variable

     INTEGER(iwp) :: i    !< loop index
     INTEGER(iwp) :: ip   !<
+    INTEGER(iwp) :: j    !< loop index
     INTEGER(iwp) :: jp   !<
+    INTEGER(iwp) :: k    !< loop index
     INTEGER(iwp) :: kp   !<

-!
-!-- First open the output unit.
-    IF ( myid_char == '' )  THEN
-       OPEN ( 90, FILE='PARTICLE_RESTART_DATA_OUT'//myid_char, &
-                  FORM='UNFORMATTED')
-    ELSE
-       IF ( myid == 0 )  CALL local_system( 'mkdir PARTICLE_RESTART_DATA_OUT' )

 #if defined( __parallel )
-!
-!-- Set a barrier in order to allow that thereafter all other processors
-!-- in the directory created by PE0 can open their file
-    CALL MPI_BARRIER( comm2d, ierr )
+    INTEGER :: ierr   !<
 #endif
-       OPEN ( 90, FILE='PARTICLE_RESTART_DATA_OUT/'//myid_char, &
-                  FORM='UNFORMATTED' )
-    ENDIF
-
-!
-!-- Write the version number of the binary format.
-!-- Attention: After changes to the following output commands the version
-!-- ---------  number of the variable particle_binary_version must be
-!--            changed! Also, the version number and the list of arrays
-!--            to be read in lpm_read_restart_file must be adjusted
-!--            accordingly.
-    particle_binary_version = '4.0'
-    WRITE ( 90 )  particle_binary_version
-
-!
-!-- Write some particle parameters, the size of the particle arrays
-    WRITE ( 90 )  bc_par_b, bc_par_lr, bc_par_ns, bc_par_t, &
-                  last_particle_release_time, number_of_particle_groups, &
-                  particle_groups, time_write_particle_data
-
-    WRITE ( 90 )  prt_count

+    INTEGER(iwp) :: start_index         !<
+    INTEGER(iwp) :: start_index_on_pe   !<
+
+    INTEGER(iwp), DIMENSION(0:numprocs-1) :: nr_particles_global
+    INTEGER(iwp), DIMENSION(0:numprocs-1) :: nr_particles_local
+
+    INTEGER(idp), ALLOCATABLE, DIMENSION(:,:,:) :: prt_global_index
+
+
+    IF ( TRIM( restart_data_format_output ) == 'fortran_binary' )  THEN
+!
+!--    First open the output unit.
+       IF ( myid_char == '' )  THEN
+          OPEN ( 90, FILE='PARTICLE_RESTART_DATA_OUT'//myid_char, &
+                     FORM='UNFORMATTED')
+       ELSE
+          IF ( myid == 0 )  CALL local_system( 'mkdir PARTICLE_RESTART_DATA_OUT' )
+#if defined( __parallel )
+!
+!--       Set a barrier in order to allow that thereafter all other processors
+!--       in the directory created by PE0 can open their file
+          CALL MPI_BARRIER( comm2d, ierr )
+#endif
+          OPEN ( 90, FILE='PARTICLE_RESTART_DATA_OUT/'//myid_char, &
+                     FORM='UNFORMATTED' )
+       ENDIF
+
+!
+!--    Write the version number of the binary format.
+!--    Attention: After changes to the following output commands the version
+!--    ---------  number of the variable particle_binary_version must be
+!--               changed! Also, the version number and the list of arrays
+!--               to be read in lpm_read_restart_file must be adjusted
+!--               accordingly.
+       particle_binary_version = '4.0'
+       WRITE ( 90 )  particle_binary_version
+
+!
+!--    Write some particle parameters, the size of the particle arrays
+       WRITE ( 90 )  bc_par_b, bc_par_lr, bc_par_ns, bc_par_t, &
+                     last_particle_release_time, number_of_particle_groups, &
+                     particle_groups, time_write_particle_data
+
+       WRITE ( 90 )  prt_count

-    DO  ip = nxl, nxr
-       DO  jp = nys, nyn
-          DO  kp = nzb+1, nzt
-             number_of_particles = prt_count(kp,jp,ip)
-             particles => grid_particles(kp,jp,ip)%particles(1:number_of_particles)
-             IF ( number_of_particles <= 0 )  CYCLE
-             WRITE ( 90 )  particles
-          ENDDO
-       ENDDO
-    ENDDO
-
-    CLOSE ( 90 )
+       DO  ip = nxl, nxr
+          DO  jp = nys, nyn
+             DO  kp = nzb+1, nzt
+                number_of_particles = prt_count(kp,jp,ip)
+                particles => grid_particles(kp,jp,ip)%particles(1:number_of_particles)
+                IF ( number_of_particles <= 0 )  CYCLE
+                WRITE ( 90 )  particles
+             ENDDO
+          ENDDO
+       ENDDO
+
+       CLOSE ( 90 )

 #if defined( __parallel )
…
 #endif

-    IF ( TRIM( restart_data_format_output ) == 'fortran_binary' )  THEN

        IF ( ALLOCATED( seq_random_array_particles ) )  THEN
…
        ENDIF

     ELSEIF ( restart_data_format_output(1:3) == 'mpi' )  THEN

        IF ( ALLOCATED( seq_random_array_particles ) )  THEN
…
        ENDIF

+       ALLOCATE( prt_global_index(nzb:nzt+1,nysg:nyng,nxlg:nxrg) )
+
+#if defined( __parallel )
+!--    TODO: needs to be replaced by standard PALM message
+       IF ( TRIM( restart_data_format_output ) == 'mpi_shared_memory' )  THEN
+          WRITE( 9, * ) 'mpi_shared_memory is NOT implemented yet for particle IO'
+          FLUSH( 9 )
+          CALL MPI_ABORT( MPI_COMM_WORLD, 1, ierr )
+       ENDIF
+#endif
+
+       CALL rd_mpi_io_particle_filetypes
+
+       nr_particles_local = 0
+       nr_particles_local(myid) = SUM( prt_count )
+
+#if defined( __parallel )
+       CALL MPI_ALLREDUCE( nr_particles_local, nr_particles_global, numprocs, MPI_INTEGER, &
+                           MPI_SUM, comm2d, ierr )
+#else
+       nr_particles_global = nr_particles_local
+#endif
+
+       start_index_on_pe = 0
+       IF ( myid > 0 )  THEN
+          DO  i = 1, myid
+             start_index_on_pe = start_index_on_pe + nr_particles_global(i-1)
+          ENDDO
+       ENDIF
+
+       CALL wrd_mpi_io( 'prt_count', prt_count )
+
+       start_index = start_index_on_pe
+       DO  i = nxl, nxr
+          DO  j = nys, nyn
+             DO  k = nzb, nzt+1
+                prt_global_index(k,j,i) = start_index
+                start_index = start_index + prt_count(k,j,i)
+             ENDDO
+          ENDDO
+       ENDDO
+
+       CALL wrd_mpi_io( 'prt_global_index', prt_global_index )
+       CALL wrd_mpi_io_particles( 'particles', prt_global_index )
+
+       DEALLOCATE( prt_global_index )
+
     ENDIF
…
 !------------------------------------------------------------------------------!
 SUBROUTINE lpm_wrd_global

+#if defined( __parallel )
+    INTEGER :: ierr   !<
+#endif
+
+    REAL(wp), DIMENSION(4,max_number_of_particle_groups) :: particle_groups_array   !<
+

     IF ( TRIM( restart_data_format_output ) == 'fortran_binary' )  THEN
…
        CALL wrd_mpi_io( 'interpolation_simple_predictor', interpolation_simple_predictor )
        CALL wrd_mpi_io( 'interpolation_trilinear', interpolation_trilinear )
+!
+!--    Write some global particle parameters
+!--    In case of Fortran binary format, these variables are written to unit 90
+       CALL wrd_mpi_io( 'bc_par_b', bc_par_b )
+       CALL wrd_mpi_io( 'bc_par_lr', bc_par_lr )
+       CALL wrd_mpi_io( 'bc_par_ns', bc_par_ns )
+       CALL wrd_mpi_io( 'bc_par_t', bc_par_t )
+       CALL wrd_mpi_io( 'last_particle_release_time', last_particle_release_time )
+       CALL wrd_mpi_io( 'number_of_particle_groups', number_of_particle_groups )
+       CALL wrd_mpi_io( 'time_write_particle_data', time_write_particle_data )
+
+!
+!--    Write particle_group informations via 2D array to avoid another overlay in
+!--    restart_data_mpi_io_mod.
+!--    TODO: convert the following to a standard PALM message
+       IF( STORAGE_SIZE( particle_groups(1) ) / (wp*8) /= 4 )  THEN
+          WRITE( 9, * ) 'size of structure particle_groups_type has changed '
+          FLUSH( 9 )
+#if defined( __parallel )
+          CALL MPI_ABORT( MPI_COMM_WORLD, 1, ierr )
+#else
+          STOP 'error'
+#endif
+       ENDIF
+
+       particle_groups_array(1,:) = particle_groups(:)%density_ratio
+       particle_groups_array(2,:) = particle_groups(:)%radius
+       particle_groups_array(3,:) = particle_groups(:)%exp_arg
+       particle_groups_array(4,:) = particle_groups(:)%exp_term
+
+       CALL wrd_mpi_io_global_array( 'particle_groups', particle_groups_array )

     ENDIF
…
 SUBROUTINE lpm_rrd_global_mpi

+#if defined( __parallel )
+    INTEGER :: ierr   !<
+#endif
+
+    REAL(wp), DIMENSION(4,max_number_of_particle_groups) :: particle_groups_array   !<
+
+
     CALL rrd_mpi_io( 'curvature_solution_effects', curvature_solution_effects )
     CALL rrd_mpi_io( 'interpolation_simple_corrector', interpolation_simple_corrector )
     CALL rrd_mpi_io( 'interpolation_simple_predictor', interpolation_simple_predictor )
     CALL rrd_mpi_io( 'interpolation_trilinear', interpolation_trilinear )
+!
+!-- Read some particle parameters.
+!-- In case of Fortran binary format, these variables are read from unit 90.
+    CALL rrd_mpi_io( 'bc_par_b', bc_par_b )
+    CALL rrd_mpi_io( 'bc_par_lr', bc_par_lr )
+    CALL rrd_mpi_io( 'bc_par_ns', bc_par_ns )
+    CALL rrd_mpi_io( 'bc_par_t', bc_par_t )
+    CALL rrd_mpi_io( 'last_particle_release_time', last_particle_release_time )
+    CALL rrd_mpi_io( 'number_of_particle_groups', number_of_particle_groups )
+    CALL rrd_mpi_io( 'time_write_particle_data', time_write_particle_data )
+
+!
+!-- Read particle group information via 2d-array to avoid another overlay in
+!-- restart_data_mpi_io_mod.
+!-- TODO: convert the following to a standard PALM message
+    IF ( STORAGE_SIZE( particle_groups(1) ) / (wp*8) /= 4 )  THEN
+       WRITE( 9, * ) 'size of structure particle_groups_type has changed '
+       FLUSH( 9 )
+#if defined( __parallel )
+       CALL MPI_ABORT( MPI_COMM_WORLD, 1, ierr )
+#else
+       STOP 'error'
+#endif
+    ENDIF
+
+    CALL rrd_mpi_io_global_array( 'particle_groups', particle_groups_array )
+
+    particle_groups(:)%density_ratio = particle_groups_array(1,:)
+    particle_groups(:)%radius        = particle_groups_array(2,:)
+    particle_groups(:)%exp_arg       = particle_groups_array(3,:)
+    particle_groups(:)%exp_term      = particle_groups_array(4,:)

 END SUBROUTINE lpm_rrd_global_mpi
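Note: the global particle index written in lpm_wrd_local is built from an exclusive prefix sum — every PE learns the particle counts of all PEs via MPI_ALLREDUCE, offsets its first particle by the summed counts of all lower-ranked PEs, and then assigns consecutive indices column by column. A stripped-down, serial sketch of that start-index scheme (free-standing program, counts invented for illustration):

 PROGRAM prefix_sum_sketch
 !
 !-- Minimal illustration of the start-index computation in lpm_wrd_local:
 !-- an exclusive prefix sum over the per-PE particle counts yields the
 !-- first global particle index of each PE. Serial stand-in, no MPI needed.
    IMPLICIT NONE

    INTEGER, PARAMETER :: numprocs = 4
    INTEGER            :: i, myid, start_index_on_pe
    INTEGER, DIMENSION(0:numprocs-1) :: nr_particles_global = (/ 10, 0, 7, 3 /)   ! invented counts

    DO  myid = 0, numprocs-1
       start_index_on_pe = 0
       IF ( myid > 0 )  THEN
          DO  i = 1, myid
             start_index_on_pe = start_index_on_pe + nr_particles_global(i-1)
          ENDDO
       ENDIF
       PRINT*, 'PE', myid, ' first global particle index:', start_index_on_pe   ! 0, 10, 10, 17
    ENDDO

 END PROGRAM prefix_sum_sketch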
palm/trunk/SOURCE/mod_particle_attributes.f90 (r4360 → r4628)

 ! Current revisions:
 ! ------------------
 !
 !
 ! Former revisions:
 ! -----------------
 ! $Id$
+! extensions required for MPI-I/O of particle data to restart files
+!
+! 4360 2020-01-07 11:25:50Z suehring
 ! Corrected "Former revisions" section
 !
…
 USE, INTRINSIC ::  ISO_C_BINDING

+USE control_parameters, &
+    ONLY: varnamelength
+
 USE kinds

+CHARACTER(LEN=varnamelength), DIMENSION(50) :: part_output = ' '   !< namelist parameter

 INTEGER(iwp) :: dissipation_classes = 10   !< namelist parameter (see documentation)
…
 INTEGER(iwp) :: ibc_par_ns   !< particle north/south boundary condition dummy
 INTEGER(iwp) :: ibc_par_t    !< particle top boundary condition dummy
+INTEGER(iwp) :: number_of_output_particles = 0   !< number of output particles
 INTEGER(iwp) :: number_of_particles = 0   !< number of particles for each grid box (3d array is saved on prt_count)
 INTEGER(iwp) :: number_of_particle_groups = 1   !< namelist parameter (see documentation)
+INTEGER(iwp) :: part_inc = 1   !< increment of particles in output file

 INTEGER(iwp), PARAMETER :: max_number_of_particle_groups = 10   !< maximum allowed number of particle groups
…
 LOGICAL :: particle_advection = .FALSE.   !< parameter to steer the advection of particles
+LOGICAL :: unlimited_dimension = .TRUE.   !< umlimited dimension for particle output
 LOGICAL :: use_sgs_for_particles = .FALSE.   !< namelist parameter (see documentation)
 LOGICAL :: wang_kernel = .FALSE.   !< flag for collision kernel

-REAL(wp) :: alloc_factor = 20.0_wp   !< namelist parameter (see documentation)
-REAL(wp) :: particle_advection_start = 0.0_wp   !< namelist parameter (see documentation)
+REAL(wp) :: alloc_factor = 20.0_wp   !< namelist parameter (see documentation)
+REAL(wp) :: oversize = 100.0_wp   !< reserve spare particles in output file (in % relative to initial number)
+REAL(wp) :: particle_advection_start = 0.0_wp   !< namelist parameter (see documentation)
+REAL(wp) :: part_percent = 100.0_wp   !< percentage of particles in output file

-!
-!-- WARNING: For compatibility of derived types, the BIND attribute is required, and interoperable C
-!-- datatypes must be used. These type are hard wired here! So changes in working precision (wp, iwp)
-!-- will not affect the particle_type!
-!-- The main reason for introducing the interoperable datatypes was to avoid compiler warnings of
-!-- the gfortran compiler.
-!-- The BIND attribite is required because of C_F_POINTER usage in the pmc particle interface.
-TYPE, BIND(C) :: particle_type
-   REAL(C_DOUBLE)       :: aux1            !< auxiliary multi-purpose feature
-   REAL(C_DOUBLE)       :: aux2            !< auxiliary multi-purpose feature
-   REAL(C_DOUBLE)       :: radius          !< radius of particle
-   REAL(C_DOUBLE)       :: age             !< age of particle
-   REAL(C_DOUBLE)       :: age_m           !<
-   REAL(C_DOUBLE)       :: dt_sum          !<
-   REAL(C_DOUBLE)       :: e_m             !< interpolated sgs tke
-   REAL(C_DOUBLE)       :: origin_x        !< origin x-position of particle (changed cyclic bc)
-   REAL(C_DOUBLE)       :: origin_y        !< origin y-position of particle (changed cyclic bc)
-   REAL(C_DOUBLE)       :: origin_z        !< origin z-position of particle (changed cyclic bc)
-   REAL(C_DOUBLE)       :: rvar1           !<
-   REAL(C_DOUBLE)       :: rvar2           !<
-   REAL(C_DOUBLE)       :: rvar3           !<
-   REAL(C_DOUBLE)       :: speed_x         !< speed of particle in x
-   REAL(C_DOUBLE)       :: speed_y         !< speed of particle in y
-   REAL(C_DOUBLE)       :: speed_z         !< speed of particle in z
-   REAL(C_DOUBLE)       :: weight_factor   !< weighting factor
-   REAL(C_DOUBLE)       :: x               !< x-position
-   REAL(C_DOUBLE)       :: y               !< y-position
-   REAL(C_DOUBLE)       :: z               !< z-position
-   INTEGER(C_INT)       :: class           !< radius class needed for collision
-   INTEGER(C_INT)       :: group           !< number of particle group
-   INTEGER(C_LONG_LONG) :: id              !< particle ID (64 bit integer)
-   LOGICAL(C_BOOL)      :: particle_mask   !< if this parameter is set to false the particle will be deleted
-   INTEGER(C_INT)       :: block_nr        !< number for sorting (removable?)
+TYPE, PUBLIC :: particle_type
+   REAL(wp)     :: aux1            !< auxiliary multi-purpose feature
+   REAL(wp)     :: aux2            !< auxiliary multi-purpose feature
+   REAL(wp)     :: radius          !< radius of particle
+   REAL(wp)     :: age             !< age of particle
+   REAL(wp)     :: age_m           !<
+   REAL(wp)     :: dt_sum          !<
+   REAL(wp)     :: e_m             !< interpolated sgs tke
+   REAL(wp)     :: origin_x        !< origin x-position of particle (changed cyclic bc)
+   REAL(wp)     :: origin_y        !< origin y-position of particle (changed cyclic bc)
+   REAL(wp)     :: origin_z        !< origin z-position of particle (changed cyclic bc)
+   REAL(wp)     :: rvar1           !<
+   REAL(wp)     :: rvar2           !<
+   REAL(wp)     :: rvar3           !<
+   REAL(wp)     :: speed_x         !< speed of particle in x
+   REAL(wp)     :: speed_y         !< speed of particle in y
+   REAL(wp)     :: speed_z         !< speed of particle in z
+   REAL(wp)     :: weight_factor   !< weighting factor
+   REAL(wp)     :: x               !< x-position
+   REAL(wp)     :: y               !< y-position
+   REAL(wp)     :: z               !< z-position
+   INTEGER(iwp) :: class           !< radius class needed for collision
+   INTEGER(iwp) :: group           !< number of particle group
+   INTEGER(idp) :: id              !< particle ID (64 bit integer)
+   LOGICAL      :: particle_mask   !< if this parameter is set to false the particle will be deleted
+   INTEGER(iwp) :: block_nr        !< number for sorting (removable?)
+   INTEGER(iwp) :: particle_nr=-1  !< particle number for particle IO (increment one
 END TYPE particle_type
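Note: with the BIND(C) attribute and the interoperable C kinds gone, the restart I/O treats particles as raw byte records sized via the STORAGE_SIZE intrinsic (bits / 8), and lpm_wrd_global guards the particle_groups overlay with the same intrinsic. A self-contained sketch of that consistency check, using a reduced stand-in type and a stand-in wp (not PALM's actual kind module):

 PROGRAM storage_size_sketch
 !
 !-- The MPI-I/O routines size derived-type records with STORAGE_SIZE (bits)
 !-- divided by 8. The same idea backs the check in lpm_wrd_global that the
 !-- 2d-array overlay for particle_groups still matches the type layout.
    IMPLICIT NONE

    INTEGER, PARAMETER :: wp = KIND( 1.0D0 )   ! stand-in for PALM's working precision (8 bytes)

    TYPE particle_groups_sketch_type
       REAL(wp) :: density_ratio, radius, exp_arg, exp_term   ! 4 REAL(wp) components
    END TYPE particle_groups_sketch_type

    TYPE(particle_groups_sketch_type) :: pg

 !
 !-- 4 components of wp bytes each: the quotient must be 4, otherwise the
 !-- 2d-array overlay used for restart I/O would no longer fit the type
    IF ( STORAGE_SIZE( pg ) / (wp*8) /= 4 )  THEN
       PRINT*, 'size of structure particle_groups_sketch_type has changed'
    ELSE
       PRINT*, 'record size in bytes:', STORAGE_SIZE( pg ) / 8   ! prints 32
    ENDIF

 END PROGRAM storage_size_sketch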
palm/trunk/SOURCE/posix_interface_mod.f90 (r4495 → r4628)

 ! Current revisions:
 ! -----------------
 !
 !
 ! Former revisions:
 ! -----------------
 ! $Id$
+! extensions required for MPI-I/O of particle data to restart files
+!
+! 4495 2020-04-13 20:11:20Z raasch
 ! Initial version (K. Ketelsen)
 !
…
    MODULE PROCEDURE posix_read_int_1d
    MODULE PROCEDURE posix_read_int_2d
+   MODULE PROCEDURE posix_read_i4_3d
+   MODULE PROCEDURE posix_read_i8_3d
    MODULE PROCEDURE posix_read_offset_1d
    MODULE PROCEDURE posix_read_real_1d
…
    MODULE PROCEDURE posix_write_int_1d
    MODULE PROCEDURE posix_write_int_2d
+   MODULE PROCEDURE posix_write_i4_3d
+   MODULE PROCEDURE posix_write_i8_3d
    MODULE PROCEDURE posix_write_offset_1d
    MODULE PROCEDURE posix_write_real_1d
…
-SUBROUTINE posix_read_int_2d (fid, data, nw)
+SUBROUTINE posix_read_int_2d( fid, data, nw )

    IMPLICIT NONE
…
 END SUBROUTINE posix_read_int_2d
+
+
+
+SUBROUTINE posix_read_i4_3d( fid, data, nw )
+
+   IMPLICIT NONE
+
+   INTEGER(KIND=isp), INTENT(IN), TARGET, DIMENSION(:,:,:) :: data      !<
+   INTEGER, INTENT(IN)                                     :: fid       !<
+   INTEGER                                                 :: nr_byte   !<
+   INTEGER, INTENT(IN)                                     :: nw        !<
+
+   TYPE(C_PTR) :: buf   !<
+
+
+   nr_byte = nw * isp
+   buf = C_LOC( data )
+
+   CALL posix_read( fid, buf, nr_byte )
+
+END SUBROUTINE posix_read_i4_3d
+
+
+
+SUBROUTINE posix_read_i8_3d( fid, data, nw )
+
+   IMPLICIT NONE
+
+   INTEGER(KIND=idp), INTENT(IN), TARGET, DIMENSION(:,:,:) :: data      !<
+   INTEGER, INTENT(IN)                                     :: fid       !<
+   INTEGER                                                 :: nr_byte   !<
+   INTEGER, INTENT(IN)                                     :: nw        !<
+
+   TYPE(C_PTR) :: buf   !<
+
+
+   nr_byte = nw * idp
+   buf = C_LOC( data )
+
+   CALL posix_read( fid, buf, nr_byte )
+
+END SUBROUTINE posix_read_i8_3d
…
+SUBROUTINE posix_write_i4_3d( fid, data, nw )
+
+   IMPLICIT NONE
+
+   INTEGER, INTENT(IN) :: fid       !<
+   INTEGER             :: nr_byte   !<
+   INTEGER, INTENT(IN) :: nw        !<
+
+   INTEGER(KIND=isp), INTENT(IN), TARGET, DIMENSION(:,:,:) :: data   !<
+
+   TYPE(C_PTR) :: buf   !<
+
+
+   nr_byte = nw * isp
+   buf = C_LOC( data )
+
+   CALL posix_write( fid, buf, nr_byte )
+
+END SUBROUTINE posix_write_i4_3d
+
+
+
+SUBROUTINE posix_write_i8_3d( fid, data, nw )
+
+   IMPLICIT NONE
+
+   INTEGER, INTENT(IN) :: fid       !<
+   INTEGER             :: nr_byte   !<
+   INTEGER, INTENT(IN) :: nw        !<
+
+   INTEGER(KIND=idp), INTENT(IN), TARGET, DIMENSION(:,:,:) :: data   !<
+
+   TYPE(C_PTR) :: buf   !<
+
+
+   nr_byte = nw * idp
+   buf = C_LOC( data )
+
+   CALL posix_write( fid, buf, nr_byte )
+
+END SUBROUTINE posix_write_i8_3d
+
+
 SUBROUTINE posix_write_offset_1d( fid, data, nw )
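Note: all of the new 3d overloads follow the same pattern — take C_LOC of the (TARGET) array, compute the transfer size as element count times bytes per element, and hand both to the byte-oriented POSIX routine. A minimal stand-alone sketch of that pattern; the actual I/O call is replaced by a byte view via C_F_POINTER so the sketch runs without the posix interface:

 PROGRAM c_ptr_byte_view_sketch
 !
 !-- Pattern behind posix_read/write_i4_3d and _i8_3d: C_LOC of the array plus
 !-- a byte count computed from SIZE * kind size. Here the "I/O" is replaced
 !-- by a C_F_POINTER byte view, which demonstrates the size computation.
    USE, INTRINSIC :: ISO_C_BINDING
    IMPLICIT NONE

    INTEGER, PARAMETER :: idp = 8                  ! stand-in for PALM's idp (bytes per element)

    INTEGER(KIND=idp), TARGET       :: data(2,3,2)
    INTEGER(C_SIGNED_CHAR), POINTER :: bytes(:)
    TYPE(C_PTR) :: buf
    INTEGER     :: nr_byte

    data    = 42_idp
    nr_byte = SIZE( data ) * idp                   ! 12 elements * 8 bytes = 96
    buf     = C_LOC( data )

 !
 !-- A posix_write( fid, buf, nr_byte ) call would consume exactly these bytes
    CALL C_F_POINTER( buf, bytes, (/ nr_byte /) )
    PRINT*, 'bytes to transfer:', SIZE( bytes )    ! prints 96

 END PROGRAM c_ptr_byte_view_sketch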
palm/trunk/SOURCE/restart_data_mpi_io_mod.f90 (r4622 → r4628)

 ! -----------------
 ! $Id$
+! extensions required for MPI-I/O of particle data to restart files
+!
+! 4622 2020-07-23 09:02:23Z raasch
 ! unused variables removed
 !
…
 !
 ! 4495 2020-04-13 20:11:20Z raasch
-! Initial version (K. Ketelsen), adjusted to PALM formatting standards (s. Raasch)
+! Initial version (K. Ketelsen), adjusted to PALM formatting standards (S. Raasch)
 !
…
    USE kinds

+   USE particle_attributes, &
+       ONLY: grid_particles, &
+             particles, &
+             particle_type, &
+             prt_count, &
+             zero_particle
+
    USE pegrid, &
        ONLY: comm1dx, &
…
    INTEGER(iwp) :: comm_io   !< Communicator for MPI-IO
-   INTEGER(iwp) :: fh        !<
+   INTEGER(iwp) :: fh = -1   !< MPI-IO file handle
 #if defined( __parallel )
    INTEGER(iwp) :: fhs = -1  !< MPI-IO file handle to open file with comm2d always
…
    INTEGER(iwp) :: ft_2d     !< MPI filetype 2D array REAL with outer boundaries
    INTEGER(iwp) :: ft_3d     !< MPI filetype 3D array REAL with outer boundaries
+   INTEGER(iwp) :: ft_3di4 = -1   !< MPI filetype 3D array INTEGER*4
+   INTEGER(iwp) :: ft_3di8 = -1   !< MPI filetype 3D array INTEGER*8
    INTEGER(iwp) :: ft_3dsoil !< MPI filetype for 3d-soil array
 #endif
…
    INTEGER(iwp) :: win_2di   !<
    INTEGER(iwp) :: win_2dr   !<
+   INTEGER(iwp) :: win_3di4 = -1   !<
+   INTEGER(iwp) :: win_3di8 = -1   !<
    INTEGER(iwp) :: win_3dr   !<
    INTEGER(iwp) :: win_3ds   !<
…
    INTEGER(KIND=rd_offset_kind) :: header_position   !<

-   INTEGER(iwp), DIMENSION(:,:), POINTER, CONTIGUOUS :: array_2di!<
+   INTEGER(iwp), DIMENSION(:,:), POINTER, CONTIGUOUS :: array_2di   !<

    INTEGER(iwp), DIMENSION(:,:), ALLOCATABLE :: m_end_index     !<
…
    INTEGER(iwp), DIMENSION(:,:), ALLOCATABLE :: m_start_index   !<

+   INTEGER(isp), DIMENSION(:,:,:), POINTER, CONTIGUOUS :: array_3di4   !<
+   INTEGER(idp), DIMENSION(:,:,:), POINTER, CONTIGUOUS :: array_3di8   !<

    LOGICAL :: all_pes_write   !< all PEs have data to write
…
    TYPE(domain_decomposition_grid_features) :: prerun_grid   !< grid variables for the prerun

+!
+!-- MPI_INTEGER8 is not standard MPI, but is supported on most MPI distibutions
+!-- If not suppported, a workaround could be enabled with the following preprocessor directive
+!#if defined( __NO_INTEGER8)
+!   INTEGER :: MPI_INTEGER8   !< MPI data type INTEGER8
+!#endif

    SAVE
…
    END INTERFACE rd_mpi_io_close

+   INTERFACE rd_mpi_io_check_open
+      MODULE PROCEDURE rd_mpi_io_check_open
+   END INTERFACE rd_mpi_io_check_open
+
    INTERFACE rd_mpi_io_open
       MODULE PROCEDURE rd_mpi_io_open
…
       MODULE PROCEDURE rrd_mpi_io_int
       MODULE PROCEDURE rrd_mpi_io_int_2d
+      MODULE PROCEDURE rrd_mpi_io_int4_3d
+      MODULE PROCEDURE rrd_mpi_io_int8_3d
       MODULE PROCEDURE rrd_mpi_io_logical
       MODULE PROCEDURE rrd_mpi_io_real
…
    END INTERFACE rrd_mpi_io_surface

+   INTERFACE rrd_mpi_io_particles
+      MODULE PROCEDURE rrd_mpi_io_particles
+   END INTERFACE rrd_mpi_io_particles
+
+   INTERFACE rd_mpi_io_particle_filetypes
+      MODULE PROCEDURE rd_mpi_io_particle_filetypes
+   END INTERFACE rd_mpi_io_particle_filetypes
+
    INTERFACE rd_mpi_io_surface_filetypes
       MODULE PROCEDURE rd_mpi_io_surface_filetypes
…
       MODULE PROCEDURE wrd_mpi_io_int
       MODULE PROCEDURE wrd_mpi_io_int_2d
+      MODULE PROCEDURE wrd_mpi_io_int4_3d
+      MODULE PROCEDURE wrd_mpi_io_int8_3d
       MODULE PROCEDURE wrd_mpi_io_logical
       MODULE PROCEDURE wrd_mpi_io_real
…
    END INTERFACE wrd_mpi_io_global_array

+   INTERFACE wrd_mpi_io_particles
+      MODULE PROCEDURE wrd_mpi_io_particles
+   END INTERFACE wrd_mpi_io_particles
+
    INTERFACE wrd_mpi_io_surface
       MODULE PROCEDURE wrd_mpi_io_surface
…
    PUBLIC rd_mpi_io_check_array, &
+          rd_mpi_io_check_open, &
           rd_mpi_io_close, &
           rd_mpi_io_open, &
+          rd_mpi_io_particle_filetypes, &
+          rd_mpi_io_surface_filetypes, &
           rrd_mpi_io, &
           rrd_mpi_io_global_array, &
+          rrd_mpi_io_particles, &
           rrd_mpi_io_surface, &
-          rd_mpi_io_surface_filetypes, &
           wrd_mpi_io, &
           wrd_mpi_io_global_array, &
+          wrd_mpi_io_particles, &
           wrd_mpi_io_surface
…
    TYPE(C_PTR) :: buf_ptr   !<
 #endif

-!  write(9,*) 'Here is rd_mpi_io_open',nx,nx_on_file,ny,ny_on_file,TRIM(action)   !kk may become Debug Output
-
    offset = 0
…
 !--------------------------------------------------------------------------------------------------!
 ! Description:
 ! ------------
+!> Read 3d-INTEGER*4 array with MPI-IO
+!--------------------------------------------------------------------------------------------------!
+SUBROUTINE rrd_mpi_io_int4_3d( name, data )
+
+   IMPLICIT NONE
+
+   CHARACTER(LEN=*), INTENT(IN) :: name   !<
+
+   INTEGER(iwp) :: i   !<
+
+#if defined( __parallel )
+   INTEGER, DIMENSION(rd_status_size) :: status   !<
+#endif
+
+   LOGICAL :: found   !<
+
+   INTEGER(isp), INTENT(INOUT), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) :: data   !<
+
+
+   found = .FALSE.
+   data  = -1.0
+
+   DO  i = 1, tgh%nr_arrays
+      IF ( TRIM(array_names(i)) == TRIM( name ) )  THEN
+         array_position = array_offset(i)
+         found = .TRUE.
+         EXIT
+      ENDIF
+   ENDDO
+
+   IF ( found )  THEN
+
+#if defined( __parallel )
+      CALL sm_io%sm_node_barrier()   ! Has no effect if I/O on limited # of cores is inactive
+      IF( sm_io%iam_io_pe )  THEN
+         CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_INTEGER, ft_3di4, 'native', &
+                                 MPI_INFO_NULL, ierr )
+         CALL MPI_FILE_READ_ALL( fh, array_3di4, SIZE( array_3di4 ), MPI_INTEGER, status, ierr )
+      ENDIF
+      CALL sm_io%sm_node_barrier()
+#else
+      CALL posix_lseek( fh, array_position )
+      CALL posix_read(fh, array_3di4, SIZE( array_3di4 ) )
+#endif
+      IF ( include_total_domain_boundaries )  THEN
+         DO  i = iog%nxl, iog%nxr
+            data(:,iog%nys-nbgp:iog%nyn-nbgp,i-nbgp) = array_3di4(:,i,iog%nys:iog%nyn)
+         ENDDO
+      ELSE
+         DO  i = nxl, nxr
+            data(:,nys:nyn,i) = array_3di4(:,i,nys:nyn)
+         ENDDO
+      ENDIF
+
+   ELSE
+
+      message_string = '3d-INTEGER*4 array "' // TRIM( name ) // '" not found in restart file'
+      CALL message( 'rrd_mpi_io_int4_3d', 'PA0722', 3, 2, 0, 6, 0 )
+
+   ENDIF
+
+
+END SUBROUTINE rrd_mpi_io_int4_3d
+
+
+
+!--------------------------------------------------------------------------------------------------!
+! Description:
+! ------------
+!> Read 3d-INTEGER*8 array with MPI-IO
+!--------------------------------------------------------------------------------------------------!
+SUBROUTINE rrd_mpi_io_int8_3d( name, data )
+
+   IMPLICIT NONE
+
+   CHARACTER(LEN=*), INTENT(IN) :: name   !<
+
+   INTEGER(iwp) :: i   !<
+
+#if defined( __parallel )
+   INTEGER, DIMENSION(rd_status_size) :: status   !<
+#endif
+
+   LOGICAL :: found   !<
+
+   INTEGER(idp), INTENT(INOUT), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) :: data   !<
+
+
+   found = .FALSE.
+   data  = -1.0
+
+   DO  i = 1, tgh%nr_arrays
+      IF ( TRIM(array_names(i)) == TRIM( name ) )  THEN
+         array_position = array_offset(i)
+         found = .TRUE.
+         EXIT
+      ENDIF
+   ENDDO
+
+   IF ( found )  THEN
+
+#if defined( __parallel )
+      CALL sm_io%sm_node_barrier()   ! Has no effect if I/O on limited # of cores is inactive
+      IF( sm_io%iam_io_pe )  THEN
+         CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_INTEGER8, ft_3di8, 'native', MPI_INFO_NULL, &
+                                 ierr )
+         CALL MPI_FILE_READ_ALL( fh, array_3di8, SIZE( array_3di8 ), MPI_INTEGER8, status, ierr )
+      ENDIF
+      CALL sm_io%sm_node_barrier()
+#else
+      CALL posix_lseek( fh, array_position )
+      CALL posix_read(fh, array_3di8, SIZE( array_3di8 ) )
+#endif
+      IF ( include_total_domain_boundaries )  THEN
+         DO  i = iog%nxl, iog%nxr
+            data(:,iog%nys-nbgp:iog%nyn-nbgp,i-nbgp) = array_3di8(:,i,iog%nys:iog%nyn)
+         ENDDO
+      ELSE
+         DO  i = nxl, nxr
+            data(:,nys:nyn,i) = array_3di8(:,i,nys:nyn)
+         ENDDO
+      ENDIF
+
+   ELSE
+
+      message_string = '3d-INTEGER*8 array "' // TRIM( name ) // '" not found in restart file'
+      CALL message( 'rrd_mpi_io_int8_3d', 'PA0722', 3, 2, 0, 6, 0 )
+
+   ENDIF
+
+
+END SUBROUTINE rrd_mpi_io_int8_3d
+
+
+
+!--------------------------------------------------------------------------------------------------!
+! Description:
+! ------------
 !> Read 2d-REAL array with MPI-IO
 !--------------------------------------------------------------------------------------------------!
…
 !--------------------------------------------------------------------------------------------------!
 ! Description:
 ! ------------
+!> Write 3d-INTEGER*4 array with MPI-IO
+!--------------------------------------------------------------------------------------------------!
+SUBROUTINE wrd_mpi_io_int4_3d( name, data )
+
+   IMPLICIT NONE
+
+   CHARACTER(LEN=*), INTENT(IN) :: name   !<
+
+   INTEGER(iwp) :: i   !<
+#if defined( __parallel )
+   INTEGER, DIMENSION(rd_status_size) :: status   !<
+#endif
+   INTEGER(isp), INTENT(IN), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) :: data   !<
+
+
+   IF ( header_array_index == max_nr_arrays )  THEN
+      STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded'
+   ENDIF
+
+   array_names(header_array_index)  = name
+   array_offset(header_array_index) = array_position
+   header_array_index = header_array_index + 1
+
+   IF ( include_total_domain_boundaries )  THEN
+!
+!--   Prepare output of 3d-REAL-array with ghost layers. In the virtual PE grid, the first
+!--   dimension is PEs along x, and the second along y. For MPI-IO it is recommended to have the
+!--   index order of the array in the same way, i.e. the first dimension should be along x and the
+!--   second along y. For this reason, the original PALM data need to be swaped.
+      DO  i = iog%nxl, iog%nxr
+         array_3di4(:,i,iog%nys:iog%nyn) = data(:,iog%nys-nbgp:iog%nyn-nbgp,i-nbgp)
+      ENDDO
+
+   ELSE
+!
+!--   Prepare output of 3d-REAL-array without ghost layers
+      DO  i = nxl, nxr
+         array_3di4(:,i,iog%nys:iog%nyn) = data(:,nys:nyn,i)
+      ENDDO
+
+   ENDIF
+#if defined( __parallel )
+   CALL sm_io%sm_node_barrier()   ! Has no effect if I/O on limited number of cores is inactive
+   IF ( sm_io%iam_io_pe )  THEN
+      CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_INTEGER, ft_3di4, 'native', MPI_INFO_NULL, ierr )
+      CALL MPI_FILE_WRITE_ALL( fh, array_3di4, SIZE( array_3di4 ), MPI_INTEGER, status, ierr )
+   ENDIF
+   CALL sm_io%sm_node_barrier()
+#else
+   CALL posix_lseek( fh, array_position )
+   CALL posix_write( fh, array_3di4, SIZE( array_3di4 ) )
+#endif
+!
+!-- Type conversion required, otherwise right hand side brackets are calculated assuming 4 byte INT.
+!-- Maybe a compiler problem.
+   array_position = array_position + INT( (nz+2), KIND = rd_offset_kind ) * &
+                                     INT( (iog%ny+1), KIND = rd_offset_kind ) * &
+                                     INT( (iog%nx+1), KIND = rd_offset_kind ) * isp
+
+   write(9,*) 'array_position int4_3d ',trim(name),' ',array_position
+
+END SUBROUTINE wrd_mpi_io_int4_3d
+
+
+
+!--------------------------------------------------------------------------------------------------!
+! Description:
+! ------------
+!> Write 3d-INTEGER*8 array with MPI-IO
+!--------------------------------------------------------------------------------------------------!
+SUBROUTINE wrd_mpi_io_int8_3d( name, data )
+
+   IMPLICIT NONE
+
+   CHARACTER(LEN=*), INTENT(IN) :: name   !<
+
+   INTEGER(iwp) :: i   !<
+#if defined( __parallel )
+   INTEGER, DIMENSION(rd_status_size) :: status   !<
+#endif
+   INTEGER(idp), INTENT(IN), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) :: data   !<
+
+
+   IF ( header_array_index == max_nr_arrays )  THEN
+      STOP '+++ maximum number of 2d/3d-array entries in restart file header exceeded'
+   ENDIF
+
+   array_names(header_array_index)  = name
+   array_offset(header_array_index) = array_position
+   header_array_index = header_array_index + 1
+
+   IF ( include_total_domain_boundaries )  THEN
+!
+!--   Prepare output of 3d-REAL-array with ghost layers. In the virtual PE grid, the first
+!--   dimension is PEs along x, and the second along y. For MPI-IO it is recommended to have the
+!--   index order of the array in the same way, i.e. the first dimension should be along x and the
+!--   second along y. For this reason, the original PALM data need to be swaped.
+      DO  i = iog%nxl, iog%nxr
+         array_3di8(:,i,iog%nys:iog%nyn) = data(:,iog%nys-nbgp:iog%nyn-nbgp,i-nbgp)
+      ENDDO
+
+   ELSE
+!
+!--   Prepare output of 3d-REAL-array without ghost layers
+      DO  i = nxl, nxr
+         array_3di8(:,i,iog%nys:iog%nyn) = data(:,nys:nyn,i)
+      ENDDO
+
+   ENDIF
+#if defined( __parallel )
+   CALL sm_io%sm_node_barrier()   ! Has no effect if I/O on limited number of cores is inactive
+   IF ( sm_io%iam_io_pe )  THEN
+      CALL MPI_FILE_SET_VIEW( fh, array_position, MPI_INTEGER8, ft_3di8, 'native', MPI_INFO_NULL, ierr )
+      CALL MPI_FILE_WRITE_ALL( fh, array_3di8, SIZE( array_3di8 ), MPI_INTEGER8, status, ierr )
+   ENDIF
+   CALL sm_io%sm_node_barrier()
+#else
+   CALL posix_lseek( fh, array_position )
+   CALL posix_write( fh, array_3di8, SIZE( array_3di8 ) )
+#endif
+!
+!-- Type conversion required, otherwise right hand side brackets are calculated assuming 4 byte INT.
+!-- Maybe a compiler problem.
+   array_position = array_position + INT( (nz+2), KIND = rd_offset_kind ) * &
+                                     INT( (iog%ny+1), KIND = rd_offset_kind ) * &
+                                     INT( (iog%nx+1), KIND = rd_offset_kind ) * dp
+
+   write(9,*) 'array_position int8_3d ',trim(name),' ',array_position
+
+END SUBROUTINE wrd_mpi_io_int8_3d
+
+
+
+!--------------------------------------------------------------------------------------------------!
+! Description:
+! ------------
 !> Write 3d-REAL array with MPI-IO
 !--------------------------------------------------------------------------------------------------!
…
                                      INT( (iog%ny+1), KIND = rd_offset_kind ) * &
                                      INT( (iog%nx+1), KIND = rd_offset_kind ) * wp
+
+   write(9,*) 'array_position real3d ',trim(name),' ',array_position

 END SUBROUTINE wrd_mpi_io_real_3d
…
 !--------------------------------------------------------------------------------------------------!
 ! Description:
 ! ------------
+!> Read particle data array with MPI-IO.
+!--------------------------------------------------------------------------------------------------!
+SUBROUTINE rrd_mpi_io_particles( name, prt_global_index )
+
+   IMPLICIT NONE
+
+   CHARACTER(LEN=*), INTENT(IN) :: name   !<
+   INTEGER(idp), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) :: prt_global_index   !<
+
+   INTEGER(iwp) :: array_size      !<
+   INTEGER(iwp) :: byte_column     !<
+   INTEGER(iwp) :: i               !<
+   INTEGER(iwp) :: ind             !<
+   INTEGER(iwp) :: j               !<
+   INTEGER(iwp) :: k               !<
+   INTEGER(iwp) :: n               !<
+   INTEGER(iwp) :: particle_size   !<
+
+   INTEGER(KIND=rd_offset_kind) :: disp           !<
+   INTEGER(KIND=rd_offset_kind) :: offset         !<
+   INTEGER(KIND=rd_offset_kind) :: prt_nr_bytes   !<
+
+   LOGICAL :: found   !<
+
+   REAL(dp) :: rr   !< there is no data type INTEGER*8 in MPI
+   REAL(dp) :: rs   !< use REAL*8 to compute max offset
+
+   TYPE(particle_type), DIMENSION(:), ALLOCATABLE, TARGET :: prt_data   !<
+
+#if defined( __parallel )
+   INTEGER, DIMENSION(rd_status_size) :: status   !<
+#else
+   TYPE(C_PTR) :: buf
+#endif
+
+   found = .FALSE.
+
+   DO  i = 1, tgh%nr_arrays
+      IF ( TRIM(array_names(i)) == TRIM( name ) )  THEN
+         array_position = array_offset(i)
+         found = .TRUE.
+         EXIT
+      ENDIF
+   ENDDO
+
+   IF ( found )  THEN
+
+      offset = 0
+
+      particle_size = STORAGE_SIZE(zero_particle) / 8   ! 8 here means number of bits/byte, NOT wp
+
+      array_size = 0
+      DO  i = nxl, nxr
+         DO  j = nys, nyn
+            array_size = MAX( array_size, SUM(prt_count(:,j,i)) )
+         ENDDO
+      ENDDO
+
+      write(9,*) 'particle_size_read ',particle_size,array_size,array_position,sum(prt_global_index)
+
+      ALLOCATE( prt_data(MAX(array_size,1)) )
+
+!
+!--   Write columns of particle
+#if defined( __parallel )
+      CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
+#endif
+      prt_nr_bytes = 0
+      DO  i = nxl, nxr
+         DO  j = nys, nyn
+            disp         = array_position + prt_global_index(nzb,j,i) * particle_size
+            byte_column  = SUM( prt_count(:,j,i) ) * particle_size
+            prt_nr_bytes = MAX( disp+byte_column, prt_nr_bytes )
+
+#if defined( __parallel )
+            CALL sm_io%sm_node_barrier()   ! Has no effect if I/O on limited number of cores is inactive
+            IF ( byte_column > 0 )  THEN
+               CALL MPI_FILE_SEEK( fh, disp, MPI_SEEK_SET, ierr )
+               CALL MPI_FILE_READ( fh, prt_data, byte_column, MPI_BYTE, status, ierr )
+            ENDIF
+            CALL sm_io%sm_node_barrier()
+#else
+            buf = C_LOC(prt_data)   ! use C_PTR to avaid another overlay in posix interface
+            CALL posix_lseek( fh, disp )
+            CALL posix_read( fh, buf, byte_column )
+#endif
+            ind = 1
+            DO  k = nzb, nzt+1
+               DO  n = 1, prt_count(k,j,i)
+                  grid_particles(k,j,i)%particles(n) = prt_data(ind)
+                  ind = ind+1
+               ENDDO
+            ENDDO
+
+         ENDDO
+      ENDDO
+
+#if defined( __parallel )
+      rs = prt_nr_bytes
+      CALL MPI_ALLREDUCE( rs, rr, 1, MPI_DOUBLE_PRECISION, MPI_MAX, comm2d, ierr )
+      prt_nr_bytes = rr
+#else
+      rr = rs
+#endif
+      array_position = prt_nr_bytes
+
+      write(9,*) 'array_position after particle read ',array_position,prt_nr_bytes,rs
+
+      DEALLOCATE( prt_data )
+
+   ENDIF
+
+END SUBROUTINE rrd_mpi_io_particles
+
+
+
+!--------------------------------------------------------------------------------------------------!
+! Description:
+! ------------
 !> Read 1d-REAL surface data array with MPI-IO.
 !--------------------------------------------------------------------------------------------------!
…
 #if defined( __parallel )
    INTEGER, DIMENSION(rd_status_size) :: status   !<
+#else
+   TYPE(C_PTR) :: buf   !<
 #endif

    LOGICAL :: found   !<

-   REAL(wp), INTENT(OUT), DIMENSION(:) :: data   !<
+   REAL(wp), INTENT(OUT), DIMENSION(:), TARGET :: data   !<
…
                                ierr )
 #else
+!
+!--         Use C_PTR here, because posix read does not work with indexed array
+            buf = C_LOC( data(m_start_index(j_f,i_f)) )
             CALL posix_lseek( fh, disp_f )
-            CALL posix_read( fh, data(m_start_index(j_f,i_f):), nr_bytes_f )
+            CALL posix_read( fh, buf, nr_bytes_f )
 #endif
             disp_f = disp
…
    ENDIF

-!  IF ( lo_first_index == 1 )  THEN
-!     IF ( debug_level >= 2 .AND. nr_val > 0 )  WRITE(9,*) 'r_surf_1 ', TRIM( name ), ' ', nr_val, SUM( data(1:nr_val) )
-!  ELSE
-!     IF ( debug_level >= 2 .AND. nr_val > 0 )  WRITE(9,*) 'r_surf_next ', TRIM( name ), ' ', &
-!                                                          lo_first_index,nr_val, SUM( data(1:nr_val) )
-!  ENDIF
-
 CONTAINS
…
 !--------------------------------------------------------------------------------------------------!
 ! Description:
 ! ------------
+!> Write particle data with MPI-IO.
+!--------------------------------------------------------------------------------------------------!
+SUBROUTINE wrd_mpi_io_particles( name, prt_global_index )
+
+   IMPLICIT NONE
+
+   CHARACTER(LEN=*), INTENT(IN) :: name   !<
+   INTEGER(idp), DIMENSION(nzb:nzt+1,nysg:nyng,nxlg:nxrg) :: prt_global_index   !<
+
+   INTEGER(iwp) :: array_size      !<
+   INTEGER(iwp) :: byte_column     !<
+   INTEGER(iwp) :: i               !<
+   INTEGER(iwp) :: ind             !<
+   INTEGER(iwp) :: j               !<
+   INTEGER(iwp) :: k               !<
+   INTEGER(iwp) :: n               !<
+   INTEGER(iwp) :: particle_size   !<
+
+   INTEGER(KIND=rd_offset_kind) :: disp           !<
+   INTEGER(KIND=rd_offset_kind) :: offset         !<
+   INTEGER(KIND=rd_offset_kind) :: prt_nr_bytes   !<
+
+   REAL(dp) :: rs   !< use REAL*8 to compute max offset
+   REAL(dp) :: rr   !< there is no data type INTEGER*8 in MPI
+
+
+   TYPE(particle_type), DIMENSION(:), ALLOCATABLE, TARGET :: prt_data   !<
+
+#if defined( __parallel )
+   INTEGER, DIMENSION(rd_status_size) :: status   !<
+#else
+   TYPE(C_PTR) :: buf
+#endif
+
+
+   offset = 0
+
+   array_names(header_array_index)  = TRIM(name)
+   array_offset(header_array_index) = array_position
+   header_array_index = header_array_index + 1
+
+   particle_size = STORAGE_SIZE( zero_particle ) / 8
+
+   array_size = 0
+   DO  i = nxl, nxr
+      DO  j = nys, nyn
+         array_size = MAX( array_size, SUM(prt_count(:,j,i)) )
+      ENDDO
+   ENDDO
+
+   ALLOCATE( prt_data(MAX(array_size,1)) )
+
+!
+!-- Write columns of particles.
+!-- The particles of a column are stored sequentially in the first dimension of the particles array.
+!-- Store only the particles of one cell would be possible with this setup, but the I/O portions
+!-- for a maximum of 100 particles are not big enough.
+#if defined( __parallel )
+   CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
+#endif
+   prt_nr_bytes = 0
+   DO  i = nxl, nxr
+      DO  j = nys, nyn
+         disp         = array_position + prt_global_index(nzb,j,i) * particle_size
+         byte_column  = SUM( prt_count(:,j,i) ) * particle_size
+         prt_nr_bytes = MAX( disp+byte_column, prt_nr_bytes )
+
+         ind = 1
+         DO  k = nzb, nzt+1
+            DO  n = 1, prt_count(k,j,i)
+               prt_data(ind) = grid_particles(k,j,i)%particles(n)
+               ind = ind+1
+            ENDDO
+         ENDDO
+
+#if defined( __parallel )
+         CALL sm_io%sm_node_barrier()   ! Has no effect if I/O on limited number of cores is inactive
+         IF ( byte_column > 0 )  THEN
+            CALL MPI_FILE_SEEK( fh, disp, MPI_SEEK_SET, ierr )
+            CALL MPI_FILE_WRITE( fh, prt_data, byte_column, MPI_BYTE, status, ierr )
+         ENDIF
+         CALL sm_io%sm_node_barrier()
+#else
+         buf = C_LOC(prt_data)   ! use C_PTR to avoid another overlay in posix interface
+         CALL posix_lseek( fh, disp )
+         CALL posix_write( fh, buf, byte_column )
+#endif
+      ENDDO
+   ENDDO
+
+#if defined( __parallel )
+   rs = prt_nr_bytes
+   CALL MPI_ALLREDUCE( rs, rr, 1, MPI_DOUBLE_PRECISION, MPI_MAX, comm2d, ierr )
+   prt_nr_bytes = rr
+#else
+   rr = rs
+#endif
+   array_position = prt_nr_bytes
+
+   write(9,*) 'array_position after particle ',array_position,prt_nr_bytes,rs
+
+   DEALLOCATE( prt_data )
+
+END SUBROUTINE wrd_mpi_io_particles
+
+
+
+!--------------------------------------------------------------------------------------------------!
+! Description:
+! ------------
 !> Write 1d-REAL surface data array with MPI-IO.
 !--------------------------------------------------------------------------------------------------!
…
    ENDIF

+   fh = -1
+
    restart_file_size = array_position / ( 1024.0_dp * 1024.0_dp )

 END SUBROUTINE rd_mpi_io_close
+
+
+
+FUNCTION rd_mpi_io_check_open()  RESULT ( isopen )
+
+   LOGICAL :: isopen
+
+   isopen = ( fh /= -1 )
+
+END FUNCTION rd_mpi_io_check_open
…
 !--------------------------------------------------------------------------------------------------!
 ! Description:
 ! ------------
+!> This subroutine creates file types to access 3d-INTEGER*4 arrays and 3d-INTEGER*8 arrays
+!> distributed in blocks among processes to a single file that contains the global arrays.
+!> These types are only used for particle data.
+!--------------------------------------------------------------------------------------------------!
+SUBROUTINE rd_mpi_io_particle_filetypes
+
+   IMPLICIT NONE
+
+   INTEGER, DIMENSION(3) :: dims3    !<
+   INTEGER, DIMENSION(3) :: lize3    !<
+   INTEGER, DIMENSION(3) :: start3   !<
+
+   TYPE(domain_decomposition_grid_features) :: save_io_grid   !< temporary variable to store grid settings
+
+!
+!-- MPI_INTEGER8 is not standard MPI, but is supported on most MPI distibutions
+!-- If not suppported, a workaround could be enabled with the following preprocessor directive
+!#if defined( __NO_INTEGER8)
+!   CALL MPI_TYPE_CONTIGUOUS( 2, MPI_INTEGER, MPI_INTEGER8, ierr )
+!   CALL MPI_TYPE_COMMIT( MPI_INTEGER8, ierr )
+!#endif
+
+   IF ( sm_io%is_sm_active() )  THEN
+      save_io_grid = sm_io%io_grid
+   ENDIF
+
+   IF( .NOT. pe_active_for_read )  RETURN
+
+!
+!-- Decision, if storage with or without ghost layers.
+!-- Please note that the indexing of the global array always starts at 0, even in Fortran.
+!-- Therefore the PE local indices have to be shifted by nbgp in the case with ghost layers.
+   IF ( include_total_domain_boundaries )  THEN
+
+      iog%nxl = nxl + nbgp
+      iog%nxr = nxr + nbgp
+      iog%nys = nys + nbgp
+      iog%nyn = nyn + nbgp
+      iog%nnx = nnx
+      iog%nny = nny
+      iog%nx  = nx + 2 * nbgp
+      iog%ny  = ny + 2 * nbgp
+      IF ( myidx == 0 )  THEN
+         iog%nxl = iog%nxl - nbgp
+         iog%nnx = iog%nnx + nbgp
+      ENDIF
+      IF ( myidx == npex-1  .OR.  npex == -1 )  THEN   ! npex == 1 if -D__parallel not set
+         iog%nxr = iog%nxr + nbgp
+         iog%nnx = iog%nnx + nbgp
+      ENDIF
+      IF ( myidy == 0 )  THEN
+         iog%nys = iog%nys - nbgp
+         iog%nny = iog%nny + nbgp
+      ENDIF
+      IF ( myidy == npey-1  .OR.  npey == -1 )  THEN   ! npey == 1 if -D__parallel not set
+         iog%nyn = iog%nyn + nbgp
+         iog%nny = iog%nny + nbgp
+      ENDIF
+
+      CALL sm_io%sm_adjust_outer_boundary()
+
+   ELSE
+
+      iog%nxl = nxl
+      iog%nxr = nxr
+      iog%nys = nys
+      iog%nyn = nyn
+      iog%nnx = nnx
+      iog%nny = nny
+      iog%nx  = nx
+      iog%ny  = ny
+
+   ENDIF
+
+   IF ( sm_io%is_sm_active() )  THEN
+#if defined( __parallel )
+      CALL sm_io%sm_allocate_shared( array_3di4, nzb, nzt+1, sm_io%io_grid%nxl, sm_io%io_grid%nxr,&
+                                     sm_io%io_grid%nys, sm_io%io_grid%nyn, win_3di4 )
+      CALL sm_io%sm_allocate_shared( array_3di8, nzb, nzt+1, sm_io%io_grid%nxl, sm_io%io_grid%nxr,&
+                                     sm_io%io_grid%nys, sm_io%io_grid%nyn, win_3di8 )
+#endif
+   ELSE
+      ALLOCATE( array_3di4(nzb:nzt+1,iog%nxl:iog%nxr,iog%nys:iog%nyn) )
+      ALLOCATE( array_3di8(nzb:nzt+1,iog%nxl:iog%nxr,iog%nys:iog%nyn) )
+
+      sm_io%io_grid = iog
+   ENDIF
+
+!
+!-- Create filetype for 3d INTEGER array
+   dims3(1)  = nz + 2
+   dims3(2)  = iog%nx + 1
+   dims3(3)  = iog%ny + 1
+
+   lize3(1)  = dims3(1)
+   lize3(2)  = sm_io%io_grid%nnx
+   lize3(3)  = sm_io%io_grid%nny
+
+   start3(1) = nzb
+   start3(2) = sm_io%io_grid%nxl
+   start3(3) = sm_io%io_grid%nys
+
+#if defined( __parallel )
+   IF ( sm_io%iam_io_pe )  THEN
+      CALL MPI_TYPE_CREATE_SUBARRAY( 3, dims3, lize3, start3, MPI_ORDER_FORTRAN, MPI_INTEGER, &
+                                     ft_3di4, ierr )
+      CALL MPI_TYPE_COMMIT( ft_3di4, ierr )
+
+      CALL MPI_TYPE_CREATE_SUBARRAY( 3, dims3, lize3, start3, MPI_ORDER_FORTRAN, MPI_INTEGER8, &
+                                     ft_3di8, ierr )
+      CALL MPI_TYPE_COMMIT( ft_3di8, ierr )
+   ENDIF
+#endif
+
+END SUBROUTINE rd_mpi_io_particle_filetypes
+
+
+
+!--------------------------------------------------------------------------------------------------!
+! Description:
+! ------------
 !> This subroutine creates file types to access 3d-soil arrays distributed in blocks among processes
 !> to a single file that contains the global arrays. It is not required for the serial mode.
…
    ENDIF

+!
+!-- Free last particle filetypes
+   IF ( sm_io%iam_io_pe .AND. ft_3di4 /= -1 )  THEN
+      CALL MPI_TYPE_FREE( ft_3di4, ierr )
+      CALL MPI_TYPE_FREE( ft_3di8, ierr )
+   ENDIF
+
+   IF ( sm_io%is_sm_active() .AND.  win_3di4 /= -1 )  THEN
+      CALL sm_io%sm_free_shared( win_3di4 )
+      CALL sm_io%sm_free_shared( win_3di8 )
+   ENDIF
+
    ft_surf  = -1
    win_surf = -1
 #else
-   DEALLOCATE( array_2d, array_2di, array_3d )
+   IF ( ASSOCIATED(array_2d)   )  DEALLOCATE( array_2d )
+   IF ( ASSOCIATED(array_2di)  )  DEALLOCATE( array_2di )
+   IF ( ASSOCIATED(array_3d)   )  DEALLOCATE( array_3d )
+   IF ( ASSOCIATED(array_3di4) )  DEALLOCATE( array_3di4 )
+   IF ( ASSOCIATED(array_3di8) )  DEALLOCATE( array_3di8 )
 #endif
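Note: the particle routines above bypass the subarray filetypes and instead view the file as a plain byte stream, seeking each column to its own byte offset (global particle index times record size) before reading or writing. A minimal, runnable sketch of that byte-offset pattern — file name and payload are hypothetical, and one fixed-size record block per rank stands in for the per-column particle data:

 PROGRAM byte_offset_io_sketch
 !
 !-- Core pattern of wrd_mpi_io_particles: set a byte-stream file view, then
 !-- let each rank seek to its own byte offset (prefix-summed record index *
 !-- record size) and write its records with MPI_FILE_SEEK/MPI_FILE_WRITE.
 !-- Run with several MPI ranks, e.g. mpirun -np 4 ./a.out
    USE MPI
    IMPLICIT NONE

    INTEGER, PARAMETER :: nrec = 4                  ! records per rank (invented)
    INTEGER :: fh, ierr, myid, record_size
    INTEGER, DIMENSION(MPI_STATUS_SIZE) :: status
    INTEGER(KIND=MPI_OFFSET_KIND) :: disp, offset
    REAL(KIND=8), DIMENSION(nrec) :: my_records

    CALL MPI_INIT( ierr )
    CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )

    my_records  = REAL( myid, 8 )
    record_size = 8                                 ! bytes per record

    CALL MPI_FILE_OPEN( MPI_COMM_WORLD, 'sketch.bin', &
                        MPI_MODE_CREATE + MPI_MODE_WRONLY, MPI_INFO_NULL, fh, ierr )
 !
 !-- Byte-stream view, as in the particle I/O routines
    offset = 0
    CALL MPI_FILE_SET_VIEW( fh, offset, MPI_BYTE, MPI_BYTE, 'native', MPI_INFO_NULL, ierr )
 !
 !-- Exclusive prefix offset: rank n starts after n*nrec records
    disp = INT( myid, MPI_OFFSET_KIND ) * nrec * record_size
    CALL MPI_FILE_SEEK( fh, disp, MPI_SEEK_SET, ierr )
    CALL MPI_FILE_WRITE( fh, my_records, nrec*record_size, MPI_BYTE, status, ierr )

    CALL MPI_FILE_CLOSE( fh, ierr )
    CALL MPI_FINALIZE( ierr )

 END PROGRAM byte_offset_io_sketch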
-
palm/trunk/SOURCE/shared_memory_io_mod.f90
r4620 r4628
25 25 ! -----------------
26 26 ! $Id$
27 ! extensions required for MPI-I/O of particle data to restart files
28 !
29 ! 4620 2020-07-22 14:11:16Z raasch
27 30 ! bugfix: variable definition changed
28 31 !
… …
85 88    USE kinds, &
86 89        ONLY: dp, &
90              idp, &
91              isp, &
87 92              iwp, &
88 93              sp, &
… …
192 197       PROCEDURE, PASS(this), PUBLIC :: sm_allocate_shared_3d_64
193 198       PROCEDURE, PASS(this), PUBLIC :: sm_allocate_shared_3d_32
199       PROCEDURE, PASS(this), PUBLIC :: sm_allocate_shared_3di_32
200       PROCEDURE, PASS(this), PUBLIC :: sm_allocate_shared_3di_64
194 201
195 202       GENERIC, PUBLIC :: sm_allocate_shared =>                                              &
196                          sm_allocate_shared_1d_64,  sm_allocate_shared_1d_32,                &
197                          sm_allocate_shared_2d_64,  sm_allocate_shared_2d_32,                &
198                          sm_allocate_shared_2di,    sm_allocate_shared_3d_64,                &
199                          sm_allocate_shared_3d_32,  sm_allocate_shared_1di
203                          sm_allocate_shared_1d_64,  sm_allocate_shared_1d_32,                &
204                          sm_allocate_shared_2d_64,  sm_allocate_shared_2d_32,                &
205                          sm_allocate_shared_2di,    sm_allocate_shared_3d_64,                &
206                          sm_allocate_shared_3d_32,  sm_allocate_shared_1di,                  &
207                          sm_allocate_shared_3di_32, sm_allocate_shared_3di_64
200 208 #endif
201 209    END TYPE sm_class
… …
1030 1038 END SUBROUTINE sm_allocate_shared_3d_32
1031 1039
1040
1041 !--------------------------------------------------------------------------------------------------!
1042 ! Description:
1043 ! ------------
1044 !> Allocate shared 3d-INTEGER (32 bit) array on ALL threads
1045 !--------------------------------------------------------------------------------------------------!
1046 SUBROUTINE sm_allocate_shared_3di_32( this, p3, d1s, d1e, d2s, d2e, d3s, d3e, win )
1047
1048    IMPLICIT NONE
1049
1050    CLASS(sm_class), INTENT(inout) :: this
1051
1052    INTEGER                        :: disp_unit
1053    INTEGER, INTENT(IN)            :: d1e
1054    INTEGER, INTENT(IN)            :: d1s
1055    INTEGER, INTENT(IN)            :: d2e
1056    INTEGER, INTENT(IN)            :: d2s
1057    INTEGER, INTENT(IN)            :: d3e
1058    INTEGER, INTENT(IN)            :: d3s
1059    INTEGER, SAVE                  :: pe_from = 0
1060    INTEGER, INTENT(OUT)           :: win
1061
1062    INTEGER(KIND=MPI_ADDRESS_KIND) :: rem_size
1063    INTEGER(KIND=MPI_ADDRESS_KIND) :: wsize
1064
1065    INTEGER, DIMENSION(3)          :: buf_shape
1066
1067    INTEGER(isp), DIMENSION(:,:,:), POINTER :: buf
1068    INTEGER(isp), DIMENSION(:,:,:), POINTER :: p3
1069
1070    TYPE(C_PTR), SAVE              :: base_ptr
1071    TYPE(C_PTR), SAVE              :: rem_ptr
1072
1073
1074    IF ( this%no_shared_memory_in_this_run )  RETURN
1075 !
1076 !-- Allocate shared memory on node rank 0 threads.
1077    IF ( this%sh_rank == pe_from )  THEN
1078       wsize = ( d3e - d3s + 1 ) * ( d2e - d2s + 1 ) * ( d1e - d1s + 1 )
1079    ELSE
1080       wsize = 1
1081    ENDIF
1082
1083    wsize = wsize * isp  ! Please note, size is always in bytes, independent of the displacement
1084                         ! unit
1085
1086    CALL MPI_WIN_ALLOCATE_SHARED( wsize, isp, MPI_INFO_NULL, this%comm_shared, base_ptr, win, ierr )
1087 !
1088 !-- Get C-pointer of the memory located on node-rank pe_from (sh_rank == pe_from)
1089    CALL MPI_WIN_SHARED_QUERY( win, pe_from, rem_size, disp_unit, rem_ptr, ierr )
1090 !
1091 !-- Convert C- to Fortran-pointer
1092    buf_shape(3) = d3e - d3s + 1
1093    buf_shape(2) = d2e - d2s + 1
1094    buf_shape(1) = d1e - d1s + 1
1095    CALL C_F_POINTER( rem_ptr, buf, buf_shape )
1096    p3(d1s:,d2s:,d3s:) => buf
1097 !
1098 !-- Allocate shared memory in round robin on all PEs of a node.
1099    pe_from = MOD( pe_from, this%sh_npes )
1100
1101 END SUBROUTINE sm_allocate_shared_3di_32
1102
1103
1104 !--------------------------------------------------------------------------------------------------!
1105 ! Description:
1106 ! ------------
1107 !> Allocate shared 3d-INTEGER (64 bit) array on ALL threads
1108 !--------------------------------------------------------------------------------------------------!
1109 SUBROUTINE sm_allocate_shared_3di_64( this, p3, d1s, d1e, d2s, d2e, d3s, d3e, win )
1110
1111    IMPLICIT NONE
1112
1113    CLASS(sm_class), INTENT(inout) :: this  !<
1114
1115    INTEGER                        :: disp_unit  !<
1116    INTEGER, INTENT(IN)            :: d1e  !<
1117    INTEGER, INTENT(IN)            :: d1s  !<
1118    INTEGER, INTENT(IN)            :: d2e  !<
1119    INTEGER, INTENT(IN)            :: d2s  !<
1120    INTEGER, INTENT(IN)            :: d3e  !<
1121    INTEGER, INTENT(IN)            :: d3s  !<
1122    INTEGER, SAVE                  :: pe_from = 0  !<
1123    INTEGER, INTENT(OUT)           :: win  !<
1124
1125    INTEGER(KIND=MPI_ADDRESS_KIND) :: rem_size  !<
1126    INTEGER(KIND=MPI_ADDRESS_KIND) :: wsize  !<
1127
1128    INTEGER, DIMENSION(3)          :: buf_shape  !<
1129
1130    INTEGER(idp), DIMENSION(:,:,:), POINTER :: buf  !<
1131    INTEGER(idp), DIMENSION(:,:,:), POINTER :: p3  !<
1132
1133    TYPE(C_PTR), SAVE              :: base_ptr  !<
1134    TYPE(C_PTR), SAVE              :: rem_ptr  !<
1135
1136
1137    IF ( this%no_shared_memory_in_this_run )  RETURN
1138 !
1139 !-- Allocate shared memory on node rank 0 threads.
1140    IF ( this%sh_rank == pe_from )  THEN
1141       wsize = ( d3e - d3s + 1 ) * ( d2e - d2s + 1 ) * ( d1e - d1s + 1 )
1142    ELSE
1143       wsize = 1
1144    ENDIF
1145
1146    wsize = wsize * idp  ! Please note, size is always in bytes, independent of the displacement
1147                         ! unit
1148
1149    CALL MPI_WIN_ALLOCATE_SHARED( wsize, idp, MPI_INFO_NULL, this%comm_shared, base_ptr, win, ierr )
1150 !
1151 !-- Get C-pointer of the memory located on node-rank pe_from (sh_rank == pe_from)
1152    CALL MPI_WIN_SHARED_QUERY( win, pe_from, rem_size, disp_unit, rem_ptr, ierr )
1153 !
1154 !-- Convert C- to Fortran-pointer
1155    buf_shape(3) = d3e - d3s + 1
1156    buf_shape(2) = d2e - d2s + 1
1157    buf_shape(1) = d1e - d1s + 1
1158    CALL C_F_POINTER( rem_ptr, buf, buf_shape )
1159    p3(d1s:,d2s:,d3s:) => buf
1160 !
1161 !-- Allocate shared memory in round robin on all PEs of a node.
1162    pe_from = MOD( pe_from, this%sh_npes )
1163
1164 END SUBROUTINE sm_allocate_shared_3di_64
1165
1032 1166 #endif
1033 1167
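The two new 3d-INTEGER routines reuse the node-level shared-memory pattern of the existing REAL variants. The stand-alone sketch below (illustrative only, not PALM code; all names and dimensions are made up) reduces it to the bare mechanism for a 64-bit INTEGER array: one rank per node allocates the window with MPI_WIN_ALLOCATE_SHARED, all ranks of that node obtain its base address with MPI_WIN_SHARED_QUERY, and C_F_POINTER maps the address onto a Fortran array pointer, so every rank on the node reads and writes the same memory.

PROGRAM shared_window_sketch

   USE, INTRINSIC :: ISO_C_BINDING, ONLY: C_F_POINTER, C_PTR

   USE MPI

   IMPLICIT NONE

   INTEGER, PARAMETER :: idp = SELECTED_INT_KIND( 15 )  ! 64-bit INTEGER kind, as in PALM

   INTEGER, PARAMETER :: n1 = 10, n2 = 10, n3 = 10      ! array dimensions (hypothetical)

   INTEGER :: comm_node   ! communicator containing the ranks of one shared-memory node
   INTEGER :: disp_unit, ierr, sh_rank, win

   INTEGER(KIND=MPI_ADDRESS_KIND) :: rem_size, wsize

   INTEGER(idp), DIMENSION(:,:,:), POINTER :: p3

   TYPE(C_PTR) :: base_ptr, rem_ptr

   CALL MPI_INIT( ierr )

!
!-- Group all ranks that can share memory, i.e. the ranks of one node.
   CALL MPI_COMM_SPLIT_TYPE( MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,   &
                             comm_node, ierr )
   CALL MPI_COMM_RANK( comm_node, sh_rank, ierr )

!
!-- Only node rank 0 provides memory; the size is given in bytes, independent of the
!-- displacement unit (here 8 bytes per element).
   IF ( sh_rank == 0 )  THEN
      wsize = INT( n1, MPI_ADDRESS_KIND ) * n2 * n3 * 8
   ELSE
      wsize = 0
   ENDIF
   CALL MPI_WIN_ALLOCATE_SHARED( wsize, 8, MPI_INFO_NULL, comm_node, base_ptr, win,    &
                                 ierr )

!
!-- Every rank queries the base address of the memory owned by node rank 0 and maps
!-- it onto a Fortran array pointer.
   CALL MPI_WIN_SHARED_QUERY( win, 0, rem_size, disp_unit, rem_ptr, ierr )
   CALL C_F_POINTER( rem_ptr, p3, (/ n1, n2, n3 /) )

   IF ( sh_rank == 0 )  p3 = 0_idp     ! initialize once, visible to all node ranks
   CALL MPI_BARRIER( comm_node, ierr )

   CALL MPI_WIN_FREE( win, ierr )
   CALL MPI_FINALIZE( ierr )

END PROGRAM shared_window_sketch

As in the routines above, the window size is passed in bytes regardless of the displacement unit, and only the owning rank contributes a non-zero size.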