Changeset 4792 for palm/trunk/SOURCE/data_output_particle_mod.f90
- Timestamp: Nov 23, 2020 1:02:38 PM
- Message: file re-formatted to follow the PALM coding standard
- File: palm/trunk/SOURCE/data_output_particle_mod.f90 (1 edited)
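The changeset contains no functional change: r4792 only re-formats data_output_particle_mod.f90 to the PALM coding standard, i.e. blanks inside the parentheses of calls and conditions, operators surrounded by blanks, the ENDDO/ENDIF/ELSEIF keyword forms, upper-case intrinsics such as TRIM and SIZE, sorted and aligned declarations with `!<` comments, and over-long lines wrapped. As a minimal self-contained sketch, written for this page rather than taken from the diff (the loop bounds and the value of pts_increment are invented), the same kind of particle-counting loop is shown once in the old layout and once in the layout applied by this revision:

PROGRAM style_example

   IMPLICIT NONE

   INTEGER :: i       !< loop index
   INTEGER :: n_all   !< counts all particles for the MOD selection
   INTEGER :: pcount  !< number of particles selected for output

   INTEGER, PARAMETER :: pts_increment = 2  !< hypothetical output increment

!
!-- Old layout (r4779 style):
!   pcount = 0
!   n_all  = 0
!   DO i=1,10
!      IF(MOD(n_all,pts_increment) == 0) THEN
!         pcount = pcount+1
!      ENDIF
!      n_all = n_all+1
!   END DO

!
!-- The same loop re-formatted following the PALM coding standard (r4792 style):
   pcount = 0
   n_all  = 0
   DO  i = 1, 10
      IF ( MOD( n_all, pts_increment ) == 0 )  THEN
         pcount = pcount + 1
      ENDIF
      n_all = n_all + 1
   ENDDO

   PRINT*, 'particles selected for output: ', pcount

END PROGRAM style_example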
palm/trunk/SOURCE/data_output_particle_mod.f90
r4779 r4792 24 24 ! ----------------- 25 25 ! $Id$ 26 ! file re-formatted to follow the PALM coding standard 27 ! 28 ! 4779 2020-11-09 17:45:22Z suehring 26 29 ! Avoid overlong lines and unused variables 27 30 ! … … 30 33 ! 31 34 ! 35 !--------------------------------------------------------------------------------------------------! 32 36 ! Description: 33 37 ! ------------ … … 37 41 38 42 #if defined( __parallel ) 39 USE MPI43 USE MPI 40 44 #endif 41 45 42 46 #if defined( __netcdf4 ) 43 USE NETCDF 44 #endif 45 46 USE, INTRINSIC :: ISO_C_BINDING 47 48 USE kinds, & 49 ONLY: wp, iwp, sp, dp, idp, isp 50 51 USE indices, & 52 ONLY: nbgp, nnx, nny, nx, nxl, nxlg, nxr, nxrg, ny, nyn, nyng, nys, nysg, nz, nzb, nzt 53 54 USE control_parameters, & 55 ONLY: end_time, simulated_time, dt_dopts 56 57 USE pegrid, & 58 ONLY: comm1dx, comm1dy, comm2d, myid, myidx, myidy, npex, npey, numprocs, pdims 59 60 USE particle_attributes, & 61 ONLY: particle_type, particles, grid_particle_def, grid_particles, prt_count, & 62 unlimited_dimension, pts_id_file, data_output_pts, pts_increment, pts_percentage, & 63 oversize, number_of_output_particles 64 65 USE shared_memory_io_mod, & 66 ONLY: sm_class 67 68 USE data_output_netcdf4_module, & 69 ONLY: netcdf4_init_dimension, netcdf4_get_error_message, netcdf4_stop_file_header_definition, & 70 netcdf4_init_module, netcdf4_init_variable, netcdf4_finalize, & 71 netcdf4_open_file, netcdf4_write_attribute, netcdf4_write_variable 72 73 USE cpulog, & 74 ONLY: cpu_log, log_point_s 75 76 IMPLICIT NONE 77 78 PRIVATE 79 SAVE 80 81 82 LOGICAL, PUBLIC :: dop_active = .FALSE. 83 84 85 ! Variables for restart 86 87 INTEGER(iwp), PUBLIC :: dop_prt_axis_dimension 88 INTEGER(iwp), PUBLIC :: dop_last_active_particle 89 90 91 92 93 !kk Private module variables can be declared inside the submodule 94 95 INTEGER,PARAMETER :: MAX_NR_VARIABLES = 128 96 INTEGER,PARAMETER :: NR_FIXED_VARIABLES = 16 97 98 CHARACTER(LEN=32) :: file_name !< Name of NetCDF file 99 INTEGER(iwp) :: nr_time_values !< Number of values on time axis 100 REAL(sp),ALLOCATABLE,DIMENSION(:) :: time_axis_values !< time axis Values 101 INTEGER(iwp),ALLOCATABLE,DIMENSION(:,:) :: rma_particles !< Start address and number of remote particles 102 INTEGER(iwp) :: nr_particles_PE !< Number of particles assigned for output on this thread 103 INTEGER(iwp) :: nr_particles_out !< total number od particles assigned for output 104 105 INTEGER(iwp),ALLOCATABLE,DIMENSION(:,:) :: sh_indices !< Indices in shared memory group 106 INTEGER(iwp),ALLOCATABLE,DIMENSION(:,:) :: io_indices !< Indices on IO processes 107 INTEGER(iwp),ALLOCATABLE,DIMENSION(:,:) :: mo_indices !< Indices for model communicator 108 INTEGER(iwp),ALLOCATABLE,DIMENSION(:,:) :: remote_nr_particles 109 INTEGER(iwp) :: start_local_numbering !< start increment 1 numbering on this thread 110 INTEGER(iwp) :: end_local_numbering 111 INTEGER(iwp) :: pe_start_index !< start index of the output area on this thread 112 INTEGER(iwp) :: pe_end_index 113 INTEGER(iwp) :: io_start_index !< start index of the output area on IO thread 114 INTEGER(iwp) :: io_end_index 115 INTEGER(iwp) :: nr_particles_rest !< Numbere of rest Particle (ireg. distribution) 116 LOGICAL :: irregular_distribubtion !< irregular distribution of output particlesexit 117 118 TYPE var_def 119 INTEGER(iwp) :: var_id 120 CHARACTER(len=32) :: name 121 CHARACTER(len=32) :: units 122 LOGICAL :: is_integer = .FALSE. 
123 END TYPE var_def 124 125 TYPE(var_def),DIMENSION(MAX_NR_VARIABLES) :: variables 126 TYPE(var_def),DIMENSION(NR_FIXED_VARIABLES) :: fix_variables 127 128 TYPE(sm_class) :: part_io !< manage communicator for particle IO 129 130 ! 131 ! NetCDF 132 133 INTEGER(iwp) :: file_id = -1 !< id of Netcdf file 134 INTEGER(iwp) :: nr_fix_variables !< Number of fixed variables scheduled for output 135 INTEGER(iwp) :: nr_variables !< Number of variables scheduled for output 136 137 TYPE dimension_id 138 INTEGER(iwp) :: prt 139 INTEGER(iwp) :: time 140 END TYPE dimension_id 141 142 TYPE variable_id 143 INTEGER(iwp) :: prt 144 INTEGER(iwp) :: time 145 END TYPE variable_id 146 147 TYPE(dimension_id) :: did 148 TYPE(variable_id) :: var_id 149 150 ! shared memory buffer 151 INTEGER(iwp) :: win_prt_i = -1 ! integer MPI shared Memory window 152 INTEGER(iwp) :: win_prt_r = -1 !< real MPI shared Memory window 153 INTEGER(iwp), POINTER, CONTIGUOUS, DIMENSION(:) :: out_buf_i !< integer output buffer 154 REAL(sp), POINTER, CONTIGUOUS, DIMENSION(:) :: out_buf_r !< real output buffer 155 156 ! 157 ! Particle list in file 158 159 LOGICAL :: part_list_in_file 160 INTEGER(idp),ALLOCATABLE, DIMENSION(:) :: part_id_list_file 161 ! 162 ! RMA window 163 #if defined( __parallel ) 164 INTEGER(iwp) :: win_rma_buf_i 165 INTEGER(iwp) :: win_rma_buf_r 166 #endif 167 168 INTEGER(iwp),POINTER,DIMENSION(:) :: transfer_buffer_i !< rma window to provide data, which can be fetch via MPI_Get 169 REAL(sp),POINTER,DIMENSION(:) :: transfer_buffer_r !< Same for REAL 170 INTEGER(iwp),ALLOCATABLE,DIMENSION(:) :: remote_indices !< particle nubmer of the remodte partices, used as indices in the output array 171 INTEGER(iwp) :: initial_number_of_active_particles 172 173 ! Public subroutine Interface 174 175 INTERFACE dop_init 176 MODULE PROCEDURE dop_init 177 END INTERFACE dop_init 178 179 INTERFACE dop_output_tseries 180 MODULE PROCEDURE dop_output_tseries 181 END INTERFACE dop_output_tseries 182 183 INTERFACE dop_finalize 184 MODULE PROCEDURE dop_finalize 185 END INTERFACE dop_finalize 186 187 #if defined( __parallel ) 188 INTERFACE dop_alloc_rma_mem 189 MODULE PROCEDURE dop_alloc_rma_mem_i1 190 MODULE PROCEDURE dop_alloc_rma_mem_r1 191 END INTERFACE dop_alloc_rma_mem 192 #endif 193 194 PUBLIC dop_init, dop_output_tseries, dop_finalize 195 #if defined( __parallel ) 196 PUBLIC dop_alloc_rma_mem ! 
Must be PUBLIC on NEC, although it is only used in Submodule 197 #endif 198 47 USE NETCDF 48 #endif 49 50 USE, INTRINSIC :: ISO_C_BINDING 51 52 USE kinds, & 53 ONLY: dp, idp, isp, iwp, sp, wp 54 55 USE indices, & 56 ONLY: nbgp, nnx, nny, nx, nxl, nxlg, nxr, nxrg, ny, nyn, nyng, nys, nysg, nz, nzb, nzt 57 58 USE control_parameters, & 59 ONLY: dt_dopts, end_time, simulated_time 60 61 USE pegrid, & 62 ONLY: comm1dx, comm1dy, comm2d, myid, myidx, myidy, npex, npey, numprocs, pdims 63 64 USE particle_attributes, & 65 ONLY: data_output_pts, grid_particles, grid_particle_def, number_of_output_particles, & 66 particles, particle_type, prt_count, pts_id_file, pts_increment, pts_percentage, & 67 oversize, unlimited_dimension 68 69 USE shared_memory_io_mod, & 70 ONLY: sm_class 71 72 USE data_output_netcdf4_module, & 73 ONLY: netcdf4_finalize, netcdf4_get_error_message, netcdf4_init_dimension, & 74 netcdf4_init_module, netcdf4_init_variable, netcdf4_open_file, & 75 netcdf4_stop_file_header_definition, netcdf4_write_attribute, netcdf4_write_variable 76 77 USE cpulog, & 78 ONLY: cpu_log, log_point_s 79 80 IMPLICIT NONE 81 82 PRIVATE 83 SAVE 84 85 86 LOGICAL, PUBLIC :: dop_active = .FALSE. 87 88 ! 89 !-- Variables for restart 90 INTEGER(iwp), PUBLIC :: dop_prt_axis_dimension 91 INTEGER(iwp), PUBLIC :: dop_last_active_particle 92 93 ! 94 !-- kk Private module variables can be declared inside the submodule 95 INTEGER, PARAMETER :: max_nr_variables = 128 96 INTEGER, PARAMETER :: nr_fixed_variables = 16 97 98 CHARACTER(LEN=32) :: file_name !< Name of NetCDF file 99 100 INTEGER(iwp) :: end_local_numbering 101 INTEGER(iwp) :: io_end_index 102 INTEGER(iwp) :: io_start_index !< Start index of the output area on IO thread 103 INTEGER(iwp) :: nr_particles_pe !< Number of particles assigned for output on this thread 104 INTEGER(iwp) :: nr_particles_rest !< Numbere of rest Particle (ireg. distribution) 105 INTEGER(iwp) :: nr_particles_out !< Total number od particles assigned for output 106 INTEGER(iwp) :: nr_time_values !< Number of values on time axis 107 INTEGER(iwp) :: pe_end_index 108 INTEGER(iwp) :: pe_start_index !< Start index of the output area on this thread 109 INTEGER(iwp) :: start_local_numbering !< Start increment 1 numbering on this thread 110 111 INTEGER(iwp), ALLOCATABLE, DIMENSION(:,:) :: io_indices !< Indices on IO processes 112 INTEGER(iwp), ALLOCATABLE, DIMENSION(:,:) :: mo_indices !< Indices for model communicator 113 INTEGER(iwp), ALLOCATABLE, DIMENSION(:,:) :: remote_nr_particles 114 INTEGER(iwp), ALLOCATABLE, DIMENSION(:,:) :: rma_particles !< Start address and number of remote particles 115 INTEGER(iwp), ALLOCATABLE, DIMENSION(:,:) :: sh_indices !< Indices in shared memory group 116 117 LOGICAL :: irregular_distribubtion !< irregular distribution of output particlesexit 118 119 REAL(sp), ALLOCATABLE, DIMENSION(:) :: time_axis_values !< time axis Values 120 121 TYPE var_def 122 INTEGER(iwp) :: var_id 123 CHARACTER(LEN=32) :: name 124 CHARACTER(LEN=32) :: units 125 LOGICAL :: is_integer = .FALSE. 126 END TYPE var_def 127 128 TYPE(sm_class) :: part_io !< manage communicator for particle IO 129 130 TYPE(var_def), DIMENSION(nr_fixed_variables) :: fix_variables 131 TYPE(var_def), DIMENSION(max_nr_variables) :: variables 132 133 ! 
134 !-- NetCDF 135 INTEGER(iwp) :: file_id = -1 !< id of Netcdf file 136 INTEGER(iwp) :: nr_fix_variables !< Number of fixed variables scheduled for output 137 INTEGER(iwp) :: nr_variables !< Number of variables scheduled for output 138 139 TYPE dimension_id 140 INTEGER(iwp) :: prt 141 INTEGER(iwp) :: time 142 END TYPE dimension_id 143 144 TYPE variable_id 145 INTEGER(iwp) :: prt 146 INTEGER(iwp) :: time 147 END TYPE variable_id 148 149 TYPE(dimension_id) :: did 150 TYPE(variable_id) :: var_id 151 152 ! 153 !-- Shared memory buffer 154 INTEGER(iwp) :: win_prt_i = -1 !< integer MPI shared Memory window 155 INTEGER(iwp) :: win_prt_r = -1 !< real MPI shared Memory window 156 INTEGER(iwp), POINTER, CONTIGUOUS, DIMENSION(:) :: out_buf_i !< integer output buffer 157 REAL(sp), POINTER, CONTIGUOUS, DIMENSION(:) :: out_buf_r !< real output buffer 158 159 ! 160 !-- Particle list in file 161 INTEGER(idp), ALLOCATABLE, DIMENSION(:) :: part_id_list_file 162 163 LOGICAL :: part_list_in_file 164 165 ! 166 !-- RMA window 167 #if defined( __parallel ) 168 INTEGER(iwp) :: win_rma_buf_i 169 INTEGER(iwp) :: win_rma_buf_r 170 #endif 171 172 INTEGER(iwp) :: initial_number_of_active_particles 173 INTEGER(iwp), ALLOCATABLE, DIMENSION(:) :: remote_indices !< particle nubmer of the remodte partices, 174 !< used as indices in the output array 175 INTEGER(iwp), POINTER, DIMENSION(:) :: transfer_buffer_i !< rma window to provide data, which can be 176 !< fetch via MPI_Get 177 REAL(sp), POINTER, DIMENSION(:) :: transfer_buffer_r !< Same for REAL 178 179 ! 180 !-- Public subroutine Interface 181 INTERFACE dop_init 182 MODULE PROCEDURE dop_init 183 END INTERFACE dop_init 184 185 INTERFACE dop_output_tseries 186 MODULE PROCEDURE dop_output_tseries 187 END INTERFACE dop_output_tseries 188 189 INTERFACE dop_finalize 190 MODULE PROCEDURE dop_finalize 191 END INTERFACE dop_finalize 192 193 #if defined( __parallel ) 194 INTERFACE dop_alloc_rma_mem 195 MODULE PROCEDURE dop_alloc_rma_mem_i1 196 MODULE PROCEDURE dop_alloc_rma_mem_r1 197 END INTERFACE dop_alloc_rma_mem 198 #endif 199 200 PUBLIC dop_init, dop_output_tseries, dop_finalize 201 #if defined( __parallel ) 202 PUBLIC dop_alloc_rma_mem ! Must be PUBLIC on NEC, although it is only used in submodule 203 #endif 199 204 200 205 201 206 CONTAINS 202 207 203 SUBROUTINE dop_init (read_restart) 204 IMPLICIT NONE 205 LOGICAL,INTENT(IN) :: read_restart 206 207 INTEGER(iwp) :: i !< 208 INTEGER(iwp) :: nr_particles_local !< total number of particles scheduled for output on this thread 209 #if defined( __parallel ) 210 INTEGER(iwp) :: ierr !< MPI error code 211 #endif 212 INTEGER(idp) :: nr_particles_8 !< Total number of particles in 64 bit 213 REAL(dp) :: xnr_part !< Must be 64 Bit REAL 214 INTEGER(iwp) :: nr_local_last_pe !< Number of output particles on myid == numprocs-2 215 216 INTEGER(iwp),DIMENSION(0:numprocs-1) :: nr_particles_all_s 217 INTEGER(iwp),DIMENSION(0:numprocs-1) :: nr_particles_all_r 218 INTEGER(idp),DIMENSION(0:numprocs-1) :: nr_particles_all_8 219 220 INTEGER(iwp),ALLOCATABLE,DIMENSION(:,:) :: sh_indices_s 221 INTEGER(iwp),ALLOCATABLE,DIMENSION(:,:) :: io_indices_s 222 INTEGER(iwp),ALLOCATABLE,DIMENSION(:,:) :: mo_indices_s 223 224 part_list_in_file = (pts_id_file /= ' ') 225 226 IF(.NOT. part_list_in_file) THEN 227 IF(.NOT. 
read_restart) THEN 228 229 CALL set_indef_particle_nr 230 231 CALL count_output_particles (nr_particles_local) 232 233 nr_particles_all_s = 0 234 nr_particles_all_s(myid) = nr_particles_local 235 236 #if defined( __parallel ) 237 CALL MPI_ALLREDUCE( nr_particles_all_s, nr_particles_all_r, SIZE( nr_particles_all_s ), MPI_INTEGER, & 238 MPI_SUM, comm2d, ierr ) 208 209 !--------------------------------------------------------------------------------------------------! 210 ! Description: 211 ! ------------ 212 ! 213 !--------------------------------------------------------------------------------------------------! 214 SUBROUTINE dop_init( read_restart ) 215 IMPLICIT NONE 216 217 INTEGER(iwp) :: i !< 218 #if defined( __parallel ) 219 INTEGER(iwp) :: ierr !< MPI error code 220 #endif 221 INTEGER(iwp) :: nr_local_last_pe !< Number of output particles on myid == numprocs-2 222 INTEGER(idp) :: nr_particles_8 !< Total number of particles in 64 bit 223 INTEGER(iwp) :: nr_particles_local !< total number of particles scheduled for output on this thread 224 225 INTEGER(idp), DIMENSION(0:numprocs-1) :: nr_particles_all_8 226 INTEGER(iwp), DIMENSION(0:numprocs-1) :: nr_particles_all_s 227 INTEGER(iwp), DIMENSION(0:numprocs-1) :: nr_particles_all_r 228 229 INTEGER(iwp), ALLOCATABLE, DIMENSION(:,:) :: io_indices_s 230 INTEGER(iwp), ALLOCATABLE, DIMENSION(:,:) :: mo_indices_s 231 INTEGER(iwp), ALLOCATABLE, DIMENSION(:,:) :: sh_indices_s 232 233 LOGICAL, INTENT(IN) :: read_restart 234 235 REAL(dp) :: xnr_part !< Must be 64 Bit REAL 236 237 238 part_list_in_file = (pts_id_file /= ' ') 239 240 IF ( .NOT. part_list_in_file ) THEN 241 IF ( .NOT. read_restart) THEN 242 243 CALL set_indef_particle_nr 244 245 CALL count_output_particles( nr_particles_local ) 246 247 nr_particles_all_s = 0 248 nr_particles_all_s(myid) = nr_particles_local 249 250 #if defined( __parallel ) 251 CALL MPI_ALLREDUCE( nr_particles_all_s, nr_particles_all_r, SIZE( nr_particles_all_s ), & 252 MPI_INTEGER, MPI_SUM, comm2d, ierr ) 239 253 #else 240 nr_particles_all_r = nr_particles_all_s 241 #endif 242 243 initial_number_of_active_particles = SUM(nr_particles_all_r) 244 245 start_local_numbering = 1 246 end_local_numbering = nr_particles_all_r(0) 247 248 IF ( myid > 0) THEN 249 DO i=1,numprocs-1 250 start_local_numbering = start_local_numbering+nr_particles_all_r(i-1) 251 end_local_numbering = start_local_numbering+nr_particles_all_r(i)-1 252 IF(myid == i) EXIT 253 ENDDO 254 END IF 255 256 nr_particles_all_8 = nr_particles_all_r ! Use 64 INTEGER, maybe more than 2 GB particles 257 nr_particles_8 = SUM(nr_particles_all_8) 258 IF (nr_particles_8 > 2000000000_8) THEN ! This module works only with 32 Bit INTEGER 259 write(9,*) 'Number of particles too large ',nr_particles_8; flush(9) 260 #if defined( __parallel ) 261 CALL MPI_ABORT (MPI_COMM_WORLD, 1, ierr) 262 #endif 263 ENDIF 264 nr_particles_out = nr_particles_8 ! Total number of particles scheduled for output 265 266 ! 267 !-- reserve space for additional particle 268 IF(number_of_output_particles > 0) THEN 269 nr_particles_out = MAX(number_of_output_particles, nr_particles_out) 270 ELSE IF(oversize > 100.) THEN 271 xnr_part = nr_particles_out*oversize/100. 272 nr_particles_out = xnr_part 273 ENDIF 274 ELSE 275 nr_particles_out = dop_prt_axis_dimension ! 
Get Number from restart file 276 initial_number_of_active_particles = dop_last_active_particle 277 ENDIF 278 279 ELSE 280 CALL set_indef_particle_nr 281 CALL dop_read_output_particle_list (nr_particles_out) 282 ENDIF 283 dop_prt_axis_dimension = nr_particles_out 284 ! 285 !-- The number of particles must be at least the number of MPI processes 286 287 nr_particles_out = MAX(nr_particles_out, numprocs) 288 289 290 nr_particles_PE = (nr_particles_out+numprocs-1)/numprocs ! Number of paricles scheduled for ouput on this thread 291 292 pe_start_index = myid*nr_particles_PE+1 !Output numberimng on this thread 293 pe_end_index = MIN((myid+1)*nr_particles_PE,nr_particles_out) 294 295 irregular_distribubtion = .FALSE. 296 #if defined( __parallel ) 297 ! 298 !-- In case of few particles, it can happen that not only the last thread gets fewer output particles. 299 !-- In this case, the local number of particles on thread numprocs-1 will be < 1 300 !-- If this happens, irregular distribution of output particles will be used 301 302 IF(myid == numprocs-1) Then 303 nr_local_last_pe = pe_end_index-pe_start_index+1 304 ELSE 305 nr_local_last_pe = 0 306 ENDIF 307 CALL MPI_BCAST (nr_local_last_pe, 1, MPI_INTEGER, numprocs-1, comm2d, ierr) 254 nr_particles_all_r = nr_particles_all_s 255 #endif 256 257 initial_number_of_active_particles = SUM( nr_particles_all_r ) 258 259 start_local_numbering = 1 260 end_local_numbering = nr_particles_all_r(0) 261 262 IF ( myid > 0 ) THEN 263 DO i = 1, numprocs-1 264 start_local_numbering = start_local_numbering+nr_particles_all_r(i-1) 265 end_local_numbering = start_local_numbering+nr_particles_all_r(i)-1 266 IF ( myid == i ) EXIT 267 ENDDO 268 ENDIF 269 270 nr_particles_all_8 = nr_particles_all_r ! Use 64 INTEGER, maybe more than 2 GB particles 271 nr_particles_8 = SUM( nr_particles_all_8 ) 272 IF ( nr_particles_8 > 2000000000_idp ) THEN ! This module works only with 32 Bit INTEGER 273 WRITE( 9, * ) 'Number of particles too large ', nr_particles_8 ; FLUSH( 9 ) 274 #if defined( __parallel ) 275 CALL MPI_ABORT( MPI_COMM_WORLD, 1, ierr ) 276 #endif 277 ENDIF 278 nr_particles_out = nr_particles_8 ! Total number of particles scheduled for output 279 280 ! 281 !-- Reserve space for additional particle 282 IF ( number_of_output_particles > 0 ) THEN 283 nr_particles_out = MAX( number_of_output_particles, nr_particles_out ) 284 ELSEIF ( oversize > 100.0_wp ) THEN 285 xnr_part = nr_particles_out * oversize / 100.0_wp 286 nr_particles_out = xnr_part 287 ENDIF 288 ELSE 289 nr_particles_out = dop_prt_axis_dimension ! Get Number from restart file 290 initial_number_of_active_particles = dop_last_active_particle 291 ENDIF 292 293 ELSE 294 CALL set_indef_particle_nr 295 CALL dop_read_output_particle_list( nr_particles_out ) 296 ENDIF 297 dop_prt_axis_dimension = nr_particles_out 298 299 ! 300 !-- The number of particles must be at least the number of MPI processes 301 nr_particles_out = MAX( nr_particles_out, numprocs ) 302 303 304 nr_particles_pe = ( nr_particles_out + numprocs - 1 ) / numprocs ! Number of paricles scheduled for ouput on this thread 305 306 pe_start_index = myid * nr_particles_pe + 1 !Output numberimng on this thread 307 pe_end_index = MIN( ( myid + 1 ) * nr_particles_pe, nr_particles_out ) 308 309 irregular_distribubtion = .FALSE. 310 #if defined( __parallel ) 311 ! 312 !-- In case of few particles, it can happen that not only the last thread gets fewer output 313 !-- particles. In this case, the local number of particles on thread numprocs-1 will be < 1. 
314 !-- If this happens, irregular distribution of output particles will be used. 315 IF ( myid == numprocs-1 ) THEN 316 nr_local_last_pe = pe_end_index-pe_start_index + 1 317 ELSE 318 nr_local_last_pe = 0 319 ENDIF 320 CALL MPI_BCAST( nr_local_last_pe, 1, MPI_INTEGER, numprocs-1, comm2d, ierr ) 308 321 #else 309 nr_local_last_pe = nr_particles_PE310 #endif 311 IF(nr_local_last_pe < 1)THEN312 313 314 315 316 317 IF(.NOT. read_restart .AND. .NOT. part_list_in_file)THEN318 319 320 321 IF(part_list_in_file)THEN322 323 324 325 CALL part_io%sm_init_data_output_particles ()326 327 ALLOCATE (sh_indices_s(2,0:part_io%sh_npes-1))328 ALLOCATE (sh_indices(2,0:part_io%sh_npes-1))329 330 331 #if defined( __parallel ) 332 333 334 335 336 CALL MPI_ALLREDUCE( sh_indices_s, sh_indices, 2*part_io%sh_npes, MPI_INTEGER,&337 MPI_SUM,part_io%comm_shared, ierr )338 339 340 322 nr_local_last_pe = nr_particles_pe 323 #endif 324 IF ( nr_local_last_pe < 1 ) THEN 325 irregular_distribubtion = .TRUE. 326 CALL dop_setup_ireg_distribution 327 ENDIF 328 329 330 IF ( .NOT. read_restart .AND. .NOT. part_list_in_file ) THEN 331 CALL set_particle_number 332 ENDIF 333 334 IF ( part_list_in_file ) THEN 335 CALL dop_find_particle_in_outlist 336 ENDIF 337 338 CALL part_io%sm_init_data_output_particles( ) 339 340 ALLOCATE( sh_indices_s(2,0:part_io%sh_npes-1) ) 341 ALLOCATE( sh_indices(2,0:part_io%sh_npes-1) ) 342 343 344 #if defined( __parallel ) 345 sh_indices_s = 0 346 sh_indices_s(1,part_io%sh_rank) = pe_start_index 347 sh_indices_s(2,part_io%sh_rank) = pe_end_index 348 349 CALL MPI_ALLREDUCE( sh_indices_s, sh_indices, 2*part_io%sh_npes, MPI_INTEGER, MPI_SUM, & 350 part_io%comm_shared, ierr ) 351 352 io_start_index = sh_indices(1,0) ! output numbering on actual IO thread 353 io_end_index = sh_indices(2,part_io%sh_npes-1) 341 354 #else 342 io_start_index = pe_start_index ! output numbering 343 io_end_index = pe_end_index 344 #endif 345 346 347 #if defined( __parallel ) 348 CALL MPI_BCAST (part_io%io_npes, 1, MPI_INTEGER, 0, part_io%comm_shared, ierr) 349 #endif 350 ALLOCATE (io_indices(2,0:part_io%io_npes-1)) 351 IF (part_io%iam_io_pe) THEN 352 ALLOCATE (io_indices_s(2,0:part_io%io_npes-1)) 353 354 355 io_indices_s = 0 356 io_indices_s(1,part_io%io_rank) = io_start_index 357 io_indices_s(2,part_io%io_rank) = io_end_index 358 359 #if defined( __parallel ) 360 CALL MPI_ALLREDUCE( io_indices_s, io_indices, 2*part_io%io_npes, MPI_INTEGER, & 361 MPI_SUM, part_io%comm_io, ierr ) 355 io_start_index = pe_start_index ! 
output numbering 356 io_end_index = pe_end_index 357 #endif 358 359 360 #if defined( __parallel ) 361 CALL MPI_BCAST( part_io%io_npes, 1, MPI_INTEGER, 0, part_io%comm_shared, ierr ) 362 #endif 363 ALLOCATE( io_indices(2,0:part_io%io_npes-1) ) 364 IF ( part_io%iam_io_pe ) THEN 365 ALLOCATE( io_indices_s(2,0:part_io%io_npes-1) ) 366 367 io_indices_s = 0 368 io_indices_s(1,part_io%io_rank) = io_start_index 369 io_indices_s(2,part_io%io_rank) = io_end_index 370 371 #if defined( __parallel ) 372 CALL MPI_ALLREDUCE( io_indices_s, io_indices, 2 * part_io%io_npes, MPI_INTEGER, MPI_SUM, & 373 part_io%comm_io, ierr ) 362 374 #else 363 io_indices = io_indices_s 364 #endif 365 366 ENDIF 367 368 #if defined( __parallel ) 369 CALL MPI_BCAST (io_indices, size(io_indices), MPI_INTEGER, 0, part_io%comm_shared, ierr) 370 #endif 371 372 ALLOCATE (remote_nr_particles(2,0:numprocs-1)) 373 ALLOCATE (rma_particles(2,0:numprocs-1)) 374 375 ALLOCATE (mo_indices(2,0:numprocs-1)) 376 ALLOCATE (mo_indices_s(2,0:numprocs-1)) 377 378 379 mo_indices_s = 0 380 mo_indices_s(1,myid) = pe_start_index 381 mo_indices_s(2,myid) = pe_end_index 382 383 #if defined( __parallel ) 384 CALL MPI_ALLREDUCE( mo_indices_s, mo_indices, 2*numprocs, MPI_INTEGER, MPI_SUM, comm2d, ierr ) 375 io_indices = io_indices_s 376 #endif 377 378 ENDIF 379 380 #if defined( __parallel ) 381 CALL MPI_BCAST( io_indices, SIZE( io_indices ), MPI_INTEGER, 0, part_io%comm_shared, ierr ) 382 #endif 383 384 ALLOCATE( remote_nr_particles(2,0:numprocs-1) ) 385 ALLOCATE( rma_particles(2,0:numprocs-1) ) 386 387 ALLOCATE( mo_indices(2,0:numprocs-1) ) 388 ALLOCATE( mo_indices_s(2,0:numprocs-1) ) 389 390 mo_indices_s = 0 391 mo_indices_s(1,myid) = pe_start_index 392 mo_indices_s(2,myid) = pe_end_index 393 394 #if defined( __parallel ) 395 CALL MPI_ALLREDUCE( mo_indices_s, mo_indices, 2 * numprocs, MPI_INTEGER, MPI_SUM, comm2d, ierr ) 385 396 #else 386 mo_indices = mo_indices_s 387 #endif 388 389 !-- Allocate output buffer 390 391 #if defined( __parallel ) 392 CALL part_io%sm_allocate_shared (out_buf_r, io_start_index, io_end_index, win_prt_r) 393 CALL part_io%sm_allocate_shared (out_buf_i, io_start_index, io_end_index, win_prt_i) 397 mo_indices = mo_indices_s 398 #endif 399 400 !-- Allocate output buffer 401 #if defined( __parallel ) 402 CALL part_io%sm_allocate_shared( out_buf_r, io_start_index, io_end_index, win_prt_r ) 403 CALL part_io%sm_allocate_shared( out_buf_i, io_start_index, io_end_index, win_prt_i ) 394 404 #else 395 ALLOCATE(out_buf_r(io_start_index:io_end_index)) 396 ALLOCATE(out_buf_i(io_start_index:io_end_index)) 397 #endif 398 399 !-- NetCDF 400 401 CALL dop_netcdf_setup () 402 403 #if defined( __parallel ) 404 CALL MPI_BCAST (nr_fix_variables, 1, MPI_INTEGER, 0, part_io%comm_shared, ierr) 405 CALL MPI_BCAST (nr_variables, 1, MPI_INTEGER, 0, part_io%comm_shared, ierr) 406 #endif 407 408 CALL dop_count_remote_particles 409 410 CALL dop_write_fixed_variables 411 412 CALL deallocate_and_free 413 414 CALL dop_output_tseries 415 416 RETURN 417 CONTAINS 418 SUBROUTINE dop_setup_ireg_distribution 419 IMPLICIT NONE 420 421 422 nr_particles_PE = (nr_particles_out)/numprocs ! 
Number of paricles scheduled for ouput on this thread 423 424 nr_particles_rest = nr_particles_out - numprocs * nr_particles_PE 425 426 nr_particles_PE = nr_particles_PE+1 427 IF(myid < nr_particles_rest) THEN 428 pe_start_index = myid*nr_particles_PE+1 !Output numberimng on this thread 429 pe_end_index = MIN((myid+1)*nr_particles_PE,nr_particles_out) 430 ELSE 431 pe_start_index = nr_particles_rest*(nr_particles_PE)+(myid-nr_particles_rest)*(nr_particles_PE-1)+1 !Output numberimng on this thread 432 pe_end_index = MIN(pe_start_index+nr_particles_PE-2,nr_particles_out) 433 ENDIF 434 435 436 END SUBROUTINE dop_setup_ireg_distribution 437 438 END SUBROUTINE dop_init 439 ! 440 ! particle output on selected time steps 441 442 SUBROUTINE dop_output_tseries 443 IMPLICIT NONE 444 445 INTEGER(iwp) :: i !< 446 INTEGER(iwp) :: return_value !< Return value data_output_netcdf4 .. routines 447 #if defined( __parallel ) 448 INTEGER(iwp) :: ierr !< MPI error code 449 #endif 450 INTEGER(iwp),SAVE :: icount=0 !< count output steps 451 452 INTEGER(iwp),DIMENSION(2) :: bounds_origin, bounds_start, value_counts 453 REAl(wp),POINTER, CONTIGUOUS, DIMENSION(:) :: my_time 454 455 icount = icount+1 456 457 CALL dop_delete_particle_number 458 459 IF(part_list_in_file) THEN 460 461 CALL dop_find_particle_in_outlist 462 ELSE 463 CALL dop_newly_generated_particles 464 ENDIF 465 466 CALL dop_count_remote_particles 467 468 bounds_origin = 1 469 470 bounds_start(1) = io_start_index 471 bounds_start(2) = icount 472 473 value_counts(1) = io_end_index-io_start_index+1 474 value_counts(2) = 1 475 476 DO i=1,nr_variables 405 ALLOCATE( out_buf_r(io_start_index:io_end_index) ) 406 ALLOCATE( out_buf_i(io_start_index:io_end_index) ) 407 #endif 408 409 ! 410 !-- NetCDF 411 CALL dop_netcdf_setup( ) 412 413 #if defined( __parallel ) 414 CALL MPI_BCAST( nr_fix_variables, 1, MPI_INTEGER, 0, part_io%comm_shared, ierr ) 415 CALL MPI_BCAST( nr_variables, 1, MPI_INTEGER, 0, part_io%comm_shared, ierr ) 416 #endif 417 418 CALL dop_count_remote_particles 419 420 CALL dop_write_fixed_variables 421 422 CALL deallocate_and_free 423 424 CALL dop_output_tseries 425 426 RETURN 427 428 CONTAINS 429 430 431 !--------------------------------------------------------------------------------------------------! 432 ! Description: 433 ! ------------ 434 ! 435 !--------------------------------------------------------------------------------------------------! 436 SUBROUTINE dop_setup_ireg_distribution 437 438 IMPLICIT NONE 439 440 nr_particles_pe = ( nr_particles_out ) / numprocs ! Number of paricles scheduled for ouput on this thread 441 442 nr_particles_rest = nr_particles_out - numprocs * nr_particles_pe 443 444 nr_particles_pe = nr_particles_pe + 1 445 IF ( myid < nr_particles_rest ) THEN 446 pe_start_index = myid * nr_particles_pe + 1 ! Output numberimng on this thread 447 pe_end_index = MIN( ( myid + 1 ) * nr_particles_pe, nr_particles_out) 448 ELSE 449 pe_start_index = nr_particles_rest * ( nr_particles_pe ) & 450 + ( myid - nr_particles_rest ) * ( nr_particles_pe - 1 ) + 1 !Output numberimng on this thread 451 pe_end_index = MIN( pe_start_index + nr_particles_pe - 2, nr_particles_out ) 452 ENDIF 453 454 455 END SUBROUTINE dop_setup_ireg_distribution 456 457 END SUBROUTINE dop_init 458 459 460 !--------------------------------------------------------------------------------------------------! 461 ! Description: 462 ! 
------------ 463 !> Particle output on selected time steps 464 !--------------------------------------------------------------------------------------------------! 465 SUBROUTINE dop_output_tseries 466 467 IMPLICIT NONE 468 469 INTEGER(iwp) :: i !< 470 INTEGER(iwp) :: return_value !< Return value data_output_netcdf4 .. routines 471 #if defined( __parallel ) 472 INTEGER(iwp) :: ierr !< MPI error code 473 #endif 474 INTEGER(iwp), SAVE :: icount=0 !< count output steps 475 476 INTEGER(iwp), DIMENSION(2) :: bounds_origin, bounds_start, value_counts 477 478 REAl(wp), POINTER, CONTIGUOUS, DIMENSION(:) :: my_time 479 480 icount = icount + 1 481 482 CALL dop_delete_particle_number 483 484 IF ( part_list_in_file ) THEN 485 486 CALL dop_find_particle_in_outlist 487 ELSE 488 CALL dop_newly_generated_particles 489 ENDIF 490 491 CALL dop_count_remote_particles 492 493 bounds_origin = 1 494 495 bounds_start(1) = io_start_index 496 bounds_start(2) = icount 497 498 value_counts(1) = io_end_index-io_start_index + 1 499 value_counts(2) = 1 500 501 DO i = 1, nr_variables 477 502 #if defined( __netcdf4 ) 478 IF(variables(i)%is_integer) THEN 479 out_buf_i = NF90_FILL_INT 480 ELSE 481 out_buf_r = NF90_FILL_REAL 482 ENDIF 483 #endif 484 485 CALL cpu_log( log_point_s(99), 'dop_fill_out_buf', 'start' ) 486 CALL dop_fill_out_buf (variables(i)) 487 CALL cpu_log( log_point_s(99), 'dop_fill_out_buf', 'stop' ) 488 489 CALL cpu_log( log_point_s(88), 'dop_get_remote_particle', 'start' ) 490 #if defined( __parallel ) 491 CALL dop_get_remote_particle (variables(i)%is_integer) 492 #endif 493 CALL cpu_log( log_point_s(88), 'dop_get_remote_particle', 'stop' ) 494 495 CALL cpu_log( log_point_s(89), 'particle NetCDF output', 'start' ) 496 CALL part_io%sm_node_barrier() 497 IF (part_io%iam_io_pe) THEN 498 IF(variables(i)%is_integer) THEN 499 CALL netcdf4_write_variable('parallel', file_id, variables(i)%var_id, bounds_start,& 500 value_counts, bounds_origin, & 501 .FALSE., values_int32_1d=out_buf_i, return_value=return_value) 502 ELSE 503 CALL netcdf4_write_variable('parallel', file_id, variables(i)%var_id, bounds_start,& 504 value_counts, bounds_origin, & 505 .FALSE., values_real32_1d=out_buf_r, return_value=return_value) 506 ENDIF 507 ENDIF 508 CALL part_io%sm_node_barrier() 509 CALL cpu_log( log_point_s(89), 'particle NetCDF output', 'stop' ) 510 511 #if defined( __parallel ) 512 CALL MPI_BARRIER(comm2d, ierr) !kk This Barrier is necessary, not sure why 513 #endif 514 END DO 515 516 CALL deallocate_and_free 517 518 ! write Time value 519 IF(myid == 0) THEN 520 ALLOCATE(my_time(1)) 521 bounds_start(1) = icount 522 value_counts(1) = 1 523 my_time(1) = simulated_time 524 IF(unlimited_dimension) THEN ! 
For workaround described in dop finalize 525 time_axis_values(icount) = my_time(1) 526 ELSE 527 CALL netcdf4_write_variable('parallel', file_id, var_id%time, bounds_start(1:1), & 528 value_counts(1:1), bounds_origin(1:1), & 529 .TRUE., values_realwp_1d=my_time, return_value=return_value) 530 ENDIF 531 DEALLOCATE(my_time) 532 ENDIF 533 534 RETURN 535 END SUBROUTINE dop_output_tseries 536 537 SUBROUTINE dop_finalize 538 IMPLICIT NONE 503 IF ( variables(i)%is_integer ) THEN 504 out_buf_i = NF90_FILL_INT 505 ELSE 506 out_buf_r = NF90_FILL_REAL 507 ENDIF 508 #endif 509 510 CALL cpu_log( log_point_s(99), 'dop_fill_out_buf', 'start' ) 511 CALL dop_fill_out_buf (variables(i)) 512 CALL cpu_log( log_point_s(99), 'dop_fill_out_buf', 'stop' ) 513 514 CALL cpu_log( log_point_s(88), 'dop_get_remote_particle', 'start' ) 515 #if defined( __parallel ) 516 CALL dop_get_remote_particle( variables(i)%is_integer ) 517 #endif 518 CALL cpu_log( log_point_s(88), 'dop_get_remote_particle', 'stop' ) 519 520 CALL cpu_log( log_point_s(89), 'particle NetCDF output', 'start' ) 521 CALL part_io%sm_node_barrier( ) 522 IF ( part_io%iam_io_pe ) THEN 523 IF ( variables(i)%is_integer) THEN 524 CALL netcdf4_write_variable( 'parallel', file_id, variables(i)%var_id, bounds_start, & 525 value_counts, bounds_origin, .FALSE., & 526 values_int32_1d=out_buf_i, return_value=return_value ) 527 ELSE 528 CALL netcdf4_write_variable( 'parallel', file_id, variables(i)%var_id, bounds_start, & 529 value_counts, bounds_origin, .FALSE., & 530 values_real32_1d=out_buf_r, return_value=return_value) 531 ENDIF 532 ENDIF 533 CALL part_io%sm_node_barrier( ) 534 CALL cpu_log( log_point_s(89), 'particle NetCDF output', 'stop' ) 535 536 #if defined( __parallel ) 537 CALL MPI_BARRIER( comm2d, ierr ) !kk This Barrier is necessary, not sure why 538 #endif 539 ENDDO 540 541 CALL deallocate_and_free 542 543 ! 544 !-- Write Time value 545 IF ( myid == 0 ) THEN 546 ALLOCATE( my_time(1) ) 547 bounds_start(1) = icount 548 value_counts(1) = 1 549 my_time(1) = simulated_time 550 IF ( unlimited_dimension ) THEN ! For workaround described in dop finalize 551 time_axis_values(icount) = my_time(1) 552 ELSE 553 CALL netcdf4_write_variable( 'parallel', file_id, var_id%time, bounds_start(1:1), & 554 value_counts(1:1), bounds_origin(1:1), .TRUE., & 555 values_realwp_1d=my_time, return_value=return_value ) 556 ENDIF 557 DEALLOCATE( my_time ) 558 ENDIF 559 560 RETURN 561 END SUBROUTINE dop_output_tseries 562 563 !--------------------------------------------------------------------------------------------------! 564 ! Description: 565 ! ------------ 566 ! 567 !--------------------------------------------------------------------------------------------------! 568 SUBROUTINE dop_finalize 569 570 IMPLICIT NONE 571 539 572 #if defined( __netcdf4 ) 540 INTEGER(iwp) :: var_len541 #endif 542 573 INTEGER(iwp) :: ierr !< MPI error code 574 #endif 575 INTEGER(iwp) :: return_value !< Return value data_output_netcdf4 .. routines 543 576 #if defined( __netcdf4 ) 544 INTEGER(iwp) :: ierr !< MPI error code545 #endif 546 547 IF( win_prt_i /= -1)THEN548 CALL part_io%sm_free_shared (win_prt_i)549 550 IF( win_prt_r /= -1)THEN551 CALL part_io%sm_free_shared (win_prt_r)552 553 554 IF( file_id /= -1 .AND. part_io%iam_io_pe)THEN555 556 577 INTEGER(iwp) :: var_len 578 #endif 579 580 IF ( win_prt_i /= -1 ) THEN 581 CALL part_io%sm_free_shared( win_prt_i ) 582 ENDIF 583 IF ( win_prt_r /= -1 ) THEN 584 CALL part_io%sm_free_shared( win_prt_r ) 585 ENDIF 586 587 IF ( file_id /= -1 .AND. 
part_io%iam_io_pe ) THEN 588 CALL netcdf4_finalize( 'parallel', file_id, return_value ) 589 file_id = -1 557 590 558 591 #if defined( __netcdf4 ) 559 592 ! 560 !-- For yet unknown reasons it is not possible to write in parallel mode the time values to NetCDF variable time 561 !-- This workaround closes the parallel file and writes the time values in sequential mode on PE0 562 !kk This is a real quick and dirty workaround and the problem should be solved in the final version !!! 563 564 If(myid == 0 .AND. unlimited_dimension) THEN 565 ierr = nf90_open (TRIM(file_name),NF90_WRITE, file_id) 566 ierr = nf90_inquire_dimension(file_id, did%time, len = var_len) 567 568 ierr = nf90_put_var (file_id, var_id%time, time_axis_values(1:var_len)) 569 570 ierr = nf90_close (file_id) 571 ENDIF 572 #endif 573 ENDIF 574 575 RETURN 576 END SUBROUTINE dop_finalize 577 578 ! Private subroutines 579 580 581 ! 582 ! kk Not sure if necessary, but set ALL particle numnber to -1 583 584 SUBROUTINE set_indef_particle_nr 585 586 IMPLICIT NONE 587 INTEGER(iwp) :: i !< 588 INTEGER(iwp) :: j !< 589 INTEGER(iwp) :: k !< 590 INTEGER(iwp) :: n !< 591 592 DO i=nxl,nxr 593 DO j=nys,nyn 594 DO k=nzb+1,nzt 595 DO n=1,SIZE(grid_particles(k,j,i)%particles) 596 grid_particles(k,j,i)%particles(n)%particle_nr = -1 597 END DO 598 END DO 599 ENDDO 600 ENDDO 601 602 RETURN 603 END SUBROUTINE set_indef_particle_nr 604 ! 605 !-- Count particles scheduled for output 606 !-- here are pts_increment and pts_percentage are used to select output particles 607 608 SUBROUTINE count_output_particles (pcount) 609 IMPLICIT NONE 610 611 INTEGER(iwp), INTENT(OUT) :: pcount !< 612 613 INTEGER(iwp) :: i !< 614 INTEGER(iwp) :: j !< 615 INTEGER(iwp) :: k !< 616 INTEGER(iwp) :: n !< 617 INTEGER(iwp) :: n_all !< count all particles for MOD function 618 REAL(dp) :: fcount 619 REAL(dp) :: finc 620 621 pcount = 0 622 IF(pts_increment == 1) THEN 623 pcount = SUM(prt_count) 624 ELSE 625 n_all = 0 626 DO i=nxl,nxr 627 DO j=nys,nyn 628 DO k=nzb+1,nzt 629 DO n=1,prt_count(k,j,i) 630 IF(MOD(n_all,pts_increment) == 0) THEN 631 pcount = pcount+1 632 ENDIF 633 n_all = n_all+1 634 END DO 635 END DO 636 ENDDO 637 ENDDO 638 ENDIF 639 640 IF(pts_percentage < 100. ) THEN 641 642 finc = pts_percentage/100 643 644 pcount = 0 645 fcount = 0.0 646 647 DO i=nxl,nxr 648 DO j=nys,nyn 649 DO k=nzb+1,nzt 650 DO n=1,prt_count(k,j,i) 651 fcount = fcount + finc 652 IF(pcount < int(fcount) ) THEN 653 pcount = pcount+1 654 ENDIF 655 END DO 656 END DO 657 ENDDO 658 ENDDO 659 660 ENDIF 661 662 RETURN 663 END SUBROUTINE count_output_particles 664 665 SUBROUTINE dop_read_output_particle_list (nr_particles_out) 666 IMPLICIT NONE 667 INTEGER(iwp),INTENT(OUT) :: nr_particles_out 668 669 INTEGER(iwp) :: i !< 670 INTEGER(iwp) :: iu !< 671 INTEGER(iwp) :: istat !< 672 INTEGER(idp) :: dummy !< 673 674 iu = 345 675 istat = 0 676 nr_particles_out = 0 677 678 OPEN(unit=iu, file=TRIM(pts_id_file)) !kk should be changed to check_open 679 680 ! First stridem cout output particle 681 682 DO WHILE (istat == 0) 683 READ(iu,*,iostat=istat) dummy 684 nr_particles_out = nr_particles_out+1 685 END DO 686 687 nr_particles_out = nr_particles_out-1 ! subtract 1 for end of file read 688 689 ALLOCATE(part_id_list_file(nr_particles_out)) 690 691 REWIND(iu) 692 693 !-- second stride, read particle ids for scheduled output particle 694 695 DO i=1,nr_particles_out 696 READ(iu,*) part_id_list_file(i) 697 END DO 698 699 700 CLOSE (iu) 701 702 END SUBROUTINE dop_read_output_particle_list 703 ! 
704 !-- Setb output particle number for selected active particles 705 706 SUBROUTINE set_particle_number 707 IMPLICIT NONE 708 709 INTEGER(iwp) :: i !< 710 INTEGER(iwp) :: j !< 711 INTEGER(iwp) :: k !< 712 INTEGER(iwp) :: n !< 713 INTEGER(iwp) :: n_all !< count all particles for MOD function 714 INTEGER(iwp) :: particle_nr !< output particle number 715 INTEGER(iwp) :: pcount !< local particle count in case of pts_percentage 716 REAL(dp) :: fcount !< partical progress in %/100 717 REAL(dp) :: finc !< increment of particle 718 719 pcount = 0 720 fcount = 0.0 721 722 particle_nr = start_local_numbering 723 n_all = 0 724 DO i=nxl,nxr 725 DO j=nys,nyn 726 DO k=nzb+1,nzt 727 IF(pts_increment > 1) THEN 728 DO n=1,prt_count(k,j,i) 729 IF(MOD(n_all,pts_increment) == 0) THEN 730 grid_particles(k,j,i)%particles(n)%particle_nr = particle_nr 731 particle_nr = particle_nr+1 732 ELSE 733 grid_particles(k,j,i)%particles(n)%particle_nr = -2 734 ENDIF 735 n_all = n_all+1 736 END DO 737 ELSE IF(pts_percentage < 100. ) THEN 738 finc = pts_percentage/100 739 740 DO n=1,prt_count(k,j,i) 741 ! 742 !-- Every particle move fraction on particle axis (i.e part percent == 80; move 0.8) 743 !-- if increases next whole number, the particle is taken for output 744 fcount = fcount + finc 745 IF(pcount < int(fcount) ) THEN 746 pcount = pcount+1 747 grid_particles(k,j,i)%particles(n)%particle_nr = particle_nr 748 particle_nr = particle_nr+1 749 ELSE 750 grid_particles(k,j,i)%particles(n)%particle_nr = -2 751 ENDIF 752 END DO 753 ELSE 754 DO n=1,prt_count(k,j,i) 755 grid_particles(k,j,i)%particles(n)%particle_nr = particle_nr 756 particle_nr = particle_nr+1 757 END DO 758 ENDIF 759 END DO 760 ENDDO 761 ENDDO 762 763 RETURN 764 END SUBROUTINE set_particle_number 765 766 SUBROUTINE dop_find_particle_in_outlist 767 IMPLICIT NONE 768 769 INTEGER(iwp) :: i !< 770 #if defined( __parallel ) 771 INTEGER(iwp) :: ierr !< MPI error code 772 #endif 773 INTEGER(iwp) :: j !< 774 INTEGER(iwp) :: k !< 775 INTEGER(iwp) :: l !< 776 INTEGER(iwp) :: n !< 777 INTEGER(iwp) :: nr_part !< 593 !-- For yet unknown reasons it is not possible to write in parallel mode the time values to 594 !-- NetCDF variable time. 595 !-- This workaround closes the parallel file and writes the time values in sequential mode on 596 !-- PE0. 597 !-- kk This is a real quick and dirty workaround and the problem should be solved in the final 598 !-- version !!! 599 IF ( myid == 0 .AND. unlimited_dimension ) THEN 600 ierr = NF90_OPEN( TRIM( file_name ), NF90_WRITE, file_id ) 601 ierr = NF90_INQUIRE_DIMENSION( file_id, did%time, LEN = var_len) 602 603 ierr = NF90_PUT_VAR( file_id, var_id%time, time_axis_values(1:var_len) ) 604 605 ierr = NF90_CLOSE( file_id ) 606 ENDIF 607 #endif 608 ENDIF 609 610 RETURN 611 END SUBROUTINE dop_finalize 612 613 ! 614 !--Private subroutines 615 616 !--------------------------------------------------------------------------------------------------! 617 ! Description: 618 ! ------------ 619 ! 620 !--------------------------------------------------------------------------------------------------! 621 ! 
622 !-- kk Not sure if necessary, but set ALL particle number to -1 623 SUBROUTINE set_indef_particle_nr 624 625 IMPLICIT NONE 626 627 INTEGER(iwp) :: i !< 628 INTEGER(iwp) :: j !< 629 INTEGER(iwp) :: k !< 630 INTEGER(iwp) :: n !< 631 632 DO i = nxl, nxr 633 DO j = nys, nyn 634 DO k = nzb+1, nzt 635 DO n = 1, SIZE( grid_particles(k,j,i)%particles ) 636 grid_particles(k,j,i)%particles(n)%particle_nr = -1 637 ENDDO 638 ENDDO 639 ENDDO 640 ENDDO 641 642 RETURN 643 END SUBROUTINE set_indef_particle_nr 644 645 !--------------------------------------------------------------------------------------------------! 646 ! Description: 647 ! ------------ 648 !> Count particles scheduled for output. 649 !> Here pts_increment and pts_percentage are used to select output particles. 650 !--------------------------------------------------------------------------------------------------! 651 SUBROUTINE count_output_particles (pcount) 652 653 IMPLICIT NONE 654 655 INTEGER(iwp) :: i !< 656 INTEGER(iwp) :: j !< 657 INTEGER(iwp) :: k !< 658 INTEGER(iwp) :: n !< 659 INTEGER(iwp) :: n_all !< count all particles for MOD function 660 INTEGER(iwp), INTENT(OUT) :: pcount !< 661 662 REAL(dp) :: finc 663 REAL(dp) :: fcount 664 665 666 pcount = 0 667 IF ( pts_increment == 1 ) THEN 668 pcount = SUM( prt_count ) 669 ELSE 670 n_all = 0 671 DO i = nxl, nxr 672 DO j = nys, nyn 673 DO k = nzb+1, nzt 674 DO n = 1, prt_count(k,j,i) 675 IF ( MOD( n_all, pts_increment ) == 0 ) THEN 676 pcount = pcount + 1 677 ENDIF 678 n_all = n_all + 1 679 ENDDO 680 ENDDO 681 ENDDO 682 ENDDO 683 ENDIF 684 685 IF ( pts_percentage < 100.0_wp ) THEN 686 687 finc = pts_percentage / 100 688 689 pcount = 0 690 fcount = 0.0_wp 691 692 DO i = nxl, nxr 693 DO j = nys, nyn 694 DO k = nzb+1, nzt 695 DO n = 1, prt_count(k,j,i) 696 fcount = fcount + finc 697 IF ( pcount < INT( fcount ) ) THEN 698 pcount = pcount + 1 699 ENDIF 700 ENDDO 701 ENDDO 702 ENDDO 703 ENDDO 704 705 ENDIF 706 707 RETURN 708 END SUBROUTINE count_output_particles 709 710 !--------------------------------------------------------------------------------------------------! 711 ! Description: 712 ! ------------ 713 ! 714 !--------------------------------------------------------------------------------------------------! 715 SUBROUTINE dop_read_output_particle_list( nr_particles_out ) 716 717 IMPLICIT NONE 718 719 INTEGER(idp) :: dummy !< 720 INTEGER(iwp) :: i !< 721 INTEGER(iwp) :: istat !< 722 INTEGER(iwp) :: iu !< 723 INTEGER(iwp), INTENT(OUT) :: nr_particles_out 724 725 726 iu = 345 727 istat = 0 728 nr_particles_out = 0 729 730 OPEN( UNIT=iu, FILE=TRIM( pts_id_file ) ) !kk should be changed to check_open 731 732 ! 733 !-- First stridem cout output particle 734 DO WHILE( istat == 0 ) 735 READ( iu, *, iostat=istat ) dummy 736 nr_particles_out = nr_particles_out + 1 737 ENDDO 738 739 nr_particles_out = nr_particles_out - 1 ! subtract 1 for end of file read 740 741 ALLOCATE( part_id_list_file(nr_particles_out) ) 742 743 REWIND( iu ) 744 ! 745 !-- Second stride, read particle ids for scheduled output particle. 746 DO i = 1, nr_particles_out 747 READ( iu, * ) part_id_list_file(i) 748 ENDDO 749 750 CLOSE( iu ) 751 752 END SUBROUTINE dop_read_output_particle_list 753 754 !--------------------------------------------------------------------------------------------------! 755 ! Description: 756 ! ------------ 757 !> Set output particle number for selected active particles 758 !--------------------------------------------------------------------------------------------------! 
759 SUBROUTINE set_particle_number 760 761 IMPLICIT NONE 762 763 INTEGER(iwp) :: i !< 764 INTEGER(iwp) :: j !< 765 INTEGER(iwp) :: k !< 766 INTEGER(iwp) :: n !< 767 INTEGER(iwp) :: n_all !< count all particles for MOD function 768 INTEGER(iwp) :: particle_nr !< output particle number 769 INTEGER(iwp) :: pcount !< local particle count in case of pts_percentage 770 771 REAL(dp) :: fcount !< partical progress in %/100 772 REAL(dp) :: finc !< increment of particle 773 774 775 pcount = 0 776 fcount = 0.0 777 778 particle_nr = start_local_numbering 779 n_all = 0 780 DO i = nxl, nxr 781 DO j = nys, nyn 782 DO k = nzb+1, nzt 783 IF ( pts_increment > 1 ) THEN 784 DO n = 1, prt_count(k,j,i) 785 IF ( MOD( n_all, pts_increment ) == 0 ) THEN 786 grid_particles(k,j,i)%particles(n)%particle_nr = particle_nr 787 particle_nr = particle_nr + 1 788 ELSE 789 grid_particles(k,j,i)%particles(n)%particle_nr = -2 790 ENDIF 791 n_all = n_all + 1 792 ENDDO 793 ELSEIF ( pts_percentage < 100.0_wp ) THEN 794 finc = pts_percentage / 100 795 796 DO n = 1, prt_count(k,j,i) 797 ! 798 !-- Eeach particle moves fraction on particle axis (i.e part percent == 80; move 0.8) 799 !-- if increases next whole number, the particle is taken for output. 800 fcount = fcount + finc 801 IF ( pcount < INT( fcount ) ) THEN 802 pcount = pcount + 1 803 grid_particles(k,j,i)%particles(n)%particle_nr = particle_nr 804 particle_nr = particle_nr + 1 805 ELSE 806 grid_particles(k,j,i)%particles(n)%particle_nr = -2 807 ENDIF 808 ENDDO 809 ELSE 810 DO n = 1, prt_count(k,j,i) 811 grid_particles(k,j,i)%particles(n)%particle_nr = particle_nr 812 particle_nr = particle_nr + 1 813 ENDDO 814 ENDIF 815 ENDDO 816 ENDDO 817 ENDDO 818 819 RETURN 820 END SUBROUTINE set_particle_number 821 822 !--------------------------------------------------------------------------------------------------! 823 ! Description: 824 ! ------------ 825 ! 826 !--------------------------------------------------------------------------------------------------! 827 SUBROUTINE dop_find_particle_in_outlist 828 829 IMPLICIT NONE 830 831 INTEGER(iwp) :: i !< 832 #if defined( __parallel ) 833 INTEGER(iwp) :: ierr !< MPI error code 834 #endif 835 INTEGER(iwp) :: j !< 836 INTEGER(iwp) :: k !< 837 INTEGER(iwp) :: l !< 838 INTEGER(iwp) :: n !< 839 INTEGER(iwp) :: nr_part !< 778 840 ! INTEGER, save :: icount=0 779 841 780 nr_part = 0 781 782 ! 783 !-- If there is a long particle output list, for performance reason it may become necessary to optimize the 784 !-- following loop. 785 ! 786 !-- Decode the particle id in i,j,k,n 787 !-- Split serach, i.e search first in i, then in j ... 788 789 DO i=nxl,nxr 790 DO j=nys,nyn 791 DO k=nzb+1,nzt 792 DO n=1,prt_count(k,j,i) 793 DO l=1,SIZE(part_id_list_file) 794 IF(grid_particles(k,j,i)%particles(n)%id == part_id_list_file(l)) THEN 795 grid_particles(k,j,i)%particles(n)%particle_nr = l 796 nr_part = nr_part+1 797 ENDIF 798 END DO 799 END DO 800 END DO 801 ENDDO 802 ENDDO 803 804 #if defined( __parallel ) 805 CALL MPI_ALLREDUCE( nr_part, initial_number_of_active_particles, 1, MPI_INTEGER, MPI_SUM, comm2d, ierr ) 842 843 nr_part = 0 844 845 ! 846 !-- If there is a long particle output list, for performance reason it may become necessary to 847 !-- optimize the following loop. 848 ! 849 !-- Decode the particle id in i,j,k,n. 850 !-- Split serach, i.e search first in i, then in j ... 
851 DO i = nxl, nxr 852 DO j = nys, nyn 853 DO k = nzb+1, nzt 854 DO n = 1, prt_count(k,j,i) 855 DO l = 1, SIZE( part_id_list_file ) 856 IF ( grid_particles(k,j,i)%particles(n)%id == part_id_list_file(l) ) THEN 857 grid_particles(k,j,i)%particles(n)%particle_nr = l 858 nr_part = nr_part + 1 859 ENDIF 860 ENDDO 861 ENDDO 862 ENDDO 863 ENDDO 864 ENDDO 865 866 #if defined( __parallel ) 867 CALL MPI_ALLREDUCE( nr_part, initial_number_of_active_particles, 1, MPI_INTEGER, MPI_SUM, & 868 comm2d, ierr ) 806 869 #else 807 initial_number_of_active_particles = nr_part 808 #endif 809 810 END SUBROUTINE dop_find_particle_in_outlist 811 870 initial_number_of_active_particles = nr_part 871 #endif 872 873 END SUBROUTINE dop_find_particle_in_outlist 874 875 ! 812 876 !-- Netcdf Setup 813 877 ! 814 !-- Open NetCDF File DATA_1D_PTS_NETCDF 815 !-- Define Dimensions and variables 816 !-- Write constant variables 817 818 SUBROUTINE dop_netcdf_setup 819 IMPLICIT NONE 820 821 INTEGER,PARAMETER :: global_id_in_file = -1 822 INTEGER(iwp) :: i 823 INTEGER(iwp) :: fix_ind 824 INTEGER(iwp) :: var_ind 825 826 INTEGER(iwp) :: return_value 827 LOGICAL :: const_flag 828 829 INTEGER, DIMENSION(2) :: dimension_ids 830 831 nr_time_values = end_time/dt_dopts+1 !kk has to be adapted to formular of 3d output 832 833 IF (part_io%iam_io_pe) THEN 834 CALL netcdf4_init_module( "", part_io%comm_io, 0, 9, .TRUE., -1 ) 835 836 file_name = 'DATA_1D_PTS_NETCDF' 837 #if defined( __parallel ) 838 CALL netcdf4_open_file( 'parallel', trim(file_name), file_id, return_value ) 878 !--------------------------------------------------------------------------------------------------! 879 ! Description: 880 ! ------------ 881 !> Open NetCDF File DATA_1D_PTS_NETCDF 882 !> Define Dimensions and variables 883 !> Write constant variables 884 !--------------------------------------------------------------------------------------------------! 885 SUBROUTINE dop_netcdf_setup 886 887 IMPLICIT NONE 888 889 INTEGER, PARAMETER :: global_id_in_file = -1 890 891 INTEGER(iwp) :: i 892 INTEGER(iwp) :: fix_ind 893 INTEGER(iwp) :: return_value 894 INTEGER(iwp) :: var_ind 895 896 INTEGER, DIMENSION(2) :: dimension_ids 897 898 LOGICAL :: const_flag 899 900 901 nr_time_values = end_time / dt_dopts + 1 !kk has to be adapted to formular of 3d output 902 903 IF ( part_io%iam_io_pe ) THEN 904 CALL netcdf4_init_module( "", part_io%comm_io, 0, 9, .TRUE., -1 ) 905 906 file_name = 'DATA_1D_PTS_NETCDF' 907 #if defined( __parallel ) 908 CALL netcdf4_open_file( 'parallel', TRIM( file_name ), file_id, return_value ) 839 909 #else 840 CALL netcdf4_open_file( 'serial', trim(file_name), file_id, return_value ) 841 #endif 842 ! 843 !-- global attributes 844 845 CALL netcdf4_write_attribute( 'parallel', file_id, global_id_in_file, 'comment', & 846 'Particle ouput created by PALM module data_output_particle', return_value=return_value ) 847 848 CALL netcdf4_write_attribute( 'parallel', file_id, global_id_in_file, 'initial_nr_particles', & 849 value_int32=initial_number_of_active_particles, return_value=return_value ) 850 ! 
851 !-- define dimensions 852 CALL netcdf4_init_dimension( 'parallel', file_id, did%prt, var_id%prt, & 853 'prt','int32' , nr_particles_out, .TRUE., return_value ) 854 855 IF(unlimited_dimension) THEN 856 CALL netcdf4_init_dimension( 'parallel', file_id, did%time, var_id%time, & 857 'time','real32' , -1, .TRUE., return_value ) 858 ALLOCATE (time_axis_values(nr_time_values)) 859 ELSE 860 CALL netcdf4_init_dimension( 'parallel', file_id, did%time, var_id%time, & 861 'time','real32' , nr_time_values, .TRUE., return_value ) 862 ENDIF 863 END IF 864 ! 865 !-- Variables without time axis 866 !-- These variables will always be written only once at the beginning of the file 867 868 dimension_ids(1) = did%prt 869 870 fix_ind = 1 871 fix_variables(fix_ind)%name = 'origin_x' 872 fix_variables(fix_ind)%units = 'meter' 873 874 IF (part_io%iam_io_pe) THEN 875 CALL netcdf4_init_variable( 'parallel', file_id, fix_variables(fix_ind)%var_id, fix_variables(fix_ind)%name, 'real32', & 876 dimension_ids(1:1), .FALSE., return_value ) 877 878 CALL netcdf4_write_attribute( 'parallel', file_id, fix_variables(fix_ind)%var_id, 'units', & 879 value_char=trim(fix_variables(fix_ind)%units), return_value=return_value ) 880 ENDIF 881 882 fix_ind = fix_ind+1 883 fix_variables(fix_ind)%name = 'origin_y' 884 fix_variables(fix_ind)%units = 'meter' 885 886 IF (part_io%iam_io_pe) THEN 887 CALL netcdf4_init_variable( 'parallel', file_id, fix_variables(fix_ind)%var_id, fix_variables(fix_ind)%name, 'real32', & 888 dimension_ids(1:1), .FALSE., return_value ) 889 890 CALL netcdf4_write_attribute( 'parallel', file_id, fix_variables(fix_ind)%var_id, 'units', & 891 value_char=trim(fix_variables(fix_ind)%units), return_value=return_value ) 892 893 ENDIF 894 895 fix_ind = fix_ind+1 896 fix_variables(fix_ind)%name = 'origin_z' 897 fix_variables(fix_ind)%units = 'meter' 898 899 IF (part_io%iam_io_pe) THEN 900 CALL netcdf4_init_variable( 'parallel', file_id, fix_variables(fix_ind)%var_id, fix_variables(fix_ind)%name, 'real32', & 901 dimension_ids(1:1), .FALSE., return_value ) 902 903 CALL netcdf4_write_attribute( 'parallel', file_id, fix_variables(fix_ind)%var_id, 'units', & 904 value_char=trim(fix_variables(fix_ind)%units), return_value=return_value ) 905 ENDIF 906 907 ! 908 !-- These variables are written if name end with _const' 909 DO i=1,size(data_output_pts) 910 const_flag = (INDEX(TRIM(data_output_pts(i)),'_const') > 0) 911 IF(LEN(TRIM(data_output_pts(i))) > 0 .AND. const_flag) THEN 912 fix_ind = fix_ind+1 913 fix_variables(fix_ind)%name = TRIM(data_output_pts(i)) 914 915 SELECT CASE (TRIM(fix_variables(fix_ind)%name)) 916 CASE('radius_const') 917 fix_variables(fix_ind)%units = 'meter' 918 fix_variables(fix_ind)%is_integer = .FALSE. 919 CASE('aux1_const') 920 fix_variables(fix_ind)%units = 'depend_on_setup' 921 fix_variables(fix_ind)%is_integer = .FALSE. 922 CASE('aux2_const') 923 fix_variables(fix_ind)%units = 'depend_on_setup' 924 fix_variables(fix_ind)%is_integer = .FALSE. 925 CASE('rvar1_const') 926 fix_variables(fix_ind)%units = 'depend_on_setup' 927 fix_variables(fix_ind)%is_integer = .FALSE. 928 CASE('rvar2_const') 929 fix_variables(fix_ind)%units = 'depend_on_setup' 930 fix_variables(fix_ind)%is_integer = .FALSE. 931 CASE('rvar3_const') 932 fix_variables(fix_ind)%units = 'depend_on_setup' 933 fix_variables(fix_ind)%is_integer = .FALSE. 
934 END SELECT 935 936 IF (part_io%iam_io_pe) THEN 937 IF(fix_variables(fix_ind)%is_integer) THEN 938 CALL netcdf4_init_variable( 'parallel', file_id, fix_variables(fix_ind)%var_id, & 939 fix_variables(fix_ind)%name, 'int32', & 940 dimension_ids(1:1), .FALSE., return_value ) 941 ELSE 942 CALL netcdf4_init_variable( 'parallel', file_id, fix_variables(fix_ind)%var_id, & 943 fix_variables(fix_ind)%name, 'real32', & 944 dimension_ids(1:1), .FALSE., return_value ) 945 ENDIF 946 947 CALL netcdf4_write_attribute( 'parallel', file_id, fix_variables(fix_ind)%var_id, 'units', & 948 value_char=trim(fix_variables(fix_ind)%units), return_value=return_value ) 949 ENDIF 950 951 ENDIF 952 ENDDO 953 954 nr_fix_variables = fix_ind 955 ! 956 !-- Variables time axis 957 !-- These variables will always be written in the time loop 958 959 dimension_ids(1) = did%prt 960 dimension_ids(2) = did%time 961 962 var_ind = 0 963 964 DO i=1,size(data_output_pts) 965 const_flag = (INDEX(TRIM(data_output_pts(i)),'_const') > 0) 966 IF(LEN(TRIM(data_output_pts(i))) > 0 .AND. .NOT. const_flag) THEN 967 var_ind = var_ind+1 968 variables(var_ind)%name = TRIM(data_output_pts(i)) 969 970 SELECT CASE (TRIM(variables(var_ind)%name)) 971 CASE('id') 972 variables(var_ind)%name = TRIM(data_output_pts(i)) // '_low' 973 variables(var_ind)%units = 'Number' 974 variables(var_ind)%is_integer = .TRUE. 975 IF (part_io%iam_io_pe) THEN 976 CALL netcdf4_init_variable( 'parallel', file_id, variables(var_ind)%var_id, variables(var_ind)%name, & 977 'int32', dimension_ids(1:2), .FALSE., return_value ) 978 CALL netcdf4_write_attribute( 'parallel', file_id, variables(var_ind)%var_id, 'units', & 979 value_char=trim(variables(var_ind)%units), return_value=return_value ) 980 ENDIF 981 982 var_ind = var_ind+1 983 variables(var_ind)%name = TRIM(data_output_pts(i)) // '_high' 984 variables(var_ind)%units = 'Number' 985 variables(var_ind)%is_integer = .TRUE. 986 CASE('particle_nr') 987 variables(var_ind)%units = 'Number' 988 variables(var_ind)%is_integer = .TRUE. 989 CASE('class') 990 variables(var_ind)%units = 'Number' 991 variables(var_ind)%is_integer = .TRUE. 992 CASE('group') 993 variables(var_ind)%units = 'Number' 994 variables(var_ind)%is_integer = .TRUE. 995 CASE('x') 996 variables(var_ind)%units = 'meter' 997 variables(var_ind)%is_integer = .FALSE. 998 CASE('y') 999 variables(var_ind)%units = 'meter' 1000 variables(var_ind)%is_integer = .FALSE. 1001 CASE('z') 1002 variables(var_ind)%units = 'meter' 1003 variables(var_ind)%is_integer = .FALSE. 1004 CASE('speed_x') 1005 variables(var_ind)%units = 'm/s' 1006 variables(var_ind)%is_integer = .FALSE. 1007 CASE('speed_y') 1008 variables(var_ind)%units = 'm/s' 1009 variables(var_ind)%is_integer = .FALSE. 1010 CASE('speed_z') 1011 variables(var_ind)%units = 'm/s' 1012 variables(var_ind)%is_integer = .FALSE. 1013 CASE('radius') 1014 variables(var_ind)%units = 'meter' 1015 variables(var_ind)%is_integer = .FALSE. 1016 CASE('age') 1017 variables(var_ind)%units = 'sec' 1018 variables(var_ind)%is_integer = .FALSE. 1019 CASE('age_m') 1020 variables(var_ind)%units = 'sec' 1021 variables(var_ind)%is_integer = .FALSE. 1022 CASE('dt_sum') 1023 variables(var_ind)%units = 'sec' 1024 variables(var_ind)%is_integer = .FALSE. 1025 CASE('e_m') 1026 variables(var_ind)%units = 'Ws' 1027 variables(var_ind)%is_integer = .FALSE. 1028 CASE('weight_factor') 1029 variables(var_ind)%units = 'factor' 1030 variables(var_ind)%is_integer = .FALSE. 
1031 CASE('aux1') 1032 variables(var_ind)%units = 'depend_on_setup' 1033 variables(var_ind)%is_integer = .FALSE. 1034 CASE('aux2') 1035 variables(var_ind)%units = 'depend_on_setup' 1036 variables(var_ind)%is_integer = .FALSE. 1037 CASE('rvar1') 1038 variables(var_ind)%units = 'depend_on_setup' 1039 variables(var_ind)%is_integer = .FALSE. 1040 CASE('rvar2') 1041 variables(var_ind)%units = 'depend_on_setup' 1042 variables(var_ind)%is_integer = .FALSE. 1043 CASE('rvar3') 1044 variables(var_ind)%units = 'depend_on_setup' 1045 variables(var_ind)%is_integer = .FALSE. 1046 END SELECT 1047 1048 IF (part_io%iam_io_pe) THEN 1049 IF(variables(var_ind)%is_integer) THEN 1050 CALL netcdf4_init_variable( 'parallel', file_id, variables(var_ind)%var_id, variables(var_ind)%name, 'int32', & 1051 dimension_ids(1:2), .FALSE., return_value ) 1052 ELSE 1053 CALL netcdf4_init_variable( 'parallel', file_id, variables(var_ind)%var_id, variables(var_ind)%name, 'real32', & 1054 dimension_ids(1:2), .FALSE., return_value ) 1055 ENDIF 1056 1057 CALL netcdf4_write_attribute( 'parallel', file_id, variables(var_ind)%var_id, 'units', & 1058 value_char=trim(variables(var_ind)%units), return_value=return_value ) 1059 ENDIF 1060 1061 ENDIF 1062 ENDDO 1063 1064 nr_variables = var_ind 1065 1066 IF (part_io%iam_io_pe) THEN 1067 CALL netcdf4_stop_file_header_definition( 'parallel', file_id, return_value ) 1068 ENDIF 1069 1070 CALL dop_write_axis 1071 1072 RETURN 1073 1074 CONTAINS 1075 SUBROUTINE dop_write_axis 1076 IMPLICIT NONE 1077 INTEGER(iwp) :: i 1078 INTEGER,DIMENSION(1) :: bounds_start, value_counts, bounds_origin 1079 INTEGER(iwp) :: return_value !< Return value data_output_netcdf4 .. routines 1080 1081 INTEGER, POINTER, CONTIGUOUS, DIMENSION(:) :: prt_val 1082 1083 bounds_origin = 1 1084 bounds_start(1) = 1 1085 1086 IF(myid == 0) THEN 1087 1088 ALLOCATE(prt_val(nr_particles_out)) 1089 DO i=1,nr_particles_out 1090 prt_val(i) = i 1091 ENDDO 1092 value_counts(1) = nr_particles_out 1093 1094 CALL netcdf4_write_variable('parallel', file_id, var_id%prt, bounds_start, value_counts, bounds_origin, & 1095 .TRUE., values_int32_1d=prt_val, return_value=return_value) 1096 1097 DEALLOCATE(prt_val) 1098 END IF 1099 1100 RETURN 1101 END SUBROUTINE dop_write_axis 1102 1103 END SUBROUTINE dop_netcdf_setup 1104 ! 1105 !- write constant variables 1106 1107 SUBROUTINE dop_write_fixed_variables 1108 IMPLICIT NONE 1109 1110 INTEGER(iwp) :: i ! 1111 INTEGER(iwp) :: return_value ! 1112 1113 INTEGER(iwp),DIMENSION(1) :: bounds_origin, bounds_start, value_counts 1114 1115 bounds_origin(1) = 1 1116 bounds_start(1) = io_start_index 1117 value_counts(1) = io_end_index-io_start_index+1 1118 1119 1120 DO i=1,nr_fix_variables 910 CALL netcdf4_open_file( 'serial', TRIM( file_name ), file_id, return_value ) 911 #endif 912 913 ! 914 !-- Global attributes 915 CALL netcdf4_write_attribute( 'parallel', file_id, global_id_in_file, 'comment', & 916 'Particle ouput created by PALM module data_output_particle', & 917 return_value=return_value ) 918 919 CALL netcdf4_write_attribute( 'parallel', file_id, global_id_in_file, & 920 'initial_nr_particles', & 921 value_int32=initial_number_of_active_particles, & 922 return_value=return_value ) 923 ! 
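For orientation before the definition block that follows: which variables are created is controlled by the data_output_pts list together with pts_increment or pts_percentage (all imported from particle_attributes). Entries ending in '_const' become one-dimensional variables over the particle axis and are written only once, while all other entries get the (prt, time) shape and are written every output interval. A hypothetical runtime-parameter excerpt is sketched below; the namelist group name is assumed for illustration and is not part of this changeset:

    &particle_parameters
       data_output_pts = 'x', 'y', 'z', 'speed_z', 'radius_const',
       pts_increment   = 4,     ! every 4th particle is selected for output
    /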
924 !-- Define dimensions 925 CALL netcdf4_init_dimension( 'parallel', file_id, did%prt, var_id%prt, 'prt','int32' , & 926 nr_particles_out, .TRUE., return_value ) 927 928 IF ( unlimited_dimension ) THEN 929 CALL netcdf4_init_dimension( 'parallel', file_id, did%time, var_id%time, 'time','real32',& 930 -1, .TRUE., return_value ) 931 ALLOCATE( time_axis_values(nr_time_values) ) 932 ELSE 933 CALL netcdf4_init_dimension( 'parallel', file_id, did%time, var_id%time, 'time','real32',& 934 nr_time_values, .TRUE., return_value ) 935 ENDIF 936 ENDIF 937 ! 938 !-- Variables without time axis 939 !-- These variables will always be written only once at the beginning of the file 940 dimension_ids(1) = did%prt 941 942 fix_ind = 1 943 fix_variables(fix_ind)%name = 'origin_x' 944 fix_variables(fix_ind)%units = 'meter' 945 946 IF ( part_io%iam_io_pe ) THEN 947 CALL netcdf4_init_variable( 'parallel', file_id, fix_variables(fix_ind)%var_id, & 948 fix_variables(fix_ind)%name, 'real32', dimension_ids(1:1), & 949 .FALSE., return_value ) 950 951 CALL netcdf4_write_attribute( 'parallel', file_id, fix_variables(fix_ind)%var_id, 'units', & 952 value_char=TRIM( fix_variables(fix_ind)%units ), & 953 return_value=return_value ) 954 ENDIF 955 956 fix_ind = fix_ind + 1 957 fix_variables(fix_ind)%name = 'origin_y' 958 fix_variables(fix_ind)%units = 'meter' 959 960 IF ( part_io%iam_io_pe ) THEN 961 CALL netcdf4_init_variable( 'parallel', file_id, fix_variables(fix_ind)%var_id, & 962 fix_variables(fix_ind)%name, 'real32', dimension_ids(1:1), & 963 .FALSE., return_value ) 964 965 CALL netcdf4_write_attribute( 'parallel', file_id, fix_variables(fix_ind)%var_id, 'units', & 966 value_char=TRIM( fix_variables(fix_ind)%units ), & 967 return_value=return_value ) 968 969 ENDIF 970 971 fix_ind = fix_ind + 1 972 fix_variables(fix_ind)%name = 'origin_z' 973 fix_variables(fix_ind)%units = 'meter' 974 975 IF ( part_io%iam_io_pe ) THEN 976 CALL netcdf4_init_variable( 'parallel', file_id, fix_variables(fix_ind)%var_id, & 977 fix_variables(fix_ind)%name, 'real32', dimension_ids(1:1), & 978 .FALSE., return_value ) 979 980 CALL netcdf4_write_attribute( 'parallel', file_id, fix_variables(fix_ind)%var_id, 'units', & 981 value_char=TRIM( fix_variables(fix_ind)%units ), & 982 return_value=return_value ) 983 ENDIF 984 985 ! 986 !-- These variables are written if name end with _const' 987 DO i = 1, SIZE( data_output_pts ) 988 const_flag = ( INDEX( TRIM( data_output_pts(i) ), '_const' ) > 0 ) 989 IF ( LEN( TRIM( data_output_pts(i) ) ) > 0 .AND. const_flag) THEN 990 fix_ind = fix_ind + 1 991 fix_variables(fix_ind)%name = TRIM( data_output_pts(i) ) 992 993 SELECT CASE ( TRIM( fix_variables(fix_ind)%name ) ) 994 CASE ( 'radius_const' ) 995 fix_variables(fix_ind)%units = 'meter' 996 fix_variables(fix_ind)%is_integer = .FALSE. 997 CASE ( 'aux1_const' ) 998 fix_variables(fix_ind)%units = 'depend_on_setup' 999 fix_variables(fix_ind)%is_integer = .FALSE. 1000 CASE ( 'aux2_const' ) 1001 fix_variables(fix_ind)%units = 'depend_on_setup' 1002 fix_variables(fix_ind)%is_integer = .FALSE. 1003 CASE ( 'rvar1_const' ) 1004 fix_variables(fix_ind)%units = 'depend_on_setup' 1005 fix_variables(fix_ind)%is_integer = .FALSE. 1006 CASE ( 'rvar2_const' ) 1007 fix_variables(fix_ind)%units = 'depend_on_setup' 1008 fix_variables(fix_ind)%is_integer = .FALSE. 1009 CASE ( 'rvar3_const' ) 1010 fix_variables(fix_ind)%units = 'depend_on_setup' 1011 fix_variables(fix_ind)%is_integer = .FALSE. 
1012 END SELECT 1013 1014 IF ( part_io%iam_io_pe ) THEN 1015 IF ( fix_variables(fix_ind)%is_integer ) THEN 1016 CALL netcdf4_init_variable( 'parallel', file_id, fix_variables(fix_ind)%var_id, & 1017 fix_variables(fix_ind)%name, 'int32', & 1018 dimension_ids(1:1), .FALSE., return_value ) 1019 ELSE 1020 CALL netcdf4_init_variable( 'parallel', file_id, fix_variables(fix_ind)%var_id, & 1021 fix_variables(fix_ind)%name, 'real32', & 1022 dimension_ids(1:1), .FALSE., return_value ) 1023 ENDIF 1024 1025 CALL netcdf4_write_attribute( 'parallel', file_id, fix_variables(fix_ind)%var_id, & 1026 'units', & 1027 value_char=TRIM( fix_variables(fix_ind)%units ), & 1028 return_value=return_value ) 1029 ENDIF 1030 1031 ENDIF 1032 ENDDO 1033 1034 nr_fix_variables = fix_ind 1035 ! 1036 !-- Variables time axis 1037 !-- These variables will always be written in the time loop 1038 dimension_ids(1) = did%prt 1039 dimension_ids(2) = did%time 1040 1041 var_ind = 0 1042 1043 DO i = 1, SIZE( data_output_pts ) 1044 const_flag = ( INDEX( TRIM( data_output_pts(i) ), '_const' ) > 0 ) 1045 IF ( LEN( TRIM( data_output_pts(i) ) ) > 0 .AND. .NOT. const_flag ) THEN 1046 var_ind = var_ind + 1 1047 variables(var_ind)%name = TRIM( data_output_pts(i) ) 1048 1049 SELECT CASE ( TRIM( variables(var_ind)%name) ) 1050 CASE ( 'id' ) 1051 variables(var_ind)%name = TRIM( data_output_pts(i) ) // '_low' 1052 variables(var_ind)%units = 'Number' 1053 variables(var_ind)%is_integer = .TRUE. 1054 IF ( part_io%iam_io_pe ) THEN 1055 CALL netcdf4_init_variable( 'parallel', file_id, variables(var_ind)%var_id, & 1056 variables(var_ind)%name, 'int32', & 1057 dimension_ids(1:2), .FALSE., return_value ) 1058 CALL netcdf4_write_attribute( 'parallel', file_id, variables(var_ind)%var_id, & 1059 'units', & 1060 value_char=TRIM( variables(var_ind)%units ), & 1061 return_value=return_value ) 1062 ENDIF 1063 1064 var_ind = var_ind + 1 1065 variables(var_ind)%name = TRIM( data_output_pts(i) ) // '_high' 1066 variables(var_ind)%units = 'Number' 1067 variables(var_ind)%is_integer = .TRUE. 1068 CASE ( 'particle_nr' ) 1069 variables(var_ind)%units = 'Number' 1070 variables(var_ind)%is_integer = .TRUE. 1071 CASE ( 'class' ) 1072 variables(var_ind)%units = 'Number' 1073 variables(var_ind)%is_integer = .TRUE. 1074 CASE ( 'group' ) 1075 variables(var_ind)%units = 'Number' 1076 variables(var_ind)%is_integer = .TRUE. 1077 CASE ( 'x' ) 1078 variables(var_ind)%units = 'meter' 1079 variables(var_ind)%is_integer = .FALSE. 1080 CASE ( 'y' ) 1081 variables(var_ind)%units = 'meter' 1082 variables(var_ind)%is_integer = .FALSE. 1083 CASE ( 'z' ) 1084 variables(var_ind)%units = 'meter' 1085 variables(var_ind)%is_integer = .FALSE. 1086 CASE ( 'speed_x' ) 1087 variables(var_ind)%units = 'm/s' 1088 variables(var_ind)%is_integer = .FALSE. 1089 CASE ( 'speed_y' ) 1090 variables(var_ind)%units = 'm/s' 1091 variables(var_ind)%is_integer = .FALSE. 1092 CASE ( 'speed_z' ) 1093 variables(var_ind)%units = 'm/s' 1094 variables(var_ind)%is_integer = .FALSE. 1095 CASE ( 'radius' ) 1096 variables(var_ind)%units = 'meter' 1097 variables(var_ind)%is_integer = .FALSE. 1098 CASE ( 'age' ) 1099 variables(var_ind)%units = 'sec' 1100 variables(var_ind)%is_integer = .FALSE. 1101 CASE ( 'age_m' ) 1102 variables(var_ind)%units = 'sec' 1103 variables(var_ind)%is_integer = .FALSE. 1104 CASE ( 'dt_sum' ) 1105 variables(var_ind)%units = 'sec' 1106 variables(var_ind)%is_integer = .FALSE. 1107 CASE ( 'e_m' ) 1108 variables(var_ind)%units = 'Ws' 1109 variables(var_ind)%is_integer = .FALSE. 
1110 CASE( 'weight_factor' ) 1111 variables(var_ind)%units = 'factor' 1112 variables(var_ind)%is_integer = .FALSE. 1113 CASE ( 'aux1' ) 1114 variables(var_ind)%units = 'depend_on_setup' 1115 variables(var_ind)%is_integer = .FALSE. 1116 CASE ( 'aux2' ) 1117 variables(var_ind)%units = 'depend_on_setup' 1118 variables(var_ind)%is_integer = .FALSE. 1119 CASE ( 'rvar1' ) 1120 variables(var_ind)%units = 'depend_on_setup' 1121 variables(var_ind)%is_integer = .FALSE. 1122 CASE ( 'rvar2' ) 1123 variables(var_ind)%units = 'depend_on_setup' 1124 variables(var_ind)%is_integer = .FALSE. 1125 CASE ( 'rvar3' ) 1126 variables(var_ind)%units = 'depend_on_setup' 1127 variables(var_ind)%is_integer = .FALSE. 1128 END SELECT 1129 1130 IF ( part_io%iam_io_pe ) THEN 1131 IF ( variables(var_ind)%is_integer ) THEN 1132 CALL netcdf4_init_variable( 'parallel', file_id, variables(var_ind)%var_id, & 1133 variables(var_ind)%name, 'int32', dimension_ids(1:2), & 1134 .FALSE., return_value ) 1135 ELSE 1136 CALL netcdf4_init_variable( 'parallel', file_id, variables(var_ind)%var_id, & 1137 variables(var_ind)%name, 'real32', dimension_ids(1:2), & 1138 .FALSE., return_value ) 1139 ENDIF 1140 1141 CALL netcdf4_write_attribute( 'parallel', file_id, variables(var_ind)%var_id, 'units',& 1142 value_char=TRIM( variables(var_ind)%units ), & 1143 return_value=return_value ) 1144 ENDIF 1145 1146 ENDIF 1147 ENDDO 1148 1149 nr_variables = var_ind 1150 1151 IF ( part_io%iam_io_pe ) THEN 1152 CALL netcdf4_stop_file_header_definition( 'parallel', file_id, return_value ) 1153 ENDIF 1154 1155 CALL dop_write_axis 1156 1157 RETURN 1158 1159 CONTAINS 1160 1161 !--------------------------------------------------------------------------------------------------! 1162 ! Description: 1163 ! ------------ 1164 ! 1165 !--------------------------------------------------------------------------------------------------! 1166 SUBROUTINE dop_write_axis 1167 1168 IMPLICIT NONE 1169 1170 INTEGER(iwp) :: i 1171 INTEGER(iwp) :: return_value !< Return value data_output_netcdf4 .. routines 1172 1173 INTEGER,DIMENSION(1) :: bounds_origin, bounds_start, value_counts 1174 1175 INTEGER, POINTER, CONTIGUOUS, DIMENSION(:) :: prt_val 1176 1177 1178 bounds_origin = 1 1179 bounds_start(1) = 1 1180 1181 IF ( myid == 0 ) THEN 1182 1183 ALLOCATE( prt_val(nr_particles_out) ) 1184 DO i = 1, nr_particles_out 1185 prt_val(i) = i 1186 ENDDO 1187 value_counts(1) = nr_particles_out 1188 1189 CALL netcdf4_write_variable( 'parallel', file_id, var_id%prt, bounds_start, & 1190 value_counts, bounds_origin, .TRUE., & 1191 values_int32_1d=prt_val, return_value=return_value) 1192 1193 DEALLOCATE( prt_val ) 1194 ENDIF 1195 1196 RETURN 1197 END SUBROUTINE dop_write_axis 1198 1199 END SUBROUTINE dop_netcdf_setup 1200 1201 !--------------------------------------------------------------------------------------------------! 1202 ! Description: 1203 ! ------------ 1204 !> Write constant variables 1205 !--------------------------------------------------------------------------------------------------! 1206 SUBROUTINE dop_write_fixed_variables 1207 1208 IMPLICIT NONE 1209 1210 INTEGER(iwp) :: i ! 1211 INTEGER(iwp) :: return_value ! 
1212 1213 INTEGER(iwp),DIMENSION(1) :: bounds_origin, bounds_start, value_counts 1214 1215 1216 bounds_origin(1) = 1 1217 bounds_start(1) = io_start_index 1218 value_counts(1) = io_end_index - io_start_index + 1 1219 1220 1221 DO i = 1, nr_fix_variables 1121 1222 1122 1223 #if defined( __netcdf4 ) 1123 IF(fix_variables(i)%is_integer) THEN 1124 out_buf_i = NF90_FILL_INT 1125 ELSE 1126 out_buf_r = NF90_FILL_REAL 1127 ENDIF 1128 #endif 1129 1130 CALL dop_fill_out_buf (fix_variables(i)) 1131 1132 CALL dop_get_remote_particle (fix_variables(i)%is_integer) 1133 1134 CALL part_io%sm_node_barrier() 1135 IF (part_io%iam_io_pe) THEN 1136 IF(fix_variables(i)%is_integer) THEN 1137 CALL netcdf4_write_variable('parallel', file_id, fix_variables(i)%var_id, bounds_start, value_counts, & 1138 bounds_origin, .FALSE., values_int32_1d=out_buf_i, return_value=return_value) 1139 ELSE 1140 CALL netcdf4_write_variable('parallel', file_id, fix_variables(i)%var_id, bounds_start, value_counts, & 1141 bounds_origin, .FALSE., values_real32_1d=out_buf_r, return_value=return_value) 1142 ENDIF 1143 ENDIF 1144 CALL part_io%sm_node_barrier() 1145 ENDDO 1146 1147 RETURN 1148 END SUBROUTINE dop_write_fixed_variables 1149 1150 SUBROUTINE dop_delete_particle_number 1151 IMPLICIT NONE 1152 1153 INTEGER(iwp) :: i !< 1154 INTEGER(iwp) :: j !< 1155 INTEGER(iwp) :: k !< 1156 INTEGER(iwp) :: n !< 1157 1158 ! 1159 !-- delete inactive particles, i.e. all particles with particle_mask == .FALSE. 1160 !-- get an output particle nr of -1 1161 !kk Not sure if it is required here or already done in lagrangian_particle_model_mod before this call 1162 1163 remote_nr_particles = 0 1164 DO i=nxl,nxr 1165 DO j=nys,nyn 1166 DO k=nzb+1,nzt 1167 DO n=1,prt_count(k,j,i) 1168 IF( .NOT. grid_particles(k,j,i)%particles(n)%particle_mask) THEN 1169 grid_particles(k,j,i)%particles(n)%particle_nr = -1 1170 END IF 1171 END DO 1172 END DO 1173 ENDDO 1174 ENDDO 1175 1176 RETURN 1177 END SUBROUTINE dop_delete_particle_number 1178 1179 SUBROUTINE dop_newly_generated_particles 1180 IMPLICIT NONE 1181 1182 INTEGER(iwp) :: i !< 1183 INTEGER(iwp) :: j !< 1184 INTEGER(iwp) :: k !< 1185 INTEGER(iwp) :: n !< 1186 INTEGER(iwp) :: n_all !< count all particles for MOD function 1187 INTEGER(iwp) :: nr_new_particle !< 1188 INTEGER(iwp) :: particle_nr !< 1189 #if defined( __parallel ) 1190 INTEGER(iwp) :: ierr !< MPI error code 1191 #endif 1192 REAL(dp) :: fcount 1193 REAL(dp) :: finc 1194 INTEGER(iwp),DIMENSION(0:numprocs-1) :: nr_particles_new_s 1195 INTEGER(iwp),DIMENSION(0:numprocs-1) :: nr_particles_new_r 1196 INTEGER(iwp) :: start_new_numbering 1197 1198 1199 ! 1200 !-- Count Number of Newly Generated particles 1201 !-- Condition for a newparticle: particle_mask = .TRUE. and particle nr of -1 1202 ! 1203 !-- For performance reasons, this subroutine may be combined later with dop_delete_particle_number 1204 1205 nr_new_particle = 0 1206 1207 IF(pts_increment > 1) THEN 1208 n_all = 0 1209 DO i=nxl,nxr 1210 DO j=nys,nyn 1211 DO k=nzb+1,nzt 1212 DO n=1,prt_count(k,j,i) 1213 IF( grid_particles(k,j,i)%particles(n)%particle_mask) THEN 1214 IF(grid_particles(k,j,i)%particles(n)%particle_nr == -1) THEN 1215 IF(MOD(n_all,pts_increment) == 0) THEN 1216 nr_new_particle = nr_new_particle+1 1217 ENDIF 1218 n_all = n_all+1 1219 ENDIF 1220 END IF 1221 END DO 1222 END DO 1223 ENDDO 1224 ENDDO 1225 ELSEIF(pts_percentage < 100. 
) THEN 1226 finc = pts_percentage/100 1227 fcount = 0.0 1228 1229 DO i=nxl,nxr 1230 DO j=nys,nyn 1231 DO k=nzb+1,nzt 1232 DO n=1,prt_count(k,j,i) 1233 IF( grid_particles(k,j,i)%particles(n)%particle_mask) THEN 1234 IF(grid_particles(k,j,i)%particles(n)%particle_nr == -1) THEN 1235 fcount = fcount + finc 1236 IF(nr_new_particle < int(fcount) ) THEN 1237 nr_new_particle = nr_new_particle+1 1238 ENDIF 1239 ENDIF 1240 END IF 1241 END DO 1242 END DO 1243 ENDDO 1244 ENDDO 1245 1246 ELSE 1247 DO i=nxl,nxr 1248 DO j=nys,nyn 1249 DO k=nzb+1,nzt 1250 DO n=1,prt_count(k,j,i) 1251 IF( grid_particles(k,j,i)%particles(n)%particle_mask) THEN 1252 IF(grid_particles(k,j,i)%particles(n)%particle_nr == -1) THEN 1253 nr_new_particle = nr_new_particle+1 1254 ENDIF 1255 END IF 1256 END DO 1257 END DO 1258 ENDDO 1259 ENDDO 1260 ENDIF 1261 ! 1262 !-- Determine start number of new particles on every thread 1263 1264 nr_particles_new_s = 0 1265 nr_particles_new_s(myid) = nr_new_particle 1266 1267 #if defined( __parallel ) 1268 CALL MPI_ALLREDUCE( nr_particles_new_s, nr_particles_new_r, SIZE( nr_particles_new_s ), MPI_INTEGER, & 1269 MPI_SUM, comm2d, ierr ) 1224 IF ( fix_variables(i)%is_integer ) THEN 1225 out_buf_i = NF90_FILL_INT 1226 ELSE 1227 out_buf_r = NF90_FILL_REAL 1228 ENDIF 1229 #endif 1230 1231 CALL dop_fill_out_buf( fix_variables(i) ) 1232 1233 CALL dop_get_remote_particle( fix_variables(i)%is_integer ) 1234 1235 CALL part_io%sm_node_barrier( ) 1236 IF ( part_io%iam_io_pe ) THEN 1237 IF ( fix_variables(i)%is_integer ) THEN 1238 CALL netcdf4_write_variable( 'parallel', file_id, fix_variables(i)%var_id, & 1239 bounds_start, value_counts, bounds_origin, .FALSE., & 1240 values_int32_1d=out_buf_i, return_value=return_value) 1241 ELSE 1242 CALL netcdf4_write_variable( 'parallel', file_id, fix_variables(i)%var_id, & 1243 bounds_start, value_counts, bounds_origin, .FALSE., & 1244 values_real32_1d=out_buf_r, return_value=return_value) 1245 ENDIF 1246 ENDIF 1247 CALL part_io%sm_node_barrier( ) 1248 ENDDO 1249 1250 RETURN 1251 END SUBROUTINE dop_write_fixed_variables 1252 1253 !--------------------------------------------------------------------------------------------------! 1254 ! Description: 1255 ! ------------ 1256 ! 1257 !--------------------------------------------------------------------------------------------------! 1258 SUBROUTINE dop_delete_particle_number 1259 1260 IMPLICIT NONE 1261 1262 INTEGER(iwp) :: i !< 1263 INTEGER(iwp) :: j !< 1264 INTEGER(iwp) :: k !< 1265 INTEGER(iwp) :: n !< 1266 1267 ! 1268 !-- Delete inactive particles, i.e. all particles with particle_mask == .FALSE.. 1269 !-- Get an output particle nr of -1. 1270 !-- kk Not sure if it is required here or already done in lagrangian_particle_model_mod before 1271 !-- this call. 1272 remote_nr_particles = 0 1273 DO i = nxl, nxr 1274 DO j = nys, nyn 1275 DO k = nzb+1, nzt 1276 DO n = 1, prt_count(k,j,i) 1277 IF ( .NOT. grid_particles(k,j,i)%particles(n)%particle_mask ) THEN 1278 grid_particles(k,j,i)%particles(n)%particle_nr = -1 1279 ENDIF 1280 ENDDO 1281 ENDDO 1282 ENDDO 1283 ENDDO 1284 1285 RETURN 1286 END SUBROUTINE dop_delete_particle_number 1287 1288 !--------------------------------------------------------------------------------------------------! 1289 ! Description: 1290 ! ------------ 1291 ! 1292 !--------------------------------------------------------------------------------------------------! 
1293 SUBROUTINE dop_newly_generated_particles 1294 1295 IMPLICIT NONE 1296 1297 INTEGER(iwp) :: i !< 1298 #if defined( __parallel ) 1299 INTEGER(iwp) :: ierr !< MPI error code 1300 #endif 1301 INTEGER(iwp) :: j !< 1302 INTEGER(iwp) :: k !< 1303 INTEGER(iwp) :: n !< 1304 INTEGER(iwp) :: n_all !< count all particles for MOD function 1305 INTEGER(iwp) :: nr_new_particle !< 1306 INTEGER(iwp) :: particle_nr !< 1307 INTEGER(iwp) :: start_new_numbering 1308 1309 INTEGER(iwp),DIMENSION(0:numprocs-1) :: nr_particles_new_r 1310 INTEGER(iwp),DIMENSION(0:numprocs-1) :: nr_particles_new_s 1311 1312 REAL(dp) :: fcount 1313 REAL(dp) :: finc 1314 1315 1316 ! 1317 !-- Count Number of Newly Generated particles. 1318 !-- Condition for a newparticle: particle_mask = .TRUE. and particle nr of -1. 1319 ! 1320 !-- For performance reasons, this subroutine may be combined later with dop_delete_particle_number. 1321 nr_new_particle = 0 1322 1323 IF ( pts_increment > 1 ) THEN 1324 n_all = 0 1325 DO i = nxl, nxr 1326 DO j = nys, nyn 1327 DO k = nzb+1, nzt 1328 DO n = 1, prt_count(k,j,i) 1329 IF ( grid_particles(k,j,i)%particles(n)%particle_mask ) THEN 1330 IF ( grid_particles(k,j,i)%particles(n)%particle_nr == -1 ) THEN 1331 IF ( MOD( n_all, pts_increment ) == 0 ) THEN 1332 nr_new_particle = nr_new_particle + 1 1333 ENDIF 1334 n_all = n_all + 1 1335 ENDIF 1336 ENDIF 1337 ENDDO 1338 ENDDO 1339 ENDDO 1340 ENDDO 1341 ELSEIF ( pts_percentage < 100.0_wp ) THEN 1342 finc = pts_percentage / 100 1343 fcount = 0.0 1344 1345 DO i = nxl, nxr 1346 DO j = nys, nyn 1347 DO k = nzb+1, nzt 1348 DO n = 1, prt_count(k,j,i) 1349 IF ( grid_particles(k,j,i)%particles(n)%particle_mask ) THEN 1350 IF ( grid_particles(k,j,i)%particles(n)%particle_nr == -1 ) THEN 1351 fcount = fcount + finc 1352 IF ( nr_new_particle < INT( fcount ) ) THEN 1353 nr_new_particle = nr_new_particle + 1 1354 ENDIF 1355 ENDIF 1356 ENDIF 1357 ENDDO 1358 ENDDO 1359 ENDDO 1360 ENDDO 1361 1362 ELSE 1363 DO i = nxl, nxr 1364 DO j = nys, nyn 1365 DO k = nzb+1, nzt 1366 DO n = 1, prt_count(k,j,i) 1367 IF ( grid_particles(k,j,i)%particles(n)%particle_mask ) THEN 1368 IF ( grid_particles(k,j,i)%particles(n)%particle_nr == -1 ) THEN 1369 nr_new_particle = nr_new_particle + 1 1370 ENDIF 1371 ENDIF 1372 ENDDO 1373 ENDDO 1374 ENDDO 1375 ENDDO 1376 ENDIF 1377 ! 1378 !-- Determine start number of new particles on eac thread. 1379 nr_particles_new_s = 0 1380 nr_particles_new_s(myid) = nr_new_particle 1381 1382 #if defined( __parallel ) 1383 CALL MPI_ALLREDUCE( nr_particles_new_s, nr_particles_new_r, SIZE( nr_particles_new_s ), & 1384 MPI_INTEGER, MPI_SUM, comm2d, ierr ) 1270 1385 #else 1271 nr_particles_new_r = nr_particles_new_s 1272 #endif 1273 ! 1274 !-- Abortion if selected particles from new particle set would exceed particle axis of output 1275 !-- changed by JS 1276 IF ( ( SUM(nr_particles_new_r) + initial_number_of_active_particles) > nr_particles_out ) THEN 1277 RETURN 1278 ENDIF 1279 1280 start_new_numbering = initial_number_of_active_particles+1 1281 1282 IF ( myid > 0) THEN 1283 DO i=1,numprocs-1 1284 start_new_numbering = start_new_numbering+nr_particles_new_r(i-1) 1285 IF(myid == i) EXIT 1286 ENDDO 1287 END IF 1288 1289 initial_number_of_active_particles = initial_number_of_active_particles+SUM(nr_particles_new_r) 1290 1291 dop_last_active_particle = initial_number_of_active_particles 1292 ! 
1293 !-- Set number of new particles 1294 1295 particle_nr = start_new_numbering 1296 nr_new_particle = 0 1297 1298 IF(pts_increment > 1) THEN 1299 n_all = 0 1300 DO i=nxl,nxr 1301 DO j=nys,nyn 1302 DO k=nzb+1,nzt 1303 DO n=1,prt_count(k,j,i) 1304 IF( grid_particles(k,j,i)%particles(n)%particle_mask) THEN 1305 IF(grid_particles(k,j,i)%particles(n)%particle_nr == -1) THEN 1306 IF(MOD(n_all,pts_increment) == 0) THEN 1307 grid_particles(k,j,i)%particles(n)%particle_nr = particle_nr 1308 particle_nr = particle_nr+1 1309 ELSE 1310 grid_particles(k,j,i)%particles(n)%particle_nr = -2 1311 ENDIF 1312 n_all = n_all+1 1313 ENDIF 1314 END IF 1315 END DO 1316 END DO 1317 ENDDO 1318 ENDDO 1319 ELSEIF(pts_percentage < 100. ) THEN 1320 finc = pts_percentage/100 1321 fcount = 0.0 1322 1323 DO i=nxl,nxr 1324 DO j=nys,nyn 1325 DO k=nzb+1,nzt 1326 DO n=1,prt_count(k,j,i) 1327 IF( grid_particles(k,j,i)%particles(n)%particle_mask) THEN 1328 IF(grid_particles(k,j,i)%particles(n)%particle_nr == -1) THEN 1329 fcount = fcount + finc 1330 IF(nr_new_particle < int(fcount) ) THEN 1331 grid_particles(k,j,i)%particles(n)%particle_nr = particle_nr 1332 particle_nr = particle_nr+1 1333 nr_new_particle = nr_new_particle+1 1334 ELSE 1335 grid_particles(k,j,i)%particles(n)%particle_nr = -2 1336 ENDIF 1337 ENDIF 1338 END IF 1339 END DO 1340 END DO 1341 ENDDO 1342 ENDDO 1343 ELSE 1344 DO i=nxl,nxr 1345 DO j=nys,nyn 1346 DO k=nzb+1,nzt 1347 DO n=1,prt_count(k,j,i) 1348 IF( grid_particles(k,j,i)%particles(n)%particle_mask) THEN 1349 IF(grid_particles(k,j,i)%particles(n)%particle_nr == -1) THEN 1386 nr_particles_new_r = nr_particles_new_s 1387 #endif 1388 ! 1389 !-- Abortion if selected particles from new particle set would exceed particle axis of output 1390 !-- changed by JS. 1391 IF ( ( SUM( nr_particles_new_r ) + initial_number_of_active_particles ) > nr_particles_out ) & 1392 THEN 1393 RETURN 1394 ENDIF 1395 1396 start_new_numbering = initial_number_of_active_particles + 1 1397 1398 IF ( myid > 0) THEN 1399 DO i = 1, numprocs-1 1400 start_new_numbering = start_new_numbering + nr_particles_new_r(i-1) 1401 IF ( myid == i ) EXIT 1402 ENDDO 1403 ENDIF 1404 1405 initial_number_of_active_particles = initial_number_of_active_particles + & 1406 SUM( nr_particles_new_r ) 1407 1408 dop_last_active_particle = initial_number_of_active_particles 1409 ! 1410 !-- Set number of new particles 1411 particle_nr = start_new_numbering 1412 nr_new_particle = 0 1413 1414 IF ( pts_increment > 1 ) THEN 1415 n_all = 0 1416 DO i = nxl, nxr 1417 DO j = nys, nyn 1418 DO k = nzb+1, nzt 1419 DO n = 1, prt_count(k,j,i) 1420 IF ( grid_particles(k,j,i)%particles(n)%particle_mask) THEN 1421 IF ( grid_particles(k,j,i)%particles(n)%particle_nr == -1 ) THEN 1422 IF ( MOD( n_all, pts_increment ) == 0 ) THEN 1350 1423 grid_particles(k,j,i)%particles(n)%particle_nr = particle_nr 1351 particle_nr = particle_nr+1 1352 ENDIF 1353 END IF 1354 END DO 1355 END DO 1356 ENDDO 1357 ENDDO 1358 1359 1360 ENDIF 1361 1362 1363 1364 RETURN 1365 END SUBROUTINE dop_newly_generated_particles 1366 1367 SUBROUTINE dop_count_remote_particles 1368 IMPLICIT NONE 1369 1370 #if defined( __parallel ) 1371 INTEGER(iwp) :: i !< 1372 INTEGER(iwp) :: j !< 1373 INTEGER(iwp) :: k !< 1374 INTEGER(iwp) :: n !< 1375 INTEGER(iwp) :: iop !< 1376 INTEGER(iwp) :: particle_nr 1377 INTEGER(iwp) :: pe_nr 1378 INTEGER(iwp) :: win_size 1379 INTEGER(iwp) :: ierr !< MPI error code 1380 INTEGER(iwp), DIMENSION(0:numprocs-1) :: part_ind 1381 1382 ! 
1383 !-- Count remote particles 1384 1385 remote_nr_particles = 0 1386 DO i=nxl,nxr 1387 DO j=nys,nyn 1388 DO k=nzb+1,nzt 1389 DO n=1,prt_count(k,j,i) 1390 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1391 IF ( particle_nr > 0) THEN 1392 DO iop=0,numprocs-1 !kk this loop has to be optimized 1393 ! 1394 !-- Although the counting is local PE based, the following if is io processor based 1395 !-- because particles in MPI shared memory do not have to be transfered 1396 IF(particle_nr < io_start_index .OR. particle_nr > io_end_index) THEN 1397 IF(particle_nr >= mo_indices(1,iop) .AND. particle_nr <= mo_indices(2,iop)) THEN 1398 remote_nr_particles(2,iop) = remote_nr_particles(2,iop)+1 1399 ENDIF 1400 ENDIF 1401 ENDDO 1402 END IF 1403 END DO 1404 END DO 1405 ENDDO 1406 ENDDO 1407 1408 remote_nr_particles(1,0) = 0 1409 DO i=1,numprocs-1 1410 remote_nr_particles(1,i) = remote_nr_particles(1,i-1) + remote_nr_particles(2,i-1) 1411 END DO 1412 1413 win_size = sum(remote_nr_particles(2,:)) 1414 CALL dop_alloc_rma_mem (transfer_buffer_i, win_size, win_rma_buf_i) 1415 CALL dop_alloc_rma_mem (transfer_buffer_r, win_size, win_rma_buf_r) 1416 1417 CALL MPI_ALLTOALL (remote_nr_particles, 2, MPI_INTEGER, rma_particles, 2, MPI_INTEGER, comm2d, ierr) 1418 1419 ! 1420 !-- The particles indices are the same for all output variables during one time step 1421 !-- therefore, the indices are transfered only once here 1422 1423 part_ind = remote_nr_particles(1,:) 1424 transfer_buffer_i = -9999 1425 1426 DO i=nxl,nxr 1427 DO j=nys,nyn 1428 DO k=nzb+1,nzt 1429 1430 DO n=1,prt_count(k,j,i) 1431 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1432 IF(particle_nr < io_start_index .OR. particle_nr > io_end_index) THEN 1433 IF (particle_nr > 0) THEN 1434 pe_nr = find_pe_from_particle_nr (particle_nr) 1435 transfer_buffer_i (part_ind(pe_nr)) = particle_nr 1436 part_ind(pe_nr) = part_ind(pe_nr)+1 1437 ENDIF 1438 ENDIF 1439 END DO 1440 END DO 1441 ENDDO 1442 ENDDO 1443 1444 CALL MPI_Barrier (MPI_COMM_WORLD, ierr) 1445 1446 CALL dop_get_remote_indices 1447 1448 #endif 1449 RETURN 1450 END SUBROUTINE dop_count_remote_particles 1451 1452 ! 1453 !- Fill ouput buffer 1454 ! 1455 ! local variables values are copied into output buffer 1456 ! local here means local to shared memory group 1457 ! remote variable values are copied into transfer buffer 1458 ! this is done by all threads 1459 1460 SUBROUTINE dop_fill_out_buf (var) 1461 IMPLICIT NONE 1462 1463 TYPE(var_def),INTENT(IN) :: var 1464 1465 INTEGER(iwp) :: i !< 1466 INTEGER(iwp) :: j !< 1467 INTEGER(iwp) :: k !< 1468 INTEGER(iwp) :: n !< 1469 INTEGER(idp) :: pval !< 1470 INTEGER(iwp) :: particle_nr 1471 INTEGER(iwp) :: pe_nr 1472 INTEGER(iwp) :: local_len 1473 CHARACTER(len=32) :: local_name 1474 INTEGER(iwp), DIMENSION(0:numprocs-1) :: part_ind 1475 1476 part_ind = remote_nr_particles(1,:) 1477 transfer_buffer_i = -9998 1478 ! 1479 !-- filling output buffer is the same for variable name and variable name_const 1480 !-- therefore set local_name without 1481 1482 local_len = INDEX(TRIM(var%name),'_const') 1483 IF(local_len == 0) THEN 1484 local_name = var%name 1485 ELSE 1486 local_name = var%name(1:local_len-1) 1487 END IF 1488 ! 1489 !-- In this subroutine the particles are seperated: 1490 ! 1491 !-- All particles which are located in the share memory area of the respective IO thread are copied into 1492 !-- the output buffer. The other output particle are copied into the transfer buffer. 
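One detail of the SELECT CASE blocks below that is easy to miss: the 64-bit particle id does not fit into a 32-bit NetCDF variable, so it is split with IBITS into the two output variables 'id_low' and 'id_high'. A minimal post-processing sketch (not part of this changeset) showing how a reader of the particle file could recombine the two halves, with kind parameters as in the kinds module:

    INTEGER(isp) :: id_low, id_high   ! 32-bit halves as stored in the NetCDF file
    INTEGER(idp) :: id                ! reconstructed 64-bit particle id

    id = IOR( ISHFT( IAND( INT( id_high, idp ), 4294967295_idp ), 32 ),                           &
              IAND( INT( id_low, idp ), 4294967295_idp ) )

The IAND masks remove the sign extension that can occur because the low half is stored as a signed 32-bit integer.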
1493 1494 SELECT CASE (TRIM(local_name)) 1495 CASE('origin_x') 1496 DO i=nxl,nxr 1497 DO j=nys,nyn 1498 DO k=nzb+1,nzt 1499 DO n=1,prt_count(k,j,i) 1500 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1501 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1502 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%origin_x 1503 ELSE IF (particle_nr > 0) THEN 1504 pe_nr = find_pe_from_particle_nr (particle_nr) 1505 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%origin_x 1506 part_ind(pe_nr) = part_ind(pe_nr)+1 1507 ENDIF 1508 END DO 1509 END DO 1510 ENDDO 1511 ENDDO 1512 CASE('origin_y') 1513 DO i=nxl,nxr 1514 DO j=nys,nyn 1515 DO k=nzb+1,nzt 1516 DO n=1,prt_count(k,j,i) 1517 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1518 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1519 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%origin_y 1520 ELSE IF (particle_nr > 0) THEN 1521 pe_nr = find_pe_from_particle_nr (particle_nr) 1522 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%origin_y 1523 part_ind(pe_nr) = part_ind(pe_nr)+1 1524 ENDIF 1525 END DO 1526 END DO 1527 ENDDO 1528 ENDDO 1529 CASE('origin_z') 1530 DO i=nxl,nxr 1531 DO j=nys,nyn 1532 DO k=nzb+1,nzt 1533 DO n=1,prt_count(k,j,i) 1534 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1535 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1536 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%origin_z 1537 ELSE IF (particle_nr > 0) THEN 1538 pe_nr = find_pe_from_particle_nr (particle_nr) 1539 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%origin_z 1540 part_ind(pe_nr) = part_ind(pe_nr)+1 1541 ENDIF 1542 END DO 1543 END DO 1544 ENDDO 1545 ENDDO 1546 CASE('id_low') 1547 DO i=nxl,nxr 1548 DO j=nys,nyn 1549 DO k=nzb+1,nzt 1550 DO n=1,prt_count(k,j,i) 1551 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1552 pval = IBITS(grid_particles(k,j,i)%particles(n)%id,0,32) 1553 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1554 out_buf_i(particle_nr) = INT(pval,4) 1555 ELSE IF (particle_nr > 0) THEN 1556 pe_nr = find_pe_from_particle_nr (particle_nr) 1557 transfer_buffer_i (part_ind(pe_nr)) = INT(pval,4) 1558 part_ind(pe_nr) = part_ind(pe_nr)+1 1559 ENDIF 1560 END DO 1561 END DO 1562 ENDDO 1563 ENDDO 1564 CASE('id_high') 1565 DO i=nxl,nxr 1566 DO j=nys,nyn 1567 DO k=nzb+1,nzt 1568 DO n=1,prt_count(k,j,i) 1569 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1570 pval = IBITS(grid_particles(k,j,i)%particles(n)%id,32,32) 1571 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1572 out_buf_i(particle_nr) = INT(pval,4) 1573 ELSE IF (particle_nr > 0) THEN 1574 pe_nr = find_pe_from_particle_nr (particle_nr) 1575 transfer_buffer_i (part_ind(pe_nr)) = INT(pval,4) 1576 part_ind(pe_nr) = part_ind(pe_nr)+1 1577 ENDIF 1578 END DO 1579 END DO 1580 ENDDO 1581 ENDDO 1582 CASE('particle_nr') 1583 DO i=nxl,nxr 1584 DO j=nys,nyn 1585 DO k=nzb+1,nzt 1586 DO n=1,prt_count(k,j,i) 1587 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1588 IF(particle_nr >= io_start_index .AND. 
particle_nr <= io_end_index) THEN 1589 out_buf_i(particle_nr) = grid_particles(k,j,i)%particles(n)%particle_nr 1590 ELSE IF (particle_nr > 0) THEN 1591 pe_nr = find_pe_from_particle_nr (particle_nr) 1592 transfer_buffer_i (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%particle_nr 1593 part_ind(pe_nr) = part_ind(pe_nr)+1 1594 ENDIF 1595 END DO 1596 END DO 1597 ENDDO 1598 ENDDO 1599 CASE('class') 1600 DO i=nxl,nxr 1601 DO j=nys,nyn 1602 DO k=nzb+1,nzt 1603 DO n=1,prt_count(k,j,i) 1604 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1605 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1606 out_buf_i(particle_nr) = grid_particles(k,j,i)%particles(n)%class 1607 ELSE IF (particle_nr > 0) THEN 1608 pe_nr = find_pe_from_particle_nr (particle_nr) 1609 transfer_buffer_i (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%class 1610 part_ind(pe_nr) = part_ind(pe_nr)+1 1611 ENDIF 1612 END DO 1613 END DO 1614 ENDDO 1615 ENDDO 1616 CASE('group') 1617 DO i=nxl,nxr 1618 DO j=nys,nyn 1619 DO k=nzb+1,nzt 1620 DO n=1,prt_count(k,j,i) 1621 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1622 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1623 out_buf_i(particle_nr) = grid_particles(k,j,i)%particles(n)%group 1624 ELSE IF (particle_nr > 0) THEN 1625 pe_nr = find_pe_from_particle_nr (particle_nr) 1626 transfer_buffer_i (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%group 1627 part_ind(pe_nr) = part_ind(pe_nr)+1 1628 ENDIF 1629 END DO 1630 END DO 1631 ENDDO 1632 ENDDO 1633 CASE('x') 1634 DO i=nxl,nxr 1635 DO j=nys,nyn 1636 DO k=nzb+1,nzt 1637 DO n=1,prt_count(k,j,i) 1638 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1639 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1640 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%x 1641 ELSE IF (particle_nr > 0) THEN 1642 pe_nr = find_pe_from_particle_nr (particle_nr) 1643 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%x 1644 part_ind(pe_nr) = part_ind(pe_nr)+1 1645 ENDIF 1646 END DO 1647 END DO 1648 ENDDO 1649 ENDDO 1650 CASE('y') 1651 DO i=nxl,nxr 1652 DO j=nys,nyn 1653 DO k=nzb+1,nzt 1654 DO n=1,prt_count(k,j,i) 1655 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1656 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1657 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%y 1658 ELSE IF (particle_nr > 0) THEN 1659 pe_nr = find_pe_from_particle_nr (particle_nr) 1660 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%y 1661 part_ind(pe_nr) = part_ind(pe_nr)+1 1662 ENDIF 1663 END DO 1664 END DO 1665 ENDDO 1666 ENDDO 1667 CASE('z') 1668 DO i=nxl,nxr 1669 DO j=nys,nyn 1670 DO k=nzb+1,nzt 1671 DO n=1,prt_count(k,j,i) 1672 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1673 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1674 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%z 1675 ELSE IF (particle_nr > 0) THEN 1676 pe_nr = find_pe_from_particle_nr (particle_nr) 1677 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%z 1678 part_ind(pe_nr) = part_ind(pe_nr)+1 1679 ENDIF 1680 END DO 1681 END DO 1682 ENDDO 1683 ENDDO 1684 CASE('speed_x') 1685 DO i=nxl,nxr 1686 DO j=nys,nyn 1687 DO k=nzb+1,nzt 1688 DO n=1,prt_count(k,j,i) 1689 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1690 IF(particle_nr >= io_start_index .AND. 
particle_nr <= io_end_index) THEN 1691 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%speed_x 1692 ELSE IF (particle_nr > 0) THEN 1693 pe_nr = find_pe_from_particle_nr (particle_nr) 1694 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%speed_x 1695 part_ind(pe_nr) = part_ind(pe_nr)+1 1696 ENDIF 1697 END DO 1698 END DO 1699 ENDDO 1700 ENDDO 1701 CASE('speed_y') 1702 DO i=nxl,nxr 1703 DO j=nys,nyn 1704 DO k=nzb+1,nzt 1705 DO n=1,prt_count(k,j,i) 1706 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1707 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1708 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%speed_y 1709 ELSE IF (particle_nr > 0) THEN 1710 pe_nr = find_pe_from_particle_nr (particle_nr) 1711 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%speed_y 1712 part_ind(pe_nr) = part_ind(pe_nr)+1 1713 ENDIF 1714 END DO 1715 END DO 1716 ENDDO 1717 ENDDO 1718 CASE('speed_z') 1719 DO i=nxl,nxr 1720 DO j=nys,nyn 1721 DO k=nzb+1,nzt 1722 DO n=1,prt_count(k,j,i) 1723 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1724 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1725 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%speed_z 1726 ELSE IF (particle_nr > 0) THEN 1727 pe_nr = find_pe_from_particle_nr (particle_nr) 1728 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%speed_z 1729 part_ind(pe_nr) = part_ind(pe_nr)+1 1730 ENDIF 1731 END DO 1732 END DO 1733 ENDDO 1734 ENDDO 1735 CASE('radius') 1736 DO i=nxl,nxr 1737 DO j=nys,nyn 1738 DO k=nzb+1,nzt 1739 DO n=1,prt_count(k,j,i) 1740 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1741 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1742 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%radius 1743 ELSE IF (particle_nr > 0) THEN 1744 pe_nr = find_pe_from_particle_nr (particle_nr) 1745 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%radius 1746 part_ind(pe_nr) = part_ind(pe_nr)+1 1747 ENDIF 1748 END DO 1749 END DO 1750 ENDDO 1751 ENDDO 1752 CASE('age') 1753 DO i=nxl,nxr 1754 DO j=nys,nyn 1755 DO k=nzb+1,nzt 1756 DO n=1,prt_count(k,j,i) 1757 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1758 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1759 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%age 1760 ELSE IF (particle_nr > 0) THEN 1761 pe_nr = find_pe_from_particle_nr (particle_nr) 1762 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%age 1763 part_ind(pe_nr) = part_ind(pe_nr)+1 1764 ENDIF 1765 END DO 1766 END DO 1767 ENDDO 1768 ENDDO 1769 CASE('age_m') 1770 DO i=nxl,nxr 1771 DO j=nys,nyn 1772 DO k=nzb+1,nzt 1773 DO n=1,prt_count(k,j,i) 1774 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1775 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1776 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%age_m 1777 ELSE IF (particle_nr > 0) THEN 1778 pe_nr = find_pe_from_particle_nr (particle_nr) 1779 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%age_m 1780 part_ind(pe_nr) = part_ind(pe_nr)+1 1781 ENDIF 1782 END DO 1783 END DO 1784 ENDDO 1785 ENDDO 1786 CASE('dt_sum') 1787 DO i=nxl,nxr 1788 DO j=nys,nyn 1789 DO k=nzb+1,nzt 1790 DO n=1,prt_count(k,j,i) 1791 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1792 IF(particle_nr >= io_start_index .AND. 
particle_nr <= io_end_index) THEN 1793 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%dt_sum 1794 ELSE IF (particle_nr > 0) THEN 1795 pe_nr = find_pe_from_particle_nr (particle_nr) 1796 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%dt_sum 1797 part_ind(pe_nr) = part_ind(pe_nr)+1 1798 ENDIF 1799 END DO 1800 END DO 1801 ENDDO 1802 ENDDO 1803 CASE('e_m') 1804 DO i=nxl,nxr 1805 DO j=nys,nyn 1806 DO k=nzb+1,nzt 1807 DO n=1,prt_count(k,j,i) 1808 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1809 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1810 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%e_m 1811 ELSE IF (particle_nr > 0) THEN 1812 pe_nr = find_pe_from_particle_nr (particle_nr) 1813 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%e_m 1814 part_ind(pe_nr) = part_ind(pe_nr)+1 1815 ENDIF 1816 END DO 1817 END DO 1818 ENDDO 1819 ENDDO 1820 CASE('weight_factor') 1821 DO i=nxl,nxr 1822 DO j=nys,nyn 1823 DO k=nzb+1,nzt 1824 DO n=1,prt_count(k,j,i) 1825 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1826 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1827 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%weight_factor 1828 ELSE IF (particle_nr > 0) THEN 1829 pe_nr = find_pe_from_particle_nr (particle_nr) 1830 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%weight_factor 1831 part_ind(pe_nr) = part_ind(pe_nr)+1 1832 ENDIF 1833 END DO 1834 END DO 1835 ENDDO 1836 ENDDO 1837 CASE('aux1') 1838 DO i=nxl,nxr 1839 DO j=nys,nyn 1840 DO k=nzb+1,nzt 1841 DO n=1,prt_count(k,j,i) 1842 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1843 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1844 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%aux1 1845 ELSE IF (particle_nr > 0) THEN 1846 pe_nr = find_pe_from_particle_nr (particle_nr) 1847 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%aux1 1848 part_ind(pe_nr) = part_ind(pe_nr)+1 1849 ENDIF 1850 END DO 1851 END DO 1852 ENDDO 1853 ENDDO 1854 CASE('aux2') 1855 DO i=nxl,nxr 1856 DO j=nys,nyn 1857 DO k=nzb+1,nzt 1858 DO n=1,prt_count(k,j,i) 1859 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1860 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1861 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%aux2 1862 ELSE IF (particle_nr > 0) THEN 1863 pe_nr = find_pe_from_particle_nr (particle_nr) 1864 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%aux2 1865 part_ind(pe_nr) = part_ind(pe_nr)+1 1866 ENDIF 1867 END DO 1868 END DO 1869 ENDDO 1870 ENDDO 1871 CASE('rvar1') 1872 DO i=nxl,nxr 1873 DO j=nys,nyn 1874 DO k=nzb+1,nzt 1875 DO n=1,prt_count(k,j,i) 1876 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1877 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1878 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%rvar1 1879 ELSE IF (particle_nr > 0) THEN 1880 pe_nr = find_pe_from_particle_nr (particle_nr) 1881 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%rvar1 1882 part_ind(pe_nr) = part_ind(pe_nr)+1 1883 ENDIF 1884 END DO 1885 END DO 1886 ENDDO 1887 ENDDO 1888 CASE('rvar2') 1889 DO i=nxl,nxr 1890 DO j=nys,nyn 1891 DO k=nzb+1,nzt 1892 DO n=1,prt_count(k,j,i) 1893 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1894 IF(particle_nr >= io_start_index .AND. 
particle_nr <= io_end_index) THEN 1895 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%rvar2 1896 ELSE IF (particle_nr > 0) THEN 1897 pe_nr = find_pe_from_particle_nr (particle_nr) 1898 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%rvar2 1899 part_ind(pe_nr) = part_ind(pe_nr)+1 1900 ENDIF 1901 END DO 1902 END DO 1903 ENDDO 1904 ENDDO 1905 CASE('rvar3') 1906 DO i=nxl,nxr 1907 DO j=nys,nyn 1908 DO k=nzb+1,nzt 1909 DO n=1,prt_count(k,j,i) 1910 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1911 IF(particle_nr >= io_start_index .AND. particle_nr <= io_end_index) THEN 1912 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%rvar3 1913 ELSE IF (particle_nr > 0) THEN 1914 pe_nr = find_pe_from_particle_nr (particle_nr) 1915 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%rvar3 1916 part_ind(pe_nr) = part_ind(pe_nr)+1 1917 ENDIF 1918 END DO 1919 END DO 1920 ENDDO 1921 ENDDO 1922 END SELECT 1923 1924 RETURN 1925 END SUBROUTINE dop_fill_out_buf 1926 1927 #if defined( __parallel ) 1928 ! 1929 !- Get indices (displacement) of remot particles 1930 SUBROUTINE dop_get_remote_indices 1931 1932 IMPLICIT NONE 1933 1934 INTEGER(iwp) :: i !< 1935 1936 INTEGER(iwp) :: bufsize !< size of remote indices array 1937 INTEGER(iwp) :: ind_local !< index in remore indices array 1938 INTEGER(iwp) :: ierr !< MPI error code 1939 INTEGER(KIND=MPI_ADDRESS_KIND) :: disp !< displacement in RMA window 1940 1941 bufsize = SUM(rma_particles(2,:)) 1942 ALLOCATE (remote_indices(0:bufsize)) 1943 remote_indices = -1 1944 1945 ind_local = 0 1946 CALL MPI_WIN_FENCE( 0, win_rma_buf_i, ierr ) 1947 DO i=0,numprocs-1 1948 IF (rma_particles(2,i) > 0) THEN 1949 disp = rma_particles(1,i) 1950 IF(rma_particles(2,i) > 0) THEN 1951 CALL MPI_GET (remote_indices(ind_local), rma_particles(2,i), MPI_INTEGER, i, disp, & 1952 rma_particles(2,i), MPI_INTEGER, win_rma_buf_i, ierr) 1953 ind_local = ind_local + rma_particles(2,i) 1954 END IF 1955 END IF 1956 ENDDO 1957 CALL MPI_WIN_FENCE( 0, win_rma_buf_i, ierr ) 1958 1959 RETURN 1960 END SUBROUTINE dop_get_remote_indices 1961 #endif 1962 1963 1964 SUBROUTINE dop_get_remote_particle (is_integer) 1965 1966 IMPLICIT NONE 1967 1968 LOGICAL,INTENT(IN) :: is_integer 1969 1970 #if defined( __parallel ) 1971 INTEGER(iwp) :: i !< 1972 INTEGER(iwp) :: j !< 1973 INTEGER(iwp) :: bufsize !< size of remote data array 1974 INTEGER(iwp) :: ind_local !< index in remore indices array 1975 INTEGER(iwp) :: particle_nr !< particle number 1976 INTEGER(iwp) :: ierr !< MPI error code 1977 INTEGER(KIND=MPI_ADDRESS_KIND) :: disp !< displacement in RMA window 1978 REAL(sp),ALLOCATABLE, DIMENSION(:) :: rma_buf_r !< buffer to receive remote data (REAL) 1979 INTEGER(iwp),ALLOCATABLE, DIMENSION(:) :: rma_buf_i !< buffer to receive remote data (INTEGER) 1980 1981 bufsize = sum(rma_particles(2,:)) 1982 ind_local = 0 1983 ALLOCATE (rma_buf_r(0:bufsize-1)) 1984 ALLOCATE (rma_buf_i(0:bufsize-1)) 1985 1986 IF(is_integer) THEN 1987 CALL MPI_WIN_FENCE( 0, win_rma_buf_i, ierr ) 1988 ELSE 1989 CALL MPI_WIN_FENCE( 0, win_rma_buf_r, ierr ) 1990 ENDIF 1991 DO i=0,numprocs-1 1992 IF (rma_particles(2,i) > 0) THEN 1993 IF(is_integer) THEN 1994 disp = rma_particles(1,i) 1995 CALL MPI_GET (rma_buf_i(ind_local), rma_particles(2,i), MPI_INTEGER, i, disp, & 1996 rma_particles(2,i), MPI_INTEGER, win_rma_buf_i, ierr) 1997 ind_local = ind_local + rma_particles(2,i) 1998 ELSE 1999 disp = rma_particles(1,i) 2000 CALL MPI_GET (rma_buf_r(ind_local), rma_particles(2,i), 
MPI_real, i, disp, & 2001 rma_particles(2,i), MPI_real, win_rma_buf_r, ierr) 2002 ind_local = ind_local + rma_particles(2,i) 2003 ENDIF 2004 END IF 2005 ENDDO 2006 IF(is_integer) THEN 2007 CALL MPI_WIN_FENCE( 0, win_rma_buf_i, ierr ) 2008 ELSE 2009 CALL MPI_WIN_FENCE( 0, win_rma_buf_r, ierr ) 2010 ENDIF 2011 2012 ind_local = 0 2013 2014 DO i=0,numprocs-1 2015 IF (rma_particles(2,i) > 0) THEN 2016 IF(is_integer) THEN 2017 ! 2018 !-- Copy data from remote PEs into output array 2019 DO j=0,rma_particles(2,i)-1 2020 particle_nr = remote_indices(ind_local) 2021 out_buf_i(particle_nr) = rma_buf_i(ind_local) 2022 ind_local = ind_local+1 2023 END DO 2024 ELSE 2025 ! 2026 !-- Copy data from remote PEs into output array 2027 2028 DO j=0,rma_particles(2,i)-1 2029 particle_nr = remote_indices(ind_local) 2030 out_buf_r(particle_nr) = rma_buf_r(ind_local) 2031 ind_local = ind_local+1 2032 END DO 2033 ENDIF 2034 END IF 2035 ENDDO 2036 2037 IF(ALLOCATED(rma_buf_r)) DEALLOCATE(rma_buf_r) 2038 IF(ALLOCATED(rma_buf_i)) DEALLOCATE(rma_buf_i) 1424 particle_nr = particle_nr + 1 1425 ELSE 1426 grid_particles(k,j,i)%particles(n)%particle_nr = -2 1427 ENDIF 1428 n_all = n_all + 1 1429 ENDIF 1430 ENDIF 1431 ENDDO 1432 ENDDO 1433 ENDDO 1434 ENDDO 1435 ELSEIF ( pts_percentage < 100.0_wp ) THEN 1436 finc = pts_percentage / 100 1437 fcount = 0.0_wp 1438 1439 DO i = nxl, nxr 1440 DO j = nys, nyn 1441 DO k = nzb+1, nzt 1442 DO n = 1, prt_count(k,j,i) 1443 IF ( grid_particles(k,j,i)%particles(n)%particle_mask ) THEN 1444 IF ( grid_particles(k,j,i)%particles(n)%particle_nr == -1 ) THEN 1445 fcount = fcount + finc 1446 IF ( nr_new_particle < INT( fcount ) ) THEN 1447 grid_particles(k,j,i)%particles(n)%particle_nr = particle_nr 1448 particle_nr = particle_nr + 1 1449 nr_new_particle = nr_new_particle + 1 1450 ELSE 1451 grid_particles(k,j,i)%particles(n)%particle_nr = -2 1452 ENDIF 1453 ENDIF 1454 ENDIF 1455 ENDDO 1456 ENDDO 1457 ENDDO 1458 ENDDO 1459 ELSE 1460 DO i = nxl, nxr 1461 DO j = nys, nyn 1462 DO k = nzb+1, nzt 1463 DO n = 1, prt_count(k,j,i) 1464 IF ( grid_particles(k,j,i)%particles(n)%particle_mask ) THEN 1465 IF ( grid_particles(k,j,i)%particles(n)%particle_nr == -1 ) THEN 1466 grid_particles(k,j,i)%particles(n)%particle_nr = particle_nr 1467 particle_nr = particle_nr + 1 1468 ENDIF 1469 ENDIF 1470 ENDDO 1471 ENDDO 1472 ENDDO 1473 ENDDO 1474 1475 1476 ENDIF 1477 1478 1479 1480 RETURN 1481 END SUBROUTINE dop_newly_generated_particles 1482 1483 !--------------------------------------------------------------------------------------------------! 1484 ! Description: 1485 ! ------------ 1486 ! 1487 !--------------------------------------------------------------------------------------------------! 1488 SUBROUTINE dop_count_remote_particles 1489 1490 IMPLICIT NONE 1491 1492 #if defined( __parallel ) 1493 INTEGER(iwp) :: i !< 1494 INTEGER(iwp) :: ierr !< MPI error code 1495 INTEGER(iwp) :: j !< 1496 INTEGER(iwp) :: k !< 1497 INTEGER(iwp) :: n !< 1498 INTEGER(iwp) :: iop !< 1499 INTEGER(iwp) :: particle_nr 1500 INTEGER(iwp) :: pe_nr 1501 INTEGER(iwp) :: win_size 1502 1503 INTEGER(iwp), DIMENSION(0:numprocs-1) :: part_ind 1504 1505 ! 1506 !-- Count remote particles 1507 remote_nr_particles = 0 1508 DO i = nxl, nxr 1509 DO j = nys, nyn 1510 DO k = nzb+1, nzt 1511 DO n = 1, prt_count(k,j,i) 1512 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1513 IF ( particle_nr > 0 ) THEN 1514 DO iop = 0, numprocs-1 !kk this loop has to be optimized 1515 ! 
1516 !-- Although the counting is local PE based, the following if is io processor 1517 !-- based because particles in MPI shared memory do not have to be transfered. 1518 IF ( particle_nr < io_start_index .OR. particle_nr > io_end_index ) THEN 1519 IF ( particle_nr >= mo_indices(1,iop) .AND. & 1520 particle_nr <= mo_indices(2,iop) ) THEN 1521 remote_nr_particles(2,iop) = remote_nr_particles(2,iop) + 1 1522 ENDIF 1523 ENDIF 1524 ENDDO 1525 ENDIF 1526 ENDDO 1527 ENDDO 1528 ENDDO 1529 ENDDO 1530 1531 remote_nr_particles(1,0) = 0 1532 DO i = 1, numprocs-1 1533 remote_nr_particles(1,i) = remote_nr_particles(1,i-1) + remote_nr_particles(2,i-1) 1534 ENDDO 1535 1536 win_size = SUM( remote_nr_particles(2,:) ) 1537 CALL dop_alloc_rma_mem( transfer_buffer_i, win_size, win_rma_buf_i ) 1538 CALL dop_alloc_rma_mem( transfer_buffer_r, win_size, win_rma_buf_r ) 1539 1540 CALL MPI_ALLTOALL( remote_nr_particles, 2, MPI_INTEGER, rma_particles, 2, MPI_INTEGER, comm2d, & 1541 ierr) 1542 1543 ! 1544 !-- The particles indices are the same for all output variables during one time step therefore, the 1545 !-- indices are transfered only once here. 1546 part_ind = remote_nr_particles(1,:) 1547 transfer_buffer_i = -9999 1548 1549 DO i = nxl, nxr 1550 DO j = nys, nyn 1551 DO k = nzb+1, nzt 1552 1553 DO n = 1, prt_count(k,j,i) 1554 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1555 IF ( particle_nr < io_start_index .OR. particle_nr > io_end_index ) THEN 1556 IF ( particle_nr > 0 ) THEN 1557 pe_nr = find_pe_from_particle_nr( particle_nr ) 1558 transfer_buffer_i(part_ind(pe_nr)) = particle_nr 1559 part_ind(pe_nr) = part_ind(pe_nr) + 1 1560 ENDIF 1561 ENDIF 1562 ENDDO 1563 ENDDO 1564 ENDDO 1565 ENDDO 1566 1567 CALL MPI_BARRIER( MPI_COMM_WORLD, ierr ) 1568 1569 CALL dop_get_remote_indices 1570 1571 #endif 1572 RETURN 1573 END SUBROUTINE dop_count_remote_particles 1574 1575 !--------------------------------------------------------------------------------------------------! 1576 ! Description: 1577 ! ------------ 1578 !> Fill ouput buffer. 1579 !> Local variables values are copied into output buffer. Local here means local to shared memory 1580 !> group. 1581 !> Remote variable values are copied into transfer buffer this is done by all threads. 1582 !--------------------------------------------------------------------------------------------------! 1583 SUBROUTINE dop_fill_out_buf (var) 1584 1585 IMPLICIT NONE 1586 1587 CHARACTER(LEN=32) :: local_name 1588 1589 INTEGER(iwp) :: i !< 1590 INTEGER(iwp) :: j !< 1591 INTEGER(iwp) :: k !< 1592 INTEGER(iwp) :: local_len !< 1593 INTEGER(iwp) :: n !< 1594 INTEGER(iwp) :: particle_nr !< 1595 INTEGER(iwp) :: pe_nr !< 1596 INTEGER(idp) :: pval !< 1597 1598 INTEGER(iwp), DIMENSION(0:numprocs-1) :: part_ind 1599 1600 TYPE(var_def), INTENT(IN) :: var 1601 1602 1603 part_ind = remote_nr_particles(1,:) 1604 transfer_buffer_i = -9998 1605 ! 1606 !-- Filling output buffer is the same for variable name and variable name_const, therefore set 1607 !-- local_name without _const 1608 local_len = INDEX( TRIM( var%name ), '_const' ) 1609 IF ( local_len == 0 ) THEN 1610 local_name = var%name 1611 ELSE 1612 local_name = var%name(1:local_len-1) 1613 ENDIF 1614 ! 1615 !-- In this subroutine the particles are seperated: 1616 ! 1617 !-- All particles which are located in the share memory area of the respective IO thread are copied 1618 !-- into the output buffer. The other output particle are copied into the transfer buffer. 
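Since every branch of the long SELECT CASE that follows repeats the same pattern, the routing rule is condensed here as an illustrative sketch; 'value' stands for whichever particle attribute the respective branch copies, all other names are the module entities used throughout this file:

    !-- Sketch of the per-particle routing applied in every CASE branch below
    IF ( particle_nr >= io_start_index  .AND.  particle_nr <= io_end_index )  THEN
       out_buf_r(particle_nr) = value                   ! inside the local IO window: write
                                                        ! directly into the shared-memory
                                                        ! output buffer
    ELSEIF ( particle_nr > 0 )  THEN                    ! selected, but owned by a remote IO group
       pe_nr = find_pe_from_particle_nr( particle_nr )  ! owning PE, presumably determined from
                                                        ! the mo_indices ranges (cf.
                                                        ! dop_count_remote_particles)
       transfer_buffer_r(part_ind(pe_nr)) = value       ! stage the value for the one-sided
                                                        ! MPI_GET in dop_get_remote_particle
       part_ind(pe_nr) = part_ind(pe_nr) + 1
    ENDIF                                               ! particle_nr <= 0: not selected for output

Integer attributes use out_buf_i and transfer_buffer_i in exactly the same way.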
1619 SELECT CASE ( TRIM( local_name ) ) 1620 CASE ( 'origin_x' ) 1621 DO i = nxl, nxr 1622 DO j = nys, nyn 1623 DO k = nzb+1, nzt 1624 DO n =1, prt_count(k,j,i) 1625 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1626 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1627 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%origin_x 1628 ELSEIF ( particle_nr > 0 ) THEN 1629 pe_nr = find_pe_from_particle_nr(particle_nr) 1630 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i) & 1631 %particles(n)%origin_x 1632 part_ind(pe_nr) = part_ind(pe_nr) + 1 1633 ENDIF 1634 ENDDO 1635 ENDDO 1636 ENDDO 1637 ENDDO 1638 CASE ( 'origin_y' ) 1639 DO i = nxl, nxr 1640 DO j = nys, nyn 1641 DO k = nzb+1, nzt 1642 DO n = 1, prt_count(k,j,i) 1643 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1644 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1645 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%origin_y 1646 ELSEIF ( particle_nr > 0 ) THEN 1647 pe_nr = find_pe_from_particle_nr (particle_nr) 1648 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i) & 1649 %particles(n)%origin_y 1650 part_ind(pe_nr) = part_ind(pe_nr) + 1 1651 ENDIF 1652 ENDDO 1653 ENDDO 1654 ENDDO 1655 ENDDO 1656 CASE ( 'origin_z' ) 1657 DO i = nxl, nxr 1658 DO j = nys, nyn 1659 DO k = nzb+1, nzt 1660 DO n = 1, prt_count(k,j,i) 1661 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1662 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1663 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%origin_z 1664 ELSEIF ( particle_nr > 0 ) THEN 1665 pe_nr = find_pe_from_particle_nr (particle_nr) 1666 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i) & 1667 %particles(n)%origin_z 1668 part_ind(pe_nr) = part_ind(pe_nr) + 1 1669 ENDIF 1670 ENDDO 1671 ENDDO 1672 ENDDO 1673 ENDDO 1674 CASE ( 'id_low' ) 1675 DO i = nxl, nxr 1676 DO j = nys, nyn 1677 DO k = nzb+1, nzt 1678 DO n = 1, prt_count(k,j,i) 1679 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1680 pval = IBITS( grid_particles(k,j,i)%particles(n)%id,0,32 ) 1681 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1682 out_buf_i(particle_nr) = INT(pval,4) 1683 ELSEIF ( particle_nr > 0 ) THEN 1684 pe_nr = find_pe_from_particle_nr(particle_nr) 1685 transfer_buffer_i(part_ind(pe_nr)) = INT( pval, 4 ) 1686 part_ind(pe_nr) = part_ind(pe_nr) + 1 1687 ENDIF 1688 ENDDO 1689 ENDDO 1690 ENDDO 1691 ENDDO 1692 CASE ( 'id_high' ) 1693 DO i = nxl, nxr 1694 DO j = nys, nyn 1695 DO k = nzb+1, nzt 1696 DO n = 1, prt_count(k,j,i) 1697 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1698 pval = IBITS( grid_particles(k,j,i)%particles(n)%id,32,32 ) 1699 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1700 out_buf_i(particle_nr) = INT(pval,4) 1701 ELSEIF ( particle_nr > 0 ) THEN 1702 pe_nr = find_pe_from_particle_nr(particle_nr) 1703 transfer_buffer_i(part_ind(pe_nr)) = INT( pval, 4 ) 1704 part_ind(pe_nr) = part_ind(pe_nr) + 1 1705 ENDIF 1706 ENDDO 1707 ENDDO 1708 ENDDO 1709 ENDDO 1710 CASE ( 'particle_nr' ) 1711 DO i = nxl, nxr 1712 DO j = nys, nyn 1713 DO k = nzb+1, nzt 1714 DO n = 1, prt_count(k,j,i) 1715 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1716 IF ( particle_nr >= io_start_index .AND. 
particle_nr <= io_end_index ) THEN 1717 out_buf_i(particle_nr) = grid_particles(k,j,i)%particles(n)%particle_nr 1718 ELSEIF ( particle_nr > 0 ) THEN 1719 pe_nr = find_pe_from_particle_nr(particle_nr) 1720 transfer_buffer_i(part_ind(pe_nr)) = grid_particles(k,j,i) & 1721 %particles(n)%particle_nr 1722 part_ind(pe_nr) = part_ind(pe_nr) + 1 1723 ENDIF 1724 ENDDO 1725 ENDDO 1726 ENDDO 1727 ENDDO 1728 CASE ( 'class' ) 1729 DO i = nxl, nxr 1730 DO j = nys, nyn 1731 DO k = nzb+1, nzt 1732 DO n = 1, prt_count(k,j,i) 1733 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1734 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1735 out_buf_i(particle_nr) = grid_particles(k,j,i)%particles(n)%class 1736 ELSEIF ( particle_nr > 0 ) THEN 1737 pe_nr = find_pe_from_particle_nr(particle_nr) 1738 transfer_buffer_i(part_ind(pe_nr)) = grid_particles(k,j,i) & 1739 %particles(n)%class 1740 part_ind(pe_nr) = part_ind(pe_nr) + 1 1741 ENDIF 1742 ENDDO 1743 ENDDO 1744 ENDDO 1745 ENDDO 1746 CASE ( 'group' ) 1747 DO i = nxl, nxr 1748 DO j = nys, nyn 1749 DO k = nzb+1, nzt 1750 DO n = 1, prt_count(k,j,i) 1751 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1752 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1753 out_buf_i(particle_nr) = grid_particles(k,j,i)%particles(n)%group 1754 ELSEIF ( particle_nr > 0 ) THEN 1755 pe_nr = find_pe_from_particle_nr(particle_nr) 1756 transfer_buffer_i(part_ind(pe_nr)) = grid_particles(k,j,i) & 1757 %particles(n)%group 1758 part_ind(pe_nr) = part_ind(pe_nr) + 1 1759 ENDIF 1760 ENDDO 1761 ENDDO 1762 ENDDO 1763 ENDDO 1764 CASE ( 'x' ) 1765 DO i = nxl, nxr 1766 DO j = nys, nyn 1767 DO k = nzb+1, nzt 1768 DO n = 1, prt_count(k,j,i) 1769 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1770 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1771 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%x 1772 ELSEIF ( particle_nr > 0 ) THEN 1773 pe_nr = find_pe_from_particle_nr(particle_nr) 1774 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%x 1775 part_ind(pe_nr) = part_ind(pe_nr) + 1 1776 ENDIF 1777 ENDDO 1778 ENDDO 1779 ENDDO 1780 ENDDO 1781 CASE ( 'y' ) 1782 DO i = nxl, nxr 1783 DO j = nys, nyn 1784 DO k = nzb+1, nzt 1785 DO n = 1, prt_count(k,j,i) 1786 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1787 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1788 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%y 1789 ELSEIF ( particle_nr > 0 ) THEN 1790 pe_nr = find_pe_from_particle_nr(particle_nr) 1791 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%y 1792 part_ind(pe_nr) = part_ind(pe_nr) + 1 1793 ENDIF 1794 ENDDO 1795 ENDDO 1796 ENDDO 1797 ENDDO 1798 CASE ( 'z' ) 1799 DO i = nxl, nxr 1800 DO j = nys, nyn 1801 DO k = nzb+1, nzt 1802 DO n = 1, prt_count(k,j,i) 1803 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1804 IF ( particle_nr >= io_start_index .AND. 
particle_nr <= io_end_index ) THEN 1805 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%z 1806 ELSEIF ( particle_nr > 0 ) THEN 1807 pe_nr = find_pe_from_particle_nr(particle_nr) 1808 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%z 1809 part_ind(pe_nr) = part_ind(pe_nr) + 1 1810 ENDIF 1811 ENDDO 1812 ENDDO 1813 ENDDO 1814 ENDDO 1815 CASE ( 'speed_x' ) 1816 DO i = nxl, nxr 1817 DO j = nys, nyn 1818 DO k = nzb+1, nzt 1819 DO n = 1, prt_count(k,j,i) 1820 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1821 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1822 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%speed_x 1823 ELSEIF ( particle_nr > 0 ) THEN 1824 pe_nr = find_pe_from_particle_nr(particle_nr) 1825 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i) & 1826 %particles(n)%speed_x 1827 part_ind(pe_nr) = part_ind(pe_nr) + 1 1828 ENDIF 1829 ENDDO 1830 ENDDO 1831 ENDDO 1832 ENDDO 1833 CASE ( 'speed_y' ) 1834 DO i = nxl, nxr 1835 DO j = nys, nyn 1836 DO k = nzb+1, nzt 1837 DO n = 1, prt_count(k,j,i) 1838 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1839 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1840 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%speed_y 1841 ELSEIF ( particle_nr > 0 ) THEN 1842 pe_nr = find_pe_from_particle_nr(particle_nr) 1843 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i) & 1844 %particles(n)%speed_y 1845 part_ind(pe_nr) = part_ind(pe_nr) + 1 1846 ENDIF 1847 ENDDO 1848 ENDDO 1849 ENDDO 1850 ENDDO 1851 CASE ( 'speed_z' ) 1852 DO i = nxl, nxr 1853 DO j = nys, nyn 1854 DO k = nzb+1, nzt 1855 DO n = 1, prt_count(k,j,i) 1856 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1857 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1858 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%speed_z 1859 ELSEIF ( particle_nr > 0 ) THEN 1860 pe_nr = find_pe_from_particle_nr (particle_nr) 1861 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%speed_z 1862 part_ind(pe_nr) = part_ind(pe_nr) + 1 1863 ENDIF 1864 ENDDO 1865 ENDDO 1866 ENDDO 1867 ENDDO 1868 CASE ( 'radius' ) 1869 DO i=nxl,nxr 1870 DO j=nys,nyn 1871 DO k=nzb+1,nzt 1872 DO n=1,prt_count(k,j,i) 1873 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1874 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1875 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%radius 1876 ELSEIF ( particle_nr > 0 ) THEN 1877 pe_nr = find_pe_from_particle_nr(particle_nr) 1878 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i) & 1879 %particles(n)%radius 1880 part_ind(pe_nr) = part_ind(pe_nr) + 1 1881 ENDIF 1882 ENDDO 1883 ENDDO 1884 ENDDO 1885 ENDDO 1886 CASE ( 'age' ) 1887 DO i = nxl, nxr 1888 DO j = nys, nyn 1889 DO k = nzb+1, nzt 1890 DO n = 1, prt_count(k,j,i) 1891 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1892 IF ( particle_nr >= io_start_index .AND. 
particle_nr <= io_end_index ) THEN 1893 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%age 1894 ELSEIF ( particle_nr > 0 ) THEN 1895 pe_nr = find_pe_from_particle_nr(particle_nr) 1896 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%age 1897 part_ind(pe_nr) = part_ind(pe_nr) + 1 1898 ENDIF 1899 ENDDO 1900 ENDDO 1901 ENDDO 1902 ENDDO 1903 CASE ( 'age_m' ) 1904 DO i = nxl, nxr 1905 DO j = nys, nyn 1906 DO k = nzb+1, nzt 1907 DO n = 1, prt_count(k,j,i) 1908 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1909 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1910 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%age_m 1911 ELSEIF ( particle_nr > 0 ) THEN 1912 pe_nr = find_pe_from_particle_nr(particle_nr) 1913 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i) & 1914 %particles(n)%age_m 1915 part_ind(pe_nr) = part_ind(pe_nr) + 1 1916 ENDIF 1917 ENDDO 1918 ENDDO 1919 ENDDO 1920 ENDDO 1921 CASE ( 'dt_sum' ) 1922 DO i = nxl, nxr 1923 DO j = nys, nyn 1924 DO k = nzb+1, nzt 1925 DO n = 1, prt_count(k,j,i) 1926 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1927 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1928 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%dt_sum 1929 ELSEIF ( particle_nr > 0 ) THEN 1930 pe_nr = find_pe_from_particle_nr(particle_nr) 1931 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i) & 1932 %particles(n)%dt_sum 1933 part_ind(pe_nr) = part_ind(pe_nr) + 1 1934 ENDIF 1935 ENDDO 1936 ENDDO 1937 ENDDO 1938 ENDDO 1939 CASE( 'e_m' ) 1940 DO i = nxl, nxr 1941 DO j = nys, nyn 1942 DO k = nzb+1, nzt 1943 DO n = 1, prt_count(k,j,i) 1944 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1945 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1946 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%e_m 1947 ELSEIF ( particle_nr > 0 ) THEN 1948 pe_nr = find_pe_from_particle_nr(particle_nr) 1949 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i)%particles(n)%e_m 1950 part_ind(pe_nr) = part_ind(pe_nr) + 1 1951 ENDIF 1952 ENDDO 1953 ENDDO 1954 ENDDO 1955 ENDDO 1956 CASE ( 'weight_factor' ) 1957 DO i = nxl, nxr 1958 DO j = nys, nyn 1959 DO k = nzb+1, nzt 1960 DO n = 1, prt_count(k,j,i) 1961 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1962 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1963 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%weight_factor 1964 ELSEIF ( particle_nr > 0 ) THEN 1965 pe_nr = find_pe_from_particle_nr(particle_nr) 1966 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i) & 1967 %particles(n)%weight_factor 1968 part_ind(pe_nr) = part_ind(pe_nr) + 1 1969 ENDIF 1970 ENDDO 1971 ENDDO 1972 ENDDO 1973 ENDDO 1974 CASE ( 'aux1' ) 1975 DO i = nxl, nxr 1976 DO j = nys, nyn 1977 DO k = nzb+1, nzt 1978 DO n = 1, prt_count(k,j,i) 1979 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1980 IF ( particle_nr >= io_start_index .AND. 
particle_nr <= io_end_index ) THEN 1981 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%aux1 1982 ELSEIF ( particle_nr > 0 ) THEN 1983 pe_nr = find_pe_from_particle_nr(particle_nr) 1984 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i) & 1985 %particles(n)%aux1 1986 part_ind(pe_nr) = part_ind(pe_nr) + 1 1987 ENDIF 1988 ENDDO 1989 ENDDO 1990 ENDDO 1991 ENDDO 1992 CASE ( 'aux2' ) 1993 DO i = nxl, nxr 1994 DO j = nys, nyn 1995 DO k = nzb+1, nzt 1996 DO n = 1, prt_count(k,j,i) 1997 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 1998 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 1999 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%aux2 2000 ELSEIF ( particle_nr > 0 ) THEN 2001 pe_nr = find_pe_from_particle_nr(particle_nr) 2002 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i) & 2003 %particles(n)%aux2 2004 part_ind(pe_nr) = part_ind(pe_nr) + 1 2005 ENDIF 2006 ENDDO 2007 ENDDO 2008 ENDDO 2009 ENDDO 2010 CASE ( 'rvar1' ) 2011 DO i = nxl, nxr 2012 DO j = nys, nyn 2013 DO k = nzb+1, nzt 2014 DO n = 1, prt_count(k,j,i) 2015 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 2016 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 2017 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%rvar1 2018 ELSEIF ( particle_nr > 0 ) THEN 2019 pe_nr = find_pe_from_particle_nr(particle_nr) 2020 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i) & 2021 %particles(n)%rvar1 2022 part_ind(pe_nr) = part_ind(pe_nr) + 1 2023 ENDIF 2024 ENDDO 2025 ENDDO 2026 ENDDO 2027 ENDDO 2028 CASE ( 'rvar2' ) 2029 DO i = nxl, nxr 2030 DO j = nys, nyn 2031 DO k = nzb+1, nzt 2032 DO n = 1, prt_count(k,j,i) 2033 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 2034 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 2035 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%rvar2 2036 ELSEIF ( particle_nr > 0 ) THEN 2037 pe_nr = find_pe_from_particle_nr (particle_nr) 2038 transfer_buffer_r (part_ind(pe_nr)) = grid_particles(k,j,i) & 2039 %particles(n)%rvar2 2040 part_ind(pe_nr) = part_ind(pe_nr) + 1 2041 ENDIF 2042 ENDDO 2043 ENDDO 2044 ENDDO 2045 ENDDO 2046 CASE ( 'rvar3' ) 2047 DO i = nxl, nxr 2048 DO j = nys, nyn 2049 DO k = nzb+1, nzt 2050 DO n = 1, prt_count(k,j,i) 2051 particle_nr = grid_particles(k,j,i)%particles(n)%particle_nr 2052 IF ( particle_nr >= io_start_index .AND. particle_nr <= io_end_index ) THEN 2053 out_buf_r(particle_nr) = grid_particles(k,j,i)%particles(n)%rvar3 2054 ELSEIF ( particle_nr > 0 ) THEN 2055 pe_nr = find_pe_from_particle_nr(particle_nr) 2056 transfer_buffer_r(part_ind(pe_nr)) = grid_particles(k,j,i) & 2057 %particles(n)%rvar3 2058 part_ind(pe_nr) = part_ind(pe_nr) + 1 2059 ENDIF 2060 ENDDO 2061 ENDDO 2062 ENDDO 2063 ENDDO 2064 END SELECT 2065 2066 RETURN 2067 END SUBROUTINE dop_fill_out_buf 2068 2069 #if defined( __parallel ) 2070 2071 !--------------------------------------------------------------------------------------------------! 2072 ! Description: 2073 ! ------------ 2074 !> Get indices (displacement) of remot particles 2075 !--------------------------------------------------------------------------------------------------! 
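!> The fetch uses one-sided MPI communication: the two MPI_WIN_FENCE calls on win_rma_buf_i open
!> and close an access epoch; within it, MPI_GET fetches from each PE i the index block destined
!> for this PE, using rma_particles(1,i) as displacement and rma_particles(2,i) as element count.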
2076 SUBROUTINE dop_get_remote_indices 2077 2078 IMPLICIT NONE 2079 2080 INTEGER(iwp) :: bufsize !< size of remote indices array 2081 INTEGER(iwp) :: i !< 2082 INTEGER(iwp) :: ierr !< MPI error code 2083 INTEGER(iwp) :: ind_local !< index in remore indices array 2084 INTEGER(KIND=MPI_ADDRESS_KIND) :: disp !< displacement in RMA window 2085 2086 2087 bufsize = SUM( rma_particles(2,:) ) 2088 ALLOCATE( remote_indices(0:bufsize) ) 2089 remote_indices = -1 2090 2091 ind_local = 0 2092 CALL MPI_WIN_FENCE( 0, win_rma_buf_i, ierr ) 2093 DO i = 0, numprocs-1 2094 IF ( rma_particles(2,i) > 0 ) THEN 2095 disp = rma_particles(1,i) 2096 IF ( rma_particles(2,i) > 0 ) THEN 2097 CALL MPI_GET( remote_indices(ind_local), rma_particles(2,i), MPI_INTEGER, i, disp, & 2098 rma_particles(2,i), MPI_INTEGER, win_rma_buf_i, ierr ) 2099 ind_local = ind_local + rma_particles(2,i) 2100 ENDIF 2101 ENDIF 2102 ENDDO 2103 CALL MPI_WIN_FENCE( 0, win_rma_buf_i, ierr ) 2104 2105 RETURN 2106 END SUBROUTINE dop_get_remote_indices 2107 #endif 2108 2109 !--------------------------------------------------------------------------------------------------! 2110 ! Description: 2111 ! ------------ 2112 !--------------------------------------------------------------------------------------------------! 2113 SUBROUTINE dop_get_remote_particle (is_integer) 2114 2115 IMPLICIT NONE 2116 2117 LOGICAL, INTENT(IN) :: is_integer 2118 2119 2120 #if defined( __parallel ) 2121 INTEGER(iwp) :: bufsize !< size of remote data array 2122 INTEGER(iwp) :: i !< 2123 INTEGER(iwp) :: ierr !< MPI error code 2124 INTEGER(iwp) :: ind_local !< index in remore indices array 2125 INTEGER(iwp) :: j !< 2126 INTEGER(iwp) :: particle_nr !< particle number 2127 INTEGER(KIND=MPI_ADDRESS_KIND) :: disp !< displacement in RMA window 2128 2129 INTEGER(iwp), ALLOCATABLE, DIMENSION(:) :: rma_buf_i !< buffer to receive remote data (INTEGER) 2130 2131 REAL(sp), ALLOCATABLE, DIMENSION(:) :: rma_buf_r !< buffer to receive remote data (REAL) 2132 2133 2134 bufsize = SUM( rma_particles(2,:) ) 2135 ind_local = 0 2136 ALLOCATE( rma_buf_r(0:bufsize-1) ) 2137 ALLOCATE( rma_buf_i(0:bufsize-1) ) 2138 2139 IF ( is_integer ) THEN 2140 CALL MPI_WIN_FENCE( 0, win_rma_buf_i, ierr ) 2141 ELSE 2142 CALL MPI_WIN_FENCE( 0, win_rma_buf_r, ierr ) 2143 ENDIF 2144 DO i = 0, numprocs-1 2145 IF ( rma_particles(2,i) > 0 ) THEN 2146 IF ( is_integer ) THEN 2147 disp = rma_particles(1,i) 2148 CALL MPI_GET( rma_buf_i(ind_local), rma_particles(2,i), MPI_INTEGER, i, disp, & 2149 rma_particles(2,i), MPI_INTEGER, win_rma_buf_i, ierr ) 2150 ind_local = ind_local + rma_particles(2,i) 2151 ELSE 2152 disp = rma_particles(1,i) 2153 CALL MPI_GET( rma_buf_r(ind_local), rma_particles(2,i), MPI_real, i, disp, & 2154 rma_particles(2,i), MPI_real, win_rma_buf_r, ierr ) 2155 ind_local = ind_local + rma_particles(2,i) 2156 ENDIF 2157 ENDIF 2158 ENDDO 2159 IF ( is_integer ) THEN 2160 CALL MPI_WIN_FENCE( 0, win_rma_buf_i, ierr ) 2161 ELSE 2162 CALL MPI_WIN_FENCE( 0, win_rma_buf_r, ierr ) 2163 ENDIF 2164 2165 ind_local = 0 2166 2167 DO i = 0, numprocs-1 2168 IF ( rma_particles(2,i) > 0 ) THEN 2169 IF ( is_integer ) THEN 2170 ! 2171 !-- Copy data from remote PEs into output array 2172 DO j = 0, rma_particles(2,i)-1 2173 particle_nr = remote_indices(ind_local) 2174 out_buf_i(particle_nr) = rma_buf_i(ind_local) 2175 ind_local = ind_local + 1 2176 ENDDO 2177 ELSE 2178 ! 
2179 !-- Copy data from remote PEs into output array 2180 DO j = 0, rma_particles(2,i)-1 2181 particle_nr = remote_indices(ind_local) 2182 out_buf_r(particle_nr) = rma_buf_r(ind_local) 2183 ind_local = ind_local + 1 2184 ENDDO 2185 ENDIF 2186 ENDIF 2187 ENDDO 2188 2189 IF ( ALLOCATED( rma_buf_r) ) DEALLOCATE( rma_buf_r ) 2190 IF ( ALLOCATED( rma_buf_i) ) DEALLOCATE( rma_buf_i ) 2039 2191 #else 2040 IF ( is_integer) THEN2192 IF ( is_integer ) THEN 2041 2193 ENDIF 2042 2194 #endif 2043 2195 2044 RETURN 2045 END SUBROUTINE dop_get_remote_particle 2046 2047 #if defined( __parallel ) 2048 ! 2049 !- Allocate memory and cread window for one-sided communication (INTEGER 1-D array) 2050 SUBROUTINE dop_alloc_rma_mem_i1( array, idim1, win ) 2051 IMPLICIT NONE 2052 2053 INTEGER(isp), DIMENSION(:), POINTER, INTENT(INOUT) :: array !< 2054 INTEGER(iwp), INTENT(IN) :: idim1 !< 2055 INTEGER(iwp), INTENT(OUT) :: win !< 2056 2057 INTEGER(KIND=MPI_ADDRESS_KIND) :: winsize !< size of RMA window 2058 INTEGER :: ierr !< MPI error code 2059 2060 winsize = max(idim1, 2) 2061 2062 ALLOCATE(array(0:winsize-1)) 2063 2064 winsize = winsize * isp 2065 2066 CALL MPI_WIN_CREATE( array, winsize, isp, MPI_INFO_NULL, comm2d, win, ierr ) 2067 2068 array = -1 2069 2070 CALL MPI_WIN_FENCE( 0, win, ierr ) 2071 2072 END SUBROUTINE dop_alloc_rma_mem_i1 2073 #endif 2074 2075 #if defined( __parallel ) 2076 ! 2077 !- Allocate memory and cread window for one-sided communication (REAL 1-D array) 2078 SUBROUTINE dop_alloc_rma_mem_r1( array, idim1, win ) 2079 IMPLICIT NONE 2080 2081 REAL(sp), DIMENSION(:), POINTER, INTENT(INOUT) :: array !< 2082 INTEGER(iwp), INTENT(IN) :: idim1 !< 2083 INTEGER(iwp), INTENT(OUT) :: win !< 2084 2085 INTEGER(KIND=MPI_ADDRESS_KIND) :: winsize !< size of RMA window 2086 INTEGER :: ierr !< MPI error code 2087 2088 2089 winsize = max(idim1, 2) 2090 2091 2092 ALLOCATE(array(0:winsize-1)) 2093 2094 winsize = winsize * sp 2095 2096 CALL MPI_WIN_CREATE( array, winsize, sp, MPI_INFO_NULL, comm2d, win, ierr ) 2097 2098 array = -1.0 2099 2100 CALL MPI_WIN_FENCE( 0, win, ierr ) 2101 2102 END SUBROUTINE dop_alloc_rma_mem_r1 2103 #endif 2104 2105 2106 SUBROUTINE deallocate_and_free 2107 IMPLICIT NONE 2108 2109 #if defined( __parallel ) 2110 INTEGER :: ierr !< MPI error code 2111 #endif 2112 2113 #if defined( __parallel ) 2114 CALL MPI_Win_free (win_rma_buf_i, ierr) 2115 CALL MPI_Win_free (win_rma_buf_r, ierr) 2116 #endif 2117 IF (ALLOCATED(remote_indices)) DEALLOCATE(remote_indices) 2118 2119 DEALLOCATE(transfer_buffer_i) 2120 DEALLOCATE(transfer_buffer_r) 2121 2122 RETURN 2123 2124 END SUBROUTINE deallocate_and_free 2125 2126 FUNCTION find_pe_from_particle_nr (particle_nr) RESULT (pe_nr) 2127 IMPLICIT NONE 2128 2129 INTEGER(iwp), INTENT(IN) :: particle_nr 2130 INTEGER(iwp) :: pe_nr !< 2131 INTEGER(iwp) :: base !< 2132 INTEGER(iwp) :: pnr !< 2133 2134 IF(irregular_distribubtion) THEN 2135 IF(particle_nr <= nr_particles_rest*nr_particles_PE) THEN 2136 pe_nr = (particle_nr-1)/nr_particles_PE 2137 ELSE 2138 base = nr_particles_rest*nr_particles_PE 2139 pnr = particle_nr - base 2140 pe_nr = (pnr-1)/(nr_particles_PE-1) 2141 pe_nr = pe_nr+nr_particles_rest 2142 ENDIF 2143 ELSE 2144 pe_nr = (particle_nr-1)/nr_particles_PE 2145 ENDIF 2146 2147 2148 !kk This error test is to detect programming errors. 
For performance reasons it can be removed in 2149 !kk the final, stable version 2150 2151 END FUNCTION find_pe_from_particle_nr 2152 2153 END MODULE data_output_particle_mod 2196 RETURN 2197 END SUBROUTINE dop_get_remote_particle 2198 2199 #if defined( __parallel ) 2200 !--------------------------------------------------------------------------------------------------! 2201 ! Description: 2202 ! ------------ 2203 !> Allocate memory and cread window for one-sided communication (INTEGER 1-D array) 2204 !--------------------------------------------------------------------------------------------------! 2205 SUBROUTINE dop_alloc_rma_mem_i1( array, idim1, win ) 2206 2207 IMPLICIT NONE 2208 2209 INTEGER(iwp), INTENT(IN) :: idim1 !< 2210 INTEGER :: ierr !< MPI error code 2211 INTEGER(iwp), INTENT(OUT) :: win !< 2212 INTEGER(KIND=MPI_ADDRESS_KIND) :: winsize !< size of RMA window 2213 2214 INTEGER(isp), DIMENSION(:), POINTER, INTENT(INOUT) :: array !< 2215 2216 2217 winsize = MAX( idim1, 2 ) 2218 2219 ALLOCATE( array(0:winsize-1) ) 2220 2221 winsize = winsize * isp 2222 2223 CALL MPI_WIN_CREATE( array, winsize, isp, MPI_INFO_NULL, comm2d, win, ierr ) 2224 2225 array = -1 2226 2227 CALL MPI_WIN_FENCE( 0, win, ierr ) 2228 2229 END SUBROUTINE dop_alloc_rma_mem_i1 2230 #endif 2231 2232 #if defined( __parallel ) 2233 !--------------------------------------------------------------------------------------------------! 2234 ! Description: 2235 ! ------------ 2236 !> Allocate memory and cread window for one-sided communication (REAL 1-D array) 2237 !--------------------------------------------------------------------------------------------------! 2238 SUBROUTINE dop_alloc_rma_mem_r1( array, idim1, win ) 2239 2240 IMPLICIT NONE 2241 2242 INTEGER(iwp), INTENT(IN) :: idim1 !< 2243 INTEGER :: ierr !< MPI error code 2244 INTEGER(iwp), INTENT(OUT) :: win !< 2245 INTEGER(KIND=MPI_ADDRESS_KIND) :: winsize !< size of RMA window 2246 2247 REAL(sp), DIMENSION(:), POINTER, INTENT(INOUT) :: array !< 2248 2249 2250 winsize = MAX( idim1, 2 ) 2251 2252 ALLOCATE( array(0:winsize-1) ) 2253 2254 winsize = winsize * sp 2255 2256 CALL MPI_WIN_CREATE( array, winsize, sp, MPI_INFO_NULL, comm2d, win, ierr ) 2257 2258 array = -1.0_wp 2259 2260 CALL MPI_WIN_FENCE( 0, win, ierr ) 2261 2262 END SUBROUTINE dop_alloc_rma_mem_r1 2263 #endif 2264 2265 !--------------------------------------------------------------------------------------------------! 2266 ! Description: 2267 ! ------------ 2268 !--------------------------------------------------------------------------------------------------! 
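!> Free the MPI RMA windows and deallocate the remote-index and transfer buffers used for
!> particle output.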
2269 SUBROUTINE deallocate_and_free 2270 2271 IMPLICIT NONE 2272 2273 #if defined( __parallel ) 2274 INTEGER :: ierr !< MPI error code 2275 #endif 2276 2277 #if defined( __parallel ) 2278 CALL MPI_WIN_FREE( win_rma_buf_i, ierr ) 2279 CALL MPI_WIN_FREE( win_rma_buf_r, ierr ) 2280 #endif 2281 IF ( ALLOCATED( remote_indices ) ) DEALLOCATE( remote_indices ) 2282 2283 DEALLOCATE( transfer_buffer_i ) 2284 DEALLOCATE( transfer_buffer_r ) 2285 2286 RETURN 2287 2288 END SUBROUTINE deallocate_and_free 2289 2290 2291 FUNCTION find_pe_from_particle_nr( particle_nr ) RESULT( pe_nr ) 2292 IMPLICIT NONE 2293 2294 INTEGER(iwp) :: base !< 2295 INTEGER(iwp), INTENT(IN) :: particle_nr 2296 INTEGER(iwp) :: pe_nr !< 2297 INTEGER(iwp) :: pnr !< 2298 2299 IF ( irregular_distribubtion ) THEN 2300 IF ( particle_nr <= nr_particles_rest * nr_particles_pe ) THEN 2301 pe_nr = ( particle_nr - 1 ) / nr_particles_pe 2302 ELSE 2303 base = nr_particles_rest * nr_particles_pe 2304 pnr = particle_nr - base 2305 pe_nr = ( pnr - 1 ) / ( nr_particles_pe - 1 ) 2306 pe_nr = pe_nr + nr_particles_rest 2307 ENDIF 2308 ELSE 2309 pe_nr = ( particle_nr - 1 ) / nr_particles_pe 2310 ENDIF 2311 2312 2313 !-- kk This error test is to detect programming errors. For performance reasons it can be removed 2314 !-- kk in the final, stable version. 2315 2316 END FUNCTION find_pe_from_particle_nr 2317 2318 END MODULE data_output_particle_mod
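The integer arithmetic in find_pe_from_particle_nr can be checked in isolation. The following standalone sketch reproduces the same mapping for an assumed irregular distribution in which the first nr_particles_rest PEs hold nr_particles_pe output particles each and all later PEs hold one particle less; the parameter values (4 particles per PE, 2 "long" PEs) are illustrative only and not taken from the module.

PROGRAM pe_mapping_sketch
!
!-- Standalone illustration of the particle-number to IO-PE mapping used by
!-- find_pe_from_particle_nr. All values below are assumptions for this sketch only.
   IMPLICIT NONE

   INTEGER, PARAMETER ::  nr_particles_pe   = 4  !< particles per PE on the first PEs (assumed)
   INTEGER, PARAMETER ::  nr_particles_rest = 2  !< number of PEs holding one extra particle (assumed)

   INTEGER ::  base         !< last particle number held by the "long" PEs
   INTEGER ::  particle_nr  !< 1-based global output particle number
   INTEGER ::  pe_nr        !< rank of the IO PE owning the particle
   INTEGER ::  pnr          !< particle number relative to base

   DO  particle_nr = 1, 14
!
!--   PEs 0 .. nr_particles_rest-1 hold nr_particles_pe particles, all later PEs one less.
      IF ( particle_nr <= nr_particles_rest * nr_particles_pe )  THEN
         pe_nr = ( particle_nr - 1 ) / nr_particles_pe
      ELSE
         base  = nr_particles_rest * nr_particles_pe
         pnr   = particle_nr - base
         pe_nr = ( pnr - 1 ) / ( nr_particles_pe - 1 ) + nr_particles_rest
      ENDIF
      PRINT*, 'particle', particle_nr, ' -> PE', pe_nr
   ENDDO

END PROGRAM pe_mapping_sketch

With these assumed values, particles 1-8 map to PEs 0 and 1 (four each), particles 9-11 to PE 2 and particles 12-14 to PE 3 (three each), matching the intent of the irregular-distribution branch.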