Changeset 4015 for palm/trunk
Timestamp: Jun 5, 2019 1:25:35 PM
Location: palm/trunk/SOURCE
Files: 2 edited
palm/trunk/SOURCE/cpulog_mod.f90 (r3885 → r4015)

  ! -----------------
  ! $Id$
+ ! all reals changed to double precision in order to work with 32-bit working precision,
+ ! otherwise calculated time intervals would mostly give zero
+ !
+ ! 3885 2019-04-11 11:29:34Z kanani
  ! Changes related to global restructuring of location messages and introduction
  ! of additional debug messages
…
-     USE control_parameters,                                                   &
-         ONLY:  message_string, nr_timesteps_this_run, run_description_header, &
-                synchronous_exchange
+     USE control_parameters,                                                                       &
+         ONLY:  message_string, nr_timesteps_this_run, run_description_header, synchronous_exchange

-     USE indices,                                                              &
+     USE indices,                                                                                  &
          ONLY:  ngp_3d, nx, ny, nz
…
      PRIVATE
-     PUBLIC  cpu_log, cpu_log_barrierwait, cpu_log_nowait, cpu_statistics,     &
-             initial_wallclock_time, log_point, log_point_s
+     PUBLIC  cpu_log, cpu_log_barrierwait, cpu_log_nowait, cpu_statistics, initial_wallclock_time, &
+             log_point, log_point_s

      INTERFACE cpu_log
…
      LOGICAL, PARAMETER ::  cpu_log_nowait = .FALSE.  !<

-     REAL(wp) ::  initial_wallclock_time  !<
+     REAL(dp) ::  initial_wallclock_time  !<

      TYPE logpoint
-        REAL(wp) ::  isum      !<
-        REAL(wp) ::  ivect     !<
-        REAL(wp) ::  mean      !<
-        REAL(wp) ::  mtime     !<
-        REAL(wp) ::  mtimevec  !<
-        REAL(wp) ::  sum       !<
-        REAL(wp) ::  vector    !<
+        REAL(dp) ::  isum      !<
+        REAL(dp) ::  ivect     !<
+        REAL(dp) ::  mean      !<
+        REAL(dp) ::  mtime     !<
+        REAL(dp) ::  mtimevec  !<
+        REAL(dp) ::  sum       !<
+        REAL(dp) ::  vector    !<
         INTEGER(iwp) ::  counts  !<
         CHARACTER (LEN=25) ::  place  !<
      END TYPE logpoint

-     TYPE(logpoint), DIMENSION(100) ::  log_point = logpoint( 0.0_wp, 0.0_wp,  &
-                                        0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp,&
-                                        0, ' ' )
-     TYPE(logpoint), DIMENSION(100) ::  log_point_s = logpoint( 0.0_wp, 0.0_wp,&
-                                        0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp, 0.0_wp,&
-                                        0, ' ' )
+     TYPE(logpoint), DIMENSION(100) ::  log_point   = logpoint( 0.0_dp, 0.0_dp, 0.0_dp, 0.0_dp,    &
+                                                                0.0_dp, 0.0_dp, 0.0_dp, 0, ' ' )
+     TYPE(logpoint), DIMENSION(100) ::  log_point_s = logpoint( 0.0_dp, 0.0_dp, 0.0_dp, 0.0_dp,    &
+                                                                0.0_dp, 0.0_dp, 0.0_dp, 0, ' ' )

      SAVE
…
      LOGICAL, SAVE ::  first = .TRUE.  !<

-     REAL(wp) ::  mtime = 0.0_wp     !<
-     REAL(wp) ::  mtimevec = 0.0_wp  !<
+     REAL(dp) ::  mtime = 0.0_dp     !<
+     REAL(dp) ::  mtimevec = 0.0_dp  !<
      TYPE(logpoint) ::  log_event  !<
…
         log_event%place = place
      ELSEIF ( log_event%place /= place )  THEN
-        WRITE( message_string, * ) 'wrong argument expected: ',                &
-                                   TRIM(log_event%place), ' given: ', TRIM( place )
+        WRITE( message_string, * ) 'wrong argument expected: ',                                    &
+                                   TRIM(log_event%place), ' given: ', TRIM( place )
         CALL message( 'cpu_log', 'PA0174', 1, 2, 0, 6, 0 )
      ENDIF
…
  !-- PEs that have not yet finished
  #if defined( __parallel )
-     IF ( cpu_log_barrierwait  .AND.  wait_allowed  .AND.                      &
+     IF ( cpu_log_barrierwait  .AND.  wait_allowed  .AND.                                          &
           ( modus == 'start'  .OR.  modus == 'continue' ) )  THEN
         CALL MPI_BARRIER( comm2d, ierr )
…
  !-- Take current time
      CALL SYSTEM_CLOCK( count, count_rate )
-     mtime = REAL( count, KIND=wp ) / REAL( count_rate, KIND=wp )
+     mtime = REAL( count, KIND=dp ) / REAL( count_rate, KIND=dp )
…
      ELSEIF ( modus == 'pause' )  THEN
         IF ( ( mtime - log_event%mtime ) < 0.0  .AND.  first )  THEN
-           WRITE( message_string, * ) 'negative time interval occured',        &
-                                      '&PE',myid,' L=PAUSE "',TRIM(log_event%place),&
-                                      '" new=', mtime,' last=',log_event%mtime
+           WRITE( message_string, * ) 'negative time interval occured',                            &
+                                      '&PE',myid,' L=PAUSE "',TRIM(log_event%place),               &
+                                      '" new=', mtime,' last=',log_event%mtime
            CALL message( 'cpu_log', 'PA0176', 0, 1, -1, 6, 0 )
            first = .FALSE.
…
         log_event%ivect = log_event%ivect + mtimevec - log_event%mtimevec
      ELSEIF ( modus == 'stop' )  THEN
-        IF ( ( mtime - log_event%mtime + log_event%isum ) < 0.0  .AND.         &
-             first )  THEN
-           WRITE( message_string, * ) 'negative time interval occured',        &
-                                      '&PE',myid,' L=STOP "',TRIM(log_event%place),'" new=',     &
-                                      mtime,' last=',log_event%mtime,' isum=',log_event%isum
+        IF ( ( mtime - log_event%mtime + log_event%isum ) < 0.0  .AND.  first )  THEN
+           WRITE( message_string, * ) 'negative time interval occured',                            &
+                                      '&PE',myid,' L=STOP "',TRIM(log_event%place),'" new=',       &
+                                      mtime,' last=',log_event%mtime,' isum=',log_event%isum
            CALL message( 'cpu_log', 'PA0177', 0, 1, -1, 6, 0 )
            first = .FALSE.
…
            log_event%sum = log_event%sum + log_event%mtime
            IF ( log_event%sum < 0.0  .AND.  first )  THEN
-              WRITE( message_string, * ) 'negative time interval occured',     &
-                                         '&PE',myid,' L=STOP "',TRIM(log_event%place),'" sum=',  &
-                                         log_event%sum,' mtime=',log_event%mtime
+              WRITE( message_string, * ) 'negative time interval occured',                         &
+                                         '&PE',myid,' L=STOP "',TRIM(log_event%place),'" sum=',    &
+                                         log_event%sum,' mtime=',log_event%mtime
               CALL message( 'cpu_log', 'PA0178', 0, 1, -1, 6, 0 )
               first = .FALSE.
…
         log_event%vector = log_event%vector + log_event%mtimevec
         log_event%counts = log_event%counts + 1
-        log_event%isum   = 0.0_wp
-        log_event%ivect  = 0.0_wp
+        log_event%isum   = 0.0_dp
+        log_event%ivect  = 0.0_dp
      ELSE
-        message_string = 'unknown modus of time measurement: ' //              &
-                         TRIM( modus )
+        message_string = 'unknown modus of time measurement: ' // TRIM( modus )
         CALL message( 'cpu_log', 'PA0179', 0, 1, -1, 6, 0 )
      ENDIF
…
      INTEGER(iwp) ::  iii     !<
      INTEGER(iwp) ::  sender  !<
-     REAL(wp) ::  average_cputime  !<
-     REAL(wp), SAVE ::  norm = 1.0_wp  !<
-     REAL(wp), DIMENSION(:), ALLOCATABLE ::  pe_max  !<
-     REAL(wp), DIMENSION(:), ALLOCATABLE ::  pe_min  !<
-     REAL(wp), DIMENSION(:), ALLOCATABLE ::  pe_rms  !<
-     REAL(wp), DIMENSION(:), ALLOCATABLE ::  pe_tmp  !<
-     REAL(wp), DIMENSION(:), ALLOCATABLE ::  sum     !<
-     REAL(wp), DIMENSION(:,:), ALLOCATABLE ::  pe_log_points  !<
+     REAL(dp) ::  average_cputime  !<
+     REAL(dp), SAVE ::  norm = 1.0_dp  !<
+     REAL(dp), DIMENSION(:), ALLOCATABLE ::  pe_max  !<
+     REAL(dp), DIMENSION(:), ALLOCATABLE ::  pe_min  !<
+     REAL(dp), DIMENSION(:), ALLOCATABLE ::  pe_rms  !<
+     REAL(dp), DIMENSION(:), ALLOCATABLE ::  pe_tmp  !<
+     REAL(dp), DIMENSION(:), ALLOCATABLE ::  sum     !<
+     REAL(dp), DIMENSION(:,:), ALLOCATABLE ::  pe_log_points  !<
…
  !-- Allocate and initialize temporary arrays needed for statistics
-     ALLOCATE( pe_max( SIZE( log_point ) ), pe_min( SIZE( log_point ) ),       &
-               pe_rms( SIZE( log_point ) ), pe_tmp( SIZE( log_point ) ),       &
+     ALLOCATE( pe_max( SIZE( log_point ) ), pe_min( SIZE( log_point ) ),                           &
+               pe_rms( SIZE( log_point ) ), pe_tmp( SIZE( log_point ) ),                           &
                pe_log_points( SIZE( log_point ), 0:numprocs-1 ) )
      pe_min = log_point%sum
      pe_max = log_point%sum    ! need to be set in case of 1 PE
-     pe_rms = 0.0_wp
-     pe_tmp = 0.0_wp
+     pe_rms = 0.0_dp
+     pe_tmp = 0.0_dp

  #if defined( __parallel )
  !-- Receive data from all PEs
      DO  i = 1, numprocs-1
-        CALL MPI_RECV( pe_tmp(1), SIZE( log_point ), MPI_REAL,                 &
-                       i, i, comm2d, status, ierr )
+        CALL MPI_RECV( pe_tmp(1), SIZE( log_point ), MPI_DOUBLE_PRECISION, i, i, comm2d,           &
+                       status, ierr )
         sender = status(MPI_SOURCE)
         pe_log_points(:,sender) = pe_tmp
…
  !-- Calculate rms
         DO  i = 0, numprocs-1
-           pe_rms(iii) = pe_rms(iii) + (                                       &
-                               pe_log_points(iii,i) - log_point(iii)%sum       &
-                                       )**2
+           pe_rms(iii) = pe_rms(iii) + ( pe_log_points(iii,i) - log_point(iii)%sum )**2
         ENDDO
         pe_rms(iii) = SQRT( pe_rms(iii) / numprocs )
…
      ALLOCATE( pe_max( SIZE( log_point ) ) )
      pe_max = log_point%sum
-     CALL MPI_SEND( pe_max(1), SIZE( log_point ), MPI_REAL, 0, myid,           &
-                    comm2d, ierr )
+     CALL MPI_SEND( pe_max(1), SIZE( log_point ), MPI_DOUBLE_PRECISION, 0, myid, comm2d, ierr )
  #endif
…
         sum = log_point%sum
      ELSEWHERE
-        sum = -1.0_wp
+        sum = -1.0_dp
      ENDWHERE
…
  !-- timestep
      IF ( nr_timesteps_this_run /= 0 )  THEN
-        average_cputime = log_point(1)%sum / REAL( ngp_3d(0), KIND=wp ) /      &
-                          REAL( nr_timesteps_this_run, KIND=wp ) * 1E6_wp
-                          ! in micro-sec
+        average_cputime = log_point(1)%sum / REAL( ngp_3d(0), KIND=dp ) /                          &
+                          REAL( nr_timesteps_this_run, KIND=dp ) * 1E6_dp  ! in micro-sec
      ELSE
-        average_cputime = -1.0_wp
+        average_cputime = -1.0_dp
      ENDIF
…
      CALL check_open( 18 )
  #if defined( __parallel )
-     WRITE ( 18, 100 )  TRIM( run_description_header ),                        &
-                        numprocs * threads_per_task, pdims(1), pdims(2),       &
-                        threads_per_task, nx+1, ny+1, nz,                      &
+     WRITE ( 18, 100 )  TRIM( run_description_header ), numprocs * threads_per_task,               &
+                        pdims(1), pdims(2), threads_per_task, nx+1, ny+1, nz,                      &
                         nr_timesteps_this_run, average_cputime

      WRITE ( 18, 110 )
  #else
-     WRITE ( 18, 100 )  TRIM( run_description_header ),                        &
-                        numprocs * threads_per_task, 1, 1,                     &
-                        threads_per_task, nx+1, ny+1, nz,                      &
-                        nr_timesteps_this_run, average_cputime
+     WRITE ( 18, 100 )  TRIM( run_description_header ), numprocs * threads_per_task, 1, 1,         &
+                        threads_per_task, nx+1, ny+1, nz, nr_timesteps_this_run,                   &
+                        average_cputime

      WRITE ( 18, 110 )
…
      ii = MAXLOC( sum )
      i = ii(1)
-     IF ( sum(i) /= -1.0_wp )  THEN
-        WRITE ( 18, 102 )                                                      &
-           log_point(i)%place, log_point(i)%sum,                               &
-           log_point(i)%sum / log_point(1)%sum * 100.0_wp,                     &
-           log_point(i)%counts, pe_min(i), pe_max(i), pe_rms(i)
-        sum(i) = -1.0_wp
+     IF ( sum(i) /= -1.0_dp )  THEN
+        WRITE ( 18, 102 )  log_point(i)%place, log_point(i)%sum,                                   &
+                           log_point(i)%sum / log_point(1)%sum * 100.0_dp,                         &
+                           log_point(i)%counts, pe_min(i), pe_max(i), pe_rms(i)
+        sum(i) = -1.0_dp
      ELSE
         EXIT
…
      pe_min = log_point_s%sum
      pe_max = log_point_s%sum    ! need to be set in case of 1 PE
-     pe_rms = 0.0_wp
+     pe_rms = 0.0_dp

  #if defined( __parallel )
  !-- Receive data from all PEs
      DO  i = 1, numprocs-1
-        CALL MPI_RECV( pe_tmp(1), SIZE( log_point ), MPI_REAL,                 &
-                       MPI_ANY_SOURCE, MPI_ANY_TAG, comm2d, status, ierr )
+        CALL MPI_RECV( pe_tmp(1), SIZE( log_point ), MPI_DOUBLE_PRECISION, MPI_ANY_SOURCE,         &
+                       MPI_ANY_TAG, comm2d, status, ierr )
         sender = status(MPI_SOURCE)
         pe_log_points(:,sender) = pe_tmp
…
      DO  iii = 1, SIZE( log_point )
         DO  i = 1, numprocs-1
-           log_point_s(iii)%sum = log_point_s(iii)%sum +                       &
-                                  pe_log_points(iii,i)
+           log_point_s(iii)%sum = log_point_s(iii)%sum + pe_log_points(iii,i)
            pe_min(iii) = MIN( pe_min(iii), pe_log_points(iii,i) )
            pe_max(iii) = MAX( pe_max(iii), pe_log_points(iii,i) )
…
  !-- Calculate rms
         DO  i = 0, numprocs-1
-           pe_rms(iii) = pe_rms(iii) + (                                       &
-                               pe_log_points(iii,i) - log_point_s(iii)%sum     &
-                                       )**2
+           pe_rms(iii) = pe_rms(iii) + ( pe_log_points(iii,i) - log_point_s(iii)%sum )**2
         ENDDO
         pe_rms(iii) = SQRT( pe_rms(iii) / numprocs )
…
  !-- the data in order to avoid sending the data type log)
      pe_max = log_point_s%sum
-     CALL MPI_SEND( pe_max(1), SIZE( log_point ), MPI_REAL, 0, 0, comm2d,      &
-                    ierr )
+     CALL MPI_SEND( pe_max(1), SIZE( log_point ), MPI_DOUBLE_PRECISION, 0, 0, comm2d, ierr )
  #endif
…
         sum = log_point_s%sum
      ELSEWHERE
-        sum = -1.0_wp
+        sum = -1.0_dp
      ENDWHERE
…
         ii = MAXLOC( sum )
         i = ii(1)
-        IF ( sum(i) /= -1.0_wp )  THEN
-           WRITE ( 18, 102 )                                                   &
-              log_point_s(i)%place, log_point_s(i)%sum,                        &
-              log_point_s(i)%sum / log_point(1)%sum * 100.0_wp,                &
-              log_point_s(i)%counts, pe_min(i), pe_max(i), pe_rms(i)
-           sum(i) = -1.0_wp
+        IF ( sum(i) /= -1.0_dp )  THEN
+           WRITE ( 18, 102 )  log_point_s(i)%place, log_point_s(i)%sum,                            &
+                              log_point_s(i)%sum / log_point(1)%sum * 100.0_dp,                    &
+                              log_point_s(i)%counts, pe_min(i), pe_max(i), pe_rms(i)
+           sum(i) = -1.0_dp
         ELSE
            EXIT
…
- 100 FORMAT (A/11('-')//'CPU measures for ',I5,' PEs (',I5,'(x) * ',I5,'(y',   &
-             &') tasks *',I5,' threads):'//                                    &
-             'gridpoints (x/y/z): ',20X,I5,' * ',I5,' * ',I5/                  &
-             'nr of timesteps: ',22X,I6/                                       &
+ 100 FORMAT (A/11('-')//'CPU measures for ',I5,' PEs (',I5,'(x) * ',I5,'(y',                       &
+             &') tasks *',I5,' threads):'//                                                        &
+             'gridpoints (x/y/z): ',20X,I5,' * ',I5,' * ',I5/                                      &
+             'nr of timesteps: ',22X,I6/                                                           &
              'cpu time per grid point and timestep: ',5X,F8.5,' * 10**-6 s')

  101 FORMAT (/'special measures:'/                                             &
-             &'-----------------------------------------------------------',   &
+             &'-----------------------------------------------------------',                      &
              &'---------------------')
…
  106 FORMAT (/'Exchange of ghostpoints via MPI_ISEND/MPI_IRECV')
  107 FORMAT (//)
- 110 FORMAT ('------------------------------------------------------------',   &
-             &'----------'//                                                   &
-             &'place:                        mean        counts      min  ',   &
-             &'     max       rms'/                                            &
-             &'                           sec.      %                 ',       &
-             &'sec.       sec.       sec.'/                                    &
-             &'-----------------------------------------------------------',   &
+ 110 FORMAT ('------------------------------------------------------------',                      &
+             &'----------'//                                                                       &
+             &'place:                        mean        counts      min  ',                      &
+             &'     max       rms'/                                                                &
+             &'                           sec.      %                 ',                          &
+             &'sec.       sec.       sec.'/                                                       &
+             &'-----------------------------------------------------------',                      &
              &'---------------------')
  111 FORMAT (/'Barriers are set at beginning (start/continue) of measurements')
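Why the change from wp to dp matters for the timing: cpu_log derives wall-clock times from SYSTEM_CLOCK, whose integer tick count (relative to an arbitrary, often large epoch) is divided by count_rate. In single precision (roughly seven significant digits), two readings taken milliseconds apart frequently round to the same value, so the computed interval is zero. This also explains the switch from MPI_REAL to MPI_DOUBLE_PRECISION in the MPI_SEND/MPI_RECV calls: the MPI datatype must match the new REAL(dp) buffers. The following is a minimal standalone sketch of the resolution loss, not part of the changeset; the kind parameters, the spin loop, and the program name are illustrative only:

PROGRAM timing_precision_demo

   IMPLICIT NONE

   INTEGER, PARAMETER ::  sp = SELECTED_REAL_KIND(  6,  37 )   !< single precision
   INTEGER, PARAMETER ::  dp = SELECTED_REAL_KIND( 12, 307 )   !< double precision

   INTEGER  ::  count, count_rate, i
   REAL(sp) ::  t1_sp, t2_sp
   REAL(dp) ::  t1_dp, t2_dp, x

!
!-- First reading, converted once to single and once to double precision
   CALL SYSTEM_CLOCK( count, count_rate )
   t1_sp = REAL( count, KIND=sp ) / REAL( count_rate, KIND=sp )
   t1_dp = REAL( count, KIND=dp ) / REAL( count_rate, KIND=dp )

!
!-- Some work of a few milliseconds, standing in for one measured code section
   x = 0.0_dp
   DO  i = 1, 10000000
      x = x + SQRT( REAL( i, KIND=dp ) )
   ENDDO

!
!-- Second reading
   CALL SYSTEM_CLOCK( count, count_rate )
   t2_sp = REAL( count, KIND=sp ) / REAL( count_rate, KIND=sp )
   t2_dp = REAL( count, KIND=dp ) / REAL( count_rate, KIND=dp )

!
!-- With count of order 10**9 and count_rate of order 10**3, the absolute time
!-- is of order 10**6 s; single precision then resolves only ~0.1 s, so the
!-- single-precision interval is mostly printed as 0.0
   PRINT*, 'checksum (prevents dead-code elimination): ', x
   PRINT*, 'single precision interval: ', t2_sp - t1_sp
   PRINT*, 'double precision interval: ', t2_dp - t1_dp

END PROGRAM timing_precision_demo

On platforms where the clock epoch is close to program start, count stays small and the effect is weaker; it is most pronounced where count is large relative to count_rate, which is the situation the commit message describes.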
palm/trunk/SOURCE/pres.f90 (r3849 → r4015)

  ! -----------------
  ! $Id$
+ ! variable child_domain_nvn eliminated
+ !
+ ! 3849 2019-04-01 16:35:16Z knoop
  ! OpenACC port for SPEC
  !
…
      REAL(wp), DIMENSION(1:nzt) ::  w_l_l  !<

-     LOGICAL ::  child_domain_nvn  !<
-

      CALL cpu_log( log_point(8), 'pres', 'start' )
…
  !
- !-- Remove mean vertical velocity in case that Neumann conditions are
- !-- used both at bottom and top boundary, and if not a nested domain in a
- !-- normal nesting run. In case of vertical nesting, this must be done.
- !-- Therefore an auxiliary logical variable child_domain_nvn is used here, and
- !-- nvn stands for non-vertical nesting.
- !-- This cannot be done before the first initial time step because ngp_2dh_outer
+ !-- Remove mean vertical velocity in case that Neumann conditions are used both at bottom and top
+ !-- boundary. With Neumann conditions at both vertical boundaries, the solver cannot remove
+ !-- mean vertical velocities. They should be removed, because incompressibility requires that
+ !-- the vertical gradient of vertical velocity is zero. Since w=0 at the solid surface, it must be
+ !-- zero everywhere.
+ !-- This must not be done in case of a 3d-nesting child domain, because a mean vertical velocity
+ !-- can physically exist in such a domain.
+ !-- Also in case of offline nesting, mean vertical velocities may exist (and must not be removed),
+ !-- caused by horizontal divergence/convergence of the large scale flow that is prescribed at the
+ !-- side boundaries.
+ !-- The removal cannot be done before the first initial time step because ngp_2dh_outer
  !-- is not yet known then.
-     child_domain_nvn = child_domain
-     IF ( child_domain  .AND.  nesting_mode == 'vertical' )  THEN
-        child_domain_nvn = .FALSE.
-     ENDIF
-
-     IF ( ibc_p_b == 1  .AND.  ibc_p_t == 1  .AND.  .NOT. nesting_offline  .AND. &
-          .NOT. child_domain_nvn  .AND.  intermediate_timestep_count /= 0 )    &
+     IF ( ibc_p_b == 1  .AND.  ibc_p_t == 1  .AND.  .NOT. nesting_offline                          &
+          .AND.  .NOT. ( child_domain  .AND.  nesting_mode /= 'vertical' )                         &
+          .AND.  intermediate_timestep_count /= 0 )                                                &
      THEN
         w_l = 0.0_wp;  w_l_l = 0.0_wp
…
         DO  j = nys, nyn
            DO  k = nzb+1, nzt
-              w_l_l(k) = w_l_l(k) + w(k,j,i)                                   &
-                                  * MERGE( 1.0_wp, 0.0_wp,                     &
-                                           BTEST( wall_flags_0(k,j,i), 3 )     &
-                                         )
+              w_l_l(k) = w_l_l(k) + w(k,j,i)                                                       &
+                                  * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_0(k,j,i), 3 ) )
            ENDDO
         ENDDO
…
  #if defined( __parallel )
         IF ( collective_wait )  CALL MPI_BARRIER( comm2d, ierr )
-        CALL MPI_ALLREDUCE( w_l_l(1), w_l(1), nzt, MPI_REAL, MPI_SUM,          &
-                            comm2d, ierr )
+        CALL MPI_ALLREDUCE( w_l_l(1), w_l(1), nzt, MPI_REAL, MPI_SUM, comm2d, ierr )
  #else
         w_l = w_l_l
…
         DO  j = nysg, nyng
            DO  k = nzb+1, nzt
-              w(k,j,i) = w(k,j,i) - w_l(k)                                     &
-                                  * MERGE( 1.0_wp, 0.0_wp,                     &
-                                           BTEST( wall_flags_0(k,j,i), 3 )     &
-                                         )
+              w(k,j,i) = w(k,j,i) - w_l(k)                                                         &
+                                  * MERGE( 1.0_wp, 0.0_wp, BTEST( wall_flags_0(k,j,i), 3 ) )
            ENDDO
         ENDDO
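The elimination of child_domain_nvn is a pure simplification, not a behaviour change: the flag was initialized to child_domain and reset to .FALSE. for vertical nesting, so it equalled child_domain .AND. nesting_mode /= 'vertical', and the rewritten IF negates exactly that expression inline. A minimal standalone sketch (hypothetical, not part of PALM) that checks the old formulation against the new one for all four input combinations:

PROGRAM condition_equivalence_demo

   IMPLICIT NONE

   CHARACTER(LEN=8), DIMENSION(2) ::  modes = (/ 'vertical', '3d      ' /)
   CHARACTER(LEN=8) ::  nesting_mode
   INTEGER ::  i, j
   LOGICAL ::  child_domain, child_domain_nvn, new_test, old_test

   DO  i = 0, 1
      child_domain = ( i == 1 )
      DO  j = 1, 2
         nesting_mode = modes(j)

!
!--      Old formulation (r3849) with the auxiliary variable
         child_domain_nvn = child_domain
         IF ( child_domain  .AND.  nesting_mode == 'vertical' )  THEN
            child_domain_nvn = .FALSE.
         ENDIF
         old_test = .NOT. child_domain_nvn

!
!--      New, inlined formulation (r4015)
         new_test = .NOT. ( child_domain  .AND.  nesting_mode /= 'vertical' )

         PRINT*, child_domain, ' ', TRIM( nesting_mode ), old_test, new_test,                      &
                 old_test .EQV. new_test
      ENDDO
   ENDDO

END PROGRAM condition_equivalence_demo

All four cases print T in the last column, confirming that removing the auxiliary variable leaves the mean-w removal active in exactly the same situations as before, including vertical-nesting child domains.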