 SUBROUTINE cpu_statistics

!--------------------------------------------------------------------------------!
! This file is part of PALM.
!
! PALM is free software: you can redistribute it and/or modify it under the terms
! of the GNU General Public License as published by the Free Software Foundation,
! either version 3 of the License, or (at your option) any later version.
!
! PALM is distributed in the hope that it will be useful, but WITHOUT ANY
! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
! A PARTICULAR PURPOSE. See the GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along with
! PALM. If not, see <http://www.gnu.org/licenses/>.
!
! Copyright 1997-2012  Leibniz University Hannover
!--------------------------------------------------------------------------------!
!
! Current revisions:
! -----------------
!
!
! Former revisions:
! -----------------
! $Id: cpu_statistics.f90 1093 2013-02-02 12:58:49Z maronga $
!
! 1092 2013-02-02 11:24:22Z raasch
! unused variables removed
!
! 1036 2012-10-22 13:43:42Z raasch
! code put under GPL (PALM 3.9)
!
! 1015 2012-09-27 09:23:24Z raasch
! output of accelerator board information
!
! 683 2011-02-09 14:25:15Z raasch
! output of handling of ghostpoint exchange
!
! 622 2010-12-10 08:08:13Z raasch
! output of handling of collective operations
!
! 222 2009-01-12 16:04:16Z letzel
! Bugfix for nonparallel execution
!
! 197 2008-09-16 15:29:03Z raasch
! Format adjustments in order to allow CPU# > 999,
! data are collected from PE0 in an ordered sequence which seems to avoid
! hanging of processes on SGI-ICE
!
! 82 2007-04-16 15:40:52Z raasch
! Preprocessor directives for old systems removed
!
! RCS Log replace by Id keyword, revision history cleaned up
!
! Revision 1.13  2006/04/26 12:10:51  raasch
! Output of number of threads per task, max = min in case of 1 PE
!
! Revision 1.1  1997/07/24 11:11:11  raasch
! Initial revision
!
!
! Description:
! ------------
! Analysis and output of the cpu-times measured. All PE results are collected
! on PE0 in order to calculate the mean cpu-time over all PEs and other
! statistics. The output is sorted according to the amount of cpu-time consumed
! and output on PE0.
!------------------------------------------------------------------------------!

    USE cpulog
    USE pegrid
    USE control_parameters

    IMPLICIT NONE

!
!-- i       : generic loop / PE index
!-- ii      : rank-1 result holder for MAXLOC (MAXLOC returns an array)
!-- iii     : index over the measured log points
!-- sender  : source rank extracted from the MPI receive status
    INTEGER ::  i, ii(1), iii, sender
!
!-- Conversion factor from the raw timer units to seconds
!-- NOTE(review): initialization at declaration implies SAVE (made explicit here)
    REAL, SAVE ::  norm = 1.0
!
!-- Per-log-point statistics over all PEs (max, min, rms) and working copy of
!-- the sums used for sorted output.
!-- NOTE(review): the name 'sum' shadows the intrinsic SUM within this scope
    REAL, DIMENSION(:), ALLOCATABLE ::  pe_max, pe_min, pe_rms, sum
!
!-- pe_log_points(iii,pe): time sum of log point iii as measured on PE pe
    REAL, DIMENSION(:,:), ALLOCATABLE ::  pe_log_points


!
!-- Compute cpu-times in seconds
    log_point%mtime  = log_point%mtime  / norm
    log_point%sum    = log_point%sum    / norm
    log_point%vector = log_point%vector / norm
    WHERE ( log_point%counts /= 0 )
       log_point%mean = log_point%sum / log_point%counts
    END WHERE

!
!-- Collect cpu-times from all PEs and calculate statistics
    IF ( myid == 0 )  THEN
!
!--    Allocate and initialize temporary arrays needed for statistics
       ALLOCATE( pe_max( SIZE( log_point ) ), pe_min( SIZE( log_point ) ), &
                 pe_rms( SIZE( log_point ) ),                              &
                 pe_log_points( SIZE( log_point ), 0:numprocs-1 ) )
       pe_min = log_point%sum
       pe_max = log_point%sum    ! need to be set in case of 1 PE
       pe_rms = 0.0

#if defined( __parallel )
!
!--    Receive data from all PEs. The data are received in an ordered
!--    sequence (source rank i with matching tag i) rather than with
!--    MPI_ANY_SOURCE; see revision 197 in the header (avoids hanging
!--    processes on SGI-ICE).
       DO  i = 1, numprocs-1
          CALL MPI_RECV( pe_max(1), SIZE( log_point ), MPI_REAL, &
                         i, i, comm2d, status, ierr )
          sender = status(MPI_SOURCE)
          pe_log_points(:,sender) = pe_max
       ENDDO
       pe_log_points(:,0) = log_point%sum   ! Results from PE0
!
!--    Calculate mean of all PEs, store it on log_point%sum
!--    and find minimum and maximum
       DO  iii = 1, SIZE( log_point )
          DO  i = 1, numprocs-1
             log_point(iii)%sum = log_point(iii)%sum + pe_log_points(iii,i)
             pe_min(iii) = MIN( pe_min(iii), pe_log_points(iii,i) )
             pe_max(iii) = MAX( pe_max(iii), pe_log_points(iii,i) )
          ENDDO
          log_point(iii)%sum = log_point(iii)%sum / numprocs
!
!--       Calculate rms (standard deviation of the per-PE times about the mean)
          DO  i = 0, numprocs-1
             pe_rms(iii) = pe_rms(iii) + (                            &
                                 pe_log_points(iii,i) - log_point(iii)%sum &
                                         )**2
          ENDDO
          pe_rms(iii) = SQRT( pe_rms(iii) / numprocs )
       ENDDO
    ELSE
!
!--    Send data to PE0 (pe_max is used as temporary storage to send
!--    the data in order to avoid sending the data type log)
       ALLOCATE( pe_max( SIZE( log_point ) ) )
       pe_max = log_point%sum
       CALL MPI_SEND( pe_max(1), SIZE( log_point ), MPI_REAL, 0, myid, comm2d, &
                      ierr )
#endif

    ENDIF

!
!-- Write cpu-times
    IF ( myid == 0 )  THEN
!
!--    Re-store sums; entries with zero counts are flagged with -1.0 so the
!--    sorted-output loop below skips them and knows when it is done
       ALLOCATE( sum( SIZE( log_point ) ) )
       WHERE ( log_point%counts /= 0 )
          sum = log_point%sum
       ELSEWHERE
          sum = -1.0
       ENDWHERE

!
!--    Write cpu-times sorted by size
!--    (unit 18 is the cpu-measures output file opened via check_open)
       CALL check_open( 18 )
#if defined( __parallel )
       WRITE ( 18, 100 )  TRIM( run_description_header ),                  &
                          numprocs * threads_per_task, pdims(1), pdims(2), &
                          threads_per_task
       IF ( num_acc_per_node /= 0 )  WRITE ( 18, 108 )  num_acc_per_node
       WRITE ( 18, 110 )
#else
       WRITE ( 18, 100 )  TRIM( run_description_header ),                  &
                          numprocs * threads_per_task, 1, 1,               &
                          threads_per_task
       IF ( num_acc_per_node /= 0 )  WRITE ( 18, 109 )  num_acc_per_node
       WRITE ( 18, 110 )
#endif
!
!--    Selection sort by repeated MAXLOC: output the largest remaining entry,
!--    then mark it as done with -1.0; loop ends when only flagged entries
!--    remain
       DO
          ii = MAXLOC( sum )
          i = ii(1)
          IF ( sum(i) /= -1.0 )  THEN
             WRITE ( 18, 102 ) &
                log_point(i)%place, log_point(i)%sum,                &
                log_point(i)%sum / log_point(1)%sum * 100.0,         &
                log_point(i)%counts, pe_min(i), pe_max(i), pe_rms(i)
             sum(i) = -1.0
          ELSE
             EXIT
          ENDIF
       ENDDO
    ENDIF


!
!-- The same procedure again for the individual measurements (log_point_s).
!
!-- Compute cpu-times in seconds
    log_point_s%mtime  = log_point_s%mtime  / norm
    log_point_s%sum    = log_point_s%sum    / norm
    log_point_s%vector = log_point_s%vector / norm
    WHERE ( log_point_s%counts /= 0 )
       log_point_s%mean = log_point_s%sum / log_point_s%counts
    END WHERE

!
!-- Collect cpu-times from all PEs and calculate statistics
#if defined( __parallel )
!
!-- Set barrier in order to avoid that PE0 receives log_point_s-data
!-- while still busy with receiving log_point-data (see above)
    CALL MPI_BARRIER( comm2d, ierr )
#endif

    IF ( myid == 0 )  THEN
!
!--    Initialize temporary arrays needed for statistics
!--    (pe_min/pe_max/pe_rms/pe_log_points were allocated above)
       pe_min = log_point_s%sum
       pe_max = log_point_s%sum    ! need to be set in case of 1 PE
       pe_rms = 0.0

#if defined( __parallel )
!
!--    Receive data from all PEs. Unlike the first collection this one may use
!--    MPI_ANY_SOURCE/MPI_ANY_TAG, because the barrier above separates the two
!--    message streams.
       DO  i = 1, numprocs-1
          CALL MPI_RECV( pe_max(1), SIZE( log_point ), MPI_REAL, &
                         MPI_ANY_SOURCE, MPI_ANY_TAG, comm2d, status, ierr )
          sender = status(MPI_SOURCE)
          pe_log_points(:,sender) = pe_max
       ENDDO
       pe_log_points(:,0) = log_point_s%sum   ! Results from PE0
!
!--    Calculate mean of all PEs, store it on log_point_s%sum
!--    and find minimum and maximum
       DO  iii = 1, SIZE( log_point )
          DO  i = 1, numprocs-1
             log_point_s(iii)%sum = log_point_s(iii)%sum + pe_log_points(iii,i)
             pe_min(iii) = MIN( pe_min(iii), pe_log_points(iii,i) )
             pe_max(iii) = MAX( pe_max(iii), pe_log_points(iii,i) )
          ENDDO
          log_point_s(iii)%sum = log_point_s(iii)%sum / numprocs
!
!--       Calculate rms
          DO  i = 0, numprocs-1
             pe_rms(iii) = pe_rms(iii) + (                              &
                                 pe_log_points(iii,i) - log_point_s(iii)%sum &
                                         )**2
          ENDDO
          pe_rms(iii) = SQRT( pe_rms(iii) / numprocs )
       ENDDO
    ELSE
!
!--    Send data to PE0 (pe_max is used as temporary storage to send
!--    the data in order to avoid sending the data type log)
       pe_max = log_point_s%sum
       CALL MPI_SEND( pe_max(1), SIZE( log_point ), MPI_REAL, 0, 0, comm2d, &
                      ierr )
#endif

    ENDIF

!
!-- Write cpu-times
    IF ( myid == 0 )  THEN
!
!--    Re-store sums (same -1.0 flagging scheme as above)
       WHERE ( log_point_s%counts /= 0 )
          sum = log_point_s%sum
       ELSEWHERE
          sum = -1.0
       ENDWHERE

!
!--    Write cpu-times sorted by size. Percentages are relative to
!--    log_point(1)%sum, i.e. the first (total) measurement.
       WRITE ( 18, 101 )
       DO
          ii = MAXLOC( sum )
          i = ii(1)
          IF ( sum(i) /= -1.0 )  THEN
             WRITE ( 18, 102 ) &
                log_point_s(i)%place, log_point_s(i)%sum,              &
                log_point_s(i)%sum / log_point(1)%sum * 100.0,         &
                log_point_s(i)%counts, pe_min(i), pe_max(i), pe_rms(i)
             sum(i) = -1.0
          ELSE
             EXIT
          ENDIF
       ENDDO

!
!--    Output of handling of MPI operations
       IF ( collective_wait )  THEN
          WRITE ( 18, 103 )
       ELSE
          WRITE ( 18, 104 )
       ENDIF
       IF ( synchronous_exchange )  THEN
          WRITE ( 18, 105 )
       ELSE
          WRITE ( 18, 106 )
       ENDIF

!
!--    Empty lines in order to create a gap to the results of the model
!--    continuation runs
       WRITE ( 18, 107 )

!
!--    Unit 18 is not needed anymore
       CALL close_file( 18 )

    ENDIF


100 FORMAT (A/11('-')//'CPU measures for ',I5,' PEs (',I5,'(x) * ',I5,'(y', &
            &') tasks *',I5,' threads):')

101 FORMAT (/'special measures:'/ &
            &'-----------------------------------------------------------', &
            &'--------------------')

102 FORMAT (A20,2X,F9.3,2X,F7.2,1X,I7,3(1X,F9.3))
103 FORMAT (/'Barriers are set in front of collective operations')
104 FORMAT (/'No barriers are set in front of collective operations')
105 FORMAT (/'Exchange of ghostpoints via MPI_SENDRCV')
106 FORMAT (/'Exchange of ghostpoints via MPI_ISEND/MPI_IRECV')
107 FORMAT (//)
108 FORMAT ('Accelerator boards per node: ',I2)
109 FORMAT ('Accelerator boards: ',I2)
110 FORMAT ('----------------------------------------------------------', &
            &'------------'//&
            &'place:                        mean        counts      min  ', &
            &'     max       rms'/ &
            &'                           sec.      %                sec. ', &
            &'     sec.      sec.'/ &
            &'-----------------------------------------------------------', &
            &'-------------------')

 END SUBROUTINE cpu_statistics