Changeset 622 for palm/trunk/SOURCE
- Timestamp: Dec 10, 2010 8:08:13 AM
- Location: palm/trunk/SOURCE
- Files: 26 edited
Legend:
- In the diffs below, lines marked "+" were added, lines marked "-" were removed, and unmarked lines are unmodified context.
palm/trunk/SOURCE/advec_particles.f90
r559 → r622

    ! Current revisions:
    ! -----------------
+   ! optional barriers included in order to speed up collective operations
    ! TEST: PRINT statements on unit 9 (commented out)
    !
…
    !
    !-- Compute total sum from local sums
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( sums_l(nzb,1,0), sums(nzb,1), nzt+2-nzb, &
                        MPI_REAL, MPI_SUM, comm2d, ierr )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( sums_l(nzb,2,0), sums(nzb,2), nzt+2-nzb, &
                        MPI_REAL, MPI_SUM, comm2d, ierr )
…
    !
    !-- Compute total sum from local sums
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( sums_l(nzb,8,0), sums(nzb,8), nzt+2-nzb, &
                        MPI_REAL, MPI_SUM, comm2d, ierr )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( sums_l(nzb,30,0), sums(nzb,30), nzt+2-nzb, &
                        MPI_REAL, MPI_SUM, comm2d, ierr )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( sums_l(nzb,31,0), sums(nzb,31), nzt+2-nzb, &
                        MPI_REAL, MPI_SUM, comm2d, ierr )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( sums_l(nzb,32,0), sums(nzb,32), nzt+2-nzb, &
                        MPI_REAL, MPI_SUM, comm2d, ierr )
…
    !-- and set the switch corespondingly
    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( dt_3d_reached_l, dt_3d_reached, 1, MPI_LOGICAL, &
                        MPI_LAND, comm2d, ierr )
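Every file in this changeset follows the same pattern as the hunks above: a barrier, guarded by the new collective_wait switch, is placed immediately in front of a collective MPI call so that all ranks enter the collective together. The following standalone sketch illustrates the idiom outside of PALM; the program name, the use of MPI_COMM_WORLD, and the local_sum/global_sum variables are illustrative assumptions, only the guarded-barrier pattern itself is taken from this changeset.

    !-- Minimal, self-contained sketch of the barrier-before-collective pattern
    !-- introduced in this changeset. Requires an MPI library providing the
    !-- Fortran "mpi" module. Names other than collective_wait and the MPI
    !-- routines are placeholders, not PALM code.
    PROGRAM barrier_before_collective

       USE mpi

       IMPLICIT NONE

       INTEGER ::  ierr, myid
       LOGICAL ::  collective_wait = .TRUE.   ! in PALM an inipar parameter
       REAL    ::  local_sum, global_sum

       CALL MPI_INIT( ierr )
       CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )

       local_sum = REAL( myid )

    !
    !-- Optionally synchronize all ranks first, so that the reduction itself
    !-- does not absorb the waiting time of late ranks
       IF ( collective_wait ) CALL MPI_BARRIER( MPI_COMM_WORLD, ierr )
       CALL MPI_ALLREDUCE( local_sum, global_sum, 1, MPI_REAL, MPI_SUM, &
                           MPI_COMM_WORLD, ierr )

       IF ( myid == 0 ) PRINT*, 'global sum = ', global_sum

       CALL MPI_FINALIZE( ierr )

    END PROGRAM barrier_before_collective

Whether the extra barrier actually pays off is system dependent, which is why it is kept optional and merely switched on by default for SGI-type hosts in the init_pegrid change below.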
palm/trunk/SOURCE/advec_s_bc.f90
r392 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    ENDDO
    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( fmax_l, fmax, 2, MPI_REAL, MPI_MAX, comm2d, ierr )
    #else

(the same barrier is inserted in front of the two further, identical MPI_ALLREDUCE( fmax_l, … ) calls in this file)
palm/trunk/SOURCE/buoyancy.f90
r516 → r622

    ! Currrent revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    #if defined( __parallel )

+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( sums_l(nzb,pr,0), sums(nzb,pr), nzt+2-nzb, &
                        MPI_REAL, MPI_SUM, comm2d, ierr )
palm/trunk/SOURCE/check_for_restart.f90
r392 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    !-- Make a logical OR for all processes. Stop the model run if at least
    !-- one processor has reached the time limit.
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( terminate_run_l, terminate_run, 1, MPI_LOGICAL, &
                        MPI_LOR, comm2d, ierr )
palm/trunk/SOURCE/cpu_statistics.f90
r484 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! output of handling of collective operations
    !
    ! Former revisions:
…

    !
+   !-- Output handling of collective operations
+   IF ( collective_wait ) THEN
+      WRITE ( 18, 103 )
+   ELSE
+      WRITE ( 18, 104 )
+   ENDIF
+
+   !
    !-- Empty lines in order to create a gap to the results of the model
    !-- continuation runs
-   WRITE ( 18, 103 )
+   WRITE ( 18, 105 )
…
    102 FORMAT (A20,2X,F9.3,2X,F7.2,1X,I7,3(1X,F9.3))
-   103 FORMAT (//)
+   103 FORMAT (/'Barriers are set in front of collective operations')
+   104 FORMAT (/'No barriers are set in front of collective operations')
+   105 FORMAT (//)

    END SUBROUTINE cpu_statistics
palm/trunk/SOURCE/data_output_2d.f90
r559 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    !
    !-- Now do the averaging over all PEs along y
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( local_2d_l(nxl-1,nzb), &
                        local_2d(nxl-1,nzb), ngp, MPI_REAL, &
…
    !-- Distribute data over all PEs along y
    ngp = ( nxr-nxl+3 ) * ( nzt-nzb+2 )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( local_2d_l(nxl-1,nzb), &
                        local_2d(nxl-1,nzb), ngp, &
…
    !
    !-- Now do the averaging over all PEs along x
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( local_2d_l(nys-1,nzb), &
                        local_2d(nys-1,nzb), ngp, MPI_REAL, &
…
    !-- Distribute data over all PEs along x
    ngp = ( nyn-nys+3 ) * ( nzt-nzb+2 )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( local_2d_l(nys-1,nzb), &
                        local_2d(nys-1,nzb), ngp, &
palm/trunk/SOURCE/data_output_ptseries.f90
r392 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    inum = number_of_particle_groups + 1

+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( pts_value_l(0,1), pts_value(0,1), 14*inum, MPI_REAL, &
                        MPI_SUM, comm2d, ierr )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( pts_value_l(0,15), pts_value(0,15), inum, MPI_REAL, &
                        MPI_MAX, comm2d, ierr )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( pts_value_l(0,16), pts_value(0,16), inum, MPI_REAL, &
                        MPI_MIN, comm2d, ierr )
…
    inum = number_of_particle_groups + 1

+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( pts_value_l(0,17), pts_value(0,17), inum*10, MPI_REAL, &
                        MPI_SUM, comm2d, ierr )
palm/trunk/SOURCE/flow_statistics.f90
r550 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    !
    !-- Compute total sum from local sums
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( sums_l(nzb,1,0), sums(nzb,1), nzt+2-nzb, MPI_REAL, &
                        MPI_SUM, comm2d, ierr )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( sums_l(nzb,2,0), sums(nzb,2), nzt+2-nzb, MPI_REAL, &
                        MPI_SUM, comm2d, ierr )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( sums_l(nzb,4,0), sums(nzb,4), nzt+2-nzb, MPI_REAL, &
                        MPI_SUM, comm2d, ierr )
    IF ( ocean ) THEN
+      IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
       CALL MPI_ALLREDUCE( sums_l(nzb,23,0), sums(nzb,23), nzt+2-nzb, &
                           MPI_REAL, MPI_SUM, comm2d, ierr )
    ENDIF
    IF ( humidity ) THEN
+      IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
       CALL MPI_ALLREDUCE( sums_l(nzb,44,0), sums(nzb,44), nzt+2-nzb, &
                           MPI_REAL, MPI_SUM, comm2d, ierr )
+      IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
       CALL MPI_ALLREDUCE( sums_l(nzb,41,0), sums(nzb,41), nzt+2-nzb, &
                           MPI_REAL, MPI_SUM, comm2d, ierr )
       IF ( cloud_physics ) THEN
+         IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
          CALL MPI_ALLREDUCE( sums_l(nzb,42,0), sums(nzb,42), nzt+2-nzb, &
                              MPI_REAL, MPI_SUM, comm2d, ierr )
+         IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
          CALL MPI_ALLREDUCE( sums_l(nzb,43,0), sums(nzb,43), nzt+2-nzb, &
                              MPI_REAL, MPI_SUM, comm2d, ierr )
…
    IF ( passive_scalar ) THEN
+      IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
       CALL MPI_ALLREDUCE( sums_l(nzb,41,0), sums(nzb,41), nzt+2-nzb, &
                           MPI_REAL, MPI_SUM, comm2d, ierr )
…
    !
    !-- Compute total sum from local sums
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( sums_l(nzb,1,0), sums(nzb,1), ngp_sums, MPI_REAL, &
                        MPI_SUM, comm2d, ierr )
palm/trunk/SOURCE/global_min_max.f90
r484 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    #if defined( __parallel )
    fmin_l(2) = myid
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( fmin_l, fmin, 1, MPI_2REAL, MPI_MINLOC, comm2d, ierr )
…
    #if defined( __parallel )
    fmax_l(2) = myid
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( fmax_l, fmax, 1, MPI_2REAL, MPI_MAXLOC, comm2d, ierr )
…
    #if defined( __parallel )
    fmax_l(2) = myid
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( fmax_l, fmax, 1, MPI_2REAL, MPI_MAXLOC, comm2d, &
                        ierr )
palm/trunk/SOURCE/inflow_turbulence.f90
r484 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    !
    !-- Now, averaging over all PEs
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( avpr_l(nzb,1), avpr(nzb,1), ngp_pr, MPI_REAL, MPI_SUM, &
                        comm2d, ierr )
…

    #if defined( __parallel )
+   ! IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    ! CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 1, MPI_REAL, &
    !                     MPI_SUM, comm1dy, ierr )
palm/trunk/SOURCE/init_3d_model.f90
r561 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…

    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( volume_flow_initial_l(1), volume_flow_initial(1),&
                        2, MPI_REAL, MPI_SUM, comm2d, ierr )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( volume_flow_area_l(1), volume_flow_area(1), &
                        2, MPI_REAL, MPI_SUM, comm2d, ierr )
…

    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( volume_flow_initial_l(1), volume_flow_initial(1),&
                        2, MPI_REAL, MPI_SUM, comm2d, ierr )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( volume_flow_area_l(1), volume_flow_area(1), &
                        2, MPI_REAL, MPI_SUM, comm2d, ierr )
…
    sr = statistic_regions + 1
    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( ngp_2dh_l(0), ngp_2dh(0), sr, MPI_INTEGER, MPI_SUM, &
                        comm2d, ierr )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( ngp_2dh_outer_l(0,0), ngp_2dh_outer(0,0), (nz+2)*sr, &
                        MPI_INTEGER, MPI_SUM, comm2d, ierr )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( ngp_2dh_s_inner_l(0,0), ngp_2dh_s_inner(0,0), &
                        (nz+2)*sr, MPI_INTEGER, MPI_SUM, comm2d, ierr )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( ngp_3d_inner_l(0), ngp_3d_inner_tmp(0), sr, MPI_REAL, &
                        MPI_SUM, comm2d, ierr )
palm/trunk/SOURCE/init_particles.f90
r392 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    !-- Calculate the number of particles and tails of the total domain
    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( number_of_particles, total_number_of_particles, 1, &
                        MPI_INTEGER, MPI_SUM, comm2d, ierr )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( number_of_tails, total_number_of_tails, 1, &
                        MPI_INTEGER, MPI_SUM, comm2d, ierr )
…

    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( uniform_particles_l, uniform_particles, 1, &
                        MPI_LOGICAL, MPI_LAND, comm2d, ierr )
palm/trunk/SOURCE/init_pegrid.f90
r482 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    ! ATTENTION: nnz_x undefined problem still has to be solved!!!!!!!!
    ! TEST OUTPUT (TO BE REMOVED) logging mpi2 ierr values
…
    CALL message( 'init_pegrid', 'PA0223', 1, 2, 0, 6, 0 )
    ENDIF
+
+   !
+   !-- For communication speedup, set barriers in front of collective
+   !-- communications by default on SGI-type systems
+   IF ( host(3:5) == 'sgi' ) collective_wait = .TRUE.

    !
…
    id_inflow_l = 0
    ENDIF
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( id_inflow_l, id_inflow, 1, MPI_INTEGER, MPI_SUM, &
                        comm1dx, ierr )
…
    !-- Broadcast the id of the recycling plane
    !-- WARNING: needs to be adjusted in case of inflows other than from left side!
-   IF ( ( recycling_width / dx ) >= nxl .AND. ( recycling_width / dx ) <= nxr )&
-   THEN
+   IF ( ( recycling_width / dx ) >= nxl .AND. &
+        ( recycling_width / dx ) <= nxr ) THEN
    id_recycling_l = myidx
    ELSE
    id_recycling_l = 0
    ENDIF
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( id_recycling_l, id_recycling, 1, MPI_INTEGER, MPI_SUM, &
                        comm1dx, ierr )
palm/trunk/SOURCE/init_slope.f90
r484 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    ENDDO
    ENDDO
    ENDDO

    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( pt_init_local, pt_init, nzt+2-nzb, MPI_REAL, &
                        MPI_SUM, comm2d, ierr )
    #else
    pt_init = pt_init_local
    #endif

    pt_init = pt_init / ngp_2dh(0)
    DEALLOCATE( pt_init_local )

    ENDIF

    END SUBROUTINE init_slope

(the remaining differences in this hunk are indentation-only changes to the lines shown above)
palm/trunk/SOURCE/modules.f90
r601 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! +collective_wait in pegrid
    !
    ! Former revisions:
…
    INTEGER, DIMENSION(:), ALLOCATABLE :: ngp_yz, type_xz

-   LOGICAL :: reorder = .TRUE.
+   LOGICAL :: collective_wait = .FALSE., reorder = .TRUE.
    LOGICAL, DIMENSION(2) :: cyclic = (/ .TRUE. , .TRUE. /), &
                             remain_dims
palm/trunk/SOURCE/parin.f90
r601 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! +collective_wait in inipar
    !
    ! Former revisions:
…
    canyon_width_x, canyon_width_y, canyon_wall_left, &
    canyon_wall_south, cfl_factor, cloud_droplets, cloud_physics, &
-   conserve_volume_flow, conserve_volume_flow_mode, &
+   collective_wait, conserve_volume_flow, conserve_volume_flow_mode, &
    coupling_start_time, cthf, cut_spline_overshoot, &
    cycle_mg, damp_level_1d, dissipation_1d, dp_external, dp_level_b, &
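Since collective_wait is now part of the inipar namelist, the barriers can also be switched on explicitly from a run's parameter file. A minimal sketch of such a namelist fragment follows; the grid and grid-spacing values are placeholders for illustration only and are not part of this changeset.

    &inipar  nx = 39, ny = 39, nz = 40,
             dx = 50.0, dy = 50.0, dz = 50.0,
             collective_wait = .TRUE. /

The init_pegrid change above additionally presets collective_wait = .TRUE. whenever the host string identifies an SGI-type system.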
palm/trunk/SOURCE/poisfft.f90
r484 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    !-- Transpose array
    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLTOALL( work(nxl,1,0), sendrecvcount_xy, MPI_REAL, &
                       f_out(1,1,nys_x,1), sendrecvcount_xy, MPI_REAL, &
…
    !-- Transpose array
    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLTOALL( f_in(1,1,nys_x,1), sendrecvcount_xy, MPI_REAL, &
                       work(nxl,1,0), sendrecvcount_xy, MPI_REAL, &
…
    !-- Transpose array
    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLTOALL( work(nys,1,0), sendrecvcount_xy, MPI_REAL, &
                       f_out(1,1,nxl_y,1), sendrecvcount_xy, MPI_REAL, &
…
    !-- Transpose array
    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLTOALL( f_in(1,1,nxl_y,1), sendrecvcount_xy, MPI_REAL, &
                       work(nys,1,0), sendrecvcount_xy, MPI_REAL, &
palm/trunk/SOURCE/poismg.f90
r392 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    maxerror = SUM( r(nzb+1:nzt,nys:nyn,nxl:nxr)**2 )
    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( maxerror, residual_norm, 1, MPI_REAL, MPI_SUM, &
                        comm2d, ierr)
palm/trunk/SOURCE/pres.f90
r484 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…

    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 1, MPI_REAL, &
                        MPI_SUM, comm1dy, ierr )
…

    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( volume_flow_l(2), volume_flow(2), 1, MPI_REAL, &
                        MPI_SUM, comm1dx, ierr )
…
    ENDDO
    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( w_l_l(1), w_l(1), nzt, MPI_REAL, MPI_SUM, comm2d, &
                        ierr )
…

    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( volume_flow_l(1), volume_flow(1), 2, MPI_REAL, &
                        MPI_SUM, comm2d, ierr )
palm/trunk/SOURCE/read_var_list.f90
r601 → r622

    ! Current revisions:
-   ! ----------------- _
-   !
+   ! ------------------
+   ! +collective_wait
    !
    ! Former revisions:
…
    CASE ( 'cloud_physics' )
       READ ( 13 ) cloud_physics
+   CASE ( 'collective_wait' )
+      READ ( 13 ) collective_wait
    CASE ( 'conserve_volume_flow' )
       READ ( 13 ) conserve_volume_flow
palm/trunk/SOURCE/set_particle_attributes.f90
r484 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    #if defined( __parallel )

+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( sums_l(nzb,4,0), sums(nzb,4), nzt+2-nzb, &
                        MPI_REAL, MPI_SUM, comm2d, ierr )
palm/trunk/SOURCE/timestep.f90
r392 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    uv_gtrans_l = uv_gtrans_l / REAL( (nxr-nxl+1)*(nyn-nys+1)*(nzt-nzb) )
    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( uv_gtrans_l, uv_gtrans, 2, MPI_REAL, MPI_SUM, &
                        comm2d, ierr )
…
    !$OMP END PARALLEL
    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( dt_diff_l, dt_diff, 1, MPI_REAL, MPI_MIN, comm2d, &
                        ierr )
…
    !-- Determine the global minumum
    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( dt_plant_canopy_l, dt_plant_canopy, 1, MPI_REAL, &
                        MPI_MIN, comm2d, ierr )
palm/trunk/SOURCE/transpose.f90
r484 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    !-- Transpose array
    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLTOALL( f_inv(nys_x,nzb_x,0), sendrecvcount_xy, MPI_REAL, &
                       work(1), sendrecvcount_xy, MPI_REAL, &
…
    !-- Transpose array
    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLTOALL( work(1), sendrecvcount_zx, MPI_REAL, &
                       f_inv(nys,nxl,1), sendrecvcount_zx, MPI_REAL, &
…
    !-- Transpose array
    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLTOALL( work(1), sendrecvcount_xy, MPI_REAL, &
                       f_inv(nys_x,nzb_x,0), sendrecvcount_xy, MPI_REAL, &
…
    !-- Transpose array
    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLTOALL( f_inv(nxl,1,nys), sendrecvcount_xy, MPI_REAL, &
                       work(1), sendrecvcount_xy, MPI_REAL, &
…
    !-- Transpose array
    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLTOALL( f_inv(nxl_y,nzb_y,0), sendrecvcount_yz, MPI_REAL, &
                       work(1), sendrecvcount_yz, MPI_REAL, &
…
    !-- Transpose array
    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLTOALL( f_inv(nys,nxl,1), sendrecvcount_zx, MPI_REAL, &
                       work(1), sendrecvcount_zx, MPI_REAL, &
…
    !-- Transpose array
    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLTOALL( work(1), sendrecvcount_yz, MPI_REAL, &
                       f_inv(nxl_y,nzb_y,0), sendrecvcount_yz, MPI_REAL, &
…
    !-- Transpose array
    CALL cpu_log( log_point_s(32), 'mpi_alltoall', 'start' )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLTOALL( f_inv(nys,nxl,1), sendrecvcount_zyd, MPI_REAL, &
                       work(1), sendrecvcount_zyd, MPI_REAL, &
palm/trunk/SOURCE/user_statistics.f90
r556 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…
    !-- assign ts_value(dots_num_palm+1:,sr) = ts_value_l directly.
    !#if defined( __parallel )
+   ! IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    ! CALL MPI_ALLREDUCE( ts_value_l(dots_num_palm+1), &
    !                     ts_value(dots_num_palm+1,sr), &
palm/trunk/SOURCE/write_compressed.f90
r484 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! optional barriers included in order to speed up collective operations
    !
    ! Former revisions:
…

    #if defined( __parallel )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( ifieldmax_l, ifieldmax, 1, MPI_INTEGER, MPI_MAX, &
                        comm2d, ierr )
+   IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )
    CALL MPI_ALLREDUCE( ifieldmin_l, ifieldmin, 1, MPI_INTEGER, MPI_MIN, &
                        comm2d, ierr )
palm/trunk/SOURCE/write_var_list.f90
r601 → r622

    ! Current revisions:
    ! -----------------
-   !
+   ! +collective_wait
    !
    ! Former revisions:
…
    WRITE ( 14 ) 'cloud_physics '
    WRITE ( 14 ) cloud_physics
+   WRITE ( 14 ) 'collective_wait '
+   WRITE ( 14 ) collective_wait
    WRITE ( 14 ) 'conserve_volume_flow '
    WRITE ( 14 ) conserve_volume_flow
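The two restart-I/O files above show how the new parameter is persisted across model continuation runs: write_var_list stores a character tag followed by the value, and read_var_list dispatches on that tag when the restart file is read. A self-contained sketch of this tagged-record pattern is given below; the file name and the 30-character tag length are assumptions for illustration, while the unit numbers 13/14 mirror the diffs above.

    !-- Sketch of the tagged-record restart pattern used by write_var_list /
    !-- read_var_list. File name and tag length are assumed, not PALM code.
    PROGRAM restart_tag_sketch

       IMPLICIT NONE

       CHARACTER (LEN=30) ::  tag
       LOGICAL            ::  collective_wait = .TRUE.

    !
    !-- Write the variable as a tag record followed by a value record
       OPEN ( 14, FILE='restart_sketch.bin', FORM='UNFORMATTED' )
       tag = 'collective_wait'
       WRITE ( 14 )  tag
       WRITE ( 14 )  collective_wait
       CLOSE ( 14 )

    !
    !-- Read it back, dispatching on the tag as read_var_list does
       collective_wait = .FALSE.
       OPEN ( 13, FILE='restart_sketch.bin', FORM='UNFORMATTED' )
       READ ( 13 )  tag
       SELECT CASE ( TRIM( tag ) )
          CASE ( 'collective_wait' )
             READ ( 13 )  collective_wait
       END SELECT
       CLOSE ( 13 )

       PRINT*, 'collective_wait restored as ', collective_wait

    END PROGRAM restart_tag_sketch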