- Timestamp: Mar 13, 2014 2:58:42 PM (11 years ago)
- Location: palm/trunk
- Files: 10 edited
palm/trunk/SCRIPTS/.mrun.config.hlrnIII
r1278 → r1308

    %compiler_name      ftn                                                  lccrayb parallel
    %compiler_name_ser  ftn                                                  lccrayb parallel
 -  %cpp_options        -e:Z:-DMPI_REAL=MPI_DOUBLE_PRECISION:-DMPI_2REAL=MPI_2DOUBLE_PRECISION:-D__netcdf:-D__netcdf4:-D__fftw                       lccrayb parallel
 +  %cpp_options        -e:Z:-DMPI_REAL=MPI_DOUBLE_PRECISION:-DMPI_2REAL=MPI_2DOUBLE_PRECISION:-D__netcdf:-D__netcdf4:-D__netcdf4_parallel:-D__fftw  lccrayb parallel
    %mopts              -j:4                                                 lccrayb parallel
    %fopts              -em:-s:real64:-O3:-hnoomp:-hfp3:-hdynamic            lccrayb parallel
    %lopts              -em:-s:real64:-O3:-hnoomp:-hfp3:-hdynamic:-dynamic   lccrayb parallel
 -  %remote_username    <replace by your HLRN-III username>
 +  %remote_username    <replace by your HLRN-III username>                  lccrayb parallel
    %memory             1500                                                 lccrayb parallel
 -  %modules            fftw:cray-netcdf                                     lccrayb parallel
 +  %modules            fftw:cray-hdf5-parallel:cray-netcdf-hdf5parallel     lccrayb parallel
    #
    # HLRN-III Hannover
    …
    %compiler_name      ftn                                                  lccrayh parallel
    %compiler_name_ser  ftn                                                  lccrayh parallel
 -  %cpp_options        -e:Z:-DMPI_REAL=MPI_DOUBLE_PRECISION:-DMPI_2REAL=MPI_2DOUBLE_PRECISION:-D__netcdf:-D__netcdf4:-D__fftw                       lccrayh parallel
 +  %cpp_options        -e:Z:-DMPI_REAL=MPI_DOUBLE_PRECISION:-DMPI_2REAL=MPI_2DOUBLE_PRECISION:-D__netcdf:-D__netcdf4:-D__netcdf4_parallel:-D__fftw  lccrayh parallel
    %mopts              -j:4                                                 lccrayh parallel
    %fopts              -em:-s:real64:-O3:-hnoomp:-hfp3:-hdynamic            lccrayh parallel
    %lopts              -em:-s:real64:-O3:-hnoomp:-hfp3:-hdynamic:-dynamic   lccrayh parallel
 -  %remote_username    <replace by your HLRN-III username>
 +  %remote_username    <replace by your HLRN-III username>                  lccrayh parallel
    %memory             1500                                                 lccrayh parallel
 -  %modules            fftw:cray-netcdf                                     lccrayh parallel
 +  %modules            fftw:cray-hdf5-parallel:cray-netcdf-hdf5parallel     lccrayh parallel
    #
    %write_binary true restart
    …
    IC:[[ \$localhost = lccrayb ]] && ulimit -v unlimited
    IC:[[ \$localhost = lccrayb ]] && ulimit -s unlimited
 +  IC:[[ \$localhost = lccrayb ]] && export MPICH_MPIIO_HINTS_DISPLAY=1
 +  IC:[[ \$localhost = lccrayb ]] && export MPICH_MPIIO_STATS=1
 +  IC:[[ \$localhost = lccrayb ]] && E8="=8"
 +  IC:[[ \$localhost = lccrayb ]] && export MPICH_MPIIO_HINTS="*DATA_**:striping_factor"\$E8
    IC:[[ \$localhost = lccrayh ]] && export APRUN_XFER_LIMITS=1
    IC:[[ \$localhost = lccrayh ]] && export APRUN_XFER_STACK_LIMIT=0
    IC:[[ \$localhost = lccrayh ]] && ulimit -v unlimited
    IC:[[ \$localhost = lccrayh ]] && ulimit -s unlimited
 +  IC:[[ \$localhost = lccrayh ]] && export MPICH_MPIIO_HINTS_DISPLAY=1
 +  IC:[[ \$localhost = lccrayh ]] && export MPICH_MPIIO_STATS=1
 +  IC:[[ \$localhost = lccrayh ]] && E8="=8"
 +  IC:[[ \$localhost = lccrayh ]] && export MPICH_MPIIO_HINTS="*DATA_**:striping_factor"\$E8
    #
    #----------------------------------------------------------------------------
palm/trunk/SOURCE/check_parameters.f90
r1300 r1308 20 20 ! Current revisions: 21 21 ! ----------------- 22 ! 22 ! +netcdf_data_format_save 23 ! Calculate fixed number of output time levels for parallel netcdf output. 24 ! For masked data, parallel netcdf output is not tested so far, hence 25 ! netcdf_data_format is switched back to non-paralell output. 23 26 ! 24 27 ! Former revisions: … … 337 340 CHARACTER (LEN=100) :: action 338 341 339 INTEGER :: i, ilen, iremote = 0, j, k, kk, position, prec 342 INTEGER :: i, ilen, iremote = 0, j, k, kk, netcdf_data_format_save, & 343 position, prec 340 344 LOGICAL :: found, ldum 341 345 REAL :: gradient, remote = 0.0, simulation_time_since_reference … … 3265 3269 ! 3266 3270 !-- Generate masks for masked data output 3271 !-- Parallel netcdf output is not tested so far for masked data, hence 3272 !-- netcdf_data_format is switched back to non-paralell output. 3273 netcdf_data_format_save = netcdf_data_format 3274 IF ( netcdf_data_format > 4 ) THEN 3275 IF ( netcdf_data_format == 5 ) netcdf_data_format = 3 3276 IF ( netcdf_data_format == 6 ) netcdf_data_format = 4 3277 message_string = 'netCDF file formats '// & 3278 '5 (parallel netCDF 4) and ' // & 3279 '6 (parallel netCDF 4 Classic model) '// & 3280 '&are currently not supported (not yet tested) ' // & 3281 'for masked data.&Using respective non-parallel' // & 3282 ' output for masked data.' 3283 CALL message( 'check_parameters', 'PA0383', 0, 0, 0, 6, 0 ) 3284 ENDIF 3267 3285 CALL init_masks 3286 netcdf_data_format = netcdf_data_format_save 3268 3287 ENDIF 3269 3288 … … 3294 3313 ENDIF 3295 3314 #endif 3315 3316 ! 3317 !-- Calculate fixed number of output time levels for parallel netcdf output. 3318 !-- The time dimension has to be limited for paralell output, otherwise the 3319 !-- performance drops significantly. 3320 IF ( netcdf_output ) THEN 3321 IF ( netcdf_data_format > 4 ) THEN 3322 ntdim_3d(0) = INT( ( end_time - skip_time_do3d ) / dt_do3d ) 3323 IF ( do3d_at_begin ) ntdim_3d(0) = ntdim_3d(0) + 1 3324 ntdim_3d(1) = INT( ( end_time - skip_time_data_output_av ) & 3325 / dt_data_output_av ) 3326 ntdim_2d_xy(0) = INT( ( end_time - skip_time_do2d_xy ) / dt_do2d_xy ) 3327 ntdim_2d_xz(0) = INT( ( end_time - skip_time_do2d_xz ) / dt_do2d_xz ) 3328 ntdim_2d_yz(0) = INT( ( end_time - skip_time_do2d_yz ) / dt_do2d_yz ) 3329 IF ( do2d_at_begin ) THEN 3330 ntdim_2d_xy(0) = ntdim_2d_xy(0) + 1 3331 ntdim_2d_xz(0) = ntdim_2d_xz(0) + 1 3332 ntdim_2d_yz(0) = ntdim_2d_yz(0) + 1 3333 ENDIF 3334 ntdim_2d_xy(1) = ntdim_3d(1) 3335 ntdim_2d_xz(1) = ntdim_3d(1) 3336 ntdim_2d_yz(1) = ntdim_3d(1) 3337 ENDIF 3338 ENDIF 3296 3339 3297 3340 #if ! defined( __check ) -
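The new block in check_parameters.f90 fixes the length of the netCDF time dimension in advance: the number of output time levels is derived from end_time, the skip_time_* parameters and the output intervals, with one extra level if output at the beginning of the run is requested; time-averaged output is counted analogously from dt_data_output_av. Below is a minimal, standalone sketch of that arithmetic; the parameter values are assumptions chosen only to illustrate the calculation and are not taken from the changeset.

! Minimal, standalone sketch of the time-level count that later sizes the
! limited netCDF time dimension. All parameter values below are assumed
! for illustration only.
PROGRAM ntdim_sketch

   IMPLICIT NONE

   REAL    ::  end_time       = 3600.0   ! assumed run length          [s]
   REAL    ::  skip_time_do3d =    0.0   ! assumed start of 3d output  [s]
   REAL    ::  dt_do3d        =  600.0   ! assumed 3d output interval  [s]
   LOGICAL ::  do3d_at_begin  = .TRUE.   ! extra output level at t=0
   INTEGER ::  ntdim_3d

!
!-- Same arithmetic as in the new check_parameters block
   ntdim_3d = INT( ( end_time - skip_time_do3d ) / dt_do3d )
   IF ( do3d_at_begin )  ntdim_3d = ntdim_3d + 1

   PRINT*, 'fixed number of 3d output time levels: ', ntdim_3d   ! 7 here

END PROGRAM ntdim_sketch

With these assumed settings the run provides seven 3d output time levels, which is the value that subsequently sizes the 'time' dimension in netcdf.f90.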
palm/trunk/SOURCE/data_output_2d.f90
r1116 r1308 20 20 ! Current revisions: 21 21 ! ----------------- 22 ! 22 ! +local_2d_sections, local_2d_sections_l, ns 23 ! Check, if the limit of the time dimension is exceeded for parallel output 24 ! To increase the performance for parallel output, the following is done: 25 ! - Update of time axis is only done by PE0 26 ! - Cross sections are first stored on a local array and are written 27 ! collectively to the output file by all PEs. 23 28 ! 24 29 ! Former revisions: … … 143 148 CHARACTER (LEN=25) :: section_chr 144 149 CHARACTER (LEN=50) :: rtext 145 INTEGER :: av, ngp, file_id, i, if, is, iis, j, k, l, layer_xy, n, psi, &146 s, sender, &150 INTEGER :: av, ngp, file_id, i, if, is, iis, j, k, l, layer_xy, n, ns, & 151 psi, s, sender, & 147 152 ind(4) 148 153 LOGICAL :: found, resorted, two_d … … 150 155 REAL, DIMENSION(:), ALLOCATABLE :: level_z 151 156 REAL, DIMENSION(:,:), ALLOCATABLE :: local_2d, local_2d_l 152 REAL, DIMENSION(:,:,:), ALLOCATABLE :: local_pf 157 REAL, DIMENSION(:,:,:), ALLOCATABLE :: local_pf, local_2d_sections, & 158 local_2d_sections_l 153 159 #if defined( __parallel ) 154 160 REAL, DIMENSION(:,:), ALLOCATABLE :: total_2d … … 157 163 158 164 NAMELIST /LOCAL/ rtext 159 160 CALL cpu_log (log_point(3),'data_output_2d','start')161 165 162 166 ! … … 166 170 IF ( mode == 'xz' .AND. .NOT. data_output_xz(av) ) RETURN 167 171 IF ( mode == 'yz' .AND. .NOT. data_output_yz(av) ) RETURN 172 ! 173 !-- For parallel netcdf output the time axis must be limited. Return, if this 174 !-- limit is exceeded. This could be the case, if the simulated time exceeds 175 !-- the given end time by the length of the given output interval. 176 IF ( netcdf_data_format > 4 ) THEN 177 IF ( mode == 'xy' .AND. do2d_xy_time_count(av) + 1 > & 178 ntdim_2d_xy(av) ) THEN 179 WRITE ( message_string, * ) 'Output of xy cross-sections is not ', & 180 'given at t=', simulated_time, '&because the', & 181 ' maximum number of output time levels is exceeded.' 182 CALL message( 'data_output_2d', 'PA0384', 0, 1, 0, 6, 0 ) 183 RETURN 184 ENDIF 185 IF ( mode == 'xz' .AND. do2d_xz_time_count(av) + 1 > & 186 ntdim_2d_xz(av) ) THEN 187 WRITE ( message_string, * ) 'Output of xz cross-sections is not ', & 188 'given at t=', simulated_time, '&because the', & 189 ' maximum number of output time levels is exceeded.' 190 CALL message( 'data_output_2d', 'PA0385', 0, 1, 0, 6, 0 ) 191 RETURN 192 ENDIF 193 IF ( mode == 'yz' .AND. do2d_yz_time_count(av) + 1 > & 194 ntdim_2d_yz(av) ) THEN 195 WRITE ( message_string, * ) 'Output of yz cross-sections is not ', & 196 'given at t=', simulated_time, '&because the', & 197 ' maximum number of output time levels is exceeded.' 198 CALL message( 'data_output_2d', 'PA0386', 0, 1, 0, 6, 0 ) 199 RETURN 200 ENDIF 201 ENDIF 202 203 CALL cpu_log (log_point(3),'data_output_2d','start') 168 204 169 205 two_d = .FALSE. ! local variable to distinguish between output of pure 2D … … 178 214 s = 1 179 215 ALLOCATE( level_z(nzb:nzt+1), local_2d(nxlg:nxrg,nysg:nyng) ) 216 217 IF ( netcdf_data_format > 4 ) THEN 218 ns = 1 219 DO WHILE ( section(ns,s) /= -9999 .AND. ns <= 100 ) 220 ns = ns + 1 221 ENDDO 222 ns = ns - 1 223 ALLOCATE( local_2d_sections(nxlg:nxrg,nysg:nyng,1:ns) ) 224 local_2d_sections = 0.0 225 ENDIF 180 226 181 227 ! … … 201 247 ALLOCATE( local_2d(nxlg:nxrg,nzb:nzt+1) ) 202 248 249 IF ( netcdf_data_format > 4 ) THEN 250 ns = 1 251 DO WHILE ( section(ns,s) /= -9999 .AND. 
ns <= 100 ) 252 ns = ns + 1 253 ENDDO 254 ns = ns - 1 255 ALLOCATE( local_2d_sections(nxlg:nxrg,1:ns,nzb:nzt+1) ) 256 ALLOCATE( local_2d_sections_l(nxlg:nxrg,1:ns,nzb:nzt+1) ) 257 local_2d_sections = 0.0; local_2d_sections_l = 0.0 258 ENDIF 259 203 260 ! 204 261 !-- Parallel netCDF4/HDF5 output is done on all PEs, all other on PE0 only … … 220 277 221 278 CASE ( 'yz' ) 222 223 279 s = 3 224 280 ALLOCATE( local_2d(nysg:nyng,nzb:nzt+1) ) 281 282 IF ( netcdf_data_format > 4 ) THEN 283 ns = 1 284 DO WHILE ( section(ns,s) /= -9999 .AND. ns <= 100 ) 285 ns = ns + 1 286 ENDDO 287 ns = ns - 1 288 ALLOCATE( local_2d_sections(1:ns,nysg:nyng,nzb:nzt+1) ) 289 ALLOCATE( local_2d_sections_l(1:ns,nysg:nyng,nzb:nzt+1) ) 290 local_2d_sections = 0.0; local_2d_sections_l = 0.0 291 ENDIF 225 292 226 293 ! … … 243 310 244 311 CASE DEFAULT 245 246 312 message_string = 'unknown cross-section: ' // TRIM( mode ) 247 313 CALL message( 'data_output_2d', 'PA0180', 1, 2, 0, 6, 0 ) … … 799 865 800 866 ! 801 !-- Update the netCDF xy cross section time axis 802 IF ( myid == 0 .OR. netcdf_data_format > 4 ) THEN 803 IF ( simulated_time /= do2d_xy_last_time(av) ) THEN 804 do2d_xy_time_count(av) = do2d_xy_time_count(av) + 1 805 do2d_xy_last_time(av) = simulated_time 867 !-- Update the netCDF xy cross section time axis. 868 !-- In case of parallel output, this is only done by PE0 869 !-- to increase the performance. 870 IF ( simulated_time /= do2d_xy_last_time(av) ) THEN 871 do2d_xy_time_count(av) = do2d_xy_time_count(av) + 1 872 do2d_xy_last_time(av) = simulated_time 873 IF ( myid == 0 ) THEN 806 874 IF ( ( .NOT. data_output_2d_on_each_pe .AND. & 807 875 netcdf_output ) .OR. netcdf_data_format > 4 ) & … … 846 914 ! 847 915 !-- Parallel output in netCDF4/HDF5 format. 848 !-- Do not output redundant ghost point data except for the849 !-- boundaries of the total domain.850 916 IF ( two_d ) THEN 851 917 iis = 1 … … 855 921 856 922 #if defined( __netcdf ) 857 IF ( nxr == nx .AND. nyn /= ny ) THEN 858 nc_stat = NF90_PUT_VAR( id_set_xy(av), & 859 id_var_do2d(av,if), & 860 local_2d(nxl:nxr+1,nys:nyn), & 861 start = (/ nxl+1, nys+1, iis, & 862 do2d_xy_time_count(av) /), & 863 count = (/ nxr-nxl+2, & 864 nyn-nys+1, 1, 1 /) ) 865 ELSEIF ( nxr /= nx .AND. nyn == ny ) THEN 866 nc_stat = NF90_PUT_VAR( id_set_xy(av), & 867 id_var_do2d(av,if), & 868 local_2d(nxl:nxr,nys:nyn+1), & 869 start = (/ nxl+1, nys+1, iis, & 870 do2d_xy_time_count(av) /), & 871 count = (/ nxr-nxl+1, & 872 nyn-nys+2, 1, 1 /) ) 873 ELSEIF ( nxr == nx .AND. nyn == ny ) THEN 874 nc_stat = NF90_PUT_VAR( id_set_xy(av), & 875 id_var_do2d(av,if), & 876 local_2d(nxl:nxr+1,nys:nyn+1),& 877 start = (/ nxl+1, nys+1, iis, & 878 do2d_xy_time_count(av) /), & 879 count = (/ nxr-nxl+2, & 880 nyn-nys+2, 1, 1 /) ) 881 ELSE 882 nc_stat = NF90_PUT_VAR( id_set_xy(av), & 883 id_var_do2d(av,if), & 884 local_2d(nxl:nxr,nys:nyn), & 885 start = (/ nxl+1, nys+1, iis, & 886 do2d_xy_time_count(av) /), & 887 count = (/ nxr-nxl+1, & 888 nyn-nys+1, 1, 1 /) ) 889 ENDIF 890 891 CALL handle_netcdf_error( 'data_output_2d', 55 ) 923 ! 924 !-- For parallel output, all cross sections are first stored 925 !-- here on a local array and will be written to the output 926 !-- file afterwards to increase the performance. 927 DO i = nxlg, nxrg 928 DO j = nysg, nyng 929 local_2d_sections(i,j,iis) = local_2d(i,j) 930 ENDDO 931 ENDDO 892 932 #endif 893 933 ELSE … … 1047 1087 CASE ( 'xz' ) 1048 1088 ! 1049 !-- Update the netCDF xz cross section time axis 1050 IF ( myid == 0 .OR. 
netcdf_data_format > 4 ) THEN 1051 1052 IF ( simulated_time /= do2d_xz_last_time(av) ) THEN 1053 do2d_xz_time_count(av) = do2d_xz_time_count(av) + 1 1054 do2d_xz_last_time(av) = simulated_time 1089 !-- Update the netCDF xz cross section time axis. 1090 !-- In case of parallel output, this is only done by PE0 1091 !-- to increase the performance. 1092 IF ( simulated_time /= do2d_xz_last_time(av) ) THEN 1093 do2d_xz_time_count(av) = do2d_xz_time_count(av) + 1 1094 do2d_xz_last_time(av) = simulated_time 1095 IF ( myid == 0 ) THEN 1055 1096 IF ( ( .NOT. data_output_2d_on_each_pe .AND. & 1056 1097 netcdf_output ) .OR. netcdf_data_format > 4 ) & … … 1066 1107 ENDIF 1067 1108 ENDIF 1068 1069 1109 ENDIF 1070 1110 … … 1114 1154 IF ( netcdf_output .AND. netcdf_data_format > 4 ) THEN 1115 1155 ! 1116 !-- ATTENTION: The following lines are a workaround, because1117 !-- independet output does not work with the1118 !-- current netCDF4 installation. Therefore, data1119 !-- are transferred from PEs having the cross1120 !-- sections to other PEs along y having no cross1121 !-- section data. Some of these data are the1122 !-- output.1123 !-- BEGIN WORKAROUND---------------------------------------1124 IF ( npey /= 1 .AND. section(is,s) /= -1) THEN1125 ALLOCATE( local_2d_l(nxlg:nxrg,nzb:nzt+1) )1126 local_2d_l = 0.01127 IF ( section(is,s) >= nys .AND. section(is,s) <= nyn )&1128 THEN1129 local_2d_l = local_2d1130 ENDIF1131 #if defined( __parallel )1132 !1133 !-- Distribute data over all PEs along y1134 ngp = ( nxrg-nxlg+1 ) * ( nzt-nzb+2 )1135 IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )1136 CALL MPI_ALLREDUCE( local_2d_l(nxlg,nzb), &1137 local_2d(nxlg,nzb), ngp, &1138 MPI_REAL, MPI_SUM, comm1dy, ierr )1139 #else1140 local_2d = local_2d_l1141 #endif1142 DEALLOCATE( local_2d_l )1143 ENDIF1144 !-- END WORKAROUND-----------------------------------------1145 1146 !1147 1156 !-- Output in netCDF4/HDF5 format. 1148 1157 !-- Output only on those PEs where the respective cross … … 1152 1161 section(is,s) <= nyn ) .OR. & 1153 1162 ( section(is,s) == -1 .AND. myidy == 0 ) ) THEN 1154 !1155 !-- Do not output redundant ghost point data except for the1156 !-- boundaries of the total domain.1157 1163 #if defined( __netcdf ) 1158 IF ( nxr == nx ) THEN 1159 nc_stat = NF90_PUT_VAR( id_set_xz(av), & 1160 id_var_do2d(av,if), & 1161 local_2d(nxl:nxr+1,nzb:nzt+1), & 1162 start = (/ nxl+1, is, 1, & 1163 do2d_xz_time_count(av) /), & 1164 count = (/ nxr-nxl+2, 1, & 1165 nzt+2, 1 /) ) 1166 ELSE 1167 nc_stat = NF90_PUT_VAR( id_set_xz(av), & 1168 id_var_do2d(av,if), & 1169 local_2d(nxl:nxr,nzb:nzt+1), & 1170 start = (/ nxl+1, is, 1, & 1171 do2d_xz_time_count(av) /), & 1172 count = (/ nxr-nxl+1, 1, & 1173 nzt+2, 1 /) ) 1174 ENDIF 1175 1176 CALL handle_netcdf_error( 'data_output_2d', 57 ) 1177 1178 ELSE 1179 ! 1180 !-- Output on other PEs. Only one point is output!! 1181 !-- ATTENTION: This is a workaround (see above)!! 1182 IF ( npey /= 1 ) THEN 1183 nc_stat = NF90_PUT_VAR( id_set_xz(av), & 1184 id_var_do2d(av,if), & 1185 local_2d(nxl:nxl,nzb:nzb), & 1186 start = (/ nxl+1, is, 1, & 1187 do2d_xz_time_count(av) /), & 1188 count = (/ 1, 1, 1, 1 /) ) 1189 CALL handle_netcdf_error( 'data_output_2d', 451 ) 1190 ENDIF 1164 ! 1165 !-- For parallel output, all cross sections are first 1166 !-- stored here on a local array and will be written to the 1167 !-- output file afterwards to increase the performance. 
1168 DO i = nxlg, nxrg 1169 DO k = nzb, nzt+1 1170 local_2d_sections_l(i,is,k) = local_2d(i,k) 1171 ENDDO 1172 ENDDO 1191 1173 #endif 1192 1174 ENDIF … … 1357 1339 CASE ( 'yz' ) 1358 1340 ! 1359 !-- Update the netCDF yz cross section time axis 1360 IF ( myid == 0 .OR. netcdf_data_format > 4 ) THEN 1361 1362 IF ( simulated_time /= do2d_yz_last_time(av) ) THEN 1363 do2d_yz_time_count(av) = do2d_yz_time_count(av) + 1 1364 do2d_yz_last_time(av) = simulated_time 1341 !-- Update the netCDF yz cross section time axis. 1342 !-- In case of parallel output, this is only done by PE0 1343 !-- to increase the performance. 1344 IF ( simulated_time /= do2d_yz_last_time(av) ) THEN 1345 do2d_yz_time_count(av) = do2d_yz_time_count(av) + 1 1346 do2d_yz_last_time(av) = simulated_time 1347 IF ( myid == 0 ) THEN 1365 1348 IF ( ( .NOT. data_output_2d_on_each_pe .AND. & 1366 1349 netcdf_output ) .OR. netcdf_data_format > 4 ) & … … 1376 1359 ENDIF 1377 1360 ENDIF 1378 1379 ENDIF 1361 ENDIF 1362 1380 1363 ! 1381 1364 !-- If required, carry out averaging along x … … 1423 1406 IF ( netcdf_output .AND. netcdf_data_format > 4 ) THEN 1424 1407 ! 1425 !-- ATTENTION: The following lines are a workaround, because1426 !-- independet output does not work with the1427 !-- current netCDF4 installation. Therefore, data1428 !-- are transferred from PEs having the cross1429 !-- sections to other PEs along y having no cross1430 !-- section data. Some of these data are the1431 !-- output.1432 !-- BEGIN WORKAROUND---------------------------------------1433 IF ( npex /= 1 .AND. section(is,s) /= -1) THEN1434 ALLOCATE( local_2d_l(nysg:nyng,nzb:nzt+1) )1435 local_2d_l = 0.01436 IF ( section(is,s) >= nxl .AND. section(is,s) <= nxr )&1437 THEN1438 local_2d_l = local_2d1439 ENDIF1440 #if defined( __parallel )1441 !1442 !-- Distribute data over all PEs along x1443 ngp = ( nyng-nysg+1 ) * ( nzt-nzb + 2 )1444 IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr )1445 CALL MPI_ALLREDUCE( local_2d_l(nysg,nzb), &1446 local_2d(nysg,nzb), ngp, &1447 MPI_REAL, MPI_SUM, comm1dx, ierr )1448 #else1449 local_2d = local_2d_l1450 #endif1451 DEALLOCATE( local_2d_l )1452 ENDIF1453 !-- END WORKAROUND-----------------------------------------1454 1455 !1456 1408 !-- Output in netCDF4/HDF5 format. 1457 1409 !-- Output only on those PEs where the respective cross … … 1461 1413 section(is,s) <= nxr ) .OR. & 1462 1414 ( section(is,s) == -1 .AND. myidx == 0 ) ) THEN 1463 !1464 !-- Do not output redundant ghost point data except for the1465 !-- boundaries of the total domain.1466 1415 #if defined( __netcdf ) 1467 IF ( nyn == ny ) THEN 1468 nc_stat = NF90_PUT_VAR( id_set_yz(av), & 1469 id_var_do2d(av,if), & 1470 local_2d(nys:nyn+1,nzb:nzt+1), & 1471 start = (/ is, nys+1, 1, & 1472 do2d_yz_time_count(av) /), & 1473 count = (/ 1, nyn-nys+2, & 1474 nzt+2, 1 /) ) 1475 ELSE 1476 nc_stat = NF90_PUT_VAR( id_set_yz(av), & 1477 id_var_do2d(av,if), & 1478 local_2d(nys:nyn,nzb:nzt+1), & 1479 start = (/ is, nys+1, 1, & 1480 do2d_yz_time_count(av) /), & 1481 count = (/ 1, nyn-nys+1, & 1482 nzt+2, 1 /) ) 1483 ENDIF 1484 1485 CALL handle_netcdf_error( 'data_output_2d', 60 ) 1486 1487 ELSE 1488 ! 1489 !-- Output on other PEs. Only one point is output!! 1490 !-- ATTENTION: This is a workaround (see above)!! 
1491 IF ( npex /= 1 ) THEN 1492 nc_stat = NF90_PUT_VAR( id_set_yz(av), & 1493 id_var_do2d(av,if), & 1494 local_2d(nys:nys,nzb:nzb), & 1495 start = (/ is, nys+1, 1, & 1496 do2d_yz_time_count(av) /), & 1497 count = (/ 1, 1, 1, 1 /) ) 1498 CALL handle_netcdf_error( 'data_output_2d', 452 ) 1499 ENDIF 1416 ! 1417 !-- For parallel output, all cross sections are first 1418 !-- stored here on a local array and will be written to the 1419 !-- output file afterwards to increase the performance. 1420 DO j = nysg, nyng 1421 DO k = nzb, nzt+1 1422 local_2d_sections_l(is,j,k) = local_2d(j,k) 1423 ENDDO 1424 ENDDO 1500 1425 #endif 1501 1426 ENDIF … … 1669 1594 ENDDO loop1 1670 1595 1596 ! 1597 !-- For parallel output, all data were collected before on a local array 1598 !-- and are written now to the netcdf file. This must be done to increase 1599 !-- the performance of the parallel output. 1600 #if defined( __netcdf ) 1601 IF ( netcdf_output .AND. netcdf_data_format > 4 ) THEN 1602 1603 SELECT CASE ( mode ) 1604 1605 CASE ( 'xy' ) 1606 IF ( two_d ) THEN 1607 iis = 1 1608 ELSE 1609 iis = is-1 1610 ENDIF 1611 ! 1612 !-- Do not output redundant ghost point data except for the 1613 !-- boundaries of the total domain. 1614 IF ( nxr == nx .AND. nyn /= ny ) THEN 1615 nc_stat = NF90_PUT_VAR( id_set_xy(av), & 1616 id_var_do2d(av,if), & 1617 local_2d_sections(nxl:nxr+1, & 1618 nys:nyn,1:ns), & 1619 start = (/ nxl+1, nys+1, 1, & 1620 do2d_xy_time_count(av) /), & 1621 count = (/ nxr-nxl+2, & 1622 nyn-nys+1, ns, 1 & 1623 /) ) 1624 ELSEIF ( nxr /= nx .AND. nyn == ny ) THEN 1625 nc_stat = NF90_PUT_VAR( id_set_xy(av), & 1626 id_var_do2d(av,if), & 1627 local_2d_sections(nxl:nxr, & 1628 nys:nyn+1,1:ns), & 1629 start = (/ nxl+1, nys+1, 1, & 1630 do2d_xy_time_count(av) /), & 1631 count = (/ nxr-nxl+1, & 1632 nyn-nys+2, ns, 1 & 1633 /) ) 1634 ELSEIF ( nxr == nx .AND. nyn == ny ) THEN 1635 nc_stat = NF90_PUT_VAR( id_set_xy(av), & 1636 id_var_do2d(av,if), & 1637 local_2d_sections(nxl:nxr+1, & 1638 nys:nyn+1,1:ns), & 1639 start = (/ nxl+1, nys+1, 1, & 1640 do2d_xy_time_count(av) /), & 1641 count = (/ nxr-nxl+2, & 1642 nyn-nys+2, ns, 1 & 1643 /) ) 1644 ELSE 1645 nc_stat = NF90_PUT_VAR( id_set_xy(av), & 1646 id_var_do2d(av,if), & 1647 local_2d_sections(nxl:nxr, & 1648 nys:nyn,1:ns), & 1649 start = (/ nxl+1, nys+1, 1, & 1650 do2d_xy_time_count(av) /), & 1651 count = (/ nxr-nxl+1, & 1652 nyn-nys+1, ns, 1 & 1653 /) ) 1654 ENDIF 1655 1656 CALL handle_netcdf_error( 'data_output_2d', 55 ) 1657 1658 CASE ( 'xz' ) 1659 ! 1660 !-- First, all PEs get the information of all cross-sections. 1661 !-- Then the data are written to the output file by all PEs 1662 !-- while NF90_COLLECTIVE is set in subroutine 1663 !-- define_netcdf_header. Although redundant information are 1664 !-- written to the output file in that case, the performance 1665 !-- is significantly better compared to the case where only 1666 !-- the first row of PEs in x-direction (myidx = 0) is given 1667 !-- the output while NF90_INDEPENDENT is set. 1668 IF ( npey /= 1 ) THEN 1669 1670 #if defined( __parallel ) 1671 ! 1672 !-- Distribute data over all PEs along y 1673 ngp = ( nxrg-nxlg+1 ) * ( nzt-nzb+2 ) * ns 1674 IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr ) 1675 CALL MPI_ALLREDUCE( local_2d_sections_l(nxlg,1,nzb), & 1676 local_2d_sections(nxlg,1,nzb), & 1677 ngp, MPI_REAL, MPI_SUM, comm1dy, & 1678 ierr ) 1679 #else 1680 local_2d_sections = local_2d_sections_l 1681 #endif 1682 ENDIF 1683 ! 
1684 !-- Do not output redundant ghost point data except for the 1685 !-- boundaries of the total domain. 1686 IF ( nxr == nx ) THEN 1687 nc_stat = NF90_PUT_VAR( id_set_xz(av), & 1688 id_var_do2d(av,if), & 1689 local_2d_sections(nxl:nxr+1,1:ns, & 1690 nzb:nzt+1), & 1691 start = (/ nxl+1, 1, 1, & 1692 do2d_xz_time_count(av) /), & 1693 count = (/ nxr-nxl+2, ns, nzt+2, & 1694 1 /) ) 1695 ELSE 1696 nc_stat = NF90_PUT_VAR( id_set_xz(av), & 1697 id_var_do2d(av,if), & 1698 local_2d_sections(nxl:nxr,1:ns, & 1699 nzb:nzt+1), & 1700 start = (/ nxl+1, 1, 1, & 1701 do2d_xz_time_count(av) /), & 1702 count = (/ nxr-nxl+1, ns, nzt+2, & 1703 1 /) ) 1704 ENDIF 1705 1706 CALL handle_netcdf_error( 'data_output_2d', 57 ) 1707 1708 CASE ( 'yz' ) 1709 ! 1710 !-- First, all PEs get the information of all cross-sections. 1711 !-- Then the data are written to the output file by all PEs 1712 !-- while NF90_COLLECTIVE is set in subroutine 1713 !-- define_netcdf_header. Although redundant information are 1714 !-- written to the output file in that case, the performance 1715 !-- is significantly better compared to the case where only 1716 !-- the first row of PEs in y-direction (myidy = 0) is given 1717 !-- the output while NF90_INDEPENDENT is set. 1718 IF ( npex /= 1 ) THEN 1719 1720 #if defined( __parallel ) 1721 ! 1722 !-- Distribute data over all PEs along x 1723 ngp = ( nyng-nysg+1 ) * ( nzt-nzb + 2 ) * ns 1724 IF ( collective_wait ) CALL MPI_BARRIER( comm2d, ierr ) 1725 CALL MPI_ALLREDUCE( local_2d_sections_l(1,nysg,nzb), & 1726 local_2d_sections(1,nysg,nzb), & 1727 ngp, MPI_REAL, MPI_SUM, comm1dx, & 1728 ierr ) 1729 #else 1730 local_2d_sections = local_2d_sections_l 1731 #endif 1732 ENDIF 1733 ! 1734 !-- Do not output redundant ghost point data except for the 1735 !-- boundaries of the total domain. 1736 IF ( nyn == ny ) THEN 1737 nc_stat = NF90_PUT_VAR( id_set_yz(av), & 1738 id_var_do2d(av,if), & 1739 local_2d_sections(1:ns, & 1740 nys:nyn+1,nzb:nzt+1), & 1741 start = (/ 1, nys+1, 1, & 1742 do2d_yz_time_count(av) /), & 1743 count = (/ ns, nyn-nys+2, & 1744 nzt+2, 1 /) ) 1745 ELSE 1746 nc_stat = NF90_PUT_VAR( id_set_yz(av), & 1747 id_var_do2d(av,if), & 1748 local_2d_sections(1:ns,nys:nyn, & 1749 nzb:nzt+1), & 1750 start = (/ 1, nys+1, 1, & 1751 do2d_yz_time_count(av) /), & 1752 count = (/ ns, nyn-nys+1, & 1753 nzt+2, 1 /) ) 1754 ENDIF 1755 1756 CALL handle_netcdf_error( 'data_output_2d', 60 ) 1757 1758 CASE DEFAULT 1759 message_string = 'unknown cross-section: ' // TRIM( mode ) 1760 CALL message( 'data_output_2d', 'PA0180', 1, 2, 0, 6, 0 ) 1761 1762 END SELECT 1763 1764 ENDIF 1765 1671 1766 ENDIF 1672 1767 … … 1680 1775 !-- Deallocate temporary arrays. 1681 1776 IF ( ALLOCATED( level_z ) ) DEALLOCATE( level_z ) 1682 DEALLOCATE( local_pf, local_2d ) 1777 IF ( netcdf_data_format > 4 ) THEN 1778 DEALLOCATE( local_pf, local_2d, local_2d_sections ) 1779 IF( mode == 'xz' .OR. mode == 'yz' ) DEALLOCATE( local_2d_sections_l ) 1780 ENDIF 1683 1781 #if defined( __parallel ) 1684 1782 IF ( .NOT. data_output_2d_on_each_pe .AND. myid == 0 ) THEN … … 1704 1802 ENDIF 1705 1803 1706 1707 1804 CALL cpu_log (log_point(3),'data_output_2d','stop','nobarrier') 1708 1805 -
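In data_output_2d.f90 the per-PE writes (and the former single-point workaround) are replaced by a two-step pattern: every PE stores the cross sections it owns in local_2d_sections_l, an MPI_ALLREDUCE over one row of PEs gives all of them the complete sections, and the sections are then written with a single collective NF90_PUT_VAR per variable. The following self-contained sketch shows only the gather step; the array sizes, the section ownership and the duplicated row communicator are assumptions made for the illustration, not the PALM domain decomposition.

! Self-contained sketch of the gather step: every PE fills only the cross
! sections it owns in an array initialised to zero, so a sum over the PEs
! of one row reproduces the complete sections everywhere and the following
! netCDF write can be collective.
PROGRAM allreduce_sections_sketch

   USE MPI

   IMPLICIT NONE

   INTEGER, PARAMETER ::  nx = 8, nz = 4, ns = 2    ! assumed section sizes
   INTEGER            ::  ierr, myid, numprocs, comm_row, ngp
   REAL, DIMENSION(0:nx,1:ns,0:nz) ::  local_sections_l, local_sections

   CALL MPI_INIT( ierr )
   CALL MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
   CALL MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )
!
!-- Here all PEs form a single "row"; in PALM this would be comm1dy/comm1dx
   CALL MPI_COMM_DUP( MPI_COMM_WORLD, comm_row, ierr )

!
!-- Only the PE that "owns" a section fills it, all others keep zero ...
   local_sections_l = 0.0
   IF ( myid == MOD( 1, numprocs ) )  local_sections_l(:,1,:) = 1.0
   IF ( myid == MOD( 2, numprocs ) )  local_sections_l(:,2,:) = 2.0

!
!-- ... so the sum over the row gives the complete data on every PE
   ngp = ( nx+1 ) * ns * ( nz+1 )
   CALL MPI_ALLREDUCE( local_sections_l, local_sections, ngp, MPI_REAL,    &
                       MPI_SUM, comm_row, ierr )

   IF ( myid == 0 )  PRINT*, 'section 1 value: ', local_sections(0,1,0)

   CALL MPI_FINALIZE( ierr )

END PROGRAM allreduce_sections_sketch

In the changeset itself, this reduced array is then passed to one NF90_PUT_VAR per variable and mode, with NF90_COLLECTIVE set for the variable in define_netcdf_header.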
palm/trunk/SOURCE/data_output_3d.f90
r1245 → r1308

  ! Current revisions:
  ! ------------------
- !
+ ! Check, if the limit of the time dimension is exceeded for parallel output
+ ! To increase the performance for parallel output, the following is done:
+ ! - Update of time axis is only done by PE0
  !
  ! Former revisions:
  …
  !-- Return, if nothing to output
     IF ( do3d_no(av) == 0 )  RETURN
+ !
+ !-- For parallel netcdf output the time axis must be limited. Return, if this
+ !-- limit is exceeded. This could be the case, if the simulated time exceeds
+ !-- the given end time by the length of the given output interval.
+    IF ( netcdf_data_format > 4 )  THEN
+       IF ( do3d_time_count(av) + 1 > ntdim_3d(av) )  THEN
+          WRITE ( message_string, * ) 'Output of 3d data is not given at t=',  &
+                                      simulated_time, '&because the maximum ', &
+                                      'number of output time levels is ',      &
+                                      'exceeded.'
+          CALL message( 'data_output_3d', 'PA0387', 0, 1, 0, 6, 0 )
+          RETURN
+       ENDIF
+    ENDIF

     CALL cpu_log (log_point(14),'data_output_3d','start')
  …
  !
  !-- Update the netCDF time axis
+ !-- In case of parallel output, this is only done by PE0 to increase the
+ !-- performance.
  #if defined( __netcdf )
-    IF ( myid == 0 .OR. netcdf_data_format > 4 ) THEN
-       do3d_time_count(av) = do3d_time_count(av) + 1
+    do3d_time_count(av) = do3d_time_count(av) + 1
+    IF ( myid == 0 ) THEN
        IF ( netcdf_output ) THEN
           nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_time_3d(av), &
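Because the time dimension now has a fixed length for parallel output, data_output_3d.f90 returns without writing as soon as one more time level would exceed ntdim_3d, and only PE0 updates the time axis. Below is an isolated sketch of the guard; the counter values are assumptions for the illustration.

! Isolated sketch of the new guard: if writing one more time level would
! exceed the fixed size of the time dimension, the output call returns
! without writing. The values below are made up for this illustration.
PROGRAM time_level_guard_sketch

   IMPLICIT NONE

   INTEGER ::  netcdf_data_format = 5   ! parallel netCDF 4      (assumed)
   INTEGER ::  do3d_time_count    = 7   ! levels already written (assumed)
   INTEGER ::  ntdim_3d           = 7   ! fixed dimension size   (assumed)

   IF ( netcdf_data_format > 4 )  THEN
      IF ( do3d_time_count + 1 > ntdim_3d )  THEN
         PRINT*, 'output skipped: maximum number of output time levels exceeded'
         STOP
      ENDIF
   ENDIF

   PRINT*, 'output written for time level ', do3d_time_count + 1

END PROGRAM time_level_guard_sketch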
palm/trunk/SOURCE/header.f90
r1300 r1308 20 20 ! Current revisions: 21 21 ! ----------------- 22 ! 22 ! output of the fixed number of output time levels 23 ! output_format adjusted for masked data if netcdf_data_format > 5 23 24 ! 24 25 ! Former revisions: … … 924 925 ENDIF 925 926 ENDIF 926 927 IF ( netcdf_data_format > 4 ) THEN 928 WRITE ( io, 352 ) ntdim_2d_xy(av) 929 ELSE 930 WRITE ( io, 353 ) 931 ENDIF 927 932 ENDIF 928 933 … … 962 967 ENDIF 963 968 ENDIF 969 IF ( netcdf_data_format > 4 ) THEN 970 WRITE ( io, 352 ) ntdim_2d_xz(av) 971 ELSE 972 WRITE ( io, 353 ) 973 ENDIF 964 974 ENDIF 965 975 … … 999 1009 ENDIF 1000 1010 ENDIF 1011 IF ( netcdf_data_format > 4 ) THEN 1012 WRITE ( io, 352 ) ntdim_2d_yz(av) 1013 ELSE 1014 WRITE ( io, 353 ) 1015 ENDIF 1001 1016 ENDIF 1002 1017 … … 1050 1065 TRIM( begin_chr ), averaging_interval, & 1051 1066 dt_averaging_input, zu(nz_do3d), nz_do3d 1067 ENDIF 1068 1069 IF ( netcdf_data_format > 4 ) THEN 1070 WRITE ( io, 352 ) ntdim_3d(av) 1071 ELSE 1072 WRITE ( io, 353 ) 1052 1073 ENDIF 1053 1074 … … 1119 1140 output_format = output_format_netcdf 1120 1141 ENDIF 1142 !-- Parallel output not implemented for mask data, hence 1143 !-- output_format must be adjusted. 1144 IF ( netcdf_data_format == 5 ) output_format = 'netCDF4/HDF5' 1145 IF ( netcdf_data_format == 6 ) output_format = 'netCDF4/HDF5 classic' 1121 1146 WRITE ( io, 344 ) output_format 1122 1147 … … 1908 1933 'mask_scale_',A,' constructed from array mask_',I2.2,'_',A,'_loop:'/ & 1909 1934 ' loop begin:',F8.2,', end:',F8.2,', stride:',F8.2 ) 1935 352 FORMAT (/' Fixed number of output time levels allowed: ',I3 /) 1936 353 FORMAT (/' Fixed number of output time levels allowed: unlimited' /) 1910 1937 #if defined( __dvrp_graphics ) 1911 1938 360 FORMAT (' Plot-Sequence with dvrp-software:'/ & -
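header.f90 now documents the new limit in the run-control output: for the parallel formats the fixed number of time levels is printed with format 352, otherwise format 353 reports "unlimited", and masked data are labelled with the non-parallel netCDF4 formats they fall back to. A minimal sketch of the conditional output follows; the output unit and the ntdim value are assumptions, while the two FORMAT statements are taken verbatim from the changeset.

! Minimal sketch of the new run-control lines written by header.f90.
PROGRAM header_ntdim_sketch

   IMPLICIT NONE

   INTEGER ::  io = 6                  ! assumed output unit (stdout)
   INTEGER ::  netcdf_data_format = 5  ! parallel netCDF 4   (assumed)
   INTEGER ::  ntdim_2d_xy = 13        ! assumed number of xy time levels

   IF ( netcdf_data_format > 4 )  THEN
      WRITE ( io, 352 )  ntdim_2d_xy
   ELSE
      WRITE ( io, 353 )
   ENDIF

352 FORMAT (/' Fixed number of output time levels allowed: ',I3 /)
353 FORMAT (/' Fixed number of output time levels allowed: unlimited' /)

END PROGRAM header_ntdim_sketch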
palm/trunk/SOURCE/modules.f90
r1258 → r1308

  ! Current revisions:
  ! ------------------
- !
+ ! +ntdim_2d_xy, ntdim_2d_xz, ntdim_2d_yz, ntdim_3d
  !
  ! Former revisions:
  …
     INTEGER ::  check_restart = 0
  #endif
+
+    INTEGER, DIMENSION(0:1) ::  ntdim_2d_xy, ntdim_2d_xz, ntdim_2d_yz, ntdim_3d

     INTEGER, DIMENSION(:), ALLOCATABLE ::  grid_level_count
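modules.f90 only adds the four counters that carry the fixed numbers of output time levels. The tiny sketch below repeats the declaration with comments; the interpretation of the (0:1) index as instantaneous versus time-averaged output is an assumption based on how the av argument is used in the output routines, and the values assigned are purely illustrative.

! Sketch of the new counters; index meaning (0 = instantaneous data,
! 1 = time-averaged data) is assumed, values are made up.
PROGRAM ntdim_declaration_sketch

   IMPLICIT NONE

   INTEGER, DIMENSION(0:1) ::  ntdim_2d_xy, ntdim_2d_xz, ntdim_2d_yz, ntdim_3d

   ntdim_3d(0) = 7     ! assumed: instantaneous 3d output levels
   ntdim_3d(1) = 3     ! assumed: time-averaged 3d output levels

   PRINT*, 'instantaneous / averaged 3d time levels: ', ntdim_3d

END PROGRAM ntdim_declaration_sketch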
palm/trunk/SOURCE/netcdf.f90
r1207 r1308 23 23 ! Current revisions: 24 24 ! ------------------ 25 ! 25 ! +ntime_count, oldmode 26 ! Adjust NF90_CREATE and NF90_OPEN statement for parallel output 27 ! To increase the performance for parallel output, the following is done: 28 ! - Limit time dimension 29 ! - Values of axis data are only written by PE0 30 ! - No fill is set for all variables 31 ! Check the number of output time levels for restart jobs 26 32 ! 27 33 ! Former revisions: … … 169 175 INTEGER :: av, cross_profiles_count, cross_profiles_maxi, delim, & 170 176 delim_old, file_id, i, id_last, id_x, id_y, id_z, j, & 171 k, kk, ns, ns_old, nz_old 177 k, kk, ns, ns_old, ntime_count, nz_old 178 179 INTEGER, SAVE :: oldmode 172 180 173 181 INTEGER, DIMENSION(1) :: id_dim_time_old, id_dim_x_yz_old, & … … 858 866 859 867 ! 860 !-- Define time coordinate for volume data (unlimited dimension) 861 nc_stat = NF90_DEF_DIM( id_set_3d(av), 'time', NF90_UNLIMITED, & 862 id_dim_time_3d(av) ) 863 CALL handle_netcdf_error( 'netcdf', 64 ) 868 !-- Define time coordinate for volume data. 869 !-- For parallel output the time dimensions has to be limited, otherwise 870 !-- the performance drops significantly. 871 IF ( netcdf_data_format < 5 ) THEN 872 nc_stat = NF90_DEF_DIM( id_set_3d(av), 'time', NF90_UNLIMITED, & 873 id_dim_time_3d(av) ) 874 CALL handle_netcdf_error( 'netcdf', 64 ) 875 ELSE 876 nc_stat = NF90_DEF_DIM( id_set_3d(av), 'time', ntdim_3d(av), & 877 id_dim_time_3d(av) ) 878 CALL handle_netcdf_error( 'netcdf', 523 ) 879 ENDIF 864 880 865 881 nc_stat = NF90_DEF_VAR( id_set_3d(av), 'time', NF90_DOUBLE, & … … 1088 1104 CALL handle_netcdf_error( 'netcdf', 357 ) 1089 1105 #if defined( __netcdf4_parallel ) 1090 !1091 !-- Set collective io operations for parallel io1092 1106 IF ( netcdf_data_format > 4 ) THEN 1107 ! 1108 !-- Set no fill for every variable to increase performance. 1109 nc_stat = NF90_DEF_VAR_FILL( id_set_3d(av), & 1110 id_var_do3d(av,i), & 1111 1, 0 ) 1112 CALL handle_netcdf_error( 'netcdf', 532 ) 1113 ! 1114 !-- Set collective io operations for parallel io 1093 1115 nc_stat = NF90_VAR_PAR_ACCESS( id_set_3d(av), & 1094 1116 id_var_do3d(av,i), & … … 1113 1135 1114 1136 ! 1137 !-- Set general no fill, otherwise the performance drops significantly for 1138 !-- parallel output. 1139 nc_stat = NF90_SET_FILL( id_set_3d(av), NF90_NOFILL, oldmode ) 1140 CALL handle_netcdf_error( 'netcdf', 528 ) 1141 1142 ! 1115 1143 !-- Leave netCDF define mode 1116 1144 nc_stat = NF90_ENDDEF( id_set_3d(av) ) … … 1118 1146 1119 1147 ! 1120 !-- Write data for x (shifted by +dx/2) and xu axis 1121 ALLOCATE( netcdf_data(0:nx+1) ) 1122 1123 DO i = 0, nx+1 1124 netcdf_data(i) = ( i + 0.5 ) * dx 1125 ENDDO 1126 1127 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_x_3d(av), netcdf_data, & 1128 start = (/ 1 /), count = (/ nx+2 /) ) 1129 CALL handle_netcdf_error( 'netcdf', 83 ) 1130 1131 DO i = 0, nx+1 1132 netcdf_data(i) = i * dx 1133 ENDDO 1134 1135 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_xu_3d(av), & 1136 netcdf_data, start = (/ 1 /), & 1137 count = (/ nx+2 /) ) 1138 CALL handle_netcdf_error( 'netcdf', 385 ) 1139 1140 DEALLOCATE( netcdf_data ) 1141 1142 ! 
1143 !-- Write data for y (shifted by +dy/2) and yv axis 1144 ALLOCATE( netcdf_data(0:ny+1) ) 1145 1146 DO i = 0, ny+1 1147 netcdf_data(i) = ( i + 0.5 ) * dy 1148 ENDDO 1149 1150 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_y_3d(av), netcdf_data, & 1151 start = (/ 1 /), count = (/ ny+2 /)) 1152 CALL handle_netcdf_error( 'netcdf', 84 ) 1153 1154 DO i = 0, ny+1 1155 netcdf_data(i) = i * dy 1156 ENDDO 1157 1158 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_yv_3d(av), & 1159 netcdf_data, start = (/ 1 /), & 1160 count = (/ ny+2 /)) 1161 CALL handle_netcdf_error( 'netcdf', 387 ) 1162 1163 DEALLOCATE( netcdf_data ) 1164 1165 ! 1166 !-- Write zu and zw data (vertical axes) 1167 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_zu_3d(av), & 1168 zu(nzb:nz_do3d), start = (/ 1 /), & 1169 count = (/ nz_do3d-nzb+1 /) ) 1170 CALL handle_netcdf_error( 'netcdf', 85 ) 1171 1172 1173 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_zw_3d(av), & 1174 zw(nzb:nz_do3d), start = (/ 1 /), & 1175 count = (/ nz_do3d-nzb+1 /) ) 1176 CALL handle_netcdf_error( 'netcdf', 86 ) 1177 1178 1179 ! 1180 !-- In case of non-flat topography write height information 1181 IF ( TRIM( topography ) /= 'flat' ) THEN 1182 1183 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_zusi_3d(av), & 1184 zu_s_inner(0:nx+1,0:ny+1), & 1185 start = (/ 1, 1 /), & 1186 count = (/ nx+2, ny+2 /) ) 1187 CALL handle_netcdf_error( 'netcdf', 419 ) 1188 1189 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_zwwi_3d(av), & 1190 zw_w_inner(0:nx+1,0:ny+1), & 1191 start = (/ 1, 1 /), & 1192 count = (/ nx+2, ny+2 /) ) 1193 CALL handle_netcdf_error( 'netcdf', 420 ) 1194 1148 !-- These data are only written by PE0 for parallel output to increase 1149 !-- the performance. 1150 IF ( myid == 0 .OR. netcdf_data_format < 5 ) THEN 1151 ! 1152 !-- Write data for x (shifted by +dx/2) and xu axis 1153 ALLOCATE( netcdf_data(0:nx+1) ) 1154 1155 DO i = 0, nx+1 1156 netcdf_data(i) = ( i + 0.5 ) * dx 1157 ENDDO 1158 1159 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_x_3d(av), & 1160 netcdf_data, start = (/ 1 /), & 1161 count = (/ nx+2 /) ) 1162 CALL handle_netcdf_error( 'netcdf', 83 ) 1163 1164 DO i = 0, nx+1 1165 netcdf_data(i) = i * dx 1166 ENDDO 1167 1168 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_xu_3d(av), & 1169 netcdf_data, start = (/ 1 /), & 1170 count = (/ nx+2 /) ) 1171 CALL handle_netcdf_error( 'netcdf', 385 ) 1172 1173 DEALLOCATE( netcdf_data ) 1174 1175 ! 1176 !-- Write data for y (shifted by +dy/2) and yv axis 1177 ALLOCATE( netcdf_data(0:ny+1) ) 1178 1179 DO i = 0, ny+1 1180 netcdf_data(i) = ( i + 0.5 ) * dy 1181 ENDDO 1182 1183 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_y_3d(av), & 1184 netcdf_data, start = (/ 1 /), & 1185 count = (/ ny+2 /) ) 1186 CALL handle_netcdf_error( 'netcdf', 84 ) 1187 1188 DO i = 0, ny+1 1189 netcdf_data(i) = i * dy 1190 ENDDO 1191 1192 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_yv_3d(av), & 1193 netcdf_data, start = (/ 1 /), & 1194 count = (/ ny+2 /)) 1195 CALL handle_netcdf_error( 'netcdf', 387 ) 1196 1197 DEALLOCATE( netcdf_data ) 1198 1199 ! 1200 !-- Write zu and zw data (vertical axes) 1201 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_zu_3d(av), & 1202 zu(nzb:nz_do3d), start = (/ 1 /), & 1203 count = (/ nz_do3d-nzb+1 /) ) 1204 CALL handle_netcdf_error( 'netcdf', 85 ) 1205 1206 1207 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_zw_3d(av), & 1208 zw(nzb:nz_do3d), start = (/ 1 /), & 1209 count = (/ nz_do3d-nzb+1 /) ) 1210 CALL handle_netcdf_error( 'netcdf', 86 ) 1211 1212 ! 
1213 !-- In case of non-flat topography write height information 1214 IF ( TRIM( topography ) /= 'flat' ) THEN 1215 1216 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_zusi_3d(av), & 1217 zu_s_inner(0:nx+1,0:ny+1), & 1218 start = (/ 1, 1 /), & 1219 count = (/ nx+2, ny+2 /) ) 1220 CALL handle_netcdf_error( 'netcdf', 419 ) 1221 1222 nc_stat = NF90_PUT_VAR( id_set_3d(av), id_var_zwwi_3d(av), & 1223 zw_w_inner(0:nx+1,0:ny+1), & 1224 start = (/ 1, 1 /), & 1225 count = (/ nx+2, ny+2 /) ) 1226 CALL handle_netcdf_error( 'netcdf', 420 ) 1227 1228 ENDIF 1195 1229 ENDIF 1196 1230 … … 1271 1305 1272 1306 nc_stat = NF90_INQUIRE_DIMENSION( id_set_3d(av), id_dim_time_3d(av), & 1273 len = do3d_time_count(av))1307 len = ntime_count ) 1274 1308 CALL handle_netcdf_error( 'netcdf', 93 ) 1309 1310 ! 1311 !-- For non-parallel output use the last output time level of the netcdf 1312 !-- file because the time dimension is unlimited. In case of parallel 1313 !-- output the variable ntime_count could get the value of 9*10E36 because 1314 !-- the time dimension is limited. 1315 IF ( netcdf_data_format < 5 ) do3d_time_count(av) = ntime_count 1275 1316 1276 1317 nc_stat = NF90_GET_VAR( id_set_3d(av), id_var_time_3d(av), & … … 1294 1335 ENDIF 1295 1336 1337 IF ( netcdf_data_format > 4 ) THEN 1338 IF ( ntdim_3d(av) > ntime_count ) THEN 1339 message_string = 'netCDF file for volume data ' // & 1340 TRIM( var ) // ' from previous run found,' // & 1341 '&but this file cannot be extended becaus' // & 1342 'e the number of output time levels&has b' // & 1343 'een increased compared to the previous s' // & 1344 'imulation.' // & 1345 '&New file is created instead.' 1346 CALL message( 'define_netcdf_header', 'PA0388', 0, 1, 0, 6, 0 ) 1347 do3d_time_count(av) = 0 1348 extend = .FALSE. 1349 RETURN 1350 ENDIF 1351 ENDIF 1352 1296 1353 ! 1297 1354 !-- Dataset seems to be extendable. … … 1367 1424 1368 1425 ! 1369 !-- Define time coordinate for xy sections (unlimited dimension) 1370 nc_stat = NF90_DEF_DIM( id_set_xy(av), 'time', NF90_UNLIMITED, & 1371 id_dim_time_xy(av) ) 1372 CALL handle_netcdf_error( 'netcdf', 99 ) 1426 !-- Define time coordinate for xy sections. 1427 !-- For parallel output the time dimensions has to be limited, otherwise 1428 !-- the performance drops significantly. 1429 IF ( netcdf_data_format < 5 ) THEN 1430 nc_stat = NF90_DEF_DIM( id_set_xy(av), 'time', NF90_UNLIMITED, & 1431 id_dim_time_xy(av) ) 1432 CALL handle_netcdf_error( 'netcdf', 99 ) 1433 ELSE 1434 nc_stat = NF90_DEF_DIM( id_set_xy(av), 'time', ntdim_2d_xy(av), & 1435 id_dim_time_xy(av) ) 1436 CALL handle_netcdf_error( 'netcdf', 524 ) 1437 ENDIF 1373 1438 1374 1439 nc_stat = NF90_DEF_VAR( id_set_xy(av), 'time', NF90_DOUBLE, & … … 1655 1720 CALL handle_netcdf_error( 'netcdf', 354 ) 1656 1721 #if defined( __netcdf4_parallel ) 1657 !1658 !-- Set collective io operations for parallel io1659 1722 IF ( netcdf_data_format > 4 ) THEN 1723 ! 1724 !-- Set no fill for every variable to increase performance. 1725 nc_stat = NF90_DEF_VAR_FILL( id_set_xy(av), & 1726 id_var_do2d(av,i), & 1727 1, 0 ) 1728 CALL handle_netcdf_error( 'netcdf', 533 ) 1729 ! 1730 !-- Set collective io operations for parallel io 1660 1731 nc_stat = NF90_VAR_PAR_ACCESS( id_set_xy(av), & 1661 1732 id_var_do2d(av,i), & … … 1682 1753 1683 1754 ! 1755 !-- Set general no fill, otherwise the performance drops significantly for 1756 !-- parallel output. 1757 nc_stat = NF90_SET_FILL( id_set_xy(av), NF90_NOFILL, oldmode ) 1758 CALL handle_netcdf_error( 'netcdf', 529 ) 1759 1760 ! 
1684 1761 !-- Leave netCDF define mode 1685 1762 nc_stat = NF90_ENDDEF( id_set_xy(av) ) … … 1687 1764 1688 1765 ! 1689 !-- Write axis data: z_xy, x, y 1690 ALLOCATE( netcdf_data(1:ns) ) 1691 1692 ! 1693 !-- Write zu data 1694 DO i = 1, ns 1695 IF( section(i,1) == -1 ) THEN 1696 netcdf_data(i) = -1.0 ! section averaged along z 1697 ELSE 1698 netcdf_data(i) = zu( section(i,1) ) 1766 !-- These data are only written by PE0 for parallel output to increase 1767 !-- the performance. 1768 IF ( myid == 0 .OR. netcdf_data_format < 5 ) THEN 1769 1770 ! 1771 !-- Write axis data: z_xy, x, y 1772 ALLOCATE( netcdf_data(1:ns) ) 1773 1774 ! 1775 !-- Write zu data 1776 DO i = 1, ns 1777 IF( section(i,1) == -1 ) THEN 1778 netcdf_data(i) = -1.0 ! section averaged along z 1779 ELSE 1780 netcdf_data(i) = zu( section(i,1) ) 1781 ENDIF 1782 ENDDO 1783 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_zu_xy(av), & 1784 netcdf_data, start = (/ 1 /), & 1785 count = (/ ns /) ) 1786 CALL handle_netcdf_error( 'netcdf', 123 ) 1787 1788 ! 1789 !-- Write zw data 1790 DO i = 1, ns 1791 IF( section(i,1) == -1 ) THEN 1792 netcdf_data(i) = -1.0 ! section averaged along z 1793 ELSE 1794 netcdf_data(i) = zw( section(i,1) ) 1795 ENDIF 1796 ENDDO 1797 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_zw_xy(av), & 1798 netcdf_data, start = (/ 1 /), & 1799 count = (/ ns /) ) 1800 CALL handle_netcdf_error( 'netcdf', 124 ) 1801 1802 ! 1803 !-- Write gridpoint number data 1804 netcdf_data(1:ns) = section(1:ns,1) 1805 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_ind_z_xy(av), & 1806 netcdf_data, start = (/ 1 /), & 1807 count = (/ ns /) ) 1808 CALL handle_netcdf_error( 'netcdf', 125 ) 1809 1810 DEALLOCATE( netcdf_data ) 1811 1812 ! 1813 !-- Write the cross section height u*, t* 1814 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_zu1_xy(av), & 1815 (/ zu(nzb+1) /), start = (/ 1 /), & 1816 count = (/ 1 /) ) 1817 CALL handle_netcdf_error( 'netcdf', 126 ) 1818 1819 ! 1820 !-- Write data for x (shifted by +dx/2) and xu axis 1821 ALLOCATE( netcdf_data(0:nx+1) ) 1822 1823 DO i = 0, nx+1 1824 netcdf_data(i) = ( i + 0.5 ) * dx 1825 ENDDO 1826 1827 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_x_xy(av), & 1828 netcdf_data, start = (/ 1 /), & 1829 count = (/ nx+2 /) ) 1830 CALL handle_netcdf_error( 'netcdf', 127 ) 1831 1832 DO i = 0, nx+1 1833 netcdf_data(i) = i * dx 1834 ENDDO 1835 1836 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_xu_xy(av), & 1837 netcdf_data, start = (/ 1 /), & 1838 count = (/ nx+2 /) ) 1839 CALL handle_netcdf_error( 'netcdf', 367 ) 1840 1841 DEALLOCATE( netcdf_data ) 1842 1843 ! 1844 !-- Write data for y (shifted by +dy/2) and yv axis 1845 ALLOCATE( netcdf_data(0:ny+1) ) 1846 1847 DO i = 0, ny+1 1848 netcdf_data(i) = ( i + 0.5 ) * dy 1849 ENDDO 1850 1851 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_y_xy(av), & 1852 netcdf_data, start = (/ 1 /), & 1853 count = (/ ny+2 /)) 1854 CALL handle_netcdf_error( 'netcdf', 128 ) 1855 1856 DO i = 0, ny+1 1857 netcdf_data(i) = i * dy 1858 ENDDO 1859 1860 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_yv_xy(av), & 1861 netcdf_data, start = (/ 1 /), & 1862 count = (/ ny+2 /)) 1863 CALL handle_netcdf_error( 'netcdf', 368 ) 1864 1865 DEALLOCATE( netcdf_data ) 1866 1867 ! 
1868 !-- In case of non-flat topography write height information 1869 IF ( TRIM( topography ) /= 'flat' ) THEN 1870 1871 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_zusi_xy(av), & 1872 zu_s_inner(0:nx+1,0:ny+1), & 1873 start = (/ 1, 1 /), & 1874 count = (/ nx+2, ny+2 /) ) 1875 CALL handle_netcdf_error( 'netcdf', 427 ) 1876 1877 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_zwwi_xy(av), & 1878 zw_w_inner(0:nx+1,0:ny+1), & 1879 start = (/ 1, 1 /), & 1880 count = (/ nx+2, ny+2 /) ) 1881 CALL handle_netcdf_error( 'netcdf', 428 ) 1882 1699 1883 ENDIF 1700 ENDDO 1701 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_zu_xy(av), & 1702 netcdf_data, start = (/ 1 /), & 1703 count = (/ ns /) ) 1704 CALL handle_netcdf_error( 'netcdf', 123 ) 1705 1706 ! 1707 !-- Write zw data 1708 DO i = 1, ns 1709 IF( section(i,1) == -1 ) THEN 1710 netcdf_data(i) = -1.0 ! section averaged along z 1711 ELSE 1712 netcdf_data(i) = zw( section(i,1) ) 1713 ENDIF 1714 ENDDO 1715 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_zw_xy(av), & 1716 netcdf_data, start = (/ 1 /), & 1717 count = (/ ns /) ) 1718 CALL handle_netcdf_error( 'netcdf', 124 ) 1719 1720 ! 1721 !-- Write gridpoint number data 1722 netcdf_data(1:ns) = section(1:ns,1) 1723 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_ind_z_xy(av), & 1724 netcdf_data, start = (/ 1 /), & 1725 count = (/ ns /) ) 1726 CALL handle_netcdf_error( 'netcdf', 125 ) 1727 1728 DEALLOCATE( netcdf_data ) 1729 1730 ! 1731 !-- Write the cross section height u*, t* 1732 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_zu1_xy(av), & 1733 (/ zu(nzb+1) /), start = (/ 1 /), & 1734 count = (/ 1 /) ) 1735 CALL handle_netcdf_error( 'netcdf', 126 ) 1736 1737 ! 1738 !-- Write data for x (shifted by +dx/2) and xu axis 1739 ALLOCATE( netcdf_data(0:nx+1) ) 1740 1741 DO i = 0, nx+1 1742 netcdf_data(i) = ( i + 0.5 ) * dx 1743 ENDDO 1744 1745 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_x_xy(av), netcdf_data, & 1746 start = (/ 1 /), count = (/ nx+2 /) ) 1747 CALL handle_netcdf_error( 'netcdf', 127 ) 1748 1749 DO i = 0, nx+1 1750 netcdf_data(i) = i * dx 1751 ENDDO 1752 1753 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_xu_xy(av), & 1754 netcdf_data, start = (/ 1 /), & 1755 count = (/ nx+2 /) ) 1756 CALL handle_netcdf_error( 'netcdf', 367 ) 1757 1758 DEALLOCATE( netcdf_data ) 1759 1760 ! 1761 !-- Write data for y (shifted by +dy/2) and yv axis 1762 ALLOCATE( netcdf_data(0:ny+1) ) 1763 1764 DO i = 0, ny+1 1765 netcdf_data(i) = ( i + 0.5 ) * dy 1766 ENDDO 1767 1768 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_y_xy(av), netcdf_data, & 1769 start = (/ 1 /), count = (/ ny+2 /)) 1770 CALL handle_netcdf_error( 'netcdf', 128 ) 1771 1772 DO i = 0, ny+1 1773 netcdf_data(i) = i * dy 1774 ENDDO 1775 1776 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_yv_xy(av), & 1777 netcdf_data, start = (/ 1 /), & 1778 count = (/ ny+2 /)) 1779 CALL handle_netcdf_error( 'netcdf', 368 ) 1780 1781 DEALLOCATE( netcdf_data ) 1782 1783 ! 
1784 !-- In case of non-flat topography write height information 1785 IF ( TRIM( topography ) /= 'flat' ) THEN 1786 1787 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_zusi_xy(av), & 1788 zu_s_inner(0:nx+1,0:ny+1), & 1789 start = (/ 1, 1 /), & 1790 count = (/ nx+2, ny+2 /) ) 1791 CALL handle_netcdf_error( 'netcdf', 427 ) 1792 1793 nc_stat = NF90_PUT_VAR( id_set_xy(av), id_var_zwwi_xy(av), & 1794 zw_w_inner(0:nx+1,0:ny+1), & 1795 start = (/ 1, 1 /), & 1796 count = (/ nx+2, ny+2 /) ) 1797 CALL handle_netcdf_error( 'netcdf', 428 ) 1798 1799 ENDIF 1800 1884 ENDIF 1801 1885 1802 1886 CASE ( 'xy_ext' ) … … 1924 2008 1925 2009 nc_stat = NF90_INQUIRE_DIMENSION( id_set_xy(av), id_dim_time_xy(av), & 1926 len = do2d_xy_time_count(av))2010 len = ntime_count ) 1927 2011 CALL handle_netcdf_error( 'netcdf', 136 ) 2012 2013 ! 2014 !-- For non-parallel output use the last output time level of the netcdf 2015 !-- file because the time dimension is unlimited. In case of parallel 2016 !-- output the variable ntime_count could get the value of 9*10E36 because 2017 !-- the time dimension is limited. 2018 IF ( netcdf_data_format < 5 ) do2d_xy_time_count(av) = ntime_count 1928 2019 1929 2020 nc_stat = NF90_GET_VAR( id_set_xy(av), id_var_time_xy(av), & … … 1945 2036 extend = .FALSE. 1946 2037 RETURN 2038 ENDIF 2039 2040 IF ( netcdf_data_format > 4 ) THEN 2041 IF ( ntdim_2d_xy(av) > ntime_count ) THEN 2042 message_string = 'netCDF file for cross sections ' // & 2043 TRIM( var ) // ' from previous run found,' // & 2044 '&but this file cannot be extended becaus' // & 2045 'e the number of output time levels&has b' // & 2046 'een increased compared to the previous s' // & 2047 'imulation.' // & 2048 '&New file is created instead.' 2049 CALL message( 'define_netcdf_header', 'PA0389', 0, 1, 0, 6, 0 ) 2050 do2d_xy_time_count(av) = 0 2051 extend = .FALSE. 2052 RETURN 2053 ENDIF 1947 2054 ENDIF 1948 2055 … … 2023 2130 2024 2131 ! 2025 !-- Define time coordinate for xz sections (unlimited dimension) 2026 nc_stat = NF90_DEF_DIM( id_set_xz(av), 'time', NF90_UNLIMITED, & 2027 id_dim_time_xz(av) ) 2028 CALL handle_netcdf_error( 'netcdf', 142 ) 2132 !-- Define time coordinate for xz sections. 2133 !-- For parallel output the time dimensions has to be limited, otherwise 2134 !-- the performance drops significantly. 2135 IF ( netcdf_data_format < 5 ) THEN 2136 nc_stat = NF90_DEF_DIM( id_set_xz(av), 'time', NF90_UNLIMITED, & 2137 id_dim_time_xz(av) ) 2138 CALL handle_netcdf_error( 'netcdf', 142 ) 2139 ELSE 2140 nc_stat = NF90_DEF_DIM( id_set_xz(av), 'time', ntdim_2d_xz(av), & 2141 id_dim_time_xz(av) ) 2142 CALL handle_netcdf_error( 'netcdf', 525 ) 2143 ENDIF 2029 2144 2030 2145 nc_stat = NF90_DEF_VAR( id_set_xz(av), 'time', NF90_DOUBLE, & … … 2240 2355 CALL handle_netcdf_error( 'netcdf', 355 ) 2241 2356 #if defined( __netcdf4_parallel ) 2242 ! 2243 !-- Set independent io operations for parallel io. Collective io 2244 !-- is only allowed in case of a 1d-decomposition along x, because 2245 !-- otherwise, not all PEs have output data. 2357 2246 2358 IF ( netcdf_data_format > 4 ) THEN 2359 ! 2360 !-- Set no fill for every variable to increase performance. 2361 nc_stat = NF90_DEF_VAR_FILL( id_set_xz(av), & 2362 id_var_do2d(av,i), & 2363 1, 0 ) 2364 CALL handle_netcdf_error( 'netcdf', 534 ) 2365 ! 2366 !-- Set independent io operations for parallel io. Collective io 2367 !-- is only allowed in case of a 1d-decomposition along x, 2368 !-- because otherwise, not all PEs have output data. 
2247 2369 IF ( npey == 1 ) THEN 2248 2370 nc_stat = NF90_VAR_PAR_ACCESS( id_set_xz(av), & … … 2251 2373 ELSE 2252 2374 ! 2253 !-- ATTENTION: Due to a probable bug in the netCDF42254 !-- installation, independet output does not work2255 !-- A workaround is used in data_output_2d on those2256 !-- PEs having no data2257 nc_stat = NF90_VAR_PAR_ACCESS( id_set_xz(av), &2258 2259 2375 !-- Test simulations showed that the output of cross sections 2376 !-- by all PEs in data_output_2d using NF90_COLLECTIVE is 2377 !-- faster than the output by the first row of PEs in 2378 !-- x-direction using NF90_INDEPENDENT. 2379 nc_stat = NF90_VAR_PAR_ACCESS( id_set_xz(av), & 2380 id_var_do2d(av,i), & 2381 NF90_COLLECTIVE ) 2260 2382 ! nc_stat = NF90_VAR_PAR_ACCESS( id_set_xz(av), & 2261 2383 ! id_var_do2d(av,i), & … … 2283 2405 2284 2406 ! 2407 !-- Set general no fill, otherwise the performance drops significantly for 2408 !-- parallel output. 2409 nc_stat = NF90_SET_FILL( id_set_xz(av), NF90_NOFILL, oldmode ) 2410 CALL handle_netcdf_error( 'netcdf', 530 ) 2411 2412 ! 2285 2413 !-- Leave netCDF define mode 2286 2414 nc_stat = NF90_ENDDEF( id_set_xz(av) ) … … 2288 2416 2289 2417 ! 2290 !-- Write axis data: y_xz, x, zu, zw 2291 ALLOCATE( netcdf_data(1:ns) ) 2292 2293 ! 2294 !-- Write y_xz data (shifted by +dy/2) 2295 DO i = 1, ns 2296 IF( section(i,2) == -1 ) THEN 2297 netcdf_data(i) = -1.0 ! section averaged along y 2298 ELSE 2299 netcdf_data(i) = ( section(i,2) + 0.5 ) * dy 2300 ENDIF 2301 ENDDO 2302 nc_stat = NF90_PUT_VAR( id_set_xz(av), id_var_y_xz(av), netcdf_data, & 2303 start = (/ 1 /), count = (/ ns /) ) 2304 CALL handle_netcdf_error( 'netcdf', 163 ) 2305 2306 ! 2307 !-- Write yv_xz data 2308 DO i = 1, ns 2309 IF( section(i,2) == -1 ) THEN 2310 netcdf_data(i) = -1.0 ! section averaged along y 2311 ELSE 2312 netcdf_data(i) = section(i,2) * dy 2313 ENDIF 2314 ENDDO 2315 nc_stat = NF90_PUT_VAR( id_set_xz(av), id_var_yv_xz(av), & 2316 netcdf_data, start = (/ 1 /), & 2317 count = (/ ns /) ) 2318 CALL handle_netcdf_error( 'netcdf', 375 ) 2319 2320 ! 2321 !-- Write gridpoint number data 2322 netcdf_data(1:ns) = section(1:ns,2) 2323 nc_stat = NF90_PUT_VAR( id_set_xz(av), id_var_ind_y_xz(av), & 2324 netcdf_data, start = (/ 1 /), & 2325 count = (/ ns /) ) 2326 CALL handle_netcdf_error( 'netcdf', 164 ) 2327 2328 2329 DEALLOCATE( netcdf_data ) 2330 2331 ! 2332 !-- Write data for x (shifted by +dx/2) and xu axis 2333 ALLOCATE( netcdf_data(0:nx+1) ) 2334 2335 DO i = 0, nx+1 2336 netcdf_data(i) = ( i + 0.5 ) * dx 2337 ENDDO 2338 2339 nc_stat = NF90_PUT_VAR( id_set_xz(av), id_var_x_xz(av), netcdf_data, & 2340 start = (/ 1 /), count = (/ nx+2 /) ) 2341 CALL handle_netcdf_error( 'netcdf', 165 ) 2342 2343 DO i = 0, nx+1 2344 netcdf_data(i) = i * dx 2345 ENDDO 2346 2347 nc_stat = NF90_PUT_VAR( id_set_xz(av), id_var_xu_xz(av), & 2348 netcdf_data, start = (/ 1 /), & 2349 count = (/ nx+2 /) ) 2350 CALL handle_netcdf_error( 'netcdf', 377 ) 2351 2352 DEALLOCATE( netcdf_data ) 2353 2354 ! 
2355 !-- Write zu and zw data (vertical axes) 2356 ALLOCATE( netcdf_data(0:nz+1) ) 2357 2358 netcdf_data(0:nz+1) = zu(nzb:nzt+1) 2359 nc_stat = NF90_PUT_VAR( id_set_xz(av), id_var_zu_xz(av), & 2360 netcdf_data, start = (/ 1 /), & 2361 count = (/ nz+2 /) ) 2362 CALL handle_netcdf_error( 'netcdf', 166 ) 2363 2364 netcdf_data(0:nz+1) = zw(nzb:nzt+1) 2365 nc_stat = NF90_PUT_VAR( id_set_xz(av), id_var_zw_xz(av), & 2366 netcdf_data, start = (/ 1 /), & 2367 count = (/ nz+2 /) ) 2368 CALL handle_netcdf_error( 'netcdf', 167 ) 2369 2370 DEALLOCATE( netcdf_data ) 2418 !-- These data are only written by PE0 for parallel output to increase 2419 !-- the performance. 2420 IF ( myid == 0 .OR. netcdf_data_format < 5 ) THEN 2421 2422 ! 2423 !-- Write axis data: y_xz, x, zu, zw 2424 ALLOCATE( netcdf_data(1:ns) ) 2425 2426 ! 2427 !-- Write y_xz data (shifted by +dy/2) 2428 DO i = 1, ns 2429 IF( section(i,2) == -1 ) THEN 2430 netcdf_data(i) = -1.0 ! section averaged along y 2431 ELSE 2432 netcdf_data(i) = ( section(i,2) + 0.5 ) * dy 2433 ENDIF 2434 ENDDO 2435 nc_stat = NF90_PUT_VAR( id_set_xz(av), id_var_y_xz(av), & 2436 netcdf_data, start = (/ 1 /), & 2437 count = (/ ns /) ) 2438 CALL handle_netcdf_error( 'netcdf', 163 ) 2439 2440 ! 2441 !-- Write yv_xz data 2442 DO i = 1, ns 2443 IF( section(i,2) == -1 ) THEN 2444 netcdf_data(i) = -1.0 ! section averaged along y 2445 ELSE 2446 netcdf_data(i) = section(i,2) * dy 2447 ENDIF 2448 ENDDO 2449 nc_stat = NF90_PUT_VAR( id_set_xz(av), id_var_yv_xz(av), & 2450 netcdf_data, start = (/ 1 /), & 2451 count = (/ ns /) ) 2452 CALL handle_netcdf_error( 'netcdf', 375 ) 2453 2454 ! 2455 !-- Write gridpoint number data 2456 netcdf_data(1:ns) = section(1:ns,2) 2457 nc_stat = NF90_PUT_VAR( id_set_xz(av), id_var_ind_y_xz(av), & 2458 netcdf_data, start = (/ 1 /), & 2459 count = (/ ns /) ) 2460 CALL handle_netcdf_error( 'netcdf', 164 ) 2461 2462 2463 DEALLOCATE( netcdf_data ) 2464 2465 ! 2466 !-- Write data for x (shifted by +dx/2) and xu axis 2467 ALLOCATE( netcdf_data(0:nx+1) ) 2468 2469 DO i = 0, nx+1 2470 netcdf_data(i) = ( i + 0.5 ) * dx 2471 ENDDO 2472 2473 nc_stat = NF90_PUT_VAR( id_set_xz(av), id_var_x_xz(av), & 2474 netcdf_data, start = (/ 1 /), & 2475 count = (/ nx+2 /) ) 2476 CALL handle_netcdf_error( 'netcdf', 165 ) 2477 2478 DO i = 0, nx+1 2479 netcdf_data(i) = i * dx 2480 ENDDO 2481 2482 nc_stat = NF90_PUT_VAR( id_set_xz(av), id_var_xu_xz(av), & 2483 netcdf_data, start = (/ 1 /), & 2484 count = (/ nx+2 /) ) 2485 CALL handle_netcdf_error( 'netcdf', 377 ) 2486 2487 DEALLOCATE( netcdf_data ) 2488 2489 ! 2490 !-- Write zu and zw data (vertical axes) 2491 ALLOCATE( netcdf_data(0:nz+1) ) 2492 2493 netcdf_data(0:nz+1) = zu(nzb:nzt+1) 2494 nc_stat = NF90_PUT_VAR( id_set_xz(av), id_var_zu_xz(av), & 2495 netcdf_data, start = (/ 1 /), & 2496 count = (/ nz+2 /) ) 2497 CALL handle_netcdf_error( 'netcdf', 166 ) 2498 2499 netcdf_data(0:nz+1) = zw(nzb:nzt+1) 2500 nc_stat = NF90_PUT_VAR( id_set_xz(av), id_var_zw_xz(av), & 2501 netcdf_data, start = (/ 1 /), & 2502 count = (/ nz+2 /) ) 2503 CALL handle_netcdf_error( 'netcdf', 167 ) 2504 2505 DEALLOCATE( netcdf_data ) 2506 2507 ENDIF 2371 2508 2372 2509 … … 2495 2632 2496 2633 nc_stat = NF90_INQUIRE_DIMENSION( id_set_xz(av), id_dim_time_xz(av), & 2497 len = do2d_xz_time_count(av))2634 len = ntime_count ) 2498 2635 CALL handle_netcdf_error( 'netcdf', 175 ) 2636 2637 ! 2638 !-- For non-parallel output use the last output time level of the netcdf 2639 !-- file because the time dimension is unlimited. 
In case of parallel 2640 !-- output the variable ntime_count could get the value of 9*10E36 because 2641 !-- the time dimension is limited. 2642 IF ( netcdf_data_format < 5 ) do2d_xz_time_count(av) = ntime_count 2499 2643 2500 2644 nc_stat = NF90_GET_VAR( id_set_xz(av), id_var_time_xz(av), & … … 2517 2661 RETURN 2518 2662 ENDIF 2663 2664 IF ( netcdf_data_format > 4 ) THEN 2665 IF ( ntdim_2d_xz(av) > ntime_count ) THEN 2666 message_string = 'netCDF file for cross sections ' // & 2667 TRIM( var ) // ' from previous run found,' // & 2668 '&but this file cannot be extended becaus' // & 2669 'e the number of output time levels&has b' // & 2670 'een increased compared to the previous s' // & 2671 'imulation.' // & 2672 '&New file is created instead.' 2673 CALL message( 'define_netcdf_header', 'PA0390', 0, 1, 0, 6, 0 ) 2674 do2d_xz_time_count(av) = 0 2675 extend = .FALSE. 2676 RETURN 2677 ENDIF 2678 ENDIF 2519 2679 2520 2680 ! … … 2539 2699 ELSE 2540 2700 ! 2541 !-- ATTENTION: Due to a probable bug in the netCDF42542 !-- installation, independet output does not work2543 !-- A workaround is used in data_output_2d on those2544 !-- PEs having no data2701 !-- Test simulations showed that the output of cross sections 2702 !-- by all PEs in data_output_2d using NF90_COLLECTIVE is 2703 !-- faster than the output by the first row of PEs in 2704 !-- x-direction using NF90_INDEPENDENT. 2545 2705 nc_stat = NF90_VAR_PAR_ACCESS( id_set_xz(av), & 2546 2706 id_var_do2d(av,i), & … … 2610 2770 2611 2771 ! 2612 !-- Define time coordinate for yz sections (unlimited dimension) 2613 nc_stat = NF90_DEF_DIM( id_set_yz(av), 'time', NF90_UNLIMITED, & 2614 id_dim_time_yz(av) ) 2615 CALL handle_netcdf_error( 'netcdf', 181 ) 2772 !-- Define time coordinate for yz sections. 2773 !-- For parallel output the time dimensions has to be limited, otherwise 2774 !-- the performance drops significantly. 2775 IF ( netcdf_data_format < 5 ) THEN 2776 nc_stat = NF90_DEF_DIM( id_set_yz(av), 'time', NF90_UNLIMITED, & 2777 id_dim_time_yz(av) ) 2778 CALL handle_netcdf_error( 'netcdf', 181 ) 2779 ELSE 2780 nc_stat = NF90_DEF_DIM( id_set_yz(av), 'time', ntdim_2d_yz(av), & 2781 id_dim_time_yz(av) ) 2782 CALL handle_netcdf_error( 'netcdf', 526 ) 2783 ENDIF 2616 2784 2617 2785 nc_stat = NF90_DEF_VAR( id_set_yz(av), 'time', NF90_DOUBLE, & … … 2827 2995 CALL handle_netcdf_error( 'netcdf', 356 ) 2828 2996 #if defined( __netcdf4_parallel ) 2829 !2830 !-- Set independent io operations for parallel io. Collective io2831 !-- is only allowed in case of a 1d-decomposition along y, because2832 !-- otherwise, not all PEs have output data.2833 2997 IF ( netcdf_data_format > 4 ) THEN 2998 ! 2999 !-- Set no fill for every variable to increase performance. 3000 nc_stat = NF90_DEF_VAR_FILL( id_set_yz(av), & 3001 id_var_do2d(av,i), & 3002 1, 0 ) 3003 CALL handle_netcdf_error( 'netcdf', 535 ) 3004 ! 3005 !-- Set independent io operations for parallel io. Collective io 3006 !-- is only allowed in case of a 1d-decomposition along y, 3007 !-- because otherwise, not all PEs have output data. 2834 3008 IF ( npex == 1 ) THEN 2835 3009 nc_stat = NF90_VAR_PAR_ACCESS( id_set_yz(av), & … … 2838 3012 ELSE 2839 3013 ! 
2840 !-- ATTENTION: Due to a probable bug in the netCDF42841 !-- installation, independet output does not work2842 !-- A workaround is used in data_output_2d on those2843 !-- PEs having no data3014 !-- Test simulations showed that the output of cross sections 3015 !-- by all PEs in data_output_2d using NF90_COLLECTIVE is 3016 !-- faster than the output by the first row of PEs in 3017 !-- y-direction using NF90_INDEPENDENT. 2844 3018 nc_stat = NF90_VAR_PAR_ACCESS( id_set_yz(av), & 2845 3019 id_var_do2d(av,i), & … … 2870 3044 2871 3045 ! 3046 !-- Set general no fill, otherwise the performance drops significantly for 3047 !-- parallel output. 3048 nc_stat = NF90_SET_FILL( id_set_yz(av), NF90_NOFILL, oldmode ) 3049 CALL handle_netcdf_error( 'netcdf', 531 ) 3050 3051 ! 2872 3052 !-- Leave netCDF define mode 2873 3053 nc_stat = NF90_ENDDEF( id_set_yz(av) ) … … 2875 3055 2876 3056 ! 2877 !-- Write axis data: x_yz, y, zu, zw 2878 ALLOCATE( netcdf_data(1:ns) ) 2879 2880 ! 2881 !-- Write x_yz data (shifted by +dx/2) 2882 DO i = 1, ns 2883 IF( section(i,3) == -1 ) THEN 2884 netcdf_data(i) = -1.0 ! section averaged along x 2885 ELSE 2886 netcdf_data(i) = ( section(i,3) + 0.5 ) * dx 2887 ENDIF 2888 ENDDO 2889 nc_stat = NF90_PUT_VAR( id_set_yz(av), id_var_x_yz(av), netcdf_data, & 2890 start = (/ 1 /), count = (/ ns /) ) 2891 CALL handle_netcdf_error( 'netcdf', 202 ) 2892 2893 ! 2894 !-- Write x_yz data (xu grid) 2895 DO i = 1, ns 2896 IF( section(i,3) == -1 ) THEN 2897 netcdf_data(i) = -1.0 ! section averaged along x 2898 ELSE 2899 netcdf_data(i) = section(i,3) * dx 2900 ENDIF 2901 ENDDO 2902 nc_stat = NF90_PUT_VAR( id_set_yz(av), id_var_xu_yz(av), netcdf_data, & 2903 start = (/ 1 /), count = (/ ns /) ) 2904 CALL handle_netcdf_error( 'netcdf', 383 ) 2905 2906 ! 2907 !-- Write gridpoint number data 2908 netcdf_data(1:ns) = section(1:ns,3) 2909 nc_stat = NF90_PUT_VAR( id_set_yz(av), id_var_ind_x_yz(av), & 2910 netcdf_data, start = (/ 1 /), & 2911 count = (/ ns /) ) 2912 CALL handle_netcdf_error( 'netcdf', 203 ) 2913 2914 DEALLOCATE( netcdf_data ) 2915 2916 ! 2917 !-- Write data for y (shifted by +dy/2) and yv axis 2918 ALLOCATE( netcdf_data(0:ny+1) ) 2919 2920 DO j = 0, ny+1 2921 netcdf_data(j) = ( j + 0.5 ) * dy 2922 ENDDO 2923 2924 nc_stat = NF90_PUT_VAR( id_set_yz(av), id_var_y_yz(av), netcdf_data, & 2925 start = (/ 1 /), count = (/ ny+2 /) ) 2926 CALL handle_netcdf_error( 'netcdf', 204 ) 2927 2928 DO j = 0, ny+1 2929 netcdf_data(j) = j * dy 2930 ENDDO 2931 2932 nc_stat = NF90_PUT_VAR( id_set_yz(av), id_var_yv_yz(av), & 2933 netcdf_data, start = (/ 1 /), & 2934 count = (/ ny+2 /) ) 2935 CALL handle_netcdf_error( 'netcdf', 384 ) 2936 2937 DEALLOCATE( netcdf_data ) 2938 2939 ! 2940 !-- Write zu and zw data (vertical axes) 2941 ALLOCATE( netcdf_data(0:nz+1) ) 2942 2943 netcdf_data(0:nz+1) = zu(nzb:nzt+1) 2944 nc_stat = NF90_PUT_VAR( id_set_yz(av), id_var_zu_yz(av), & 2945 netcdf_data, start = (/ 1 /), & 2946 count = (/ nz+2 /) ) 2947 CALL handle_netcdf_error( 'netcdf', 205 ) 2948 2949 netcdf_data(0:nz+1) = zw(nzb:nzt+1) 2950 nc_stat = NF90_PUT_VAR( id_set_yz(av), id_var_zw_yz(av), & 2951 netcdf_data, start = (/ 1 /), & 2952 count = (/ nz+2 /) ) 2953 CALL handle_netcdf_error( 'netcdf', 206 ) 2954 2955 DEALLOCATE( netcdf_data ) 3057 !-- These data are only written by PE0 for parallel output to increase 3058 !-- the performance. 3059 IF ( myid == 0 .OR. netcdf_data_format < 5 ) THEN 3060 3061 ! 3062 !-- Write axis data: x_yz, y, zu, zw 3063 ALLOCATE( netcdf_data(1:ns) ) 3064 3065 ! 
3066 !-- Write x_yz data (shifted by +dx/2) 3067 DO i = 1, ns 3068 IF( section(i,3) == -1 ) THEN 3069 netcdf_data(i) = -1.0 ! section averaged along x 3070 ELSE 3071 netcdf_data(i) = ( section(i,3) + 0.5 ) * dx 3072 ENDIF 3073 ENDDO 3074 nc_stat = NF90_PUT_VAR( id_set_yz(av), id_var_x_yz(av), & 3075 netcdf_data, start = (/ 1 /), & 3076 count = (/ ns /) ) 3077 CALL handle_netcdf_error( 'netcdf', 202 ) 3078 3079 ! 3080 !-- Write x_yz data (xu grid) 3081 DO i = 1, ns 3082 IF( section(i,3) == -1 ) THEN 3083 netcdf_data(i) = -1.0 ! section averaged along x 3084 ELSE 3085 netcdf_data(i) = section(i,3) * dx 3086 ENDIF 3087 ENDDO 3088 nc_stat = NF90_PUT_VAR( id_set_yz(av), id_var_xu_yz(av), & 3089 netcdf_data, start = (/ 1 /), & 3090 count = (/ ns /) ) 3091 CALL handle_netcdf_error( 'netcdf', 383 ) 3092 3093 ! 3094 !-- Write gridpoint number data 3095 netcdf_data(1:ns) = section(1:ns,3) 3096 nc_stat = NF90_PUT_VAR( id_set_yz(av), id_var_ind_x_yz(av), & 3097 netcdf_data, start = (/ 1 /), & 3098 count = (/ ns /) ) 3099 CALL handle_netcdf_error( 'netcdf', 203 ) 3100 3101 DEALLOCATE( netcdf_data ) 3102 3103 ! 3104 !-- Write data for y (shifted by +dy/2) and yv axis 3105 ALLOCATE( netcdf_data(0:ny+1) ) 3106 3107 DO j = 0, ny+1 3108 netcdf_data(j) = ( j + 0.5 ) * dy 3109 ENDDO 3110 3111 nc_stat = NF90_PUT_VAR( id_set_yz(av), id_var_y_yz(av), & 3112 netcdf_data, start = (/ 1 /), & 3113 count = (/ ny+2 /) ) 3114 CALL handle_netcdf_error( 'netcdf', 204 ) 3115 3116 DO j = 0, ny+1 3117 netcdf_data(j) = j * dy 3118 ENDDO 3119 3120 nc_stat = NF90_PUT_VAR( id_set_yz(av), id_var_yv_yz(av), & 3121 netcdf_data, start = (/ 1 /), & 3122 count = (/ ny+2 /) ) 3123 CALL handle_netcdf_error( 'netcdf', 384 ) 3124 3125 DEALLOCATE( netcdf_data ) 3126 3127 ! 3128 !-- Write zu and zw data (vertical axes) 3129 ALLOCATE( netcdf_data(0:nz+1) ) 3130 3131 netcdf_data(0:nz+1) = zu(nzb:nzt+1) 3132 nc_stat = NF90_PUT_VAR( id_set_yz(av), id_var_zu_yz(av), & 3133 netcdf_data, start = (/ 1 /), & 3134 count = (/ nz+2 /) ) 3135 CALL handle_netcdf_error( 'netcdf', 205 ) 3136 3137 netcdf_data(0:nz+1) = zw(nzb:nzt+1) 3138 nc_stat = NF90_PUT_VAR( id_set_yz(av), id_var_zw_yz(av), & 3139 netcdf_data, start = (/ 1 /), & 3140 count = (/ nz+2 /) ) 3141 CALL handle_netcdf_error( 'netcdf', 206 ) 3142 3143 DEALLOCATE( netcdf_data ) 3144 3145 ENDIF 2956 3146 2957 3147 … … 3080 3270 3081 3271 nc_stat = NF90_INQUIRE_DIMENSION( id_set_yz(av), id_dim_time_yz(av), & 3082 len = do2d_yz_time_count(av))3272 len = ntime_count ) 3083 3273 CALL handle_netcdf_error( 'netcdf', 214 ) 3274 3275 ! 3276 !-- For non-parallel output use the last output time level of the netcdf 3277 !-- file because the time dimension is unlimited. In case of parallel 3278 !-- output the variable ntime_count could get the value of 9*10E36 because 3279 !-- the time dimension is limited. 3280 IF ( netcdf_data_format < 5 ) do2d_yz_time_count(av) = ntime_count 3084 3281 3085 3282 nc_stat = NF90_GET_VAR( id_set_yz(av), id_var_time_yz(av), & … … 3103 3300 ENDIF 3104 3301 3302 IF ( netcdf_data_format > 4 ) THEN 3303 IF ( ntdim_2d_yz(av) > ntime_count ) THEN 3304 message_string = 'netCDF file for cross sections ' // & 3305 TRIM( var ) // ' from previous run found,' // & 3306 '&but this file cannot be extended becaus' // & 3307 'e the number of output time levels&has b' // & 3308 'een increased compared to the previous s' // & 3309 'imulation.' // & 3310 '&New file is created instead.' 
3311 CALL message( 'define_netcdf_header', 'PA0391', 0, 1, 0, 6, 0 ) 3312 do2d_yz_time_count(av) = 0 3313 extend = .FALSE. 3314 RETURN 3315 ENDIF 3316 ENDIF 3317 3105 3318 ! 3106 3319 !-- Dataset seems to be extendable. … … 3124 3337 ELSE 3125 3338 ! 3126 !-- ATTENTION: Due to a probable bug in the netCDF43127 !-- installation, independet output does not work3128 !-- A workaround is used in data_output_2d on those3129 !-- PEs having no data3339 !-- Test simulations showed that the output of cross sections 3340 !-- by all PEs in data_output_2d using NF90_COLLECTIVE is 3341 !-- faster than the output by the first row of PEs in 3342 !-- y-direction using NF90_INDEPENDENT. 3130 3343 nc_stat = NF90_VAR_PAR_ACCESS( id_set_yz(av), & 3131 3344 id_var_do2d(av,i), & … … 4651 4864 !-- 64bit-offset format 4652 4865 nc_stat = NF90_CREATE( filename, & 4653 OR( NF90_NOCLOBBER, NF90_64BIT_OFFSET ), id )4866 IOR( NF90_NOCLOBBER, NF90_64BIT_OFFSET ), id ) 4654 4867 4655 4868 #if defined( __netcdf4 ) … … 4658 4871 ! 4659 4872 !-- netCDF4/HDF5 format 4660 nc_stat = NF90_CREATE( filename, OR( NF90_NOCLOBBER, NF90_NETCDF4 ), id )4873 nc_stat = NF90_CREATE( filename, IOR( NF90_NOCLOBBER, NF90_NETCDF4 ), id ) 4661 4874 4662 4875 ELSEIF ( netcdf_data_format == 4 .OR. & … … 4665 4878 !-- netCDF4/HDF5 format with classic model flag 4666 4879 nc_stat = NF90_CREATE( filename, & 4667 OR( NF90_NOCLOBBER, &4668 OR( NF90_CLASSIC_MODEL, NF90_HDF5 ) ), id )4880 IOR( NF90_NOCLOBBER, & 4881 IOR( NF90_CLASSIC_MODEL, NF90_HDF5 ) ), id ) 4669 4882 4670 4883 #if defined( __netcdf4_parallel ) … … 4672 4885 ! 4673 4886 !-- netCDF4/HDF5 format, parallel 4674 nc_stat = NF90_CREATE( filename, OR( NF90_NOCLOBBER, NF90_NETCDF4 ), & 4887 nc_stat = NF90_CREATE( filename, & 4888 IOR( NF90_NOCLOBBER, & 4889 IOR( NF90_NETCDF4, NF90_MPIIO ) ), & 4675 4890 id, COMM = comm2d, INFO = MPI_INFO_NULL ) 4676 4891 … … 4679 4894 !-- netCDF4/HDF5 format with classic model flag, parallel 4680 4895 nc_stat = NF90_CREATE( filename, & 4681 OR( NF90_NOCLOBBER, & 4682 OR( NF90_CLASSIC_MODEL, NF90_HDF5 ) ), & 4896 IOR( NF90_NOCLOBBER, & 4897 IOR( NF90_MPIIO, & 4898 IOR( NF90_CLASSIC_MODEL, NF90_HDF5 ) ) ), & 4683 4899 id, COMM = comm2d, INFO = MPI_INFO_NULL ) 4684 4900 … … 4720 4936 #if defined( __netcdf4_parallel ) 4721 4937 ELSEIF ( netcdf_data_format > 4 .AND. parallel ) THEN 4722 nc_stat = NF90_OPEN( filename, NF90_WRITE, id, COMM = comm2d,&4723 INFO = MPI_INFO_NULL )4938 nc_stat = NF90_OPEN( filename, IOR( NF90_WRITE, NF90_MPIIO ), id, & 4939 COMM = comm2d, INFO = MPI_INFO_NULL ) 4724 4940 #endif 4725 4941 #endif -
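The netcdf.f90 changes above combine four measures that make parallel output viable: the file is created and re-opened with the NF90_MPIIO mode flag, the time dimension gets a fixed length instead of NF90_UNLIMITED, fill values are switched off (NF90_DEF_VAR_FILL / NF90_SET_FILL), and the cross-section variables are written with collective access (NF90_VAR_PAR_ACCESS). The stand-alone sketch below walks through the creation and definition phase of that call sequence with the netCDF Fortran 90 API. It assumes a parallel (MPI-enabled) netCDF/HDF5 build; the file name, dimension sizes and the variable u_xz are illustrative placeholders rather than PALM identifiers, and the error checks done via handle_netcdf_error in PALM are omitted.

PROGRAM parallel_netcdf_sketch
!
!-- Minimal sketch of the parallel netCDF-4 output pattern used in this
!-- changeset: MPI-IO file creation, a fixed-length time dimension,
!-- no fill values, and collective writes. Names and sizes are placeholders.
   USE mpi
   USE netcdf

   IMPLICIT NONE

   INTEGER, PARAMETER ::  nx = 40, nz = 20, ntdim = 10  ! grid size and fixed number of time levels
   INTEGER ::  ierr, nc_stat, ncid, oldmode
   INTEGER ::  id_dim_time, id_dim_x, id_dim_z, id_var_time, id_var_u

   CALL MPI_INIT( ierr )
!
!-- Create the file for MPI-IO access by all ranks of the communicator
   nc_stat = NF90_CREATE( 'sketch_xz.nc',                                    &
                          IOR( NF90_NOCLOBBER,                               &
                               IOR( NF90_NETCDF4, NF90_MPIIO ) ),            &
                          ncid, COMM = MPI_COMM_WORLD, INFO = MPI_INFO_NULL )
!
!-- Fixed-length time dimension; an NF90_UNLIMITED record dimension slows
!-- parallel writes down considerably
   nc_stat = NF90_DEF_DIM( ncid, 'time', ntdim, id_dim_time )
   nc_stat = NF90_DEF_DIM( ncid, 'x',    nx,    id_dim_x )
   nc_stat = NF90_DEF_DIM( ncid, 'zu',   nz,    id_dim_z )

   nc_stat = NF90_DEF_VAR( ncid, 'time', NF90_DOUBLE, (/ id_dim_time /),     &
                           id_var_time )
   nc_stat = NF90_DEF_VAR( ncid, 'u_xz', NF90_REAL,                          &
                           (/ id_dim_x, id_dim_z, id_dim_time /), id_var_u )
!
!-- Disable fill values for the variable and for the data set as a whole
   nc_stat = NF90_DEF_VAR_FILL( ncid, id_var_u, 1, 0 )
   nc_stat = NF90_SET_FILL( ncid, NF90_NOFILL, oldmode )
!
!-- Collective access: every rank takes part in each write of this variable
   nc_stat = NF90_VAR_PAR_ACCESS( ncid, id_var_u, NF90_COLLECTIVE )

   nc_stat = NF90_ENDDEF( ncid )
   nc_stat = NF90_CLOSE( ncid )

   CALL MPI_FINALIZE( ierr )

END PROGRAM parallel_netcdf_sketch

Run under mpirun; the actual data writes would then use NF90_PUT_VAR with per-rank start/count values, which is what data_output_2d does for the cross sections.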
palm/trunk/SOURCE/read_var_list.f90
r1254 r1308
20 20 ! Current revisions:
21 21 ! ------------------
22 !
22 ! +do2d_xy_time_count, do2d_xz_time_count, do2d_yz_time_count,
23 ! +do3d_time_count
23 24 !
24 25 ! Former revisions:
…
202 203 !-- Make version number check first
203 204 READ ( 13 ) version_on_file
204 binary_version = '3.9a'
205 binary_version = '3.9b'
205 206 IF ( TRIM( version_on_file ) /= TRIM( binary_version ) ) THEN
206 207 WRITE( message_string, * ) 'version mismatch concerning control ', &
…
383 384 CASE ( 'dissipation_1d' )
384 385 READ ( 13 ) dissipation_1d
386 CASE ( 'do2d_xy_time_count' )
387 READ ( 13 ) do2d_xy_time_count
388 CASE ( 'do2d_xz_time_count' )
389 READ ( 13 ) do2d_xz_time_count
390 CASE ( 'do2d_yz_time_count' )
391 READ ( 13 ) do2d_yz_time_count
392 CASE ( 'do3d_time_count' )
393 READ ( 13 ) do3d_time_count
385 394 CASE ( 'dp_external' )
386 395 READ ( 13 ) dp_external
-
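The new CASE branches follow the tag/value layout of the PALM restart file: write_var_list.f90 stores a fixed-length character tag followed by the value itself, and read_var_list.f90 reads each tag and branches on it, so every added variable needs one WRITE pair and one matching CASE branch, hence the binary_version bump from '3.9a' to '3.9b'. Below is a reduced, self-contained demonstration of that record layout; the file name, the 30-character tag length and the '*** end ***' marker are illustrative assumptions, not necessarily the exact PALM conventions.

PROGRAM restart_tag_value_demo
!
!-- Demonstrates the tag/value record layout sketched above: each restart
!-- variable is an unformatted character tag record followed by a value
!-- record. Tag length, end marker and file name are assumptions.
   IMPLICIT NONE

   CHARACTER (LEN=30) ::  variable_name
   INTEGER            ::  do2d_xz_time_count(0:1)
!
!-- Write side (pattern of write_var_list.f90): tag record, then value record
   OPEN ( 14, FILE='demo_restart_bin', FORM='UNFORMATTED', STATUS='REPLACE' )
   do2d_xz_time_count = (/ 7, 3 /)
   variable_name = 'do2d_xz_time_count'
   WRITE ( 14 )  variable_name
   WRITE ( 14 )  do2d_xz_time_count
   variable_name = '*** end ***'
   WRITE ( 14 )  variable_name
   CLOSE ( 14 )
!
!-- Read side (pattern of read_var_list.f90): read a tag, branch on it,
!-- read the announced value, repeat until the end marker appears
   do2d_xz_time_count = 0
   OPEN ( 13, FILE='demo_restart_bin', FORM='UNFORMATTED', STATUS='OLD' )
   READ ( 13 )  variable_name
   DO  WHILE ( TRIM( variable_name ) /= '*** end ***' )
      SELECT CASE ( TRIM( variable_name ) )
         CASE ( 'do2d_xz_time_count' )
            READ ( 13 )  do2d_xz_time_count
      END SELECT
      READ ( 13 )  variable_name
   ENDDO
   CLOSE ( 13 )

   PRINT*, 'restored counters: ', do2d_xz_time_count

END PROGRAM restart_tag_value_demo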
palm/trunk/SOURCE/time_integration.f90
r1277 r1308
20 20 ! Current revisions:
21 21 ! ------------------
22 !
22 ! +netcdf_data_format_save
23 ! For masked data, parallel netcdf output is not tested so far, hence
24 ! netcdf_data_format is switched back to non-paralell output.
23 25 !
24 26 ! Former revisions:
…
186 188
187 189 CHARACTER (LEN=9) :: time_to_string
190 INTEGER :: netcdf_data_format_save
188 191
189 192 !
…
810 813 !
811 814 !-- masked data output
815 !-- Parallel netcdf output is not tested so far for masked data, hence
816 !-- netcdf_data_format is switched back to non-paralell output.
817 netcdf_data_format_save = netcdf_data_format
818 IF ( netcdf_data_format == 5 ) netcdf_data_format = 3
819 IF ( netcdf_data_format == 6 ) netcdf_data_format = 4
812 820 DO mid = 1, masks
813 821 IF ( time_domask(mid) >= dt_domask(mid) ) THEN
…
817 825 ENDIF
818 826 ENDDO
827 netcdf_data_format = netcdf_data_format_save
819 828
820 829 !
…
826 835 CALL data_output_2d( 'yz', 1 )
827 836 CALL data_output_3d( 1 )
837 !-- Parallel netcdf output is not tested so far for masked data, hence
838 !-- netcdf_data_format is switched back to non-paralell output.
839 netcdf_data_format_save = netcdf_data_format
840 IF ( netcdf_data_format == 5 ) netcdf_data_format = 3
841 IF ( netcdf_data_format == 6 ) netcdf_data_format = 4
828 842 DO mid = 1, masks
829 843 CALL data_output_mask( 1 )
830 844 ENDDO
845 netcdf_data_format = netcdf_data_format_save
831 846 time_do_av = MOD( time_do_av, MAX( dt_data_output_av, dt_3d ) )
832 847 ENDIF
-
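time_integration.f90 only downgrades netcdf_data_format temporarily around the masked-data calls and restores it afterwards. Read together with the NF90_CREATE branches in netcdf.f90 above, the numeric codes mean: 3 = netCDF-4/HDF5, 4 = netCDF-4 with the classic-model flag, and 5/6 = the same two formats opened for parallel MPI-IO, so the guard maps each parallel format onto its serial counterpart. A minimal sketch of that save/downgrade/restore pattern, with the masked-output call itself only indicated by a comment:

PROGRAM format_fallback_sketch
!
!-- Save/downgrade/restore pattern used around the masked-data output.
!-- Format codes as used by the NF90_CREATE branches in netcdf.f90:
!-- 3 = netCDF-4/HDF5, 4 = netCDF-4 classic model, 5/6 = parallel 3/4.
   IMPLICIT NONE

   INTEGER ::  netcdf_data_format = 5, netcdf_data_format_save
!
!-- Remember the requested format and fall back to the serial counterpart
   netcdf_data_format_save = netcdf_data_format
   IF ( netcdf_data_format == 5 )  netcdf_data_format = 3
   IF ( netcdf_data_format == 6 )  netcdf_data_format = 4
!
!-- ... masked data output would be called here ...
   PRINT*, 'masked data would be written with format ', netcdf_data_format
!
!-- Restore the user-requested format for all other output streams
   netcdf_data_format = netcdf_data_format_save
   PRINT*, 'format restored to ', netcdf_data_format

END PROGRAM format_fallback_sketch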
palm/trunk/SOURCE/write_var_list.f90
r1242 r1308
20 20 ! Current revisions:
21 21 ! -----------------
22 !
22 ! +do2d_xy_time_count, do2d_xz_time_count, do2d_yz_time_count,
23 ! +do3d_time_count
23 24 !
24 25 ! Former revisions:
…
176 177
177 178
178 binary_version = '3.9a'
179 binary_version = '3.9b'
179 180
180 181 WRITE ( 14 ) binary_version
…
297 298 WRITE ( 14 ) 'dissipation_1d '
298 299 WRITE ( 14 ) dissipation_1d
300 WRITE ( 14 ) 'do2d_xy_time_count '
301 WRITE ( 14 ) do2d_xy_time_count
302 WRITE ( 14 ) 'do2d_xz_time_count '
303 WRITE ( 14 ) do2d_xz_time_count
304 WRITE ( 14 ) 'do2d_yz_time_count '
305 WRITE ( 14 ) do2d_yz_time_count
306 WRITE ( 14 ) 'do3d_time_count '
307 WRITE ( 14 ) do3d_time_count
299 308 WRITE ( 14 ) 'dp_external '
300 309 WRITE ( 14 ) dp_external
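The reason these counters now live in the restart file is the fixed-length time dimension introduced in netcdf.f90: with NF90_UNLIMITED the number of time levels already written can be taken from the file itself via NF90_INQUIRE_DIMENSION, but a fixed dimension reports its defined length (and unwritten levels read back as fill values), so the number of records written so far has to be carried across restarts and used as the start index of the next write. A minimal serial sketch of that bookkeeping follows; the file name, variable and initial counter value are placeholders and error handling is omitted.

PROGRAM time_index_bookkeeping
!
!-- Shows why an output time counter must survive a restart when the time
!-- dimension has a fixed length: the counter, not the dimension length,
!-- yields the start index of the next NF90_PUT_VAR call.
   USE netcdf

   IMPLICIT NONE

   INTEGER, PARAMETER ::  ntdim = 10          ! fixed number of time levels
   INTEGER ::  nc_stat, ncid, id_dim_time, id_var_time
   INTEGER ::  do2d_xz_time_count = 0         ! would be read from the restart file

   nc_stat = NF90_CREATE( 'sketch_counter.nc', NF90_NETCDF4, ncid )
   nc_stat = NF90_DEF_DIM( ncid, 'time', ntdim, id_dim_time )
   nc_stat = NF90_DEF_VAR( ncid, 'time', NF90_DOUBLE, (/ id_dim_time /),     &
                           id_var_time )
   nc_stat = NF90_ENDDEF( ncid )
!
!-- Each output event writes one value at position counter+1 and increments
!-- the counter; the incremented value is what write_var_list.f90 stores
   do2d_xz_time_count = do2d_xz_time_count + 1
   nc_stat = NF90_PUT_VAR( ncid, id_var_time, (/ 100.0d0 /),                 &
                           start = (/ do2d_xz_time_count /),                 &
                           count = (/ 1 /) )

   nc_stat = NF90_CLOSE( ncid )

END PROGRAM time_index_bookkeeping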