Changeset 4763 for palm/trunk/SCRIPTS/palm_cvd
- Timestamp: Oct 30, 2020 12:06:47 PM
- File: palm/trunk/SCRIPTS/palm_cvd (1 edited)
palm/trunk/SCRIPTS/palm_cvd: r4758 → r4763

 # -----------------
 # $Id$
+# - Check automatically for data organization (stored in subdirectories or not)
+# - Convert trajectory and timeseriesProfile coordinates into 1-D coordinates
+#   equivalent to timeseries coordiates. This simplifies processing in PALM and
+#   makes the virtual-measurement module also applicable to other campaigns.
+#
+# 4758 2020-10-26 13:03:52Z suehring
 # In order to do not omit observations that are on the same site but have different
 # coordinates or feature-types, process all files rather than only one and omit
…
 import os
 import numpy as np
+import time


…
 name_featuretype = "featureType"
 name_ts = "timeSeries"
+name_tspr = "timeSeriesProfile"
 name_traj = "trajectory"
 name_ntime = "ntime"
…
 name_eutm = "E_UTM"
 name_nutm = "N_UTM"
-name_hao = "height_above_origin"
+name_hao = "height"
 name_station_h = "station_h"
 name_z = "z"
…
 if ( input_from_observations ):

-    # Run loop over all subdirectories, detect the files and extract a list of sites.
+    # Run loop over all listed input data. Depending on the data set, this could be
+    # a list of files or a list of subdirectories.
     # This is done to reduce the number of virtual measurements in the model. Each
     # virtual measurement has an overhead and consumes memory.
     sites = []
     input_files = []
+    input_files_orig = []
     for dirname in list_input_data:
         data_file = data_path + dirname

-        # Directory may contain various file versions.
-        # Take the one with highest cycle number.
-        highest_cycle_nr = 0
-        for filename in os.listdir(data_file):
-            start_seq = len( filename ) - 6
-            end_seq = len( filename ) - 3
-            if int( filename[start_seq:end_seq] ) > highest_cycle_nr:
-                highest_cycle_nr = int(filename[start_seq:end_seq])
-                latest_file = filename
+        if ( os.path.isdir(data_file) == True ):
+            # Directory may contain various file versions.
+            # Take the one with highest cycle number.
+            highest_cycle_nr = 0
+            for filename in os.listdir(data_file):
+                start_seq = len( filename ) - 6
+                end_seq = len( filename ) - 3
+                if int( filename[start_seq:end_seq] ) > highest_cycle_nr:
+                    highest_cycle_nr = int(filename[start_seq:end_seq])
+                    latest_file = filename
+            input_file = data_file + "/" + latest_file
+            input_file_orig = latest_file
+        else:
+            input_file = data_file
+            input_file_orig = dirname

         # Open the NetCDF file
-        input_file = data_file + "/" + latest_file
         ncfile_in = Dataset( input_file, "r", format="NETCDF4", encoding='ascii')
         input_files.append(input_file)
-
-    # Define a nested list of default variables that shall be measured. Based on this list,
-    # the final number of measured variables is determined.
-    measured_variables_all_sites = [ ['u', 'v', 'w', 'theta', 'hus'] for var in range(0, len(input_files))]
-
-    # Run loop over all subdirectories that contain observational data
-    for counter, file in enumerate(input_files):
-
-        # Open the NetCDF input file
-        input_file = input_files[counter]
+        input_files_orig.append(input_file_orig)
+
+    # Gather all files according to their feature type and all sites for the respective feature type
+    files_traj = []
+    files_ts = []
+    files_tspr = []
+    sites_traj = []
+    sites_ts = []
+    sites_tspr = []
+    for input_file in input_files:
         ncfile_in = Dataset( input_file, "r", format="NETCDF4", encoding='ascii' )

-        # Determine index for the treated site
-        num_vmeas = input_files.index( input_file ) + 1
-        print( counter, num_vmeas )
-
-        # Read global attributes and write them immediately into the output file
         for att in ncfile_in.ncattrs():
             if ( att == name_featuretype ):
                 feature = ncfile_in.getncattr(att)
-            if ( att == name_datacontent ):
-                content = ncfile_in.getncattr(att)
             if ( att == name_site ):
                 site = ncfile_in.getncattr(att)

-            if ( att in atts_float ):
-                ncfile_out.setncattr( att + str(num_vmeas), np.double(ncfile_in.getncattr(att)) )
-            else:
-                ncfile_out.setncattr( att + str(num_vmeas), ncfile_in.getncattr(att) )
-
-        #timeSeries
-        if ( feature == name_ts ):
-            ntime = len( ncfile_in.dimensions[name_ntime] )
-            nstat = len( ncfile_in.dimensions[name_station] )
-            ncfile_out.createDimension( name_ntime + str(num_vmeas), ntime )
-            ncfile_out.createDimension( name_station + str(num_vmeas), nstat )
-
-        #trajectory
-        elif ( feature == name_traj ):
-            ntime = len( ncfile_in.dimensions[name_ntime] )
-            ntraj = len( ncfile_in.dimensions[name_traj_dim] )
-            ncfile_out.createDimension( name_ntime + str(num_vmeas), ntime )
-            ncfile_out.createDimension( name_traj_dim + str(num_vmeas), ntraj )
-
-        #timeseriesProfile
+        if ( feature == name_traj ):
+            files_traj.append(input_file)
+        elif ( feature == name_ts ):
+            files_ts.append(input_file)
         else:
-            ntime = len( ncfile_in.dimensions[name_ntime] )
-            nstat = len( ncfile_in.dimensions[name_station] )
-            nz = len( ncfile_in.dimensions[name_nz] )
-            ncfile_out.createDimension( name_ntime + str(num_vmeas), ntime )
-            ncfile_out.createDimension( name_station + str(num_vmeas), nstat )
-            ncfile_out.createDimension( name_nz + str(num_vmeas), nz )
-
-        for var in ncfile_in.variables.keys():
-            if ( var in dims_out ):
-                # Create a variable and write it to file after it is read. In order to
-                # avoid fill values in the dimensions, these are converted to zero
-                # before written to file. Depending on the featureType of the measurement,
-                # the array shape is different. For more informations, please see
-                # [UC]2 data standard.
-                # Timeseries
-                if ( feature == name_ts ):
-                    temp_ts = ncfile_out.createVariable( var + str(num_vmeas), float, \
-                                                         name_station + str(num_vmeas))
-                    temp_ts[:] = np.nan_to_num( ncfile_in.variables[var][:] )
-
-                # Trajectories
-                elif ( feature == name_traj ):
-                    # @note: If there are files where station_h is present although featureType is
-                    # trajectory, station_h must not be read.
-                    if var != name_station_h:
-                        temp_traj = ncfile_out.createVariable( var + str(num_vmeas), float, \
-                                                               ( name_traj_dim + str(num_vmeas), \
-                                                                 name_ntime + str(num_vmeas) ) )
-                        temp_traj[:,:] = np.nan_to_num( ncfile_in.variables[var][:,:] )
-
-                # TimeseriesProfiles
-                else:
-                    if ( var == 'z' ):
-                        temp_pr = ncfile_out.createVariable( var + str(num_vmeas), float, \
-                                                             ( name_station + str(num_vmeas), \
-                                                               name_nz + str(num_vmeas) ) )
-                        temp_pr[:] = np.nan_to_num( ncfile_in.variables[var][:,0,:] )
-                    else:
-                        temp_pr = ncfile_out.createVariable( var + str(num_vmeas), float, \
-                                                             name_station + str(num_vmeas))
-                        temp_pr[:] = np.nan_to_num( ncfile_in.variables[var][:] )
-
-        # Search for variables to be measured. In case the variable isn't already defined,
-        # append the variable to the list.
-        for var in ncfile_in.variables.keys():
-            if ( var not in non_measurable_vars and \
-                 var not in vars_default and \
-                 var not in measured_variables_all_sites[input_files.index( input_file )] ):
-
-                measured_variables_all_sites[input_files.index( input_file )].append(var)
-
-        # Close the NetCDF input file
+            files_tspr.append(input_file)
+
+        if ( feature == name_traj and site not in sites_traj ):
+            sites_traj.append(site)
+        if ( feature == name_ts and site not in sites_ts ):
+            sites_ts.append(site)
+        if ( feature == name_tspr and site not in sites_tspr ):
+            sites_tspr.append(site)
+
         ncfile_in.close()

-    # After variables are gathered and dimensions / attributes are already written to file,
-    # the list of measured variables is written to file.
-    for site in input_files:
-
-        num_vmeas = input_files.index( site ) + 1
-
-        ncfile_out.createDimension( "nvar"+ str(num_vmeas), \
-                                    len( measured_variables_all_sites[input_files.index( site )] ) )
-
-        measured = ncfile_out.createVariable( 'measured_variables' + str(num_vmeas), 'S1', \
-                                              ("nvar" + str(num_vmeas), "string_len")) # must be NC_CHAR
-
-        for counter, meas in enumerate( measured_variables_all_sites[input_files.index( site )] ):
-            measured[counter] = stringtochar( np.array( meas,"S%s"%(max_string_len) ) )
-
-        # Check if any of the measured variables is a soil variable. Set flag accordingly.
+    for input_file in files_traj:
+        print( "traj", input_file )
+    for site in sites_traj:
+        print( "traj", site )
+
+    for site in sites_ts:
+        print( "ts", site )
+    for site in sites_tspr:
+        print( "tspr", site )
+
+    for file in files_tspr:
+        print( "tspr", file )
+    counter_id = 1
+    for site_traj in sites_traj:
+        # For the given site already define the featureTpye and site
+        ncfile_out.setncattr( name_featuretype + str(counter_id), name_traj )
+        ncfile_out.setncattr( name_site + str(counter_id), site_traj )
+
+        # Define the number of coordinates for the site
+        num_coords = 0
+
+        e_utm_traj = np.array([])
+        n_utm_traj = np.array([])
+        h_traj = np.array([])
+        measured_variables = ['u', 'v', 'w', 'theta', 'hus']
+        for input_file in files_traj:
+            ncfile_in = Dataset( input_file, "r", format="NETCDF4", encoding='ascii' )
+            for att in ncfile_in.ncattrs():
+                if ( att == name_site ):
+                    site = ncfile_in.getncattr(att)
+
+            if ( site == site_traj ):
+
+                for att in ncfile_in.ncattrs():
+                    if ( att == name_orig_x ):
+                        orig_x = ncfile_in.getncattr(att)
+                    if ( att == name_orig_y ):
+                        orig_y = ncfile_in.getncattr(att)
+                    if ( att == name_orig_z ):
+                        orig_z = ncfile_in.getncattr(att)
+
+                ntime = len( ncfile_in.dimensions[name_ntime] )
+                ntraj = len( ncfile_in.dimensions[name_traj_dim] )
+
+                num_coords += ntime * ntraj
+                # Gather UTM and height coordinates and merge them into one array. Further, gather
+                # the variables that shall be sampled. Coordinates are checked to for NaN values and
+                # are tranformed to arithmetric numbers. Further, 2D input array is transformed into
+                # a 1D array.
+                for var in ncfile_in.variables.keys():
+                    if ( var in dims_out and var == name_eutm ):
+                        e_utm_traj = np.append(e_utm_traj, np.nan_to_num( ncfile_in.variables[var][:,:] ).flatten())
+                        #e_utm_traj.append( np.nan_to_num( ncfile_in.variables[var][:,:] ).flatten() )
+                    if ( var in dims_out and var == name_nutm ):
+                        n_utm_traj = np.append(n_utm_traj, np.nan_to_num( ncfile_in.variables[var][:,:] ).flatten())
+                    if ( var in dims_out and var == name_hao ):
+                        h_traj = np.append(h_traj, np.nan_to_num( ncfile_in.variables[var][:,:] ).flatten())
+
+                    if ( var not in non_measurable_vars and \
+                         var not in vars_default and \
+                         var not in measured_variables ):
+                        measured_variables.append(var)
+
+            ncfile_in.close()
+
+        # After all files for the current site are processed, write the origin-coordinates for x,y,z
+        ncfile_out.setncattr( name_orig_x + str(counter_id), orig_x )
+        ncfile_out.setncattr( name_orig_y + str(counter_id), orig_y )
+        ncfile_out.setncattr( name_orig_z + str(counter_id), orig_z )
+        # Create the dimensions
+        ncfile_out.createDimension( name_station + str(counter_id), num_coords )
+
+        temp_traj = ncfile_out.createVariable( name_eutm + str(counter_id), float, name_station + str(counter_id) )
+        temp_traj[:] = e_utm_traj
+
+        temp_traj = ncfile_out.createVariable( name_nutm + str(counter_id), float, name_station + str(counter_id) )
+        temp_traj[:] = n_utm_traj
+
+        temp_traj = ncfile_out.createVariable( name_hao + str(counter_id), float, name_station + str(counter_id) )
+        temp_traj[:] = h_traj
+
+        # Check if any of the measured variables is a soil variable. Set flag accordingly.
         soil = False
-        for var in measured_variables_all_sites[input_files.index( site )]:
+        for var in measured_variables:
             if ( var in soil_vars ):
                 soil = True
-
-        # Write soil flag
-        ncfile_out.setncattr( name_soil_sampling + str( num_vmeas), np.int8(soil) )
-
-
-
-    # Store the number of observational sites
-    num_sites += len( input_files )
+        # Write soil flag
+        ncfile_out.setncattr( name_soil_sampling + str( counter_id), np.int8(soil) )
+
+        # Create dimension for sample-variable string
+        ncfile_out.createDimension( "nvar"+ str(counter_id), len( measured_variables ) )
+
+        measured_var = ncfile_out.createVariable( 'measured_variables' + str(counter_id), 'S1', \
+                                                  ("nvar" + str(counter_id), "string_len") ) # must be NC_CHAR
+
+        # Write the variables to the file
+        for counter, meas in enumerate( measured_variables ):
+            measured_var[counter] = stringtochar( np.array( meas,"S%s"%(max_string_len) ) )
+
+        # Increment the counter
+        counter_id += 1
+
+
+    for site_tspr in sites_tspr:
+        # For the given site already define the featureTpye and site
+        ncfile_out.setncattr( name_featuretype + str(counter_id), name_tspr )
+        ncfile_out.setncattr( name_site + str(counter_id), site_tspr )
+
+        # Define the number of coordinates for the site
+        num_coords = 0
+        e_utm_tspr = np.array([])
+        n_utm_tspr = np.array([])
+        station_h_tspr = np.array([])
+        z_tspr = np.array([])
+
+        measured_variables = ['u', 'v', 'w', 'theta', 'hus']
+        for input_file in files_tspr:
+            ncfile_in = Dataset( input_file, "r", format="NETCDF4", encoding='ascii' )
+            for att in ncfile_in.ncattrs():
+                if ( att == name_site ):
+                    site = ncfile_in.getncattr(att)
+
+            if ( site == site_tspr ):
+                for att in ncfile_in.ncattrs():
+                    if ( att == name_orig_x ):
+                        orig_x = ncfile_in.getncattr(att)
+                    if ( att == name_orig_y ):
+                        orig_y = ncfile_in.getncattr(att)
+                    if ( att == name_orig_z ):
+                        orig_z = ncfile_in.getncattr(att)
+
+                nstation = len( ncfile_in.dimensions[name_station] )
+                ntime = len( ncfile_in.dimensions[name_ntime] )
+                nz = len( ncfile_in.dimensions[name_nz] )
+
+                num_coords += nstation * ntime * nz
+                # Gather UTM and height coordinates and merge them into one array. Further, gather
+                # the variables that shall be sampled. Coordinates are checked to for NaN values and
+                # are tranformed to arithmetric numbers. Further, 2D input array is transformed into
+                # a 1D array.
+                for var in ncfile_in.variables.keys():
+                    tspr_tmp1 = np.zeros((nstation))
+                    tspr_tmp2 = np.zeros((ntime*nz))
+                    if ( var in dims_out and var == name_eutm ):
+                        tspr_tmp1 = np.nan_to_num( ncfile_in.variables[var][:] )
+                        for ns in range(0,int(nstation)):
+                            tspr_tmp2[:] = tspr_tmp1[ns]
+                            e_utm_tspr = np.append(e_utm_tspr, tspr_tmp2)
+                    if ( var in dims_out and var == name_nutm ):
+                        tspr_tmp1 = np.nan_to_num( ncfile_in.variables[var][:] )
+                        for ns in range(0,int(nstation)):
+                            tspr_tmp2[:] = tspr_tmp1[ns]
+                            n_utm_tspr = np.append(n_utm_tspr, tspr_tmp2)
+                    if ( var in dims_out and var == name_z ):
+                        z_tspr_tmp = np.nan_to_num( ncfile_in.variables[var][:,:,:] )
+                        z_tspr = np.append(z_tspr, np.concatenate( z_tspr_tmp ))
+                    if ( var in dims_out and var == name_station_h ):
+                        tspr_tmp1 = np.nan_to_num( ncfile_in.variables[var][:] )
+                        for ns in range(0,int(nstation)):
+                            tspr_tmp2[:] = tspr_tmp1[ns]
+                            station_h_tspr = np.append(station_h_tspr, tspr_tmp2)
+
+                    if ( var not in non_measurable_vars and \
+                         var not in vars_default and \
+                         var not in measured_variables ):
+                        measured_variables.append(var)
+
+            ncfile_in.close()
+
+        # After all files for the current site are processed, write the origin-coordinates for x,y,z
+        ncfile_out.setncattr( name_orig_x + str(counter_id), orig_x )
+        ncfile_out.setncattr( name_orig_y + str(counter_id), orig_y )
+        ncfile_out.setncattr( name_orig_z + str(counter_id), orig_z )
+        # Create the dimensions
+        ncfile_out.createDimension( name_station + str(counter_id), num_coords )
+
+        temp_tspr = ncfile_out.createVariable( name_eutm + str(counter_id), float, name_station + str(counter_id) )
+        temp_tspr[:] = e_utm_tspr
+
+        temp_tspr = ncfile_out.createVariable( name_nutm + str(counter_id), float, name_station + str(counter_id) )
+        temp_tspr[:] = n_utm_tspr
+
+        temp_tspr = ncfile_out.createVariable( name_z + str(counter_id), float, name_station + str(counter_id) )
+        temp_tspr[:] = z_tspr
+
+        temp_tspr = ncfile_out.createVariable( name_station_h + str(counter_id), float, name_station + str(counter_id) )
+        temp_tspr[:] = station_h_tspr
+
+        # Check if any of the measured variables is a soil variable. Set flag accordingly.
+        soil = False
+        for var in measured_variables:
+            if ( var in soil_vars ):
+                soil = True
+        # Write soil flag
+        ncfile_out.setncattr( name_soil_sampling + str( counter_id), np.int8(soil) )
+
+        # Create dimension for sample-variable string
+        ncfile_out.createDimension( "nvar"+ str(counter_id), len( measured_variables ) )
+
+        measured_var = ncfile_out.createVariable( 'measured_variables' + str(counter_id), 'S1', \
+                                                  ("nvar" + str(counter_id), "string_len") ) # must be NC_CHAR
+
+        # Write the variables to the file
+        for counter, meas in enumerate( measured_variables ):
+            measured_var[counter] = stringtochar( np.array( meas,"S%s"%(max_string_len) ) )
+
+        # Increment the counter
+        counter_id += 1
+
+
+    for site_ts in sites_ts:
+        # For the given site already define the featureTpye and site
+        ncfile_out.setncattr( name_featuretype + str(counter_id), name_ts )
+        ncfile_out.setncattr( name_site + str(counter_id), site_ts )
+
+        # Define the number of coordinates for the site
+        num_coords = 0
+        e_utm_ts = np.array([])
+        n_utm_ts = np.array([])
+        station_h_ts = np.array([])
+        z_ts = np.array([])
+
+        measured_variables = ['u', 'v', 'w', 'theta', 'hus']
+        for input_file in files_ts:
+            ncfile_in = Dataset( input_file, "r", format="NETCDF4", encoding='ascii' )
+            for att in ncfile_in.ncattrs():
+                if ( att == name_site ):
+                    site = ncfile_in.getncattr(att)
+
+            if ( site == site_ts ):
+
+                for att in ncfile_in.ncattrs():
+                    if ( att == name_orig_x ):
+                        orig_x = ncfile_in.getncattr(att)
+                    if ( att == name_orig_y ):
+                        orig_y = ncfile_in.getncattr(att)
+                    if ( att == name_orig_z ):
+                        orig_z = ncfile_in.getncattr(att)
+
+                nstation = len( ncfile_in.dimensions[name_station] )
+                num_coords += nstation
+                # Gather UTM and height coordinates and merge them into one array. Further, gather
+                # the variables that shall be sampled. Coordinates are checked to for NaN values and
+                # are tranformed to arithmetric numbers.
+                for var in ncfile_in.variables.keys():
+                    if ( var in dims_out and var == name_eutm ):
+                        e_utm_ts = np.append(e_utm_ts, np.nan_to_num( ncfile_in.variables[var][:] ))
+                    if ( var in dims_out and var == name_nutm ):
+                        n_utm_ts = np.append(n_utm_ts, np.nan_to_num( ncfile_in.variables[var][:] ))
+                    if ( var in dims_out and var == name_z ):
+                        z_ts = np.append(z_ts, np.nan_to_num( ncfile_in.variables[var][:] ))
+                    if ( var in dims_out and var == name_station_h ):
+                        station_h_ts = np.append(station_h_ts, np.nan_to_num( ncfile_in.variables[var][:] ))
+
+                    if ( var not in non_measurable_vars and \
+                         var not in vars_default and \
+                         var not in measured_variables ):
+                        measured_variables.append(var)
+
+            ncfile_in.close()
+
+        # After all files for the current site are processed, write the origin-coordinates for x,y,z
+        ncfile_out.setncattr( name_orig_x + str(counter_id), orig_x )
+        ncfile_out.setncattr( name_orig_y + str(counter_id), orig_y )
+        ncfile_out.setncattr( name_orig_z + str(counter_id), orig_z )
+        # Create the dimensions
+        ncfile_out.createDimension( name_station + str(counter_id), num_coords )
+
+        temp_ts = ncfile_out.createVariable( name_eutm + str(counter_id), float, name_station + str(counter_id) )
+        temp_ts[:] = e_utm_ts
+
+        temp_ts = ncfile_out.createVariable( name_nutm + str(counter_id), float, name_station + str(counter_id) )
+        temp_ts[:] = n_utm_ts
+
+        temp_ts = ncfile_out.createVariable( name_z + str(counter_id), float, name_station + str(counter_id) )
+        temp_ts[:] = z_ts
+
+        temp_ts = ncfile_out.createVariable( name_station_h + str(counter_id), float, name_station + str(counter_id) )
+        temp_ts[:] = station_h_ts
+
+        # Check if any of the measured variables is a soil variable. Set flag accordingly.
+        soil = False
+        for var in measured_variables:
+            if ( var in soil_vars ):
+                soil = True
+        # Write soil flag
+        ncfile_out.setncattr( name_soil_sampling + str( counter_id), np.int8(soil) )
+
+        # Create dimension for sample-variable string
+        ncfile_out.createDimension( "nvar"+ str(counter_id), len( measured_variables ) )
+
+        measured_var = ncfile_out.createVariable( 'measured_variables' + str(counter_id), 'S1', \
+                                                  ("nvar" + str(counter_id), "string_len") ) # must be NC_CHAR
+
+        # Write the variables to the file
+        for counter, meas in enumerate( measured_variables ):
+            measured_var[counter] = stringtochar( np.array( meas,"S%s"%(max_string_len) ) )
+
+        # Increment the counter
+        counter_id += 1
+
+    # Store the number of observational sites
+    num_sites = len( sites_traj ) + len( sites_ts ) + len( sites_tspr )


…
 # are possible.
 if ( custom_coordinates ):
-
-    count_site = num_sites + 1
+    num_sites = counter_id - 1
     for coord in coordinates:
         # Define mandatory attributes
-        ncfile_out.setncattr( name_featuretype + str(count_site), \
+        ncfile_out.setncattr( name_featuretype + str(counter_id), \
                               name_ts )
-        ncfile_out.setncattr( name_site + str(count_site), \
-                              "custom" + str(count_site - num_sites) )
-        ncfile_out.setncattr( name_orig_x + str(count_site), \
+        ncfile_out.setncattr( name_site + str(counter_id), \
+                              "custom" + str(counter_id - num_sites) )
+        ncfile_out.setncattr( name_orig_x + str(counter_id), \
                               coord[0] )
-        ncfile_out.setncattr( name_orig_y + str(count_site), \
+        ncfile_out.setncattr( name_orig_y + str(counter_id), \
                               coord[1] )
-        ncfile_out.setncattr( name_orig_z + str(count_site), \
+        ncfile_out.setncattr( name_orig_z + str(counter_id), \
                               0.0 )

…
         ntime = 1
         nstat = 1
-        ncfile_out.createDimension( name_ntime + str(count_site), ntime )
-        ncfile_out.createDimension( name_station + str(count_site), nstat )
+        ncfile_out.createDimension( name_ntime + str(counter_id), ntime )
+        ncfile_out.createDimension( name_station + str(counter_id), nstat )

         # Define coordinate variables
-        temp_ts = ncfile_out.createVariable( name_eutm + str(count_site), \
+        temp_ts = ncfile_out.createVariable( name_eutm + str(counter_id), \
                                              float, \
-                                             name_station + str(count_site) )
+                                             name_station + str(counter_id) )
         temp_ts[:] = np.array( coord[0] )

-        temp_ts = ncfile_out.createVariable( name_nutm + str(count_site), \
+        temp_ts = ncfile_out.createVariable( name_nutm + str(counter_id), \
                                              float, \
-                                             name_station + str(count_site) )
+                                             name_station + str(counter_id) )
         temp_ts[:] = np.array( coord[1] )

-        temp_ts = ncfile_out.createVariable( name_z + str(count_site), \
+        temp_ts = ncfile_out.createVariable( name_z + str(counter_id), \
                                              float, \
-                                             name_station + str(count_site) )
+                                             name_station + str(counter_id) )
         temp_ts[:] = np.array( coord[2] )

-        temp_ts = ncfile_out.createVariable( name_station_h + str(count_site), \
+        temp_ts = ncfile_out.createVariable( name_station_h + str(counter_id), \
                                              float, \
-                                             name_station + str(count_site) )
+                                             name_station + str(counter_id) )
         temp_ts[:] = np.array( 0.0 )


-        count_site += 1
+        counter_id += 1

     # Reset counter variable
-    count_site = num_sites + 1
+    counter_id = num_sites + 1

     # check if variables are prescribed. If so, prepare final output string
…
             measured_variables.append(var)

-        ncfile_out.createDimension( "nvar"+ str(count_site), \
+        ncfile_out.createDimension( "nvar"+ str(counter_id), \
                                     len( measured_variables ) )

-        measured_var = ncfile_out.createVariable( 'measured_variables' + str(count_site), 'S1', \
-                                                  ("nvar" + str(count_site), "string_len") ) # must be NC_CHAR
+        measured_var = ncfile_out.createVariable( 'measured_variables' + str(counter_id), 'S1', \
+                                                  ("nvar" + str(counter_id), "string_len") ) # must be NC_CHAR

         # Write the variables to the file
…

         # Write soil flag
-        ncfile_out.setncattr( name_soil_sampling + str( count_site), np.int8(soil) )
+        ncfile_out.setncattr( name_soil_sampling + str( counter_id), np.int8(soil) )

         # Increment counter variable
-        count_site += 1
+        counter_id += 1

         del ( measured_variables[:] )

     # Add the number of customized sites.
-    num_sites += int( number_positions )
+    num_sites = counter_id - 1

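The new os.path.isdir branch above keeps the previous file-version handling for directories: file names are expected to end in a three-digit cycle number followed by ".nc", and the file with the highest cycle is taken. A minimal standalone sketch of that selection, not part of the changeset; the helper name and the assumption that every file in the directory follows the "_NNN.nc" pattern are only illustrative:

import os

def latest_cycle_file(directory):
    # Select the file with the highest three-digit cycle number, assuming
    # every file name ends in "_NNN.nc", e.g. "site_mast_2020_007.nc"
    # (characters -6:-3 of the name hold the cycle number, as in palm_cvd).
    highest_cycle_nr = -1
    latest_file = None
    for filename in os.listdir(directory):
        cycle = int(filename[len(filename) - 6:len(filename) - 3])
        if cycle > highest_cycle_nr:
            highest_cycle_nr = cycle
            latest_file = filename
    return os.path.join(directory, latest_file)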
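Before anything is written to the output file, the revised script first sorts all input files by their featureType global attribute and collects the unique site names per feature type. A compact sketch of the same idea, here with dictionaries instead of the six separate lists used in the changeset; the function name is hypothetical, and unknown featureType values are not caught:

from netCDF4 import Dataset

def group_by_feature_type(input_files):
    # Sort [UC]2 NetCDF files by the featureType global attribute and
    # collect the unique site names per feature type.
    files = {'trajectory': [], 'timeSeries': [], 'timeSeriesProfile': []}
    sites = {'trajectory': [], 'timeSeries': [], 'timeSeriesProfile': []}
    for input_file in input_files:
        ncfile_in = Dataset(input_file, 'r', format='NETCDF4')
        feature = ncfile_in.getncattr('featureType')
        site = ncfile_in.getncattr('site')
        ncfile_in.close()
        files[feature].append(input_file)
        if site not in sites[feature]:
            sites[feature].append(site)
    return files, sites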
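The central change is the coordinate conversion announced in the file header: trajectory coordinates of shape (traj, ntime) are flattened, and timeSeriesProfile station coordinates are repeated ntime * nz times, so every site ends up with plain 1-D coordinate arrays just like a timeSeries site. A small NumPy sketch of both conversions; shapes and values are made up for illustration:

import numpy as np

# Trajectory: E_UTM comes in with shape (traj, ntime); NaNs are zeroed and
# the array is flattened to one coordinate per sample.
e_utm_traj_2d = np.array([[4500.0, 4501.5, np.nan],
                          [4600.0, 4601.0, 4602.0]])
e_utm_traj = np.nan_to_num(e_utm_traj_2d).flatten()        # length traj * ntime

# timeSeriesProfile: E_UTM comes in with shape (station,); every station
# value is repeated ntime * nz times so the coordinate list matches the
# flattened (station, ntime, nz) sample positions.
ntime, nz = 4, 3
e_utm_station = np.array([3950.0, 3960.0])
e_utm_tspr = np.array([])
for ns in range(e_utm_station.size):
    e_utm_tspr = np.append(e_utm_tspr, np.full(ntime * nz, e_utm_station[ns]))

print(e_utm_traj.size, e_utm_tspr.size)                    # 6 24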
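Both the removed and the added code store the list of sampled variable names as fixed-length character arrays (NC_CHAR) via netCDF4.stringtochar. A self-contained sketch of that pattern; the output file name and the 32-character string length are placeholders, not values taken from the changeset:

import numpy as np
from netCDF4 import Dataset, stringtochar

max_string_len = 32                      # assumed length of the string_len dimension
measured_variables = ['u', 'v', 'w', 'theta', 'hus']

ncfile_out = Dataset('example_vm_driver.nc', 'w', format='NETCDF4')
ncfile_out.createDimension('string_len', max_string_len)
ncfile_out.createDimension('nvar1', len(measured_variables))

# 'S1' yields an NC_CHAR variable; stringtochar pads each name to
# max_string_len characters and splits it into single characters.
measured_var = ncfile_out.createVariable('measured_variables1', 'S1',
                                         ('nvar1', 'string_len'))
for counter, meas in enumerate(measured_variables):
    measured_var[counter] = stringtochar(np.array(meas, 'S%s' % max_string_len))

ncfile_out.close()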