source: palm/trunk/SCRIPTS/palm_cvd @ 4868

Last change on this file since 4868 was 4843, checked in by raasch, 4 years ago

local namelist parameter added to switch off the module although the respective module namelist appears in the namelist file, further copyright updates

  • Property svn:executable set to *
  • Property svn:keywords set to ID
File size: 30.2 KB
RevLine 
[4400]1#!/usr/bin/env python3
2# -*- coding: utf-8 -*-
3#
4#--------------------------------------------------------------------------------#
5# This file is part of the PALM model system.
6#
7# PALM is free software: you can redistribute it and/or modify it under the terms
8# of the GNU General Public License as published by the Free Software Foundation,
9# either version 3 of the License, or (at your option) any later version.
10#
11# PALM is distributed in the hope that it will be useful, but WITHOUT ANY
12# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
13# A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
14#
15# You should have received a copy of the GNU General Public License along with
16# PALM. If not, see <http://www.gnu.org/licenses/>.
17#
[4843]18# Copyright 1997-2021  Leibniz Universitaet Hannover
[4400]19#--------------------------------------------------------------------------------#
20#
21# Current revisions:
22# -----------------
[4663]23#
24#
[4400]25# Former revisions:
26# -----------------
27# $Id$
[4763]28# - Check automatically for data organization (stored in subdirectories or not)
29# - Convert trajectory and timeseriesProfile coordinates into 1-D coordinates
30 #   equivalent to timeseries coordinates. This simplifies processing in PALM and
31#   makes the virtual-measurement module also applicable to other campaigns.
32#
33# 4758 2020-10-26 13:03:52Z suehring
[4758]34 # In order not to omit observations that are on the same site but have different
35# coordinates or feature-types, process all files rather than only one and omit
36# the rest.
37#
38# 4663 2020-09-02 14:54:09Z gronemeier
[4663]39# bugfix in non_measurable_vars; ignore station_h if featureType is trajectory
40#
41# 4400 2020-02-10 20:32:41Z suehring
[4400]42# Initial revision
43#
44# Description:
45# ------------
46# Processing tool for creating PIDS conform virtual measurement setup file
47# from UC2 data-standard conform observational data or from prescribed input
48# coordinates.
49#
50# @Authors Matthias Suehring (suehring@muk.uni-hannover.de)
51#          Tobias Gronemeier (gronemeier@muk.uni-hannover.de)
52#
53 # @todo Add further feature types for customized observations. At the moment only
54#       timeSeries is possible.
55#--------------------------------------------------------------------------------#
56
57
58import netCDF4
59from netCDF4 import Dataset, stringtochar
60import os
61import numpy as np
[4763]62import time
[4400]63
64
65# Function to read the config file
def read_config_file():
   """Read the palm_cvd configuration file and set the global parameters.

   The configuration file name is taken from the last command-line
   argument; if no argument is given, ".cvd.config.default" in the
   current working directory is used. The program terminates if the
   file does not exist.

   Returns
   -------
   int
       0 on success.
   """
   import configparser
   import json
   import os
   import sys

   # Definition of global configuration parameters
   global global_acronym
   global global_author
   global global_campaign
   global global_comment
   global global_contact
   global global_data_content
   global global_dependencies
   global global_institution
   global global_keywords
   global global_location
   global global_references
   global global_site
   global global_source
   global global_palm_version
   global data_path
   global output_path
   global output_filename
   global number_positions
   global input_from_observations
   global coordinates
   global vars_to_be_measured
   global custom_coordinates

   # Default values; a single blank marks "not set".
   global_acronym          = " "
   global_author           = " "
   global_campaign         = " "
   global_comment          = " "
   global_contact          = " "
   global_data_content     = " "
   global_dependencies     = " "
   global_institution      = " "
   global_keywords         = " "
   global_location         = " "
   global_references       = " "
   global_site             = " "
   global_source           = " "
   global_palm_version     = 6.0
   data_path               = " "
   output_path             = " "
   output_filename         = "none"
   number_positions        = -999
   input_from_observations = False
   coordinates             = []
   vars_to_be_measured     = []
   custom_coordinates      = False

   # The last command-line argument (if any) names the configuration file,
   # otherwise fall back to the default file in the working directory.
   input_config = ".cvd.config.default"
   if len( sys.argv ) > 1:
      input_config = str( sys.argv[-1] )

   # Allow empty settings
   config = configparser.RawConfigParser(allow_no_value=True)

   # Quit if the configuration file does not exist.
   if not os.path.isfile( input_config ):
      print ("Error. No configuration file " + input_config + " found.")
      quit()

   config.read(input_config)

   for current_section in config.sections():

      # Read global attributes which are written into the output file header.
      if ( current_section == 'global' ):

         global_acronym      = config.get( current_section, 'acronym'        )
         global_author       = config.get( current_section, 'author'         )
         global_campaign     = config.get( current_section, 'campaign'       )
         global_comment      = config.get( current_section, 'comment'        )
         global_contact      = config.get( current_section, 'contact_person' )
         global_data_content = config.get( current_section, 'data_content'   )
         global_dependencies = config.get( current_section, 'dependencies'   )
         global_institution  = config.get( current_section, 'institution'    )
         global_keywords     = config.get( current_section, 'keywords'       )
         global_location     = config.get( current_section, 'location'       )
         global_references   = config.get( current_section, 'references'     )
         global_site         = config.get( current_section, 'site'           )
         global_source       = config.get( current_section, 'source'         )
         global_palm_version = float( config.get( current_section, 'palm_version' ) )

      # Read the data input path for observational data.
      elif ( current_section == 'input' ):

         data_path = config.get( current_section, 'data_path' )
         input_from_observations = True

      # Read output path and filename for the VM driver.
      elif ( current_section == 'output' ):

         output_path     = config.get( current_section, 'output_path' )
         output_filename = config.get( current_section, 'output_filename' )

      # Read customized coordinates where virtual measurements shall be
      # taken, as well as the variables that should be sampled.
      elif ( current_section == 'custom_positions' ):

         number_positions = config.get( current_section, 'number_positions' )

         for count in range( int( number_positions ) ):
            coordinates.append( json.loads( config.get( current_section, \
                                                        "coordinates" + str( count + 1 ) ) ) )
            # If coordinates are given, set a global flag.
            custom_coordinates = True

         for count in range( int( number_positions ) ):
            vars_to_be_measured.append( json.loads( config.get( current_section, \
                                                    "vars_to_be_measured" + str( count + 1 ) ) ) )

   return 0
187
188#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
189# Main program:
190#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
191
192
# Feature types distinguished by the UC2 data standard.
name_featuretype = "featureType"
name_ts = "timeSeries"
name_tspr = "timeSeriesProfile"
name_traj = "trajectory"

# Dimension names in the input files.
name_ntime = "ntime"
name_time = "time"
name_station = "station"
name_traj_dim = "traj"
name_nz = "nz"

# Attribute and coordinate-variable names.
name_datacontent = "data_content"
name_eutm = "E_UTM"
name_nutm = "N_UTM"
name_hao = "height"
name_station_h = "station_h"
name_z = "z"
name_soil_sampling = "soil_sample"
name_num_stat = "number_of_stations"
name_fill = "_FillValue"
name_site = "site"
name_acro = "acronym"
name_content = "data_content"
name_orig_x = "origin_x"
name_orig_y = "origin_y"
name_orig_z = "origin_z"

# Fixed maximum length of strings written into the output file.
max_string_len = 50

name_measvars = "measured_variables"

# Variables that describe coordinates or metadata rather than measured
# quantities; they are never added to the sampled-variable list.
non_measurable_vars = [
    'station_name', 'time', 'time_bounds', 'crs',
    'vrs', 'x', 'y', 'z', 'lon', 'lat', 'ntime', 'station', 'traj',
    'E_UTM', 'N_UTM', 'height_above_origin', 'station_h',
    'traj_name', 'height', 'band_pm_size', 'bands_pm', 'bands_pm_size_bounds',
    'bands_pm_size', 'ancillary_detected_layer',
]

# Variables that are sampled within the soil.
soil_vars = ['t_soil', 'm_soil', 'lwc', 'lwcs', 'smp']

# Coordinate variables that are transferred into the output file.
dims_out = [name_eutm, name_nutm, name_hao, name_z, name_station_h]

# Attributes which need to be of type float. In the data set this is not
# necessarily guaranteed.
atts_float = [
    'origin_x', 'origin_y', 'origin_z',
    'origin_lon', 'origin_lat', 'rotation_angle',
]

# Default variables that shall be measured at each site.
vars_default = ['u', 'v', 'w', 'theta', 'hus']
239
240
# Read the configuration and initialize the counter for the number of sites.
read_config_file()
num_sites = 0

# Compose the full output file path.
output_filename = output_path + output_filename

# Open the NetCDF output file for writing.
ncfile_out = Dataset( output_filename, "w", format="NETCDF4" )

# Write all global header attributes. The order of the tuples determines
# the order in which the attributes appear in the output file.
header_attributes = [
   ( 'acronym',        global_acronym      ),
   ( 'author',         global_author       ),
   ( 'campaign',       global_campaign     ),
   ( 'comment',        global_comment      ),
   ( 'contact_person', global_contact      ),
   ( 'data_content',   global_data_content ),
   ( 'dependencies',   global_dependencies ),
   ( 'institution',    global_institution  ),
   ( 'keywords',       global_keywords     ),
   ( 'location',       global_location     ),
   ( 'references',     global_references   ),
   ( 'site',           global_site         ),
   ( 'source',         global_source       ),
   ( 'palm_version',   global_palm_version ),
]
for att_name, att_value in header_attributes:
   ncfile_out.setncattr( att_name, att_value )

# Create universal dimension for the string length.
ncfile_out.createDimension( "string_len", max_string_len )
271
272
# Check if observational data is available. In this case, obtain an
# alphabetically sorted list of input data. The list is sorted just for
# the sake of clarity in the resulting setup file.
if ( input_from_observations ):
   list_input_data = sorted( os.listdir( data_path ) )

if ( input_from_observations ):

   # Run a loop over all listed input data. Depending on the data
   # organization, the entries are either plain files or subdirectories
   # that contain several cycles of the same file.
   sites = []
   input_files = []
   input_files_orig = []
   for dirname in list_input_data:
      data_file = data_path + dirname

      if os.path.isdir( data_file ):
         # A directory may contain various file versions, distinguished by
         # a three-digit cycle number right before the filename suffix.
         # Take the one with the highest cycle number.
         highest_cycle_nr = 0
         for filename in os.listdir( data_file ):
            start_seq = len( filename ) - 6
            end_seq   = len( filename ) - 3
            if int( filename[start_seq:end_seq] ) > highest_cycle_nr:
               highest_cycle_nr = int( filename[start_seq:end_seq] )
               latest_file      = filename
         input_file = data_file + "/" + latest_file
         input_file_orig = latest_file
      else:
         input_file = data_file
         input_file_orig = dirname

      # Open the NetCDF file once to verify that it is readable, then close
      # it again immediately (bugfix: the handle was formerly left open,
      # leaking one file descriptor per input file; the files are re-opened
      # later when they are actually processed).
      ncfile_in = Dataset( input_file, "r", format="NETCDF4", encoding='ascii' )
      ncfile_in.close()
      input_files.append(input_file)
      input_files_orig.append(input_file_orig)
[4400]311
#  Gather all files according to their feature type and all sites for the respective feature type
   files_traj  = []
   files_ts    = []
   files_tspr  = []
   sites_traj  = []
   sites_ts    = []
   sites_tspr  = []
   for input_file in input_files:
      # Re-open each input file and read its featureType and site attributes.
      ncfile_in = Dataset( input_file, "r", format="NETCDF4", encoding='ascii' )

      for att in ncfile_in.ncattrs():
         if ( att == name_featuretype ):
            feature = ncfile_in.getncattr(att)
         if ( att == name_site ):
            site = ncfile_in.getncattr(att)

      # NOTE(review): if a file lacks the featureType or site attribute,
      # 'feature'/'site' silently keep the value read from the previous
      # file (and are undefined for the very first file) — confirm that
      # all input files carry both attributes.
      # Files that are neither trajectory nor timeSeries are treated as
      # timeSeriesProfile.
      if ( feature == name_traj ):
         files_traj.append(input_file)
      elif ( feature == name_ts ):
         files_ts.append(input_file)
      else:
         files_tspr.append(input_file)

      # Collect each site only once per feature type.
      if ( feature == name_traj  and  site not in sites_traj ):
         sites_traj.append(site)
      if ( feature == name_ts  and  site not in sites_ts ):
         sites_ts.append(site)
      if ( feature == name_tspr  and  site not in sites_tspr ):
         sites_tspr.append(site)

      ncfile_in.close()

   # Informative output: gathered files and sites per feature type.
   for input_file in files_traj:
      print( "traj", input_file )
   for site in sites_traj:
      print( "traj", site )

   for site in sites_ts:
      print( "ts", site )
   for site in sites_tspr:
      print( "tspr", site )

   for file in files_tspr:
      print( "tspr", file )
   # Counter that numbers the measurement definitions in the output file;
   # it is continued across the three feature-type loops below.
   counter_id = 1
   # Process all trajectory sites: write featureType, site and origin
   # attributes, merge the coordinates of all files that belong to the
   # site, and write them as flat 1-D coordinate variables.
   for site_traj in sites_traj:
      # For the given site already define the featureType and site
      ncfile_out.setncattr( name_featuretype + str(counter_id), name_traj )
      ncfile_out.setncattr( name_site + str(counter_id), site_traj )

      # Define the number of coordinates for the site
      num_coords = 0

      e_utm_traj = np.array([])
      n_utm_traj = np.array([])
      h_traj     = np.array([])
      # Default variables that are always sampled.
      measured_variables = ['u', 'v', 'w', 'theta', 'hus']
      for input_file in files_traj:
         ncfile_in = Dataset( input_file, "r", format="NETCDF4", encoding='ascii' )
         for att in ncfile_in.ncattrs():
            if ( att == name_site ):
               site = ncfile_in.getncattr(att)

         # Only process files that belong to the current site.
         if ( site == site_traj ):

            # Origin coordinates; the values of the last matching file win.
            # NOTE(review): if no file defines these attributes, orig_x/y/z
            # are undefined and the setncattr calls below raise a NameError.
            for att in ncfile_in.ncattrs():
               if ( att == name_orig_x ):
                  orig_x = ncfile_in.getncattr(att)
               if ( att == name_orig_y ):
                  orig_y = ncfile_in.getncattr(att)
               if ( att == name_orig_z ):
                  orig_z = ncfile_in.getncattr(att)

            ntime = len( ncfile_in.dimensions[name_ntime]    )
            ntraj = len( ncfile_in.dimensions[name_traj_dim] )

            # Each trajectory contributes ntime sampling coordinates.
            num_coords += ntime * ntraj
#           Gather UTM and height coordinates and merge them into one array. Further, gather
#           the variables that shall be sampled. Coordinates are checked for NaN values and
#           are transformed to arithmetic numbers. Further, the 2D input array is transformed
#           into a 1D array.
            for var in ncfile_in.variables.keys():
               if ( var in dims_out  and  var == name_eutm ):
                  e_utm_traj = np.append(e_utm_traj, np.nan_to_num( ncfile_in.variables[var][:,:] ).flatten())
                  #e_utm_traj.append( np.nan_to_num( ncfile_in.variables[var][:,:] ).flatten() )
               if ( var in dims_out  and  var == name_nutm ):
                  n_utm_traj = np.append(n_utm_traj, np.nan_to_num( ncfile_in.variables[var][:,:] ).flatten())
               if ( var in dims_out  and  var == name_hao ):
                  h_traj = np.append(h_traj, np.nan_to_num( ncfile_in.variables[var][:,:] ).flatten())

               # Everything that is neither a coordinate/metadata variable
               # nor already listed is an additional measured variable.
               if ( var not in non_measurable_vars  and  \
                    var not in vars_default         and  \
                    var not in measured_variables ):
                  measured_variables.append(var)

         ncfile_in.close()

#     After all files for the current site are processed, write the origin-coordinates for x,y,z
      ncfile_out.setncattr( name_orig_x + str(counter_id), orig_x )
      ncfile_out.setncattr( name_orig_y + str(counter_id), orig_y )
      ncfile_out.setncattr( name_orig_z + str(counter_id), orig_z )
#     Create the dimensions
      ncfile_out.createDimension( name_station + str(counter_id), num_coords )

      temp_traj = ncfile_out.createVariable( name_eutm + str(counter_id), float, name_station + str(counter_id) )
      temp_traj[:] = e_utm_traj

      temp_traj = ncfile_out.createVariable( name_nutm + str(counter_id), float, name_station + str(counter_id) )
      temp_traj[:] = n_utm_traj

      temp_traj = ncfile_out.createVariable( name_hao  + str(counter_id), float, name_station + str(counter_id) )
      temp_traj[:] = h_traj

#     Check if any of the measured variables is a soil variable. Set flag accordingly.
      soil = False
      for var in measured_variables:
         if ( var in soil_vars ):
            soil = True
#     Write soil flag
      ncfile_out.setncattr( name_soil_sampling + str( counter_id), np.int8(soil) )

#     Create dimension for sample-variable string
      ncfile_out.createDimension( "nvar"+ str(counter_id), len( measured_variables ) )

      measured_var = ncfile_out.createVariable( 'measured_variables' + str(counter_id), 'S1', \
                                                ("nvar" + str(counter_id), "string_len") ) # must be NC_CHAR

      # Write the variables to the file
      for counter, meas in enumerate( measured_variables ):
         measured_var[counter] = stringtochar( np.array( meas,"S%s"%(max_string_len) ) )

#     Increment the counter
      counter_id += 1
445   
446   
   # Process all timeSeriesProfile sites: write featureType, site and
   # origin attributes, merge the coordinates of all files that belong
   # to the site, and write them as flat 1-D coordinate variables.
   for site_tspr in sites_tspr:
      # For the given site already define the featureType and site
      ncfile_out.setncattr( name_featuretype + str(counter_id), name_tspr )
      ncfile_out.setncattr( name_site + str(counter_id), site_tspr )

      # Define the number of coordinates for the site
      num_coords = 0
      e_utm_tspr     = np.array([])
      n_utm_tspr     = np.array([])
      station_h_tspr = np.array([])
      z_tspr         = np.array([])

      # Default variables that are always sampled.
      measured_variables = ['u', 'v', 'w', 'theta', 'hus']
      for input_file in files_tspr:
         ncfile_in = Dataset( input_file, "r", format="NETCDF4", encoding='ascii' )
         for att in ncfile_in.ncattrs():
            if ( att == name_site ):
               site = ncfile_in.getncattr(att)

         # Only process files that belong to the current site.
         if ( site == site_tspr ):
            # Origin coordinates; the values of the last matching file win.
            for att in ncfile_in.ncattrs():
               if ( att == name_orig_x ):
                  orig_x = ncfile_in.getncattr(att)
               if ( att == name_orig_y ):
                  orig_y = ncfile_in.getncattr(att)
               if ( att == name_orig_z ):
                  orig_z = ncfile_in.getncattr(att)

            nstation = len( ncfile_in.dimensions[name_station] )
            ntime    = len( ncfile_in.dimensions[name_ntime]   )
            nz       = len( ncfile_in.dimensions[name_nz]   )

            # Each station contributes ntime * nz sampling coordinates.
            num_coords += nstation * ntime * nz
#           Gather UTM and height coordinates and merge them into one array. Further, gather
#           the variables that shall be sampled. Coordinates are checked for NaN values and
#           are transformed to arithmetic numbers. Further, the input arrays are transformed
#           into 1D arrays.
            for var in ncfile_in.variables.keys():
               tspr_tmp1 = np.zeros((nstation))
               tspr_tmp2 = np.zeros((ntime*nz))
               # The per-station E_UTM / N_UTM / station_h values are
               # replicated ntime*nz times each, so that all coordinates
               # become flat 1-D arrays of equal length (num_coords).
               if ( var in dims_out  and  var == name_eutm ):
                  tspr_tmp1 = np.nan_to_num( ncfile_in.variables[var][:] )
                  for ns in range(0,int(nstation)):
                     tspr_tmp2[:] = tspr_tmp1[ns]
                     e_utm_tspr = np.append(e_utm_tspr, tspr_tmp2)
               if ( var in dims_out  and  var == name_nutm ):
                  tspr_tmp1 = np.nan_to_num( ncfile_in.variables[var][:] )
                  for ns in range(0,int(nstation)):
                     tspr_tmp2[:] = tspr_tmp1[ns]
                     n_utm_tspr = np.append(n_utm_tspr, tspr_tmp2)
               # z is read as a 3-D array here; np.append (without an axis
               # argument) flattens it into the 1-D list.
               if ( var in dims_out  and  var == name_z ):
                  z_tspr_tmp = np.nan_to_num( ncfile_in.variables[var][:,:,:] )
                  z_tspr = np.append(z_tspr, np.concatenate( z_tspr_tmp ))
               if ( var in dims_out  and  var == name_station_h ):
                  tspr_tmp1 = np.nan_to_num( ncfile_in.variables[var][:] )
                  for ns in range(0,int(nstation)):
                     tspr_tmp2[:] = tspr_tmp1[ns]
                     station_h_tspr = np.append(station_h_tspr, tspr_tmp2)

               # Everything that is neither a coordinate/metadata variable
               # nor already listed is an additional measured variable.
               if ( var not in non_measurable_vars  and  \
                    var not in vars_default         and  \
                    var not in measured_variables ):
                  measured_variables.append(var)

         ncfile_in.close()

#     After all files for the current site are processed, write the origin-coordinates for x,y,z
      ncfile_out.setncattr( name_orig_x + str(counter_id), orig_x )
      ncfile_out.setncattr( name_orig_y + str(counter_id), orig_y )
      ncfile_out.setncattr( name_orig_z + str(counter_id), orig_z )
#     Create the dimensions
      ncfile_out.createDimension( name_station + str(counter_id), num_coords )

      temp_tspr = ncfile_out.createVariable( name_eutm      + str(counter_id), float, name_station + str(counter_id) )
      temp_tspr[:] = e_utm_tspr

      temp_tspr = ncfile_out.createVariable( name_nutm      + str(counter_id), float, name_station + str(counter_id) )
      temp_tspr[:] = n_utm_tspr

      temp_tspr = ncfile_out.createVariable( name_z         + str(counter_id), float, name_station + str(counter_id) )
      temp_tspr[:] = z_tspr

      temp_tspr = ncfile_out.createVariable( name_station_h + str(counter_id), float, name_station + str(counter_id) )
      temp_tspr[:] = station_h_tspr

#     Check if any of the measured variables is a soil variable. Set flag accordingly.
      soil = False
      for var in measured_variables:
         if ( var in soil_vars ):
            soil = True
#     Write soil flag
      ncfile_out.setncattr( name_soil_sampling + str( counter_id), np.int8(soil) )

#     Create dimension for sample-variable string
      ncfile_out.createDimension( "nvar"+ str(counter_id), len( measured_variables ) )

      measured_var = ncfile_out.createVariable( 'measured_variables' + str(counter_id), 'S1', \
                                                ("nvar" + str(counter_id), "string_len") ) # must be NC_CHAR

      # Write the variables to the file
      for counter, meas in enumerate( measured_variables ):
         measured_var[counter] = stringtochar( np.array( meas,"S%s"%(max_string_len) ) )

#     Increment the counter
      counter_id += 1
552   
553   
   # Process all timeSeries sites analogously to the loops above; a
   # timeSeries site contributes one sampling coordinate per station.
   for site_ts in sites_ts:
      # For the given site already define the featureType and site
      ncfile_out.setncattr( name_featuretype + str(counter_id), name_ts )
      ncfile_out.setncattr( name_site + str(counter_id), site_ts )

      # Define the number of coordinates for the site
      num_coords = 0
      e_utm_ts     = np.array([])
      n_utm_ts     = np.array([])
      station_h_ts = np.array([])
      z_ts         = np.array([])

      # Default variables that are always sampled.
      measured_variables = ['u', 'v', 'w', 'theta', 'hus']
      for input_file in files_ts:
         ncfile_in = Dataset( input_file, "r", format="NETCDF4", encoding='ascii' )
         for att in ncfile_in.ncattrs():
            if ( att == name_site ):
               site = ncfile_in.getncattr(att)

         # Only process files that belong to the current site.
         if ( site == site_ts ):

            # Origin coordinates; the values of the last matching file win.
            for att in ncfile_in.ncattrs():
               if ( att == name_orig_x ):
                  orig_x = ncfile_in.getncattr(att)
               if ( att == name_orig_y ):
                  orig_y = ncfile_in.getncattr(att)
               if ( att == name_orig_z ):
                  orig_z = ncfile_in.getncattr(att)

            nstation = len( ncfile_in.dimensions[name_station]    )
            num_coords += nstation
#           Gather UTM and height coordinates and merge them into one array. Further, gather
#           the variables that shall be sampled. Coordinates are checked for NaN values and
#           are transformed to arithmetic numbers.
            for var in ncfile_in.variables.keys():
               if ( var in dims_out  and  var == name_eutm ):
                  e_utm_ts = np.append(e_utm_ts, np.nan_to_num( ncfile_in.variables[var][:] ))
               if ( var in dims_out  and  var == name_nutm ):
                  n_utm_ts = np.append(n_utm_ts, np.nan_to_num( ncfile_in.variables[var][:] ))
               if ( var in dims_out  and  var == name_z ):
                  z_ts = np.append(z_ts, np.nan_to_num( ncfile_in.variables[var][:] ))
               if ( var in dims_out  and  var == name_station_h ):
                  station_h_ts = np.append(station_h_ts, np.nan_to_num( ncfile_in.variables[var][:] ))

               # Everything that is neither a coordinate/metadata variable
               # nor already listed is an additional measured variable.
               if ( var not in non_measurable_vars  and  \
                    var not in vars_default         and  \
                    var not in measured_variables ):
                  measured_variables.append(var)

         ncfile_in.close()

#     After all files for the current site are processed, write the origin-coordinates for x,y,z
      ncfile_out.setncattr( name_orig_x + str(counter_id), orig_x )
      ncfile_out.setncattr( name_orig_y + str(counter_id), orig_y )
      ncfile_out.setncattr( name_orig_z + str(counter_id), orig_z )
#     Create the dimensions
      ncfile_out.createDimension( name_station + str(counter_id), num_coords )

      temp_ts = ncfile_out.createVariable( name_eutm      + str(counter_id), float, name_station + str(counter_id) )
      temp_ts[:] = e_utm_ts

      temp_ts = ncfile_out.createVariable( name_nutm      + str(counter_id), float, name_station + str(counter_id) )
      temp_ts[:] = n_utm_ts

      temp_ts = ncfile_out.createVariable( name_z         + str(counter_id), float, name_station + str(counter_id) )
      temp_ts[:] = z_ts

      temp_ts = ncfile_out.createVariable( name_station_h + str(counter_id), float, name_station + str(counter_id) )
      temp_ts[:] = station_h_ts

#     Check if any of the measured variables is a soil variable. Set flag accordingly.
      soil = False
      for var in measured_variables:
         if ( var in soil_vars ):
            soil = True
#     Write soil flag
      ncfile_out.setncattr( name_soil_sampling + str( counter_id), np.int8(soil) )

#     Create dimension for sample-variable string
      ncfile_out.createDimension( "nvar"+ str(counter_id), len( measured_variables ) )

      measured_var = ncfile_out.createVariable( 'measured_variables' + str(counter_id), 'S1', \
                                                ("nvar" + str(counter_id), "string_len") ) # must be NC_CHAR

      # Write the variables to the file
      for counter, meas in enumerate( measured_variables ):
         measured_var[counter] = stringtochar( np.array( meas,"S%s"%(max_string_len) ) )

#     Increment the counter
      counter_id += 1

# Store the number of observational sites
   num_sites = len( sites_traj ) + len( sites_ts ) + len( sites_tspr )
647
648
#  Now process the customized input data. Please note, at the moment only
#  timeseries are possible.
if ( custom_coordinates ):
   num_sites = counter_id - 1
   for coord in coordinates:
      # Define mandatory attributes
      ncfile_out.setncattr( name_featuretype + str(counter_id),  \
                            name_ts )
      ncfile_out.setncattr( name_site        + str(counter_id),  \
                            "custom"         + str(counter_id - num_sites) )
      ncfile_out.setncattr( name_orig_x      + str(counter_id),  \
                            coord[0] )
      ncfile_out.setncattr( name_orig_y      + str(counter_id),  \
                            coord[1] )
      ncfile_out.setncattr( name_orig_z      + str(counter_id),  \
                            0.0 )

      # Define dimensions: a custom position is a single-station,
      # single-time timeSeries.
      ntime = 1
      nstat = 1
      ncfile_out.createDimension( name_ntime   + str(counter_id), ntime )
      ncfile_out.createDimension( name_station + str(counter_id), nstat )

      # Define coordinate variables
      temp_ts = ncfile_out.createVariable( name_eutm      + str(counter_id), \
                                           float,                            \
                                           name_station   + str(counter_id) )
      temp_ts[:] = np.array( coord[0] )

      temp_ts = ncfile_out.createVariable( name_nutm      + str(counter_id), \
                                           float,                            \
                                           name_station   + str(counter_id) )
      temp_ts[:] = np.array( coord[1] )

      temp_ts = ncfile_out.createVariable( name_z         + str(counter_id), \
                                           float,                            \
                                           name_station   + str(counter_id) )
      temp_ts[:] = np.array( coord[2] )

      temp_ts = ncfile_out.createVariable( name_station_h + str(counter_id), \
                                           float,                            \
                                           name_station   + str(counter_id) )
      temp_ts[:] = np.array( 0.0 )

      counter_id += 1

   # Reset the counter so that the measured-variables definitions below are
   # numbered consistently with the coordinate definitions written above.
   counter_id = num_sites + 1

   # Check if variables are prescribed. If so, prepare the final output
   # string stored in measured_variables.
   if ( vars_to_be_measured ):

      for custom_vars in vars_to_be_measured:

         # Start from the default variables and extend by the customized
         # variables that are not already contained.
         measured_variables = list( vars_default )
         for var in custom_vars:
            if ( var not in measured_variables ):
               measured_variables.append(var)

         ncfile_out.createDimension( "nvar"+ str(counter_id), \
                                     len( measured_variables ) )

         measured_var = ncfile_out.createVariable( 'measured_variables' + str(counter_id), 'S1', \
                                                  ("nvar" + str(counter_id), "string_len") ) # must be NC_CHAR

         # Write the variables to the file
         for counter, meas in enumerate( measured_variables ):
            measured_var[counter] = stringtochar( np.array( meas,"S%s"%(max_string_len) ) )

         # Add soil attribute for the current measurement.
         # Bugfix: the former check "any( var == soil_vars for var in ... )"
         # compared each variable name against the whole soil_vars list and
         # was therefore always False; the membership test "var in soil_vars"
         # is the intended check (consistent with the observational sites).
         soil = any( var in soil_vars for var in measured_variables )

         # Write soil flag
         ncfile_out.setncattr( name_soil_sampling + str( counter_id), np.int8(soil) )

         # Increment counter variable
         counter_id += 1

   # Add the number of customized sites.
   # NOTE(review): if custom coordinates are given but vars_to_be_measured
   # is empty, counter_id was reset above and the custom sites are not
   # included in this count — confirm that this is intended.
   num_sites = counter_id - 1
[4400]741
742
# Finally, write the total number of sites to the output file
ncfile_out.setncattr( name_num_stat, num_sites )

# Close the output file explicitly so that all buffered data is flushed to
# disk (bugfix: the dataset was formerly never closed before quitting).
ncfile_out.close()

print( "*** palm_cvd has been finished. You can find the output file under: " )
print( "    " + output_filename )

quit()
Note: See TracBrowser for help on using the repository browser.