#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#--------------------------------------------------------------------------------#
# This file is part of the PALM model system.
#
# PALM is free software: you can redistribute it and/or modify it under the terms
# of the GNU General Public License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
#
# PALM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# PALM. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 1997-2020 Leibniz Universitaet Hannover
#--------------------------------------------------------------------------------#
#
# Current revisions:
# -----------------
#
#
# Former revisions:
# -----------------
# $Id$
# - Check automatically for the data organization (stored in subdirectories or not)
# - Convert trajectory and timeSeriesProfile coordinates into 1-D coordinates
#   equivalent to timeSeries coordinates. This simplifies processing in PALM and
#   makes the virtual-measurement module also applicable to other campaigns.
#
# 4758 2020-10-26 13:03:52Z suehring
# In order not to omit observations that belong to the same site but have different
# coordinates or feature types, process all files rather than only one and omitting
# the rest.
#
# 4663 2020-09-02 14:54:09Z gronemeier
# bugfix in non_measurable_vars; ignore station_h if featureType is trajectory
#
# 4400 2020-02-10 20:32:41Z suehring
# Initial revision
#
# Description:
# ------------
# Processing tool for creating a PIDS-conforming virtual-measurement setup file
# from UC2 data-standard-conforming observational data or from prescribed input
# coordinates.
#
# @Authors Matthias Suehring (suehring@muk.uni-hannover.de)
#          Tobias Gronemeier (gronemeier@muk.uni-hannover.de)
#
# @todo Add further feature types for customized observations. At the moment only
#       timeSeries is possible.
#--------------------------------------------------------------------------------#
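#
# Usage note (based on the argument handling in read_config_file below):
#     palm_cvd [<config-file>]
# If no configuration file is given on the command line, ".cvd.config.default"
# is read; otherwise the last command-line argument is used as the configuration
# file. The virtual-measurement driver is written to the file specified in the
# [output] section of that configuration.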


import netCDF4
from netCDF4 import Dataset, stringtochar
import os
import numpy as np
import time


# Function to read the config file
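#
# A minimal configuration sketch (section and key names follow the config.get
# calls below; all values shown are placeholders, not defaults; paths are
# concatenated directly with file names, so they should end with a "/"):
#
#     [global]
#     acronym = <acronym>
#     author = <author name, email>
#     campaign = <campaign>
#     comment = <comment>
#     contact_person = <contact>
#     data_content = <data content>
#     dependencies = <dependencies>
#     institution = <institution>
#     keywords = <keywords>
#     location = <location>
#     references = <references>
#     site = <site>
#     source = <source>
#     palm_version = 6.0
#
#     [input]
#     data_path = <path to the UC2 observational data>
#
#     [output]
#     output_path = <path where the driver file is written>
#     output_filename = <name of the virtual-measurement driver file>
#
#     [custom_positions]
#     number_positions = 2
#     coordinates1 = [<E_UTM>, <N_UTM>, <z>]
#     coordinates2 = [<E_UTM>, <N_UTM>, <z>]
#     vars_to_be_measured1 = ["theta", "hus"]
#     vars_to_be_measured2 = []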
def read_config_file():

    import configparser
    import os
    import sys
    import json

    # Definition of global configuration parameters
    global global_acronym
    global global_author
    global global_campaign
    global global_comment
    global global_contact
    global global_data_content
    global global_dependencies
    global global_institution
    global global_keywords
    global global_location
    global global_references
    global global_site
    global global_source
    global global_palm_version
    global data_path
    global output_path
    global output_filename
    global number_positions
    global input_from_observations
    global coordinates
    global vars_to_be_measured
    global custom_coordinates

    global_acronym = " "
    global_author = " "
    global_campaign = " "
    global_comment = " "
    global_contact = " "
    global_data_content = " "
    global_dependencies = " "
    global_institution = " "
    global_keywords = " "
    global_location = " "
    global_references = " "
    global_site = " "
    global_source = " "
    global_palm_version = 6.0
    data_path = " "
    output_path = " "
    output_filename = "none"
    number_positions = -999
    input_from_observations = False
    coordinates = []
    vars_to_be_measured = []
    custom_coordinates = False

    # Determine the configuration file. If command-line arguments are given,
    # the last one is taken as the configuration file; otherwise the default
    # name is used.
    input_config = ".cvd.config.default"
    for i in range(1, len(sys.argv)):
        input_config = str(sys.argv[i])

    # Allow empty settings
    config = configparser.RawConfigParser(allow_no_value=True)

    # Check if the configuration file exists and quit otherwise.
    if ( os.path.isfile(input_config) == False ):
        print("Error. No configuration file " + input_config + " found.")
        quit()

    config.read(input_config)

    for section in range( 0, len( config.sections() ) ):

        current_section = config.sections()[section]

        # Read global attributes which are written into the output file header
        if ( current_section == 'global' ):

            global_acronym = config.get( current_section, 'acronym' )
            global_author = config.get( current_section, 'author' )
            global_campaign = config.get( current_section, 'campaign' )
            global_comment = config.get( current_section, 'comment' )
            global_contact = config.get( current_section, 'contact_person' )
            global_data_content = config.get( current_section, 'data_content' )
            global_dependencies = config.get( current_section, 'dependencies' )
            global_institution = config.get( current_section, 'institution' )
            global_keywords = config.get( current_section, 'keywords' )
            global_location = config.get( current_section, 'location' )
            global_references = config.get( current_section, 'references' )
            global_site = config.get( current_section, 'site' )
            global_source = config.get( current_section, 'source' )
            global_palm_version = float( config.get( current_section, 'palm_version' ) )

        # Read data input path for observational data
        elif ( current_section == 'input' ):

            data_path = config.get( current_section, 'data_path' )
            input_from_observations = True

        # Read output path and filename for the VM driver
        elif ( current_section == 'output' ):

            output_path = config.get( current_section, 'output_path' )
            output_filename = config.get( current_section, 'output_filename' )

        # Read customized coordinates where virtual measurements shall be taken,
        # as well as the variables that shall be sampled.
        elif ( current_section == 'custom_positions' ):

            number_positions = config.get( current_section, 'number_positions' )

            for count in range( 0, int( number_positions ) ):
                coordinates.append( json.loads( config.get( current_section, \
                                    "coordinates" + str( count + 1 ) ) ) )
                # If coordinates are given, set a global flag.
                custom_coordinates = True

            for count in range( 0, int( number_positions ) ):
                vars_to_be_measured.append( json.loads( config.get( current_section, \
                                            "vars_to_be_measured" + str( count + 1 ) ) ) )


    return 0

#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Main program:
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++


# Define strings
name_featuretype = "featureType"
name_ts = "timeSeries"
name_tspr = "timeSeriesProfile"
name_traj = "trajectory"
name_ntime = "ntime"
name_time = "time"
name_station = "station"
name_traj_dim = "traj"
name_nz = "nz"
name_datacontent = "data_content"
name_eutm = "E_UTM"
name_nutm = "N_UTM"
name_hao = "height"
name_station_h = "station_h"
name_z = "z"
name_soil_sampling = "soil_sample"
name_num_stat = "number_of_stations"
name_fill = "_FillValue"
name_site = "site"
name_acro = "acronym"
name_content = "data_content"
name_orig_x = "origin_x"
name_orig_y = "origin_y"
name_orig_z = "origin_z"

max_string_len = 50

name_measvars = "measured_variables"

non_measurable_vars = ['station_name', 'time', 'time_bounds', 'crs', \
                       'vrs', 'x', 'y', 'z', 'lon', 'lat', 'ntime', 'station', 'traj', \
                       'E_UTM', 'N_UTM', 'height_above_origin', 'station_h', \
                       'traj_name', 'height', 'band_pm_size', 'bands_pm', 'bands_pm_size_bounds', \
                       'bands_pm_size', 'ancillary_detected_layer' ]

soil_vars = [ 't_soil', 'm_soil', 'lwc', 'lwcs', 'smp' ]

dims_out = [ name_eutm, name_nutm, name_hao, name_z, name_station_h ]

# Define list of attributes which need to be of type float. In the data set this is
# not necessarily guaranteed.
atts_float = [ 'origin_x', 'origin_y', 'origin_z', 'origin_lon', 'origin_lat', 'rotation_angle' ]

# Define list of default variables that shall be measured at each site
vars_default = [ 'u', 'v', 'w', 'theta', 'hus' ]


# Read config file
read_config_file()

# Initialize counter variable for the number of sites
num_sites = 0

# Set the output path for the data
output_filename = output_path + output_filename

# Open output file
ncfile_out = Dataset( output_filename, "w", format="NETCDF4" )

# First, add global attributes
ncfile_out.setncattr( 'acronym', global_acronym )
ncfile_out.setncattr( 'author', global_author )
ncfile_out.setncattr( 'campaign', global_campaign )
ncfile_out.setncattr( 'comment', global_comment )
ncfile_out.setncattr( 'contact_person', global_contact )
ncfile_out.setncattr( 'data_content', global_data_content )
ncfile_out.setncattr( 'dependencies', global_dependencies )
ncfile_out.setncattr( 'institution', global_institution )
ncfile_out.setncattr( 'keywords', global_keywords )
ncfile_out.setncattr( 'location', global_location )
ncfile_out.setncattr( 'references', global_references )
ncfile_out.setncattr( 'site', global_site )
ncfile_out.setncattr( 'source', global_source )
ncfile_out.setncattr( 'palm_version', global_palm_version )

# Create universal dimension for the string length.
ncfile_out.createDimension("string_len", max_string_len)
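
# Note on the naming scheme used below: each measurement site gets an index i
# (starting at 1) that is appended to the respective names. Per site, the output
# file receives the global attributes featureType<i>, site<i>, origin_x<i>,
# origin_y<i>, origin_z<i> and soil_sample<i>, the dimensions station<i> and
# nvar<i>, coordinate variables such as E_UTM<i>, N_UTM<i>, height<i>/z<i> and
# station_h<i> (depending on the feature type), and the character array
# measured_variables<i>.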


# Check if observational data is available. If so, obtain an alphabetically sorted
# list of the input data. The list is sorted just for the sake of clarity in the
# resulting setup file.
if ( input_from_observations == True ):
    list_input_data = sorted( os.listdir( data_path ) )

if ( input_from_observations ):

    # Run a loop over all listed input data. Depending on the data set, this could be
    # a list of files or a list of subdirectories.
    # This is done to reduce the number of virtual measurements in the model. Each
    # virtual measurement has an overhead and consumes memory.
    sites = []
    input_files = []
    input_files_orig = []
    for dirname in list_input_data:
        data_file = data_path + dirname

        if ( os.path.isdir(data_file) == True ):
            # Directory may contain various file versions.
            # Take the one with the highest cycle number.
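            # Note: the cycle number is read from characters -6 to -4 of the file
            # name, i.e. this assumes file names ending with a three-digit cycle
            # number followed by a three-character suffix, e.g. "<name>.007.nc".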
            highest_cycle_nr = 0
            for filename in os.listdir(data_file):
                start_seq = len( filename ) - 6
                end_seq = len( filename ) - 3
                if int( filename[start_seq:end_seq] ) > highest_cycle_nr:
                    highest_cycle_nr = int(filename[start_seq:end_seq])
                    latest_file = filename
            input_file = data_file + "/" + latest_file
            input_file_orig = latest_file
        else:
            input_file = data_file
            input_file_orig = dirname

        # Open the NetCDF file
        ncfile_in = Dataset( input_file, "r", format="NETCDF4", encoding='ascii')
        input_files.append(input_file)
        input_files_orig.append(input_file_orig)

    # Gather all files according to their feature type, and all sites for the
    # respective feature type.
    files_traj = []
    files_ts = []
    files_tspr = []
    sites_traj = []
    sites_ts = []
    sites_tspr = []
    for input_file in input_files:
        ncfile_in = Dataset( input_file, "r", format="NETCDF4", encoding='ascii' )

        for att in ncfile_in.ncattrs():
            if ( att == name_featuretype ):
                feature = ncfile_in.getncattr(att)
            if ( att == name_site ):
                site = ncfile_in.getncattr(att)

        if ( feature == name_traj ):
            files_traj.append(input_file)
        elif ( feature == name_ts ):
            files_ts.append(input_file)
        else:
            files_tspr.append(input_file)

        if ( feature == name_traj and site not in sites_traj ):
            sites_traj.append(site)
        if ( feature == name_ts and site not in sites_ts ):
            sites_ts.append(site)
        if ( feature == name_tspr and site not in sites_tspr ):
            sites_tspr.append(site)

        ncfile_in.close()

    for input_file in files_traj:
        print( "traj", input_file )
    for site in sites_traj:
        print( "traj", site )

    for site in sites_ts:
        print( "ts", site )
    for site in sites_tspr:
        print( "tspr", site )

    for file in files_tspr:
        print( "tspr", file )

    counter_id = 1
    for site_traj in sites_traj:
        # For the given site, already define the featureType and site
        ncfile_out.setncattr( name_featuretype + str(counter_id), name_traj )
        ncfile_out.setncattr( name_site + str(counter_id), site_traj )

        # Define the number of coordinates for the site
        num_coords = 0

        e_utm_traj = np.array([])
        n_utm_traj = np.array([])
        h_traj = np.array([])
        measured_variables = ['u', 'v', 'w', 'theta', 'hus']
        for input_file in files_traj:
            ncfile_in = Dataset( input_file, "r", format="NETCDF4", encoding='ascii' )
            for att in ncfile_in.ncattrs():
                if ( att == name_site ):
                    site = ncfile_in.getncattr(att)

            if ( site == site_traj ):

                for att in ncfile_in.ncattrs():
                    if ( att == name_orig_x ):
                        orig_x = ncfile_in.getncattr(att)
                    if ( att == name_orig_y ):
                        orig_y = ncfile_in.getncattr(att)
                    if ( att == name_orig_z ):
                        orig_z = ncfile_in.getncattr(att)

                ntime = len( ncfile_in.dimensions[name_ntime] )
                ntraj = len( ncfile_in.dimensions[name_traj_dim] )

                num_coords += ntime * ntraj
                # Gather UTM and height coordinates and merge them into one array.
                # Also gather the variables that shall be sampled. Coordinates are
                # checked for NaN values, which are replaced by finite numbers, and
                # the 2-D input arrays are flattened into 1-D arrays.
                for var in ncfile_in.variables.keys():
                    if ( var in dims_out and var == name_eutm ):
                        e_utm_traj = np.append(e_utm_traj, np.nan_to_num( ncfile_in.variables[var][:,:] ).flatten())
                        #e_utm_traj.append( np.nan_to_num( ncfile_in.variables[var][:,:] ).flatten() )
                    if ( var in dims_out and var == name_nutm ):
                        n_utm_traj = np.append(n_utm_traj, np.nan_to_num( ncfile_in.variables[var][:,:] ).flatten())
                    if ( var in dims_out and var == name_hao ):
                        h_traj = np.append(h_traj, np.nan_to_num( ncfile_in.variables[var][:,:] ).flatten())

                    if ( var not in non_measurable_vars and \
                         var not in vars_default and \
                         var not in measured_variables ):
                        measured_variables.append(var)

            ncfile_in.close()

        # After all files for the current site are processed, write the origin coordinates for x, y, z
        ncfile_out.setncattr( name_orig_x + str(counter_id), orig_x )
        ncfile_out.setncattr( name_orig_y + str(counter_id), orig_y )
        ncfile_out.setncattr( name_orig_z + str(counter_id), orig_z )
        # Create the dimensions
        ncfile_out.createDimension( name_station + str(counter_id), num_coords )

        temp_traj = ncfile_out.createVariable( name_eutm + str(counter_id), float, name_station + str(counter_id) )
        temp_traj[:] = e_utm_traj

        temp_traj = ncfile_out.createVariable( name_nutm + str(counter_id), float, name_station + str(counter_id) )
        temp_traj[:] = n_utm_traj

        temp_traj = ncfile_out.createVariable( name_hao + str(counter_id), float, name_station + str(counter_id) )
        temp_traj[:] = h_traj

        # Check if any of the measured variables is a soil variable. Set flag accordingly.
        soil = False
        for var in measured_variables:
            if ( var in soil_vars ):
                soil = True
        # Write soil flag
        ncfile_out.setncattr( name_soil_sampling + str( counter_id), np.int8(soil) )

        # Create dimension for sample-variable string
        ncfile_out.createDimension( "nvar" + str(counter_id), len( measured_variables ) )

        measured_var = ncfile_out.createVariable( 'measured_variables' + str(counter_id), 'S1', \
                                                  ("nvar" + str(counter_id), "string_len") ) # must be NC_CHAR

        # Write the variables to the file
        for counter, meas in enumerate( measured_variables ):
            measured_var[counter] = stringtochar( np.array( meas, "S%s"%(max_string_len) ) )

        # Increment the counter
        counter_id += 1


    for site_tspr in sites_tspr:
        # For the given site, already define the featureType and site
        ncfile_out.setncattr( name_featuretype + str(counter_id), name_tspr )
        ncfile_out.setncattr( name_site + str(counter_id), site_tspr )

        # Define the number of coordinates for the site
        num_coords = 0
        e_utm_tspr = np.array([])
        n_utm_tspr = np.array([])
        station_h_tspr = np.array([])
        z_tspr = np.array([])

        measured_variables = ['u', 'v', 'w', 'theta', 'hus']
        for input_file in files_tspr:
            ncfile_in = Dataset( input_file, "r", format="NETCDF4", encoding='ascii' )
            for att in ncfile_in.ncattrs():
                if ( att == name_site ):
                    site = ncfile_in.getncattr(att)

            if ( site == site_tspr ):
                for att in ncfile_in.ncattrs():
                    if ( att == name_orig_x ):
                        orig_x = ncfile_in.getncattr(att)
                    if ( att == name_orig_y ):
                        orig_y = ncfile_in.getncattr(att)
                    if ( att == name_orig_z ):
                        orig_z = ncfile_in.getncattr(att)

                nstation = len( ncfile_in.dimensions[name_station] )
                ntime = len( ncfile_in.dimensions[name_ntime] )
                nz = len( ncfile_in.dimensions[name_nz] )

                num_coords += nstation * ntime * nz
                # Gather UTM and height coordinates and merge them into one array.
                # Also gather the variables that shall be sampled. Coordinates are
                # checked for NaN values, which are replaced by finite numbers, and
                # the multi-dimensional input arrays are flattened into 1-D arrays.
                for var in ncfile_in.variables.keys():
                    tspr_tmp1 = np.zeros((nstation))
                    tspr_tmp2 = np.zeros((ntime*nz))
                    if ( var in dims_out and var == name_eutm ):
                        tspr_tmp1 = np.nan_to_num( ncfile_in.variables[var][:] )
                        for ns in range(0, int(nstation)):
                            tspr_tmp2[:] = tspr_tmp1[ns]
                            e_utm_tspr = np.append(e_utm_tspr, tspr_tmp2)
                    if ( var in dims_out and var == name_nutm ):
                        tspr_tmp1 = np.nan_to_num( ncfile_in.variables[var][:] )
                        for ns in range(0, int(nstation)):
                            tspr_tmp2[:] = tspr_tmp1[ns]
                            n_utm_tspr = np.append(n_utm_tspr, tspr_tmp2)
                    if ( var in dims_out and var == name_z ):
                        z_tspr_tmp = np.nan_to_num( ncfile_in.variables[var][:,:,:] )
                        z_tspr = np.append(z_tspr, np.concatenate( z_tspr_tmp ))
                    if ( var in dims_out and var == name_station_h ):
                        tspr_tmp1 = np.nan_to_num( ncfile_in.variables[var][:] )
                        for ns in range(0, int(nstation)):
                            tspr_tmp2[:] = tspr_tmp1[ns]
                            station_h_tspr = np.append(station_h_tspr, tspr_tmp2)

                    if ( var not in non_measurable_vars and \
                         var not in vars_default and \
                         var not in measured_variables ):
                        measured_variables.append(var)

            ncfile_in.close()

        # After all files for the current site are processed, write the origin coordinates for x, y, z
        ncfile_out.setncattr( name_orig_x + str(counter_id), orig_x )
        ncfile_out.setncattr( name_orig_y + str(counter_id), orig_y )
        ncfile_out.setncattr( name_orig_z + str(counter_id), orig_z )
        # Create the dimensions
        ncfile_out.createDimension( name_station + str(counter_id), num_coords )

        temp_tspr = ncfile_out.createVariable( name_eutm + str(counter_id), float, name_station + str(counter_id) )
        temp_tspr[:] = e_utm_tspr

        temp_tspr = ncfile_out.createVariable( name_nutm + str(counter_id), float, name_station + str(counter_id) )
        temp_tspr[:] = n_utm_tspr

        temp_tspr = ncfile_out.createVariable( name_z + str(counter_id), float, name_station + str(counter_id) )
        temp_tspr[:] = z_tspr

        temp_tspr = ncfile_out.createVariable( name_station_h + str(counter_id), float, name_station + str(counter_id) )
        temp_tspr[:] = station_h_tspr

        # Check if any of the measured variables is a soil variable. Set flag accordingly.
        soil = False
        for var in measured_variables:
            if ( var in soil_vars ):
                soil = True
        # Write soil flag
        ncfile_out.setncattr( name_soil_sampling + str( counter_id), np.int8(soil) )

        # Create dimension for sample-variable string
        ncfile_out.createDimension( "nvar" + str(counter_id), len( measured_variables ) )

        measured_var = ncfile_out.createVariable( 'measured_variables' + str(counter_id), 'S1', \
                                                  ("nvar" + str(counter_id), "string_len") ) # must be NC_CHAR

        # Write the variables to the file
        for counter, meas in enumerate( measured_variables ):
            measured_var[counter] = stringtochar( np.array( meas, "S%s"%(max_string_len) ) )

        # Increment the counter
        counter_id += 1


    for site_ts in sites_ts:
        # For the given site, already define the featureType and site
        ncfile_out.setncattr( name_featuretype + str(counter_id), name_ts )
        ncfile_out.setncattr( name_site + str(counter_id), site_ts )

        # Define the number of coordinates for the site
        num_coords = 0
        e_utm_ts = np.array([])
        n_utm_ts = np.array([])
        station_h_ts = np.array([])
        z_ts = np.array([])

        measured_variables = ['u', 'v', 'w', 'theta', 'hus']
        for input_file in files_ts:
            ncfile_in = Dataset( input_file, "r", format="NETCDF4", encoding='ascii' )
            for att in ncfile_in.ncattrs():
                if ( att == name_site ):
                    site = ncfile_in.getncattr(att)

            if ( site == site_ts ):

                for att in ncfile_in.ncattrs():
                    if ( att == name_orig_x ):
                        orig_x = ncfile_in.getncattr(att)
                    if ( att == name_orig_y ):
                        orig_y = ncfile_in.getncattr(att)
                    if ( att == name_orig_z ):
                        orig_z = ncfile_in.getncattr(att)

                nstation = len( ncfile_in.dimensions[name_station] )
                num_coords += nstation
                # Gather UTM and height coordinates and merge them into one array.
                # Also gather the variables that shall be sampled. Coordinates are
                # checked for NaN values, which are replaced by finite numbers.
                for var in ncfile_in.variables.keys():
                    if ( var in dims_out and var == name_eutm ):
                        e_utm_ts = np.append(e_utm_ts, np.nan_to_num( ncfile_in.variables[var][:] ))
                    if ( var in dims_out and var == name_nutm ):
                        n_utm_ts = np.append(n_utm_ts, np.nan_to_num( ncfile_in.variables[var][:] ))
                    if ( var in dims_out and var == name_z ):
                        z_ts = np.append(z_ts, np.nan_to_num( ncfile_in.variables[var][:] ))
                    if ( var in dims_out and var == name_station_h ):
                        station_h_ts = np.append(station_h_ts, np.nan_to_num( ncfile_in.variables[var][:] ))

                    if ( var not in non_measurable_vars and \
                         var not in vars_default and \
                         var not in measured_variables ):
                        measured_variables.append(var)

            ncfile_in.close()

        # After all files for the current site are processed, write the origin coordinates for x, y, z
        ncfile_out.setncattr( name_orig_x + str(counter_id), orig_x )
        ncfile_out.setncattr( name_orig_y + str(counter_id), orig_y )
        ncfile_out.setncattr( name_orig_z + str(counter_id), orig_z )
        # Create the dimensions
        ncfile_out.createDimension( name_station + str(counter_id), num_coords )

        temp_ts = ncfile_out.createVariable( name_eutm + str(counter_id), float, name_station + str(counter_id) )
        temp_ts[:] = e_utm_ts

        temp_ts = ncfile_out.createVariable( name_nutm + str(counter_id), float, name_station + str(counter_id) )
        temp_ts[:] = n_utm_ts

        temp_ts = ncfile_out.createVariable( name_z + str(counter_id), float, name_station + str(counter_id) )
        temp_ts[:] = z_ts

        temp_ts = ncfile_out.createVariable( name_station_h + str(counter_id), float, name_station + str(counter_id) )
        temp_ts[:] = station_h_ts

        # Check if any of the measured variables is a soil variable. Set flag accordingly.
        soil = False
        for var in measured_variables:
            if ( var in soil_vars ):
                soil = True
        # Write soil flag
        ncfile_out.setncattr( name_soil_sampling + str( counter_id), np.int8(soil) )

        # Create dimension for sample-variable string
        ncfile_out.createDimension( "nvar" + str(counter_id), len( measured_variables ) )

        measured_var = ncfile_out.createVariable( 'measured_variables' + str(counter_id), 'S1', \
                                                  ("nvar" + str(counter_id), "string_len") ) # must be NC_CHAR

        # Write the variables to the file
        for counter, meas in enumerate( measured_variables ):
            measured_var[counter] = stringtochar( np.array( meas, "S%s"%(max_string_len) ) )

        # Increment the counter
        counter_id += 1

    # Store the number of observational sites
    num_sites = len( sites_traj ) + len( sites_ts ) + len( sites_tspr )


# Now process the customized input data. Please note, at the moment only
# time series are possible.
if ( custom_coordinates ):
    num_sites = counter_id - 1
    for coord in coordinates:
        # Define mandatory attributes
        ncfile_out.setncattr( name_featuretype + str(counter_id), \
                              name_ts )
        ncfile_out.setncattr( name_site + str(counter_id), \
                              "custom" + str(counter_id - num_sites) )
        ncfile_out.setncattr( name_orig_x + str(counter_id), \
                              coord[0] )
        ncfile_out.setncattr( name_orig_y + str(counter_id), \
                              coord[1] )
        ncfile_out.setncattr( name_orig_z + str(counter_id), \
                              0.0 )

        # Define dimensions
        ntime = 1
        nstat = 1
        ncfile_out.createDimension( name_ntime + str(counter_id), ntime )
        ncfile_out.createDimension( name_station + str(counter_id), nstat )

        # Define coordinate variables
        temp_ts = ncfile_out.createVariable( name_eutm + str(counter_id), \
                                             float, \
                                             name_station + str(counter_id) )
        temp_ts[:] = np.array( coord[0] )

        temp_ts = ncfile_out.createVariable( name_nutm + str(counter_id), \
                                             float, \
                                             name_station + str(counter_id) )
        temp_ts[:] = np.array( coord[1] )

        temp_ts = ncfile_out.createVariable( name_z + str(counter_id), \
                                             float, \
                                             name_station + str(counter_id) )
        temp_ts[:] = np.array( coord[2] )

        temp_ts = ncfile_out.createVariable( name_station_h + str(counter_id), \
                                             float, \
                                             name_station + str(counter_id) )
        temp_ts[:] = np.array( 0.0 )


        counter_id += 1

    # Reset counter variable
    counter_id = num_sites + 1

    # Check if variables are prescribed. If so, prepare the final output string
    # stored in measured_variables.
    if ( vars_to_be_measured ):

        for custom_vars in vars_to_be_measured:

            measured_variables = []
            for var in vars_default:
                measured_variables.append(var)

            # Check if the given variables are already in the default variables.
            # If not, extend the list.
            for var in custom_vars:
                if ( var not in measured_variables ):

                    measured_variables.append(var)

            ncfile_out.createDimension( "nvar" + str(counter_id), \
                                        len( measured_variables ) )

            measured_var = ncfile_out.createVariable( 'measured_variables' + str(counter_id), 'S1', \
                                                      ("nvar" + str(counter_id), "string_len") ) # must be NC_CHAR

            # Write the variables to the file
            for counter, meas in enumerate( measured_variables ):
                measured_var[counter] = stringtochar( np.array( meas, "S%s"%(max_string_len) ) )

            # Add soil attribute for the current measurement.
            soil = False
            if ( any( var in soil_vars for var in measured_variables ) ):
                soil = True

            # Write soil flag
            ncfile_out.setncattr( name_soil_sampling + str( counter_id), np.int8(soil) )

            # Increment counter variable
            counter_id += 1

            del ( measured_variables[:] )

    # Add the number of customized sites.
    num_sites = counter_id - 1


# Finally, write the total number of sites to the output file
ncfile_out.setncattr( name_num_stat, num_sites )

# Close the output file so that everything is flushed to disk.
ncfile_out.close()


print( "*** palm_cvd has been finished. You can find the output file under: " )
print( "    " + output_filename )

quit()