1 | #!/usr/bin/env python3 |
---|
2 | # PYTHON_ARGCOMPLETE_OK |
---|
3 | |
---|
4 | import os |
---|
5 | import sys |
---|
6 | import shutil |
---|
7 | from datetime import datetime |
---|
8 | import subprocess |
---|
9 | import multiprocessing |
---|
10 | import socket |
---|
11 | import getpass |
---|
12 | import math |
---|
13 | import re |
---|
14 | import threading |
---|
15 | import queue |
---|
16 | from contextlib import ContextDecorator |
---|
17 | |
---|
# Dependency guards: required third-party packages abort the script with an
# installation hint when missing; optional ones only reduce functionality.

try:
    from argparse import ArgumentParser
    from argparse import RawTextHelpFormatter
except ImportError:
    sys.exit(
        'ERROR: You need argparse!\n' +
        '   install it from http://pypi.python.org/pypi/argparse\n' +
        '   or run \"pip install argparse\".'
    )

try:
    import numpy as np
except ImportError:
    sys.exit(
        'ERROR: You need numpy!\n' +
        '   install it from http://pypi.python.org/pypi/numpy\n' +
        '   or run \"python3 -m pip install numpy\".'
    )

try:
    import netCDF4
except ImportError:
    sys.exit(
        'ERROR: You need netCDF4!\n' +
        '   install it from http://pypi.python.org/pypi/netCDF4\n' +
        '   or run \"python3 -m pip install netCDF4\".'
    )

try:
    import yaml
except ImportError:
    sys.exit(
        'ERROR: You need PyYAML!\n' +
        '   install it from http://pypi.python.org/pypi/PyYAML\n' +
        '   or run \"python3 -m pip install PyYAML\".'
    )

# argcomplete is optional: without it only shell tab-completion is lost.
try:
    import argcomplete
except ImportError:
    print(
        'INFO: To use Tab-completion you need argcomplete!\n' +
        '   install it from http://pypi.python.org/pypi/argcomplete\n' +
        '   or run \"python3 -m pip install argcomplete\".'
    )
    has_argcomplete = False
else:
    has_argcomplete = True
---|
66 | |
---|
# termcolor is optional: fall back to a no-op that returns the string
# unchanged, so the rest of the code can call tcolored() unconditionally.
try:
    from termcolor import colored as tcolored
except ImportError:
    def tcolored(string, color):
        # Fallback when termcolor is not installed: ignore the color.
        return string

# Module-level switch read by colored() and set by disable_color() to turn
# off colored terminal output globally.
disable_colored_output = False
---|
75 | |
---|
def colored(string, color):
    """Colorize *string* via termcolor unless coloring is globally disabled."""
    if disable_colored_output:
        return string
    return tcolored(string, color)
---|
81 | |
---|
82 | |
---|
def disable_color():
    """Globally switch off colored terminal output for this module."""
    global disable_colored_output
    disable_colored_output = True
---|
86 | |
---|
87 | |
---|
# Version string of this test-suite script.
version = '1.0.1'
---|
89 | |
---|
90 | |
---|
class Environment:
    """Filesystem layout of the PALM working copy, resolved at import time
    relative to the location of this script (presumably trunk/SCRIPTS —
    confirm against the repository layout)."""

    # Directory containing this script.
    scripts_dir = os.path.dirname(os.path.realpath(__file__))
    # Repository trunk: parent directory of the scripts directory.
    trunk_dir = os.path.realpath(os.path.join(scripts_dir, '..'))
    # Workspace holding the trunk checkout.
    workspace_dir = os.path.realpath(os.path.join(trunk_dir, '..'))

    # Reference test material shipped with the trunk.
    trunk_tests_dir = os.path.join(trunk_dir, 'TUTORIALS')
    trunk_tests_cases_dir = os.path.join(trunk_tests_dir, 'cases')
    trunk_tests_builds_dir = os.path.join(trunk_tests_dir, 'builds')

    # Directory in which test runs are carried out.
    tests_dir = os.path.join(workspace_dir, 'tutorials')
---|
102 | |
---|
103 | |
---|
class LogFormatter:
    """Static layout constants for aligned console/log-file output.

    NOTE(review): the table widths are computed at import time by scanning
    the build and case directories under Environment, so importing this
    module requires those directories to exist and contain at least one
    non-hidden entry each — otherwise max() raises ValueError.
    """

    terminal_columns, terminal_lines = shutil.get_terminal_size()
    # Horizontal separator line, capped at 300 characters.
    hline = '#' * min(terminal_columns, 300) + '\n'
    table_width_intro = 12
    # Width of the widest non-hidden build name, plus room for the
    # '_debug' suffix appended to debug builds.
    table_width_builds = len(max([s for s in next(os.walk(Environment.trunk_tests_builds_dir))[1] if not s[0] == '.'], key=len)) + len('_debug')
    # Width of the widest non-hidden case name.
    table_width_cases = len(max([s for s in next(os.walk(Environment.trunk_tests_cases_dir))[1] if not s[0] == '.'], key=len))
    table_width_cores = 7
    table_width_total = table_width_intro + table_width_builds + table_width_cases + table_width_cores + 3

    # str.format templates ('{:<width>} ' per column) for status lines.
    intro_table_line_template = \
        '{:' + str(table_width_intro) + '} '

    task_table_line_template = \
        '{:' + str(table_width_intro) + '} ' + \
        '{:' + str(table_width_cases) + '} ' + \
        '{:' + str(table_width_builds) + '} ' + \
        '{:' + str(table_width_cores) + '} '

    config_table_line_template = \
        '{:' + str(table_width_intro) + '} ' + \
        '{:' + str(max(table_width_builds, table_width_cases)) + '} ' + \
        '{:8} '

    file_table_line_template = \
        '{:' + str(table_width_intro) + '} ' + \
        '{:' + str(table_width_cases + 13) + '} '
---|
131 | |
---|
132 | |
---|
class SignificantDigitsRounder:
    """Round floats (scalars or numpy arrays) to a number of significant
    digits, preserving sign and leaving exact zeros untouched."""

    @staticmethod
    def _round(value, digits=10):
        """Round a single float to *digits* significant digits."""
        if value == 0.0:
            return value
        negative = value < 0.0
        value = -value if negative else value
        # Position of the leading digit determines how many decimals to keep.
        rounded_value = round(value, -int(math.floor(math.log10(value))) + (digits - 1))
        return -rounded_value if negative else rounded_value

    # NOTE(review): np.vectorize of a staticmethod object is only callable on
    # Python >= 3.10 and this attribute is not used within this file; kept
    # unchanged for backward compatibility.
    vectorized_round = np.vectorize(_round)

    # Element-wise builtin round(value, ndigits); used by around().
    _vectorized_round = np.vectorize(round)

    @classmethod
    def around(cls, array, digits=10):
        """Round every element of *array* to *digits* significant digits.

        Uses plain boolean masks instead of np.ma.masked_where(...).mask:
        the latter collapses to the scalar `nomask` (False) whenever no
        element satisfies the condition and only yielded correct results
        here by coincidence.
        """
        non_negative = array >= 0.0
        pos_array = np.where(non_negative, array, -array)
        zero_mask = pos_array == 0.0
        # Substitute 1.0 for zeros so log10 is well-defined; restored below.
        non_zero_array = np.where(zero_mask, 1.0, pos_array)
        ndigits = -np.floor(np.log10(non_zero_array)).astype(int) + (digits - 1)
        rounded = cls._vectorized_round(non_zero_array, ndigits)
        rounded_pos_array = np.where(zero_mask, 0.0, rounded)
        return np.where(non_negative, rounded_pos_array, -rounded_pos_array)
---|
162 | |
---|
163 | |
---|
164 | |
---|
class Logger(ContextDecorator):
    """Context manager that appends messages to a log file and optionally
    mirrors them to stdout.

    to_file: log file only.
    to_log:  log file, plus stdout when verbose is set.
    to_all:  log file and stdout unconditionally.
    """

    def __init__(self, logfile_dir, logfile_name='palmtest.log', logfile_mode='a', verbose=False):
        self.logfile_path = os.path.join(logfile_dir, logfile_name)
        self.logfile_mode = logfile_mode
        self.verbose = verbose

    def __enter__(self):
        self._file = open(self.logfile_path, self.logfile_mode)
        return self

    def __exit__(self, *exc):
        self._file.close()
        return False

    def _write_stdout(self, message):
        # Flush immediately so interleaved subprocess output stays ordered.
        sys.stdout.write(message)
        sys.stdout.flush()

    def _write_file(self, message):
        self._file.write(message)
        self._file.flush()

    def to_file(self, message):
        """Write *message* to the log file only."""
        self._write_file(message)

    def to_log(self, message):
        """Write *message* to the log file; echo to stdout when verbose."""
        if self.verbose:
            self._write_stdout(message)
        self._write_file(message)

    def to_all(self, message):
        """Write *message* to both stdout and the log file."""
        self._write_stdout(message)
        self._write_file(message)
---|
196 | |
---|
197 | |
---|
class Executor:
    """Runs external commands, streaming their combined stdout/stderr into
    the per-directory palmtest log."""

    @staticmethod
    def _enqueue_output(out, out_queue):
        """Reader-thread target: forward raw output lines to *out_queue*
        until EOF, then close the stream."""
        for line in iter(out.readline, b''):
            out_queue.put(line)
        out.close()

    @staticmethod
    def execute(cmd, cwd='.', verbose=True, dry_run=False):
        """Run *cmd* (a list of argument strings) in *cwd*, logging output.

        Returns True when the command exited with a non-zero status.
        With dry_run=True the command is only echoed, not executed.
        """
        assert isinstance(cmd, list)
        if dry_run:
            cmd = ['echo'] + cmd
        # NOTE(review): the command is joined and run through the shell
        # without escaping, so cmd must not contain untrusted input.
        cmd_str = ' '.join(cmd)
        # bufsize=1 (line buffering) is invalid for binary-mode pipes and
        # was silently ignored with a warning; dropped.
        p = subprocess.Popen(cmd_str, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
        q = queue.Queue()
        t = threading.Thread(target=Executor._enqueue_output, args=(p.stdout, q))
        t.daemon = True  # thread dies with the program
        t.start()

        with Logger(cwd, verbose=verbose) as logger:
            logger.to_log(LogFormatter.hline)
            logger.to_log('CMD: ' + cmd_str + '\n')
            logger.to_log(LogFormatter.hline)
            # Drain the queue while the reader thread is alive ...
            while t.is_alive():
                try:
                    line = q.get_nowait()
                except queue.Empty:
                    pass  # no output yet
                else:
                    logger.to_log(line.decode("utf-8"))
            # ... then drain whatever arrived after the last poll.
            while True:
                try:
                    line = q.get_nowait()
                except queue.Empty:
                    break
                logger.to_log(line.decode("utf-8"))
            logger.to_log(LogFormatter.hline)

        # BUGFIX: poll() can return None when the pipe is already closed but
        # the process has not been reaped yet, and None != 0 mis-reported a
        # successful run as failed; wait() blocks for the real exit status.
        rc = p.wait()
        failed = rc != 0
        return failed
---|
243 | |
---|
244 | |
---|
class NetCDFInterface:
    """Thin read-only wrapper around a netCDF result file."""

    def __init__(self, filename):
        self.filename = filename

    def is_healthy(self):
        """Return True if the file exists and its title can be parsed."""
        try:
            self.get_run_name()
        # BUGFIX: was a bare 'except:', which also swallowed SystemExit
        # and KeyboardInterrupt.
        except Exception:
            return False
        else:
            return True

    def get_run_name(self):
        """Extract the run name following 'run:' in the global title attribute."""
        with netCDF4.Dataset(self.filename, mode='r') as netcdf:
            words = getattr(netcdf, 'title').split()
            i = words.index('run:')
            return words[i + 1]

    def get_var_list(self):
        """Return the sorted list of variable names (empty names dropped)."""
        with netCDF4.Dataset(self.filename, mode='r') as netcdf:
            var_list = list(netcdf.variables.keys())
            var_list = filter(None, var_list)
            return sorted(var_list)

    def show_content(self):
        """Debug helper: dump global attributes and variable names to stdout."""
        with netCDF4.Dataset(self.filename, mode='r') as netcdf:
            for name in netcdf.ncattrs():
                print("Global attr", name, "=", getattr(netcdf, name))
            print(netcdf)
            for v in netcdf.variables:
                print(v)

    def get_times_list(self):
        """Return the values of the 'time' variable as strings."""
        attributes, times = self.read_var('time')
        times = [str(time) for time in times]
        times = list(filter(None, times))
        return times

    def contains(self, variable):
        """Return True if *variable* exists in the file."""
        return variable in self.get_var_list()

    def read_var(self, variable):
        """Return (attributes, values) for *variable*.

        Missing 'long_name'/'units' attributes default to empty strings.
        """
        with netCDF4.Dataset(self.filename, mode='r') as netcdf:
            values = netcdf.variables[variable][:]  # extract values
            attributes = dict()
            try:
                attributes['long_name'] = netcdf.variables[variable].name
            except AttributeError:
                attributes['long_name'] = ''
            try:
                attributes['unit'] = netcdf.variables[variable].units
            except AttributeError:
                attributes['unit'] = ''
            return attributes, values
---|
301 | |
---|
class FileComparator:
    """Static helpers comparing job output against reference files.

    All methods return a tuple (failed, message): failed is True on any
    mismatch, message is a colored human-readable verdict.
    """

    @staticmethod
    def compare_ascii(file_path1, file_path2, start_string=None):
        """Compare two text files line by line.

        If *start_string* is given, comparison starts at the first line
        equal to it in each file; reported line numbers are relative to
        file 2 (the output file).
        """
        try:
            with open(file_path1, 'r') as file1:
                content1 = file1.readlines()
        except OSError:
            return True, colored('[reference file not found]', 'red')
        try:
            with open(file_path2, 'r') as file2:
                content2 = file2.readlines()
        except OSError:
            return True, colored('[output file not found]', 'red')
        if start_string:
            # BUGFIX: list.index raises ValueError when start_string is
            # missing from either file; report a failure instead of crashing.
            try:
                index1 = content1.index(start_string)
                index2 = content2.index(start_string)
            except ValueError:
                return True, colored('[start string not found]', 'red')
            comparable_content1 = content1[index1:]
            comparable_content2 = content2[index2:]
            ln = index2 + 1
        else:
            comparable_content1 = content1
            comparable_content2 = content2
            ln = 1
        if len(comparable_content1) != len(comparable_content2):
            return True, colored('[mismatch in total number of lines]', 'red')
        for line1, line2 in zip(comparable_content1, comparable_content2):
            if not line1 == line2:
                return True, colored('[mismatch in content starting line ' + str(ln) + ']', 'red')
            ln += 1
        return False, colored('[file ok]', 'green')

    @staticmethod
    def compare_netcdf(file_path1, file_path2, digits=None):
        """Compare two netCDF files variable by variable and time step by
        time step, optionally rounding to *digits* significant digits first."""
        nci1 = NetCDFInterface(file_path1)
        nci2 = NetCDFInterface(file_path2)
        if not nci1.is_healthy():
            return True, colored('[reference file not found]', 'red')
        if not nci2.is_healthy():
            return True, colored('[output file not found]', 'red')
        times_list1 = nci1.get_times_list()
        times_list2 = nci2.get_times_list()
        if not times_list1 == times_list2:
            return True, colored('[wrong time dimension]', 'red')
        time_list = times_list1
        var_list1 = nci1.get_var_list()
        var_list2 = nci2.get_var_list()
        if not var_list1 == var_list2:
            return True, colored('[wrong set of variables]', 'red')
        var_list = var_list1
        content1 = dict()
        content2 = dict()
        for var in var_list:
            attributes1, values1 = nci1.read_var(var)
            attributes2, values2 = nci2.read_var(var)
            if sorted(attributes1.keys()) != sorted(attributes2.keys()):
                return True, colored('[wrong set of attributes in variable \"' + var + '\"]', 'red')
            if isinstance(digits, int):
                values1 = SignificantDigitsRounder.around(values1, digits=digits)
                values2 = SignificantDigitsRounder.around(values2, digits=digits)
            content1[var] = values1
            content2[var] = values2
        # Report the first mismatching (time, variable) pair, if any.
        for i, time in enumerate(time_list):
            for var in var_list:
                t_content1 = content1[var][i]
                t_content2 = content2[var][i]
                if not (t_content1 == t_content2).all():
                    if isinstance(digits, int):
                        return True, colored('[1st mismatch within ' + str(digits) + ' digits at time index ' + str(i) + ' in variable \"' + var + '\"]', 'red')
                    else:
                        return True, colored('[1st mismatch at time index ' + str(i) + ' in variable \"' + var + '\"]', 'red')
        return False, colored('[file ok]', 'green')
---|
377 | |
---|
378 | |
---|
class OutputChecker:
    """Compares the MONITORING and OUTPUT files of a finished test job
    against the reference results shipped with the test case."""

    def __init__(self, test_dir, setup_name, build_name, cores, significant_digits=None, verbose=True, dry_run=False):
        self.test_dir = test_dir
        self.setup_name = setup_name
        self.build_name = build_name
        self.cores = cores
        # Optional dict of per-file-type digit counts
        # (keys used below: 'timeseries', 'profiles', 'other').
        self.significant_digits = significant_digits
        self.verbose = verbose
        self.dry_run = dry_run
        self.job_name = self.setup_name + '__' + build_name + '__' + str(self.cores)
        self.job_dir = os.path.join(self.test_dir, 'JOBS', self.job_name)
        self.ref_monitoring_dir = os.path.join(Environment.trunk_tests_cases_dir, self.setup_name, 'MONITORING')
        self.ref_output_dir = os.path.join(Environment.trunk_tests_cases_dir, self.setup_name, 'OUTPUT')
        self.res_monitoring_dir = os.path.join(self.job_dir, 'MONITORING')
        self.res_output_dir = os.path.join(self.job_dir, 'OUTPUT')
        self.failed = None

    def _scan_ref_dir(self, ref_dir, res_dir):
        """Build one descriptor dict per reference file in *ref_dir*.

        File names are decomposed as <setup_name><postfix>[.<cycle>...][.<ext>].
        Extracted from the formerly duplicated monitoring/output scan loops.
        """
        if os.path.isdir(ref_dir):
            file_names = [s for s in next(os.walk(ref_dir))[2]]
        else:
            file_names = []
        file_dicts = []
        for file_name in file_names:
            file_specific_ending = file_name[len(self.setup_name):]
            file_specific_ending_split = file_specific_ending.split('.')
            postfix = file_specific_ending_split[0]
            if len(file_specific_ending_split) > 1:
                extension = file_specific_ending_split[-1]
            else:
                extension = ''
            if len(file_specific_ending_split) > 2:
                cycle_info = file_specific_ending_split[1: -1]
            else:
                cycle_info = []
            file_dicts.append(
                dict(
                    postfix=postfix,
                    cycle_info=cycle_info,
                    extension=extension,
                    ref_path=ref_dir,
                    res_path=res_dir,
                )
            )
        return file_dicts

    def get_checkable_file_dicts(self):
        """Return descriptors for all reference files, monitoring first."""
        return (
            self._scan_ref_dir(self.ref_monitoring_dir, self.res_monitoring_dir) +
            self._scan_ref_dir(self.ref_output_dir, self.res_output_dir)
        )

    def check(self):
        """Compare every checkable file; return True if any check failed.

        Dry runs always report success. '_rc' files without extension are
        compared as ASCII starting at the run-control section; '.nc' files
        as netCDF; everything else is ignored.
        """
        with Logger(self.test_dir, verbose=self.verbose) as logger:
            logger.to_log('Checking output files:')
            logger.to_all('\n')
            failed = False
            for file_dict in self.get_checkable_file_dicts():
                file_failed = False
                ext_list = [file_dict['extension']] if file_dict['extension'] else []
                file_specific_ending = '.'.join([file_dict['postfix']] + file_dict['cycle_info'] + ext_list)
                logger.to_all(LogFormatter.file_table_line_template.format('Checking:', self.setup_name + file_specific_ending))
                ref_file_path = os.path.join(file_dict['ref_path'], self.setup_name + file_specific_ending)
                res_file_path = os.path.join(file_dict['res_path'], self.job_name + file_specific_ending)
                if re.match('_rc', file_dict['postfix']) and not file_dict['extension']:
                    file_failed, message = FileComparator.compare_ascii(ref_file_path, res_file_path, start_string='Run-control output:\n')
                elif re.match('nc', file_dict['extension']):
                    if self.significant_digits is not None:
                        if re.match('_ts', file_dict['postfix']) and 'timeseries' in self.significant_digits:
                            file_failed, message = FileComparator.compare_netcdf(ref_file_path, res_file_path,
                                                                                 digits=self.significant_digits['timeseries'])
                        elif re.match('_pr', file_dict['postfix']) and 'profiles' in self.significant_digits:
                            file_failed, message = FileComparator.compare_netcdf(ref_file_path, res_file_path,
                                                                                 digits=self.significant_digits['profiles'])
                        else:
                            # BUGFIX: use .get() so a config without an
                            # 'other' entry no longer raises KeyError
                            # (None disables the rounding).
                            file_failed, message = FileComparator.compare_netcdf(ref_file_path, res_file_path,
                                                                                 digits=self.significant_digits.get('other'))
                    else:
                        file_failed, message = FileComparator.compare_netcdf(ref_file_path, res_file_path)
                else:
                    message = colored('[ignored]', 'blue')
                if file_failed:
                    failed = True
                logger.to_all(message + '\n')
            if self.dry_run:
                failed = False
            return failed
---|
487 | |
---|
488 | |
---|
class PALMJob:
    """The PALM job class deals with the execution of a single PALM job."""

    @staticmethod
    def get_job_name(setup_name, build_name, cores):
        """Canonical job directory name: <setup>__<build>__<cores>."""
        return setup_name + '__' + build_name + '__' + str(cores)

    @staticmethod
    def _list_restart_dirs(source_restart_dir):
        """Return the top-level sub-directory names of *source_restart_dir*,
        or [] when the directory is missing or unreadable."""
        try:
            source_data_dirs_grp = [d for r, d, f in os.walk(source_restart_dir)]
        except OSError:
            source_data_dirs_grp = []
        if len(source_data_dirs_grp) == 0:
            return []
        return source_data_dirs_grp[0]

    def __init__(self, test_dir, test_case, build_name, cores, verbose=False, dry_run=False):
        self.test_dir = test_dir
        self.test_case = test_case
        self.build_name = build_name
        self.cores = cores
        self.verbose = verbose
        self.dry_run = dry_run

        # Attempt/failure state, tracked separately for debug and non-debug.
        self.attempted_debug = False
        self.failed_debug = None
        self.attempted_non_debug = False
        self.failed_non_debug = None

    def _link_restart_files(self, build_name):
        """Symlink the newest restart data from the donor job named in
        test_case.use_binary_files_from; for debug builds fall back to the
        matching non-debug donor. Returns (failed, message)."""
        if self.dry_run:
            return True, colored('[restart data dry]', 'blue')
        name = self.get_job_name(self.test_case.name, build_name, self.cores)
        source_name = self.get_job_name(self.test_case.use_binary_files_from, build_name, self.cores)
        source_restart_dir = os.path.join(self.test_dir, 'JOBS', source_name, 'RESTART')
        source_data_dirs = self._list_restart_dirs(source_restart_dir)
        if len(source_data_dirs) == 0 and re.match('.+_debug', build_name):
            # No restart data from the debug donor; try the non-debug donor.
            source_build_name = build_name[:-len('_debug')]
            source_name = self.get_job_name(self.test_case.use_binary_files_from, source_build_name, self.cores)
            source_restart_dir = os.path.join(self.test_dir, 'JOBS', source_name, 'RESTART')
            source_data_dirs = self._list_restart_dirs(source_restart_dir)
        if len(source_data_dirs) == 0:
            source_data_dir = 'no_restart_data'
        else:
            # Newest cycle sorts last.
            source_data_dir = sorted(source_data_dirs)[-1]
        source_data_dir_path = os.path.join(source_restart_dir, source_data_dir)
        if os.path.isdir(source_data_dir_path) and re.match('.+_d3d.*', source_data_dir):
            job_restart_dir = os.path.join(self.test_dir, 'JOBS', name, 'RESTART')
            os.makedirs(job_restart_dir, exist_ok=False)
            job_data_dir_path = os.path.join(job_restart_dir, name + '_d3d')
            os.symlink(source_data_dir_path, job_data_dir_path, target_is_directory=True)
            return False, colored('[linked restart data from: ' + source_data_dir_path + ']', 'green')
        else:
            return True, colored('[no restart data found]', 'red')

    def _execute(self, name, build_name):
        """Run palmrun for this job; returns (failed, message)."""
        execution_failed = Executor.execute(
            [
                os.path.join(self.test_dir, 'trunk', 'SCRIPTS', 'palmrun'),
                '-c', '\"' + build_name + '\"',
                '-r', name,
                '-a', '\"' + ' '.join(self.test_case.activation_strings) + '\"',
                '-X', str(self.cores),
                '-T', str(self.cores),
                '-B',
                '-v',
                '-z',
            ],
            cwd=self.test_dir,
            verbose=self.verbose,
            dry_run=self.dry_run,
        )

        if self.dry_run:
            return False, colored('[execution dry]', 'blue')
        elif execution_failed:
            return True, colored('[execution failed]', 'red')
        else:
            return False, colored('[execution ok]', 'green')

    def _check(self, build_name):
        """Compare job output against the reference; returns (failed, message)."""
        checker = OutputChecker(
            self.test_dir,
            self.test_case.name,
            build_name,
            self.cores,
            significant_digits=self.test_case.significant_digits,
            verbose=self.verbose,
            dry_run=self.dry_run,
        )
        check_failed = checker.check()

        if self.dry_run:
            return False, colored('[checks dry]', 'blue')
        if check_failed:
            return True, colored('[checks failed]', 'red')
        else:
            return False, colored('[checks ok]', 'green')

    def execute(self, debug=False):
        """Prepare input, run the job and check its output (at most once per
        build flavor); repeated calls return the stored result.

        Returns True when the job failed.
        """
        if debug:
            attempted = self.attempted_debug
            build_name = self.build_name + '_debug'
            failed = self.failed_debug
        else:
            attempted = self.attempted_non_debug
            build_name = self.build_name
            failed = self.failed_non_debug

        if not attempted:
            with Logger(self.test_dir, verbose=self.verbose) as logger:
                status_prefix = LogFormatter.task_table_line_template.format('Testing:', self.test_case.name, build_name, self.cores)
                logger.to_all(status_prefix)
                logger.to_log('[started]' + '\n')
                attempted = True

                name = self.get_job_name(self.test_case.name, build_name, self.cores)

                input_dir = os.path.join(self.test_dir, 'JOBS', name, 'INPUT')
                os.makedirs(input_dir, exist_ok=False)

                # copying needs to be done per file, because input files need to be renamed
                for input_file in self.test_case.input_file_names:
                    postfix = input_file[len(self.test_case.name):]
                    src = os.path.join(self.test_case.input_dir, input_file)
                    dst = os.path.join(input_dir, name + postfix)
                    shutil.copy(src, dst)

                # copying the entire directory is ok, because source files do not need to be renamed
                user_code_dir = os.path.join(self.test_dir, 'JOBS', name, 'USER_CODE')
                if os.path.isdir(self.test_case.user_code_dir):
                    shutil.copytree(self.test_case.user_code_dir, user_code_dir, copy_function=shutil.copy)

                if self.test_case.requires_binary_files:
                    # NOTE(review): a failed link is only logged here;
                    # execution proceeds and fails on its own if data is
                    # really missing.
                    link_restart_files_failed, message = self._link_restart_files(build_name)
                    logger.to_log(status_prefix)
                    logger.to_log(message + ' ')
                    logger.to_log('\n')

                execution_failed, message = self._execute(name, build_name)
                logger.to_log(status_prefix)
                logger.to_all(message + ' ')
                logger.to_log('\n')

                check_failed, message = self._check(build_name)
                logger.to_log(status_prefix)
                logger.to_log(message + ' ')

                # BUGFIX: the execution result used to be overwritten by the
                # check result, so a failed run whose (stale) output still
                # matched the reference was reported as a success.
                failed = execution_failed or check_failed

                logger.to_all('\n')

        if debug:
            self.attempted_debug = attempted
            self.failed_debug = failed
        else:
            self.attempted_non_debug = attempted
            self.failed_non_debug = failed

        return failed

    def status(self):
        """Summary dict; overall 'failed' requires both the non-debug and the
        debug attempt to have failed."""
        return dict(
            attempted=self.attempted_non_debug or self.attempted_debug,
            failed=self.failed_non_debug and self.failed_debug,
            debugged=self.attempted_debug,
            non_debug_failed=self.failed_non_debug,
        )
---|
660 | |
---|
661 | |
---|
662 | class PALMBuild: |
---|
663 | """The PALM build class deals with configuration and execution of all required PALM builds""" |
---|
664 | |
---|
    def __init__(self, test_dir, build_name, verbose=False, dry_run=False):
        """Initialize build state; configure() later fills in the compiler
        and linker details from the build's YAML configuration."""
        self.test_dir = test_dir
        self.build_name = build_name
        self.verbose = verbose
        self.dry_run = dry_run
        # Lifecycle flags.
        self.configured = False
        self.executed = False
        self.available = False
        # Feature requirements, derived from the build config by configure().
        self.requires_mpi = False
        self.requires_netcdf = False
        self.requires_fftw = False
        self.requires_rrtmg = False
        # Attempt/failure state, tracked separately for debug and non-debug.
        self.attempted_non_debug = False
        self.attempted_debug = False
        self.failed_non_debug = None
        self.failed_debug = None
---|
682 | def configure(self): |
---|
683 | try: |
---|
684 | with open(os.path.join(Environment.trunk_tests_builds_dir, self.build_name, 'build_config.yml'), 'r') as f: |
---|
685 | build_config = yaml.load(f) |
---|
686 | except: |
---|
687 | return True, colored('[build not found]', 'red') |
---|
688 | |
---|
689 | if 'compiler' in build_config: |
---|
690 | self.compiler = build_config['compiler'] |
---|
691 | else: |
---|
692 | return True, colored('[missing \"compiler\" keyword]', 'red') |
---|
693 | |
---|
694 | if not isinstance(self.compiler, dict): |
---|
695 | return True, colored('[\"compiler\" keyword must be dict]', 'red') |
---|
696 | |
---|
697 | if 'linker' in build_config: |
---|
698 | self.linker = build_config['linker'] |
---|
699 | else: |
---|
700 | return True, colored('[missing \"linker\" keyword]', 'red') |
---|
701 | |
---|
702 | if not isinstance(self.linker, dict): |
---|
703 | return True, colored('[\"linker\" keyword must be dict]', 'red') |
---|
704 | |
---|
705 | if 'mpi_wrapper' in self.compiler: |
---|
706 | if 'mpi_wrapper}}' in self.compiler['mpi_wrapper']: |
---|
707 | self.requires_mpi = True |
---|
708 | else: |
---|
709 | return True, colored('[missing \"mpi_wrapper\" keyword]', 'red') |
---|
710 | |
---|
711 | if 'includes' in self.compiler: |
---|
712 | for include in self.compiler['includes']: |
---|
713 | if 'include.netcdf}}' in include: |
---|
714 | self.requires_netcdf = True |
---|
715 | if 'include.fftw}}' in include: |
---|
716 | self.requires_fftw = True |
---|
717 | if 'include.rrtmg}}' in include: |
---|
718 | self.requires_rrtmg = True |
---|
719 | else: |
---|
720 | return True, colored('[missing \"includes\" keyword in compiler]', 'red') |
---|
721 | |
---|
722 | if 'options' in self.linker: |
---|
723 | for lib in self.linker['options']: |
---|
724 | if 'lib.netcdf}}' in lib: |
---|
725 | self.requires_netcdf = True |
---|
726 | if 'lib.fftw}}' in lib: |
---|
727 | self.requires_fftw = True |
---|
728 | if 'lib.rrtmg}}' in lib: |
---|
729 | self.requires_rrtmg = True |
---|
730 | else: |
---|
731 | return True, colored('[missing \"options\" keyword in linker]', 'red') |
---|
732 | |
---|
733 | library_names = [] |
---|
734 | if self.requires_netcdf: |
---|
735 | library_names.append('netcdf') |
---|
736 | if self.requires_fftw: |
---|
737 | library_names.append('fftw') |
---|
738 | if self.requires_rrtmg: |
---|
739 | library_names.append('rrtmg') |
---|
740 | |
---|
741 | if not 'executable' in self.compiler: |
---|
742 | return True, colored('[missing \"executable\" keyword in compiler]', 'red') |
---|
743 | |
---|
744 | if not 'definitions' in self.compiler: |
---|
745 | return True, colored('[missing \"definitions\" keyword in compiler]', 'red') |
---|
746 | |
---|
747 | if not 'options' in self.compiler: |
---|
748 | return True, colored('[missing \"options\" keyword in compiler]', 'red') |
---|
749 | |
---|
750 | if not 'default' in self.compiler['options']: |
---|
751 | return True, colored('[missing \"default\" keyword in compiler.options]', 'red') |
---|
752 | |
---|
753 | if not 'debug' in self.compiler['options']: |
---|
754 | return True, colored('[missing \"debug\" keyword in compiler.options]', 'red') |
---|
755 | |
---|
756 | try: |
---|
757 | with open(os.path.join(Environment.workspace_dir, 'palmtest.yml'), 'r') as f: |
---|
758 | palmtest_config = yaml.load(f) |
---|
759 | except: |
---|
760 | return True, colored('[palmtest.yml not found]', 'red') |
---|
761 | |
---|
762 | if 'palm_config_template' in palmtest_config: |
---|
763 | if isinstance(palmtest_config['palm_config_template'], str): |
---|
764 | custom_template = palmtest_config['palm_config_template'] |
---|
765 | try: |
---|
766 | with open(os.path.join(custom_template), 'r') as palm_config_template_file: |
---|
767 | template = palm_config_template_file.read() |
---|
768 | except: |
---|
769 | try: |
---|
770 | with open(os.path.join(Environment.scripts_dir, '.palm.config.default.in'), 'r') as palm_config_template_file: |
---|
771 | template = palm_config_template_file.read() |
---|
772 | except: |
---|
773 | return True, colored('[trunk/SCRIPTS/.palm.config.default.in not found]', 'red') |
---|
774 | |
---|
775 | template = template.replace('@CMAKE_INSTALL_PREFIX@', self.test_dir) |
---|
776 | template = template.replace('@PALM_HOSTNAME@', socket.gethostname()) |
---|
777 | template = template.replace('@CMAKE_USERNAME@', getpass.getuser()) |
---|
778 | template = template.replace('@MPI_Fortran_COMPILER@', self.compiler['mpi_wrapper']) |
---|
779 | template = template.replace('@CMAKE_Fortran_COMPILER@', self.compiler['executable']) |
---|
780 | cpp_options_str = ['-D' + s for s in self.compiler['definitions']] |
---|
781 | template = template.replace('@PALM_CPP_OPTIONS_STR@', ' '.join(cpp_options_str)) |
---|
782 | template = template.replace('@PALM_CORES@', str(multiprocessing.cpu_count())) |
---|
783 | template = template.replace('@PALM_COMPILER_OPTIONS@', '{{palmtest.compiler.options}} ' + ' '.join(self.compiler['includes'])) |
---|
784 | template = template.replace('@PALM_LINKER_OPTIONS@', ' '.join(self.linker['options'])) |
---|
785 | |
---|
786 | if 'environments' in palmtest_config: |
---|
787 | available_environments = palmtest_config['environments'] |
---|
788 | else: |
---|
789 | return True, colored('[missing \"environments\" keyword in palmtest.yml]', 'red') |
---|
790 | |
---|
791 | if 'id' in self.compiler: |
---|
792 | c_id = self.compiler['id'] |
---|
793 | else: |
---|
794 | return True, colored('[missing \"id\" keyword in compiler]', 'red') |
---|
795 | |
---|
796 | if c_id in available_environments: |
---|
797 | self.available = True |
---|
798 | |
---|
799 | environment = available_environments[c_id] |
---|
800 | |
---|
801 | if 'mpi_execution_command' in environment: |
---|
802 | template = template.replace('@PALM_EXECUTE_COMMAND@', environment['mpi_execution_command']) |
---|
803 | else: |
---|
804 | template = template.replace('@PALM_EXECUTE_COMMAND@', 'mpirun -n {{mpi_tasks}}') |
---|
805 | |
---|
806 | if 'executable' not in environment: |
---|
807 | return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"executable\"]', 'red') |
---|
808 | value = environment['executable'] |
---|
809 | if isinstance(value, str): |
---|
810 | template = template.replace('{{' + '.'.join([c_id, 'executable']) + '}}', value) |
---|
811 | if self.requires_mpi: |
---|
812 | if 'mpi_wrapper' not in environment: |
---|
813 | return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"mpi_wrapper\"]', 'red') |
---|
814 | value = environment['mpi_wrapper'] |
---|
815 | if isinstance(value, str): |
---|
816 | template = template.replace('{{' + '.'.join([c_id, 'mpi_wrapper']) + '}}', value) |
---|
817 | if 'include' not in environment: |
---|
818 | return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"include\"]', 'red') |
---|
819 | if 'lib' not in environment: |
---|
820 | return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"lib\"]', 'red') |
---|
821 | for lib in library_names: |
---|
822 | if lib not in environment['include']: |
---|
823 | return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"include.'+lib+'\"]', 'red') |
---|
824 | value = environment['include'][lib] |
---|
825 | if isinstance(value, str): |
---|
826 | template = template.replace('{{' + '.'.join([c_id, 'include', lib]) + '}}', value) |
---|
827 | if lib not in environment['lib']: |
---|
828 | return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"lib.'+lib+'\"]', 'red') |
---|
829 | value = environment['lib'][lib] |
---|
830 | if isinstance(value, str): |
---|
831 | template = template.replace('{{' + '.'.join([c_id, 'lib', lib]) + '}}', value) |
---|
832 | |
---|
833 | with open(os.path.join(self.test_dir, '.palm.config.' + self.build_name), 'w') as palm_config_file: |
---|
834 | palm_config_file.write( |
---|
835 | template.replace( |
---|
836 | '{{palmtest.compiler.options}}', |
---|
837 | ' '.join(self.compiler['options']['default']), |
---|
838 | ) |
---|
839 | ) |
---|
840 | with open(os.path.join(self.test_dir, '.palm.config.' + self.build_name + '_debug'), 'w') as palm_config_file: |
---|
841 | palm_config_file.write( |
---|
842 | template.replace( |
---|
843 | '{{palmtest.compiler.options}}', |
---|
844 | ' '.join(self.compiler['options']['debug']), |
---|
845 | ) |
---|
846 | ) |
---|
847 | self.configured = True |
---|
848 | return False, colored('[configuration ok]', 'green') |
---|
849 | |
---|
850 | else: |
---|
851 | return True, colored('[palmtest.yml environment \"' + c_id + '\" not found]', 'red') |
---|
852 | |
---|
def _execute(self, build_name):
    """Invoke palmbuild for *build_name* inside the test directory.

    Returns a (failed, message) pair: *failed* is True when the build
    did not succeed, *message* is a colored status string for the log.
    """
    self.attempted = True
    command = [
        os.path.join(self.test_dir, 'trunk', 'SCRIPTS', 'palmbuild'),
        '-c', '\"' + build_name + '\"',
        '-v',
    ]
    build_failed = Executor.execute(
        command,
        cwd=self.test_dir,
        verbose=self.verbose,
        dry_run=self.dry_run,
    )

    # In dry-run mode nothing was actually built, so never report failure.
    if self.dry_run:
        return False, colored('[build dry]', 'blue')
    if build_failed:
        return True, colored('[build failed]', 'red')
    return False, colored('[build ok]', 'green')
---|
872 | |
---|
def build(self, debug=False):
    """Build this configuration once, memoizing the result.

    The first call per mode (debug / non-debug) actually runs palmbuild;
    later calls just return the cached failure state.
    """
    # Select the per-mode state without duplicating the logic below.
    target = self.build_name + '_debug' if debug else self.build_name
    attempted = self.attempted_debug if debug else self.attempted_non_debug
    failed = self.failed_debug if debug else self.failed_non_debug

    if not attempted:
        with Logger(self.test_dir, verbose=self.verbose) as logger:
            prefix = LogFormatter.task_table_line_template.format('Building:', '', target, '')
            logger.to_all(prefix)
            logger.to_log('[started]' + '\n')
            attempted = True

            failed, message = self._execute(target)
            # Repeat the prefix in the log so the status line is complete there too.
            logger.to_log(prefix)
            logger.to_all(message + ' ')
            logger.to_all('\n')

    # Write the (possibly updated) state back to the matching attributes.
    if debug:
        self.attempted_debug, self.failed_debug = attempted, failed
    else:
        self.attempted_non_debug, self.failed_non_debug = attempted, failed

    return failed
---|
903 | |
---|
def report(self):
    """Summarize the build outcomes as a plain dictionary.

    Values mirror the memoized state: True = failed, False = ok,
    None = that mode was never attempted.
    """
    return {
        'failed_debug': self.failed_debug,
        'failed_non_debug': self.failed_non_debug,
    }
---|
909 | |
---|
910 | |
---|
class PALMTestCase:
    """The PALM test case class deals with the configuration and execution of all PALM test cases"""

    def __init__(self, test_dir, name, verbose=False, dry_run=False):
        self.test_dir = test_dir
        self.name = name
        self.verbose = verbose
        self.dry_run = dry_run
        # Per-case resources shipped with the test suite under trunk/TESTS.
        self.user_code_dir = os.path.join(Environment.trunk_tests_cases_dir, self.name, 'USER_CODE')
        self.input_dir = os.path.join(Environment.trunk_tests_cases_dir, self.name, 'INPUT')
        self.number_of_cores = []
        self.build_names = []
        self.input_file_names = []
        self.configured = False

    def configure(self, requested_build_names, requested_cores):
        """Load and validate case_config.yml, then intersect the requested
        builds and core counts with what this case allows.

        Returns a (failed, message) tuple; *failed* is True when the case
        cannot run and *message* is a colored status string for the log.
        """
        f_name = os.path.join(Environment.trunk_tests_cases_dir, self.name, 'case_config.yml')
        try:
            with open(f_name, 'r') as f:
                # safe_load: never execute arbitrary YAML tags from a config file
                # (yaml.load without an explicit Loader is deprecated and unsafe).
                config = yaml.safe_load(f)
        except (OSError, yaml.YAMLError):
            return True, colored('[Case \"' + self.name + '\" could not be found.]', 'red')
        if not isinstance(config, dict):
            # Empty or malformed YAML document; treat like a missing case.
            return True, colored('[Case \"' + self.name + '\" could not be found.]', 'red')

        # Optional keyword: this case may need restart (binary) files
        # produced by another case.
        self.use_binary_files_from = config.get('use_binary_files_from')
        self.requires_binary_files = bool(self.use_binary_files_from)

        if 'allowed_builds' not in config:
            return True, colored('[missing \"allowed_builds\" keyword]', 'red')
        self.allowed_build_names = config['allowed_builds']

        if 'allowed_number_of_cores' not in config:
            return True, colored('[missing \"allowed_number_of_cores\" keyword]', 'red')
        self.allowed_number_of_cores = config['allowed_number_of_cores']

        if 'activation_strings' not in config:
            return True, colored('[missing \"activation_strings\" keyword]', 'red')
        self.activation_strings = config['activation_strings']

        if 'significant_digits_for_netcdf_checks' not in config:
            return True, colored('[missing \"significant_digits_for_netcdf_checks\" keyword]', 'red')
        self.significant_digits = config['significant_digits_for_netcdf_checks']

        # The NetCDF result comparison needs tolerances for all three output kinds.
        if 'timeseries' not in config['significant_digits_for_netcdf_checks']:
            return True, colored('[missing \"timeseries\" keyword in significant_digits_for_netcdf_checks]', 'red')

        if 'profiles' not in config['significant_digits_for_netcdf_checks']:
            return True, colored('[missing \"profiles\" keyword in significant_digits_for_netcdf_checks]', 'red')

        if 'other' not in config['significant_digits_for_netcdf_checks']:
            return True, colored('[missing \"other\" keyword in significant_digits_for_netcdf_checks]', 'red')

        # Only run on builds/core counts that are both requested and allowed.
        self.number_of_cores = sorted(set(requested_cores).intersection(self.allowed_number_of_cores))
        self.build_names = sorted(set(requested_build_names).intersection(self.allowed_build_names))
        # All regular files in the case's INPUT directory.
        self.input_file_names = list(next(os.walk(self.input_dir))[2])
        self.configured = True
        if not self.number_of_cores:
            return True, colored('[no allowed cores requested]', 'blue')
        if not self.build_names:
            return True, colored('[no allowed builds requested]', 'blue')
        if not self.input_file_names:
            return True, colored('[no input files found]', 'red')
        return False, colored('[configuration ok]', 'green')
---|
975 | |
---|
976 | |
---|
977 | |
---|
class PALMTest:
    """Top-level driver of a test run.

    ``prepare()`` sets up the test directory and resolves which builds,
    cases and core counts to run, ``execute()`` runs every approved case
    on every approved build, and ``report()`` prints the summary and
    returns the overall failure state.
    """

    def __init__(self, args):
        """Copy the parsed command-line options into plain attributes."""
        self.verbose = args.verbose
        self.no_auto_debug = args.no_auto_debug
        self.force_debug = args.force_debug
        self.fail_on_debug = args.fail_on_debug
        self.dry_run = args.dry_run
        self.no_color = args.no_color
        self.test_id = args.test_id
        self.test_case_names = args.cases
        self.requested_build_names = args.builds
        self.requested_cores = args.cores
        self.test_case_queue = []
        self.build_database = dict()

    def prepare(self):
        """Create the test directory and resolve builds, cases and cores.

        Exits the process (status 1) when the workspace is unusable: a
        pre-existing test directory, a missing palmtest.yml, or a missing
        .palm.iofiles template.
        """
        if self.no_color:
            disable_color()
        self.test_dir = os.path.join(Environment.tests_dir, self.test_id)
        try:
            # exist_ok=False: re-using a test directory would mix test runs.
            os.makedirs(self.test_dir, exist_ok=False)
        except OSError:
            print('ERROR: Found existing test directory: ' + self.test_dir)
            exit(1)
        with Logger(self.test_dir, verbose=self.verbose) as logger:
            logger.to_all(LogFormatter.hline)
            logger.to_all('This is the PALM tester (version: ' + version + ')' + '\n')
            logger.to_all(LogFormatter.hline)
            try:
                # Existence check only; PALMBuild.configure reads the content later.
                with open(os.path.join(Environment.workspace_dir, 'palmtest.yml'), 'r') as f:
                    pass
            except OSError:
                logger.to_all('ERROR: No palmtest.yml file was found in your working directory!\n')
                logger.to_all('INFO: A template for this file can be found at: trunk/TESTS/palmtest.yml\n')
                logger.to_all(' Please copy the template to your working directory and adjust it to your system!\n')
                exit(1)

            self.execution_trunk_dir = os.path.join(self.test_dir, 'trunk')
            os.symlink(Environment.trunk_dir, self.execution_trunk_dir)
            self.execution_jobs_dir = os.path.join(self.test_dir, 'JOBS')
            os.makedirs(self.execution_jobs_dir, exist_ok=False)

            try:
                with open(os.path.join(Environment.scripts_dir, '.palm.iofiles'), 'r') as iofiles_template_file:
                    iofiles_template = iofiles_template_file.read()
                # Redirect all data paths to $base_data so every file the run
                # produces stays inside the test directory.
                with open(os.path.join(self.test_dir, '.palm.iofiles'), 'w') as iofiles_file:
                    iofiles_file.write(iofiles_template.replace('$fast_io_catalog', '$base_data').replace('$restart_data_path', '$base_data').replace('$output_data_path', '$base_data'))
            except OSError:
                logger.to_all('ERROR: No .palm.iofiles file was found in trunk/SCRIPTS/')
                exit(1)

            # Never request more cores than the machine offers.
            available_cores = multiprocessing.cpu_count()
            final_cores_list = list(filter(lambda x: x <= available_cores, self.requested_cores))

            logger.to_all(LogFormatter.config_table_line_template.format('Object:', 'Name:', 'Action:') + 'Status:\n')
            logger.to_all(LogFormatter.hline)

            # Resolve 'all' and configure each requested build once; only
            # builds whose configuration succeeds are kept.
            if 'all' in self.requested_build_names:
                self.requested_build_names = [name for name in next(os.walk(Environment.trunk_tests_builds_dir))[1] if not name[0] == '.']
            found_build_names = []
            for build_name in self.requested_build_names:
                build = PALMBuild(self.test_dir, build_name, verbose=self.verbose, dry_run=self.dry_run)
                configuration_failed, message = build.configure()
                if not configuration_failed:
                    self.build_database[build_name] = build
                    found_build_names.append(build_name)
                    logger.to_all(LogFormatter.config_table_line_template.format('Build', build_name, 'approved'))
                    logger.to_all(message + '\n')
                else:
                    logger.to_all(LogFormatter.config_table_line_template.format('Build', build_name, 'rejected'))
                    logger.to_all(message + '\n')
            final_build_list = found_build_names

            if 'all' in self.test_case_names:
                self.test_case_names = sorted([name for name in next(os.walk(Environment.trunk_tests_cases_dir))[1] if not name[0] == '.'])

            # Transitively collect the cases whose binary (restart) files
            # other cases depend on; each pass gathers the next dependency
            # level until no new dependencies appear.
            additional_initial_runs_2 = [self.test_case_names]
            while len(additional_initial_runs_2[-1]) > 0:
                additional_initial_runs_1 = []
                for test_case_name in additional_initial_runs_2[-1]:
                    test_case = PALMTestCase(self.test_dir, test_case_name, verbose=self.verbose)
                    test_case_configuration_failed, message = test_case.configure(final_build_list, final_cores_list)
                    if not test_case_configuration_failed:
                        if test_case.requires_binary_files:
                            additional_initial_runs_1.append(test_case.use_binary_files_from)
                additional_initial_runs_2.append(sorted(set(additional_initial_runs_1)))

            # Keep each case only in its highest dependency level.
            test_case_order = []
            for i in range(len(additional_initial_runs_2)-1):
                # low and high refer to priority
                low = additional_initial_runs_2[i]
                high = additional_initial_runs_2[i+1]
                for item in high:
                    while item in low:
                        low.remove(item)
                test_case_order.append(low)

            # Remove duplicates within each level while preserving order.
            test_case_order_no_dublicates = []
            for test_cases in test_case_order:
                seen = set()
                seen_add = seen.add
                test_case_order_no_dublicates.append([x for x in test_cases if not (x in seen or seen_add(x))])

            # Deepest dependencies first; the leading empty list makes the
            # approved_test_case_order[i-1] lookup below safe for i == 0.
            approved_test_case_order = [[]] + list(reversed(test_case_order_no_dublicates))
            for i, test_cases in enumerate(list(approved_test_case_order)):
                info = 'Case (dep)' if i < len(approved_test_case_order)-1 else 'Case'
                for test_case_name in list(test_cases):
                    sys.stdout.flush()
                    test_case = PALMTestCase(self.test_dir, test_case_name, verbose=self.verbose)
                    test_case_configuration_failed, message = test_case.configure(final_build_list, final_cores_list)
                    if test_case_configuration_failed:
                        # removing as configuration failed should only apply to added dependencies
                        approved_test_case_order[i].remove(test_case_name)
                        logger.to_all(LogFormatter.config_table_line_template.format(info, test_case_name, 'rejected'))
                        logger.to_all(message + '\n')
                    elif test_case.requires_binary_files:
                        if test_case.use_binary_files_from not in approved_test_case_order[i-1]:
                            # removing as dependency is already removed
                            approved_test_case_order[i].remove(test_case_name)
                            logger.to_all(LogFormatter.config_table_line_template.format(info, test_case_name, 'disabled'))
                            logger.to_all(colored('[requires dependency \"' + test_case.use_binary_files_from + '\"]', 'red') + '\n')
                        else:
                            logger.to_all(LogFormatter.config_table_line_template.format(info, test_case_name, 'approved'))
                            logger.to_all(message + '\n')
                    else:
                        logger.to_all(LogFormatter.config_table_line_template.format(info, test_case_name, 'approved'))
                        logger.to_all(message + '\n')

            # Flatten the approved levels, keeping the first occurrence only.
            final_case_list = []
            for cases in approved_test_case_order:
                for case in cases:
                    if case not in final_case_list:
                        final_case_list.append(case)

            # Re-configure the approved builds/cases with dry_run applied and
            # fill the final queue used by execute().
            for build_name in final_build_list:
                build = PALMBuild(
                    self.test_dir,
                    build_name,
                    verbose=self.verbose,
                    dry_run=self.dry_run,
                )
                configuration_failed, message = build.configure()
                if not configuration_failed:
                    self.build_database[build_name] = build
                else:
                    logger.to_all(message + '\n')

            for case_name in final_case_list:
                test_case = PALMTestCase(
                    self.test_dir,
                    case_name,
                    verbose=self.verbose,
                    dry_run=self.dry_run,
                )
                test_case_configuration_failed, message = test_case.configure(final_build_list, final_cores_list)
                if not test_case_configuration_failed:
                    self.test_case_queue.append(test_case)
            logger.to_all(LogFormatter.hline)

            logger.to_all(LogFormatter.intro_table_line_template.format('Test ID:') +
                          self.test_id + '\n')
            logger.to_all(LogFormatter.intro_table_line_template.format('Builds:') +
                          str('\n' + LogFormatter.intro_table_line_template.format('')).join(sorted(self.build_database.keys())) + '\n')
            logger.to_all(LogFormatter.intro_table_line_template.format('Cases:') +
                          str('\n' + LogFormatter.intro_table_line_template.format('')).join([c.name for c in self.test_case_queue]) + '\n')
            logger.to_all(LogFormatter.intro_table_line_template.format('Cores:') +
                          ' '.join([str(i) for i in final_cores_list]) + '\n')

    def _execute(self, test_case, build_name, cores):
        """Build (as needed) and run one case/build/cores combination.

        Implements the auto-debug policy: by default a failing non-debug
        run triggers a second attempt with the debug build;
        --no-auto-debug disables that and --force-debug skips the
        non-debug attempt entirely. Returns a dict with the four
        build/job failure flags (None = that pass was never run).
        """
        job = PALMJob(
            self.test_dir,
            test_case,
            build_name,
            cores,
            verbose=self.verbose,
            dry_run=self.dry_run
        )
        if self.force_debug:
            # Debug-only mode: the skipped non-debug pass counts as failed.
            build_failed_non_debug = True
            job_failed_non_debug = True
            build_failed_debug = self.build_database[build_name].build(debug=True)
            if build_failed_debug:
                job_failed_debug = True
            else:
                job_failed_debug = job.execute(debug=True)
        elif self.no_auto_debug:
            build_failed_non_debug = self.build_database[build_name].build(debug=False)
            if build_failed_non_debug:
                job_failed_non_debug = True
            else:
                job_failed_non_debug = job.execute(debug=False)
            build_failed_debug = None
            job_failed_debug = None
        else:
            build_failed_non_debug = self.build_database[build_name].build(debug=False)
            if build_failed_non_debug:
                job_failed_non_debug = True
                build_failed_debug = self.build_database[build_name].build(debug=True)
                if build_failed_debug:
                    # BUG FIX: a failed debug build means the debug job also
                    # failed; this was erroneously set to False, which made
                    # report() count the combination as not-failed.
                    job_failed_debug = True
                else:
                    job_failed_debug = job.execute(debug=True)
            else:
                job_failed_non_debug = job.execute(debug=False)
                if job_failed_non_debug:
                    build_failed_debug = self.build_database[build_name].build(debug=True)
                    if build_failed_debug:
                        job_failed_debug = True
                    else:
                        job_failed_debug = job.execute(debug=True)
                else:
                    build_failed_debug = None
                    job_failed_debug = None
        return dict(
            build_failed_non_debug=build_failed_non_debug,
            job_failed_non_debug=job_failed_non_debug,
            build_failed_debug=build_failed_debug,
            job_failed_debug=job_failed_debug,
        )

    def execute(self):
        """Run every queued test case on all its builds and core counts,
        collecting the per-combination results in self.test_report."""
        with Logger(self.test_dir, verbose=self.verbose) as logger:
            logger.to_all(LogFormatter.hline)
            logger.to_all(LogFormatter.task_table_line_template.format('Task:', 'Case:', 'Build:', 'Cores:') + 'Status:\n')
            logger.to_all(LogFormatter.hline)
            self.test_report = dict()
            for test_case in self.test_case_queue:
                logger.to_log(LogFormatter.hline)
                logger.to_file(LogFormatter.hline)
                logger.to_file(LogFormatter.hline)
                status_dict = dict()
                for build_name in test_case.build_names:
                    status_dict[build_name] = dict()
                    for cores in test_case.number_of_cores:
                        status_dict[build_name][cores] = self._execute(test_case, build_name, cores)
                self.test_report[test_case.name] = status_dict
                logger.to_log(LogFormatter.hline)
                logger.to_file('\n' * 10)

    def report(self):
        """Print the summary tables and return True when the run failed.

        With --fail-on-debug, tests/builds that only needed debugging
        also count towards the failure exit status.
        """
        with Logger(self.test_dir, verbose=self.verbose) as logger:
            logger.to_all(LogFormatter.hline)
            r = '{:10}' + ' total: ' + '{:<3d}' + \
                ' ok: ' + colored('{:<3d}', 'green') + \
                ' debugged: ' + colored('{:<3d}', 'yellow') + \
                ' failed: ' + colored('{:<3d}', 'red')
            n_all = 0
            n_ok = 0
            n_debugged = 0
            n_failed = 0
            # First table: one line per build (b/bd = non-debug/debug failed;
            # None means that pass was never attempted).
            for build_name, build in self.build_database.items():
                status = build.report()
                b = status['failed_non_debug']
                bd = status['failed_debug']
                n_all += 1
                if not b and b is not None:
                    n_ok += 1
                if bd is not None:
                    n_debugged += 1
                if b and (bd or bd is None):
                    n_failed += 1
            logger.to_all(r.format('Builds:', n_all, n_ok, n_debugged, n_failed) + '\n')
            total_failed = n_failed
            total_debugged = n_debugged
            n_all = 0
            n_ok = 0
            n_debugged = 0
            n_failed = 0
            # self.test_report layout:
            # {'case_name': {'build_name': {4: {'build_failed_debug': None,
            #                                   'build_failed_non_debug': False,
            #                                   'job_failed_debug': None,
            #                                   'job_failed_non_debug': False}}},
            for case_name, case in self.test_report.items():
                for build_name, build in case.items():
                    for cores, results in build.items():
                        n_all += 1
                        b = results['build_failed_non_debug']
                        bd = results['build_failed_debug']
                        j = results['job_failed_non_debug']
                        jd = results['job_failed_debug']
                        if not j:
                            n_ok += 1
                        if jd is not None:
                            n_debugged += 1
                        if j and (jd or jd is None):
                            n_failed += 1
            logger.to_all(r.format('Tests:', n_all, n_ok, n_debugged, n_failed) + '\n')
            total_failed += n_failed
            total_debugged += n_debugged
            if self.fail_on_debug:
                return (total_failed + total_debugged) > 0
            else:
                return total_failed > 0
---|
1272 | |
---|
1273 | |
---|
class CustomCompleter:
    """Base class for argcomplete completers.

    Subclasses override get_items(); __call__ matches the argcomplete
    completer protocol and lazily filters by the typed prefix.
    """

    def __init__(self):
        pass

    def __call__(self, prefix, parsed_args, **kwargs):
        # Yield only the candidates that match what was already typed.
        for candidate in self.get_items():
            if candidate.startswith(prefix):
                yield candidate

    def get_items(self):
        # Subclasses supply the completion candidates here.
        return []
---|
1284 | |
---|
1285 | |
---|
class CaseCompleter(CustomCompleter):
    """Completes --cases with the visible case directories plus 'all'."""

    def get_items(self):
        _, directories, _ = next(os.walk(Environment.trunk_tests_cases_dir))
        # Skip hidden directories such as .svn.
        visible = [d for d in directories if not d.startswith('.')]
        return visible + ['all']
---|
1291 | |
---|
1292 | |
---|
class BuildCompleter(CustomCompleter):
    """Completes --builds with the visible build directories plus 'all'."""

    def get_items(self):
        _, directories, _ = next(os.walk(Environment.trunk_tests_builds_dir))
        # Skip hidden directories such as .svn.
        visible = [d for d in directories if not d.startswith('.')]
        return visible + ['all']
---|
1298 | |
---|
1299 | |
---|
class PALMTestArgumentParser(ArgumentParser):
    """Argument parser for the PALM tester command-line interface."""

    def __init__(self):
        super().__init__(
            description='This is the PALM tester\n' +
                        'Developer Support: knoop@muk.uni-hannover.de',
            formatter_class=RawTextHelpFormatter,
            add_help=True,
        )
        self.add_argument(
            '--version',
            action='version',
            version=version,
        )
        # All boolean options share the same store_true pattern.
        self._add_switch('--verbose', 'verbose',
                         'Increase verbosity of terminal output.')
        self._add_switch('--no-auto-debug', 'no_auto_debug',
                         'Disable automatic debugging in case of test failure.')
        self._add_switch('--force-debug', 'force_debug',
                         'Force debugging regardless of test failure (ignores --no-auto-debug).')
        self._add_switch('--fail-on-debug', 'fail_on_debug',
                         'Return a non-zero exit status in case debugging was required.')
        self._add_switch('--dry-run', 'dry_run',
                         'Prepare and process all requested tests without actually building or executing PALM.')
        self._add_switch('--no-color', 'no_color',
                         'Disable colored terminal output.')
        self.add_argument(
            '--cases',
            action='store',
            dest='cases',
            default=['all'],
            help='A list of test cases to be executed. (default: %(default)s)',
            nargs='+',
            required=False,
            type=str,
            metavar='STR',
        ).completer = CaseCompleter()
        self.add_argument(
            '--builds',
            action='store',
            dest='builds',
            default=['all'],
            help='A list of builds to be executed. (default: %(default)s)',
            nargs='+',
            required=False,
            type=str,
            metavar='STR',
        ).completer = BuildCompleter()
        # Both the default and the permitted values are every core count
        # available on this machine.
        core_choices = [i for i in range(1, multiprocessing.cpu_count()+1)]
        self.add_argument(
            '--cores',
            action='store',
            dest='cores',
            default=core_choices,
            choices=core_choices,
            help='The number of cores tests are supposed to be executed on. (default: %(default)s)',
            nargs='+',
            required=False,
            type=int,
            metavar='INT',
        )
        self.add_argument(
            '--test-id',
            action='store',
            dest='test_id',
            default=datetime.now().strftime('%Y-%m-%d_%H:%M:%S.%f'),
            help='An individual test id. (default: current timestamp)',
            required=False,
            type=str,
            metavar='STR',
        )

    def _add_switch(self, flag, dest, help_text):
        """Register one optional boolean (store_true) command-line switch."""
        self.add_argument(
            flag,
            action='store_true',
            dest=dest,
            help=help_text,
            required=False,
        )
---|
1400 | |
---|
1401 | |
---|
if __name__ == '__main__':
    # Parse the command line (with optional tab completion), run the full
    # prepare/execute/report cycle, and exit non-zero on failure.
    parser = PALMTestArgumentParser()
    if has_argcomplete:
        argcomplete.autocomplete(parser)
    args = parser.parse_args()
    tester = PALMTest(args)
    tester.prepare()
    tester.execute()
    failed = tester.report()
    exit(1 if failed else 0)
---|