#!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK

import os
import sys
import shutil
from datetime import datetime
import subprocess
import multiprocessing
import socket
import getpass
import math
import re
import threading
import queue
from contextlib import ContextDecorator

try:
    from argparse import ArgumentParser
    from argparse import RawTextHelpFormatter
except ImportError:
    sys.exit(
        'ERROR: You need argparse!\n' +
        '   install it from http://pypi.python.org/pypi/argparse\n' +
        '   or run \"pip install argparse\".'
    )

try:
    import numpy as np
except ImportError:
    sys.exit(
        'ERROR: You need numpy!\n' +
        '   install it from http://pypi.python.org/pypi/numpy\n' +
        '   or run \"python3 -m pip install numpy\".'
    )

try:
    import netCDF4
except ImportError:
    sys.exit(
        'ERROR: You need netCDF4!\n' +
        '   install it from http://pypi.python.org/pypi/netCDF4\n' +
        '   or run \"python3 -m pip install netCDF4\".'
    )

try:
    import yaml
except ImportError:
    sys.exit(
        'ERROR: You need PyYAML!\n' +
        '   install it from http://pypi.python.org/pypi/PyYAML\n' +
        '   or run \"python3 -m pip install PyYAML\".'
    )

try:
    import argcomplete
except ImportError:
    print(
        'INFO: To use Tab-completion you need argcomplete!\n' +
        '   install it from http://pypi.python.org/pypi/argcomplete\n' +
        '   or run \"python3 -m pip install argcomplete\".'
    )
    has_argcomplete = False
else:
    has_argcomplete = True

try:
    from termcolor import colored as tcolored
except ImportError:
    def tcolored(string, color):
        return string

disable_colored_output = False


def colored(string, color):
    if not disable_colored_output:
        return tcolored(string, color)
    else:
        return string


version = '1.0.1'

scripts_dir = os.path.dirname(os.path.realpath(__file__))
trunk_dir = os.path.realpath(os.path.join(scripts_dir, '..'))
workspace_dir = os.path.realpath(os.path.join(trunk_dir, '..'))

trunk_tests_dir = os.path.join(trunk_dir, 'TESTS')
trunk_tests_cases_dir = os.path.join(trunk_tests_dir, 'cases')
trunk_tests_builds_dir = os.path.join(trunk_tests_dir, 'builds')

tests_dir = os.path.join(workspace_dir, 'tests')

available_cores = multiprocessing.cpu_count()
terminal_columns, terminal_lines = shutil.get_terminal_size()
hline = '#' * min(terminal_columns, 300) + '\n'
table_width_intro = 12
table_width_builds = len(max([s for s in next(os.walk(trunk_tests_builds_dir))[1] if not s[0] == '.'], key=len)) + len('_debug')
table_width_cases = len(max([s for s in next(os.walk(trunk_tests_cases_dir))[1] if not s[0] == '.'], key=len))
table_width_cores = 7
table_width_total = table_width_intro + table_width_builds + table_width_cases + table_width_cores + 3

task_table_line_template = \
    '{:' + str(table_width_intro) + '} ' + \
    '{:' + str(table_width_cases) + '} ' + \
    '{:' + str(table_width_builds) + '} ' + \
    '{:' + str(table_width_cores) + '} '

config_table_line_template = \
    '{:' + str(table_width_intro) + '} ' + \
    '{:' + str(max(table_width_builds, table_width_cases)) + '} ' + \
    '{:8} '

file_table_line_template = \
    '{:' + str(table_width_intro) + '} ' + \
    '{:' + str(table_width_cases + 13) + '} '
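# These templates expand to ordinary str.format strings with fixed-width fields
# (e.g. '{:12} {:20} ...'), so a hypothetical call like
#   task_table_line_template.format('Testing:', 'my_case', 'my_build', 4)
# yields the aligned columns used in the status tables printed below.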

# for debugging
exist_ok = False


class SignificantDigitsRounder:

    @staticmethod
    def _round(value, digits=10):
        if value == 0.0:
            return value
        negative = value < 0.0
        value = -value if negative else value
        rounded_value = round(value, -int(math.floor(math.log10(value))) + (digits - 1))
        rounded_value = -rounded_value if negative else rounded_value
        return rounded_value
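    # Example: _round(0.0123456789, digits=3) computes floor(log10(value)) == -2
    # and therefore round(0.0123456789, 4) == 0.0123, i.e. three significant digits.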

    # unwrap the staticmethod so np.vectorize receives the plain function
    # (staticmethod objects are not callable on Python < 3.10)
    vectorized_round = np.vectorize(_round.__func__)

    _vectorized_round = np.vectorize(round)

    @classmethod
    def around(cls, array, digits=10):
        # TODO: divide both arrays and check decimal point
        sign_mask = np.ma.masked_where(array >= 0.0, array).mask
        pos_array = np.where(sign_mask, array, -array)
        non_zero_mask = np.ma.masked_where(pos_array == 0.0, pos_array).mask
        non_zero_array = np.where(non_zero_mask, 1.0, pos_array)
        i1 = -np.floor(np.log10(non_zero_array)).astype(int) + (digits - 1)
        rounded_non_zero_array = cls._vectorized_round(non_zero_array, i1)
        rounded_pos_array = np.where(non_zero_mask, 0.0, rounded_non_zero_array)
        return np.where(sign_mask, rounded_pos_array, -rounded_pos_array)
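    # Example: around(np.array([1.23456789e-5, -9876.54321]), digits=4) gives
    # array([1.235e-05, -9.877e+03]); zeros and signs are restored via the masks.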


class Logger(ContextDecorator):

    def __init__(self, logfile_dir, logfile_name='palmtest.log', logfile_mode='a', verbose=False):
        self.logfile_path = os.path.join(logfile_dir, logfile_name)
        self.logfile_mode = logfile_mode
        self.verbose = verbose

    def __enter__(self):
        self._file = open(self.logfile_path, self.logfile_mode)
        return self

    def to_file(self, message):
        self._file.write(message)
        self._file.flush()

    def to_log(self, message):
        if self.verbose:
            sys.stdout.write(message)
            sys.stdout.flush()
        self._file.write(message)
        self._file.flush()

    def to_all(self, message):
        sys.stdout.write(message)
        sys.stdout.flush()
        self._file.write(message)
        self._file.flush()

    def __exit__(self, *exc):
        self._file.close()
        return False
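
# Logger derives from ContextDecorator, so it could also be applied as a function
# decorator (e.g. a hypothetical @Logger('/tmp')); in this script it is only used
# in `with Logger(...) as logger:` blocks, where to_file/to_log/to_all choose
# between logfile-only, verbose-dependent, and always-on-screen output.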


class Executor:

    @staticmethod
    def _enqueue_output(out, line_queue):
        for line in iter(out.readline, b''):
            line_queue.put(line)
        out.close()

    @staticmethod
    def execute(cmd, cwd='.', verbose=True, dry_run=False):
        assert isinstance(cmd, list)
        if dry_run:
            cmd = ['echo'] + cmd
        cmd_str = ' '.join(cmd)
        p = subprocess.Popen(cmd_str, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
        q = queue.Queue()
        t = threading.Thread(target=Executor._enqueue_output, args=(p.stdout, q))
        t.daemon = True  # thread dies with the program
        t.start()

        with Logger(cwd, verbose=verbose) as logger:
            # read lines without blocking
            logger.to_log(hline)
            logger.to_log('CMD: ' + cmd_str + '\n')
            logger.to_log(hline)
            while t.is_alive():
                try:
                    line = q.get_nowait()  # or q.get(timeout=.1)
                except queue.Empty:
                    pass  # no output yet
                else:  # got line
                    logger.to_log(line.decode('utf-8'))
            # drain lines that arrived after the reader thread finished
            line = True
            while line:
                try:
                    line = q.get_nowait()  # or q.get(timeout=.1)
                except queue.Empty:
                    line = False
                else:  # got line
                    logger.to_log(line.decode('utf-8'))
            logger.to_log(hline)

        rc = p.wait()
        failed = rc != 0
        return failed
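    # Pattern note: p.stdout.readline() would block the polling loop, so a daemon
    # reader thread pushes each output line into a Queue that the loop drains with
    # get_nowait(); the second loop empties whatever arrived between the thread's
    # last put() and its exit.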


class NetCDFInterface:

    def __init__(self, filename):
        self.filename = filename

    def is_healthy(self):
        try:
            self.get_run_name()
        except Exception:
            return False
        else:
            return True

    def get_run_name(self):
        with netCDF4.Dataset(self.filename, mode='r') as netcdf:
            words = getattr(netcdf, 'title').split()
            i = words.index('run:')
            return words[i+1]

    def get_var_list(self):
        with netCDF4.Dataset(self.filename, mode='r') as netcdf:
            var_list = getattr(netcdf, 'VAR_LIST').split(';')
            var_list = filter(None, var_list)
            return sorted(var_list)

    def show_content(self):
        with netCDF4.Dataset(self.filename, mode='r') as netcdf:
            for name in netcdf.ncattrs():
                print("Global attr", name, "=", getattr(netcdf, name))
            print(netcdf)
            for v in netcdf.variables:
                print(v)

    def get_times_list(self):
        attributes, times = self.read_var('time')
        times = [str(time) for time in times]
        times = list(filter(None, times))
        return times

    def contains(self, variable):
        return variable in self.get_var_list()

    def read_var(self, variable):
        with netCDF4.Dataset(self.filename, mode='r') as netcdf:
            values = netcdf.variables[variable][:]  # extract values
            attributes = dict(
                long_name=netcdf.variables[variable].name,
                unit=netcdf.variables[variable].units,
            )
            return attributes, values
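    # Example (hypothetical file): NetCDFInterface('my_case_ts.nc').read_var('time')
    # returns ({'long_name': 'time', 'unit': <units attribute>}, <time values>);
    # get_times_list() builds on this and drops empty entries.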


class FileComparator:

    @staticmethod
    def compare_ascii(file_path1, file_path2, start_string=None):
        try:
            with open(file_path1, 'r') as file1:
                content1 = file1.readlines()
        except OSError:
            return True, colored('[reference file not found]', 'red')
        try:
            with open(file_path2, 'r') as file2:
                content2 = file2.readlines()
        except OSError:
            return True, colored('[output file not found]', 'red')
        if start_string:
            try:
                index1 = content1.index(start_string)
                index2 = content2.index(start_string)
            except ValueError:
                return True, colored('[start string not found in both files]', 'red')
            comparable_content1 = content1[index1:]
            comparable_content2 = content2[index2:]
            ln = index2 + 1
        else:
            comparable_content1 = content1
            comparable_content2 = content2
            ln = 1
        if len(comparable_content1) != len(comparable_content2):
            return True, colored('[mismatch in total number of lines]', 'red')
        for line1, line2 in zip(comparable_content1, comparable_content2):
            if not line1 == line2:
                return True, colored('[mismatch in content starting line ' + str(ln) + ']', 'red')
            ln += 1
        return False, colored('[file ok]', 'green')

    @staticmethod
    def compare_netcdf(file_path1, file_path2, digits=None):
        nci1 = NetCDFInterface(file_path1)
        nci2 = NetCDFInterface(file_path2)
        if not nci1.is_healthy():
            return True, colored('[reference file not found]', 'red')
        if not nci2.is_healthy():
            return True, colored('[output file not found]', 'red')
        times_list1 = nci1.get_times_list()
        times_list2 = nci2.get_times_list()
        if not times_list1 == times_list2:
            return True, colored('[wrong time dimension]', 'red')
        else:
            time_list = times_list1
        var_list1 = nci1.get_var_list()
        var_list2 = nci2.get_var_list()
        if not var_list1 == var_list2:
            return True, colored('[wrong set of variables]', 'red')
        else:
            var_list = var_list1
        content1 = dict()
        content2 = dict()
        for var in var_list:
            attributes1, values1 = nci1.read_var(var)
            attributes2, values2 = nci2.read_var(var)
            if sorted(attributes1.keys()) != sorted(attributes2.keys()):
                return True, colored('[wrong set of attributes in variable \"' + var + '\"]', 'red')
            if isinstance(digits, int):
                values1 = SignificantDigitsRounder.around(values1, digits=digits)
                values2 = SignificantDigitsRounder.around(values2, digits=digits)
            content1[var] = values1
            content2[var] = values2
        for i, time in enumerate(time_list):
            for var in var_list:
                t_content1 = content1[var][i]
                t_content2 = content2[var][i]
                if not (t_content1 == t_content2).all():
                    if isinstance(digits, int):
                        return True, colored('[1st mismatch within ' + str(digits) + ' digits at time index ' + str(i) + ' in variable \"' + var + '\"]', 'red')
                    else:
                        return True, colored('[1st mismatch at time index ' + str(i) + ' in variable \"' + var + '\"]', 'red')
        return False, colored('[file ok]', 'green')
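    # Check order: time dimension first, then the variable set, then the attribute
    # keys per variable, and finally the values at each time index, optionally
    # rounded to the requested number of significant digits beforehand.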


class OutputChecker:

    def __init__(self, test_dir, setup_name, build_name, cores, significant_digits=None, verbose=True, dry_run=False):
        self.test_dir = test_dir
        self.setup_name = setup_name
        self.build_name = build_name
        self.cores = cores
        self.significant_digits = significant_digits
        self.verbose = verbose
        self.dry_run = dry_run
        self.job_name = self.setup_name + '__' + build_name + '__' + str(self.cores)
        self.job_dir = os.path.join(self.test_dir, 'JOBS', self.job_name)
        self.ref_monitoring_dir = os.path.join(trunk_tests_cases_dir, self.setup_name, 'MONITORING')
        self.ref_output_dir = os.path.join(trunk_tests_cases_dir, self.setup_name, 'OUTPUT')
        self.res_monitoring_dir = os.path.join(self.job_dir, 'MONITORING')
        self.res_output_dir = os.path.join(self.job_dir, 'OUTPUT')
        self.failed = None

    def get_checkable_file_dicts(self):
        if os.path.isdir(self.ref_monitoring_dir):
            file_names_monitoring = [s for s in next(os.walk(self.ref_monitoring_dir))[2]]
        else:
            file_names_monitoring = []
        file_paths_monitoring = []
        for file_name in file_names_monitoring:
            file_specific_ending = file_name[len(self.setup_name):]
            file_specific_ending_split = file_specific_ending.split('.')
            postfix = file_specific_ending_split[0]
            if len(file_specific_ending_split) > 1:
                extension = file_specific_ending_split[-1]
            else:
                extension = ''
            if len(file_specific_ending_split) > 2:
                cycle_info = file_specific_ending_split[1:-1]
            else:
                cycle_info = []
            file_paths_monitoring.append(
                dict(
                    postfix=postfix,
                    cycle_info=cycle_info,
                    extension=extension,
                    ref_path=self.ref_monitoring_dir,
                    res_path=self.res_monitoring_dir,
                )
            )
        if os.path.isdir(self.ref_output_dir):
            file_names_output = [s for s in next(os.walk(self.ref_output_dir))[2]]
        else:
            file_names_output = []
        file_paths_output = []
        for file_name in file_names_output:
            file_specific_ending = file_name[len(self.setup_name):]
            file_specific_ending_split = file_specific_ending.split('.')
            postfix = file_specific_ending_split[0]
            if len(file_specific_ending_split) > 1:
                extension = file_specific_ending_split[-1]
            else:
                extension = ''
            if len(file_specific_ending_split) > 2:
                cycle_info = file_specific_ending_split[1:-1]
            else:
                cycle_info = []
            file_paths_output.append(
                dict(
                    postfix=postfix,
                    cycle_info=cycle_info,
                    extension=extension,
                    ref_path=self.ref_output_dir,
                    res_path=self.res_output_dir,
                )
            )
        return file_paths_monitoring + file_paths_output

    def check(self):
        with Logger(self.test_dir, verbose=self.verbose) as logger:
            logger.to_log('Checking output files:')
            logger.to_all('\n')
            failed = False
            for file_dict in self.get_checkable_file_dicts():
                file_failed = False
                ext_list = [file_dict['extension']] if file_dict['extension'] else []
                file_specific_ending = '.'.join([file_dict['postfix']] + file_dict['cycle_info'] + ext_list)
                logger.to_all(file_table_line_template.format('Checking:', self.setup_name + file_specific_ending))
                ref_file_path = os.path.join(file_dict['ref_path'], self.setup_name + file_specific_ending)
                res_file_path = os.path.join(file_dict['res_path'], self.job_name + file_specific_ending)
                if re.match('_rc', file_dict['postfix']) and not file_dict['extension']:
                    file_failed, message = FileComparator.compare_ascii(ref_file_path, res_file_path, start_string='Run-control output:\n')
                elif re.match('nc', file_dict['extension']):
                    if self.significant_digits is not None:
                        if re.match('_ts', file_dict['postfix']) and 'timeseries' in self.significant_digits:
                            file_failed, message = FileComparator.compare_netcdf(ref_file_path, res_file_path,
                                                                                 digits=self.significant_digits['timeseries'])
                        elif re.match('_pr', file_dict['postfix']) and 'profiles' in self.significant_digits:
                            file_failed, message = FileComparator.compare_netcdf(ref_file_path, res_file_path,
                                                                                 digits=self.significant_digits['profiles'])
                        else:
                            file_failed, message = FileComparator.compare_netcdf(ref_file_path, res_file_path,
                                                                                 digits=self.significant_digits['other'])
                    else:
                        file_failed, message = FileComparator.compare_netcdf(ref_file_path, res_file_path)
                else:
                    message = colored('[ignored]', 'blue')
                if file_failed:
                    failed = True
                logger.to_all(message + '\n')
            if self.dry_run:
                failed = False
            return failed


class PALMJob:
    """The PALM job class deals with the execution of a single PALM job"""

    @staticmethod
    def get_job_name(setup_name, build_name, cores):
        return setup_name + '__' + build_name + '__' + str(cores)
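    # Example: get_job_name('my_case', 'my_build', 4) returns
    # 'my_case__my_build__4'; the same double-underscore scheme names the
    # JOBS/<job_name> directories and the renamed input/output files.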

    def __init__(self, test_dir, test_case, build_name, cores, verbose=False, dry_run=False):
        self.test_dir = test_dir
        self.test_case = test_case
        self.build_name = build_name
        self.cores = cores
        self.verbose = verbose
        self.dry_run = dry_run

        self.attempted_debug = False
        self.failed_debug = None
        self.attempted_non_debug = False
        self.failed_non_debug = None

    def _link_restart_files(self, build_name):
        if self.dry_run:
            return True, colored('[restart data dry]', 'blue')
        name = self.get_job_name(self.test_case.name, build_name, self.cores)
        source_name = self.get_job_name(self.test_case.use_binary_files_from, build_name, self.cores)
        source_restart_dir = os.path.join(self.test_dir, 'JOBS', source_name, 'RESTART')
        try:
            source_data_dirs_grp = [d for r, d, f in os.walk(source_restart_dir)]
        except Exception:
            source_data_dirs_grp = []
        if len(source_data_dirs_grp) == 0:
            source_data_dirs = []
        else:
            source_data_dirs = source_data_dirs_grp[0]
        if len(source_data_dirs) == 0 and re.match('.+_debug', build_name):
            source_build_name = build_name[:-len('_debug')]
            source_name = self.get_job_name(self.test_case.use_binary_files_from, source_build_name, self.cores)
            source_restart_dir = os.path.join(self.test_dir, 'JOBS', source_name, 'RESTART')
            try:
                source_data_dirs_grp = [d for r, d, f in os.walk(source_restart_dir)]
            except Exception:
                source_data_dirs_grp = []
            if len(source_data_dirs_grp) == 0:
                source_data_dirs = []
            else:
                source_data_dirs = source_data_dirs_grp[0]
        if len(source_data_dirs) == 0:
            source_data_dir = 'no_restart_data'
        else:
            source_data_dir = sorted(source_data_dirs)[-1]
        source_data_dir_path = os.path.join(source_restart_dir, source_data_dir)
        if os.path.isdir(source_data_dir_path) and re.match('.+_d3d.*', source_data_dir):
            job_restart_dir = os.path.join(self.test_dir, 'JOBS', name, 'RESTART')
            os.makedirs(job_restart_dir, exist_ok=exist_ok)
            job_data_dir_path = os.path.join(job_restart_dir, name + '_d3d')
            os.symlink(source_data_dir_path, job_data_dir_path, target_is_directory=True)
            return False, colored('[linked restart data from: ' + source_data_dir_path + ']', 'green')
        else:
            return True, colored('[no restart data found]', 'red')

    def _execute(self, name, build_name):
        execution_failed = Executor.execute(
            [
                os.path.join(self.test_dir, 'trunk', 'SCRIPTS', 'palmrun'),
                '-h', '\"' + build_name + '\"',
                '-d', name,
                '-a', '\"' + ' '.join(self.test_case.activation_strings) + '\"',
                '-X', str(self.cores),
                '-T', str(self.cores),
                '-B',
                '-v',
                '-z',
            ],
            cwd=self.test_dir,
            verbose=self.verbose,
            dry_run=self.dry_run,
        )

        if self.dry_run:
            return False, colored('[execution dry]', 'blue')
        elif execution_failed:
            return True, colored('[execution failed]', 'red')
        else:
            return False, colored('[execution ok]', 'green')

    def _check(self, build_name):
        checker = OutputChecker(
            self.test_dir,
            self.test_case.name,
            build_name,
            self.cores,
            significant_digits=self.test_case.significant_digits,
            verbose=self.verbose,
            dry_run=self.dry_run,
        )
        check_failed = checker.check()

        if self.dry_run:
            return False, colored('[checks dry]', 'blue')
        if check_failed:
            return True, colored('[checks failed]', 'red')
        else:
            return False, colored('[checks ok]', 'green')

    def execute(self, debug=False):
        if debug:
            attempted = self.attempted_debug
            build_name = self.build_name + '_debug'
            failed = self.failed_debug
        else:
            attempted = self.attempted_non_debug
            build_name = self.build_name
            failed = self.failed_non_debug

        if not attempted:
            with Logger(self.test_dir, verbose=self.verbose) as logger:
                status_prefix = task_table_line_template.format('Testing:', self.test_case.name, build_name, self.cores)
                logger.to_all(status_prefix)
                logger.to_log('[started]' + '\n')
                attempted = True

                name = self.get_job_name(self.test_case.name, build_name, self.cores)

                input_dir = os.path.join(self.test_dir, 'JOBS', name, 'INPUT')
                os.makedirs(input_dir, exist_ok=exist_ok)

                # copying needs to be done per file, because input files need to be renamed
                for input_file in self.test_case.input_file_names:
                    postfix = input_file[len(self.test_case.name):]
                    src = os.path.join(self.test_case.input_dir, input_file)
                    dst = os.path.join(input_dir, name + postfix)
                    shutil.copy(src, dst)

                # copying the entire directory is ok, because source files do not need to be renamed
                user_code_dir = os.path.join(self.test_dir, 'JOBS', name, 'USER_CODE')
                if os.path.isdir(self.test_case.user_code_dir):
                    shutil.copytree(self.test_case.user_code_dir, user_code_dir, copy_function=shutil.copy)

                if self.test_case.requires_binary_files:
                    link_restart_files_failed, message = self._link_restart_files(build_name)
                    logger.to_log(status_prefix)
                    logger.to_log(message + ' ')
                    logger.to_log('\n')

                failed, message = self._execute(name, build_name)
                logger.to_log(status_prefix)
                logger.to_all(message + ' ')
                logger.to_log('\n')

                failed, message = self._check(build_name)
                logger.to_log(status_prefix)
                logger.to_log(message + ' ')

                logger.to_all('\n')

        if debug:
            self.attempted_debug = attempted
            self.failed_debug = failed
        else:
            self.attempted_non_debug = attempted
            self.failed_non_debug = failed

        return failed

    def status(self):
        return dict(
            attempted=self.attempted_non_debug or self.attempted_debug,
            failed=self.failed_non_debug and self.failed_debug,
            debugged=self.attempted_debug,
            non_debug_failed=self.failed_non_debug,
        )


class PALMBuild:
    """The PALM build class deals with configuration and execution of all required PALM builds"""

    def __init__(self, test_dir, build_name, verbose=False, dry_run=False):
        self.test_dir = test_dir
        self.build_name = build_name
        self.verbose = verbose
        self.dry_run = dry_run
        self.configured = False
        self.executed = False
        self.available = False
        self.requires_mpi = False
        self.requires_netcdf = False
        self.requires_fftw = False
        self.requires_rrtmg = False
        self.attempted_non_debug = False
        self.attempted_debug = False
        self.failed_non_debug = None
        self.failed_debug = None

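    # configure() below reads trunk/TESTS/builds/<build_name>/build_config.yml and
    # validates it key by key. A minimal file satisfying those checks could look
    # like this (all names and flags are assumed, not taken from a real build):
    #
    #   compiler:
    #     id: my_compiler
    #     executable: '{{my_compiler.executable}}'
    #     mpi_wrapper: '{{my_compiler.mpi_wrapper}}'
    #     definitions: [__parallel]
    #     includes: ['{{my_compiler.include.netcdf}}']
    #     options:
    #       default: [-O2]
    #       debug: [-O0, -g]
    #   linker:
    #     options: ['{{my_compiler.lib.netcdf}}']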
    def configure(self):
        try:
            with open(os.path.join(trunk_tests_builds_dir, self.build_name, 'build_config.yml'), 'r') as f:
                build_config = yaml.safe_load(f)
        except Exception:
            return True, colored('[build not found]', 'red')

        if 'compiler' in build_config:
            self.compiler = build_config['compiler']
        else:
            return True, colored('[missing \"compiler\" keyword]', 'red')

        if not isinstance(self.compiler, dict):
            return True, colored('[\"compiler\" keyword must be dict]', 'red')

        if 'linker' in build_config:
            self.linker = build_config['linker']
        else:
            return True, colored('[missing \"linker\" keyword]', 'red')

        if not isinstance(self.linker, dict):
            return True, colored('[\"linker\" keyword must be dict]', 'red')

        if 'mpi_wrapper' in self.compiler:
            if 'mpi_wrapper}}' in self.compiler['mpi_wrapper']:
                self.requires_mpi = True
        else:
            return True, colored('[missing \"mpi_wrapper\" keyword]', 'red')

        if 'includes' in self.compiler:
            for include in self.compiler['includes']:
                if 'include.netcdf}}' in include:
                    self.requires_netcdf = True
                if 'include.fftw}}' in include:
                    self.requires_fftw = True
                if 'include.rrtmg}}' in include:
                    self.requires_rrtmg = True
        else:
            return True, colored('[missing \"includes\" keyword in compiler]', 'red')

        if 'options' in self.linker:
            for lib in self.linker['options']:
                if 'lib.netcdf}}' in lib:
                    self.requires_netcdf = True
                if 'lib.fftw}}' in lib:
                    self.requires_fftw = True
                if 'lib.rrtmg}}' in lib:
                    self.requires_rrtmg = True
        else:
            return True, colored('[missing \"options\" keyword in linker]', 'red')

        library_names = []
        if self.requires_netcdf:
            library_names.append('netcdf')
        if self.requires_fftw:
            library_names.append('fftw')
        if self.requires_rrtmg:
            library_names.append('rrtmg')

        if 'executable' not in self.compiler:
            return True, colored('[missing \"executable\" keyword in compiler]', 'red')

        if 'definitions' not in self.compiler:
            return True, colored('[missing \"definitions\" keyword in compiler]', 'red')

        if 'options' not in self.compiler:
            return True, colored('[missing \"options\" keyword in compiler]', 'red')

        if 'default' not in self.compiler['options']:
            return True, colored('[missing \"default\" keyword in compiler.options]', 'red')

        if 'debug' not in self.compiler['options']:
            return True, colored('[missing \"debug\" keyword in compiler.options]', 'red')

        try:
            with open(os.path.join(workspace_dir, 'palmtest.yml'), 'r') as f:
                palmtest_config = yaml.safe_load(f)
        except Exception:
            return True, colored('[palmtest.yml not found]', 'red')

        # fall back to the default template whenever no readable custom template is configured
        template = None
        custom_template = palmtest_config.get('palm_config_template')
        if isinstance(custom_template, str):
            try:
                with open(custom_template, 'r') as palm_config_template_file:
                    template = palm_config_template_file.read()
            except OSError:
                template = None
        if template is None:
            try:
                with open(os.path.join(scripts_dir, '.palm.config.default.in'), 'r') as palm_config_template_file:
                    template = palm_config_template_file.read()
            except OSError:
                return True, colored('[trunk/SCRIPTS/.palm.config.default.in not found]', 'red')

        template = template.replace('@CMAKE_INSTALL_PREFIX@', self.test_dir)
        template = template.replace('@PALM_HOSTNAME@', socket.gethostname())
        template = template.replace('@CMAKE_USERNAME@', getpass.getuser())
        template = template.replace('@MPI_Fortran_COMPILER@', self.compiler['mpi_wrapper'])
        template = template.replace('@CMAKE_Fortran_COMPILER@', self.compiler['executable'])
        cpp_options_str = ['-D' + s for s in self.compiler['definitions']]
        template = template.replace('@PALM_CPP_OPTIONS_STR@', ' '.join(cpp_options_str))
        template = template.replace('@PALM_CORES@', str(available_cores))
        template = template.replace('@PALM_COMPILER_OPTIONS@', '{{palmtest.compiler.options}} ' + ' '.join(self.compiler['includes']))
        template = template.replace('@PALM_LINKER_OPTIONS@', ' '.join(self.linker['options']))
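        # Substitution happens in three stages: the @...@ placeholders above are
        # filled from this script's context, the {{<id>.*}} placeholders below are
        # filled from the matching environment in palmtest.yml, and
        # {{palmtest.compiler.options}} is resolved last, once per generated
        # config (default and debug).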

        if 'environments' in palmtest_config:
            available_environments = palmtest_config['environments']
        else:
            return True, colored('[missing \"environments\" keyword in palmtest.yml]', 'red')

        if 'id' in self.compiler:
            c_id = self.compiler['id']
        else:
            return True, colored('[missing \"id\" keyword in compiler]', 'red')

        if c_id in available_environments:
            self.available = True

            environment = available_environments[c_id]
            if 'executable' not in environment:
                return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"executable\"]', 'red')
            value = environment['executable']
            if isinstance(value, str):
                template = template.replace('{{' + '.'.join([c_id, 'executable']) + '}}', value)
            if self.requires_mpi:
                if 'mpi_wrapper' not in environment:
                    return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"mpi_wrapper\"]', 'red')
                value = environment['mpi_wrapper']
                if isinstance(value, str):
                    template = template.replace('{{' + '.'.join([c_id, 'mpi_wrapper']) + '}}', value)
            if 'include' not in environment:
                return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"include\"]', 'red')
            if 'lib' not in environment:
                return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"lib\"]', 'red')
            for lib in library_names:
                if lib not in environment['include']:
                    return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"include.' + lib + '\"]', 'red')
                value = environment['include'][lib]
                if isinstance(value, str):
                    template = template.replace('{{' + '.'.join([c_id, 'include', lib]) + '}}', value)
                if lib not in environment['lib']:
                    return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"lib.' + lib + '\"]', 'red')
                value = environment['lib'][lib]
                if isinstance(value, str):
                    template = template.replace('{{' + '.'.join([c_id, 'lib', lib]) + '}}', value)

            with open(os.path.join(self.test_dir, '.palm.config.' + self.build_name), 'w') as palm_config_file:
                palm_config_file.write(
                    template.replace(
                        '{{palmtest.compiler.options}}',
                        ' '.join(self.compiler['options']['default']),
                    )
                )
            with open(os.path.join(self.test_dir, '.palm.config.' + self.build_name + '_debug'), 'w') as palm_config_file:
                palm_config_file.write(
                    template.replace(
                        '{{palmtest.compiler.options}}',
                        ' '.join(self.compiler['options']['debug']),
                    )
                )
            self.configured = True
            return False, colored('[configuration ok]', 'green')

        else:
            return True, colored('[palmtest.yml environment \"' + c_id + '\" not found]', 'red')

    def _execute(self, build_name):
        self.attempted = True
        build_failed = Executor.execute(
            [
                os.path.join(self.test_dir, 'trunk', 'SCRIPTS', 'palmbuild'),
                '-h', '\"' + build_name + '\"',
                '-v',
            ],
            cwd=self.test_dir,
            verbose=self.verbose,
            dry_run=self.dry_run,
        )

        if self.dry_run:
            return False, colored('[build dry]', 'blue')
        if build_failed:
            return True, colored('[build failed]', 'red')
        else:
            return False, colored('[build ok]', 'green')

    def build(self, debug=False):
        if debug:
            attempted = self.attempted_debug
            build_name = self.build_name + '_debug'
            failed = self.failed_debug
        else:
            attempted = self.attempted_non_debug
            build_name = self.build_name
            failed = self.failed_non_debug

        if not attempted:
            with Logger(self.test_dir, verbose=self.verbose) as logger:
                status_prefix = task_table_line_template.format('Building:', '', build_name, '')
                logger.to_all(status_prefix)
                logger.to_log('[started]' + '\n')
                attempted = True

                failed, message = self._execute(build_name)
                logger.to_log(status_prefix)
                logger.to_all(message + ' ')
                logger.to_all('\n')

        if debug:
            self.attempted_debug = attempted
            self.failed_debug = failed
        else:
            self.attempted_non_debug = attempted
            self.failed_non_debug = failed

        return failed

    def report(self):
        return dict(
            failed_debug=self.failed_debug,
            failed_non_debug=self.failed_non_debug,
        )


class PALMTestCase:
    """The PALM test case class deals with the configuration and execution of all PALM test cases"""

    def __init__(self, test_dir, name, verbose=False, dry_run=False):
        self.test_dir = test_dir
        self.name = name
        self.verbose = verbose
        self.dry_run = dry_run
        self.user_code_dir = os.path.join(trunk_tests_cases_dir, self.name, 'USER_CODE')
        self.input_dir = os.path.join(trunk_tests_cases_dir, self.name, 'INPUT')
        self.number_of_cores = []
        self.build_names = []
        self.input_file_names = []
        self.configured = False

    def configure(self, requested_build_names, requested_cores):
        f_name = os.path.join(trunk_tests_cases_dir, self.name, 'case_config.yml')
        try:
            with open(f_name, 'r') as f:
                config = yaml.safe_load(f)
        except Exception:
            return True, colored('[Case \"' + self.name + '\" could not be found.]', 'red')
        self.use_binary_files_from = config.get('use_binary_files_from')
        self.requires_binary_files = bool(self.use_binary_files_from)

        if 'allowed_builds' not in config:
            return True, colored('[missing \"allowed_builds\" keyword]', 'red')
        self.allowed_build_names = config['allowed_builds']

        if 'allowed_number_of_cores' not in config:
            return True, colored('[missing \"allowed_number_of_cores\" keyword]', 'red')
        self.allowed_number_of_cores = config['allowed_number_of_cores']

        if 'activation_strings' not in config:
            return True, colored('[missing \"activation_strings\" keyword]', 'red')
        self.activation_strings = config['activation_strings']

        if 'significant_digits_for_netcdf_checks' not in config:
            return True, colored('[missing \"significant_digits_for_netcdf_checks\" keyword]', 'red')
        self.significant_digits = config['significant_digits_for_netcdf_checks']

        if 'timeseries' not in config['significant_digits_for_netcdf_checks']:
            return True, colored('[missing \"timeseries\" keyword in significant_digits_for_netcdf_checks]', 'red')

        if 'profiles' not in config['significant_digits_for_netcdf_checks']:
            return True, colored('[missing \"profiles\" keyword in significant_digits_for_netcdf_checks]', 'red')

        if 'other' not in config['significant_digits_for_netcdf_checks']:
            return True, colored('[missing \"other\" keyword in significant_digits_for_netcdf_checks]', 'red')

        self.number_of_cores = sorted(set(requested_cores).intersection(self.allowed_number_of_cores))
        self.build_names = sorted(set(requested_build_names).intersection(self.allowed_build_names))
        self.input_file_names = [s for s in next(os.walk(self.input_dir))[2]]
        self.configured = True
        if len(self.number_of_cores) == 0:
            return True, colored('[no allowed cores requested]', 'blue')
        if len(self.build_names) == 0:
            return True, colored('[no allowed builds requested]', 'blue')
        if len(self.input_file_names) == 0:
            return True, colored('[no input files found]', 'red')
        return False, colored('[configuration ok]', 'green')
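    # A case_config.yml satisfying the checks above could look like this
    # (all values are illustrative, not taken from an actual case):
    #
    #   use_binary_files_from: my_precursor_case   # optional
    #   allowed_builds: [my_build]
    #   allowed_number_of_cores: [1, 2, 4]
    #   activation_strings: ['d3#']
    #   significant_digits_for_netcdf_checks:
    #     timeseries: 6
    #     profiles: 8
    #     other: 10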


class PALMTest:

    def __init__(self, args):
        self.verbose = args.verbose
        self.no_auto_debug = args.no_auto_debug
        self.force_debug = args.force_debug
        self.fail_on_debug = args.fail_on_debug
        self.dry_run = args.dry_run
        self.no_color = args.no_color
        self.test_id = args.test_id
        self.test_case_names = args.cases
        self.requested_build_names = args.builds
        self.requested_cores = args.cores
        self.test_case_queue = []
        self.build_database = dict()

    def prepare(self):
        global disable_colored_output
        disable_colored_output = self.no_color
        self.test_dir = os.path.join(tests_dir, self.test_id)
        try:
            os.makedirs(self.test_dir, exist_ok=exist_ok)
        except OSError:
            print('ERROR: Found existing test directory: ' + self.test_dir)
            sys.exit(1)
        with Logger(self.test_dir, verbose=self.verbose) as logger:
            logger.to_all(hline)
            logger.to_all('This is the PALM tester (version: ' + version + ')' + '\n')
            logger.to_all(hline)
            try:
                with open(os.path.join(workspace_dir, 'palmtest.yml'), 'r') as f:
                    pass
            except OSError:
                logger.to_all('ERROR: No palmtest.yml file was found in your working directory!\n')
                logger.to_all('INFO: A template for this file can be found at: trunk/TESTS/palmtest.yml\n')
                logger.to_all('      Please copy the template to your working directory and adjust it to your system!\n')
                sys.exit(1)

            self.execution_trunk_dir = os.path.join(self.test_dir, 'trunk')
            os.symlink(trunk_dir, self.execution_trunk_dir)
            self.execution_jobs_dir = os.path.join(self.test_dir, 'JOBS')
            os.makedirs(self.execution_jobs_dir, exist_ok=exist_ok)

            try:
                with open(os.path.join(scripts_dir, '.palm.iofiles'), 'r') as iofiles_template_file:
                    iofiles_template = iofiles_template_file.read()
                with open(os.path.join(self.test_dir, '.palm.iofiles'), 'w') as iofiles_file:
                    iofiles_file.write(iofiles_template.replace('$fast_io_catalog', '$base_data'))
            except OSError:
                logger.to_all('ERROR: No .palm.iofiles file was found in trunk/SCRIPTS/')
                sys.exit(1)

            final_cores_list = list(filter(lambda x: x <= available_cores, self.requested_cores))

            logger.to_all(config_table_line_template.format('Object:', 'Name:', 'Action:') + 'Status:\n')
            logger.to_all(hline)

            if 'all' in self.requested_build_names:
                self.requested_build_names = [name for name in next(os.walk(trunk_tests_builds_dir))[1] if not name[0] == '.']
            found_build_names = []
            for build_name in self.requested_build_names:
                build = PALMBuild(self.test_dir, build_name, verbose=self.verbose, dry_run=self.dry_run)
                configuration_failed, message = build.configure()
                if not configuration_failed:
                    self.build_database[build_name] = build
                    found_build_names.append(build_name)
                    logger.to_all(config_table_line_template.format('Build', build_name, 'approved'))
                    logger.to_all(message + '\n')
                else:
                    logger.to_all(config_table_line_template.format('Build', build_name, 'rejected'))
                    logger.to_all(message + '\n')
            final_build_list = found_build_names

            if 'all' in self.test_case_names:
                self.test_case_names = sorted([name for name in next(os.walk(trunk_tests_cases_dir))[1] if not name[0] == '.'])

            additional_initial_runs_2 = [self.test_case_names]
            while len(additional_initial_runs_2[-1]) > 0:
                additional_initial_runs_1 = []
                for test_case_name in additional_initial_runs_2[-1]:
                    test_case = PALMTestCase(self.test_dir, test_case_name, verbose=self.verbose)
                    test_case_configuration_failed, message = test_case.configure(final_build_list, final_cores_list)
                    if not test_case_configuration_failed:
                        if test_case.requires_binary_files:
                            additional_initial_runs_1.append(test_case.use_binary_files_from)
                additional_initial_runs_2.append(sorted(set(additional_initial_runs_1)))

            test_case_order = []
            for i in range(len(additional_initial_runs_2) - 1):
                # low and high refer to priority
                low = additional_initial_runs_2[i]
                high = additional_initial_runs_2[i + 1]
                for item in high:
                    while item in low:
                        low.remove(item)
                test_case_order.append(low)

            test_case_order_no_duplicates = []
            for test_cases in test_case_order:
                seen = set()
                seen_add = seen.add
                test_case_order_no_duplicates.append([x for x in test_cases if not (x in seen or seen_add(x))])

            approved_test_case_order = [[]] + list(reversed(test_case_order_no_duplicates))
            for i, test_cases in enumerate(list(approved_test_case_order)):
                info = 'Case (dep)' if i < len(approved_test_case_order) - 1 else 'Case'
                for test_case_name in list(test_cases):
                    sys.stdout.flush()
                    test_case = PALMTestCase(self.test_dir, test_case_name, verbose=self.verbose)
                    test_case_configuration_failed, message = test_case.configure(final_build_list, final_cores_list)
                    if test_case_configuration_failed:
                        # removing as configuration failed should only apply to added dependencies
                        approved_test_case_order[i].remove(test_case_name)
                        logger.to_all(config_table_line_template.format(info, test_case_name, 'rejected'))
                        logger.to_all(message + '\n')
                    elif test_case.requires_binary_files:
                        if test_case.use_binary_files_from not in approved_test_case_order[i - 1]:
                            # removing as dependency is already removed
                            approved_test_case_order[i].remove(test_case_name)
                            logger.to_all(config_table_line_template.format(info, test_case_name, 'disabled'))
                            logger.to_all(colored('[requires dependency \"' + test_case.use_binary_files_from + '\"]', 'red') + '\n')
                        else:
                            logger.to_all(config_table_line_template.format(info, test_case_name, 'approved'))
                            logger.to_all(message + '\n')
                    else:
                        logger.to_all(config_table_line_template.format(info, test_case_name, 'approved'))
                        logger.to_all(message + '\n')

            final_case_list = []
            for cases in approved_test_case_order:
                for case in cases:
                    if case not in final_case_list:
                        final_case_list.append(case)

            for build_name in final_build_list:
                build = PALMBuild(
                    self.test_dir,
                    build_name,
                    verbose=self.verbose,
                    dry_run=self.dry_run,
                )
                configuration_failed, message = build.configure()
                if not configuration_failed:
                    self.build_database[build_name] = build
                else:
                    logger.to_all(message + '\n')

            for case_name in final_case_list:
                test_case = PALMTestCase(
                    self.test_dir,
                    case_name,
                    verbose=self.verbose,
                    dry_run=self.dry_run,
                )
                test_case_configuration_failed, message = test_case.configure(final_build_list, final_cores_list)
                if not test_case_configuration_failed:
                    self.test_case_queue.append(test_case)
            logger.to_all(hline)

            r = '{:' + str(table_width_intro) + '} '

            logger.to_all(r.format('Test ID:') + self.test_id + '\n')
            logger.to_all(r.format('Builds:') + str('\n' + r.format('')).join(sorted(self.build_database.keys())) + '\n')
            logger.to_all(r.format('Cases:') + str('\n' + r.format('')).join([c.name for c in self.test_case_queue]) + '\n')
            logger.to_all(r.format('Cores:') + ' '.join([str(i) for i in final_cores_list]) + '\n')

    def _execute(self, test_case, build_name, cores):
        job = PALMJob(
            self.test_dir,
            test_case,
            build_name,
            cores,
            verbose=self.verbose,
            dry_run=self.dry_run
        )
        if self.force_debug:
            build_failed_non_debug = True
            job_failed_non_debug = True
            build_failed_debug = self.build_database[build_name].build(debug=True)
            if build_failed_debug:
                job_failed_debug = True
            else:
                job_failed_debug = job.execute(debug=True)
        elif self.no_auto_debug:
            build_failed_non_debug = self.build_database[build_name].build(debug=False)
            if build_failed_non_debug:
                job_failed_non_debug = True
            else:
                job_failed_non_debug = job.execute(debug=False)
            build_failed_debug = None
            job_failed_debug = None
        else:
            build_failed_non_debug = self.build_database[build_name].build(debug=False)
            if build_failed_non_debug:
                job_failed_non_debug = True
                build_failed_debug = self.build_database[build_name].build(debug=True)
                if build_failed_debug:
                    job_failed_debug = True
                else:
                    job_failed_debug = job.execute(debug=True)
            else:
                job_failed_non_debug = job.execute(debug=False)
                if job_failed_non_debug:
                    build_failed_debug = self.build_database[build_name].build(debug=True)
                    if build_failed_debug:
                        job_failed_debug = True
                    else:
                        job_failed_debug = job.execute(debug=True)
                else:
                    build_failed_debug = None
                    job_failed_debug = None
        return dict(
            build_failed_non_debug=build_failed_non_debug,
            job_failed_non_debug=job_failed_non_debug,
            build_failed_debug=build_failed_debug,
            job_failed_debug=job_failed_debug,
        )
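    # Debug policy in short: --force-debug always runs the debug build only,
    # --no-auto-debug runs the regular build only, and the default retries a
    # failed build or run once with the corresponding _debug build; None marks
    # steps that were never attempted.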

    def execute(self):
        with Logger(self.test_dir, verbose=self.verbose) as logger:
            logger.to_all(hline)
            logger.to_all(task_table_line_template.format('Task:', 'Case:', 'Build:', 'Cores:') + 'Status:\n')
            logger.to_all(hline)
            self.test_report = dict()
            for test_case in self.test_case_queue:
                logger.to_log(hline)
                logger.to_file(hline)
                logger.to_file(hline)
                status_dict = dict()
                for build_name in test_case.build_names:
                    status_dict[build_name] = dict()
                    for cores in test_case.number_of_cores:
                        status_dict[build_name][cores] = self._execute(test_case, build_name, cores)
                self.test_report[test_case.name] = status_dict
                logger.to_log(hline)
                logger.to_file('\n' * 10)

    def report(self):
        with Logger(self.test_dir, verbose=self.verbose) as logger:
            logger.to_all(hline)
            r = '{:10}' + ' total: ' + '{:<3d}' + \
                ' ok: ' + colored('{:<3d}', 'green') + \
                ' debugged: ' + colored('{:<3d}', 'yellow') + \
                ' failed: ' + colored('{:<3d}', 'red')
            n_all = 0
            n_ok = 0
            n_debugged = 0
            n_failed = 0
            for build_name, build in self.build_database.items():
                status = build.report()
                b = status['failed_non_debug']
                bd = status['failed_debug']
                n_all += 1
                if not b and b is not None:
                    n_ok += 1
                if bd is not None:
                    n_debugged += 1
                if b and (bd or bd is None):
                    n_failed += 1
            logger.to_all(r.format('Builds:', n_all, n_ok, n_debugged, n_failed) + '\n')
            total_failed = n_failed
            total_debugged = n_debugged
            n_all = 0
            n_ok = 0
            n_debugged = 0
            n_failed = 0
            # {'case_name': {'build_name': {4: {'build_failed_debug': None,
            #                                   'build_failed_non_debug': False,
            #                                   'job_failed_debug': None,
            #                                   'job_failed_non_debug': False}}}}
            for case_name, case in self.test_report.items():
                for build_name, build in case.items():
                    for cores, results in build.items():
                        n_all += 1
                        b = results['build_failed_non_debug']
                        bd = results['build_failed_debug']
                        j = results['job_failed_non_debug']
                        jd = results['job_failed_debug']
                        if not j:
                            n_ok += 1
                        if jd is not None:
                            n_debugged += 1
                        if j and (jd or jd is None):
                            n_failed += 1
            logger.to_all(r.format('Tests:', n_all, n_ok, n_debugged, n_failed) + '\n')
            total_failed += n_failed
            total_debugged += n_debugged
            if self.fail_on_debug:
                return (total_failed + total_debugged) > 0
            else:
                return total_failed > 0


class CustomCompleter:

    def __init__(self):
        pass

    def __call__(self, prefix, parsed_args, **kwargs):
        return (i for i in self.get_items() if i.startswith(prefix))

    def get_items(self):
        return []
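
# argcomplete calls a completer object with the current word prefix (plus the
# arguments parsed so far) and expects an iterable of matching candidates;
# the subclasses below only override get_items().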


class CaseCompleter(CustomCompleter):

    def get_items(self):
        case_names = [name for name in next(os.walk(trunk_tests_cases_dir))[1] if not name[0] == '.']
        return case_names + ['all']


class BuildCompleter(CustomCompleter):

    def get_items(self):
        build_names = [name for name in next(os.walk(trunk_tests_builds_dir))[1] if not name[0] == '.']
        return build_names + ['all']


class PALMTestArgumentParser(ArgumentParser):

    def __init__(self):
        super().__init__(
            description='This is the PALM tester\n' +
                        'Developer Support: knoop@muk.uni-hannover.de',
            formatter_class=RawTextHelpFormatter,
            add_help=True,
        )
        self.add_argument(
            '--version',
            action='version',
            version=version,
        )
        self.add_argument(
            '--verbose',
            action='store_true',
            dest='verbose',
            help='Increase verbosity of terminal output.',
            required=False,
        )
        self.add_argument(
            '--no-auto-debug',
            action='store_true',
            dest='no_auto_debug',
            help='Disable automatic debugging in case of test failure.',
            required=False,
        )
        self.add_argument(
            '--force-debug',
            action='store_true',
            dest='force_debug',
            help='Force debugging regardless of test failure (ignores --no-auto-debug).',
            required=False,
        )
        self.add_argument(
            '--fail-on-debug',
            action='store_true',
            dest='fail_on_debug',
            help='Return a non-zero exit status in case debugging was required.',
            required=False,
        )
        self.add_argument(
            '--dry-run',
            action='store_true',
            dest='dry_run',
            help='Prepare and process all requested tests without actually building or executing PALM.',
            required=False,
        )
        self.add_argument(
            '--no-color',
            action='store_true',
            dest='no_color',
            help='Disable colored terminal output.',
            required=False,
        )
        self.add_argument(
            '--cases',
            action='store',
            dest='cases',
            default=['all'],
            help='A list of test cases to be executed. (default: %(default)s)',
            nargs='+',
            required=False,
            type=str,
            metavar='STR',
        ).completer = CaseCompleter()
        self.add_argument(
            '--builds',
            action='store',
            dest='builds',
            default=['all'],
            help='A list of builds to be executed. (default: %(default)s)',
            nargs='+',
            required=False,
            type=str,
            metavar='STR',
        ).completer = BuildCompleter()
        self.add_argument(
            '--cores',
            action='store',
            dest='cores',
            default=[i for i in range(1, available_cores + 1)],
            choices=[i for i in range(1, available_cores + 1)],
            help='The number of cores tests are supposed to be executed on. (default: %(default)s)',
            nargs='+',
            required=False,
            type=int,
            metavar='INT',
        )
        self.add_argument(
            '--test-id',
            action='store',
            dest='test_id',
            default=datetime.now().strftime('%Y-%m-%d_%H:%M:%S.%f'),
            help='An individual test id. (default: current timestamp)',
            required=False,
            type=str,
            metavar='STR',
        )


if __name__ == '__main__':
    parser = PALMTestArgumentParser()
    if has_argcomplete:
        argcomplete.autocomplete(parser)
    args = parser.parse_args()
    palm_test = PALMTest(args)
    palm_test.prepare()
    palm_test.execute()
    failed = palm_test.report()
    sys.exit(1 if failed else 0)