Changeset 3353
- Timestamp: Oct 15, 2018 7:39:01 PM
- Location: palm/trunk
- Files: 65 added, 1 edited
palm/trunk/SCRIPTS/palmtest
Modified from r2824 to r3353: the file was rewritten, replacing the Bash test suite with a new Python implementation.

Removed (r2824):

#!/usr/bin/env bash

#--------------------------------------------------------------------------------#
# This file is part of the PALM model system.
#
# PALM is free software: you can redistribute it and/or modify it under the terms
# of the GNU General Public License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
#
# PALM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# PALM. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2017-2018 Leibniz Universitaet Hannover
#--------------------------------------------------------------------------------#
#
# Current revisions:
# -----------------
#
#
# Former revisions:
# -----------------
# $Id$
# Corrected "Former revisions" section
# svn propset keyword
#
#
#
# 2696 kanani
# Change in file header (GPL part)
#
# 2579 knoop
# palmtest now testing for multiple cpu-setups
#
# 2515 kanani
# Generalization of the palmtest script
#
# 2497 knoop
# Initial revision
#
# Description:
# ------------
# Testsuite execution script
#------------------------------------------------------------------------------#
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do  # resolve $SOURCE until the file is no longer a symlink
    DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
    SOURCE="$(readlink "$SOURCE")"
    [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"  # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SCRIPT_LOCATION="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

hrule() {
    printf "#"
    printf -- '-%.0s' {1..72}
    printf "#\n"
}

get_number_of_cpu_cores() {
    {
        n=$(sysctl -n machdep.cpu.core_count 2> /dev/null)
    } || {
        n=$(grep -c ^processor /proc/cpuinfo 2> /dev/null)
    } || {
        if ! [[ $n =~ ^-?[0-9]+$ ]]; then
            n=1
        fi
    }
    echo $n
}

get_core_array() {
    for i in 1 2 4 8 16 32; do
        if [[ $i -le ${1} ]]; then
            printf "$i "
        fi
    done
}

configure() {
    hrule
    printf "Configuring..."
    if [[ -f ${existing_working_dir}/.palm.config.${configuration} ]]; then
        cp ${existing_working_dir}/.palm.config.${configuration} ${tester_prefix}/.palm.config.${configuration}
        sed -i -e "s#%base_directory .*#%base_directory ${tester_prefix}#g" ${tester_prefix}/.palm.config.${configuration}
        sed -i -e "s#%base_data .*#%base_data ${tester_prefix}/JOBS#g" ${tester_prefix}/.palm.config.${configuration}
        sed -i -e "s#%source_path .*#%source_path ${tester_prefix}/trunk/SOURCE#g" ${tester_prefix}/.palm.config.${configuration}
        sed -i -e "s#%user_source_path .*#%user_source_path ${tester_prefix}/JOBS/\$fname/USER_CODE#g" ${tester_prefix}/.palm.config.${configuration}
        sed -i -e "s#%fast_io_catalog .*#%fast_io_catalog ${tester_prefix}/tmp#g" ${tester_prefix}/.palm.config.${configuration}
        printf " finished (adapted existing .palm.config.${configuration})\n"
    else
        printf " failed (missing .palm.config.${configuration})\n"
        hrule
        exit 1
    fi
}

build() {
    hrule
    rm -rf ${tester_prefix}/JOBS
    rm -rf ${tester_prefix}/MAKE_DEPOSITORY*
    bash ${trunk_dir}/SCRIPTS/palmbuild -h "${configuration}" -v
}

palm_installer_test() {
    hrule
    local name=${1}
    local cores=${2}
    printf "Testing with \"${name}\" on ${cores} core(s)... "
    local job_id=${name}_${cores}
    local input_dir=${tester_prefix}/JOBS/${job_id}/INPUT
    local monitoring_dir=${tester_prefix}/JOBS/${job_id}/MONITORING
    if [[ ! -f ${test_dir}/${name}_p3d ]] || [[ ! -f ${test_dir}/${name}_rc ]]; then
        printf " test not found\n"
        return 1
    fi
    rm -rf ${monitoring_dir}
    mkdir -p ${input_dir}
    mkdir -p ${monitoring_dir}
    cp ${test_dir}/${name}_p3d ${input_dir}/${job_id}_p3d
    cp ${test_dir}/${name}_rc ${monitoring_dir}/${job_id}_rc_reference
    [[ -f ${test_dir}/${name}_topo ]] && cp ${test_dir}/${name}_topo ${input_dir}/${job_id}_topo
    [[ -f ${test_dir}/${name}_static ]] && cp ${test_dir}/${name}_static ${input_dir}/${job_id}_static
    [[ -f ${test_dir}/${name}_dynamic ]] && cp ${test_dir}/${name}_dynamic ${input_dir}/${job_id}_dynamic
    bash ${trunk_dir}/SCRIPTS/palmrun -d ${job_id} -a "d3#" -h "${configuration}" -X "$cores" -T "$cores" -v -B > ${monitoring_dir}/${job_id}_stdout 2>&1
    grep -A 99999 "Run-control output" ${monitoring_dir}/${job_id}_rc 1> ${monitoring_dir}/RC 2> /dev/null
    grep -A 99999 "Run-control output" ${monitoring_dir}/${job_id}_rc_reference 1> ${monitoring_dir}/RC_REF 2> /dev/null
    diff_output=$(diff ${monitoring_dir}/RC_REF ${monitoring_dir}/RC)
    rm ${monitoring_dir}/RC ${monitoring_dir}/RC_REF
    if [[ "${diff_output}" == "" ]]; then
        printf " passed\n"
        return 0
    else
        printf " failed\n"
        test_status="failed"
        return 1
    fi
}

palm_installer_test_suite() {
    for test_path in ${fnames}; do
        for n_core in $(get_core_array $max_cores); do
            testname_p3d=$(basename $test_path)
            palm_installer_test "${testname_p3d%_p3d}" "${n_core}"
        done
    done
    hrule
    if [[ "${test_status}" == "failed" ]]; then
        echo "Some tests failed!"
        hrule
        exit 1
    else
        echo "All found tests passed. :-)"
        rm -rf ${tester_prefix}/tmp/*
        hrule
        exit 0
    fi
}

existing_working_dir=$(readlink -f "${SCRIPT_LOCATION}/../../")
existing_trunk_dir=$(readlink -f "${SCRIPT_LOCATION}/../")

max_cores=$(get_number_of_cpu_cores)
test_id=$(date +%Y-%m-%d_%H%M%S)
do_plots=1
configuration="default"
fnames="$(echo ${existing_trunk_dir}/INSTALL/*_p3d)"

while getopts :d:h:N:pX: option
do
    case $option in
        (d) fnames="$OPTARG";;
        (h) configuration="$OPTARG";;
        (N) test_id="$OPTARG";;
        (p) do_plots=0;;
        (X) max_cores=$OPTARG;;
        (\?) printf "\n +++ unknown option $OPTARG \n";
             exit;;
    esac
done
tester_prefix=${existing_working_dir}/tests/${test_id}
trunk_dir=${tester_prefix}/trunk
test_dir=${trunk_dir}/INSTALL

mkdir -p ${tester_prefix}
cd ${tester_prefix}

ln -s ${existing_trunk_dir} trunk

configure
build
palm_installer_test_suite

Added (r3353):

#!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK

import os
import sys
import shutil
from datetime import datetime
import subprocess
import multiprocessing
import socket
import getpass
import math
import re
import threading
import queue
from contextlib import ContextDecorator

try:
    from argparse import ArgumentParser
    from argparse import RawTextHelpFormatter
except ImportError:
    sys.exit(
        'ERROR: You need argparse!\n' +
        ' install it from http://pypi.python.org/pypi/argparse\n' +
        ' or run \"pip install argparse\".'
    )

try:
    import numpy as np
except ImportError:
    sys.exit(
        'ERROR: You need numpy!\n' +
        ' install it from http://pypi.python.org/pypi/numpy\n' +
        ' or run \"python3 -m pip install numpy\".'
    )

try:
    import netCDF4
except ImportError:
    sys.exit(
        'ERROR: You need netCDF4!\n' +
        ' install it from http://pypi.python.org/pypi/netCDF4\n' +
        ' or run \"python3 -m pip install netCDF4\".'
    )

try:
    import yaml
except ImportError:
    sys.exit(
        'ERROR: You need PyYAML!\n' +
        ' install it from http://pypi.python.org/pypi/PyYAML\n' +
        ' or run \"python3 -m pip install PyYAML\".'
    )

try:
    import argcomplete
except ImportError:
    print(
        'INFO: To use Tab-completion you need argcomplete!\n' +
        ' install it from http://pypi.python.org/pypi/argcomplete\n' +
        ' or run \"python3 -m pip install argcomplete\".'
    )
    has_argcomplete = False
else:
    has_argcomplete = True

try:
    from termcolor import colored
except ImportError:
    def colored(string, color):
        return string


version = '1.0.0'

scripts_dir = os.path.dirname(os.path.realpath(__file__))
trunk_dir = os.path.realpath(os.path.join(scripts_dir, '..'))
workspace_dir = os.path.realpath(os.path.join(trunk_dir, '..'))

trunk_tests_dir = os.path.join(trunk_dir, 'TESTS')
trunk_tests_cases_dir = os.path.join(trunk_tests_dir, 'cases')
trunk_tests_builds_dir = os.path.join(trunk_tests_dir, 'builds')

tests_dir = os.path.join(workspace_dir, 'tests')

available_cores = multiprocessing.cpu_count()
terminal_columns, terminal_lines = shutil.get_terminal_size()
hline = '#' * min(terminal_columns, 300) + '\n'
table_width_intro = 12
table_width_builds = len(max([s for s in next(os.walk(trunk_tests_builds_dir))[1]], key=len)) + len('_debug')
table_width_cases = len(max([s for s in next(os.walk(trunk_tests_cases_dir))[1]], key=len))
table_width_cores = 7
table_width_total = table_width_intro + table_width_builds + table_width_cases + table_width_cores + 3

task_table_line_template = \
    '{:' + str(table_width_intro) + '} ' + \
    '{:' + str(table_width_cases) + '} ' + \
    '{:' + str(table_width_builds) + '} ' + \
    '{:' + str(table_width_cores) + '} '

config_table_line_template = \
    '{:' + str(table_width_intro) + '} ' + \
    '{:' + str(max(table_width_builds, table_width_cases)) + '} ' + \
    '{:8} '

file_table_line_template = \
    '{:' + str(table_width_intro) + '} ' + \
    '{:' + str(table_width_cases + 13) + '} '

# for debugging
exist_ok = False


class SignificantDigitsRounder:

    @staticmethod
    def _round(value, digits=10):
        if value == 0.0:
            return value
        negative = value < 0.0
        value = -value if negative else value
        rounded_value = round(value, -int(math.floor(math.log10(value))) + (digits - 1))
        rounded_value = -rounded_value if negative else rounded_value
        return rounded_value

    vectorized_round = np.vectorize(_round)

    _vectorized_round = np.vectorize(round)

    @classmethod
    def around(cls, array, digits=10):
        # TODO: divide both arrays and check decimal point
        sign_mask = np.ma.masked_where(array >= 0.0, array).mask
        pos_array = np.where(sign_mask, array, -array)
        non_zero_maks = np.ma.masked_where(pos_array == 0.0, pos_array).mask
        non_zero_array = np.where(non_zero_maks, 1.0, pos_array)
        i1 = -np.floor(np.log10(non_zero_array)).astype(int) + (digits - 1)
        rounded_non_zero_array = cls._vectorized_round(non_zero_array, i1)
        rounded_pos_array = np.where(non_zero_maks, 0.0, rounded_non_zero_array)
        return np.where(sign_mask, rounded_pos_array, -rounded_pos_array)


class Logger(ContextDecorator):

    def __init__(self, logfile_dir, logfile_name='palmtest.log', logfile_mode='a', verbose=False):
        self.logfile_path = os.path.join(logfile_dir, logfile_name)
        self.logfile_mode = logfile_mode
        self.verbose = verbose

    def __enter__(self):
        self._file = open(self.logfile_path, self.logfile_mode)
        return self

    def to_file(self, message):
        self._file.write(message)
        self._file.flush()

    def to_log(self, message):
        if self.verbose:
            sys.stdout.write(message)
            sys.stdout.flush()
        self._file.write(message)
        self._file.flush()

    def to_all(self, message):
        sys.stdout.write(message)
        sys.stdout.flush()
        self._file.write(message)
        self._file.flush()

    def __exit__(self, *exc):
        self._file.close()
        return False


class Executor:

    @staticmethod
    def _enqueue_output(out, queue):
        for line in iter(out.readline, b''):
            queue.put(line)
        out.close()

    @staticmethod
    def execute(cmd, cwd='.', verbose=True, dry_run=False):
        assert isinstance(cmd, list)
        if dry_run:
            cmd = ['echo'] + cmd
        cmd_str = ' '.join(cmd)
        p = subprocess.Popen(cmd_str, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, bufsize=1)
        q = queue.Queue()
        t = threading.Thread(target=Executor._enqueue_output, args=(p.stdout, q))
        t.daemon = True  # thread dies with the program
        t.start()

        with Logger(cwd, verbose=verbose) as logger:
            # read line without blocking
            logger.to_log(hline)
            logger.to_log('CMD: ' + cmd_str + '\n')
            logger.to_log(hline)
            while t.is_alive():
                try:
                    line = q.get_nowait()  # or q.get(timeout=.1)
                except queue.Empty:
                    pass  # print('no output yet')
                else:  # got line
                    logger.to_log(line.decode("utf-8"))
            line = True
            while line:
                try:
                    line = q.get_nowait()  # or q.get(timeout=.1)
                except queue.Empty:
                    line = False
                else:  # got line
                    logger.to_log(line.decode("utf-8"))
            logger.to_log(hline)

        rc = p.poll()
        failed = rc != 0
        return failed


class NetCDFInterface:

    def __init__(self, filename):
        self.filename = filename

    def is_healthy(self):
        try:
            self.get_run_name()
        except:
            return False
        else:
            return True

    def get_run_name(self):
        with netCDF4.Dataset(self.filename, mode='r') as netcdf:
            l = getattr(netcdf, 'title').split()
            i = l.index('run:')
            return l[i+1]

    def get_var_list(self):
        with netCDF4.Dataset(self.filename, mode='r') as netcdf:
            var_list = getattr(netcdf, 'VAR_LIST').split(';')
            var_list = filter(None, var_list)
            return sorted(var_list)

    def show_content(self):
        with netCDF4.Dataset(self.filename, mode='r') as netcdf:
            for name in netcdf.ncattrs():
                print("Global attr", name, "=", getattr(netcdf, name))
            print(netcdf)
            for v in netcdf.variables:
                print(v)

    def get_times_list(self):
        attributes, times = self.read_var('time')
        times = [str(time) for time in times]
        times = list(filter(None, times))
        return times

    def contains(self, variable):
        return variable in self.get_var_list()

    def read_var(self, variable):
        with netCDF4.Dataset(self.filename, mode='r') as netcdf:
            values = netcdf.variables[variable][:]  # extract values
            attributes = dict(
                long_name=netcdf.variables[variable].name,
                unit=netcdf.variables[variable].units,
            )
            return attributes, values


class FileComparator:

    @staticmethod
    def compare_ascii(file_path1, file_path2, start_string=None):
        try:
            with open(file_path1, 'r') as file1:
                content1 = file1.readlines()
        except OSError:
            return True, colored('[reference file not found]', 'red')
        try:
            with open(file_path2, 'r') as file2:
                content2 = file2.readlines()
        except OSError:
            return True, colored('[output file not found]', 'red')
        if start_string:
            index1 = content1.index(start_string)
            index2 = content2.index(start_string)
            comparable_content1 = content1[index1:]
            comparable_content2 = content2[index2:]
            ln = index2 + 1
        else:
            comparable_content1 = content1
            comparable_content2 = content2
            ln = 1
        if len(comparable_content1) != len(comparable_content2):
            return True, colored('[mismatch in total number of lines]', 'red')
        for line1, line2 in zip(comparable_content1, comparable_content2):
            if not line1 == line2:
                return True, colored('[mismatch in content starting line ' + str(ln) + ']', 'red')
            ln += 1
        return False, colored('[file ok]', 'green')

    @staticmethod
    def compare_netcdf(file_path1, file_path2, digits=None):
        nci1 = NetCDFInterface(file_path1)
        nci2 = NetCDFInterface(file_path2)
        if not nci1.is_healthy():
            return True, colored('[reference file not found]', 'red')
        if not nci2.is_healthy():
            return True, colored('[output file not found]', 'red')
        times_list1 = nci1.get_times_list()
        times_list2 = nci2.get_times_list()
        if not times_list1 == times_list2:
            return True, colored('[wrong time dimension]', 'red')
        else:
            time_list = times_list1
        var_list1 = nci1.get_var_list()
        var_list2 = nci2.get_var_list()
        if not var_list1 == var_list2:
            return True, colored('[wrong set of variables]', 'red')
        else:
            var_list = var_list1
        content1 = dict()
        content2 = dict()
        for var in var_list:
            attributes1, values1 = nci1.read_var(var)
            attributes2, values2 = nci2.read_var(var)
            if sorted(attributes1.keys()) != sorted(attributes2.keys()):
                return True, colored('[wrong set of attributes in variable \"'+var+'\"]', 'red')
            if isinstance(digits, int):
                values1 = SignificantDigitsRounder.around(values1, digits=digits)
                values2 = SignificantDigitsRounder.around(values2, digits=digits)
            content1[var] = values1
            content2[var] = values2
        #for decimals in
        for i, time in enumerate(time_list):
            for var in var_list:
                t_content1 = content1[var][i]
                t_content2 = content2[var][i]
                if not (t_content1 == t_content2).all():
                    if isinstance(digits, int):
                        return True, colored('[1st mismatch within ' + str(digits) + ' digits at time index ' + str(i) + ' in variable \"' + var + '\"]', 'red')
                    else:
                        return True, colored('[1st mismatch at time index ' + str(i) + ' in variable \"' + var + '\"]', 'red')
        return False, colored('[file ok]', 'green')


class OutputChecker:

    def __init__(self, test_dir, setup_name, build_name, cores, significant_digits=None, verbose=True, dry_run=False):
        self.test_dir = test_dir
        self.setup_name = setup_name
        self.build_name = build_name
        self.cores = cores
        self.significant_digits = significant_digits
        self.verbose = verbose
        self.dry_run = dry_run
        self.job_name = self.setup_name + '__' + build_name + '__' + str(self.cores)
        self.job_dir = os.path.join(self.test_dir, 'JOBS', self.job_name)
        self.ref_monitoring_dir = os.path.join(trunk_tests_cases_dir, self.setup_name, 'MONITORING')
        self.ref_output_dir = os.path.join(trunk_tests_cases_dir, self.setup_name, 'OUTPUT')
        self.res_monitoring_dir = os.path.join(self.job_dir, 'MONITORING')
        self.res_output_dir = os.path.join(self.job_dir, 'OUTPUT')
        self.failed = None

    def get_checkable_file_dicts(self):
        if os.path.isdir(self.ref_monitoring_dir):
            file_names_monitoring = [s for s in next(os.walk(self.ref_monitoring_dir))[2]]
        else:
            file_names_monitoring = []
        file_paths_monitoring = []
        for file_name in file_names_monitoring:
            file_specific_ending = file_name[len(self.setup_name):]
            file_specific_ending_split = file_specific_ending.split('.')
            postfix = file_specific_ending_split[0]
            if len(file_specific_ending_split) > 1:
                extension = file_specific_ending_split[-1]
            else:
                extension = ''
            if len(file_specific_ending_split) > 2:
                cycle_info = file_specific_ending_split[1: -1]
            else:
                cycle_info = []
            file_paths_monitoring.append(
                dict(
                    postfix=postfix,
                    cycle_info=cycle_info,
                    extension=extension,
                    ref_path=self.ref_monitoring_dir,
                    res_path=self.res_monitoring_dir,
                )
            )
        if os.path.isdir(self.ref_output_dir):
            file_names_output = [s for s in next(os.walk(self.ref_output_dir))[2]]
        else:
            file_names_output = []
        file_paths_output = []
        for file_name in file_names_output:
            file_specific_ending = file_name[len(self.setup_name):]
            file_specific_ending_split = file_specific_ending.split('.')
            postfix = file_specific_ending_split[0]
            if len(file_specific_ending_split) > 1:
                extension = file_specific_ending_split[-1]
            else:
                extension = ''
            if len(file_specific_ending_split) > 2:
                cycle_info = file_specific_ending_split[1: -1]
            else:
                cycle_info = []
            file_paths_output.append(
                dict(
                    postfix=postfix,
                    cycle_info=cycle_info,
                    extension=extension,
                    ref_path=self.ref_output_dir,
                    res_path=self.res_output_dir,
                )
            )
        return file_paths_monitoring + file_paths_output

    def check(self):
        with Logger(self.test_dir, verbose=self.verbose) as logger:
            logger.to_log('Checking output files:')
            logger.to_all('\n')
            failed = False
            for file_dict in self.get_checkable_file_dicts():
                file_failed = False
                ext_list = [file_dict['extension']] if file_dict['extension'] else []
                file_specific_ending = '.'.join([file_dict['postfix']] + file_dict['cycle_info'] + ext_list)
                logger.to_all(file_table_line_template.format('Checking:', self.setup_name + file_specific_ending))
                ref_file_path = os.path.join(file_dict['ref_path'], self.setup_name + file_specific_ending)
                res_file_path = os.path.join(file_dict['res_path'], self.job_name + file_specific_ending)
                if re.match('_rc', file_dict['postfix']) and not file_dict['extension']:
                    file_failed, message = FileComparator.compare_ascii(ref_file_path, res_file_path, start_string='Run-control output:\n')
                elif re.match('nc', file_dict['extension']):
                    if self.significant_digits is not None:
                        if re.match('_ts', file_dict['postfix']) and 'timeseries' in self.significant_digits:
                            file_failed, message = FileComparator.compare_netcdf(ref_file_path, res_file_path,
                                                                                 digits=self.significant_digits['timeseries'])
                        elif re.match('_pr', file_dict['postfix']) and 'profiles' in self.significant_digits:
                            file_failed, message = FileComparator.compare_netcdf(ref_file_path, res_file_path,
                                                                                 digits=self.significant_digits['profiles'])
                        else:
                            file_failed, message = FileComparator.compare_netcdf(ref_file_path, res_file_path,
                                                                                 digits=self.significant_digits['other'])
                    else:
                        file_failed, message = FileComparator.compare_netcdf(ref_file_path, res_file_path)
                else:
                    message = colored('[ignored]', 'blue')
                if file_failed:
                    failed = True
                logger.to_all(message + '\n')
            if self.dry_run:
                failed = False
            return failed


class PALMJob:
    """The PALM job class deals with the execution of a single PALM job"""

    @staticmethod
    def get_job_name(setup_name, build_name, cores):
        return setup_name + '__' + build_name + '__' + str(cores)

    def __init__(self, test_dir, test_case, build_name, cores, verbose=False, dry_run=False):
        self.test_dir = test_dir
        self.test_case = test_case
        self.build_name = build_name
        self.cores = cores
        self.verbose = verbose
        self.dry_run = dry_run

        self.attempted_debug = False
        self.failed_debug = None
        self.attempted_non_debug = False
        self.failed_non_debug = None

    def _link_restart_files(self, build_name):
        if self.dry_run:
            return True, colored('[restart data dry]', 'blue')
        name = self.get_job_name(self.test_case.name, build_name, self.cores)
        source_name = self.get_job_name(self.test_case.use_binary_files_from, build_name, self.cores)
        source_restart_dir = os.path.join(self.test_dir, 'JOBS', source_name, 'RESTART')
        try:
            source_data_dirs_grp = [d for r, d, f in os.walk(source_restart_dir)]
        except:
            source_data_dirs_grp = []
        if len(source_data_dirs_grp) == 0:
            source_data_dirs = []
        else:
            source_data_dirs = source_data_dirs_grp[0]
        if len(source_data_dirs) == 0 and re.match('.+_debug', build_name):
            source_build_name = build_name[:-len('_debug')]
            source_name = self.get_job_name(self.test_case.use_binary_files_from, source_build_name, self.cores)
            source_restart_dir = os.path.join(self.test_dir, 'JOBS', source_name, 'RESTART')
            try:
                source_data_dirs_grp = [d for r, d, f in os.walk(source_restart_dir)]
            except:
                source_data_dirs_grp = []
            if len(source_data_dirs_grp) == 0:
                source_data_dirs = []
            else:
                source_data_dirs = source_data_dirs_grp[0]
        if len(source_data_dirs) == 0:
            source_data_dir = 'no_restart_data'
        else:
            source_data_dir = sorted(source_data_dirs)[-1]
        source_data_dir_path = os.path.join(source_restart_dir, source_data_dir)
        if os.path.isdir(source_data_dir_path) and re.match('.+_d3d.*', source_data_dir):
            job_restart_dir = os.path.join(self.test_dir, 'JOBS', name, 'RESTART')
            os.makedirs(job_restart_dir, exist_ok=exist_ok)
            job_data_dir_path = os.path.join(job_restart_dir, name + '_d3d')
            os.symlink(source_data_dir_path, job_data_dir_path, target_is_directory=True)
            return False, colored('[linked restart data from: ' + source_data_dir_path + ']', 'green')
        else:
            return True, colored('[no restart data found]', 'red')

    def _execute(self, name, build_name):
        execution_failed = Executor.execute(
            [
                os.path.join(self.test_dir, 'trunk', 'SCRIPTS', 'palmrun'),
                '-h', '\"' + build_name + '\"',
                '-d', name,
                '-a', '\"' + ' '.join(self.test_case.activation_strings) + '\"',
                '-X', str(self.cores),
                '-T', str(self.cores),
                '-B',
                '-v',
                '-z',
            ],
            cwd=self.test_dir,
            verbose=self.verbose,
            dry_run=self.dry_run,
        )

        if self.dry_run:
            return False, colored('[execution dry]', 'blue')
        elif execution_failed:
            return True, colored('[execution failed]', 'red')
        else:
            return False, colored('[execution ok]', 'green')

    def _check(self, build_name):
        checker = OutputChecker(
            self.test_dir,
            self.test_case.name,
            build_name,
            self.cores,
            significant_digits=self.test_case.significant_digits,
            verbose=self.verbose,
            dry_run=self.dry_run,
        )
        check_failed = checker.check()

        if self.dry_run:
            return False, colored('[checks dry]', 'blue')
        if check_failed:
            return True, colored('[checks failed]', 'red')
        else:
            return False, colored('[checks ok]', 'green')

    def execute(self, debug=False):
        if debug:
            attempted = self.attempted_debug
            build_name = self.build_name + '_debug'
            failed = self.failed_debug
        else:
            attempted = self.attempted_non_debug
            build_name = self.build_name
            failed = self.failed_non_debug

        if not attempted:
            with Logger(self.test_dir, verbose=self.verbose) as logger:
                status_prefix = task_table_line_template.format('Testing:', self.test_case.name, build_name, self.cores)
                logger.to_all(status_prefix)
                logger.to_log('[started]' + '\n')
                attempted = True

                name = self.get_job_name(self.test_case.name, build_name, self.cores)

                input_dir = os.path.join(self.test_dir, 'JOBS', name, 'INPUT')
                os.makedirs(input_dir, exist_ok=exist_ok)

                # copying needs to be done per file, because input files need to be renamed
                for input_file in self.test_case.input_file_names:
                    postfix = input_file[len(self.test_case.name):]
                    src = os.path.join(self.test_case.input_dir, input_file)
                    dst = os.path.join(input_dir, name + postfix)
                    shutil.copy(src, dst)

                # copying the entire directory is ok, because source files do not need to be renamed
                user_code_dir = os.path.join(self.test_dir, 'JOBS', name, 'USER_CODE')
                if os.path.isdir(self.test_case.user_code_dir):
                    shutil.copytree(self.test_case.user_code_dir, user_code_dir, copy_function=shutil.copy)

                if self.test_case.requires_binary_files:
                    link_restart_files_failed, message = self._link_restart_files(build_name)
                    logger.to_log(status_prefix)
                    logger.to_log(message + ' ')
                    logger.to_log('\n')

                failed, message = self._execute(name, build_name)
                logger.to_log(status_prefix)
                logger.to_all(message + ' ')
                logger.to_log('\n')

                failed, message = self._check(build_name)
                logger.to_log(status_prefix)
                logger.to_log(message + ' ')

                logger.to_all('\n')

        if debug:
            self.attempted_debug = attempted
            self.failed_debug = failed
        else:
            self.attempted_non_debug = attempted
            self.failed_non_debug = failed

        return failed

    def status(self):
        return dict(
            attempted=self.attempted_non_debug or self.attempted_debug,
            failed=self.failed_non_debug and self.failed_debug,
            debugged=self.attempted_debug,
            non_debug_failed=self.failed_non_debug,
        )


class PALMBuild:
    """The PALM build class deals with configuration and execution of all required PALM builds"""

    def __init__(self, test_dir, build_name, verbose=False, dry_run=False):
        self.test_dir = test_dir
        self.build_name = build_name
        self.verbose = verbose
        self.dry_run = dry_run
        self.configured = False
        self.executed = False
        self.available = False
        self.requires_mpi = False
        self.requires_netcdf = False
        self.requires_fftw = False
        self.requires_rrtmg = False
        self.attempted_non_debug = False
        self.attempted_debug = False
        self.failed_non_debug = None
        self.failed_debug = None

    def configure(self):
        try:
            with open(os.path.join(trunk_tests_builds_dir, self.build_name, 'build_config.yml'), 'r') as f:
                build_config = yaml.load(f)
        except:
            return True, colored('[build not found]', 'red')

        if 'compiler' in build_config:
            self.compiler = build_config['compiler']
        else:
            return True, colored('[missing \"compiler\" keyword]', 'red')

        if not isinstance(self.compiler, dict):
            return True, colored('[\"compiler\" keyword must be dict]', 'red')

        if 'linker' in build_config:
            self.linker = build_config['linker']
        else:
            return True, colored('[missing \"linker\" keyword]', 'red')

        if not isinstance(self.linker, dict):
            return True, colored('[\"linker\" keyword must be dict]', 'red')

        if 'mpi_wrapper' in self.compiler:
            if 'mpi_wrapper}}' in self.compiler['mpi_wrapper']:
                self.requires_mpi = True
        else:
            return True, colored('[missing \"mpi_wrapper\" keyword]', 'red')

        if 'includes' in self.compiler:
            for include in self.compiler['includes']:
                if 'include.netcdf}}' in include:
                    self.requires_netcdf = True
                if 'include.fftw}}' in include:
                    self.requires_fftw = True
                if 'include.rrtmg}}' in include:
                    self.requires_rrtmg = True
        else:
            return True, colored('[missing \"includes\" keyword in compiler]', 'red')

        if 'options' in self.linker:
            for lib in self.linker['options']:
                if 'lib.netcdf}}' in lib:
                    self.requires_netcdf = True
                if 'lib.fftw}}' in lib:
                    self.requires_fftw = True
                if 'lib.rrtmg}}' in lib:
                    self.requires_rrtmg = True
        else:
            return True, colored('[missing \"options\" keyword in linker]', 'red')

        library_names = []
        if self.requires_netcdf:
            library_names.append('netcdf')
        if self.requires_fftw:
            library_names.append('fftw')
        if self.requires_rrtmg:
            library_names.append('rrtmg')

        if not 'executable' in self.compiler:
            return True, colored('[missing \"executable\" keyword in compiler]', 'red')

        if not 'definitions' in self.compiler:
            return True, colored('[missing \"definitions\" keyword in compiler]', 'red')

        if not 'options' in self.compiler:
            return True, colored('[missing \"options\" keyword in compiler]', 'red')

        if not 'default' in self.compiler['options']:
            return True, colored('[missing \"default\" keyword in compiler.options]', 'red')

        if not 'debug' in self.compiler['options']:
            return True, colored('[missing \"debug\" keyword in compiler.options]', 'red')

        try:
            with open(os.path.join(workspace_dir, 'palmtest.yml'), 'r') as f:
                palmtest_config = yaml.load(f)
        except:
            return True, colored('[palmtest.yml not found]', 'red')

        if 'palm_config_template' in palmtest_config:
            if isinstance(palmtest_config['palm_config_template'], str):
                custom_template = palmtest_config['palm_config_template']
        try:
            with open(os.path.join(custom_template), 'r') as palm_config_template_file:
                template = palm_config_template_file.read()
        except:
            try:
                with open(os.path.join(scripts_dir, '.palm.config.default.in'), 'r') as palm_config_template_file:
                    template = palm_config_template_file.read()
            except:
                return True, colored('[trunk/SCRIPTS/.palm.config.default.in not found]', 'red')

        template = template.replace('@CMAKE_INSTALL_PREFIX@', self.test_dir)
        template = template.replace('@PALM_HOSTNAME@', socket.gethostname())
        template = template.replace('@CMAKE_USERNAME@', getpass.getuser())
        template = template.replace('@MPI_Fortran_COMPILER@', self.compiler['mpi_wrapper'])
        template = template.replace('@CMAKE_Fortran_COMPILER@', self.compiler['executable'])
        cpp_options_str = ['-D' + s for s in self.compiler['definitions']]
        template = template.replace('@PALM_CPP_OPTIONS_STR@', ' '.join(cpp_options_str))
        template = template.replace('@PALM_CORES@', str(available_cores))
        template = template.replace('@PALM_COMPILER_OPTIONS@', '{{palmtest.compiler.options}} ' + ' '.join(self.compiler['includes']))
        template = template.replace('@PALM_LINKER_OPTIONS@', ' '.join(self.linker['options']))

        if 'environments' in palmtest_config:
            available_environments = palmtest_config['environments']
        else:
            return True, colored('[missing \"environments\" keyword in palmtest.yml]', 'red')

        if 'id' in self.compiler:
            c_id = self.compiler['id']
        else:
            return True, colored('[missing \"id\" keyword in compiler]', 'red')

        if c_id in available_environments:
            self.available = True

            environment = available_environments[c_id]
            if 'executable' not in environment:
                return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"executable\"]', 'red')
            value = environment['executable']
            if isinstance(value, str):
                template = template.replace('{{' + '.'.join([c_id, 'executable']) + '}}', value)
            if self.requires_mpi:
                if 'mpi_wrapper' not in environment:
                    return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"mpi_wrapper\"]', 'red')
                value = environment['mpi_wrapper']
                if isinstance(value, str):
                    template = template.replace('{{' + '.'.join([c_id, 'mpi_wrapper']) + '}}', value)
            if 'include' not in environment:
                return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"include\"]', 'red')
            if 'lib' not in environment:
                return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"lib\"]', 'red')
            for lib in library_names:
                if lib not in environment['include']:
                    return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"include.'+lib+'\"]', 'red')
                value = environment['include'][lib]
                if isinstance(value, str):
                    template = template.replace('{{' + '.'.join([c_id, 'include', lib]) + '}}', value)
                if lib not in environment['lib']:
                    return True, colored('[palmtest.yml environment \"' + c_id + '\" has no \"lib.'+lib+'\"]', 'red')
                value = environment['lib'][lib]
                if isinstance(value, str):
                    template = template.replace('{{' + '.'.join([c_id, 'lib', lib]) + '}}', value)

            with open(os.path.join(self.test_dir, '.palm.config.' + self.build_name), 'w') as palm_config_file:
                palm_config_file.write(
                    template.replace(
                        '{{palmtest.compiler.options}}',
                        ' '.join(self.compiler['options']['default']),
                    )
                )
            with open(os.path.join(self.test_dir, '.palm.config.' + self.build_name + '_debug'), 'w') as palm_config_file:
                palm_config_file.write(
                    template.replace(
                        '{{palmtest.compiler.options}}',
                        ' '.join(self.compiler['options']['debug']),
                    )
                )
            self.configured = True
            return False, colored('[configuration ok]', 'green')

        else:
            return True, colored('[palmtest.yml environment \"' + c_id + '\" not found]', 'red')

    def _execute(self, build_name):
        self.attempted = True
        build_failed = Executor.execute(
            [
                os.path.join(self.test_dir, 'trunk', 'SCRIPTS', 'palmbuild'),
                '-h', '\"' + build_name + '\"',
                '-v',
            ],
            cwd=self.test_dir,
            verbose=self.verbose,
            dry_run=self.dry_run,
        )

        if self.dry_run:
            return False, colored('[build dry]', 'blue')
        if build_failed:
            return True, colored('[build failed]', 'red')
        else:
            return False, colored('[build ok]', 'green')

    def build(self, debug=False):
        if debug:
            attempted = self.attempted_debug
            build_name = self.build_name + '_debug'
            failed = self.failed_debug
        else:
            attempted = self.attempted_non_debug
            build_name = self.build_name
            failed = self.failed_non_debug

        if not attempted:
            with Logger(self.test_dir, verbose=self.verbose) as logger:
                status_prefix = task_table_line_template.format('Building:', '', build_name, '')
                logger.to_all(status_prefix)
                logger.to_log('[started]' + '\n')
                attempted = True

                failed, message = self._execute(build_name)
                logger.to_log(status_prefix)
                logger.to_all(message + ' ')
                logger.to_all('\n')

        if debug:
            self.attempted_debug = attempted
            self.failed_debug = failed
        else:
            self.attempted_non_debug = attempted
            self.failed_non_debug = failed

        return failed

    def report(self):
        return dict(
            failed_debug=self.failed_debug,
            failed_non_debug=self.failed_non_debug,
        )


class PALMTestCase:
    """The PALM test case class deals with the configuration and execution of all PALM test cases"""

    def __init__(self, test_dir, name, verbose=False, dry_run=False):
        self.test_dir = test_dir
        self.name = name
        self.verbose = verbose
        self.dry_run = dry_run
        self.user_code_dir = os.path.join(trunk_tests_cases_dir, self.name, 'USER_CODE')
        self.input_dir = os.path.join(trunk_tests_cases_dir, self.name, 'INPUT')
        self.number_of_cores = []
        self.build_names = []
        self.input_file_names = []
        self.configured = False

    def configure(self, requested_build_names, requested_cores):
        f_name = os.path.join(trunk_tests_cases_dir, self.name, 'case_config.yml')
        try:
            with open(f_name, 'r') as f:
                config = yaml.load(f)
        except:
            return True, colored('[Case \"' + self.name + '\" could not be found.]', 'red')
        try:
            self.use_binary_files_from = config['use_binary_files_from']
        except:
            self.use_binary_files_from = None
        self.requires_binary_files = bool(self.use_binary_files_from)

        if 'allowed_builds' not in config:
            return True, colored('[missing \"allowed_builds\" keyword]', 'red')
        self.allowed_build_names = config['allowed_builds']

        if 'allowed_number_of_cores' not in config:
            return True, colored('[missing \"allowed_number_of_cores\" keyword]', 'red')
        self.allowed_number_of_cores = config['allowed_number_of_cores']

        if 'activation_strings' not in config:
            return True, colored('[missing \"activation_strings\" keyword]', 'red')
        self.activation_strings = config['activation_strings']

        if 'significant_digits_for_netcdf_checks' not in config:
            return True, colored('[missing \"significant_digits_for_netcdf_checks\" keyword]', 'red')
        self.significant_digits = config['significant_digits_for_netcdf_checks']

        if 'timeseries' not in config['significant_digits_for_netcdf_checks']:
            return True, colored('[missing \"timeseries\" keyword in significant_digits_for_netcdf_checks]', 'red')

        if 'profiles' not in config['significant_digits_for_netcdf_checks']:
            return True, colored('[missing \"profiles\" keyword in significant_digits_for_netcdf_checks]', 'red')

        if 'other' not in config['significant_digits_for_netcdf_checks']:
            return True, colored('[missing \"other\" keyword in significant_digits_for_netcdf_checks]', 'red')

        self.number_of_cores = sorted(set(requested_cores).intersection(self.allowed_number_of_cores))
        self.build_names = sorted(set(requested_build_names).intersection(self.allowed_build_names))
        self.input_file_names = [s for s in next(os.walk(self.input_dir))[2]]
        self.configured = True
        if len(self.number_of_cores) == 0:
            return True, colored('[no allowed cores requested]', 'blue')
        if len(self.build_names) == 0:
            return True, colored('[no allowed builds requested]', 'blue')
        if len(self.input_file_names) == 0:
            return True, colored('[no input files found]', 'red')
        return False, colored('[configuration ok]', 'green')


class PALMTest:

    def __init__(self, args):
        self.verbose = args.verbose
        self.no_auto_debug = args.no_auto_debug
        self.force_debug = args.force_debug
        self.fail_on_debug = args.fail_on_debug
        self.dry_run = args.dry_run
        self.test_id = args.test_id
        self.test_case_names = args.cases
        self.requested_build_names = args.builds
        self.requested_cores = args.cores
        self.test_case_queue = []
        self.build_database = dict()

    def prepare(self):
        self.test_dir = os.path.join(tests_dir, self.test_id)
        try:
            os.makedirs(self.test_dir, exist_ok=exist_ok)
        except:
            print('ERROR: Found existing test directory: ' + self.test_dir)
            exit(1)
        with Logger(self.test_dir, verbose=self.verbose) as logger:
            logger.to_all(hline)
            logger.to_all('This is the PALM tester (version: ' + version + ')' + '\n')
            logger.to_all(hline)
            try:
                with open(os.path.join(workspace_dir, 'palmtest.yml'), 'r') as f:
                    pass
            except:
                logger.to_all('ERROR: No palmtest.yml file was found in your working directory!\n')
                logger.to_all('INFO: A template for this file can be found at: trunk/TESTS/palmtest.yml\n')
                logger.to_all(' Please copy the template to your working directory and adjust it to your system!\n')
                exit(1)

            self.execution_trunk_dir = os.path.join(self.test_dir, 'trunk')
            os.symlink(trunk_dir, self.execution_trunk_dir)
            self.execution_jobs_dir = os.path.join(self.test_dir, 'JOBS')
            os.makedirs(self.execution_jobs_dir, exist_ok=exist_ok)

            try:
                with open(os.path.join(scripts_dir, '.palm.iofiles'), 'r') as iofiles_template_file:
                    iofiles_template = iofiles_template_file.read()
                with open(os.path.join(self.test_dir, '.palm.iofiles'), 'w') as iofiles_file:
                    iofiles_file.write(iofiles_template.replace('$fast_io_catalog', '$base_data'))
            except:
                logger.to_all('ERROR: No .palm.iofiles file was found in trunk/SCRIPTS/')
                exit(1)

            final_cores_list = list(filter(lambda x: x <= available_cores, self.requested_cores))

            logger.to_all(config_table_line_template.format('Object:', 'Name:', 'Action:') + 'Status:\n')
            logger.to_all(hline)

            if 'all' in self.requested_build_names:
                self.requested_build_names = [name for name in next(os.walk(trunk_tests_builds_dir))[1]]
            found_build_names = []
            for build_name in self.requested_build_names:
                build = PALMBuild(self.test_dir, build_name, verbose=self.verbose, dry_run=self.dry_run)
                configuration_failed, message = build.configure()
                if not configuration_failed:
                    self.build_database[build_name] = build
                    found_build_names.append(build_name)
                    logger.to_all(config_table_line_template.format('Build', build_name, 'approved'))
                    logger.to_all(message + '\n')
                else:
                    logger.to_all(config_table_line_template.format('Build', build_name, 'rejected'))
                    logger.to_all(message + '\n')
            final_build_list = found_build_names

            if 'all' in self.test_case_names:
                self.test_case_names = sorted([name for name in next(os.walk(trunk_tests_cases_dir))[1]])

            additional_initial_runs_2 = [self.test_case_names]
            while len(additional_initial_runs_2[-1]) > 0:
                additional_initial_runs_1 = []
                for test_case_name in additional_initial_runs_2[-1]:
                    test_case = PALMTestCase(self.test_dir, test_case_name, verbose=self.verbose)
                    test_case_configuration_failed, message = test_case.configure(final_build_list, final_cores_list)
                    if not test_case_configuration_failed:
                        if test_case.requires_binary_files:
                            additional_initial_runs_1.append(test_case.use_binary_files_from)
                additional_initial_runs_2.append(sorted(set(additional_initial_runs_1)))

            test_case_order = []
            for i in range(len(additional_initial_runs_2)-1):
                # low and high refer to priority
                low = additional_initial_runs_2[i]
                high = additional_initial_runs_2[i+1]
                for item in high:
                    while item in low:
                        low.remove(item)
                test_case_order.append(low)

            test_case_order_no_dublicates = []
            for test_cases in test_case_order:
                seen = set()
                seen_add = seen.add
                test_case_order_no_dublicates.append([x for x in test_cases if not (x in seen or seen_add(x))])

            approved_test_case_order = [[]] + list(reversed(test_case_order_no_dublicates))
            for i, test_cases in enumerate(list(approved_test_case_order)):
                info = 'Case (dep)' if i < len(approved_test_case_order)-1 else 'Case'
                for test_case_name in list(test_cases):
                    sys.stdout.flush()
                    test_case = PALMTestCase(self.test_dir, test_case_name, verbose=self.verbose)
                    test_case_configuration_failed, message = test_case.configure(final_build_list, final_cores_list)
                    if test_case_configuration_failed:
                        # removing as configuration failed should only apply to added dependencies
                        approved_test_case_order[i].remove(test_case_name)
                        logger.to_all(config_table_line_template.format(info, test_case_name, 'rejected'))
                        logger.to_all(message + '\n')
                    elif test_case.requires_binary_files:
                        if test_case.use_binary_files_from not in approved_test_case_order[i-1]:
                            # removing as dependency is already removed
                            approved_test_case_order[i].remove(test_case_name)
                            logger.to_all(config_table_line_template.format(info, test_case_name, 'disabled'))
                            logger.to_all(colored('[requires dependency \"' + test_case.use_binary_files_from + '\"]', 'red') + '\n')
                        else:
                            logger.to_all(config_table_line_template.format(info, test_case_name, 'approved'))
                            logger.to_all(message + '\n')
                    else:
                        logger.to_all(config_table_line_template.format(info, test_case_name, 'approved'))
                        logger.to_all(message + '\n')

            final_case_list = []
            for cases in approved_test_case_order:
                for case in cases:
                    if case not in final_case_list:
                        final_case_list.append(case)

            for build_name in final_build_list:
                build = PALMBuild(
                    self.test_dir,
                    build_name,
                    verbose=self.verbose,
                    dry_run=self.dry_run,
                )
                configuration_failed, message = build.configure()
                if not configuration_failed:
                    self.build_database[build_name] = build
                else:
                    logger.to_all(message + '\n')

            for case_name in final_case_list:
                test_case = PALMTestCase(
                    self.test_dir,
                    case_name,
                    verbose=self.verbose,
                    dry_run=self.dry_run,
                )
                test_case_configuration_failed, message = test_case.configure(final_build_list, final_cores_list)
                if not test_case_configuration_failed:
                    self.test_case_queue.append(test_case)
            logger.to_all(hline)

            r = '{:' + str(table_width_intro) + '} '

            logger.to_all(r.format('Test ID:') + self.test_id + '\n')
            logger.to_all(r.format('Builds:') + str('\n' + r.format('')).join(sorted(self.build_database.keys())) + '\n')
            logger.to_all(r.format('Cases:') + str('\n' + r.format('')).join([c.name for c in self.test_case_queue]) + '\n')
            logger.to_all(r.format('Cores:') + ' '.join([str(i) for i in final_cores_list]) + '\n')

    def _execute(self, test_case, build_name, cores):
        job = PALMJob(
            self.test_dir,
            test_case,
            build_name,
            cores,
            verbose=self.verbose,
            dry_run=self.dry_run
        )
        if self.force_debug:
            build_failed_non_debug = True
            job_failed_non_debug = True
            build_failed_debug = self.build_database[build_name].build(debug=True)
            if build_failed_debug:
                job_failed_debug = True
            else:
                job_failed_debug = job.execute(debug=True)
        elif self.no_auto_debug:
            build_failed_non_debug = self.build_database[build_name].build(debug=False)
            if build_failed_non_debug:
                job_failed_non_debug = True
            else:
                job_failed_non_debug = job.execute(debug=False)
            build_failed_debug = None
            job_failed_debug = None
        else:
            build_failed_non_debug = self.build_database[build_name].build(debug=False)
            if build_failed_non_debug:
                job_failed_non_debug = True
                build_failed_debug = self.build_database[build_name].build(debug=True)
                if build_failed_debug:
                    job_failed_debug = False
                else:
                    job_failed_debug = job.execute(debug=True)
            else:
                job_failed_non_debug = job.execute(debug=False)
                if job_failed_non_debug:
                    build_failed_debug = self.build_database[build_name].build(debug=True)
                    if build_failed_debug:
                        job_failed_debug = True
                    else:
                        job_failed_debug = job.execute(debug=True)
                else:
                    build_failed_debug = None
                    job_failed_debug = None
        return dict(
            build_failed_non_debug=build_failed_non_debug,
            job_failed_non_debug=job_failed_non_debug,
            build_failed_debug=build_failed_debug,
            job_failed_debug=job_failed_debug,
        )

    def execute(self):
        with Logger(self.test_dir, verbose=self.verbose) as logger:
            logger.to_all(hline)
            logger.to_all(task_table_line_template.format('Task:', 'Case:', 'Build:', 'Cores:') + 'Status:\n')
            logger.to_all(hline)
            self.test_report = dict()
            for test_case in self.test_case_queue:
                logger.to_log(hline)
                logger.to_file(hline)
                logger.to_file(hline)
                status_dict = dict()
                for build_name in test_case.build_names:
                    status_dict[build_name] = dict()
                    for cores in test_case.number_of_cores:
                        status_dict[build_name][cores] = self._execute(test_case, build_name, cores)
                self.test_report[test_case.name] = status_dict
                logger.to_log(hline)
                logger.to_file('\n' * 10)

    def report(self):
        with Logger(self.test_dir, verbose=self.verbose) as logger:
            logger.to_all(hline)
            r = '{:10}' + ' total: ' + '{:<3d}' + \
                ' ok: ' + colored('{:<3d}', 'green') + \
                ' debugged: ' + colored('{:<3d}', 'yellow') + \
                ' failed: ' + colored('{:<3d}', 'red')
            n_all = 0
            n_ok = 0
            n_debugged = 0
            n_failed = 0
            for build_name, build in self.build_database.items():
                status = build.report()
                b = status['failed_non_debug']
                bd = status['failed_debug']
                n_all += 1
                if not b and b is not None:
                    n_ok += 1
                if bd is not None:
                    n_debugged += 1
                if b and (bd or bd is None):
                    n_failed += 1
            logger.to_all(r.format('Builds:', n_all, n_ok, n_debugged, n_failed) + '\n')
            total_failed = n_failed
            total_debugged = n_debugged
            n_all = 0
            n_ok = 0
            n_debugged = 0
            n_failed = 0
            # {'case_name': {'build_name': {4: {'build_failed_debug': None,
            #                                   'build_failed_non_debug': False,
            #                                   'job_failed_debug': None,
            #                                   'job_failed_non_debug': False}}},
            for case_name, case in self.test_report.items():
                for build_name, build in case.items():
                    for cores, results in build.items():
                        n_all += 1
                        b = results['build_failed_non_debug']
                        bd = results['build_failed_debug']
                        j = results['job_failed_non_debug']
                        jd = results['job_failed_debug']
                        if not j:
                            n_ok += 1
                        if jd is not None:
                            n_debugged += 1
                        if j and (jd or jd is None):
                            n_failed += 1
            logger.to_all(r.format('Tests:', n_all, n_ok, n_debugged, n_failed))
            total_failed += n_failed
            total_debugged += n_debugged
        if self.fail_on_debug:
            return (total_failed + total_debugged) > 0
        else:
            return total_failed > 0


class CustomCompleter:

    def __init__(self):
        pass

    def __call__(self, prefix, parsed_args, **kwargs):
        return (i for i in self.get_items() if i.startswith(prefix))

    def get_items(self):
        return []


class CaseCompleter(CustomCompleter):

    def get_items(self):
        case_names = [name for name in next(os.walk(trunk_tests_cases_dir))[1]]
        return case_names + ['all']


class BuildCompleter(CustomCompleter):

    def get_items(self):
        build_names = [name for name in next(os.walk(trunk_tests_builds_dir))[1]]
        return build_names + ['all']


class PALMTestArgumentParser(ArgumentParser):

    def __init__(self):
        super().__init__(
            description='This is the PALM tester\n' +
                        'Developer Support: knoop@muk.uni-hannover.de',
            formatter_class=RawTextHelpFormatter,
            add_help=True,
            allow_abbrev=False,
        )
        self.add_argument(
            '--version',
            action='version',
            version=version,
        )
        self.add_argument(
            '--verbose',
            action='store_true',
            dest='verbose',
            help='Increase verbosity of terminal output.',
            required=False,
        )
        self.add_argument(
            '--no-auto-debug',
            action='store_true',
            dest='no_auto_debug',
            help='Disable automatic debugging in case of test failure.',
            required=False,
        )
        self.add_argument(
            '--force-debug',
            action='store_true',
            dest='force_debug',
            help='Force debugging regardless of test failure (ignores --no-auto-debug).',
            required=False,
        )
        self.add_argument(
            '--fail-on-debug',
            action='store_true',
            dest='fail_on_debug',
            help='Return a non-zero exit status in case debugging was required.',
            required=False,
        )
        self.add_argument(
            '--dry-run',
            action='store_true',
            dest='dry_run',
            help='Prepare and process all requested tests without actually building or executing PALM.',
            required=False,
        )
        self.add_argument(
            '--cases',
            action='store',
            dest='cases',
            default=['all'],
            help='A list of test cases to be executed. (default: %(default)s)',
            nargs='+',
            required=False,
            type=str,
            metavar='STR',
        ).completer = CaseCompleter()
        self.add_argument(
            '--builds',
            action='store',
            dest='builds',
            default=['all'],
            help='A list of builds to be executed. (default: %(default)s)',
            nargs='+',
            required=False,
            type=str,
            metavar='STR',
        ).completer = BuildCompleter()
        self.add_argument(
            '--cores',
            action='store',
            dest='cores',
            default=[i for i in range(1, available_cores+1)],
            choices=[i for i in range(1, available_cores+1)],
            help='The number of cores tests are supposed to be executed on. (default: %(default)s)',
            nargs='+',
            required=False,
            type=int,
            metavar='INT',
        )
        self.add_argument(
            '--test-id',
            action='store',
            dest='test_id',
            default=datetime.now().strftime('%Y-%m-%d_%H:%M:%S.%f'),
            help='An individual test id. (default: current timestamp)',
            required=False,
            type=str,
            metavar='STR',
        )


if __name__ == '__main__':
    parser = PALMTestArgumentParser()
    if has_argcomplete:
        argcomplete.autocomplete(parser)
    args = parser.parse_args()
    palm_test = PALMTest(args)
    palm_test.prepare()
    palm_test.execute()
    failed = palm_test.report()
    exit(1 if failed else 0)
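A minimal invocation sketch of the new script, assuming a working directory that contains trunk/ and an adjusted palmtest.yml (template: trunk/TESTS/palmtest.yml). The option names are those defined by PALMTestArgumentParser above; the case and build names below are hypothetical placeholders.

    # Hypothetical example: run two cases against one build on 1, 2 and 4 cores.
    trunk/SCRIPTS/palmtest --cases case_a case_b --builds default --cores 1 2 4 --verbose

    # Return a non-zero exit status if any test required an automatic debug rerun.
    trunk/SCRIPTS/palmtest --cases all --builds all --fail-on-debug

    # Prepare and report all requested tests without building or executing PALM.
    trunk/SCRIPTS/palmtest --dry-run

As in the __main__ block above, the process exits with status 1 if report() counts any failure, and with 0 otherwise.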