
test_api.py

  1. """
  2. mbed SDK
  3. Copyright (c) 2011-2014 ARM Limited
  4. Licensed under the Apache License, Version 2.0 (the "License");
  5. you may not use this file except in compliance with the License.
  6. You may obtain a copy of the License at
  7. http://www.apache.org/licenses/LICENSE-2.0
  8. Unless required by applicable law or agreed to in writing, software
  9. distributed under the License is distributed on an "AS IS" BASIS,
  10. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  11. See the License for the specific language governing permissions and
  12. limitations under the License.
  13. Author: Przemyslaw Wirkus <[email protected]>
  14. """
import os
import re
import sys
import json
import uuid
import pprint
import random
import optparse
import datetime
import threading
from types import ListType
from colorama import Fore, Back, Style
from prettytable import PrettyTable
from time import sleep, time
from Queue import Queue, Empty
from os.path import join, exists, basename
from threading import Thread, Lock
from subprocess import Popen, PIPE

# Imports related to mbed build api
from workspace_tools.tests import TESTS
from workspace_tools.tests import TEST_MAP
from workspace_tools.paths import BUILD_DIR
from workspace_tools.paths import HOST_TESTS
from workspace_tools.utils import ToolException
from workspace_tools.utils import construct_enum
from workspace_tools.targets import TARGET_MAP
from workspace_tools.test_db import BaseDBAccess
from workspace_tools.build_api import build_project, build_mbed_libs, build_lib
from workspace_tools.build_api import get_target_supported_toolchains
from workspace_tools.build_api import write_build_report
from workspace_tools.libraries import LIBRARIES, LIBRARY_MAP
from workspace_tools.toolchains import TOOLCHAIN_BIN_PATH
from workspace_tools.test_exporters import ReportExporter, ResultExporterType
import workspace_tools.host_tests.host_tests_plugins as host_tests_plugins

try:
    import mbed_lstools
except ImportError:
    # mbed_lstools is optional; MUT auto-detection features simply stay disabled
    pass

class ProcessObserver(Thread):
    def __init__(self, proc):
        Thread.__init__(self)
        self.proc = proc
        self.queue = Queue()
        self.daemon = True
        self.active = True
        self.start()

    def run(self):
        while self.active:
            c = self.proc.stdout.read(1)
            self.queue.put(c)

    def stop(self):
        self.active = False
        try:
            self.proc.terminate()
        except Exception, _:
            pass

class SingleTestExecutor(threading.Thread):
    """ Example: Single test class in separate thread usage
    """
    def __init__(self, single_test):
        self.single_test = single_test
        threading.Thread.__init__(self)

    def run(self):
        start = time()
        # Execute tests depending on options and filter applied
        test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report = self.single_test.execute()
        elapsed_time = time() - start

        # Human readable summary
        if not self.single_test.opts_suppress_summary:
            # prints well-formed summary with results (SQL table like)
            print self.single_test.generate_test_summary(test_summary, shuffle_seed)
        if self.single_test.opts_test_x_toolchain_summary:
            # prints well-formed summary with results (SQL table like)
            # table shows test x toolchain test result matrix
            print self.single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
        print "Completed in %.2f sec" % (elapsed_time)

class SingleTestRunner(object):
    """ Object wrapper for single test run which may involve multiple MUTs
    """
    RE_DETECT_TESTCASE_RESULT = None

    # Return codes for test script
    TEST_RESULT_OK = "OK"
    TEST_RESULT_FAIL = "FAIL"
    TEST_RESULT_ERROR = "ERROR"
    TEST_RESULT_UNDEF = "UNDEF"
    TEST_RESULT_IOERR_COPY = "IOERR_COPY"
    TEST_RESULT_IOERR_DISK = "IOERR_DISK"
    TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
    TEST_RESULT_TIMEOUT = "TIMEOUT"
    TEST_RESULT_NO_IMAGE = "NO_IMAGE"
    TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"

    GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
    TEST_LOOPS_LIST = []    # Redefines no. of loops per test_id
    TEST_LOOPS_DICT = {}    # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count }

    muts = {}       # MUTs descriptor (from external file)
    test_spec = {}  # Test specification (from external file)

    # mbed test suite -> SingleTestRunner
    TEST_RESULT_MAPPING = {"success"      : TEST_RESULT_OK,
                           "failure"      : TEST_RESULT_FAIL,
                           "error"        : TEST_RESULT_ERROR,
                           "ioerr_copy"   : TEST_RESULT_IOERR_COPY,
                           "ioerr_disk"   : TEST_RESULT_IOERR_DISK,
                           "ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
                           "timeout"      : TEST_RESULT_TIMEOUT,
                           "no_image"     : TEST_RESULT_NO_IMAGE,
                           "end"          : TEST_RESULT_UNDEF,
                           "mbed_assert"  : TEST_RESULT_MBED_ASSERT
                           }

    def __init__(self,
                 _global_loops_count=1,
                 _test_loops_list=None,
                 _muts={},
                 _clean=False,
                 _opts_db_url=None,
                 _opts_log_file_name=None,
                 _opts_report_html_file_name=None,
                 _opts_report_junit_file_name=None,
                 _opts_report_build_file_name=None,
                 _test_spec={},
                 _opts_goanna_for_mbed_sdk=None,
                 _opts_goanna_for_tests=None,
                 _opts_shuffle_test_order=False,
                 _opts_shuffle_test_seed=None,
                 _opts_test_by_names=None,
                 _opts_peripheral_by_names=None,
                 _opts_test_only_peripheral=False,
                 _opts_test_only_common=False,
                 _opts_verbose_skipped_tests=False,
                 _opts_verbose_test_result_only=False,
                 _opts_verbose=False,
                 _opts_firmware_global_name=None,
                 _opts_only_build_tests=False,
                 _opts_parallel_test_exec=False,
                 _opts_suppress_summary=False,
                 _opts_test_x_toolchain_summary=False,
                 _opts_copy_method=None,
                 _opts_mut_reset_type=None,
                 _opts_jobs=None,
                 _opts_waterfall_test=None,
                 _opts_extend_test_timeout=None):
        """ Let's try hard to init this object
        """
        from colorama import init
        init()

        PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
        self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
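        # Illustrative note (token names taken from TEST_RESULT_MAPPING above):
        # PATTERN expands to something like "\{(success|failure|error|...)\}", so a
        # MUT printing the token "{success}" is detected by RE_DETECT_TESTCASE_RESULT
        # and later mapped to TEST_RESULT_OK.
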
        # Settings related to test loops counters
        try:
            _global_loops_count = int(_global_loops_count)
        except:
            _global_loops_count = 1
        if _global_loops_count < 1:
            _global_loops_count = 1

        self.GLOBAL_LOOPS_COUNT = _global_loops_count
        self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
        self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)

        self.shuffle_random_seed = 0.0
        self.SHUFFLE_SEED_ROUND = 10

        # MUT list and test specification storage
        self.muts = _muts
        self.test_spec = _test_spec

        # Settings passed e.g. from command line
        self.opts_db_url = _opts_db_url
        self.opts_log_file_name = _opts_log_file_name
        self.opts_report_html_file_name = _opts_report_html_file_name
        self.opts_report_junit_file_name = _opts_report_junit_file_name
        self.opts_report_build_file_name = _opts_report_build_file_name
        self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
        self.opts_goanna_for_tests = _opts_goanna_for_tests
        self.opts_shuffle_test_order = _opts_shuffle_test_order
        self.opts_shuffle_test_seed = _opts_shuffle_test_seed
        self.opts_test_by_names = _opts_test_by_names
        self.opts_peripheral_by_names = _opts_peripheral_by_names
        self.opts_test_only_peripheral = _opts_test_only_peripheral
        self.opts_test_only_common = _opts_test_only_common
        self.opts_verbose_skipped_tests = _opts_verbose_skipped_tests
        self.opts_verbose_test_result_only = _opts_verbose_test_result_only
        self.opts_verbose = _opts_verbose
        self.opts_firmware_global_name = _opts_firmware_global_name
        self.opts_only_build_tests = _opts_only_build_tests
        self.opts_parallel_test_exec = _opts_parallel_test_exec
        self.opts_suppress_summary = _opts_suppress_summary
        self.opts_test_x_toolchain_summary = _opts_test_x_toolchain_summary
        self.opts_copy_method = _opts_copy_method
        self.opts_mut_reset_type = _opts_mut_reset_type
        self.opts_jobs = _opts_jobs if _opts_jobs is not None else 1
        self.opts_waterfall_test = _opts_waterfall_test
        self.opts_extend_test_timeout = _opts_extend_test_timeout
        self.opts_clean = _clean

        # File / screen logger initialization
        self.logger = CLITestLogger(file_name=self.opts_log_file_name)  # Default test logger

        # Database related initializations
        self.db_logger = factory_db_logger(self.opts_db_url)
        self.db_logger_build_id = None  # Build ID (database index of build_id table)
        # Let's connect to database to set up credentials and confirm database is ready
        if self.db_logger:
            self.db_logger.connect_url(self.opts_db_url)  # Save db access info inside db_logger object
            if self.db_logger.is_connected():
                # Get hostname and uname so we can use it as build description
                # when creating new build_id in external database
                (_hostname, _uname) = self.db_logger.get_hostname()
                _host_location = os.path.dirname(os.path.abspath(__file__))
                build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY
                self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
                self.db_logger.disconnect()

    def dump_options(self):
        """ Returns a data structure with common settings passed to SingleTestRunner.
            It can be used for example to fill _extra fields in database storing test suite single run data
            Example:
                data = self.dump_options()
            or
                data_str = json.dumps(self.dump_options())
        """
        result = {"db_url" : str(self.opts_db_url),
                  "log_file_name" : str(self.opts_log_file_name),
                  "shuffle_test_order" : str(self.opts_shuffle_test_order),
                  "shuffle_test_seed" : str(self.opts_shuffle_test_seed),
                  "test_by_names" : str(self.opts_test_by_names),
                  "peripheral_by_names" : str(self.opts_peripheral_by_names),
                  "test_only_peripheral" : str(self.opts_test_only_peripheral),
                  "test_only_common" : str(self.opts_test_only_common),
                  "verbose" : str(self.opts_verbose),
                  "firmware_global_name" : str(self.opts_firmware_global_name),
                  "only_build_tests" : str(self.opts_only_build_tests),
                  "copy_method" : str(self.opts_copy_method),
                  "mut_reset_type" : str(self.opts_mut_reset_type),
                  "jobs" : str(self.opts_jobs),
                  "extend_test_timeout" : str(self.opts_extend_test_timeout),
                  "_dummy" : ''
                  }
        return result

    def shuffle_random_func(self):
        return self.shuffle_random_seed

    def is_shuffle_seed_float(self):
        """ Returns true if the shuffle seed can be converted to float
        """
        result = True
        try:
            float(self.shuffle_random_seed)
        except ValueError:
            result = False
        return result

    # This will store target / toolchain specific properties
    test_suite_properties_ext = {}  # target : toolchain
    # Here we store test results
    test_summary = []
    # Here we store test results in extended data structure
    test_summary_ext = {}
    execute_thread_slice_lock = Lock()

    def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report):
        for toolchain in toolchains:
            # Toolchain specific build successes and failures
            build_report[toolchain] = {
                "mbed_failure": False,
                "library_failure": False,
                "library_build_passing": [],
                "library_build_failing": [],
                "test_build_passing": [],
                "test_build_failing": []
            }
            # print target, toolchain

            # Test suite properties returned to external tools like CI
            test_suite_properties = {}
            test_suite_properties['jobs'] = self.opts_jobs
            test_suite_properties['clean'] = clean
            test_suite_properties['target'] = target
            test_suite_properties['test_ids'] = ', '.join(test_ids)
            test_suite_properties['toolchain'] = toolchain
            test_suite_properties['shuffle_random_seed'] = self.shuffle_random_seed

            # print '=== %s::%s ===' % (target, toolchain)
            # Let's build our test
            if target not in TARGET_MAP:
                print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Target platform not found' % (target))
                continue

            T = TARGET_MAP[target]
            build_mbed_libs_options = ["analyze"] if self.opts_goanna_for_mbed_sdk else None
            clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk or clean or self.opts_clean else None

            try:
                build_mbed_libs_result = build_mbed_libs(T,
                                                         toolchain,
                                                         options=build_mbed_libs_options,
                                                         clean=clean_mbed_libs_options,
                                                         jobs=self.opts_jobs)
                if not build_mbed_libs_result:
                    print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target' % (T.name, toolchain))
                    continue
            except ToolException:
                print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s' % (target, toolchain))
                build_report[toolchain]["mbed_failure"] = True
                # return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
                continue

            build_dir = join(BUILD_DIR, "test", target, toolchain)

            test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
            test_suite_properties['build_dir'] = build_dir
            test_suite_properties['skipped'] = []

            # Enumerate through all tests and shuffle test order if requested
            test_map_keys = sorted(TEST_MAP.keys())
            if self.opts_shuffle_test_order:
                random.shuffle(test_map_keys, self.shuffle_random_func)
                # Update database with shuffle seed if applicable
                if self.db_logger:
                    self.db_logger.reconnect()
                    if self.db_logger.is_connected():
                        self.db_logger.update_build_id_info(self.db_logger_build_id, _shuffle_seed=self.shuffle_random_func())
                        self.db_logger.disconnect()

            if self.db_logger:
                self.db_logger.reconnect()
                if self.db_logger.is_connected():
                    # Update MUTs and Test Specification in database
                    self.db_logger.update_build_id_info(self.db_logger_build_id, _muts=self.muts, _test_spec=self.test_spec)
                    # Update Extra information in database (some options passed to test suite)
                    self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options()))
                    self.db_logger.disconnect()

            valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids)
            skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)

            for skipped_test_id in skipped_test_map_keys:
                test_suite_properties['skipped'].append(skipped_test_id)

            # First pass through all tests and determine which libraries need to be built
            libraries = set()
            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]
                # Detect which lib should be added to test
                # Some libs have to be compiled, like RTOS or ETH
                for lib in LIBRARIES:
                    if lib['build_dir'] in test.dependencies:
                        libraries.add(lib['id'])

            build_project_options = ["analyze"] if self.opts_goanna_for_tests else None
            clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None

            # Build all required libraries
            for lib_id in libraries:
                try:
                    build_lib(lib_id,
                              T,
                              toolchain,
                              options=build_project_options,
                              verbose=self.opts_verbose,
                              clean=clean_mbed_libs_options,
                              jobs=self.opts_jobs)
                    build_report[toolchain]["library_build_passing"].append(lib_id)
                except ToolException:
                    print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s' % (lib_id))
                    build_report[toolchain]["library_failure"] = True
                    build_report[toolchain]["library_build_failing"].append(lib_id)
                    # return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
                    continue

            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]

                test_suite_properties['test.libs.%s.%s.%s' % (target, toolchain, test_id)] = ', '.join(libraries)

                # TODO: move the two loops below to a separate function
                INC_DIRS = []
                for lib_id in libraries:
                    if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
                        INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])

                MACROS = []
                for lib_id in libraries:
                    if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
                        MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
                MACROS.append('TEST_SUITE_TARGET_NAME="%s"' % target)
                MACROS.append('TEST_SUITE_TEST_ID="%s"' % test_id)
                test_uuid = uuid.uuid4()
                MACROS.append('TEST_SUITE_UUID="%s"' % str(test_uuid))

                project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
                try:
                    path = build_project(test.source_dir,
                                         join(build_dir, test_id),
                                         T,
                                         toolchain,
                                         test.dependencies,
                                         options=build_project_options,
                                         clean=clean_project_options,
                                         verbose=self.opts_verbose,
                                         name=project_name,
                                         macros=MACROS,
                                         inc_dirs=INC_DIRS,
                                         jobs=self.opts_jobs)
                    build_report[toolchain]["test_build_passing"].append(test_id)
                except ToolException:
                    project_name_str = project_name if project_name is not None else test_id
                    print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s' % (project_name_str))
                    build_report[toolchain]["test_build_failing"].append(test_id)
                    # return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
                    continue

                if self.opts_only_build_tests:
                    # With this option we are skipping testing phase
                    continue

                # Test duration can be increased by global value
                test_duration = test.duration
                if self.opts_extend_test_timeout is not None:
                    test_duration += self.opts_extend_test_timeout

                # For an automated test the duration acts as a timeout after
                # which the test gets interrupted
                test_spec = self.shape_test_request(target, path, test_id, test_duration)
                test_loops = self.get_test_loop_count(test_id)

                test_suite_properties['test.duration.%s.%s.%s' % (target, toolchain, test_id)] = test_duration
                test_suite_properties['test.loops.%s.%s.%s' % (target, toolchain, test_id)] = test_loops
                test_suite_properties['test.path.%s.%s.%s' % (target, toolchain, test_id)] = path

                # read MUTs, test specification and perform tests
                single_test_result, detailed_test_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)

                # Append test results to global test summary
                if single_test_result is not None:
                    self.test_summary.append(single_test_result)

                # Prepare extended test results data structure (it can be used to generate detailed test report)
                if toolchain not in self.test_summary_ext:
                    self.test_summary_ext[toolchain] = {}  # test_summary_ext : toolchain
                if target not in self.test_summary_ext[toolchain]:
                    self.test_summary_ext[toolchain][target] = {}  # test_summary_ext : toolchain : target
                if test_id not in self.test_summary_ext[toolchain][target]:
                    self.test_summary_ext[toolchain][target][test_id] = detailed_test_results  # test_summary_ext : toolchain : target : test_id

            test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
            self.test_suite_properties_ext[target][toolchain] = test_suite_properties

        # return self.test_summary, self.shuffle_random_seed, test_summary_ext, self.test_suite_properties_ext
        q.put(target + '_'.join(toolchains))
        return

    def execute(self):
        clean = self.test_spec.get('clean', False)
        test_ids = self.test_spec.get('test_ids', [])
        q = Queue()

        # Generate seed for shuffle if it was not provided on the command line
        self.shuffle_random_seed = round(random.random(), self.SHUFFLE_SEED_ROUND)
        if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
            self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)

        build_reports = []

        if self.opts_parallel_test_exec:
            ###################################################################
            # Experimental, parallel test execution per singletest instance.
            ###################################################################
            execute_threads = []  # Threads used to build mbed SDK, libs, test cases and execute tests

            # Note: We are building here in parallel for each target separately!
            # So we are not building the same thing multiple times and compilers
            # in separate threads do not collide.
            # Inside execute_thread_slice() the function handle() will be called to
            # get information about available MUTs (per target).
            for target, toolchains in self.test_spec['targets'].iteritems():
                self.test_suite_properties_ext[target] = {}
                cur_build_report = {}
                t = threading.Thread(target=self.execute_thread_slice, args=(q, target, toolchains, clean, test_ids, cur_build_report))
                build_reports.append({"target": target, "report": cur_build_report})
                t.daemon = True
                t.start()
                execute_threads.append(t)

            for t in execute_threads:
                q.get()  # t.join() would block some threads because we should not wait in any order for thread end
        else:
            # Serialized (not parallel) test execution
            for target, toolchains in self.test_spec['targets'].iteritems():
                if target not in self.test_suite_properties_ext:
                    self.test_suite_properties_ext[target] = {}

                cur_build_report = {}
                self.execute_thread_slice(q, target, toolchains, clean, test_ids, cur_build_report)
                build_reports.append({"target": target, "report": cur_build_report})
                q.get()

        build_report = []

        for target_build_report in build_reports:
            cur_report = {
                "target": target_build_report["target"],
                "passing": [],
                "failing": []
            }

            for toolchain in sorted(target_build_report["report"], key=target_build_report["report"].get):
                print "%s - %s" % (target_build_report["target"], toolchain)
                report = target_build_report["report"][toolchain]

                if report["mbed_failure"]:
                    cur_report["failing"].append({
                        "toolchain": toolchain,
                        "project": "mbed library"
                    })
                else:
                    for failing_library in report["library_build_failing"]:
                        cur_report["failing"].append({
                            "toolchain": toolchain,
                            "project": "Library::%s" % (failing_library)
                        })

                    for passing_library in report["library_build_passing"]:
                        cur_report["passing"].append({
                            "toolchain": toolchain,
                            "project": "Library::%s" % (passing_library)
                        })

                    for passing_test in report["test_build_passing"]:
                        cur_report["passing"].append({
                            "toolchain": toolchain,
                            "project": "Test::%s" % (passing_test)
                        })

                    for failing_test in report["test_build_failing"]:
                        cur_report["failing"].append({
                            "toolchain": toolchain,
                            "project": "Test::%s" % (failing_test)
                        })

            build_report.append(cur_report)

        if self.db_logger:
            self.db_logger.reconnect()
            if self.db_logger.is_connected():
                self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
                self.db_logger.disconnect()

        return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, build_report

    def get_valid_tests(self, test_map_keys, target, toolchain, test_ids):
        valid_test_map_keys = []

        for test_id in test_map_keys:
            test = TEST_MAP[test_id]
            if self.opts_test_by_names and test_id not in self.opts_test_by_names.split(','):
                continue

            if test_ids and test_id not in test_ids:
                continue

            if self.opts_test_only_peripheral and not test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s' % (target))
                continue

            if self.opts_peripheral_by_names and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names.split(',')]):
                # We will skip tests not forced with -p option
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s' % (target))
                continue

            if self.opts_test_only_common and test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s' % (target))
                continue

            if test.automated and test.is_supported(target, toolchain):
                if test.peripherals is None and self.opts_only_build_tests:
                    # When users are using the 'build only' flag and the test does not
                    # have specified peripherals we can allow test building by default
                    pass
                elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names.split(','):
                    # If we force peripheral with option -p we expect test
                    # to pass even if peripheral is not in MUTs file.
                    pass
                elif not self.is_peripherals_available(target, test.peripherals):
                    if self.opts_verbose_skipped_tests:
                        if test.peripherals:
                            print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s' % (",".join(test.peripherals), target))
                        else:
                            print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s' % (test_id, target))
                    continue

                # The test has made it through all the filters, so add it to the valid tests list
                valid_test_map_keys.append(test_id)

        return valid_test_map_keys

    def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
        # NOTE: This will not preserve order
        return list(set(all_test_map_keys) - set(valid_test_map_keys))
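    # A quick sketch of the set difference above (test ids are made up):
    #   all_test_map_keys   = ['MBED_A1', 'MBED_A2', 'MBED_A3']
    #   valid_test_map_keys = ['MBED_A2']
    #   -> ['MBED_A1', 'MBED_A3'], in arbitrary order
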
    def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
        """ Prints well-formed summary with results (SQL table like)
            table shows test x toolchain test result matrix
        """
        RESULT_INDEX = 0
        TARGET_INDEX = 1
        TOOLCHAIN_INDEX = 2
        TEST_INDEX = 3
        DESC_INDEX = 4

        unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
        unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
        unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
        unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)

        result = "Test summary:\n"
        for target in unique_targets:
            result_dict = {}  # test : { toolchain : result }
            unique_target_toolchains = []
            for test in test_summary:
                if test[TARGET_INDEX] == target:
                    if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
                        unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
                    if test[TEST_INDEX] not in result_dict:
                        result_dict[test[TEST_INDEX]] = {}
                    result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]

            pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
            pt = PrettyTable(pt_cols)
            for col in pt_cols:
                pt.align[col] = "l"
            pt.padding_width = 1  # One space between column edges and contents (default)

            for test in unique_tests:
                if test in result_dict:
                    test_results = result_dict[test]
                    if test in unique_test_desc:
                        row = [target, test, unique_test_desc[test]]
                        for toolchain in unique_toolchains:
                            if toolchain in test_results:
                                row.append(test_results[toolchain])
                        pt.add_row(row)
            result += pt.get_string()
            shuffle_seed_text = "Shuffle Seed: %.*f" % (self.SHUFFLE_SEED_ROUND,
                                                        shuffle_seed if shuffle_seed else self.shuffle_random_seed)
            result += "\n%s" % (shuffle_seed_text if self.opts_shuffle_test_order else '')
        return result

    def generate_test_summary(self, test_summary, shuffle_seed=None):
        """ Prints well-formed summary with results (SQL table like)
            table shows target x test result matrix
        """
        result = "Test summary:\n"
        # Pretty table package is used to print results
        pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
                          "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
        pt.align["Result"] = "l"  # Left align
        pt.align["Target"] = "l"  # Left align
        pt.align["Toolchain"] = "l"  # Left align
        pt.align["Test ID"] = "l"  # Left align
        pt.align["Test Description"] = "l"  # Left align
        pt.padding_width = 1  # One space between column edges and contents (default)

        result_dict = {self.TEST_RESULT_OK : 0,
                       self.TEST_RESULT_FAIL : 0,
                       self.TEST_RESULT_ERROR : 0,
                       self.TEST_RESULT_UNDEF : 0,
                       self.TEST_RESULT_IOERR_COPY : 0,
                       self.TEST_RESULT_IOERR_DISK : 0,
                       self.TEST_RESULT_IOERR_SERIAL : 0,
                       self.TEST_RESULT_NO_IMAGE : 0,
                       self.TEST_RESULT_TIMEOUT : 0,
                       self.TEST_RESULT_MBED_ASSERT : 0
                       }

        for test in test_summary:
            if test[0] in result_dict:
                result_dict[test[0]] += 1
            pt.add_row(test)
        result += pt.get_string()
        result += "\n"

        # Print result count
        result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
        shuffle_seed_text = "Shuffle Seed: %.*f\n" % (self.SHUFFLE_SEED_ROUND,
                                                      shuffle_seed if shuffle_seed else self.shuffle_random_seed)
        result += "\n%s" % (shuffle_seed_text if self.opts_shuffle_test_order else '')
        return result

    def test_loop_list_to_dict(self, test_loops_str):
        """ Transforms test_id=X,test_id=X,test_id=X into dictionary { test_id : test_id_loops_count }
        """
        result = {}
        if test_loops_str:
            test_loops = test_loops_str.split(',')
            for test_loop in test_loops:
                test_loop_count = test_loop.split('=')
                if len(test_loop_count) == 2:
                    _test_id, _test_loops = test_loop_count
                    try:
                        _test_loops = int(_test_loops)
                    except:
                        continue
                    result[_test_id] = _test_loops
        return result
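    # Example input/output for the parser above (ids and counts are made up):
    #   test_loop_list_to_dict("MBED_A1=2,MBED_A9=10") -> {'MBED_A1': 2, 'MBED_A9': 10}
    # Entries without '=' or with a non-numeric loop count are silently skipped.
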
    def get_test_loop_count(self, test_id):
        """ Returns the number of loops for a test (selected by test_id).
            If the test is not in the list of redefined loop counts the default value is used.
        """
        result = self.GLOBAL_LOOPS_COUNT
        if test_id in self.TEST_LOOPS_DICT:
            result = self.TEST_LOOPS_DICT[test_id]
        return result

    def delete_file(self, file_path):
        """ Remove file from the system
        """
        result = True
        result_msg = ""
        try:
            os.remove(file_path)
        except Exception, e:
            result_msg = e
            result = False
        return result, result_msg

    def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
        """ Determines the MUT's mbed disk/port and copies the binary to the
            target. The test is invoked afterwards.
        """
        data = json.loads(test_spec)
        # Get test information, image and test timeout
        test_id = data['test_id']
        test = TEST_MAP[test_id]
        test_description = TEST_MAP[test_id].get_description()
        image = data["image"]
        duration = data.get("duration", 10)

        # Find a suitable MUT:
        mut = None
        for id, m in self.muts.iteritems():
            if m['mcu'] == data['mcu']:
                mut = m
                break

        if mut is None:
            print "Error: No Mbed available: MUT[%s]" % data['mcu']
            return None

        disk = mut.get('disk')
        port = mut.get('port')

        if disk is None or port is None:
            return None

        target_by_mcu = TARGET_MAP[mut['mcu']]
        # Some extra stuff can be declared in MUTs structure
        reset_type = mut.get('reset_type')        # reboot.txt, reset.txt, shutdown.txt
        reset_tout = mut.get('reset_tout')        # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
        image_dest = mut.get('image_dest')        # Image file destination DISK + IMAGE_DEST + BINARY_NAME
        images_config = mut.get('images_config')  # Available images selection via config file
        mobo_config = mut.get('mobo_config')      # Available board configuration selection e.g. core selection etc.
        copy_method = mut.get('copy_method')      # Copy method used to flash the image onto the MUT

        # When the build and test system were separate, this was relative to a
        # base network folder base path: join(NETWORK_BASE_PATH, )
        image_path = image

        if self.db_logger:
            self.db_logger.reconnect()

        selected_copy_method = self.opts_copy_method if copy_method is None else copy_method

        # Tests can be looped so test results must be stored for the same test
        test_all_result = []
        # Test results for one test ran a few times
        detailed_test_results = {}  # { Loop_number: { results ... } }

        for test_index in range(test_loops):
            # Host test execution
            start_host_exec_time = time()

            single_test_result = self.TEST_RESULT_UNDEF  # single test run result
            _copy_method = selected_copy_method

            if not exists(image_path):
                single_test_result = self.TEST_RESULT_NO_IMAGE
                elapsed_time = 0
                # No host test was run, so there is no measured duration or detected timeout
                single_testduration = 0
                single_timeout = duration
                single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s' % image_path)
                print single_test_output
            else:
                # Host test execution
                start_host_exec_time = time()

                host_test_verbose = self.opts_verbose_test_result_only or self.opts_verbose
                host_test_reset = self.opts_mut_reset_type if reset_type is None else reset_type
                host_test_result = self.run_host_test(test.host_test,
                                                      image_path, disk, port, duration,
                                                      micro=target_name,
                                                      verbose=host_test_verbose,
                                                      reset=host_test_reset,
                                                      reset_tout=reset_tout,
                                                      copy_method=selected_copy_method,
                                                      program_cycle_s=target_by_mcu.program_cycle_s())
                single_test_result, single_test_output, single_testduration, single_timeout = host_test_result

            # Store test result
            test_all_result.append(single_test_result)
            total_elapsed_time = time() - start_host_exec_time  # Test time with copy (flashing) / reset
            elapsed_time = single_testduration  # Time of single test case execution after reset

            detailed_test_results[test_index] = {
                'single_test_result' : single_test_result,
                'single_test_output' : single_test_output,
                'target_name' : target_name,
                'toolchain_name' : toolchain_name,
                'test_id' : test_id,
                'test_description' : test_description,
                'elapsed_time' : round(elapsed_time, 2),
                'duration' : single_timeout,
                'copy_method' : _copy_method,
            }

            print self.print_test_result(single_test_result, target_name, toolchain_name,
                                         test_id, test_description, elapsed_time, single_timeout)

            # Update database entries for ongoing test
            if self.db_logger and self.db_logger.is_connected():
                test_type = 'SingleTest'
                self.db_logger.insert_test_entry(self.db_logger_build_id,
                                                 target_name,
                                                 toolchain_name,
                                                 test_type,
                                                 test_id,
                                                 single_test_result,
                                                 single_test_output,
                                                 elapsed_time,
                                                 single_timeout,
                                                 test_index)

            # If we perform waterfall test we test until we get OK and then stop testing
            if self.opts_waterfall_test and single_test_result == self.TEST_RESULT_OK:
                break

        if self.db_logger:
            self.db_logger.disconnect()

        return (self.shape_global_test_loop_result(test_all_result),
                target_name,
                toolchain_name,
                test_id,
                test_description,
                round(elapsed_time, 2),
                single_timeout,
                self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results

    def print_test_result(self, test_result, target_name, toolchain_name,
                          test_id, test_description, elapsed_time, duration):
        """ Use specific convention to print test result and related data
        """
        tokens = []
        tokens.append("TargetTest")
        tokens.append(target_name)
        tokens.append(toolchain_name)
        tokens.append(test_id)
        tokens.append(test_description)
        separator = "::"

        time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
        result = separator.join(tokens) + " [" + test_result + "]" + time_info
        return Fore.MAGENTA + result + Fore.RESET

    def shape_test_loop_ok_result_count(self, test_all_result):
        """ Reformats list of results to simple string
        """
        test_loop_count = len(test_all_result)
        test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK)
        return "%d/%d" % (test_loop_ok_result, test_loop_count)
    def shape_global_test_loop_result(self, test_all_result):
        """ Reformats list of results to simple string
        """
        result = self.TEST_RESULT_FAIL
        if all(test_all_result[0] == res for res in test_all_result):
            result = test_all_result[0]
        return result
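    # The aggregated result is the common value only when all loops agree, e.g.:
    #   ['OK', 'OK']      -> 'OK'
    #   ['OK', 'TIMEOUT'] -> 'FAIL' (mixed results are reported as failure)
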
    def run_host_test(self, name, image_path, disk, port, duration,
                      micro=None, reset=None, reset_tout=None,
                      verbose=False, copy_method=None, program_cycle_s=None):
        """ Creates a new process with a host test configured for a particular test case.
            The function also polls the process for serial port activity to catch all data
            printed by the test runner and the host test during test execution.
        """

        def get_char_from_queue(obs):
            """ Get character from queue in a safe way
            """
            try:
                c = obs.queue.get(block=True, timeout=0.5)
            except Empty, _:
                c = None
            return c

        def filter_queue_char(c):
            """ Filters out non-ASCII characters from serial port
            """
            if ord(c) not in range(128):
                c = ' '
            return c

        def get_test_result(output):
            """ Parse test 'output' data
            """
            result = self.TEST_RESULT_TIMEOUT
            for line in "".join(output).splitlines():
                search_result = self.RE_DETECT_TESTCASE_RESULT.search(line)
                if search_result and len(search_result.groups()):
                    result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
                    break
            return result

        def get_auto_property_value(property_name, line):
            """ Scans auto detection line from MUT and returns scanned parameter 'property_name'
                Returns string
            """
            result = None
            if re.search("HOST: Property '%s'" % property_name, line) is not None:
                property = re.search("HOST: Property '%s' = '([\w\d _]+)'" % property_name, line)
                if property is not None and len(property.groups()) == 1:
                    result = property.groups()[0]
            return result

        # print "{%s} port:%s disk:%s" % (name, port, disk),
        cmd = ["python",
               '%s.py' % name,
               '-d', disk,
               '-f', '"%s"' % image_path,
               '-p', port,
               '-t', str(duration),
               '-C', str(program_cycle_s)]
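        # For illustration only (script name, disk, port and image path are assumed
        # values, with 'name' coming from the test definition): the resulting command
        # resembles
        #   python default_auto.py -d E: -f "build/test/.../basic.bin" -p COM4 -t 10 -C 1.5
        # and is executed from the HOST_TESTS directory below.
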
        # Add extra parameters to host_test
        if copy_method is not None:
            cmd += ["-c", copy_method]
        if micro is not None:
            cmd += ["-m", micro]
        if reset is not None:
            cmd += ["-r", reset]
        if reset_tout is not None:
            cmd += ["-R", str(reset_tout)]

        if verbose:
            print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET
            print "Test::Output::Start"

        proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
        obs = ProcessObserver(proc)
        update_once_flag = {}  # Stores flags checking if some auto-parameter was already set
        line = ''
        output = []
        start_time = time()
        while (time() - start_time) < (2 * duration):
            c = get_char_from_queue(obs)
            if c:
                if verbose:
                    sys.stdout.write(c)
                c = filter_queue_char(c)
                output.append(c)
                # Give the mbed under test a way to communicate the end of the test
                if c in ['\n', '\r']:
                    # Checking for auto-detection information from the test about MUT reset moment
                    if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
                        # We will update this marker only once to prevent multiple resets
                        update_once_flag['reset_target'] = True
                        start_time = time()

                    # Checking for auto-detection information from the test about timeout
                    auto_timeout_val = get_auto_property_value('timeout', line)
                    if 'timeout' not in update_once_flag and auto_timeout_val is not None:
                        # We will update this marker only once to prevent multiple timeout updates
                        update_once_flag['timeout'] = True
                        duration = int(auto_timeout_val)

                    # Detect mbed assert:
                    if 'mbed assertation failed: ' in line:
                        output.append('{{mbed_assert}}')
                        break

                    # Check for test end
                    if '{end}' in line:
                        break
                    line = ''
                else:
                    line += c
        end_time = time()
        testcase_duration = end_time - start_time  # Test case duration from reset to {end}

        c = get_char_from_queue(obs)

        if c:
            if verbose:
                sys.stdout.write(c)
            c = filter_queue_char(c)
            output.append(c)

        if verbose:
            print "Test::Output::Finish"
        # Stop test process
        obs.stop()

        result = get_test_result(output)
        return (result, "".join(output), testcase_duration, duration)

    def is_peripherals_available(self, target_mcu_name, peripherals=None):
        """ Checks if specified target should run specific peripheral test case defined in MUTs file
        """
        if peripherals is not None:
            peripherals = set(peripherals)
        for id, mut in self.muts.iteritems():
            # Target MCU name check
            if mut["mcu"] != target_mcu_name:
                continue
            # Peripherals check
            if peripherals is not None:
                if 'peripherals' not in mut:
                    continue
                if not peripherals.issubset(set(mut['peripherals'])):
                    continue
            return True
        return False
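    # Sketch of a MUTs entry the check above would match (field values assumed):
    #   "1": {"mcu": "LPC1768", "port": "COM4", "disk": "E:\\",
    #         "peripherals": ["TMP102", "digital_loop"]}
    # A test requiring {'TMP102'} is accepted; one requiring {'SD'} is rejected.
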
    def shape_test_request(self, mcu, image_path, test_id, duration=10):
        """ Prepares a JSON structure describing the test specification
        """
        test_spec = {
            "mcu": mcu,
            "image": image_path,
            "duration": duration,
            "test_id": test_id,
        }
        return json.dumps(test_spec)
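    # The JSON produced above resembles (values assumed):
    #   {"mcu": "LPC1768", "image": "build/test/.../basic.bin",
    #    "duration": 10, "test_id": "MBED_A1"}
    # handle() decodes this structure back with json.loads().
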
def get_unique_value_from_summary(test_summary, index):
    """ Gets sorted list of unique values from test summary column 'index'
    """
    result = []
    for test in test_summary:
        target_name = test[index]
        if target_name not in result:
            result.append(target_name)
    return sorted(result)

def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
    """ Gets a dictionary of unique (key, value) pairs from test summary columns
    """
    result = {}
    for test in test_summary:
        key = test[index_key]
        val = test[index_val]
        if key not in result:
            result[key] = val
    return result

def show_json_file_format_error(json_spec_filename, line, column):
    """ Prints the broken JSON content around the error position
    """
    with open(json_spec_filename) as data_file:
        line_no = 1
        for json_line in data_file:
            if line_no + 5 >= line:  # Print last few lines before error
                print 'Line %d:\t' % line_no + json_line,  # Prints line
            if line_no == line:
                print ' ' * len('Line %d:' % line_no) + '\t', '-' * (column - 1) + '^'
                break
            line_no += 1

def json_format_error_defect_pos(json_error_msg):
    """ Gets first error line and column in JSON file format.
        Parsed from exception thrown by json.loads() string
    """
    result = None
    line, column = 0, 0
    # Line value search
    line_search = re.search('line [0-9]+', json_error_msg)
    if line_search is not None:
        ls = line_search.group().split(' ')
        if len(ls) == 2:
            line = int(ls[1])
            # Column position search
            column_search = re.search('column [0-9]+', json_error_msg)
            if column_search is not None:
                cs = column_search.group().split(' ')
                if len(cs) == 2:
                    column = int(cs[1])
                    result = [line, column]
    return result
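# For example, a json.loads() ValueError message such as
#   "Expecting ',' delimiter: line 12 column 3 (char 514)"
# yields [12, 3]; if either position cannot be parsed the function returns None.
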
def get_json_data_from_file(json_spec_filename, verbose=False):
    """ Loads JSON formatted data from a file into a data structure
    """
    result = None
    try:
        with open(json_spec_filename) as data_file:
            try:
                result = json.load(data_file)
            except ValueError as json_error_msg:
                result = None
                print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg)
                # We can print where error occurred inside JSON file if we can parse exception msg
                json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
                if json_format_defect_pos is not None:
                    line = json_format_defect_pos[0]
                    column = json_format_defect_pos[1]
                    print
                    show_json_file_format_error(json_spec_filename, line, column)
    except IOError as fileopen_error_msg:
        print 'JSON file %s not opened. Reason: %s' % (json_spec_filename, fileopen_error_msg)
        print
    if verbose and result:
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(result)
    return result
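# Typical usage (file name assumed, matching the singletest command line options):
#   muts = get_json_data_from_file('muts_all.json', verbose=True)
# Returns the parsed dictionary, or None if the file is missing or malformed.
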
def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filter=None):
    """ Prints MUTs configuration passed to test script for verboseness
    """
    muts_info_cols = []
    # We need to check all unique properties for each defined MUT
    for k in json_data:
        mut_info = json_data[k]
        for mut_property in mut_info:
            if mut_property not in muts_info_cols:
                muts_info_cols.append(mut_property)

    # Prepare pretty table object to display all MUTs
    pt_cols = ["index"] + muts_info_cols
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"

    # Add rows to pretty print object
    for k in json_data:
        row = [k]
        mut_info = json_data[k]

        add_row = True
        if platform_filter and 'mcu' in mut_info:
            add_row = re.search(platform_filter, mut_info['mcu']) is not None
        if add_row:
            for col in muts_info_cols:
                cell_val = mut_info[col] if col in mut_info else None
                if type(cell_val) == ListType:
                    cell_val = join_delim.join(cell_val)
                row.append(cell_val)
            pt.add_row(row)
    return pt.get_string()


def print_test_configuration_from_json(json_data, join_delim=", "):
    """ Prints test specification configuration passed to test script, for verbosity
    """
    toolchains_info_cols = []
    # We need to check all toolchains for each device
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            toolchains = targets[target]
            for toolchain in toolchains:
                if toolchain not in toolchains_info_cols:
                    toolchains_info_cols.append(toolchain)

    # Prepare pretty table object to display test specification
    pt_cols = ["mcu"] + sorted(toolchains_info_cols)
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"

    # { target : [conflicted toolchains] }
    toolchain_conflicts = {}
    toolchain_path_conflicts = []
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            target_supported_toolchains = get_target_supported_toolchains(target)
            if not target_supported_toolchains:
                target_supported_toolchains = []
            target_name = target if target in TARGET_MAP else "%s*"% target
            row = [target_name]
            toolchains = targets[target]

            for toolchain in sorted(toolchains_info_cols):
                # Check for conflicts: target vs toolchain
                conflict = False
                conflict_path = False
                if toolchain in toolchains:
                    if toolchain not in target_supported_toolchains:
                        conflict = True
                        if target not in toolchain_conflicts:
                            toolchain_conflicts[target] = []
                        toolchain_conflicts[target].append(toolchain)
                # Add marker inside table about target usage / conflict
                cell_val = 'Yes' if toolchain in toolchains else '-'
                if conflict:
                    cell_val += '*'
                # Check for conflicts: toolchain vs toolchain path
                if toolchain in TOOLCHAIN_BIN_PATH:
                    toolchain_path = TOOLCHAIN_BIN_PATH[toolchain]
                    if not os.path.isdir(toolchain_path):
                        conflict_path = True
                        if toolchain not in toolchain_path_conflicts:
                            toolchain_path_conflicts.append(toolchain)
                if conflict_path:
                    cell_val += '#'
                row.append(cell_val)
            pt.add_row(row)

    # Generate result string
    result = pt.get_string()    # Test specification table
    if toolchain_conflicts or toolchain_path_conflicts:
        result += "\n"
        result += "Toolchain conflicts:\n"
        for target in toolchain_conflicts:
            if target not in TARGET_MAP:
                result += "\t* Target %s unknown\n"% (target)
            conflict_target_list = join_delim.join(toolchain_conflicts[target])
            suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
            result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, suffix)

        for toolchain in toolchain_path_conflicts:
            # Let's check toolchain configuration
            if toolchain in TOOLCHAIN_BIN_PATH:
                toolchain_path = TOOLCHAIN_BIN_PATH[toolchain]
                if not os.path.isdir(toolchain_path):
                    result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
    return result
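
# Example usage (illustrative sketch; the test spec below is hypothetical,
# shaped like the test_spec.json format this function expects):
#
#   test_spec = {"targets": {"LPC1768": ["ARM", "GCC_ARM"],
#                            "K64F": ["ARM"]}}
#   print print_test_configuration_from_json(test_spec)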

def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',', platform_filter=None):
    """ Generates table summary with all test cases and additional test cases
        information using pretty print functionality. Allows test suite user to
        see test cases
    """
    # Get all unique test ID prefixes
    unique_test_id = []
    for test in TESTS:
        split = test['id'].split('_')[:-1]
        test_id_prefix = '_'.join(split)
        if test_id_prefix not in unique_test_id:
            unique_test_id.append(test_id_prefix)
    unique_test_id.sort()
    counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
    counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)

    test_properties = ['id',
                       'automated',
                       'description',
                       'peripherals',
                       'host_test',
                       'duration'] if cols is None else cols

    # All tests status table print
    pt = PrettyTable(test_properties)
    for col in test_properties:
        pt.align[col] = "l"
    pt.align['duration'] = "r"

    counter_all = 0
    counter_automated = 0
    pt.padding_width = 1    # One space between column edges and contents (default)

    for test_id in sorted(TEST_MAP.keys()):
        if platform_filter is not None:
            # Filter out test IDs which do not match the regex
            if re.search(platform_filter, test_id) is None:
                continue
        row = []
        test = TEST_MAP[test_id]
        split = test_id.split('_')[:-1]
        test_id_prefix = '_'.join(split)

        for col in test_properties:
            col_value = test[col]
            if type(test[col]) == ListType:
                col_value = join_delim.join(test[col])
            elif test[col] == None:
                col_value = "-"
            row.append(col_value)
        if test['automated'] == True:
            counter_dict_test_id_types[test_id_prefix] += 1
            counter_automated += 1
        pt.add_row(row)
        # Update counters
        counter_all += 1
        counter_dict_test_id_types_all[test_id_prefix] += 1
    result = pt.get_string()
    result += "\n\n"

    if result_summary and not platform_filter:
        # Automation result summary
        test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"

        percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
        str_progress = progress_bar(percent_progress, 75)
        pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
        result += "Automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"

        # Test automation coverage table print
        test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['id'] = "l"
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"
        for unique_id in unique_test_id:
            # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
            percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
            str_progress = progress_bar(percent_progress, 75)
            row = [unique_id,
                   counter_dict_test_id_types[unique_id],
                   counter_dict_test_id_types_all[unique_id],
                   percent_progress,
                   "[" + str_progress + "]"]
            pt.add_row(row)
        result += "Test automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"
    return result
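
# Example usage (illustrative sketch; 'MBED' is a hypothetical regex matched
# against test IDs, and the coverage summary is skipped):
#
#   print get_avail_tests_summary_table(result_summary=False, platform_filter='MBED')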

def progress_bar(percent_progress, saturation=0):
    """ This function creates a progress bar with an optional simple saturation mark
    """
    step = int(percent_progress / 2)    # Scale percentage to bar width (0 - 50 characters)
    str_progress = '#' * step + '.' * int(50 - step)
    # Saturation marker: '!' if the bar has not yet reached position 38, '|' otherwise
    c = '!' if str_progress[38] == '.' else '|'
    if saturation > 0:
        saturation = saturation / 2
        str_progress = str_progress[:saturation] + c + str_progress[saturation:]
    return str_progress
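
# Example usage (illustrative sketch; 40% progress with a saturation mark at 75%
# inserts a '!' at character 37 of the 50-character bar):
#
#   print progress_bar(40.0, saturation=75)
#   # -> ####################.................!.............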

def singletest_in_cli_mode(single_test):
    """ Runs SingleTestRunner object in CLI (Command line interface) mode
    """
    start = time()
    # Execute tests depending on options and filter applied
    test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report = single_test.execute()
    elapsed_time = time() - start

    # Human readable summary
    if not single_test.opts_suppress_summary:
        # Prints well-formed summary with results (SQL table like)
        print single_test.generate_test_summary(test_summary, shuffle_seed)
    if single_test.opts_test_x_toolchain_summary:
        # Prints well-formed summary with results (SQL table like)
        # Table shows test x toolchain test result matrix
        print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
    print "Completed in %.2f sec"% (elapsed_time)

    # Store extra reports in files
    if single_test.opts_report_html_file_name:
        # Export results in form of HTML report to separate file
        report_exporter = ReportExporter(ResultExporterType.HTML)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_junit_file_name:
        # Export results in form of JUnit XML report to separate file
        report_exporter = ReportExporter(ResultExporterType.JUNIT)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_build_file_name:
        # Export build results as HTML report to separate file
        write_build_report(build_report, 'tests_build/report.html', single_test.opts_report_build_file_name)
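
# Example usage (illustrative sketch; SingleTestRunner takes many constructor
# arguments, elided here, which are derived from parsed CLI options):
#
#   single_test = SingleTestRunner(...)
#   singletest_in_cli_mode(single_test)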

class TestLogger():
    """ Super-class for logging and printing ongoing events for test suite pass
    """
    def __init__(self, store_log=True):
        """ We can control if logger actually stores log in memory
            or just handles all log entries immediately
        """
        self.log = []
        self.log_to_file = False
        self.log_file_name = None
        self.store_log = store_log

        self.LogType = construct_enum(INFO='Info',
                                      WARN='Warning',
                                      NOTIF='Notification',
                                      ERROR='Error',
                                      EXCEPT='Exception')

        self.LogToFileAttr = construct_enum(CREATE=1,    # Create or overwrite existing log file
                                            APPEND=2)    # Append to existing log file

    def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
        """ Log one line of text
        """
        log_timestamp = time()
        log_entry = {'log_type' : LogType,
                     'log_timestamp' : log_timestamp,
                     'log_line' : log_line,
                     '_future' : None
                     }
        # Store log in memory
        if self.store_log:
            self.log.append(log_entry)
        return log_entry

class CLITestLogger(TestLogger):
    """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
    """
    def __init__(self, store_log=True, file_name=None):
        TestLogger.__init__(self)
        self.log_file_name = file_name
        #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S'    # Full date and time
        self.TIMESTAMP_FORMAT = '%H:%M:%S'              # Time only

    def log_print(self, log_entry, timestamp=True):
        """ Returns on-screen formatted log entry
        """
        ts = log_entry['log_timestamp']
        timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
        log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
        return timestamp_str + log_line_str

    def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
        """ Logs a line; if log file output was specified, the log line will be
            appended at the end of the log file
        """
        log_entry = TestLogger.log_line(self, LogType, log_line)
        log_line_str = self.log_print(log_entry, timestamp)
        if self.log_file_name is not None:
            try:
                with open(self.log_file_name, 'a') as f:
                    f.write(log_line_str + line_delim)
            except IOError:
                pass
        return log_line_str
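
# Example usage (illustrative sketch; 'test.log' is a hypothetical file name):
#
#   logger = CLITestLogger(file_name='test.log')
#   print logger.log_line(logger.LogType.INFO, 'MUT flashed, starting host test')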

def factory_db_logger(db_url):
    """ Returns a database driver instance, chosen by the database type supplied in the database connection string db_url
    """
    if db_url is not None:
        from workspace_tools.test_mysql import MySQLDBAccess
        connection_info = BaseDBAccess().parse_db_connection_string(db_url)
        if connection_info is not None:
            # Reuse the parsed connection string instead of parsing it twice
            (db_type, username, password, host, db_name) = connection_info
            if db_type == 'mysql':
                return MySQLDBAccess()
    return None
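
# Example usage (illustrative sketch; credentials and host are hypothetical,
# the connect() argument order follows detect_database_verbose() below):
#
#   db = factory_db_logger('mysql://username:[email protected]/db_name')
#   if db is not None:
#       db.connect('127.0.0.1', 'username', 'password', 'db_name')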

def detect_database_verbose(db_url):
    """ Runs the database detection sequence in verbose mode (prints) to check if the database connection string is valid
    """
    result = BaseDBAccess().parse_db_connection_string(db_url)
    if result is not None:
        # Parsing passed
        (db_type, username, password, host, db_name) = result
        #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
        # Let's try to connect
        db_ = factory_db_logger(db_url)
        if db_ is not None:
            print "Connecting to database '%s'..."% db_url,
            db_.connect(host, username, password, db_name)
            if db_.is_connected():
                print "ok"
                print "Detecting database..."
                print db_.detect_database(verbose=True)
                print "Disconnecting...",
                db_.disconnect()
                print "done"
        else:
            print "Database type '%s' unknown"% db_type
    else:
        print "Parse error: '%s' - DB Url error"% (db_url)

def get_module_avail(module_name):
    """ This function returns True if module_name is an already imported module
    """
    return module_name in sys.modules.keys()

def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
    """ Function detects all mbed-enabled devices connected to the host and generates an artificial MUTs file.
        If the function fails to auto-detect devices it will return an empty dictionary.

        if get_module_avail('mbed_lstools'):
            mbeds = mbed_lstools.create()
            mbeds_list = mbeds.list_mbeds()

        @param mbeds_list list of mbeds captured from mbed_lstools
        @param platform_name_filter you can filter results by 'platform_name' with a list of accepted targets
    """
    result = {}    # Should be in muts_all.json format
    # Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
    # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
    index = 1
    for mut in mbeds_list:
        m = {'mcu' : mut['platform_name'],
             'port' : mut['serial_port'],
             'disk' : mut['mount_point'],
             'peripherals' : []    # No peripheral detection
             }
        result[index] = m
        index += 1
    return result
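
# Example usage (illustrative sketch; requires the optional mbed_lstools module,
# following the snippet in the docstring above):
#
#   if get_module_avail('mbed_lstools'):
#       mbeds = mbed_lstools.create()
#       muts = get_autodetected_MUTS(mbeds.list_mbeds())
#       print print_muts_configuration_from_json(muts)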

def get_autodetected_TEST_SPEC(mbeds_list,
                               use_default_toolchain=True,
                               use_supported_toolchains=False,
                               toolchain_filter=None,
                               platform_name_filter=None):
    """ Function detects all mbed-enabled devices connected to the host and generates an artificial test_spec file.
        If the function fails to auto-detect devices it will return an empty 'targets' test_spec description.

        use_default_toolchain - if True add default toolchain to test_spec
        use_supported_toolchains - if True add all supported toolchains to test_spec
        toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
    """
    result = {'targets': {} }

    for mut in mbeds_list:
        mcu = mut['platform_name']
        if platform_name_filter is None or (platform_name_filter and mut['platform_name'] in platform_name_filter):
            if mcu in TARGET_MAP:
                default_toolchain = TARGET_MAP[mcu].default_toolchain
                supported_toolchains = TARGET_MAP[mcu].supported_toolchains

                # Decide which toolchains should be added to test specification toolchain pool for each target
                toolchains = []
                if use_default_toolchain:
                    toolchains.append(default_toolchain)
                if use_supported_toolchains:
                    toolchains += supported_toolchains
                if toolchain_filter is not None:
                    all_toolchains = supported_toolchains + [default_toolchain]
                    for toolchain in toolchain_filter.split(','):
                        if toolchain in all_toolchains:
                            toolchains.append(toolchain)
                result['targets'][mcu] = list(set(toolchains))
    return result
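
# Example usage (illustrative sketch; builds a test spec for every detected board
# using each board's default and supported toolchains):
#
#   if get_module_avail('mbed_lstools'):
#       mbeds = mbed_lstools.create()
#       test_spec = get_autodetected_TEST_SPEC(mbeds.list_mbeds(),
#                                              use_supported_toolchains=True)
#       print print_test_configuration_from_json(test_spec)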

def get_default_test_options_parser():
    """ Get common test script options used by CLI, web services etc.
    """
    parser = optparse.OptionParser()
    parser.add_option('-i', '--tests',
                      dest='test_spec_filename',
                      metavar="FILE",
                      help='Points to file with test specification')

    parser.add_option('-M', '--MUTS',
                      dest='muts_spec_filename',
                      metavar="FILE",
                      help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')

    parser.add_option("-j", "--jobs",
                      dest='jobs',
                      metavar="NUMBER",
                      type="int",
                      help="Define number of compilation jobs. Default value is 1")

    if get_module_avail('mbed_lstools'):
        # Additional features available when mbed_lstools is installed on host and imported
        # mbed_lstools allows users to detect mbed-enabled devices connected to the host
        parser.add_option('', '--auto',
                          dest='auto_detect',
                          metavar=False,
                          action="store_true",
                          help='Use mbed-ls module to detect all connected mbed devices')

        parser.add_option('', '--tc',
                          dest='toolchains_filter',
                          help="Toolchain filter for --auto option. Use toolchain names separated by comma, 'default' or 'all' to select toolchains")

    parser.add_option('', '--clean',
                      dest='clean',
                      metavar=False,
                      action="store_true",
                      help='Clean the build directory')

    parser.add_option('-P', '--only-peripherals',
                      dest='test_only_peripheral',
                      default=False,
                      action="store_true",
                      help='Test only peripherals declared for MUT and skip common tests')

    parser.add_option('-C', '--only-commons',
                      dest='test_only_common',
                      default=False,
                      action="store_true",
                      help='Test only board internals. Skip peripheral tests and perform common tests.')

    parser.add_option('-n', '--test-by-names',
                      dest='test_by_names',
                      help='Runs only tests enumerated in this switch. Use comma to separate test case names.')

    parser.add_option('-p', '--peripheral-by-names',
                      dest='peripheral_by_names',
                      help='Forces discovery of particular peripherals. Use comma to separate peripheral names.')

    copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
    copy_methods_str = "Plugin support: " + ', '.join(copy_methods)

    parser.add_option('-c', '--copy-method',
                      dest='copy_method',
                      help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)

    reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
    reset_methods_str = "Plugin support: " + ', '.join(reset_methods)

    parser.add_option('-r', '--reset-type',
                      dest='mut_reset_type',
                      default=None,
                      help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)

    parser.add_option('-g', '--goanna-for-tests',
                      dest='goanna_for_tests',
                      metavar=False,
                      action="store_true",
                      help='Run Goanna static analysis tool for tests. (Project will be rebuilt)')

    parser.add_option('-G', '--goanna-for-sdk',
                      dest='goanna_for_mbed_sdk',
                      metavar=False,
                      action="store_true",
                      help='Run Goanna static analysis tool for mbed SDK. (Project will be rebuilt)')

    parser.add_option('-s', '--suppress-summary',
                      dest='suppress_summary',
                      default=False,
                      action="store_true",
                      help='Suppresses display of well-formatted table with test results')

    parser.add_option('-t', '--test-summary',
                      dest='test_x_toolchain_summary',
                      default=False,
                      action="store_true",
                      help='Displays well-formatted table with test x toolchain test result per target')

    parser.add_option('-A', '--test-automation-report',
                      dest='test_automation_report',
                      default=False,
                      action="store_true",
                      help='Prints information about all tests and exits')

    parser.add_option('-R', '--test-case-report',
                      dest='test_case_report',
                      default=False,
                      action="store_true",
                      help='Prints information about all test cases and exits')

    parser.add_option("-S", "--supported-toolchains",
                      action="store_true",
                      dest="supported_toolchains",
                      default=False,
                      help="Displays supported matrix of MCUs and toolchains")

    parser.add_option("-O", "--only-build",
                      action="store_true",
                      dest="only_build_tests",
                      default=False,
                      help="Only build tests, skips actual test procedures (flashing etc.)")

    parser.add_option('', '--parallel',
                      dest='parallel_test_exec',
                      default=False,
                      action="store_true",
                      help='Experimental: executes test runners in parallel for all MUTs connected to your host (speeds up test result collection)')

    parser.add_option('', '--config',
                      dest='verbose_test_configuration_only',
                      default=False,
                      action="store_true",
                      help='Displays full test specification and MUTs configuration and exits')

    parser.add_option('', '--loops',
                      dest='test_loops_list',
                      help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')

    parser.add_option('', '--global-loops',
                      dest='test_global_loops_value',
                      help='Set global number of test loops per test. Default value is set to 1')

    parser.add_option('-W', '--waterfall',
                      dest='waterfall_test',
                      default=False,
                      action="store_true",
                      help='Used with --loops or --global-loops options. Tests until an OK result occurs and assumes the test passed.')

    parser.add_option('-N', '--firmware-name',
                      dest='firmware_global_name',
                      help='Set global name for all produced projects. Note, proper file extension will be added by build scripts.')

    parser.add_option('-u', '--shuffle',
                      dest='shuffle_test_order',
                      default=False,
                      action="store_true",
                      help='Shuffles test execution order')

    parser.add_option('', '--shuffle-seed',
                      dest='shuffle_test_seed',
                      default=None,
                      help='Shuffle seed (if you want to reproduce your shuffle order please use the seed provided in the test summary)')

    parser.add_option('-f', '--filter',
                      dest='general_filter_regex',
                      default=None,
                      help='For some commands you can use a filter to filter out results')

    parser.add_option('', '--inc-timeout',
                      dest='extend_test_timeout',
                      metavar="NUMBER",
                      type="int",
                      help='You can increase global timeout for each test by specifying additional test timeout in seconds')

    parser.add_option('', '--db',
                      dest='db_url',
                      help='This specifies what database the test suite uses to store its state. To pass DB connection info use a database connection string. Example: \'mysql://username:[email protected]/db_name\'')

    parser.add_option('-l', '--log',
                      dest='log_file_name',
                      help='Log events to external file (note: not all console entries may be visible in log file)')

    parser.add_option('', '--report-html',
                      dest='report_html_file_name',
                      help='You can log test suite results in form of HTML report')

    parser.add_option('', '--report-junit',
                      dest='report_junit_file_name',
                      help='You can log test suite results in form of JUnit compliant XML report')

    parser.add_option("", "--report-build",
                      dest="report_build_file_name",
                      help="Output the build results to an HTML file")

    parser.add_option('', '--verbose-skipped',
                      dest='verbose_skipped_tests',
                      default=False,
                      action="store_true",
                      help='Prints some extra information about skipped tests')

    parser.add_option('-V', '--verbose-test-result',
                      dest='verbose_test_result_only',
                      default=False,
                      action="store_true",
                      help='Prints test serial output')

    parser.add_option('-v', '--verbose',
                      dest='verbose',
                      default=False,
                      action="store_true",
                      help='Verbose mode (prints some extra information)')

    parser.add_option('', '--version',
                      dest='version',
                      default=False,
                      action="store_true",
                      help='Prints script version and exits')
    return parser
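
# Example usage (illustrative sketch; parses command line options the way a CLI
# entry point using this module would):
#
#   parser = get_default_test_options_parser()
#   (opts, args) = parser.parse_args()
#   if opts.verbose:
#       print "Verbose mode enabled"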