KLL Compiler
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
This repo is archived. You can view files and clone it, but cannot push or open issues/pull-requests.

stage.py 61KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078
  1. #!/usr/bin/env python3
  2. '''
  3. KLL Compiler Stage Definitions
  4. '''
  5. # Copyright (C) 2016 by Jacob Alexander
  6. #
  7. # This file is free software: you can redistribute it and/or modify
  8. # it under the terms of the GNU General Public License as published by
  9. # the Free Software Foundation, either version 3 of the License, or
  10. # (at your option) any later version.
  11. #
  12. # This file is distributed in the hope that it will be useful,
  13. # but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. # GNU General Public License for more details.
  16. #
  17. # You should have received a copy of the GNU General Public License
  18. # along with this file. If not, see <http://www.gnu.org/licenses/>.
  19. ### Imports ###
  20. from multiprocessing.dummy import Pool as ThreadPool
  21. import io
  22. import multiprocessing
  23. import os
  24. import re
  25. import sys
  26. import common.context as context
  27. import common.expression as expression
  28. import common.file as file
  29. import emitters.emitters as emitters
  30. from funcparserlib.lexer import make_tokenizer, Token, LexerError
  31. from funcparserlib.parser import many, oneplus, maybe, skip, NoParseError, Parser_debug
  32. ### Decorators ###
  33. ## Print Decorator Variables
  34. ERROR = '\033[5;1;31mERROR\033[0m:'
  35. WARNING = '\033[5;1;33mWARNING\033[0m:'
  36. ansi_escape = re.compile(r'\x1b[^m]*m')
  37. ### Classes ###
  38. class ControlStage:
  39. '''
  40. Top-level Stage
  41. Controls the order in which each stage is processed
  42. '''
  43. def __init__( self ):
  44. '''
  45. Initialize stage objects and control variables
  46. '''
  47. # Initialized in process order
  48. # NOTE: Only unique classes in this list, otherwise stage() will get confused
  49. self.stages = [
  50. CompilerConfigurationStage( self ),
  51. FileImportStage( self ),
  52. PreprocessorStage( self ),
  53. OperationClassificationStage( self ),
  54. OperationSpecificsStage( self ),
  55. OperationOrganizationStage( self ),
  56. DataOrganizationStage( self ),
  57. DataFinalizationStage( self ),
  58. DataAnalysisStage( self ),
  59. CodeGenerationStage( self ),
  60. #ReportGenerationStage( self ),
  61. ]
  62. self.git_rev = None
  63. self.git_changes = None
  64. self.version = None
  65. def stage( self, context_str ):
  66. '''
  67. Returns the stage object of the associated string name of the class
  68. @param context_str: String name of the class of the stage e.g. CompilerConfigurationStage
  69. '''
  70. return [ stage for stage in self.stages if type( stage ).__name__ is context_str ][0]
  71. def command_line_args( self, args ):
  72. '''
  73. Capture commmand line arguments for each processing stage
  74. @param args: Name space of processed arguments
  75. '''
  76. for stage in self.stages:
  77. stage.command_line_args( args )
  78. def command_line_flags( self, parser ):
  79. '''
  80. Prepare group parser for each processing stage
  81. @param parser: argparse setup object
  82. '''
  83. for stage in self.stages:
  84. stage.command_line_flags( parser )
  85. def process( self ):
  86. '''
  87. Main processing section
  88. Initializes each stage in order.
  89. Each stage must complete before the next one begins.
  90. '''
  91. # Run report even if stage doesn't complete
  92. run_report = False
  93. for stage in self.stages:
  94. stage.process()
  95. # Make sure stage has successfully completed
  96. if stage.status() != 'Completed':
  97. print( "{0} Invalid stage status '{1}' for '{2}'.".format(
  98. ERROR,
  99. stage.status(),
  100. stage.__class__.__name__,
  101. ) )
  102. run_report = True
  103. break
  104. # Only need to explicitly run reports if there was a stage problem
  105. # Otherwise reports are run automatically
  106. if run_report:
  107. # TODO
  108. sys.exit( 1 )
  109. class Stage:
  110. '''
  111. Base Stage Class
  112. '''
  113. def __init__( self, control ):
  114. '''
  115. Stage initialization
  116. @param control: ControlStage object, used to access data from other stages
  117. '''
  118. self.control = control
  119. self.color = False
  120. self._status = 'Queued'
  121. def command_line_args( self, args ):
  122. '''
  123. Group parser for command line arguments
  124. @param args: Name space of processed arguments
  125. '''
  126. print( "{0} '{1}' '{2}' has not been implemented yet"
  127. .format(
  128. WARNING,
  129. self.command_line_args.__name__,
  130. type( self ).__name__
  131. )
  132. )
  133. def command_line_flags( self, parser ):
  134. '''
  135. Group parser for command line options
  136. @param parser: argparse setup object
  137. '''
  138. print( "{0} '{1}' '{2}' has not been implemented yet"
  139. .format(
  140. WARNING,
  141. self.command_line_flags.__name__,
  142. type( self ).__name__
  143. )
  144. )
  145. def process( self ):
  146. '''
  147. Main procesing section
  148. '''
  149. self._status = 'Running'
  150. print( "{0} '{1}' '{2}' has not been implemented yet"
  151. .format(
  152. WARNING,
  153. self.process.__name__,
  154. type( self ).__name__
  155. )
  156. )
  157. self._status = 'Completed'
  158. def status( self ):
  159. '''
  160. Returns the current status of the Stage
  161. Values:
  162. Queued - Not yet run
  163. Running - Currently running
  164. Completed - Successfully completed
  165. Incomplete - Unsuccessfully completed
  166. '''
  167. return self._status
  168. class CompilerConfigurationStage( Stage ):
  169. '''
  170. Compiler Configuration Stage
  171. * Does initial setup of KLL compiler.
  172. * Handles any global configuration that must be done before parsing can begin
  173. '''
  174. def __init__( self, control ):
  175. '''
  176. Initialize compiler configuration variables
  177. '''
  178. super().__init__( control )
  179. self.color = "auto"
  180. self.jobs = multiprocessing.cpu_count()
  181. self.pool = None
  182. # Build list of emitters
  183. self.emitters = emitters.Emitters( control )
  184. self.emitter = self.emitters.emitter_default()
  185. def command_line_args( self, args ):
  186. '''
  187. Group parser for command line arguments
  188. @param args: Name space of processed arguments
  189. '''
  190. self.emitter = args.emitter
  191. self.color = args.color
  192. self.jobs = args.jobs
  193. # Validate color argument before processing
  194. if self.color not in ['auto', 'always', 'never' ]:
  195. print( "Invalid color option '{0}'".format( self.color ) )
  196. sys.exit( 2 )
  197. # TODO Detect whether colorization should be used
  198. self.color = self.color in ['auto', 'always']
  199. # Validate if it's a valid emitter
  200. if self.emitter not in self.emitters.emitter_list():
  201. print( "{0} Invalid emitter '{1}'".format( ERROR, self.emitter ) )
  202. print( "Valid emitters: {0}".format( self.emitters.emitter_list() ) )
  203. sys.exit( 2 )
  204. def command_line_flags( self, parser ):
  205. '''
  206. Group parser for command line options
  207. @param parser: argparse setup object
  208. '''
  209. # Create new option group
  210. group = parser.add_argument_group('\033[1mCompiler Configuration\033[0m')
  211. # Optional Arguments
  212. group.add_argument( '--emitter', type=str, default=self.emitter,
  213. help="Specify target emitter for the KLL compiler.\n"
  214. "\033[1mDefault\033[0m: {0}\n"
  215. "\033[1mOptions\033[0m: {1}".format( self.emitter, self.emitters.emitter_list() )
  216. )
  217. group.add_argument( '--color', type=str, default=self.color,
  218. help="Specify debug colorizer mode.\n"
  219. "\033[1mDefault\033[0m: {0}\n"
  220. "\033[1mOptions\033[0m: auto, always, never (auto attempts to detect support)".format( self.color )
  221. )
  222. group.add_argument( '--jobs', type=int, default=self.jobs,
  223. help="Specify max number of threads to use.\n"
  224. "\033[1mDefault\033[0m: {0}".format( self.jobs )
  225. )
  226. def process( self ):
  227. '''
  228. Compiler Configuration Processing
  229. '''
  230. self._status = 'Running'
  231. # Initialize thread pool
  232. self.pool = ThreadPool( self.jobs )
  233. self._status = 'Completed'
  234. class FileImportStage( Stage ):
  235. '''
  236. FIle Import Stage
  237. * Loads text of all files into memory
  238. * Does initial sorting of KLL Contexts based upon command line arguments
  239. '''
  240. def __init__( self, control ):
  241. '''
  242. Initialize file storage datastructures and variables
  243. '''
  244. super().__init__( control )
  245. # These lists are order sensitive
  246. self.generic_files = []
  247. self.config_files = []
  248. self.base_files = []
  249. self.default_files = []
  250. # This is a list of lists, each sub list is another layer in order from 1 to max
  251. self.partial_files = []
  252. # List of all files contained in KLLFile objects
  253. self.kll_files = []
  254. def command_line_args( self, args ):
  255. '''
  256. Group parser for command line arguments
  257. @param args: Name space of processed arguments
  258. '''
  259. self.generic_files = args.generic
  260. self.config_files = args.config
  261. self.base_files = args.base
  262. self.default_files = args.default
  263. self.partial_files = args.partial
  264. def command_line_flags( self, parser ):
  265. '''
  266. Group parser for command line options
  267. @param parser: argparse setup object
  268. '''
  269. # Create new option group
  270. group = parser.add_argument_group('\033[1mFile Context Configuration\033[0m')
  271. # Positional Arguments
  272. group.add_argument( 'generic', type=str, nargs='*', default=self.generic_files,
  273. help="Auto-detect context of .kll files, defaults to a base map configuration."
  274. )
  275. # Optional Arguments
  276. group.add_argument( '--config', type=str, nargs='+', default=self.config_files,
  277. help="Specify base configuration .kll files, earliest priority"
  278. )
  279. group.add_argument( '--base', type=str, nargs='+', default=self.base_files,
  280. help="Specify base map configuration, applied after config .kll files.\n"
  281. "The base map is applied prior to all default and partial maps and is used as the basis for them."
  282. )
  283. group.add_argument( '--default', type=str, nargs='+', default=self.default_files,
  284. help="Specify .kll files to layer on top of the default map to create a combined map.\n"
  285. "Also known as layer 0."
  286. )
  287. group.add_argument( '--partial', type=str, nargs='+', action='append', default=self.partial_files,
  288. help="Specify .kll files to generate partial map, multiple files per flag.\n"
  289. "Each -p defines another partial map.\n"
  290. "Base .kll files (that define the scan code maps) must be defined for each partial map."
  291. )
  292. def init_kllfile( self, path, file_context ):
  293. '''
  294. Prepares a KLLFile object with the given context
  295. @path: Path to the KLL file
  296. @file_context: Type of file context, e.g. DefaultMapContext
  297. '''
  298. return file.KLLFile( path, file_context )
  299. def process( self ):
  300. '''
  301. Process each of the files, sorting them by command line argument context order
  302. '''
  303. self._status = 'Running'
  304. # Determine colorization setting
  305. self.color = self.control.stage('CompilerConfigurationStage').color
  306. # Process each type of file
  307. # Iterates over each file in the context list and creates a KLLFile object with a context and path
  308. self.kll_files += map(
  309. lambda path: self.init_kllfile( path, context.GenericContext() ),
  310. self.generic_files
  311. )
  312. self.kll_files += map(
  313. lambda path: self.init_kllfile( path, context.ConfigurationContext() ),
  314. self.config_files
  315. )
  316. self.kll_files += map(
  317. lambda path: self.init_kllfile( path, context.BaseMapContext() ),
  318. self.base_files
  319. )
  320. self.kll_files += map(
  321. lambda path: self.init_kllfile( path, context.DefaultMapContext() ),
  322. self.default_files
  323. )
  324. # Partial Maps require a third parameter which specifies which layer it's in
  325. for layer, files in enumerate( self.partial_files ):
  326. self.kll_files += map(
  327. lambda path: self.init_kllfile( path, context.PartialMapContext( layer ) ),
  328. files
  329. )
  330. # Validate that all the file paths exist, exit if any of the checks fail
  331. if False in [ path.check() for path in self.kll_files ]:
  332. self._status = 'Incomplete'
  333. return
  334. # Now that we have a full list of files and their given context, we can now read the files into memory
  335. # Uses the thread pool to speed up processing
  336. # Make sure processing was successful before continuing
  337. pool = self.control.stage('CompilerConfigurationStage').pool
  338. if False in pool.map( lambda kll_file: kll_file.read(), self.kll_files ):
  339. self._status = 'Incomplete'
  340. return
  341. self._status = 'Completed'
  342. class PreprocessorStage( Stage ):
  343. '''
  344. Preprocessor Stage
  345. * Does initial split and decision of contexts
  346. * Handles Preprocessor part of KLL
  347. '''
  348. def __init__( self, control ):
  349. '''
  350. Initialize preprocessor configuration variables
  351. '''
  352. super().__init__( control )
  353. def command_line_args( self, args ):
  354. '''
  355. Group parser for command line arguments
  356. @param args: Name space of processed arguments
  357. '''
  358. def command_line_flags( self, parser ):
  359. '''
  360. Group parser for command line options
  361. @param parser: argparse setup object
  362. '''
  363. # Create new option group
  364. #group = parser.add_argument_group('\033[1mPreprocessor Configuration\033[0m')
  365. def seed_context( self, kll_file ):
  366. '''
  367. Build list of context
  368. TODO Update later for proper preprocessor
  369. Adds data from KLFile into the Context
  370. '''
  371. kll_file.context.initial_context( kll_file.lines, kll_file.data, kll_file )
  372. def process( self ):
  373. '''
  374. Preprocessor Execution
  375. '''
  376. self._status = 'Running'
  377. # Determine colorization setting
  378. self.color = self.control.stage('CompilerConfigurationStage').color
  379. # Acquire thread pool
  380. pool = self.control.stage('CompilerConfigurationStage').pool
  381. # TODO
  382. # Once the KLL Spec has preprocessor commands, there may be a risk of infinite/circular dependencies
  383. # Please add a non-invasive way to avoid/warn/stop in this case -HaaTa
  384. # First, since initial contexts have been populated, populate details
  385. # TODO
  386. # This step will change once preprocessor commands have been added
  387. # Simply, this just takes the imported file data (KLLFile) and puts it in the context container
  388. kll_files = self.control.stage('FileImportStage').kll_files
  389. if False in pool.map( self.seed_context, kll_files ):
  390. self._status = 'Incomplete'
  391. return
  392. # Next, tokenize and parser the preprocessor KLL commands.
  393. # NOTE: This may result in having to create more KLL Contexts and tokenize/parse again numerous times over
  394. # TODO
  395. self._status = 'Completed'
  396. class OperationClassificationStage( Stage ):
  397. '''
  398. Operation Classification Stage
  399. * Sorts operations by type based on operator
  400. * Tokenizes only operator pivots and left/right arguments
  401. * Further tokenization and parsing occurs at a later stage
  402. '''
  403. def __init__( self, control ):
  404. '''
  405. Initialize operation classification stage
  406. '''
  407. super().__init__( control )
  408. self.tokenized_data = []
  409. self.contexts = []
  410. def command_line_args( self, args ):
  411. '''
  412. Group parser for command line arguments
  413. @param args: Name space of processed arguments
  414. '''
  415. def command_line_flags( self, parser ):
  416. '''
  417. Group parser for command line options
  418. @param parser: argparse setup object
  419. '''
  420. # Create new option group
  421. #group = parser.add_argument_group('\033[1mOperation Classification Configuration\033[0m')
  422. def merge_tokens( self, token_list, token_type ):
  423. '''
  424. Merge list of tokens into a single token
  425. @param token_list: List of tokens
  426. @param token_type: String name of token type
  427. '''
  428. # Initial token parameters
  429. ret_token = Token( token_type, '' )
  430. # Set start/end positions of token
  431. ret_token.start = token_list[0].start
  432. ret_token.end = token_list[-1].end
  433. # Build token value
  434. for token in token_list:
  435. ret_token.value += token.value
  436. return ret_token
  437. def tokenize( self, kll_context ):
  438. '''
  439. Tokenize a single string
  440. @param kll_context: KLL Context containing file data
  441. '''
  442. ret = True
  443. # Basic Tokens Spec
  444. spec = [
  445. ( 'Comment', ( r' *#.*', ) ),
  446. ( 'Space', ( r'[ \t]+', ) ),
  447. ( 'NewLine', ( r'[\r\n]+', ) ),
  448. # Tokens that will be grouped together after tokenization
  449. # Ignored at this stage
  450. # This is required to isolate the Operator tags
  451. ( 'Misc', ( r'r?[xyz]:[0-9]+(.[0-9]+)?', ) ), # Position context
  452. ( 'Misc', ( r'\([^\)]*\)', ) ), # Parenthesis context
  453. ( 'Misc', ( r'\[[^\]]*\]', ) ), # Square bracket context
  454. ( 'Misc', ( r'"[^"]*"', ) ), # Double quote context
  455. ( 'Misc', ( r"'[^']*'", ) ), # Single quote context
  456. ( 'Operator', ( r'=>|<=|i:\+|i:-|i::|i:|:\+|:-|::|:|=', ) ),
  457. ( 'EndOfLine', ( r';', ) ),
  458. # Everything else to be ignored at this stage
  459. ( 'Misc', ( r'.', ) ), # Everything else
  460. ]
  461. # Tokens to filter out of the token stream
  462. #useless = [ 'Space', 'Comment' ]
  463. useless = [ 'Comment', 'NewLine' ]
  464. # Build tokenizer that appends unknown characters to Misc Token groups
  465. # NOTE: This is technically slower processing wise, but allows for multi-stage tokenization
  466. # Which in turn allows for parsing and tokenization rules to be simplified
  467. tokenizer = make_tokenizer( spec )
  468. # Tokenize and filter out useless tokens
  469. try:
  470. tokens = [ x for x in tokenizer( kll_context.data ) if x.type not in useless ]
  471. except LexerError as err:
  472. print( err )
  473. print( "{0} {1}:tokenize -> {2}:{3}".format(
  474. ERROR,
  475. self.__class__.__name__,
  476. kll_context.parent.path,
  477. err.place[0],
  478. ) )
  479. # Merge Misc tokens delimited by Operator and EndOfLine tokens
  480. kll_context.classification_token_data = []
  481. new_token = []
  482. last_operator = None
  483. for token in tokens:
  484. # Check for delimiter, append new_token if ready
  485. if token.type in ['EndOfLine', 'Operator']:
  486. # Determine the token type
  487. token_type = 'LOperatorData'
  488. if token.type is 'EndOfLine':
  489. token_type = 'ROperatorData'
  490. # If this is a 'misplaced' operator, set as Misc
  491. if token_type == last_operator:
  492. token.type = 'Misc'
  493. new_token.append( token )
  494. continue
  495. if len( new_token ) > 0:
  496. # Build new token
  497. kll_context.classification_token_data.append(
  498. self.merge_tokens( new_token, token_type )
  499. )
  500. new_token = []
  501. kll_context.classification_token_data.append( token )
  502. last_operator = token_type
  503. # Collect Misc tokens
  504. elif token.type in ['Misc', 'Space']:
  505. new_token.append( token )
  506. # Invalid token for this stage
  507. else:
  508. print( "{0} Invalid token '{1}' for '{2}'".format(
  509. ERROR,
  510. token,
  511. type( self ).__name__,
  512. ) )
  513. ret = False
  514. return ret
  515. def sort( self, kll_context ):
  516. '''
  517. Sorts tokenized data into expressions
  518. LOperatorData + Operator + ROperatorData + EndOfLine
  519. @param kll_context: KLL Context, contains tokenized data
  520. '''
  521. ret = True
  522. def validate_token( token, token_type ):
  523. '''
  524. Validate token
  525. @param token: Given token to validate
  526. @param token_type: Token type to validate against
  527. @return True if the token is correct
  528. '''
  529. ret = token.type is token_type
  530. # Error message
  531. if not ret:
  532. print( "Expected: '{0}' got '{1}':{2} '{3}'".format(
  533. token_type,
  534. token.type,
  535. token._pos_str(),
  536. token.value,
  537. ) )
  538. return ret
  539. tokens = kll_context.classification_token_data
  540. for index in range( 0, len( tokens ), 4 ):
  541. # Make sure enough tokens exist
  542. if index + 3 >= len( tokens ):
  543. print( "Not enough tokens left: {0}".format( tokens[index:] ) )
  544. print( "Expected: LOperatorData, Operator, ROperatorData, EndOfLine" )
  545. print( "{0} {1}:sort -> {2}:{3}".format(
  546. ERROR,
  547. self.__class__.__name__,
  548. kll_context.parent.path,
  549. tokens[-1].start[0],
  550. ) )
  551. ret = False
  552. break
  553. # Validate the tokens are what was expected
  554. ret = validate_token( tokens[index], 'LOperatorData' ) and ret
  555. ret = validate_token( tokens[index + 1], 'Operator' ) and ret
  556. ret = validate_token( tokens[index + 2], 'ROperatorData' ) and ret
  557. ret = validate_token( tokens[index + 3], 'EndOfLine' ) and ret
  558. # Append expression
  559. kll_context.expressions.append(
  560. expression.Expression( tokens[index], tokens[index + 1], tokens[index + 2], kll_context )
  561. )
  562. return ret
  563. def process( self ):
  564. '''
  565. Compiler Configuration Processing
  566. '''
  567. self._status = 'Running'
  568. # Determine colorization setting
  569. self.color = self.control.stage('CompilerConfigurationStage').color
  570. # Acquire thread pool
  571. pool = self.control.stage('CompilerConfigurationStage').pool
  572. # Get list of KLLFiles
  573. kll_files = self.control.stage('FileImportStage').kll_files
  574. # Build list of contexts
  575. self.contexts = [ kll_file.context for kll_file in kll_files ]
  576. # Tokenize operators
  577. # TODO
  578. # Once preprocessor includes are implemented use a second kll_files list
  579. # This way processing doesn't have to be recursive for a few stages -HaaTa
  580. if False in pool.map( self.tokenize, self.contexts ):
  581. self._status = 'Incomplete'
  582. return
  583. # Sort elements into expressions
  584. # LOperatorData + Operator + ROperatorData + EndOfLine
  585. if False in pool.map( self.sort, self.contexts ):
  586. self._status = 'Incomplete'
  587. return
  588. self._status = 'Completed'
class OperationSpecificsStage( Stage ):
    '''
    Operation Specifics Stage

    * For each sorted operation, tokenize and parse the left/right arguments
    * Data is stored with the operation, but no context is given to the data beyond the argument types
    '''
    def __init__( self, control ):
        '''
        Initialize operation specifics stage

        @param control: ControlStage object, used to access data from other stages
        '''
        super().__init__( control )

        # Debug flags, set from the command line in command_line_args()
        self.parser_debug = False
        self.parser_token_debug = False
        self.token_debug = False

    def command_line_args( self, args ):
        '''
        Group parser for command line arguments

        @param args: Name space of processed arguments
        '''
        self.parser_debug = args.parser_debug
        self.parser_token_debug = args.parser_token_debug
        self.token_debug = args.token_debug

        # Auto-set parser_debug if parser_token_debug is set
        # (token debug output is only meaningful alongside the parser trace)
        if self.parser_token_debug:
            self.parser_debug = True

    def command_line_flags( self, parser ):
        '''
        Group parser for command line options

        @param parser: argparse setup object
        '''
        # Create new option group
        group = parser.add_argument_group('\033[1mOperation Specifics Configuration\033[0m')

        # Optional Arguments
        group.add_argument( '--parser-debug', action='store_true', default=self.parser_debug,
            help="Enable parser debug output.\n",
        )
        group.add_argument( '--parser-token-debug', action='store_true', default=self.parser_token_debug,
            help="Enable parser-stage token debug output.\n",
        )
        group.add_argument( '--token-debug', action='store_true', default=self.token_debug,
            help="Enable tokenization debug output.\n",
        )

    ## Tokenizers ##

    def tokenize_base( self, kll_expression, lspec, rspec ):
        '''
        Base tokenization logic for this stage

        Results are stored on the expression itself
        (lparam_sub_tokens / rparam_sub_tokens); only the status is returned.

        @param kll_expression: KLL expression to tokenize
        @param lspec: Regex tokenization spec for the left parameter
        @param rspec: Regex tokenization spec for the right parameter

        @return False if a LexerError was detected
        '''
        # Build tokenizers for lparam and rparam
        # NOTE: funcparserlib tries specs in list order; earlier entries win,
        # so spec ordering in the callers is significant
        ltokenizer = make_tokenizer( lspec )
        rtokenizer = make_tokenizer( rspec )

        # Tokenize lparam and rparam
        # Ignore the generators, not useful in this case (i.e. use list())
        err_pos = [] # Error positions
        try:
            kll_expression.lparam_sub_tokens = list( ltokenizer( kll_expression.lparam_token.value ) )
        except LexerError as err:
            # Determine place in constructed expression
            err_pos.append( err.place[1] )
            print( type( err ).__name__, err )

        try:
            kll_expression.rparam_sub_tokens = list( rtokenizer( kll_expression.rparam_token.value ) )
        except LexerError as err:
            # Determine place in constructed expression
            # rparam positions are offset by where rparam starts in the full expression
            err_pos.append( err.place[1] + kll_expression.rparam_start() )
            print( type( err ).__name__, err )

        # Display more information if any errors were detected
        if len( err_pos ) > 0:
            print( kll_expression.point_chars( err_pos ) )
            return False

        return True

    def tokenize_name_association( self, kll_expression ):
        '''
        Tokenize lparam and rparam in name association expressions
        <lparam> => <rparam>;
        '''
        # Define tokenization regex
        lspec = [
            ( 'Name',       ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
            ( 'Space',      ( r'[ \t]+', ) ),
        ]

        rspec = [
            ( 'Space',       ( r'[ \t]+', ) ),
            ( 'Parenthesis', ( r'\(|\)', ) ),
            ( 'Operator',    ( r':', ) ),
            ( 'Comma',       ( r',', ) ),
            ( 'Name',        ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
            ( 'Number',      ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
        ]

        # Tokenize, expression stores the result, status is returned
        return self.tokenize_base( kll_expression, lspec, rspec )

    def tokenize_data_association( self, kll_expression ):
        '''
        Tokenize lparam and rparam in data association expressions
        <lparam> <= <rparam>;
        '''
        # Define tokenization regex
        lspec = [
            ( 'Space',          ( r'[ \t]+', ) ),
            ( 'ScanCode',       ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'ScanCodeStart',  ( r'S\[', ) ),
            ( 'Pixel',          ( r'P((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'PixelStart',     ( r'P\[', ) ),
            ( 'Animation',      ( r'A"[^"]+"', ) ),
            ( 'AnimationStart', ( r'A\[', ) ),
            ( 'CodeBegin',      ( r'\[', ) ),
            ( 'CodeEnd',        ( r'\]', ) ),
            # NOTE(review): the '.' before the decimal digits is unescaped and so
            # matches any character — probably intended as r'\.'; confirm upstream
            ( 'Position',       ( r'r?[xyz]:-?[0-9]+(.[0-9]+)?', ) ),
            ( 'Comma',          ( r',', ) ),
            ( 'Dash',           ( r'-', ) ),
            ( 'Number',         ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
            ( 'Name',           ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
        ]

        rspec = [
            ( 'Space',           ( r'[ \t]+', ) ),
            ( 'Pixel',           ( r'P((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'PixelStart',      ( r'P\[', ) ),
            ( 'PixelLayer',      ( r'PL((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'PixelLayerStart', ( r'PL\[', ) ),
            ( 'Animation',       ( r'A"[^"]+"', ) ),
            ( 'AnimationStart',  ( r'A\[', ) ),
            ( 'USBCode',         ( r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'USBCodeStart',    ( r'U\[', ) ),
            ( 'ScanCode',        ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'ScanCodeStart',   ( r'S\[', ) ),
            ( 'CodeBegin',       ( r'\[', ) ),
            ( 'CodeEnd',         ( r'\]', ) ),
            # NOTE(review): unescaped '.' — see lspec Position note
            ( 'Position',        ( r'r?[xyz]:-?[0-9]+(.[0-9]+)?', ) ),
            ( 'PixelOperator',   ( r'(\+:|-:|>>|<<)', ) ),
            ( 'String',          ( r'"[^"]*"', ) ),
            ( 'Operator',        ( r':', ) ),
            ( 'Comma',           ( r',', ) ),
            ( 'Dash',            ( r'-', ) ),
            ( 'Plus',            ( r'\+', ) ),
            ( 'Parenthesis',     ( r'\(|\)', ) ),
            ( 'Percent',         ( r'(0|([1-9][0-9]*))%', ) ),
            ( 'Number',          ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
            ( 'Name',            ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
        ]

        # Tokenize, expression stores the result, status is returned
        return self.tokenize_base( kll_expression, lspec, rspec )

    def tokenize_assignment( self, kll_expression ):
        '''
        Tokenize lparam and rparam in assignment expressions
        <lparam> = <rparam>;
        '''
        # Define tokenization regex
        lspec = [
            ( 'Space',     ( r'[ \t]+', ) ),
            ( 'Number',    ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
            ( 'Name',      ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
            ( 'CodeBegin', ( r'\[', ) ),
            ( 'CodeEnd',   ( r'\]', ) ),
        ]

        rspec = [
            ( 'Space',            ( r'[ \t]+', ) ),
            ( 'String',           ( r'"[^"]*"', ) ),
            ( 'SequenceString',   ( r"'[^']*'", ) ),
            ( 'Number',           ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
            ( 'Name',             ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
            # Catch-all for anything that isn't one of the delimiters above
            ( 'VariableContents', ( r'''[^"' ;:=>()]+''', ) ),
        ]

        # Tokenize, expression stores the result, status is returned
        return self.tokenize_base( kll_expression, lspec, rspec )

    def tokenize_mapping( self, kll_expression ):
        '''
        Tokenize lparam and rparam in mapping expressions

        <lparam> :  <rparam>; # Set mapping
        <lparam> :+ <rparam>; # Mappping append
        <lparam> :- <rparam>; # Mapping removal
        <lparam> :: <rparam>; # Replace mapping (does nothing if nothing to replace)

        Isolated versions of mappings
        When expressions are evalutated during runtime, any non-isolated mapping expressions are cancelled
        <lparam> i:  <rparam>;
        <lparam> i:+ <rparam>;
        <lparam> i:- <rparam>;
        <lparam> i:: <rparam>;
        '''
        # Define tokenization regex
        lspec = [
            ( 'Space',          ( r'[ \t]+', ) ),
            ( 'USBCode',        ( r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'USBCodeStart',   ( r'U\[', ) ),
            ( 'ConsCode',       ( r'CONS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'ConsCodeStart',  ( r'CONS\[', ) ),
            ( 'SysCode',        ( r'SYS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'SysCodeStart',   ( r'SYS\[', ) ),
            ( 'ScanCode',       ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'ScanCodeStart',  ( r'S\[', ) ),
            ( 'IndCode',        ( r'I(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'IndicatorStart', ( r'I\[', ) ),
            ( 'Pixel',          ( r'P((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'PixelStart',     ( r'P\[', ) ),
            ( 'Animation',      ( r'A"[^"]+"', ) ),
            ( 'AnimationStart', ( r'A\[', ) ),
            ( 'CodeBegin',      ( r'\[', ) ),
            ( 'CodeEnd',        ( r'\]', ) ),
            ( 'String',         ( r'"[^"]*"', ) ),
            ( 'SequenceString', ( r"'[^']*'", ) ),
            ( 'Operator',       ( r':', ) ),
            ( 'Comma',          ( r',', ) ),
            ( 'Dash',           ( r'-', ) ),
            ( 'Plus',           ( r'\+', ) ),
            ( 'Parenthesis',    ( r'\(|\)', ) ),
            # NOTE(review): unescaped '.' in the decimal part — likely meant r'\.'
            ( 'Timing',         ( r'[0-9]+(.[0-9]+)?((s)|(ms)|(us)|(ns))', ) ),
            ( 'Number',         ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
            ( 'Name',           ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
        ]

        rspec = [
            ( 'Space',           ( r'[ \t]+', ) ),
            ( 'USBCode',         ( r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'USBCodeStart',    ( r'U\[', ) ),
            ( 'ConsCode',        ( r'CONS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'ConsCodeStart',   ( r'CONS\[', ) ),
            ( 'SysCode',         ( r'SYS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'SysCodeStart',    ( r'SYS\[', ) ),
            ( 'ScanCode',        ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'ScanCodeStart',   ( r'S\[', ) ),
            ( 'Pixel',           ( r'P((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'PixelStart',      ( r'P\[', ) ),
            ( 'PixelLayer',      ( r'PL((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'PixelLayerStart', ( r'PL\[', ) ),
            ( 'Animation',       ( r'A"[^"]+"', ) ),
            ( 'AnimationStart',  ( r'A\[', ) ),
            ( 'CodeBegin',       ( r'\[', ) ),
            ( 'CodeEnd',         ( r'\]', ) ),
            ( 'String',          ( r'"[^"]*"', ) ),
            ( 'SequenceString',  ( r"'[^']*'", ) ),
            ( 'None',            ( r'None', ) ),
            ( 'Operator',        ( r':', ) ),
            ( 'Comma',           ( r',', ) ),
            ( 'Dash',            ( r'-', ) ),
            ( 'Plus',            ( r'\+', ) ),
            ( 'Parenthesis',     ( r'\(|\)', ) ),
            # NOTE(review): unescaped '.' — see lspec Timing note
            ( 'Timing',          ( r'[0-9]+(.[0-9]+)?((s)|(ms)|(us)|(ns))', ) ),
            ( 'Number',          ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
            ( 'Name',            ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
        ]

        # Tokenize, expression stores the result, status is returned
        return self.tokenize_base( kll_expression, lspec, rspec )

    ## Parsers ##

    def parse_base( self, kll_expression, parse_expression, quiet ):
        '''
        Base parsing logic

        @param kll_expression: Expression being parsed, contains tokens
        @param parse_expression: Parse tree expression that understands the group of tokens
        @param quiet: Reduces verbosity, used when re-running an errored command in debug mode

        @return: False if parsing wasn't successful
        '''
        ret = True
        try:
            # Since the expressions have already been pre-organized, we only expect a single expression at a time
            ret = parse_expression.parse( kll_expression.final_tokens() )

            # Parse intepretation error, more info is provided by the specific parse intepreter
            if not ret and not quiet:
                print( kll_expression.final_tokens() )

        except NoParseError as err:
            if not quiet:
                print( kll_expression.final_tokens() )
                print( err )
            ret = False

        return ret

    def parse_name_association( self, kll_expression, quiet=False ):
        '''
        Parse name association expressions
        <lparam> => <rparam>;
        '''
        # Import parse elements/lambda functions
        from common.parse import (
            comma,
            name,
            number,
            operator,
            parenthesis,
            unarg,
            Make,
        )

        # Name Association
        # <capability name> => <c function>;
        capability_arguments  = name + skip( operator(':') ) + number + skip( maybe( comma ) ) >> unarg( Make.capArg )
        capability_expression = name + skip( operator('=>') ) + name + skip( parenthesis('(') ) + many( capability_arguments ) + skip( parenthesis(')') ) >> unarg( kll_expression.capability )

        # Name Association
        # <define name> => <c define>;
        define_expression = name + skip( operator('=>') ) + name >> unarg( kll_expression.define )

        # Top-level Parser
        expr = (
            capability_expression |
            define_expression
        )

        return self.parse_base( kll_expression, expr, quiet )

    def parse_data_association( self, kll_expression, quiet=False ):
        '''
        Parse data association expressions
        <lparam> <= <rparam>;
        '''
        # Import parse elements/lambda functions
        from common.parse import (
            animation_def,
            animation_elem,
            animation_flattened,
            animation_modlist,
            comma,
            flatten,
            operator,
            pixel_elem,
            pixel_expanded,
            pixelmod_elem,
            position_list,
            triggerCode_outerList,
            unarg,
        )

        # Data Association
        # <animation>       <= <modifiers>;
        # <animation frame> <= <modifiers>;
        animation_expression      = ( animation_elem | animation_def ) + skip( operator('<=') ) + animation_modlist >> unarg( kll_expression.animation )
        animationFrame_expression = animation_flattened + skip( operator('<=') ) + many( pixelmod_elem + skip( maybe( comma ) ) ) >> unarg( kll_expression.animationFrame )

        # Data Association
        # <pixel> <= <position>;
        pixelPosition_expression = ( pixel_expanded | pixel_elem ) + skip( operator('<=') ) + position_list >> unarg( kll_expression.pixelPosition )

        # Data Association
        # <scancode> <= <position>;
        scanCodePosition_expression = ( triggerCode_outerList >> flatten >> flatten ) + skip( operator('<=') ) + position_list >> unarg( kll_expression.scanCodePosition )

        # Top-level Parser
        expr = (
            animation_expression |
            animationFrame_expression |
            pixelPosition_expression |
            scanCodePosition_expression
        )

        return self.parse_base( kll_expression, expr, quiet )

    def parse_assignment( self, kll_expression, quiet=False ):
        '''
        Parse assignment expressions
        <lparam> = <rparam>;
        '''
        # Import parse elements/lambda functions
        from common.parse import (
            code_begin,
            code_end,
            comma,
            content,
            dash,
            name,
            number,
            operator,
            string,
            unarg,
            unseqString,
        )

        # Assignment
        # <variable> = <variable contents>;
        variable_contents   = name | content | string | number | comma | dash | unseqString
        variable_expression = name + skip( operator('=') ) + oneplus( variable_contents ) >> unarg( kll_expression.variable )

        # Array Assignment
        # <variable>[]        = <space> <separated> <list>;
        # <variable>[<index>] = <index element>;
        array_expression = name + skip( code_begin ) + maybe( number ) + skip( code_end ) + skip( operator('=') ) + oneplus( variable_contents ) >> unarg( kll_expression.array )

        # Top-level Parser
        # array_expression must be tried first, as variable_expression would also match its prefix
        expr = (
            array_expression |
            variable_expression
        )

        return self.parse_base( kll_expression, expr, quiet )

    def parse_mapping( self, kll_expression, quiet=False ):
        '''
        Parse mapping expressions

        <lparam> :  <rparam>; # Set mapping
        <lparam> :+ <rparam>; # Mappping append
        <lparam> :- <rparam>; # Mapping removal
        <lparam> :: <rparam>; # Replace mapping (does nothing if nothing to replace)

        Isolated versions of mappings
        When expressions are evalutated during runtime, any non-isolated mapping expressions are cancelled
        <lparam> i:  <rparam>;
        <lparam> i:+ <rparam>;
        <lparam> i:- <rparam>;
        <lparam> i:: <rparam>;
        '''
        # Import parse elements/lambda functions
        from common.parse import (
            animation_expanded,
            none,
            operator,
            pixelchan_elem,
            resultCode_outerList,
            scanCode_single,
            triggerCode_outerList,
            triggerUSBCode_outerList,
            unarg,
        )

        # Mapping
        # <trigger> : <result>;
        operatorTriggerResult = operator(':') | operator(':+') | operator(':-') | operator('::') | operator('i:') | operator('i:+') | operator('i:-') | operator('i::')
        scanCode_expression   = triggerCode_outerList + operatorTriggerResult + resultCode_outerList >> unarg( kll_expression.scanCode )
        usbCode_expression    = triggerUSBCode_outerList + operatorTriggerResult + resultCode_outerList >> unarg( kll_expression.usbCode )
        animation_trigger     = animation_expanded + operatorTriggerResult + resultCode_outerList >> unarg( kll_expression.animationTrigger )

        # Data Association
        # <pixel chan> : <scanCode>;
        pixelChan_expression = pixelchan_elem + skip( operator(':') ) + ( scanCode_single | none ) >> unarg( kll_expression.pixelChannels )

        # Top-level Parser
        expr = (
            scanCode_expression |
            usbCode_expression |
            pixelChan_expression |
            animation_trigger
        )

        return self.parse_base( kll_expression, expr, quiet )

    ## Processing ##

    def tokenize( self, kll_context ):
        '''
        Tokenizes contents of both LOperatorData and ROperatorData
        LOperatorData and ROperatorData have different contexts, so tokenization can be simplified a bit

        @param kll_context: KLL Context containing file data
        '''
        ret = True

        # Tokenizer map, each takes an expression argument
        tokenizers = {
            # Name association
            '=>' : self.tokenize_name_association,
            # Data association
            '<=' : self.tokenize_data_association,
            # Assignment
            '='  : self.tokenize_assignment,
            # Mapping
            # All : based operators have the same structure
            # The only difference is the application context (handled in a later stage)
            ':'  : self.tokenize_mapping,
        }

        # Tokenize left and right parameters of the expression
        for kll_expression in kll_context.expressions:
            # Determine which parser to use
            token = kll_expression.operator_type()

            # If there was a problem tokenizing, display expression info
            if not tokenizers[ token ]( kll_expression ):
                ret = False
                print( "{0} {1}:tokenize -> {2}:{3}".format(
                    ERROR,
                    self.__class__.__name__,
                    kll_context.parent.path,
                    kll_expression.lparam_token.start[0],
                ) )

            # Debug Output
            # Displays each parsed expression on a single line
            # Includes <filename>:<line number>
            if self.token_debug:
                # Uncolorize if requested
                output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m:\033[1;32m{2}\033[0m\033[1;36;41m>\033[0m {3}".format(
                    os.path.basename( kll_context.parent.path ),
                    kll_expression.lparam_token.start[0],
                    kll_expression.__class__.__name__,
                    kll_expression.final_tokens(),
                )
                print( self.color and output or ansi_escape.sub( '', output ) )

        return ret

    def parse( self, kll_context ):
        '''
        Parse the fully tokenized expressions

        @param kll_context: KLL Context which has the fully tokenized expression list
        '''
        ret = True

        # Parser map of functions, each takes an expression argument
        parsers = {
            # Name association
            '=>' : self.parse_name_association,
            # Data association
            '<=' : self.parse_data_association,
            # Assignment
            '='  : self.parse_assignment,
            # Mapping
            # All : based operators have the same structure
            # The only difference is the application context (handled in a later stage)
            ':'  : self.parse_mapping,
        }

        # Parse each expression to extract the data from it
        for kll_expression in kll_context.expressions:
            token = kll_expression.operator_type()

            # Assume failed, unless proven otherwise
            cur_ret = False

            # In some situations we don't want a parser trace, but only disable when we know
            parser_debug_ignore = False

            # If there was a problem parsing, display expression info
            # Catch any TypeErrors due to incorrect parsing rules
            try:
                cur_ret = parsers[ token ]( kll_expression )

            # Unexpected token (user grammar error), sometimes might be a bug
            except NoParseError as err:
                import traceback
                traceback.print_tb( err.__traceback__ )
                print( type( err ).__name__, err )
                print( "Bad kll expression, usually a syntax error." )

            # Invalid parsing rules, definitely a bug
            except TypeError as err:
                import traceback
                traceback.print_tb( err.__traceback__ )
                print( type( err ).__name__, err )
                print( "Bad parsing rule, this is a bug!" )

            # Lookup error, invalid lookup
            except KeyError as err:
                import traceback
                print( "".join( traceback.format_tb( err.__traceback__ )[-1:] ), end='' )
                print( "Invalid dictionary lookup, check syntax." )
                parser_debug_ignore = True

            # Parsing failed, show more error info
            if not cur_ret:
                ret = False

                # We don't always want a full trace of the parser
                if not parser_debug_ignore:
                    # StringIO stream from funcparserlib parser.py
                    # Command failed, run again, this time with verbose logging enabled
                    # Helps debug erroneous parsing expressions
                    parser_log = io.StringIO()

                    # This part is not thread-safe
                    # You must run with --jobs 1 to get 100% valid output
                    Parser_debug( True, parser_log )
                    try:
                        parsers[ token ]( kll_expression, True )
                    # NOTE(review): bare except is deliberate — this is a re-run
                    # purely to capture the verbose parser log; errors were already
                    # reported above
                    except:
                        pass
                    Parser_debug( False )

                    # Display
                    print( parser_log.getvalue() )

                    # Cleanup StringIO
                    parser_log.close()

                print( "{0} {1}:parse -> {2}:{3}".format(
                    ERROR,
                    self.__class__.__name__,
                    kll_context.parent.path,
                    kll_expression.lparam_token.start[0],
                ) )

            # Debug Output
            # Displays each parsed expression on a single line
            # Includes <filename>:<line number>
            if self.parser_debug:
                # Uncolorize if requested
                output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m:\033[1;32m{2}\033[0m:\033[1;35m{3}\033[1;36;41m>\033[0m {4}".format(
                    os.path.basename( kll_context.parent.path ),
                    kll_expression.lparam_token.start[0],
                    kll_expression.__class__.__name__,
                    kll_expression.type,
                    kll_expression
                )
                print( self.color and output or ansi_escape.sub( '', output ) )

            if self.parser_token_debug:
                # Uncolorize if requested
                output = "\t\033[1;4mTokens\033[0m\033[1;36m:\033[0m {0}".format(
                    [ ( t.type, t.value ) for t in kll_expression.final_tokens() ]
                )
                print( self.color and output or ansi_escape.sub( '', output ) )

        return ret

    def process( self ):
        '''
        Operation Specifics Stage Processing

        Tokenizes, then parses, the lparam/rparam of every expression in every
        KLL context produced by the OperationClassificationStage.
        '''
        self._status = 'Running'

        # Determine colorization setting
        self.color = self.control.stage('CompilerConfigurationStage').color

        # Acquire thread pool
        pool = self.control.stage('CompilerConfigurationStage').pool

        # Get list of KLL contexts
        contexts = self.control.stage('OperationClassificationStage').contexts

        # Tokenize operators
        if False in pool.map( self.tokenize, contexts ):
            self._status = 'Incomplete'
            return

        # Parse operators
        if False in pool.map( self.parse, contexts ):
            self._status = 'Incomplete'
            return

        self._status = 'Completed'
  1159. class OperationOrganizationStage( Stage ):
  1160. '''
  1161. Operation Organization Stage
  1162. * Using the type of each operation, apply the KLL Context to each operation
  1163. * This results in various datastructures being populated based upon the context and type of operation
  1164. * Each Context instance (distinct Context of the same type), remain separate
  1165. '''
  1166. def __init__( self, control ):
  1167. '''
  1168. Initialize configuration variables
  1169. '''
  1170. super().__init__( control )
  1171. self.operation_organization_debug = False
  1172. self.operation_organization_display = False
  1173. def command_line_args( self, args ):
  1174. '''
  1175. Group parser for command line arguments
  1176. @param args: Name space of processed arguments
  1177. '''
  1178. self.operation_organization_debug = args.operation_organization_debug
  1179. self.operation_organization_display = args.operation_organization_display
  1180. def command_line_flags( self, parser ):
  1181. '''
  1182. Group parser for command line options
  1183. @param parser: argparse setup object
  1184. '''
  1185. # Create new option group
  1186. group = parser.add_argument_group('\033[1mOperation Organization Configuration\033[0m')
  1187. # Optional Arguments
  1188. group.add_argument(
  1189. '--operation-organization-debug',
  1190. action='store_true',
  1191. default=self.operation_organization_debug,
  1192. help="Enable operation organization debug output.\n",
  1193. )
  1194. group.add_argument(
  1195. '--operation-organization-display',
  1196. action='store_true',
  1197. default=self.operation_organization_display,
  1198. help="Show datastructure of each context after filling.\n",
  1199. )
  1200. def organize( self, kll_context ):
  1201. '''
  1202. Organize each set of expressions on a context level
  1203. The full layout organization occurs over multiple stages, this is the first one
  1204. '''
  1205. # Add each of the expressions to the organization data structure
  1206. try:
  1207. for kll_expression in kll_context.expressions:
  1208. # Debug output
  1209. if self.operation_organization_debug:
  1210. # Uncolorize if requested
  1211. output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m:\033[1;32m{2}\033[0m:\033[1;35m{3}\033[1;36;41m>\033[0m {4}".format(
  1212. os.path.basename( kll_context.parent.path ),
  1213. kll_expression.lparam_token.start[0],
  1214. kll_expression.__class__.__name__,
  1215. kll_expression.type,
  1216. kll_expression
  1217. )
  1218. print( self.color and output or ansi_escape.sub( '', output ) )
  1219. # Add expression
  1220. kll_context.organization.add_expression(
  1221. kll_expression,
  1222. ( self.operation_organization_debug, self.color )
  1223. )
  1224. except Exception as err:
  1225. import traceback
  1226. traceback.print_tb( err.__traceback__ )
  1227. print( type( err ).__name__, err )
  1228. print( "Could not add/modify kll expression in context datastructure." )
  1229. return False
  1230. return True
  1231. def process( self ):
  1232. '''
  1233. Operation Organization Stage Processing
  1234. '''
  1235. self._status = 'Running'
  1236. # Determine colorization setting
  1237. self.color = self.control.stage('CompilerConfigurationStage').color
  1238. # Acquire thread pool
  1239. pool = self.control.stage('CompilerConfigurationStage').pool
  1240. # Get list of KLL contexts
  1241. contexts = self.control.stage('OperationClassificationStage').contexts
  1242. # Add expressions from contexts to context datastructures
  1243. if False in pool.map( self.organize, contexts ):
  1244. self._status = 'Incomplete'
  1245. return
  1246. # Show result of filling datastructure
  1247. if self.operation_organization_display:
  1248. for kll_context in contexts:
  1249. # Uncolorize if requested
  1250. output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m".format(
  1251. os.path.basename( kll_context.parent.path ),
  1252. kll_context.__class__.__name__
  1253. )
  1254. print( self.color and output or ansi_escape.sub( '', output ) )
  1255. # Display Table
  1256. for store in kll_context.organization.stores():
  1257. # Uncolorize if requested
  1258. output = "\t\033[1;4;32m{0}\033[0m".format(
  1259. store.__class__.__name__
  1260. )
  1261. print( self.color and output or ansi_escape.sub( '', output ) )
  1262. print( self.color and store or ansi_escape.sub( '', store ), end="" )
  1263. self._status = 'Completed'
  1264. class DataOrganizationStage( Stage ):
  1265. '''
  1266. Data Organization Stage
  1267. * Using the constructed Context datastructures, merge contexts of the same type together
  1268. * Precedence/priority is defined by the order each Context was included on the command line
  1269. * May include datastructure data optimizations
  1270. '''
  1271. def __init__( self, control ):
  1272. '''
  1273. Initialize configuration variables
  1274. '''
  1275. super().__init__( control )
  1276. self.data_organization_debug = False
  1277. self.data_organization_display = False
  1278. self.contexts = None
  1279. def command_line_args( self, args ):
  1280. '''
  1281. Group parser for command line arguments
  1282. @param args: Name space of processed arguments
  1283. '''
  1284. self.data_organization_debug = args.data_organization_debug
  1285. self.data_organization_display = args.data_organization_display
  1286. def command_line_flags( self, parser ):
  1287. '''
  1288. Group parser for command line options
  1289. @param parser: argparse setup object
  1290. '''
  1291. # Create new option group
  1292. group = parser.add_argument_group('\033[1mData Organization Configuration\033[0m')
  1293. # Optional Arguments
  1294. group.add_argument(
  1295. '--data-organization-debug',
  1296. action='store_true',
  1297. default=self.data_organization_debug,
  1298. help="Show debug info from data organization stage.\n",
  1299. )
  1300. group.add_argument(
  1301. '--data-organization-display',
  1302. action='store_true',
  1303. default=self.data_organization_display,
  1304. help="Show datastructure of each context after merging.\n",
  1305. )
  1306. def sort_contexts( self, contexts ):
  1307. '''
  1308. Returns a dictionary of list of sorted 'like' contexts
  1309. This is used to group the contexts that need merging
  1310. '''
  1311. lists = {}
  1312. for kll_context in contexts:
  1313. name = kll_context.__class__.__name__
  1314. # PartialMapContext's are sorted by name *and* layer number
  1315. if name == "PartialMapContext":
  1316. name = "{0}{1}".format( name, kll_context.layer )
  1317. # Add new list if no elements yet
  1318. if name not in lists.keys():
  1319. lists[ name ] = [ kll_context ]
  1320. else:
  1321. lists[ name ].append( kll_context )
  1322. return lists
  1323. def organize( self, kll_context ):
  1324. '''
  1325. Symbolically merge all like Contexts
  1326. The full layout organization occurs over multiple stages, this is the second stage
  1327. '''
  1328. # Lookup context name
  1329. context_name = "{0}".format( kll_context[0].__class__.__name__ )
  1330. # PartialMapContext's are sorted by name *and* layer number
  1331. if context_name == "PartialMapContext":
  1332. context_name = "{0}{1}".format( context_name, kll_context[0].layer )
  1333. # Initialize merge context as the first one
  1334. self.contexts[ context_name ] = context.MergeContext( kll_context[0] )
  1335. # Indicate when a context is skipped as there is only one
  1336. if self.data_organization_debug:
  1337. if len( kll_context ) < 2:
  1338. output = "\033[1;33mSkipping\033[0m\033[1m:\033[1;32m{0}\033[0m".format(
  1339. context_name
  1340. )
  1341. print( self.color and output or ansi_escape.sub( '', output ) )
  1342. return True
  1343. # The incoming list is ordered
  1344. # Merge in each of the contexts symbolically
  1345. for next_context in kll_context[1:]:
  1346. try:
  1347. self.contexts[ context_name ].merge(
  1348. next_context,
  1349. ( self.data_organization_debug, self.color )
  1350. )
  1351. except Exception as err:
  1352. import traceback
  1353. traceback.print_tb( err.__traceback__ )
  1354. print( type( err ).__name__, err )
  1355. print( "Could not merge '{0}' into '{1}' context.".format(
  1356. os.path.basename( next_context.parent.path ),
  1357. context_name
  1358. ) )
  1359. return False
  1360. return True
  1361. def process( self ):
  1362. '''
  1363. Data Organization Stage Processing
  1364. '''
  1365. self._status = 'Running'
  1366. # Determine colorization setting
  1367. self.color = self.control.stage('CompilerConfigurationStage').color
  1368. # Acquire thread pool
  1369. pool = self.control.stage('CompilerConfigurationStage').pool
  1370. # Get list of KLL contexts
  1371. contexts = self.control.stage('OperationClassificationStage').contexts
  1372. # Get sorted list of KLL contexts
  1373. sorted_contexts = self.sort_contexts( contexts )
  1374. self.contexts = {}
  1375. # Add expressions from contexts to context datastructures
  1376. if False in pool.map( self.organize, sorted_contexts.values() ):
  1377. self._status = 'Incomplete'
  1378. return
  1379. # Show result of filling datastructure
  1380. if self.data_organization_display:
  1381. for key, kll_context in self.contexts.items():
  1382. # Uncolorize if requested
  1383. output = "\033[1;33m{0}\033[0m:\033[1m{1}\033[0m".format(
  1384. key,
  1385. kll_context.paths(),
  1386. )
  1387. print( self.color and output or ansi_escape.sub( '', output ) )
  1388. # Display Table
  1389. for store in kll_context.organization.stores():
  1390. # Uncolorize if requested
  1391. output = "\t\033[1;4;32m{0}\033[0m".format(
  1392. store.__class__.__name__
  1393. )
  1394. print( self.color and output or ansi_escape.sub( '', output ) )
  1395. print( self.color and store or ansi_escape.sub( '', store ), end="" )
  1396. self._status = 'Completed'
  1397. class DataFinalizationStage( Stage ):
  1398. '''
  1399. Data Finalization Stage
  1400. * Using the merged Context datastructures, apply the Configuration and BaseMap contexts to the higher
  1401. level DefaultMap and PartialMap Contexts
  1402. * First BaseMap is applied on top of Configuration
  1403. * Next, DefaultMap is applied on top of (Configuration+BaseMap) as well as the PartialMaps
  1404. * May include datastructure data optimizations
  1405. '''
  1406. def __init__( self, control ):
  1407. '''
  1408. Initialize configuration variables
  1409. '''
  1410. super().__init__( control )
  1411. self.data_finalization_debug = False
  1412. self.data_finalization_display = False
  1413. self.base_context = None
  1414. self.default_context = None
  1415. self.partial_contexts = None
  1416. self.full_context = None
  1417. self.context_list = None
  1418. self.layer_contexts = None
  1419. def command_line_args( self, args ):
  1420. '''
  1421. Group parser for command line arguments
  1422. @param args: Name space of processed arguments
  1423. '''
  1424. self.data_finalization_debug = args.data_finalization_debug
  1425. self.data_finalization_display = args.data_finalization_display
  1426. def command_line_flags( self, parser ):
  1427. '''
  1428. Group parser for command line options
  1429. @param parser: argparse setup object
  1430. '''
  1431. # Create new option group
  1432. group = parser.add_argument_group('\033[1mData Organization Configuration\033[0m')
  1433. # Optional Arguments
  1434. group.add_argument(
  1435. '--data-finalization-debug',
  1436. action='store_true',
  1437. default=self.data_finalization_debug,
  1438. help="Show debug info from data finalization stage.\n",
  1439. )
  1440. group.add_argument(
  1441. '--data-finalization-display',
  1442. action='store_true',
  1443. default=self.data_finalization_display,
  1444. help="Show datastructure of each context after merging.\n",
  1445. )
  1446. def process( self ):
  1447. '''
  1448. Data Organization Stage Processing
  1449. '''
  1450. self._status = 'Running'
  1451. # Determine colorization setting
  1452. self.color = self.control.stage('CompilerConfigurationStage').color
  1453. # Get context silos
  1454. contexts = self.control.stage('DataOrganizationStage').contexts
  1455. self._status = 'Incomplete'
  1456. # Context list
  1457. self.context_list = []
  1458. # Depending on the calling order, we may need to use a GenericContext or ConfigurationContext as the base
  1459. # Default to ConfigurationContext first
  1460. if 'ConfigurationContext' in contexts.keys():
  1461. self.base_context = context.MergeContext( contexts['ConfigurationContext'] )
  1462. # If we still have GenericContexts around, merge them on top of the ConfigurationContext
  1463. if 'GenericContext' in contexts.keys():
  1464. self.base_context.merge(
  1465. contexts['GenericContext'],
  1466. ( self.data_finalization_debug, self.color )
  1467. )
  1468. # Otherwise, just use a GenericContext
  1469. elif 'GenericContext' in contexts.keys():
  1470. self.base_context = context.MergeContext( contexts['GenericContext'] )
  1471. # Fail otherwise, you *must* have a GenericContext or ConfigurationContext
  1472. else:
  1473. print( "{0} Missing a 'GenericContext' and/or 'ConfigurationContext'.".format( ERROR ) )
  1474. self._status = 'Incomplete'
  1475. return
  1476. # Next use the BaseMapContext and overlay on ConfigurationContext
  1477. # This serves as the basis for the next two merges
  1478. if 'BaseMapContext' in contexts.keys():
  1479. self.base_context.merge(
  1480. contexts['BaseMapContext'],
  1481. ( self.data_finalization_debug, self.color )
  1482. )
  1483. self.context_list.append( ( 'BaseMapContext', self.base_context ) )
  1484. # Then use the DefaultMapContext as the default keyboard mapping
  1485. self.default_context = context.MergeContext( self.base_context )
  1486. if 'DefaultMapContext' in contexts.keys():
  1487. self.default_context.merge(
  1488. contexts['DefaultMapContext'],
  1489. ( self.data_finalization_debug, self.color )
  1490. )
  1491. self.context_list.append( ( 'DefaultMapContext', self.default_context ) )
  1492. # For convenience build a fully merged dataset
  1493. # This is usually only required for variables
  1494. self.full_context = context.MergeContext( self.default_context )
  1495. # Finally setup each of the PartialMapContext groups
  1496. # Build list of PartialMapContexts and sort by layer before iterating over
  1497. self.partial_contexts = []
  1498. partial_context_list = [
  1499. ( item[1].layer, item[1] )
  1500. for item in contexts.items()
  1501. if 'PartialMapContext' in item[0]
  1502. ]
  1503. for layer, partial in sorted( partial_context_list, key=lambda x: x[0] ):
  1504. self.partial_contexts.append( context.MergeContext( self.base_context ) )
  1505. self.partial_contexts[ layer ].merge(
  1506. partial,
  1507. ( self.data_finalization_debug, self.color )
  1508. )
  1509. self.context_list.append( ( 'PartialMapContext{0}'.format( layer ), self.default_context ) )
  1510. # Add each partial to the full_context as well
  1511. self.full_context.merge(
  1512. partial,
  1513. ( self.data_finalization_debug, self.color )
  1514. )
  1515. # Build layer context list
  1516. # Each index of the list corresponds to the keyboard layer
  1517. self.layer_contexts = [ self.default_context ]
  1518. self.layer_contexts.extend( self.partial_contexts )
  1519. # Show result of filling datastructure
  1520. if self.data_finalization_display:
  1521. for key, kll_context in self.context_list:
  1522. # Uncolorize if requested
  1523. output = "*\033[1;33m{0}\033[0m:\033[1m{1}\033[0m".format(
  1524. key,
  1525. kll_context.paths(),
  1526. )
  1527. print( self.color and output or ansi_escape.sub( '', output ) )
  1528. # Display Table
  1529. for store in kll_context.organization.stores():
  1530. # Uncolorize if requested
  1531. output = "\t\033[1;4;32m{0}\033[0m".format(
  1532. store.__class__.__name__
  1533. )
  1534. print( self.color and output or ansi_escape.sub( '', output ) )
  1535. print( self.color and store or ansi_escape.sub( '', store ), end="" )
  1536. self._status = 'Completed'
  1537. class DataAnalysisStage( Stage ):
  1538. '''
  1539. Data Analysis Stage
  1540. * Using the completed Context datastructures, do additional analysis that may be required for Code Generation
  1541. '''
  1542. def __init__( self, control ):
  1543. '''
  1544. Initialize configuration variables
  1545. '''
  1546. super().__init__( control )
  1547. self.layer_contexts = None
  1548. self.full_context = None
  1549. def command_line_args( self, args ):
  1550. '''
  1551. Group parser for command line arguments
  1552. @param args: Name space of processed arguments
  1553. '''
  1554. def command_line_flags( self, parser ):
  1555. '''
  1556. Group parser for command line options
  1557. @param parser: argparse setup object
  1558. '''
  1559. # Create new option group
  1560. #group = parser.add_argument_group('\033[1mData Analysis Configuration\033[0m')
  1561. def reduction( self ):
  1562. '''
  1563. Builds a new reduced_contexts list
  1564. For each of the layers, evaluate triggers into ScanCodes (USBCode to ScanCodes)
  1565. (all other triggers don't require reductions)
  1566. '''
  1567. self.reduced_contexts = []
  1568. for layer in self.layer_contexts:
  1569. reduced = context.MergeContext( layer )
  1570. reduced.reduction()
  1571. self.reduced_contexts.append( reduced )
  1572. def generate_mapping_indices( self ):
  1573. '''
  1574. For each trigger:result pair generate a unique index
  1575. The triggers and results are first sorted alphabetically
  1576. '''
  1577. # Build list of map expressions
  1578. expressions = []
  1579. # Gather list of expressions
  1580. for layer in self.layer_contexts:
  1581. expressions.extend( layer.organization.mapping_data.data.items() )
  1582. # Sort expressions by trigger, there may be *duplicate* triggers however don't reduce yet
  1583. # we need the result mappings as well
  1584. trigger_sorted = sorted( expressions, key=lambda x: x[1][0].trigger_str() )
  1585. trigger_filtered = [ elem for elem in trigger_sorted if not elem[1][0].type == 'USBCode' ]
  1586. #print( trigger_filtered )
  1587. # Sort expressions by result, there may be *duplicate* results however don't reduce yet
  1588. # we need the result mappings as well
  1589. result_sorted = sorted( expressions, key=lambda x: x[1][0].result_str() )
  1590. #print( result_sorted )
  1591. # Build List of Triggers and sort by string contents
  1592. # XXX Only scan codes right now
  1593. # This will need to expand to a
  1594. #TODO
  1595. # Build List of Results and sort by string contents
  1596. # TODO
  1597. def sort_map_index_lists( self ):
  1598. '''
  1599. '''
  1600. def generate_map_offset_table( self ):
  1601. '''
  1602. '''
  1603. def generate_trigger_lists( self ):
  1604. '''
  1605. '''
  1606. def analyze( self ):
  1607. '''
  1608. Analyze the set of configured contexts
  1609. TODO: Perhaps use emitters or something like it for this code? -HaaTa
  1610. '''
  1611. # Reduce Contexts
  1612. # Convert all trigger USBCodes to ScanCodes
  1613. self.reduction()
  1614. # Generate Indices
  1615. # Assigns a sequential index (starting from 0) for each map expresssion
  1616. self.generate_mapping_indices()
  1617. # Sort Index Lists
  1618. # Using indices sort Trigger and Results macros
  1619. self.sort_map_index_lists()
  1620. # Generate Offset Table
  1621. # This is needed for interconnect devices
  1622. self.generate_map_offset_table()
  1623. # Generate Trigger Lists
  1624. self.generate_trigger_lists()
  1625. def process( self ):
  1626. '''
  1627. Data Analysis Stage Processing
  1628. '''
  1629. self._status = 'Running'
  1630. # Determine colorization setting
  1631. self.color = self.control.stage('CompilerConfigurationStage').color
  1632. # Acquire list of contexts
  1633. self.layer_contexts = self.control.stage('DataFinalizationStage').layer_contexts
  1634. self.full_context = self.control.stage('DataFinalizationStage').full_context
  1635. # Analyze set of contexts
  1636. self.analyze()
  1637. self._status = 'Completed'
  1638. class CodeGenerationStage( Stage ):
  1639. '''
  1640. Code Generation Stage
  1641. * Generates code for the given firmware backend
  1642. * Backend is selected in the Compiler Configuration Stage
  1643. * Uses the specified emitter to generate the code
  1644. '''
  1645. def __init__( self, control ):
  1646. '''
  1647. Initialize configuration variables
  1648. '''
  1649. super().__init__( control )
  1650. def command_line_args( self, args ):
  1651. '''
  1652. Group parser for command line arguments
  1653. @param args: Name space of processed arguments
  1654. '''
  1655. self.control.stage('CompilerConfigurationStage').emitters.command_line_args( args )
  1656. def command_line_flags( self, parser ):
  1657. '''
  1658. Group parser for command line options
  1659. @param parser: argparse setup object
  1660. '''
  1661. # Create new option group
  1662. #group = parser.add_argument_group('\033[1mCode Generation Configuration\033[0m')
  1663. # Create options groups for each of the Emitters
  1664. self.control.stage('CompilerConfigurationStage').emitters.command_line_flags( parser )
  1665. def process( self ):
  1666. '''
  1667. Data Organization Stage Processing
  1668. '''
  1669. self._status = 'Running'
  1670. # Determine colorization setting
  1671. self.color = self.control.stage('CompilerConfigurationStage').color
  1672. # Get Emitter object
  1673. self.emitter = self.control.stage('CompilerConfigurationStage').emitters.emitter(
  1674. self.control.stage('CompilerConfigurationStage').emitter
  1675. )
  1676. # Call Emitter
  1677. self.emitter.process()
  1678. # Generate Outputs using Emitter
  1679. self.emitter.output()
  1680. self._status = 'Completed'
  1681. class ReportGenerationStage( Stage ):
  1682. '''
  1683. Report Generation Stage
  1684. * Using the datastructures and analyzed data, generate a compiler report
  1685. * TODO
  1686. '''