KLL Compiler

stage.py 60KB

  1. #!/usr/bin/env python3
  2. '''
  3. KLL Compiler Stage Definitions
  4. '''
  5. # Copyright (C) 2016 by Jacob Alexander
  6. #
  7. # This file is free software: you can redistribute it and/or modify
  8. # it under the terms of the GNU General Public License as published by
  9. # the Free Software Foundation, either version 3 of the License, or
  10. # (at your option) any later version.
  11. #
  12. # This file is distributed in the hope that it will be useful,
  13. # but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. # GNU General Public License for more details.
  16. #
  17. # You should have received a copy of the GNU General Public License
  18. # along with this file. If not, see <http://www.gnu.org/licenses/>.
  19. ### Imports ###
  20. from multiprocessing.dummy import Pool as ThreadPool
  21. import io
  22. import os
  23. import re
  24. import sys
  25. import common.context as context
  26. import common.expression as expression
  27. import common.file as file
  28. import emitters.emitters as emitters
  29. from funcparserlib.lexer import make_tokenizer, Token, LexerError
  30. from funcparserlib.parser import many, oneplus, maybe, skip, NoParseError, Parser_debug
  31. ### Decorators ###
  32. ## Print Decorator Variables
  33. ERROR = '\033[5;1;31mERROR\033[0m:'
  34. WARNING = '\033[5;1;33mWARNING\033[0m:'
  35. ansi_escape = re.compile(r'\x1b[^m]*m')
  36. ### Classes ###
  37. class ControlStage:
  38. '''
  39. Top-level Stage
  40. Controls the order in which each stage is processed
  41. '''
  42. def __init__( self ):
  43. '''
  44. Initialize stage objects and control variables
  45. '''
  46. # Initialized in process order
  47. # NOTE: Only unique classes in this list, otherwise stage() will get confused
  48. self.stages = [
  49. CompilerConfigurationStage( self ),
  50. FileImportStage( self ),
  51. PreprocessorStage( self ),
  52. OperationClassificationStage( self ),
  53. OperationSpecificsStage( self ),
  54. OperationOrganizationStage( self ),
  55. DataOrganizationStage( self ),
  56. DataFinalizationStage( self ),
  57. DataAnalysisStage( self ),
  58. CodeGenerationStage( self ),
  59. #ReportGenerationStage( self ),
  60. ]
  61. self.git_rev = None
  62. self.git_changes = None
  63. self.version = None
  64. def stage( self, context_str ):
  65. '''
  66. Returns the stage object of the associated string name of the class
  67. @param context_str: String name of the class of the stage e.g. CompilerConfigurationStage
  68. '''
69. return [ stage for stage in self.stages if type( stage ).__name__ == context_str ][0]
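# Hedged usage sketch: later stages rely on this lookup to share data, e.g.
#   pool = self.control.stage( 'CompilerConfigurationStage' ).pool
# fetches the thread pool owned by the configuration stage via its class-name string.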
  70. def command_line_args( self, args ):
  71. '''
72. Capture command line arguments for each processing stage
  73. @param args: Name space of processed arguments
  74. '''
  75. for stage in self.stages:
  76. stage.command_line_args( args )
  77. def command_line_flags( self, parser ):
  78. '''
  79. Prepare group parser for each processing stage
  80. @param parser: argparse setup object
  81. '''
  82. for stage in self.stages:
  83. stage.command_line_flags( parser )
  84. def process( self ):
  85. '''
  86. Main processing section
  87. Initializes each stage in order.
  88. Each stage must complete before the next one begins.
  89. '''
  90. # Run report even if stage doesn't complete
  91. run_report = False
  92. for stage in self.stages:
  93. stage.process()
  94. # Make sure stage has successfully completed
  95. if stage.status() != 'Completed':
  96. print( "{0} Invalid stage status '{1}' for '{2}'.".format(
  97. ERROR,
  98. stage.status(),
  99. stage.__class__.__name__,
  100. ) )
  101. run_report = True
  102. break
  103. # Only need to explicitly run reports if there was a stage problem
  104. # Otherwise reports are run automatically
  105. if run_report:
  106. # TODO
  107. sys.exit( 1 )
  108. class Stage:
  109. '''
  110. Base Stage Class
  111. '''
  112. def __init__( self, control ):
  113. '''
  114. Stage initialization
  115. @param control: ControlStage object, used to access data from other stages
  116. '''
  117. self.control = control
  118. self.color = False
  119. self._status = 'Queued'
  120. def command_line_args( self, args ):
  121. '''
  122. Group parser for command line arguments
  123. @param args: Name space of processed arguments
  124. '''
  125. print( "{0} '{1}' '{2}' has not been implemented yet"
  126. .format(
  127. WARNING,
  128. self.command_line_args.__name__,
  129. type( self ).__name__
  130. )
  131. )
  132. def command_line_flags( self, parser ):
  133. '''
  134. Group parser for command line options
  135. @param parser: argparse setup object
  136. '''
  137. print( "{0} '{1}' '{2}' has not been implemented yet"
  138. .format(
  139. WARNING,
  140. self.command_line_flags.__name__,
  141. type( self ).__name__
  142. )
  143. )
  144. def process( self ):
  145. '''
146. Main processing section
  147. '''
  148. self._status = 'Running'
  149. print( "{0} '{1}' '{2}' has not been implemented yet"
  150. .format(
  151. WARNING,
  152. self.process.__name__,
  153. type( self ).__name__
  154. )
  155. )
  156. self._status = 'Completed'
  157. def status( self ):
  158. '''
  159. Returns the current status of the Stage
  160. Values:
  161. Queued - Not yet run
  162. Running - Currently running
  163. Completed - Successfully completed
  164. Incomplete - Unsuccessfully completed
  165. '''
  166. return self._status
  167. class CompilerConfigurationStage( Stage ):
  168. '''
  169. Compiler Configuration Stage
  170. * Does initial setup of KLL compiler.
  171. * Handles any global configuration that must be done before parsing can begin
  172. '''
  173. def __init__( self, control ):
  174. '''
  175. Initialize compiler configuration variables
  176. '''
  177. super().__init__( control )
  178. self.color = "auto"
  179. self.jobs = os.cpu_count()
  180. self.pool = None
  181. # Build list of emitters
  182. self.emitters = emitters.Emitters( control )
  183. self.emitter = self.emitters.emitter_default()
  184. def command_line_args( self, args ):
  185. '''
  186. Group parser for command line arguments
  187. @param args: Name space of processed arguments
  188. '''
  189. self.emitter = args.emitter
  190. self.color = args.color
  191. self.jobs = args.jobs
  192. # Validate color argument before processing
  193. if self.color not in ['auto', 'always', 'never' ]:
  194. print( "Invalid color option '{0}'".format( self.color ) )
  195. sys.exit( 2 )
  196. # TODO Detect whether colorization should be used
  197. self.color = self.color in ['auto', 'always']
  198. def command_line_flags( self, parser ):
  199. '''
  200. Group parser for command line options
  201. @param parser: argparse setup object
  202. '''
  203. # Create new option group
  204. group = parser.add_argument_group('\033[1mCompiler Configuration\033[0m')
  205. # Optional Arguments
  206. group.add_argument( '--emitter', type=str, default=self.emitter,
  207. help="Specify target emitter for the KLL compiler.\n"
  208. "\033[1mDefault\033[0m: {0}\n"
  209. "\033[1mOptions\033[0m: {1}".format( self.emitter, self.emitters.emitter_list() )
  210. )
  211. group.add_argument( '--color', type=str, default=self.color,
  212. help="Specify debug colorizer mode.\n"
  213. "\033[1mDefault\033[0m: {0}\n"
  214. "\033[1mOptions\033[0m: auto, always, never (auto attempts to detect support)".format( self.color )
  215. )
  216. group.add_argument( '--jobs', type=int, default=self.jobs,
  217. help="Specify max number of threads to use.\n"
  218. "\033[1mDefault\033[0m: {0}".format( self.jobs )
  219. )
  220. def process( self ):
  221. '''
  222. Compiler Configuration Processing
  223. '''
  224. self._status = 'Running'
  225. # Initialize thread pool
  226. self.pool = ThreadPool( self.jobs )
  227. self._status = 'Completed'
  228. class FileImportStage( Stage ):
  229. '''
230. File Import Stage
  231. * Loads text of all files into memory
  232. * Does initial sorting of KLL Contexts based upon command line arguments
  233. '''
  234. def __init__( self, control ):
  235. '''
  236. Initialize file storage datastructures and variables
  237. '''
  238. super().__init__( control )
  239. # These lists are order sensitive
  240. self.generic_files = []
  241. self.config_files = []
  242. self.base_files = []
  243. self.default_files = []
244. # This is a list of lists; each sublist is another layer, in order from 1 to max
  245. self.partial_files = []
  246. # List of all files contained in KLLFile objects
  247. self.kll_files = []
  248. def command_line_args( self, args ):
  249. '''
  250. Group parser for command line arguments
  251. @param args: Name space of processed arguments
  252. '''
  253. self.generic_files = args.generic
  254. self.config_files = args.config
  255. self.base_files = args.base
  256. self.default_files = args.default
  257. self.partial_files = args.partial
  258. def command_line_flags( self, parser ):
  259. '''
  260. Group parser for command line options
  261. @param parser: argparse setup object
  262. '''
  263. # Create new option group
  264. group = parser.add_argument_group('\033[1mFile Context Configuration\033[0m')
  265. # Positional Arguments
  266. group.add_argument( 'generic', type=str, nargs='*', default=self.generic_files,
  267. help="Auto-detect context of .kll files, defaults to a base map configuration."
  268. )
  269. # Optional Arguments
  270. group.add_argument( '--config', type=str, nargs='+', default=self.config_files,
  271. help="Specify base configuration .kll files, earliest priority"
  272. )
  273. group.add_argument( '--base', type=str, nargs='+', default=self.base_files,
  274. help="Specify base map configuration, applied after config .kll files.\n"
  275. "The base map is applied prior to all default and partial maps and is used as the basis for them."
  276. )
  277. group.add_argument( '--default', type=str, nargs='+', default=self.default_files,
  278. help="Specify .kll files to layer on top of the default map to create a combined map.\n"
  279. "Also known as layer 0."
  280. )
  281. group.add_argument( '--partial', type=str, nargs='+', action='append', default=self.partial_files,
  282. help="Specify .kll files to generate partial map, multiple files per flag.\n"
  283. "Each -p defines another partial map.\n"
  284. "Base .kll files (that define the scan code maps) must be defined for each partial map."
  285. )
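# Hedged invocation sketch (compiler executable name and .kll file names are hypothetical),
# showing how the file-context flags above combine on the command line:
#   ./kll --config defaults.kll --base scancode_map.kll --default layer0.kll \
#         --partial layer1.kll --partial layer2a.kll layer2b.kll
# Each --partial flag starts a new partial map layer.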
  286. def init_kllfile( self, path, file_context ):
  287. '''
  288. Prepares a KLLFile object with the given context
  289. @path: Path to the KLL file
  290. @file_context: Type of file context, e.g. DefaultMapContext
  291. '''
  292. return file.KLLFile( path, file_context )
  293. def process( self ):
  294. '''
  295. Process each of the files, sorting them by command line argument context order
  296. '''
  297. self._status = 'Running'
  298. # Determine colorization setting
  299. self.color = self.control.stage('CompilerConfigurationStage').color
  300. # Process each type of file
  301. # Iterates over each file in the context list and creates a KLLFile object with a context and path
  302. self.kll_files += map(
  303. lambda path: self.init_kllfile( path, context.GenericContext() ),
  304. self.generic_files
  305. )
  306. self.kll_files += map(
  307. lambda path: self.init_kllfile( path, context.ConfigurationContext() ),
  308. self.config_files
  309. )
  310. self.kll_files += map(
  311. lambda path: self.init_kllfile( path, context.BaseMapContext() ),
  312. self.base_files
  313. )
  314. self.kll_files += map(
  315. lambda path: self.init_kllfile( path, context.DefaultMapContext() ),
  316. self.default_files
  317. )
  318. # Partial Maps require a third parameter which specifies which layer it's in
  319. for layer, files in enumerate( self.partial_files ):
  320. self.kll_files += map(
  321. lambda path: self.init_kllfile( path, context.PartialMapContext( layer ) ),
  322. files
  323. )
  324. # Validate that all the file paths exist, exit if any of the checks fail
  325. if False in [ path.check() for path in self.kll_files ]:
  326. self._status = 'Incomplete'
  327. return
328. # Now that we have a full list of files and their given context, we can read the files into memory
  329. # Uses the thread pool to speed up processing
  330. # Make sure processing was successful before continuing
  331. pool = self.control.stage('CompilerConfigurationStage').pool
  332. if False in pool.map( lambda kll_file: kll_file.read(), self.kll_files ):
  333. self._status = 'Incomplete'
  334. return
  335. self._status = 'Completed'
  336. class PreprocessorStage( Stage ):
  337. '''
  338. Preprocessor Stage
  339. * Does initial split and decision of contexts
  340. * Handles Preprocessor part of KLL
  341. '''
  342. def __init__( self, control ):
  343. '''
  344. Initialize preprocessor configuration variables
  345. '''
  346. super().__init__( control )
  347. def command_line_args( self, args ):
  348. '''
  349. Group parser for command line arguments
  350. @param args: Name space of processed arguments
  351. '''
  352. def command_line_flags( self, parser ):
  353. '''
  354. Group parser for command line options
  355. @param parser: argparse setup object
  356. '''
  357. # Create new option group
  358. #group = parser.add_argument_group('\033[1mPreprocessor Configuration\033[0m')
  359. def seed_context( self, kll_file ):
  360. '''
  361. Build list of context
  362. TODO Update later for proper preprocessor
363. Adds data from KLLFile into the Context
  364. '''
  365. kll_file.context.initial_context( kll_file.lines, kll_file.data, kll_file )
  366. def process( self ):
  367. '''
  368. Preprocessor Execution
  369. '''
  370. self._status = 'Running'
  371. # Determine colorization setting
  372. self.color = self.control.stage('CompilerConfigurationStage').color
  373. # Acquire thread pool
  374. pool = self.control.stage('CompilerConfigurationStage').pool
  375. # TODO
  376. # Once the KLL Spec has preprocessor commands, there may be a risk of infinite/circular dependencies
  377. # Please add a non-invasive way to avoid/warn/stop in this case -HaaTa
  378. # First, since initial contexts have been populated, populate details
  379. # TODO
  380. # This step will change once preprocessor commands have been added
  381. # Simply, this just takes the imported file data (KLLFile) and puts it in the context container
  382. kll_files = self.control.stage('FileImportStage').kll_files
  383. if False in pool.map( self.seed_context, kll_files ):
  384. self._status = 'Incomplete'
  385. return
386. # Next, tokenize and parse the preprocessor KLL commands.
  387. # NOTE: This may result in having to create more KLL Contexts and tokenize/parse again numerous times over
  388. # TODO
  389. self._status = 'Completed'
  390. class OperationClassificationStage( Stage ):
  391. '''
  392. Operation Classification Stage
  393. * Sorts operations by type based on operator
  394. * Tokenizes only operator pivots and left/right arguments
395. * Further tokenization and parsing occur at a later stage
  396. '''
  397. def __init__( self, control ):
  398. '''
  399. Initialize operation classification stage
  400. '''
  401. super().__init__( control )
  402. self.tokenized_data = []
  403. self.contexts = []
  404. def command_line_args( self, args ):
  405. '''
  406. Group parser for command line arguments
  407. @param args: Name space of processed arguments
  408. '''
  409. def command_line_flags( self, parser ):
  410. '''
  411. Group parser for command line options
  412. @param parser: argparse setup object
  413. '''
  414. # Create new option group
  415. #group = parser.add_argument_group('\033[1mOperation Classification Configuration\033[0m')
  416. def merge_tokens( self, token_list, token_type ):
  417. '''
  418. Merge list of tokens into a single token
  419. @param token_list: List of tokens
  420. @param token_type: String name of token type
  421. '''
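# Hedged illustration: the per-character Misc tokens 'S', '0', 'x', '2', '9' and a
# trailing Space emitted by tokenize() below would merge into a single
# Token( 'LOperatorData', 'S0x29 ' ), keeping the start position of the first
# token and the end position of the last.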
  422. # Initial token parameters
  423. ret_token = Token( token_type, '' )
  424. # Set start/end positions of token
  425. ret_token.start = token_list[0].start
  426. ret_token.end = token_list[-1].end
  427. # Build token value
  428. for token in token_list:
  429. ret_token.value += token.value
  430. return ret_token
  431. def tokenize( self, kll_context ):
  432. '''
  433. Tokenize a single string
  434. @param kll_context: KLL Context containing file data
  435. '''
  436. ret = True
  437. # Basic Tokens Spec
  438. spec = [
  439. ( 'Comment', ( r' *#.*', ) ),
  440. ( 'Space', ( r'[ \t]+', ) ),
  441. ( 'NewLine', ( r'[\r\n]+', ) ),
  442. # Tokens that will be grouped together after tokenization
  443. # Ignored at this stage
  444. # This is required to isolate the Operator tags
  445. ( 'Misc', ( r'r?[xyz]:[0-9]+(.[0-9]+)?', ) ), # Position context
  446. ( 'Misc', ( r'\([^\)]*\)', ) ), # Parenthesis context
  447. ( 'Misc', ( r'\[[^\]]*\]', ) ), # Square bracket context
  448. ( 'Misc', ( r'"[^"]*"', ) ), # Double quote context
  449. ( 'Misc', ( r"'[^']*'", ) ), # Single quote context
  450. ( 'Operator', ( r'=>|<=|i:\+|i:-|i::|i:|:\+|:-|::|:|=', ) ),
  451. ( 'EndOfLine', ( r';', ) ),
  452. # Everything else to be ignored at this stage
  453. ( 'Misc', ( r'.', ) ), # Everything else
  454. ]
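# Hedged walkthrough of the coarse pass above, assuming a mapping line such as
#   S0x29 : U"Esc";
# The left side lexes as Misc/Space tokens, ':' as an Operator token, the quoted
# part of U"Esc" via the double-quote Misc rule, and ';' as EndOfLine; runs of
# Misc tokens are merged between these delimiters further below.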
  455. # Tokens to filter out of the token stream
  456. #useless = [ 'Space', 'Comment' ]
  457. useless = [ 'Comment', 'NewLine' ]
  458. # Build tokenizer that appends unknown characters to Misc Token groups
459. # NOTE: This is technically slower processing-wise, but allows for multi-stage tokenization
  460. # Which in turn allows for parsing and tokenization rules to be simplified
  461. tokenizer = make_tokenizer( spec )
  462. # Tokenize and filter out useless tokens
  463. try:
  464. tokens = [ x for x in tokenizer( kll_context.data ) if x.type not in useless ]
  465. except LexerError as err:
  466. print( err )
  467. print( "{0} {1}:tokenize -> {2}:{3}".format(
  468. ERROR,
  469. self.__class__.__name__,
  470. kll_context.parent.path,
  471. err.place[0],
  472. ) )
  473. # Merge Misc tokens delimited by Operator and EndOfLine tokens
  474. kll_context.classification_token_data = []
  475. new_token = []
  476. last_operator = None
  477. for token in tokens:
  478. # Check for delimiter, append new_token if ready
  479. if token.type in ['EndOfLine', 'Operator']:
  480. # Determine the token type
  481. token_type = 'LOperatorData'
482. if token.type == 'EndOfLine':
  483. token_type = 'ROperatorData'
  484. # If this is a 'misplaced' operator, set as Misc
  485. if token_type == last_operator:
  486. token.type = 'Misc'
  487. new_token.append( token )
  488. continue
  489. if len( new_token ) > 0:
  490. # Build new token
  491. kll_context.classification_token_data.append(
  492. self.merge_tokens( new_token, token_type )
  493. )
  494. new_token = []
  495. kll_context.classification_token_data.append( token )
  496. last_operator = token_type
  497. # Collect Misc tokens
  498. elif token.type in ['Misc', 'Space']:
  499. new_token.append( token )
  500. # Invalid token for this stage
  501. else:
  502. print( "{0} Invalid token '{1}' for '{2}'".format(
  503. ERROR,
  504. token,
  505. type( self ).__name__,
  506. ) )
  507. ret = False
  508. return ret
  509. def sort( self, kll_context ):
  510. '''
  511. Sorts tokenized data into expressions
  512. LOperatorData + Operator + ROperatorData + EndOfLine
  513. @param kll_context: KLL Context, contains tokenized data
  514. '''
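# Hedged example: the token stream for   S0x29 : U"Esc";   arrives here as
#   LOperatorData('S0x29 '), Operator(':'), ROperatorData(' U"Esc"'), EndOfLine(';')
# and each such group of four is wrapped into one Expression below.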
  515. ret = True
  516. def validate_token( token, token_type ):
  517. '''
  518. Validate token
  519. @param token: Given token to validate
  520. @param token_type: Token type to validate against
  521. @return True if the token is correct
  522. '''
523. ret = token.type == token_type
  524. # Error message
  525. if not ret:
  526. print( "Expected: '{0}' got '{1}':{2} '{3}'".format(
  527. token_type,
  528. token.type,
  529. token._pos_str(),
  530. token.value,
  531. ) )
  532. return ret
  533. tokens = kll_context.classification_token_data
  534. for index in range( 0, len( tokens ), 4 ):
  535. # Make sure enough tokens exist
  536. if index + 3 >= len( tokens ):
  537. print( "Not enough tokens left: {0}".format( tokens[index:] ) )
  538. print( "Expected: LOperatorData, Operator, ROperatorData, EndOfLine" )
  539. print( "{0} {1}:sort -> {2}:{3}".format(
  540. ERROR,
  541. self.__class__.__name__,
  542. kll_context.parent.path,
  543. tokens[-1].start[0],
  544. ) )
  545. ret = False
  546. break
  547. # Validate the tokens are what was expected
  548. ret = validate_token( tokens[index], 'LOperatorData' ) and ret
  549. ret = validate_token( tokens[index + 1], 'Operator' ) and ret
  550. ret = validate_token( tokens[index + 2], 'ROperatorData' ) and ret
  551. ret = validate_token( tokens[index + 3], 'EndOfLine' ) and ret
  552. # Append expression
  553. kll_context.expressions.append(
  554. expression.Expression( tokens[index], tokens[index + 1], tokens[index + 2], kll_context )
  555. )
  556. return ret
  557. def process( self ):
  558. '''
  559. Compiler Configuration Processing
  560. '''
  561. self._status = 'Running'
  562. # Determine colorization setting
  563. self.color = self.control.stage('CompilerConfigurationStage').color
  564. # Acquire thread pool
  565. pool = self.control.stage('CompilerConfigurationStage').pool
  566. # Get list of KLLFiles
  567. kll_files = self.control.stage('FileImportStage').kll_files
  568. # Build list of contexts
  569. self.contexts = [ kll_file.context for kll_file in kll_files ]
  570. # Tokenize operators
  571. # TODO
  572. # Once preprocessor includes are implemented use a second kll_files list
  573. # This way processing doesn't have to be recursive for a few stages -HaaTa
  574. if False in pool.map( self.tokenize, self.contexts ):
  575. self._status = 'Incomplete'
  576. return
  577. # Sort elements into expressions
  578. # LOperatorData + Operator + ROperatorData + EndOfLine
  579. if False in pool.map( self.sort, self.contexts ):
  580. self._status = 'Incomplete'
  581. return
  582. self._status = 'Completed'
  583. class OperationSpecificsStage( Stage ):
  584. '''
  585. Operation Specifics Stage
  586. * For each sorted operation, tokenize and parse the left/right arguments
  587. * Data is stored with the operation, but no context is given to the data beyond the argument types
  588. '''
  589. def __init__( self, control ):
  590. '''
  591. Initialize operation specifics stage
  592. '''
  593. super().__init__( control )
  594. self.parser_debug = False
  595. self.parser_token_debug = False
  596. self.token_debug = False
  597. def command_line_args( self, args ):
  598. '''
  599. Group parser for command line arguments
  600. @param args: Name space of processed arguments
  601. '''
  602. self.parser_debug = args.parser_debug
  603. self.parser_token_debug = args.parser_token_debug
  604. self.token_debug = args.token_debug
  605. # Auto-set parser_debug if parser_token_debug is set
  606. if self.parser_token_debug:
  607. self.parser_debug = True
  608. def command_line_flags( self, parser ):
  609. '''
  610. Group parser for command line options
  611. @param parser: argparse setup object
  612. '''
  613. # Create new option group
  614. group = parser.add_argument_group('\033[1mOperation Specifics Configuration\033[0m')
  615. # Optional Arguments
  616. group.add_argument( '--parser-debug', action='store_true', default=self.parser_debug,
  617. help="Enable parser debug output.\n",
  618. )
  619. group.add_argument( '--parser-token-debug', action='store_true', default=self.parser_token_debug,
  620. help="Enable parser-stage token debug output.\n",
  621. )
  622. group.add_argument( '--token-debug', action='store_true', default=self.token_debug,
  623. help="Enable tokenization debug output.\n",
  624. )
  625. ## Tokenizers ##
  626. def tokenize_base( self, kll_expression, lspec, rspec ):
  627. '''
  628. Base tokenization logic for this stage
  629. @param kll_expression: KLL expression to tokenize
  630. @param lspec: Regex tokenization spec for the left parameter
  631. @param rspec: Regex tokenization spec for the right parameter
  632. @return False if a LexerError was detected
  633. '''
  634. # Build tokenizers for lparam and rparam
  635. ltokenizer = make_tokenizer( lspec )
  636. rtokenizer = make_tokenizer( rspec )
  637. # Tokenize lparam and rparam
  638. # Ignore the generators, not useful in this case (i.e. use list())
  639. err_pos = [] # Error positions
  640. try:
  641. kll_expression.lparam_sub_tokens = list( ltokenizer( kll_expression.lparam_token.value ) )
  642. except LexerError as err:
  643. # Determine place in constructed expression
  644. err_pos.append( err.place[1] )
  645. print( type( err ).__name__, err )
  646. try:
  647. kll_expression.rparam_sub_tokens = list( rtokenizer( kll_expression.rparam_token.value ) )
  648. except LexerError as err:
  649. # Determine place in constructed expression
  650. err_pos.append( err.place[1] + kll_expression.rparam_start() )
  651. print( type( err ).__name__, err )
  652. # Display more information if any errors were detected
  653. if len( err_pos ) > 0:
  654. print( kll_expression.point_chars( err_pos ) )
  655. return False
  656. return True
  657. def tokenize_name_association( self, kll_expression ):
  658. '''
  659. Tokenize lparam and rparam in name association expressions
  660. <lparam> => <rparam>;
  661. '''
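# Hedged examples of name associations (identifiers hypothetical):
#   myCapability => my_c_function( arg1 : 1, arg2 : 2 );   # capability definition
#   myDefine => My_C_Define;                                # define association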
  662. # Define tokenization regex
  663. lspec = [
  664. ( 'Name', ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
  665. ( 'Space', ( r'[ \t]+', ) ),
  666. ]
  667. rspec = [
  668. ( 'Space', ( r'[ \t]+', ) ),
  669. ( 'Parenthesis', ( r'\(|\)', ) ),
  670. ( 'Operator', ( r':', ) ),
  671. ( 'Comma', ( r',', ) ),
  672. ( 'Name', ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
  673. ( 'Number', ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
  674. ]
  675. # Tokenize, expression stores the result, status is returned
  676. return self.tokenize_base( kll_expression, lspec, rspec )
  677. def tokenize_data_association( self, kll_expression ):
  678. '''
  679. Tokenize lparam and rparam in data association expressions
  680. <lparam> <= <rparam>;
  681. '''
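# Hedged examples of data associations (coordinates illustrative):
#   S0x31 <= x:20, y:15;    # physical position of a scan code
#   P[16] <= x:25, y:30;    # physical position of a pixel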
  682. # Define tokenization regex
  683. lspec = [
  684. ( 'Space', ( r'[ \t]+', ) ),
  685. ( 'ScanCode', ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  686. ( 'ScanCodeStart', ( r'S\[', ) ),
  687. ( 'Pixel', ( r'P((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  688. ( 'PixelStart', ( r'P\[', ) ),
  689. ( 'Animation', ( r'A"[^"]+"', ) ),
  690. ( 'AnimationStart', ( r'A\[', ) ),
  691. ( 'CodeBegin', ( r'\[', ) ),
  692. ( 'CodeEnd', ( r'\]', ) ),
  693. ( 'Position', ( r'r?[xyz]:[0-9]+(.[0-9]+)?', ) ),
  694. ( 'Comma', ( r',', ) ),
  695. ( 'Dash', ( r'-', ) ),
  696. ( 'Number', ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
  697. ( 'Name', ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
  698. ]
  699. rspec = [
  700. ( 'Space', ( r'[ \t]+', ) ),
  701. ( 'Pixel', ( r'P((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  702. ( 'PixelStart', ( r'P\[', ) ),
  703. ( 'PixelLayer', ( r'PL((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  704. ( 'PixelLayerStart', ( r'PL\[', ) ),
  705. ( 'Animation', ( r'A"[^"]+"', ) ),
  706. ( 'AnimationStart', ( r'A\[', ) ),
  707. ( 'CodeBegin', ( r'\[', ) ),
  708. ( 'CodeEnd', ( r'\]', ) ),
  709. ( 'Position', ( r'r?[xyz]:[0-9]+(.[0-9]+)?', ) ),
  710. ( 'PixelOperator', ( r'(\+:|-:|>>|<<)', ) ),
  711. ( 'Operator', ( r':', ) ),
  712. ( 'Comma', ( r',', ) ),
  713. ( 'Dash', ( r'-', ) ),
  714. ( 'Plus', ( r'\+', ) ),
  715. ( 'Parenthesis', ( r'\(|\)', ) ),
  716. ( 'Number', ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
  717. ( 'Name', ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
  718. ]
  719. # Tokenize, expression stores the result, status is returned
  720. return self.tokenize_base( kll_expression, lspec, rspec )
  721. def tokenize_assignment( self, kll_expression ):
  722. '''
  723. Tokenize lparam and rparam in assignment expressions
  724. <lparam> = <rparam>;
  725. '''
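# Hedged examples of assignments (names hypothetical):
#   myVariable = someValue;
#   myArray[] = elem0 elem1 elem2;
#   myArray[2] = replacementElement;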
  726. # Define tokenization regex
  727. lspec = [
  728. ( 'Space', ( r'[ \t]+', ) ),
  729. ( 'Number', ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
  730. ( 'Name', ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
  731. ( 'CodeBegin', ( r'\[', ) ),
  732. ( 'CodeEnd', ( r'\]', ) ),
  733. ]
  734. rspec = [
  735. ( 'Space', ( r'[ \t]+', ) ),
  736. ( 'String', ( r'"[^"]*"', ) ),
  737. ( 'SequenceString', ( r"'[^']*'", ) ),
  738. ( 'Number', ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
  739. ( 'Name', ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
  740. ( 'VariableContents', ( r'''[^"' ;:=>()]+''', ) ),
  741. ]
  742. # Tokenize, expression stores the result, status is returned
  743. return self.tokenize_base( kll_expression, lspec, rspec )
  744. def tokenize_mapping( self, kll_expression ):
  745. '''
  746. Tokenize lparam and rparam in mapping expressions
  747. <lparam> : <rparam>; # Set mapping
748. <lparam> :+ <rparam>; # Mapping append
  749. <lparam> :- <rparam>; # Mapping removal
  750. <lparam> :: <rparam>; # Replace mapping (does nothing if nothing to replace)
  751. Isolated versions of mappings
752. When expressions are evaluated during runtime, any non-isolated mapping expressions are cancelled
  753. <lparam> i: <rparam>;
  754. <lparam> i:+ <rparam>;
  755. <lparam> i:- <rparam>;
  756. <lparam> i:: <rparam>;
  757. '''
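# Hedged mapping examples (codes illustrative):
#   S0x29 : U"Esc";       # set mapping
#   S0x29 :+ U"LCtrl";    # append to the mapping
#   S0x29 i:: U"Esc";     # isolated replace mapping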
  758. # Define tokenization regex
  759. lspec = [
  760. ( 'Space', ( r'[ \t]+', ) ),
  761. ( 'USBCode', ( r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  762. ( 'USBCodeStart', ( r'U\[', ) ),
  763. ( 'ConsCode', ( r'CONS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  764. ( 'ConsCodeStart', ( r'CONS\[', ) ),
  765. ( 'SysCode', ( r'SYS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  766. ( 'SysCodeStart', ( r'SYS\[', ) ),
  767. ( 'ScanCode', ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  768. ( 'ScanCodeStart', ( r'S\[', ) ),
  769. ( 'IndCode', ( r'I(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  770. ( 'IndicatorStart', ( r'I\[', ) ),
  771. ( 'Pixel', ( r'P((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  772. ( 'PixelStart', ( r'P\[', ) ),
  773. ( 'Animation', ( r'A"[^"]+"', ) ),
  774. ( 'AnimationStart', ( r'A\[', ) ),
  775. ( 'CodeBegin', ( r'\[', ) ),
  776. ( 'CodeEnd', ( r'\]', ) ),
  777. ( 'String', ( r'"[^"]*"', ) ),
  778. ( 'SequenceString', ( r"'[^']*'", ) ),
  779. ( 'Operator', ( r':', ) ),
  780. ( 'Comma', ( r',', ) ),
  781. ( 'Dash', ( r'-', ) ),
  782. ( 'Plus', ( r'\+', ) ),
  783. ( 'Parenthesis', ( r'\(|\)', ) ),
  784. ( 'Timing', ( r'[0-9]+(.[0-9]+)?((s)|(ms)|(us)|(ns))', ) ),
  785. ( 'Number', ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
  786. ( 'Name', ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
  787. ]
  788. rspec = [
  789. ( 'Space', ( r'[ \t]+', ) ),
  790. ( 'USBCode', ( r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  791. ( 'USBCodeStart', ( r'U\[', ) ),
  792. ( 'ConsCode', ( r'CONS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  793. ( 'ConsCodeStart', ( r'CONS\[', ) ),
  794. ( 'SysCode', ( r'SYS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  795. ( 'SysCodeStart', ( r'SYS\[', ) ),
  796. ( 'ScanCode', ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  797. ( 'ScanCodeStart', ( r'S\[', ) ),
  798. ( 'Pixel', ( r'P((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  799. ( 'PixelStart', ( r'P\[', ) ),
  800. ( 'PixelLayer', ( r'PL((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
  801. ( 'PixelLayerStart', ( r'PL\[', ) ),
  802. ( 'Animation', ( r'A"[^"]+"', ) ),
  803. ( 'AnimationStart', ( r'A\[', ) ),
  804. ( 'CodeBegin', ( r'\[', ) ),
  805. ( 'CodeEnd', ( r'\]', ) ),
  806. ( 'String', ( r'"[^"]*"', ) ),
  807. ( 'SequenceString', ( r"'[^']*'", ) ),
  808. ( 'None', ( r'None', ) ),
  809. ( 'Operator', ( r':', ) ),
  810. ( 'Comma', ( r',', ) ),
  811. ( 'Dash', ( r'-', ) ),
  812. ( 'Plus', ( r'\+', ) ),
  813. ( 'Parenthesis', ( r'\(|\)', ) ),
  814. ( 'Timing', ( r'[0-9]+(.[0-9]+)?((s)|(ms)|(us)|(ns))', ) ),
  815. ( 'Number', ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
  816. ( 'Name', ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
  817. ]
  818. # Tokenize, expression stores the result, status is returned
  819. return self.tokenize_base( kll_expression, lspec, rspec )
  820. ## Parsers ##
  821. def parse_base( self, kll_expression, parse_expression, quiet ):
  822. '''
  823. Base parsing logic
  824. @param kll_expression: Expression being parsed, contains tokens
  825. @param parse_expression: Parse tree expression that understands the group of tokens
  826. @param quiet: Reduces verbosity, used when re-running an errored command in debug mode
  827. @return: False if parsing wasn't successful
  828. '''
  829. ret = True
  830. try:
  831. # Since the expressions have already been pre-organized, we only expect a single expression at a time
  832. ret = parse_expression.parse( kll_expression.final_tokens() )
833. # Parse interpretation error; more info is provided by the specific parse interpreter
  834. if not ret and not quiet:
  835. print( kll_expression.final_tokens() )
  836. except NoParseError as err:
  837. if not quiet:
  838. print( kll_expression.final_tokens() )
  839. print( err )
  840. ret = False
  841. return ret
  842. def parse_name_association( self, kll_expression, quiet=False ):
  843. '''
  844. Parse name association expressions
  845. <lparam> => <rparam>;
  846. '''
  847. # Import parse elements/lambda functions
  848. from common.parse import (
  849. comma,
  850. name,
  851. number,
  852. operator,
  853. parenthesis,
  854. unarg,
  855. Make,
  856. )
  857. # Name Association
  858. # <capability name> => <c function>;
  859. capability_arguments = name + skip( operator(':') ) + number + skip( maybe( comma ) ) >> unarg( Make.capArg )
  860. capability_expression = name + skip( operator('=>') ) + name + skip( parenthesis('(') ) + many( capability_arguments ) + skip( parenthesis(')') ) >> unarg( kll_expression.capability )
  861. # Name Association
  862. # <define name> => <c define>;
  863. define_expression = name + skip( operator('=>') ) + name >> unarg( kll_expression.define )
  864. # Top-level Parser
  865. expr = (
  866. capability_expression |
  867. define_expression
  868. )
  869. return self.parse_base( kll_expression, expr, quiet )
  870. def parse_data_association( self, kll_expression, quiet=False ):
  871. '''
  872. Parse data association expressions
  873. <lparam> <= <rparam>;
  874. '''
  875. from common.parse import (
  876. animation_def,
  877. animation_elem,
  878. animation_flattened,
  879. animation_modlist,
  880. comma,
  881. flatten,
  882. operator,
  883. pixel_elem,
  884. pixel_expanded,
  885. pixelmod_elem,
  886. position_list,
  887. triggerCode_outerList,
  888. unarg,
  889. )
  890. # Data Association
  891. # <animation> <= <modifiers>;
  892. # <animation frame> <= <modifiers>;
  893. animation_expression = ( animation_elem | animation_def ) + skip( operator('<=') ) + animation_modlist >> unarg( kll_expression.animation )
  894. animationFrame_expression = animation_flattened + skip( operator('<=') ) + many( pixelmod_elem + skip( maybe( comma ) ) ) >> unarg( kll_expression.animationFrame )
  895. # Data Association
  896. # <pixel> <= <position>;
  897. pixelPosition_expression = ( pixel_expanded | pixel_elem ) + skip( operator('<=') ) + position_list >> unarg( kll_expression.pixelPosition )
  898. # Data Association
  899. # <scancode> <= <position>;
  900. scanCodePosition_expression = ( triggerCode_outerList >> flatten >> flatten ) + skip( operator('<=') ) + position_list >> unarg( kll_expression.scanCodePosition )
  901. # Top-level Parser
  902. expr = (
  903. animation_expression |
  904. animationFrame_expression |
  905. pixelPosition_expression |
  906. scanCodePosition_expression
  907. )
  908. return self.parse_base( kll_expression, expr, quiet )
  909. def parse_assignment( self, kll_expression, quiet=False ):
  910. '''
  911. Parse assignment expressions
  912. <lparam> = <rparam>;
  913. '''
  914. # Import parse elements/lambda functions
  915. from common.parse import (
  916. code_begin,
  917. code_end,
  918. comma,
  919. content,
  920. dash,
  921. name,
  922. number,
  923. operator,
  924. string,
  925. unarg,
  926. unseqString,
  927. )
  928. # Assignment
  929. # <variable> = <variable contents>;
  930. variable_contents = name | content | string | number | comma | dash | unseqString
  931. variable_expression = name + skip( operator('=') ) + oneplus( variable_contents ) >> unarg( kll_expression.variable )
  932. # Array Assignment
  933. # <variable>[] = <space> <separated> <list>;
  934. # <variable>[<index>] = <index element>;
  935. array_expression = name + skip( code_begin ) + maybe( number ) + skip( code_end ) + skip( operator('=') ) + oneplus( variable_contents ) >> unarg( kll_expression.array )
  936. # Top-level Parser
  937. expr = (
  938. array_expression |
  939. variable_expression
  940. )
  941. return self.parse_base( kll_expression, expr, quiet )
  942. def parse_mapping( self, kll_expression, quiet=False ):
  943. '''
  944. Parse mapping expressions
  945. <lparam> : <rparam>; # Set mapping
946. <lparam> :+ <rparam>; # Mapping append
  947. <lparam> :- <rparam>; # Mapping removal
  948. <lparam> :: <rparam>; # Replace mapping (does nothing if nothing to replace)
  949. Isolated versions of mappings
950. When expressions are evaluated during runtime, any non-isolated mapping expressions are cancelled
  951. <lparam> i: <rparam>;
  952. <lparam> i:+ <rparam>;
  953. <lparam> i:- <rparam>;
  954. <lparam> i:: <rparam>;
  955. '''
  956. # Import parse elements/lambda functions
  957. from common.parse import (
  958. animation_expanded,
  959. none,
  960. operator,
  961. pixelchan_elem,
  962. resultCode_outerList,
  963. scanCode_single,
  964. triggerCode_outerList,
  965. triggerUSBCode_outerList,
  966. unarg,
  967. )
  968. # Mapping
  969. # <trigger> : <result>;
  970. operatorTriggerResult = operator(':') | operator(':+') | operator(':-') | operator('::') | operator('i:') | operator('i:+') | operator('i:-') | operator('i::')
  971. scanCode_expression = triggerCode_outerList + operatorTriggerResult + resultCode_outerList >> unarg( kll_expression.scanCode )
  972. usbCode_expression = triggerUSBCode_outerList + operatorTriggerResult + resultCode_outerList >> unarg( kll_expression.usbCode )
  973. animation_trigger = animation_expanded + operatorTriggerResult + resultCode_outerList >> unarg( kll_expression.animationTrigger )
  974. # Data Association
  975. # <pixel chan> : <scanCode>;
  976. pixelChan_expression = pixelchan_elem + skip( operator(':') ) + ( scanCode_single | none ) >> unarg( kll_expression.pixelChannels )
  977. # Top-level Parser
  978. expr = (
  979. scanCode_expression |
  980. usbCode_expression |
  981. pixelChan_expression |
  982. animation_trigger
  983. )
  984. return self.parse_base( kll_expression, expr, quiet )
  985. ## Processing ##
  986. def tokenize( self, kll_context ):
  987. '''
  988. Tokenizes contents of both LOperatorData and ROperatorData
  989. LOperatorData and ROperatorData have different contexts, so tokenization can be simplified a bit
990. @param kll_context: KLL Context containing file data
  991. '''
  992. ret = True
  993. # Tokenizer map, each takes an expression argument
  994. tokenizers = {
  995. # Name association
  996. '=>' : self.tokenize_name_association,
  997. # Data association
  998. '<=' : self.tokenize_data_association,
  999. # Assignment
  1000. '=' : self.tokenize_assignment,
  1001. # Mapping
  1002. # All : based operators have the same structure
  1003. # The only difference is the application context (handled in a later stage)
  1004. ':' : self.tokenize_mapping,
  1005. }
  1006. # Tokenize left and right parameters of the expression
  1007. for kll_expression in kll_context.expressions:
  1008. # Determine which parser to use
  1009. token = kll_expression.operator_type()
1010. # If there was a problem tokenizing, display expression info
  1011. if not tokenizers[ token ]( kll_expression ):
  1012. ret = False
  1013. print( "{0} {1}:tokenize -> {2}:{3}".format(
  1014. ERROR,
  1015. self.__class__.__name__,
  1016. kll_context.parent.path,
  1017. kll_expression.lparam_token.start[0],
  1018. ) )
  1019. # Debug Output
  1020. # Displays each parsed expression on a single line
  1021. # Includes <filename>:<line number>
  1022. if self.token_debug:
  1023. # Uncolorize if requested
  1024. output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m:\033[1;32m{2}\033[0m\033[1;36;41m>\033[0m {3}".format(
  1025. os.path.basename( kll_context.parent.path ),
  1026. kll_expression.lparam_token.start[0],
  1027. kll_expression.__class__.__name__,
  1028. kll_expression.final_tokens(),
  1029. )
  1030. print( self.color and output or ansi_escape.sub( '', output ) )
  1031. return ret
  1032. def parse( self, kll_context ):
  1033. '''
  1034. Parse the fully tokenized expressions
  1035. @param kll_context: KLL Context which has the fully tokenized expression list
  1036. '''
  1037. ret = True
  1038. # Parser map of functions, each takes an expression argument
  1039. parsers = {
  1040. # Name association
  1041. '=>' : self.parse_name_association,
  1042. # Data association
  1043. '<=' : self.parse_data_association,
  1044. # Assignment
  1045. '=' : self.parse_assignment,
  1046. # Mapping
  1047. # All : based operators have the same structure
  1048. # The only difference is the application context (handled in a later stage)
  1049. ':' : self.parse_mapping,
  1050. }
  1051. # Parse each expression to extract the data from it
  1052. for kll_expression in kll_context.expressions:
  1053. token = kll_expression.operator_type()
  1054. # Assume failed, unless proven otherwise
  1055. cur_ret = False
  1056. # In some situations we don't want a parser trace, but only disable when we know
  1057. parser_debug_ignore = False
  1058. # If there was a problem parsing, display expression info
  1059. # Catch any TypeErrors due to incorrect parsing rules
  1060. try:
  1061. cur_ret = parsers[ token ]( kll_expression )
  1062. # Unexpected token (user grammar error), sometimes might be a bug
  1063. except NoParseError as err:
  1064. import traceback
  1065. traceback.print_tb( err.__traceback__ )
  1066. print( type( err ).__name__, err )
  1067. print( "Bad kll expression, usually a syntax error." )
  1068. # Invalid parsing rules, definitely a bug
  1069. except TypeError as err:
  1070. import traceback
  1071. traceback.print_tb( err.__traceback__ )
  1072. print( type( err ).__name__, err )
  1073. print( "Bad parsing rule, this is a bug!" )
  1074. # Lookup error, invalid lookup
  1075. except KeyError as err:
  1076. import traceback
  1077. print( "".join( traceback.format_tb( err.__traceback__ )[-1:] ), end='' )
  1078. print( "Invalid dictionary lookup, check syntax." )
  1079. parser_debug_ignore = True
  1080. # Parsing failed, show more error info
  1081. if not cur_ret:
  1082. ret = False
  1083. # We don't always want a full trace of the parser
  1084. if not parser_debug_ignore:
  1085. # StringIO stream from funcparserlib parser.py
  1086. # Command failed, run again, this time with verbose logging enabled
  1087. # Helps debug erroneous parsing expressions
  1088. parser_log = io.StringIO()
  1089. # This part is not thread-safe
  1090. # You must run with --jobs 1 to get 100% valid output
  1091. Parser_debug( True, parser_log )
  1092. try:
  1093. parsers[ token ]( kll_expression, True )
  1094. except:
  1095. pass
  1096. Parser_debug( False )
  1097. # Display
  1098. print( parser_log.getvalue() )
  1099. # Cleanup StringIO
  1100. parser_log.close()
  1101. print( "{0} {1}:parse -> {2}:{3}".format(
  1102. ERROR,
  1103. self.__class__.__name__,
  1104. kll_context.parent.path,
  1105. kll_expression.lparam_token.start[0],
  1106. ) )
  1107. # Debug Output
  1108. # Displays each parsed expression on a single line
  1109. # Includes <filename>:<line number>
  1110. if self.parser_debug:
  1111. # Uncolorize if requested
  1112. output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m:\033[1;32m{2}\033[0m:\033[1;35m{3}\033[1;36;41m>\033[0m {4}".format(
  1113. os.path.basename( kll_context.parent.path ),
  1114. kll_expression.lparam_token.start[0],
  1115. kll_expression.__class__.__name__,
  1116. kll_expression.type,
  1117. kll_expression
  1118. )
  1119. print( self.color and output or ansi_escape.sub( '', output ) )
  1120. if self.parser_token_debug:
  1121. # Uncolorize if requested
  1122. output = "\t\033[1;4mTokens\033[0m\033[1;36m:\033[0m {0}".format(
  1123. [ ( t.type, t.value ) for t in kll_expression.final_tokens() ]
  1124. )
  1125. print( self.color and output or ansi_escape.sub( '', output ) )
  1126. return ret
  1127. def process( self ):
  1128. '''
  1129. Compiler Configuration Processing
  1130. '''
  1131. self._status = 'Running'
  1132. # Determine colorization setting
  1133. self.color = self.control.stage('CompilerConfigurationStage').color
  1134. # Acquire thread pool
  1135. pool = self.control.stage('CompilerConfigurationStage').pool
  1136. # Get list of KLL contexts
  1137. contexts = self.control.stage('OperationClassificationStage').contexts
  1138. # Tokenize operators
  1139. if False in pool.map( self.tokenize, contexts ):
  1140. self._status = 'Incomplete'
  1141. return
  1142. # Parse operators
  1143. if False in pool.map( self.parse, contexts ):
  1144. self._status = 'Incomplete'
  1145. return
  1146. self._status = 'Completed'
  1147. class OperationOrganizationStage( Stage ):
  1148. '''
  1149. Operation Organization Stage
  1150. * Using the type of each operation, apply the KLL Context to each operation
  1151. * This results in various datastructures being populated based upon the context and type of operation
1152. * Each Context instance (distinct Context of the same type) remains separate
  1153. '''
  1154. def __init__( self, control ):
  1155. '''
  1156. Initialize configuration variables
  1157. '''
  1158. super().__init__( control )
  1159. self.operation_organization_debug = False
  1160. self.operation_organization_display = False
  1161. def command_line_args( self, args ):
  1162. '''
  1163. Group parser for command line arguments
  1164. @param args: Name space of processed arguments
  1165. '''
  1166. self.operation_organization_debug = args.operation_organization_debug
  1167. self.operation_organization_display = args.operation_organization_display
  1168. def command_line_flags( self, parser ):
  1169. '''
  1170. Group parser for command line options
  1171. @param parser: argparse setup object
  1172. '''
  1173. # Create new option group
  1174. group = parser.add_argument_group('\033[1mOperation Organization Configuration\033[0m')
  1175. # Optional Arguments
  1176. group.add_argument(
  1177. '--operation-organization-debug',
  1178. action='store_true',
  1179. default=self.operation_organization_debug,
  1180. help="Enable operation organization debug output.\n",
  1181. )
  1182. group.add_argument(
  1183. '--operation-organization-display',
  1184. action='store_true',
  1185. default=self.operation_organization_display,
  1186. help="Show datastructure of each context after filling.\n",
  1187. )
  1188. def organize( self, kll_context ):
  1189. '''
  1190. Organize each set of expressions on a context level
  1191. The full layout organization occurs over multiple stages, this is the first one
  1192. '''
  1193. # Add each of the expressions to the organization data structure
  1194. try:
  1195. for kll_expression in kll_context.expressions:
  1196. # Debug output
  1197. if self.operation_organization_debug:
  1198. # Uncolorize if requested
  1199. output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m:\033[1;32m{2}\033[0m:\033[1;35m{3}\033[1;36;41m>\033[0m {4}".format(
  1200. os.path.basename( kll_context.parent.path ),
  1201. kll_expression.lparam_token.start[0],
  1202. kll_expression.__class__.__name__,
  1203. kll_expression.type,
  1204. kll_expression
  1205. )
  1206. print( self.color and output or ansi_escape.sub( '', output ) )
  1207. # Add expression
  1208. kll_context.organization.add_expression(
  1209. kll_expression,
  1210. ( self.operation_organization_debug, self.color )
  1211. )
  1212. except Exception as err:
  1213. import traceback
  1214. traceback.print_tb( err.__traceback__ )
  1215. print( type( err ).__name__, err )
  1216. print( "Could not add/modify kll expression in context datastructure." )
  1217. return False
  1218. return True
	def process( self ):
		'''
		Operation Organization Stage Processing
		'''
		self._status = 'Running'

		# Determine colorization setting
		self.color = self.control.stage('CompilerConfigurationStage').color

		# Acquire thread pool
		pool = self.control.stage('CompilerConfigurationStage').pool

		# Get list of KLL contexts
		contexts = self.control.stage('OperationClassificationStage').contexts

		# Add expressions from contexts to context datastructures
		if False in pool.map( self.organize, contexts ):
			self._status = 'Incomplete'
			return

		# Show result of filling datastructure
		if self.operation_organization_display:
			for kll_context in contexts:
				# Uncolorize if requested
				output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m".format(
					os.path.basename( kll_context.parent.path ),
					kll_context.__class__.__name__
				)
				print( self.color and output or ansi_escape.sub( '', output ) )

				# Display Table
				for store in kll_context.organization.stores():
					# Uncolorize if requested
					output = "\t\033[1;4;32m{0}\033[0m".format(
						store.__class__.__name__
					)
					print( self.color and output or ansi_escape.sub( '', output ) )
					print( self.color and store or ansi_escape.sub( '', store ), end="" )

		self._status = 'Completed'
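# Example invocation (hypothetical; the driver script and the file-grouping flags are defined
# outside this section) exercising the display flags registered by the organization stages:
#
#   ./kll --operation-organization-display --data-organization-display <kll files...>
#
# Each flag name matches an add_argument() call in the corresponding stage's command_line_flags().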
class DataOrganizationStage( Stage ):
	'''
	Data Organization Stage

	* Using the constructed Context datastructures, merge contexts of the same type together
	* Precedence/priority is defined by the order each Context was included on the command line
	* May include datastructure data optimizations
	'''
	def __init__( self, control ):
		'''
		Initialize configuration variables
		'''
		super().__init__( control )

		self.data_organization_debug = False
		self.data_organization_display = False
		self.contexts = None

	def command_line_args( self, args ):
		'''
		Group parser for command line arguments

		@param args: Name space of processed arguments
		'''
		self.data_organization_debug = args.data_organization_debug
		self.data_organization_display = args.data_organization_display

	def command_line_flags( self, parser ):
		'''
		Group parser for command line options

		@param parser: argparse setup object
		'''
		# Create new option group
		group = parser.add_argument_group('\033[1mData Organization Configuration\033[0m')

		# Optional Arguments
		group.add_argument(
			'--data-organization-debug',
			action='store_true',
			default=self.data_organization_debug,
			help="Show debug info from data organization stage.\n",
		)
		group.add_argument(
			'--data-organization-display',
			action='store_true',
			default=self.data_organization_display,
			help="Show datastructure of each context after merging.\n",
		)

	def sort_contexts( self, contexts ):
		'''
		Returns a dictionary of lists of 'like' contexts

		This is used to group the contexts that need merging.
		'''
		lists = {}

		for kll_context in contexts:
			name = kll_context.__class__.__name__

			# PartialMapContexts are grouped by name *and* layer number
			if name == "PartialMapContext":
				name = "{0}{1}".format( name, kll_context.layer )

			# Add new list if no elements yet
			if name not in lists.keys():
				lists[ name ] = [ kll_context ]
			else:
				lists[ name ].append( kll_context )

		return lists
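	# Shape of the returned grouping (file names hypothetical):
	#
	#   {
	#       'BaseMapContext'     : [ <BaseMapContext Base.kll>, <BaseMapContext Base2.kll> ],
	#       'DefaultMapContext'  : [ <DefaultMapContext Default.kll> ],
	#       'PartialMapContext0' : [ <PartialMapContext Layer.kll> ],
	#   }
	#
	# Each value list keeps command line order, which organize() below relies on for merge precedence.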
	def organize( self, kll_context ):
		'''
		Symbolically merge all like Contexts

		The full layout organization occurs over multiple stages; this is the second stage.
		'''
		# Lookup context name
		context_name = "{0}".format( kll_context[0].__class__.__name__ )

		# PartialMapContexts are grouped by name *and* layer number
		if context_name == "PartialMapContext":
			context_name = "{0}{1}".format( context_name, kll_context[0].layer )

		# Initialize merge context as the first one
		self.contexts[ context_name ] = context.MergeContext( kll_context[0] )

		# Indicate when a context is skipped as there is only one
		if self.data_organization_debug:
			if len( kll_context ) < 2:
				output = "\033[1;33mSkipping\033[0m\033[1m:\033[1;32m{0}\033[0m".format(
					context_name
				)
				print( self.color and output or ansi_escape.sub( '', output ) )
				return True

		# The incoming list is ordered
		# Merge in each of the contexts symbolically
		for next_context in kll_context[1:]:
			try:
				self.contexts[ context_name ].merge(
					next_context,
					( self.data_organization_debug, self.color )
				)

			except Exception as err:
				import traceback
				traceback.print_tb( err.__traceback__ )
				print( type( err ).__name__, err )
				print( "Could not merge '{0}' into '{1}' context.".format(
					os.path.basename( next_context.parent.path ),
					context_name
				) )
				return False

		return True
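	# Merge semantics live in context.MergeContext.merge() (defined outside this file); since the
	# incoming list keeps command line order, each later file is merged on top of the accumulated
	# result of the earlier ones, which is how the precedence described in the class docstring
	# comes about.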
	def process( self ):
		'''
		Data Organization Stage Processing
		'''
		self._status = 'Running'

		# Determine colorization setting
		self.color = self.control.stage('CompilerConfigurationStage').color

		# Acquire thread pool
		pool = self.control.stage('CompilerConfigurationStage').pool

		# Get list of KLL contexts
		contexts = self.control.stage('OperationClassificationStage').contexts

		# Get sorted list of KLL contexts
		sorted_contexts = self.sort_contexts( contexts )
		self.contexts = {}

		# Add expressions from contexts to context datastructures
		if False in pool.map( self.organize, sorted_contexts.values() ):
			self._status = 'Incomplete'
			return

		# Show result of filling datastructure
		if self.data_organization_display:
			for key, kll_context in self.contexts.items():
				# Uncolorize if requested
				output = "\033[1;33m{0}\033[0m:\033[1m{1}\033[0m".format(
					key,
					kll_context.paths(),
				)
				print( self.color and output or ansi_escape.sub( '', output ) )

				# Display Table
				for store in kll_context.organization.stores():
					# Uncolorize if requested
					output = "\t\033[1;4;32m{0}\033[0m".format(
						store.__class__.__name__
					)
					print( self.color and output or ansi_escape.sub( '', output ) )
					print( self.color and store or ansi_escape.sub( '', store ), end="" )

		self._status = 'Completed'
class DataFinalizationStage( Stage ):
	'''
	Data Finalization Stage

	* Using the merged Context datastructures, apply the Configuration and BaseMap contexts to the higher level DefaultMap and PartialMap Contexts
	* First BaseMap is applied on top of Configuration
	* Next, DefaultMap is applied on top of (Configuration+BaseMap) as well as the PartialMaps
	* May include datastructure data optimizations
	'''
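	# A sketch of the merge cascade built by process() below, assuming a command line that supplies
	# a Configuration/BaseMap, a DefaultMap and some PartialMaps (layer numbering follows the
	# PartialMapContext.layer attribute assigned earlier in the pipeline):
	#
	#   base_context         = ConfigurationContext (+ GenericContext) + BaseMapContext
	#   default_context      = base_context + DefaultMapContext           -> layer_contexts[0]
	#   partial_contexts[n]  = base_context + PartialMapContext(layer n)  -> layer_contexts[n+1]
	#   full_context         = default_context + every PartialMapContext  (used mainly for variables)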
	def __init__( self, control ):
		'''
		Initialize configuration variables
		'''
		super().__init__( control )

		self.data_finalization_debug = False
		self.data_finalization_display = False

		self.base_context = None
		self.default_context = None
		self.partial_contexts = None
		self.full_context = None
		self.context_list = None
		self.layer_contexts = None

	def command_line_args( self, args ):
		'''
		Group parser for command line arguments

		@param args: Name space of processed arguments
		'''
		self.data_finalization_debug = args.data_finalization_debug
		self.data_finalization_display = args.data_finalization_display

	def command_line_flags( self, parser ):
		'''
		Group parser for command line options

		@param parser: argparse setup object
		'''
		# Create new option group
		group = parser.add_argument_group('\033[1mData Finalization Configuration\033[0m')
		# Optional Arguments
		group.add_argument(
			'--data-finalization-debug',
			action='store_true',
			default=self.data_finalization_debug,
			help="Show debug info from data finalization stage.\n",
		)
		group.add_argument(
			'--data-finalization-display',
			action='store_true',
			default=self.data_finalization_display,
			help="Show datastructure of each context after merging.\n",
		)
	def process( self ):
		'''
		Data Finalization Stage Processing
		'''
		self._status = 'Running'

		# Determine colorization setting
		self.color = self.control.stage('CompilerConfigurationStage').color

		# Get context silos
		contexts = self.control.stage('DataOrganizationStage').contexts
		self._status = 'Incomplete'

		# Context list
		self.context_list = []

		# Depending on the calling order, we may need to use a GenericContext or ConfigurationContext as the base
		# Default to ConfigurationContext first
		if 'ConfigurationContext' in contexts.keys():
			self.base_context = context.MergeContext( contexts['ConfigurationContext'] )

			# If we still have GenericContexts around, merge them on top of the ConfigurationContext
			if 'GenericContext' in contexts.keys():
				self.base_context.merge(
					contexts['GenericContext'],
					( self.data_finalization_debug, self.color )
				)

		# Otherwise, just use a GenericContext
		elif 'GenericContext' in contexts.keys():
			self.base_context = context.MergeContext( contexts['GenericContext'] )

		# Fail otherwise, you *must* have a GenericContext or ConfigurationContext
		else:
			print( "{0} Missing a 'GenericContext' and/or 'ConfigurationContext'.".format( ERROR ) )
			self._status = 'Incomplete'
			return

		# Next use the BaseMapContext and overlay on ConfigurationContext
		# This serves as the basis for the next two merges
		if 'BaseMapContext' in contexts.keys():
			self.base_context.merge(
				contexts['BaseMapContext'],
				( self.data_finalization_debug, self.color )
			)
			self.context_list.append( ( 'BaseMapContext', self.base_context ) )

		# Then use the DefaultMapContext as the default keyboard mapping
		self.default_context = context.MergeContext( self.base_context )
		if 'DefaultMapContext' in contexts.keys():
			self.default_context.merge(
				contexts['DefaultMapContext'],
				( self.data_finalization_debug, self.color )
			)
			self.context_list.append( ( 'DefaultMapContext', self.default_context ) )

		# For convenience build a fully merged dataset
		# This is usually only required for variables
		self.full_context = context.MergeContext( self.default_context )

		# Finally setup each of the PartialMapContext groups
		# Build list of PartialMapContexts and sort by layer before iterating over
		self.partial_contexts = []
		partial_context_list = [
			( item[1].layer, item[1] )
			for item in contexts.items()
			if 'PartialMapContext' in item[0]
		]
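		# partial_context_list now holds ( layer, PartialMapContext ) tuples, e.g. (layer numbers
		# and file names illustrative only):
		#   [ ( 0, <PartialMapContext Layer1.kll> ), ( 1, <PartialMapContext Layer2.kll> ) ]
		# sorted() below orders them by layer so self.partial_contexts can be indexed by layer directly.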
		for layer, partial in sorted( partial_context_list, key=lambda x: x[0] ):
			self.partial_contexts.append( context.MergeContext( self.base_context ) )
			self.partial_contexts[ layer ].merge(
				partial,
				( self.data_finalization_debug, self.color )
			)
			self.context_list.append( ( 'PartialMapContext{0}'.format( layer ), self.partial_contexts[ layer ] ) )
			# Add each partial to the full_context as well
			self.full_context.merge(
				partial,
				( self.data_finalization_debug, self.color )
			)

		# Build layer context list
		# Each index of the list corresponds to the keyboard layer
		self.layer_contexts = [ self.default_context ]
		self.layer_contexts.extend( self.partial_contexts )
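		# Resulting indexing (illustrative): layer_contexts[0] is the DefaultMap-based layer,
		# layer_contexts[1:] are the PartialMap-based layers in ascending layer order.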

		# Show result of filling datastructure
		if self.data_finalization_display:
			for key, kll_context in self.context_list:
				# Uncolorize if requested
				output = "*\033[1;33m{0}\033[0m:\033[1m{1}\033[0m".format(
					key,
					kll_context.paths(),
				)
				print( self.color and output or ansi_escape.sub( '', output ) )

				# Display Table
				for store in kll_context.organization.stores():
					# Uncolorize if requested
					output = "\t\033[1;4;32m{0}\033[0m".format(
						store.__class__.__name__
					)
					print( self.color and output or ansi_escape.sub( '', output ) )
					print( self.color and store or ansi_escape.sub( '', store ), end="" )

		self._status = 'Completed'
class DataAnalysisStage( Stage ):
	'''
	Data Analysis Stage

	* Using the completed Context datastructures, do additional analysis that may be required for Code Generation
	'''
	def __init__( self, control ):
		'''
		Initialize configuration variables
		'''
		super().__init__( control )

		self.layer_contexts = None
		self.full_context = None

	def command_line_args( self, args ):
		'''
		Group parser for command line arguments

		@param args: Name space of processed arguments
		'''

	def command_line_flags( self, parser ):
		'''
		Group parser for command line options

		@param parser: argparse setup object
		'''
		# Create new option group
		#group = parser.add_argument_group('\033[1mData Analysis Configuration\033[0m')

	def reduction( self ):
		'''
		Builds a new reduced_contexts list

		For each of the layers, evaluate triggers into ScanCodes (USBCode to ScanCodes)
		(all other triggers don't require reductions)
		'''
		self.reduced_contexts = []

		for layer in self.layer_contexts:
			reduced = context.MergeContext( layer )
			reduced.reduction()
			self.reduced_contexts.append( reduced )
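	# Illustrative reduction (KLL values are hypothetical): a layer entry such as
	#   U"A" : U"B";
	# has its USBCode trigger resolved back to the ScanCode that produces it, e.g.
	#   S0x04 : U"B";
	# so later stages only ever see ScanCode triggers. The actual substitution lives in
	# context.MergeContext.reduction(), outside this file.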
	def generate_mapping_indices( self ):
		'''
		For each trigger:result pair generate a unique index

		The triggers and results are first sorted alphabetically.
		'''
		# Build list of map expressions
		expressions = []

		# Gather list of expressions
		for layer in self.layer_contexts:
			expressions.extend( layer.organization.mapping_data.data.items() )

		# Sort expressions by trigger; there may be *duplicate* triggers, however don't reduce yet
		# as we need the result mappings as well
		trigger_sorted = sorted( expressions, key=lambda x: x[1][0].trigger_str() )
		trigger_filtered = [ elem for elem in trigger_sorted if elem[1][0].type != 'USBCode' ]
		#print( trigger_filtered )

		# Sort expressions by result; there may be *duplicate* results, however don't reduce yet
		# as we need the result mappings as well
		result_sorted = sorted( expressions, key=lambda x: x[1][0].result_str() )
		#print( result_sorted )

		# Build List of Triggers and sort by string contents
		# XXX Only scan codes right now
		# This will need to expand to a
		# TODO

		# Build List of Results and sort by string contents
		# TODO
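		# Note: trigger_str() / result_str() are assumed to return canonical string forms of each
		# map expression's trigger and result, so the two sorted() calls above order the expressions
		# alphabetically on those strings; trigger_filtered drops USBCode-typed triggers, presumably
		# because those are handled through the ScanCode reduction instead.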
	def sort_map_index_lists( self ):
		'''
		Sort the Trigger and Result macro lists using the generated indices (not yet implemented)
		'''

	def generate_map_offset_table( self ):
		'''
		Generate the map offset table needed for interconnect devices (not yet implemented)
		'''

	def generate_trigger_lists( self ):
		'''
		Generate the trigger lists (not yet implemented)
		'''
	def analyze( self ):
		'''
		Analyze the set of configured contexts

		TODO: Perhaps use emitters or something like it for this code? -HaaTa
		'''
		# Reduce Contexts
		# Convert all trigger USBCodes to ScanCodes
		self.reduction()

		# Generate Indices
		# Assigns a sequential index (starting from 0) for each map expression
		self.generate_mapping_indices()

		# Sort Index Lists
		# Using indices sort Trigger and Results macros
		self.sort_map_index_lists()

		# Generate Offset Table
		# This is needed for interconnect devices
		self.generate_map_offset_table()

		# Generate Trigger Lists
		self.generate_trigger_lists()

	def process( self ):
		'''
		Data Analysis Stage Processing
		'''
		self._status = 'Running'

		# Determine colorization setting
		self.color = self.control.stage('CompilerConfigurationStage').color

		# Acquire list of contexts
		self.layer_contexts = self.control.stage('DataFinalizationStage').layer_contexts
		self.full_context = self.control.stage('DataFinalizationStage').full_context

		# Analyze set of contexts
		self.analyze()

		self._status = 'Completed'
class CodeGenerationStage( Stage ):
	'''
	Code Generation Stage

	* Generates code for the given firmware backend
	* Backend is selected in the Compiler Configuration Stage
	* Uses the specified emitter to generate the code
	'''
	def __init__( self, control ):
		'''
		Initialize configuration variables
		'''
		super().__init__( control )

	def command_line_args( self, args ):
		'''
		Group parser for command line arguments

		@param args: Name space of processed arguments
		'''
		self.control.stage('CompilerConfigurationStage').emitters.command_line_args( args )

	def command_line_flags( self, parser ):
		'''
		Group parser for command line options

		@param parser: argparse setup object
		'''
		# Create new option group
		#group = parser.add_argument_group('\033[1mCode Generation Configuration\033[0m')

		# Create options groups for each of the Emitters
		self.control.stage('CompilerConfigurationStage').emitters.command_line_flags( parser )

	def process( self ):
		'''
		Code Generation Stage Processing
		'''
		self._status = 'Running'

		# Determine colorization setting
		self.color = self.control.stage('CompilerConfigurationStage').color

		# Get Emitter object
		self.emitter = self.control.stage('CompilerConfigurationStage').emitters.emitter(
			self.control.stage('CompilerConfigurationStage').emitter
		)

		# Call Emitter
		self.emitter.process()

		# Generate Outputs using Emitter
		self.emitter.output()

		self._status = 'Completed'
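# The emitter name passed to emitters.emitter() above is whatever CompilerConfigurationStage.emitter
# was set to earlier in this file (backend selection happens there, as the class docstring notes).
# The returned backend object is assumed to expose process() to build its output data and output()
# to write the generated files, matching the two calls above.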
class ReportGenerationStage( Stage ):
	'''
	Report Generation Stage

	* Using the datastructures and analyzed data, generate a compiler report
	* TODO
	'''