KLL Compiler
This repository is archived and read-only.

stage.py 60KB

#!/usr/bin/env python3
'''
KLL Compiler Stage Definitions
'''

# Copyright (C) 2016 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.


### Imports ###

from multiprocessing.dummy import Pool as ThreadPool

import io
import multiprocessing
import os
import re
import sys

import common.context as context
import common.expression as expression
import common.file as file

import emitters.emitters as emitters

from funcparserlib.lexer import make_tokenizer, Token, LexerError
from funcparserlib.parser import many, oneplus, maybe, skip, NoParseError, Parser_debug


### Decorators ###

## Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
WARNING = '\033[5;1;33mWARNING\033[0m:'

ansi_escape = re.compile(r'\x1b[^m]*m')
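
# ERROR and WARNING are pre-colorized message prefixes used by every stage below;
# ansi_escape matches the ANSI colour sequences so they can be stripped again when
# colour output is disabled. Illustrative use, mirroring the print calls later in
# this file:
#   output = "\033[1mstage.py\033[0m: message"
#   plain = ansi_escape.sub( '', output )  # -> "stage.py: message"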


### Classes ###

class ControlStage:
    '''
    Top-level Stage

    Controls the order in which each stage is processed
    '''
    def __init__( self ):
        '''
        Initialize stage objects and control variables
        '''
        # Initialized in process order
        # NOTE: Only unique classes in this list, otherwise stage() will get confused
        self.stages = [
            CompilerConfigurationStage( self ),
            FileImportStage( self ),
            PreprocessorStage( self ),
            OperationClassificationStage( self ),
            OperationSpecificsStage( self ),
            OperationOrganizationStage( self ),
            DataOrganizationStage( self ),
            DataFinalizationStage( self ),
            DataAnalysisStage( self ),
            CodeGenerationStage( self ),
            #ReportGenerationStage( self ),
        ]

        self.git_rev = None
        self.git_changes = None
        self.version = None

    def stage( self, context_str ):
        '''
        Returns the stage object of the associated string name of the class

        @param context_str: String name of the class of the stage, e.g. CompilerConfigurationStage
        '''
        return [ stage for stage in self.stages if type( stage ).__name__ == context_str ][0]

    def command_line_args( self, args ):
        '''
        Capture command line arguments for each processing stage

        @param args: Namespace of processed arguments
        '''
        for stage in self.stages:
            stage.command_line_args( args )

    def command_line_flags( self, parser ):
        '''
        Prepare group parser for each processing stage

        @param parser: argparse setup object
        '''
        for stage in self.stages:
            stage.command_line_flags( parser )

    def process( self ):
        '''
        Main processing section
        Initializes each stage in order.
        Each stage must complete before the next one begins.
        '''
        # Run report even if stage doesn't complete
        run_report = False

        for stage in self.stages:
            stage.process()

            # Make sure stage has successfully completed
            if stage.status() != 'Completed':
                print( "{0} Invalid stage status '{1}' for '{2}'.".format(
                    ERROR,
                    stage.status(),
                    stage.__class__.__name__,
                ) )
                run_report = True
                break

        # Only need to explicitly run reports if there was a stage problem
        # Otherwise reports are run automatically
        if run_report:
            # TODO
            sys.exit( 1 )
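
# Sketch of how a frontend script is expected to drive ControlStage (illustrative
# only; the real kll entry point may differ):
#   control = ControlStage()
#   parser = argparse.ArgumentParser()
#   control.command_line_flags( parser )   # each stage registers its option group
#   control.command_line_args( parser.parse_args() )
#   control.process()                      # runs the stages in order, exits on failure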


class Stage:
    '''
    Base Stage Class
    '''
    def __init__( self, control ):
        '''
        Stage initialization

        @param control: ControlStage object, used to access data from other stages
        '''
        self.control = control
        self.color = False
        self._status = 'Queued'

    def command_line_args( self, args ):
        '''
        Group parser for command line arguments

        @param args: Namespace of processed arguments
        '''
        print( "{0} '{1}' '{2}' has not been implemented yet"
            .format(
                WARNING,
                self.command_line_args.__name__,
                type( self ).__name__
            )
        )

    def command_line_flags( self, parser ):
        '''
        Group parser for command line options

        @param parser: argparse setup object
        '''
        print( "{0} '{1}' '{2}' has not been implemented yet"
            .format(
                WARNING,
                self.command_line_flags.__name__,
                type( self ).__name__
            )
        )

    def process( self ):
        '''
        Main processing section
        '''
        self._status = 'Running'

        print( "{0} '{1}' '{2}' has not been implemented yet"
            .format(
                WARNING,
                self.process.__name__,
                type( self ).__name__
            )
        )

        self._status = 'Completed'

    def status( self ):
        '''
        Returns the current status of the Stage

        Values:
          Queued     - Not yet run
          Running    - Currently running
          Completed  - Successfully completed
          Incomplete - Unsuccessfully completed
        '''
        return self._status
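
# Subclass contract: stages override command_line_args(), command_line_flags() and
# process(); the base implementations only print a WARNING so unimplemented hooks
# are easy to spot. Every process() override is responsible for setting
# self._status to 'Running' on entry and 'Completed' or 'Incomplete' on exit,
# since ControlStage.process() stops the pipeline on anything but 'Completed'.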


class CompilerConfigurationStage( Stage ):
    '''
    Compiler Configuration Stage

    * Does initial setup of KLL compiler.
    * Handles any global configuration that must be done before parsing can begin
    '''
    def __init__( self, control ):
        '''
        Initialize compiler configuration variables
        '''
        super().__init__( control )

        self.color = "auto"
        self.jobs = multiprocessing.cpu_count()
        self.pool = None

        # Build list of emitters
        self.emitters = emitters.Emitters( control )
        self.emitter = self.emitters.emitter_default()

    def command_line_args( self, args ):
        '''
        Group parser for command line arguments

        @param args: Namespace of processed arguments
        '''
        self.emitter = args.emitter
        self.color = args.color
        self.jobs = args.jobs

        # Validate color argument before processing
        if self.color not in ['auto', 'always', 'never']:
            print( "Invalid color option '{0}'".format( self.color ) )
            sys.exit( 2 )

        # TODO Detect whether colorization should be used
        self.color = self.color in ['auto', 'always']

    def command_line_flags( self, parser ):
        '''
        Group parser for command line options

        @param parser: argparse setup object
        '''
        # Create new option group
        group = parser.add_argument_group('\033[1mCompiler Configuration\033[0m')

        # Optional Arguments
        group.add_argument( '--emitter', type=str, default=self.emitter,
            help="Specify target emitter for the KLL compiler.\n"
            "\033[1mDefault\033[0m: {0}\n"
            "\033[1mOptions\033[0m: {1}".format( self.emitter, self.emitters.emitter_list() )
        )
        group.add_argument( '--color', type=str, default=self.color,
            help="Specify debug colorizer mode.\n"
            "\033[1mDefault\033[0m: {0}\n"
            "\033[1mOptions\033[0m: auto, always, never (auto attempts to detect support)".format( self.color )
        )
        group.add_argument( '--jobs', type=int, default=self.jobs,
            help="Specify max number of threads to use.\n"
            "\033[1mDefault\033[0m: {0}".format( self.jobs )
        )

    def process( self ):
        '''
        Compiler Configuration Processing
        '''
        self._status = 'Running'

        # Initialize thread pool
        self.pool = ThreadPool( self.jobs )

        self._status = 'Completed'
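
# Note: multiprocessing.dummy.Pool is a thread pool (threads, not processes), so
# the pool.map() calls in later stages share the same interpreter state and can
# mutate KLL Context objects directly; it does not provide process-level parallelism.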


class FileImportStage( Stage ):
    '''
    File Import Stage

    * Loads text of all files into memory
    * Does initial sorting of KLL Contexts based upon command line arguments
    '''
    def __init__( self, control ):
        '''
        Initialize file storage datastructures and variables
        '''
        super().__init__( control )

        # These lists are order sensitive
        self.generic_files = []
        self.config_files = []
        self.base_files = []
        self.default_files = []

        # This is a list of lists, each sub list is another layer in order from 1 to max
        self.partial_files = []

        # List of all files contained in KLLFile objects
        self.kll_files = []

    def command_line_args( self, args ):
        '''
        Group parser for command line arguments

        @param args: Namespace of processed arguments
        '''
        self.generic_files = args.generic
        self.config_files = args.config
        self.base_files = args.base
        self.default_files = args.default
        self.partial_files = args.partial

    def command_line_flags( self, parser ):
        '''
        Group parser for command line options

        @param parser: argparse setup object
        '''
        # Create new option group
        group = parser.add_argument_group('\033[1mFile Context Configuration\033[0m')

        # Positional Arguments
        group.add_argument( 'generic', type=str, nargs='*', default=self.generic_files,
            help="Auto-detect context of .kll files, defaults to a base map configuration."
        )

        # Optional Arguments
        group.add_argument( '--config', type=str, nargs='+', default=self.config_files,
            help="Specify base configuration .kll files, earliest priority"
        )
        group.add_argument( '--base', type=str, nargs='+', default=self.base_files,
            help="Specify base map configuration, applied after config .kll files.\n"
            "The base map is applied prior to all default and partial maps and is used as the basis for them."
        )
        group.add_argument( '--default', type=str, nargs='+', default=self.default_files,
            help="Specify .kll files to layer on top of the default map to create a combined map.\n"
            "Also known as layer 0."
        )
        group.add_argument( '--partial', type=str, nargs='+', action='append', default=self.partial_files,
            help="Specify .kll files to generate partial map, multiple files per flag.\n"
            "Each -p defines another partial map.\n"
            "Base .kll files (that define the scan code maps) must be defined for each partial map."
        )

    def init_kllfile( self, path, file_context ):
        '''
        Prepares a KLLFile object with the given context

        @path: Path to the KLL file
        @file_context: Type of file context, e.g. DefaultMapContext
        '''
        return file.KLLFile( path, file_context )

    def process( self ):
        '''
        Process each of the files, sorting them by command line argument context order
        '''
        self._status = 'Running'

        # Determine colorization setting
        self.color = self.control.stage('CompilerConfigurationStage').color

        # Process each type of file
        # Iterates over each file in the context list and creates a KLLFile object with a context and path
        self.kll_files += map(
            lambda path: self.init_kllfile( path, context.GenericContext() ),
            self.generic_files
        )
        self.kll_files += map(
            lambda path: self.init_kllfile( path, context.ConfigurationContext() ),
            self.config_files
        )
        self.kll_files += map(
            lambda path: self.init_kllfile( path, context.BaseMapContext() ),
            self.base_files
        )
        self.kll_files += map(
            lambda path: self.init_kllfile( path, context.DefaultMapContext() ),
            self.default_files
        )

        # Partial Maps require a third parameter which specifies which layer it's in
        for layer, files in enumerate( self.partial_files ):
            self.kll_files += map(
                lambda path: self.init_kllfile( path, context.PartialMapContext( layer ) ),
                files
            )

        # Validate that all the file paths exist, exit if any of the checks fail
        if False in [ path.check() for path in self.kll_files ]:
            self._status = 'Incomplete'
            return

        # Now that we have a full list of files and their given context, we can read the files into memory
        # Uses the thread pool to speed up processing
        # Make sure processing was successful before continuing
        pool = self.control.stage('CompilerConfigurationStage').pool
        if False in pool.map( lambda kll_file: kll_file.read(), self.kll_files ):
            self._status = 'Incomplete'
            return

        self._status = 'Completed'
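
# Context priority follows the order of the file groups on the command line.
# Hypothetical invocation (executable name and file names are illustrative):
#   kll --config defaults.kll --base scancode_map.kll --default layer0.kll \
#       --partial layer1.kll --partial layer2.kll
# yields one ConfigurationContext, one BaseMapContext, one DefaultMapContext and
# two PartialMapContexts (one per --partial flag).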


class PreprocessorStage( Stage ):
    '''
    Preprocessor Stage

    * Does initial split and decision of contexts
    * Handles Preprocessor part of KLL
    '''
    def __init__( self, control ):
        '''
        Initialize preprocessor configuration variables
        '''
        super().__init__( control )

    def command_line_args( self, args ):
        '''
        Group parser for command line arguments

        @param args: Namespace of processed arguments
        '''

    def command_line_flags( self, parser ):
        '''
        Group parser for command line options

        @param parser: argparse setup object
        '''
        # Create new option group
        #group = parser.add_argument_group('\033[1mPreprocessor Configuration\033[0m')

    def seed_context( self, kll_file ):
        '''
        Build list of context

        TODO Update later for proper preprocessor
        Adds data from KLLFile into the Context
        '''
        kll_file.context.initial_context( kll_file.lines, kll_file.data, kll_file )

    def process( self ):
        '''
        Preprocessor Execution
        '''
        self._status = 'Running'

        # Determine colorization setting
        self.color = self.control.stage('CompilerConfigurationStage').color

        # Acquire thread pool
        pool = self.control.stage('CompilerConfigurationStage').pool

        # TODO
        # Once the KLL Spec has preprocessor commands, there may be a risk of infinite/circular dependencies
        # Please add a non-invasive way to avoid/warn/stop in this case -HaaTa

        # First, since initial contexts have been populated, populate details
        # TODO
        # This step will change once preprocessor commands have been added
        # Simply, this just takes the imported file data (KLLFile) and puts it in the context container
        kll_files = self.control.stage('FileImportStage').kll_files
        if False in pool.map( self.seed_context, kll_files ):
            self._status = 'Incomplete'
            return

        # Next, tokenize and parse the preprocessor KLL commands.
        # NOTE: This may result in having to create more KLL Contexts and tokenize/parse again numerous times over
        # TODO

        self._status = 'Completed'


class OperationClassificationStage( Stage ):
    '''
    Operation Classification Stage

    * Sorts operations by type based on operator
    * Tokenizes only operator pivots and left/right arguments
    * Further tokenization and parsing occurs at a later stage
    '''
    def __init__( self, control ):
        '''
        Initialize operation classification stage
        '''
        super().__init__( control )

        self.tokenized_data = []
        self.contexts = []

    def command_line_args( self, args ):
        '''
        Group parser for command line arguments

        @param args: Namespace of processed arguments
        '''

    def command_line_flags( self, parser ):
        '''
        Group parser for command line options

        @param parser: argparse setup object
        '''
        # Create new option group
        #group = parser.add_argument_group('\033[1mOperation Classification Configuration\033[0m')

    def merge_tokens( self, token_list, token_type ):
        '''
        Merge list of tokens into a single token

        @param token_list: List of tokens
        @param token_type: String name of token type
        '''
        # Initial token parameters
        ret_token = Token( token_type, '' )

        # Set start/end positions of token
        ret_token.start = token_list[0].start
        ret_token.end = token_list[-1].end

        # Build token value
        for token in token_list:
            ret_token.value += token.value

        return ret_token

    def tokenize( self, kll_context ):
        '''
        Tokenize a single string

        @param kll_context: KLL Context containing file data
        '''
        ret = True

        # Basic Tokens Spec
        spec = [
            ( 'Comment',   ( r' *#.*', ) ),
            ( 'Space',     ( r'[ \t]+', ) ),
            ( 'NewLine',   ( r'[\r\n]+', ) ),

            # Tokens that will be grouped together after tokenization
            # Ignored at this stage
            # This is required to isolate the Operator tags
            ( 'Misc',      ( r'r?[xyz]:[0-9]+(.[0-9]+)?', ) ), # Position context
            ( 'Misc',      ( r'\([^\)]*\)', ) ),               # Parenthesis context
            ( 'Misc',      ( r'\[[^\]]*\]', ) ),               # Square bracket context
            ( 'Misc',      ( r'"[^"]*"', ) ),                  # Double quote context
            ( 'Misc',      ( r"'[^']*'", ) ),                  # Single quote context

            ( 'Operator',  ( r'=>|<=|i:\+|i:-|i::|i:|:\+|:-|::|:|=', ) ),
            ( 'EndOfLine', ( r';', ) ),

            # Everything else to be ignored at this stage
            ( 'Misc',      ( r'.', ) ), # Everything else
        ]

        # Tokens to filter out of the token stream
        #useless = [ 'Space', 'Comment' ]
        useless = [ 'Comment', 'NewLine' ]

        # Build tokenizer that appends unknown characters to Misc Token groups
        # NOTE: This is technically slower processing wise, but allows for multi-stage tokenization
        #       Which in turn allows for parsing and tokenization rules to be simplified
        tokenizer = make_tokenizer( spec )

        # Tokenize and filter out useless tokens
        try:
            tokens = [ x for x in tokenizer( kll_context.data ) if x.type not in useless ]
        except LexerError as err:
            print( err )
            print( "{0} {1}:tokenize -> {2}:{3}".format(
                ERROR,
                self.__class__.__name__,
                kll_context.parent.path,
                err.place[0],
            ) )

        # Merge Misc tokens delimited by Operator and EndOfLine tokens
        kll_context.classification_token_data = []
        new_token = []
        last_operator = None
        for token in tokens:
            # Check for delimiter, append new_token if ready
            if token.type in ['EndOfLine', 'Operator']:
                # Determine the token type
                token_type = 'LOperatorData'
                if token.type == 'EndOfLine':
                    token_type = 'ROperatorData'

                # If this is a 'misplaced' operator, set as Misc
                if token_type == last_operator:
                    token.type = 'Misc'
                    new_token.append( token )
                    continue

                if len( new_token ) > 0:
                    # Build new token
                    kll_context.classification_token_data.append(
                        self.merge_tokens( new_token, token_type )
                    )
                    new_token = []

                kll_context.classification_token_data.append( token )
                last_operator = token_type

            # Collect Misc tokens
            elif token.type in ['Misc', 'Space']:
                new_token.append( token )

            # Invalid token for this stage
            else:
                print( "{0} Invalid token '{1}' for '{2}'".format(
                    ERROR,
                    token,
                    type( self ).__name__,
                ) )
                ret = False

        return ret
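
    # Classification example (illustrative): a KLL line such as
    #   myVariable = someValue;
    # is reduced to the four-token pattern the sort() step below expects:
    #   LOperatorData("myVariable ") Operator("=") ROperatorData(" someValue") EndOfLine(";")
    # Everything between operator pivots is merged into a single token, so only
    # the operators themselves need to be recognized at this stage.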

    def sort( self, kll_context ):
        '''
        Sorts tokenized data into expressions
        LOperatorData + Operator + ROperatorData + EndOfLine

        @param kll_context: KLL Context, contains tokenized data
        '''
        ret = True

        def validate_token( token, token_type ):
            '''
            Validate token

            @param token: Given token to validate
            @param token_type: Token type to validate against

            @return True if the token is correct
            '''
            ret = token.type == token_type

            # Error message
            if not ret:
                print( "Expected: '{0}' got '{1}':{2} '{3}'".format(
                    token_type,
                    token.type,
                    token._pos_str(),
                    token.value,
                ) )

            return ret

        tokens = kll_context.classification_token_data
        for index in range( 0, len( tokens ), 4 ):
            # Make sure enough tokens exist
            if index + 3 >= len( tokens ):
                print( "Not enough tokens left: {0}".format( tokens[index:] ) )
                print( "Expected: LOperatorData, Operator, ROperatorData, EndOfLine" )
                print( "{0} {1}:sort -> {2}:{3}".format(
                    ERROR,
                    self.__class__.__name__,
                    kll_context.parent.path,
                    tokens[-1].start[0],
                ) )
                ret = False
                break

            # Validate the tokens are what was expected
            ret = validate_token( tokens[index], 'LOperatorData' ) and ret
            ret = validate_token( tokens[index + 1], 'Operator' ) and ret
            ret = validate_token( tokens[index + 2], 'ROperatorData' ) and ret
            ret = validate_token( tokens[index + 3], 'EndOfLine' ) and ret

            # Append expression
            kll_context.expressions.append(
                expression.Expression( tokens[index], tokens[index + 1], tokens[index + 2], kll_context )
            )

        return ret

    def process( self ):
        '''
        Operation Classification Processing
        '''
        self._status = 'Running'

        # Determine colorization setting
        self.color = self.control.stage('CompilerConfigurationStage').color

        # Acquire thread pool
        pool = self.control.stage('CompilerConfigurationStage').pool

        # Get list of KLLFiles
        kll_files = self.control.stage('FileImportStage').kll_files

        # Build list of contexts
        self.contexts = [ kll_file.context for kll_file in kll_files ]

        # Tokenize operators
        # TODO
        #   Once preprocessor includes are implemented use a second kll_files list
        #   This way processing doesn't have to be recursive for a few stages -HaaTa
        if False in pool.map( self.tokenize, self.contexts ):
            self._status = 'Incomplete'
            return

        # Sort elements into expressions
        # LOperatorData + Operator + ROperatorData + EndOfLine
        if False in pool.map( self.sort, self.contexts ):
            self._status = 'Incomplete'
            return

        self._status = 'Completed'


class OperationSpecificsStage( Stage ):
    '''
    Operation Specifics Stage

    * For each sorted operation, tokenize and parse the left/right arguments
    * Data is stored with the operation, but no context is given to the data beyond the argument types
    '''
    def __init__( self, control ):
        '''
        Initialize operation specifics stage
        '''
        super().__init__( control )

        self.parser_debug = False
        self.parser_token_debug = False
        self.token_debug = False

    def command_line_args( self, args ):
        '''
        Group parser for command line arguments

        @param args: Namespace of processed arguments
        '''
        self.parser_debug = args.parser_debug
        self.parser_token_debug = args.parser_token_debug
        self.token_debug = args.token_debug

        # Auto-set parser_debug if parser_token_debug is set
        if self.parser_token_debug:
            self.parser_debug = True

    def command_line_flags( self, parser ):
        '''
        Group parser for command line options

        @param parser: argparse setup object
        '''
        # Create new option group
        group = parser.add_argument_group('\033[1mOperation Specifics Configuration\033[0m')

        # Optional Arguments
        group.add_argument( '--parser-debug', action='store_true', default=self.parser_debug,
            help="Enable parser debug output.\n",
        )
        group.add_argument( '--parser-token-debug', action='store_true', default=self.parser_token_debug,
            help="Enable parser-stage token debug output.\n",
        )
        group.add_argument( '--token-debug', action='store_true', default=self.token_debug,
            help="Enable tokenization debug output.\n",
        )

    ## Tokenizers ##

    def tokenize_base( self, kll_expression, lspec, rspec ):
        '''
        Base tokenization logic for this stage

        @param kll_expression: KLL expression to tokenize
        @param lspec: Regex tokenization spec for the left parameter
        @param rspec: Regex tokenization spec for the right parameter

        @return False if a LexerError was detected
        '''
        # Build tokenizers for lparam and rparam
        ltokenizer = make_tokenizer( lspec )
        rtokenizer = make_tokenizer( rspec )

        # Tokenize lparam and rparam
        # Ignore the generators, not useful in this case (i.e. use list())
        err_pos = [] # Error positions
        try:
            kll_expression.lparam_sub_tokens = list( ltokenizer( kll_expression.lparam_token.value ) )
        except LexerError as err:
            # Determine place in constructed expression
            err_pos.append( err.place[1] )
            print( type( err ).__name__, err )

        try:
            kll_expression.rparam_sub_tokens = list( rtokenizer( kll_expression.rparam_token.value ) )
        except LexerError as err:
            # Determine place in constructed expression
            err_pos.append( err.place[1] + kll_expression.rparam_start() )
            print( type( err ).__name__, err )

        # Display more information if any errors were detected
        if len( err_pos ) > 0:
            print( kll_expression.point_chars( err_pos ) )
            return False

        return True
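
    # funcparserlib's make_tokenizer() takes a list of ( token_name, ( regex, ) )
    # pairs and returns a callable yielding Token objects, trying the patterns in
    # order at each position. That is why the more specific patterns below (e.g.
    # 'ScanCode') are listed before the catch-all 'Name' and 'Number' entries.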

    def tokenize_name_association( self, kll_expression ):
        '''
        Tokenize lparam and rparam in name association expressions
        <lparam> => <rparam>;
        '''
        # Define tokenization regex
        lspec = [
            ( 'Name',  ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
            ( 'Space', ( r'[ \t]+', ) ),
        ]

        rspec = [
            ( 'Space',       ( r'[ \t]+', ) ),
            ( 'Parenthesis', ( r'\(|\)', ) ),
            ( 'Operator',    ( r':', ) ),
            ( 'Comma',       ( r',', ) ),
            ( 'Name',        ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
            ( 'Number',      ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
        ]

        # Tokenize, expression stores the result, status is returned
        return self.tokenize_base( kll_expression, lspec, rspec )
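
    # Name association examples (illustrative), matching the two forms handled by
    # parse_name_association() below:
    #   myCapability => My_C_Function( arg1 : 1, arg2 : 2 );  # capability
    #   myDefine => My_C_Define;                              # define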

    def tokenize_data_association( self, kll_expression ):
        '''
        Tokenize lparam and rparam in data association expressions
        <lparam> <= <rparam>;
        '''
        # Define tokenization regex
        lspec = [
            ( 'Space',          ( r'[ \t]+', ) ),
            ( 'ScanCode',       ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'ScanCodeStart',  ( r'S\[', ) ),
            ( 'Pixel',          ( r'P((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'PixelStart',     ( r'P\[', ) ),
            ( 'Animation',      ( r'A"[^"]+"', ) ),
            ( 'AnimationStart', ( r'A\[', ) ),
            ( 'CodeBegin',      ( r'\[', ) ),
            ( 'CodeEnd',        ( r'\]', ) ),
            ( 'Position',       ( r'r?[xyz]:[0-9]+(.[0-9]+)?', ) ),
            ( 'Comma',          ( r',', ) ),
            ( 'Dash',           ( r'-', ) ),
            ( 'Number',         ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
            ( 'Name',           ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
        ]

        rspec = [
            ( 'Space',           ( r'[ \t]+', ) ),
            ( 'Pixel',           ( r'P((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'PixelStart',      ( r'P\[', ) ),
            ( 'PixelLayer',      ( r'PL((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'PixelLayerStart', ( r'PL\[', ) ),
            ( 'Animation',       ( r'A"[^"]+"', ) ),
            ( 'AnimationStart',  ( r'A\[', ) ),
            ( 'CodeBegin',       ( r'\[', ) ),
            ( 'CodeEnd',         ( r'\]', ) ),
            ( 'Position',        ( r'r?[xyz]:[0-9]+(.[0-9]+)?', ) ),
            ( 'PixelOperator',   ( r'(\+:|-:|>>|<<)', ) ),
            ( 'Operator',        ( r':', ) ),
            ( 'Comma',           ( r',', ) ),
            ( 'Dash',            ( r'-', ) ),
            ( 'Plus',            ( r'\+', ) ),
            ( 'Parenthesis',     ( r'\(|\)', ) ),
            ( 'Number',          ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
            ( 'Name',            ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
        ]

        # Tokenize, expression stores the result, status is returned
        return self.tokenize_base( kll_expression, lspec, rspec )

    def tokenize_assignment( self, kll_expression ):
        '''
        Tokenize lparam and rparam in assignment expressions
        <lparam> = <rparam>;
        '''
        # Define tokenization regex
        lspec = [
            ( 'Space',     ( r'[ \t]+', ) ),
            ( 'Number',    ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
            ( 'Name',      ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
            ( 'CodeBegin', ( r'\[', ) ),
            ( 'CodeEnd',   ( r'\]', ) ),
        ]

        rspec = [
            ( 'Space',            ( r'[ \t]+', ) ),
            ( 'String',           ( r'"[^"]*"', ) ),
            ( 'SequenceString',   ( r"'[^']*'", ) ),
            ( 'Number',           ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
            ( 'Name',             ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
            ( 'VariableContents', ( r'''[^"' ;:=>()]+''', ) ),
        ]

        # Tokenize, expression stores the result, status is returned
        return self.tokenize_base( kll_expression, lspec, rspec )
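
    # Assignment examples (illustrative), matching the parse_assignment() rules below:
    #   myVariable = someValue;      # simple variable
    #   myArray[] = val0 val1 val2;  # space-separated array
    #   myArray[2] = val2;           # single indexed element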

    def tokenize_mapping( self, kll_expression ):
        '''
        Tokenize lparam and rparam in mapping expressions

        <lparam>  :  <rparam>; # Set mapping
        <lparam>  :+ <rparam>; # Mapping append
        <lparam>  :- <rparam>; # Mapping removal
        <lparam>  :: <rparam>; # Replace mapping (does nothing if nothing to replace)

        Isolated versions of mappings
        When expressions are evaluated during runtime, any non-isolated mapping expressions are cancelled
        <lparam> i:  <rparam>;
        <lparam> i:+ <rparam>;
        <lparam> i:- <rparam>;
        <lparam> i:: <rparam>;
        '''
        # Define tokenization regex
        lspec = [
            ( 'Space',          ( r'[ \t]+', ) ),
            ( 'USBCode',        ( r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'USBCodeStart',   ( r'U\[', ) ),
            ( 'ConsCode',       ( r'CONS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'ConsCodeStart',  ( r'CONS\[', ) ),
            ( 'SysCode',        ( r'SYS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'SysCodeStart',   ( r'SYS\[', ) ),
            ( 'ScanCode',       ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'ScanCodeStart',  ( r'S\[', ) ),
            ( 'IndCode',        ( r'I(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'IndicatorStart', ( r'I\[', ) ),
            ( 'Pixel',          ( r'P((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'PixelStart',     ( r'P\[', ) ),
            ( 'Animation',      ( r'A"[^"]+"', ) ),
            ( 'AnimationStart', ( r'A\[', ) ),
            ( 'CodeBegin',      ( r'\[', ) ),
            ( 'CodeEnd',        ( r'\]', ) ),
            ( 'String',         ( r'"[^"]*"', ) ),
            ( 'SequenceString', ( r"'[^']*'", ) ),
            ( 'Operator',       ( r':', ) ),
            ( 'Comma',          ( r',', ) ),
            ( 'Dash',           ( r'-', ) ),
            ( 'Plus',           ( r'\+', ) ),
            ( 'Parenthesis',    ( r'\(|\)', ) ),
            ( 'Timing',         ( r'[0-9]+(.[0-9]+)?((s)|(ms)|(us)|(ns))', ) ),
            ( 'Number',         ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
            ( 'Name',           ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
        ]

        rspec = [
            ( 'Space',           ( r'[ \t]+', ) ),
            ( 'USBCode',         ( r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'USBCodeStart',    ( r'U\[', ) ),
            ( 'ConsCode',        ( r'CONS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'ConsCodeStart',   ( r'CONS\[', ) ),
            ( 'SysCode',         ( r'SYS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'SysCodeStart',    ( r'SYS\[', ) ),
            ( 'ScanCode',        ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'ScanCodeStart',   ( r'S\[', ) ),
            ( 'Pixel',           ( r'P((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'PixelStart',      ( r'P\[', ) ),
            ( 'PixelLayer',      ( r'PL((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
            ( 'PixelLayerStart', ( r'PL\[', ) ),
            ( 'Animation',       ( r'A"[^"]+"', ) ),
            ( 'AnimationStart',  ( r'A\[', ) ),
            ( 'CodeBegin',       ( r'\[', ) ),
            ( 'CodeEnd',         ( r'\]', ) ),
            ( 'String',          ( r'"[^"]*"', ) ),
            ( 'SequenceString',  ( r"'[^']*'", ) ),
            ( 'None',            ( r'None', ) ),
            ( 'Operator',        ( r':', ) ),
            ( 'Comma',           ( r',', ) ),
            ( 'Dash',            ( r'-', ) ),
            ( 'Plus',            ( r'\+', ) ),
            ( 'Parenthesis',     ( r'\(|\)', ) ),
            ( 'Timing',          ( r'[0-9]+(.[0-9]+)?((s)|(ms)|(us)|(ns))', ) ),
            ( 'Number',          ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', ) ),
            ( 'Name',            ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
        ]

        # Tokenize, expression stores the result, status is returned
        return self.tokenize_base( kll_expression, lspec, rspec )
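
    # Mapping examples (illustrative; the exact syntax is defined by the KLL spec,
    # not this file):
    #   S0x1B : U"Esc";       # set a scan code to USB code mapping
    #   S0x1B :+ U"LShift";   # append to an existing mapping
    #   S0x1B i:- U"LShift";  # isolated removal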

    ## Parsers ##

    def parse_base( self, kll_expression, parse_expression, quiet ):
        '''
        Base parsing logic

        @param kll_expression: Expression being parsed, contains tokens
        @param parse_expression: Parse tree expression that understands the group of tokens
        @param quiet: Reduces verbosity, used when re-running an errored command in debug mode

        @return: False if parsing wasn't successful
        '''
        ret = True

        try:
            # Since the expressions have already been pre-organized, we only expect a single expression at a time
            ret = parse_expression.parse( kll_expression.final_tokens() )

            # Parse interpretation error, more info is provided by the specific parse interpreter
            if not ret and not quiet:
                print( kll_expression.final_tokens() )

        except NoParseError as err:
            if not quiet:
                print( kll_expression.final_tokens() )
                print( err )
            ret = False

        return ret

    def parse_name_association( self, kll_expression, quiet=False ):
        '''
        Parse name association expressions
        <lparam> => <rparam>;
        '''
        # Import parse elements/lambda functions
        from common.parse import (
            comma,
            name,
            number,
            operator,
            parenthesis,
            unarg,
            Make,
        )

        # Name Association
        # <capability name> => <c function>;
        capability_arguments = name + skip( operator(':') ) + number + skip( maybe( comma ) ) >> unarg( Make.capArg )
        capability_expression = name + skip( operator('=>') ) + name + skip( parenthesis('(') ) + many( capability_arguments ) + skip( parenthesis(')') ) >> unarg( kll_expression.capability )

        # Name Association
        # <define name> => <c define>;
        define_expression = name + skip( operator('=>') ) + name >> unarg( kll_expression.define )

        # Top-level Parser
        expr = (
            capability_expression |
            define_expression
        )

        return self.parse_base( kll_expression, expr, quiet )

    def parse_data_association( self, kll_expression, quiet=False ):
        '''
        Parse data association expressions
        <lparam> <= <rparam>;
        '''
        from common.parse import (
            animation_def,
            animation_elem,
            animation_flattened,
            animation_modlist,
            comma,
            flatten,
            operator,
            pixel_elem,
            pixel_expanded,
            pixelmod_elem,
            position_list,
            triggerCode_outerList,
            unarg,
        )

        # Data Association
        # <animation>       <= <modifiers>;
        # <animation frame> <= <modifiers>;
        animation_expression = ( animation_elem | animation_def ) + skip( operator('<=') ) + animation_modlist >> unarg( kll_expression.animation )
        animationFrame_expression = animation_flattened + skip( operator('<=') ) + many( pixelmod_elem + skip( maybe( comma ) ) ) >> unarg( kll_expression.animationFrame )

        # Data Association
        # <pixel> <= <position>;
        pixelPosition_expression = ( pixel_expanded | pixel_elem ) + skip( operator('<=') ) + position_list >> unarg( kll_expression.pixelPosition )

        # Data Association
        # <scancode> <= <position>;
        scanCodePosition_expression = ( triggerCode_outerList >> flatten >> flatten ) + skip( operator('<=') ) + position_list >> unarg( kll_expression.scanCodePosition )

        # Top-level Parser
        expr = (
            animation_expression |
            animationFrame_expression |
            pixelPosition_expression |
            scanCodePosition_expression
        )

        return self.parse_base( kll_expression, expr, quiet )

    def parse_assignment( self, kll_expression, quiet=False ):
        '''
        Parse assignment expressions
        <lparam> = <rparam>;
        '''
        # Import parse elements/lambda functions
        from common.parse import (
            code_begin,
            code_end,
            comma,
            content,
            dash,
            name,
            number,
            operator,
            string,
            unarg,
            unseqString,
        )

        # Assignment
        # <variable> = <variable contents>;
        variable_contents = name | content | string | number | comma | dash | unseqString
        variable_expression = name + skip( operator('=') ) + oneplus( variable_contents ) >> unarg( kll_expression.variable )

        # Array Assignment
        # <variable>[]        = <space> <separated> <list>;
        # <variable>[<index>] = <index element>;
        array_expression = name + skip( code_begin ) + maybe( number ) + skip( code_end ) + skip( operator('=') ) + oneplus( variable_contents ) >> unarg( kll_expression.array )

        # Top-level Parser
        expr = (
            array_expression |
            variable_expression
        )

        return self.parse_base( kll_expression, expr, quiet )

    def parse_mapping( self, kll_expression, quiet=False ):
        '''
        Parse mapping expressions

        <lparam>  :  <rparam>; # Set mapping
        <lparam>  :+ <rparam>; # Mapping append
        <lparam>  :- <rparam>; # Mapping removal
        <lparam>  :: <rparam>; # Replace mapping (does nothing if nothing to replace)

        Isolated versions of mappings
        When expressions are evaluated during runtime, any non-isolated mapping expressions are cancelled
        <lparam> i:  <rparam>;
        <lparam> i:+ <rparam>;
        <lparam> i:- <rparam>;
        <lparam> i:: <rparam>;
        '''
        # Import parse elements/lambda functions
        from common.parse import (
            animation_expanded,
            none,
            operator,
            pixelchan_elem,
            resultCode_outerList,
            scanCode_single,
            triggerCode_outerList,
            triggerUSBCode_outerList,
            unarg,
        )

        # Mapping
        # <trigger> : <result>;
        operatorTriggerResult = operator(':') | operator(':+') | operator(':-') | operator('::') | operator('i:') | operator('i:+') | operator('i:-') | operator('i::')
        scanCode_expression = triggerCode_outerList + operatorTriggerResult + resultCode_outerList >> unarg( kll_expression.scanCode )
        usbCode_expression = triggerUSBCode_outerList + operatorTriggerResult + resultCode_outerList >> unarg( kll_expression.usbCode )
        animation_trigger = animation_expanded + operatorTriggerResult + resultCode_outerList >> unarg( kll_expression.animationTrigger )

        # Data Association
        # <pixel chan> : <scanCode>;
        pixelChan_expression = pixelchan_elem + skip( operator(':') ) + ( scanCode_single | none ) >> unarg( kll_expression.pixelChannels )

        # Top-level Parser
        expr = (
            scanCode_expression |
            usbCode_expression |
            pixelChan_expression |
            animation_trigger
        )

        return self.parse_base( kll_expression, expr, quiet )
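
    # The rules above are funcparserlib parser combinators: `+` sequences parsers,
    # `|` tries alternatives in order, skip() drops a matched token from the result,
    # many()/oneplus()/maybe() handle repetition, and `>>` pipes the parsed values
    # through a post-processing function (here the kll_expression methods that
    # store the extracted data on the expression object).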

    ## Processing ##

    def tokenize( self, kll_context ):
        '''
        Tokenizes contents of both LOperatorData and ROperatorData
        LOperatorData and ROperatorData have different contexts, so tokenization can be simplified a bit

        @param kll_context: KLL Context containing file data
        '''
        ret = True

        # Tokenizer map, each takes an expression argument
        tokenizers = {
            # Name association
            '=>' : self.tokenize_name_association,

            # Data association
            '<=' : self.tokenize_data_association,

            # Assignment
            '=' : self.tokenize_assignment,

            # Mapping
            # All : based operators have the same structure
            # The only difference is the application context (handled in a later stage)
            ':' : self.tokenize_mapping,
        }

        # Tokenize left and right parameters of the expression
        for kll_expression in kll_context.expressions:
            # Determine which parser to use
            token = kll_expression.operator_type()

            # If there was a problem tokenizing, display expression info
            if not tokenizers[ token ]( kll_expression ):
                ret = False
                print( "{0} {1}:tokenize -> {2}:{3}".format(
                    ERROR,
                    self.__class__.__name__,
                    kll_context.parent.path,
                    kll_expression.lparam_token.start[0],
                ) )

            # Debug Output
            # Displays each parsed expression on a single line
            # Includes <filename>:<line number>
            if self.token_debug:
                # Uncolorize if requested
                output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m:\033[1;32m{2}\033[0m\033[1;36;41m>\033[0m {3}".format(
                    os.path.basename( kll_context.parent.path ),
                    kll_expression.lparam_token.start[0],
                    kll_expression.__class__.__name__,
                    kll_expression.final_tokens(),
                )
                print( self.color and output or ansi_escape.sub( '', output ) )

        return ret

    def parse( self, kll_context ):
        '''
        Parse the fully tokenized expressions

        @param kll_context: KLL Context which has the fully tokenized expression list
        '''
        ret = True

        # Parser map of functions, each takes an expression argument
        parsers = {
            # Name association
            '=>' : self.parse_name_association,

            # Data association
            '<=' : self.parse_data_association,

            # Assignment
            '=' : self.parse_assignment,

            # Mapping
            # All : based operators have the same structure
            # The only difference is the application context (handled in a later stage)
            ':' : self.parse_mapping,
        }

        # Parse each expression to extract the data from it
        for kll_expression in kll_context.expressions:
            token = kll_expression.operator_type()

            # Assume failed, unless proven otherwise
            cur_ret = False

            # In some situations we don't want a parser trace, but only disable when we know
            parser_debug_ignore = False

            # If there was a problem parsing, display expression info
            # Catch any TypeErrors due to incorrect parsing rules
            try:
                cur_ret = parsers[ token ]( kll_expression )

            # Unexpected token (user grammar error), sometimes might be a bug
            except NoParseError as err:
                import traceback
                traceback.print_tb( err.__traceback__ )
                print( type( err ).__name__, err )
                print( "Bad kll expression, usually a syntax error." )

            # Invalid parsing rules, definitely a bug
            except TypeError as err:
                import traceback
                traceback.print_tb( err.__traceback__ )
                print( type( err ).__name__, err )
                print( "Bad parsing rule, this is a bug!" )

            # Lookup error, invalid lookup
            except KeyError as err:
                import traceback
                print( "".join( traceback.format_tb( err.__traceback__ )[-1:] ), end='' )
                print( "Invalid dictionary lookup, check syntax." )
                parser_debug_ignore = True

            # Parsing failed, show more error info
            if not cur_ret:
                ret = False

                # We don't always want a full trace of the parser
                if not parser_debug_ignore:
                    # StringIO stream from funcparserlib parser.py
                    # Command failed, run again, this time with verbose logging enabled
                    # Helps debug erroneous parsing expressions
                    parser_log = io.StringIO()

                    # This part is not thread-safe
                    # You must run with --jobs 1 to get 100% valid output
                    Parser_debug( True, parser_log )
                    try:
                        parsers[ token ]( kll_expression, True )
                    except:
                        pass
                    Parser_debug( False )

                    # Display
                    print( parser_log.getvalue() )

                    # Cleanup StringIO
                    parser_log.close()

                print( "{0} {1}:parse -> {2}:{3}".format(
                    ERROR,
                    self.__class__.__name__,
                    kll_context.parent.path,
                    kll_expression.lparam_token.start[0],
                ) )

            # Debug Output
            # Displays each parsed expression on a single line
            # Includes <filename>:<line number>
            if self.parser_debug:
                # Uncolorize if requested
                output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m:\033[1;32m{2}\033[0m:\033[1;35m{3}\033[1;36;41m>\033[0m {4}".format(
                    os.path.basename( kll_context.parent.path ),
                    kll_expression.lparam_token.start[0],
                    kll_expression.__class__.__name__,
                    kll_expression.type,
                    kll_expression
                )
                print( self.color and output or ansi_escape.sub( '', output ) )

            if self.parser_token_debug:
                # Uncolorize if requested
                output = "\t\033[1;4mTokens\033[0m\033[1;36m:\033[0m {0}".format(
                    [ ( t.type, t.value ) for t in kll_expression.final_tokens() ]
                )
                print( self.color and output or ansi_escape.sub( '', output ) )

        return ret

    def process( self ):
        '''
        Operation Specifics Processing
        '''
        self._status = 'Running'

        # Determine colorization setting
        self.color = self.control.stage('CompilerConfigurationStage').color

        # Acquire thread pool
        pool = self.control.stage('CompilerConfigurationStage').pool

        # Get list of KLL contexts
        contexts = self.control.stage('OperationClassificationStage').contexts

        # Tokenize operators
        if False in pool.map( self.tokenize, contexts ):
            self._status = 'Incomplete'
            return

        # Parse operators
        if False in pool.map( self.parse, contexts ):
            self._status = 'Incomplete'
            return

        self._status = 'Completed'
  1148. class OperationOrganizationStage( Stage ):
  1149. '''
  1150. Operation Organization Stage
  1151. * Using the type of each operation, apply the KLL Context to each operation
  1152. * This results in various datastructures being populated based upon the context and type of operation
  1153. * Each Context instance (distinct Context of the same type), remain separate
  1154. '''
  1155. def __init__( self, control ):
  1156. '''
  1157. Initialize configuration variables
  1158. '''
  1159. super().__init__( control )
  1160. self.operation_organization_debug = False
  1161. self.operation_organization_display = False
  1162. def command_line_args( self, args ):
  1163. '''
  1164. Group parser for command line arguments
  1165. @param args: Name space of processed arguments
  1166. '''
  1167. self.operation_organization_debug = args.operation_organization_debug
  1168. self.operation_organization_display = args.operation_organization_display
  1169. def command_line_flags( self, parser ):
  1170. '''
  1171. Group parser for command line options
  1172. @param parser: argparse setup object
  1173. '''
  1174. # Create new option group
  1175. group = parser.add_argument_group('\033[1mOperation Organization Configuration\033[0m')
  1176. # Optional Arguments
  1177. group.add_argument(
  1178. '--operation-organization-debug',
  1179. action='store_true',
  1180. default=self.operation_organization_debug,
  1181. help="Enable operation organization debug output.\n",
  1182. )
  1183. group.add_argument(
  1184. '--operation-organization-display',
  1185. action='store_true',
  1186. default=self.operation_organization_display,
  1187. help="Show datastructure of each context after filling.\n",
  1188. )
  1189. def organize( self, kll_context ):
  1190. '''
  1191. Organize each set of expressions on a context level
  1192. The full layout organization occurs over multiple stages, this is the first one
  1193. '''
  1194. # Add each of the expressions to the organization data structure
  1195. try:
  1196. for kll_expression in kll_context.expressions:
  1197. # Debug output
  1198. if self.operation_organization_debug:
  1199. # Uncolorize if requested
  1200. output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m:\033[1;32m{2}\033[0m:\033[1;35m{3}\033[1;36;41m>\033[0m {4}".format(
  1201. os.path.basename( kll_context.parent.path ),
  1202. kll_expression.lparam_token.start[0],
  1203. kll_expression.__class__.__name__,
  1204. kll_expression.type,
  1205. kll_expression
  1206. )
  1207. print( self.color and output or ansi_escape.sub( '', output ) )
  1208. # Add expression
  1209. kll_context.organization.add_expression(
  1210. kll_expression,
  1211. ( self.operation_organization_debug, self.color )
  1212. )
  1213. except Exception as err:
  1214. import traceback
  1215. traceback.print_tb( err.__traceback__ )
  1216. print( type( err ).__name__, err )
  1217. print( "Could not add/modify kll expression in context datastructure." )
  1218. return False
  1219. return True
	def process( self ):
		'''
		Operation Organization Stage Processing
		'''
		self._status = 'Running'

		# Determine colorization setting
		self.color = self.control.stage('CompilerConfigurationStage').color

		# Acquire thread pool
		pool = self.control.stage('CompilerConfigurationStage').pool

		# Get list of KLL contexts
		contexts = self.control.stage('OperationClassificationStage').contexts

		# Add expressions from contexts to context datastructures
		if False in pool.map( self.organize, contexts ):
			self._status = 'Incomplete'
			return

		# Show result of filling datastructure
		if self.operation_organization_display:
			for kll_context in contexts:
				# Uncolorize if requested
				output = "\033[1m{0}\033[0m:\033[1;33m{1}\033[0m".format(
					os.path.basename( kll_context.parent.path ),
					kll_context.__class__.__name__
				)
				print( self.color and output or ansi_escape.sub( '', output ) )

				# Display Table
				for store in kll_context.organization.stores():
					# Uncolorize if requested
					output = "\t\033[1;4;32m{0}\033[0m".format(
						store.__class__.__name__
					)
					print( self.color and output or ansi_escape.sub( '', output ) )
					print( self.color and store or ansi_escape.sub( '', store ), end="" )

		self._status = 'Completed'

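# Sketch (illustrative, not part of the compiler) of the pool.map() fan-out pattern
# used by the process() methods above and below: organize() returns True/False per
# context, so `False in pool.map(...)` flags a failure in any worker.
#
#   results = pool.map( stage.organize, contexts )   # e.g. [True, True, False]
#   if False in results:
#       ...mark the stage 'Incomplete' and stop...
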
class DataOrganizationStage( Stage ):
	'''
	Data Organization Stage

	* Using the constructed Context datastructures, merge contexts of the same type together
	* Precedence/priority is defined by the order each Context was included on the command line
	* May include datastructure data optimizations
	'''
	def __init__( self, control ):
		'''
		Initialize configuration variables
		'''
		super().__init__( control )

		self.data_organization_debug = False
		self.data_organization_display = False
		self.contexts = None

	def command_line_args( self, args ):
		'''
		Group parser for command line arguments

		@param args: Name space of processed arguments
		'''
		self.data_organization_debug = args.data_organization_debug
		self.data_organization_display = args.data_organization_display

	def command_line_flags( self, parser ):
		'''
		Group parser for command line options

		@param parser: argparse setup object
		'''
		# Create new option group
		group = parser.add_argument_group('\033[1mData Organization Configuration\033[0m')

		# Optional Arguments
		group.add_argument(
			'--data-organization-debug',
			action='store_true',
			default=self.data_organization_debug,
			help="Show debug info from data organization stage.\n",
		)
		group.add_argument(
			'--data-organization-display',
			action='store_true',
			default=self.data_organization_display,
			help="Show datastructure of each context after merging.\n",
		)

	def sort_contexts( self, contexts ):
		'''
		Returns a dictionary of lists of sorted 'like' contexts

		This is used to group the contexts that need merging
		'''
		lists = {}

		for kll_context in contexts:
			name = kll_context.__class__.__name__

			# PartialMapContexts are sorted by name *and* layer number
			if name == "PartialMapContext":
				name = "{0}{1}".format( name, kll_context.layer )

			# Add new list if no elements yet
			if name not in lists.keys():
				lists[ name ] = [ kll_context ]
			else:
				lists[ name ].append( kll_context )

		return lists

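	# For reference, a hypothetical return value from sort_contexts(); the exact
	# keys depend on which .kll files were given on the command line:
	#
	#   {
	#       'ConfigurationContext': [ <ConfigurationContext>, ... ],
	#       'BaseMapContext':       [ <BaseMapContext>, <BaseMapContext> ],
	#       'PartialMapContext0':   [ <PartialMapContext layer=0> ],
	#       'PartialMapContext1':   [ <PartialMapContext layer=1>, ... ],
	#   }
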
	def organize( self, kll_context ):
		'''
		Symbolically merge all like Contexts

		The full layout organization occurs over multiple stages, this is the second stage
		'''
		# Lookup context name
		context_name = "{0}".format( kll_context[0].__class__.__name__ )

		# PartialMapContexts are sorted by name *and* layer number
		if context_name == "PartialMapContext":
			context_name = "{0}{1}".format( context_name, kll_context[0].layer )

		# Initialize merge context as the first one
		self.contexts[ context_name ] = context.MergeContext( kll_context[0] )

		# Indicate when a context is skipped as there is only one
		if self.data_organization_debug:
			if len( kll_context ) < 2:
				output = "\033[1;33mSkipping\033[0m\033[1m:\033[1;32m{0}\033[0m".format(
					context_name
				)
				print( self.color and output or ansi_escape.sub( '', output ) )
				return True

		# The incoming list is ordered
		# Merge in each of the contexts symbolically
		for next_context in kll_context[1:]:
			try:
				self.contexts[ context_name ].merge(
					next_context,
					( self.data_organization_debug, self.color )
				)

			except Exception as err:
				import traceback
				traceback.print_tb( err.__traceback__ )
				print( type( err ).__name__, err )
				print( "Could not merge '{0}' into '{1}' context.".format(
					os.path.basename( next_context.parent.path ),
					context_name
				) )
				return False

		return True

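	# Merge order note (see the class docstring): kll_context arrives in command
	# line order, so the first context seeds the MergeContext and each subsequent
	# context is merged on top of it, e.g.
	#
	#   context.MergeContext( ctx_a ).merge( ctx_b, ... )   # ctx_b layered over ctx_a
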
	def process( self ):
		'''
		Data Organization Stage Processing
		'''
		self._status = 'Running'

		# Determine colorization setting
		self.color = self.control.stage('CompilerConfigurationStage').color

		# Acquire thread pool
		pool = self.control.stage('CompilerConfigurationStage').pool

		# Get list of KLL contexts
		contexts = self.control.stage('OperationClassificationStage').contexts

		# Get sorted list of KLL contexts
		sorted_contexts = self.sort_contexts( contexts )
		self.contexts = {}

		# Add expressions from contexts to context datastructures
		if False in pool.map( self.organize, sorted_contexts.values() ):
			self._status = 'Incomplete'
			return

		# Show result of filling datastructure
		if self.data_organization_display:
			for key, kll_context in self.contexts.items():
				# Uncolorize if requested
				output = "\033[1;33m{0}\033[0m:\033[1m{1}\033[0m".format(
					key,
					kll_context.paths(),
				)
				print( self.color and output or ansi_escape.sub( '', output ) )

				# Display Table
				for store in kll_context.organization.stores():
					# Uncolorize if requested
					output = "\t\033[1;4;32m{0}\033[0m".format(
						store.__class__.__name__
					)
					print( self.color and output or ansi_escape.sub( '', output ) )
					print( self.color and store or ansi_escape.sub( '', store ), end="" )

		self._status = 'Completed'

class DataFinalizationStage( Stage ):
	'''
	Data Finalization Stage

	* Using the merged Context datastructures, apply the Configuration and BaseMap contexts to the higher
	  level DefaultMap and PartialMap Contexts
	* First BaseMap is applied on top of Configuration
	* Next, DefaultMap is applied on top of (Configuration+BaseMap) as well as the PartialMaps
	* May include datastructure data optimizations
	'''
	def __init__( self, control ):
		'''
		Initialize configuration variables
		'''
		super().__init__( control )

		self.data_finalization_debug = False
		self.data_finalization_display = False

		self.base_context = None
		self.default_context = None
		self.partial_contexts = None
		self.full_context = None
		self.context_list = None
		self.layer_contexts = None

	def command_line_args( self, args ):
		'''
		Group parser for command line arguments

		@param args: Name space of processed arguments
		'''
		self.data_finalization_debug = args.data_finalization_debug
		self.data_finalization_display = args.data_finalization_display

	def command_line_flags( self, parser ):
		'''
		Group parser for command line options

		@param parser: argparse setup object
		'''
		# Create new option group
		group = parser.add_argument_group('\033[1mData Finalization Configuration\033[0m')

		# Optional Arguments
		group.add_argument(
			'--data-finalization-debug',
			action='store_true',
			default=self.data_finalization_debug,
			help="Show debug info from data finalization stage.\n",
		)
		group.add_argument(
			'--data-finalization-display',
			action='store_true',
			default=self.data_finalization_display,
			help="Show datastructure of each context after final merging.\n",
		)

	def process( self ):
		'''
		Data Finalization Stage Processing
		'''
		self._status = 'Running'

		# Determine colorization setting
		self.color = self.control.stage('CompilerConfigurationStage').color

		# Get context silos
		contexts = self.control.stage('DataOrganizationStage').contexts
		self._status = 'Incomplete'

		# Context list
		self.context_list = []

		# Depending on the calling order, we may need to use a GenericContext or ConfigurationContext as the base
		# Default to ConfigurationContext first
		if 'ConfigurationContext' in contexts.keys():
			self.base_context = context.MergeContext( contexts['ConfigurationContext'] )

			# If we still have GenericContexts around, merge them on top of the ConfigurationContext
			if 'GenericContext' in contexts.keys():
				self.base_context.merge(
					contexts['GenericContext'],
					( self.data_finalization_debug, self.color )
				)

		# Otherwise, just use a GenericContext
		elif 'GenericContext' in contexts.keys():
			self.base_context = context.MergeContext( contexts['GenericContext'] )

		# Fail otherwise, you *must* have a GenericContext or ConfigurationContext
		else:
			print( "{0} Missing a 'GenericContext' and/or 'ConfigurationContext'.".format( ERROR ) )
			self._status = 'Incomplete'
			return

		# Next use the BaseMapContext and overlay on ConfigurationContext
		# This serves as the basis for the next two merges
		if 'BaseMapContext' in contexts.keys():
			self.base_context.merge(
				contexts['BaseMapContext'],
				( self.data_finalization_debug, self.color )
			)
			self.context_list.append( ( 'BaseMapContext', self.base_context ) )

		# Then use the DefaultMapContext as the default keyboard mapping
		self.default_context = context.MergeContext( self.base_context )
		if 'DefaultMapContext' in contexts.keys():
			self.default_context.merge(
				contexts['DefaultMapContext'],
				( self.data_finalization_debug, self.color )
			)
			self.context_list.append( ( 'DefaultMapContext', self.default_context ) )

		# For convenience build a fully merged dataset
		# This is usually only required for variables
		self.full_context = context.MergeContext( self.default_context )

		# Finally setup each of the PartialMapContext groups
		# Build list of PartialMapContexts and sort by layer before iterating over
		self.partial_contexts = []
		partial_context_list = [
			( item[1].layer, item[1] )
			for item in contexts.items()
			if 'PartialMapContext' in item[0]
		]
		for layer, partial in sorted( partial_context_list, key=lambda x: x[0] ):
			self.partial_contexts.append( context.MergeContext( self.base_context ) )
			self.partial_contexts[ layer ].merge(
				partial,
				( self.data_finalization_debug, self.color )
			)
			self.context_list.append( ( 'PartialMapContext{0}'.format( layer ), self.partial_contexts[ layer ] ) )
			# Add each partial to the full_context as well
			self.full_context.merge(
				partial,
				( self.data_finalization_debug, self.color )
			)

		# Build layer context list
		# Each index of the list corresponds to the keyboard layer
		self.layer_contexts = [ self.default_context ]
		self.layer_contexts.extend( self.partial_contexts )

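		# Illustrative layout of the resulting list: index 0 is the default layer,
		# the remaining indices are the PartialMap layers in ascending order, e.g.
		#
		#   [ default_context, partial_contexts[0], partial_contexts[1], ... ]
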
		# Show result of filling datastructure
		if self.data_finalization_display:
			for key, kll_context in self.context_list:
				# Uncolorize if requested
				output = "*\033[1;33m{0}\033[0m:\033[1m{1}\033[0m".format(
					key,
					kll_context.paths(),
				)
				print( self.color and output or ansi_escape.sub( '', output ) )

				# Display Table
				for store in kll_context.organization.stores():
					# Uncolorize if requested
					output = "\t\033[1;4;32m{0}\033[0m".format(
						store.__class__.__name__
					)
					print( self.color and output or ansi_escape.sub( '', output ) )
					print( self.color and store or ansi_escape.sub( '', store ), end="" )

		self._status = 'Completed'

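# Summary sketch of the finalization merge cascade implemented above (names as in
# DataFinalizationStage.process):
#
#   base_context    = Configuration (+ Generic) (+ BaseMap)
#   default_context = base_context + DefaultMap
#   partial[n]      = base_context + PartialMap(layer n)
#   full_context    = default_context + all PartialMaps   # mainly needed for variables
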
class DataAnalysisStage( Stage ):
	'''
	Data Analysis Stage

	* Using the completed Context datastructures, do additional analysis that may be required for Code Generation
	'''
	def __init__( self, control ):
		'''
		Initialize configuration variables
		'''
		super().__init__( control )

		self.layer_contexts = None
		self.full_context = None

	def command_line_args( self, args ):
		'''
		Group parser for command line arguments

		@param args: Name space of processed arguments
		'''

	def command_line_flags( self, parser ):
		'''
		Group parser for command line options

		@param parser: argparse setup object
		'''
		# Create new option group
		#group = parser.add_argument_group('\033[1mData Analysis Configuration\033[0m')

	def reduction( self ):
		'''
		Builds a new reduced_contexts list

		For each of the layers, evaluate triggers into ScanCodes (USBCode to ScanCodes)
		(all other triggers don't require reductions)
		'''
		self.reduced_contexts = []

		for layer in self.layer_contexts:
			reduced = context.MergeContext( layer )
			reduced.reduction()
			self.reduced_contexts.append( reduced )

	def generate_mapping_indices( self ):
		'''
		For each trigger:result pair generate a unique index

		The triggers and results are first sorted alphabetically
		'''
		# Build list of map expressions
		expressions = []

		# Gather list of expressions
		for layer in self.layer_contexts:
			expressions.extend( layer.organization.mapping_data.data.items() )

		# Sort expressions by trigger, there may be *duplicate* triggers however don't reduce yet
		# we need the result mappings as well
		trigger_sorted = sorted( expressions, key=lambda x: x[1][0].trigger_str() )
		trigger_filtered = [ elem for elem in trigger_sorted if not elem[1][0].type == 'USBCode' ]
		#print( trigger_filtered )

		# Sort expressions by result, there may be *duplicate* results however don't reduce yet
		# we need the result mappings as well
		result_sorted = sorted( expressions, key=lambda x: x[1][0].result_str() )
		#print( result_sorted )

		# Build List of Triggers and sort by string contents
		# XXX Only scan codes right now
		# This will need to expand to a
		#TODO

		# Build List of Results and sort by string contents
		# TODO
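		# Shape note (illustrative): each element of `expressions` is a
		# (key, value) pair taken from organization.mapping_data.data, where
		# value[0] is a map expression providing trigger_str()/result_str(),
		# used above as the sort keys; assigning indices to the sorted lists
		# is still TODO.
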
	def sort_map_index_lists( self ):
		'''
		Using indices, sort Trigger and Result macros
		'''

	def generate_map_offset_table( self ):
		'''
		Generate the map offset table, needed for interconnect devices
		'''

	def generate_trigger_lists( self ):
		'''
		Generate the trigger lists
		'''

	def analyze( self ):
		'''
		Analyze the set of configured contexts

		TODO: Perhaps use emitters or something like it for this code? -HaaTa
		'''
		# Reduce Contexts
		# Convert all trigger USBCodes to ScanCodes
		self.reduction()

		# Generate Indices
		# Assigns a sequential index (starting from 0) for each map expression
		self.generate_mapping_indices()

		# Sort Index Lists
		# Using indices sort Trigger and Results macros
		self.sort_map_index_lists()

		# Generate Offset Table
		# This is needed for interconnect devices
		self.generate_map_offset_table()

		# Generate Trigger Lists
		self.generate_trigger_lists()

	def process( self ):
		'''
		Data Analysis Stage Processing
		'''
		self._status = 'Running'

		# Determine colorization setting
		self.color = self.control.stage('CompilerConfigurationStage').color

		# Acquire list of contexts
		self.layer_contexts = self.control.stage('DataFinalizationStage').layer_contexts
		self.full_context = self.control.stage('DataFinalizationStage').full_context

		# Analyze set of contexts
		self.analyze()

		self._status = 'Completed'

class CodeGenerationStage( Stage ):
	'''
	Code Generation Stage

	* Generates code for the given firmware backend
	* Backend is selected in the Compiler Configuration Stage
	* Uses the specified emitter to generate the code
	'''
	def __init__( self, control ):
		'''
		Initialize configuration variables
		'''
		super().__init__( control )

	def command_line_args( self, args ):
		'''
		Group parser for command line arguments

		@param args: Name space of processed arguments
		'''
		self.control.stage('CompilerConfigurationStage').emitters.command_line_args( args )

	def command_line_flags( self, parser ):
		'''
		Group parser for command line options

		@param parser: argparse setup object
		'''
		# Create new option group
		#group = parser.add_argument_group('\033[1mCode Generation Configuration\033[0m')

		# Create options groups for each of the Emitters
		self.control.stage('CompilerConfigurationStage').emitters.command_line_flags( parser )

	def process( self ):
		'''
		Code Generation Stage Processing
		'''
		self._status = 'Running'

		# Determine colorization setting
		self.color = self.control.stage('CompilerConfigurationStage').color

		# Get Emitter object
		self.emitter = self.control.stage('CompilerConfigurationStage').emitters.emitter(
			self.control.stage('CompilerConfigurationStage').emitter
		)

		# Call Emitter
		self.emitter.process()

		# Generate Outputs using Emitter
		self.emitter.output()

		self._status = 'Completed'

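# Usage sketch of the inter-stage lookup pattern used throughout the process()
# methods: each stage fetches results from earlier stages through the shared
# control object by stage name, e.g.
#
#   color    = self.control.stage('CompilerConfigurationStage').color
#   contexts = self.control.stage('DataOrganizationStage').contexts
#
# so a stage only depends on the names of previously-run stages, not on direct
# object references.
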
class ReportGenerationStage( Stage ):
	'''
	Report Generation Stage

	* Using the datastructures and analyzed data, generate a compiler report
	* TODO
	'''