#!/usr/bin/env python3
# KLL Compiler
# Keyboard Layout Language
#
# Copyright (C) 2014 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
import argparse
import io
import os
import re
import sys
import token
import importlib
from tokenize import generate_tokens
from re import VERBOSE
from pprint import pformat
from kll_lib.hid_dict import *
from kll_lib.containers import *
from funcparserlib.lexer import make_tokenizer, Token, LexerError
from funcparserlib.parser import (some, a, many, oneplus, skip, finished, maybe, forward_decl, NoParseError)
### Decorators ###
## Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
## Python Text Formatting Fixer...
## Because the creators of Python are averse to proper capitalization.
textFormatter_lookup = {
"usage: " : "Usage: ",
"optional arguments" : "Optional Arguments",
}
def textFormatter_gettext( s ):
return textFormatter_lookup.get( s, s )
argparse._ = textFormatter_gettext
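# argparse routes these strings through gettext (argparse._), so overriding it applies the lookup above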
### Argument Parsing ###
def checkFileExists( filename ):
if not os.path.isfile( filename ):
print ( "{0} {1} does not exist...".format( ERROR, filename ) )
sys.exit( 1 )
def processCommandLineArgs():
# Setup argument processor
pArgs = argparse.ArgumentParser(
usage="%(prog)s [options] ...",
description="Generates .h file state tables and pointer indices from KLL .kll files.",
epilog="Example: {0} mykeyboard.kll -d colemak.kll -p hhkbpro2.kll -p symbols.kll".format( os.path.basename( sys.argv[0] ) ),
formatter_class=argparse.RawTextHelpFormatter,
add_help=False,
)
# Positional Arguments
pArgs.add_argument( 'files', type=str, nargs='+',
help=argparse.SUPPRESS ) # Suppressed help output, because Python output is verbosely ugly
# Optional Arguments
pArgs.add_argument( '-b', '--backend', type=str, default="kiibohd",
help="Specify target backend for the KLL compiler.\n"
"Default: kiibohd" )
pArgs.add_argument( '-d', '--default', type=str, nargs='+',
help="Specify .kll files to layer on top of the default map to create a combined map." )
pArgs.add_argument( '-p', '--partial', type=str, nargs='+', action='append',
help="Specify .kll files to generate partial map, multiple files per flag.\n"
"Each -p defines another partial map.\n"
"Base .kll files (that define the scan code maps) must be defined for each partial map." )
pArgs.add_argument( '-t', '--template', type=str, default="templates/kiibohdKeymap.h",
help="Specify template used to generate the keymap.\n"
"Default: templates/kiibohdKeymap.h" )
pArgs.add_argument( '-o', '--output', type=str, default="templateKeymap.h",
help="Specify output file. Writes to current working directory by default.\n"
"Default: generatedKeymap.h" )
pArgs.add_argument( '-h', '--help', action="help",
help="This message." )
# Process Arguments
args = pArgs.parse_args()
# Parameters
baseFiles = args.files
defaultFiles = args.default
partialFileSets = args.partial
if defaultFiles is None:
defaultFiles = []
if partialFileSets is None:
partialFileSets = [[]]
# Check file existance
for filename in baseFiles:
checkFileExists( filename )
for filename in defaultFiles:
checkFileExists( filename )
for partial in partialFileSets:
for filename in partial:
checkFileExists( filename )
return (baseFiles, defaultFiles, partialFileSets, args.backend, args.template, args.output)
### Tokenizer ###
def tokenize( string ):
"""str -> Sequence(Token)"""
# Basic Tokens Spec
specs = [
( 'Comment', ( r' *#.*', ) ),
( 'Space', ( r'[ \t\r\n]+', ) ),
( 'USBCode', ( r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'USBCodeStart', ( r'U\[', ) ),
( 'ScanCode', ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'ScanCodeStart', ( r'S\[', ) ),
( 'CodeEnd', ( r'\]', ) ),
( 'String', ( r'"[^"]*"', VERBOSE ) ),
( 'SequenceString', ( r"'[^']*'", ) ),
( 'Operator', ( r'=>|:\+|:-|:|=', ) ),
( 'Comma', ( r',', ) ),
( 'Dash', ( r'-', ) ),
( 'Plus', ( r'\+', ) ),
( 'Parenthesis', ( r'\(|\)', ) ),
( 'Number', ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', VERBOSE ) ),
( 'Name', ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
( 'VariableContents', ( r'''[^"' ;:=>()]+''', ) ),
( 'EndOfLine', ( r';', ) ),
]
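	# Illustrative (not executed): tokenizing  S0x39 : U"A";
	# yields ScanCode 'S0x39', Operator ':', USBCode 'U"A"', EndOfLine ';' once Space/Comment are filtered below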
# Tokens to filter out of the token stream
useless = ['Space', 'Comment']
tokens = make_tokenizer( specs )
return [x for x in tokens( string ) if x.type not in useless]
### Parsing ###
## Map Arrays
macros_map = Macros()
variable_dict = dict()
capabilities_dict = Capabilities()
## Parsing Functions
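# Convert a ScanCode token (hex or decimal form) to its integer value
# Illustrative: make_scanCode( "S0x2C" ) == 44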
def make_scanCode( token ):
scanCode = int( token[1:], 0 )
# Check size, to make sure it's valid
if scanCode > 0xFF:
print ( "{0} ScanCode value {1} is larger than 255".format( ERROR, scanCode ) )
		raise ValueError( "ScanCode out of range" )
return scanCode
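# Convert a USBCode token to its integer value; quoted names are resolved through the HID lookup dictionary
# Illustrative: make_usbCode( "U0x29" ) == 0x29 and make_usbCode( "U16" ) == 16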
def make_usbCode( token ):
# If first character is a U, strip
if token[0] == "U":
token = token[1:]
# If using string representation of USB Code, do lookup, case-insensitive
if '"' in token:
try:
usbCode = kll_hid_lookup_dictionary[ token[1:-1].upper() ]
except LookupError as err:
print ( "{0} {1} is an invalid USB Code Lookup...".format( ERROR, err ) )
raise
else:
usbCode = int( token, 0 )
# Check size, to make sure it's valid
if usbCode > 0xFF:
print ( "{0} USBCode value {1} is larger than 255".format( ERROR, usbCode ) )
		raise ValueError( "USBCode out of range" )
return usbCode
def make_seqString( token ):
# Shifted Characters, and amount to move by to get non-shifted version
# US ANSI
	shiftCharacters = (
		( "ABCDEFGHIJKLMNOPQRSTUVWXYZ", 0x20 ),
		( "+",    0x12 ),
		( "&(",   0x11 ),
		( "!#$%", 0x10 ),
		( "*",    0x0E ),
		( ")",    0x07 ),
		( '"',    0x05 ),
		( ":",    0x01 ),
		( "@",   -0x0E ),
		( "<>?", -0x10 ),
		( "~",   -0x1E ),
		( "{}|", -0x20 ),
		( "^",   -0x28 ),
		( "_",   -0x32 ),
	)
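	# e.g. '+' is ASCII 0x2B; 0x2B + 0x12 = 0x3D ('='), so '+' is emitted as Shift combined with the '=' key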
listOfLists = []
shiftKey = kll_hid_lookup_dictionary["SHIFT"]
# Creates a list of USB codes from the string: sequence (list) of combos (lists)
for char in token[1:-1]:
processedChar = char
# Whether or not to create a combo for this sequence with a shift
shiftCombo = False
# Depending on the ASCII character, convert to single character or Shift + character
for pair in shiftCharacters:
if char in pair[0]:
shiftCombo = True
processedChar = chr( ord( char ) + pair[1] )
break
# Do KLL HID Lookup on non-shifted character
# NOTE: Case-insensitive, which is why the shift must be pre-computed
usbCode = kll_hid_lookup_dictionary[ processedChar.upper() ]
# Create Combo for this character, add shift key if shifted
charCombo = []
if shiftCombo:
charCombo = [ [ shiftKey ] ]
charCombo.append( [ usbCode ] )
# Add to list of lists
listOfLists.append( charCombo )
return listOfLists
def make_string( token ):
return token[1:-1]
def make_number( token ):
return int( token, 0 )
# Range can go from high to low or low to high
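# e.g. make_scanCode_range( [ 0x05, 0x02 ] ) == [ 2, 3, 4, 5 ]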
def make_scanCode_range( rangeVals ):
start = rangeVals[0]
end = rangeVals[1]
# Swap start, end if start is greater than end
if start > end:
start, end = end, start
# Iterate from start to end, and generate the range
return list( range( start, end + 1 ) )
# Range can go from high to low or low to high
# Warn on 0-9 (as this does not do what one would expect) TODO
# Lookup USB HID tags and convert to a number
def make_usbCode_range( rangeVals ):
# Check if already integers
if isinstance( rangeVals[0], int ):
start = rangeVals[0]
else:
start = make_usbCode( rangeVals[0] )
if isinstance( rangeVals[1], int ):
end = rangeVals[1]
else:
end = make_usbCode( rangeVals[1] )
# Swap start, end if start is greater than end
if start > end:
start, end = end, start
# Iterate from start to end, and generate the range
return list( range( start, end + 1 ) )
## Base Rules
const = lambda x: lambda _: x
unarg = lambda f: lambda x: f(*x)
flatten = lambda list: sum( list, [] )
tokenValue = lambda x: x.value
tokenType = lambda t: some( lambda x: x.type == t ) >> tokenValue
operator = lambda s: a( Token( 'Operator', s ) ) >> tokenValue
parenthesis = lambda s: a( Token( 'Parenthesis', s ) ) >> tokenValue
eol = a( Token( 'EndOfLine', ';' ) )
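# Quick reference for the helpers above (funcparserlib):
#   tokenType('Number') - parser accepting any 'Number' token, yielding its string value
#   operator(':')       - parser accepting the ':' Operator token, yielding ':'
#   eol                 - parser accepting the ';' EndOfLine token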
def listElem( item ):
return [ item ]
def listToTuple( items ):
return tuple( items )
# Flatten only the top layer (list of lists of ...)
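# e.g. oneLayerFlatten( [ [ 1, 2 ], [ 3 ] ] ) == [ 1, 2, 3 ]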
def oneLayerFlatten( items ):
mainList = []
for sublist in items:
for item in sublist:
mainList.append( item )
return mainList
# Capability arguments may need to be expanded (e.g. 1 16 bit argument needs to be 2 8 bit arguments for the state machine)
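# e.g. a single 16 bit argument 0x1234 (width 2) expands to the byte tuple ( 0x34, 0x12 ), little endian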
def capArgExpander( items ):
newArgs = []
# For each defined argument in the capability definition
for arg in range( 0, len( capabilities_dict[ items[0] ][1] ) ):
argLen = capabilities_dict[ items[0] ][1][ arg ][1]
num = items[1][ arg ]
byteForm = num.to_bytes( argLen, byteorder='little' ) # XXX Yes, little endian from how the uC structs work
# For each sub-argument, split into byte-sized chunks
for byte in range( 0, argLen ):
newArgs.append( byteForm[ byte ] )
return tuple( [ items[0], tuple( newArgs ) ] )
# Expand ranges of values in the 3rd dimension of the list, to a list of 2nd lists
# i.e. [ sequence, [ combo, [ range ] ] ] --> [ [ sequence, [ combo ] ],