#!/usr/bin/env python3
# KLL Compiler
# Keyboard Layout Language
#
# Copyright (C) 2014-2015 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
import argparse
import importlib
import io
import os
import re
import sys
import token
from pprint import pformat
from re import VERBOSE
from tokenize import generate_tokens
from kll_lib.containers import *
from kll_lib.hid_dict import *
from funcparserlib.lexer import make_tokenizer, Token, LexerError
from funcparserlib.parser import (some, a, many, oneplus, skip, finished, maybe, forward_decl, NoParseError)
### Decorators ###
## Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
## Python Text Formatting Fixer...
## Because the creators of Python are averse to proper capitalization.
textFormatter_lookup = {
"usage: " : "Usage: ",
"optional arguments" : "Optional Arguments",
}
def textFormatter_gettext( s ):
return textFormatter_lookup.get( s, s )
argparse._ = textFormatter_gettext
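# Illustrative effect (argparse requests its help strings through the gettext
# hook overridden above): the help header "usage: <prog> [options] ..." is
# rendered as "Usage: <prog> [options] ..." in --help output.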
### Argument Parsing ###
def checkFileExists( filename ):
if not os.path.isfile( filename ):
print ( "{0} {1} does not exist...".format( ERROR, filename ) )
sys.exit( 1 )
def processCommandLineArgs():
# Setup argument processor
pArgs = argparse.ArgumentParser(
usage="%(prog)s [options] ...",
description="Generates .h file state tables and pointer indices from KLL .kll files.",
epilog="Example: {0} mykeyboard.kll -d colemak.kll -p hhkbpro2.kll -p symbols.kll".format( os.path.basename( sys.argv[0] ) ),
formatter_class=argparse.RawTextHelpFormatter,
add_help=False,
)
# Positional Arguments
pArgs.add_argument( 'files', type=str, nargs='+',
help=argparse.SUPPRESS ) # Suppressed help output, because Python output is verbosely ugly
# Optional Arguments
pArgs.add_argument( '-b', '--backend', type=str, default="kiibohd",
help="Specify target backend for the KLL compiler.\n"
"Default: kiibohd\n"
"Options: kiibohd, json" )
pArgs.add_argument( '-d', '--default', type=str, nargs='+',
help="Specify .kll files to layer on top of the default map to create a combined map." )
pArgs.add_argument( '-p', '--partial', type=str, nargs='+', action='append',
help="Specify .kll files to generate partial map, multiple files per flag.\n"
"Each -p defines another partial map.\n"
"Base .kll files (that define the scan code maps) must be defined for each partial map." )
pArgs.add_argument( '-t', '--templates', type=str, nargs='+',
help="Specify template used to generate the keymap.\n"
"Default: " )
pArgs.add_argument( '-o', '--outputs', type=str, nargs='+',
help="Specify output file. Writes to current working directory by default.\n"
"Default: " )
pArgs.add_argument( '-h', '--help', action="help",
help="This message." )
# Process Arguments
args = pArgs.parse_args()
# Parameters
baseFiles = args.files
defaultFiles = args.default
partialFileSets = args.partial
if defaultFiles is None:
defaultFiles = []
if partialFileSets is None:
partialFileSets = [[]]
# Check file existence
for filename in baseFiles:
checkFileExists( filename )
for filename in defaultFiles:
checkFileExists( filename )
for partial in partialFileSets:
for filename in partial:
checkFileExists( filename )
return (baseFiles, defaultFiles, partialFileSets, args.backend, args.templates, args.outputs)
### Tokenizer ###
def tokenize( string ):
"""str -> Sequence(Token)"""
# Basic Tokens Spec
specs = [
( 'Comment', ( r' *#.*', ) ),
( 'Space', ( r'[ \t\r\n]+', ) ),
( 'USBCode', ( r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'USBCodeStart', ( r'U\[', ) ),
( 'ConsCode', ( r'CONS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'ConsCodeStart', ( r'CONS\[', ) ),
( 'SysCode', ( r'SYS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'SysCodeStart', ( r'SYS\[', ) ),
( 'LedCode', ( r'LED(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'LedCodeStart', ( r'LED\[', ) ),
( 'ScanCode', ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'ScanCodeStart', ( r'S\[', ) ),
( 'CodeEnd', ( r'\]', ) ),
( 'String', ( r'"[^"]*"', VERBOSE ) ),
( 'SequenceString', ( r"'[^']*'", ) ),
( 'Operator', ( r'=>|:\+|:-|:|=', ) ),
( 'Comma', ( r',', ) ),
( 'Dash', ( r'-', ) ),
( 'Plus', ( r'\+', ) ),
( 'Parenthesis', ( r'\(|\)', ) ),
( 'None', ( r'None', ) ),
( 'Number', ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', VERBOSE ) ),
( 'Name', ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
( 'VariableContents', ( r'''[^"' ;:=>()]+''', ) ),
( 'EndOfLine', ( r';', ) ),
]
# Tokens to filter out of the token stream
useless = ['Space', 'Comment']
tokens = make_tokenizer( specs )
return [x for x in tokens( string ) if x.type not in useless]
### Parsing ###
## Map Arrays
macros_map = Macros()
variables_dict = Variables()
capabilities_dict = Capabilities()
## Parsing Functions
def make_scanCode( token ):
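	"""Convert a ScanCode token string into its integer value.

	Illustrative examples (follow directly from int( token[1:], 0 )):
	  make_scanCode( 'S0x2B' ) -> 43
	  make_scanCode( 'S16' )   -> 16
	"""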
scanCode = int( token[1:], 0 )
# Check size, to make sure it's valid
if scanCode > 0xFF:
print ( "{0} ScanCode value {1} is larger than 255".format( ERROR, scanCode ) )
		raise ValueError( "ScanCode value is larger than 255" )
return scanCode
def make_hidCode( type, token ):
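	"""Convert a USB/CONS/SYS HID token into a ( specifier, code ) tuple.

	Illustrative examples (the numeric result of the string form depends on
	the lookup table in kll_lib.hid_dict; only the tuple shape is fixed here):
	  make_hidCode( 'USBCode', 'U0x04' ) -> ( 'USB', 0x04 )
	  make_hidCode( 'USBCode', 'U"A"' )  -> ( 'USB', <code for A from the table> )
	"""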
# If first character is a U, strip
if token[0] == "U":
token = token[1:]
# CONS specifier
elif 'CONS' in token:
token = token[4:]
# SYS specifier
elif 'SYS' in token:
token = token[3:]
# If using string representation of USB Code, do lookup, case-insensitive
if '"' in token:
try:
hidCode = kll_hid_lookup_dictionary[ type ][ token[1:-1].upper() ][1]
except LookupError as err:
print ( "{0} {1} is an invalid USB HID Code Lookup...".format( ERROR, err ) )
raise
else:
# Already tokenized
		if ( type == 'USBCode' and token[0] == 'USB' ) or ( type == 'SysCode' and token[0] == 'SYS' ) or ( type == 'ConsCode' and token[0] == 'CONS' ):
hidCode = token[1]
# Convert
else:
hidCode = int( token, 0 )
# Check size if a USB Code, to make sure it's valid
if type == 'USBCode' and hidCode > 0xFF:
print ( "{0} USBCode value {1} is larger than 255".format( ERROR, hidCode ) )
		raise ValueError( "USBCode value is larger than 255" )
# Return a tuple, identifying which type it is
if type == 'USBCode':
return make_usbCode_number( hidCode )
elif type == 'ConsCode':
return make_consCode_number( hidCode )
elif type == 'SysCode':
return make_sysCode_number( hidCode )
print ( "{0} Unknown HID Specifier '{1}'".format( ERROR, type ) )
	raise ValueError( "Unknown HID Specifier '{0}'".format( type ) )
def make_usbCode( token ):
return make_hidCode( 'USBCode', token )
def make_consCode( token ):
return make_hidCode( 'ConsCode', token )
def make_sysCode( token ):
return make_hidCode( 'SysCode', token )
def make_hidCode_number( type, token ):
lookup = {
'ConsCode' : 'CONS',
'SysCode' : 'SYS',
'USBCode' : 'USB',
}
return ( lookup[ type ], token )
def make_usbCode_number( token ):
return make_hidCode_number( 'USBCode', token )
def make_consCode_number( token ):
return make_hidCode_number( 'ConsCode', token )
def make_sysCode_number( token ):
return make_hidCode_number( 'SysCode', token )
# Replace key-word with None specifier (which indicates a noneOut capability)
def make_none( token ):
return [[[('NONE', 0)]]]
def make_seqString( token ):
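	"""Expand a sequence string token (e.g. 'Ab') into a list of USB code combos.

	Illustrative shape only (the actual code tuples come from kll_lib.hid_dict);
	shifted characters gain a SHIFT entry in their combo:
	  'Ab' -> [ [ [ SHIFT ], [ A ] ], [ [ B ] ] ]
	"""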
# Shifted Characters, and amount to move by to get non-shifted version
# US ANSI
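	# e.g. '!' (0x21) + 0x10 = '1' (0x31), '{' (0x7B) - 0x20 = '[' (0x5B)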
	shiftCharacters = (
		( "ABCDEFGHIJKLMNOPQRSTUVWXYZ", 0x20 ),
		( "+", 0x12 ),
		( "&(", 0x11 ),
		( "!#$%", 0x10 ),
		( "*", 0x0E ),
		( ")", 0x07 ),
		( '"', 0x05 ),
		( ":", 0x01 ),
		( "@", -0x0E ),
		( "<>?", -0x10 ),
		( "~", -0x1E ),
		( "{}|", -0x20 ),
		( "^", -0x28 ),
		( "_", -0x32 ),
	)
listOfLists = []
shiftKey = kll_hid_lookup_dictionary['USBCode']["SHIFT"]
# Creates a list of USB codes from the string: sequence (list) of combos (lists)
for char in token[1:-1]:
processedChar = char
# Whether or not to create a combo for this sequence with a shift
shiftCombo = False
# Depending on the ASCII character, convert to single character or Shift + character
for pair in shiftCharacters:
if char in pair[0]:
shiftCombo = True
processedChar = chr( ord( char ) + pair[1] )
break
# Do KLL HID Lookup on non-shifted character
# NOTE: Case-insensitive, which is why the shift must be pre-computed
usbCode = kll_hid_lookup_dictionary['USBCode'][ processedChar.upper() ]
# Create Combo for this character, add shift key if shifted
charCombo = []
if shiftCombo:
charCombo = [ [ shiftKey ] ]
charCombo.append( [ usbCode ] )
# Add to list of lists
listOfLists.append( charCombo )
return listOfLists
def make_string( token ):
return token[1:-1]
def make_unseqString( token ):
return token[1:-1]
def make_number( token ):
return int( token, 0 )
# Range can go from high to low or low to high
def make_scanCode_range( rangeVals ):
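	"""Illustrative example: make_scanCode_range( [5, 3] ) -> [3, 4, 5]"""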
start = rangeVals[0]
end = rangeVals[1]
# Swap start, end if start is greater than end
if start > end:
start, end = end, start
# Iterate from start to end, and generate the range
return list( range( start, end + 1 ) )
# Range can go from high to low or low to high
# TODO Warn on 0-9 for USBCodes (bare numbers are raw HID usage ids, not the number-row keys one might expect)
# Lookup USB HID tags and convert to a number
def make_hidCode_range( type, rangeVals ):
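	"""Illustrative example (integer inputs, so no table lookup is involved):
	  make_hidCode_range( 'USBCode', [4, 6] ) -> [ ('USB', 4), ('USB', 5), ('USB', 6) ]
	"""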
# Check if already integers
if isinstance( rangeVals[0], int ):
start = rangeVals[0]
else:
start = make_hidCode( type, rangeVals[0] )[1]
if isinstance( rangeVals[1], int ):
end = rangeVals[1]
else:
end = make_hidCode( type, rangeVals[1] )[1]
# Swap start, end if start is greater than end
if start > end:
start, end = end, start
# Iterate from start to end, and generate the range
listRange = list( range( start, end + 1 ) )
# Convert each item in the list to a tuple
for item in range( len( listRange ) ):
listRange[ item ] = make_hidCode_number( type, listRange[ item ] )
return listRange
def make_usbCode_range( rangeVals ):
return make_hidCode_range( 'USBCode', rangeVals )
def make_sysCode_range( rangeVals ):
return make_hidCode_range( 'SysCode', rangeVals )
def make_consCode_range( rangeVals ):
return make_hidCode_range( 'ConsCode', rangeVals )
## Base Rules
const = lambda x: lambda _: x
unarg = lambda f: lambda x: f(*x)
flatten = lambda lists: sum( lists, [] )
tokenValue = lambda x: x.value
tokenType = lambda t: some( lambda x: x.type == t ) >> tokenValue
operator = lambda s: a( Token( 'Operator', s ) ) >> tokenValue
parenthesis = lambda s: a( Token( 'Parenthesis', s ) ) >> tokenValue
eol = a( Token( 'EndOfLine', ';' ) )
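# Illustrative usage of the combinators above (assuming the token stream
# produced by tokenize()):
#   tokenType('Number') >> make_number   parses one Number token into an int
#   operator(':')                        matches a ':' Operator token
#   eol                                  matches the terminating ';' token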
def listElem( item ):
return [ item ]
def listToTuple( items ):
return tuple( items )
# Flatten only the top layer (list of lists of ...)
def oneLayerFlatten( items ):
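	"""Illustrative example: oneLayerFlatten( [[1, 2], [3]] ) -> [1, 2, 3]"""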
mainList = []
for sublist in items:
for item in sublist:
mainList.append( item )
return mainList
# Capability arguments may need to be expanded (e.g. one 16-bit argument needs to be two 8-bit arguments for the state machine)
def capArgExpander( items ):
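	"""Split multi-byte capability arguments into byte-sized pieces (little endian).

	Illustrative example, assuming a hypothetical capability 'myCap' defined
	with a single 2 byte argument in capabilities_dict:
	  capArgExpander( ( 'myCap', ( 0x1234, ) ) ) -> ( 'myCap', ( 0x34, 0x12 ) )
	"""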
newArgs = []
# For each defined argument in the capability definition
for arg in range( 0, len( capabilities_dict[ items[0] ][1] ) ):
argLen = capabilities_dict[ items[0] ][1][ arg ][1]
num = items[1][ arg ]
byteForm = num.to_bytes( argLen, byteorder='little' ) # XXX Yes, little endian from how the uC structs work
# For each sub-argument, split into byte-sized chunks
for byte in range( 0, argLen ):
newArgs.append( byteForm[ byte ] )
return tuple( [ items[0], tuple( newArgs ) ] )
# Expand ranges of values in the 3rd dimension of the list, to a list of 2nd lists
# i.e. [ sequence, [ combo, [ range ] ] ] --> [ [ sequence, [ combo ] ],