#!/usr/bin/env python3
# KLL Compiler
# Keyboard Layout Language
#
# Copyright (C) 2014-2016 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
import argparse
import importlib
import io
import os
import re
import sys
import token
from pprint import pformat
from re import VERBOSE
from tokenize import generate_tokens
from kll_lib.containers import *
from kll_lib.hid_dict import *
from funcparserlib.lexer import make_tokenizer, Token, LexerError
from funcparserlib.parser import (some, a, many, oneplus, skip, finished, maybe, forward_decl, NoParseError)
### Decorators ###
## Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
## Python Text Formatting Fixer...
## Because the creators of Python are averse to proper capitalization.
textFormatter_lookup = {
"usage: " : "Usage: ",
"optional arguments" : "Optional Arguments",
}
def textFormatter_gettext( s ):
return textFormatter_lookup.get( s, s )
argparse._ = textFormatter_gettext
### Argument Parsing ###
def checkFileExists( filename ):
if not os.path.isfile( filename ):
print ( "{0} {1} does not exist...".format( ERROR, filename ) )
sys.exit( 1 )
def processCommandLineArgs():
# Setup argument processor
pArgs = argparse.ArgumentParser(
usage="%(prog)s [options] ...",
description="Generates .h file state tables and pointer indices from KLL .kll files.",
epilog="Example: {0} mykeyboard.kll -d colemak.kll -p hhkbpro2.kll -p symbols.kll".format( os.path.basename( sys.argv[0] ) ),
formatter_class=argparse.RawTextHelpFormatter,
add_help=False,
)
# Positional Arguments
pArgs.add_argument( 'files', type=str, nargs='+',
help=argparse.SUPPRESS ) # Suppressed help output, because Python output is verbosely ugly
# Optional Arguments
pArgs.add_argument( '-b', '--backend', type=str, default="kiibohd",
help="Specify target backend for the KLL compiler.\n"
"Default: kiibohd\n"
"Options: kiibohd" )
pArgs.add_argument( '-d', '--default', type=str, nargs='+',
help="Specify .kll files to layer on top of the default map to create a combined map." )
pArgs.add_argument( '-p', '--partial', type=str, nargs='+', action='append',
help="Specify .kll files to generate partial map, multiple files per flag.\n"
"Each -p defines another partial map.\n"
"Base .kll files (that define the scan code maps) must be defined for each partial map." )
pArgs.add_argument( '-t', '--templates', type=str, nargs='+',
help="Specify template used to generate the keymap.\n"
"Default: " )
pArgs.add_argument( '-o', '--outputs', type=str, nargs='+',
help="Specify output file. Writes to current working directory by default.\n"
"Default: " )
pArgs.add_argument( '-h', '--help', action="help",
help="This message." )
# Process Arguments
args = pArgs.parse_args()
# Parameters
baseFiles = args.files
defaultFiles = args.default
partialFileSets = args.partial
if defaultFiles is None:
defaultFiles = []
if partialFileSets is None:
partialFileSets = [[]]
	# Check file existence
for filename in baseFiles:
checkFileExists( filename )
for filename in defaultFiles:
checkFileExists( filename )
for partial in partialFileSets:
for filename in partial:
checkFileExists( filename )
return (baseFiles, defaultFiles, partialFileSets, args.backend, args.templates, args.outputs)
### Tokenizer ###
def tokenize( string ):
"""str -> Sequence(Token)"""
# Basic Tokens Spec
specs = [
( 'Comment', ( r' *#.*', ) ),
( 'Space', ( r'[ \t\r\n]+', ) ),
( 'USBCode', ( r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'USBCodeStart', ( r'U\[', ) ),
( 'ConsCode', ( r'CONS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'ConsCodeStart', ( r'CONS\[', ) ),
( 'SysCode', ( r'SYS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'SysCodeStart', ( r'SYS\[', ) ),
( 'ScanCode', ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'ScanCodeStart', ( r'S\[', ) ),
( 'Indicator', ( r'I(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'IndicatorStart', ( r'I\[', ) ),
( 'Pixel', ( r'P"[^"]+"', ) ),
( 'PixelStart', ( r'P\[', ) ),
( 'PixelLayer', ( r'PL"[^"]+"', ) ),
( 'PixelLayerStart', ( r'PL\[', ) ),
( 'Animation', ( r'A"[^"]+"', ) ),
( 'AnimationStart', ( r'A\[', ) ),
( 'CodeBegin', ( r'\[', ) ),
( 'CodeEnd', ( r'\]', ) ),
		( 'Position', ( r'r?[xyz]:[0-9]+(\.[0-9]+)?', ) ),
( 'String', ( r'"[^"]*"', ) ),
( 'SequenceString', ( r"'[^']*'", ) ),
( 'PixelOperator', ( r'(\+:|-:|>>|<<)', ) ),
( 'Operator', ( r'=>|<=|:\+|:-|::|:|=', ) ),
( 'Comma', ( r',', ) ),
( 'Dash', ( r'-', ) ),
( 'Plus', ( r'\+', ) ),
( 'Parenthesis', ( r'\(|\)', ) ),
( 'None', ( r'None', ) ),
		( 'Timing', ( r'[0-9]+(\.[0-9]+)?((s)|(ms)|(us))', ) ),
		( 'Number', ( r'-?((0x[0-9a-fA-F]+)|(0|([1-9][0-9]*)))', VERBOSE ) ),
( 'Name', ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
( 'VariableContents', ( r'''[^"' ;:=>()]+''', ) ),
( 'EndOfLine', ( r';', ) ),
]
# Tokens to filter out of the token stream
useless = ['Space', 'Comment']
tokens = make_tokenizer( specs )
return [x for x in tokens( string ) if x.type not in useless]
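# Illustrative sketch (not executed): given the specs above, a simple mapping line
# should tokenize roughly as follows, with Space and Comment tokens already filtered out:
#   tokenize( 'S0x40 : U0x04;' )
#   -> [ Token('ScanCode', 'S0x40'), Token('Operator', ':'),
#        Token('USBCode', 'U0x04'), Token('EndOfLine', ';') ]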
### Parsing ###
## Map Arrays
macros_map = Macros()
variables_dict = Variables()
capabilities_dict = Capabilities()
## Parsing Functions
class Make:
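	# Note: these helpers are used as plain namespace functions and are called directly
	# on the class (e.g. Make.usbCode_number( ... ) below), never on an instance,
	# so they intentionally take no 'self' parameter.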
def scanCode( token ):
scanCode = int( token[1:], 0 )
# Check size, to make sure it's valid
# XXX Add better check that takes symbolic names into account (i.e. U"Latch5")
#if scanCode > 0xFF:
# print ( "{0} ScanCode value {1} is larger than 255".format( ERROR, scanCode ) )
# raise
return scanCode
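	# Rough examples (not executed): Make.scanCode( 'S0x40' ) strips the leading 'S' and
	# parses the remainder with base auto-detection, yielding 64; Make.scanCode( 'S16' ) yields 16.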
def hidCode( type, token ):
# If first character is a U or I, strip
if token[0] == "U" or token[0] == "I":
token = token[1:]
# CONS specifier
elif 'CONS' in token:
token = token[4:]
# SYS specifier
elif 'SYS' in token:
token = token[3:]
# If using string representation of USB Code, do lookup, case-insensitive
if '"' in token:
try:
hidCode = kll_hid_lookup_dictionary[ type ][ token[1:-1].upper() ][1]
except LookupError as err:
print ( "{0} {1} is an invalid USB HID Code Lookup...".format( ERROR, err ) )
raise
else:
# Already tokenized
			if ( ( type == 'USBCode' and token[0] == 'USB' )
			  or ( type == 'SysCode' and token[0] == 'SYS' )
			  or ( type == 'ConsCode' and token[0] == 'CONS' ) ):
hidCode = token[1]
# Convert
else:
hidCode = int( token, 0 )
# Check size if a USB Code, to make sure it's valid
# XXX Add better check that takes symbolic names into account (i.e. U"Latch5")
#if type == 'USBCode' and hidCode > 0xFF:
# print ( "{0} USBCode value {1} is larger than 255".format( ERROR, hidCode ) )
# raise
# Return a tuple, identifying which type it is
if type == 'USBCode':
return Make.usbCode_number( hidCode )
elif type == 'ConsCode':
return Make.consCode_number( hidCode )
elif type == 'SysCode':
return Make.sysCode_number( hidCode )
elif type == 'IndCode':
return Make.indCode_number( hidCode )
print ( "{0} Unknown HID Specifier '{1}'".format( ERROR, type ) )
raise
def usbCode( token ):
return Make.hidCode( 'USBCode', token )
def consCode( token ):
return Make.hidCode( 'ConsCode', token )
def sysCode( token ):
return Make.hidCode( 'SysCode', token )
def indCode( token ):
return Make.hidCode( 'IndCode', token )
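	# Rough examples (not executed), assuming the hid_dict lookup tables are loaded:
	#   Make.usbCode( 'U0x04' ) -> ('USB', 4)      numeric form, converted directly
	#   Make.usbCode( 'U"A"' )  -> ('USB', <code>)  string form, resolved via kll_hid_lookup_dictionary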
def animation( token ):
# TODO
print( token )
return "NULL"
def animationCapability( token ):
# TODO
print( token )
return "DIS"
def pixelCapability( token ):
# TODO
print( token )
return "DAT"
def pixel( token ):
# TODO
print( token )
return "PNULL"
def pixelLayer( token ):
# TODO
print( token )
return "PLNULL"
def pixelchans( token ):
# Create dictionary list
channel_widths = []
for elem in token:
channel_widths.append( {
'chan' : elem[0],
'width' : elem[1],
} )
print(channel_widths)
return channel_widths
def pixelchan_elem( token ):
channel_config = {
'pixels' : token[0],
'chans' : token[1],
}
return channel_config
def pixelmods( token ):
# TODO
print( token )
return "PMOD"
def pixellayer( token ):
# TODO
print( token )
return "PL"
def position( token ):
return token.split(':')
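	# Rough example (not executed): Make.position( 'x:20' ) -> ['x', '20']
	# (axis label and value are returned as strings for later processing)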
def hidCode_number( type, token ):
lookup = {
'ConsCode' : 'CONS',
'SysCode' : 'SYS',
'USBCode' : 'USB',
'IndCode' : 'LED',
}
return ( lookup[ type ], token )
def usbCode_number( token ):
return Make.hidCode_number( 'USBCode', token )
def consCode_number( token ):
return Make.hidCode_number( 'ConsCode', token )
def sysCode_number( token ):
return Make.hidCode_number( 'SysCode', token )
def indCode_number( token ):
return Make.hidCode_number( 'IndCode', token )
# Replace key-word with None specifier (which indicates a noneOut capability)
def none( token ):
return [[[('NONE', 0)]]]
def seqString( token ):
# Shifted Characters, and amount to move by to get non-shifted version
# US ANSI
shiftCharacters = (
( "ABCDEFGHIJKLMNOPQRSTUVWXYZ", 0x20 ),
( "+", 0x12 ),
( "&(", 0x11 ),
( "!#$%", 0x10 ),
( "*", 0x0E ),
( ")", 0x07 ),
( '"', 0x05 ),
( ":", 0x01 ),
( "@", -0x0E ),
( "<>?", -0x10 ),
( "~", -0x1E ),
( "{}|", -0x20 ),
( "^", -0x28 ),
( "_", -0x32 ),
)
listOfLists = []
shiftKey = kll_hid_lookup_dictionary['USBCode']["SHIFT"]
# Creates a list of USB codes from the string: sequence (list) of combos (lists)
for char in token[1:-1]:
processedChar = char
# Whether or not to create a combo for this sequence with a shift
shiftCombo = False
# Depending on the ASCII character, convert to single character or Shift + character
for pair in shiftCharacters:
if char in pair[0]:
shiftCombo = True
processedChar = chr( ord( char ) + pair[1] )
break
# Do KLL HID Lookup on non-shifted character
# NOTE: Case-insensitive, which is why the shift must be pre-computed
usbCode = kll_hid_lookup_dictionary['USBCode'][ processedChar.upper() ]
# Create Combo for this character, add shift key if shifted
charCombo = []
if shiftCombo:
charCombo = [ [ shiftKey ] ]
charCombo.append( [ usbCode ] )
# Add to list of lists
listOfLists.append( charCombo )
return listOfLists
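	# Rough sketch (not executed): Make.seqString( "'Ab'" ) produces one entry per character,
	# e.g. [ [ [Shift], [A] ], [ [B] ] ], where shifted ASCII characters are first mapped back
	# to their unshifted key and preceded by the SHIFT USB code.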
def string( token ):
return token[1:-1]
def unseqString( token ):
return token[1:-1]
def number( token ):
return int( token, 0 )
def timing( token ):
# Find ms, us, or s
if 'ms' in token:
unit = 'ms'
num = token.split('m')[0]
print (token.split('m'))
elif 'us' in token:
unit = 'us'
num = token.split('u')[0]
elif 's' in token:
unit = 's'
num = token.split('s')[0]
else:
print ( "{0} cannot find timing unit in token '{1}'".format( ERROR, token ) )
return "NULL"
print ( num, unit )
ret = {
'time' : float( num ),
'unit' : unit,
}
return ret
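	# Rough examples (not executed):
	#   Make.timing( '10ms' ) -> { 'time' : 10.0, 'unit' : 'ms' }
	#   Make.timing( '1.5s' ) -> { 'time' : 1.5,  'unit' : 's' }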
def specifierState( values ):
# TODO
print ( values )
return "SPECSTATE"
def specifierAnalog( value ):
# TODO
print( value )
return "SPECANALOG"
def specifierUnroll( value ):
# TODO
print( value )
return [ value[0] ]
# Range can go from high to low or low to high
def scanCode_range( rangeVals ):
start = rangeVals[0]
end = rangeVals[1]
# Swap start, end if start is greater than end
if start > end:
start, end = end, start
# Iterate from start to end, and generate the range
return list( range( start, end + 1 ) )
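	# Rough example (not executed): Make.scanCode_range( [5, 2] ) -> [2, 3, 4, 5]
	# (endpoints are swapped first, so the range direction does not matter)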
# Range can go from high to low or low to high
# Warn on 0-9 for USBCodes (as this does not do what one would expect) TODO
# Lookup USB HID tags and convert to a number
def hidCode_range( type, rangeVals ):
# Check if already integers
if isinstance( rangeVals[0], int ):
start = rangeVals[0]
else:
start = Make.hidCode( type, rangeVals[0] )[1]
if isinstance( rangeVals[1], int ):
end = rangeVals[1]
else:
end = Make.hidCode( type, rangeVals[1] )[1]
# Swap start, end if start is greater than end
if start > end:
start, end = end, start
# Iterate from start to end, and generate the range
listRange = list( range( start, end + 1 ) )
# Convert each item in the list to a tuple
for item in range( len( listRange ) ):
listRange[ item ] = Make.hidCode_number( type, listRange[ item ] )
return listRange
def usbCode_range( rangeVals ):
return Make.hidCode_range( 'USBCode', rangeVals )
def sysCode_range( rangeVals ):
return Make.hidCode_range( 'SysCode', rangeVals )
def consCode_range( rangeVals ):
return Make.hidCode_range( 'ConsCode', rangeVals )
def indCode_range( rangeVals ):
return Make.hidCode_range( 'IndCode', rangeVals )
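	# Rough example (not executed): Make.usbCode_range( [4, 6] ) -> [('USB', 4), ('USB', 5), ('USB', 6)]
	# String endpoints such as 'U"A"' are first resolved through Make.hidCode before expansion.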
def range( rangeVals ):
# TODO
print (rangeVals)
return ""
## Base Rules
const = lambda x: lambda _: x
unarg = lambda f: lambda x: f(*x)
flatten = lambda list: sum( list, [] )
tokenValue = lambda x: x.value
tokenType = lambda t: some( lambda x: x.type == t ) >> tokenValue
operator = lambda s: a( Token( 'Operator', s ) ) >> tokenValue
parenthesis = lambda s: a( Token( 'Parenthesis', s ) ) >> tokenValue
bracket = lambda s: a( Token( 'Bracket', s ) ) >> tokenValue
eol = a( Token( 'EndOfLine', ';' ) )
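# Illustrative sketch (not executed): these helpers build funcparserlib parsers; the names
# below are hypothetical, not rules defined elsewhere in this file:
#   name    = tokenType('Name')   # accepts a 'Name' token and yields its string value
#   assign  = operator('=')       # accepts the '=' Operator token
#   setting = name + assign + name + skip( eol )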
def listElem( item ):
return [ item ]
def listToTuple( items ):
return tuple( items )
# Flatten only the top layer (list of lists of ...)
def oneLayerFlatten( items ):
mainList = []
for sublist in items:
for item in sublist:
mainList.append( item )
return mainList
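# Rough example (not executed): oneLayerFlatten( [ [1, 2], [3] ] ) -> [1, 2, 3]
# (only the outermost level of nesting is removed; deeper lists are left intact)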
# Capability arguments may need to be expanded (e.g. one 16-bit argument needs to be two 8-bit arguments for the state machine)
def capArgExpander( items ):
newArgs = []
# For each defined argument in the capability definition
for arg in range( 0, len( capabilities_dict[ items[0] ][1] ) ):
argLen = capabilities_dict[ items[0] ][1][ arg ][1]
num = items[1][ arg ]
byteForm = num.to_bytes( argLen, byteorder='little' ) # XXX Yes, little endian from how the uC structs work
# For each sub-argument, split into byte-sized chunks
for byte in range( 0, argLen ):
newArgs.append( byteForm[ byte ] )
return tuple( [ items[0], tuple( newArgs ) ] )
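# Rough sketch (not executed), assuming a capability 'myCap' defined with a single 2 byte argument:
#   capArgExpander( [ 'myCap', ( 0x1234, ) ] ) -> ( 'myCap', ( 0x34, 0x12 ) )
# i.e. each argument is split into byte-sized chunks, little endian, to match the uC structs.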
# Expand ranges of values in the 3rd dimension of the list, to a list of 2nd lists
# i.e. [ sequence, [ combo, [ range ] ] ] --> [ [ sequence, [ combo ] ],