Browse Source

Initial source dump.

- Not quite complete.
- Most of the parser is done (excluding analog) for 0.3 of the KLL spec
- Transformation and correlation isn't complete yet.
- Backend generation for Kiibohd capabilities is complete.
simple
Jacob Alexander 9 years ago
parent
commit
1f0bf65417
13 changed files with 1973 additions and 0 deletions
  1. 63
    0
      .gitignore
  2. 2
    0
      README
  3. 0
    0
      backends/__init__.py
  4. 82
    0
      backends/kiibohd.py
  5. 0
    0
      funcparserlib/__init__.py
  6. 133
    0
      funcparserlib/lexer.py
  7. 409
    0
      funcparserlib/parser.py
  8. 49
    0
      funcparserlib/util.py
  9. 505
    0
      kll.py
  10. 0
    0
      kll_lib/__init__.py
  11. 114
    0
      kll_lib/containers.py
  12. 502
    0
      kll_lib/hid_dict.py
  13. 114
    0
      templateKeymap.h

+ 63
- 0
.gitignore View File

@@ -0,0 +1,63 @@
# Compiled source #
###################
*.com
*.class
*.dll
*.exe
*.o
*.so
*.elf
*.bin
*.hex
*.lss
*.sym
*.map

# Packages #
############
# it's better to unpack these files and commit the raw source
# git has its own built in compression methods
*.7z
*.dmg
*.gz
*.iso
*.jar
*.rar
*.tar
*.zip

# Logs and databases #
######################
*.log
*.sql
*.sqlite

# OS generated files #
######################
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# Editor generated files #
##########################
*.swp

# Source browsing files #
#########################
tags

# CMake Generated Files #
#########################
CMakeFiles
CMakeCache.txt
cmake_install.cmake

# Python Generated Files #
##########################
__pycache__/
*.py[cod]


+ 2
- 0
README View File

@@ -6,6 +6,8 @@ KLL Compiler
Most current version of the KLL spec: https://www.writelatex.com/read/zzqbdwqjfwwf
Or visit http://kiibohd.com

Uses funcparserlib: https://code.google.com/p/funcparserlib/


Usage
-----

+ 0
- 0
backends/__init__.py View File


+ 82
- 0
backends/kiibohd.py View File

@@ -0,0 +1,82 @@
#!/usr/bin/env python3
# KLL Compiler - Kiibohd Backend
#
# Backend code generator for the Kiibohd Controller firmware.
#
# Copyright (C) 2014 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.

### Imports ###

import os
import sys
import re

# Modifying Python Path, which is dumb, but the only way to import up one directory...
sys.path.append( os.path.expanduser('..') )

from kll_lib.containers import *


### Decorators ###

## Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'



### Classes ###

class Backend:
	"""Kiibohd backend code generator.

	Reads a keymap template file, records every <|tag|> fill marker found
	in it, and produces the generated keymap output from processed datasets.
	"""

	def __init__( self, templatePath ):
		"""Validate the template file and build the list of fill tags."""
		# Refuse to continue without a template to fill in
		if not os.path.isfile( templatePath ):
			print ( "{0} '{1}' does not exist...".format( ERROR, templatePath ) )
			sys.exit( 1 )

		self.templatePath = templatePath
		self.fill_dict = dict()

		# Collect every <|tag|> marker that appears in the template
		self.tagList = []
		with open( templatePath, 'r' ) as templateFile:
			for templateLine in templateFile:
				self.tagList.extend( re.findall( r'<\|([^|>]+)\|>', templateLine ) )

	def process( self, capabilities ):
		"""Compute the fill-tag datasets from the capabilities container."""
		## Capabilities ##
		# One { function, argument-byte-width } entry per capability;
		# the container yields keys pre-sorted.
		entries = [
			"\t{{ {0}, {1} }},\n".format(
				capabilities.funcName( key ),
				capabilities.totalArgBytes( key ),
			)
			for key in capabilities.keys()
		]
		self.fill_dict['CapabilitiesList'] = (
			"const Capability CapabilitiesList[] = {\n" + "".join( entries ) + "};"
		)

		print( self.fill_dict['CapabilitiesList'] )

	def generate( self, filepath ):
		"""Generate the output keymap with fill tags filled (stub)."""
		print("My path: {0}".format( filepath) )


+ 0
- 0
funcparserlib/__init__.py View File


+ 133
- 0
funcparserlib/lexer.py View File

@@ -0,0 +1,133 @@
# -*- coding: utf-8 -*-

# Copyright (c) 2008/2013 Andrey Vlasovskikh
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

__all__ = ['make_tokenizer', 'Token', 'LexerError']

import re


class LexerError(Exception):
    """Raised when no token spec matches the input at the current position."""

    def __init__(self, place, msg):
        self.place = place  # (line, column) where tokenization failed
        self.msg = msg      # offending source line text

    def __str__(self):
        line, pos = self.place
        return u'%s: %d,%d: "%s"' % (u'cannot tokenize data', line, pos, self.msg)


class Token(object):
    """A lexeme produced by the tokenizer, with optional source positions."""

    def __init__(self, type, value, start=None, end=None):
        self.type = type
        self.value = value
        self.start = start  # (line, col) of the first character, or None
        self.end = end      # (line, col) of the last character, or None

    def __repr__(self):
        return u'Token(%r, %r)' % (self.type, self.value)

    def __eq__(self, other):
        # FIXME: Case sensitivity is assumed here
        return self.type == other.type and self.value == other.value

    def _pos_str(self):
        """Render 'l,c-l,c:' for the token span, or '' when unknown."""
        if self.start is None or self.end is None:
            return ''
        (sl, sp), (el, ep) = self.start, self.end
        return u'%d,%d-%d,%d:' % (sl, sp, el, ep)

    def __str__(self):
        return (u"%s %s '%s'" % (self._pos_str(), self.type, self.value)).strip()

    @property
    def name(self):
        return self.value

    def pformat(self):
        """Column-aligned rendering used for token dumps."""
        return u"%s %s '%s'" % (self._pos_str().ljust(20),
                                self.type.ljust(14),
                                self.value)


def make_tokenizer(specs):
    """[(str, (str, int?))] -> (str -> Iterable(Token))

    Compile the (name, regexp-args) specs once and return a generator
    function that yields Tokens; the first matching spec wins.
    """
    compiled = [(name, re.compile(*args)) for name, args in specs]

    def match_at(text, i, position):
        # Try each spec at offset i; track line/column for error reporting.
        line, pos = position
        for tok_type, regexp in compiled:
            m = regexp.match(text, i)
            if m is None:
                continue
            value = m.group()
            nls = value.count(u'\n')
            n_line = line + nls
            if nls == 0:
                n_pos = pos + len(value)
            else:
                n_pos = len(value) - value.rfind(u'\n') - 1
            return Token(tok_type, value, (line, pos + 1), (n_line, n_pos))
        # No spec matched: report the offending source line.
        errline = text.splitlines()[line - 1]
        raise LexerError((line, pos + 1), errline)

    def tokenize(text):
        line, pos = 1, 0
        i = 0
        while i < len(text):
            t = match_at(text, i, (line, pos))
            yield t
            line, pos = t.end
            i += len(t.value)

    return tokenize

# This is an example of a token spec. See also [this article][1] for a
# discussion of searching for multiline comments using regexps (including `*?`).
#
# [1]: http://ostermiller.org/findcomment.html
# NOTE: kept for reference only; nothing in this module uses it at runtime.
_example_token_specs = [
    ('COMMENT', (r'\(\*(.|[\r\n])*?\*\)', re.MULTILINE)),
    ('COMMENT', (r'\{(.|[\r\n])*?\}', re.MULTILINE)),
    ('COMMENT', (r'//.*',)),
    ('NL', (r'[\r\n]+',)),
    ('SPACE', (r'[ \t\r\n]+',)),
    ('NAME', (r'[A-Za-z_][A-Za-z_0-9]*',)),
    ('REAL', (r'[0-9]+\.[0-9]*([Ee][+\-]?[0-9]+)*',)),
    ('INT', (r'[0-9]+',)),
    ('INT', (r'\$[0-9A-Fa-f]+',)),
    ('OP', (r'(\.\.)|(<>)|(<=)|(>=)|(:=)|[;,=\(\):\[\]\.+\-<>\*/@\^]',)),
    ('STRING', (r"'([^']|(''))*'",)),
    ('CHAR', (r'#[0-9]+',)),
    ('CHAR', (r'#\$[0-9A-Fa-f]+',)),
]
#tokenize = make_tokenizer(_example_token_specs)

+ 409
- 0
funcparserlib/parser.py View File

@@ -0,0 +1,409 @@
# -*- coding: utf-8 -*-

# Copyright (c) 2008/2013 Andrey Vlasovskikh
# Small Python 3 modifications by Jacob Alexander 2014
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

"""A recurisve descent parser library based on functional combinators.

Basic combinators are taken from Harrison's book ["Introduction to Functional
Programming"][1] and translated from ML into Python. See also [a Russian
translation of the book][2].

[1]: http://www.cl.cam.ac.uk/teaching/Lectures/funprog-jrh-1996/
[2]: http://code.google.com/p/funprog-ru/

A parser `p` is represented by a function of type:

p :: Sequence(a), State -> (b, State)

that takes as its input a sequence of tokens of arbitrary type `a` and a
current parsing state and return a pair of a parsed token of arbitrary type
`b` and the new parsing state.

The parsing state includes the current position in the sequence being parsed and
the position of the rightmost token that has been consumed while parsing.

Parser functions are wrapped into an object of the class `Parser`. This class
implements custom operators `+` for sequential composition of parsers, `|` for
choice composition, `>>` for transforming the result of parsing. The method
`Parser.parse` provides an easier way for invoking a parser hiding details
related to a parser state:

Parser.parse :: Parser(a, b), Sequence(a) -> b

Although this module is able to deal with sequences of any kind of objects, the
recommended way of using it is applying a parser to a `Sequence(Token)`.
`Token` objects are produced by a regexp-based tokenizer defined in
`funcparserlib.lexer`. By using it this way you get more readable parsing error
messages (as `Token` objects contain their position in the source file) and good
separation of lexical and syntactic levels of the grammar. See examples for more
info.

Debug messages are emitted via a `logging.Logger` object named
`"funcparserlib"`.
"""

__all__ = [
'some', 'a', 'many', 'pure', 'finished', 'maybe', 'skip', 'oneplus',
'forward_decl', 'NoParseError',
]

import logging

log = logging.getLogger('funcparserlib')

debug = False


class Parser(object):
    """A wrapper around a parser function that defines some operators for parser
    composition.
    """

    def __init__(self, p):
        """Wraps a parser function p into an object."""
        self.define(p)

    def named(self, name):
        """Specifies the name of the parser for more readable parsing log."""
        self.name = name
        return self

    def define(self, p):
        """Defines a parser wrapped into this object."""
        # Unwrap another Parser if one was passed; otherwise use the callable.
        f = getattr(p, 'run', p)
        if debug:
            # Keep the traced `run` method (below) and store the real
            # implementation in _run so every invocation is logged.
            setattr(self, '_run', f)
        else:
            # Bypass the logging wrapper entirely for speed.
            setattr(self, 'run', f)
        self.named(getattr(p, 'name', p.__doc__))

    def run(self, tokens, s):
        """Sequence(a), State -> (b, State)

        Runs a parser wrapped into this object.
        """
        if debug:
            log.debug(u'trying %s' % self.name)
        return self._run(tokens, s)

    def _run(self, tokens, s):
        raise NotImplementedError(u'you must define() a parser')

    def parse(self, tokens):
        """Sequence(a) -> b

        Applies the parser to a sequence of tokens producing a parsing result.

        It provides a way to invoke a parser hiding details related to the
        parser state. Also it makes error messages more readable by specifying
        the position of the rightmost token that has been reached.
        """
        try:
            (tree, _) = self.run(tokens, State())
            return tree
        except NoParseError as e:
            # Re-raise with the token at the furthest position reached (or
            # <EOF>) appended to the message for readability.
            max = e.state.max
            if len(tokens) > max:
                tok = tokens[max]
            else:
                tok = u'<EOF>'
            raise NoParseError(u'%s: %s' % (e.msg, tok), e.state)

    def __add__(self, other):
        """Parser(a, b), Parser(a, c) -> Parser(a, _Tuple(b, c))

        A sequential composition of parsers.

        NOTE: The real type of the parsed value isn't always such as specified.
        Here we use dynamic typing for ignoring the tokens that are of no
        interest to the user. Also we merge parsing results into a single _Tuple
        unless the user explicitly prevents it. See also skip and >>
        combinators.
        """

        def magic(v1, v2):
            # Drop _Ignored results; merge into a flat _Tuple left-to-right.
            vs = [v for v in [v1, v2] if not isinstance(v, _Ignored)]
            if len(vs) == 1:
                return vs[0]
            elif len(vs) == 2:
                if isinstance(vs[0], _Tuple):
                    return _Tuple(v1 + (v2,))
                else:
                    return _Tuple(vs)
            else:
                return _Ignored(())

        @Parser
        def _add(tokens, s):
            (v1, s2) = self.run(tokens, s)
            (v2, s3) = other.run(tokens, s2)
            return magic(v1, v2), s3

        # or in terms of bind and pure:
        # _add = self.bind(lambda x: other.bind(lambda y: pure(magic(x, y))))
        _add.name = u'(%s , %s)' % (self.name, other.name)
        return _add

    def __or__(self, other):
        """Parser(a, b), Parser(a, c) -> Parser(a, b or c)

        A choice composition of two parsers.

        NOTE: Here we are not providing the exact type of the result. In a
        statically typed language something like Either b c could be used. See
        also + combinator.
        """

        @Parser
        def _or(tokens, s):
            try:
                return self.run(tokens, s)
            except NoParseError as e:
                # First alternative failed: retry from the same position but
                # remember how far the failed branch got (for error messages).
                return other.run(tokens, State(s.pos, e.state.max))

        _or.name = u'(%s | %s)' % (self.name, other.name)
        return _or

    def __rshift__(self, f):
        """Parser(a, b), (b -> c) -> Parser(a, c)

        Given a function from b to c, transforms a parser of b into a parser of
        c. It is useful for transforming a parser value into another value for
        making it a part of a parse tree or an AST.

        This combinator may be thought of as a functor from b -> c to Parser(a,
        b) -> Parser(a, c).
        """

        @Parser
        def _shift(tokens, s):
            (v, s2) = self.run(tokens, s)
            return f(v), s2

        # or in terms of bind and pure:
        # _shift = self.bind(lambda x: pure(f(x)))
        _shift.name = u'(%s)' % (self.name,)
        return _shift

    def bind(self, f):
        """Parser(a, b), (b -> Parser(a, c)) -> Parser(a, c)

        NOTE: A monadic bind function. It is used internally to implement other
        combinators. Functions bind and pure make the Parser a Monad.
        """

        @Parser
        def _bind(tokens, s):
            (v, s2) = self.run(tokens, s)
            return f(v).run(tokens, s2)

        _bind.name = u'(%s >>=)' % (self.name,)
        return _bind


class State(object):
    """A parsing state that is maintained basically for error reporting.

    It consists of the current position pos in the sequence being parsed and
    the position max of the rightmost token that has been consumed while
    parsing.
    """

    def __init__(self, pos=0, max=0):
        self.pos = pos
        self.max = max

    def __str__(self):
        # BUG FIX: `unicode` does not exist in Python 3 (this file targets
        # python3 per the kll.py shebang); use str() instead.
        return str((self.pos, self.max))

    def __repr__(self):
        return u'State(%r, %r)' % (self.pos, self.max)


class NoParseError(Exception):
    """Signals that a parser could not match the token stream."""

    def __init__(self, msg=u'', state=None):
        self.msg = msg      # human-readable failure description
        self.state = state  # State at the failure point (furthest position)

    def __str__(self):
        return self.msg


class _Tuple(tuple):
    # Marker subclass of tuple: lets Parser.__add__ distinguish results it has
    # already merged from ordinary tuples produced by user transformations.
    pass


class _Ignored(object):
def __init__(self, value):
self.value = value

def __repr__(self):
return u'_Ignored(%s)' % repr(self.value)


@Parser
def finished(tokens, s):
    """Parser(a, None)

    Throws an exception if any tokens are left in the input unparsed.
    """
    # Succeeds (yielding None) only when the position has reached end of input.
    if s.pos >= len(tokens):
        return None, s
    else:
        raise NoParseError(u'should have reached <EOF>', s)


finished.name = u'finished'


def many(p):
    """Parser(a, b) -> Parser(a, [b])

    Returns a parser that applies p repeatedly until it fails, collecting
    every parsed value into a (possibly empty) list.
    """

    @Parser
    def _many(tokens, s):
        # Iterative rather than recursive so long inputs cannot overflow
        # the Python call stack.
        values = []
        try:
            while True:
                (value, s) = p.run(tokens, s)
                values.append(value)
        except NoParseError as e:
            # The first failure ends the repetition; keep the furthest
            # position reached for error reporting.
            return values, State(s.pos, e.state.max)

    _many.name = u'{ %s }' % p.name
    return _many


def some(pred):
    """(a -> bool) -> Parser(a, a)

    Returns a parser that consumes one token satisfying the predicate pred.
    """

    @Parser
    def _some(tokens, s):
        # Guard clauses: end of input, then predicate failure.
        if s.pos >= len(tokens):
            raise NoParseError(u'no tokens left in the stream', s)
        t = tokens[s.pos]
        if not pred(t):
            if debug:
                log.debug(u'failed "%s", state = %s' % (t, s))
            raise NoParseError(u'got unexpected token', s)
        pos = s.pos + 1
        s2 = State(pos, max(pos, s.max))
        if debug:
            log.debug(u'*matched* "%s", new state = %s' % (t, s2))
        return t, s2

    _some.name = u'(some)'
    return _some


def a(value):
    """Eq(a) -> Parser(a, a)

    Returns a parser that consumes exactly one token equal to value.
    """
    display = getattr(value, 'name', value)
    return some(lambda t: t == value).named(u'(a "%s")' % (display,))


def pure(x):
    """Returns a parser that consumes no input and always yields x."""

    @Parser
    def _pure(_, s):
        return x, s

    _pure.name = u'(pure %r)' % (x,)
    return _pure


def maybe(p):
    """Parser(a, b) -> Parser(a, b or None)

    Returns a parser that yields None instead of failing when p fails.

    NOTE: In a statically typed language, the type Maybe b could be more
    appropriate.
    """
    return (p | pure(None)).named(u'[ %s ]' % (p.name,))


def skip(p):
    """Parser(a, b) -> Parser(a, _Ignored(b))

    Wraps p so that its result is discarded by the + combinator; handy for
    throwing away concrete-syntax elements such as "," or ";".
    """
    return p >> _Ignored


def oneplus(p):
    """Parser(a, b) -> Parser(a, [b])

    Returns a parser that applies p one or more times, yielding all results.
    """
    repeated = p + many(p) >> (lambda pair: [pair[0]] + pair[1])
    return repeated.named(u'(%s , { %s })' % (p.name, p.name))


def with_forward_decls(suspension):
    """(None -> Parser(a, b)) -> Parser(a, b)

    Returns a parser that computes itself lazily as a result of the suspension
    provided. It is needed when some parsers contain forward references to
    parsers defined later and such references are cyclic. See examples for more
    details.
    """

    @Parser
    def f(tokens, s):
        # The suspension is re-evaluated on every run (no caching here).
        return suspension().run(tokens, s)

    return f


def forward_decl():
    """None -> Parser(?, ?)

    Returns an undefined parser usable as a forward declaration; call
    define() on it once every parser it depends on is available. Running
    it before that is an error.
    """

    @Parser
    def _undefined(tokens, s):
        raise NotImplementedError(u'you must define() a forward_decl somewhere')

    return _undefined


# Running this module directly executes its doctests (if any).
if __name__ == '__main__':
    import doctest
    doctest.testmod()

+ 49
- 0
funcparserlib/util.py View File

@@ -0,0 +1,49 @@
# -*- coding: utf-8 -*-

# Copyright (c) 2008/2013 Andrey Vlasovskikh
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


def pretty_tree(x, kids, show):
    """(a, (a -> list(a)), (a -> str)) -> str

    Returns a pseudographic tree representation of x similar to the Unix
    `tree` command. `kids` yields a node's children; `show` renders a node.
    """
    (MID, END, CONT, LAST, ROOT) = (u'|-- ', u'`-- ', u'|   ', u'    ', u'')

    def render(node, indent, sym):
        # One line for this node, then recurse over its children.
        line = indent + sym + show(node)
        children = kids(node)
        if not children:
            return line
        if sym == MID:
            child_indent = indent + CONT
        elif sym == ROOT:
            child_indent = indent + ROOT
        else:
            child_indent = indent + LAST
        # Middle children get a tee connector; the last child gets an elbow.
        connectors = [MID] * (len(children) - 1) + [END]
        rendered = [render(c, child_indent, s)
                    for c, s in zip(children, connectors)]
        return u'\n'.join([line] + rendered)

    return render(x, u'', ROOT)

+ 505
- 0
kll.py View File

@@ -0,0 +1,505 @@
#!/usr/bin/env python3
# KLL Compiler
# Keyboard Layout Language
#
# Copyright (C) 2014 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.

### Imports ###

import argparse
import io
import os
import re
import sys
import token
import importlib

from tokenize import generate_tokens
from re import VERBOSE
from pprint import pformat

from kll_lib.hid_dict import *
from kll_lib.containers import *

from funcparserlib.lexer import make_tokenizer, Token, LexerError
from funcparserlib.parser import (some, a, many, oneplus, skip, finished, maybe, skip, forward_decl, NoParseError)



### Decorators ###

## Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'


## Python Text Formatting Fixer...
## Because the creators of Python are averse to proper capitalization.
# argparse builds its help text through a gettext hook; installing our own
# translation function lets us re-capitalize the stock phrases without
# patching argparse itself.
textFormatter_lookup = {
	"usage: " : "Usage: ",
	"optional arguments" : "Optional Arguments",
}

def textFormatter_gettext( s ):
	# Fall back to the original phrase when we have no replacement for it
	return textFormatter_lookup.get( s, s )

argparse._ = textFormatter_gettext



### Argument Parsing ###

def processCommandLineArgs():
	"""Parse sys.argv for the KLL compiler.

	Returns a tuple:
	  (defaultFiles, partialFileSets, backend, template, output)
	Exits with status 1 if any listed .kll file does not exist.
	"""
	# Setup argument processor
	pArgs = argparse.ArgumentParser(
	        usage="%(prog)s [options] <file1>...",
	        description="Generates .h file state tables and pointer indices from KLL .kll files.",
	        epilog="Example: {0} TODO".format( os.path.basename( sys.argv[0] ) ),
	        formatter_class=argparse.RawTextHelpFormatter,
	        add_help=False,
	)

	# Positional Arguments
	pArgs.add_argument( 'files', type=str, nargs='+',
		help=argparse.SUPPRESS ) # Suppressed help output, because Python output is verbosely ugly

	# Optional Arguments
	pArgs.add_argument( '-b', '--backend', type=str, default="kiibohd",
		help="Specify target backend for the KLL compiler.\n"
		"Default: kiibohd" )
	pArgs.add_argument( '-p', '--partial', type=str, nargs='+', action='append',
		help="Specify .kll files to generate partial map, multiple files per flag.\n"
		"Each -p defines another partial map.\n"
		"Base .kll files (that define the scan code maps) must be defined for each partial map." )
	pArgs.add_argument( '-t', '--template', type=str, default="templateKeymap.h",
		help="Specify template used to generate the keymap.\n"
		"Default: templateKeymap.h" )
	# BUG FIX: the default output was "templateKeymap.h", which would make the
	# compiler overwrite its own template file; the help text already promised
	# "generatedKeymap.h", so the default now matches it.
	pArgs.add_argument( '-o', '--output', type=str, default="generatedKeymap.h",
		help="Specify output file. Writes to current working directory by default.\n"
		"Default: generatedKeymap.h" )
	pArgs.add_argument( '-h', '--help', action="help",
		help="This message." )

	# Process Arguments
	args = pArgs.parse_args()

	# Parameters
	defaultFiles = args.files
	partialFileSets = args.partial
	if partialFileSets is None:
		partialFileSets = [[]]

	# Check file existence for the base files and every partial map file
	for filename in defaultFiles:
		if not os.path.isfile( filename ):
			print ( "{0} {1} does not exist...".format( ERROR, filename ) )
			sys.exit( 1 )

	for partial in partialFileSets:
		for filename in partial:
			if not os.path.isfile( filename ):
				print ( "{0} {1} does not exist...".format( ERROR, filename ) )
				sys.exit( 1 )

	return (defaultFiles, partialFileSets, args.backend, args.template, args.output)



### Tokenizer ###

def tokenize( string ):
	"""str -> Sequence(Token)

	Tokenize the contents of a .kll file; whitespace and comments are
	filtered out of the returned token list.
	"""

	# Basic Tokens Spec
	# NOTE: order matters — the tokenizer returns the first spec that
	# matches, so more specific patterns must precede general ones.
	specs = [
		( 'Comment', ( r' *#.*', ) ),
		( 'Space', ( r'[ \t\r\n]+', ) ),
		( 'USBCode', ( r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
		( 'USBCodeStart', ( r'U\[', ) ),
		( 'ScanCode', ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
		( 'ScanCodeStart', ( r'S\[', ) ),
		( 'CodeEnd', ( r'\]', ) ),
		( 'String', ( r'"[^"]*"', VERBOSE ) ),
		( 'SequenceString', ( r"'[^']*'", ) ),
		( 'Comma', ( r',', ) ),
		( 'Dash', ( r'-', ) ),
		( 'Plus', ( r'\+', ) ),
		( 'Operator', ( r'=>|:|=', ) ),
		( 'Parenthesis', ( r'\(|\)', ) ),
		( 'Number', ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', VERBOSE ) ),
		( 'Name', ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
		( 'VariableContents', ( r'''[^"' ;:=>()]+''', ) ),
		( 'EndOfLine', ( r';', ) ),
	]

	# Tokens to filter out of the token stream
	useless = ['Space', 'Comment']

	tokens = make_tokenizer( specs )
	return [x for x in tokens( string ) if x.type not in useless]



### Parsing ###

## Map Arrays
# BUG FIX: a full 8-bit address space holds 256 entries (0x100); the previous
# size of 0xFF (255) made the maximum accepted code 0xFF an IndexError.
scanCode_map = [ None ] * 0x100 # Define 8 bit address width
usbCode_map = [ None ] * 0x100 # Define 8 bit address width
variable_dict = dict()             # variable name -> assigned string contents
capabilities_dict = Capabilities() # capability name -> [ function, args ]


## Parsing Functions

def make_scanCode( token ):
	"""Convert a ScanCode token such as 'S0x2B' or 'S43' to an int.

	Raises ValueError when the value does not fit in 8 bits.
	"""
	scanCode = int( token[1:], 0 )
	# Check size, to make sure it's valid
	if scanCode > 0xFF:
		print ( "{0} ScanCode value {1} is larger than 255".format( ERROR, scanCode ) )
		# BUG FIX: a bare `raise` with no active exception is itself a
		# RuntimeError; raise an explicit, catchable exception instead.
		raise ValueError( "ScanCode out of range" )
	return scanCode

def make_usbCode( token ):
	"""Convert a USBCode token ('U0x29', 'U41', 'U"ESC"') to an int.

	Quoted names are resolved case-insensitively through the KLL HID
	lookup dictionary. Raises ValueError when the value exceeds 8 bits.
	"""
	# If first character is a U, strip
	if token[0] == "U":
		token = token[1:]

	# If using string representation of USB Code, do lookup, case-insensitive
	if '"' in token:
		try:
			usbCode = kll_hid_lookup_dictionary[ token[1:-1].upper() ]
		except LookupError as err:
			print ( "{0} {1} is an invalid USB Code Lookup...".format( ERROR, err ) )
			raise
	else:
		usbCode = int( token, 0 )

	# Check size, to make sure it's valid
	if usbCode > 0xFF:
		print ( "{0} USBCode value {1} is larger than 255".format( ERROR, usbCode ) )
		# BUG FIX: a bare `raise` with no active exception is itself a
		# RuntimeError; raise an explicit, catchable exception instead.
		raise ValueError( "USBCode out of range" )
	return usbCode

def make_seqString( token ):
	"""Convert a SequenceString token ('...') into USB code combos.

	Returns a sequence (list) of per-character entries, each a list of
	single-key combos resolved through the KLL HID lookup dictionary.
	"""
	# Shifted Characters, and amount to move by to get non-shifted version
	# US ANSI
	shiftCharacters = (
		( "ABCDEFGHIJKLMNOPQRSTUVWXYZ", 0x20 ),
		( "+", 0x12 ),
		( "&(", 0x11 ),
		( "!#$%<>", 0x10 ),
		( "*", 0x0E ),
		( ")", 0x07 ),
		( '"', 0x05 ),
		( ":", 0x01 ),
		( "^", -0x10 ),
		( "_", -0x18 ),
		( "{}|", -0x1E ),
		( "~", -0x20 ),
		( "@", -0x32 ),
		( "?", -0x38 ),
	)

	listOfLists = []
	shiftKey = kll_hid_lookup_dictionary["SHIFT"]

	# Creates a list of USB codes from the string: sequence (list) of combos (lists)
	for char in token[1:-1]:
		processedChar = char

		# Whether or not to create a combo for this sequence with a shift
		shiftCombo = False

		# Depending on the ASCII character, convert to single character or Shift + character
		for pair in shiftCharacters:
			if char in pair[0]:
				shiftCombo = True
				# Offset within the ASCII table to the unshifted character
				processedChar = chr( ord( char ) + pair[1] )
				break

		# Do KLL HID Lookup on non-shifted character
		# NOTE: Case-insensitive, which is why the shift must be pre-computed
		usbCode = kll_hid_lookup_dictionary[ processedChar.upper() ]

		# Create Combo for this character, add shift key if shifted
		# NOTE(review): a shifted character is emitted as [ [shift], [usbCode] ]
		# — shift and key as two separate single-key elements, not one combined
		# combo; confirm this layering is what downstream processing expects.
		charCombo = []
		if shiftCombo:
			charCombo = [ [ shiftKey ] ]
		charCombo.append( [ usbCode ] )

		# Add to list of lists
		listOfLists.append( charCombo )

	return listOfLists

def make_string( token ):
	"""Strip the surrounding double quotes from a String token."""
	return token[ 1 : -1 ]

def make_number( token ):
	"""Parse a Number token; base prefixes (e.g. 0x) are honoured via base 0."""
	return int( token, 0 )

# Range can go from high to low or low to high
def make_scanCode_range( rangeVals ):
	"""Expand a pair of scan codes into the inclusive list between them."""
	low, high = sorted( rangeVals[0:2] )
	return list( range( low, high + 1 ) )

# Range can go from high to low or low to high
# Warn on 0-9 (as this does not do what one would expect) TODO
# Lookup USB HID tags and convert to a number
def make_usbCode_range( rangeVals ):
	"""Expand a pair of USB codes (ints or quoted HID tags) inclusively.

	Non-integer endpoints are resolved through make_usbCode first.
	"""
	# Check if already integers; otherwise resolve the HID tag
	start = rangeVals[0] if isinstance( rangeVals[0], int ) else make_usbCode( rangeVals[0] )
	end = rangeVals[1] if isinstance( rangeVals[1], int ) else make_usbCode( rangeVals[1] )

	# Swap start, end if start is greater than end
	if start > end:
		start, end = end, start

	# Iterate from start to end, and generate the range
	# (dead trailing `pass` after the return removed)
	return list( range( start, end + 1 ) )


## Base Rules

const = lambda x: lambda _: x          # const(x): ignore the argument, return x
unarg = lambda f: lambda x: f(*x)      # adapt f(a, b, ...) to f((a, b, ...))
flatten = lambda list: sum( list, [] ) # concatenate a list of lists (note: shadows builtin `list`)

tokenValue = lambda x: x.value                                      # Token -> its text
tokenType = lambda t: some( lambda x: x.type == t ) >> tokenValue   # match one token of type t
operator = lambda s: a( Token( 'Operator', s ) ) >> tokenValue      # match a specific operator
parenthesis = lambda s: a( Token( 'Parenthesis', s ) ) >> tokenValue # match '(' or ')'
eol = a( Token( 'EndOfLine', ';' ) )                                # statement terminator

def listElem( item ):
	"""Wrap a single parsed value in a one-element list."""
	return [ item ]

# Flatten only the top layer (list of lists of ...)
def oneLayerFlatten( items ):
	"""Concatenate the first-level sublists of items into a single list."""
	return [ item for sublist in items for item in sublist ]

# Expand ranges of values in the 3rd dimension of the list, to a list of 2nd lists
# i.e. [ sequence, [ combo, [ range ] ] ] --> [ [ sequence, [ combo ] ], <option 2>, <option 3> ]
def optionExpansion( sequences ):
	"""Expand every per-combo option list into explicit sequence variants.

	Each combo may hold several alternative codes; one output variant is
	produced per combination of alternatives, first option varying fastest.
	"""
	expandedSequences = []

	# Count the options behind each combo; their product is the variant count
	totalCombinations = 1
	maxLeafList = []
	for sequence in sequences:
		for combo in sequence:
			rangeLen = len( combo )
			totalCombinations *= rangeLen
			maxLeafList.append( rangeLen )

	# Odometer-style counter selecting one option per combo
	curLeafList = [ 0 ] * len( maxLeafList )

	# Generate a list of permutations of the sequence of combos
	for count in range( 0, totalCombinations ):
		# Emit the variant selected by the current counter state
		variant = []
		position = 0
		for sequence in sequences:
			variant.append( [] )
			for combo in sequence:
				variant[ -1 ].append( combo[ curLeafList[ position ] ] )
				position += 1
		expandedSequences.append( variant )

		# Increment combination tracker
		# BUG FIX: the carry previously only propagated a single position, so
		# with three or more expansion points a double overflow could push a
		# digit past its maximum (later reads would IndexError); the carry
		# now cascades through every digit.
		for leaf in range( 0, len( curLeafList ) ):
			curLeafList[ leaf ] += 1
			if curLeafList[ leaf ] < maxLeafList[ leaf ]:
				break
			curLeafList[ leaf ] = 0 # overflow: reset and carry into the next digit

	return expandedSequences


## Evaluation Rules

def eval_scanCode( trigger, result ):
	# Convert to lists of lists of lists to tuples of tuples of tuples
	# (tuples are hashable, so they can serve as map keys later on)
	trigger = tuple( tuple( tuple( sequence ) for sequence in variant ) for variant in trigger )
	result = tuple( tuple( tuple( sequence ) for sequence in variant ) for variant in result )

	# Add to the base scanCode map, overwrite if already defined
	# TODO(review): the map store below is still commented out, so this
	# evaluation currently has no effect beyond the tuple conversions above.
	# if scanCode_map[ trigger ] != None:
	#	print ( "ScanCodeMap - Replacing '{0}' with '{1}' -> {2}".format( scanCode_map[ trigger ], result, trigger ) )
	# scanCode_map[ trigger ] = result

def eval_usbCode( trigger, result ):
	"""Record a USB-code trigger -> result entry in the base usbCode map."""
	# Check if trigger is list

	# Add to the base usbCode map, overwrite if already defined
	# (idiom fix: identity comparison `is not None` instead of `!= None`)
	if usbCode_map[ trigger ] is not None:
		print ( "USBCodeMap - Replacing '{0}' with '{1}' -> {2}".format( usbCode_map[ trigger ], result, trigger ) )
	usbCode_map[ trigger ] = result
	print ( trigger )

def eval_variable( name, content ):
	"""Assign a variable: concatenate every content piece into one string."""
	# Content might be a concatenation of multiple data types; stringify each
	variable_dict[ name ] = "".join( str( item ) for item in content )

def eval_capability( name, function, args ):
	"""Register a capability: name -> [ C function name, argument list ]."""
	entry = [ function, args ]
	capabilities_dict[ name ] = entry

# Parser actions
# unarg (funcparserlib.util) adapts f((a, b)) -> f(a, b) so the tuple produced
# by a combined parser can be applied to the two-argument eval functions above
map_scanCode = unarg( eval_scanCode )
map_usbCode = unarg( eval_usbCode )

set_variable = unarg( eval_variable )
set_capability = unarg( eval_capability )


## Sub Rules

# Terminal parsers: match a single token of the given type,
# optionally transformed (>>) into its AST value
usbCode = tokenType('USBCode') >> make_usbCode
scanCode = tokenType('ScanCode') >> make_scanCode
name = tokenType('Name')
number = tokenType('Number') >> make_number
comma = tokenType('Comma')
dash = tokenType('Dash')
plus = tokenType('Plus')
content = tokenType('VariableContents')
string = tokenType('String') >> make_string
unString = tokenType('String') # When the double quotes are still needed for internal processing
seqString = tokenType('SequenceString') >> make_seqString

# Code variants
code_end = tokenType('CodeEnd')

# Scan Codes
# Bracketed form: a list of numbers and/or <num> - <num> ranges, flattened to
# individual codes; combos are joined with '+', sequences separated by ','
scanCode_start = tokenType('ScanCodeStart')
scanCode_range = number + skip( dash ) + number >> make_scanCode_range
scanCode_listElem = number >> listElem
scanCode_innerList = oneplus( ( scanCode_range | scanCode_listElem ) + skip( maybe( comma ) ) ) >> flatten
scanCode_expanded = skip( scanCode_start ) + scanCode_innerList + skip( code_end )
scanCode_elem = scanCode >> listElem
scanCode_combo = oneplus( ( scanCode_expanded | scanCode_elem ) + skip( maybe( plus ) ) )
scanCode_sequence = oneplus( scanCode_combo + skip( maybe( comma ) ) )

# USB Codes
# USB codes may be written as numbers or as quoted key names; sequences may
# also be given as a single sequence string
usbCode_start = tokenType('USBCodeStart')
usbCode_range = ( number | unString ) + skip( dash ) + ( number | unString ) >> make_usbCode_range
usbCode_listElemTag = unString >> make_usbCode
usbCode_listElem = ( number | usbCode_listElemTag ) >> listElem
usbCode_innerList = oneplus( ( usbCode_range | usbCode_listElem ) + skip( maybe( comma ) ) ) >> flatten
usbCode_expanded = skip( usbCode_start ) + usbCode_innerList + skip( code_end )
usbCode_elem = usbCode >> listElem
usbCode_combo = oneplus( ( usbCode_expanded | usbCode_elem ) + skip( maybe( plus ) ) ) >> listElem
usbCode_sequence = oneplus( ( usbCode_combo | seqString ) + skip( maybe( comma ) ) ) >> oneLayerFlatten

# Capabilities
# <name>( <number>, ... ) - capability invocation with literal arguments
capFunc_arguments = number + skip( maybe( comma ) )
capFunc_elem = name + skip( parenthesis('(') ) + many( capFunc_arguments ) + skip( parenthesis(')') ) >> listElem
capFunc_combo = oneplus( ( usbCode_expanded | usbCode_elem | capFunc_elem ) + skip( maybe( plus ) ) ) >> listElem
capFunc_sequence = oneplus( ( capFunc_combo | seqString ) + skip( maybe( comma ) ) ) >> oneLayerFlatten

# Trigger / Result Codes
# optionExpansion expands embedded lists/ranges into every concrete
# permutation of the sequence (see the expansion helper above)
triggerCode_outerList = scanCode_sequence >> optionExpansion
triggerUSBCode_outerList = usbCode_sequence >> optionExpansion
resultCode_outerList = capFunc_sequence >> optionExpansion


## Main Rules

#| <variable> = <variable contents>;
variable_contents = name | content | string | number | comma | dash
variable_expression = name + skip( operator('=') ) + oneplus( variable_contents ) + skip( eol ) >> set_variable

#| <capability name> => <c function>;
capability_arguments = name + skip( operator(':') ) + number + skip( maybe( comma ) )
capability_expression = name + skip( operator('=>') ) + name + skip( parenthesis('(') ) + many( capability_arguments ) + skip( parenthesis(')') ) + skip( eol ) >> set_capability

#| <trigger> : <result>;
# NOTE(review): the usbCode_expression action (map_usbCode) is intentionally
# disabled pending the transformation stage
scanCode_expression = triggerCode_outerList + skip( operator(':') ) + resultCode_outerList + skip( eol ) >> map_scanCode
usbCode_expression = triggerUSBCode_outerList + skip( operator(':') ) + resultCode_outerList + skip( eol ) #>> map_usbCode

def parse( tokenSequence ):
	"""Sequence(Token) -> object

	Parse a whole KLL token stream: zero or more expressions of any of the
	four top-level forms, followed by end of input.
	"""
	# Top-level Parser
	expression = scanCode_expression | usbCode_expression | variable_expression | capability_expression
	kll_file = maybe( many( expression ) ) + skip( finished )

	return kll_file.parse( tokenSequence )



### Main Entry Point ###

if __name__ == '__main__':
	# Command line: input files, backend selection, template and output paths
	(defaultFiles, partialFileSets, backend_name, template, output) = processCommandLineArgs()

	# Load backend module
	# NOTE(review): 'global' is a no-op at module scope - kept as-is
	global backend
	backend_import = importlib.import_module( "backends.{0}".format( backend_name ) )
	backend = backend_import.Backend( template )

	#TODO Move elsewhere
	# Tokenize and parse each of the default KLL files
	for filename in defaultFiles:
		with open( filename ) as file:
			data = file.read()

		tokenSequence = tokenize( data )
		print ( pformat( tokenSequence ) )  # Debug dump of the token stream
		tree = parse( tokenSequence )       # Side effect: populates the eval_* dictionaries
		#print ( tree )
		#print ( scanCode_map )
		#print ( usbCode_map )
		print ( variable_dict )
		print ( capabilities_dict )

	# TODO Move
	# Hand the accumulated capabilities to the backend for code generation
	backend.process( capabilities_dict )

	# Successful Execution
	sys.exit( 0 )


+ 0
- 0
kll_lib/__init__.py View File


+ 114
- 0
kll_lib/containers.py View File

@@ -0,0 +1,114 @@
#!/usr/bin/env python3
# KLL Compiler Containers
#
# Copyright (C) 2014 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.

### Imports ###



### Decorators ###

## Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'



### Parsing ###

## Containers
class Capabilities:
	"""Container for the capabilities dictionary plus convenience lookups.

	Index-based accessors (getIndex/getName/keys) order keys by their stored
	contents and are only valid while no keys are added or removed.
	"""

	def __init__( self ):
		self.capabilities = dict()

	def __getitem__( self, name ):
		return self.capabilities[ name ]

	def __setitem__( self, name, contents ):
		self.capabilities[ name ] = contents

	def __repr__( self ):
		return "Capabilities => {0}\nIndexed Capabilities => {1}".format( self.capabilities, self._ordered() )

	# Keys sorted by their stored contents - the canonical indexing order
	def _ordered( self ):
		return sorted( self.capabilities, key = self.capabilities.get )

	# Total bytes needed to store arguments
	def totalArgBytes( self, name ):
		return sum( int( arg[1] ) for arg in self.capabilities[ name ][1] )

	# Name of the capability function
	def funcName( self, name ):
		return self.capabilities[ name ][0]

	# Only valid while dictionary keys are not added/removed
	def getIndex( self, name ):
		return self._ordered().index( name )

	def getName( self, index ):
		return self._ordered()[ index ]

	def keys( self ):
		return self._ordered()


class Macros:
	# Container for Trigger Macro : Result Macro correlation
	# Layer selection for generating TriggerLists
	#
	# Only convert USB Code list once all the ResultMacros have been accumulated (does a macro reduction; not reversible)
	# Two staged list for ResultMacros:
	#  1) USB Code/Non-converted (may contain capabilities)
	#  2) Capabilities

	def __init__( self ):
		# Default layer (0)
		self.layer = 0

		# Macro Storage
		# One dict per layer, keyed by trigger
		# FIX: was [ [] ] - a list has no .keys() and cannot be indexed by a
		# trigger tuple, so appendScanCode/appendUSBCode would raise
		self.macros = [ dict() ]

	def setLayer( self, layer ):
		self.layer = layer

	# Use for ScanCode trigger macros
	def appendScanCode( self, trigger, result ):
		self.macros[ self.layer ][ trigger ] = result

	# Use for USBCode trigger macros
	# An extra lookup is required: find the macro(s) whose current result
	# equals the USB code trigger, and replace that result
	# Returns True when at least one replacement was made, False otherwise
	def appendUSBCode( self, trigger, result ):
		noSuccess = True

		for macro in self.macros[ self.layer ].keys():
			# USB Code Found
			if trigger == self.macros[ self.layer ][ macro ]:
				print ( "USBCode - Replacing '{0}' with '{1}' -> '{2}'".format( trigger, macro, result ) )
				self.macros[ self.layer ][ macro ] = result
				noSuccess = False

		# Only show warning if no replacements were done
		# FIX: format string used {1} for both fields, printing the layer
		# number where the trigger belonged
		if noSuccess:
			print ( "Warning: '{0}' USB Code not found in layer {1}".format( trigger, self.layer ) )
			return False

		return True


+ 502
- 0
kll_lib/hid_dict.py View File

@@ -0,0 +1,502 @@
#!/usr/bin/env python3
# KLL Compiler - HID Dictionary Lookup
#
# USB Code Lookup Dictionary
#
# Copyright (C) 2014 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.

# Rather than generating tables of hex USB codes for the keymapping tables, readable defines are used (which correspond to usb_hid.h)
# USB HID usage code (Keyboard/Keypad page) -> firmware define name
hid_lookup_dictionary = dict([
	( 0x00, 'KEY_NOEVENT' ), # Event, not a physical key
	( 0x01, 'KEY_ERRORROLLOVER' ), # Event, not a physical key
	( 0x02, 'KEY_POSTFAIL' ), # Event, not a physical key
	( 0x03, 'KEY_ERRORUNDEFINED' ), # Event, not a physical key
	( 0x04, 'KEY_A' ),
	( 0x05, 'KEY_B' ),
	( 0x06, 'KEY_C' ),
	( 0x07, 'KEY_D' ),
	( 0x08, 'KEY_E' ),
	( 0x09, 'KEY_F' ),
	( 0x0A, 'KEY_G' ),
	( 0x0B, 'KEY_H' ),
	( 0x0C, 'KEY_I' ),
	( 0x0D, 'KEY_J' ),
	( 0x0E, 'KEY_K' ),
	( 0x0F, 'KEY_L' ),
	( 0x10, 'KEY_M' ),
	( 0x11, 'KEY_N' ),
	( 0x12, 'KEY_O' ),
	( 0x13, 'KEY_P' ),
	( 0x14, 'KEY_Q' ),
	( 0x15, 'KEY_R' ),
	( 0x16, 'KEY_S' ),
	( 0x17, 'KEY_T' ),
	( 0x18, 'KEY_U' ),
	( 0x19, 'KEY_V' ),
	( 0x1A, 'KEY_W' ),
	( 0x1B, 'KEY_X' ),
	( 0x1C, 'KEY_Y' ),
	( 0x1D, 'KEY_Z' ),
	( 0x1E, 'KEY_1' ),
	( 0x1F, 'KEY_2' ),
	( 0x20, 'KEY_3' ),
	( 0x21, 'KEY_4' ),
	( 0x22, 'KEY_5' ),
	( 0x23, 'KEY_6' ),
	( 0x24, 'KEY_7' ),
	( 0x25, 'KEY_8' ),
	( 0x26, 'KEY_9' ),
	( 0x27, 'KEY_0' ),
	( 0x28, 'KEY_ENTER' ),
	( 0x29, 'KEY_ESC' ),
	( 0x2A, 'KEY_BACKSPACE' ),
	( 0x2B, 'KEY_TAB' ),
	( 0x2C, 'KEY_SPACE' ),
	( 0x2D, 'KEY_MINUS' ),
	( 0x2E, 'KEY_EQUAL' ),
	( 0x2F, 'KEY_LEFT_BRACE' ),
	( 0x30, 'KEY_RIGHT_BRACE' ),
	( 0x31, 'KEY_BACKSLASH' ),
	( 0x32, 'KEY_NUMBER' ),
	( 0x33, 'KEY_SEMICOLON' ),
	( 0x34, 'KEY_QUOTE' ),
	( 0x35, 'KEY_BACKTICK' ),
	( 0x36, 'KEY_COMMA' ),
	( 0x37, 'KEY_PERIOD' ),
	( 0x38, 'KEY_SLASH' ),
	( 0x39, 'KEY_CAPS_LOCK' ),
	( 0x3A, 'KEY_F1' ),
	( 0x3B, 'KEY_F2' ),
	( 0x3C, 'KEY_F3' ),
	( 0x3D, 'KEY_F4' ),
	( 0x3E, 'KEY_F5' ),
	( 0x3F, 'KEY_F6' ),
	( 0x40, 'KEY_F7' ),
	( 0x41, 'KEY_F8' ),
	( 0x42, 'KEY_F9' ),
	( 0x43, 'KEY_F10' ),
	( 0x44, 'KEY_F11' ),
	( 0x45, 'KEY_F12' ),
	( 0x46, 'KEY_PRINTSCREEN' ),
	( 0x47, 'KEY_SCROLL_LOCK' ),
	( 0x48, 'KEY_PAUSE' ),
	( 0x49, 'KEY_INSERT' ),
	( 0x4A, 'KEY_HOME' ),
	( 0x4B, 'KEY_PAGE_UP' ),
	( 0x4C, 'KEY_DELETE' ),
	( 0x4D, 'KEY_END' ),
	( 0x4E, 'KEY_PAGE_DOWN' ),
	( 0x4F, 'KEY_RIGHT' ),
	( 0x50, 'KEY_LEFT' ),
	( 0x51, 'KEY_DOWN' ),
	( 0x52, 'KEY_UP' ),
	( 0x53, 'KEY_NUM_LOCK' ),
	( 0x54, 'KEYPAD_SLASH' ),
	( 0x55, 'KEYPAD_ASTERIX' ),
	( 0x56, 'KEYPAD_MINUS' ),
	( 0x57, 'KEYPAD_PLUS' ),
	( 0x58, 'KEYPAD_ENTER' ),
	( 0x59, 'KEYPAD_1' ),
	( 0x5A, 'KEYPAD_2' ),
	( 0x5B, 'KEYPAD_3' ),
	( 0x5C, 'KEYPAD_4' ),
	( 0x5D, 'KEYPAD_5' ),
	( 0x5E, 'KEYPAD_6' ),
	( 0x5F, 'KEYPAD_7' ),
	( 0x60, 'KEYPAD_8' ),
	( 0x61, 'KEYPAD_9' ),
	( 0x62, 'KEYPAD_0' ),
	( 0x63, 'KEYPAD_PERIOD' ),
	( 0x64, 'KEY_ISO_SLASH' ),
	( 0x65, 'KEY_APP' ),
	( 0x66, 'KEYBOARD_STATUS' ), # Used for indicating status or errors, not a key
	( 0x67, 'KEYPAD_EQUAL' ),
	( 0x68, 'KEY_F13' ),
	( 0x69, 'KEY_F14' ),
	( 0x6A, 'KEY_F15' ),
	( 0x6B, 'KEY_F16' ),
	( 0x6C, 'KEY_F17' ),
	( 0x6D, 'KEY_F18' ),
	( 0x6E, 'KEY_F19' ),
	( 0x6F, 'KEY_F20' ),
	( 0x70, 'KEY_F21' ),
	( 0x71, 'KEY_F22' ),
	( 0x72, 'KEY_F23' ),
	( 0x73, 'KEY_F24' ),
	( 0x74, 'KEY_EXEC' ),
	( 0x75, 'KEY_HELP' ),
	( 0x76, 'KEY_MENU' ),
	( 0x77, 'KEY_SELECT' ),
	( 0x78, 'KEY_STOP' ),
	( 0x79, 'KEY_AGAIN' ),
	( 0x7A, 'KEY_UNDO' ),
	( 0x7B, 'KEY_CUT' ),
	( 0x7C, 'KEY_COPY' ),
	( 0x7D, 'KEY_PASTE' ),
	( 0x7E, 'KEY_FIND' ),
	( 0x7F, 'KEY_MUTE' ),
	( 0x80, 'KEY_VOL_UP' ),
	( 0x81, 'KEY_VOL_DOWN' ),
	( 0x82, 'KEY_CAPS_TLOCK' ), # Toggle "Locking" Caps Lock (Old keyboards with Locking Caps Lock)
	( 0x83, 'KEY_NUM_TLOCK' ),
	( 0x84, 'KEY_SCROLL_TLOCK' ),
	( 0x85, 'KEYPAD_COMMA' ), # Brazilian (See spec)
	( 0x86, 'KEYPAD_EQUAL_AS' ), # AS/400 Keyboard (See spec)
	( 0x87, 'KEY_INTER1' ), # KANJI1 - Brazilian and Japanese "Ru" and "-"
	( 0x88, 'KEY_INTER2' ), # KANJI2 - Japanese Katakana/Hiragana
	( 0x89, 'KEY_INTER3' ), # KANJI3 - Japanese Yen
	( 0x8A, 'KEY_INTER4' ), # KANJI4 - Japanese Henkan
	( 0x8B, 'KEY_INTER5' ), # KANJI5 - Japanese Muhenkan
	( 0x8C, 'KEY_INTER6' ), # KANJI6 - PC0x62 Comma (Ka-m-ma)
	( 0x8D, 'KEY_INTER7' ), # KANJI7 - Double-Byte/Single-Byte Toggle
	( 0x8E, 'KEY_INTER8' ), # KANJI8 - Undefined
	( 0x8F, 'KEY_INTER9' ), # KANJI9 - Undefined
	( 0x90, 'KEY_LANG1' ), # Korean Hangul/English Toggle
	( 0x91, 'KEY_LANG2' ), # Korean Hanja Conversion - Japanese Eisu
	( 0x92, 'KEY_LANG3' ), # Japanese Katakana Key (USB)
	( 0x93, 'KEY_LANG4' ), # Japanese Hiragana Key (USB)
	( 0x94, 'KEY_LANG5' ), # Japanese Zenkaku/Hankaku Key (USB)
	( 0x95, 'KEY_LANG6' ), # Reserved (Application Specific)
	( 0x96, 'KEY_LANG7' ), # Reserved (Application Specific)
	( 0x97, 'KEY_LANG8' ), # Reserved (Application Specific)
	( 0x98, 'KEY_LANG9' ), # Reserved (Application Specific)
	( 0x99, 'KEY_ALT_ERASE' ), # Special Erase (See Spec)
	( 0x9A, 'KEY_SYSREQ_ATT' ), # Modifier Type
	( 0x9B, 'KEY_CANCEL' ),
	( 0x9C, 'KEY_CLEAR' ),
	( 0x9D, 'KEY_PRIOR' ),
	( 0x9E, 'KEY_RETURN' ),
	( 0x9F, 'KEY_SEPARATOR' ),
	( 0xA0, 'KEY_OUT' ),
	( 0xA1, 'KEY_OPER' ),
	( 0xA2, 'KEY_CLEAR_AGAIN' ),
	( 0xA3, 'KEY_CRSEL_PROPS' ),
	( 0xA4, 'KEY_EXSEL' ),
	# 0xA5 - 0xAF Reserved
	( 0xB0, 'KEYPAD_00' ),
	( 0xB1, 'KEYPAD_000' ),
	( 0xB2, 'KEY_1000_SEP' ),
	( 0xB3, 'KEY_DECIMAL_SEP' ),
	( 0xB4, 'KEY_CURRENCY_MAIN' ),
	( 0xB5, 'KEY_CURRENCY_SUB' ),
	( 0xB6, 'KEYPAD_LPAREN' ),
	( 0xB7, 'KEYPAD_RPAREN' ),
	( 0xB8, 'KEYPAD_LBRACE' ),
	( 0xB9, 'KEYPAD_RBRACE' ),
	( 0xBA, 'KEYPAD_TAB' ),
	( 0xBB, 'KEYPAD_BACKSPACE' ),
	( 0xBC, 'KEYPAD_A' ),
	( 0xBD, 'KEYPAD_B' ),
	( 0xBE, 'KEYPAD_C' ),
	( 0xBF, 'KEYPAD_D' ),
	( 0xC0, 'KEYPAD_E' ),
	( 0xC1, 'KEYPAD_F' ),
	( 0xC2, 'KEYPAD_XOR' ),
	( 0xC3, 'KEYPAD_CHEVRON' ),
	( 0xC4, 'KEYPAD_PERCENT' ),
	( 0xC5, 'KEYPAD_LTHAN' ),
	( 0xC6, 'KEYPAD_GTHAN' ),
	( 0xC7, 'KEYPAD_BITAND' ),
	( 0xC8, 'KEYPAD_AND' ),
	( 0xC9, 'KEYPAD_BITOR' ),
	( 0xCA, 'KEYPAD_OR' ),
	( 0xCB, 'KEYPAD_COLON' ),
	( 0xCC, 'KEYPAD_POUND' ),
	( 0xCD, 'KEYPAD_SPACE' ),
	( 0xCE, 'KEYPAD_AT' ),
	( 0xCF, 'KEYPAD_EXCLAIM' ),
	( 0xD0, 'KEYPAD_MEM_STORE' ),
	( 0xD1, 'KEYPAD_MEM_RECALL' ),
	( 0xD2, 'KEYPAD_MEM_CLEAR' ),
	( 0xD3, 'KEYPAD_MEM_ADD' ),
	( 0xD4, 'KEYPAD_MEM_SUB' ),
	( 0xD5, 'KEYPAD_MEM_MULT' ),
	( 0xD6, 'KEYPAD_MEM_DIV' ),
	( 0xD7, 'KEYPAD_PLUS_MINUS' ),
	( 0xD8, 'KEYPAD_CLEAR' ),
	( 0xD9, 'KEYPAD_CLEAR_ENTRY' ),
	( 0xDA, 'KEYPAD_BINARY' ),
	( 0xDB, 'KEYPAD_OCTAL' ),
	( 0xDC, 'KEYPAD_DECIMAL' ),
	( 0xDD, 'KEYPAD_HEX' ),
	# 0xDE - 0xDF Reserved
	( 0xE0, 'KEY_LCTRL' ),
	( 0xE1, 'KEY_LSHIFT' ),
	( 0xE2, 'KEY_LALT' ),
	( 0xE3, 'KEY_LGUI' ),
	( 0xE4, 'KEY_RCTRL' ),
	( 0xE5, 'KEY_RSHIFT' ),
	( 0xE6, 'KEY_RALT' ),
	( 0xE7, 'KEY_RGUI' ),
	# 0xE8 - 0xFFFF Reserved, using 0xF0 to 0xFF for function key placeholders
	( 0xF0, 'KEY_FUN1' ),
	( 0xF1, 'KEY_FUN2' ),
	( 0xF2, 'KEY_FUN3' ),
	( 0xF3, 'KEY_FUN4' ),
	( 0xF4, 'KEY_FUN5' ),
	( 0xF5, 'KEY_FUN6' ),
	( 0xF6, 'KEY_FUN7' ),
	( 0xF7, 'KEY_FUN8' ),
	( 0xF8, 'KEY_FUN9' ),
	( 0xF9, 'KEY_FUN10' ),
	( 0xFA, 'KEY_FUN11' ),
	( 0xFB, 'KEY_FUN12' ),
	( 0xFC, 'KEY_FUN13' ),
	( 0xFD, 'KEY_FUN14' ),
	( 0xFE, 'KEY_FUN15' ),
	( 0xFF, 'KEY_FUN16' ),
])



# Lookup for KLL defined HID values, internally the compiler uses numbers to combine the keymaps
# Reverse direction: KLL key names and symbol aliases -> USB HID usage code
# (several aliases may map to the same code)
kll_hid_lookup_dictionary = dict([
	( 'A', 0x04 ),
	( 'B', 0x05 ),
	( 'C', 0x06 ),
	( 'D', 0x07 ),
	( 'E', 0x08 ),
	( 'F', 0x09 ),
	( 'G', 0x0A ),
	( 'H', 0x0B ),
	( 'I', 0x0C ),
	( 'J', 0x0D ),
	( 'K', 0x0E ),
	( 'L', 0x0F ),
	( 'M', 0x10 ),
	( 'N', 0x11 ),
	( 'O', 0x12 ),
	( 'P', 0x13 ),
	( 'Q', 0x14 ),
	( 'R', 0x15 ),
	( 'S', 0x16 ),
	( 'T', 0x17 ),
	( 'U', 0x18 ),
	( 'V', 0x19 ),
	( 'W', 0x1A ),
	( 'X', 0x1B ),
	( 'Y', 0x1C ),
	( 'Z', 0x1D ),
	( '1', 0x1E ),
	( '2', 0x1F ),
	( '3', 0x20 ),
	( '4', 0x21 ),
	( '5', 0x22 ),
	( '6', 0x23 ),
	( '7', 0x24 ),
	( '8', 0x25 ),
	( '9', 0x26 ),
	( '0', 0x27 ),
	( 'ENTER', 0x28 ),
	( 'ESC', 0x29 ),
	( 'BACKSPACE', 0x2A ),
	( 'TAB', 0x2B ),
	( 'SPACE', 0x2C ),
	( '-', 0x2D ), ( 'MINUS', 0x2D ),
	( '=', 0x2E ), ( 'EQUALS', 0x2E ), ( 'EQUAL', 0x2E ),
	( '{', 0x2F ), ( 'LEFT BRACE', 0x2F ), ( 'LBRACE', 0x2F ),
	( '}', 0x30 ), ( 'RIGHT BRACE', 0x30 ), ( 'RBRACE', 0x30 ),
	( '\\', 0x31 ), ( 'BACKSLASH', 0x31 ),
	( '#', 0x32 ), ( 'NUMBER', 0x32 ), ( 'HASH', 0x32 ),
	( ';', 0x33 ), ( 'SEMICOLON', 0x33 ),
	( "'", 0x34 ), ( 'QUOTE', 0x34 ),
	( '`', 0x35 ), ( 'BACKTICK', 0x35 ),
	( ',', 0x36 ), ( 'COMMA', 0x36 ),
	( '.', 0x37 ), ( 'PERIOD', 0x37 ),
	( '/', 0x38 ), ( 'SLASH', 0x38 ),
	( 'CAPSLOCK', 0x39 ),
	( 'F1', 0x3A ),
	( 'F2', 0x3B ),
	( 'F3', 0x3C ),
	( 'F4', 0x3D ),
	( 'F5', 0x3E ),
	( 'F6', 0x3F ),
	( 'F7', 0x40 ),
	( 'F8', 0x41 ),
	( 'F9', 0x42 ),
	( 'F10', 0x43 ),
	( 'F11', 0x44 ),
	( 'F12', 0x45 ),
	( 'PRINTSCREEN', 0x46 ),
	( 'SCROLLLOCK', 0x47 ),
	( 'PAUSE', 0x48 ),
	( 'INSERT', 0x49 ),
	( 'HOME', 0x4A ),
	( 'PAGEUP', 0x4B ),
	( 'DELETE', 0x4C ),
	( 'END', 0x4D ),
	( 'PAGEDOWN', 0x4E ),
	( 'RIGHT', 0x4F ),
	( 'LEFT', 0x50 ),
	( 'DOWN', 0x51 ),
	( 'UP', 0x52 ),
	( 'NUMLOCK', 0x53 ),
	( 'P/', 0x54 ), ( 'KEYPAD SLASH', 0x54 ),
	( 'P*', 0x55 ), ( 'KEYPAD ASTERIX', 0x55 ),
	( 'P-', 0x56 ), ( 'KEYPAD MINUS', 0x56 ),
	( 'P+', 0x57 ), ( 'KEYPAD PLUS', 0x57 ),
	( 'PENTER', 0x58 ), ( 'KEYPAD ENTER', 0x58 ),
	( 'P1', 0x59 ), ( 'KEYPAD 1', 0x59 ),
	( 'P2', 0x5A ), ( 'KEYPAD 2', 0x5A ),
	( 'P3', 0x5B ), ( 'KEYPAD 3', 0x5B ),
	( 'P4', 0x5C ), ( 'KEYPAD 4', 0x5C ),
	( 'P5', 0x5D ), ( 'KEYPAD 5', 0x5D ),
	( 'P6', 0x5E ), ( 'KEYPAD 6', 0x5E ),
	( 'P7', 0x5F ), ( 'KEYPAD 7', 0x5F ),
	( 'P8', 0x60 ), ( 'KEYPAD 8', 0x60 ),
	( 'P9', 0x61 ), ( 'KEYPAD 9', 0x61 ),
	( 'P0', 0x62 ), ( 'KEYPAD 0', 0x62 ),
	( 'P.', 0x63 ), ( 'KEYPAD PERIOD', 0x63 ),
	( 'ISO/', 0x64 ), ( 'ISO SLASH', 0x64 ),
	( 'APP', 0x65 ),
	# 0x66 (KEYBOARD_STATUS) intentionally has no KLL name
	( 'P=', 0x67 ), ( 'KEYPAD EQUAL', 0x67 ),
	( 'F13', 0x68 ),
	( 'F14', 0x69 ),
	( 'F15', 0x6A ),
	( 'F16', 0x6B ),
	( 'F17', 0x6C ),
	( 'F18', 0x6D ),
	( 'F19', 0x6E ),
	( 'F20', 0x6F ),
	( 'F21', 0x70 ),
	( 'F22', 0x71 ),
	( 'F23', 0x72 ),
	( 'F24', 0x73 ),
	( 'EXEC', 0x74 ),
	( 'HELP', 0x75 ),
	( 'MENU', 0x76 ),
	( 'SELECT', 0x77 ),
	( 'STOP', 0x78 ),
	( 'AGAIN', 0x79 ),
	( 'UNDO', 0x7A ),
	( 'CUT', 0x7B ),
	( 'COPY', 0x7C ),
	( 'PASTE', 0x7D ),
	( 'FIND', 0x7E ),
	( 'MUTE', 0x7F ),
	( 'VOLUMEUP', 0x80 ),
	( 'VOLUMEDOWN', 0x81 ),
	( 'CAPSTOGGLELOCK', 0x82 ),
	( 'NUMTOGGLELOCK', 0x83 ),
	( 'SCROLLTOGGLELOCK', 0x84 ),
	( 'P,', 0x85 ),
	( 'KEYPAD AS400 EQUAL', 0x86 ),
	( 'INTER1', 0x87 ), ( 'KANJI1', 0x87 ),
	( 'INTER2', 0x88 ), ( 'KANJI2', 0x88 ), ( 'KANA', 0x88 ),
	( 'INTER3', 0x89 ), ( 'KANJI3', 0x89 ), ( 'YEN', 0x89 ),
	( 'INTER4', 0x8A ), ( 'KANJI4', 0x8A ), ( 'HENKAN', 0x8A ),
	( 'INTER5', 0x8B ), ( 'KANJI5', 0x8B ), ( 'MUHENKAN', 0x8B ),
	( 'INTER6', 0x8C ), ( 'KANJI6', 0x8C ),
	( 'INTER7', 0x8D ), ( 'KANJI7', 0x8D ), ( 'BYTETOGGLE', 0x8D ),
	( 'INTER8', 0x8E ), ( 'KANJI8', 0x8E ),
	( 'INTER9', 0x8F ), ( 'KANJI9', 0x8F ),
	( 'LANG1', 0x90 ), ( 'HANGULENGLISH', 0x90 ),
	( 'LANG2', 0x91 ), ( 'HANJA', 0x91 ), ( 'EISU', 0x91 ),
	( 'LANG3', 0x92 ), ( 'KATAKANA', 0x92 ),
	( 'LANG4', 0x93 ), ( 'HIRAGANA', 0x93 ),
	( 'LANG5', 0x94 ), ( 'ZENKAKUHANKAKU', 0x94 ),
	( 'LANG6', 0x95 ),
	( 'LANG7', 0x96 ),
	( 'LANG8', 0x97 ),
	( 'LANG9', 0x98 ),
	( 'ALTERASE', 0x99 ),
	( 'SYSREQATT', 0x9A ),
	( 'CANCEL', 0x9B ),
	( 'CLEAR', 0x9C ),
	( 'PRIOR', 0x9D ),
	( 'RETURN', 0x9E ),
	( 'SEP', 0x9F ), ( 'SEPARATOR', 0x9F ),
	( 'OUT', 0xA0 ),
	( 'OPER', 0xA1 ),
	( 'CLEAR_AGAIN', 0xA2 ),
	( 'CRSEL_PROPS', 0xA3 ),
	( 'EXSEL', 0xA4 ),
	# 0xA5 - 0xAF Reserved
	( 'P00', 0xB0 ), ( 'KEYPAD 00', 0xB0 ),
	( 'P000', 0xB1 ), ( 'KEYPAD 000', 0xB1 ),
	( '1000SEP', 0xB2 ), ( 'THOUSANDSEPARATOR', 0xB2 ),
	( 'DECIMALSEP', 0xB3 ), ( 'DECIMALSEPARATOR', 0xB3 ),
	( 'CURRENCY', 0xB4 ), ( 'CURRENCYUNIT', 0xB4 ),
	( 'CURRENCYSUB', 0xB5 ), ( 'CURRENCYSUBUNIT', 0xB5 ),
	( 'P(', 0xB6 ), ( 'KEYPAD LEFT PARENTHESES', 0xB6 ),
	( 'P)', 0xB7 ), ( 'KEYPAD RIGHT PARENTHESES', 0xB7 ),
	( 'P{', 0xB8 ), ( 'KEYPAD LEFT BRACE', 0xB8 ),
	( 'P}', 0xB9 ), ( 'KEYPAD RIGHT BRACE', 0xB9 ),
	( 'PTAB', 0xBA ), ( 'KEYPAD TAB', 0xBA ),
	( 'PBACKSPACE', 0xBB ), ( 'KEYPAD BACKSPACE', 0xBB ),
	( 'PA', 0xBC ), ( 'KEYPAD A', 0xBC ),
	( 'PB', 0xBD ), ( 'KEYPAD B', 0xBD ),
	( 'PC', 0xBE ), ( 'KEYPAD C', 0xBE ),
	( 'PD', 0xBF ), ( 'KEYPAD D', 0xBF ),
	( 'PE', 0xC0 ), ( 'KEYPAD E', 0xC0 ),
	( 'PF', 0xC1 ), ( 'KEYPAD F', 0xC1 ),
	( 'PXOR', 0xC2 ), ( 'KEYPAD XOR', 0xC2 ),
	( 'P^', 0xC3 ), ( 'KEYPAD CHEVRON', 0xC3 ),
	( 'P%', 0xC4 ), ( 'KEYPAD PERCENT', 0xC4 ),
	( 'P<', 0xC5 ), ( 'KEYPAD LESSTHAN', 0xC5 ),
	( 'P>', 0xC6 ), ( 'KEYPAD GREATERTHAN', 0xC6 ),
	( 'P&', 0xC7 ), ( 'KEYPAD BITAND', 0xC7 ),
	( 'P&&', 0xC8 ), ( 'KEYPAD AND', 0xC8 ),
	( 'P|', 0xC9 ), ( 'KEYPAD BITOR', 0xC9 ),
	( 'P||', 0xCA ), ( 'KEYPAD OR', 0xCA ),
	( 'P:', 0xCB ), ( 'KEYPAD COLON', 0xCB ),
	( 'P#', 0xCC ), ( 'KEYPAD NUMBER', 0xCC ), ( 'KEYPAD HASH', 0xCC ),
	( 'PSPACE', 0xCD ), ( 'KEYPAD SPACE', 0xCD ),
	( 'P@', 0xCE ), ( 'KEYPAD AT', 0xCE ),
	( 'P!', 0xCF ), ( 'KEYPAD EXCLAIM', 0xCF ),
	( 'PMEMSTORE', 0xD0 ), ( 'KEYPAD MEMSTORE', 0xD0 ),
	( 'PMEMRECALL', 0xD1 ), ( 'KEYPAD MEMRECALL', 0xD1 ),
	( 'PMEMCLEAR', 0xD2 ), ( 'KEYPAD MEMCLEAR', 0xD2 ),
	( 'PMEMADD', 0xD3 ), ( 'KEYPAD MEMADD', 0xD3 ),
	( 'PMEMSUB', 0xD4 ), ( 'KEYPAD MEMSUB', 0xD4 ),
	( 'PMEMMULT', 0xD5 ), ( 'KEYPAD MEMMULT', 0xD5 ),
	( 'PMEMDIV', 0xD6 ), ( 'KEYPAD MEMDIV', 0xD6 ),
	( 'P+/-', 0xD7 ), ( 'KEYPAD PLUSMINUS', 0xD7 ),
	( 'PCLEAR', 0xD8 ), ( 'KEYPAD CLEAR', 0xD8 ),
	( 'PCLEARENTRY', 0xD9 ), ( 'KEYPAD CLEARENTRY', 0xD9 ),
	( 'PBINARY', 0xDA ), ( 'KEYPAD BINARY', 0xDA ),
	( 'POCTAL', 0xDB ), ( 'KEYPAD OCTAL', 0xDB ),
	( 'PDECIMAL', 0xDC ), ( 'KEYPAD DECIMAL', 0xDC ),
	( 'PHEX', 0xDD ), ( 'KEYPAD HEX', 0xDD ),
	# 0xDE - 0xDF Reserved
	( 'LCTRL', 0xE0 ), ( 'LEFT CTRL', 0xE0 ), ( 'CTRL', 0xE0 ),
	( 'LSHIFT', 0xE1 ), ( 'LEFT SHIFT', 0xE1 ), ( 'SHIFT', 0xE1 ),
	( 'LALT', 0xE2 ), ( 'LEFT ALT', 0xE2 ), ( 'ALT', 0xE2 ),
	( 'LGUI', 0xE3 ), ( 'LEFT GUI', 0xE3 ), ( 'GUI', 0xE3 ),
	( 'RCTRL', 0xE4 ), ( 'RIGHT CTRL', 0xE4 ),
	( 'RSHIFT', 0xE5 ), ( 'RIGHT SHIFT', 0xE5 ),
	( 'RALT', 0xE6 ), ( 'RIGHT ALT', 0xE6 ),
	( 'RGUI', 0xE7 ), ( 'RIGHT GUI', 0xE7 ),
	# 0xF0 - 0xFF Function key placeholders (see hid_lookup_dictionary above)
	( 'FUN1', 0xF0 ), ( 'FUNCTION1', 0xF0 ), ( 'FUN', 0xF0 ),
	( 'FUN2', 0xF1 ), ( 'FUNCTION2', 0xF1 ),
	( 'FUN3', 0xF2 ), ( 'FUNCTION3', 0xF2 ),
	( 'FUN4', 0xF3 ), ( 'FUNCTION4', 0xF3 ),
	( 'FUN5', 0xF4 ), ( 'FUNCTION5', 0xF4 ),
	( 'FUN6', 0xF5 ), ( 'FUNCTION6', 0xF5 ),
	( 'FUN7', 0xF6 ), ( 'FUNCTION7', 0xF6 ),
	( 'FUN8', 0xF7 ), ( 'FUNCTION8', 0xF7 ),
	( 'FUN9', 0xF8 ), ( 'FUNCTION9', 0xF8 ),
	( 'FUN10', 0xF9 ), ( 'FUNCTION10', 0xF9 ),
	( 'FUN11', 0xFA ), ( 'FUNCTION11', 0xFA ),
	( 'FUN12', 0xFB ), ( 'FUNCTION12', 0xFB ),
	( 'FUN13', 0xFC ), ( 'FUNCTION13', 0xFC ),
	( 'FUN14', 0xFD ), ( 'FUNCTION14', 0xFD ),
	( 'FUN15', 0xFE ), ( 'FUNCTION15', 0xFE ),
	( 'FUN16', 0xFF ), ( 'FUNCTION16', 0xFF ),
])


+ 114
- 0
templateKeymap.h View File

@@ -0,0 +1,114 @@
/* Copyright (C) 2014 by Jacob Alexander
*
* This file is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This file is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this file. If not, see <http://www.gnu.org/licenses/>.
*/

// Generated MSG /w timestamp and compiler information

#ifndef __generatedKeymap_h
#define __generatedKeymap_h

// ----- Includes -----

// KLL Include
#include <kll.h>



// ----- Capabilities -----

// Indexed Capabilities Table
<|CapabilitiesList|>


// -- Result Macros

// Result Macro Guides
<|ResultMacros|>


// -- Result Macro List

// Indexed Table of Result Macros
<|ResultMacroList|>


// -- Trigger Macros

// Trigger Macro Guides
<|TriggerMacros|>


// -- Trigger Macro List

// Indexed Table of Trigger Macros
<|TriggerMacroList|>



// ----- Trigger Maps -----

// MaxScanCode
// - This is retrieved from the KLL configuration
// - Should be correlated with the max scan code in the scan module
// - Maximum value is 0x100 (0x0 to 0xFF)
// - Increasing it beyond the keyboard's capabilities is just a waste of ram...
#define MaxScanCode <MaxScanCode>

// -- Trigger Lists
//
// Index 0: # of triggers in list
// Index n: pointer to trigger macro - use tm() macro

// - Default Layer -
<|DefaultLayerTriggerList|>


// - Partial Layers -
<|PartialLayerTriggerLists|>


// -- ScanCode Indexed Maps
// Maps to a trigger list of macro pointers
// _
// <scan code> -> |T|
// |r| -> <trigger macro pointer 1>
// |i|
// |g| -> <trigger macro pointer 2>
// |g|
// |e| -> <trigger macro pointer 3>
// |r|
// |s| -> <trigger macro pointer n>
// -

// - Default Map for ScanCode Lookup -
<|DefaultLayerScanMap|>

// - Partial Layer ScanCode Lookup Maps -
<|PartialLayerScanMaps|>



// ----- Layer Index -----

// -- Layer Index List
//
// Index 0: Default map
// Index n: Additional layers
<|LayerIndexList|>



#endif // __generatedKeymap_h