diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..fd28480
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,63 @@
+# Compiled source #
+###################
+*.com
+*.class
+*.dll
+*.exe
+*.o
+*.so
+*.elf
+*.bin
+*.hex
+*.lss
+*.sym
+*.map
+
+# Packages #
+############
+# it's better to unpack these files and commit the raw source
+# git has its own built-in compression methods
+*.7z
+*.dmg
+*.gz
+*.iso
+*.jar
+*.rar
+*.tar
+*.zip
+
+# Logs and databases #
+######################
+*.log
+*.sql
+*.sqlite
+
+# OS generated files #
+######################
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+ehthumbs.db
+Thumbs.db
+
+# Editor generated files #
+##########################
+*.swp
+
+# Source browsing files #
+#########################
+tags
+
+# CMake Generated Files #
+#########################
+CMakeFiles
+CMakeCache.txt
+cmake_install.cmake
+
+# Python Generated Files #
+##########################
+__pycache__/
+*.py[cod]
+
diff --git a/README b/README
index 190b408..9a04997 100644
--- a/README
+++ b/README
@@ -6,6 +6,8 @@ KLL Compiler
Most current version of the KLL spec: https://www.writelatex.com/read/zzqbdwqjfwwf
Or visit http://kiibohd.com
+Uses funcparserlib: https://code.google.com/p/funcparserlib/
+
Usage
-----
diff --git a/backends/__init__.py b/backends/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/backends/kiibohd.py b/backends/kiibohd.py
new file mode 100644
index 0000000..9896c5a
--- /dev/null
+++ b/backends/kiibohd.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python3
+# KLL Compiler - Kiibohd Backend
+#
+# Backend code generator for the Kiibohd Controller firmware.
+#
+# Copyright (C) 2014 by Jacob Alexander
+#
+# This file is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This file is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this file. If not, see <http://www.gnu.org/licenses/>.
+
+### Imports ###
+
+import os
+import sys
+import re
+
+# Modifying Python Path, which is dumb, but the only way to import up one directory...
+sys.path.append( os.path.expanduser('..') )
+
+from kll_lib.containers import *
+
+
+### Decorators ###
+
+ ## Print Decorator Variables
+ERROR = '\033[5;1;31mERROR\033[0m:'
+
+
+
+### Classes ###
+
+class Backend:
+ # Initializes backend
+ # Looks for template file and builds list of fill tags
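+	# Illustrative note (an assumption, not stated in this patch): fill tags in the
+	# template are expected to look like <|CapabilitiesList|>; every tag name the
+	# regex below finds is collected into self.tagList for later substitution.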
+ def __init__( self, templatePath ):
+ # Does template exist?
+ if not os.path.isfile( templatePath ):
+ print ( "{0} '{1}' does not exist...".format( ERROR, templatePath ) )
+ sys.exit( 1 )
+
+ self.templatePath = templatePath
+ self.fill_dict = dict()
+
+ # Generate list of fill tags
+ self.tagList = []
+ with open( templatePath, 'r' ) as openFile:
+ for line in openFile:
+ match = re.findall( '<\|([^|>]+)\|>', line )
+ for item in match:
+ self.tagList.append( item )
+
+
+ # Processes content for fill tags and does any needed dataset calculations
+ def process( self, capabilities ):
+ ## Capabilities ##
+ self.fill_dict['CapabilitiesList'] = "const Capability CapabilitiesList[] = {\n"
+
+ # Keys are pre-sorted
+ for key in capabilities.keys():
+ funcName = capabilities.funcName( key )
+ argByteWidth = capabilities.totalArgBytes( key )
+ self.fill_dict['CapabilitiesList'] += "\t{{ {0}, {1} }},\n".format( funcName, argByteWidth )
+
+ self.fill_dict['CapabilitiesList'] += "};"
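+		# Sketch of the generated C array (capability names here are hypothetical;
+		# the real entries come from the parsed capability dictionary):
+		#
+		#   const Capability CapabilitiesList[] = {
+		#   	{ usbKeyOut_capability, 1 },
+		#   	{ layerShift_capability, 2 },
+		#   };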
+
+ print( self.fill_dict['CapabilitiesList'] )
+
+
+ # Generates the output keymap with fill tags filled
+ def generate( self, filepath ):
+ print("My path: {0}".format( filepath) )
+
diff --git a/funcparserlib/__init__.py b/funcparserlib/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/funcparserlib/lexer.py b/funcparserlib/lexer.py
new file mode 100644
index 0000000..96cbd98
--- /dev/null
+++ b/funcparserlib/lexer.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2008/2013 Andrey Vlasovskikh
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+__all__ = ['make_tokenizer', 'Token', 'LexerError']
+
+import re
+
+
+class LexerError(Exception):
+ def __init__(self, place, msg):
+ self.place = place
+ self.msg = msg
+
+ def __str__(self):
+ s = u'cannot tokenize data'
+ line, pos = self.place
+ return u'%s: %d,%d: "%s"' % (s, line, pos, self.msg)
+
+
+class Token(object):
+ def __init__(self, type, value, start=None, end=None):
+ self.type = type
+ self.value = value
+ self.start = start
+ self.end = end
+
+ def __repr__(self):
+ return u'Token(%r, %r)' % (self.type, self.value)
+
+ def __eq__(self, other):
+ # FIXME: Case sensitivity is assumed here
+ return self.type == other.type and self.value == other.value
+
+ def _pos_str(self):
+ if self.start is None or self.end is None:
+ return ''
+ else:
+ sl, sp = self.start
+ el, ep = self.end
+ return u'%d,%d-%d,%d:' % (sl, sp, el, ep)
+
+ def __str__(self):
+ s = u"%s %s '%s'" % (self._pos_str(), self.type, self.value)
+ return s.strip()
+
+ @property
+ def name(self):
+ return self.value
+
+ def pformat(self):
+ return u"%s %s '%s'" % (self._pos_str().ljust(20),
+ self.type.ljust(14),
+ self.value)
+
+
+def make_tokenizer(specs):
+ """[(str, (str, int?))] -> (str -> Iterable(Token))"""
+
+ def compile_spec(spec):
+ name, args = spec
+ return name, re.compile(*args)
+
+ compiled = [compile_spec(s) for s in specs]
+
+ def match_specs(specs, str, i, position):
+ line, pos = position
+ for type, regexp in specs:
+ m = regexp.match(str, i)
+ if m is not None:
+ value = m.group()
+ nls = value.count(u'\n')
+ n_line = line + nls
+ if nls == 0:
+ n_pos = pos + len(value)
+ else:
+ n_pos = len(value) - value.rfind(u'\n') - 1
+ return Token(type, value, (line, pos + 1), (n_line, n_pos))
+ else:
+ errline = str.splitlines()[line - 1]
+ raise LexerError((line, pos + 1), errline)
+
+ def f(str):
+ length = len(str)
+ line, pos = 1, 0
+ i = 0
+ while i < length:
+ t = match_specs(compiled, str, i, (line, pos))
+ yield t
+ line, pos = t.end
+ i += len(t.value)
+
+ return f
+
+# This is an example of a token spec. See also [this article][1] for a
+# discussion of searching for multiline comments using regexps (including `*?`).
+#
+# [1]: http://ostermiller.org/findcomment.html
+_example_token_specs = [
+ ('COMMENT', (r'\(\*(.|[\r\n])*?\*\)', re.MULTILINE)),
+ ('COMMENT', (r'\{(.|[\r\n])*?\}', re.MULTILINE)),
+ ('COMMENT', (r'//.*',)),
+ ('NL', (r'[\r\n]+',)),
+ ('SPACE', (r'[ \t\r\n]+',)),
+ ('NAME', (r'[A-Za-z_][A-Za-z_0-9]*',)),
+ ('REAL', (r'[0-9]+\.[0-9]*([Ee][+\-]?[0-9]+)*',)),
+ ('INT', (r'[0-9]+',)),
+ ('INT', (r'\$[0-9A-Fa-f]+',)),
+ ('OP', (r'(\.\.)|(<>)|(<=)|(>=)|(:=)|[;,=\(\):\[\]\.+\-<>\*/@\^]',)),
+ ('STRING', (r"'([^']|(''))*'",)),
+ ('CHAR', (r'#[0-9]+',)),
+ ('CHAR', (r'#\$[0-9A-Fa-f]+',)),
+]
+#tokenize = make_tokenizer(_example_token_specs)
diff --git a/funcparserlib/parser.py b/funcparserlib/parser.py
new file mode 100644
index 0000000..92afebb
--- /dev/null
+++ b/funcparserlib/parser.py
@@ -0,0 +1,409 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2008/2013 Andrey Vlasovskikh
+# Small Python 3 modifications by Jacob Alexander 2014
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+"""A recurisve descent parser library based on functional combinators.
+
+Basic combinators are taken from Harrison's book ["Introduction to Functional
+Programming"][1] and translated from ML into Python. See also [a Russian
+translation of the book][2].
+
+ [1]: http://www.cl.cam.ac.uk/teaching/Lectures/funprog-jrh-1996/
+ [2]: http://code.google.com/p/funprog-ru/
+
+A parser `p` is represented by a function of type:
+
+ p :: Sequence(a), State -> (b, State)
+
+that takes as its input a sequence of tokens of arbitrary type `a` and a
+current parsing state and returns a pair of a parsed token of arbitrary type
+`b` and the new parsing state.
+
+The parsing state includes the current position in the sequence being parsed and
+the position of the rightmost token that has been consumed while parsing.
+
+Parser functions are wrapped into an object of the class `Parser`. This class
+implements custom operators `+` for sequential composition of parsers, `|` for
+choice composition, `>>` for transforming the result of parsing. The method
+`Parser.parse` provides an easier way for invoking a parser hiding details
+related to a parser state:
+
+ Parser.parse :: Parser(a, b), Sequence(a) -> b
+
+Although this module is able to deal with sequences of any kind of objects, the
+recommended way of using it is applying a parser to a `Sequence(Token)`.
+`Token` objects are produced by a regexp-based tokenizer defined in
+`funcparserlib.lexer`. By using it this way you get more readable parsing error
+messages (as `Token` objects contain their position in the source file) and good
+separation of lexical and syntactic levels of the grammar. See examples for more
+info.
+
+Debug messages are emitted via a `logging.Logger` object named
+`"funcparserlib"`.
+"""
+
+__all__ = [
+ 'some', 'a', 'many', 'pure', 'finished', 'maybe', 'skip', 'oneplus',
+ 'forward_decl', 'NoParseError',
+]
+
+import logging
+
+log = logging.getLogger('funcparserlib')
+
+debug = False
+
+
+class Parser(object):
+ """A wrapper around a parser function that defines some operators for parser
+ composition.
+ """
+
+ def __init__(self, p):
+ """Wraps a parser function p into an object."""
+ self.define(p)
+
+ def named(self, name):
+ """Specifies the name of the parser for more readable parsing log."""
+ self.name = name
+ return self
+
+ def define(self, p):
+ """Defines a parser wrapped into this object."""
+ f = getattr(p, 'run', p)
+ if debug:
+ setattr(self, '_run', f)
+ else:
+ setattr(self, 'run', f)
+ self.named(getattr(p, 'name', p.__doc__))
+
+ def run(self, tokens, s):
+ """Sequence(a), State -> (b, State)
+
+ Runs a parser wrapped into this object.
+ """
+ if debug:
+ log.debug(u'trying %s' % self.name)
+ return self._run(tokens, s)
+
+ def _run(self, tokens, s):
+ raise NotImplementedError(u'you must define() a parser')
+
+ def parse(self, tokens):
+ """Sequence(a) -> b
+
+ Applies the parser to a sequence of tokens producing a parsing result.
+
+ It provides a way to invoke a parser hiding details related to the
+ parser state. Also it makes error messages more readable by specifying
+ the position of the rightmost token that has been reached.
+ """
+ try:
+ (tree, _) = self.run(tokens, State())
+ return tree
+ except NoParseError as e:
+ max = e.state.max
+ if len(tokens) > max:
+ tok = tokens[max]
+ else:
+ tok = u''
+ raise NoParseError(u'%s: %s' % (e.msg, tok), e.state)
+
+ def __add__(self, other):
+ """Parser(a, b), Parser(a, c) -> Parser(a, _Tuple(b, c))
+
+ A sequential composition of parsers.
+
+ NOTE: The real type of the parsed value isn't always such as specified.
+ Here we use dynamic typing for ignoring the tokens that are of no
+ interest to the user. Also we merge parsing results into a single _Tuple
+        unless the user explicitly prevents it. See also skip and >>
+ combinators.
+ """
+
+ def magic(v1, v2):
+ vs = [v for v in [v1, v2] if not isinstance(v, _Ignored)]
+ if len(vs) == 1:
+ return vs[0]
+ elif len(vs) == 2:
+ if isinstance(vs[0], _Tuple):
+ return _Tuple(v1 + (v2,))
+ else:
+ return _Tuple(vs)
+ else:
+ return _Ignored(())
+
+ @Parser
+ def _add(tokens, s):
+ (v1, s2) = self.run(tokens, s)
+ (v2, s3) = other.run(tokens, s2)
+ return magic(v1, v2), s3
+
+ # or in terms of bind and pure:
+ # _add = self.bind(lambda x: other.bind(lambda y: pure(magic(x, y))))
+ _add.name = u'(%s , %s)' % (self.name, other.name)
+ return _add
+
+ def __or__(self, other):
+ """Parser(a, b), Parser(a, c) -> Parser(a, b or c)
+
+ A choice composition of two parsers.
+
+ NOTE: Here we are not providing the exact type of the result. In a
+        statically typed language something like Either b c could be used. See
+ also + combinator.
+ """
+
+ @Parser
+ def _or(tokens, s):
+ try:
+ return self.run(tokens, s)
+ except NoParseError as e:
+ return other.run(tokens, State(s.pos, e.state.max))
+
+ _or.name = u'(%s | %s)' % (self.name, other.name)
+ return _or
+
+ def __rshift__(self, f):
+ """Parser(a, b), (b -> c) -> Parser(a, c)
+
+ Given a function from b to c, transforms a parser of b into a parser of
+        c. It is useful for transforming a parser value into another value for
+ making it a part of a parse tree or an AST.
+
+ This combinator may be thought of as a functor from b -> c to Parser(a,
+ b) -> Parser(a, c).
+ """
+
+ @Parser
+ def _shift(tokens, s):
+ (v, s2) = self.run(tokens, s)
+ return f(v), s2
+
+ # or in terms of bind and pure:
+ # _shift = self.bind(lambda x: pure(f(x)))
+ _shift.name = u'(%s)' % (self.name,)
+ return _shift
+
+ def bind(self, f):
+ """Parser(a, b), (b -> Parser(a, c)) -> Parser(a, c)
+
+ NOTE: A monadic bind function. It is used internally to implement other
+ combinators. Functions bind and pure make the Parser a Monad.
+ """
+
+ @Parser
+ def _bind(tokens, s):
+ (v, s2) = self.run(tokens, s)
+ return f(v).run(tokens, s2)
+
+ _bind.name = u'(%s >>=)' % (self.name,)
+ return _bind
+
+
+class State(object):
+ """A parsing state that is maintained basically for error reporting.
+
+ It consists of the current position pos in the sequence being parsed and
+ the position max of the rightmost token that has been consumed while
+ parsing.
+ """
+
+ def __init__(self, pos=0, max=0):
+ self.pos = pos
+ self.max = max
+
+ def __str__(self):
+        return str((self.pos, self.max))
+
+ def __repr__(self):
+ return u'State(%r, %r)' % (self.pos, self.max)
+
+
+class NoParseError(Exception):
+ def __init__(self, msg=u'', state=None):
+ self.msg = msg
+ self.state = state
+
+ def __str__(self):
+ return self.msg
+
+
+class _Tuple(tuple):
+ pass
+
+
+class _Ignored(object):
+ def __init__(self, value):
+ self.value = value
+
+ def __repr__(self):
+ return u'_Ignored(%s)' % repr(self.value)
+
+
+@Parser
+def finished(tokens, s):
+ """Parser(a, None)
+
+ Throws an exception if any tokens are left in the input unparsed.
+ """
+ if s.pos >= len(tokens):
+ return None, s
+ else:
+        raise NoParseError(u'should have reached <EOF>', s)
+
+
+finished.name = u'finished'
+
+
+def many(p):
+ """Parser(a, b) -> Parser(a, [b])
+
+    Returns a parser that repeatedly applies the parser p to the input sequence
+    of tokens while it successfully parses them. The resulting parser returns a
+ list of parsed values.
+ """
+
+ @Parser
+ def _many(tokens, s):
+ """Iterative implementation preventing the stack overflow."""
+ res = []
+ try:
+ while True:
+ (v, s) = p.run(tokens, s)
+ res.append(v)
+ except NoParseError as e:
+ return res, State(s.pos, e.state.max)
+
+ _many.name = u'{ %s }' % p.name
+ return _many
+
+
+def some(pred):
+ """(a -> bool) -> Parser(a, a)
+
+ Returns a parser that parses a token if it satisfies a predicate pred.
+ """
+
+ @Parser
+ def _some(tokens, s):
+ if s.pos >= len(tokens):
+ raise NoParseError(u'no tokens left in the stream', s)
+ else:
+ t = tokens[s.pos]
+ if pred(t):
+ pos = s.pos + 1
+ s2 = State(pos, max(pos, s.max))
+ if debug:
+ log.debug(u'*matched* "%s", new state = %s' % (t, s2))
+ return t, s2
+ else:
+ if debug:
+ log.debug(u'failed "%s", state = %s' % (t, s))
+ raise NoParseError(u'got unexpected token', s)
+
+ _some.name = u'(some)'
+ return _some
+
+
+def a(value):
+ """Eq(a) -> Parser(a, a)
+
+    Returns a parser that parses a token that is equal to the given value.
+ """
+ name = getattr(value, 'name', value)
+ return some(lambda t: t == value).named(u'(a "%s")' % (name,))
+
+
+def pure(x):
+ @Parser
+ def _pure(_, s):
+ return x, s
+
+ _pure.name = u'(pure %r)' % (x,)
+ return _pure
+
+
+def maybe(p):
+ """Parser(a, b) -> Parser(a, b or None)
+
+    Returns a parser that returns None if parsing fails.
+
+ NOTE: In a statically typed language, the type Maybe b could be more
+    appropriate.
+ """
+ return (p | pure(None)).named(u'[ %s ]' % (p.name,))
+
+
+def skip(p):
+ """Parser(a, b) -> Parser(a, _Ignored(b))
+
+    Returns a parser whose results are ignored by the combinator +. It is useful
+    for throwing away elements of concrete syntax (e.g. ",", ";").
+ """
+ return p >> _Ignored
+
+
+def oneplus(p):
+ """Parser(a, b) -> Parser(a, [b])
+
+ Returns a parser that applies the parser p one or more times.
+ """
+ q = p + many(p) >> (lambda x: [x[0]] + x[1])
+ return q.named(u'(%s , { %s })' % (p.name, p.name))
+
+
+def with_forward_decls(suspension):
+ """(None -> Parser(a, b)) -> Parser(a, b)
+
+ Returns a parser that computes itself lazily as a result of the suspension
+ provided. It is needed when some parsers contain forward references to
+ parsers defined later and such references are cyclic. See examples for more
+ details.
+ """
+
+ @Parser
+ def f(tokens, s):
+ return suspension().run(tokens, s)
+
+ return f
+
+
+def forward_decl():
+ """None -> Parser(?, ?)
+
+ Returns an undefined parser that can be used as a forward declaration. You
+ will be able to define() it when all the parsers it depends on are
+ available.
+ """
+
+ @Parser
+ def f(tokens, s):
+ raise NotImplementedError(u'you must define() a forward_decl somewhere')
+
+ return f
+
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
diff --git a/funcparserlib/util.py b/funcparserlib/util.py
new file mode 100644
index 0000000..8a510bd
--- /dev/null
+++ b/funcparserlib/util.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2008/2013 Andrey Vlasovskikh
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+def pretty_tree(x, kids, show):
+ """(a, (a -> list(a)), (a -> str)) -> str
+
+ Returns a pseudographic tree representation of x similar to the tree command
+ in Unix.
+ """
+ (MID, END, CONT, LAST, ROOT) = (u'|-- ', u'`-- ', u'| ', u' ', u'')
+
+ def rec(x, indent, sym):
+ line = indent + sym + show(x)
+ xs = kids(x)
+ if len(xs) == 0:
+ return line
+ else:
+ if sym == MID:
+ next_indent = indent + CONT
+ elif sym == ROOT:
+ next_indent = indent + ROOT
+ else:
+ next_indent = indent + LAST
+ syms = [MID] * (len(xs) - 1) + [END]
+ lines = [rec(x, next_indent, sym) for x, sym in zip(xs, syms)]
+ return u'\n'.join([line] + lines)
+
+ return rec(x, u'', ROOT)
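+
+# Illustrative example (not part of the original module): for a tree given as
+# nested pairs ('a', [('b', []), ('c', [])]) with kids = lambda t: t[1] and
+# show = lambda t: t[0], pretty_tree returns:
+#
+#   a
+#   |-- b
+#   `-- c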
diff --git a/kll.py b/kll.py
new file mode 100755
index 0000000..1e477a1
--- /dev/null
+++ b/kll.py
@@ -0,0 +1,505 @@
+#!/usr/bin/env python3
+# KLL Compiler
+# Keyboard Layout Language
+#
+# Copyright (C) 2014 by Jacob Alexander
+#
+# This file is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This file is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this file. If not, see <http://www.gnu.org/licenses/>.
+
+### Imports ###
+
+import argparse
+import io
+import os
+import re
+import sys
+import token
+import importlib
+
+from tokenize import generate_tokens
+from re import VERBOSE
+from pprint import pformat
+
+from kll_lib.hid_dict import *
+from kll_lib.containers import *
+
+from funcparserlib.lexer import make_tokenizer, Token, LexerError
+from funcparserlib.parser import (some, a, many, oneplus, skip, finished, maybe, forward_decl, NoParseError)
+
+
+
+### Decorators ###
+
+ ## Print Decorator Variables
+ERROR = '\033[5;1;31mERROR\033[0m:'
+
+
+ ## Python Text Formatting Fixer...
+ ## Because the creators of Python are averse to proper capitalization.
+textFormatter_lookup = {
+ "usage: " : "Usage: ",
+ "optional arguments" : "Optional Arguments",
+}
+
+def textFormatter_gettext( s ):
+ return textFormatter_lookup.get( s, s )
+
+argparse._ = textFormatter_gettext
+
+
+
+### Argument Parsing ###
+
+def processCommandLineArgs():
+ # Setup argument processor
+ pArgs = argparse.ArgumentParser(
+ usage="%(prog)s [options] ...",
+ description="Generates .h file state tables and pointer indices from KLL .kll files.",
+ epilog="Example: {0} TODO".format( os.path.basename( sys.argv[0] ) ),
+ formatter_class=argparse.RawTextHelpFormatter,
+ add_help=False,
+)
+
+ # Positional Arguments
+ pArgs.add_argument( 'files', type=str, nargs='+',
+ help=argparse.SUPPRESS ) # Suppressed help output, because Python output is verbosely ugly
+
+ # Optional Arguments
+ pArgs.add_argument( '-b', '--backend', type=str, default="kiibohd",
+ help="Specify target backend for the KLL compiler.\n"
+ "Default: kiibohd" )
+ pArgs.add_argument( '-p', '--partial', type=str, nargs='+', action='append',
+ help="Specify .kll files to generate partial map, multiple files per flag.\n"
+ "Each -p defines another partial map.\n"
+ "Base .kll files (that define the scan code maps) must be defined for each partial map." )
+ pArgs.add_argument( '-t', '--template', type=str, default="templateKeymap.h",
+ help="Specify template used to generate the keymap.\n"
+ "Default: templateKeymap.h" )
+	pArgs.add_argument( '-o', '--output', type=str, default="generatedKeymap.h",
+ help="Specify output file. Writes to current working directory by default.\n"
+ "Default: generatedKeymap.h" )
+ pArgs.add_argument( '-h', '--help', action="help",
+ help="This message." )
+
+ # Process Arguments
+ args = pArgs.parse_args()
+
+ # Parameters
+ defaultFiles = args.files
+ partialFileSets = args.partial
+ if partialFileSets is None:
+ partialFileSets = [[]]
+
+	# Check file existence
+ for filename in defaultFiles:
+ if not os.path.isfile( filename ):
+ print ( "{0} {1} does not exist...".format( ERROR, filename ) )
+ sys.exit( 1 )
+
+ for partial in partialFileSets:
+ for filename in partial:
+ if not os.path.isfile( filename ):
+ print ( "{0} {1} does not exist...".format( ERROR, filename ) )
+ sys.exit( 1 )
+
+ return (defaultFiles, partialFileSets, args.backend, args.template, args.output)
+
+
+
+### Tokenizer ###
+
+def tokenize( string ):
+ """str -> Sequence(Token)"""
+
+ # Basic Tokens Spec
+ specs = [
+ ( 'Comment', ( r' *#.*', ) ),
+ ( 'Space', ( r'[ \t\r\n]+', ) ),
+ ( 'USBCode', ( r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
+ ( 'USBCodeStart', ( r'U\[', ) ),
+ ( 'ScanCode', ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
+ ( 'ScanCodeStart', ( r'S\[', ) ),
+ ( 'CodeEnd', ( r'\]', ) ),
+ ( 'String', ( r'"[^"]*"', VERBOSE ) ),
+ ( 'SequenceString', ( r"'[^']*'", ) ),
+ ( 'Comma', ( r',', ) ),
+ ( 'Dash', ( r'-', ) ),
+ ( 'Plus', ( r'\+', ) ),
+ ( 'Operator', ( r'=>|:|=', ) ),
+ ( 'Parenthesis', ( r'\(|\)', ) ),
+ ( 'Number', ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', VERBOSE ) ),
+ ( 'Name', ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
+ ( 'VariableContents', ( r'''[^"' ;:=>()]+''', ) ),
+ ( 'EndOfLine', ( r';', ) ),
+ ]
+
+ # Tokens to filter out of the token stream
+ useless = ['Space', 'Comment']
+
+ tokens = make_tokenizer( specs )
+ return [x for x in tokens( string ) if x.type not in useless]
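+
+	# Illustrative example (not part of the original patch): a mapping line such as
+	#   S0x29 : U"Esc";
+	# tokenizes (after filtering Space/Comment) as
+	#   [ScanCode 'S0x29', Operator ':', USBCode 'U"Esc"', EndOfLine ';']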
+
+
+
+### Parsing ###
+
+ ## Map Arrays
+scanCode_map = [ None ] * 0x100 # Define 8 bit address width (256 entries)
+usbCode_map = [ None ] * 0x100 # Define 8 bit address width (256 entries)
+variable_dict = dict()
+capabilities_dict = Capabilities()
+
+
+ ## Parsing Functions
+
+def make_scanCode( token ):
+ scanCode = int( token[1:], 0 )
+ # Check size, to make sure it's valid
+ if scanCode > 0xFF:
+ print ( "{0} ScanCode value {1} is larger than 255".format( ERROR, scanCode ) )
+ raise
+ return scanCode
+
+def make_usbCode( token ):
+ # If first character is a U, strip
+ if token[0] == "U":
+ token = token[1:]
+
+ # If using string representation of USB Code, do lookup, case-insensitive
+ if '"' in token:
+ try:
+ usbCode = kll_hid_lookup_dictionary[ token[1:-1].upper() ]
+ except LookupError as err:
+ print ( "{0} {1} is an invalid USB Code Lookup...".format( ERROR, err ) )
+ raise
+ else:
+ usbCode = int( token, 0 )
+
+ # Check size, to make sure it's valid
+ if usbCode > 0xFF:
+ print ( "{0} USBCode value {1} is larger than 255".format( ERROR, usbCode ) )
+ raise
+ return usbCode
+
+def make_seqString( token ):
+ # Shifted Characters, and amount to move by to get non-shifted version
+ # US ANSI
+ shiftCharacters = (
+ ( "ABCDEFGHIJKLMNOPQRSTUVWXYZ", 0x20 ),
+ ( "+", 0x12 ),
+ ( "&(", 0x11 ),
+ ( "!#$%<>", 0x10 ),
+ ( "*", 0x0E ),
+ ( ")", 0x07 ),
+ ( '"', 0x05 ),
+ ( ":", 0x01 ),
+ ( "^", -0x10 ),
+ ( "_", -0x18 ),
+ ( "{}|", -0x1E ),
+ ( "~", -0x20 ),
+ ( "@", -0x32 ),
+ ( "?", -0x38 ),
+ )
+
+ listOfLists = []
+ shiftKey = kll_hid_lookup_dictionary["SHIFT"]
+
+ # Creates a list of USB codes from the string: sequence (list) of combos (lists)
+ for char in token[1:-1]:
+ processedChar = char
+
+ # Whether or not to create a combo for this sequence with a shift
+ shiftCombo = False
+
+ # Depending on the ASCII character, convert to single character or Shift + character
+ for pair in shiftCharacters:
+ if char in pair[0]:
+ shiftCombo = True
+ processedChar = chr( ord( char ) + pair[1] )
+ break
+
+ # Do KLL HID Lookup on non-shifted character
+ # NOTE: Case-insensitive, which is why the shift must be pre-computed
+ usbCode = kll_hid_lookup_dictionary[ processedChar.upper() ]
+
+ # Create Combo for this character, add shift key if shifted
+ charCombo = []
+ if shiftCombo:
+ charCombo = [ [ shiftKey ] ]
+ charCombo.append( [ usbCode ] )
+
+ # Add to list of lists
+ listOfLists.append( charCombo )
+
+ return listOfLists
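+
+	# Illustrative example (not part of the original patch): for the sequence string
+	# 'Ab', 'A' is a shifted character and becomes the combo [ [ shiftKey ], [ <USB code for A> ] ],
+	# while 'b' becomes [ [ <USB code for B> ] ]; both combos are returned in order.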
+
+def make_string( token ):
+ return token[1:-1]
+
+def make_number( token ):
+ return int( token, 0 )
+
+ # Range can go from high to low or low to high
+def make_scanCode_range( rangeVals ):
+ start = rangeVals[0]
+ end = rangeVals[1]
+
+ # Swap start, end if start is greater than end
+ if start > end:
+ start, end = end, start
+
+ # Iterate from start to end, and generate the range
+ return list( range( start, end + 1 ) )
+
+ # Range can go from high to low or low to high
+ # Warn on 0-9 (as this does not do what one would expect) TODO
+ # Lookup USB HID tags and convert to a number
+def make_usbCode_range( rangeVals ):
+ # Check if already integers
+ if isinstance( rangeVals[0], int ):
+ start = rangeVals[0]
+ else:
+ start = make_usbCode( rangeVals[0] )
+
+ if isinstance( rangeVals[1], int ):
+ end = rangeVals[1]
+ else:
+ end = make_usbCode( rangeVals[1] )
+
+ # Swap start, end if start is greater than end
+ if start > end:
+ start, end = end, start
+
+ # Iterate from start to end, and generate the range
+ return list( range( start, end + 1 ) )
+
+
+ ## Base Rules
+
+const = lambda x: lambda _: x
+unarg = lambda f: lambda x: f(*x)
+flatten = lambda list: sum( list, [] )
+
+tokenValue = lambda x: x.value
+tokenType = lambda t: some( lambda x: x.type == t ) >> tokenValue
+operator = lambda s: a( Token( 'Operator', s ) ) >> tokenValue
+parenthesis = lambda s: a( Token( 'Parenthesis', s ) ) >> tokenValue
+eol = a( Token( 'EndOfLine', ';' ) )
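+
+ # Illustrative examples (not part of the original patch):
+ #   tokenType('Number')  parses any Number token and yields its string value
+ #   operator(':')        parses the ':' Operator token and yields ':'
+ #   eol                  parses the terminating ';' token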
+
+def listElem( item ):
+ return [ item ]
+
+ # Flatten only the top layer (list of lists of ...)
+def oneLayerFlatten( items ):
+ mainList = []
+ for sublist in items:
+ for item in sublist:
+ mainList.append( item )
+
+ return mainList
+
+ # Expand ranges of values in the 3rd dimension of the list, to a list of 2nd lists
+ # i.e. [ sequence, [ combo, [ range ] ] ] --> [ [ sequence, [ combo ] ],