|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
__version__ = '3.10'      # PLY release number
__tabversion__ = '3.10'   # Version of the cached table-file format

import re
import sys
import types
import copy
import os
import inspect

# Known string types.  Python 2 has separate str/unicode types; on
# Python 3 the attribute lookup raises AttributeError and we fall back
# to (str, bytes).
try:
    # Python 2.6
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0
    StringTypes = (str, bytes)

# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
|
|
|
|
|
|
|
class LexError(Exception):
    """Exception raised for lexing errors.

    The first positional argument is the error message; ``text`` holds
    the remaining (unscanned) input at the point of failure.
    """

    def __init__(self, message, s):
        # Passing message to Exception sets self.args = (message,)
        super(LexError, self).__init__(message)
        self.text = s
|
|
|
|
|
|
|
class LexToken(object):
    """A single token produced by the tokenizer.

    Attributes are assigned externally (not via __init__): ``type``,
    ``value``, ``lineno`` and ``lexpos``; rule functions additionally
    see a ``lexer`` attribute.
    """

    def __str__(self):
        return 'LexToken({0},{1!r},{2:d},{3:d})'.format(
            self.type, self.value, self.lineno, self.lexpos)

    def __repr__(self):
        return self.__str__()
|
|
|
|
|
|
|
|
|
|
|
class PlyLogger(object):
    """Minimal logger used when the caller supplies none.

    Messages are %-formatted with their arguments and written, one per
    line, to the file-like object given at construction.  ``info`` and
    ``debug`` are aliases of ``critical`` (no level filtering happens).
    """

    def __init__(self, f):
        self.f = f

    def _emit(self, prefix, msg, args):
        # All levels funnel through one formatting/writing path.
        self.f.write(prefix + (msg % args) + '\n')

    def critical(self, msg, *args, **kwargs):
        self._emit('', msg, args)

    def warning(self, msg, *args, **kwargs):
        self._emit('WARNING: ', msg, args)

    def error(self, msg, *args, **kwargs):
        self._emit('ERROR: ', msg, args)

    info = critical
    debug = critical
|
|
|
|
|
|
|
class NullLogger(object):
    """A black-hole logger: every attribute access and every call
    returns the instance itself, silently discarding all messages."""

    def __getattribute__(self, attr):
        return self

    def __call__(self, *args, **kwargs):
        return self
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class Lexer:
    """The lexing engine.

    Holds the compiled master regular expressions (one list per lexer
    state), the input buffer, and the current scan position.  Instances
    are normally constructed by ``lex()`` rather than directly.
    """

    def __init__(self):
        self.lexre = None             # Master regex list for the current state
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Mapping of lexer state -> master regex list
        self.lexstateretext = {}      # Mapping of lexer state -> regex strings
        self.lexstaterenames = {}     # Mapping of lexer state -> group/symbol names
        self.lexstate = 'INITIAL'     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states (for push/pop_state)
        self.lexstateinfo = None      # State information (inclusive/exclusive)
        self.lexstateignore = {}      # Ignored characters per state
        self.lexstateerrorf = {}      # Error rule function per state
        self.lexstateeoff = {}        # EOF rule function per state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule for the current state (if any)
        self.lexeoff = None           # EOF rule for the current state (if any)
        self.lextokens = None         # Set of valid token names
        self.lexignore = ''           # Ignored characters for the current state
        self.lexliterals = ''         # Literal characters passed through as-is
        self.lexmodule = None         # Module/object the rules came from
        self.lineno = 1               # Current line number
        self.lexoptimize = False      # Optimized mode (skips token-type checks)

    def clone(self, object=None):
        """Return a shallow copy of this lexer.

        If *object* is supplied, every rule function and error function
        is rebound to the same-named attribute on *object* (used to
        attach the cloned lexer to a new instance).  Compiled regexes
        are shared, not recompiled.
        """
        c = copy.copy(self)

        # If the object parameter has been supplied, it means we are attaching
        # the lexer to a new object.  In this case, we have to rebind all
        # methods in the lexstatere and lexstateerrorf tables.
        if object:
            newtab = {}
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            # Placeholder / string-rule entries are copied as-is
                            newfindex.append(f)
                            continue
                        newfindex.append((getattr(object, f[0].__name__), f[1]))
                    newre.append((cre, newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = {}
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object, ef.__name__)
            c.lexmodule = object
        return c

    def writetab(self, lextab, outputdir=''):
        """Write the lexing tables to a cached ``lextab`` module file.

        Raises IOError when *lextab* is already a module object (we will
        not overwrite an imported module's source).
        """
        if isinstance(lextab, types.ModuleType):
            raise IOError("Won't overwrite existing lextab module")
        basetabmodule = lextab.split('.')[-1]
        filename = os.path.join(outputdir, basetabmodule) + '.py'
        with open(filename, 'w') as tf:
            tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
            tf.write('_tabversion = %s\n' % repr(__tabversion__))
            tf.write('_lextokens = set(%s)\n' % repr(tuple(sorted(self.lextokens))))
            tf.write('_lexreflags = %s\n' % repr(self.lexreflags))
            tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
            tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))

            # Rewrite the lexstatere table, replacing function objects
            # with their names so the table is writable as source text.
            tabre = {}
            for statename, lre in self.lexstatere.items():
                titem = []
                for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
                    titem.append((retext, _funcs_to_names(func, renames)))
                tabre[statename] = titem

            tf.write('_lexstatere = %s\n' % repr(tabre))
            tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))

            taberr = {}
            for statename, ef in self.lexstateerrorf.items():
                taberr[statename] = ef.__name__ if ef else None
            tf.write('_lexstateerrorf = %s\n' % repr(taberr))

            tabeof = {}
            for statename, ef in self.lexstateeoff.items():
                tabeof[statename] = ef.__name__ if ef else None
            tf.write('_lexstateeoff = %s\n' % repr(tabeof))

    def readtab(self, tabfile, fdict):
        """Load lexing tables from a lextab module (name or module object),
        rebinding rule names to functions found in *fdict*.

        Raises ImportError if the table file's version does not match
        this module's __tabversion__.
        """
        if isinstance(tabfile, types.ModuleType):
            lextab = tabfile
        else:
            exec('import %s' % tabfile)
            lextab = sys.modules[tabfile]

        if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
            raise ImportError('Inconsistent PLY version')

        self.lextokens = lextab._lextokens
        self.lexreflags = lextab._lexreflags
        self.lexliterals = lextab._lexliterals
        self.lextokens_all = self.lextokens | set(self.lexliterals)
        self.lexstateinfo = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere = {}
        self.lexstateretext = {}
        for statename, lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for pat, func_name in lre:
                titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict)))

            self.lexstatere[statename] = titem
            # NOTE(review): txtitem is never appended to, so lexstateretext
            # ends up holding empty lists in optimized mode; looks harmless
            # (only used for debugging/writetab) but verify before relying on it.
            self.lexstateretext[statename] = txtitem

        self.lexstateerrorf = {}
        for statename, ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[statename] = fdict[ef]

        self.lexstateeoff = {}
        for statename, ef in lextab._lexstateeoff.items():
            self.lexstateeoff[statename] = fdict[ef]

        self.begin('INITIAL')

    def input(self, s):
        """Store a new string *s* to tokenize and reset the scan position."""
        # Pull off the first character to see if s looks like a string
        c = s[:1]
        if not isinstance(c, StringTypes):
            raise ValueError('Expected a string')
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    def begin(self, state):
        """Switch the lexer to a different (previously declared) state."""
        if state not in self.lexstatere:
            raise ValueError('Undefined state')
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state, '')
        self.lexerrorf = self.lexstateerrorf.get(state, None)
        self.lexeoff = self.lexstateeoff.get(state, None)
        self.lexstate = state

    def push_state(self, state):
        """Enter *state*, remembering the current state on a stack."""
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    def pop_state(self):
        """Restore the state saved by the most recent push_state()."""
        self.begin(self.lexstatestack.pop())

    def current_state(self):
        """Return the name of the current lexer state."""
        return self.lexstate

    def skip(self, n):
        """Skip ahead *n* characters in the input."""
        self.lexpos += n

    def token(self):
        """Return the next LexToken from the input, or None at end of input.

        Raises LexError on input that matches no rule (when no error
        rule is defined, or when the error rule fails to advance), and
        RuntimeError if token() is called before input().
        """
        # Make local copies of frequently-used state for speed.
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata

        while lexpos < lexlen:
            # Short-circuit for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre, lexindexfunc in self.lexre:
                m = lexre.match(lexdata, lexpos)
                if not m:
                    continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                # lastindex identifies which named group (rule) matched
                i = m.lastindex
                func, tok.type = lexindexfunc[i]

                if not func:
                    # Plain string rule: return the token directly if it has
                    # a type; otherwise it is an ignored pattern, so advance.
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # Function rule: expose extra attributes, then call it.
                tok.lexer = self
                self.lexmatch = m
                self.lexpos = lexpos

                newtok = func(tok)

                # If the function discarded the token, resume scanning.
                if not newtok:
                    lexpos = self.lexpos        # in case the rule moved lexpos
                    lexignore = self.lexignore  # in case there was a state change
                    break

                # Verify the returned token type against the declared tokens.
                if not self.lexoptimize:
                    if newtok.type not in self.lextokens_all:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func.__code__.co_filename, func.__code__.co_firstlineno,
                            func.__name__, newtok.type), lexdata[lexpos:])

                return newtok
            else:
                # No regex rule matched; try the literal characters
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = 'error'
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error rule didn't advance the position at all: bail
                        # out instead of looping forever.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok:
                        continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])

        # End of input reached: fire the EOF rule if one is defined.
        if self.lexeoff:
            tok = LexToken()
            tok.type = 'eof'
            tok.value = ''
            tok.lineno = self.lineno
            tok.lexpos = lexpos
            tok.lexer = self
            self.lexpos = lexpos
            newtok = self.lexeoff(tok)
            return newtok

        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError('No input string given with input()')
        return None

    # Iterator interface: iterating a lexer yields tokens until exhausted.
    def __iter__(self):
        return self

    def next(self):
        t = self.token()
        if t is None:
            raise StopIteration
        return t

    __next__ = next
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _get_regex(func): |
|
return getattr(func, 'regex', func.__doc__) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_caller_module_dict(levels):
    """Return a dictionary holding the globals of the stack frame
    *levels* levels above this call, updated with that frame's locals
    when the two namespaces are distinct."""
    frame = sys._getframe(levels)
    namespace = dict(frame.f_globals)
    if frame.f_globals != frame.f_locals:
        namespace.update(frame.f_locals)
    return namespace
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _funcs_to_names(funclist, namelist): |
|
result = [] |
|
for f, name in zip(funclist, namelist): |
|
if f and f[0]: |
|
result.append((name, f[1])) |
|
else: |
|
result.append(f) |
|
return result |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _names_to_funcs(namelist, fdict): |
|
result = [] |
|
for n in namelist: |
|
if n and n[0]: |
|
result.append((fdict[n[0]], n[1])) |
|
else: |
|
result.append(n) |
|
return result |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _form_master_re(relist, reflags, ldict, toknames):
    """Combine the individual rule regexes in *relist* into master regexes.

    Tries to compile all patterns joined with '|'.  If compilation fails
    (for example, too many named groups for one pattern), the list is
    split in half and each half is processed recursively.  Returns three
    parallel lists: [(compiled_regex, index->rule table)], [pattern
    strings], and [index->group-name tables].
    """
    if not relist:
        # NOTE(review): returns a bare [] rather than a 3-tuple; only
        # reachable when called with an empty rule list -- verify callers.
        return []
    regex = '|'.join(relist)
    try:
        lexre = re.compile(regex, reflags)

        # Build the index-to-function map used by the matching engine:
        # slot i corresponds to named group index i in the master regex.
        lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
        lexindexnames = lexindexfunc[:]

        for f, i in lexre.groupindex.items():
            handle = ldict.get(f, None)
            if type(handle) in (types.FunctionType, types.MethodType):
                # Function rule: store (function, token name)
                lexindexfunc[i] = (handle, toknames[f])
                lexindexnames[i] = f
            elif handle is not None:
                lexindexnames[i] = f
                if f.find('ignore_') > 0:
                    # String rule named t_..._ignore_X: match but emit no token
                    lexindexfunc[i] = (None, None)
                else:
                    # Plain string rule: no function, just a token type
                    lexindexfunc[i] = (None, toknames[f])

        return [(lexre, lexindexfunc)], [regex], [lexindexnames]
    except Exception:
        # Combined pattern failed to compile -- divide and conquer.
        m = int(len(relist)/2)
        if m == 0:
            m = 1
        llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
        rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
        return (llist+rlist), (lre+rre), (lnames+rnames)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _statetoken(s, names): |
|
nonstate = 1 |
|
parts = s.split('_') |
|
for i, part in enumerate(parts[1:], 1): |
|
if part not in names and part != 'ANY': |
|
break |
|
|
|
if i > 1: |
|
states = tuple(parts[1:i]) |
|
else: |
|
states = ('INITIAL',) |
|
|
|
if 'ANY' in states: |
|
states = tuple(names) |
|
|
|
tokenname = '_'.join(parts[i:]) |
|
return (states, tokenname) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class LexerReflect(object):
    """Collects and validates the lexer specification (tokens, literals,
    states and t_* rules) found in a symbol dictionary such as a module
    namespace.  Errors are reported through *log* and accumulated in
    ``self.error``.
    """

    def __init__(self, ldict, log=None, reflags=0):
        self.ldict = ldict        # Symbol dictionary to scan for the specification
        self.error_func = None
        self.tokens = []
        self.reflags = reflags
        self.stateinfo = {'INITIAL': 'inclusive'}
        self.modules = set()      # Modules containing rules (for duplicate checks)
        self.error = False
        self.log = PlyLogger(sys.stderr) if log is None else log

    # Get all of the basic information
    def get_all(self):
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information; returns True if any error was found
    def validate_all(self):
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens list
    def get_tokens(self):
        tokens = self.ldict.get('tokens', None)
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return

        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return

        if not tokens:
            self.log.error('tokens is empty')
            self.error = True
            return

        self.tokens = tokens

    # Validate the token names
    def validate_tokens(self):
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'", n)
                self.error = True
            if n in terminals:
                # Duplicate token names are only a warning, not an error
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        self.literals = self.ldict.get('literals', '')
        if not self.literals:
            self.literals = ''

    # Validate literals: each must be a single-character string
    def validate_literals(self):
        try:
            for c in self.literals:
                if not isinstance(c, StringTypes) or len(c) > 1:
                    self.log.error('Invalid literal %s. Must be a single character', repr(c))
                    self.error = True

        except TypeError:
            self.log.error('Invalid literals specification. literals must be a sequence of characters')
            self.error = True

    def get_states(self):
        """Read and validate the optional 'states' declaration, filling
        self.stateinfo with name -> 'inclusive'/'exclusive' entries."""
        self.states = self.ldict.get('states', None)
        # Build the state map
        if self.states:
            if not isinstance(self.states, (tuple, list)):
                self.log.error('states must be defined as a tuple or list')
                self.error = True
            else:
                for s in self.states:
                    if not isinstance(s, tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
                        self.error = True
                        continue
                    name, statetype = s
                    if not isinstance(name, StringTypes):
                        self.log.error('State name %s must be a string', repr(name))
                        self.error = True
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
                        self.error = True
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined", name)
                        self.error = True
                        continue
                    self.stateinfo[name] = statetype

    def get_rules(self):
        """Collect all t_* symbols and sort them into categories:
        rule functions, rule strings, ignore strings, error and EOF
        handlers -- each keyed by lexer state."""
        tsymbols = [f for f in self.ldict if f[:2] == 't_']

        # Now build up a list of functions and a list of strings
        self.toknames = {}  # Mapping of symbol name -> token name
        self.funcsym = {}   # Symbols defined as functions, per state
        self.strsym = {}    # Symbols defined as strings, per state
        self.ignore = {}    # Ignore strings, per state
        self.errorf = {}    # Error functions, per state
        self.eoff = {}      # EOF functions, per state

        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []

        if len(tsymbols) == 0:
            self.log.error('No rules of the form t_rulename are defined')
            self.error = True
            return

        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f, self.stateinfo)
            self.toknames[f] = tokname

            if hasattr(t, '__call__'):
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'eof':
                    for s in states:
                        self.eoff[s] = t
                elif tokname == 'ignore':
                    # t_ignore must be a string, not a function
                    line = t.__code__.co_firstlineno
                    file = t.__code__.co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
                    self.error = True
                else:
                    for s in states:
                        self.funcsym[s].append((f, t))
            elif isinstance(t, StringTypes):
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                    if '\\' in t:
                        self.log.warning("%s contains a literal backslash '\\'", f)

                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = True
                else:
                    for s in states:
                        self.strsym[s].append((f, t))
            else:
                self.log.error('%s not defined as a function or string', f)
                self.error = True

        # Sort the function rules by line number (definition order)
        for f in self.funcsym.values():
            f.sort(key=lambda x: x[1].__code__.co_firstlineno)

        # Sort the string rules by regular expression length, longest first
        for s in self.strsym.values():
            s.sort(key=lambda x: len(x[1]), reverse=True)

    def validate_rules(self):
        """Validate every collected rule: argument counts, presence and
        well-formedness of regexes, and error-handler signatures."""
        for state in self.stateinfo:
            # Validate all rules defined by functions
            for fname, f in self.funcsym[state]:
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                tokname = self.toknames[fname]
                # Bound methods carry an implicit self, so expect one extra arg
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True
                    continue

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True
                    continue

                if not _get_regex(f):
                    self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
                    self.error = True
                    continue

                try:
                    # Compile in isolation to catch bad patterns and
                    # patterns that match the empty string.
                    c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
                    if c.match(''):
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
                        self.error = True
                except re.error as e:
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
                    if '#' in _get_regex(f):
                        # With re.VERBOSE an unescaped '#' starts a comment
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
                    self.error = True

            # Validate all rules defined by strings
            for name, r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = True
                    continue

                if tokname not in self.tokens and tokname.find('ignore_') < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
                    if (c.match('')):
                        self.log.error("Regular expression for rule '%s' matches empty string", name)
                        self.error = True
                except re.error as e:
                    self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
                    self.error = True

            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'", state)
                self.error = True

            # Validate the error function's signature, if one was given
            efunc = self.errorf.get(state, None)
            if efunc:
                f = efunc
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True

        for module in self.modules:
            self.validate_module(module)

    def validate_module(self, module):
        """Check the source of *module* for t_rulename functions or
        strings defined more than once, using a line-by-line regex scan
        of the module's source code."""
        try:
            lines, linen = inspect.getsourcelines(module)
        except IOError:
            # Source not available (e.g. interactive or frozen) -- skip
            return

        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')

        counthash = {}
        linen += 1
        for line in lines:
            m = fre.match(line)
            if not m:
                m = sre.match(line)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    filename = inspect.getsourcefile(module)
                    self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
                    self.error = True
            linen += 1
|
|
|
|
|
|
|
|
|
|
|
|
|
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
        reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None):
    """Build and return a Lexer from rules found in *module*, *object*,
    or (by default) the caller's namespace.

    Side effects: binds the module-level ``lexer``, ``token`` and
    ``input`` shortcuts to the new lexer.  In optimize mode, tables are
    loaded from (or written to) the *lextab* cache module.  Raises
    SyntaxError when validation of the rules fails.
    """

    if lextab is None:
        lextab = 'lextab'

    global lexer

    ldict = None
    stateinfo = {'INITIAL': 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token, input

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the lexer
    if object:
        module = object

    # Get the module dictionary used for the lexer rules
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        ldict = dict(_items)
        # If no __file__ attribute is available, try to obtain it from the __module__ instead
        if '__file__' not in ldict:
            ldict['__file__'] = sys.modules[ldict['__module__']].__file__
    else:
        ldict = get_caller_module_dict(2)

    # Determine if the module is part of a package.
    # If so, fix the tabmodule setting so that tables load correctly.
    pkg = ldict.get('__package__')
    if pkg and isinstance(lextab, str):
        if '.' not in lextab:
            lextab = pkg + '.' + lextab

    # Collect lexer information from the dictionary
    linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
    linfo.get_all()
    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")

    # In optimize mode, try the cached tables first; fall through to a
    # full rebuild if the cache is missing or stale.
    if optimize and lextab:
        try:
            lexobj.readtab(lextab, ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj

        except ImportError:
            pass

    # Dump some basic debugging information
    if debug:
        debuglog.info('lex: tokens = %r', linfo.tokens)
        debuglog.info('lex: literals = %r', linfo.literals)
        debuglog.info('lex: states = %r', linfo.stateinfo)

    # Build the set of valid token names
    lexobj.lextokens = set()
    for n in linfo.tokens:
        lexobj.lextokens.add(n)

    # Get literals specification: a list/tuple is joined into one string
    if isinstance(linfo.literals, (list, tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals

    lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)

    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo

    regexs = {}
    # Build one list of named-group regexes per state
    for state in stateinfo:
        regex_list = []

        # Add rules defined by functions first (in definition order)
        for fname, f in linfo.funcsym[state]:
            line = f.__code__.co_firstlineno
            file = f.__code__.co_filename
            regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)

        # Now add all of the simple string rules (longest pattern first)
        for name, r in linfo.strsym[state]:
            regex_list.append('(?P<%s>%s)' % (name, r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)

        regexs[state] = regex_list

    # Build the master regular expressions
    if debug:
        debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')

    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i, text in enumerate(re_text):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)

    # For inclusive states, also append the INITIAL state's rules
    for state, stype in stateinfo.items():
        if state != 'INITIAL' and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere['INITIAL']
    lexobj.lexretext = lexobj.lexstateretext['INITIAL']
    lexobj.lexreflags = reflags

    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')

    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
    if not lexobj.lexerrorf:
        errorlog.warning('No t_error rule is defined')

    # Set up eof functions
    lexobj.lexstateeoff = linfo.eoff
    lexobj.lexeoff = linfo.eoff.get('INITIAL', None)

    # Check state information for ignore and error rules; inclusive
    # states inherit INITIAL's handlers, exclusive states only warn.
    for s, stype in stateinfo.items():
        if stype == 'exclusive':
            if s not in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if s not in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            if s not in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
            if s not in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get('INITIAL', '')

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    # If in optimize mode, write out the cached table file
    if lextab and optimize:
        if outputdir is None:
            # If no output directory is set, the location of the output files
            # is determined according to the following rules:
            #     - If lextab specifies a package, files go into that package
            #     - Otherwise, the file goes in the same directory as the
            #       module that defined the rules
            if isinstance(lextab, types.ModuleType):
                srcfile = lextab.__file__
            else:
                if '.' not in lextab:
                    srcfile = ldict['__file__']
                else:
                    parts = lextab.split('.')
                    pkgname = '.'.join(parts[:-1])
                    exec('import %s' % pkgname)
                    srcfile = getattr(sys.modules[pkgname], '__file__', '')
            outputdir = os.path.dirname(srcfile)
        try:
            lexobj.writetab(lextab, outputdir)
        except IOError as e:
            # Table writing is best-effort; warn and carry on
            errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))

    return lexobj
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def runmain(lexer=None, data=None):
    """Tokenize *data* and print every token, one per line, to stdout.

    Intended for quick testing of a lexer.  When *data* is omitted, it
    is read from the file named by sys.argv[1], or from standard input
    when no filename is given.  When *lexer* is omitted, the module
    global lexer (set up by lex()) is used.
    """
    if not data:
        try:
            filename = sys.argv[1]
            # Fix: use a context manager so the file is closed even if
            # read() raises (the original leaked the handle on error).
            with open(filename) as f:
                data = f.read()
        except IndexError:
            sys.stdout.write('Reading from standard input (type EOF to end):\n')
            data = sys.stdin.read()

    if lexer:
        _input = lexer.input
    else:
        _input = input
    _input(data)
    if lexer:
        _token = lexer.token
    else:
        _token = token

    # Pull tokens until the lexer signals end of input (falsy token)
    while True:
        tok = _token()
        if not tok:
            break
        sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def TOKEN(r):
    """Decorator attaching the regex *r* to a token rule function.

    *r* may be a pattern string, or another rule function whose regex
    (attribute or docstring) is reused.
    """
    def set_regex(f):
        f.regex = _get_regex(r) if hasattr(r, '__call__') else r
        return f
    return set_regex


# Alternative spelling of the TOKEN decorator
Token = TOKEN
|
|