diff --git a/.travis.yml b/.travis.yml
index 9e8dd5a..39090de 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,6 +4,8 @@ sudo: false
 
 python:
   - 3.7
+  - 3.8
+  - 3.9
 
 addons:
   apt:
@@ -11,7 +13,7 @@ addons:
     - iverilog
 
 install:
-  - pip install pytest pytest-pythonpath jinja2
+  - pip install pytest pytest-pythonpath jinja2 ply
 
 script:
   - python -m pytest tests
diff --git a/README.md b/README.md
index 27a3d4a..f0eda14 100644
--- a/README.md
+++ b/README.md
@@ -13,8 +13,6 @@ License
 
 Apache License 2.0
 (http://www.apache.org/licenses/LICENSE-2.0)
 
-Note that this software package includes PLY-3.4 in "vparser/ply". The license of PLY is BSD.
-
 Publication
 ==============================
@@ -65,7 +63,7 @@ for pull requests
 
 Please check "CONTRIBUTORS.md" for the contributors who provided pull requests.
 
-Pyverilog uses **pytest** for the integration testing. **When you send a pull request, please include a testing example with pytest.**
+Pyverilog uses **pytest** for the integration testing. **When you send a pull request, please include a testing example with pytest.** To write test code, please refer to the existing testing examples in the "tests" directory.
 
 If the pull request code passes all the tests successfully and has no obvious problem, it will be merged to the *develop* branch by the main committers.
 
@@ -77,7 +75,7 @@ Installation
 
 Requirements
 --------------------
 
-- Python3: 3.6 or later
+- Python3: 3.7 or later
 - Icarus Verilog: 10.1 or later
 
 ```
@@ -85,9 +83,10 @@
 sudo apt install iverilog
 ```
 
 - Jinja2: 2.10 or later
+- PLY: 3.4 or later
 
 ```
-pip3 install jinja2
+pip3 install jinja2 ply
 ```
 
 Optional installation for testing
diff --git a/examples/example_active_analyzer.py b/examples/example_active_analyzer.py
index 32b5558..ce05d3d 100644
--- a/examples/example_active_analyzer.py
+++ b/examples/example_active_analyzer.py
@@ -7,16 +7,17 @@
 # the next line can be removed after installation
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import pyverilog.utils.version
+import pyverilog
 import pyverilog.utils.util as util
 import pyverilog.controlflow.splitter as splitter
 from pyverilog.dataflow.dataflow_analyzer import VerilogDataflowAnalyzer
 from pyverilog.dataflow.optimizer import VerilogDataflowOptimizer
 from pyverilog.controlflow.active_analyzer import VerilogActiveConditionAnalyzer
 
+
 def main():
     INFO = "Active condition analyzer"
-    VERSION = pyverilog.utils.version.VERSION
+    VERSION = pyverilog.__version__
     USAGE = "Usage: python example_active_analyzer.py -t TOPMODULE file ..."
 
     def showVersion():
@@ -24,14 +25,14 @@ def showVersion():
         print(VERSION)
         print(USAGE)
         sys.exit()
-    
+
     optparser = OptionParser()
-    optparser.add_option("-v","--version",action="store_true",dest="showversion",
-                         default=False,help="Show the version")
-    optparser.add_option("-t","--top",dest="topmodule",
-                         default="TOP",help="Top module, Default=TOP")
-    optparser.add_option("-s","--search",dest="searchtarget",action="append",
-                         default=[],help="Search Target Signal")
+    optparser.add_option("-v", "--version", action="store_true", dest="showversion",
+                         default=False, help="Show the version")
+    optparser.add_option("-t", "--top", dest="topmodule",
+                         default="TOP", help="Top module, Default=TOP")
+    optparser.add_option("-s", "--search", dest="searchtarget", action="append",
+                         default=[], help="Search Target Signal")
     (options, args) = optparser.parse_args()
 
     filelist = args
@@ -39,7 +40,8 @@ def showVersion():
         showVersion()
 
     for f in filelist:
-        if not os.path.exists(f): raise IOError("file not found: " + f)
+        if not os.path.exists(f):
+            raise IOError("file not found: " + f)
 
     if len(filelist) == 0:
         showVersion()
@@ -58,25 +60,28 @@ def showVersion():
     resolved_binddict = optimizer.getResolvedBinddict()
     constlist = optimizer.getConstlist()
 
-    canalyzer = VerilogActiveConditionAnalyzer(options.topmodule, terms, binddict, 
+    canalyzer = VerilogActiveConditionAnalyzer(options.topmodule, terms, binddict,
                                                resolved_terms, resolved_binddict, constlist)
 
     for target in options.searchtarget:
         signal = util.toTermname(target)
-        active_conditions = canalyzer.getActiveConditions( signal )
+        active_conditions = canalyzer.getActiveConditions(signal)
         #active_conditions = canalyzer.getActiveConditions( signal, condition=splitter.active_modify )
         #active_conditions = canalyzer.getActiveConditions( signal, condition=splitter.active_unmodify )
         print('Active Cases: %s' % signal)
-        for fsm_sig, active_conditions in sorted(active_conditions.items(), key=lambda x:str(x[0])):
+        for fsm_sig, active_conditions in sorted(active_conditions.items(), key=lambda x: str(x[0])):
             print('FSM: %s' % fsm_sig)
-            for state, active_condition in sorted(active_conditions, key=lambda x:str(x[0])):
+            for state, active_condition in sorted(active_conditions, key=lambda x: str(x[0])):
                 s = []
                 s.append('state: %d -> ' % state)
-                if active_condition: s.append(active_condition.tocode())
-                else: s.append('empty')
+                if active_condition:
+                    s.append(active_condition.tocode())
+                else:
+                    s.append('empty')
                 print(''.join(s))
 
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_active_range.py b/examples/example_active_range.py
index 6da8f86..7dfeae9 100644
--- a/examples/example_active_range.py
+++ b/examples/example_active_range.py
@@ -7,15 +7,16 @@
 # the next line can be removed after installation
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import pyverilog.utils.version
+import pyverilog
 import pyverilog.utils.util as util
 from pyverilog.dataflow.dataflow_analyzer import VerilogDataflowAnalyzer
 from pyverilog.dataflow.optimizer import VerilogDataflowOptimizer
 from pyverilog.controlflow.active_range import VerilogActiveAnalyzer
 
+
 def main():
     INFO = "Active condition analyzer (Obsoluted)"
-    VERSION = pyverilog.utils.version.VERSION
+    VERSION = pyverilog.__version__
     USAGE = "Usage: python example_active_range.py -t TOPMODULE file ..."
 
     def showVersion():
@@ -23,14 +24,14 @@ def showVersion():
         print(VERSION)
         print(USAGE)
         sys.exit()
-    
+
     optparser = OptionParser()
-    optparser.add_option("-v","--version",action="store_true",dest="showversion",
-                         default=False,help="Show the version")
-    optparser.add_option("-t","--top",dest="topmodule",
-                         default="TOP",help="Top module, Default=TOP")
-    optparser.add_option("-s","--search",dest="searchtarget",action="append",
-                         default=[],help="Search Target Signal")
+    optparser.add_option("-v", "--version", action="store_true", dest="showversion",
+                         default=False, help="Show the version")
+    optparser.add_option("-t", "--top", dest="topmodule",
+                         default="TOP", help="Top module, Default=TOP")
+    optparser.add_option("-s", "--search", dest="searchtarget", action="append",
+                         default=[], help="Search Target Signal")
     (options, args) = optparser.parse_args()
 
     filelist = args
@@ -38,7 +39,8 @@ def showVersion():
         showVersion()
 
     for f in filelist:
-        if not os.path.exists(f): raise IOError("file not found: " + f)
+        if not os.path.exists(f):
+            raise IOError("file not found: " + f)
 
     if len(filelist) == 0:
         showVersion()
@@ -57,27 +59,28 @@ def showVersion():
     resolved_binddict = optimizer.getResolvedBinddict()
     constlist = optimizer.getConstlist()
 
-    aanalyzer = VerilogActiveAnalyzer(options.topmodule, terms, binddict, 
+    aanalyzer = VerilogActiveAnalyzer(options.topmodule, terms, binddict,
                                       resolved_terms, resolved_binddict, constlist)
 
     for target in options.searchtarget:
         signal = util.toTermname(target)
 
         print('Active Conditions: %s' % signal)
-        active_conditions = aanalyzer.getActiveConditions( signal )
-        print(sorted(active_conditions, key=lambda x:str(x)))
+        active_conditions = aanalyzer.getActiveConditions(signal)
+        print(sorted(active_conditions, key=lambda x: str(x)))
 
         print('Changed Conditions')
-        changed_conditions = aanalyzer.getChangedConditions( signal )
-        print(sorted(changed_conditions, key=lambda x:str(x)))
-        
+        changed_conditions = aanalyzer.getChangedConditions(signal)
+        print(sorted(changed_conditions, key=lambda x: str(x)))
+
         print('Changed Condition Dict')
-        changed_conditiondict = aanalyzer.getChangedConditionsWithAssignments( signal )
-        print(sorted(changed_conditiondict.items(), key=lambda x:str(x[0])))
-        
+        changed_conditiondict = aanalyzer.getChangedConditionsWithAssignments(signal)
+        print(sorted(changed_conditiondict.items(), key=lambda x: str(x[0])))
+
         print('Unchanged Conditions')
-        unchanged_conditions = aanalyzer.getUnchangedConditions( signal )
-        print(sorted(unchanged_conditions, key=lambda x:str(x)))
+        unchanged_conditions = aanalyzer.getUnchangedConditions(signal)
+        print(sorted(unchanged_conditions, key=lambda x: str(x)))
 
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_ast_code.py b/examples/example_ast_code.py
index d47929f..d52bd2b 100644
--- a/examples/example_ast_code.py
+++ b/examples/example_ast_code.py
@@ -9,43 +9,45 @@
 import pyverilog.vparser.ast as vast
 from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
 
+
 def main():
-    datawid = vast.Parameter( 'DATAWID', vast.Rvalue(vast.IntConst('32')) )
-    params = vast.Paramlist( [datawid] )
-    clk = vast.Ioport( vast.Input('CLK') )
-    rst = vast.Ioport( vast.Input('RST') )
-    width = vast.Width( vast.IntConst('7'), vast.IntConst('0') )
-    led = vast.Ioport( vast.Output('led', width=width) )
-    ports = vast.Portlist( [clk, rst, led] )
-
-    width = vast.Width( vast.Minus(vast.Identifier('DATAWID'), vast.IntConst('1')), vast.IntConst('0') )
+    datawid = vast.Parameter('DATAWID', vast.Rvalue(vast.IntConst('32')))
+    params = vast.Paramlist([datawid])
+    clk = vast.Ioport(vast.Input('CLK'))
+    rst = vast.Ioport(vast.Input('RST'))
+    width = vast.Width(vast.IntConst('7'), vast.IntConst('0'))
+    led = vast.Ioport(vast.Output('led', width=width))
+    ports = vast.Portlist([clk, rst, led])
+
+    width = vast.Width(vast.Minus(vast.Identifier('DATAWID'),
+                                  vast.IntConst('1')), vast.IntConst('0'))
     count = vast.Reg('count', width=width)
 
     assign = vast.Assign(
-        vast.Lvalue(vast.Identifier('led')), 
+        vast.Lvalue(vast.Identifier('led')),
         vast.Rvalue(
             vast.Partselect(
-                vast.Identifier('count'), # count
-                vast.Minus(vast.Identifier('DATAWID'), vast.IntConst('1')), # [DATAWID-1:
-                vast.Minus(vast.Identifier('DATAWID'), vast.IntConst('8'))))) # :DATAWID-8]
+                vast.Identifier('count'),  # count
+                vast.Minus(vast.Identifier('DATAWID'), vast.IntConst('1')),  # [DATAWID-1:
+                vast.Minus(vast.Identifier('DATAWID'), vast.IntConst('8')))))  # :DATAWID-8]
 
     sens = vast.Sens(vast.Identifier('CLK'), type='posedge')
-    senslist = vast.SensList([ sens ])
+    senslist = vast.SensList([sens])
 
     assign_count_true = vast.NonblockingSubstitution(
         vast.Lvalue(vast.Identifier('count')),
         vast.Rvalue(vast.IntConst('0')))
-    if0_true = vast.Block([ assign_count_true ])
+    if0_true = vast.Block([assign_count_true])
 
     # count + 1
     count_plus_1 = vast.Plus(vast.Identifier('count'), vast.IntConst('1'))
     assign_count_false = vast.NonblockingSubstitution(
         vast.Lvalue(vast.Identifier('count')),
         vast.Rvalue(count_plus_1))
-    if0_false = vast.Block([ assign_count_false ])
+    if0_false = vast.Block([assign_count_false])
 
     if0 = vast.IfStatement(vast.Identifier('RST'), if0_true, if0_false)
-    statement = vast.Block([ if0 ])
+    statement = vast.Block([if0])
 
     always = vast.Always(senslist, statement)
 
@@ -55,10 +57,11 @@ def main():
     items.append(always)
 
     ast = vast.ModuleDef("top", params, ports, items)
-    
+
     codegen = ASTCodeGenerator()
     rslt = codegen.visit(ast)
     print(rslt)
 
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_codegen.py b/examples/example_codegen.py
index 6cee035..cdc9949 100644
--- a/examples/example_codegen.py
+++ b/examples/example_codegen.py
@@ -7,13 +7,14 @@
 # the next line can be removed after installation
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import pyverilog.utils.version
+import pyverilog
 from pyverilog.vparser.parser import VerilogCodeParser
 from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
 
+
 def main():
     INFO = "Code converter from AST"
-    VERSION = pyverilog.utils.version.VERSION
+    VERSION = pyverilog.__version__
     USAGE = "Usage: python example_codegen.py file ..."
 
     def showVersion():
@@ -21,14 +22,14 @@ def showVersion():
         print(VERSION)
         print(USAGE)
         sys.exit()
-    
+
     optparser = OptionParser()
-    optparser.add_option("-v","--version",action="store_true",dest="showversion",
-                         default=False,help="Show the version")
-    optparser.add_option("-I","--include",dest="include",action="append",
-                         default=[],help="Include path")
-    optparser.add_option("-D",dest="define",action="append",
-                         default=[],help="Macro Definition")
+    optparser.add_option("-v", "--version", action="store_true", dest="showversion",
+                         default=False, help="Show the version")
+    optparser.add_option("-I", "--include", dest="include", action="append",
+                         default=[], help="Include path")
+    optparser.add_option("-D", dest="define", action="append",
+                         default=[], help="Macro Definition")
     (options, args) = optparser.parse_args()
 
     filelist = args
@@ -36,7 +37,8 @@ def showVersion():
         showVersion()
 
     for f in filelist:
-        if not os.path.exists(f): raise IOError("file not found: " + f)
+        if not os.path.exists(f):
+            raise IOError("file not found: " + f)
 
     if len(filelist) == 0:
         showVersion()
@@ -52,5 +54,6 @@ def showVersion():
     rslt = codegen.visit(ast)
     print(rslt)
 
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_controlflow_analyzer.py b/examples/example_controlflow_analyzer.py
index 7197f8b..f1a6eec 100644
--- a/examples/example_controlflow_analyzer.py
+++ b/examples/example_controlflow_analyzer.py
@@ -7,15 +7,16 @@
 # the next line can be removed after installation
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import pyverilog.utils.version
+import pyverilog
 import pyverilog.utils.util as util
 from pyverilog.dataflow.dataflow_analyzer import VerilogDataflowAnalyzer
 from pyverilog.dataflow.optimizer import VerilogDataflowOptimizer
 from pyverilog.controlflow.controlflow_analyzer import VerilogControlflowAnalyzer
 
+
 def main():
     INFO = "Control-flow analyzer for Verilog definitions"
-    VERSION = pyverilog.utils.version.VERSION
+    VERSION = pyverilog.__version__
     USAGE = "Usage: python example_controlflow_analyzer.py -t TOPMODULE file ..."
 
     def showVersion():
@@ -23,24 +24,24 @@ def showVersion():
         print(VERSION)
         print(USAGE)
         sys.exit()
-    
+
     optparser = OptionParser()
-    optparser.add_option("-v","--version",action="store_true",dest="showversion",
-                         default=False,help="Show the version")
-    optparser.add_option("-t","--top",dest="topmodule",
-                         default="TOP",help="Top module, Default=TOP")
-    optparser.add_option("-s","--search",dest="searchtarget",action="append",
-                         default=[],help="Search Target Signal")
-    optparser.add_option("--graphformat",dest="graphformat",
-                         default="png",help="Graph file format, Default=png")
-    optparser.add_option("--nograph",action="store_true",dest="nograph",
-                         default=False,help="Non graph generation")
-    optparser.add_option("--nolabel",action="store_true",dest="nolabel",
-                         default=False,help="State Machine Graph without Labels")
-    optparser.add_option("-I","--include",dest="include",action="append",
-                         default=[],help="Include path")
-    optparser.add_option("-D",dest="define",action="append",
-                         default=[],help="Macro Definition")
+    optparser.add_option("-v", "--version", action="store_true", dest="showversion",
+                         default=False, help="Show the version")
+    optparser.add_option("-t", "--top", dest="topmodule",
+                         default="TOP", help="Top module, Default=TOP")
+    optparser.add_option("-s", "--search", dest="searchtarget", action="append",
+                         default=[], help="Search Target Signal")
+    optparser.add_option("--graphformat", dest="graphformat",
+                         default="png", help="Graph file format, Default=png")
+    optparser.add_option("--nograph", action="store_true", dest="nograph",
+                         default=False, help="No graph generation")
+    optparser.add_option("--nolabel", action="store_true", dest="nolabel",
+                         default=False, help="State Machine Graph without Labels")
+    optparser.add_option("-I", "--include", dest="include", action="append",
+                         default=[], help="Include path")
+    optparser.add_option("-D", dest="define", action="append",
+                         default=[], help="Macro Definition")
     (options, args) = optparser.parse_args()
 
     filelist = args
@@ -48,7 +49,8 @@ def showVersion():
         showVersion()
 
     for f in filelist:
-        if not os.path.exists(f): raise IOError("file not found: " + f)
+        if not os.path.exists(f):
+            raise IOError("file not found: " + f)
 
     if len(filelist) == 0:
         showVersion()
@@ -69,7 +71,7 @@ def showVersion():
     resolved_binddict = optimizer.getResolvedBinddict()
     constlist = optimizer.getConstlist()
     fsm_vars = tuple(['fsm', 'state', 'count', 'cnt', 'step', 'mode'] + options.searchtarget)
-    
+
     canalyzer = VerilogControlflowAnalyzer(options.topmodule, terms, binddict,
                                            resolved_terms, resolved_binddict, constlist, fsm_vars)
     fsms = canalyzer.getFiniteStateMachines()
@@ -79,11 +81,13 @@ def showVersion():
         print('# DELAY CNT: %d' % fsm.delaycnt)
         fsm.view()
         if not options.nograph:
-            fsm.tograph(filename=util.toFlatname(signame)+'.'+options.graphformat, nolabel=options.nolabel)
+            fsm.tograph(filename=util.toFlatname(signame) + '.' +
+                        options.graphformat, nolabel=options.nolabel)
         loops = fsm.get_loop()
         print('Loop')
         for loop in loops:
             print(loop)
 
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_dataflow_analyzer.py b/examples/example_dataflow_analyzer.py
index a2ae889..479cfe2 100644
--- a/examples/example_dataflow_analyzer.py
+++ b/examples/example_dataflow_analyzer.py
@@ -7,12 +7,13 @@
 # the next line can be removed after installation
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import pyverilog.utils.version
+import pyverilog
 from pyverilog.dataflow.dataflow_analyzer import VerilogDataflowAnalyzer
 
+
 def main():
     INFO = "Verilog module signal/module dataflow analyzer"
-    VERSION = pyverilog.utils.version.VERSION
+    VERSION = pyverilog.__version__
     USAGE = "Usage: python example_dataflow_analyzer.py -t TOPMODULE file ..."
 
     def showVersion():
@@ -20,20 +21,20 @@ def showVersion():
         print(VERSION)
         print(USAGE)
         sys.exit()
-    
+
     optparser = OptionParser()
-    optparser.add_option("-v","--version",action="store_true",dest="showversion",
-                         default=False,help="Show the version")
-    optparser.add_option("-I","--include",dest="include",action="append",
-                         default=[],help="Include path")
-    optparser.add_option("-D",dest="define",action="append",
-                         default=[],help="Macro Definition")
-    optparser.add_option("-t","--top",dest="topmodule",
-                         default="TOP",help="Top module, Default=TOP")
-    optparser.add_option("--nobind",action="store_true",dest="nobind",
-                         default=False,help="No binding traversal, Default=False")
-    optparser.add_option("--noreorder",action="store_true",dest="noreorder",
-                         default=False,help="No reordering of binding dataflow, Default=False")
+    optparser.add_option("-v", "--version", action="store_true", dest="showversion",
+                         default=False, help="Show the version")
+    optparser.add_option("-I", "--include", dest="include", action="append",
+                         default=[], help="Include path")
+    optparser.add_option("-D", dest="define", action="append",
+                         default=[], help="Macro Definition")
+    optparser.add_option("-t", "--top", dest="topmodule",
+                         default="TOP", help="Top module, Default=TOP")
+    optparser.add_option("--nobind", action="store_true", dest="nobind",
+                         default=False, help="No binding traversal, Default=False")
+    optparser.add_option("--noreorder", action="store_true", dest="noreorder",
+                         default=False, help="No reordering of binding dataflow, Default=False")
     (options, args) = optparser.parse_args()
 
     filelist = args
@@ -41,7 +42,8 @@ def showVersion():
         showVersion()
 
     for f in filelist:
-        if not os.path.exists(f): raise IOError("file not found: " + f)
+        if not os.path.exists(f):
+            raise IOError("file not found: " + f)
 
     if len(filelist) == 0:
         showVersion()
@@ -55,12 +57,12 @@ def showVersion():
 
     directives = analyzer.get_directives()
     print('Directive:')
-    for dr in sorted(directives, key=lambda x:str(x)):
+    for dr in sorted(directives, key=lambda x: str(x)):
         print(dr)
 
     instances = analyzer.getInstances()
     print('Instance:')
-    for module, instname in sorted(instances, key=lambda x:str(x[1])):
+    for module, instname in sorted(instances, key=lambda x: str(x[1])):
         print((module, instname))
 
     if options.nobind:
@@ -77,14 +79,15 @@ def showVersion():
     else:
         terms = analyzer.getTerms()
         print('Term:')
-        for tk, tv in sorted(terms.items(), key=lambda x:str(x[0])):
+        for tk, tv in sorted(terms.items(), key=lambda x: str(x[0])):
             print(tv.tostr())
-        
+
         binddict = analyzer.getBinddict()
         print('Bind:')
-        for bk, bv in sorted(binddict.items(), key=lambda x:str(x[0])):
+        for bk, bv in sorted(binddict.items(), key=lambda x: str(x[0])):
             for bvi in bv:
                 print(bvi.tostr())
 
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_dataflow_codegen.py b/examples/example_dataflow_codegen.py
index 4c308c1..6fe8507 100644
--- a/examples/example_dataflow_codegen.py
+++ b/examples/example_dataflow_codegen.py
@@ -7,14 +7,15 @@
 # the next line can be removed after installation
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import pyverilog.utils.version
+import pyverilog
 from pyverilog.dataflow.dataflow_analyzer import VerilogDataflowAnalyzer
 from pyverilog.dataflow.optimizer import VerilogDataflowOptimizer
 from pyverilog.dataflow.dataflow_codegen import VerilogCodeGenerator
 
+
 def main():
     INFO = "Code generator from Verilog dataflow definitions"
-    VERSION = pyverilog.utils.version.VERSION
+    VERSION = pyverilog.__version__
     USAGE = "Usage: python example_dataflow_codegen.py -t TOPMODULE file ..."
 
     def showVersion():
@@ -22,32 +23,32 @@ def showVersion():
         print(VERSION)
         print(USAGE)
         sys.exit()
-    
+
     optparser = OptionParser()
-    optparser.add_option("-v","--version",action="store_true",dest="showversion",
-                         default=False,help="Show the version")
-    optparser.add_option("-I","--include",dest="include",action="append",
-                         default=[],help="Include path")
-    optparser.add_option("-D",dest="define",action="append",
-                         default=[],help="Macro Definition")
-    optparser.add_option("-t","--top",dest="topmodule",
-                         default="TOP",help="Top module, Default=TOP")
-    optparser.add_option("--nobind",action="store_true",dest="nobind",
-                         default=False,help="No binding traversal, Default=False")
-    optparser.add_option("--noreorder",action="store_true",dest="noreorder",
-                         default=False,help="No reordering of binding dataflow, Default=False")
-    optparser.add_option("-s","--search",dest="searchtarget",action="append",
-                         default=[],help="Search Target Signal")
-    optparser.add_option("-o","--output",dest="outputfile",
-                         default="helperthread.v",help="Output File name, Default=helperthread.v")
-    optparser.add_option("--clockname",dest="clockname",
-                         default="CLK",help="Clock signal name")
-    optparser.add_option("--resetname",dest="resetname",
-                         default="RST_X",help="Reset signal name")
-    optparser.add_option("--clockedge",dest="clockedge",
-                         default="posedge",help="Clock signal edge")
-    optparser.add_option("--resetedge",dest="resetedge",
-                         default="negedge",help="Reset signal edge")
+    optparser.add_option("-v", "--version", action="store_true", dest="showversion",
+                         default=False, help="Show the version")
+    optparser.add_option("-I", "--include", dest="include", action="append",
+                         default=[], help="Include path")
+    optparser.add_option("-D", dest="define", action="append",
+                         default=[], help="Macro Definition")
+    optparser.add_option("-t", "--top", dest="topmodule",
+                         default="TOP", help="Top module, Default=TOP")
+    optparser.add_option("--nobind", action="store_true", dest="nobind",
+                         default=False, help="No binding traversal, Default=False")
+    optparser.add_option("--noreorder", action="store_true", dest="noreorder",
+                         default=False, help="No reordering of binding dataflow, Default=False")
+    optparser.add_option("-s", "--search", dest="searchtarget", action="append",
+                         default=[], help="Search Target Signal")
+    optparser.add_option("-o", "--output", dest="outputfile",
+                         default="helperthread.v", help="Output File name, Default=helperthread.v")
+    optparser.add_option("--clockname", dest="clockname",
+                         default="CLK", help="Clock signal name")
+    optparser.add_option("--resetname", dest="resetname",
+                         default="RST_X", help="Reset signal name")
+    optparser.add_option("--clockedge", dest="clockedge",
+                         default="posedge", help="Clock signal edge")
+    optparser.add_option("--resetedge", dest="resetedge",
+                         default="negedge", help="Reset signal edge")
     (options, args) = optparser.parse_args()
 
     filelist = args
@@ -55,7 +56,8 @@ def showVersion():
         showVersion()
 
     for f in filelist:
-        if not os.path.exists(f): raise IOError("file not found: " + f)
+        if not os.path.exists(f):
+            raise IOError("file not found: " + f)
 
     if len(filelist) == 0:
         showVersion()
@@ -87,6 +89,7 @@ def showVersion():
     f = open(options.outputfile, 'w')
     f.write(code)
     f.close()
-    
+
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_graphgen.py b/examples/example_graphgen.py
index d8f96fd..3194229 100644
--- a/examples/example_graphgen.py
+++ b/examples/example_graphgen.py
@@ -1,4 +1,4 @@
-#-------------------------------------------------------------------------------
+# -------------------------------------------------------------------------------
 # graphgen.py
 #
 # Dataflow graph generator (Only Python 2.7)
@@ -7,7 +7,7 @@
 #
 # Copyright (C) 2013, Shinya Takamaeda-Yamazaki
 # License: Apache 2.0
-#-------------------------------------------------------------------------------
+# -------------------------------------------------------------------------------
 from __future__ import absolute_import
 from __future__ import print_function
 import sys
@@ -18,14 +18,15 @@
 # the next line can be removed after installation
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import pyverilog.utils.version
+import pyverilog
 from pyverilog.dataflow.dataflow_analyzer import VerilogDataflowAnalyzer
 from pyverilog.dataflow.optimizer import VerilogDataflowOptimizer
 from pyverilog.dataflow.graphgen import VerilogGraphGenerator
 
+
 def main():
     INFO = "Graph generator from dataflow"
-    VERSION = pyverilog.utils.version.VERSION
+    VERSION = pyverilog.__version__
     USAGE = "Usage: python example_graphgen.py -t TOPMODULE -s TARGETSIGNAL file ..."
 
     def showVersion():
@@ -33,34 +34,34 @@ def showVersion():
         print(VERSION)
         print(USAGE)
         sys.exit()
-    
+
     optparser = OptionParser()
-    optparser.add_option("-v","--version",action="store_true",dest="showversion",
-                         default=False,help="Show the version")
-    optparser.add_option("-I","--include",dest="include",action="append",
-                         default=[],help="Include path")
-    optparser.add_option("-D",dest="define",action="append",
-                         default=[],help="Macro Definition")
-    optparser.add_option("-t","--top",dest="topmodule",
-                         default="TOP",help="Top module, Default=TOP")
-    optparser.add_option("--nobind",action="store_true",dest="nobind",
-                         default=False,help="No binding traversal, Default=False")
-    optparser.add_option("--noreorder",action="store_true",dest="noreorder",
-                         default=False,help="No reordering of binding dataflow, Default=False")
-    optparser.add_option("-s","--search",dest="searchtarget",action="append",
-                         default=[],help="Search Target Signal")
-    optparser.add_option("-o","--output",dest="outputfile",
-                         default="out.png",help="Graph file name, Default=out.png")
-    optparser.add_option("--identical",action="store_true",dest="identical",
-                         default=False,help="# Identical Laef, Default=False")
-    optparser.add_option("--walk",action="store_true",dest="walk",
-                         default=False,help="Walk contineous signals, Default=False")
-    optparser.add_option("--step",dest="step",type='int',
-                         default=1,help="# Search Steps, Default=1")
-    optparser.add_option("--reorder",action="store_true",dest="reorder",
-                         default=False,help="Reorder the contineous tree, Default=False")
-    optparser.add_option("--delay",action="store_true",dest="delay",
-                         default=False,help="Inset Delay Node to walk Regs, Default=False")
+    optparser.add_option("-v", "--version", action="store_true", dest="showversion",
+                         default=False, help="Show the version")
+    optparser.add_option("-I", "--include", dest="include", action="append",
+                         default=[], help="Include path")
+    optparser.add_option("-D", dest="define", action="append",
+                         default=[], help="Macro Definition")
+    optparser.add_option("-t", "--top", dest="topmodule",
+                         default="TOP", help="Top module, Default=TOP")
+    optparser.add_option("--nobind", action="store_true", dest="nobind",
+                         default=False, help="No binding traversal, Default=False")
+    optparser.add_option("--noreorder", action="store_true", dest="noreorder",
+                         default=False, help="No reordering of binding dataflow, Default=False")
+    optparser.add_option("-s", "--search", dest="searchtarget", action="append",
+                         default=[], help="Search Target Signal")
+    optparser.add_option("-o", "--output", dest="outputfile",
+                         default="out.png", help="Graph file name, Default=out.png")
+    optparser.add_option("--identical", action="store_true", dest="identical",
+                         default=False, help="# Identical Leaf, Default=False")
+    optparser.add_option("--walk", action="store_true", dest="walk",
+                         default=False, help="Walk continuous signals, Default=False")
+    optparser.add_option("--step", dest="step", type='int',
+                         default=1, help="# Search Steps, Default=1")
+    optparser.add_option("--reorder", action="store_true", dest="reorder",
+                         default=False, help="Reorder the continuous tree, Default=False")
+    optparser.add_option("--delay", action="store_true", dest="delay",
+                         default=False, help="Insert Delay Node to walk Regs, Default=False")
     (options, args) = optparser.parse_args()
 
     filelist = args
@@ -68,7 +69,8 @@ def showVersion():
         showVersion()
 
     for f in filelist:
-        if not os.path.exists(f): raise IOError("file not found: " + f)
+        if not os.path.exists(f):
+            raise IOError("file not found: " + f)
 
     if len(filelist) == 0:
         showVersion()
@@ -91,14 +93,15 @@ def showVersion():
     resolved_binddict = optimizer.getResolvedBinddict()
     constlist = optimizer.getConstlist()
 
-    graphgen = VerilogGraphGenerator(options.topmodule, terms, binddict, 
+    graphgen = VerilogGraphGenerator(options.topmodule, terms, binddict,
                                      resolved_terms, resolved_binddict, constlist, options.outputfile)
 
     for target in options.searchtarget:
-        graphgen.generate(target, walk=options.walk, identical=options.identical, 
-                          step=options.step, reorder=options.reorder, delay=options.delay)
+        graphgen.generate(target, walk=options.walk, identical=options.identical,
+                          step=options.step, do_reorder=options.reorder, delay=options.delay)
 
     graphgen.draw()
-    
+
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_identifierreplace.py b/examples/example_identifierreplace.py
index aca99ac..b226d34 100644
--- a/examples/example_identifierreplace.py
+++ b/examples/example_identifierreplace.py
@@ -9,16 +9,18 @@
 import pyverilog.vparser.ast as vast
 from pyverilog.utils.identifierreplace import replaceIdentifiers
 
+
 def main():
     a = vast.Identifier('a')
     b = vast.Identifier('b')
     c = vast.Plus(a, b)
 
-    ids = {'a' : 'x',
-           'b' : 'y'}
-    
+    ids = {'a': 'x',
+           'b': 'y'}
+
     d = replaceIdentifiers(c, ids)
     print(d)
-    
+
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_identifiervisitor.py b/examples/example_identifiervisitor.py
index abe12fa..e42620c 100644
--- a/examples/example_identifiervisitor.py
+++ b/examples/example_identifiervisitor.py
@@ -9,6 +9,7 @@
 import pyverilog.vparser.ast as vast
 from pyverilog.utils.identifiervisitor import getIdentifiers
 
+
 def main():
     a = vast.Identifier('a')
     b = vast.Identifier('b')
@@ -17,5 +18,6 @@ def main():
 
     ids = getIdentifiers(c)
     print(ids)
 
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_lexer.py b/examples/example_lexer.py
index eed8639..d2bfd7a 100644
--- a/examples/example_lexer.py
+++ b/examples/example_lexer.py
@@ -7,13 +7,14 @@
 # the next line can be removed after installation
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import pyverilog.utils.version
+import pyverilog
 from pyverilog.vparser.preprocessor import preprocess
 from pyverilog.vparser.lexer import dump_tokens
 
+
 def main():
     INFO = "Verilog Lexer"
-    VERSION = pyverilog.utils.version.VERSION
+    VERSION = pyverilog.__version__
     USAGE = "Usage: python example_lexer.py file ..."
 
     def showVersion():
@@ -23,12 +24,12 @@ def showVersion():
         sys.exit()
 
     optparser = OptionParser()
-    optparser.add_option("-v","--version",action="store_true",dest="showversion",
-                         default=False,help="Show the version")
-    optparser.add_option("-I","--include",dest="include",action="append",
-                         default=[],help="Include path")
-    optparser.add_option("-D",dest="define",action="append",
-                         default=[],help="Macro Definition")
+    optparser.add_option("-v", "--version", action="store_true", dest="showversion",
+                         default=False, help="Show the version")
+    optparser.add_option("-I", "--include", dest="include", action="append",
+                         default=[], help="Include path")
+    optparser.add_option("-D", dest="define", action="append",
+                         default=[], help="Macro Definition")
     (options, args) = optparser.parse_args()
 
     filelist = args
@@ -36,18 +37,20 @@ def showVersion():
         showVersion()
 
     for f in filelist:
-        if not os.path.exists(f): raise IOError("file not found: " + f)
+        if not os.path.exists(f):
+            raise IOError("file not found: " + f)
 
     if len(filelist) == 0:
         showVersion()
-    
-    text = preprocess(filelist, 
+
+    text = preprocess(filelist,
                       include=options.include,
                       define=options.define)
 
     dump = dump_tokens(text)
-    
+
     print(dump)
 
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_merge.py b/examples/example_merge.py
index 16c9801..e622ee1 100644
--- a/examples/example_merge.py
+++ b/examples/example_merge.py
@@ -7,15 +7,16 @@
 # the next line can be removed after installation
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import pyverilog.utils.version
+import pyverilog
 import pyverilog.utils.util as util
 from pyverilog.dataflow.dataflow_analyzer import VerilogDataflowAnalyzer
 from pyverilog.dataflow.optimizer import VerilogDataflowOptimizer
 from pyverilog.dataflow.merge import VerilogDataflowMerge
 
+
 def main():
     INFO = "Dataflow merge module"
-    VERSION = pyverilog.utils.version.VERSION
+    VERSION = pyverilog.__version__
     USAGE = "Usage: python example_merge.py -t TOPMODULE file ..."
 
     def showVersion():
@@ -23,22 +24,22 @@ def showVersion():
         print(VERSION)
         print(USAGE)
         sys.exit()
-    
+
     optparser = OptionParser()
-    optparser.add_option("-v","--version",action="store_true",dest="showversion",
-                         default=False,help="Show the version")
-    optparser.add_option("-I","--include",dest="include",action="append",
-                         default=[],help="Include path")
-    optparser.add_option("-D",dest="define",action="append",
-                         default=[],help="Macro Definition")
-    optparser.add_option("-t","--top",dest="topmodule",
-                         default="TOP",help="Top module, Default=TOP")
-    optparser.add_option("--nobind",action="store_true",dest="nobind",
-                         default=False,help="No binding traversal, Default=False")
-    optparser.add_option("--noreorder",action="store_true",dest="noreorder",
-                         default=False,help="No reordering of binding dataflow, Default=False")
-    optparser.add_option("-s","--search",dest="searchtarget",action="append",
-                         default=[],help="Search Target Signal")
+    optparser.add_option("-v", "--version", action="store_true", dest="showversion",
+                         default=False, help="Show the version")
+    optparser.add_option("-I", "--include", dest="include", action="append",
+                         default=[], help="Include path")
+    optparser.add_option("-D", dest="define", action="append",
+                         default=[], help="Macro Definition")
+    optparser.add_option("-t", "--top", dest="topmodule",
+                         default="TOP", help="Top module, Default=TOP")
+    optparser.add_option("--nobind", action="store_true", dest="nobind",
+                         default=False, help="No binding traversal, Default=False")
+    optparser.add_option("--noreorder", action="store_true", dest="noreorder",
+                         default=False, help="No reordering of binding dataflow, Default=False")
+    optparser.add_option("-s", "--search", dest="searchtarget", action="append",
+                         default=[], help="Search Target Signal")
     (options, args) = optparser.parse_args()
 
     filelist = args
@@ -46,7 +47,8 @@ def showVersion():
         showVersion()
 
     for f in filelist:
-        if not os.path.exists(f): raise IOError("file not found: " + f)
+        if not os.path.exists(f):
+            raise IOError("file not found: " + f)
 
     if len(filelist) == 0:
         showVersion()
@@ -77,5 +79,6 @@ def showVersion():
         print('target: %s' % target)
         print(tree.tostr())
 
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_optimizer.py b/examples/example_optimizer.py
index f3482fe..9c89dc8 100644
--- a/examples/example_optimizer.py
+++ b/examples/example_optimizer.py
@@ -7,13 +7,14 @@
 # the next line can be removed after installation
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import pyverilog.utils.version
+import pyverilog
 from pyverilog.dataflow.dataflow_analyzer import VerilogDataflowAnalyzer
 from pyverilog.dataflow.optimizer import VerilogDataflowOptimizer
 
+
 def main():
     INFO = "Verilog dataflow optimizer with Pyverilog"
-    VERSION = pyverilog.utils.version.VERSION
+    VERSION = pyverilog.__version__
     USAGE = "Usage: python example_optimizer.py -t TOPMODULE file ..."
 
     def showVersion():
@@ -21,12 +22,12 @@ def showVersion():
         print(VERSION)
         print(USAGE)
         sys.exit()
-    
+
     optparser = OptionParser()
-    optparser.add_option("-v","--version",action="store_true",dest="showversion",
-                         default=False,help="Show the version")
-    optparser.add_option("-t","--top",dest="topmodule",
-                         default="TOP",help="Top module, Default=TOP")
+    optparser.add_option("-v", "--version", action="store_true", dest="showversion",
+                         default=False, help="Show the version")
+    optparser.add_option("-t", "--top", dest="topmodule",
+                         default="TOP", help="Top module, Default=TOP")
     (options, args) = optparser.parse_args()
 
     filelist = args
@@ -34,7 +35,8 @@ def showVersion():
         showVersion()
 
     for f in filelist:
-        if not os.path.exists(f): raise IOError("file not found: " + f)
+        if not os.path.exists(f):
+            raise IOError("file not found: " + f)
 
     if len(filelist) == 0:
         showVersion()
@@ -58,17 +60,18 @@ def showVersion():
         print(dr)
 
     print('Term:')
-    for tk, tv in sorted(resolved_terms.items(), key=lambda x:len(x[0])):
+    for tk, tv in sorted(resolved_terms.items(), key=lambda x: len(x[0])):
         print(tv.tostr())
 
     print('Bind:')
-    for bk, bv in sorted(resolved_binddict.items(), key=lambda x:len(x[0])):
+    for bk, bv in sorted(resolved_binddict.items(), key=lambda x: len(x[0])):
         for bvi in bv:
             print(bvi.tostr())
 
     print('Const:')
-    for ck, cv in sorted(constlist.items(), key=lambda x:len(x[0])):
+    for ck, cv in sorted(constlist.items(), key=lambda x: len(x[0])):
         print(ck, cv)
 
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_parser.py b/examples/example_parser.py
index fc2163a..298edc2 100644
--- a/examples/example_parser.py
+++ b/examples/example_parser.py
@@ -7,12 +7,13 @@
 # the next line can be removed after installation
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import pyverilog.utils.version
+import pyverilog
 from pyverilog.vparser.parser import parse
 
+
 def main():
     INFO = "Verilog code parser"
-    VERSION = pyverilog.utils.version.VERSION
+    VERSION = pyverilog.__version__
     USAGE = "Usage: python example_parser.py file ..."
 
     def showVersion():
@@ -22,12 +23,12 @@ def showVersion():
         sys.exit()
 
     optparser = OptionParser()
-    optparser.add_option("-v","--version",action="store_true",dest="showversion",
-                         default=False,help="Show the version")
-    optparser.add_option("-I","--include",dest="include",action="append",
-                         default=[],help="Include path")
-    optparser.add_option("-D",dest="define",action="append",
-                         default=[],help="Macro Definition")
+    optparser.add_option("-v", "--version", action="store_true", dest="showversion",
+                         default=False, help="Show the version")
+    optparser.add_option("-I", "--include", dest="include", action="append",
+                         default=[], help="Include path")
+    optparser.add_option("-D", dest="define", action="append",
+                         default=[], help="Macro Definition")
     (options, args) = optparser.parse_args()
 
     filelist = args
@@ -35,7 +36,8 @@ def showVersion():
         showVersion()
 
     for f in filelist:
-        if not os.path.exists(f): raise IOError("file not found: " + f)
+        if not os.path.exists(f):
+            raise IOError("file not found: " + f)
 
     if len(filelist) == 0:
         showVersion()
@@ -43,10 +45,11 @@ def showVersion():
     ast, directives = parse(filelist,
                             preprocess_include=options.include,
                             preprocess_define=options.define)
-    
+
     ast.show()
     for lineno, directive in directives:
         print('Line %d : %s' % (lineno, directive))
-    
+
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_preprocessor.py b/examples/example_preprocessor.py
index a81bc28..a2c1003 100644
--- a/examples/example_preprocessor.py
+++ b/examples/example_preprocessor.py
@@ -7,12 +7,13 @@
 # the next line can be removed after installation
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import pyverilog.utils.version
+import pyverilog
 from pyverilog.vparser.preprocessor import preprocess
 
+
 def main():
     INFO = "Verilog Preprocessor"
-    VERSION = pyverilog.utils.version.VERSION
+    VERSION = pyverilog.__version__
     USAGE = "Usage: python example_preprocessor.py file ..."
 
     def showVersion():
@@ -22,12 +23,12 @@ def showVersion():
         sys.exit()
 
     optparser = OptionParser()
-    optparser.add_option("-v","--version",action="store_true",dest="showversion",
-                         default=False,help="Show the version")
-    optparser.add_option("-I","--include",dest="include",action="append",
-                         default=[],help="Include path")
-    optparser.add_option("-D",dest="define",action="append",
-                         default=[],help="Macro Definition")
+    optparser.add_option("-v", "--version", action="store_true", dest="showversion",
+                         default=False, help="Show the version")
+    optparser.add_option("-I", "--include", dest="include", action="append",
+                         default=[], help="Include path")
+    optparser.add_option("-D", dest="define", action="append",
+                         default=[], help="Macro Definition")
     (options, args) = optparser.parse_args()
 
     filelist = args
@@ -35,14 +36,16 @@ def showVersion():
         showVersion()
 
     for f in filelist:
-        if not os.path.exists(f): raise IOError("file not found: " + f)
+        if not os.path.exists(f):
+            raise IOError("file not found: " + f)
 
     if len(filelist) == 0:
         showVersion()
-    
+
     text = preprocess(filelist, include=options.include, define=options.define)
-    
+
     print(text)
 
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_subset.py b/examples/example_subset.py
index bed3dc1..9e1860b 100644
--- a/examples/example_subset.py
+++ b/examples/example_subset.py
@@ -7,14 +7,15 @@
 # the next line can be removed after installation
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import pyverilog.utils.version
+import pyverilog
 from pyverilog.dataflow.dataflow_analyzer import VerilogDataflowAnalyzer
 from pyverilog.dataflow.optimizer import VerilogDataflowOptimizer
 from pyverilog.dataflow.subset import VerilogSubset
 
+
 def main():
     INFO = "Subset generator from Verilog dataflow definitions"
-    VERSION = pyverilog.utils.version.VERSION
+    VERSION = pyverilog.__version__
     USAGE = "Usage: python example_subset.py -t TOPMODULE file ..."
 
     def showVersion():
@@ -22,30 +23,30 @@ def showVersion():
         print(VERSION)
         print(USAGE)
         sys.exit()
-    
+
     optparser = OptionParser()
-    optparser.add_option("-v","--version",action="store_true",dest="showversion",
-                         default=False,help="Show the version")
-    optparser.add_option("-I","--include",dest="include",action="append",
-                         default=[],help="Include path")
-    optparser.add_option("-D",dest="define",action="append",
-                         default=[],help="Macro Definition")
-    optparser.add_option("-t","--top",dest="topmodule",
-                         default="TOP",help="Top module, Default=TOP")
-    optparser.add_option("--nobind",action="store_true",dest="nobind",
-                         default=False,help="No binding traversal, Default=False")
-    optparser.add_option("--noreorder",action="store_true",dest="noreorder",
-                         default=False,help="No reordering of binding dataflow, Default=False")
-    optparser.add_option("-s","--search",dest="searchtarget",action="append",
-                         default=[],help="Search Target Signal")
-    optparser.add_option("--clockname",dest="clockname",
-                         default="CLK",help="Clock signal name")
-    optparser.add_option("--resetname",dest="resetname",
-                         default="RST_X",help="Reset signal name")
-    optparser.add_option("--clockedge",dest="clockedge",
-                         default="posedge",help="Clock signal edge")
-    optparser.add_option("--resetedge",dest="resetedge",
-                         default="negedge",help="Reset signal edge")
+    optparser.add_option("-v", "--version", action="store_true", dest="showversion",
+                         default=False, help="Show the version")
+    optparser.add_option("-I", "--include", dest="include", action="append",
+                         default=[], help="Include path")
+    optparser.add_option("-D", dest="define", action="append",
+                         default=[], help="Macro Definition")
+    optparser.add_option("-t", "--top", dest="topmodule",
+                         default="TOP", help="Top module, Default=TOP")
+    optparser.add_option("--nobind", action="store_true", dest="nobind",
+                         default=False, help="No binding traversal, Default=False")
+    optparser.add_option("--noreorder", action="store_true", dest="noreorder",
+                         default=False, help="No reordering of binding dataflow, Default=False")
+    optparser.add_option("-s", "--search", dest="searchtarget", action="append",
+                         default=[], help="Search Target Signal")
+    optparser.add_option("--clockname", dest="clockname",
+                         default="CLK", help="Clock signal name")
+    optparser.add_option("--resetname", dest="resetname",
+                         default="RST_X", help="Reset signal name")
+    optparser.add_option("--clockedge", dest="clockedge",
+                         default="posedge", help="Clock signal edge")
+    optparser.add_option("--resetedge", dest="resetedge",
+                         default="negedge", help="Reset signal edge")
     (options, args) = optparser.parse_args()
 
     filelist = args
@@ -53,7 +54,8 @@ def showVersion():
         showVersion()
 
     for f in filelist:
-        if not os.path.exists(f): raise IOError("file not found: " + f)
+        if not os.path.exists(f):
+            raise IOError("file not found: " + f)
 
     if len(filelist) == 0:
         showVersion()
@@ -76,14 +78,15 @@ def showVersion():
     resolved_binddict = optimizer.getResolvedBinddict()
     constlist = optimizer.getConstlist()
 
-    subset = VerilogSubset(options.topmodule, terms, binddict, 
+    subset = VerilogSubset(options.topmodule, terms, binddict,
                            resolved_terms, resolved_binddict, constlist)
     subset.set_clock_info(options.clockname, options.clockedge)
     subset.set_reset_info(options.resetname, options.resetedge)
 
     sub_binds, sub_terms = subset.getBindSourceSubset(options.searchtarget)
-    terms, parameter, assign, always_clockedge, always_combination = subset.getSubset(options.searchtarget)
+    terms, parameter, assign, always_clockedge, always_combination = subset.getSubset(
+        options.searchtarget)
 
     for k, v in terms.items():
         print(v.tocode())
@@ -99,5 +102,6 @@ def showVersion():
         for vv in v:
             print(vv.tocode())
 
+
 if __name__ == '__main__':
     main()
diff --git a/examples/example_walker.py b/examples/example_walker.py
index 8bf4f79..4977a51 100644
--- a/examples/example_walker.py
+++ b/examples/example_walker.py
@@ -7,14 +7,15 @@
 # the next line can be removed after installation
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import pyverilog.utils.version
+import pyverilog
 from pyverilog.dataflow.dataflow_analyzer import VerilogDataflowAnalyzer
 from pyverilog.dataflow.optimizer import VerilogDataflowOptimizer
 from pyverilog.dataflow.walker import VerilogDataflowWalker
 
+
 def main():
     INFO = "Dataflow walker"
-    VERSION = pyverilog.utils.version.VERSION
+    VERSION = pyverilog.__version__
     USAGE = "Usage: python example_walker.py -t TOPMODULE -s TARGETSIGNAL file ..."
 
     def showVersion():
@@ -22,22 +23,22 @@ def showVersion():
         print(VERSION)
         print(USAGE)
         sys.exit()
-    
+
     optparser = OptionParser()
-    optparser.add_option("-v","--version",action="store_true",dest="showversion",
-                         default=False,help="Show the version")
-    optparser.add_option("-I","--include",dest="include",action="append",
-                         default=[],help="Include path")
-    optparser.add_option("-D",dest="define",action="append",
-                         default=[],help="Macro Definition")
-    optparser.add_option("-t","--top",dest="topmodule",
-                         default="TOP",help="Top module, Default=TOP")
-    optparser.add_option("--nobind",action="store_true",dest="nobind",
-                         default=False,help="No binding traversal, Default=False")
-    optparser.add_option("--noreorder",action="store_true",dest="noreorder",
-                         default=False,help="No reordering of binding dataflow, Default=False")
-    optparser.add_option("-s","--search",dest="searchtarget",action="append",
-                         default=[],help="Search Target Signal")
+    optparser.add_option("-v", "--version", action="store_true", dest="showversion",
+                         default=False, help="Show the version")
+    optparser.add_option("-I", "--include", dest="include", action="append",
+                         default=[], help="Include path")
+    optparser.add_option("-D", dest="define", action="append",
+                         default=[], help="Macro Definition")
+    optparser.add_option("-t", "--top", dest="topmodule",
+                         default="TOP", help="Top module, Default=TOP")
+    optparser.add_option("--nobind", action="store_true", dest="nobind",
+                         default=False, help="No binding traversal, Default=False")
+    optparser.add_option("--noreorder", action="store_true", dest="noreorder",
+                         default=False, help="No reordering of binding dataflow, Default=False")
+    optparser.add_option("-s", "--search", dest="searchtarget", action="append",
+                         default=[], help="Search Target Signal")
     (options, args) = optparser.parse_args()
 
     filelist = args
@@ -45,7 +46,8 @@ def showVersion():
         showVersion()
 
     for f in filelist:
-        if not os.path.exists(f): raise IOError("file not found: " + f)
+        if not os.path.exists(f):
+            raise IOError("file not found: " + f)
 
     if len(filelist) == 0:
         showVersion()
@@ -76,5 +78,6 @@ def showVersion():
         print('target: %s' % target)
         print(tree.tostr())
 
+
 if __name__ == '__main__':
     main()
diff --git a/pyverilog/VERSION b/pyverilog/VERSION
index 6085e94..f0bb29e 100644
--- a/pyverilog/VERSION
+++ b/pyverilog/VERSION
@@ -1 +1 @@
-1.2.1
+1.3.0
diff --git a/pyverilog/dataflow/graphgen.py b/pyverilog/dataflow/graphgen.py
index 1d08e62..a23c406 100644
--- a/pyverilog/dataflow/graphgen.py
+++ b/pyverilog/dataflow/graphgen.py
@@ -54,18 +54,18 @@ def __init__(self, topmodule,
                                                 self.resolved_binddict, constlist)
         self.optimizer = VerilogOptimizer(terms, constlist)
 
-    def generate(self, signalname, identical=False, walk=True, step=1, reorder=False, delay=False):
+    def generate(self, signalname, identical=False, walk=True, step=1, do_reorder=False, delay=False):
         termname = util.toTermname(signalname)
         tree = self.treewalker.getTree(termname)
         if tree is None:
             raise verror.DefinitionError('No such signals: %s' % str(signalname))
 
         if walk:
             tree = self.treewalker.walkTree(tree, visited=set(), step=step, delay=delay)
 
-        if reorder:
+        if do_reorder:
             tree = reorder.reorder(tree)
 
         tree = self.optimizer.optimize(tree)
 
-        if reorder:
+        if do_reorder:
             tree = reorder.reorder(tree)
 
         tree = replace.replaceUndefined(tree, termname)
diff --git a/pyverilog/vparser/lexer.py b/pyverilog/vparser/lexer.py
index 94501da..8fe6628 100644
--- a/pyverilog/vparser/lexer.py
+++ b/pyverilog/vparser/lexer.py
@@ -20,7 +20,7 @@
 import os
 import re
 
-from pyverilog.vparser.ply.lex import *
+from ply.lex import *
 
 
 class VerilogLexer(object):
diff --git a/pyverilog/vparser/parser.py b/pyverilog/vparser/parser.py
index c3c2f7e..3a82912 100644
--- a/pyverilog/vparser/parser.py
+++ b/pyverilog/vparser/parser.py
@@ -19,15 +19,14 @@
 import sys
 import os
 import pathlib
+from ply.yacc import yacc
 
-from pyverilog.vparser.ply.yacc import yacc
-from pyverilog.vparser.plyparser import PLYParser, Coord, ParseError
 from pyverilog.vparser.preprocessor import VerilogPreprocessor
 from pyverilog.vparser.lexer import VerilogLexer
 from pyverilog.vparser.ast import *
 
 
-class VerilogParser(PLYParser):
+class VerilogParser(object):
     'Verilog HDL Parser'
 
     # Expression Precedence
@@ -64,7 +63,8 @@ def __init__(self, outputdir=".", debug=True):
         )
 
     def _lexer_error_func(self, msg, line, column):
-        self._parse_error(msg, self._coord(line, column))
+        coord = self._coord(line, column)
+        raise ParseError('%s: %s' % (coord, msg))
 
     def get_directives(self):
         return self.lexer.get_directives()
@@ -2265,13 +2265,29 @@ def p_empty(self, p):
 
     # --------------------------------------------------------------------------
     def p_error(self, p):
-        print("Syntax error")
+        self._raise_error(p)
+
+    # --------------------------------------------------------------------------
+    def _raise_error(self, p):
         if p:
-            self._parse_error(
-                'before: %s' % p.value,
-                self._coord(p.lineno))
+            msg = 'before: "%s"' % p.value
+            coord = self._coord(p.lineno)
         else:
-            self._parse_error('At end of input', '')
+            msg = 'at end of input'
+            coord = None
+
+        raise ParseError("%s: %s" % (coord, msg))
+
+    def _coord(self, lineno, column=None):
+        ret = [self.lexer.filename]
+        ret.append('line:%s' % lineno)
+        if column is not None:
+            ret.append('column:%s' % column)
+        return ' '.join(ret)
+
+
+class ParseError(Exception):
+    pass
 
 
 class VerilogCodeParser(object):
diff --git a/pyverilog/vparser/ply/__init__.py b/pyverilog/vparser/ply/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/pyverilog/vparser/ply/lex.py b/pyverilog/vparser/ply/lex.py
deleted file mode 100644
index bd32da9..0000000
--- a/pyverilog/vparser/ply/lex.py
+++ /dev/null
@@ -1,1058 +0,0 @@
-# -----------------------------------------------------------------------------
-# ply: lex.py
-#
-# Copyright (C) 2001-2011,
-# David M. Beazley (Dabeaz LLC)
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright notice,
-#   this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-# * Neither the name of the David Beazley or Dabeaz LLC may be used to
-#   endorse or promote products derived from this software without
-#   specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# -----------------------------------------------------------------------------
-
-__version__ = "3.4"
-__tabversion__ = "3.2"    # Version of table file used
-
-import re, sys, types, copy, os
-
-# This tuple contains known string types
-try:
-    # Python 2.6
-    StringTypes = (types.StringType, types.UnicodeType)
-except AttributeError:
-    # Python 3.0
-    StringTypes = (str, bytes)
-
-# Extract the code attribute of a function. Different implementations
-# are for Python 2/3 compatibility.
-
-if sys.version_info[0] < 3:
-    def func_code(f):
-        return f.func_code
-else:
-    def func_code(f):
-        return f.__code__
-
-# This regular expression is used to match valid token names
-_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
-
-# Exception thrown when invalid token encountered and no default error
-# handler is defined.
-
-class LexError(Exception):
-    def __init__(self,message,s):
-        self.args = (message,)
-        self.text = s
-
-# Token class. This class is used to represent the tokens produced.
-class LexToken(object):
-    def __str__(self):
-        return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
-    def __repr__(self):
-        return str(self)
-
-# This object is a stand-in for a logging object created by the
-# logging module.
-
-class PlyLogger(object):
-    def __init__(self,f):
-        self.f = f
-    def critical(self,msg,*args,**kwargs):
-        self.f.write((msg % args) + "\n")
-
-    def warning(self,msg,*args,**kwargs):
-        self.f.write("WARNING: "+ (msg % args) + "\n")
-
-    def error(self,msg,*args,**kwargs):
-        self.f.write("ERROR: " + (msg % args) + "\n")
-
-    info = critical
-    debug = critical
-
-# Null logger is used when no output is generated. Does nothing.
-class NullLogger(object):
-    def __getattribute__(self,name):
-        return self
-    def __call__(self,*args,**kwargs):
-        return self
-
-# -----------------------------------------------------------------------------
-#                           === Lexing Engine ===
-#
-# The following Lexer class implements the lexer runtime. There are only
-# a few public methods and attributes:
-#
-#    input()   -  Store a new string in the lexer
-#    token()   -  Get the next token
-#    clone()   -  Clone the lexer
-#
-#    lineno    -  Current line number
-#    lexpos    -  Current position in the input string
-# -----------------------------------------------------------------------------
-
-class Lexer:
-    def __init__(self):
-        self.lexre = None             # Master regular expression. This is a list of
-                                      # tuples (re,findex) where re is a compiled
-                                      # regular expression and findex is a list
-                                      # mapping regex group numbers to rules
-        self.lexretext = None         # Current regular expression strings
-        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
-        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
-        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
-        self.lexstate = "INITIAL"     # Current lexer state
-        self.lexstatestack = []       # Stack of lexer states
-        self.lexstateinfo = None      # State information
-        self.lexstateignore = {}      # Dictionary of ignored characters for each state
-        self.lexstateerrorf = {}      # Dictionary of error functions for each state
-        self.lexreflags = 0           # Optional re compile flags
-        self.lexdata = None           # Actual input data (as a string)
-        self.lexpos = 0               # Current position in input text
-        self.lexlen = 0               # Length of the input text
-        self.lexerrorf = None         # Error rule (if any)
-        self.lextokens = None         # List of valid tokens
-        self.lexignore = ""           # Ignored characters
-        self.lexliterals = ""         # Literal characters that can be passed through
-        self.lexmodule = None         # Module
-        self.lineno = 1               # Current line number
-        self.lexoptimize = 0          # Optimized mode
-
-    def clone(self,object=None):
-        c = copy.copy(self)
-
-        # If the object parameter has been supplied, it means we are attaching the
-        # lexer to a new object. In this case, we have to rebind all methods in
-        # the lexstatere and lexstateerrorf tables.
-
-        if object:
-            newtab = { }
-            for key, ritem in self.lexstatere.items():
-                newre = []
-                for cre, findex in ritem:
-                    newfindex = []
-                    for f in findex:
-                        if not f or not f[0]:
-                            newfindex.append(f)
-                            continue
-                        newfindex.append((getattr(object,f[0].__name__),f[1]))
-                    newre.append((cre,newfindex))
-                newtab[key] = newre
-            c.lexstatere = newtab
-            c.lexstateerrorf = { }
-            for key, ef in self.lexstateerrorf.items():
-                c.lexstateerrorf[key] = getattr(object,ef.__name__)
-            c.lexmodule = object
-        return c
-
-    # ------------------------------------------------------------
-    # writetab() - Write lexer information to a table file
-    # ------------------------------------------------------------
-    def writetab(self,tabfile,outputdir=""):
-        if isinstance(tabfile,types.ModuleType):
-            return
-        basetabfilename = tabfile.split(".")[-1]
-        filename = os.path.join(outputdir,basetabfilename)+".py"
-        tf = open(filename,"w")
-        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
-        tf.write("_tabversion = %s\n" % repr(__version__))
-        tf.write("_lextokens = %s\n" % repr(self.lextokens))
-        tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
-        tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
-        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
-
-        tabre = { }
-        # Collect all functions in the initial state
-        initial = self.lexstatere["INITIAL"]
-        initialfuncs = []
-        for part in initial:
-            for f in part[1]:
-                if f and f[0]:
-                    initialfuncs.append(f)
-
-        for key, lre in self.lexstatere.items():
-            titem = []
-            for i in range(len(lre)):
-                titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
-            tabre[key] = titem
-
-        tf.write("_lexstatere = %s\n" % repr(tabre))
-        tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
-
-        taberr = { }
-        for key, ef in self.lexstateerrorf.items():
-            if ef:
-                taberr[key] = ef.__name__
-            else:
-                taberr[key] = None
-        tf.write("_lexstateerrorf = %s\n" % repr(taberr))
-        tf.close()
-
-    # ------------------------------------------------------------
-    # readtab() - Read lexer information from a tab file
-    # ------------------------------------------------------------
-    def readtab(self,tabfile,fdict):
-        if isinstance(tabfile,types.ModuleType):
-            lextab = tabfile
-        else:
-            if sys.version_info[0] < 3:
-                exec("import %s as lextab" % tabfile)
-            else:
-                env = { }
-                exec("import %s as lextab" % tabfile, env,env)
-                lextab = env['lextab']
-
-        if getattr(lextab,"_tabversion","0.0") != __version__:
-            raise ImportError("Inconsistent PLY version")
-
-        self.lextokens = lextab._lextokens
-        self.lexreflags = lextab._lexreflags
-        self.lexliterals = lextab._lexliterals
-        self.lexstateinfo = lextab._lexstateinfo
-        self.lexstateignore = lextab._lexstateignore
-        self.lexstatere = { }
-        self.lexstateretext = { }
-        for key,lre in lextab._lexstatere.items():
-            titem = []
-            txtitem = []
-            for i in range(len(lre)):
-                titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
-                txtitem.append(lre[i][0])
-            self.lexstatere[key] = titem
-            self.lexstateretext[key] = txtitem
-        self.lexstateerrorf = { }
-        for key,ef in lextab._lexstateerrorf.items():
-            self.lexstateerrorf[key] = fdict[ef]
-        self.begin('INITIAL')
-
-    # ------------------------------------------------------------
-    # input() - Push a new string into the lexer
-    # ------------------------------------------------------------
-    def input(self,s):
-        # Pull off the first character to see if s looks like a string
-        c = s[:1]
-        if not isinstance(c,StringTypes):
-            raise ValueError("Expected a string")
-        self.lexdata = s
-        self.lexpos = 0
-        self.lexlen = len(s)
-
-    # ------------------------------------------------------------
-    # begin() - Changes the lexing state
-    # ------------------------------------------------------------
-    def begin(self,state):
-        if not state in self.lexstatere:
-            raise ValueError("Undefined state")
-        self.lexre = self.lexstatere[state]
-        self.lexretext = self.lexstateretext[state]
-        self.lexignore = self.lexstateignore.get(state,"")
-        self.lexerrorf = self.lexstateerrorf.get(state,None)
-        self.lexstate = state
-
-    # ------------------------------------------------------------
-    # push_state() - Changes the lexing state and saves old on stack
-    # ------------------------------------------------------------
-    def push_state(self,state):
-        self.lexstatestack.append(self.lexstate)
-        self.begin(state)
-
-    #
------------------------------------------------------------ - # pop_state() - Restores the previous state - # ------------------------------------------------------------ - def pop_state(self): - self.begin(self.lexstatestack.pop()) - - # ------------------------------------------------------------ - # current_state() - Returns the current lexing state - # ------------------------------------------------------------ - def current_state(self): - return self.lexstate - - # ------------------------------------------------------------ - # skip() - Skip ahead n characters - # ------------------------------------------------------------ - def skip(self,n): - self.lexpos += n - - # ------------------------------------------------------------ - # opttoken() - Return the next token from the Lexer - # - # Note: This function has been carefully implemented to be as fast - # as possible. Don't make changes unless you really know what - # you are doing - # ------------------------------------------------------------ - def token(self): - # Make local copies of frequently referenced attributes - lexpos = self.lexpos - lexlen = self.lexlen - lexignore = self.lexignore - lexdata = self.lexdata - - while lexpos < lexlen: - # This code provides some short-circuit code for whitespace, tabs, and other ignored characters - if lexdata[lexpos] in lexignore: - lexpos += 1 - continue - - # Look for a regular expression match - for lexre,lexindexfunc in self.lexre: - m = lexre.match(lexdata,lexpos) - if not m: continue - - # Create a token for return - tok = LexToken() - tok.value = m.group() - tok.lineno = self.lineno - tok.lexpos = lexpos - - i = m.lastindex - func,tok.type = lexindexfunc[i] - - if not func: - # If no token type was set, it's an ignored token - if tok.type: - self.lexpos = m.end() - return tok - else: - lexpos = m.end() - break - - lexpos = m.end() - - # If token is processed by a function, call it - - tok.lexer = self # Set additional attributes useful in token rules - self.lexmatch = m - self.lexpos = lexpos - - newtok = func(tok) - - # Every function must return a token, if nothing, we just move to next token - if not newtok: - lexpos = self.lexpos # This is here in case user has updated lexpos. - lexignore = self.lexignore # This is here in case there was a state change - break - - # Verify type of the token. If not in the token map, raise an error - if not self.lexoptimize: - if not newtok.type in self.lextokens: - raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % ( - func_code(func).co_filename, func_code(func).co_firstlineno, - func.__name__, newtok.type),lexdata[lexpos:]) - - return newtok - else: - # No match, see if in literals - if lexdata[lexpos] in self.lexliterals: - tok = LexToken() - tok.value = lexdata[lexpos] - tok.lineno = self.lineno - tok.type = tok.value - tok.lexpos = lexpos - self.lexpos = lexpos + 1 - return tok - - # No match. Call t_error() if defined. - if self.lexerrorf: - tok = LexToken() - tok.value = self.lexdata[lexpos:] - tok.lineno = self.lineno - tok.type = "error" - tok.lexer = self - tok.lexpos = lexpos - self.lexpos = lexpos - newtok = self.lexerrorf(tok) - if lexpos == self.lexpos: - # Error method didn't change text position at all. This is an error. - raise LexError("Scanning error. 
Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:]) - lexpos = self.lexpos - if not newtok: continue - return newtok - - self.lexpos = lexpos - raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:]) - - self.lexpos = lexpos + 1 - if self.lexdata is None: - raise RuntimeError("No input string given with input()") - return None - - # Iterator interface - def __iter__(self): - return self - - def next(self): - t = self.token() - if t is None: - raise StopIteration - return t - - __next__ = next - -# ----------------------------------------------------------------------------- -# ==== Lex Builder === -# -# The functions and classes below are used to collect lexing information -# and build a Lexer object from it. -# ----------------------------------------------------------------------------- - -# ----------------------------------------------------------------------------- -# get_caller_module_dict() -# -# This function returns a dictionary containing all of the symbols defined within -# a caller further down the call stack. This is used to get the environment -# associated with the yacc() call if none was provided. -# ----------------------------------------------------------------------------- - -def get_caller_module_dict(levels): - try: - raise RuntimeError - except RuntimeError: - e,b,t = sys.exc_info() - f = t.tb_frame - while levels > 0: - f = f.f_back - levels -= 1 - ldict = f.f_globals.copy() - if f.f_globals != f.f_locals: - ldict.update(f.f_locals) - - return ldict - -# ----------------------------------------------------------------------------- -# _funcs_to_names() -# -# Given a list of regular expression functions, this converts it to a list -# suitable for output to a table file -# ----------------------------------------------------------------------------- - -def _funcs_to_names(funclist,namelist): - result = [] - for f,name in zip(funclist,namelist): - if f and f[0]: - result.append((name, f[1])) - else: - result.append(f) - return result - -# ----------------------------------------------------------------------------- -# _names_to_funcs() -# -# Given a list of regular expression function names, this converts it back to -# functions. -# ----------------------------------------------------------------------------- - -def _names_to_funcs(namelist,fdict): - result = [] - for n in namelist: - if n and n[0]: - result.append((fdict[n[0]],n[1])) - else: - result.append(n) - return result - -# ----------------------------------------------------------------------------- -# _form_master_re() -# -# This function takes a list of all of the regex components and attempts to -# form the master regular expression. Given limitations in the Python re -# module, it may be necessary to break the master regex into separate expressions. 
-# ----------------------------------------------------------------------------- - -def _form_master_re(relist,reflags,ldict,toknames): - if not relist: return [] - regex = "|".join(relist) - try: - lexre = re.compile(regex,re.VERBOSE | reflags) - - # Build the index to function map for the matching engine - lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1) - lexindexnames = lexindexfunc[:] - - for f,i in lexre.groupindex.items(): - handle = ldict.get(f,None) - if type(handle) in (types.FunctionType, types.MethodType): - lexindexfunc[i] = (handle,toknames[f]) - lexindexnames[i] = f - elif handle is not None: - lexindexnames[i] = f - if f.find("ignore_") > 0: - lexindexfunc[i] = (None,None) - else: - lexindexfunc[i] = (None, toknames[f]) - - return [(lexre,lexindexfunc)],[regex],[lexindexnames] - except Exception: - m = int(len(relist)/2) - if m == 0: m = 1 - llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames) - rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames) - return llist+rlist, lre+rre, lnames+rnames - -# ----------------------------------------------------------------------------- -# def _statetoken(s,names) -# -# Given a declaration name s of the form "t_" and a dictionary whose keys are -# state names, this function returns a tuple (states,tokenname) where states -# is a tuple of state names and tokenname is the name of the token. For example, -# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM') -# ----------------------------------------------------------------------------- - -def _statetoken(s,names): - nonstate = 1 - parts = s.split("_") - for i in range(1,len(parts)): - if not parts[i] in names and parts[i] != 'ANY': break - if i > 1: - states = tuple(parts[1:i]) - else: - states = ('INITIAL',) - - if 'ANY' in states: - states = tuple(names) - - tokenname = "_".join(parts[i:]) - return (states,tokenname) - - -# ----------------------------------------------------------------------------- -# LexerReflect() -# -# This class represents information needed to build a lexer as extracted from a -# user's input file. 
-# ----------------------------------------------------------------------------- -class LexerReflect(object): - def __init__(self,ldict,log=None,reflags=0): - self.ldict = ldict - self.error_func = None - self.tokens = [] - self.reflags = reflags - self.stateinfo = { 'INITIAL' : 'inclusive'} - self.files = {} - self.error = 0 - - if log is None: - self.log = PlyLogger(sys.stderr) - else: - self.log = log - - # Get all of the basic information - def get_all(self): - self.get_tokens() - self.get_literals() - self.get_states() - self.get_rules() - - # Validate all of the information - def validate_all(self): - self.validate_tokens() - self.validate_literals() - self.validate_rules() - return self.error - - # Get the tokens map - def get_tokens(self): - tokens = self.ldict.get("tokens",None) - if not tokens: - self.log.error("No token list is defined") - self.error = 1 - return - - if not isinstance(tokens,(list, tuple)): - self.log.error("tokens must be a list or tuple") - self.error = 1 - return - - if not tokens: - self.log.error("tokens is empty") - self.error = 1 - return - - self.tokens = tokens - - # Validate the tokens - def validate_tokens(self): - terminals = {} - for n in self.tokens: - if not _is_identifier.match(n): - self.log.error("Bad token name '%s'",n) - self.error = 1 - if n in terminals: - self.log.warning("Token '%s' multiply defined", n) - terminals[n] = 1 - - # Get the literals specifier - def get_literals(self): - self.literals = self.ldict.get("literals","") - - # Validate literals - def validate_literals(self): - try: - for c in self.literals: - if not isinstance(c,StringTypes) or len(c) > 1: - self.log.error("Invalid literal %s. Must be a single character", repr(c)) - self.error = 1 - continue - - except TypeError: - self.log.error("Invalid literals specification. literals must be a sequence of characters") - self.error = 1 - - def get_states(self): - self.states = self.ldict.get("states",None) - # Build statemap - if self.states: - if not isinstance(self.states,(tuple,list)): - self.log.error("states must be defined as a tuple or list") - self.error = 1 - else: - for s in self.states: - if not isinstance(s,tuple) or len(s) != 2: - self.log.error("Invalid state specifier %s. 
Must be a tuple (statename,'exclusive|inclusive')",repr(s)) - self.error = 1 - continue - name, statetype = s - if not isinstance(name,StringTypes): - self.log.error("State name %s must be a string", repr(name)) - self.error = 1 - continue - if not (statetype == 'inclusive' or statetype == 'exclusive'): - self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name) - self.error = 1 - continue - if name in self.stateinfo: - self.log.error("State '%s' already defined",name) - self.error = 1 - continue - self.stateinfo[name] = statetype - - # Get all of the symbols with a t_ prefix and sort them into various - # categories (functions, strings, error functions, and ignore characters) - - def get_rules(self): - tsymbols = [f for f in self.ldict if f[:2] == 't_' ] - - # Now build up a list of functions and a list of strings - - self.toknames = { } # Mapping of symbols to token names - self.funcsym = { } # Symbols defined as functions - self.strsym = { } # Symbols defined as strings - self.ignore = { } # Ignore strings by state - self.errorf = { } # Error functions by state - - for s in self.stateinfo: - self.funcsym[s] = [] - self.strsym[s] = [] - - if len(tsymbols) == 0: - self.log.error("No rules of the form t_rulename are defined") - self.error = 1 - return - - for f in tsymbols: - t = self.ldict[f] - states, tokname = _statetoken(f,self.stateinfo) - self.toknames[f] = tokname - - if hasattr(t,"__call__"): - if tokname == 'error': - for s in states: - self.errorf[s] = t - elif tokname == 'ignore': - line = func_code(t).co_firstlineno - file = func_code(t).co_filename - self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__) - self.error = 1 - else: - for s in states: - self.funcsym[s].append((f,t)) - elif isinstance(t, StringTypes): - if tokname == 'ignore': - for s in states: - self.ignore[s] = t - if "\\" in t: - self.log.warning("%s contains a literal backslash '\\'",f) - - elif tokname == 'error': - self.log.error("Rule '%s' must be defined as a function", f) - self.error = 1 - else: - for s in states: - self.strsym[s].append((f,t)) - else: - self.log.error("%s not defined as a function or string", f) - self.error = 1 - - # Sort the functions by line number - for f in self.funcsym.values(): - if sys.version_info[0] < 3: - f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno)) - else: - # Python 3.0 - f.sort(key=lambda x: func_code(x[1]).co_firstlineno) - - # Sort the strings by regular expression length - for s in self.strsym.values(): - if sys.version_info[0] < 3: - s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1]))) - else: - # Python 3.0 - s.sort(key=lambda x: len(x[1]),reverse=True) - - # Validate all of the t_rules collected - def validate_rules(self): - for state in self.stateinfo: - # Validate all rules defined by functions - - - - for fname, f in self.funcsym[state]: - line = func_code(f).co_firstlineno - file = func_code(f).co_filename - self.files[file] = 1 - - tokname = self.toknames[fname] - if isinstance(f, types.MethodType): - reqargs = 2 - else: - reqargs = 1 - nargs = func_code(f).co_argcount - if nargs > reqargs: - self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__) - self.error = 1 - continue - - if nargs < reqargs: - self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__) - self.error = 1 - continue - - if not f.__doc__: - self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__) - self.error 
= 1 - continue - - try: - c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags) - if c.match(""): - self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__) - self.error = 1 - except re.error: - _etype, e, _etrace = sys.exc_info() - self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e) - if '#' in f.__doc__: - self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__) - self.error = 1 - - # Validate all rules defined by strings - for name,r in self.strsym[state]: - tokname = self.toknames[name] - if tokname == 'error': - self.log.error("Rule '%s' must be defined as a function", name) - self.error = 1 - continue - - if not tokname in self.tokens and tokname.find("ignore_") < 0: - self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname) - self.error = 1 - continue - - try: - c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags) - if (c.match("")): - self.log.error("Regular expression for rule '%s' matches empty string",name) - self.error = 1 - except re.error: - _etype, e, _etrace = sys.exc_info() - self.log.error("Invalid regular expression for rule '%s'. %s",name,e) - if '#' in r: - self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name) - self.error = 1 - - if not self.funcsym[state] and not self.strsym[state]: - self.log.error("No rules defined for state '%s'",state) - self.error = 1 - - # Validate the error function - efunc = self.errorf.get(state,None) - if efunc: - f = efunc - line = func_code(f).co_firstlineno - file = func_code(f).co_filename - self.files[file] = 1 - - if isinstance(f, types.MethodType): - reqargs = 2 - else: - reqargs = 1 - nargs = func_code(f).co_argcount - if nargs > reqargs: - self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__) - self.error = 1 - - if nargs < reqargs: - self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__) - self.error = 1 - - for f in self.files: - self.validate_file(f) - - - # ----------------------------------------------------------------------------- - # validate_file() - # - # This checks to see if there are duplicated t_rulename() functions or strings - # in the parser input file. This is done using a simple regular expression - # match on each line in the given file. - # ----------------------------------------------------------------------------- - - def validate_file(self,filename): - import os.path - base,ext = os.path.splitext(filename) - if ext != '.py': return # No idea what the file is. Return OK - - try: - f = open(filename) - lines = f.readlines() - f.close() - except IOError: - return # Couldn't find the file. Don't worry about it - - fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(') - sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=') - - counthash = { } - linen = 1 - for l in lines: - m = fre.match(l) - if not m: - m = sre.match(l) - if m: - name = m.group(1) - prev = counthash.get(name) - if not prev: - counthash[name] = linen - else: - self.log.error("%s:%d: Rule %s redefined. 
Previously defined on line %d",filename,linen,name,prev) - self.error = 1 - linen += 1 - -# ----------------------------------------------------------------------------- -# lex(module) -# -# Build all of the regular expression rules from definitions in the supplied module -# ----------------------------------------------------------------------------- -def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None): - global lexer - ldict = None - stateinfo = { 'INITIAL' : 'inclusive'} - lexobj = Lexer() - lexobj.lexoptimize = optimize - global token,input - - if errorlog is None: - errorlog = PlyLogger(sys.stderr) - - if debug: - if debuglog is None: - debuglog = PlyLogger(sys.stderr) - - # Get the module dictionary used for the lexer - if object: module = object - - if module: - _items = [(k,getattr(module,k)) for k in dir(module)] - ldict = dict(_items) - else: - ldict = get_caller_module_dict(2) - - # Collect parser information from the dictionary - linfo = LexerReflect(ldict,log=errorlog,reflags=reflags) - linfo.get_all() - if not optimize: - if linfo.validate_all(): - raise SyntaxError("Can't build lexer") - - if optimize and lextab: - try: - lexobj.readtab(lextab,ldict) - token = lexobj.token - input = lexobj.input - lexer = lexobj - return lexobj - - except ImportError: - pass - - # Dump some basic debugging information - if debug: - debuglog.info("lex: tokens = %r", linfo.tokens) - debuglog.info("lex: literals = %r", linfo.literals) - debuglog.info("lex: states = %r", linfo.stateinfo) - - # Build a dictionary of valid token names - lexobj.lextokens = { } - for n in linfo.tokens: - lexobj.lextokens[n] = 1 - - # Get literals specification - if isinstance(linfo.literals,(list,tuple)): - lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals) - else: - lexobj.lexliterals = linfo.literals - - # Get the stateinfo dictionary - stateinfo = linfo.stateinfo - - regexs = { } - # Build the master regular expressions - for state in stateinfo: - regex_list = [] - - # Add rules defined by functions first - for fname, f in linfo.funcsym[state]: - line = func_code(f).co_firstlineno - file = func_code(f).co_filename - regex_list.append("(?P<%s>%s)" % (fname,f.__doc__)) - if debug: - debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state) - - # Now add all of the simple rules - for name,r in linfo.strsym[state]: - regex_list.append("(?P<%s>%s)" % (name,r)) - if debug: - debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state) - - regexs[state] = regex_list - - # Build the master regular expressions - - if debug: - debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====") - - for state in regexs: - lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames) - lexobj.lexstatere[state] = lexre - lexobj.lexstateretext[state] = re_text - lexobj.lexstaterenames[state] = re_names - if debug: - for i in range(len(re_text)): - debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i]) - - # For inclusive states, we need to add the regular expressions from the INITIAL state - for state,stype in stateinfo.items(): - if state != "INITIAL" and stype == 'inclusive': - lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL']) - lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL']) - lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL']) - - lexobj.lexstateinfo = stateinfo - lexobj.lexre = lexobj.lexstatere["INITIAL"] - 
lexobj.lexretext = lexobj.lexstateretext["INITIAL"] - lexobj.lexreflags = reflags - - # Set up ignore variables - lexobj.lexstateignore = linfo.ignore - lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","") - - # Set up error functions - lexobj.lexstateerrorf = linfo.errorf - lexobj.lexerrorf = linfo.errorf.get("INITIAL",None) - if not lexobj.lexerrorf: - errorlog.warning("No t_error rule is defined") - - # Check state information for ignore and error rules - for s,stype in stateinfo.items(): - if stype == 'exclusive': - if not s in linfo.errorf: - errorlog.warning("No error rule is defined for exclusive state '%s'", s) - if not s in linfo.ignore and lexobj.lexignore: - errorlog.warning("No ignore rule is defined for exclusive state '%s'", s) - elif stype == 'inclusive': - if not s in linfo.errorf: - linfo.errorf[s] = linfo.errorf.get("INITIAL",None) - if not s in linfo.ignore: - linfo.ignore[s] = linfo.ignore.get("INITIAL","") - - # Create global versions of the token() and input() functions - token = lexobj.token - input = lexobj.input - lexer = lexobj - - # If in optimize mode, we write the lextab - if lextab and optimize: - lexobj.writetab(lextab,outputdir) - - return lexobj - -# ----------------------------------------------------------------------------- -# runmain() -# -# This runs the lexer as a main program -# ----------------------------------------------------------------------------- - -def runmain(lexer=None,data=None): - if not data: - try: - filename = sys.argv[1] - f = open(filename) - data = f.read() - f.close() - except IndexError: - sys.stdout.write("Reading from standard input (type EOF to end):\n") - data = sys.stdin.read() - - if lexer: - _input = lexer.input - else: - _input = input - _input(data) - if lexer: - _token = lexer.token - else: - _token = token - - while 1: - tok = _token() - if not tok: break - sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos)) - -# ----------------------------------------------------------------------------- -# @TOKEN(regex) -# -# This decorator function can be used to set the regex expression on a function -# when its docstring might need to be set in an alternative way -# ----------------------------------------------------------------------------- - -def TOKEN(r): - def set_doc(f): - if hasattr(r,"__call__"): - f.__doc__ = r.__doc__ - else: - f.__doc__ = r - return f - return set_doc - -# Alternative spelling of the TOKEN decorator -Token = TOKEN - diff --git a/pyverilog/vparser/ply/yacc.py b/pyverilog/vparser/ply/yacc.py deleted file mode 100644 index 921d4ca..0000000 --- a/pyverilog/vparser/ply/yacc.py +++ /dev/null @@ -1,3279 +0,0 @@ -# ----------------------------------------------------------------------------- -# ply: yacc.py -# -# Copyright (C) 2001-2011, -# David M. Beazley (Dabeaz LLC) -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# * Neither the name of the David Beazley or Dabeaz LLC may be used to -# endorse or promote products derived from this software without -# specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# ----------------------------------------------------------------------------- -# -# This implements an LR parser that is constructed from grammar rules defined -# as Python functions. The grammer is specified by supplying the BNF inside -# Python documentation strings. The inspiration for this technique was borrowed -# from John Aycock's Spark parsing system. PLY might be viewed as cross between -# Spark and the GNU bison utility. -# -# The current implementation is only somewhat object-oriented. The -# LR parser itself is defined in terms of an object (which allows multiple -# parsers to co-exist). However, most of the variables used during table -# construction are defined in terms of global variables. Users shouldn't -# notice unless they are trying to define multiple parsers at the same -# time using threads (in which case they should have their head examined). -# -# This implementation supports both SLR and LALR(1) parsing. LALR(1) -# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu), -# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles, -# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced -# by the more efficient DeRemer and Pennello algorithm. -# -# :::::::: WARNING ::::::: -# -# Construction of LR parsing tables is fairly complicated and expensive. -# To make this module run fast, a *LOT* of work has been put into -# optimization---often at the expensive of readability and what might -# consider to be good Python "coding style." Modify the code at your -# own risk! -# ---------------------------------------------------------------------------- - -__version__ = "3.4" -__tabversion__ = "3.2" # Table version - -#----------------------------------------------------------------------------- -# === User configurable parameters === -# -# Change these to modify the default behavior of yacc (if you wish) -#----------------------------------------------------------------------------- - -yaccdebug = 1 # Debugging mode. If set, yacc generates a - # a 'parser.out' file in the current directory - -debug_file = 'parser.out' # Default name of the debugging file -tab_module = 'parsetab' # Default name of the table module -default_lr = 'LALR' # Default LR table generation method - -error_count = 3 # Number of symbols that must be shifted to leave recovery mode - -yaccdevel = 0 # Set to True if developing yacc. This turns off optimized - # implementations of certain functions. - -resultlimit = 40 # Size limit of results when running in debug mode. 
- -pickle_protocol = 0 # Protocol to use when writing pickle files - -import re, types, sys, os.path - -# Compatibility function for python 2.6/3.0 -if sys.version_info[0] < 3: - def func_code(f): - return f.func_code -else: - def func_code(f): - return f.__code__ - -# Compatibility -try: - MAXINT = sys.maxint -except AttributeError: - MAXINT = sys.maxsize - -# Python 2.x/3.0 compatibility. -def load_ply_lex(): - if sys.version_info[0] < 3: - import lex - else: - import ply.lex as lex - return lex - -# This object is a stand-in for a logging object created by the -# logging module. PLY will use this by default to create things -# such as the parser.out file. If a user wants more detailed -# information, they can create their own logging object and pass -# it into PLY. - -class PlyLogger(object): - def __init__(self,f): - self.f = f - def debug(self,msg,*args,**kwargs): - self.f.write((msg % args) + "\n") - info = debug - - def warning(self,msg,*args,**kwargs): - self.f.write("WARNING: "+ (msg % args) + "\n") - - def error(self,msg,*args,**kwargs): - self.f.write("ERROR: " + (msg % args) + "\n") - - critical = debug - -# Null logger is used when no output is generated. Does nothing. -class NullLogger(object): - def __getattribute__(self,name): - return self - def __call__(self,*args,**kwargs): - return self - -# Exception raised for yacc-related errors -class YaccError(Exception): pass - -# Format the result message that the parser produces when running in debug mode. -def format_result(r): - repr_str = repr(r) - if '\n' in repr_str: repr_str = repr(repr_str) - if len(repr_str) > resultlimit: - repr_str = repr_str[:resultlimit]+" ..." - result = "<%s @ 0x%x> (%s)" % (type(r).__name__,id(r),repr_str) - return result - - -# Format stack entries when the parser is running in debug mode -def format_stack_entry(r): - repr_str = repr(r) - if '\n' in repr_str: repr_str = repr(repr_str) - if len(repr_str) < 16: - return repr_str - else: - return "<%s @ 0x%x>" % (type(r).__name__,id(r)) - -#----------------------------------------------------------------------------- -# === LR Parsing Engine === -# -# The following classes are used for the LR parser itself. These are not -# used during table construction and are independent of the actual LR -# table generation algorithm -#----------------------------------------------------------------------------- - -# This class is used to hold non-terminal grammar symbols during parsing. -# It normally has the following attributes set: -# .type = Grammar symbol type -# .value = Symbol value -# .lineno = Starting line number -# .endlineno = Ending line number (optional, set automatically) -# .lexpos = Starting lex position -# .endlexpos = Ending lex position (optional, set automatically) - -class YaccSymbol: - def __str__(self): return self.type - def __repr__(self): return str(self) - -# This class is a wrapper around the objects actually passed to each -# grammar rule. Index lookup and assignment actually assign the -# .value attribute of the underlying YaccSymbol object. -# The lineno() method returns the line number of a given -# item (or 0 if not defined). The linespan() method returns -# a tuple of (startline,endline) representing the range of lines -# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos) -# representing the range of positional information for a symbol. 
- -class YaccProduction: - def __init__(self,s,stack=None): - self.slice = s - self.stack = stack - self.lexer = None - self.parser= None - def __getitem__(self,n): - if n >= 0: return self.slice[n].value - else: return self.stack[n].value - - def __setitem__(self,n,v): - self.slice[n].value = v - - def __getslice__(self,i,j): - return [s.value for s in self.slice[i:j]] - - def __len__(self): - return len(self.slice) - - def lineno(self,n): - return getattr(self.slice[n],"lineno",0) - - def set_lineno(self,n,lineno): - self.slice[n].lineno = lineno - - def linespan(self,n): - startline = getattr(self.slice[n],"lineno",0) - endline = getattr(self.slice[n],"endlineno",startline) - return startline,endline - - def lexpos(self,n): - return getattr(self.slice[n],"lexpos",0) - - def lexspan(self,n): - startpos = getattr(self.slice[n],"lexpos",0) - endpos = getattr(self.slice[n],"endlexpos",startpos) - return startpos,endpos - - def error(self): - raise SyntaxError - - -# ----------------------------------------------------------------------------- -# == LRParser == -# -# The LR Parsing engine. -# ----------------------------------------------------------------------------- - -class LRParser: - def __init__(self,lrtab,errorf): - self.productions = lrtab.lr_productions - self.action = lrtab.lr_action - self.goto = lrtab.lr_goto - self.errorfunc = errorf - - def errok(self): - self.errorok = 1 - - def restart(self): - del self.statestack[:] - del self.symstack[:] - sym = YaccSymbol() - sym.type = '$end' - self.symstack.append(sym) - self.statestack.append(0) - - def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None): - if debug or yaccdevel: - if isinstance(debug,int): - debug = PlyLogger(sys.stderr) - return self.parsedebug(input,lexer,debug,tracking,tokenfunc) - elif tracking: - return self.parseopt(input,lexer,debug,tracking,tokenfunc) - else: - return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc) - - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # parsedebug(). - # - # This is the debugging enabled version of parse(). All changes made to the - # parsing engine should be made here. For the non-debugging version, - # copy this code to a method parseopt() and delete all of the sections - # enclosed in: - # - # #--! DEBUG - # statements - # #--! DEBUG - # - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None): - lookahead = None # Current lookahead symbol - lookaheadstack = [ ] # Stack of lookahead symbols - actions = self.action # Local reference to action table (to avoid lookup on self.) - goto = self.goto # Local reference to goto table (to avoid lookup on self.) - prod = self.productions # Local reference to production list (to avoid lookup on self.) - pslice = YaccProduction(None) # Production object passed to grammar rules - errorcount = 0 # Used during error recovery - - # --! DEBUG - debug.info("PLY: PARSE DEBUG START") - # --! 
DEBUG - - # If no lexer was given, we will try to use the lex module - if not lexer: - lex = load_ply_lex() - lexer = lex.lexer - - # Set up the lexer and parser objects on pslice - pslice.lexer = lexer - pslice.parser = self - - # If input was supplied, pass to lexer - if input is not None: - lexer.input(input) - - if tokenfunc is None: - # Tokenize function - get_token = lexer.token - else: - get_token = tokenfunc - - # Set up the state and symbol stacks - - statestack = [ ] # Stack of parsing states - self.statestack = statestack - symstack = [ ] # Stack of grammar symbols - self.symstack = symstack - - pslice.stack = symstack # Put in the production - errtoken = None # Err token - - # The start state is assumed to be (0,$end) - - statestack.append(0) - sym = YaccSymbol() - sym.type = "$end" - symstack.append(sym) - state = 0 - while 1: - # Get the next symbol on the input. If a lookahead symbol - # is already set, we just use that. Otherwise, we'll pull - # the next token off of the lookaheadstack or from the lexer - - # --! DEBUG - debug.debug('') - debug.debug('State : %s', state) - # --! DEBUG - - if not lookahead: - if not lookaheadstack: - lookahead = get_token() # Get the next token - else: - lookahead = lookaheadstack.pop() - if not lookahead: - lookahead = YaccSymbol() - lookahead.type = "$end" - - # --! DEBUG - debug.debug('Stack : %s', - ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) - # --! DEBUG - - # Check the action table - ltype = lookahead.type - t = actions[state].get(ltype) - - if t is not None: - if t > 0: - # shift a symbol on the stack - statestack.append(t) - state = t - - # --! DEBUG - debug.debug("Action : Shift and goto state %s", t) - # --! DEBUG - - symstack.append(lookahead) - lookahead = None - - # Decrease error count on successful shift - if errorcount: errorcount -=1 - continue - - if t < 0: - # reduce a symbol on the stack, emit a production - p = prod[-t] - pname = p.name - plen = p.len - - # Get production function - sym = YaccSymbol() - sym.type = pname # Production name - sym.value = None - - # --! DEBUG - if plen: - debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t) - else: - debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t) - - # --! DEBUG - - if plen: - targ = symstack[-plen-1:] - targ[0] = sym - - # --! TRACKING - if tracking: - t1 = targ[1] - sym.lineno = t1.lineno - sym.lexpos = t1.lexpos - t1 = targ[-1] - sym.endlineno = getattr(t1,"endlineno",t1.lineno) - sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos) - - # --! TRACKING - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # The code enclosed in this section is duplicated - # below as a performance optimization. Make sure - # changes get made in both locations. - - pslice.slice = targ - - try: - # Call the grammar rule with our special slice object - del symstack[-plen:] - del statestack[-plen:] - p.callable(pslice) - # --! DEBUG - debug.info("Result : %s", format_result(pslice[0])) - # --! DEBUG - symstack.append(sym) - state = goto[statestack[-1]][pname] - statestack.append(state) - except SyntaxError: - # If an error was set. Enter error recovery state - lookaheadstack.append(lookahead) - symstack.pop() - statestack.pop() - state = statestack[-1] - sym.type = 'error' - lookahead = sym - errorcount = error_count - self.errorok = 0 - continue - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - else: - - # --! 
TRACKING - if tracking: - sym.lineno = lexer.lineno - sym.lexpos = lexer.lexpos - # --! TRACKING - - targ = [ sym ] - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # The code enclosed in this section is duplicated - # above as a performance optimization. Make sure - # changes get made in both locations. - - pslice.slice = targ - - try: - # Call the grammar rule with our special slice object - p.callable(pslice) - # --! DEBUG - debug.info("Result : %s", format_result(pslice[0])) - # --! DEBUG - symstack.append(sym) - state = goto[statestack[-1]][pname] - statestack.append(state) - except SyntaxError: - # If an error was set. Enter error recovery state - lookaheadstack.append(lookahead) - symstack.pop() - statestack.pop() - state = statestack[-1] - sym.type = 'error' - lookahead = sym - errorcount = error_count - self.errorok = 0 - continue - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - if t == 0: - n = symstack[-1] - result = getattr(n,"value",None) - # --! DEBUG - debug.info("Done : Returning %s", format_result(result)) - debug.info("PLY: PARSE DEBUG END") - # --! DEBUG - return result - - if t == None: - - # --! DEBUG - debug.error('Error : %s', - ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) - # --! DEBUG - - # We have some kind of parsing error here. To handle - # this, we are going to push the current token onto - # the tokenstack and replace it with an 'error' token. - # If there are any synchronization rules, they may - # catch it. - # - # In addition to pushing the error token, we call call - # the user defined p_error() function if this is the - # first syntax error. This function is only called if - # errorcount == 0. - if errorcount == 0 or self.errorok: - errorcount = error_count - self.errorok = 0 - errtoken = lookahead - if errtoken.type == "$end": - errtoken = None # End of file! - if self.errorfunc: - global errok,token,restart - errok = self.errok # Set some special functions available in error recovery - token = get_token - restart = self.restart - if errtoken and not hasattr(errtoken,'lexer'): - errtoken.lexer = lexer - tok = self.errorfunc(errtoken) - del errok, token, restart # Delete special functions - - if self.errorok: - # User must have done some kind of panic - # mode recovery on their own. The - # returned token is the next lookahead - lookahead = tok - errtoken = None - continue - else: - if errtoken: - if hasattr(errtoken,"lineno"): lineno = lookahead.lineno - else: lineno = 0 - if lineno: - sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type)) - else: - sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type) - else: - sys.stderr.write("yacc: Parse error in input. EOF\n") - return - - else: - errorcount = error_count - - # case 1: the statestack only has 1 entry on it. If we're in this state, the - # entire parse has been rolled back and we're completely hosed. The token is - # discarded and we just keep going. - - if len(statestack) <= 1 and lookahead.type != "$end": - lookahead = None - errtoken = None - state = 0 - # Nuke the pushback stack - del lookaheadstack[:] - continue - - # case 2: the statestack has a couple of entries on it, but we're - # at the end of the file. nuke the top entry and generate an error token - - # Start nuking entries on the stack - if lookahead.type == "$end": - # Whoa. We're really hosed here. Bail out - return - - if lookahead.type != 'error': - sym = symstack[-1] - if sym.type == 'error': - # Hmmm. 
Error is on top of stack, we'll just nuke input - # symbol and continue - lookahead = None - continue - t = YaccSymbol() - t.type = 'error' - if hasattr(lookahead,"lineno"): - t.lineno = lookahead.lineno - t.value = lookahead - lookaheadstack.append(lookahead) - lookahead = t - else: - symstack.pop() - statestack.pop() - state = statestack[-1] # Potential bug fix - - continue - - # Call an error function here - raise RuntimeError("yacc: internal parser error!!!\n") - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # parseopt(). - # - # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY. - # Edit the debug version above, then copy any modifications to the method - # below while removing #--! DEBUG sections. - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - - def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None): - lookahead = None # Current lookahead symbol - lookaheadstack = [ ] # Stack of lookahead symbols - actions = self.action # Local reference to action table (to avoid lookup on self.) - goto = self.goto # Local reference to goto table (to avoid lookup on self.) - prod = self.productions # Local reference to production list (to avoid lookup on self.) - pslice = YaccProduction(None) # Production object passed to grammar rules - errorcount = 0 # Used during error recovery - - # If no lexer was given, we will try to use the lex module - if not lexer: - lex = load_ply_lex() - lexer = lex.lexer - - # Set up the lexer and parser objects on pslice - pslice.lexer = lexer - pslice.parser = self - - # If input was supplied, pass to lexer - if input is not None: - lexer.input(input) - - if tokenfunc is None: - # Tokenize function - get_token = lexer.token - else: - get_token = tokenfunc - - # Set up the state and symbol stacks - - statestack = [ ] # Stack of parsing states - self.statestack = statestack - symstack = [ ] # Stack of grammar symbols - self.symstack = symstack - - pslice.stack = symstack # Put in the production - errtoken = None # Err token - - # The start state is assumed to be (0,$end) - - statestack.append(0) - sym = YaccSymbol() - sym.type = '$end' - symstack.append(sym) - state = 0 - while 1: - # Get the next symbol on the input. If a lookahead symbol - # is already set, we just use that. Otherwise, we'll pull - # the next token off of the lookaheadstack or from the lexer - - if not lookahead: - if not lookaheadstack: - lookahead = get_token() # Get the next token - else: - lookahead = lookaheadstack.pop() - if not lookahead: - lookahead = YaccSymbol() - lookahead.type = '$end' - - # Check the action table - ltype = lookahead.type - t = actions[state].get(ltype) - - if t is not None: - if t > 0: - # shift a symbol on the stack - statestack.append(t) - state = t - - symstack.append(lookahead) - lookahead = None - - # Decrease error count on successful shift - if errorcount: errorcount -=1 - continue - - if t < 0: - # reduce a symbol on the stack, emit a production - p = prod[-t] - pname = p.name - plen = p.len - - # Get production function - sym = YaccSymbol() - sym.type = pname # Production name - sym.value = None - - if plen: - targ = symstack[-plen-1:] - targ[0] = sym - - # --! TRACKING - if tracking: - t1 = targ[1] - sym.lineno = t1.lineno - sym.lexpos = t1.lexpos - t1 = targ[-1] - sym.endlineno = getattr(t1,"endlineno",t1.lineno) - sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos) - - # --! TRACKING - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
- # The code enclosed in this section is duplicated - # below as a performance optimization. Make sure - # changes get made in both locations. - - pslice.slice = targ - - try: - # Call the grammar rule with our special slice object - del symstack[-plen:] - del statestack[-plen:] - p.callable(pslice) - symstack.append(sym) - state = goto[statestack[-1]][pname] - statestack.append(state) - except SyntaxError: - # If an error was set. Enter error recovery state - lookaheadstack.append(lookahead) - symstack.pop() - statestack.pop() - state = statestack[-1] - sym.type = 'error' - lookahead = sym - errorcount = error_count - self.errorok = 0 - continue - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - else: - - # --! TRACKING - if tracking: - sym.lineno = lexer.lineno - sym.lexpos = lexer.lexpos - # --! TRACKING - - targ = [ sym ] - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # The code enclosed in this section is duplicated - # above as a performance optimization. Make sure - # changes get made in both locations. - - pslice.slice = targ - - try: - # Call the grammar rule with our special slice object - p.callable(pslice) - symstack.append(sym) - state = goto[statestack[-1]][pname] - statestack.append(state) - except SyntaxError: - # If an error was set. Enter error recovery state - lookaheadstack.append(lookahead) - symstack.pop() - statestack.pop() - state = statestack[-1] - sym.type = 'error' - lookahead = sym - errorcount = error_count - self.errorok = 0 - continue - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - if t == 0: - n = symstack[-1] - return getattr(n,"value",None) - - if t == None: - - # We have some kind of parsing error here. To handle - # this, we are going to push the current token onto - # the tokenstack and replace it with an 'error' token. - # If there are any synchronization rules, they may - # catch it. - # - # In addition to pushing the error token, we call call - # the user defined p_error() function if this is the - # first syntax error. This function is only called if - # errorcount == 0. - if errorcount == 0 or self.errorok: - errorcount = error_count - self.errorok = 0 - errtoken = lookahead - if errtoken.type == '$end': - errtoken = None # End of file! - if self.errorfunc: - global errok,token,restart - errok = self.errok # Set some special functions available in error recovery - token = get_token - restart = self.restart - if errtoken and not hasattr(errtoken,'lexer'): - errtoken.lexer = lexer - tok = self.errorfunc(errtoken) - del errok, token, restart # Delete special functions - - if self.errorok: - # User must have done some kind of panic - # mode recovery on their own. The - # returned token is the next lookahead - lookahead = tok - errtoken = None - continue - else: - if errtoken: - if hasattr(errtoken,"lineno"): lineno = lookahead.lineno - else: lineno = 0 - if lineno: - sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type)) - else: - sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type) - else: - sys.stderr.write("yacc: Parse error in input. EOF\n") - return - - else: - errorcount = error_count - - # case 1: the statestack only has 1 entry on it. If we're in this state, the - # entire parse has been rolled back and we're completely hosed. The token is - # discarded and we just keep going. 
- - if len(statestack) <= 1 and lookahead.type != '$end': - lookahead = None - errtoken = None - state = 0 - # Nuke the pushback stack - del lookaheadstack[:] - continue - - # case 2: the statestack has a couple of entries on it, but we're - # at the end of the file. nuke the top entry and generate an error token - - # Start nuking entries on the stack - if lookahead.type == '$end': - # Whoa. We're really hosed here. Bail out - return - - if lookahead.type != 'error': - sym = symstack[-1] - if sym.type == 'error': - # Hmmm. Error is on top of stack, we'll just nuke input - # symbol and continue - lookahead = None - continue - t = YaccSymbol() - t.type = 'error' - if hasattr(lookahead,"lineno"): - t.lineno = lookahead.lineno - t.value = lookahead - lookaheadstack.append(lookahead) - lookahead = t - else: - symstack.pop() - statestack.pop() - state = statestack[-1] # Potential bug fix - - continue - - # Call an error function here - raise RuntimeError("yacc: internal parser error!!!\n") - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # parseopt_notrack(). - # - # Optimized version of parseopt() with line number tracking removed. - # DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove - # code in the #--! TRACKING sections - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None): - lookahead = None # Current lookahead symbol - lookaheadstack = [ ] # Stack of lookahead symbols - actions = self.action # Local reference to action table (to avoid lookup on self.) - goto = self.goto # Local reference to goto table (to avoid lookup on self.) - prod = self.productions # Local reference to production list (to avoid lookup on self.) - pslice = YaccProduction(None) # Production object passed to grammar rules - errorcount = 0 # Used during error recovery - - # If no lexer was given, we will try to use the lex module - if not lexer: - lex = load_ply_lex() - lexer = lex.lexer - - # Set up the lexer and parser objects on pslice - pslice.lexer = lexer - pslice.parser = self - - # If input was supplied, pass to lexer - if input is not None: - lexer.input(input) - - if tokenfunc is None: - # Tokenize function - get_token = lexer.token - else: - get_token = tokenfunc - - # Set up the state and symbol stacks - - statestack = [ ] # Stack of parsing states - self.statestack = statestack - symstack = [ ] # Stack of grammar symbols - self.symstack = symstack - - pslice.stack = symstack # Put in the production - errtoken = None # Err token - - # The start state is assumed to be (0,$end) - - statestack.append(0) - sym = YaccSymbol() - sym.type = '$end' - symstack.append(sym) - state = 0 - while 1: - # Get the next symbol on the input. If a lookahead symbol - # is already set, we just use that. 
Otherwise, we'll pull - # the next token off of the lookaheadstack or from the lexer - - if not lookahead: - if not lookaheadstack: - lookahead = get_token() # Get the next token - else: - lookahead = lookaheadstack.pop() - if not lookahead: - lookahead = YaccSymbol() - lookahead.type = '$end' - - # Check the action table - ltype = lookahead.type - t = actions[state].get(ltype) - - if t is not None: - if t > 0: - # shift a symbol on the stack - statestack.append(t) - state = t - - symstack.append(lookahead) - lookahead = None - - # Decrease error count on successful shift - if errorcount: errorcount -=1 - continue - - if t < 0: - # reduce a symbol on the stack, emit a production - p = prod[-t] - pname = p.name - plen = p.len - - # Get production function - sym = YaccSymbol() - sym.type = pname # Production name - sym.value = None - - if plen: - targ = symstack[-plen-1:] - targ[0] = sym - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # The code enclosed in this section is duplicated - # below as a performance optimization. Make sure - # changes get made in both locations. - - pslice.slice = targ - - try: - # Call the grammar rule with our special slice object - del symstack[-plen:] - del statestack[-plen:] - p.callable(pslice) - symstack.append(sym) - state = goto[statestack[-1]][pname] - statestack.append(state) - except SyntaxError: - # If an error was set. Enter error recovery state - lookaheadstack.append(lookahead) - symstack.pop() - statestack.pop() - state = statestack[-1] - sym.type = 'error' - lookahead = sym - errorcount = error_count - self.errorok = 0 - continue - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - else: - - targ = [ sym ] - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # The code enclosed in this section is duplicated - # above as a performance optimization. Make sure - # changes get made in both locations. - - pslice.slice = targ - - try: - # Call the grammar rule with our special slice object - p.callable(pslice) - symstack.append(sym) - state = goto[statestack[-1]][pname] - statestack.append(state) - except SyntaxError: - # If an error was set. Enter error recovery state - lookaheadstack.append(lookahead) - symstack.pop() - statestack.pop() - state = statestack[-1] - sym.type = 'error' - lookahead = sym - errorcount = error_count - self.errorok = 0 - continue - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - if t == 0: - n = symstack[-1] - return getattr(n,"value",None) - - if t == None: - - # We have some kind of parsing error here. To handle - # this, we are going to push the current token onto - # the tokenstack and replace it with an 'error' token. - # If there are any synchronization rules, they may - # catch it. - # - # In addition to pushing the error token, we call call - # the user defined p_error() function if this is the - # first syntax error. This function is only called if - # errorcount == 0. - if errorcount == 0 or self.errorok: - errorcount = error_count - self.errorok = 0 - errtoken = lookahead - if errtoken.type == '$end': - errtoken = None # End of file! - if self.errorfunc: - global errok,token,restart - errok = self.errok # Set some special functions available in error recovery - token = get_token - restart = self.restart - if errtoken and not hasattr(errtoken,'lexer'): - errtoken.lexer = lexer - tok = self.errorfunc(errtoken) - del errok, token, restart # Delete special functions - - if self.errorok: - # User must have done some kind of panic - # mode recovery on their own. 
The - # returned token is the next lookahead - lookahead = tok - errtoken = None - continue - else: - if errtoken: - if hasattr(errtoken,"lineno"): lineno = lookahead.lineno - else: lineno = 0 - if lineno: - sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type)) - else: - sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type) - else: - sys.stderr.write("yacc: Parse error in input. EOF\n") - return - - else: - errorcount = error_count - - # case 1: the statestack only has 1 entry on it. If we're in this state, the - # entire parse has been rolled back and we're completely hosed. The token is - # discarded and we just keep going. - - if len(statestack) <= 1 and lookahead.type != '$end': - lookahead = None - errtoken = None - state = 0 - # Nuke the pushback stack - del lookaheadstack[:] - continue - - # case 2: the statestack has a couple of entries on it, but we're - # at the end of the file. nuke the top entry and generate an error token - - # Start nuking entries on the stack - if lookahead.type == '$end': - # Whoa. We're really hosed here. Bail out - return - - if lookahead.type != 'error': - sym = symstack[-1] - if sym.type == 'error': - # Hmmm. Error is on top of stack, we'll just nuke input - # symbol and continue - lookahead = None - continue - t = YaccSymbol() - t.type = 'error' - if hasattr(lookahead,"lineno"): - t.lineno = lookahead.lineno - t.value = lookahead - lookaheadstack.append(lookahead) - lookahead = t - else: - symstack.pop() - statestack.pop() - state = statestack[-1] # Potential bug fix - - continue - - # Call an error function here - raise RuntimeError("yacc: internal parser error!!!\n") - -# ----------------------------------------------------------------------------- -# === Grammar Representation === -# -# The following functions, classes, and variables are used to represent and -# manipulate the rules that make up a grammar. -# ----------------------------------------------------------------------------- - -import re - -# regex matching identifiers -_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$') - -# ----------------------------------------------------------------------------- -# class Production: -# -# This class stores the raw information about a single production or grammar rule. -# A grammar rule refers to a specification such as this: -# -# expr : expr PLUS term -# -# Here are the basic attributes defined on all productions -# -# name - Name of the production. For example 'expr' -# prod - A list of symbols on the right side ['expr','PLUS','term'] -# prec - Production precedence level -# number - Production number. -# func - Function that executes on reduce -# file - File where production function is defined -# lineno - Line number where production function is defined -# -# The following attributes are defined or optional. 
-# -# len - Length of the production (number of symbols on right hand side) -# usyms - Set of unique symbols found in the production -# ----------------------------------------------------------------------------- - -class Production(object): - reduced = 0 - def __init__(self,number,name,prod,precedence=('right',0),func=None,file='',line=0): - self.name = name - self.prod = tuple(prod) - self.number = number - self.func = func - self.callable = None - self.file = file - self.line = line - self.prec = precedence - - # Internal settings used during table construction - - self.len = len(self.prod) # Length of the production - - # Create a list of unique production symbols used in the production - self.usyms = [ ] - for s in self.prod: - if s not in self.usyms: - self.usyms.append(s) - - # List of all LR items for the production - self.lr_items = [] - self.lr_next = None - - # Create a string representation - if self.prod: - self.str = "%s -> %s" % (self.name," ".join(self.prod)) - else: - self.str = "%s -> " % self.name - - def __str__(self): - return self.str - - def __repr__(self): - return "Production("+str(self)+")" - - def __len__(self): - return len(self.prod) - - def __nonzero__(self): - return 1 - - def __getitem__(self,index): - return self.prod[index] - - # Return the nth lr_item from the production (or None if at the end) - def lr_item(self,n): - if n > len(self.prod): return None - p = LRItem(self,n) - - # Precompute the list of productions immediately following. Hack. Remove later - try: - p.lr_after = Prodnames[p.prod[n+1]] - except (IndexError,KeyError): - p.lr_after = [] - try: - p.lr_before = p.prod[n-1] - except IndexError: - p.lr_before = None - - return p - - # Bind the production function name to a callable - def bind(self,pdict): - if self.func: - self.callable = pdict[self.func] - -# This class serves as a minimal standin for Production objects when -# reading table data from files. It only contains information -# actually used by the LR parsing engine, plus some additional -# debugging information. -class MiniProduction(object): - def __init__(self,str,name,len,func,file,line): - self.name = name - self.len = len - self.func = func - self.callable = None - self.file = file - self.line = line - self.str = str - def __str__(self): - return self.str - def __repr__(self): - return "MiniProduction(%s)" % self.str - - # Bind the production function name to a callable - def bind(self,pdict): - if self.func: - self.callable = pdict[self.func] - - -# ----------------------------------------------------------------------------- -# class LRItem -# -# This class represents a specific stage of parsing a production rule. For -# example: -# -# expr : expr . PLUS term -# -# In the above, the "." represents the current location of the parse. Here -# basic attributes: -# -# name - Name of the production. For example 'expr' -# prod - A list of symbols on the right side ['expr','.', 'PLUS','term'] -# number - Production number. -# -# lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term' -# then lr_next refers to 'expr -> expr PLUS . term' -# lr_index - LR item index (location of the ".") in the prod list. 
-# lookaheads - LALR lookahead symbols for this item -# len - Length of the production (number of symbols on right hand side) -# lr_after - List of all productions that immediately follow -# lr_before - Grammar symbol immediately before -# ----------------------------------------------------------------------------- - -class LRItem(object): - def __init__(self,p,n): - self.name = p.name - self.prod = list(p.prod) - self.number = p.number - self.lr_index = n - self.lookaheads = { } - self.prod.insert(n,".") - self.prod = tuple(self.prod) - self.len = len(self.prod) - self.usyms = p.usyms - - def __str__(self): - if self.prod: - s = "%s -> %s" % (self.name," ".join(self.prod)) - else: - s = "%s -> " % self.name - return s - - def __repr__(self): - return "LRItem("+str(self)+")" - -# ----------------------------------------------------------------------------- -# rightmost_terminal() -# -# Return the rightmost terminal from a list of symbols. Used in add_production() -# ----------------------------------------------------------------------------- -def rightmost_terminal(symbols, terminals): - i = len(symbols) - 1 - while i >= 0: - if symbols[i] in terminals: - return symbols[i] - i -= 1 - return None - -# ----------------------------------------------------------------------------- -# === GRAMMAR CLASS === -# -# The following class represents the contents of the specified grammar along -# with various computed properties such as first sets, follow sets, LR items, etc. -# This data is used for critical parts of the table generation process later. -# ----------------------------------------------------------------------------- - -class GrammarError(YaccError): pass - -class Grammar(object): - def __init__(self,terminals): - self.Productions = [None] # A list of all of the productions. The first - # entry is always reserved for the purpose of - # building an augmented grammar - - self.Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all - # productions of that nonterminal. - - self.Prodmap = { } # A dictionary that is only used to detect duplicate - # productions. - - self.Terminals = { } # A dictionary mapping the names of terminal symbols to a - # list of the rules where they are used. - - for term in terminals: - self.Terminals[term] = [] - - self.Terminals['error'] = [] - - self.Nonterminals = { } # A dictionary mapping names of nonterminals to a list - # of rule numbers where they are used. - - self.First = { } # A dictionary of precomputed FIRST(x) symbols - - self.Follow = { } # A dictionary of precomputed FOLLOW(x) symbols - - self.Precedence = { } # Precedence rules for each terminal. Contains tuples of the - # form ('right',level) or ('nonassoc', level) or ('left',level) - - self.UsedPrecedence = { } # Precedence rules that were actually used by the grammer. - # This is only used to provide error checking and to generate - # a warning about unused precedence rules. - - self.Start = None # Starting symbol for the grammar - - - def __len__(self): - return len(self.Productions) - - def __getitem__(self,index): - return self.Productions[index] - - # ----------------------------------------------------------------------------- - # set_precedence() - # - # Sets the precedence for a given terminal. assoc is the associativity such as - # 'left','right', or 'nonassoc'. level is a numeric level. 
- # - # ----------------------------------------------------------------------------- - - def set_precedence(self,term,assoc,level): - assert self.Productions == [None],"Must call set_precedence() before add_production()" - if term in self.Precedence: - raise GrammarError("Precedence already specified for terminal '%s'" % term) - if assoc not in ['left','right','nonassoc']: - raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'") - self.Precedence[term] = (assoc,level) - - # ----------------------------------------------------------------------------- - # add_production() - # - # Given an action function, this function assembles a production rule and - # computes its precedence level. - # - # The production rule is supplied as a list of symbols. For example, - # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and - # symbols ['expr','PLUS','term']. - # - # Precedence is determined by the precedence of the right-most non-terminal - # or the precedence of a terminal specified by %prec. - # - # A variety of error checks are performed to make sure production symbols - # are valid and that %prec is used correctly. - # ----------------------------------------------------------------------------- - - def add_production(self,prodname,syms,func=None,file='',line=0): - - if prodname in self.Terminals: - raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname)) - if prodname == 'error': - raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname)) - if not _is_identifier.match(prodname): - raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname)) - - # Look for literal tokens - for n,s in enumerate(syms): - if s[0] in "'\"": - try: - c = eval(s) - if (len(c) > 1): - raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname)) - if not c in self.Terminals: - self.Terminals[c] = [] - syms[n] = c - continue - except SyntaxError: - pass - if not _is_identifier.match(s) and s != '%prec': - raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname)) - - # Determine the precedence level - if '%prec' in syms: - if syms[-1] == '%prec': - raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line)) - if syms[-2] != '%prec': - raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line)) - precname = syms[-1] - prodprec = self.Precedence.get(precname,None) - if not prodprec: - raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname)) - else: - self.UsedPrecedence[precname] = 1 - del syms[-2:] # Drop %prec from the rule - else: - # If no %prec, precedence is determined by the rightmost terminal symbol - precname = rightmost_terminal(syms,self.Terminals) - prodprec = self.Precedence.get(precname,('right',0)) - - # See if the rule is already in the rulemap - map = "%s -> %s" % (prodname,syms) - if map in self.Prodmap: - m = self.Prodmap[map] - raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) + - "Previous definition at %s:%d" % (m.file, m.line)) - - # From this point on, everything is valid. 
Create a new Production instance - pnumber = len(self.Productions) - if not prodname in self.Nonterminals: - self.Nonterminals[prodname] = [ ] - - # Add the production number to Terminals and Nonterminals - for t in syms: - if t in self.Terminals: - self.Terminals[t].append(pnumber) - else: - if not t in self.Nonterminals: - self.Nonterminals[t] = [ ] - self.Nonterminals[t].append(pnumber) - - # Create a production and add it to the list of productions - p = Production(pnumber,prodname,syms,prodprec,func,file,line) - self.Productions.append(p) - self.Prodmap[map] = p - - # Add to the global productions list - try: - self.Prodnames[prodname].append(p) - except KeyError: - self.Prodnames[prodname] = [ p ] - return 0 - - # ----------------------------------------------------------------------------- - # set_start() - # - # Sets the starting symbol and creates the augmented grammar. Production - # rule 0 is S' -> start where start is the start symbol. - # ----------------------------------------------------------------------------- - - def set_start(self,start=None): - if not start: - start = self.Productions[1].name - if start not in self.Nonterminals: - raise GrammarError("start symbol %s undefined" % start) - self.Productions[0] = Production(0,"S'",[start]) - self.Nonterminals[start].append(0) - self.Start = start - - # ----------------------------------------------------------------------------- - # find_unreachable() - # - # Find all of the nonterminal symbols that can't be reached from the starting - # symbol. Returns a list of nonterminals that can't be reached. - # ----------------------------------------------------------------------------- - - def find_unreachable(self): - - # Mark all symbols that are reachable from a symbol s - def mark_reachable_from(s): - if reachable[s]: - # We've already reached symbol s. - return - reachable[s] = 1 - for p in self.Prodnames.get(s,[]): - for r in p.prod: - mark_reachable_from(r) - - reachable = { } - for s in list(self.Terminals) + list(self.Nonterminals): - reachable[s] = 0 - - mark_reachable_from( self.Productions[0].prod[0] ) - - return [s for s in list(self.Nonterminals) - if not reachable[s]] - - # ----------------------------------------------------------------------------- - # infinite_cycles() - # - # This function looks at the various parsing rules and tries to detect - # infinite recursion cycles (grammar rules where there is no possible way - # to derive a string of only terminals). - # ----------------------------------------------------------------------------- - - def infinite_cycles(self): - terminates = {} - - # Terminals: - for t in self.Terminals: - terminates[t] = 1 - - terminates['$end'] = 1 - - # Nonterminals: - - # Initialize to false: - for n in self.Nonterminals: - terminates[n] = 0 - - # Then propagate termination until no change: - while 1: - some_change = 0 - for (n,pl) in self.Prodnames.items(): - # Nonterminal n terminates iff any of its productions terminates. - for p in pl: - # Production p terminates iff all of its rhs symbols terminate. - for s in p.prod: - if not terminates[s]: - # The symbol s does not terminate, - # so production p does not terminate. - p_terminates = 0 - break - else: - # didn't break from the loop, - # so every symbol s terminates - # so production p terminates. - p_terminates = 1 - - if p_terminates: - # symbol n terminates! - if not terminates[n]: - terminates[n] = 1 - some_change = 1 - # Don't need to consider any more productions for this n. 
- break - - if not some_change: - break - - infinite = [] - for (s,term) in terminates.items(): - if not term: - if not s in self.Prodnames and not s in self.Terminals and s != 'error': - # s is used-but-not-defined, and we've already warned of that, - # so it would be overkill to say that it's also non-terminating. - pass - else: - infinite.append(s) - - return infinite - - - # ----------------------------------------------------------------------------- - # undefined_symbols() - # - # Find all symbols that were used the grammar, but not defined as tokens or - # grammar rules. Returns a list of tuples (sym, prod) where sym in the symbol - # and prod is the production where the symbol was used. - # ----------------------------------------------------------------------------- - def undefined_symbols(self): - result = [] - for p in self.Productions: - if not p: continue - - for s in p.prod: - if not s in self.Prodnames and not s in self.Terminals and s != 'error': - result.append((s,p)) - return result - - # ----------------------------------------------------------------------------- - # unused_terminals() - # - # Find all terminals that were defined, but not used by the grammar. Returns - # a list of all symbols. - # ----------------------------------------------------------------------------- - def unused_terminals(self): - unused_tok = [] - for s,v in self.Terminals.items(): - if s != 'error' and not v: - unused_tok.append(s) - - return unused_tok - - # ------------------------------------------------------------------------------ - # unused_rules() - # - # Find all grammar rules that were defined, but not used (maybe not reachable) - # Returns a list of productions. - # ------------------------------------------------------------------------------ - - def unused_rules(self): - unused_prod = [] - for s,v in self.Nonterminals.items(): - if not v: - p = self.Prodnames[s][0] - unused_prod.append(p) - return unused_prod - - # ----------------------------------------------------------------------------- - # unused_precedence() - # - # Returns a list of tuples (term,precedence) corresponding to precedence - # rules that were never used by the grammar. term is the name of the terminal - # on which precedence was applied and precedence is a string such as 'left' or - # 'right' corresponding to the type of precedence. - # ----------------------------------------------------------------------------- - - def unused_precedence(self): - unused = [] - for termname in self.Precedence: - if not (termname in self.Terminals or termname in self.UsedPrecedence): - unused.append((termname,self.Precedence[termname][0])) - - return unused - - # ------------------------------------------------------------------------- - # _first() - # - # Compute the value of FIRST1(beta) where beta is a tuple of symbols. - # - # During execution of compute_first1, the result may be incomplete. - # Afterward (e.g., when called from compute_follow()), it will be complete. - # ------------------------------------------------------------------------- - def _first(self,beta): - - # We are computing First(x1,x2,x3,...,xn) - result = [ ] - for x in beta: - x_produces_empty = 0 - - # Add all the non- symbols of First[x] to the result. - for f in self.First[x]: - if f == '': - x_produces_empty = 1 - else: - if f not in result: result.append(f) - - if x_produces_empty: - # We have to consider the next x in beta, - # i.e. stay in the loop. - pass - else: - # We don't have to consider any further symbols in beta. 
- break - else: - # There was no 'break' from the loop, - # so x_produces_empty was true for all x in beta, - # so beta produces empty as well. - result.append('') - - return result - - # ------------------------------------------------------------------------- - # compute_first() - # - # Compute the value of FIRST1(X) for all symbols - # ------------------------------------------------------------------------- - def compute_first(self): - if self.First: - return self.First - - # Terminals: - for t in self.Terminals: - self.First[t] = [t] - - self.First['$end'] = ['$end'] - - # Nonterminals: - - # Initialize to the empty set: - for n in self.Nonterminals: - self.First[n] = [] - - # Then propagate symbols until no change: - while 1: - some_change = 0 - for n in self.Nonterminals: - for p in self.Prodnames[n]: - for f in self._first(p.prod): - if f not in self.First[n]: - self.First[n].append( f ) - some_change = 1 - if not some_change: - break - - return self.First - - # --------------------------------------------------------------------- - # compute_follow() - # - # Computes all of the follow sets for every non-terminal symbol. The - # follow set is the set of all symbols that might follow a given - # non-terminal. See the Dragon book, 2nd Ed. p. 189. - # --------------------------------------------------------------------- - def compute_follow(self,start=None): - # If already computed, return the result - if self.Follow: - return self.Follow - - # If first sets not computed yet, do that first. - if not self.First: - self.compute_first() - - # Add '$end' to the follow list of the start symbol - for k in self.Nonterminals: - self.Follow[k] = [ ] - - if not start: - start = self.Productions[1].name - - self.Follow[start] = [ '$end' ] - - while 1: - didadd = 0 - for p in self.Productions[1:]: - # Here is the production set - for i in range(len(p.prod)): - B = p.prod[i] - if B in self.Nonterminals: - # Okay. We got a non-terminal in a production - fst = self._first(p.prod[i+1:]) - hasempty = 0 - for f in fst: - if f != '' and f not in self.Follow[B]: - self.Follow[B].append(f) - didadd = 1 - if f == '': - hasempty = 1 - if hasempty or i == (len(p.prod)-1): - # Add elements of follow(a) to follow(b) - for f in self.Follow[p.name]: - if f not in self.Follow[B]: - self.Follow[B].append(f) - didadd = 1 - if not didadd: break - return self.Follow - - - # ----------------------------------------------------------------------------- - # build_lritems() - # - # This function walks the list of productions and builds a complete set of the - # LR items. The LR items are stored in two ways: First, they are uniquely - # numbered and placed in the list _lritems. Second, a linked list of LR items - # is built for each production. For example: - # - # E -> E PLUS E - # - # Creates the list - # - # [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . 
] - # ----------------------------------------------------------------------------- - - def build_lritems(self): - for p in self.Productions: - lastlri = p - i = 0 - lr_items = [] - while 1: - if i > len(p): - lri = None - else: - lri = LRItem(p,i) - # Precompute the list of productions immediately following - try: - lri.lr_after = self.Prodnames[lri.prod[i+1]] - except (IndexError,KeyError): - lri.lr_after = [] - try: - lri.lr_before = lri.prod[i-1] - except IndexError: - lri.lr_before = None - - lastlri.lr_next = lri - if not lri: break - lr_items.append(lri) - lastlri = lri - i += 1 - p.lr_items = lr_items - -# ----------------------------------------------------------------------------- -# == Class LRTable == -# -# This basic class represents a basic table of LR parsing information. -# Methods for generating the tables are not defined here. They are defined -# in the derived class LRGeneratedTable. -# ----------------------------------------------------------------------------- - -class VersionError(YaccError): pass - -class LRTable(object): - def __init__(self): - self.lr_action = None - self.lr_goto = None - self.lr_productions = None - self.lr_method = None - - def read_table(self,module,outputdir): - if isinstance(module,types.ModuleType): - parsetab = module - else: - old_path = sys.path - sys.path = [outputdir] - if sys.version_info[0] < 3: - exec("import %s as parsetab" % module) - else: - env = { } - exec("import %s as parsetab" % module, env, env) - parsetab = env['parsetab'] - sys.path = old_path - - if parsetab._tabversion != __tabversion__: - raise VersionError("yacc table file version is out of date") - - self.lr_action = parsetab._lr_action - self.lr_goto = parsetab._lr_goto - - self.lr_productions = [] - for p in parsetab._lr_productions: - self.lr_productions.append(MiniProduction(*p)) - - self.lr_method = parsetab._lr_method - return parsetab._lr_signature - - def read_pickle(self,filename): - try: - import cPickle as pickle - except ImportError: - import pickle - - in_f = open(filename,"rb") - - tabversion = pickle.load(in_f) - if tabversion != __tabversion__: - raise VersionError("yacc table file version is out of date") - self.lr_method = pickle.load(in_f) - signature = pickle.load(in_f) - self.lr_action = pickle.load(in_f) - self.lr_goto = pickle.load(in_f) - productions = pickle.load(in_f) - - self.lr_productions = [] - for p in productions: - self.lr_productions.append(MiniProduction(*p)) - - in_f.close() - return signature - - # Bind all production function names to callable objects in pdict - def bind_callables(self,pdict): - for p in self.lr_productions: - p.bind(pdict) - -# ----------------------------------------------------------------------------- -# === LR Generator === -# -# The following classes and functions are used to generate LR parsing tables on -# a grammar. -# ----------------------------------------------------------------------------- - -# ----------------------------------------------------------------------------- -# digraph() -# traverse() -# -# The following two functions are used to compute set valued functions -# of the form: -# -# F(x) = F'(x) U U{F(y) | x R y} -# -# This is used to compute the values of Read() sets as well as FOLLOW sets -# in LALR(1) generation. 
-# -# Inputs: X - An input set -# R - A relation -# FP - Set-valued function -# ------------------------------------------------------------------------------ - -def digraph(X,R,FP): - N = { } - for x in X: - N[x] = 0 - stack = [] - F = { } - for x in X: - if N[x] == 0: traverse(x,N,stack,F,X,R,FP) - return F - -def traverse(x,N,stack,F,X,R,FP): - stack.append(x) - d = len(stack) - N[x] = d - F[x] = FP(x) # F(X) <- F'(x) - - rel = R(x) # Get y's related to x - for y in rel: - if N[y] == 0: - traverse(y,N,stack,F,X,R,FP) - N[x] = min(N[x],N[y]) - for a in F.get(y,[]): - if a not in F[x]: F[x].append(a) - if N[x] == d: - N[stack[-1]] = MAXINT - F[stack[-1]] = F[x] - element = stack.pop() - while element != x: - N[stack[-1]] = MAXINT - F[stack[-1]] = F[x] - element = stack.pop() - -class LALRError(YaccError): pass - -# ----------------------------------------------------------------------------- -# == LRGeneratedTable == -# -# This class implements the LR table generation algorithm. There are no -# public methods except for write() -# ----------------------------------------------------------------------------- - -class LRGeneratedTable(LRTable): - def __init__(self,grammar,method='LALR',log=None): - if method not in ['SLR','LALR']: - raise LALRError("Unsupported method %s" % method) - - self.grammar = grammar - self.lr_method = method - - # Set up the logger - if not log: - log = NullLogger() - self.log = log - - # Internal attributes - self.lr_action = {} # Action table - self.lr_goto = {} # Goto table - self.lr_productions = grammar.Productions # Copy of grammar Production array - self.lr_goto_cache = {} # Cache of computed gotos - self.lr0_cidhash = {} # Cache of closures - - self._add_count = 0 # Internal counter used to detect cycles - - # Diagonistic information filled in by the table generator - self.sr_conflict = 0 - self.rr_conflict = 0 - self.conflicts = [] # List of conflicts - - self.sr_conflicts = [] - self.rr_conflicts = [] - - # Build the tables - self.grammar.build_lritems() - self.grammar.compute_first() - self.grammar.compute_follow() - self.lr_parse_table() - - # Compute the LR(0) closure operation on I, where I is a set of LR(0) items. - - def lr0_closure(self,I): - self._add_count += 1 - - # Add everything in I to J - J = I[:] - didadd = 1 - while didadd: - didadd = 0 - for j in J: - for x in j.lr_after: - if getattr(x,"lr0_added",0) == self._add_count: continue - # Add B --> .G to J - J.append(x.lr_next) - x.lr0_added = self._add_count - didadd = 1 - - return J - - # Compute the LR(0) goto function goto(I,X) where I is a set - # of LR(0) items and X is a grammar symbol. This function is written - # in a way that guarantees uniqueness of the generated goto sets - # (i.e. the same goto set will never be returned as two different Python - # objects). With uniqueness, we can later do fast set comparisons using - # id(obj) instead of element-wise comparison. 
- - def lr0_goto(self,I,x): - # First we look for a previously cached entry - g = self.lr_goto_cache.get((id(I),x),None) - if g: return g - - # Now we generate the goto set in a way that guarantees uniqueness - # of the result - - s = self.lr_goto_cache.get(x,None) - if not s: - s = { } - self.lr_goto_cache[x] = s - - gs = [ ] - for p in I: - n = p.lr_next - if n and n.lr_before == x: - s1 = s.get(id(n),None) - if not s1: - s1 = { } - s[id(n)] = s1 - gs.append(n) - s = s1 - g = s.get('$end',None) - if not g: - if gs: - g = self.lr0_closure(gs) - s['$end'] = g - else: - s['$end'] = gs - self.lr_goto_cache[(id(I),x)] = g - return g - - # Compute the LR(0) sets of item function - def lr0_items(self): - - C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ] - i = 0 - for I in C: - self.lr0_cidhash[id(I)] = i - i += 1 - - # Loop over the items in C and each grammar symbols - i = 0 - while i < len(C): - I = C[i] - i += 1 - - # Collect all of the symbols that could possibly be in the goto(I,X) sets - asyms = { } - for ii in I: - for s in ii.usyms: - asyms[s] = None - - for x in asyms: - g = self.lr0_goto(I,x) - if not g: continue - if id(g) in self.lr0_cidhash: continue - self.lr0_cidhash[id(g)] = len(C) - C.append(g) - - return C - - # ----------------------------------------------------------------------------- - # ==== LALR(1) Parsing ==== - # - # LALR(1) parsing is almost exactly the same as SLR except that instead of - # relying upon Follow() sets when performing reductions, a more selective - # lookahead set that incorporates the state of the LR(0) machine is utilized. - # Thus, we mainly just have to focus on calculating the lookahead sets. - # - # The method used here is due to DeRemer and Pennelo (1982). - # - # DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1) - # Lookahead Sets", ACM Transactions on Programming Languages and Systems, - # Vol. 4, No. 4, Oct. 1982, pp. 615-649 - # - # Further details can also be found in: - # - # J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing", - # McGraw-Hill Book Company, (1985). - # - # ----------------------------------------------------------------------------- - - # ----------------------------------------------------------------------------- - # compute_nullable_nonterminals() - # - # Creates a dictionary containing all of the non-terminals that might produce - # an empty production. - # ----------------------------------------------------------------------------- - - def compute_nullable_nonterminals(self): - nullable = {} - num_nullable = 0 - while 1: - for p in self.grammar.Productions[1:]: - if p.len == 0: - nullable[p.name] = 1 - continue - for t in p.prod: - if not t in nullable: break - else: - nullable[p.name] = 1 - if len(nullable) == num_nullable: break - num_nullable = len(nullable) - return nullable - - # ----------------------------------------------------------------------------- - # find_nonterminal_trans(C) - # - # Given a set of LR(0) items, this functions finds all of the non-terminal - # transitions. These are transitions in which a dot appears immediately before - # a non-terminal. Returns a list of tuples of the form (state,N) where state - # is the state number and N is the nonterminal symbol. - # - # The input C is the set of LR(0) items. 
- # ----------------------------------------------------------------------------- - - def find_nonterminal_transitions(self,C): - trans = [] - for state in range(len(C)): - for p in C[state]: - if p.lr_index < p.len - 1: - t = (state,p.prod[p.lr_index+1]) - if t[1] in self.grammar.Nonterminals: - if t not in trans: trans.append(t) - state = state + 1 - return trans - - # ----------------------------------------------------------------------------- - # dr_relation() - # - # Computes the DR(p,A) relationships for non-terminal transitions. The input - # is a tuple (state,N) where state is a number and N is a nonterminal symbol. - # - # Returns a list of terminals. - # ----------------------------------------------------------------------------- - - def dr_relation(self,C,trans,nullable): - dr_set = { } - state,N = trans - terms = [] - - g = self.lr0_goto(C[state],N) - for p in g: - if p.lr_index < p.len - 1: - a = p.prod[p.lr_index+1] - if a in self.grammar.Terminals: - if a not in terms: terms.append(a) - - # This extra bit is to handle the start state - if state == 0 and N == self.grammar.Productions[0].prod[0]: - terms.append('$end') - - return terms - - # ----------------------------------------------------------------------------- - # reads_relation() - # - # Computes the READS() relation (p,A) READS (t,C). - # ----------------------------------------------------------------------------- - - def reads_relation(self,C, trans, empty): - # Look for empty transitions - rel = [] - state, N = trans - - g = self.lr0_goto(C[state],N) - j = self.lr0_cidhash.get(id(g),-1) - for p in g: - if p.lr_index < p.len - 1: - a = p.prod[p.lr_index + 1] - if a in empty: - rel.append((j,a)) - - return rel - - # ----------------------------------------------------------------------------- - # compute_lookback_includes() - # - # Determines the lookback and includes relations - # - # LOOKBACK: - # - # This relation is determined by running the LR(0) state machine forward. - # For example, starting with a production "N : . A B C", we run it forward - # to obtain "N : A B C ." We then build a relationship between this final - # state and the starting state. These relationships are stored in a dictionary - # lookdict. - # - # INCLUDES: - # - # Computes the INCLUDE() relation (p,A) INCLUDES (p',B). - # - # This relation is used to determine non-terminal transitions that occur - # inside of other non-terminal transition states. (p,A) INCLUDES (p', B) - # if the following holds: - # - # B -> LAT, where T -> epsilon and p' -L-> p - # - # L is essentially a prefix (which may be empty), T is a suffix that must be - # able to derive an empty string. State p' must lead to state p with the string L. - # - # ----------------------------------------------------------------------------- - - def compute_lookback_includes(self,C,trans,nullable): - - lookdict = {} # Dictionary of lookback relations - includedict = {} # Dictionary of include relations - - # Make a dictionary of non-terminal transitions - dtrans = {} - for t in trans: - dtrans[t] = 1 - - # Loop over all transitions and compute lookbacks and includes - for state,N in trans: - lookb = [] - includes = [] - for p in C[state]: - if p.name != N: continue - - # Okay, we have a name match. We now follow the production all the way - # through the state machine until we get the . 
on the right hand side - - lr_index = p.lr_index - j = state - while lr_index < p.len - 1: - lr_index = lr_index + 1 - t = p.prod[lr_index] - - # Check to see if this symbol and state are a non-terminal transition - if (j,t) in dtrans: - # Yes. Okay, there is some chance that this is an includes relation - # the only way to know for certain is whether the rest of the - # production derives empty - - li = lr_index + 1 - while li < p.len: - if p.prod[li] in self.grammar.Terminals: break # No forget it - if not p.prod[li] in nullable: break - li = li + 1 - else: - # Appears to be a relation between (j,t) and (state,N) - includes.append((j,t)) - - g = self.lr0_goto(C[j],t) # Go to next set - j = self.lr0_cidhash.get(id(g),-1) # Go to next state - - # When we get here, j is the final state, now we have to locate the production - for r in C[j]: - if r.name != p.name: continue - if r.len != p.len: continue - i = 0 - # This look is comparing a production ". A B C" with "A B C ." - while i < r.lr_index: - if r.prod[i] != p.prod[i+1]: break - i = i + 1 - else: - lookb.append((j,r)) - for i in includes: - if not i in includedict: includedict[i] = [] - includedict[i].append((state,N)) - lookdict[(state,N)] = lookb - - return lookdict,includedict - - # ----------------------------------------------------------------------------- - # compute_read_sets() - # - # Given a set of LR(0) items, this function computes the read sets. - # - # Inputs: C = Set of LR(0) items - # ntrans = Set of nonterminal transitions - # nullable = Set of empty transitions - # - # Returns a set containing the read sets - # ----------------------------------------------------------------------------- - - def compute_read_sets(self,C, ntrans, nullable): - FP = lambda x: self.dr_relation(C,x,nullable) - R = lambda x: self.reads_relation(C,x,nullable) - F = digraph(ntrans,R,FP) - return F - - # ----------------------------------------------------------------------------- - # compute_follow_sets() - # - # Given a set of LR(0) items, a set of non-terminal transitions, a readset, - # and an include set, this function computes the follow sets - # - # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)} - # - # Inputs: - # ntrans = Set of nonterminal transitions - # readsets = Readset (previously computed) - # inclsets = Include sets (previously computed) - # - # Returns a set containing the follow sets - # ----------------------------------------------------------------------------- - - def compute_follow_sets(self,ntrans,readsets,inclsets): - FP = lambda x: readsets[x] - R = lambda x: inclsets.get(x,[]) - F = digraph(ntrans,R,FP) - return F - - # ----------------------------------------------------------------------------- - # add_lookaheads() - # - # Attaches the lookahead symbols to grammar rules. 
- # - # Inputs: lookbacks - Set of lookback relations - # followset - Computed follow set - # - # This function directly attaches the lookaheads to productions contained - # in the lookbacks set - # ----------------------------------------------------------------------------- - - def add_lookaheads(self,lookbacks,followset): - for trans,lb in lookbacks.items(): - # Loop over productions in lookback - for state,p in lb: - if not state in p.lookaheads: - p.lookaheads[state] = [] - f = followset.get(trans,[]) - for a in f: - if a not in p.lookaheads[state]: p.lookaheads[state].append(a) - - # ----------------------------------------------------------------------------- - # add_lalr_lookaheads() - # - # This function does all of the work of adding lookahead information for use - # with LALR parsing - # ----------------------------------------------------------------------------- - - def add_lalr_lookaheads(self,C): - # Determine all of the nullable nonterminals - nullable = self.compute_nullable_nonterminals() - - # Find all non-terminal transitions - trans = self.find_nonterminal_transitions(C) - - # Compute read sets - readsets = self.compute_read_sets(C,trans,nullable) - - # Compute lookback/includes relations - lookd, included = self.compute_lookback_includes(C,trans,nullable) - - # Compute LALR FOLLOW sets - followsets = self.compute_follow_sets(trans,readsets,included) - - # Add all of the lookaheads - self.add_lookaheads(lookd,followsets) - - # ----------------------------------------------------------------------------- - # lr_parse_table() - # - # This function constructs the parse tables for SLR or LALR - # ----------------------------------------------------------------------------- - def lr_parse_table(self): - Productions = self.grammar.Productions - Precedence = self.grammar.Precedence - goto = self.lr_goto # Goto array - action = self.lr_action # Action array - log = self.log # Logger for output - - actionp = { } # Action production array (temporary) - - log.info("Parsing method: %s", self.lr_method) - - # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items - # This determines the number of states - - C = self.lr0_items() - - if self.lr_method == 'LALR': - self.add_lalr_lookaheads(C) - - # Build the parser table, state by state - st = 0 - for I in C: - # Loop over each production in I - actlist = [ ] # List of actions - st_action = { } - st_actionp = { } - st_goto = { } - log.info("") - log.info("state %d", st) - log.info("") - for p in I: - log.info(" (%d) %s", p.number, str(p)) - log.info("") - - for p in I: - if p.len == p.lr_index + 1: - if p.name == "S'": - # Start symbol. Accept! - st_action["$end"] = 0 - st_actionp["$end"] = p - else: - # We are at the end of a production. Reduce! - if self.lr_method == 'LALR': - laheads = p.lookaheads[st] - else: - laheads = self.grammar.Follow[p.name] - for a in laheads: - actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p))) - r = st_action.get(a,None) - if r is not None: - # Whoa. Have a shift/reduce or reduce/reduce conflict - if r > 0: - # Need to decide on shift or reduce here - # By default we favor shifting. Need to add - # some precedence rules here. - sprec,slevel = Productions[st_actionp[a].number].prec - rprec,rlevel = Precedence.get(a,('right',0)) - if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')): - # We really need to reduce here. - st_action[a] = -p.number - st_actionp[a] = p - if not slevel and not rlevel: - log.info(" ! 
shift/reduce conflict for %s resolved as reduce",a) - self.sr_conflicts.append((st,a,'reduce')) - Productions[p.number].reduced += 1 - elif (slevel == rlevel) and (rprec == 'nonassoc'): - st_action[a] = None - else: - # Hmmm. Guess we'll keep the shift - if not rlevel: - log.info(" ! shift/reduce conflict for %s resolved as shift",a) - self.sr_conflicts.append((st,a,'shift')) - elif r < 0: - # Reduce/reduce conflict. In this case, we favor the rule - # that was defined first in the grammar file - oldp = Productions[-r] - pp = Productions[p.number] - if oldp.line > pp.line: - st_action[a] = -p.number - st_actionp[a] = p - chosenp,rejectp = pp,oldp - Productions[p.number].reduced += 1 - Productions[oldp.number].reduced -= 1 - else: - chosenp,rejectp = oldp,pp - self.rr_conflicts.append((st,chosenp,rejectp)) - log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a]) - else: - raise LALRError("Unknown conflict in state %d" % st) - else: - st_action[a] = -p.number - st_actionp[a] = p - Productions[p.number].reduced += 1 - else: - i = p.lr_index - a = p.prod[i+1] # Get symbol right after the "." - if a in self.grammar.Terminals: - g = self.lr0_goto(I,a) - j = self.lr0_cidhash.get(id(g),-1) - if j >= 0: - # We are in a shift state - actlist.append((a,p,"shift and go to state %d" % j)) - r = st_action.get(a,None) - if r is not None: - # Whoa have a shift/reduce or shift/shift conflict - if r > 0: - if r != j: - raise LALRError("Shift/shift conflict in state %d" % st) - elif r < 0: - # Do a precedence check. - # - if precedence of reduce rule is higher, we reduce. - # - if precedence of reduce is same and left assoc, we reduce. - # - otherwise we shift - rprec,rlevel = Productions[st_actionp[a].number].prec - sprec,slevel = Precedence.get(a,('right',0)) - if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')): - # We decide to shift here... highest precedence to shift - Productions[st_actionp[a].number].reduced -= 1 - st_action[a] = j - st_actionp[a] = p - if not rlevel: - log.info(" ! shift/reduce conflict for %s resolved as shift",a) - self.sr_conflicts.append((st,a,'shift')) - elif (slevel == rlevel) and (rprec == 'nonassoc'): - st_action[a] = None - else: - # Hmmm. Guess we'll keep the reduce - if not slevel and not rlevel: - log.info(" ! shift/reduce conflict for %s resolved as reduce",a) - self.sr_conflicts.append((st,a,'reduce')) - - else: - raise LALRError("Unknown conflict in state %d" % st) - else: - st_action[a] = j - st_actionp[a] = p - - # Print the actions associated with each terminal - _actprint = { } - for a,p,m in actlist: - if a in st_action: - if p is st_actionp[a]: - log.info(" %-15s %s",a,m) - _actprint[(a,m)] = 1 - log.info("") - # Print the actions that were not used. (debugging) - not_used = 0 - for a,p,m in actlist: - if a in st_action: - if p is not st_actionp[a]: - if not (a,m) in _actprint: - log.debug(" ! 
%-15s [ %s ]",a,m) - not_used = 1 - _actprint[(a,m)] = 1 - if not_used: - log.debug("") - - # Construct the goto table for this state - - nkeys = { } - for ii in I: - for s in ii.usyms: - if s in self.grammar.Nonterminals: - nkeys[s] = None - for n in nkeys: - g = self.lr0_goto(I,n) - j = self.lr0_cidhash.get(id(g),-1) - if j >= 0: - st_goto[n] = j - log.info(" %-30s shift and go to state %d",n,j) - - action[st] = st_action - actionp[st] = st_actionp - goto[st] = st_goto - st += 1 - - - # ----------------------------------------------------------------------------- - # write() - # - # This function writes the LR parsing tables to a file - # ----------------------------------------------------------------------------- - - def write_table(self,modulename,outputdir='',signature=""): - basemodulename = modulename.split(".")[-1] - filename = os.path.join(outputdir,basemodulename) + ".py" - try: - f = open(filename,"w") - - f.write(""" -# %s -# This file is automatically generated. Do not edit. -_tabversion = %r - -_lr_method = %r - -_lr_signature = %r - """ % (filename, __tabversion__, self.lr_method, signature)) - - # Change smaller to 0 to go back to original tables - smaller = 1 - - # Factor out names to try and make smaller - if smaller: - items = { } - - for s,nd in self.lr_action.items(): - for name,v in nd.items(): - i = items.get(name) - if not i: - i = ([],[]) - items[name] = i - i[0].append(s) - i[1].append(v) - - f.write("\n_lr_action_items = {") - for k,v in items.items(): - f.write("%r:([" % k) - for i in v[0]: - f.write("%r," % i) - f.write("],[") - for i in v[1]: - f.write("%r," % i) - - f.write("]),") - f.write("}\n") - - f.write(""" -_lr_action = { } -for _k, _v in _lr_action_items.items(): - for _x,_y in zip(_v[0],_v[1]): - if not _x in _lr_action: _lr_action[_x] = { } - _lr_action[_x][_k] = _y -del _lr_action_items -""") - - else: - f.write("\n_lr_action = { "); - for k,v in self.lr_action.items(): - f.write("(%r,%r):%r," % (k[0],k[1],v)) - f.write("}\n"); - - if smaller: - # Factor out names to try and make smaller - items = { } - - for s,nd in self.lr_goto.items(): - for name,v in nd.items(): - i = items.get(name) - if not i: - i = ([],[]) - items[name] = i - i[0].append(s) - i[1].append(v) - - f.write("\n_lr_goto_items = {") - for k,v in items.items(): - f.write("%r:([" % k) - for i in v[0]: - f.write("%r," % i) - f.write("],[") - for i in v[1]: - f.write("%r," % i) - - f.write("]),") - f.write("}\n") - - f.write(""" -_lr_goto = { } -for _k, _v in _lr_goto_items.items(): - for _x,_y in zip(_v[0],_v[1]): - if not _x in _lr_goto: _lr_goto[_x] = { } - _lr_goto[_x][_k] = _y -del _lr_goto_items -""") - else: - f.write("\n_lr_goto = { "); - for k,v in self.lr_goto.items(): - f.write("(%r,%r):%r," % (k[0],k[1],v)) - f.write("}\n"); - - # Write production table - f.write("_lr_productions = [\n") - for p in self.lr_productions: - if p.func: - f.write(" (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line)) - else: - f.write(" (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len)) - f.write("]\n") - f.close() - - except IOError: - e = sys.exc_info()[1] - sys.stderr.write("Unable to create '%s'\n" % filename) - sys.stderr.write(str(e)+"\n") - return - - - # ----------------------------------------------------------------------------- - # pickle_table() - # - # This function pickles the LR parsing tables to a supplied file object - # ----------------------------------------------------------------------------- - - def pickle_table(self,filename,signature=""): - try: - 
import cPickle as pickle - except ImportError: - import pickle - outf = open(filename,"wb") - pickle.dump(__tabversion__,outf,pickle_protocol) - pickle.dump(self.lr_method,outf,pickle_protocol) - pickle.dump(signature,outf,pickle_protocol) - pickle.dump(self.lr_action,outf,pickle_protocol) - pickle.dump(self.lr_goto,outf,pickle_protocol) - - outp = [] - for p in self.lr_productions: - if p.func: - outp.append((p.str,p.name, p.len, p.func,p.file,p.line)) - else: - outp.append((str(p),p.name,p.len,None,None,None)) - pickle.dump(outp,outf,pickle_protocol) - outf.close() - -# ----------------------------------------------------------------------------- -# === INTROSPECTION === -# -# The following functions and classes are used to implement the PLY -# introspection features followed by the yacc() function itself. -# ----------------------------------------------------------------------------- - -# ----------------------------------------------------------------------------- -# get_caller_module_dict() -# -# This function returns a dictionary containing all of the symbols defined within -# a caller further down the call stack. This is used to get the environment -# associated with the yacc() call if none was provided. -# ----------------------------------------------------------------------------- - -def get_caller_module_dict(levels): - try: - raise RuntimeError - except RuntimeError: - e,b,t = sys.exc_info() - f = t.tb_frame - while levels > 0: - f = f.f_back - levels -= 1 - ldict = f.f_globals.copy() - if f.f_globals != f.f_locals: - ldict.update(f.f_locals) - - return ldict - -# ----------------------------------------------------------------------------- -# parse_grammar() -# -# This takes a raw grammar rule string and parses it into production data -# ----------------------------------------------------------------------------- -def parse_grammar(doc,file,line): - grammar = [] - # Split the doc string into lines - pstrings = doc.splitlines() - lastp = None - dline = line - for ps in pstrings: - dline += 1 - p = ps.split() - if not p: continue - try: - if p[0] == '|': - # This is a continuation of a previous rule - if not lastp: - raise SyntaxError("%s:%d: Misplaced '|'" % (file,dline)) - prodname = lastp - syms = p[1:] - else: - prodname = p[0] - lastp = prodname - syms = p[2:] - assign = p[1] - if assign != ':' and assign != '::=': - raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,dline)) - - grammar.append((file,dline,prodname,syms)) - except SyntaxError: - raise - except Exception: - raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,dline,ps.strip())) - - return grammar - -# ----------------------------------------------------------------------------- -# ParserReflect() -# -# This class represents information extracted for building a parser including -# start symbol, error function, tokens, precedence list, action functions, -# etc. 
-# -----------------------------------------------------------------------------
-class ParserReflect(object):
-    def __init__(self,pdict,log=None):
-        self.pdict = pdict
-        self.start = None
-        self.error_func = None
-        self.tokens = None
-        self.files = {}
-        self.grammar = []
-        self.error = 0
-
-        if log is None:
-            self.log = PlyLogger(sys.stderr)
-        else:
-            self.log = log
-
-    # Get all of the basic information
-    def get_all(self):
-        self.get_start()
-        self.get_error_func()
-        self.get_tokens()
-        self.get_precedence()
-        self.get_pfunctions()
-
-    # Validate all of the information
-    def validate_all(self):
-        self.validate_start()
-        self.validate_error_func()
-        self.validate_tokens()
-        self.validate_precedence()
-        self.validate_pfunctions()
-        self.validate_files()
-        return self.error
-
-    # Compute a signature over the grammar
-    def signature(self):
-        try:
-            from hashlib import md5
-        except ImportError:
-            from md5 import md5
-        try:
-            sig = md5()
-            if self.start:
-                sig.update(self.start.encode('latin-1'))
-            if self.prec:
-                sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
-            if self.tokens:
-                sig.update(" ".join(self.tokens).encode('latin-1'))
-            for f in self.pfuncs:
-                if f[3]:
-                    sig.update(f[3].encode('latin-1'))
-        except (TypeError,ValueError):
-            pass
-        return sig.digest()
-
-    # -----------------------------------------------------------------------------
-    # validate_file()
-    #
-    # This method checks to see if there are duplicated p_rulename() functions
-    # in the parser module file. Without this function, it is really easy for
-    # users to make mistakes by cutting and pasting code fragments (and it's a real
-    # bugger to try and figure out why the resulting parser doesn't work). Therefore,
-    # we just do a little regular expression pattern matching of def statements
-    # to try and detect duplicates.
-    # -----------------------------------------------------------------------------
-
-    def validate_files(self):
-        # Match def p_funcname(
-        fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
-
-        for filename in self.files.keys():
-            base,ext = os.path.splitext(filename)
-            if ext != '.py': return 1          # No idea. Assume it's okay.
-
-            try:
-                f = open(filename)
-                lines = f.readlines()
-                f.close()
-            except IOError:
-                continue
-
-            counthash = { }
-            for linen,l in enumerate(lines):
-                linen += 1
-                m = fre.match(l)
-                if m:
-                    name = m.group(1)
-                    prev = counthash.get(name)
-                    if not prev:
-                        counthash[name] = linen
-                    else:
-                        self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)
-
-    # Get the start symbol
-    def get_start(self):
-        self.start = self.pdict.get('start')
-
-    # Validate the start symbol
-    def validate_start(self):
-        if self.start is not None:
-            if not isinstance(self.start,str):
-                self.log.error("'start' must be a string")
-
-    # Look for error handler
-    def get_error_func(self):
-        self.error_func = self.pdict.get('p_error')
-
-    # Validate the error function
-    def validate_error_func(self):
-        if self.error_func:
-            if isinstance(self.error_func,types.FunctionType):
-                ismethod = 0
-            elif isinstance(self.error_func, types.MethodType):
-                ismethod = 1
-            else:
-                self.log.error("'p_error' defined, but is not a function or method")
-                self.error = 1
-                return
-
-            eline = func_code(self.error_func).co_firstlineno
-            efile = func_code(self.error_func).co_filename
-            self.files[efile] = 1
-
-            if (func_code(self.error_func).co_argcount != 1+ismethod):
-                self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
-                self.error = 1
-
-    # Get the tokens map
-    def get_tokens(self):
-        tokens = self.pdict.get("tokens",None)
-        if not tokens:
-            self.log.error("No token list is defined")
-            self.error = 1
-            return
-
-        if not isinstance(tokens,(list, tuple)):
-            self.log.error("tokens must be a list or tuple")
-            self.error = 1
-            return
-
-        if not tokens:
-            self.log.error("tokens is empty")
-            self.error = 1
-            return
-
-        self.tokens = tokens
-
-    # Validate the tokens
-    def validate_tokens(self):
-        # Validate the tokens.
-        if 'error' in self.tokens:
-            self.log.error("Illegal token name 'error'. Is a reserved word")
-            self.error = 1
-            return
-
-        terminals = {}
-        for n in self.tokens:
-            if n in terminals:
-                self.log.warning("Token '%s' multiply defined", n)
-            terminals[n] = 1
-
-    # Get the precedence map (if any)
-    def get_precedence(self):
-        self.prec = self.pdict.get("precedence",None)
-
-    # Validate and parse the precedence map
-    def validate_precedence(self):
-        preclist = []
-        if self.prec:
-            if not isinstance(self.prec,(list,tuple)):
-                self.log.error("precedence must be a list or tuple")
-                self.error = 1
-                return
-            for level,p in enumerate(self.prec):
-                if not isinstance(p,(list,tuple)):
-                    self.log.error("Bad precedence table")
-                    self.error = 1
-                    return
-
-                if len(p) < 2:
-                    self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
-                    self.error = 1
-                    return
-                assoc = p[0]
-                if not isinstance(assoc,str):
-                    self.log.error("precedence associativity must be a string")
-                    self.error = 1
-                    return
-                for term in p[1:]:
-                    if not isinstance(term,str):
-                        self.log.error("precedence items must be strings")
-                        self.error = 1
-                        return
-                preclist.append((term,assoc,level+1))
-        self.preclist = preclist
-
-    # Get all p_functions from the grammar
-    def get_pfunctions(self):
-        p_functions = []
-        for name, item in self.pdict.items():
-            if name[:2] != 'p_': continue
-            if name == 'p_error': continue
-            if isinstance(item,(types.FunctionType,types.MethodType)):
-                line = func_code(item).co_firstlineno
-                file = func_code(item).co_filename
-                p_functions.append((line,file,name,item.__doc__))
-
-        # Sort all of the actions by line number
-        p_functions.sort()
-        self.pfuncs = p_functions
-
-
-    # Validate all of the p_functions
-    def validate_pfunctions(self):
-        grammar = []
-        # Check for non-empty symbols
-        if len(self.pfuncs) == 0:
-            self.log.error("no rules of the form p_rulename are defined")
-            self.error = 1
-            return
-
-        for line, file, name, doc in self.pfuncs:
-            func = self.pdict[name]
-            if isinstance(func, types.MethodType):
-                reqargs = 2
-            else:
-                reqargs = 1
-            if func_code(func).co_argcount > reqargs:
-                self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
-                self.error = 1
-            elif func_code(func).co_argcount < reqargs:
-                self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
-                self.error = 1
-            elif not func.__doc__:
-                self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
-            else:
-                try:
-                    parsed_g = parse_grammar(doc,file,line)
-                    for g in parsed_g:
-                        grammar.append((name, g))
-                except SyntaxError:
-                    e = sys.exc_info()[1]
-                    self.log.error(str(e))
-                    self.error = 1
-
-                # Looks like a valid grammar rule
-                # Mark the file in which defined.
-                self.files[file] = 1
-
-        # Secondary validation step that looks for p_ definitions that are not functions
-        # or functions that look like they might be grammar rules.
-
-        for n,v in self.pdict.items():
-            if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
-            if n[0:2] == 't_': continue
-            if n[0:2] == 'p_' and n != 'p_error':
-                self.log.warning("'%s' not defined as a function", n)
-            if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
-                (isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
-                try:
-                    doc = v.__doc__.split(" ")
-                    if doc[1] == ':':
-                        self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
-                                         func_code(v).co_filename, func_code(v).co_firstlineno,n)
-                except Exception:
-                    pass
-
-        self.grammar = grammar
-
-# -----------------------------------------------------------------------------
-# yacc(module)
-#
-# Build a parser
-# -----------------------------------------------------------------------------
-
-def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
-         check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
-         debuglog=None, errorlog = None, picklefile=None):
-
-    global parse        # Reference to the parsing method of the last built parser
-
-    # If pickling is enabled, table files are not created
-
-    if picklefile:
-        write_tables = 0
-
-    if errorlog is None:
-        errorlog = PlyLogger(sys.stderr)
-
-    # Get the module dictionary used for the parser
-    if module:
-        _items = [(k,getattr(module,k)) for k in dir(module)]
-        pdict = dict(_items)
-    else:
-        pdict = get_caller_module_dict(2)
-
-    # Collect parser information from the dictionary
-    pinfo = ParserReflect(pdict,log=errorlog)
-    pinfo.get_all()
-
-    if pinfo.error:
-        raise YaccError("Unable to build parser")
-
-    # Check signature against table files (if any)
-    signature = pinfo.signature()
-
-    # Read the tables
-    try:
-        lr = LRTable()
-        if picklefile:
-            read_signature = lr.read_pickle(picklefile)
-        else:
-            read_signature = lr.read_table(tabmodule,outputdir)
-        if optimize or (read_signature == signature):
-            try:
-                lr.bind_callables(pinfo.pdict)
-                parser = LRParser(lr,pinfo.error_func)
-                parse = parser.parse
-                return parser
-            except Exception:
-                e = sys.exc_info()[1]
-                errorlog.warning("There was a problem loading the table file: %s", repr(e))
-    except VersionError:
-        e = sys.exc_info()
-        errorlog.warning(str(e))
-    except Exception:
-        pass
-
-    if debuglog is None:
-        if debug:
-            debuglog = PlyLogger(open(debugfile,"w"))
-        else:
-            debuglog = NullLogger()
-
-    debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)
-
-
-    errors = 0
-
-    # Validate the parser information
-    if pinfo.validate_all():
-        raise YaccError("Unable to build parser")
-
-    if not pinfo.error_func:
-        errorlog.warning("no p_error() function is defined")
-
-    # Create a grammar object
-    grammar = Grammar(pinfo.tokens)
-
-    # Set precedence level for terminals
-    for term, assoc, level in pinfo.preclist:
-        try:
-            grammar.set_precedence(term,assoc,level)
-        except GrammarError:
-            e = sys.exc_info()[1]
-            errorlog.warning("%s",str(e))
-
-    # Add productions to the grammar
-    for funcname, gram in pinfo.grammar:
-        file, line, prodname, syms = gram
-        try:
-            grammar.add_production(prodname,syms,funcname,file,line)
-        except GrammarError:
-            e = sys.exc_info()[1]
-            errorlog.error("%s",str(e))
-            errors = 1
-
-    # Set the grammar start symbols
-    try:
-        if start is None:
-            grammar.set_start(pinfo.start)
-        else:
-            grammar.set_start(start)
-    except GrammarError:
-        e = sys.exc_info()[1]
-        errorlog.error(str(e))
-        errors = 1
-
-    if errors:
-        raise YaccError("Unable to build parser")
-
-    # Verify the grammar structure
-    undefined_symbols = grammar.undefined_symbols()
-    for sym, prod in undefined_symbols:
-        errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
-        errors = 1
-
-    unused_terminals = grammar.unused_terminals()
-    if unused_terminals:
-        debuglog.info("")
-        debuglog.info("Unused terminals:")
-        debuglog.info("")
-        for term in unused_terminals:
-            errorlog.warning("Token '%s' defined, but not used", term)
-            debuglog.info("    %s", term)
-
-    # Print out all productions to the debug log
-    if debug:
-        debuglog.info("")
-        debuglog.info("Grammar")
-        debuglog.info("")
-        for n,p in enumerate(grammar.Productions):
-            debuglog.info("Rule %-5d %s", n, p)
-
-    # Find unused non-terminals
-    unused_rules = grammar.unused_rules()
-    for prod in unused_rules:
-        errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)
-
-    if len(unused_terminals) == 1:
-        errorlog.warning("There is 1 unused token")
-    if len(unused_terminals) > 1:
-        errorlog.warning("There are %d unused tokens", len(unused_terminals))
-
-    if len(unused_rules) == 1:
-        errorlog.warning("There is 1 unused rule")
-    if len(unused_rules) > 1:
-        errorlog.warning("There are %d unused rules", len(unused_rules))
-
-    if debug:
-        debuglog.info("")
-        debuglog.info("Terminals, with rules where they appear")
-        debuglog.info("")
-        terms = list(grammar.Terminals)
-        terms.sort()
-        for term in terms:
-            debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))
-
-        debuglog.info("")
-        debuglog.info("Nonterminals, with rules where they appear")
-        debuglog.info("")
-        nonterms = list(grammar.Nonterminals)
-        nonterms.sort()
-        for nonterm in nonterms:
-            debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
-        debuglog.info("")
-
-    if check_recursion:
-        unreachable = grammar.find_unreachable()
-        for u in unreachable:
-            errorlog.warning("Symbol '%s' is unreachable",u)
-
-        infinite = grammar.infinite_cycles()
-        for inf in infinite:
-            errorlog.error("Infinite recursion detected for symbol '%s'", inf)
-            errors = 1
-
-    unused_prec = grammar.unused_precedence()
-    for term, assoc in unused_prec:
-        errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
-        errors = 1
-
-    if errors:
-        raise YaccError("Unable to build parser")
-
-    # Run the LRGeneratedTable on the grammar
-    if debug:
-        errorlog.debug("Generating %s tables", method)
-
-    lr = LRGeneratedTable(grammar,method,debuglog)
-
-    if debug:
-        num_sr = len(lr.sr_conflicts)
-
-        # Report shift/reduce and reduce/reduce conflicts
-        if num_sr == 1:
-            errorlog.warning("1 shift/reduce conflict")
-        elif num_sr > 1:
-            errorlog.warning("%d shift/reduce conflicts", num_sr)
-
-        num_rr = len(lr.rr_conflicts)
-        if num_rr == 1:
-            errorlog.warning("1 reduce/reduce conflict")
-        elif num_rr > 1:
-            errorlog.warning("%d reduce/reduce conflicts", num_rr)
-
-    # Write out conflicts to the output file
-    if debug and (lr.sr_conflicts or lr.rr_conflicts):
-        debuglog.warning("")
-        debuglog.warning("Conflicts:")
-        debuglog.warning("")
-
-        for state, tok, resolution in lr.sr_conflicts:
-            debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution)
-
-        already_reported = {}
-        for state, rule, rejected in lr.rr_conflicts:
-            if (state,id(rule),id(rejected)) in already_reported:
-                continue
-            debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
-            debuglog.warning("rejected rule (%s) in state %d", rejected,state)
-            errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
-            errorlog.warning("rejected rule (%s) in state %d", rejected, state)
-            already_reported[state,id(rule),id(rejected)] = 1
-
-        warned_never = []
-        for state, rule, rejected in lr.rr_conflicts:
-            if not rejected.reduced and (rejected not in warned_never):
-                debuglog.warning("Rule (%s) is never reduced", rejected)
-                errorlog.warning("Rule (%s) is never reduced", rejected)
-                warned_never.append(rejected)
-
-    # Write the table file if requested
-    if write_tables:
-        lr.write_table(tabmodule,outputdir,signature)
-
-    # Write a pickled version of the tables
-    if picklefile:
-        lr.pickle_table(picklefile,signature)
-
-    # Build the parser
-    lr.bind_callables(pinfo.pdict)
-    parser = LRParser(lr,pinfo.error_func)
-
-    parse = parser.parse
-    return parser
diff --git a/pyverilog/vparser/plyparser.py b/pyverilog/vparser/plyparser.py
deleted file mode 100644
index f9624a6..0000000
--- a/pyverilog/vparser/plyparser.py
+++ /dev/null
@@ -1,55 +0,0 @@
-"""
-    Copyright 2008-2011, Eli Bendersky
-
-    License: BSD
-
-    PLYParser class and other utilites for simplifying programming parsers with PLY
-"""
-
-
-class Coord(object):
-    """ Coordinates of a syntactic element. Consists of:
-        - File name
-        - Line number
-        - (optional) column number, for the Lexer
-    """
-
-    def __init__(self, file, line, column=None):
-        self.file = file
-        self.line = line
-        self.column = column
-
-    def __str__(self):
-        str = "%s:%s" % (self.file, self.line)
-        if self.column:
-            str += ":%s" % self.column
-        return str
-
-
-class ParseError(Exception):
-    pass
-
-
-class PLYParser(object):
-    def _create_opt_rule(self, rulename):
-        """ Given a rule name, creates an optional ply.yacc rule
-            for it. The name of the optional rule is
-            <rulename>_opt
-        """
-        optname = rulename + '_opt'
-
-        def optrule(self, p):
-            p[0] = p[1]
-
-        optrule.__doc__ = '%s : empty\n| %s' % (optname, rulename)
-        optrule.__name__ = 'p_%s' % optname
-        setattr(self.__class__, optrule.__name__, optrule)
-
-    def _coord(self, lineno, column=None):
-        return Coord(
-            file=self.lexer.filename,
-            line=lineno,
-            column=column)
-
-    def _parse_error(self, msg, coord):
-        raise ParseError("%s: %s" % (coord, msg))
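The deleted `PLYParser._create_opt_rule` above builds optional grammar rules at runtime by synthesizing a `p_<rulename>_opt` action function and attaching it to the parser class. A minimal standalone sketch of that technique (the `MyParser` class and the `portlist` rule name are hypothetical and only for illustration):

```python
# Sketch of the opt-rule trick from the deleted PLYParser class: given a rule
# name, synthesize a "<rulename>_opt" production that matches either the rule
# itself or the empty production. ply.yacc reads each action function's
# grammar from its docstring, so setting __doc__ and __name__ and attaching
# the function to the class is what registers the rule.
def create_opt_rule(cls, rulename):
    optname = rulename + '_opt'

    def optrule(self, p):
        p[0] = p[1]

    optrule.__doc__ = '%s : empty\n| %s' % (optname, rulename)
    optrule.__name__ = 'p_%s' % optname
    setattr(cls, optrule.__name__, optrule)


class MyParser(object):  # hypothetical PLY parser class
    def p_empty(self, p):
        'empty :'
        p[0] = None


create_opt_rule(MyParser, 'portlist')  # registers MyParser.p_portlist_opt
```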
diff --git a/pyverilog/vparser/preprocessor.py b/pyverilog/vparser/preprocessor.py
index 8dc8f15..4a45548 100644
--- a/pyverilog/vparser/preprocessor.py
+++ b/pyverilog/vparser/preprocessor.py
@@ -24,6 +24,7 @@ from __future__ import print_function
 
 import sys
 import os
+import tempfile
 import subprocess
@@ -33,7 +34,28 @@ def __init__(self, filelist, outputfile='pp.out', include=None, define=None):
         if not isinstance(filelist, (tuple, list)):
             filelist = list(filelist)
 
-        self.filelist = filelist
+        # Elements of `filelist` can be either paths to Verilog source files
+        # or Verilog code given as Python strings. The loop below iterates
+        # over these sources and normalizes all of them into files.
+        #
+        # Verilog code given as a Python string is written to a temporary
+        # file so that it can later be passed to `iverilog`.
+        self.temp_files_paths = []
+        self.filelist = []
+
+        for source in filelist:
+            # If `source` is Verilog code given as a Python string
+            if not os.path.isfile(source):
+                temp_fd, temp_path = tempfile.mkstemp(prefix="pyverilog_temp_", suffix=".v")
+                with open(temp_fd, 'w') as f:
+                    f.write(source)
+
+                self.temp_files_paths.append(temp_path)
+
+            else:  # Otherwise, treat it as a path to an existing Verilog file
+                self.filelist.append(source)
+
+        self.filelist += self.temp_files_paths
 
         iverilog = os.environ.get('PYVERILOG_IVERILOG')
         if iverilog is None:
@@ -63,6 +85,10 @@ def preprocess(self):
         cmd = self.iv + list(self.filelist)
         subprocess.call(cmd)
 
+        # Remove the temporary files that were created
+        for temp_file_path in self.temp_files_paths:
+            os.remove(temp_file_path)
+
 
 def preprocess(
     filelist,
diff --git a/setup.py b/setup.py
index dd39fef..695b3f1 100644
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@ def read(filename):
     packages=find_packages(),
     package_data={'pyverilog': ['VERSION'],
                   'pyverilog.ast_code_generator': ['template/*'], },
-    install_requires=['Jinja2>=2.10'],
+    install_requires=['Jinja2>=2.10', 'ply>=3.4'],
     extras_require={
         'test': ['pytest>=3.8.1', 'pytest-pythonpath>=0.7.3'],
         'graph': ['pygraphviz>=1.3.1'],
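With the preprocessor change above, a source that is not a path to an existing file is written to a temporary `.v` file and handed to `iverilog` unchanged. A minimal usage sketch, assuming Icarus Verilog is installed on PATH and that the module-level `preprocess()` helper keeps its existing signature:

```python
from pyverilog.vparser.preprocessor import preprocess

# Verilog code as a plain Python string; no file on disk is needed.
code = '''
module blinkled(CLK, RST, LED);
  input CLK, RST;
  output reg [7:0] LED;
  always @(posedge CLK) begin
    if(RST) LED <= 0;
    else LED <= LED + 1;
  end
endmodule
'''

# The string is normalized into a temporary *.v file internally and
# preprocessed like any ordinary source file; the temporary file is
# removed again after iverilog has run.
text = preprocess([code])
print(text)
```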