This very document is itself the Python source for Interscript, and that leads to interesting bootstrapping problems in development.
#line 13 "python_tangler.ipk"
#---------------------------------------------------------
# python tangler: write to a file, insert source line numbers
# using '#line ' comments
# works for Python
from interscript.tanglers.base import tangler_base
import re
import string
from interscript.tokenisers.python import python_tokeniser
from tokenize import TokenError # needed for the except clause in writeline
import keyword
import token

class python_tangler(tangler_base):
    def __init__(self,sink,weaver):
        tangler_base.__init__(self,sink,weaver)
        self.matchPOD = re.compile('^ *#@(.*)$')
        self.matchcomment = re.compile('^([^#]*)#.*$')
        self.excludeid = []
        self.userdict = {}
        self.tokeniser = python_tokeniser()
        self.language = 'python'

    def __del__(self):
        # flush the tokeniser; it can fail if the source ended
        # inside a string or an open bracket
        try:
            tokens = self.tokeniser.close()
        except:
            print 'Tokeniser error'
            print 'closing tokeniser for',self.sink.name

    def writeline(self,data,file,count,inhibit_sref=0):
        match = self.matchPOD.match(data)
        if match:
            # a '#@' line is an inline command: execute it
            # instead of tangling it
            command = match.group(1)
            py_exec(command,file,count,globals(),self.userdict)
        else:
            self.weaver.set_fc_anchor(file,count)
            # special hack to preserve leading #! line
            if self.sink.lines_written == 0 and len(data)>2:
                inhibit_sref = data[:2]=='#!'
            tangler_base.writeline(self,data,file,count,inhibit_sref)

            tokens = []
            try:
                tokens = self.tokeniser.tokenize(data+'\n')
            except TokenError, e:
                print 'Tokeniser error',e
                print 'in file',file,'line',count
                print 'data['+data+']'

            # index every identifier on the line; a name following
            # 'class' or 'def' is also recorded as a definition
            dst_count = self.sink.lines_written
            dst_file = self.sink.name
            class_name = 0
            function_name = 0
            level = 0
            for kind,id,lstart,lend,dummy in tokens:
                if kind == token.INDENT:
                    level = level + 1
                elif kind == token.DEDENT:
                    level = level - 1
                if kind == token.NAME:
                    if not (keyword.iskeyword(id) or id in self.excludeid):
                        if not self.pass_frame.ids.has_key(id): self.pass_frame.ids[id]=[]
                        self.pass_frame.ids[id].append((file,count,dst_file,dst_count))
                        if class_name:
                            #print 'class',id
                            if not self.pass_frame.classes.has_key(id): self.pass_frame.classes[id]=[]
                            self.pass_frame.classes[id].append((file,count,dst_file,dst_count))
                            class_name = 0
                        elif function_name:
                            if not self.pass_frame.functions.has_key(id): self.pass_frame.functions[id]=[]
                            self.pass_frame.functions[id].append((file,count,dst_file,dst_count))
                            function_name = 0
                    elif id == 'class':
                        class_name = 1
                    elif id == 'def':
                        function_name = 1

    def write_comment(self,line,file,count):
        self.writeline('# '+line,file,count)

    def start_section(self, file, count):
        data = '#line '+str(count)+' '+'"'+file+'"'
        self._writeline(data)
        if self.weaver:
            self.weaver.echotangle(self.sink.lines_written,data)

    def get_comment_tangler(self):
        return script_comment_tangler(self.sink)

    def get_string_tangler(self,eol,width):
        return c_string_tangler(self.sink,self.get_weaver(),eol,width)
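
The heart of writeline is the token loop, which builds the identifier, class and function cross-reference tables the weaver uses for indexing. The sketch below shows the same technique in modern Python using the standard tokenize module; index_line and the index dictionary are illustrative stand-ins for the tangler's pass_frame tables, not part of Interscript.

    import io
    import keyword
    import token
    import tokenize

    def index_line(data, file, count, index):
        # Tokenise one source line and record every identifier; a name
        # that directly follows 'class' or 'def' is also indexed as a
        # definition, mirroring the class_name/function_name flags above.
        expect = None
        for tok in tokenize.generate_tokens(io.StringIO(data + '\n').readline):
            if tok.type != token.NAME:
                continue
            if keyword.iskeyword(tok.string):
                expect = tok.string if tok.string in ('class', 'def') else None
                continue
            # every plain identifier goes into the general index
            index.setdefault('ids', {}).setdefault(tok.string, []).append((file, count))
            # the name right after 'class'/'def' is also a definition
            if expect == 'class':
                index.setdefault('classes', {}).setdefault(tok.string, []).append((file, count))
            elif expect == 'def':
                index.setdefault('functions', {}).setdefault(tok.string, []).append((file, count))
            expect = None

    index = {}
    index_line('class python_tangler(tangler_base):', 'python_tangler.ipk', 13, index)
    # index['classes']['python_tangler'] and index['ids']['tangler_base']
    # each now hold [('python_tangler.ipk', 13)]

The real tangler records four-tuples (source file, source line, tangled file, tangled line) rather than pairs, so the woven documentation can link each identifier both to where it appears in the document and to where it lands in the generated code.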