OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """ Lexer for PPAPI IDL | 6 """ Lexer for PPAPI IDL |
7 | 7 |
8 The lexer uses the PLY library to build a tokenizer which understands both | 8 The lexer uses the PLY library to build a tokenizer which understands both |
9 WebIDL and Pepper tokens. | 9 WebIDL and Pepper tokens. |
10 | 10 |
(...skipping 211 matching lines...) |
222 def GetTokens(self): | 222 def GetTokens(self): |
223 outlist = [] | 223 outlist = [] |
224 while True: | 224 while True: |
225 t = self.lexobj.token() | 225 t = self.lexobj.token() |
226 if not t: | 226 if not t: |
227 break | 227 break |
228 outlist.append(t) | 228 outlist.append(t) |
229 return outlist | 229 return outlist |
230 | 230 |
231 def Tokenize(self, data, filename='__no_file__'): | 231 def Tokenize(self, data, filename='__no_file__'): |
| 232 self.lexobj.lineno = 1 |
232 self.lexobj.filename = filename | 233 self.lexobj.filename = filename |
233 self.lexobj.input(data) | 234 self.lexobj.input(data) |
234 self.lines = data.split('\n') | 235 self.lines = data.split('\n') |
235 | 236 |
236 def __init__(self): | 237 def __init__(self): |
237 self.index = [0] | 238 self.index = [0] |
238 self._lex_errors = 0 | 239 self._lex_errors = 0 |
239 self.linex = [] | 240 self.linex = [] |
240 self.filename = None | 241 self.filename = None |
241 self.lexobj = lex.lex(object=self, lextab=None, optimize=0) | 242 self.lexobj = lex.lex(object=self, lextab=None, optimize=0) |
242 | 243 |
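Note on the added line 232: PLY's lexer keeps its lineno counter across calls to input(), so without an explicit reset, a second Tokenize() call on the same IDLLexer would report line numbers offset by however far the previous buffer advanced the counter. A minimal standalone sketch of the behavior (the token rules here are illustrative, not the PPAPI lexer's real grammar):

    #!/usr/bin/env python
    # Sketch: ply.lex's lineno counter persists across input() calls.
    import ply.lex as lex

    tokens = ('WORD',)
    t_WORD = r'\w+'

    def t_newline(t):
        r'\n+'
        t.lexer.lineno += len(t.value)

    def t_error(t):
        t.lexer.skip(1)

    lexer = lex.lex()

    lexer.input('a\nb')
    while lexer.token():
        pass
    print lexer.lineno           # 2: advanced past one newline

    lexer.input('c')             # new buffer, but no reset
    print lexer.token().lineno   # still 2

    lexer.lineno = 1             # the fix under review
    lexer.input('c')
    print lexer.token().lineno   # 1, as expected

This matches the intent of new line 232: each Tokenize() call should start counting from line 1 of the freshly supplied data.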