1 | #!/usr/bin/env python2
|
2 | """
|
3 | lazylex/html.py - Wrapper around HTM8
|
4 |
|
5 | See doc/lazylex.md for details.
|
6 |
|
7 | """
|
8 | from __future__ import print_function
|
9 |
|
10 | from _devbuild.gen.htm8_asdl import (h8_id, h8_id_t, h8_id_str)
|
11 | from data_lang import htm8
|
12 | from data_lang.htm8 import (Lexer, TagLexer, LexError, ParseError, Output)
|
13 | from doctools.util import log
|
14 |
|
15 | try:
|
16 | from cStringIO import StringIO
|
17 | except ImportError:
|
18 | # for python3
|
19 | from io import StringIO # type: ignore
|
20 | import sys
|
21 |
|
22 | if sys.version_info.major == 2:
|
23 | from typing import List, Tuple, Iterator
|
24 |
|
25 |
|
def _Tokens(s, left_pos, right_pos):
    # type: (str, int, int) -> Iterator[Tuple[h8_id_t, int]]
    """Generate (token id, end position) pairs for a span of an HTM8 string.

    The final pair yielded is the EndOfStream token.

    Args:
      s: string to parse
      left_pos, right_pos: Optional span boundaries.
    """
    lx = Lexer(s, left_pos, right_pos)
    tok_id = None
    # Keep reading until the lexer reports end of stream; EndOfStream itself
    # is yielded to the caller before we stop.
    while tok_id != h8_id.EndOfStream:
        tok_id, pos = lx.Read()
        yield tok_id, pos
|
39 |
|
40 |
|
def ValidTokens(s, left_pos=0, right_pos=-1):
    # type: (str, int, int) -> Iterator[Tuple[h8_id_t, int]]
    """Yield tokens from _Tokens(), raising LexError on any Invalid token.

    Kept separate from _Tokens() on purpose: the raw generator may later get
    a 'yield' transformation, and exceptions could complicate that.

    Raises:
      LexError: when the lexer produces h8_id.Invalid.
    """
    start = left_pos
    for tok_id, end_pos in _Tokens(s, left_pos, right_pos):
        if tok_id == h8_id.Invalid:
            raise LexError('ValidTokens() got invalid token', s, start)
        yield tok_id, end_pos
        start = end_pos
|
55 |
|
56 |
|
def ReadUntilStartTag(it, tag_lexer, tag_name):
    # type: (Iterator[Tuple[h8_id_t, int]], TagLexer, str) -> Tuple[int, int]
    """Scan forward for the next <foo> start tag with the given name.

    Returns:
      (start, end) positions of the matching tag.

    Raises:
      ParseError: if the iterator is exhausted without finding the tag.

    Side effect: tag_lexer is RESET on every token consumed.
    """
    start = 0
    # A plain for-loop absorbs StopIteration, matching the original
    # while/next/except structure.
    for tok_id, end in it:
        tag_lexer.Reset(start, end)
        if tok_id == h8_id.StartTag and tag_lexer.GetTagName() == tag_name:
            return start, end
        start = end

    raise ParseError('No start tag %r' % tag_name)
|
78 |
|
79 |
|
def ReadUntilEndTag(it, tag_lexer, tag_name):
    # type: (Iterator[Tuple[h8_id_t, int]], TagLexer, str) -> Tuple[int, int]
    """Scan forward for the next </foo> end tag with the given name.

    Returns:
      (start, end) positions of the matching tag.

    Raises:
      ParseError: if the iterator is exhausted without finding the tag.

    Side effect: tag_lexer is RESET on every token consumed.
    """
    start = 0
    # A plain for-loop absorbs StopIteration, matching the original
    # while/next/except structure.
    for tok_id, end in it:
        tag_lexer.Reset(start, end)
        if tok_id == h8_id.EndTag and tag_lexer.GetTagName() == tag_name:
            return start, end
        start = end

    raise ParseError('No end tag %r' % tag_name)
|
101 |
|
102 |
|
# The five predefined character entities (as in XML): maps the entity name,
# without the surrounding '&' and ';', to the literal character it denotes.
# Used by ToText() to decode h8_id.CharEntity tokens.
CHAR_ENTITY = {
    'amp': '&',
    'lt': '<',
    'gt': '>',
    'quot': '"',
    'apos': "'",
}
|
110 |
|
111 |
|
def ToText(s, left_pos=0, right_pos=-1):
    # type: (str, int, int) -> str
    """Given HTML, return plain text by decoding entities like &gt; and &lt;.

    Used by:
      doctools/oils_doc.py: PygmentsPlugin
      doctools/help_gen.py: HelpIndexCards

    In the latter case, we could process some tags, like:

    - Blue Link (not clickable, but still useful)
    - Red X

    That should be html.ToAnsi.

    Raises:
      LexError: via ValidTokens(), on invalid input.
      AssertionError: on HexChar/DecChar references, which aren't handled yet.
    """
    buf = StringIO()
    out = Output(s, buf, left_pos, right_pos)

    start = left_pos
    for tok, end in ValidTokens(s, left_pos, right_pos):
        if tok == h8_id.CharEntity:
            # Strip the leading '&' and trailing ';' to get the entity name
            name = s[start + 1:end - 1]
            out.SkipTo(start)
            out.Print(CHAR_ENTITY[name])
            out.SkipTo(end)

        elif tok in (h8_id.RawData, h8_id.BadAmpersand, h8_id.BadGreaterThan,
                     h8_id.BadLessThan):
            # Literal text, including stray & > < that need no decoding
            out.SkipTo(start)
            out.PrintUntil(end)

        # Not handling these yet
        elif tok == h8_id.HexChar:
            raise AssertionError('Hex Char %r' % s[start:start + 20])

        elif tok == h8_id.DecChar:
            raise AssertionError('Dec Char %r' % s[start:start + 20])

        else:
            # Skip everything else (tags, comments, etc.)
            out.SkipTo(end)

        start = end

    out.PrintTheRest()
    return buf.getvalue()
|
160 |
|
161 |
|
# https://developer.mozilla.org/en-US/docs/Glossary/Void_element
# Tags that never take a closing tag.  Validate() treats them as
# self-closing when balancing tags (unless NO_SPECIAL_TAGS is set).
VOID_ELEMENTS = [
    'area',
    'base',
    'br',
    'col',
    'embed',
    'hr',
    'img',
    'input',
    'link',
    'meta',
    'param',
    'source',
    'track',
    'wbr',
]

# Bit flags for the 'flags' argument of Validate().
LEX_ATTRS = 1 << 1
LEX_QUOTED_VALUES = 1 << 2  # href="?x=42&amp;y=99"
NO_SPECIAL_TAGS = 1 << 3  # <script> <style>, VOID tags, etc.
BALANCED_TAGS = 1 << 4  # are tags balanced?
|
184 |
|
185 |
|
def Validate(contents, flags, counters):
    # type: (str, int, Counters) -> None
    """Lex 'contents' as HTM8 and accumulate statistics into 'counters'.

    Args:
      contents: the HTML/XML document text.
      flags: bitwise OR of LEX_ATTRS, LEX_QUOTED_VALUES, NO_SPECIAL_TAGS,
        BALANCED_TAGS.
      counters: Counters instance, mutated in place.

    Raises:
      LexError: on an invalid token.
      ParseError: when BALANCED_TAGS is set and tags don't balance.
    """

    attr_lx = htm8.AttrLexer(contents)

    no_special_tags = bool(flags & NO_SPECIAL_TAGS)
    lx = htm8.Lexer(contents, no_special_tags=no_special_tags)
    tokens = []
    start_pos = 0
    tag_stack = []  # open tags awaiting a closing tag (BALANCED_TAGS only)
    while True:
        tok_id, end_pos = lx.Read()
        #log('TOP %s %r', h8_id_str(tok_id), contents[start_pos:end_pos])

        if tok_id == h8_id.Invalid:
            raise LexError('Validate() got invalid token', contents, start_pos)
        if tok_id == h8_id.EndOfStream:
            break

        tokens.append((tok_id, end_pos))

        if tok_id == h8_id.StartEndTag:
            counters.num_start_end_tags += 1

            # Re-lex the tag body to count its attributes
            attr_lx.Init(tok_id, lx.TagNamePos(), end_pos)
            all_attrs = htm8.AllAttrsRaw(attr_lx)
            counters.num_attrs += len(all_attrs)
            # TODO: val_lexer.NumTokens() can be replaced with tokens_out

        elif tok_id == h8_id.StartTag:
            counters.num_start_tags += 1

            attr_lx.Init(tok_id, lx.TagNamePos(), end_pos)
            all_attrs = htm8.AllAttrsRaw(attr_lx)
            counters.num_attrs += len(all_attrs)

            #counters.debug_attrs.extend(all_attrs)

            if flags & BALANCED_TAGS:
                tag_name = lx.CanonicalTagName()
                if flags & NO_SPECIAL_TAGS:
                    # XML-style: every start tag needs a closing tag
                    tag_stack.append(tag_name)
                else:
                    # e.g. <meta> is considered self-closing, like <meta/>
                    if tag_name not in VOID_ELEMENTS:
                        tag_stack.append(tag_name)

                counters.max_tag_stack = max(counters.max_tag_stack,
                                             len(tag_stack))
        elif tok_id == h8_id.EndTag:
            if flags & BALANCED_TAGS:
                try:
                    expected = tag_stack.pop()
                except IndexError:
                    raise ParseError('Tag stack empty',
                                     s=contents,
                                     start_pos=start_pos)

                actual = lx.CanonicalTagName()
                if expected != actual:
                    raise ParseError(
                        'Got unexpected closing tag %r; opening tag was %r' %
                        (contents[start_pos:end_pos], expected),
                        s=contents,
                        start_pos=start_pos)

        start_pos = end_pos

    # Anything left on the stack was never closed
    if len(tag_stack) != 0:
        raise ParseError('Missing closing tags at end of doc: %s' %
                         ' '.join(tag_stack),
                         s=contents,
                         start_pos=start_pos)

    counters.num_tokens += len(tokens)
|
261 |
|
262 |
|
def ToXml(htm8_str):
    # type: (str) -> str
    """Translate an HTM8 string toward well-formed XML.

    Currently this escapes stray '&' and '>' in raw data; the TODO list
    below describes the transformations that remain.

    Raises:
      LexError: on an invalid token.
    """

    # TODO:
    # 1. Lex it
    # 2. < & > must be escaped
    #    a. in raw data
    #    b. in quoted strings
    # 3. <script> turned into CDATA
    # 4. void tags turned into self-closing tags
    # 5. case-sensitive tag matching - not sure about this

    tag_lexer = TagLexer(htm8_str)

    f = StringIO()
    out = Output(htm8_str, f)

    lx = Lexer(htm8_str)

    pos = 0
    while True:
        tok_id, end_pos = lx.Read()

        if tok_id == h8_id.Invalid:
            raise LexError('ToXml() got invalid token', htm8_str, pos)
        if tok_id == h8_id.EndOfStream:
            break

        if tok_id in (h8_id.RawData, h8_id.CharEntity, h8_id.HexChar,
                      h8_id.DecChar):
            # Already valid XML; copy through unchanged
            out.PrintUntil(end_pos)
        elif tok_id in (h8_id.StartTag, h8_id.StartEndTag):
            tag_lexer.Reset(pos, end_pos)
            # TODO: reduce allocations here
            all_attrs = tag_lexer.AllAttrsRawSlice()
            for name, val_start, val_end in all_attrs:
                #val_lexer.Reset(val_start, val_end)
                pass
                # TODO: get the kind of string
                #
                # Quoted: we need to replace & with &amp; and < with &lt;
                #         note > is not allowed
                # Unquoted: right now, we can just surround with double quotes
                #           because we don't allow any bad chars
                # Empty   : add "", so empty= becomes =""
                # Missing : add ="", so missing becomes missing=""

            tag_name = lx.CanonicalTagName()
            if tok_id == h8_id.StartTag and tag_name in VOID_ELEMENTS:
                # TODO: instead of closing >, print />
                pass

        elif tok_id == h8_id.BadAmpersand:
            #out.SkipTo(pos)
            # BUG FIX: a bare '&' is not well-formed XML; emit the escaped
            # form instead of re-printing the same character we skip.
            out.Print('&amp;')
            out.SkipTo(end_pos)

        elif tok_id == h8_id.BadGreaterThan:
            #out.SkipTo(pos)
            # Escape '>' as well, per item 2 of the TODO list above.
            out.Print('&gt;')
            out.SkipTo(end_pos)
        else:
            out.PrintUntil(end_pos)

        pos = end_pos

    out.PrintTheRest()
    return f.getvalue()
|
331 |
|
332 |
|
class Counters(object):
    """Mutable tally of statistics accumulated by Validate()."""

    def __init__(self):
        # type: () -> None
        # Token and tag counts
        self.num_tokens = 0
        self.num_start_tags = 0
        self.num_start_end_tags = 0
        # Attribute counts
        self.num_attrs = 0
        self.num_val_tokens = 0
        # Deepest nesting of open tags observed (BALANCED_TAGS mode)
        self.max_tag_stack = 0

        #self.debug_attrs = []
|
345 |
|
346 |
|
def main(argv):
    # type: (List[str]) -> int
    """Command-line driver.

    Actions:
      tokens: lex stdin and log each token.
      lex-htm8 / parse-htm8 / parse-xml: read filenames from stdin, validate
        each file, and log aggregate statistics.  Returns 1 on any error.
      todo: placeholder for future algorithms.
    """
    action = argv[1]

    if action == 'tokens':
        contents = sys.stdin.read()

        lx = Lexer(contents)
        start_pos = 0
        while True:
            tok_id, end_pos = lx.Read()
            if tok_id == h8_id.Invalid:
                raise LexError('Invalid token', contents, start_pos)
            if tok_id == h8_id.EndOfStream:
                break

            frag = contents[start_pos:end_pos]
            log('%d %s %r', end_pos, h8_id_str(tok_id), frag)
            start_pos = end_pos

        return 0

    elif action in ('lex-htm8', 'parse-htm8', 'parse-xml'):

        errors = []
        counters = Counters()

        flags = LEX_ATTRS | LEX_QUOTED_VALUES
        if action.startswith('parse-'):
            flags |= BALANCED_TAGS
        if action == 'parse-xml':
            flags |= NO_SPECIAL_TAGS

        # One filename per line on stdin; validate each, collecting errors
        # rather than stopping at the first bad file.
        for line in sys.stdin:
            filename = line.strip()
            with open(filename) as f:
                contents = f.read()

            try:
                Validate(contents, flags, counters)
            except LexError as e:
                log('Lex error in %r: %s', filename, e)
                errors.append((filename, e))
            except ParseError as e:
                log('Parse error in %r: %s', filename, e)
                errors.append((filename, e))

        log('')
        log('%10d tokens', counters.num_tokens)
        log('%10d start/end tags', counters.num_start_end_tags)
        log('%10d start tags', counters.num_start_tags)
        log('%10d attrs', counters.num_attrs)
        log('%10d max tag stack depth', counters.max_tag_stack)
        log('%10d attr val tokens', counters.num_val_tokens)
        log('%10d errors', len(errors))
        if errors:
            return 1
        return 0

    elif action == 'todo':
        # Other algorithms:
        #
        # - select first subtree with given ID
        #   - this requires understanding the void tags I suppose
        # - select all subtrees that have a class
        # - materialize DOM

        # Safe-HTM8?  This is a filter
        return 0

    else:
        raise RuntimeError('Invalid action %r' % action)
|
421 |
|
422 |
|
if __name__ == '__main__':
    # Script entry point; exit status comes from main()
    sys.exit(main(sys.argv))
|