#!/usr/bin/env python2
"""
lazylex/html.py - Wrapper around HTM8

See doc/lazylex.md for details.

"""
from __future__ import print_function

from _devbuild.gen.htm8_asdl import (h8_id, h8_id_t, h8_id_str)
from data_lang import htm8
from data_lang.htm8 import (Lexer, TagLexer, LexError, ParseError, Output)
from doctools.util import log

try:
    from cStringIO import StringIO
except ImportError:
    # for Python 3
    from io import StringIO  # type: ignore
import sys

if sys.version_info.major == 2:
    from typing import List, Tuple, Iterator


def _Tokens(s, left_pos, right_pos):
    # type: (str, int, int) -> Iterator[Tuple[h8_id_t, int]]
    """
    Args:
      s: string to parse
      left_pos, right_pos: Optional span boundaries.
    """
    lx = Lexer(s, left_pos, right_pos)
    while True:
        tok_id, pos = lx.Read()
        yield tok_id, pos
        if tok_id == h8_id.EndOfStream:
            break


def ValidTokens(s, left_pos=0, right_pos=-1):
    # type: (str, int, int) -> Iterator[Tuple[h8_id_t, int]]
    """Wrapper around _Tokens() so callers don't have to handle Invalid.

    The two functions are kept separate because we may want to apply a
    'yield' transformation to _Tokens(), and exceptions might complicate
    that.
    """
    pos = left_pos
    for tok_id, end_pos in _Tokens(s, left_pos, right_pos):
        if tok_id == h8_id.Invalid:
            raise LexError('ValidTokens() got invalid token', s, pos)
        yield tok_id, end_pos
        pos = end_pos
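
# A minimal usage sketch: each yielded position is the END of a token, so
# the start of a token is the previous end position (this mirrors the
# 'tokens' action in main() below).
#
#     s = '<p>one &amp; two</p>'
#     pos = 0
#     for tok_id, end_pos in ValidTokens(s):
#         log('%s %r', h8_id_str(tok_id), s[pos:end_pos])
#         pos = end_pos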


def ReadUntilStartTag(it, tag_lexer, tag_name):
    # type: (Iterator[Tuple[h8_id_t, int]], TagLexer, str) -> Tuple[int, int]
    """Find the next <foo>, returning its (start, end) positions

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == h8_id.StartTag and tag_lexer.GetTagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No start tag %r' % tag_name)


def ReadUntilEndTag(it, tag_lexer, tag_name):
    # type: (Iterator[Tuple[h8_id_t, int]], TagLexer, str) -> Tuple[int, int]
    """Find the next </foo>, returning its (start, end) positions

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == h8_id.EndTag and tag_lexer.GetTagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No end tag %r' % tag_name)
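
# Sketch: pulling out the body of the first <title> element with the two
# helpers above.  The TagLexer constructor shown here is an assumption; its
# real signature lives in data_lang/htm8.py.
#
#     it = ValidTokens(s)
#     tag_lexer = TagLexer(s)  # assumed constructor
#     _, body_start = ReadUntilStartTag(it, tag_lexer, 'title')
#     body_end, _ = ReadUntilEndTag(it, tag_lexer, 'title')
#     title_body = s[body_start:body_end]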


CHAR_ENTITY = {
    'amp': '&',
    'lt': '<',
    'gt': '>',
    'quot': '"',
    'apos': "'",
}


def ToText(s, left_pos=0, right_pos=-1):
    # type: (str, int, int) -> str
    """Given HTML, return text by unquoting &gt; and &lt; etc.

    Used by:
      doctools/oils_doc.py: PygmentsPlugin
      doctools/help_gen.py: HelpIndexCards

    In the latter case, we could process some tags, like:

    - Blue Link (not clickable, but still useful)
    - Red X

    That should be html.ToAnsi.
    """
    f = StringIO()
    out = Output(s, f, left_pos, right_pos)

    pos = left_pos
    for tok_id, end_pos in ValidTokens(s, left_pos, right_pos):
        if tok_id in (h8_id.RawData, h8_id.BadAmpersand, h8_id.BadGreaterThan,
                      h8_id.BadLessThan):
            out.SkipTo(pos)
            out.PrintUntil(end_pos)

        elif tok_id == h8_id.CharEntity:  # &amp;
            entity = s[pos + 1:end_pos - 1]

            out.SkipTo(pos)
            out.Print(CHAR_ENTITY[entity])
            out.SkipTo(end_pos)

        # Not handling these yet
        elif tok_id == h8_id.HexChar:
            raise AssertionError('Hex Char %r' % s[pos:pos + 20])

        elif tok_id == h8_id.DecChar:
            raise AssertionError('Dec Char %r' % s[pos:pos + 20])

        else:
            # Skip everything else
            out.SkipTo(end_pos)

        pos = end_pos

    out.PrintTheRest()
    return f.getvalue()
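
# A small sketch of the result: only the five entities in CHAR_ENTITY are
# decoded, and &#x..; / &#..; currently hit the AssertionError above.
#
#     ToText('<b>1 &lt; 2</b>')  # => '1 < 2', with the tags dropped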


# https://developer.mozilla.org/en-US/docs/Glossary/Void_element
VOID_ELEMENTS = [
    'area',
    'base',
    'br',
    'col',
    'embed',
    'hr',
    'img',
    'input',
    'link',
    'meta',
    'param',
    'source',
    'track',
    'wbr',
]

LEX_ATTRS = 1 << 1
LEX_QUOTED_VALUES = 1 << 2  # href="?x=42&amp;y=99"
NO_SPECIAL_TAGS = 1 << 3  # <script> <style>, VOID tags, etc.
BALANCED_TAGS = 1 << 4  # are tags balanced?
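
# Typical flag combinations, mirroring main() below (the names here are
# just illustrative, not part of the module):
#
#     LEX = LEX_ATTRS | LEX_QUOTED_VALUES
#     PARSE_HTM8 = LEX | BALANCED_TAGS
#     PARSE_XML = PARSE_HTM8 | NO_SPECIAL_TAGS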


def Validate(contents, flags, counters):
    # type: (str, int, Counters) -> None

    attr_lx = htm8.AttrLexer(contents)

    no_special_tags = bool(flags & NO_SPECIAL_TAGS)
    lx = htm8.Lexer(contents, no_special_tags=no_special_tags)
    tokens = []
    start_pos = 0
    tag_stack = []
    while True:
        tok_id, end_pos = lx.Read()
        #log('TOP %s %r', h8_id_str(tok_id), contents[start_pos:end_pos])

        if tok_id == h8_id.Invalid:
            raise LexError('Validate() got invalid token', contents,
                           start_pos)
        if tok_id == h8_id.EndOfStream:
            break

        tokens.append((tok_id, end_pos))

        if tok_id == h8_id.StartEndTag:
            counters.num_start_end_tags += 1

            attr_lx.Init(tok_id, lx.TagNamePos(), end_pos)
            all_attrs = htm8.AllAttrsRaw(attr_lx)
            counters.num_attrs += len(all_attrs)
            # TODO: val_lexer.NumTokens() can be replaced with tokens_out

        elif tok_id == h8_id.StartTag:
            counters.num_start_tags += 1

            attr_lx.Init(tok_id, lx.TagNamePos(), end_pos)
            all_attrs = htm8.AllAttrsRaw(attr_lx)
            counters.num_attrs += len(all_attrs)

            #counters.debug_attrs.extend(all_attrs)

            if flags & BALANCED_TAGS:
                tag_name = lx.CanonicalTagName()
                if flags & NO_SPECIAL_TAGS:
                    tag_stack.append(tag_name)
                else:
                    # e.g. <meta> is considered self-closing, like <meta/>
                    if tag_name not in VOID_ELEMENTS:
                        tag_stack.append(tag_name)

                counters.max_tag_stack = max(counters.max_tag_stack,
                                             len(tag_stack))
        elif tok_id == h8_id.EndTag:
            if flags & BALANCED_TAGS:
                try:
                    expected = tag_stack.pop()
                except IndexError:
                    raise ParseError('Tag stack empty',
                                     s=contents,
                                     start_pos=start_pos)

                actual = lx.CanonicalTagName()
                if expected != actual:
                    raise ParseError(
                        'Got unexpected closing tag %r; opening tag was %r' %
                        (contents[start_pos:end_pos], expected),
                        s=contents,
                        start_pos=start_pos)

        start_pos = end_pos

    if len(tag_stack) != 0:
        raise ParseError('Missing closing tags at end of doc: %s' %
                         ' '.join(tag_stack),
                         s=contents,
                         start_pos=start_pos)

    counters.num_tokens += len(tokens)
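
# Sketch: validating one document and reading the stats back.  This mirrors
# the 'lex-htm8' / 'parse-htm8' actions in main() below.
#
#     counters = Counters()
#     flags = LEX_ATTRS | LEX_QUOTED_VALUES | BALANCED_TAGS
#     try:
#         Validate(contents, flags, counters)
#     except (LexError, ParseError) as e:
#         log('Error: %s', e)
#     log('%d tokens, %d attrs', counters.num_tokens, counters.num_attrs)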


def ToXml(htm8_str):
    # type: (str) -> str

    # TODO:
    # 1. Lex it
    # 2. < & > must be escaped
    #    a. in raw data
    #    b. in quoted strings
    # 3. <script> turned into CDATA
    # 4. void tags turned into self-closing tags
    # 5. case-sensitive tag matching - not sure about this

    attr_lexer = htm8.AttrLexer(htm8_str)

    f = StringIO()
    out = Output(htm8_str, f)

    lx = Lexer(htm8_str)

    pos = 0
    while True:
        tok_id, end_pos = lx.Read()

        if tok_id == h8_id.Invalid:
            raise LexError('ToXml() got invalid token', htm8_str, pos)
        if tok_id == h8_id.EndOfStream:
            break

        if tok_id in (h8_id.RawData, h8_id.CharEntity, h8_id.HexChar,
                      h8_id.DecChar):
            out.PrintUntil(end_pos)
        elif tok_id in (h8_id.StartTag, h8_id.StartEndTag):
            attr_lexer.Init(tok_id, lx.TagNamePos(), end_pos)
            all_attrs = htm8.AllAttrsRawSlice(attr_lexer)
            for name_start, name_end, v, val_start, val_end in all_attrs:
                #val_lexer.Reset(val_start, val_end)
                pass
                # TODO: get the kind of string
                #
                # Quoted:   we need to replace & with &amp; and < with &lt;
                #           note > is not allowed
                # Unquoted: right now, we can just surround with double
                #           quotes because we don't allow any bad chars
                # Empty:    add "", so empty= becomes =""
                # Missing:  add ="", so missing becomes missing=""

            tag_name = lx.CanonicalTagName()
            if tok_id == h8_id.StartTag and tag_name in VOID_ELEMENTS:
                # TODO: instead of closing >, print />
                pass

        elif tok_id == h8_id.BadAmpersand:
            #out.SkipTo(pos)
            out.Print('&amp;')
            out.SkipTo(end_pos)

        elif tok_id == h8_id.BadGreaterThan:
            #out.SkipTo(pos)
            out.Print('&gt;')
            out.SkipTo(end_pos)
        else:
            out.PrintUntil(end_pos)

        pos = end_pos

    out.PrintTheRest()
    return f.getvalue()
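
# Sketch of the current behavior, assuming the lexer flags a bare '&' as
# h8_id.BadAmpersand:
#
#     ToXml('<p>a & b</p>')  # => '<p>a &amp; b</p>'
#
# The attribute rewrites and void-tag handling in the TODOs above are not
# implemented yet, so those constructs pass through unchanged.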


class Counters(object):

    def __init__(self):
        # type: () -> None
        self.num_tokens = 0
        self.num_start_tags = 0
        self.num_start_end_tags = 0
        self.num_attrs = 0
        self.max_tag_stack = 0
        self.num_val_tokens = 0

        #self.debug_attrs = []


def main(argv):
    # type: (List[str]) -> int
    action = argv[1]

    if action == 'tokens':
        contents = sys.stdin.read()

        lx = Lexer(contents)
        start_pos = 0
        while True:
            tok_id, end_pos = lx.Read()
            if tok_id == h8_id.Invalid:
                raise LexError('Invalid token', contents, start_pos)
            if tok_id == h8_id.EndOfStream:
                break

            frag = contents[start_pos:end_pos]
            log('%d %s %r', end_pos, h8_id_str(tok_id), frag)
            start_pos = end_pos

        return 0

    elif action in ('lex-htm8', 'parse-htm8', 'parse-xml'):

        errors = []
        counters = Counters()

        flags = LEX_ATTRS | LEX_QUOTED_VALUES
        if action.startswith('parse-'):
            flags |= BALANCED_TAGS
        if action == 'parse-xml':
            flags |= NO_SPECIAL_TAGS

        i = 0
        for line in sys.stdin:
            filename = line.strip()
            with open(filename) as f:
                contents = f.read()

            try:
                Validate(contents, flags, counters)
            except LexError as e:
                log('Lex error in %r: %s', filename, e)
                errors.append((filename, e))
            except ParseError as e:
                log('Parse error in %r: %s', filename, e)
                errors.append((filename, e))
            i += 1

        log('')
        log('%10d tokens', counters.num_tokens)
        log('%10d start/end tags', counters.num_start_end_tags)
        log('%10d start tags', counters.num_start_tags)
        log('%10d attrs', counters.num_attrs)
        log('%10d max tag stack depth', counters.max_tag_stack)
        log('%10d attr val tokens', counters.num_val_tokens)
        log('%10d errors', len(errors))
        if len(errors):
            return 1
        return 0

    elif action == 'todo':
        # Other algorithms:
        #
        # - select first subtree with given ID
        #   - this requires understanding the void tags I suppose
        # - select all subtrees that have a class
        # - materialize DOM

        # Safe-HTM8?  This is a filter
        return 0

    else:
        raise RuntimeError('Invalid action %r' % action)


if __name__ == '__main__':
    sys.exit(main(sys.argv))