#!/usr/bin/env python2
"""
lazylex/html.py - Low-Level HTML Processing.

See lazylex/README.md for details.

TODO: This should be an Oils library eventually. It's a "lazily-parsed data
structure" like TSV8.

Conflicts between HTML5 and XML:

- In XML, <source> is like any tag, and must be closed.
- In HTML, <source> is a VOID tag, and must NOT be closed.

- In XML, <script> and <style> don't have special treatment
- In HTML, they do

- The header is different - <!DOCTYPE html> vs. <?xml version= ... ?>

So should we have a mode for <script>, <style>, and void tags? Upgrade HX8
into HTM8?
"""
from __future__ import print_function

try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO  # python3
import re
import sys

if sys.version_info.major == 2:
    from typing import List, Tuple, Optional


def log(msg, *args):
    msg = msg % args
    print(msg, file=sys.stderr)


class LexError(Exception):
    """
    Examples of lex errors:

    - Tok.Invalid, like <> or &&
    - Unclosed <!--  <?  <![CDATA[  <script>  <style>
    """

    def __init__(self, s, start_pos):
        self.s = s
        self.start_pos = start_pos

    def __str__(self):
        return '(LexError %r)' % (self.s[self.start_pos:self.start_pos + 20])


def FindLineNum(s, error_pos):
    current_pos = 0
    line_num = 1
    while True:
        newline_pos = s.find('\n', current_pos)
        #log('current = %d, N %d, line %d', current_pos, newline_pos, line_num)

        if newline_pos == -1:  # this is the last line
            return line_num
        if newline_pos >= error_pos:
            return line_num
        line_num += 1
        current_pos = newline_pos + 1
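

# A minimal usage sketch, added for illustration (the _Demo name is ours, not
# part of the module's API): FindLineNum() returns 1-based line numbers for
# error positions.
def _DemoFindLineNum():
    s = 'a\nb\nc'
    assert FindLineNum(s, 0) == 1  # points at 'a'
    assert FindLineNum(s, 2) == 2  # points at 'b'
    assert FindLineNum(s, 4) == 3  # points at 'c', the last line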


class ParseError(Exception):
    """
    Examples of parse errors:

    - unbalanced tag structure
    - ul_table.py errors
    """

    def __init__(self, msg, s=None, start_pos=-1):
        self.msg = msg
        self.s = s
        self.start_pos = start_pos

    def __str__(self):
        if self.s is not None:
            assert self.start_pos != -1, self.start_pos
            snippet = self.s[self.start_pos:self.start_pos + 20]

            line_num = FindLineNum(self.s, self.start_pos)
        else:
            snippet = ''
            line_num = -1
        msg = 'line %d: %r %r' % (line_num, self.msg, snippet)
        return msg


class Output(object):
    """Takes an underlying input buffer and an output file. Maintains a
    position in the input buffer.

    Print FROM the input or print new text to the output.
    """

    def __init__(self, s, f, left_pos=0, right_pos=-1):
        self.s = s
        self.f = f
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos

    def SkipTo(self, pos):
        """Skip to a position."""
        self.pos = pos

    def PrintUntil(self, pos):
        """Print until a position."""
        piece = self.s[self.pos:pos]
        self.f.write(piece)
        self.pos = pos

    def PrintTheRest(self):
        """Print until the end of the string."""
        self.PrintUntil(self.right_pos)

    def Print(self, s):
        """Print new text to the output file."""
        self.f.write(s)
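

# A minimal usage sketch, added for illustration: copy input to the output
# file, replacing the span 'BAR' (bytes [4, 7)) with new text.
def _DemoOutput():
    s = 'foo BAR baz'
    f = StringIO()
    out = Output(s, f)
    out.PrintUntil(4)   # copies 'foo ' from the input
    out.Print('bar')    # prints replacement text
    out.SkipTo(7)       # skips over 'BAR' in the input
    out.PrintTheRest()  # copies ' baz'
    assert f.getvalue() == 'foo bar baz'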


# HTML Tokens
# CommentBegin, ProcessingBegin, CDataBegin are "pseudo-tokens", not visible
TOKENS = ('Decl Comment CommentBegin Processing ProcessingBegin CData '
          'CDataBegin StartTag StartEndTag EndTag DecChar HexChar '
          'CharEntity RawData HtmlCData Invalid EndOfStream').split()


class Tok(object):
    """
    Avoid lint errors by using these aliases
    """
    pass


TOKEN_NAMES = [None] * len(TOKENS)  # type: List[str]

this_module = sys.modules[__name__]
for i, tok_str in enumerate(TOKENS):
    setattr(this_module, tok_str, i)
    setattr(Tok, tok_str, i)
    TOKEN_NAMES[i] = tok_str


def TokenName(tok_id):
    return TOKEN_NAMES[tok_id]
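

# Illustration: each token name becomes an integer attribute of both this
# module and the Tok class, and TokenName() maps the integer back to a string.
def _DemoTokenNames():
    assert Tok.StartTag == TOKENS.index('StartTag')
    assert TokenName(Tok.StartTag) == 'StartTag'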


def MakeLexer(rules):
    return [(re.compile(pat, re.VERBOSE), i) for (pat, i) in rules]


#
# Eggex
#
# Tag = / ~['>']+ /

# Is this valid? A single character?
# Tag = / ~'>'* /

# Maybe better: / [NOT '>']+/
# capital letters not allowed there?
#
# But then this is confusing:
# / [NOT ~digit]+/
#
# / [NOT digit] / is [^\d]
# / ~digit /     is \D
#
# Or maybe:
#
# / [~ digit]+ /
# / [~ '>']+ /
# / [NOT '>']+ /

# End = / '</' Tag '>' /
# StartEnd = / '<' Tag '/>' /
# Start = / '<' Tag '>' /
#
# EntityRef = / '&' dot{* N} ';' /

# Tag name, or attribute name
# colon is used in XML

# https://www.w3.org/TR/xml/#NT-Name
# Hm there is a lot of unicode stuff. We are simplifying parsing

_NAME = r'[a-zA-Z][a-zA-Z0-9:_\-]*'  # must start with letter

LEXER = [
    (r'<!--', Tok.CommentBegin),

    # Processing instructions are used for the XML header:
    # <?xml version="1.0" encoding="UTF-8"?>
    # They are technically XML-only, but in HTML5, they are another kind of
    # comment:
    #
    # https://developer.mozilla.org/en-US/docs/Web/API/ProcessingInstruction
    #
    (r'<\?', Tok.ProcessingBegin),
    # Not necessary in HTML5, but occurs in XML
    (r'<!\[CDATA\[', Tok.CDataBegin),  # <![CDATA[

    # Markup declarations
    # - In HTML5, there is only <!DOCTYPE html>
    # - XML has 4 more declarations: <!ELEMENT ...> ATTLIST ENTITY NOTATION
    #   - these seem to be part of DTD
    # - it's useful to skip these, and be able to parse the rest of the document
    # - Note: < is allowed?
    (r'<! [^>]+ >', Tok.Decl),

    # Tags
    # Notes:
    # - We look for a valid tag name, but we don't validate attributes.
    #   That's done in the tag lexer.
    # - We don't allow leading whitespace
    (r'</ (%s) >' % _NAME, Tok.EndTag),
    # self-closing <br/> comes before StartTag
    (r'< (%s) [^>]* />' % _NAME, Tok.StartEndTag),  # self-closing <br/>
    (r'< (%s) [^>]* >' % _NAME, Tok.StartTag),  # start tag <a>

    # Characters
    # https://www.w3.org/TR/xml/#sec-references
    (r'&\# [0-9]+ ;', Tok.DecChar),
    (r'&\# x[0-9a-fA-F]+ ;', Tok.HexChar),
    (r'& %s ;' % _NAME, Tok.CharEntity),

    # HTML5 allows unescaped > in raw data, but < is not allowed.
    # https://stackoverflow.com/questions/10462348/right-angle-bracket-in-html
    #
    # - My early blog has THREE errors when disallowing >
    # - So do some .wwz files
    (r'[^&<]+', Tok.RawData),
    (r'.', Tok.Invalid),  # error!
]

# Old notes:
#
# Non-greedy matches are regular and can be matched in linear time
# with RE2.
#
# https://news.ycombinator.com/item?id=27099798
#
# Maybe try combining all of these for speed.

# . is any char except newline
# https://re2c.org/manual/manual_c.html

# Discarded options
#(r'<!-- .*? -->', Tok.Comment),

# Hack from Claude: \s\S instead of re.DOTALL. I don't like this
#(r'<!-- [\s\S]*? -->', Tok.Comment),
#(r'<!-- (?:.|[\n])*? -->', Tok.Comment),

LEXER = MakeLexer(LEXER)


class Lexer(object):

    def __init__(self, s, left_pos=0, right_pos=-1):
        self.s = s
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos
        self.cache = {}  # string -> compiled regex pattern object

        # either </script> or </style> - we search until we see that
        self.search_state = None  # type: Optional[str]

        # Position of tag name, if applicable
        # - Set after you get a StartTag, EndTag, or StartEndTag
        # - Unset on other tags
        self.tag_pos_left = -1
        self.tag_pos_right = -1

    def _Peek(self):
        # type: () -> Tuple[int, int]
        """
        Note: only Read() calls this now
        """
        if self.pos == self.right_pos:
            return Tok.EndOfStream, self.pos

        assert self.pos < self.right_pos, self.pos

        if self.search_state is not None:
            pos = self.s.find(self.search_state, self.pos)
            if pos == -1:
                # unterminated <script> or <style>
                raise LexError(self.s, self.pos)
            self.search_state = None
            # the token ends at the beginning of </script> or </style>
            return Tok.HtmlCData, pos

        # Find the first match.
        # Note: frontend/match.py uses _LongestMatch(), which is different!
        # TODO: reconcile them.  This lexer should be expressible in re2c.

        for pat, tok_id in LEXER:
            m = pat.match(self.s, self.pos)
            if m:
                if tok_id in (Tok.StartTag, Tok.EndTag, Tok.StartEndTag):
                    self.tag_pos_left = m.start(1)
                    self.tag_pos_right = m.end(1)
                else:
                    # Reset state
                    self.tag_pos_left = -1
                    self.tag_pos_right = -1

                if tok_id == Tok.CommentBegin:
                    pos = self.s.find('-->', self.pos)
                    if pos == -1:
                        # unterminated <!--
                        raise LexError(self.s, self.pos)
                    return Tok.Comment, pos + 3  # -->

                if tok_id == Tok.ProcessingBegin:
                    pos = self.s.find('?>', self.pos)
                    if pos == -1:
                        # unterminated <?
                        raise LexError(self.s, self.pos)
                    return Tok.Processing, pos + 2  # ?>

                if tok_id == Tok.CDataBegin:
                    pos = self.s.find(']]>', self.pos)
                    if pos == -1:
                        # unterminated <![CDATA[
                        raise LexError(self.s, self.pos)
                    return Tok.CData, pos + 3  # ]]>

                if tok_id == Tok.StartTag:
                    if self.TagNameEquals('script'):
                        self.search_state = '</script>'
                    elif self.TagNameEquals('style'):
                        self.search_state = '</style>'

                return tok_id, m.end()
        else:
            raise AssertionError('Tok.Invalid rule should have matched')

    def TagNameEquals(self, expected):
        # type: (str) -> bool
        assert self.tag_pos_left != -1, self.tag_pos_left
        assert self.tag_pos_right != -1, self.tag_pos_right

        # TODO: In C++, this does not need an allocation
        return expected == self.s[self.tag_pos_left:self.tag_pos_right]

    def TagName(self):
        # type: () -> str
        assert self.tag_pos_left != -1, self.tag_pos_left
        assert self.tag_pos_right != -1, self.tag_pos_right

        return self.s[self.tag_pos_left:self.tag_pos_right]

    def Read(self):
        # type: () -> Tuple[int, int]
        tok_id, end_pos = self._Peek()
        self.pos = end_pos  # advance
        return tok_id, end_pos

    def LookAhead(self, regex):
        # Cache the regex compilation.  This could also be LookAheadFor(THEAD)
        # or something.
        pat = self.cache.get(regex)
        if pat is None:
            pat = re.compile(regex)
            self.cache[regex] = pat

        m = pat.match(self.s, self.pos)
        return m is not None
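

# A minimal usage sketch, added for illustration: Read() returns (tok_id,
# end_pos) pairs, and a token's text is the span between successive positions.
def _DemoLexer():
    s = '<p>hi &amp; bye</p>'
    lx = Lexer(s)
    tokens = []
    pos = 0
    while True:
        tok_id, end_pos = lx.Read()
        if tok_id == Tok.EndOfStream:
            break
        tokens.append((TokenName(tok_id), s[pos:end_pos]))
        pos = end_pos
    assert tokens == [
        ('StartTag', '<p>'),
        ('RawData', 'hi '),
        ('CharEntity', '&amp;'),
        ('RawData', ' bye'),
        ('EndTag', '</p>'),
    ]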


def _Tokens(s, left_pos, right_pos):
    """
    Args:
      s: string to parse
      left_pos, right_pos: Optional span boundaries.
    """
    lx = Lexer(s, left_pos, right_pos)
    while True:
        tok_id, pos = lx.Read()
        yield tok_id, pos
        if tok_id == Tok.EndOfStream:
            break


def ValidTokens(s, left_pos=0, right_pos=-1):
    """Wrapper around _Tokens to prevent callers from having to handle Invalid.

    I'm not combining the two functions because I might want to do a
    'yield' transformation on Tokens()?  Exceptions might complicate the
    issue?
    """
    pos = left_pos
    for tok_id, end_pos in _Tokens(s, left_pos, right_pos):
        if tok_id == Tok.Invalid:
            raise LexError(s, pos)
        yield tok_id, end_pos
        pos = end_pos
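

# Illustration: ValidTokens() raises LexError instead of yielding Tok.Invalid
# (<> is one of the lex errors mentioned in the LexError docstring).
def _DemoValidTokens():
    try:
        list(ValidTokens('<>'))
    except LexError:
        pass
    else:
        raise AssertionError('expected LexError for <>')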


# Tag names:
#   Match <a  or </a
#   Match <h2, but not <2h
#
# HTML 5 doesn't restrict tag names at all
#   https://html.spec.whatwg.org/#toc-syntax
#
# XML allows : - .
#   https://www.w3.org/TR/xml/#NT-NameChar

# Namespaces for MathML, SVG
# XLink, XML, XMLNS
#
# https://infra.spec.whatwg.org/#namespaces
#
# Allow - for td-attrs

_ATTR_VALUE = r'[a-zA-Z0-9_\-]+'  # allow hyphens

# TODO: we don't need to capture the tag name here?  That's done at the top
# level
_TAG_RE = re.compile(r'/? \s* (%s)' % _NAME, re.VERBOSE)

# To match href="foo"

_ATTR_RE = re.compile(
    r'''
\s+                  # Leading whitespace is required
(%s)                 # Attribute name
(?:                  # Optional attribute value
  \s* = \s*
  (?:
    " ([^>"]*) "     # double quoted value
  | (%s)             # unquoted value
                     # TODO: relax this?  for href=$foo
  )
)?
''' % (_NAME, _ATTR_VALUE), re.VERBOSE)

TagName, AttrName, UnquotedValue, QuotedValue = range(4)


class TagLexer(object):
    """
    Given a tag like <a href="..."> or <link type="..." />, the TagLexer
    provides a few operations:

    - What is the tag?
    - Iterate through the attributes, giving (name, value_start_pos, value_end_pos)
    """

    def __init__(self, s):
        self.s = s
        self.start_pos = -1  # Invalid
        self.end_pos = -1

    def Reset(self, start_pos, end_pos):
        """Reuse instances of this object."""
        self.start_pos = start_pos
        self.end_pos = end_pos

    def TagString(self):
        return self.s[self.start_pos:self.end_pos]

    def TagName(self):
        # First event
        tok_id, start, end = next(self.Tokens())
        return self.s[start:end]

    def GetSpanForAttrValue(self, attr_name):
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these

        events = self.Tokens()
        val = (-1, -1)
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]
                    if name == attr_name:
                        # The value should come next
                        tok_id, start, end = next(events)
                        if tok_id in (QuotedValue, UnquotedValue):
                            # Note: quoted values may have &amp;
                            # We would need ANOTHER lexer to unescape them.
                            # Right now help_gen.py and oils_doc.py don't
                            # need that.
                            val = start, end
                        break

        except StopIteration:
            pass
        return val

    def GetAttrRaw(self, attr_name):
        """
        Return the value, which may be UNESCAPED.
        """
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these
        start, end = self.GetSpanForAttrValue(attr_name)
        if start == -1:
            return None
        return self.s[start:end]

    def AllAttrsRaw(self):
        """
        Get a list of pairs [('class', 'foo'), ('href', '?foo=1&amp;bar=2')]

        The quoted values may be escaped.  We would need another lexer to
        unescape them.
        """
        pairs = []
        events = self.Tokens()
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]

                    # The value should come next
                    tok_id, start, end = next(events)
                    if tok_id in (QuotedValue, UnquotedValue):
                        # Note: quoted values may have &amp;
                        # We would need ANOTHER lexer to unescape them, but we
                        # don't need that for ul-table

                        val = self.s[start:end]
                        pairs.append((name, val))
        except StopIteration:
            pass
        return pairs

    def Tokens(self):
        """
        Yields a sequence of tokens: Tag (AttrName AttrValue?)*

        Where each Token is (Type, start_pos, end_pos)

        Note that start and end are NOT redundant!  We skip over some unwanted
        characters.
        """
        m = _TAG_RE.match(self.s, self.start_pos + 1)
        if not m:
            raise RuntimeError("Couldn't find HTML tag in %r" %
                               self.TagString())
        yield TagName, m.start(1), m.end(1)

        pos = m.end(0)

        while True:
            # don't search past the end
            m = _ATTR_RE.match(self.s, pos, self.end_pos)
            if not m:
                # A validating parser would check that > or /> is next --
                # there's no junk
                break

            yield AttrName, m.start(1), m.end(1)

            # Quoted is group 2, unquoted is group 3.
            if m.group(2) is not None:
                yield QuotedValue, m.start(2), m.end(2)
            elif m.group(3) is not None:
                yield UnquotedValue, m.start(3), m.end(3)

            # Advance past the whole match, including the closing quote
            pos = m.end(0)
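

# A minimal usage sketch, added for illustration: point the TagLexer at the
# span of one tag, then query the name and attributes.  Values are NOT
# unescaped, so &amp; comes back as-is.
def _DemoTagLexer():
    s = '<a href="?foo=1&amp;bar=2" class=link>'
    tag_lexer = TagLexer(s)
    tag_lexer.Reset(0, len(s))
    assert tag_lexer.TagName() == 'a'
    assert tag_lexer.GetAttrRaw('class') == 'link'
    assert tag_lexer.AllAttrsRaw() == [('href', '?foo=1&amp;bar=2'),
                                       ('class', 'link')]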


def ReadUntilStartTag(it, tag_lexer, tag_name):
    """Find the next <foo>, returning its (start, end) position

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.StartTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No start tag %r' % tag_name)


def ReadUntilEndTag(it, tag_lexer, tag_name):
    """Find the next </foo>, returning its (start, end) position

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.EndTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No end tag %r' % tag_name)
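

# A minimal usage sketch, added for illustration: drive ReadUntilStartTag()
# with a token iterator and a reusable TagLexer.
def _DemoReadUntil():
    s = '<div><p>text</p></div>'
    tag_lexer = TagLexer(s)
    it = ValidTokens(s)
    start, end = ReadUntilStartTag(it, tag_lexer, 'p')
    assert s[start:end] == '<p>'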


CHAR_ENTITY = {
    'amp': '&',
    'lt': '<',
    'gt': '>',
    'quot': '"',
}


def ToText(s, left_pos=0, right_pos=-1):
    """Given HTML, return text by unquoting &gt; and &lt; etc.

    Used by:
      doctools/oils_doc.py: PygmentsPlugin
      doctools/help_gen.py: HelpIndexCards

    In the latter case, we could process some tags, like:

    - Blue Link (not clickable, but still useful)
    - Red X

    That should be html.ToAnsi.
    """
    f = StringIO()
    out = Output(s, f, left_pos, right_pos)

    pos = left_pos
    for tok_id, end_pos in ValidTokens(s, left_pos, right_pos):
        if tok_id == Tok.RawData:
            out.SkipTo(pos)
            out.PrintUntil(end_pos)

        elif tok_id == Tok.CharEntity:  # &amp;

            entity = s[pos + 1:end_pos - 1]

            out.SkipTo(pos)
            out.Print(CHAR_ENTITY[entity])
            out.SkipTo(end_pos)

        # Not handling these yet
        elif tok_id == Tok.HexChar:
            raise AssertionError('Hex Char %r' % s[pos:pos + 20])

        elif tok_id == Tok.DecChar:
            raise AssertionError('Dec Char %r' % s[pos:pos + 20])

        else:
            # Tags and comments are not text; skip over them so that
            # PrintTheRest() doesn't print a trailing tag
            out.SkipTo(end_pos)

        pos = end_pos

    out.PrintTheRest()
    return f.getvalue()
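

# Illustration: tags are skipped, and known character entities are decoded.
def _DemoToText():
    assert ToText('<b>1 &lt; 2 &amp; 3</b>') == '1 < 2 & 3'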


# https://developer.mozilla.org/en-US/docs/Glossary/Void_element
VOID_ELEMENTS = [
    'area',
    'base',
    'br',
    'col',
    'embed',
    'hr',
    'img',
    'input',
    'link',
    'meta',
    'param',
    'source',
    'track',
    'wbr',
]


def main(argv):
    action = argv[1]

    if action == 'tokens':
        contents = sys.stdin.read()

        lx = Lexer(contents)
        start_pos = 0
        while True:
            tok_id, end_pos = lx.Read()
            if tok_id == Tok.Invalid:
                raise LexError(contents, start_pos)
            if tok_id == Tok.EndOfStream:
                break

            frag = contents[start_pos:end_pos]
            log('%d %s %r', end_pos, TokenName(tok_id), frag)
            start_pos = end_pos

        return 0

    elif action in ('lex-tags', 'lex-attrs', 'lex-attr-values', 'well-formed'):
        num_tokens = 0
        num_start_tags = 0
        num_start_end_tags = 0
        num_attrs = 0
        max_tag_stack = 0

        errors = []
        i = 0
        for line in sys.stdin:
            name = line.strip()
            with open(name) as f:
                contents = f.read()

            tag_lexer = TagLexer(contents)
            lx = Lexer(contents)
            tokens = []
            start_pos = 0
            tag_stack = []
            try:
                while True:
                    tok_id, end_pos = lx.Read()

                    if tok_id == Tok.Invalid:
                        raise LexError(contents, start_pos)
                    if tok_id == Tok.EndOfStream:
                        break

                    tokens.append((tok_id, end_pos))

                    if tok_id == Tok.StartEndTag:
                        num_start_end_tags += 1
                        if action in ('lex-attrs', 'lex-attr-values',
                                      'well-formed'):
                            tag_lexer.Reset(start_pos, end_pos)
                            all_attrs = tag_lexer.AllAttrsRaw()
                            num_attrs += len(all_attrs)
                    elif tok_id == Tok.StartTag:
                        num_start_tags += 1
                        if action in ('lex-attrs', 'lex-attr-values',
                                      'well-formed'):
                            tag_lexer.Reset(start_pos, end_pos)
                            all_attrs = tag_lexer.AllAttrsRaw()
                            num_attrs += len(all_attrs)

                        tag_name = lx.TagName()
                        # Void elements like <img> are never closed, so don't
                        # expect them on the tag stack
                        if tag_name not in VOID_ELEMENTS:
                            tag_stack.append(tag_name)

                        max_tag_stack = max(max_tag_stack, len(tag_stack))
                    elif tok_id == Tok.EndTag:
                        try:
                            expected = tag_stack.pop()
                        except IndexError:
                            raise ParseError('Tag stack empty',
                                             s=contents,
                                             start_pos=start_pos)

                        actual = lx.TagName()
                        if expected != actual:
                            raise ParseError(
                                'Got unexpected closing tag %r; opening tag was %r'
                                % (contents[start_pos:end_pos], expected),
                                s=contents,
                                start_pos=start_pos)

                    start_pos = end_pos
            except LexError as e:
                log('Lex error in %r: %s', name, e)
                errors.append((name, e))
            except ParseError as e:
                log('Parse error in %r: %s', name, e)
                errors.append((name, e))
            else:
                num_tokens += len(tokens)

            #print('%d %s' % (len(tokens), name))
            i += 1

        log('')
        log(
            '  %d tokens, %d start/end tags, %d start tags, %d attrs, %d max tag stack depth in %d files',
            num_tokens, num_start_end_tags, num_start_tags, num_attrs,
            max_tag_stack, i)
        log('  %d errors', len(errors))
        if 0:
            for name, e in errors:
                log('Error in %r: %s', name, e)

    else:
        raise RuntimeError('Invalid action %r' % action)


if __name__ == '__main__':
    main(sys.argv)