#!/usr/bin/env python2
"""
lazylex/html.py - Low-Level HTML Processing.

See lazylex/README.md for details.

Conflicts between HTML5 and XML:

- In XML, <source> is like any tag, and must be closed,
- In HTML, <source> is a VOID tag, and must NOT be closed,

- In XML, <script> and <style> don't have special treatment
- In HTML, they do

- The header is different - <!DOCTYPE html> vs. <?xml version= ... ?>

So should we have a mode for <script> <style> and void tags? Upgrade HX8 into HTM8?
"""
from __future__ import print_function

try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO  # python3
import re
import sys

if sys.version_info.major == 2:
    from typing import List, Tuple, Optional


def log(msg, *args):
    msg = msg % args
    print(msg, file=sys.stderr)


class LexError(Exception):
    """
    Examples of lex errors:

    - Tok.Invalid, like <> or &&
    - Unclosed <!-- <? <![CDATA[ <script> <style>
    """

    def __init__(self, s, start_pos):
        self.s = s
        self.start_pos = start_pos

    def __str__(self):
        return '(LexError %r)' % (self.s[self.start_pos:self.start_pos + 20])


def FindLineNum(s, error_pos):
    current_pos = 0
    line_num = 1
    while True:
        newline_pos = s.find('\n', current_pos)
        #log('current = %d, N %d, line %d', current_pos, newline_pos, line_num)

        if newline_pos == -1:  # this is the last line
            return line_num
        if newline_pos >= error_pos:
            return line_num
        line_num += 1
        current_pos = newline_pos + 1
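
# A sketch of the intent, with a hypothetical input: in 'ab\ncd\nef', index 4
# is the 'd' on line 2, so FindLineNum('ab\ncd\nef', 4) == 2.  Positions at or
# past the last newline fall on the final line.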


class ParseError(Exception):
    """
    Examples of parse errors:

    - unbalanced tag structure
    - ul_table.py errors
    """

    def __init__(self, msg, s=None, start_pos=-1):
        self.msg = msg
        self.s = s
        self.start_pos = start_pos

    def __str__(self):
        if self.s is not None:
            assert self.start_pos != -1, self.start_pos
            snippet = (self.s[self.start_pos:self.start_pos + 20])

            line_num = FindLineNum(self.s, self.start_pos)
        else:
            snippet = ''
            line_num = -1
        msg = 'line %d: %r %r' % (line_num, self.msg, snippet)
        return msg


class Output(object):
    """Takes an underlying input buffer and an output file.  Maintains a
    position in the input buffer.

    Print FROM the input or print new text to the output.
    """

    def __init__(self, s, f, left_pos=0, right_pos=-1):
        self.s = s
        self.f = f
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos

    def SkipTo(self, pos):
        """Skip to a position."""
        self.pos = pos

    def PrintUntil(self, pos):
        """Print until a position."""
        piece = self.s[self.pos:pos]
        self.f.write(piece)
        self.pos = pos

    def PrintTheRest(self):
        """Print until the end of the string."""
        self.PrintUntil(self.right_pos)

    def Print(self, s):
        """Print text to the underlying buffer."""
        self.f.write(s)
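

# A minimal usage sketch of Output, with hypothetical values: copy part of the
# input, splice in new text, and skip the rest.
#
#   f = StringIO()
#   out = Output('hello world', f)
#   out.PrintUntil(6)   # copies 'hello '
#   out.Print('there')  # new text
#   out.SkipTo(11)      # skip over 'world'
#   out.PrintTheRest()  # nothing left to copy
#   # f.getvalue() == 'hello there'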


# HTML Tokens
# CommentBegin, ProcessingBegin, CDataBegin are "pseudo-tokens", not visible
TOKENS = 'Decl Comment CommentBegin Processing ProcessingBegin CData CDataBegin StartTag StartEndTag EndTag DecChar HexChar CharEntity RawData HtmlCData Invalid EndOfStream'.split(
)


class Tok(object):
    """
    Avoid lint errors by using these aliases
    """
    pass


TOKEN_NAMES = [None] * len(TOKENS)  # type: List[str]

this_module = sys.modules[__name__]
for i, tok_str in enumerate(TOKENS):
    setattr(this_module, tok_str, i)
    setattr(Tok, tok_str, i)
    TOKEN_NAMES[i] = tok_str


def TokenName(tok_id):
    return TOKEN_NAMES[tok_id]
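
# Each token name is an integer attribute on both this module and Tok, in
# TOKENS order, e.g. Tok.Decl == 0, and TokenName(Tok.StartTag) == 'StartTag'.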


def MakeLexer(rules):
    return [(re.compile(pat, re.VERBOSE), i) for (pat, i) in rules]


#
# Eggex
#
# Tag = / ~['>']+ /

# Is this valid? A single character?
# Tag = / ~'>'* /

# Maybe better: / [NOT '>']+/
# capital letters not allowed there?
#
# But then this is confusing:
# / [NOT ~digit]+/
#
# / [NOT digit] / is [^\d]
# / ~digit / is \D
#
# Or maybe:
#
# / [~ digit]+ /
# / [~ '>']+ /
# / [NOT '>']+ /

# End = / '</' Tag '>' /
# StartEnd = / '<' Tag '/>' /
# Start = / '<' Tag '>' /
#
# EntityRef = / '&' dot{* N} ';' /

# Tag name, or attribute name
# colon is used in XML

# https://www.w3.org/TR/xml/#NT-Name
# Hm there is a lot of unicode stuff.  We are simplifying parsing

_NAME = r'[a-zA-Z][a-zA-Z0-9:_\-]*'  # must start with letter

LEXER = [
    (r'<!--', Tok.CommentBegin),

    # Processing instructions are used for the XML header:
    #   <?xml version="1.0" encoding="UTF-8"?>
    # They are technically XML-only, but in HTML5, they are another kind of
    # comment:
    #
    #   https://developer.mozilla.org/en-US/docs/Web/API/ProcessingInstruction
    #
    (r'<\?', Tok.ProcessingBegin),
    # Not necessary in HTML5, but occurs in XML
    (r'<!\[CDATA\[', Tok.CDataBegin),  # <![CDATA[

    # Markup declarations
    # - In HTML5, there is only <!DOCTYPE html>
    # - XML has 4 more declarations: <!ELEMENT ...> ATTLIST ENTITY NOTATION
    #   - these seem to be part of DTD
    # - it's useful to skip these, and be able to parse the rest of the
    #   document
    # - Note: < is allowed?
    (r'<! [^>]+ >', Tok.Decl),

    # Tags
    # Notes:
    # - We look for a valid tag name, but we don't validate attributes.
    #   That's done in the tag lexer.
    # - We don't allow leading whitespace
    (r'</ (%s) >' % _NAME, Tok.EndTag),  # end tag </a>
    # self-closing <br/> comes before StartTag
    (r'< (%s) [^>]* />' % _NAME, Tok.StartEndTag),  # self-closing <br/>
    (r'< (%s) [^>]* >' % _NAME, Tok.StartTag),  # start tag <a>

    # Characters
    # https://www.w3.org/TR/xml/#sec-references
    (r'&\# [0-9]+ ;', Tok.DecChar),
    (r'&\# x[0-9a-fA-F]+ ;', Tok.HexChar),
    (r'& %s ;' % _NAME, Tok.CharEntity),

    # HTML5 allows unescaped > in raw data, but < is not allowed.
    # https://stackoverflow.com/questions/10462348/right-angle-bracket-in-html
    #
    # - My early blog has THREE errors when disallowing >
    # - So do some .wwz files
    (r'[^&<]+', Tok.RawData),
    (r'.', Tok.Invalid),  # error!
]
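
# For example, '<p>hi &amp; bye</p>' should lex to this token sequence (a
# sketch; end positions omitted):
#
#   StartTag  RawData  CharEntity  RawData  EndTag  EndOfStream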

# Old notes:
#
# Non-greedy matches are regular and can be matched in linear time
# with RE2.
#
# https://news.ycombinator.com/item?id=27099798
#
# Maybe try combining all of these for speed.

# . is any char except newline
# https://re2c.org/manual/manual_c.html

# Discarded options
#(r'<!-- .*? -->', Tok.Comment),

# Hack from Claude: \s\S instead of re.DOTALL.  I don't like this
#(r'<!-- [\s\S]*? -->', Tok.Comment),
#(r'<!-- (?:.|[\n])*? -->', Tok.Comment),

LEXER = MakeLexer(LEXER)


class Lexer(object):

    def __init__(self, s, left_pos=0, right_pos=-1, no_special_tags=False):
        self.s = s
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos
        self.no_special_tags = no_special_tags

        self.cache = {}  # string -> compiled regex pattern object

        # either </script> or </style> - we search until we see that
        self.search_state = None  # type: Optional[str]

        # Position of tag name, if applicable
        # - Set after you get a StartTag, EndTag, or StartEndTag
        # - Unset on other tags
        self.tag_pos_left = -1
        self.tag_pos_right = -1

    def _Peek(self):
        # type: () -> Tuple[int, int]
        """
        Note: callers use Read(); nothing uses _Peek() directly now.
        """
        if self.pos == self.right_pos:
            return Tok.EndOfStream, self.pos

        assert self.pos < self.right_pos, self.pos

        if self.search_state is not None and not self.no_special_tags:
            pos = self.s.find(self.search_state, self.pos)
            if pos == -1:
                # unterminated <script> or <style>
                raise LexError(self.s, self.pos)
            self.search_state = None
            # everything up to the beginning of </script> or </style>
            return Tok.HtmlCData, pos

        # Find the first match.
        # Note: frontend/match.py uses _LongestMatch(), which is different!
        # TODO: reconcile them.  This lexer should be expressible in re2c.

        for pat, tok_id in LEXER:
            m = pat.match(self.s, self.pos)
            if m:
                if tok_id in (Tok.StartTag, Tok.EndTag, Tok.StartEndTag):
                    self.tag_pos_left = m.start(1)
                    self.tag_pos_right = m.end(1)
                else:
                    # Reset state
                    self.tag_pos_left = -1
                    self.tag_pos_right = -1

                if tok_id == Tok.CommentBegin:
                    pos = self.s.find('-->', self.pos)
                    if pos == -1:
                        # unterminated <!--
                        raise LexError(self.s, self.pos)
                    return Tok.Comment, pos + 3  # -->

                if tok_id == Tok.ProcessingBegin:
                    pos = self.s.find('?>', self.pos)
                    if pos == -1:
                        # unterminated <?
                        raise LexError(self.s, self.pos)
                    return Tok.Processing, pos + 2  # ?>

                if tok_id == Tok.CDataBegin:
                    pos = self.s.find(']]>', self.pos)
                    if pos == -1:
                        # unterminated <![CDATA[
                        raise LexError(self.s, self.pos)
                    return Tok.CData, pos + 3  # ]]>

                if tok_id == Tok.StartTag:
                    if self.TagNameEquals('script'):
                        self.search_state = '</script>'
                    elif self.TagNameEquals('style'):
                        self.search_state = '</style>'

                return tok_id, m.end()
        else:
            raise AssertionError('Tok.Invalid rule should have matched')

    def TagNameEquals(self, expected):
        # type: (str) -> bool
        assert self.tag_pos_left != -1, self.tag_pos_left
        assert self.tag_pos_right != -1, self.tag_pos_right

        # TODO: In C++, this does not need an allocation
        # TODO: conditionally lower() case here (maybe not in XML mode)
        return expected == self.s[self.tag_pos_left:self.tag_pos_right]

    def TagName(self):
        # type: () -> str
        assert self.tag_pos_left != -1, self.tag_pos_left
        assert self.tag_pos_right != -1, self.tag_pos_right

        # TODO: conditionally lower() case here (maybe not in XML mode)
        return self.s[self.tag_pos_left:self.tag_pos_right]

    def Read(self):
        # type: () -> Tuple[int, int]
        tok_id, end_pos = self._Peek()
        self.pos = end_pos  # advance
        return tok_id, end_pos

    def LookAhead(self, regex):
        # Cache the regex compilation.  This could also be LookAheadFor(THEAD)
        # or something.
        pat = self.cache.get(regex)
        if pat is None:
            pat = re.compile(regex)
            self.cache[regex] = pat

        m = pat.match(self.s, self.pos)
        return m is not None
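
# <script> and <style> get special treatment: after such a StartTag, the
# lexer searches for the literal end tag, and everything in between comes out
# as a single HtmlCData token.  A sketch with a hypothetical input:
#
#   lx = Lexer('<script>if (x < y) { f(); }</script>')
#   # Read() returns StartTag, then HtmlCData covering 'if (x < y) { f(); }',
#   # then EndTag, then EndOfStream.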


def _Tokens(s, left_pos, right_pos):
    """
    Args:
      s: string to parse
      left_pos, right_pos: Optional span boundaries.
    """
    lx = Lexer(s, left_pos, right_pos)
    while True:
        tok_id, pos = lx.Read()
        yield tok_id, pos
        if tok_id == Tok.EndOfStream:
            break


def ValidTokens(s, left_pos=0, right_pos=-1):
    """Wrapper around _Tokens to prevent callers from having to handle Invalid.

    I'm not combining the two functions because I might want to do a
    'yield' transformation on Tokens()?  Exceptions might complicate the
    issue?
    """
    pos = left_pos
    for tok_id, end_pos in _Tokens(s, left_pos, right_pos):
        if tok_id == Tok.Invalid:
            raise LexError(s, pos)
        yield tok_id, end_pos
        pos = end_pos


def ValidTokenList(s, no_special_tags=False):
    """A wrapper that can be more easily translated to C++.  Doesn't use iterators."""

    start_pos = 0
    tokens = []
    lx = Lexer(s, no_special_tags=no_special_tags)
    while True:
        tok_id, end_pos = lx.Read()
        tokens.append((tok_id, end_pos))
        if tok_id == Tok.EndOfStream:
            break
        if tok_id == Tok.Invalid:
            raise LexError(s, start_pos)
        start_pos = end_pos
    return tokens
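
# Tokens carry only END positions; callers track the start themselves.  A
# minimal consumption sketch, with a hypothetical input:
#
#   s = '<p>hi</p>'
#   pos = 0
#   for tok_id, end_pos in ValidTokens(s):
#       log('%s %r', TokenName(tok_id), s[pos:end_pos])
#       pos = end_pos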


# Tag names:
#   Match <a or </a
#   Match <h2, but not <2h
#
# HTML 5 doesn't restrict tag names at all
#   https://html.spec.whatwg.org/#toc-syntax
#
# XML allows : - .
#   https://www.w3.org/TR/xml/#NT-NameChar

# Namespaces for MathML, SVG
# XLink, XML, XMLNS
#
#   https://infra.spec.whatwg.org/#namespaces
#
# Allow - for td-attrs

_ATTR_VALUE = r'[a-zA-Z0-9_\-]+'  # allow hyphens

# TODO: we don't need to capture the tag name here?  That's done at the top
# level.
_TAG_RE = re.compile(r'/? \s* (%s)' % _NAME, re.VERBOSE)

# To match href="foo"

_ATTR_RE = re.compile(
    r'''
\s+             # Leading whitespace is required
(%s)            # Attribute name
(?:             # Optional attribute value
  \s* = \s*
  (?:
    " ([^>"]*) "  # double quoted value
  | (%s)          # unquoted value
                  # TODO: relax this? for href=$foo
  )
)?
''' % (_NAME, _ATTR_VALUE), re.VERBOSE)
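
# For example, against the hypothetical input ' href="/index.html" selected',
# the first match has group 1 == 'href' and group 2 == '/index.html'; matching
# again at the new position yields 'selected' with no value (groups 2 and 3
# are None).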

TagName, AttrName, UnquotedValue, QuotedValue = range(4)


class TagLexer(object):
    """
    Given a tag like <a href="..."> or <link type="..." />, the TagLexer
    provides a few operations:

    - What is the tag?
    - Iterate through the attributes, giving (name, value_start_pos, value_end_pos)
    """

    def __init__(self, s):
        self.s = s
        self.start_pos = -1  # Invalid
        self.end_pos = -1

    def Reset(self, start_pos, end_pos):
        """Reuse instances of this object."""
        self.start_pos = start_pos
        self.end_pos = end_pos

    def TagString(self):
        return self.s[self.start_pos:self.end_pos]

    def TagName(self):
        # First event
        tok_id, start, end = next(self.Tokens())
        return self.s[start:end]

    def GetSpanForAttrValue(self, attr_name):
        """
        Used by oils_doc.py, for href shortcuts.
        """
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these

        events = self.Tokens()
        val = (-1, -1)
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]
                    if name == attr_name:
                        # The value should come next
                        tok_id, start, end = next(events)
                        if tok_id in (QuotedValue, UnquotedValue):
                            # Note: quoted values may have &amp;
                            # We would need ANOTHER lexer to unescape them.
                            # Right now help_gen.py and oils_doc.py don't
                            # need that.
                            val = start, end
                            break

        except StopIteration:
            pass
        return val

    def GetAttrRaw(self, attr_name):
        """
        Return the value, which may be UNESCAPED.
        """
        start, end = self.GetSpanForAttrValue(attr_name)
        if start == -1:
            return None
        return self.s[start:end]

    def AllAttrsRaw(self):
        """
        Get a list of pairs [('class', 'foo'), ('href', '?foo=1&amp;bar=2')]

        The quoted values may be escaped.  We would need another lexer to
        unescape them.
        """
        pairs = []
        events = self.Tokens()
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]

                    # The value should come next
                    tok_id, start, end = next(events)
                    if tok_id in (QuotedValue, UnquotedValue):
                        # Note: quoted values may have &amp;
                        # We would need ANOTHER lexer to unescape them, but we
                        # don't need that for ul-table

                        val = self.s[start:end]
                        pairs.append((name, val))
        except StopIteration:
            pass
        return pairs

    def Tokens(self):
        """
        Yields a sequence of tokens: Tag (AttrName AttrValue?)*

        Where each Token is (Type, start_pos, end_pos)

        Note that start and end are NOT redundant!  We skip over some unwanted
        characters.
        """
        m = _TAG_RE.match(self.s, self.start_pos + 1)
        if not m:
            raise RuntimeError("Couldn't find HTML tag in %r" %
                               self.TagString())
        yield TagName, m.start(1), m.end(1)

        pos = m.end(0)

        while True:
            # don't search past the end
            m = _ATTR_RE.match(self.s, pos, self.end_pos)
            if not m:
                # A validating parser would check that > or /> is next -- there's no junk
                break

            yield AttrName, m.start(1), m.end(1)

            # Quoted is group 2, unquoted is group 3.
            if m.group(2) is not None:
                yield QuotedValue, m.start(2), m.end(2)
            elif m.group(3) is not None:
                yield UnquotedValue, m.start(3), m.end(3)

            # Skip past the whole attribute match, including any closing "
            pos = m.end(0)
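
# A minimal usage sketch of TagLexer, with a hypothetical input.  Reset()
# points the lexer at one tag within a larger buffer; here it's the whole
# string:
#
#   s = '<a href="/" class="big">'
#   tag_lexer = TagLexer(s)
#   tag_lexer.Reset(0, len(s))
#   # tag_lexer.TagName() == 'a'
#   # tag_lexer.GetAttrRaw('href') == '/'
#   # tag_lexer.AllAttrsRaw() == [('href', '/'), ('class', 'big')]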


def ReadUntilStartTag(it, tag_lexer, tag_name):
    """Find the next <foo>, returning its (start, end) positions

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.StartTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No start tag %r' % tag_name)


def ReadUntilEndTag(it, tag_lexer, tag_name):
    """Find the next </foo>, returning its (start, end) position

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.EndTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No end tag %r' % tag_name)


CHAR_ENTITY = {
    'amp': '&',
    'lt': '<',
    'gt': '>',
    'quot': '"',
}


def ToText(s, left_pos=0, right_pos=-1):
    """Given HTML, return text by unquoting &gt; and &lt; etc.

    Used by:
      doctools/oils_doc.py: PygmentsPlugin
      doctools/help_gen.py: HelpIndexCards

    In the latter case, we could process some tags, like:

    - Blue Link (not clickable, but still useful)
    - Red X

    That should be html.ToAnsi.
    """
    f = StringIO()
    out = Output(s, f, left_pos, right_pos)

    pos = left_pos
    for tok_id, end_pos in ValidTokens(s, left_pos, right_pos):
        if tok_id == Tok.RawData:
            out.SkipTo(pos)
            out.PrintUntil(end_pos)

        elif tok_id == Tok.CharEntity:  # &amp;

            entity = s[pos + 1:end_pos - 1]

            out.SkipTo(pos)
            out.Print(CHAR_ENTITY[entity])
            out.SkipTo(end_pos)

        # Not handling these yet
        elif tok_id == Tok.HexChar:
            raise AssertionError('Hex Char %r' % s[pos:pos + 20])

        elif tok_id == Tok.DecChar:
            raise AssertionError('Dec Char %r' % s[pos:pos + 20])

        pos = end_pos

    out.PrintTheRest()
    return f.getvalue()
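
# For example: ToText('hi &amp; bye') == 'hi & bye' -- RawData is copied
# through, and the entity is replaced via CHAR_ENTITY.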


# https://developer.mozilla.org/en-US/docs/Glossary/Void_element
VOID_ELEMENTS = [
    'area',
    'base',
    'br',
    'col',
    'embed',
    'hr',
    'img',
    'input',
    'link',
    'meta',
    'param',
    'source',
    'track',
    'wbr',
]

LEX_ATTRS = 1 << 1
LEX_QUOTED_VALUES = 1 << 2  # href="?x=42&amp;y=99"
NO_SPECIAL_TAGS = 1 << 3  # <script> <style>, VOID tags, etc.
BALANCED_TAGS = 1 << 4  # are tags balanced?


def Validate(contents, flags, counters):
    # type: (str, int, Counters) -> None

    tag_lexer = TagLexer(contents)
    no_special_tags = bool(flags & NO_SPECIAL_TAGS)
    lx = Lexer(contents, no_special_tags=no_special_tags)
    tokens = []
    start_pos = 0
    tag_stack = []
    while True:
        tok_id, end_pos = lx.Read()

        if tok_id == Tok.Invalid:
            raise LexError(contents, start_pos)
        if tok_id == Tok.EndOfStream:
            break

        tokens.append((tok_id, end_pos))

        if tok_id == Tok.StartEndTag:
            counters.num_start_end_tags += 1

            tag_lexer.Reset(start_pos, end_pos)
            all_attrs = tag_lexer.AllAttrsRaw()
            counters.num_attrs += len(all_attrs)

        elif tok_id == Tok.StartTag:
            counters.num_start_tags += 1

            tag_lexer.Reset(start_pos, end_pos)
            all_attrs = tag_lexer.AllAttrsRaw()
            counters.num_attrs += len(all_attrs)

            if flags & BALANCED_TAGS:
                tag_name = lx.TagName()
                if flags & NO_SPECIAL_TAGS:
                    tag_stack.append(tag_name)
                else:
                    # e.g. <meta> is considered self-closing, like <meta/>
                    if tag_name not in VOID_ELEMENTS:
                        tag_stack.append(tag_name)

                counters.max_tag_stack = max(counters.max_tag_stack,
                                             len(tag_stack))
        elif tok_id == Tok.EndTag:
            if flags & BALANCED_TAGS:
                try:
                    expected = tag_stack.pop()
                except IndexError:
                    raise ParseError('Tag stack empty',
                                     s=contents,
                                     start_pos=start_pos)

                actual = lx.TagName()
                if expected != actual:
                    raise ParseError(
                        'Got unexpected closing tag %r; opening tag was %r' %
                        (contents[start_pos:end_pos], expected),
                        s=contents,
                        start_pos=start_pos)

        start_pos = end_pos
    counters.num_tokens += len(tokens)


class Counters(object):

    def __init__(self):
        self.num_tokens = 0
        self.num_start_tags = 0
        self.num_start_end_tags = 0
        self.num_attrs = 0
        self.max_tag_stack = 0
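

# A minimal sketch of driving Validate() directly, with a hypothetical input:
#
#   counters = Counters()
#   Validate('<p><b>x</b></p>',
#            LEX_ATTRS | LEX_QUOTED_VALUES | BALANCED_TAGS, counters)
#   # counters.num_start_tags == 2, counters.max_tag_stack == 2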


def main(argv):
    action = argv[1]

    if action == 'tokens':
        contents = sys.stdin.read()

        lx = Lexer(contents)
        start_pos = 0
        while True:
            tok_id, end_pos = lx.Read()
            if tok_id == Tok.Invalid:
                raise LexError(contents, start_pos)
            if tok_id == Tok.EndOfStream:
                break

            frag = contents[start_pos:end_pos]
            log('%d %s %r', end_pos, TokenName(tok_id), frag)
            start_pos = end_pos

        return 0

    elif action in ('lex-htm8', 'parse-htm8', 'parse-xml'):

        errors = []
        counters = Counters()

        flags = LEX_ATTRS | LEX_QUOTED_VALUES
        if action.startswith('parse-'):
            flags |= BALANCED_TAGS
        if action == 'parse-xml':
            flags |= NO_SPECIAL_TAGS

        i = 0
        for line in sys.stdin:
            filename = line.strip()
            with open(filename) as f:
                contents = f.read()

            try:
                Validate(contents, flags, counters)
            except LexError as e:
                log('Lex error in %r: %s', filename, e)
                errors.append((filename, e))
            except ParseError as e:
                log('Parse error in %r: %s', filename, e)
                errors.append((filename, e))
            i += 1

        log('')
        log(
            '  %d tokens, %d start/end tags, %d start tags, %d attrs, %d max tag stack depth in %d files',
            counters.num_tokens, counters.num_start_end_tags,
            counters.num_start_tags, counters.num_attrs,
            counters.max_tag_stack, i)
        log('  %d errors', len(errors))
        if len(errors):
            return 1
        return 0

    elif action == 'todo':
        # Other algorithms:
        #
        # - select first subtree with given ID
        #   - this requires understanding the void tags I suppose
        # - select all subtrees that have a class
        # - materialize DOM

        # Safe-HTM8?  This is a filter
        return 0

    else:
        raise RuntimeError('Invalid action %r' % action)


if __name__ == '__main__':
    sys.exit(main(sys.argv))