#!/usr/bin/env python2
"""
lazylex/html.py - Low-Level HTML Processing.

See lazylex/README.md for details.

TODO: This should be an Oils library eventually. It's a "lazily-parsed data
structure" like TSV8.
"""
from __future__ import print_function

try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO  # python3
import re
import sys

if sys.version_info.major == 2:
    from typing import List, Tuple


def log(msg, *args):
    msg = msg % args
    print(msg, file=sys.stderr)


class LexError(Exception):
    """For bad lexical elements like <> or &&"""

    def __init__(self, s, pos):
        self.s = s
        self.pos = pos

    def __str__(self):
        return '(LexError %r)' % (self.s[self.pos:self.pos + 20])


class ParseError(Exception):
    """For errors in the tag structure."""

    def __init__(self, msg, *args):
        self.msg = msg
        self.args = args

    def __str__(self):
        return '(ParseError %s)' % (self.msg % self.args)


class Output(object):
    """Takes an underlying input buffer and an output file. Maintains a
    position in the input buffer.

    Print FROM the input or print new text to the output.
    """

    def __init__(self, s, f, left_pos=0, right_pos=-1):
        self.s = s
        self.f = f
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos

    def SkipTo(self, pos):
        """Skip to a position."""
        self.pos = pos

    def PrintUntil(self, pos):
        """Print from the current position until a position."""
        piece = self.s[self.pos:pos]
        self.f.write(piece)
        self.pos = pos

    def PrintTheRest(self):
        """Print until the end of the string."""
        self.PrintUntil(self.right_pos)

    def Print(self, s):
        """Print new text to the output file."""
        self.f.write(s)

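# A sketch of typical Output usage, with hypothetical values (not taken from
# this module's callers): copy some of the input, replace a span, then copy
# the rest.
#
#     out = Output('<b>hi</b>', sys.stdout)
#     out.PrintUntil(3)    # copy '<b>' from the input
#     out.Print('HI')      # emit replacement text
#     out.SkipTo(5)        # skip over 'hi' in the input
#     out.PrintTheRest()   # copy '</b>', yielding '<b>HI</b>' overall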

# HTML Tokens
# CommentBegin and ProcessingBegin are "pseudo-tokens", not visible
TOKENS = ('Decl Comment CommentBegin Processing ProcessingBegin StartTag '
          'StartEndTag EndTag DecChar HexChar CharEntity RawData CData '
          'CDataStartTag CDataEndTag Invalid EndOfStream').split()


class Tok(object):
    """
    Avoid lint errors by using these aliases
    """
    pass


TOKEN_NAMES = [None] * len(TOKENS)  # type: List[str]

this_module = sys.modules[__name__]
for i, tok_str in enumerate(TOKENS):
    setattr(this_module, tok_str, i)
    setattr(Tok, tok_str, i)
    TOKEN_NAMES[i] = tok_str


def TokenName(tok_id):
    return TOKEN_NAMES[tok_id]


def MakeLexer(rules):
    return [(re.compile(pat, re.VERBOSE), i) for (pat, i) in rules]


#
# Eggex
#
# Tag = / ~['>']+ /

# Is this valid? A single character?
# Tag = / ~'>'* /

# Maybe better: / [NOT '>']+/
# capital letters not allowed there?
#
# But then this is confusing:
# / [NOT ~digit]+/
#
# / [NOT digit] / is [^\d]
# / ~digit / is \D
#
# Or maybe:
#
# / [~ digit]+ /
# / [~ '>']+ /
# / [NOT '>']+ /

# End = / '</' Tag '>' /
# StartEnd = / '<' Tag '/>' /
# Start = / '<' Tag '>' /
#
# EntityRef = / '&' dot{* N} ';' /

# Tag name, or attribute name
_NAME = r'[a-zA-Z][a-zA-Z0-9_\-]*'  # must start with letter

LEXER = [
    # Note: non-greedy matches are regular and can be matched in linear time
    # with RE2.
    #
    # https://news.ycombinator.com/item?id=27099798
    #
    # Maybe try combining all of these for speed.

    # . is any char except newline
    # https://re2c.org/manual/manual_c.html

    # Discarded options
    #(r'<!-- .*? -->', Tok.Comment),

    # Hack from Claude: \s\S instead of re.DOTALL. I don't like this
    #(r'<!-- [\s\S]*? -->', Tok.Comment),
    #(r'<!-- (?:.|[\n])*? -->', Tok.Comment),
    (r'<!--', Tok.CommentBegin),

    # Processing instructions are XML-only, but they're treated like comments
    # in HTML:
    #
    # https://developer.mozilla.org/en-US/docs/Web/API/ProcessingInstruction
    #
    # We don't want to confuse them with start tags, so we recognize them at
    # the top level.
    (r'<\?', Tok.ProcessingBegin),

    # NOTE: < is allowed in these.
    (r'<! [^>]+ >', Tok.Decl),  # <!DOCTYPE html>
    #(r'<(?:script|style) [^>]+>', Tok.CDataStartTag),  # <script> <style>

    # Notes:
    # - We look for a valid tag name, but we don't validate attributes.
    #   That's done in the tag lexer.
    # - We don't allow leading whitespace.
    #
    # TODO: do something different for <script> and <style>. And maybe have a
    # mode to also understand the difference between <pre> <textarea> and say
    # <div>.
    #
    # Ordering matters: the self-closing <br/> rule must come before the
    # plain start tag rule.
    (r'</ (%s) [^>]* >' % _NAME, Tok.EndTag),  # end </a>
    (r'< (%s) [^>]* />' % _NAME, Tok.StartEndTag),  # self-closing <br/>
    (r'< (%s) [^>]* >' % _NAME, Tok.StartTag),  # start <a>

    (r'&\# [0-9]+ ;', Tok.DecChar),
    (r'&\# x[0-9a-fA-F]+ ;', Tok.HexChar),
    (r'& [a-zA-Z]+ ;', Tok.CharEntity),

    # HTML5 allows > in raw data - should we? But < is not allowed.
    # https://stackoverflow.com/questions/10462348/right-angle-bracket-in-html
    #
    # TODO: I think we should disallow it, like XML does. There should be "one
    # way to do it", and it makes the distinction between <script> and <style>
    # stronger.
    (r'[^&<]+', Tok.RawData),
    (r'.', Tok.Invalid),  # error!
]

LEXER = MakeLexer(LEXER)


class Lexer(object):

    def __init__(self, s, left_pos=0, right_pos=-1):
        self.s = s
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos
        self.cache = {}  # string -> compiled regex pattern object

    def _Peek(self):
        # type: () -> Tuple[int, int]
        """
        Note: not using _Peek() now
        """
        if self.pos == self.right_pos:
            return Tok.EndOfStream, self.pos

        assert self.pos < self.right_pos, self.pos

        # Find the first match.
        # Note: frontend/match.py uses _LongestMatch(), which is different!
        # TODO: reconcile them. This lexer should be expressible in re2c.

        # TODO: Get rid of non-greedy match

        for pat, tok_id in LEXER:
            m = pat.match(self.s, self.pos)
            if m:
                if tok_id == Tok.CommentBegin:
                    pos = self.s.find('-->', self.pos)
                    if pos == -1:
                        # unterminated <!--
                        raise LexError(self.s, self.pos)
                    return Tok.Comment, pos + 3  # -->

                if tok_id == Tok.ProcessingBegin:
                    pos = self.s.find('?>', self.pos)
                    if pos == -1:
                        # unterminated <?
                        raise LexError(self.s, self.pos)
                    return Tok.Processing, pos + 2  # ?>

                # TODO: we need to enter a state so the NEXT call can be
                # CData.  And then the one after that must be CDataEndTag.
                if tok_id == Tok.CDataStartTag:
                    end_tag = '</script>'
                    pos = self.s.find(end_tag, self.pos)
                    if pos == -1:
                        # unterminated </script>
                        raise LexError(self.s, self.pos)

                return tok_id, m.end()
        else:
            raise AssertionError('Tok.Invalid rule should have matched')

    def Read(self):
        # type: () -> Tuple[int, int]
        tok_id, end_pos = self._Peek()
        self.pos = end_pos  # advance
        return tok_id, end_pos

    def LookAhead(self, regex):
        # Cache the regex compilation. This could also be LookAheadFor(THEAD)
        # or something.
        pat = self.cache.get(regex)
        if pat is None:
            pat = re.compile(regex)
            self.cache[regex] = pat

        m = pat.match(self.s, self.pos)
        return m is not None

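# A sketch of driving the Lexer by hand, on a hypothetical input. Read()
# returns (tok_id, end_pos), so a token's text is the slice between
# successive end positions:
#
#     s = '<p>hi &amp; bye</p>'
#     lx = Lexer(s)
#     pos = 0
#     while True:
#         tok_id, end_pos = lx.Read()
#         if tok_id == Tok.EndOfStream:
#             break
#         print(TokenName(tok_id), repr(s[pos:end_pos]))
#         pos = end_pos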

def _Tokens(s, left_pos, right_pos):
    """
    Args:
      s: string to parse
      left_pos, right_pos: Optional span boundaries.
    """
    lx = Lexer(s, left_pos, right_pos)
    while True:
        tok_id, pos = lx.Read()
        yield tok_id, pos
        if tok_id == Tok.EndOfStream:
            break


def ValidTokens(s, left_pos=0, right_pos=-1):
    """Wrapper around _Tokens to prevent callers from having to handle Invalid.

    I'm not combining the two functions because I might want to do a
    'yield' transformation on Tokens()?  Exceptions might complicate the
    issue?
    """
    pos = left_pos
    for tok_id, end_pos in _Tokens(s, left_pos, right_pos):
        if tok_id == Tok.Invalid:
            raise LexError(s, pos)
        yield tok_id, end_pos
        pos = end_pos


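# A sketch of the error contract, on a hypothetical bad input: ValidTokens()
# raises LexError instead of yielding Tok.Invalid.
#
#     try:
#         list(ValidTokens('<>'))
#     except LexError as e:
#         print(e)    # (LexError '<>')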
# Tag names:
# Match <a or </a
# Match <h2, but not <2h
#
# HTML 5 doesn't restrict tag names at all
# https://html.spec.whatwg.org/#toc-syntax
#
# XML allows : - .
# https://www.w3.org/TR/xml/#NT-NameChar

# Namespaces for MathML, SVG
# XLink, XML, XMLNS
#
# https://infra.spec.whatwg.org/#namespaces
#
# Allow - for td-attrs

_ATTR_VALUE = r'[a-zA-Z0-9_\-]+'  # allow hyphens

# TODO: capture tag name above?
_TAG_RE = re.compile(r'/? \s* (%s)' % _NAME, re.VERBOSE)

# To match href="foo"

_ATTR_RE = re.compile(
    r'''
\s+           # Leading whitespace is required
(%s)          # Attribute name
(?:           # Optional attribute value
  \s* = \s*
  (?:
    " ([^>"]*) "   # double-quoted value
  | (%s)           # unquoted value
                   # TODO: relax this? for href=$foo
  )
)?
''' % (_NAME, _ATTR_VALUE), re.VERBOSE)

TagName, AttrName, UnquotedValue, QuotedValue = range(4)


class TagLexer(object):
    """
    Given a tag like <a href="..."> or <link type="..." />, the TagLexer
    provides a few operations:

    - What is the tag?
    - Iterate through the attributes, giving (name, value_start_pos,
      value_end_pos)
    """

    def __init__(self, s):
        self.s = s
        self.start_pos = -1  # Invalid
        self.end_pos = -1

    def Reset(self, start_pos, end_pos):
        """Reuse instances of this object."""
        self.start_pos = start_pos
        self.end_pos = end_pos

    def TagString(self):
        return self.s[self.start_pos:self.end_pos]

    def TagName(self):
        # First event
        tok_id, start, end = next(self.Tokens())
        return self.s[start:end]

    def GetSpanForAttrValue(self, attr_name):
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these

        events = self.Tokens()
        val = (-1, -1)
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]
                    if name == attr_name:
                        # The value should come next
                        tok_id, start, end = next(events)
                        if tok_id in (QuotedValue, UnquotedValue):
                            # Note: quoted values may have &amp;
                            # We would need ANOTHER lexer to unescape them.
                            # Right now help_gen.py and oils_doc.py use the
                            # raw value.
                            val = start, end
                        break

        except StopIteration:
            pass
        return val

    def GetAttrRaw(self, attr_name):
        """
        Return the value, which may be UNESCAPED.
        """
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these
        start, end = self.GetSpanForAttrValue(attr_name)
        if start == -1:
            return None
        return self.s[start:end]

    def AllAttrsRaw(self):
        """
        Get a list of pairs [('class', 'foo'), ('href', '?foo=1&amp;bar=2')]

        The quoted values may be escaped. We would need another lexer to
        unescape them.
        """
        pairs = []
        events = self.Tokens()
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]

                    # The value should come next
                    tok_id, start, end = next(events)
                    if tok_id in (QuotedValue, UnquotedValue):
                        # Note: quoted values may have &amp;
                        # We would need ANOTHER lexer to unescape them, but we
                        # don't need that for ul-table

                        val = self.s[start:end]
                        pairs.append((name, val))
        except StopIteration:
            pass
        return pairs

    def Tokens(self):
        """
        Yields a sequence of tokens: Tag (AttrName AttrValue?)*

        Where each Token is (Type, start_pos, end_pos)

        Note that start and end are NOT redundant! We skip over some unwanted
        characters.
        """
        m = _TAG_RE.match(self.s, self.start_pos + 1)
        if not m:
            raise RuntimeError("Couldn't find HTML tag in %r" %
                               self.TagString())
        yield TagName, m.start(1), m.end(1)

        pos = m.end(0)

        while True:
            # don't search past the end
            m = _ATTR_RE.match(self.s, pos, self.end_pos)
            if not m:
                # A validating parser would check that > or /> is next --
                # there's no junk
                break

            yield AttrName, m.start(1), m.end(1)

            # Quoted is group 2, unquoted is group 3.
            if m.group(2) is not None:
                yield QuotedValue, m.start(2), m.end(2)
            elif m.group(3) is not None:
                yield UnquotedValue, m.start(3), m.end(3)

            # Skip past the closing quote
            pos = m.end(0)

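# A sketch of TagLexer usage, with hypothetical positions: Reset() points the
# lexer at the span of a single tag within the underlying string.
#
#     s = '<a href="index.html">'
#     tag_lexer = TagLexer(s)
#     tag_lexer.Reset(0, len(s))      # span of the '<a ...>' token
#     tag_lexer.TagName()             # -> 'a'
#     tag_lexer.GetAttrRaw('href')    # -> 'index.html' (may contain &amp;)
#     tag_lexer.AllAttrsRaw()         # -> [('href', 'index.html')]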


def ReadUntilStartTag(it, tag_lexer, tag_name):
    """Find the next <foo>, returning its (start, end) positions

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.StartTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No start tag %r', tag_name)


def ReadUntilEndTag(it, tag_lexer, tag_name):
    """Find the next </foo>, returning its (start, end) positions

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.EndTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No end tag %r', tag_name)

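# A sketch of how these helpers are driven, with a hypothetical tag name:
#
#     it = ValidTokens(contents)
#     tag_lexer = TagLexer(contents)
#     start, end = ReadUntilStartTag(it, tag_lexer, 'table')
#     # contents[start:end] is the '<table ...>' token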

CHAR_ENTITY = {
    'amp': '&',
    'lt': '<',
    'gt': '>',
    'quot': '"',
}


def ToText(s, left_pos=0, right_pos=-1):
    """Given HTML, return text by unquoting &gt; and &lt; etc.

    Used by:
      doctools/oils_doc.py: PygmentsPlugin
      doctools/make_help.py: HelpIndexCards

    In the latter case, we could process some tags, like:

    - Blue Link (not clickable, but still useful)
    - Red X

    That should be html.ToAnsi.
    """
    f = StringIO()
    out = Output(s, f, left_pos, right_pos)

    pos = left_pos
    for tok_id, end_pos in ValidTokens(s, left_pos, right_pos):
        if tok_id == Tok.RawData:
            out.SkipTo(pos)
            out.PrintUntil(end_pos)

        elif tok_id == Tok.CharEntity:  # &amp;
            entity = s[pos + 1:end_pos - 1]

            out.SkipTo(pos)
            out.Print(CHAR_ENTITY[entity])
            out.SkipTo(end_pos)

        # Not handling these yet
        elif tok_id == Tok.HexChar:
            raise AssertionError('Hex Char %r' % s[pos:pos + 20])

        elif tok_id == Tok.DecChar:
            raise AssertionError('Dec Char %r' % s[pos:pos + 20])

        pos = end_pos

    out.PrintTheRest()
    return f.getvalue()

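# A sketch, on a hypothetical input: entities in raw data are decoded, and
# the tags in between contribute nothing.
#
#     ToText('<b>one</b> &amp; two')    # -> 'one & two'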

def main(argv):
    action = argv[1]

    if action == 'well-formed':
        num_tokens = 0
        errors = []
        i = 0
        for line in sys.stdin:
            name = line.strip()
            with open(name) as f:
                contents = f.read()

            lx = ValidTokens(contents)
            try:
                tokens = list(lx)
            except LexError as e:
                log('Error in %r: %s', name, e)
                errors.append((name, e))
            else:
                num_tokens += len(tokens)
                #print('%d %s' % (len(tokens), name))
            i += 1

        log('')
        log(' %d tokens in %d files', num_tokens, i)
        log(' %d errors', len(errors))
        if 0:
            for name, e in errors:
                log('Error in %r: %s', name, e)

    else:
        raise RuntimeError('Invalid action %r' % action)


if __name__ == '__main__':
    main(sys.argv)