#!/usr/bin/env python2
"""
lazylex/html.py - Low-Level HTML Processing.

See lazylex/README.md for details.

TODO: This should be an Oils library eventually.  It's a "lazily-parsed data
structure" like TSV8.
"""
from __future__ import print_function

try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO  # python3
import re
import sys

if sys.version_info.major == 2:
    from typing import List, Tuple, Optional


def log(msg, *args):
    msg = msg % args
    print(msg, file=sys.stderr)


class LexError(Exception):
    """For bad lexical elements like <> or &&"""

    def __init__(self, s, pos):
        self.s = s
        self.pos = pos

    def __str__(self):
        return '(LexError %r)' % (self.s[self.pos:self.pos + 20])


class ParseError(Exception):
    """For errors in the tag structure."""

    def __init__(self, msg, *args):
        self.msg = msg
        self.args = args

    def __str__(self):
        return '(ParseError %s)' % (self.msg % self.args)


class Output(object):
    """Takes an underlying input buffer and an output file.  Maintains a
    position in the input buffer.

    Print FROM the input, or print new text to the output.
    """

    def __init__(self, s, f, left_pos=0, right_pos=-1):
        self.s = s
        self.f = f
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos

    def SkipTo(self, pos):
        """Skip to a position in the input without printing it."""
        self.pos = pos

    def PrintUntil(self, pos):
        """Print from the current position up to pos."""
        piece = self.s[self.pos:pos]
        self.f.write(piece)
        self.pos = pos

    def PrintTheRest(self):
        """Print until the end of the string."""
        self.PrintUntil(self.right_pos)

    def Print(self, s):
        """Print new text to the output file."""
        self.f.write(s)

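def _ExampleOutput():
    # Illustrative sketch, not part of the original file: Output is used to
    # rewrite a document by copying some spans and replacing others.
    f = StringIO()
    out = Output('<b>&amp;</b>', f)
    out.PrintUntil(3)   # copy '<b>'
    out.SkipTo(8)       # skip over '&amp;'
    out.Print('&')      # print replacement text
    out.PrintTheRest()  # copy '</b>'
    assert f.getvalue() == '<b>&</b>', f.getvalue()
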
# HTML Tokens
# CommentBegin and ProcessingBegin are "pseudo-tokens", not visible
TOKENS = 'Decl Comment CommentBegin Processing ProcessingBegin StartTag StartEndTag EndTag DecChar HexChar CharEntity RawData CData Invalid EndOfStream'.split(
)


class Tok(object):
    """
    Avoid lint errors by using these aliases
    """
    pass


TOKEN_NAMES = [None] * len(TOKENS)  # type: List[str]

this_module = sys.modules[__name__]
for i, tok_str in enumerate(TOKENS):
    setattr(this_module, tok_str, i)
    setattr(Tok, tok_str, i)
    TOKEN_NAMES[i] = tok_str


def TokenName(tok_id):
    return TOKEN_NAMES[tok_id]

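# Illustrative, not part of the original file: the setattr() loop above binds
# each token name to a small integer ID, so
#
#     Tok.StartTag             # => 5
#     TokenName(Tok.StartTag)  # => 'StartTag'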

def MakeLexer(rules):
    return [(re.compile(pat, re.VERBOSE), i) for (pat, i) in rules]


#
# Eggex
#
# Tag = / ~['>']+ /

# Is this valid?  A single character?
# Tag = / ~'>'* /

# Maybe better: / [NOT '>']+ /
# capital letters not allowed there?
#
# But then this is confusing:
# / [NOT ~digit]+ /
#
# / [NOT digit] / is [^\d]
# / ~digit /      is \D
#
# Or maybe:
#
# / [~ digit]+ /
# / [~ '>']+ /
# / [NOT '>']+ /

# End      = / '</' Tag '>' /
# StartEnd = / '<' Tag '/>' /
# Start    = / '<' Tag '>' /
#
# EntityRef = / '&' dot{* N} ';' /

# Tag name, or attribute name
_NAME = r'[a-zA-Z][a-zA-Z0-9_\-]*'  # must start with a letter

LEXER = [
    # Note: non-greedy matches are regular and can be matched in linear time
    # with RE2.
    #
    # https://news.ycombinator.com/item?id=27099798
    #
    # Maybe try combining all of these for speed.

    # . is any char except newline
    # https://re2c.org/manual/manual_c.html

    # Discarded options
    #(r'<!-- .*? -->', Tok.Comment),

    # Hack from Claude: \s\S instead of re.DOTALL.  I don't like this
    #(r'<!-- [\s\S]*? -->', Tok.Comment),
    #(r'<!-- (?:.|[\n])*? -->', Tok.Comment),
    (r'<!--', Tok.CommentBegin),

    # Processing instructions are XML-only, but they're treated like comments
    # in HTML:
    #
    # https://developer.mozilla.org/en-US/docs/Web/API/ProcessingInstruction
    #
    # We don't want to confuse them with start tags, so we recognize them at
    # the top level.
    (r'<\?', Tok.ProcessingBegin),

    # NOTE: < is allowed in these.
    (r'<! [^>]+ >', Tok.Decl),  # <!DOCTYPE html>
    #(r'<(?:script|style) [^>]+>', Tok.CDataStartTag),

    # Notes:
    # - We look for a valid tag name, but we don't validate attributes.
    #   That's done in the tag lexer.
    # - We don't allow leading whitespace.
    (r'</ (%s) [^>]* >' % _NAME, Tok.EndTag),  # end </a>
    # Self-closing <br/> comes before StartTag.
    (r'< (%s) [^>]* />' % _NAME, Tok.StartEndTag),  # self-closing <br/>
    (r'< (%s) [^>]* >' % _NAME, Tok.StartTag),  # start <a>
    (r'&\# [0-9]+ ;', Tok.DecChar),
    (r'&\# x[0-9a-fA-F]+ ;', Tok.HexChar),
    (r'& [a-zA-Z]+ ;', Tok.CharEntity),

    # HTML5 allows > in raw data - should we?  But < is not allowed.
    # https://stackoverflow.com/questions/10462348/right-angle-bracket-in-html
    (r'[^&<]+', Tok.RawData),
    (r'.', Tok.Invalid),  # error!
]

# TODO:
# - I think we should disallow unescaped <, like XML does.  There should be
#   "one way to do it", and it would catch bugs.
# - End tags shouldn't allow any other data; it has to be </foo>, not
#   </foo x=y>.

LEXER = MakeLexer(LEXER)


class Lexer(object):

    def __init__(self, s, left_pos=0, right_pos=-1):
        self.s = s
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos
        self.cache = {}  # string -> compiled regex pattern object

        # either </script> or </style> - we search until we see that
        self.search_state = None  # type: Optional[str]

    def _Peek(self):
        # type: () -> Tuple[int, int]
        """
        Note: not using _Peek() now
        """
        if self.pos == self.right_pos:
            return Tok.EndOfStream, self.pos

        assert self.pos < self.right_pos, self.pos

        if self.search_state is not None:
            pos = self.s.find(self.search_state, self.pos)
            if pos == -1:
                # unterminated <script> or <style>
                raise LexError(self.s, self.pos)
            self.search_state = None
            # pos is the beginning of </script> or </style>; that end tag is
            # lexed on the next call
            return Tok.CData, pos

        # Find the first match.
        # Note: frontend/match.py uses _LongestMatch(), which is different!
        # TODO: reconcile them.  This lexer should be expressible in re2c.

        # TODO: Get rid of non-greedy match

        for pat, tok_id in LEXER:
            m = pat.match(self.s, self.pos)
            if m:
                if tok_id == Tok.CommentBegin:
                    pos = self.s.find('-->', self.pos)
                    if pos == -1:
                        # unterminated <!--
                        raise LexError(self.s, self.pos)
                    return Tok.Comment, pos + 3  # include -->

                if tok_id == Tok.ProcessingBegin:
                    pos = self.s.find('?>', self.pos)
                    if pos == -1:
                        # unterminated <?
                        raise LexError(self.s, self.pos)
                    return Tok.Processing, pos + 2  # include ?>

                if tok_id == Tok.StartTag:
                    tag_name = m.group(1)  # captured
                    if tag_name == 'script':
                        self.search_state = '</script>'
                    elif tag_name == 'style':
                        self.search_state = '</style>'

                return tok_id, m.end()
        else:
            raise AssertionError('Tok.Invalid rule should have matched')

    def Read(self):
        # type: () -> Tuple[int, int]
        tok_id, end_pos = self._Peek()
        self.pos = end_pos  # advance
        return tok_id, end_pos

    def LookAhead(self, regex):
        # Cache the regex compilation.  This could also be LookAheadFor(THEAD)
        # or something.
        pat = self.cache.get(regex)
        if pat is None:
            pat = re.compile(regex)
            self.cache[regex] = pat

        m = pat.match(self.s, self.pos)
        return m is not None


def _Tokens(s, left_pos, right_pos):
    """
    Args:
      s: string to parse
      left_pos, right_pos: Optional span boundaries.
    """
    lx = Lexer(s, left_pos, right_pos)
    while True:
        tok_id, pos = lx.Read()
        yield tok_id, pos
        if tok_id == Tok.EndOfStream:
            break


def ValidTokens(s, left_pos=0, right_pos=-1):
    """Wrapper around _Tokens() so callers don't have to handle Tok.Invalid.

    I'm not combining the two functions because I might want to do a 'yield'
    transformation on _Tokens(), and exceptions might complicate that.
    """
    pos = left_pos
    for tok_id, end_pos in _Tokens(s, left_pos, right_pos):
        if tok_id == Tok.Invalid:
            raise LexError(s, pos)
        yield tok_id, end_pos
        pos = end_pos

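def _ExampleValidTokens():
    # Illustrative sketch, not part of the original file: what ValidTokens()
    # yields for a small document.  Each item is (tok_id, end_pos); a token's
    # text is the slice between the previous end_pos and its own.
    tokens = list(ValidTokens('<p>hi</p>'))
    assert tokens == [
        (Tok.StartTag, 3),  # <p>
        (Tok.RawData, 5),  # hi
        (Tok.EndTag, 9),  # </p>
        (Tok.EndOfStream, 9),
    ], tokens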

# Tag names:
#   Match <a  or </a
#   Match <h2, but not <2h
#
# HTML 5 doesn't restrict tag names at all
#   https://html.spec.whatwg.org/#toc-syntax
#
# XML allows : - .
#   https://www.w3.org/TR/xml/#NT-NameChar

# Namespaces for MathML, SVG
# XLink, XML, XMLNS
#
# https://infra.spec.whatwg.org/#namespaces
#
# Allow - for td-attrs

_ATTR_VALUE = r'[a-zA-Z0-9_\-]+'  # allow hyphens

# TODO: capture tag name above?
_TAG_RE = re.compile(r'/? \s* (%s)' % _NAME, re.VERBOSE)

# To match href="foo" or href=foo

_ATTR_RE = re.compile(
    r'''
\s+                # Leading whitespace is required
(%s)               # Attribute name
(?:                # Optional attribute value
  \s* = \s*
  (?:
    " ([^>"]*) "   # double-quoted value
  | (%s)           # unquoted value
                   # TODO: relax this?  for href=$foo
  )
)?
''' % (_NAME, _ATTR_VALUE), re.VERBOSE)

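# Illustrative, not part of the original file: how _ATTR_RE's groups line up.
# Group 1 is the attribute name, group 2 a double-quoted value, group 3 an
# unquoted value.
#
#     m = _ATTR_RE.match(' href="/foo" class=link')
#     m.group(1)  # => 'href'
#     m.group(2)  # => '/foo'  (quoted)
#     m.group(3)  # => None    (this attribute's value was quoted)
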
TagName, AttrName, UnquotedValue, QuotedValue = range(4)


class TagLexer(object):
    """
    Given a tag like <a href="..."> or <link type="..." />, the TagLexer
    provides a few operations:

    - What is the tag name?
    - Iterate through the attributes, giving (name, value_start_pos,
      value_end_pos)
    """

    def __init__(self, s):
        self.s = s
        self.start_pos = -1  # Invalid
        self.end_pos = -1

    def Reset(self, start_pos, end_pos):
        """Reuse instances of this object."""
        self.start_pos = start_pos
        self.end_pos = end_pos

    def TagString(self):
        return self.s[self.start_pos:self.end_pos]

    def TagName(self):
        # First event
        tok_id, start, end = next(self.Tokens())
        return self.s[start:end]

    def GetSpanForAttrValue(self, attr_name):
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these

        events = self.Tokens()
        val = (-1, -1)
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]
                    if name == attr_name:
                        # The value should come next
                        tok_id, start, end = next(events)
                        if tok_id in (QuotedValue, UnquotedValue):
                            # Note: quoted values may contain &amp;, and we
                            # would need ANOTHER lexer to unescape them.
                            # Right now help_gen.py and oils_doc.py get by
                            # without that.
                            val = start, end
                            break

        except StopIteration:
            pass
        return val

    def GetAttrRaw(self, attr_name):
        """
        Return the value, which may still be ESCAPED (e.g. contain &amp;).
        """
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these
        start, end = self.GetSpanForAttrValue(attr_name)
        if start == -1:
            return None
        return self.s[start:end]

    def AllAttrsRaw(self):
        """
        Get a list of pairs [('class', 'foo'), ('href', '?foo=1&amp;bar=2')]

        The quoted values may be escaped.  We would need another lexer to
        unescape them.
        """
        pairs = []
        events = self.Tokens()
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]

                    # The value should come next
                    tok_id, start, end = next(events)
                    if tok_id in (QuotedValue, UnquotedValue):
                        # Note: quoted values may have &amp;
                        # We would need ANOTHER lexer to unescape them, but we
                        # don't need that for ul-table
                        val = self.s[start:end]
                        pairs.append((name, val))
        except StopIteration:
            pass
        return pairs

    def Tokens(self):
        """
        Yields a sequence of tokens: Tag (AttrName AttrValue?)*

        Where each token is (Type, start_pos, end_pos)

        Note that start and end are NOT redundant!  We skip over some unwanted
        characters.
        """
        m = _TAG_RE.match(self.s, self.start_pos + 1)
        if not m:
            raise RuntimeError("Couldn't find HTML tag in %r" %
                               self.TagString())
        yield TagName, m.start(1), m.end(1)

        pos = m.end(0)

        while True:
            # don't search past the end
            m = _ATTR_RE.match(self.s, pos, self.end_pos)
            if not m:
                # A validating parser would check that > or /> is next --
                # there's no junk
                break

            yield AttrName, m.start(1), m.end(1)

            # Quoted is group 2, unquoted is group 3.
            if m.group(2) is not None:
                yield QuotedValue, m.start(2), m.end(2)
            elif m.group(3) is not None:
                yield UnquotedValue, m.start(3), m.end(3)

            # Skip past the closing quote
            pos = m.end(0)

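def _ExampleTagLexer():
    # Illustrative sketch, not part of the original file: reading attributes
    # out of a start tag.  Values come back "raw", i.e. possibly escaped.
    lx = TagLexer('<a href="/foo?a=1&amp;b=2" class=link>')
    lx.Reset(0, len(lx.s))
    assert lx.TagName() == 'a'
    assert lx.GetAttrRaw('href') == '/foo?a=1&amp;b=2'  # still escaped
    assert lx.AllAttrsRaw() == [('href', '/foo?a=1&amp;b=2'),
                                ('class', 'link')]
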
def ReadUntilStartTag(it, tag_lexer, tag_name):
    """Find the next <foo>, returning its (start, end) position.

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.StartTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No start tag %r', tag_name)


def ReadUntilEndTag(it, tag_lexer, tag_name):
    """Find the next </foo>, returning its (start, end) position.

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.EndTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No end tag %r', tag_name)

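# Illustrative, not part of the original file: both functions are meant to
# share one token iterator and one reusable TagLexer, e.g.
#
#     it = ValidTokens(contents)
#     tag_lexer = TagLexer(contents)
#     start, end = ReadUntilStartTag(it, tag_lexer, 'table')
#     # contents[start:end] spans the <table ...> tag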

CHAR_ENTITY = {
    'amp': '&',
    'lt': '<',
    'gt': '>',
    'quot': '"',
}


def ToText(s, left_pos=0, right_pos=-1):
    """Given HTML, return text by unquoting &gt; and &lt; etc.

    Used by:
      doctools/oils_doc.py: PygmentsPlugin
      doctool/make_help.py: HelpIndexCards

    In the latter case, we could process some tags, like:

    - Blue Link (not clickable, but still useful)
    - Red X

    That should be html.ToAnsi.
    """
    f = StringIO()
    out = Output(s, f, left_pos, right_pos)

    pos = left_pos
    for tok_id, end_pos in ValidTokens(s, left_pos, right_pos):
        if tok_id == Tok.RawData:
            out.SkipTo(pos)
            out.PrintUntil(end_pos)

        elif tok_id == Tok.CharEntity:  # &amp;
            entity = s[pos + 1:end_pos - 1]

            out.SkipTo(pos)
            out.Print(CHAR_ENTITY[entity])
            out.SkipTo(end_pos)

        # Not handling these yet
        elif tok_id == Tok.HexChar:
            raise AssertionError('Hex Char %r' % s[pos:pos + 20])

        elif tok_id == Tok.DecChar:
            raise AssertionError('Dec Char %r' % s[pos:pos + 20])

        pos = end_pos

    out.PrintTheRest()
    return f.getvalue()

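def _ExampleToText():
    # Illustrative sketch, not part of the original file: tags are dropped
    # and known char entities are unescaped.
    assert ToText('<b>1 &lt; 2</b> done') == '1 < 2 done'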

def main(argv):
    action = argv[1]

    if action == 'well-formed':
        num_tokens = 0
        errors = []
        i = 0
        for line in sys.stdin:
            name = line.strip()
            with open(name) as f:
                contents = f.read()

            lx = ValidTokens(contents)
            try:
                tokens = list(lx)
            except LexError as e:
                log('Error in %r: %s', name, e)
                errors.append((name, e))
            else:
                num_tokens += len(tokens)
                #print('%d %s' % (len(tokens), name))
            i += 1

        log('')
        log('  %d tokens in %d files', num_tokens, i)
        log('  %d errors', len(errors))
        if 0:
            for name, e in errors:
                log('Error in %r: %s', name, e)

    else:
        raise RuntimeError('Invalid action %r' % action)


if __name__ == '__main__':
    main(sys.argv)