#!/usr/bin/env python2
"""
lazylex/html.py - Low-Level HTML Processing.

See lazylex/README.md for details.

TODO: This should be an Oils library eventually. It's a "lazily-parsed data
structure" like TSV8
"""
from __future__ import print_function

try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO  # python3
import re
import sys

if sys.version_info.major == 2:
    from typing import List, Tuple, Optional


def log(msg, *args):
    msg = msg % args
    print(msg, file=sys.stderr)


class LexError(Exception):
    """For bad lexical elements like <> or &&"""

    def __init__(self, s, pos):
        self.s = s
        self.pos = pos

    def __str__(self):
        return '(LexError %r)' % (self.s[self.pos:self.pos + 20])


class ParseError(Exception):
    """For errors in the tag structure."""

    def __init__(self, msg, *args):
        self.msg = msg
        self.args = args

    def __str__(self):
        return '(ParseError %s)' % (self.msg % self.args)


class Output(object):
    """Takes an underlying input buffer and an output file. Maintains a
    position in the input buffer.

    Print FROM the input or print new text to the output.
    """

    def __init__(self, s, f, left_pos=0, right_pos=-1):
        self.s = s
        self.f = f
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos

    def SkipTo(self, pos):
        """Skip to a position."""
        self.pos = pos

    def PrintUntil(self, pos):
        """Print until a position."""
        piece = self.s[self.pos:pos]
        self.f.write(piece)
        self.pos = pos

    def PrintTheRest(self):
        """Print until the end of the string."""
        self.PrintUntil(self.right_pos)

    def Print(self, s):
        """Print text to the underlying buffer."""
        self.f.write(s)


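# A minimal usage sketch of Output (the input string here is made up):
#
#   out = Output('<b>hi</b>', sys.stdout)
#   out.PrintUntil(3)   # copies '<b>' from the input
#   out.SkipTo(5)       # skips over 'hi'
#   out.PrintTheRest()  # copies '</b>'
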
# HTML Tokens
# CommentBegin, ProcessingBegin, CDataBegin are "pseudo-tokens", not visible
TOKENS = ('Decl Comment CommentBegin Processing ProcessingBegin CData '
          'CDataBegin StartTag StartEndTag EndTag DecChar HexChar CharEntity '
          'RawData HtmlCData Invalid EndOfStream').split()


class Tok(object):
    """
    Avoid lint errors by using these aliases
    """
    pass


TOKEN_NAMES = [None] * len(TOKENS)  # type: List[str]

this_module = sys.modules[__name__]
for i, tok_str in enumerate(TOKENS):
    setattr(this_module, tok_str, i)
    setattr(Tok, tok_str, i)
    TOKEN_NAMES[i] = tok_str


def TokenName(tok_id):
    return TOKEN_NAMES[tok_id]

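# For example, after the setattr() loop above, Tok.StartTag and the
# module-level StartTag are the same small integer, and
# TokenName(Tok.StartTag) returns 'StartTag'.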

def MakeLexer(rules):
    return [(re.compile(pat, re.VERBOSE), i) for (pat, i) in rules]


#
# Eggex
#
# Tag = / ~['>']+ /

# Is this valid? A single character?
# Tag = / ~'>'* /

# Maybe better: / [NOT '>']+ /
# capital letters not allowed there?
#
# But then this is confusing:
# / [NOT ~digit]+ /
#
# / [NOT digit] / is [^\d]
# / ~digit /      is \D
#
# Or maybe:
#
# / [~ digit]+ /
# / [~ '>']+ /
# / [NOT '>']+ /

# End      = / '</' Tag '>' /
# StartEnd = / '<' Tag '/>' /
# Start    = / '<' Tag '>' /
#
# EntityRef = / '&' dot{* N} ';' /

# Tag name, or attribute name
# colon is used in XML
_NAME = r'[a-zA-Z][a-zA-Z0-9:_\-]*'  # must start with letter

LEXER = [
    # Note: non-greedy matches are regular and can be matched in linear time
    # with RE2.
    #
    # https://news.ycombinator.com/item?id=27099798
    #
    # Maybe try combining all of these for speed.

    # . is any char except newline
    # https://re2c.org/manual/manual_c.html

    # Discarded options
    #(r'<!-- .*? -->', Tok.Comment),

    # Hack from Claude: \s\S instead of re.DOTALL. I don't like this
    #(r'<!-- [\s\S]*? -->', Tok.Comment),
    #(r'<!-- (?:.|[\n])*? -->', Tok.Comment),
    (r'<!--', Tok.CommentBegin),

    # Processing instructions are XML only, but they are treated like a
    # comment in HTML:
    #
    # https://developer.mozilla.org/en-US/docs/Web/API/ProcessingInstruction
    #
    # They are used for the XML declaration:
    #   <?xml version="1.0" encoding="UTF-8"?>
    (r'<\?', Tok.ProcessingBegin),
    (r'<!\[CDATA\[', Tok.CDataBegin),  # <![CDATA[

    # NOTE: < is allowed in these?
    (r'<! [^>]+ >', Tok.Decl),  # <!DOCTYPE html>

    # Notes:
    # - We look for a valid tag name, but we don't validate attributes.
    #   That's done in the tag lexer.
    # - We don't allow leading whitespace
    (r'</ (%s) >' % _NAME, Tok.EndTag),  # end </a>
    # self-closing <br/> comes before StartTag
    (r'< (%s) [^>]* />' % _NAME, Tok.StartEndTag),  # self-closing <br/>
    (r'< (%s) [^>]* >' % _NAME, Tok.StartTag),  # start <a>
    (r'&\# [0-9]+ ;', Tok.DecChar),
    (r'&\# x[0-9a-fA-F]+ ;', Tok.HexChar),
    (r'& [a-zA-Z]+ ;', Tok.CharEntity),

    # HTML5 allows > in raw data - should we? But < is not allowed.
    # https://stackoverflow.com/questions/10462348/right-angle-bracket-in-html
    #
    # - My early blog has THREE errors when disallowing >
    # - So do some .wwz files
    (r'[^&<]+', Tok.RawData),
    (r'.', Tok.Invalid),  # error!
]

# TODO:
# - Should we disallow unescaped >, like XML does? There should be "one way
#   to do it", and it could catch escaping bugs.

LEXER = MakeLexer(LEXER)


class Lexer(object):

    def __init__(self, s, left_pos=0, right_pos=-1):
        self.s = s
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos
        self.cache = {}  # string -> compiled regex pattern object

        # either </script> or </style> - we search until we see that
        self.search_state = None  # type: Optional[str]

    def _Peek(self):
        # type: () -> Tuple[int, int]
        """Return the next (token ID, end position), without advancing."""
        if self.pos == self.right_pos:
            return Tok.EndOfStream, self.pos

        assert self.pos < self.right_pos, self.pos

        if self.search_state is not None:
            pos = self.s.find(self.search_state, self.pos)
            if pos == -1:
                # unterminated <script> or <style>
                raise LexError(self.s, self.pos)
            self.search_state = None
            # beginning
            return Tok.HtmlCData, pos

        # Find the first match.
        # Note: frontend/match.py uses _LongestMatch(), which is different!
        # TODO: reconcile them. This lexer should be expressible in re2c.

        for pat, tok_id in LEXER:
            m = pat.match(self.s, self.pos)
            if m:
                if tok_id == Tok.CommentBegin:
                    pos = self.s.find('-->', self.pos)
                    if pos == -1:
                        # unterminated <!--
                        raise LexError(self.s, self.pos)
                    return Tok.Comment, pos + 3  # -->

                if tok_id == Tok.ProcessingBegin:
                    pos = self.s.find('?>', self.pos)
                    if pos == -1:
                        # unterminated <?
                        raise LexError(self.s, self.pos)
                    return Tok.Processing, pos + 2  # ?>

                if tok_id == Tok.CDataBegin:
                    pos = self.s.find(']]>', self.pos)
                    if pos == -1:
                        # unterminated <![CDATA[
                        raise LexError(self.s, self.pos)
                    return Tok.CData, pos + 3  # ]]>

                if tok_id == Tok.StartTag:
                    tag_name = m.group(1)  # captured
                    if tag_name == 'script':
                        self.search_state = '</script>'
                    elif tag_name == 'style':
                        self.search_state = '</style>'

                return tok_id, m.end()
        else:
            raise AssertionError('Tok.Invalid rule should have matched')

    def Read(self):
        # type: () -> Tuple[int, int]
        tok_id, end_pos = self._Peek()
        self.pos = end_pos  # advance
        return tok_id, end_pos

    def LookAhead(self, regex):
        # Cache the regex compilation. This could also be LookAheadFor(THEAD)
        # or something.
        pat = self.cache.get(regex)
        if pat is None:
            pat = re.compile(regex)
            self.cache[regex] = pat

        m = pat.match(self.s, self.pos)
        return m is not None


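# A sketch of using Lexer directly (the input and the LookAhead regex are
# made up):
#
#   lx = Lexer('<table><tr>')
#   tok_id, end_pos = lx.Read()   # consumes <table>; end_pos == 7
#   lx.LookAhead(r'\s*<tr>')      # True, and the position doesn't advance
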
def _Tokens(s, left_pos, right_pos):
    """
    Args:
      s: string to parse
      left_pos, right_pos: Optional span boundaries.
    """
    lx = Lexer(s, left_pos, right_pos)
    while True:
        tok_id, pos = lx.Read()
        yield tok_id, pos
        if tok_id == Tok.EndOfStream:
            break


def ValidTokens(s, left_pos=0, right_pos=-1):
    """Wrapper around _Tokens to prevent callers from having to handle Invalid.

    I'm not combining the two functions because I might want to do a
    'yield' transformation on _Tokens(), and exceptions might complicate
    that.
    """
    pos = left_pos
    for tok_id, end_pos in _Tokens(s, left_pos, right_pos):
        if tok_id == Tok.Invalid:
            raise LexError(s, pos)
        yield tok_id, end_pos
        pos = end_pos


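# A minimal sketch of how the token generator is used (the input string is
# made up):
#
#   for tok_id, end_pos in ValidTokens('<p>hi</p>'):
#       print(TokenName(tok_id), end_pos)
#
# prints StartTag 3, RawData 5, EndTag 9, and then EndOfStream 9.
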
# Tag names:
# Match <a or </a
# Match <h2, but not <2h
#
# HTML 5 doesn't restrict tag names at all
# https://html.spec.whatwg.org/#toc-syntax
#
# XML allows : - .
# https://www.w3.org/TR/xml/#NT-NameChar

# Namespaces for MathML, SVG
# XLink, XML, XMLNS
#
# https://infra.spec.whatwg.org/#namespaces
#
# Allow - for td-attrs

_ATTR_VALUE = r'[a-zA-Z0-9_\-]+'  # allow hyphens

# TODO: we don't need to capture the tag name here? That's done at the top
# level
_TAG_RE = re.compile(r'/? \s* (%s)' % _NAME, re.VERBOSE)

# To match href="foo"

_ATTR_RE = re.compile(
    r'''
\s+                 # Leading whitespace is required
(%s)                # Attribute name
(?:                 # Optional attribute value
  \s* = \s*
  (?:
    " ([^>"]*) "    # double quoted value
    | (%s)          # Attribute value
                    # TODO: relax this? for href=$foo
  )
)?
''' % (_NAME, _ATTR_VALUE), re.VERBOSE)

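# Examples of substrings _ATTR_RE matches (a sketch, not from the original):
#
#   ' href="/index.html"'  -> name 'href', quoted value '/index.html'
#   ' width=16'            -> name 'width', unquoted value '16'
#   ' checked'             -> name 'checked', no value
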
TagName, AttrName, UnquotedValue, QuotedValue = range(4)


class TagLexer(object):
    """
    Given a tag like <a href="..."> or <link type="..." />, the TagLexer
    provides a few operations:

    - What is the tag?
    - Iterate through the attributes, giving (name, value_start_pos,
      value_end_pos)
    """

    def __init__(self, s):
        self.s = s
        self.start_pos = -1  # Invalid
        self.end_pos = -1

    def Reset(self, start_pos, end_pos):
        """Reuse instances of this object."""
        self.start_pos = start_pos
        self.end_pos = end_pos

    def TagString(self):
        return self.s[self.start_pos:self.end_pos]

    def TagName(self):
        # First event
        tok_id, start, end = next(self.Tokens())
        return self.s[start:end]

    def GetSpanForAttrValue(self, attr_name):
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these

        events = self.Tokens()
        val = (-1, -1)
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]
                    if name == attr_name:
                        # The value should come next
                        tok_id, start, end = next(events)
                        if tok_id in (QuotedValue, UnquotedValue):
                            # Note: quoted values may have &amp;  We would
                            # need ANOTHER lexer to unescape them, but right
                            # now help_gen.py and oils_doc.py don't need it.
                            val = start, end
                            break

        except StopIteration:
            pass
        return val

    def GetAttrRaw(self, attr_name):
        """
        Return the value, which may be UNESCAPED.
        """
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these
        start, end = self.GetSpanForAttrValue(attr_name)
        if start == -1:
            return None
        return self.s[start:end]

    def AllAttrsRaw(self):
        """
        Get a list of pairs [('class', 'foo'), ('href', '?foo=1&amp;bar=2')]

        The quoted values may be escaped. We would need another lexer to
        unescape them.
        """
        pairs = []
        events = self.Tokens()
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]

                    # The value should come next
                    tok_id, start, end = next(events)
                    if tok_id in (QuotedValue, UnquotedValue):
                        # Note: quoted values may have &amp;
                        # We would need ANOTHER lexer to unescape them, but we
                        # don't need that for ul-table

                        val = self.s[start:end]
                        pairs.append((name, val))
        except StopIteration:
            pass
        return pairs

    def Tokens(self):
        """
        Yields a sequence of tokens: Tag (AttrName AttrValue?)*

        Where each Token is (Type, start_pos, end_pos)

        Note that start and end are NOT redundant! We skip over some unwanted
        characters.
        """
        m = _TAG_RE.match(self.s, self.start_pos + 1)
        if not m:
            raise RuntimeError("Couldn't find HTML tag in %r" %
                               self.TagString())
        yield TagName, m.start(1), m.end(1)

        pos = m.end(0)

        while True:
            # don't search past the end
            m = _ATTR_RE.match(self.s, pos, self.end_pos)
            if not m:
                # A validating parser would check that > or /> is next --
                # there's no junk
                break

            yield AttrName, m.start(1), m.end(1)

            # Quoted is group 2, unquoted is group 3.
            if m.group(2) is not None:
                yield QuotedValue, m.start(2), m.end(2)
            elif m.group(3) is not None:
                yield UnquotedValue, m.start(3), m.end(3)

            # Skip past the "
            pos = m.end(0)

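# A usage sketch of TagLexer (the input tag is made up):
#
#   tag_lexer = TagLexer('<a href="/index.html">')
#   tag_lexer.Reset(0, 22)          # the tag spans the whole string
#   tag_lexer.TagName()             # -> 'a'
#   tag_lexer.GetAttrRaw('href')    # -> '/index.html'
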
def ReadUntilStartTag(it, tag_lexer, tag_name):
    """Find the next <foo>, returning its (start, end) position.

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.StartTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No start tag %r', tag_name)


def ReadUntilEndTag(it, tag_lexer, tag_name):
    """Find the next </foo>, returning its (start, end) position.

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.EndTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No end tag %r', tag_name)


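# A sketch of how these are used together with the lexer (the HTML input is
# made up):
#
#   contents = '<div><p>text</p></div>'
#   it = ValidTokens(contents)
#   tag_lexer = TagLexer(contents)
#   start, end = ReadUntilStartTag(it, tag_lexer, 'p')   # span of '<p>'
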
CHAR_ENTITY = {
    'amp': '&',
    'lt': '<',
    'gt': '>',
    'quot': '"',
}


def ToText(s, left_pos=0, right_pos=-1):
    """Given HTML, return text by unquoting &gt; and &lt; etc.

    Used by:
      doctools/oils_doc.py: PygmentsPlugin
      doctools/make_help.py: HelpIndexCards

    In the latter case, we could process some tags, like:

    - Blue Link (not clickable, but still useful)
    - Red X

    That should be html.ToAnsi.
    """
    f = StringIO()
    out = Output(s, f, left_pos, right_pos)

    pos = left_pos
    for tok_id, end_pos in ValidTokens(s, left_pos, right_pos):
        if tok_id == Tok.RawData:
            out.SkipTo(pos)
            out.PrintUntil(end_pos)

        elif tok_id == Tok.CharEntity:  # &amp;
            entity = s[pos + 1:end_pos - 1]

            out.SkipTo(pos)
            out.Print(CHAR_ENTITY[entity])
            out.SkipTo(end_pos)

        # Not handling these yet
        elif tok_id == Tok.HexChar:
            raise AssertionError('Hex Char %r' % s[pos:pos + 20])

        elif tok_id == Tok.DecChar:
            raise AssertionError('Dec Char %r' % s[pos:pos + 20])

        pos = end_pos

    out.PrintTheRest()
    return f.getvalue()


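# For example (a sketch; ToText is typically called on a span between tags):
#
#   ToText('foo &amp; bar')   # -> 'foo & bar'
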
def main(argv):
    action = argv[1]

    if action == 'well-formed':
        num_tokens = 0
        errors = []
        i = 0
        for line in sys.stdin:
            name = line.strip()
            with open(name) as f:
                contents = f.read()

            lx = ValidTokens(contents)
            try:
                tokens = list(lx)
            except LexError as e:
                log('Error in %r: %s', name, e)
                errors.append((name, e))
            else:
                num_tokens += len(tokens)
                #print('%d %s' % (len(tokens), name))
            i += 1

        log('')
        log(' %d tokens in %d files', num_tokens, i)
        log(' %d errors', len(errors))
        if 0:
            for name, e in errors:
                log('Error in %r: %s', name, e)

    else:
        raise RuntimeError('Invalid action %r' % action)


if __name__ == '__main__':
    main(sys.argv)