#!/usr/bin/env python2
"""
lazylex/html.py - Low-Level HTML Processing.

See lazylex/README.md for details.

TODO: This should be an Oils library eventually.  It's a "lazily-parsed data
structure" like TSV8.
"""
from __future__ import print_function

try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO  # python3
import re
import sys

if sys.version_info.major == 2:
    from typing import List, Tuple


def log(msg, *args):
    msg = msg % args
    print(msg, file=sys.stderr)


class LexError(Exception):
    """For bad lexical elements like <> or &&"""

    def __init__(self, s, pos):
        self.s = s
        self.pos = pos

    def __str__(self):
        return '(LexError %r)' % (self.s[self.pos:self.pos + 20])


class ParseError(Exception):
    """For errors in the tag structure."""

    def __init__(self, msg, *args):
        self.msg = msg
        self.args = args

    def __str__(self):
        return '(ParseError %s)' % (self.msg % self.args)


class Output(object):
    """Takes an underlying input buffer and an output file.  Maintains a
    position in the input buffer.

    Print FROM the input, or print new text to the output.
    """

    def __init__(self, s, f, left_pos=0, right_pos=-1):
        self.s = s
        self.f = f
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos

    def SkipTo(self, pos):
        """Skip to a position."""
        self.pos = pos

    def PrintUntil(self, pos):
        """Print from the current position up to pos."""
        piece = self.s[self.pos:pos]
        self.f.write(piece)
        self.pos = pos

    def PrintTheRest(self):
        """Print until the end of the string."""
        self.PrintUntil(self.right_pos)

    def Print(self, s):
        """Print new text to the output file."""
        self.f.write(s)


# HTML Tokens
# CommentBegin and ProcessingBegin are "pseudo-tokens", not visible
TOKENS = ('Decl Comment CommentBegin Processing ProcessingBegin StartTag '
          'StartEndTag EndTag DecChar HexChar CharEntity RawData Invalid '
          'EndOfStream').split()


class Tok(object):
    """
    Avoid lint errors by using these aliases
    """
    pass


TOKEN_NAMES = [None] * len(TOKENS)  # type: List[str]

this_module = sys.modules[__name__]
for i, tok_str in enumerate(TOKENS):
    setattr(this_module, tok_str, i)
    setattr(Tok, tok_str, i)
    TOKEN_NAMES[i] = tok_str


def TokenName(tok_id):
    return TOKEN_NAMES[tok_id]
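

# Example (a sketch, not in the original file): the setattr() loop above
# makes each token name an integer constant, both at module level and on Tok.
#
#     Tok.StartTag             # -> 5 (its index in TOKENS)
#     TokenName(Tok.StartTag)  # -> 'StartTag'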


def MakeLexer(rules):
    return [(re.compile(pat, re.VERBOSE), i) for (pat, i) in rules]


#
# Eggex
#
# Tag = / ~['>']+ /

# Is this valid? A single character?
# Tag = / ~'>'* /

# Maybe better: / [NOT '>']+ /
# capital letters not allowed there?
#
# But then this is confusing:
# / [NOT ~digit]+ /
#
# / [NOT digit] / is [^\d]
# / ~digit / is \D
#
# Or maybe:
#
# / [~ digit]+ /
# / [~ '>']+ /
# / [NOT '>']+ /

# End = / '</' Tag '>' /
# StartEnd = / '<' Tag '/>' /
# Start = / '<' Tag '>' /
#
# EntityRef = / '&' dot{* N} ';' /

LEXER = [
    # Note: non-greedy matches are regular and can be matched in linear time
    # with RE2.
    #
    # https://news.ycombinator.com/item?id=27099798
    #
    # Maybe try combining all of these for speed.

    # . is any char except newline
    # https://re2c.org/manual/manual_c.html

    # Discarded options
    #(r'<!-- .*? -->', Tok.Comment),

    # Hack from Claude: \s\S instead of re.DOTALL.  I don't like this
    #(r'<!-- [\s\S]*? -->', Tok.Comment),
    #(r'<!-- (?:.|[\n])*? -->', Tok.Comment),
    (r'<!--', Tok.CommentBegin),

    # Processing instructions are XML-only, but they're treated like comments
    # in HTML:
    #
    # https://developer.mozilla.org/en-US/docs/Web/API/ProcessingInstruction
    #
    # We don't want to confuse them with start tags, so we recognize them at
    # the top level.
    (r'<\?', Tok.ProcessingBegin),

    # NOTE: < is allowed in these.
    (r'<! [^>]+ >', Tok.Decl),  # <!DOCTYPE html>
    (r'</ [^>]+ >', Tok.EndTag),  # end tag </a>

    # The self-closing rule comes FIRST: < [^>]+ > would also match <br/>.
    (r'< [^>]+ />', Tok.StartEndTag),  # self-closing <br/>
    (r'< [^>]+ >', Tok.StartTag),  # start tag <a>

    (r'&\# [0-9]+ ;', Tok.DecChar),  # decimal char ref like &#62;
    (r'&\# x[0-9a-fA-F]+ ;', Tok.HexChar),  # hex char ref like &#x3e;
    (r'& [a-zA-Z]+ ;', Tok.CharEntity),  # named entity like &amp;

    # Note: > is allowed in raw data.
    # https://stackoverflow.com/questions/10462348/right-angle-bracket-in-html
    (r'[^&<]+', Tok.RawData),
    (r'.', Tok.Invalid),  # error!
]

LEXER = MakeLexer(LEXER)


class Lexer(object):

    def __init__(self, s, left_pos=0, right_pos=-1):
        self.s = s
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos
        self.cache = {}  # string -> compiled regex pattern object

    def _Peek(self):
        # type: () -> Tuple[int, int]
        """
        Note: we're not using _Peek() for lookahead right now; Read() is its
        only caller.
        """
        if self.pos == self.right_pos:
            return Tok.EndOfStream, self.pos

        assert self.pos < self.right_pos, self.pos

        # Find the first match.
        # Note: frontend/match.py uses _LongestMatch(), which is different!
        # TODO: reconcile them.  This lexer should be expressible in re2c.

        # TODO: Get rid of non-greedy match

        for pat, tok_id in LEXER:
            m = pat.match(self.s, self.pos)
            if m:
                if tok_id == Tok.CommentBegin:
                    pos = self.s.find('-->', self.pos)
                    if pos == -1:
                        # unterminated <!--
                        raise LexError(self.s, self.pos)
                    return Tok.Comment, pos + 3  # past -->

                if tok_id == Tok.ProcessingBegin:
                    pos = self.s.find('?>', self.pos)
                    if pos == -1:
                        # unterminated <?
                        raise LexError(self.s, self.pos)
                    return Tok.Processing, pos + 2  # past ?>

                return tok_id, m.end()
        else:
            # Unreachable in practice: the Tok.Invalid rule matches any char.
            raise AssertionError('Tok.Invalid rule should have matched')

    def Read(self):
        # type: () -> Tuple[int, int]
        tok_id, end_pos = self._Peek()
        self.pos = end_pos  # advance
        return tok_id, end_pos

    def LookAhead(self, regex):
        """Test whether the given regex matches at the current position."""
        # Cache the regex compilation.  This could also be LookAheadFor(THEAD)
        # or something.
        pat = self.cache.get(regex)
        if pat is None:
            pat = re.compile(regex)
            self.cache[regex] = pat

        m = pat.match(self.s, self.pos)
        return m is not None
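

# Example of LookAhead (a sketch, not in the original file): peek at what
# follows the tokens consumed so far, without advancing the lexer.
#
#     lx = Lexer('<tr><td>x</td></tr>')
#     lx.Read()             # consume <tr>
#     lx.LookAhead(r'<td')  # -> True: a <td> tag is next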


def _Tokens(s, left_pos, right_pos):
    """
    Args:
      s: string to parse
      left_pos, right_pos: Optional span boundaries.
    """
    lx = Lexer(s, left_pos, right_pos)
    while True:
        tok_id, pos = lx.Read()
        yield tok_id, pos
        if tok_id == Tok.EndOfStream:
            break


def ValidTokens(s, left_pos=0, right_pos=-1):
    """Wrapper around _Tokens to prevent callers from having to handle Invalid.

    I'm not combining the two functions because I might want to do a
    'yield' transformation on _Tokens(), and exceptions might complicate
    that.
    """
    pos = left_pos
    for tok_id, end_pos in _Tokens(s, left_pos, right_pos):
        if tok_id == Tok.Invalid:
            raise LexError(s, pos)
        yield tok_id, end_pos
        pos = end_pos
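

# Example of ValidTokens (a sketch, not in the original file).  Tokens are
# (tok_id, end_pos) pairs; a token's text is s[pos:end_pos], where pos is the
# previous token's end position.
#
#     s = '<p>x &amp; y</p>'
#     pos = 0
#     for tok_id, end_pos in ValidTokens(s):
#         print(TokenName(tok_id), s[pos:end_pos])
#         pos = end_pos
#
# This prints StartTag '<p>', RawData 'x ', CharEntity '&amp;', RawData ' y',
# EndTag '</p>', and finally EndOfStream with an empty string.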


# Tag names:
# Match <a or </a
# Match <h2, but not <2h
#
# HTML 5 doesn't restrict tag names at all
# https://html.spec.whatwg.org/#toc-syntax
#
# XML allows : - .
# https://www.w3.org/TR/xml/#NT-NameChar

# Namespaces for MathML, SVG
# XLink, XML, XMLNS
#
# https://infra.spec.whatwg.org/#namespaces
#
# Allow - for td-attrs

# Tag name, or attribute name
_NAME = r'[a-zA-Z][a-zA-Z0-9_\-]*'  # must start with letter

_ATTR_VALUE = r'[a-zA-Z0-9_\-]+'  # allow hyphens

_TAG_RE = re.compile(r'/? \s* (%s)' % _NAME, re.VERBOSE)

# To match href="foo"

_ATTR_RE = re.compile(
    r'''
\s+             # Leading whitespace is required
(%s)            # Attribute name
(?:             # Optional attribute value
  \s* = \s*
  (?:
    " ([^>"]*) "  # double-quoted value
  | (%s)          # unquoted value
                  # TODO: relax this? for href=$foo
  )
)?
''' % (_NAME, _ATTR_VALUE), re.VERBOSE)
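

# Example (a sketch, not in the original file): _ATTR_RE matches one
# attribute at a time, anchored right after the tag name or the previous
# attribute.
#
#     m = _ATTR_RE.match(' href="/foo"')
#     # m.group(1) == 'href', m.group(2) == '/foo', m.group(3) is None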

# Token types yielded by TagLexer.Tokens()
TagName, AttrName, UnquotedValue, QuotedValue = range(4)


class TagLexer(object):
    """
    Given a tag like <a href="..."> or <link type="..." />, the TagLexer
    provides a few operations:

    - What is the tag name?
    - Iterate through the attributes, giving (name, value_start_pos,
      value_end_pos)
    """

    def __init__(self, s):
        self.s = s
        self.start_pos = -1  # Invalid
        self.end_pos = -1

    def Reset(self, start_pos, end_pos):
        """Reuse instances of this object."""
        self.start_pos = start_pos
        self.end_pos = end_pos

    def TagString(self):
        return self.s[self.start_pos:self.end_pos]

    def TagName(self):
        # First event
        tok_id, start, end = next(self.Tokens())
        return self.s[start:end]

    def GetSpanForAttrValue(self, attr_name):
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these

        events = self.Tokens()
        val = (-1, -1)
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]
                    if name == attr_name:
                        # The value should come next
                        tok_id, start, end = next(events)
                        if tok_id in (QuotedValue, UnquotedValue):
                            # Note: quoted values may have &amp;
                            # We would need ANOTHER lexer to unescape them.
                            # Right now help_gen.py and oils_doc.py don't
                            # need that.
                            val = start, end
                            break

        except StopIteration:
            pass
        return val

    def GetAttrRaw(self, attr_name):
        """
        Return the value, which may be UNESCAPED.
        """
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these
        start, end = self.GetSpanForAttrValue(attr_name)
        if start == -1:
            return None
        return self.s[start:end]

    def AllAttrsRaw(self):
        """
        Get a list of pairs [('class', 'foo'), ('href', '?foo=1&amp;bar=2')]

        The quoted values may be escaped.  We would need another lexer to
        unescape them.
        """
        pairs = []
        events = self.Tokens()
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]

                    # The value should come next
                    tok_id, start, end = next(events)
                    if tok_id in (QuotedValue, UnquotedValue):
                        # Note: quoted values may have &amp;
                        # We would need ANOTHER lexer to unescape them, but we
                        # don't need that for ul-table
                        val = self.s[start:end]
                        pairs.append((name, val))
        except StopIteration:
            pass
        return pairs

    def Tokens(self):
        """
        Yields a sequence of tokens: Tag (AttrName AttrValue?)*

        Where each Token is (Type, start_pos, end_pos)

        Note that start and end are NOT redundant!  We skip over some unwanted
        characters.
        """
        m = _TAG_RE.match(self.s, self.start_pos + 1)
        if not m:
            raise RuntimeError("Couldn't find HTML tag in %r" %
                               self.TagString())
        yield TagName, m.start(1), m.end(1)

        pos = m.end(0)

        while True:
            # don't search past the end
            m = _ATTR_RE.match(self.s, pos, self.end_pos)
            if not m:
                # A validating parser would check that > or /> is next --
                # there's no junk
                break

            yield AttrName, m.start(1), m.end(1)

            # Quoted is group 2, unquoted is group 3.
            if m.group(2) is not None:
                yield QuotedValue, m.start(2), m.end(2)
            elif m.group(3) is not None:
                yield UnquotedValue, m.start(3), m.end(3)

            # Skip past the closing quote (or the end of the unquoted value)
            pos = m.end(0)
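

# Example of using TagLexer (a sketch, not in the original file).  In real
# use, Reset() takes the token spans produced by Lexer / ValidTokens.
#
#     s = '<a href="/foo" class="bar">'
#     tag_lexer = TagLexer(s)
#     tag_lexer.Reset(0, len(s))
#     tag_lexer.TagName()           # -> 'a'
#     tag_lexer.GetAttrRaw('href')  # -> '/foo'
#     tag_lexer.AllAttrsRaw()       # -> [('href', '/foo'), ('class', 'bar')]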


def ReadUntilStartTag(it, tag_lexer, tag_name):
    """Find the next <foo>, returning its (start, end) positions

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.StartTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No start tag %r', tag_name)


def ReadUntilEndTag(it, tag_lexer, tag_name):
    """Find the next </foo>, returning its (start, end) position

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.EndTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No end tag %r', tag_name)
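

# Example of the ReadUntil* functions (a sketch, not in the original file):
# pull out the text of the first <title> element.
#
#     s = '<html><title>Oils</title></html>'
#     tag_lexer = TagLexer(s)
#     it = ValidTokens(s)
#     _, title_end = ReadUntilStartTag(it, tag_lexer, 'title')
#     body_end, _ = ReadUntilEndTag(it, tag_lexer, 'title')
#     s[title_end:body_end]  # -> 'Oils'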


CHAR_ENTITY = {
    'amp': '&',
    'lt': '<',
    'gt': '>',
    'quot': '"',
}


def ToText(s, left_pos=0, right_pos=-1):
    """Given HTML, return text by unquoting &gt; and &lt; etc.

    Used by:
      doctools/oils_doc.py: PygmentsPlugin
      doctools/make_help.py: HelpIndexCards

    In the latter case, we could process some tags, like:

    - Blue Link (not clickable, but still useful)
    - Red X

    That should be html.ToAnsi.
    """
    f = StringIO()
    out = Output(s, f, left_pos, right_pos)

    pos = left_pos
    for tok_id, end_pos in ValidTokens(s, left_pos, right_pos):
        if tok_id == Tok.RawData:
            out.SkipTo(pos)
            out.PrintUntil(end_pos)

        elif tok_id == Tok.CharEntity:  # &amp;
            entity = s[pos + 1:end_pos - 1]

            out.SkipTo(pos)
            out.Print(CHAR_ENTITY[entity])
            out.SkipTo(end_pos)

        # Not handling these yet
        elif tok_id == Tok.HexChar:
            raise AssertionError('Hex Char %r' % s[pos:pos + 20])

        elif tok_id == Tok.DecChar:
            raise AssertionError('Dec Char %r' % s[pos:pos + 20])

        pos = end_pos

    out.PrintTheRest()
    return f.getvalue()
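

# Example of ToText (a sketch, not in the original file): tags are dropped
# and named entities are unquoted.
#
#     ToText('<b>bold &amp; beautiful</b> text')  # -> 'bold & beautiful text'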
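

# Invoked as a CLI for spot checks.  The 'well-formed' action reads a list of
# filenames from stdin and lexes each file, e.g. (a sketch):
#
#     find . -name '*.html' | python2 lazylex/html.py well-formed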
def main(argv):
    action = argv[1]

    if action == 'well-formed':
        num_tokens = 0
        errors = []
        i = 0
        for line in sys.stdin:
            name = line.strip()
            with open(name) as f:
                contents = f.read()

            lx = ValidTokens(contents)
            try:
                tokens = list(lx)
            except LexError as e:
                log('Error in %r: %s', name, e)
                errors.append((name, e))
            else:
                num_tokens += len(tokens)
                #print('%d %s' % (len(tokens), name))
            i += 1

        log('')
        log('  %d tokens in %d files', num_tokens, i)
        log('  %d errors', len(errors))
        if 0:
            for name, e in errors:
                log('Error in %r: %s', name, e)

    else:
        raise RuntimeError('Invalid action %r' % action)


if __name__ == '__main__':
    main(sys.argv)