#!/usr/bin/env python2
"""
lazylex/html.py - Low-Level HTML Processing.

See lazylex/README.md for details.

TODO: This should be an Oils library eventually. It's a "lazily-parsed data
structure" like TSV8
"""
from __future__ import print_function

try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO  # python3
import re
import sys

if sys.version_info.major == 2:
    from typing import List, Tuple


def log(msg, *args):
    msg = msg % args
    print(msg, file=sys.stderr)


class LexError(Exception):
    """For bad lexical elements like <> or &&"""

    def __init__(self, s, pos):
        self.s = s
        self.pos = pos

    def __str__(self):
        return '(LexError %r)' % (self.s[self.pos:self.pos + 20])


class ParseError(Exception):
    """For errors in the tag structure."""

    def __init__(self, msg, *args):
        self.msg = msg
        self.args = args

    def __str__(self):
        return '(ParseError %s)' % (self.msg % self.args)


class Output(object):
    """Takes an underlying input buffer and an output file. Maintains a
    position in the input buffer.

    Print FROM the input or print new text to the output.
    """

    def __init__(self, s, f, left_pos=0, right_pos=-1):
        self.s = s
        self.f = f
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos

    def SkipTo(self, pos):
        """Skip to a position."""
        self.pos = pos

    def PrintUntil(self, pos):
        """Print until a position."""
        piece = self.s[self.pos:pos]
        self.f.write(piece)
        self.pos = pos

    def PrintTheRest(self):
        """Print until the end of the string."""
        self.PrintUntil(self.right_pos)

    def Print(self, s):
        """Print text to the underlying buffer."""
        self.f.write(s)


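# Editor's note: a minimal usage sketch, not part of the original file.  It
# shows the intended pattern: copy spans of the input and splice in new text.
def _DemoOutput():
    f = StringIO()
    out = Output('<b>hi</b>', f)
    out.PrintUntil(3)  # copy '<b>' from the input
    out.Print('HI')  # write replacement text
    out.SkipTo(5)  # skip over the original 'hi'
    out.PrintTheRest()  # copy '</b>'
    return f.getvalue()  # => '<b>HI</b>'

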
# HTML Tokens
TOKENS = ('Decl Comment Processing StartTag StartEndTag EndTag '
          'DecChar HexChar CharEntity RawData Invalid EndOfStream').split()


class Tok(object):
    """
    Aliases like Tok.StartTag for the token IDs.

    Use these instead of the dynamically-assigned module globals below, which
    cause lint errors.
    """
    pass


assert len(TOKENS) == 12, TOKENS

TOKEN_NAMES = [None] * len(TOKENS)  # type: List[str]

this_module = sys.modules[__name__]
for i, tok_str in enumerate(TOKENS):
    setattr(this_module, tok_str, i)
    setattr(Tok, tok_str, i)
    TOKEN_NAMES[i] = tok_str


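# Editor's note (illustrative): after the loop above, each token name is both
# a module global and a Tok attribute, e.g. Tok.StartTag == 3 and
# TOKEN_NAMES[Tok.StartTag] == 'StartTag'.

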
def TokenName(tok_id):
    return TOKEN_NAMES[tok_id]


def MakeLexer(rules):
    return [
        # DOTALL is for the comment
        (re.compile(pat, re.VERBOSE | re.DOTALL), i) for (pat, i) in rules
    ]


#
# Eggex
#
# Tag = / ~['>']+ /

# Is this valid?  A single character?
# Tag = / ~'>'* /

# Maybe better: / [NOT '>']+/
# capital letters not allowed there?
#
# But then this is confusing:
# / [NOT ~digit]+/
#
# / [NOT digit] / is [^\d]
# / ~digit / is \D
#
# Or maybe:
#
# / [~ digit]+ /
# / [~ '>']+ /
# / [NOT '>']+ /

# End = / '</' Tag '>' /
# StartEnd = / '<' Tag '/>' /
# Start = / '<' Tag '>' /
#
# EntityRef = / '&' dot{* N} ';' /

LEXER = [
    # TODO: instead of nongreedy matches, the loop can just do .find('-->') and
    # .find('?>')

    # Actually non-greedy matches are regular and can be matched in linear time
    # with RE2.
    #
    # https://news.ycombinator.com/item?id=27099798
    #
    # Maybe try combining all of these for speed.
    (r'<!-- .*? -->', Tok.Comment),
    (r'<\? .*? \?>', Tok.Processing),

    # NOTE: < is allowed in these.
    (r'<! [^>]+ >', Tok.Decl),  # <!DOCTYPE html>
    (r'</ [^>]+ >', Tok.EndTag),  # end tag </a>
    (r'< [^>]+ />', Tok.StartEndTag),  # self-closing <br/> comes FIRST
    (r'< [^>]+ >', Tok.StartTag),  # start <a>
    (r'&\# [0-9]+ ;', Tok.DecChar),
    (r'&\# x[0-9a-fA-F]+ ;', Tok.HexChar),
    (r'& [a-zA-Z]+ ;', Tok.CharEntity),

    # Note: > is allowed in raw data.
    # https://stackoverflow.com/questions/10462348/right-angle-bracket-in-html
    (r'[^&<]+', Tok.RawData),
    (r'.', Tok.Invalid),  # error!
]

LEXER = MakeLexer(LEXER)


class Lexer(object):

    def __init__(self, s, left_pos=0, right_pos=-1):
        self.s = s
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos
        self.cache = {}  # string -> compiled regex pattern object

    def _Peek(self):
        # type: () -> Tuple[int, int]
        """
        Note: not using _Peek() now
        """
        if self.pos == self.right_pos:
            return Tok.EndOfStream, self.pos

        assert self.pos < self.right_pos, self.pos

        # Find the first match.
        # Note: frontend/match.py uses _LongestMatch(), which is different!
        # TODO: reconcile them.  This lexer should be expressible in re2c.

        # TODO: Get rid of non-greedy match

        for pat, tok_id in LEXER:
            m = pat.match(self.s, self.pos)
            if m:
                return tok_id, m.end()
        else:
            raise AssertionError('Tok.Invalid rule should have matched')

    def Read(self):
        # type: () -> Tuple[int, int]
        tok_id, end_pos = self._Peek()
        self.pos = end_pos  # advance
        return tok_id, end_pos

    def LookAhead(self, regex):
        # Cache the regex compilation.  This could also be LookAheadFor(THEAD)
        # or something.
        pat = self.cache.get(regex)
        if pat is None:
            pat = re.compile(regex)
            self.cache[regex] = pat

        m = pat.match(self.s, self.pos)
        return m is not None


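# Editor's note: an illustrative sketch of LookAhead(), which tests a regex at
# the current position without consuming input:
#
#   lx = Lexer('<tr><td>cell</td></tr>')
#   lx.Read()             # consume '<tr>'
#   lx.LookAhead(r'<td')  # True -- '<td>' is next

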
def _Tokens(s, left_pos, right_pos):
    """
    Args:
      s: string to parse
      left_pos, right_pos: Optional span boundaries.
    """
    lx = Lexer(s, left_pos, right_pos)
    while True:
        tok_id, pos = lx.Read()
        yield tok_id, pos
        if tok_id == Tok.EndOfStream:
            break


def ValidTokens(s, left_pos=0, right_pos=-1):
    """Wrapper around _Tokens to prevent callers from having to handle Invalid.

    I'm not combining the two functions because I might want to do a
    'yield' transformation on _Tokens() later, and exceptions might
    complicate that.
    """
    pos = left_pos
    for tok_id, end_pos in _Tokens(s, left_pos, right_pos):
        if tok_id == Tok.Invalid:
            raise LexError(s, pos)
        yield tok_id, end_pos
        pos = end_pos


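# Editor's note: a minimal sketch, not in the original file.  Each token is
# (tok_id, end_pos); a token's start is the previous token's end.
def _DemoValidTokens():
    s = '<p>hi &amp; bye</p>'
    pos = 0
    for tok_id, end_pos in ValidTokens(s):
        print('%-12s %r' % (TokenName(tok_id), s[pos:end_pos]))
        pos = end_pos

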
# Tag names:
# Match <a or </a
# Match <h2, but not <2h
#
# HTML 5 doesn't restrict tag names at all
# https://html.spec.whatwg.org/#toc-syntax
#
# XML allows : - .
# https://www.w3.org/TR/xml/#NT-NameChar

# Namespaces for MathML, SVG
# XLink, XML, XMLNS
#
# https://infra.spec.whatwg.org/#namespaces
#
# Allow - for td-attrs

_TAG_RE = re.compile(r'/? \s* ([a-zA-Z][a-zA-Z0-9-]*)', re.VERBOSE)

# To match href="foo"

_ATTR_RE = re.compile(
    r'''
\s+                    # Leading whitespace is required
([a-z]+)               # Attribute name
(?:                    # Optional attribute value
  \s* = \s*
  (?:
    " ([^>"]*) "       # double quoted value
  | ([a-zA-Z0-9_\-]+)  # Just allow unquoted "identifiers"
                       # TODO: relax this? for href=$foo
  )
)?
''', re.VERBOSE)

TagName, AttrName, UnquotedValue, QuotedValue = range(4)


class TagLexer(object):
    """
    Given a tag like <a href="..."> or <link type="..." />, the TagLexer
    provides a few operations:

    - What is the tag?
    - Iterate through the attributes, giving (name, value_start_pos, value_end_pos)
    """

    def __init__(self, s):
        self.s = s
        self.start_pos = -1  # Invalid
        self.end_pos = -1

    def Reset(self, start_pos, end_pos):
        """Reuse instances of this object."""
        self.start_pos = start_pos
        self.end_pos = end_pos

    def TagString(self):
        return self.s[self.start_pos:self.end_pos]

    def TagName(self):
        # First event
        tok_id, start, end = next(self.Tokens())
        return self.s[start:end]

    def GetSpanForAttrValue(self, attr_name):
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these

        events = self.Tokens()
        val = (-1, -1)
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]
                    if name == attr_name:
                        # The value should come next
                        tok_id, start, end = next(events)
                        if tok_id in (QuotedValue, UnquotedValue):
                            # Note: quoted values may have &amp;  We would
                            # need ANOTHER lexer to unescape them.  Right now
                            # callers like help_gen.py and oils_doc.py use
                            # the raw value.
                            val = start, end
                            break

        except StopIteration:
            pass
        return val

    def GetAttrRaw(self, attr_name):
        """
        Return the value, which may be UNESCAPED.
        """
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these
        start, end = self.GetSpanForAttrValue(attr_name)
        if start == -1:
            return None
        return self.s[start:end]

    def AllAttrsRaw(self):
        """
        Get a list of pairs [('class', 'foo'), ('href', '?foo=1&amp;bar=2')]

        The quoted values may be escaped.  We would need another lexer to
        unescape them.
        """
        pairs = []
        events = self.Tokens()
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]

                    # The value should come next
                    tok_id, start, end = next(events)
                    if tok_id in (QuotedValue, UnquotedValue):
                        # Note: quoted values may have &amp;  We would need
                        # ANOTHER lexer to unescape them, but we don't need
                        # that for ul-table

                        val = self.s[start:end]
                        pairs.append((name, val))
        except StopIteration:
            pass
        return pairs

    def Tokens(self):
        """
        Yields a sequence of tokens: Tag (AttrName AttrValue?)*

        Where each Token is (Type, start_pos, end_pos)

        Note that start and end are NOT redundant!  We skip over some unwanted
        characters.
        """
        m = _TAG_RE.match(self.s, self.start_pos + 1)
        if not m:
            raise RuntimeError("Couldn't find HTML tag in %r" %
                               self.TagString())
        yield TagName, m.start(1), m.end(1)

        pos = m.end(0)

        while True:
            # don't search past the end
            m = _ATTR_RE.match(self.s, pos, self.end_pos)
            if not m:
                # A validating parser would check that > or /> is next -- there's no junk
                break

            yield AttrName, m.start(1), m.end(1)

            # Quoted is group 2, unquoted is group 3.
            if m.group(2) is not None:
                yield QuotedValue, m.start(2), m.end(2)
            elif m.group(3) is not None:
                yield UnquotedValue, m.start(3), m.end(3)

            # Skip past the "
            pos = m.end(0)


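# Editor's note: an illustrative sketch, not part of the original file.
def _DemoTagLexer():
    s = '<a href="/foo?a=1&amp;b=2" class=x>'
    tag_lexer = TagLexer(s)
    tag_lexer.Reset(0, len(s))
    print(tag_lexer.TagName())  # 'a'
    print(tag_lexer.GetAttrRaw('href'))  # '/foo?a=1&amp;b=2' -- still escaped
    print(tag_lexer.AllAttrsRaw())  # [('href', '...'), ('class', 'x')]

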
def ReadUntilStartTag(it, tag_lexer, tag_name):
    """Find the next <foo>, returning its (start, end) positions

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.StartTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No start tag %r', tag_name)


def ReadUntilEndTag(it, tag_lexer, tag_name):
    """Find the next </foo>, returning its (start, end) positions

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.EndTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No end tag %r', tag_name)


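# Editor's note: an illustrative sketch of driving the ReadUntil* functions
# with a shared token iterator, so successive calls consume successive tags:
#
#   s = '<table><tr><td>hi</td></tr></table>'
#   it = ValidTokens(s)
#   tag_lexer = TagLexer(s)
#   start, end = ReadUntilStartTag(it, tag_lexer, 'td')  # spans '<td>'
#   start, end = ReadUntilEndTag(it, tag_lexer, 'td')    # spans '</td>'

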
CHAR_ENTITY = {
    'amp': '&',
    'lt': '<',
    'gt': '>',
    'quot': '"',
}


def ToText(s, left_pos=0, right_pos=-1):
    """Given HTML, return text by unquoting &gt; and &lt; etc.

    Used by:
      doctools/oils_doc.py: PygmentsPlugin
      doctools/make_help.py: HelpIndexCards

    In the latter case, we could process some tags, like:

    - Blue Link (not clickable, but still useful)
    - Red X

    That should be html.ToAnsi.
    """
    f = StringIO()
    out = Output(s, f, left_pos, right_pos)

    pos = left_pos
    for tok_id, end_pos in ValidTokens(s, left_pos, right_pos):
        if tok_id == Tok.RawData:
            out.SkipTo(pos)
            out.PrintUntil(end_pos)

        elif tok_id == Tok.CharEntity:  # &amp;

            entity = s[pos + 1:end_pos - 1]

            out.SkipTo(pos)
            out.Print(CHAR_ENTITY[entity])
            out.SkipTo(end_pos)

        # Not handling these yet
        elif tok_id == Tok.HexChar:
            raise AssertionError('Hex Char %r' % s[pos:pos + 20])

        elif tok_id == Tok.DecChar:
            raise AssertionError('Dec Char %r' % s[pos:pos + 20])

        pos = end_pos

    out.PrintTheRest()
    return f.getvalue()
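

# Editor's note: a minimal sketch, not part of the original file.
def _DemoToText():
    return ToText('<b>1 &lt; 2</b> done')  # => '1 < 2 done'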