#!/usr/bin/env python2
"""
lazylex/html.py - Low-Level HTML Processing.

See lazylex/README.md for details.

TODO: This should be an Oils library eventually. It's a "lazily-parsed data
structure" like TSV8
"""
from __future__ import print_function

try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO  # python3
import re
import sys

if sys.version_info.major == 2:
    from typing import List, Tuple


def log(msg, *args):
    msg = msg % args
    print(msg, file=sys.stderr)


class LexError(Exception):
    """For bad lexical elements like <> or &&"""

    def __init__(self, s, pos):
        self.s = s
        self.pos = pos

    def __str__(self):
        return '(LexError %r)' % (self.s[self.pos:self.pos + 20])


class ParseError(Exception):
    """For errors in the tag structure."""

    def __init__(self, msg, *args):
        self.msg = msg
        self.args = args

    def __str__(self):
        return '(ParseError %s)' % (self.msg % self.args)

class Output(object):
    """Takes an underlying input buffer and an output file. Maintains a
    position in the input buffer.

    Print FROM the input or print new text to the output.
    """

    def __init__(self, s, f, left_pos=0, right_pos=-1):
        self.s = s
        self.f = f
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos

    def SkipTo(self, pos):
        """Skip to a position in the input, without printing."""
        self.pos = pos

    def PrintUntil(self, pos):
        """Print the input from the current position up to pos."""
        piece = self.s[self.pos:pos]
        self.f.write(piece)
        self.pos = pos

    def PrintTheRest(self):
        """Print the rest of the input span."""
        self.PrintUntil(self.right_pos)

    def Print(self, s):
        """Print new text directly to the output."""
        self.f.write(s)
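

# Usage sketch (assumed, not part of the original file): copy '<b>hi</b>'
# through while replacing the text inside the tags.
#
#   f = StringIO()
#   out = Output('<b>hi</b>', f)
#   out.PrintUntil(3)   # emit '<b>'
#   out.Print('bye')    # emit replacement text
#   out.SkipTo(5)       # skip over 'hi' in the input
#   out.PrintTheRest()  # emit '</b>'
#   f.getvalue()        # -> '<b>bye</b>'

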
# HTML Tokens
TOKENS = ('Decl Comment Processing StartTag StartEndTag EndTag '
          'DecChar HexChar CharEntity RawData Invalid EndOfStream').split()


class Tok(object):
    """
    Aliases for the token IDs, e.g. Tok.StartTag.

    Using these (instead of the module-level names defined by setattr()
    below) avoids lint errors.
    """
    pass


assert len(TOKENS) == 12, TOKENS

TOKEN_NAMES = [None] * len(TOKENS)  # type: List[str]

this_module = sys.modules[__name__]
for i, tok_str in enumerate(TOKENS):
    setattr(this_module, tok_str, i)
    setattr(Tok, tok_str, i)
    TOKEN_NAMES[i] = tok_str


def TokenName(tok_id):
    return TOKEN_NAMES[tok_id]
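

# Sketch (assumed, not in the original): token IDs are small integers,
# assigned in TOKENS order.
#
#   Tok.StartTag             # -> 3
#   TokenName(Tok.StartTag)  # -> 'StartTag'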


def MakeLexer(rules):
    return [(re.compile(pat, re.VERBOSE), i) for (pat, i) in rules]


#
# Eggex
#
# Tag = / ~['>']+ /

# Is this valid?  A single character?
# Tag = / ~'>'* /

# Maybe better: / [NOT '>']+/
# capital letters not allowed there?
#
# But then this is confusing:
# / [NOT ~digit]+/
#
# / [NOT digit] / is [^\d]
# / ~digit /      is \D
#
# Or maybe:
#
# / [~ digit]+ /
# / [~ '>']+ /
# / [NOT '>']+ /

# End      = / '</' Tag '>' /
# StartEnd = / '<' Tag '/>' /
# Start    = / '<' Tag '>' /
#
# EntityRef = / '&' dot{* N} ';' /

LEXER = [
    # TODO: instead of nongreedy matches, the loop can just do .find('-->')
    # and .find('?>')

    # Actually non-greedy matches are regular and can be matched in linear
    # time with RE2.
    #
    # https://news.ycombinator.com/item?id=27099798
    #
    # Maybe try combining all of these for speed.

    # . is any char except newline
    # https://re2c.org/manual/manual_c.html

    # Hack from Claude: \s\S instead of re.DOTALL.  I don't like this
    #(r'<!-- [\s\S]*? -->', Tok.Comment),
    (r'<!-- (?:.|[\n])*? -->', Tok.Comment),

    #(r'<!-- .*? -->', Tok.Comment),
    (r'<\? (?:.|\n)*? \?>', Tok.Processing),

    # NOTE: < is allowed in these.
    (r'<! [^>]+ >', Tok.Decl),  # <!DOCTYPE html>
    (r'</ [^>]+ >', Tok.EndTag),  # end tag like </a>
    (r'< [^>]+ />', Tok.StartEndTag),  # self-closing <br/> comes FIRST
    (r'< [^>]+ >', Tok.StartTag),  # start tag like <a>
    (r'&\# [0-9]+ ;', Tok.DecChar),
    (r'&\# x[0-9a-fA-F]+ ;', Tok.HexChar),
    (r'& [a-zA-Z]+ ;', Tok.CharEntity),

    # Note: > is allowed in raw data.
    # https://stackoverflow.com/questions/10462348/right-angle-bracket-in-html
    (r'[^&<]+', Tok.RawData),
    (r'.', Tok.Invalid),  # error!
]

LEXER = MakeLexer(LEXER)
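

# Rule order above matters.  Sketch (assumed): '<br/>' must hit the
# StartEndTag rule before the more general '< [^>]+ >' StartTag rule,
# which would also match it.
#
#   for pat, tok_id in LEXER:
#       m = pat.match('<br/>')
#       if m:
#           break
#   TokenName(tok_id)  # -> 'StartEndTag'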


class Lexer(object):

    def __init__(self, s, left_pos=0, right_pos=-1):
        self.s = s
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos
        self.cache = {}  # string -> compiled regex pattern object

    def _Peek(self):
        # type: () -> Tuple[int, int]
        """Return the next token's ID and end position, without advancing.

        Note: currently only called by Read().
        """
        if self.pos == self.right_pos:
            return Tok.EndOfStream, self.pos

        assert self.pos < self.right_pos, self.pos

        # Find the first match.
        # Note: frontend/match.py uses _LongestMatch(), which is different!
        # TODO: reconcile them.  This lexer should be expressible in re2c.

        # TODO: Get rid of non-greedy match

        for pat, tok_id in LEXER:
            m = pat.match(self.s, self.pos)
            if m:
                return tok_id, m.end()
        else:
            raise AssertionError('Tok.Invalid rule should have matched')

    def Read(self):
        # type: () -> Tuple[int, int]
        tok_id, end_pos = self._Peek()
        self.pos = end_pos  # advance
        return tok_id, end_pos

    def LookAhead(self, regex):
        # Cache the regex compilation.  This could also be LookAheadFor(THEAD)
        # or something.
        pat = self.cache.get(regex)
        if pat is None:
            pat = re.compile(regex)
            self.cache[regex] = pat

        m = pat.match(self.s, self.pos)
        return m is not None
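

# Usage sketch (assumed, not in the original): read tokens until EndOfStream.
#
#   lx = Lexer('<p>hi</p>')
#   while True:
#       tok_id, end_pos = lx.Read()
#       if tok_id == Tok.EndOfStream:
#           break
#       print(TokenName(tok_id), end_pos)  # StartTag 3, RawData 5, EndTag 9

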
def _Tokens(s, left_pos, right_pos):
    """
    Args:
      s: string to parse
      left_pos, right_pos: Optional span boundaries.
    """
    lx = Lexer(s, left_pos, right_pos)
    while True:
        tok_id, pos = lx.Read()
        yield tok_id, pos
        if tok_id == Tok.EndOfStream:
            break


def ValidTokens(s, left_pos=0, right_pos=-1):
    """Wrapper around _Tokens to prevent callers from having to handle Invalid.

    The two functions aren't combined because we might want to do a 'yield'
    transformation on Tokens(), and exceptions might complicate that.
    """
    pos = left_pos
    for tok_id, end_pos in _Tokens(s, left_pos, right_pos):
        if tok_id == Tok.Invalid:
            raise LexError(s, pos)
        yield tok_id, end_pos
        pos = end_pos
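

# Sketch (assumed): '<>' matches no tag rule, so the single-character
# Invalid rule fires and ValidTokens() raises LexError.
#
#   list(ValidTokens('<p>hi</p>'))  # 4 (tok_id, end_pos) pairs
#   list(ValidTokens('<>'))         # raises LexError

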
# Tag names:
# Match <a  or </a
# Match <h2, but not <2h
#
# HTML 5 doesn't restrict tag names at all
# https://html.spec.whatwg.org/#toc-syntax
#
# XML allows : - .
# https://www.w3.org/TR/xml/#NT-NameChar

# Namespaces for MathML, SVG
# XLink, XML, XMLNS
#
# https://infra.spec.whatwg.org/#namespaces
#
# Allow - for td-attrs

_TAG_RE = re.compile(r'/? \s* ([a-zA-Z][a-zA-Z0-9-]*)', re.VERBOSE)

# To match href="foo"

_ATTR_RE = re.compile(
    r'''
\s+                     # Leading whitespace is required
([a-z]+)                # Attribute name
(?:                     # Optional attribute value
  \s* = \s*
  (?:
    " ([^>"]*) "        # double quoted value
  | ([a-zA-Z0-9_\-]+)   # Just allow unquoted "identifiers"
                        # TODO: relax this?  for href=$foo
  )
)?
''', re.VERBOSE)

TagName, AttrName, UnquotedValue, QuotedValue = range(4)


class TagLexer(object):
    """
    Given a tag like <a href="..."> or <link type="..." />, the TagLexer
    provides a few operations:

    - What is the tag?
    - Iterate through the attributes, giving (name, value_start_pos, value_end_pos)
    """

    def __init__(self, s):
        self.s = s
        self.start_pos = -1  # Invalid
        self.end_pos = -1

    def Reset(self, start_pos, end_pos):
        """Reuse instances of this object."""
        self.start_pos = start_pos
        self.end_pos = end_pos

    def TagString(self):
        return self.s[self.start_pos:self.end_pos]

    def TagName(self):
        # First event
        tok_id, start, end = next(self.Tokens())
        return self.s[start:end]

    def GetSpanForAttrValue(self, attr_name):
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these

        events = self.Tokens()
        val = (-1, -1)
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]
                    if name == attr_name:
                        # The value should come next
                        tok_id, start, end = next(events)
                        if tok_id in (QuotedValue, UnquotedValue):
                            # Note: quoted values may have &amp;
                            # We would need ANOTHER lexer to unescape them.
                            # (Right now the callers are help_gen.py and
                            # oils_doc.py.)
                            val = start, end
                            break

        except StopIteration:
            pass
        return val

    def GetAttrRaw(self, attr_name):
        """
        Return the value, which may be UNESCAPED.
        """
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these
        start, end = self.GetSpanForAttrValue(attr_name)
        if start == -1:
            return None
        return self.s[start:end]

    def AllAttrsRaw(self):
        """
        Get a list of pairs [('class', 'foo'), ('href', '?foo=1&amp;bar=2')]

        The quoted values may be escaped.  We would need another lexer to
        unescape them.
        """
        pairs = []
        events = self.Tokens()
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]

                    # The value should come next
                    tok_id, start, end = next(events)
                    if tok_id in (QuotedValue, UnquotedValue):
                        # Note: quoted values may have &amp;
                        # We would need ANOTHER lexer to unescape them, but we
                        # don't need that for ul-table

                        val = self.s[start:end]
                        pairs.append((name, val))
        except StopIteration:
            pass
        return pairs

    def Tokens(self):
        """
        Yields a sequence of tokens: Tag (AttrName AttrValue?)*

        Where each Token is (Type, start_pos, end_pos)

        Note that start and end are NOT redundant!  We skip over some unwanted
        characters.
        """
        m = _TAG_RE.match(self.s, self.start_pos + 1)
        if not m:
            raise RuntimeError("Couldn't find HTML tag in %r" %
                               self.TagString())
        yield TagName, m.start(1), m.end(1)

        pos = m.end(0)

        while True:
            # don't search past the end
            m = _ATTR_RE.match(self.s, pos, self.end_pos)
            if not m:
                # A validating parser would check that > or /> is next --
                # there's no junk
                break

            yield AttrName, m.start(1), m.end(1)

            # Quoted is group 2, unquoted is group 3.
            if m.group(2) is not None:
                yield QuotedValue, m.start(2), m.end(2)
            elif m.group(3) is not None:
                yield UnquotedValue, m.start(3), m.end(3)

            # Skip past the closing " (or the end of an unquoted value)
            pos = m.end(0)
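

# Usage sketch (assumed, not in the original):
#
#   s = '<a href="/foo" class=x>'
#   tag_lexer = TagLexer(s)
#   tag_lexer.Reset(0, len(s))
#   tag_lexer.TagName()           # -> 'a'
#   tag_lexer.GetAttrRaw('href')  # -> '/foo'
#   tag_lexer.AllAttrsRaw()       # -> [('href', '/foo'), ('class', 'x')]

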
def ReadUntilStartTag(it, tag_lexer, tag_name):
    """Find the next <foo>, returning its (start, end) positions.

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.StartTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No start tag %r', tag_name)


def ReadUntilEndTag(it, tag_lexer, tag_name):
    """Find the next </foo>, returning its (start, end) positions.

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.EndTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No end tag %r', tag_name)
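

# Usage sketch (assumed): find the body of the first <title> element.
# Both functions consume from the same token iterator.
#
#   s = '<html><title>Hi</title></html>'
#   it = ValidTokens(s)
#   tag_lexer = TagLexer(s)
#   _, title_end = ReadUntilStartTag(it, tag_lexer, 'title')
#   end_start, _ = ReadUntilEndTag(it, tag_lexer, 'title')
#   s[title_end:end_start]  # -> 'Hi'

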
CHAR_ENTITY = {
    'amp': '&',
    'lt': '<',
    'gt': '>',
    'quot': '"',
}


def ToText(s, left_pos=0, right_pos=-1):
    """Given HTML, return text by unquoting &gt; and &lt; etc.

    Used by:
      doctools/oils_doc.py: PygmentsPlugin
      doctools/make_help.py: HelpIndexCards

    In the latter case, we could process some tags, like:

    - Blue Link (not clickable, but still useful)
    - Red X

    That should be html.ToAnsi.
    """
    f = StringIO()
    out = Output(s, f, left_pos, right_pos)

    pos = left_pos
    for tok_id, end_pos in ValidTokens(s, left_pos, right_pos):
        if tok_id == Tok.RawData:
            out.SkipTo(pos)
            out.PrintUntil(end_pos)

        elif tok_id == Tok.CharEntity:  # &amp;

            entity = s[pos + 1:end_pos - 1]

            out.SkipTo(pos)
            out.Print(CHAR_ENTITY[entity])
            out.SkipTo(end_pos)

        # Not handling these yet
        elif tok_id == Tok.HexChar:
            raise AssertionError('Hex Char %r' % s[pos:pos + 20])

        elif tok_id == Tok.DecChar:
            raise AssertionError('Dec Char %r' % s[pos:pos + 20])

        pos = end_pos

    out.PrintTheRest()
    return f.getvalue()
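

# Usage sketch (assumed): tags around raw data are skipped, and known
# entities are decoded.
#
#   ToText('<b>x</b> &amp; y')  # -> 'x & y'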