#!/usr/bin/env python2
"""
lazylex/html.py - Low-Level HTML Processing.

See lazylex/README.md for details.

TODO: This should be an Oils library eventually.  It's a "lazily-parsed data
structure" like TSV8.
"""
from __future__ import print_function

import cStringIO
import re
import sys

from typing import List, Tuple


def log(msg, *args):
    msg = msg % args
    print(msg, file=sys.stderr)


class LexError(Exception):
    """For bad lexical elements like <> or &&"""

    def __init__(self, s, pos):
        self.s = s
        self.pos = pos

    def __str__(self):
        return '(LexError %r)' % (self.s[self.pos:self.pos + 20])


class ParseError(Exception):
    """For errors in the tag structure."""

    def __init__(self, msg, *args):
        self.msg = msg
        self.args = args

    def __str__(self):
        return '(ParseError %s)' % (self.msg % self.args)


class Output(object):
    """Takes an underlying input buffer and an output file.  Maintains a
    position in the input buffer.

    Print FROM the input or print new text to the output.
    """

    def __init__(self, s, f, left_pos=0, right_pos=-1):
        self.s = s
        self.f = f
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos

    def SkipTo(self, pos):
        """Skip to a position."""
        self.pos = pos

    def PrintUntil(self, pos):
        """Print from the current position until a position."""
        piece = self.s[self.pos:pos]
        self.f.write(piece)
        self.pos = pos

    def PrintTheRest(self):
        """Print until the end of the string."""
        self.PrintUntil(self.right_pos)

    def Print(self, s):
        """Print new text to the output."""
        self.f.write(s)
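

# Usage sketch for Output: copy the input through unchanged, except for one
# span that is replaced.  The HTML string and positions below are made-up
# examples, not from the original callers.


def _DemoOutput():
    html = '<a href="old">x</a>'
    f = cStringIO.StringIO()
    out = Output(html, f)

    out.PrintUntil(9)   # copy '<a href="' from the input
    out.Print('new')    # print replacement text
    out.SkipTo(12)      # skip over 'old' in the input
    out.PrintTheRest()  # copy '">x</a>'

    assert f.getvalue() == '<a href="new">x</a>', f.getvalue()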


# HTML Tokens
TOKENS = 'Decl Comment Processing StartTag StartEndTag EndTag DecChar HexChar CharEntity RawData Invalid EndOfStream'.split(
)


class Tok(object):
    """
    Avoid lint errors by using these aliases
    """
    pass


assert len(TOKENS) == 12, TOKENS

TOKEN_NAMES = [None] * len(TOKENS)  # type: List[str]

this_module = sys.modules[__name__]
for i, tok_str in enumerate(TOKENS):
    setattr(this_module, tok_str, i)
    setattr(Tok, tok_str, i)
    TOKEN_NAMES[i] = tok_str


def TokenName(tok_id):
    return TOKEN_NAMES[tok_id]
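

# Sketch of what the setattr() loop above produces: each token name becomes
# both a module-level integer and a Tok attribute, and TokenName() maps the
# integer back to its name for debugging.


def _DemoTokenName():
    assert Tok.Decl == 0  # 'Decl' is first in TOKENS
    assert TokenName(Tok.Comment) == 'Comment'
    assert TokenName(Tok.EndOfStream) == 'EndOfStream'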


def MakeLexer(rules):
    return [
        # DOTALL is for the comment
        (re.compile(pat, re.VERBOSE | re.DOTALL), i) for (pat, i) in rules
    ]


#
# Eggex
#
# Tag = / ~['>']+ /

# Is this valid?  A single character?
# Tag = / ~'>'* /

# Maybe better: / [NOT '>']+/
# capital letters not allowed there?
#
# But then this is confusing:
# / [NOT ~digit]+/
#
# / [NOT digit] / is [^\d]
# / ~digit / is \D
#
# Or maybe:
#
# / [~ digit]+ /
# / [~ '>']+ /
# / [NOT '>']+ /

# End = / '</' Tag '>' /
# StartEnd = / '<' Tag '/>' /
# Start = / '<' Tag '>' /
#
# EntityRef = / '&' dot{* N} ';' /

LEXER = [
    # TODO: instead of nongreedy matches, the loop can just do .find('-->') and
    # .find('?>')

    # Actually non-greedy matches are regular and can be matched in linear time
    # with RE2.
    #
    # https://news.ycombinator.com/item?id=27099798
    #
    # Maybe try combining all of these for speed.
    (r'<!-- .*? -->', Tok.Comment),
    (r'<\? .*? \?>', Tok.Processing),

    # NOTE: < is allowed in these.
    (r'<! [^>]+ >', Tok.Decl),  # <!DOCTYPE html>
    (r'</ [^>]+ >', Tok.EndTag),  # end tag </a> -- must come before StartTag
    (r'< [^>]+ />', Tok.StartEndTag),  # self-closing <br/> -- also before StartTag
    (r'< [^>]+ >', Tok.StartTag),  # start tag <a>
    (r'&\# [0-9]+ ;', Tok.DecChar),
    (r'&\# x[0-9a-fA-F]+ ;', Tok.HexChar),
    (r'& [a-zA-Z]+ ;', Tok.CharEntity),

    # Note: > is allowed in raw data.
    # https://stackoverflow.com/questions/10462348/right-angle-bracket-in-html
    (r'[^&<]+', Tok.RawData),
    (r'.', Tok.Invalid),  # error!
]

LEXER = MakeLexer(LEXER)


class Lexer(object):

    def __init__(self, s, left_pos=0, right_pos=-1):
        self.s = s
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos
        self.cache = {}  # string -> compiled regex pattern object

    def _Peek(self):
        # type: () -> Tuple[int, int]
        """
        Note: not using _Peek() now
        """
        if self.pos == self.right_pos:
            return Tok.EndOfStream, self.pos

        assert self.pos < self.right_pos, self.pos

        # Find the first match.
        # Note: frontend/match.py uses _LongestMatch(), which is different!
        # TODO: reconcile them.  This lexer should be expressible in re2c.

        # TODO: Get rid of non-greedy match

        for pat, tok_id in LEXER:
            m = pat.match(self.s, self.pos)
            if m:
                return tok_id, m.end()
        else:
            # Unreachable in practice: the Tok.Invalid rule r'.' matches any
            # character while self.pos < self.right_pos.
            raise AssertionError('Tok.Invalid rule should have matched')

    def Read(self):
        # type: () -> Tuple[int, int]
        tok_id, end_pos = self._Peek()
        self.pos = end_pos  # advance
        return tok_id, end_pos

    def LookAhead(self, regex):
        # Cache the regex compilation.  This could also be LookAheadFor(THEAD)
        # or something.
        pat = self.cache.get(regex)
        if pat is None:
            pat = re.compile(regex)
            self.cache[regex] = pat

        m = pat.match(self.s, self.pos)
        return m is not None
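

# Sketch of LookAhead(): peek at what follows the current position without
# consuming it.  The HTML snippet and regexes here are hypothetical examples,
# not from the original callers.


def _DemoLookAhead():
    lx = Lexer('<table><thead>')
    tok_id, _ = lx.Read()  # consume <table>
    assert tok_id == Tok.StartTag, TokenName(tok_id)

    assert lx.LookAhead(r'<thead\b')
    assert not lx.LookAhead(r'<tbody\b')  # nothing was consumed either way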


def _Tokens(s, left_pos, right_pos):
    """
    Args:
      s: string to parse
      left_pos, right_pos: Optional span boundaries.
    """
    lx = Lexer(s, left_pos, right_pos)
    while True:
        tok_id, pos = lx.Read()
        yield tok_id, pos
        if tok_id == Tok.EndOfStream:
            break


def ValidTokens(s, left_pos=0, right_pos=-1):
    """Wrapper around _Tokens to prevent callers from having to handle Invalid.

    I'm not combining the two functions because I might want to do a
    'yield' transformation on Tokens()?  Exceptions might complicate the
    issue?
    """
    pos = left_pos
    for tok_id, end_pos in _Tokens(s, left_pos, right_pos):
        if tok_id == Tok.Invalid:
            raise LexError(s, pos)
        yield tok_id, end_pos
        pos = end_pos
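

# Usage sketch: tokenize a small document.  On this input it yields StartTag,
# RawData, CharEntity, RawData, EndTag, EndOfStream, with the end position of
# each token.


def _DemoValidTokens():
    s = '<p>hi &amp; bye</p>'
    for tok_id, end_pos in ValidTokens(s):
        log('%s %d', TokenName(tok_id), end_pos)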


# Tag names:
#   Match <a  or </a
#   Match <h2, but not <2h
#
# HTML 5 doesn't restrict tag names at all
#   https://html.spec.whatwg.org/#toc-syntax
#
# XML allows : - .
#   https://www.w3.org/TR/xml/#NT-NameChar

# Namespaces for MathML, SVG
# XLink, XML, XMLNS
#
# https://infra.spec.whatwg.org/#namespaces
#
# Allow - for td-attrs

_TAG_RE = re.compile(r'/? \s* ([a-zA-Z][a-zA-Z0-9-]*)', re.VERBOSE)

# To match href="foo"

_ATTR_RE = re.compile(
    r'''
\s+                    # Leading whitespace is required
([a-z]+)               # Attribute name
(?:                    # Optional attribute value
  \s* = \s*
  (?:
    " ([^>"]*) "       # double quoted value
  | ([a-zA-Z0-9_\-]+)  # Just allow unquoted "identifiers"
                       # TODO: relax this?  for href=$foo
  )
)?
''', re.VERBOSE)

TagName, AttrName, UnquotedValue, QuotedValue = range(4)
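

# Sketch of the _ATTR_RE group convention used by TagLexer.Tokens() below:
# group 1 is the attribute name, group 2 a double-quoted value, group 3 an
# unquoted value.  The inputs are made-up examples.


def _DemoAttrRe():
    m = _ATTR_RE.match(' href="/foo"')
    assert m.group(1) == 'href'
    assert m.group(2) == '/foo'  # quoted value
    assert m.group(3) is None

    m = _ATTR_RE.match(' id=x1')
    assert m.group(1) == 'id'
    assert m.group(2) is None
    assert m.group(3) == 'x1'  # unquoted value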


class TagLexer(object):
    """
    Given a tag like <a href="..."> or <link type="..." />, the TagLexer
    provides a few operations:

    - What is the tag?
    - Iterate through the attributes, giving (name, value_start_pos, value_end_pos)
    """

    def __init__(self, s):
        self.s = s
        self.start_pos = -1  # Invalid
        self.end_pos = -1

    def Reset(self, start_pos, end_pos):
        """Reuse instances of this object."""
        self.start_pos = start_pos
        self.end_pos = end_pos

    def TagString(self):
        return self.s[self.start_pos:self.end_pos]

    def TagName(self):
        # First event
        tok_id, start, end = next(self.Tokens())
        return self.s[start:end]

    def GetSpanForAttrValue(self, attr_name):
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these

        events = self.Tokens()
        val = (-1, -1)
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]
                    if name == attr_name:
                        # The value should come next
                        tok_id, start, end = next(events)
                        if tok_id in (QuotedValue, UnquotedValue):
                            # Note: quoted values may have &amp;
                            # We would need ANOTHER lexer to unescape them.
                            # Right now help_gen.py and oils_doc.py
                            val = start, end
                        break

        except StopIteration:
            pass
        return val

    def GetAttrRaw(self, attr_name):
        """
        Return the value, which may be UNESCAPED.
        """
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these
        start, end = self.GetSpanForAttrValue(attr_name)
        if start == -1:
            return None
        return self.s[start:end]

    def AllAttrsRaw(self):
        """
        Get a list of pairs [('class', 'foo'), ('href', '?foo=1&amp;bar=2')]

        The quoted values may be escaped.  We would need another lexer to
        unescape them.
        """
        pairs = []
        events = self.Tokens()
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]

                    # The value should come next
                    tok_id, start, end = next(events)
                    if tok_id in (QuotedValue, UnquotedValue):
                        # Note: quoted values may have &amp;
                        # We would need ANOTHER lexer to unescape them, but we
                        # don't need that for ul-table

                        val = self.s[start:end]
                        pairs.append((name, val))
        except StopIteration:
            pass
        return pairs

    def Tokens(self):
        """
        Yields a sequence of tokens: Tag (AttrName AttrValue?)*

        Where each Token is (Type, start_pos, end_pos)

        Note that start and end are NOT redundant!  We skip over some unwanted
        characters.
        """
        m = _TAG_RE.match(self.s, self.start_pos + 1)
        if not m:
            raise RuntimeError("Couldn't find HTML tag in %r" %
                               self.TagString())
        yield TagName, m.start(1), m.end(1)

        pos = m.end(0)

        while True:
            # don't search past the end
            m = _ATTR_RE.match(self.s, pos, self.end_pos)
            if not m:
                # A validating parser would check that > or /> is next --
                # there's no junk
                break

            yield AttrName, m.start(1), m.end(1)

            # Quoted is group 2, unquoted is group 3.
            if m.group(2) is not None:
                yield QuotedValue, m.start(2), m.end(2)
            elif m.group(3) is not None:
                yield UnquotedValue, m.start(3), m.end(3)

            # Skip past the "
            pos = m.end(0)
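

# Usage sketch for TagLexer: Reset() it to the span of a single tag (here the
# whole string), then query it.  Note that the attribute value comes back with
# &amp; still escaped.


def _DemoTagLexer():
    s = '<a href="?foo=1&amp;bar=2" class=link>'
    lx = TagLexer(s)
    lx.Reset(0, len(s))

    assert lx.TagName() == 'a'
    assert lx.GetAttrRaw('href') == '?foo=1&amp;bar=2'
    assert lx.AllAttrsRaw() == [('href', '?foo=1&amp;bar=2'),
                                ('class', 'link')]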


def ReadUntilStartTag(it, tag_lexer, tag_name):
    """Find the next <foo>, returning its (start, end) positions

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.StartTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No start tag %r', tag_name)


def ReadUntilEndTag(it, tag_lexer, tag_name):
    """Find the next </foo>, returning its (start, end) position

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.EndTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No end tag %r', tag_name)
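

# Sketch: both functions consume the SAME token iterator, so a second call
# continues scanning where the first one stopped.  Extracting the body of a
# <title> tag (a made-up input) looks like this:


def _DemoReadUntil():
    s = '<html><title>Oils</title></html>'
    it = ValidTokens(s)
    tag_lexer = TagLexer(s)

    _, title_end = ReadUntilStartTag(it, tag_lexer, 'title')
    end_start, _ = ReadUntilEndTag(it, tag_lexer, 'title')
    assert s[title_end:end_start] == 'Oils'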


CHAR_ENTITY = {
    'amp': '&',
    'lt': '<',
    'gt': '>',
    'quot': '"',
}


def ToText(s, left_pos=0, right_pos=-1):
    """Given HTML, return text by unquoting &gt; and &lt; etc.

    Used by:
      doctools/oils_doc.py: PygmentsPlugin
      doctools/make_help.py: HelpIndexCards

    In the latter case, we could process some tags, like:

    - Blue Link (not clickable, but still useful)
    - Red X

    That should be html.ToAnsi.
    """
    f = cStringIO.StringIO()
    out = Output(s, f, left_pos, right_pos)

    pos = left_pos
    for tok_id, end_pos in ValidTokens(s, left_pos, right_pos):
        if tok_id == Tok.RawData:
            out.SkipTo(pos)
            out.PrintUntil(end_pos)

        elif tok_id == Tok.CharEntity:  # &amp;

            entity = s[pos + 1:end_pos - 1]

            out.SkipTo(pos)
            out.Print(CHAR_ENTITY[entity])
            out.SkipTo(end_pos)

        # Not handling these yet
        elif tok_id == Tok.HexChar:
            raise AssertionError('Hex Char %r' % s[pos:pos + 20])

        elif tok_id == Tok.DecChar:
            raise AssertionError('Dec Char %r' % s[pos:pos + 20])

        pos = end_pos

    out.PrintTheRest()
    return f.getvalue()
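

# Usage sketch for ToText: tags are dropped and the four entities in
# CHAR_ENTITY are decoded.  The example ends with raw text on purpose, because
# PrintTheRest() copies any input that follows the last RawData or CharEntity
# token.


def _DemoToText():
    assert ToText('<b>x &amp; y</b> z') == 'x & y z'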