#!/usr/bin/env python2
"""
lazylex/html.py - Low-Level HTML Processing.

See lazylex/README.md for details.

TODO: This should be an Oils library eventually. It's a "lazily-parsed data
structure" like TSV8
"""
from __future__ import print_function

import cStringIO
import re
import sys

from typing import List, Tuple


def log(msg, *args):
    msg = msg % args
    print(msg, file=sys.stderr)


class LexError(Exception):
    """For bad lexical elements like <> or &&"""

    def __init__(self, s, pos):
        self.s = s
        self.pos = pos

    def __str__(self):
        return '(LexError %r)' % (self.s[self.pos:self.pos + 20])


class ParseError(Exception):
    """For errors in the tag structure."""

    def __init__(self, msg, *args):
        self.msg = msg
        self.args = args

    def __str__(self):
        return '(ParseError %s)' % (self.msg % self.args)


class Output(object):
    """Takes an underlying input buffer and an output file. Maintains a
    position in the input buffer.

    Print FROM the input or print new text to the output.
    """

    def __init__(self, s, f, left_pos=0, right_pos=-1):
        self.s = s
        self.f = f
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos

    def SkipTo(self, pos):
        """Skip to a position."""
        self.pos = pos

    def PrintUntil(self, pos):
        """Print from the current position until a position."""
        piece = self.s[self.pos:pos]
        self.f.write(piece)
        self.pos = pos

    def PrintTheRest(self):
        """Print until the end of the string."""
        self.PrintUntil(self.right_pos)

    def Print(self, s):
        """Print new text to the output file."""
        self.f.write(s)


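# A minimal usage sketch (not part of the original module; the input string
# and positions are hypothetical).  Copy the input through, but overwrite one
# span with new text:
def _DemoOutput():
    # type: () -> None
    s = 'hello world'
    f = cStringIO.StringIO()
    out = Output(s, f)
    out.PrintUntil(6)  # copy 'hello ' FROM the input
    out.Print('WORLD')  # print new text instead of the next span
    out.SkipTo(len(s))  # skip the input we replaced
    out.PrintTheRest()  # flush anything left (nothing here)
    assert f.getvalue() == 'hello WORLD', f.getvalue()

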
# HTML Tokens
TOKENS = ('Decl Comment Processing StartTag StartEndTag EndTag '
          'DecChar HexChar CharEntity RawData Invalid EndOfStream').split()


class Tok(object):
    """
    Avoid lint errors by using these aliases (e.g. Tok.StartTag) instead of
    the module attributes set dynamically below.
    """
    pass


assert len(TOKENS) == 12, TOKENS

TOKEN_NAMES = [None] * len(TOKENS)  # type: List[str]

# Assign each token name an integer ID, both at module level and on Tok.
this_module = sys.modules[__name__]
for i, tok_str in enumerate(TOKENS):
    setattr(this_module, tok_str, i)
    setattr(Tok, tok_str, i)
    TOKEN_NAMES[i] = tok_str


def TokenName(tok_id):
    return TOKEN_NAMES[tok_id]


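# For example (a sketch; these values follow from the loop above):
#
#   Tok.Decl == 0                # first name in TOKENS
#   Tok.StartTag == 3
#   StartTag == 3                # module-level alias, same ID
#   TokenName(Tok.StartTag) == 'StartTag'

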
def MakeLexer(rules):
    return [
        # DOTALL is for the comment
        (re.compile(pat, re.VERBOSE | re.DOTALL), i) for (pat, i) in rules
    ]


#
# Eggex
#
# Tag = / ~['>']+ /

# Is this valid? A single character?
# Tag = / ~'>'* /

# Maybe better: / [NOT '>']+/
# capital letters not allowed there?
#
# But then this is confusing:
# / [NOT ~digit]+/
#
# / [NOT digit] / is [^\d]
# / ~digit /     is \D
#
# Or maybe:
#
# / [~ digit]+ /
# / [~ '>']+ /
# / [NOT '>']+ /

# End = / '</' Tag '>' /
# StartEnd = / '<' Tag '/>' /
# Start = / '<' Tag '>' /
#
# EntityRef = / '&' dot{* N} ';' /

LEXER = [
    # TODO: instead of nongreedy matches, the loop can just do .find('-->')
    # and .find('?>')

    # Actually non-greedy matches are regular and can be matched in linear
    # time with RE2.
    #
    # https://news.ycombinator.com/item?id=27099798
    #
    # Maybe try combining all of these for speed.
    (r'<!-- .*? -->', Tok.Comment),
    (r'<\? .*? \?>', Tok.Processing),

    # NOTE: < is allowed in these.
    (r'<! [^>]+ >', Tok.Decl),  # <!DOCTYPE html>
    (r'</ [^>]+ >', Tok.EndTag),  # end </a>
    (r'< [^>]+ />', Tok.StartEndTag),  # self-closing <br/> comes FIRST
    (r'< [^>]+ >', Tok.StartTag),  # start <a>
    (r'&\# [0-9]+ ;', Tok.DecChar),
    (r'&\# x[0-9a-fA-F]+ ;', Tok.HexChar),
    (r'& [a-zA-Z]+ ;', Tok.CharEntity),

    # Note: > is allowed in raw data.
    # https://stackoverflow.com/questions/10462348/right-angle-bracket-in-html
    (r'[^&<]+', Tok.RawData),
    (r'.', Tok.Invalid),  # error!
]

LEXER = MakeLexer(LEXER)


class Lexer(object):

    def __init__(self, s, left_pos=0, right_pos=-1):
        self.s = s
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos

    def Peek(self):
        # type: () -> Tuple[int, int]
        if self.pos == self.right_pos:
            return Tok.EndOfStream, self.pos

        assert self.pos < self.right_pos, self.pos

        # Find the first match.
        # Note: frontend/match.py uses _LongestMatch(), which is different!
        # TODO: reconcile them. This lexer should be expressible in re2c.
        for pat, tok_id in LEXER:
            m = pat.match(self.s, self.pos)
            if m:
                return tok_id, m.end()
        else:
            raise AssertionError('Tok.Invalid rule should have matched')

    def Read(self):
        # type: () -> Tuple[int, int]
        tok_id, end_pos = self.Peek()
        self.pos = end_pos  # advance
        return tok_id, end_pos


def _Tokens(s, left_pos, right_pos):
    """
    Args:
      s: string to parse
      left_pos, right_pos: Optional span boundaries.
    """
    lx = Lexer(s, left_pos, right_pos)
    while True:
        tok_id, pos = lx.Read()
        yield tok_id, pos
        if tok_id == Tok.EndOfStream:
            break


def ValidTokens(s, left_pos=0, right_pos=-1):
    """Wrapper around _Tokens to prevent callers from having to handle Invalid.

    I'm not combining the two functions because I might want to do a
    'yield' transformation on Tokens()? Exceptions might complicate the
    issue?
    """
    pos = left_pos
    for tok_id, end_pos in _Tokens(s, left_pos, right_pos):
        if tok_id == Tok.Invalid:
            raise LexError(s, pos)
        yield tok_id, end_pos
        pos = end_pos


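# A sketch of the token stream for a small document (the input is
# hypothetical, not from the original module).  Each token carries its end
# offset; <br/> goes to StartEndTag because of rule order in LEXER:
def _DemoValidTokens():
    # type: () -> None
    toks = list(ValidTokens('<p>hi &amp; bye</p><br/>'))
    assert toks == [
        (Tok.StartTag, 3),
        (Tok.RawData, 6),
        (Tok.CharEntity, 11),
        (Tok.RawData, 15),
        (Tok.EndTag, 19),
        (Tok.StartEndTag, 24),  # matched before the StartTag rule
        (Tok.EndOfStream, 24),
    ], toks

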
# Tag names:
# Match <a or </a
# Match <h2, but not <2h
#
# HTML 5 doesn't restrict tag names at all
# https://html.spec.whatwg.org/#toc-syntax
#
# XML allows : - .
# https://www.w3.org/TR/xml/#NT-NameChar

# Namespaces for MathML, SVG
# XLink, XML, XMLNS
#
# https://infra.spec.whatwg.org/#namespaces
#
# Allow - for td-attrs

_TAG_RE = re.compile(r'/? \s* ([a-zA-Z][a-zA-Z0-9-]*)', re.VERBOSE)

# To match href="foo"

_ATTR_RE = re.compile(
    r'''
\s+                     # Leading whitespace is required
([a-z]+)                # Attribute name
(?:                     # Optional attribute value
  \s* = \s*
  (?:
    " ([^>"]*) "        # double quoted value
  | ([a-zA-Z0-9_\-]+)   # Just allow unquoted "identifiers"
                        # TODO: relax this? for href=$foo
  )
)?
''', re.VERBOSE)

TagName, AttrName, UnquotedValue, QuotedValue = range(4)


class TagLexer(object):
    """
    Given a tag like <a href="..."> or <link type="..." />, the TagLexer
    provides a few operations:

    - What is the tag?
    - Iterate through the attributes, giving (name, value_start_pos,
      value_end_pos)
    """

    def __init__(self, s):
        self.s = s
        self.start_pos = -1  # Invalid
        self.end_pos = -1

    def Reset(self, start_pos, end_pos):
        """Reuse instances of this object."""
        self.start_pos = start_pos
        self.end_pos = end_pos

    def TagString(self):
        return self.s[self.start_pos:self.end_pos]

    def TagName(self):
        # First event
        tok_id, start, end = next(self.Tokens())
        return self.s[start:end]

    def GetSpanForAttrValue(self, attr_name):
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these

        events = self.Tokens()
        val = (-1, -1)
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]
                    if name == attr_name:
                        # The value should come next
                        tok_id, start, end = next(events)
                        if tok_id in (QuotedValue, UnquotedValue):
                            # Note: quoted values may have &amp;
                            # We would need ANOTHER lexer to unescape them.
                            # Right now help_gen.py and oils_doc.py don't
                            # need that.
                            val = start, end
                            break

        except StopIteration:
            pass
        return val

    def GetAttrRaw(self, attr_name):
        """
        Return the value, which may be UNESCAPED.
        """
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these
        start, end = self.GetSpanForAttrValue(attr_name)
        if start == -1:
            return None
        return self.s[start:end]

    def AllAttrsRaw(self):
        """
        Get a list of pairs [('class', 'foo'), ('href', '?foo=1&amp;bar=2')]

        The quoted values may be escaped. We would need another lexer to
        unescape them.
        """
        pairs = []
        events = self.Tokens()
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]

                    # The value should come next
                    tok_id, start, end = next(events)
                    if tok_id in (QuotedValue, UnquotedValue):
                        # Note: quoted values may have &amp;
                        # We would need ANOTHER lexer to unescape them, but we
                        # don't need that for ul-table

                        val = self.s[start:end]
                        pairs.append((name, val))
        except StopIteration:
            pass
        return pairs

    def Tokens(self):
        """
        Yields a sequence of tokens: Tag (AttrName AttrValue?)*

        Where each Token is (Type, start_pos, end_pos)

        Note that start and end are NOT redundant! We skip over some unwanted
        characters.
        """
        m = _TAG_RE.match(self.s, self.start_pos + 1)
        if not m:
            raise RuntimeError("Couldn't find HTML tag in %r" %
                               self.TagString())
        yield TagName, m.start(1), m.end(1)

        pos = m.end(0)

        while True:
            # don't search past the end
            m = _ATTR_RE.match(self.s, pos, self.end_pos)
            if not m:
                # A validating parser would check that > or /> is next --
                # there's no junk
                break

            yield AttrName, m.start(1), m.end(1)

            # Quoted is group 2, unquoted is group 3.
            if m.group(2) is not None:
                yield QuotedValue, m.start(2), m.end(2)
            elif m.group(3) is not None:
                yield UnquotedValue, m.start(3), m.end(3)

            # Skip past the closing "
            pos = m.end(0)


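# Sketch of TagLexer usage (the input tag is hypothetical).  The lexer works
# on spans of a larger document; here the span is the whole string:
def _DemoTagLexer():
    # type: () -> None
    s = '<a href="/index.html" class=big>'
    tag_lexer = TagLexer(s)
    tag_lexer.Reset(0, len(s))
    assert tag_lexer.TagName() == 'a'
    assert tag_lexer.GetAttrRaw('href') == '/index.html'
    assert tag_lexer.AllAttrsRaw() == [('href', '/index.html'),
                                       ('class', 'big')]

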
def ReadUntilStartTag(it, tag_lexer, tag_name):
    """Find the next <foo>, returning its (start, end) positions

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.StartTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No start tag %r', tag_name)


def ReadUntilEndTag(it, tag_lexer, tag_name):
    """Find the next </foo>, returning its (start, end) positions

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.EndTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No end tag %r', tag_name)


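# Sketch of the ReadUntil* helpers (the input is hypothetical).  They consume
# the token iterator, so a second call continues where the first stopped:
def _DemoReadUntil():
    # type: () -> None
    s = '<b>one</b><p>two</p>'
    tag_lexer = TagLexer(s)
    it = ValidTokens(s)
    start, end = ReadUntilStartTag(it, tag_lexer, 'p')
    assert (start, end) == (10, 13), (start, end)  # the <p> token
    start, end = ReadUntilEndTag(it, tag_lexer, 'p')
    assert (start, end) == (16, 20), (start, end)  # the </p> token

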
CHAR_ENTITY = {
    'amp': '&',
    'lt': '<',
    'gt': '>',
    'quot': '"',
}


def ToText(s, left_pos=0, right_pos=-1):
    """Given HTML, return text by unquoting &gt; and &lt; etc.

    Used by:
      doctools/oils_doc.py: PygmentsPlugin
      doctools/make_help.py: HelpIndexCards

    In the latter case, we could process some tags, like:

    - Blue Link (not clickable, but still useful)
    - Red X

    That should be html.ToAnsi.
    """
    f = cStringIO.StringIO()
    out = Output(s, f, left_pos, right_pos)

    pos = left_pos
    for tok_id, end_pos in ValidTokens(s, left_pos, right_pos):
        if tok_id == Tok.RawData:
            out.SkipTo(pos)
            out.PrintUntil(end_pos)

        elif tok_id == Tok.CharEntity:  # &amp;

            entity = s[pos + 1:end_pos - 1]

            out.SkipTo(pos)
            out.Print(CHAR_ENTITY[entity])
            out.SkipTo(end_pos)

        # Not handling these yet
        elif tok_id == Tok.HexChar:
            raise AssertionError('Hex Char %r' % s[pos:pos + 20])

        elif tok_id == Tok.DecChar:
            raise AssertionError('Dec Char %r' % s[pos:pos + 20])

        pos = end_pos

    out.PrintTheRest()
    return f.getvalue()
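

# Sketch of ToText (hypothetical inputs).  Entities listed in CHAR_ENTITY are
# decoded, and left_pos/right_pos select a sub-span of a larger document:
def _DemoToText():
    # type: () -> None
    assert ToText('32 &gt; 4') == '32 > 4'
    assert ToText('<b>bold</b>', 3, 7) == 'bold'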