#!/usr/bin/env python2
"""
lazylex/html.py - Low-Level HTML Processing.

See lazylex/README.md for details.

TODO: This should be an Oils library eventually.  It's a "lazily-parsed data
structure" like TSV8.
"""
from __future__ import print_function

try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO  # python3
import re
import sys

if sys.version_info.major == 2:
    from typing import List, Tuple


def log(msg, *args):
    msg = msg % args
    print(msg, file=sys.stderr)


class LexError(Exception):
    """For bad lexical elements like <> or &&"""

    def __init__(self, s, pos):
        self.s = s
        self.pos = pos

    def __str__(self):
        return '(LexError %r)' % (self.s[self.pos:self.pos + 20])


class ParseError(Exception):
    """For errors in the tag structure."""

    def __init__(self, msg, *args):
        self.msg = msg
        self.args = args

    def __str__(self):
        return '(ParseError %s)' % (self.msg % self.args)


class Output(object):
    """Takes an underlying input buffer and an output file.  Maintains a
    position in the input buffer.

    Print FROM the input, or print new text to the output.
    """

    def __init__(self, s, f, left_pos=0, right_pos=-1):
        self.s = s
        self.f = f
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos

    def SkipTo(self, pos):
        """Skip to a position in the input, without printing."""
        self.pos = pos

    def PrintUntil(self, pos):
        """Print the input from the current position up to pos."""
        piece = self.s[self.pos:pos]
        self.f.write(piece)
        self.pos = pos

    def PrintTheRest(self):
        """Print the input until the end of the span (right_pos)."""
        self.PrintUntil(self.right_pos)

    def Print(self, s):
        """Print new text directly to the output file."""
        self.f.write(s)
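

# A minimal sketch (not used by the library) of the copy/replace pattern that
# Output supports.  The 'start' and 'end' positions are hypothetical here; in
# practice they come from the lexers below.
def _DemoRewrite(s, f, start, end):
    out = Output(s, f)
    out.PrintUntil(start)  # copy the input up to the tag
    out.Print('<b>')  # write replacement text in its place
    out.SkipTo(end)  # skip over the original tag
    out.PrintTheRest()  # copy the remainder unchanged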


# HTML Tokens
# CommentBegin and ProcessingBegin are "pseudo-tokens", not visible
TOKENS = ('Decl Comment CommentBegin Processing ProcessingBegin '
          'StartTag StartEndTag EndTag DecChar HexChar CharEntity '
          'RawData CData CDataStartTag CDataEndTag '
          'Invalid EndOfStream').split()


class Tok(object):
    """
    Avoid lint errors by using these aliases
    """
    pass


TOKEN_NAMES = [None] * len(TOKENS)  # type: List[str]

this_module = sys.modules[__name__]
for i, tok_str in enumerate(TOKENS):
    setattr(this_module, tok_str, i)
    setattr(Tok, tok_str, i)
    TOKEN_NAMES[i] = tok_str


def TokenName(tok_id):
    return TOKEN_NAMES[tok_id]
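

# Sanity check of the generated aliases (runs at import time, but only reads
# the tables above):
assert TokenName(Tok.Decl) == 'Decl'
assert Tok.EndOfStream == len(TOKENS) - 1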


def MakeLexer(rules):
    return [(re.compile(pat, re.VERBOSE), i) for (pat, i) in rules]


#
# Eggex
#
# Tag = / ~['>']+ /

# Is this valid?  A single character?
# Tag = / ~'>'* /

# Maybe better: / [NOT '>']+ /
# capital letters not allowed there?
#
# But then this is confusing:
# / [NOT ~digit]+ /
#
# / [NOT digit] / is [^\d]
# / ~digit /      is \D
#
# Or maybe:
#
# / [~ digit]+ /
# / [~ '>']+ /
# / [NOT '>']+ /

# End = / '</' Tag '>' /
# StartEnd = / '<' Tag '/>' /
# Start = / '<' Tag '>' /
#
# EntityRef = / '&' dot{* N} ';' /

LEXER = [
    # Note: non-greedy matches are regular and can be matched in linear time
    # with RE2.
    #
    # https://news.ycombinator.com/item?id=27099798
    #
    # Maybe try combining all of these for speed.

    # . is any char except newline
    # https://re2c.org/manual/manual_c.html

    # Discarded options
    #(r'<!-- .*? -->', Tok.Comment),

    # Hack from Claude: \s\S instead of re.DOTALL.  I don't like this
    #(r'<!-- [\s\S]*? -->', Tok.Comment),
    #(r'<!-- (?:.|[\n])*? -->', Tok.Comment),
    (r'<!--', Tok.CommentBegin),

    # Processing instructions are XML only, but they are treated like a
    # comment in HTML:
    #
    # https://developer.mozilla.org/en-US/docs/Web/API/ProcessingInstruction
    #
    # We don't want to confuse them with start tags, so we recognize them at
    # the top level.
    (r'<\?', Tok.ProcessingBegin),

    # NOTE: < is allowed in these.
    (r'<! [^>]+ >', Tok.Decl),  # <!DOCTYPE html>
    (r'<(?:script|style) [^>]+>', Tok.CDataStartTag),  # <script src="...">
    (r'</ [^>]+ >', Tok.EndTag),  # end </a>
    (r'< [^>]+ />', Tok.StartEndTag),  # self-closing <br/> comes BEFORE StartTag
    (r'< [^>]+ >', Tok.StartTag),  # start <a>
    (r'&\# [0-9]+ ;', Tok.DecChar),
    (r'&\# x[0-9a-fA-F]+ ;', Tok.HexChar),
    (r'& [a-zA-Z]+ ;', Tok.CharEntity),

    # HTML5 allows > in raw data - should we?  It's apparently not allowed in
    # XML.
    # But < is not allowed.
    # https://stackoverflow.com/questions/10462348/right-angle-bracket-in-html
    (r'[^&<]+', Tok.RawData),
    (r'.', Tok.Invalid),  # error!
]

LEXER = MakeLexer(LEXER)
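

# Rule order matters above: StartEndTag must be tried before StartTag, or
# '<br/>' would never be recognized as self-closing.  For example (a sketch):
#
#   '<br/>'  -> StartEndTag
#   '<br>'   -> StartTag
#   '</br>'  -> EndTag
#   '&amp;'  -> CharEntity
#   '&#x61;' -> HexChar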


class Lexer(object):

    def __init__(self, s, left_pos=0, right_pos=-1):
        self.s = s
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos
        self.cache = {}  # string -> compiled regex pattern object

    def _Peek(self):
        # type: () -> Tuple[int, int]
        """Lex the next token, but don't advance self.pos."""
        if self.pos == self.right_pos:
            return Tok.EndOfStream, self.pos

        assert self.pos < self.right_pos, self.pos

        # Find the first match.
        # Note: frontend/match.py uses _LongestMatch(), which is different!
        # TODO: reconcile them.  This lexer should be expressible in re2c.

        # TODO: Get rid of the non-greedy match

        for pat, tok_id in LEXER:
            m = pat.match(self.s, self.pos)
            if m:
                if tok_id == Tok.CommentBegin:
                    pos = self.s.find('-->', self.pos)
                    if pos == -1:
                        # unterminated <!--
                        raise LexError(self.s, self.pos)
                    return Tok.Comment, pos + 3  # -->

                if tok_id == Tok.ProcessingBegin:
                    pos = self.s.find('?>', self.pos)
                    if pos == -1:
                        # unterminated <?
                        raise LexError(self.s, self.pos)
                    return Tok.Processing, pos + 2  # ?>

                # TODO: we need to enter a state so the NEXT call can return
                # CData, and the one after that CDataEndTag.
                if tok_id == Tok.CDataStartTag:
                    # Look for the end tag that matches the start tag;
                    # <style> is terminated by </style>, not </script>.
                    tag = ('style' if self.s.startswith('<style', self.pos)
                           else 'script')
                    end_tag = '</%s>' % tag
                    if self.s.find(end_tag, self.pos) == -1:
                        # unterminated <script> or <style>
                        raise LexError(self.s, self.pos)

                return tok_id, m.end()
        else:
            raise AssertionError('Tok.Invalid rule should have matched')

    def Read(self):
        # type: () -> Tuple[int, int]
        tok_id, end_pos = self._Peek()
        self.pos = end_pos  # advance
        return tok_id, end_pos

    def LookAhead(self, regex):
        # Cache the regex compilation.  This could also be
        # LookAheadFor(THEAD) or something.
        pat = self.cache.get(regex)
        if pat is None:
            pat = re.compile(regex)
            self.cache[regex] = pat

        m = pat.match(self.s, self.pos)
        return m is not None
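

# Sketch of how a caller might use LookAhead() (hypothetical input; matching
# <thead> is the motivating case mentioned above):
#
#   lx = Lexer('<table><thead>...</table>')
#   lx.Read()  # consume the <table> start tag
#   if lx.LookAhead(r'<thead>'):
#       ...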


def _Tokens(s, left_pos, right_pos):
    """
    Args:
      s: string to parse
      left_pos, right_pos: Optional span boundaries.
    """
    lx = Lexer(s, left_pos, right_pos)
    while True:
        tok_id, pos = lx.Read()
        yield tok_id, pos
        if tok_id == Tok.EndOfStream:
            break


def ValidTokens(s, left_pos=0, right_pos=-1):
    """Wrapper around _Tokens, so callers don't have to handle Invalid.

    The two functions aren't combined because we might want to do a 'yield'
    transformation on Tokens(), and exceptions might complicate that.
    """
    pos = left_pos
    for tok_id, end_pos in _Tokens(s, left_pos, right_pos):
        if tok_id == Tok.Invalid:
            raise LexError(s, pos)
        yield tok_id, end_pos
        pos = end_pos
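

def _DemoTokens(s):
    """A sketch of the main lexing loop: log each token name and its text.

    Note that tokens carry only END positions; the start of each token is the
    end of the previous one.
    """
    pos = 0
    for tok_id, end_pos in ValidTokens(s):
        log('%s %r', TokenName(tok_id), s[pos:end_pos])
        pos = end_pos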


# Tag names:
#   Match <a or </a
#   Match <h2, but not <2h
#
# HTML 5 doesn't restrict tag names at all
#   https://html.spec.whatwg.org/#toc-syntax
#
# XML allows : - .
#   https://www.w3.org/TR/xml/#NT-NameChar

# Namespaces for MathML, SVG
# XLink, XML, XMLNS
#
# https://infra.spec.whatwg.org/#namespaces
#
# Allow - for td-attrs

# Tag name, or attribute name
_NAME = r'[a-zA-Z][a-zA-Z0-9_\-]*'  # must start with letter

_ATTR_VALUE = r'[a-zA-Z0-9_\-]+'  # allow hyphens

_TAG_RE = re.compile(r'/? \s* (%s)' % _NAME, re.VERBOSE)

# To match href="foo"

_ATTR_RE = re.compile(
    r'''
\s+                     # Leading whitespace is required
(%s)                    # Attribute name
(?:                     # Optional attribute value
  \s* = \s*
  (?:
    " ([^>"]*) "        # double-quoted value
    | (%s)              # unquoted value
                        # TODO: relax this?  for href=$foo
  )
)?
''' % (_NAME, _ATTR_VALUE), re.VERBOSE)

TagName, AttrName, UnquotedValue, QuotedValue = range(4)
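

def _DemoAttrMatch():
    """Sketch of what _ATTR_RE captures: group 2 is quoted, group 3 unquoted."""
    m = _ATTR_RE.match(' href="foo" class=bar')
    assert m.group(1) == 'href'
    assert m.group(2) == 'foo'  # the double-quoted branch matched
    assert m.group(3) is None  # so the unquoted branch did not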


class TagLexer(object):
    """
    Given a tag like <a href="..."> or <link type="..." />, the TagLexer
    provides a few operations:

    - What is the tag?
    - Iterate through the attributes, giving (name, value_start_pos, value_end_pos)
    """

    def __init__(self, s):
        self.s = s
        self.start_pos = -1  # Invalid
        self.end_pos = -1

    def Reset(self, start_pos, end_pos):
        """Reuse instances of this object."""
        self.start_pos = start_pos
        self.end_pos = end_pos

    def TagString(self):
        return self.s[self.start_pos:self.end_pos]

    def TagName(self):
        # First event
        tok_id, start, end = next(self.Tokens())
        return self.s[start:end]

    def GetSpanForAttrValue(self, attr_name):
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these

        events = self.Tokens()
        val = (-1, -1)
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]
                    if name == attr_name:
                        # The value should come next
                        tok_id, start, end = next(events)
                        if tok_id in (QuotedValue, UnquotedValue):
                            # Note: quoted values may have &
                            # We would need ANOTHER lexer to unescape them,
                            # but right now help_gen.py and oils_doc.py get
                            # by without it.
                            val = start, end
                        break

        except StopIteration:
            pass
        return val

    def GetAttrRaw(self, attr_name):
        """
        Return the value, which may be UNESCAPED.
        """
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these
        start, end = self.GetSpanForAttrValue(attr_name)
        if start == -1:
            return None
        return self.s[start:end]

    def AllAttrsRaw(self):
        """
        Get a list of pairs [('class', 'foo'), ('href', '?foo=1&bar=2')]

        The quoted values may be escaped.  We would need another lexer to
        unescape them.
        """
        pairs = []
        events = self.Tokens()
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]

                    # The value should come next
                    tok_id, start, end = next(events)
                    if tok_id in (QuotedValue, UnquotedValue):
                        # Note: quoted values may have &
                        # We would need ANOTHER lexer to unescape them, but
                        # we don't need that for ul-table

                        val = self.s[start:end]
                        pairs.append((name, val))
        except StopIteration:
            pass
        return pairs

    def Tokens(self):
        """
        Yields a sequence of tokens: Tag (AttrName AttrValue?)*

        Where each Token is (Type, start_pos, end_pos)

        Note that start and end are NOT redundant!  We skip over some
        unwanted characters.
        """
        m = _TAG_RE.match(self.s, self.start_pos + 1)
        if not m:
            raise RuntimeError("Couldn't find HTML tag in %r" %
                               self.TagString())
        yield TagName, m.start(1), m.end(1)

        pos = m.end(0)

        while True:
            # don't search past the end
            m = _ATTR_RE.match(self.s, pos, self.end_pos)
            if not m:
                # A validating parser would check that > or /> is next --
                # there's no junk
                break

            yield AttrName, m.start(1), m.end(1)

            # Quoted is group 2, unquoted is group 3.
            if m.group(2) is not None:
                yield QuotedValue, m.start(2), m.end(2)
            elif m.group(3) is not None:
                yield UnquotedValue, m.start(3), m.end(3)

            # Skip past the closing quote (or the end of the unquoted value)
            pos = m.end(0)
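

def _DemoTagLexer():
    """Sketch of TagLexer usage on a standalone start tag."""
    s = '<a href="index.html" class=big>'
    tag_lexer = TagLexer(s)
    tag_lexer.Reset(0, len(s))
    assert tag_lexer.TagName() == 'a'
    assert tag_lexer.GetAttrRaw('href') == 'index.html'
    assert tag_lexer.AllAttrsRaw() == [('href', 'index.html'),
                                       ('class', 'big')]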


def ReadUntilStartTag(it, tag_lexer, tag_name):
    """Find the next <foo>, returning its (start, end) positions

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.StartTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No start tag %r', tag_name)


def ReadUntilEndTag(it, tag_lexer, tag_name):
    """Find the next </foo>, returning its (start, end) positions

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.EndTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No end tag %r', tag_name)
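

def _DemoReadUntil(contents):
    """Sketch: return the span of the first <table> ... </table> section.

    Raises ParseError if either tag is missing.
    """
    tag_lexer = TagLexer(contents)
    it = ValidTokens(contents)
    start_pos, _ = ReadUntilStartTag(it, tag_lexer, 'table')
    _, end_pos = ReadUntilEndTag(it, tag_lexer, 'table')
    return contents[start_pos:end_pos]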


CHAR_ENTITY = {
    'amp': '&',
    'lt': '<',
    'gt': '>',
    'quot': '"',
}


def ToText(s, left_pos=0, right_pos=-1):
    """Given HTML, return text by unquoting &gt; and &lt; etc.

    Used by:
      doctools/oils_doc.py: PygmentsPlugin
      doctools/make_help.py: HelpIndexCards

    In the latter case, we could process some tags, like:

    - Blue Link (not clickable, but still useful)
    - Red X

    That should be html.ToAnsi.
    """
    f = StringIO()
    out = Output(s, f, left_pos, right_pos)

    pos = left_pos
    for tok_id, end_pos in ValidTokens(s, left_pos, right_pos):
        if tok_id == Tok.RawData:
            out.SkipTo(pos)
            out.PrintUntil(end_pos)

        elif tok_id == Tok.CharEntity:  # e.g. &amp;
            entity = s[pos + 1:end_pos - 1]  # strip & and ;

            out.SkipTo(pos)
            out.Print(CHAR_ENTITY[entity])
            out.SkipTo(end_pos)

        # Not handling these yet
        elif tok_id == Tok.HexChar:
            raise AssertionError('Hex Char %r' % s[pos:pos + 20])

        elif tok_id == Tok.DecChar:
            raise AssertionError('Dec Char %r' % s[pos:pos + 20])

        pos = end_pos

    out.PrintTheRest()
    return f.getvalue()
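

def _DemoToText():
    """A sketch of ToText(): tags are dropped and entities are decoded."""
    assert ToText('<b>x &amp; y</b> z') == 'x & y z'
    # Note: markup after the last raw data is printed by PrintTheRest(), so
    # an input that ENDS with a tag currently keeps that tag.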
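

# Simple CLI for smoke-testing the lexer.  Example usage (a sketch; the list
# of HTML files arrives on stdin, one path per line):
#
#   echo foo.html | lazylex/html.py well-formed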
def main(argv):
    action = argv[1]

    if action == 'well-formed':
        num_tokens = 0
        errors = []
        i = 0
        for line in sys.stdin:
            name = line.strip()
            with open(name) as f:
                contents = f.read()

            lx = ValidTokens(contents)
            try:
                tokens = list(lx)
            except LexError as e:
                log('Error in %r: %s', name, e)
                errors.append((name, e))
            else:
                num_tokens += len(tokens)
                #print('%d %s' % (len(tokens), name))
            i += 1

        log('')
        log('  %d tokens in %d files', num_tokens, i)
        log('  %d errors', len(errors))
        if 0:
            for name, e in errors:
                log('Error in %r: %s', name, e)

    else:
        raise RuntimeError('Invalid action %r' % action)


if __name__ == '__main__':
    main(sys.argv)