#!/usr/bin/env python2
"""
lazylex/html.py - Low-Level HTML Processing.

See lazylex/README.md for details.

TODO: This should be an Oils library eventually.  It's a "lazily-parsed data
structure" like TSV8.
"""
from __future__ import print_function

try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO  # python3
import re
import sys

if sys.version_info.major == 2:
    from typing import List, Tuple, Optional

def log(msg, *args):
    msg = msg % args
    print(msg, file=sys.stderr)


class LexError(Exception):
    """For bad lexical elements like <> or &&"""

    def __init__(self, s, pos):
        self.s = s
        self.pos = pos

    def __str__(self):
        return '(LexError %r)' % (self.s[self.pos:self.pos + 20])


class ParseError(Exception):
    """For errors in the tag structure."""

    def __init__(self, msg, *args):
        self.msg = msg
        self.args = args

    def __str__(self):
        return '(ParseError %s)' % (self.msg % self.args)


class Output(object):
    """Takes an underlying input buffer and an output file.  Maintains a
    position in the input buffer.

    Print FROM the input, or print new text to the output.
    """

    def __init__(self, s, f, left_pos=0, right_pos=-1):
        self.s = s
        self.f = f
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos

    def SkipTo(self, pos):
        """Skip to a position in the input."""
        self.pos = pos

    def PrintUntil(self, pos):
        """Print the input from the current position up to pos, and advance."""
        piece = self.s[self.pos:pos]
        self.f.write(piece)
        self.pos = pos

    def PrintTheRest(self):
        """Print the input until the end of the string."""
        self.PrintUntil(self.right_pos)

    def Print(self, s):
        """Print new text to the output file."""
        self.f.write(s)


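# A minimal usage sketch (illustration, not part of the library): substitute
# one piece of the input while copying the rest through.  The string and
# positions are made up.
#
#     f = StringIO()
#     out = Output('<b>x</b>', f)
#     out.PrintUntil(3)   # copies '<b>'
#     out.Print('y')      # new text replacing 'x'
#     out.SkipTo(4)       # skip over 'x' in the input
#     out.PrintTheRest()  # copies '</b>'
#     # f.getvalue() == '<b>y</b>'

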
# HTML Tokens
# CommentBegin and ProcessingBegin are "pseudo-tokens", not visible
TOKENS = ('Decl Comment CommentBegin Processing ProcessingBegin '
          'StartTag StartEndTag EndTag '
          'DecChar HexChar CharEntity '
          'RawData CData Invalid EndOfStream').split()


class Tok(object):
    """
    Avoid lint errors by using these aliases
    """
    pass


TOKEN_NAMES = [None] * len(TOKENS)  # type: List[str]

this_module = sys.modules[__name__]
for i, tok_str in enumerate(TOKENS):
    setattr(this_module, tok_str, i)
    setattr(Tok, tok_str, i)
    TOKEN_NAMES[i] = tok_str


def TokenName(tok_id):
    return TOKEN_NAMES[tok_id]


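# For example (illustration only), the loop above turns each name into an
# integer constant, available both at module level and on Tok:
#
#     Tok.StartTag             # some small integer
#     TokenName(Tok.StartTag)  # 'StartTag'

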
def MakeLexer(rules):
    return [(re.compile(pat, re.VERBOSE), i) for (pat, i) in rules]


#
# Eggex
#
# Tag = / ~['>']+ /

# Is this valid?  A single character?
# Tag = / ~'>'* /

# Maybe better: / [NOT '>']+ /
# capital letters not allowed there?
#
# But then this is confusing:
# / [NOT ~digit]+ /
#
# / [NOT digit] / is [^\d]
# / ~digit /     is \D
#
# Or maybe:
#
# / [~ digit]+ /
# / [~ '>']+ /
# / [NOT '>']+ /

# End = / '</' Tag '>' /
# StartEnd = / '<' Tag '/>' /
# Start = / '<' Tag '>' /
#
# EntityRef = / '&' dot{* N} ';' /

# Tag name, or attribute name
# colon is used in XML
_NAME = r'[a-zA-Z][a-zA-Z0-9:_\-]*'  # must start with a letter

LEXER = [
    # Note: non-greedy matches are regular and can be matched in linear time
    # with RE2.
    #
    # https://news.ycombinator.com/item?id=27099798
    #
    # Maybe try combining all of these for speed.

    # . is any char except newline
    # https://re2c.org/manual/manual_c.html

    # Discarded options
    #(r'<!-- .*? -->', Tok.Comment),

    # Hack from Claude: \s\S instead of re.DOTALL.  I don't like this
    #(r'<!-- [\s\S]*? -->', Tok.Comment),
    #(r'<!-- (?:.|[\n])*? -->', Tok.Comment),
    (r'<!--', Tok.CommentBegin),

    # Processing instructions are XML-only, but they are treated like a
    # comment in HTML:
    #
    # https://developer.mozilla.org/en-US/docs/Web/API/ProcessingInstruction
    #
    # They are used for the XML declaration:
    # <?xml version="1.0" encoding="UTF-8"?>
    (r'<\?', Tok.ProcessingBegin),

    # NOTE: < is allowed in these?
    (r'<! [^>]+ >', Tok.Decl),  # <!DOCTYPE html>

    # Notes:
    # - We look for a valid tag name, but we don't validate attributes.
    #   That's done in the tag lexer.
    # - We don't allow leading whitespace.
    (r'</ (%s) >' % _NAME, Tok.EndTag),  # end </a>
    # Self-closing <br/> comes before StartTag.
    (r'< (%s) [^>]* />' % _NAME, Tok.StartEndTag),  # self-closing <br/>
    (r'< (%s) [^>]* >' % _NAME, Tok.StartTag),  # start <a>
    (r'&\# [0-9]+ ;', Tok.DecChar),
    (r'&\# x[0-9a-fA-F]+ ;', Tok.HexChar),
    (r'& [a-zA-Z]+ ;', Tok.CharEntity),

    # HTML5 allows > in raw data - should we?  But < is not allowed.
    # https://stackoverflow.com/questions/10462348/right-angle-bracket-in-html
    #
    # - My early blog has THREE errors when disallowing >
    # - So do some .wwz files
    (r'[^&<]+', Tok.RawData),
    (r'.', Tok.Invalid),  # error!
]

# TODO:
# - Should we disallow unescaped >, like XML does?  There should be "one way
#   to do it", and it could catch escaping bugs.

LEXER = MakeLexer(LEXER)


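# A quick sketch (illustration only) of how the rules apply: at a given
# position, the first pattern that matches wins.  Lexer._Peek() below is the
# real consumer.
#
#     for pat, tok_id in LEXER:
#         m = pat.match('</b>', 0)
#         if m:
#             break
#     # tok_id == Tok.EndTag, m.group(1) == 'b'

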
class Lexer(object):

    def __init__(self, s, left_pos=0, right_pos=-1):
        self.s = s
        self.pos = left_pos
        self.right_pos = len(s) if right_pos == -1 else right_pos
        self.cache = {}  # string -> compiled regex pattern object

        # Either </script> or </style> - we search until we see that.
        self.search_state = None  # type: Optional[str]

    def _Peek(self):
        # type: () -> Tuple[int, int]
        """
        Note: only Read() uses _Peek() now.
        """
        if self.pos == self.right_pos:
            return Tok.EndOfStream, self.pos

        assert self.pos < self.right_pos, self.pos

        if self.search_state is not None:
            pos = self.s.find(self.search_state, self.pos)
            if pos == -1:
                # unterminated <script> or <style>
                raise LexError(self.s, self.pos)
            self.search_state = None
            # The CData token ends where </script> or </style> begins.
            return Tok.CData, pos

        # Find the first match.
        # Note: frontend/match.py uses _LongestMatch(), which is different!
        # TODO: reconcile them.  This lexer should be expressible in re2c.

        # TODO: Get rid of the non-greedy match.

        for pat, tok_id in LEXER:
            m = pat.match(self.s, self.pos)
            if m:
                if tok_id == Tok.CommentBegin:
                    pos = self.s.find('-->', self.pos)
                    if pos == -1:
                        # unterminated <!--
                        raise LexError(self.s, self.pos)
                    return Tok.Comment, pos + 3  # include the -->

                if tok_id == Tok.ProcessingBegin:
                    pos = self.s.find('?>', self.pos)
                    if pos == -1:
                        # unterminated <?
                        raise LexError(self.s, self.pos)
                    return Tok.Processing, pos + 2  # include the ?>

                if tok_id == Tok.StartTag:
                    tag_name = m.group(1)  # captured
                    if tag_name == 'script':
                        self.search_state = '</script>'
                    elif tag_name == 'style':
                        self.search_state = '</style>'

                return tok_id, m.end()
        else:
            raise AssertionError('Tok.Invalid rule should have matched')

    def Read(self):
        # type: () -> Tuple[int, int]
        tok_id, end_pos = self._Peek()
        self.pos = end_pos  # advance
        return tok_id, end_pos

    def LookAhead(self, regex):
        # Cache the regex compilation.  This could also be LookAheadFor(THEAD)
        # or something.
        pat = self.cache.get(regex)
        if pat is None:
            pat = re.compile(regex)
            self.cache[regex] = pat

        m = pat.match(self.s, self.pos)
        return m is not None


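# A minimal sketch (illustration only) of driving the Lexer by hand;
# _Tokens() below wraps the same loop in a generator.
#
#     lx = Lexer('<p>hi</p>')
#     while True:
#         tok_id, end_pos = lx.Read()
#         if tok_id == Tok.EndOfStream:
#             break
#         log('%s ends at %d', TokenName(tok_id), end_pos)

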
def _Tokens(s, left_pos, right_pos):
    """
    Args:
      s: string to parse
      left_pos, right_pos: Optional span boundaries.
    """
    lx = Lexer(s, left_pos, right_pos)
    while True:
        tok_id, pos = lx.Read()
        yield tok_id, pos
        if tok_id == Tok.EndOfStream:
            break


def ValidTokens(s, left_pos=0, right_pos=-1):
    """Wrapper around _Tokens to prevent callers from having to handle Invalid.

    I'm not combining the two functions because I might want to do a
    'yield' transformation on _Tokens()?  Exceptions might complicate
    the issue?
    """
    pos = left_pos
    for tok_id, end_pos in _Tokens(s, left_pos, right_pos):
        if tok_id == Tok.Invalid:
            raise LexError(s, pos)
        yield tok_id, end_pos
        pos = end_pos


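# Example (illustration only): print every token, raising LexError on bad
# input like a stray & or <.
#
#     for tok_id, end_pos in ValidTokens('<p>hi &amp; bye</p>'):
#         log('%s %d', TokenName(tok_id), end_pos)

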
# Tag names:
#   Match <a or </a
#   Match <h2, but not <2h
#
# HTML 5 doesn't restrict tag names at all
#   https://html.spec.whatwg.org/#toc-syntax
#
# XML allows : - .
#   https://www.w3.org/TR/xml/#NT-NameChar

# Namespaces for MathML, SVG
# XLink, XML, XMLNS
#
# https://infra.spec.whatwg.org/#namespaces
#
# Allow - for td-attrs

_ATTR_VALUE = r'[a-zA-Z0-9_\-]+'  # allow hyphens

# TODO: capture tag name above?
_TAG_RE = re.compile(r'/? \s* (%s)' % _NAME, re.VERBOSE)

# To match href="foo"

_ATTR_RE = re.compile(
    r'''
\s+             # Leading whitespace is required
(%s)            # Attribute name
(?:             # Optional attribute value
  \s* = \s*
  (?:
    " ([^>"]*) "  # double-quoted value
    | (%s)        # unquoted value
                  # TODO: relax this? for href=$foo
  )
)?
''' % (_NAME, _ATTR_VALUE), re.VERBOSE)

TagName, AttrName, UnquotedValue, QuotedValue = range(4)


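# For instance (illustration only), matching ' href="/index.html"' at
# position 0:
#
#     m = _ATTR_RE.match(' href="/index.html"', 0)
#     # m.group(1) == 'href'         (attribute name)
#     # m.group(2) == '/index.html'  (quoted value; group 3 is for unquoted)

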
class TagLexer(object):
    """
    Given a tag like <a href="..."> or <link type="..." />, the TagLexer
    provides a few operations:

    - What is the tag?
    - Iterate through the attributes, giving (name, value_start_pos,
      value_end_pos)
    """

    def __init__(self, s):
        self.s = s
        self.start_pos = -1  # Invalid
        self.end_pos = -1

    def Reset(self, start_pos, end_pos):
        """Reuse instances of this object."""
        self.start_pos = start_pos
        self.end_pos = end_pos

    def TagString(self):
        return self.s[self.start_pos:self.end_pos]

    def TagName(self):
        # First event
        tok_id, start, end = next(self.Tokens())
        return self.s[start:end]

    def GetSpanForAttrValue(self, attr_name):
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these

        events = self.Tokens()
        val = (-1, -1)
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]
                    if name == attr_name:
                        # The value should come next
                        tok_id, start, end = next(events)
                        if tok_id in (QuotedValue, UnquotedValue):
                            # Note: quoted values may have &amp; escapes.
                            # We would need ANOTHER lexer to unescape them,
                            # but help_gen.py and oils_doc.py don't need
                            # that right now.
                            val = start, end
                            break

        except StopIteration:
            pass
        return val

    def GetAttrRaw(self, attr_name):
        """
        Return the raw value, which may still contain &amp;-style escapes.
        """
        # Algorithm: search for QuotedValue or UnquotedValue after AttrName
        # TODO: Could also cache these
        start, end = self.GetSpanForAttrValue(attr_name)
        if start == -1:
            return None
        return self.s[start:end]

    def AllAttrsRaw(self):
        """
        Get a list of pairs [('class', 'foo'), ('href', '?foo=1&bar=2')]

        The quoted values may be escaped.  We would need another lexer to
        unescape them.
        """
        pairs = []
        events = self.Tokens()
        try:
            while True:
                tok_id, start, end = next(events)
                if tok_id == AttrName:
                    name = self.s[start:end]

                    # The value should come next
                    tok_id, start, end = next(events)
                    if tok_id in (QuotedValue, UnquotedValue):
                        # Note: quoted values may have &amp; escapes.  We
                        # would need ANOTHER lexer to unescape them, but we
                        # don't need that for ul-table.
                        val = self.s[start:end]
                        pairs.append((name, val))
        except StopIteration:
            pass
        return pairs

    def Tokens(self):
        """
        Yields a sequence of tokens: Tag (AttrName AttrValue?)*

        Each token is (Type, start_pos, end_pos).

        Note that start and end are NOT redundant!  We skip over some
        unwanted characters.
        """
        m = _TAG_RE.match(self.s, self.start_pos + 1)
        if not m:
            raise RuntimeError("Couldn't find HTML tag in %r" %
                               self.TagString())
        yield TagName, m.start(1), m.end(1)

        pos = m.end(0)

        while True:
            # don't search past the end
            m = _ATTR_RE.match(self.s, pos, self.end_pos)
            if not m:
                # A validating parser would check that > or /> is next --
                # there's no junk
                break

            yield AttrName, m.start(1), m.end(1)

            # Quoted is group 2, unquoted is group 3.
            if m.group(2) is not None:
                yield QuotedValue, m.start(2), m.end(2)
            elif m.group(3) is not None:
                yield UnquotedValue, m.start(3), m.end(3)

            # Skip past the value (and the closing quote, if any)
            pos = m.end(0)


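# A minimal sketch (illustration only): lex the attributes of one tag.  The
# positions passed to Reset() would normally come from the main Lexer; here
# the tag spans the whole string.
#
#     s = '<a href="/" class=big>'
#     tag_lexer = TagLexer(s)
#     tag_lexer.Reset(0, len(s))
#     # tag_lexer.TagName() == 'a'
#     # tag_lexer.GetAttrRaw('href') == '/'
#     # tag_lexer.AllAttrsRaw() == [('href', '/'), ('class', 'big')]

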
def ReadUntilStartTag(it, tag_lexer, tag_name):
    """Find the next <foo>, returning its (start, end) positions.

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.StartTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No start tag %r', tag_name)


def ReadUntilEndTag(it, tag_lexer, tag_name):
    """Find the next </foo>, returning its (start, end) positions.

    Raise ParseError if it's not found.

    tag_lexer is RESET.
    """
    pos = 0
    while True:
        try:
            tok_id, end_pos = next(it)
        except StopIteration:
            break
        tag_lexer.Reset(pos, end_pos)
        if tok_id == Tok.EndTag and tag_lexer.TagName() == tag_name:
            return pos, end_pos

        pos = end_pos

    raise ParseError('No end tag %r', tag_name)


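# For example (illustration only), finding the first <td> in a document:
#
#     s = '<table><tr><td>x</td></tr></table>'
#     it = ValidTokens(s)
#     tag_lexer = TagLexer(s)
#     start, end = ReadUntilStartTag(it, tag_lexer, 'td')
#     # s[start:end] == '<td>'

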
CHAR_ENTITY = {
    'amp': '&',
    'lt': '<',
    'gt': '>',
    'quot': '"',
}


def ToText(s, left_pos=0, right_pos=-1):
    """Given HTML, return text by decoding entities like &gt; and &lt;.

    Used by:
      doctools/oils_doc.py: PygmentsPlugin
      doctools/make_help.py: HelpIndexCards

    In the latter case, we could process some tags, like:

    - Blue Link (not clickable, but still useful)
    - Red X

    That should be html.ToAnsi.
    """
    f = StringIO()
    out = Output(s, f, left_pos, right_pos)

    pos = left_pos
    for tok_id, end_pos in ValidTokens(s, left_pos, right_pos):
        if tok_id == Tok.RawData:
            out.SkipTo(pos)
            out.PrintUntil(end_pos)

        elif tok_id == Tok.CharEntity:  # e.g. &amp;

            entity = s[pos + 1:end_pos - 1]

            out.SkipTo(pos)
            out.Print(CHAR_ENTITY[entity])
            out.SkipTo(end_pos)

        # Not handling these yet
        elif tok_id == Tok.HexChar:
            raise AssertionError('Hex Char %r' % s[pos:pos + 20])

        elif tok_id == Tok.DecChar:
            raise AssertionError('Dec Char %r' % s[pos:pos + 20])

        pos = end_pos

    out.PrintTheRest()
    return f.getvalue()


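# For example (illustration only), entities are decoded and raw text is
# copied through unchanged:
#
#     ToText('1 &lt; 2')  # returns '1 < 2'

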
def main(argv):
    action = argv[1]

    if action == 'well-formed':
        num_tokens = 0
        errors = []
        i = 0
        for line in sys.stdin:
            name = line.strip()
            with open(name) as f:
                contents = f.read()

            lx = ValidTokens(contents)
            try:
                tokens = list(lx)
            except LexError as e:
                log('Error in %r: %s', name, e)
                errors.append((name, e))
            else:
                num_tokens += len(tokens)
                #print('%d %s' % (len(tokens), name))
            i += 1

        log('')
        log('  %d tokens in %d files', num_tokens, i)
        log('  %d errors', len(errors))
        if 0:
            for name, e in errors:
                log('Error in %r: %s', name, e)

    else:
        raise RuntimeError('Invalid action %r' % action)


if __name__ == '__main__':
    main(sys.argv)