lazylex/html_test.py

#!/usr/bin/env python2
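"""Tests for lazylex/html.py: the tag and attribute lexers, ValidTokenList, Validate, and ToXml."""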
from __future__ import print_function

import unittest

from lazylex import html  # module under test
from typing import List
from typing import Tuple

log = html.log

with open('lazylex/testdata.html') as f:
    TEST_HTML = f.read()

class RegexTest(unittest.TestCase):

    def testDotAll(self):
        # type: () -> None
        import re

        # Note that $ matches end of line, not end of string
        p1 = re.compile(r'.')
        print(p1.match('\n'))

        p2 = re.compile(r'.', re.DOTALL)
        print(p2.match('\n'))

        #p3 = re.compile(r'[.\n]', re.VERBOSE)
        p3 = re.compile(r'[.\n]')
        print(p3.match('\n'))

        print('Negation')

        p4 = re.compile(r'[^>]')
        print(p4.match('\n'))

    def testAttrRe(self):
        # type: () -> None
        _ATTR_RE = html._ATTR_RE
        m = _ATTR_RE.match(' empty= val')
        print(m.groups())

class FunctionsTest(unittest.TestCase):

    def testFindLineNum(self):
        # type: () -> None
        s = 'foo\n' * 3
        for pos in [1, 5, 10, 50]:  # out of bounds
            line_num = html.FindLineNum(s, pos)
            print(line_num)

    def testToText(self):
        # type: () -> None
        t = html.ToText('<b name="&amp;"> three &lt; four && five </b>')
        self.assertEqual(' three < four && five ', t)

def _MakeTagLexer(s):
    # type: (str) -> html.TagLexer
    lex = html.TagLexer(s)
    lex.Reset(0, len(s))
    return lex


def _PrintTokens(lex):
    # type: (html.TagLexer) -> None
    log('')
    log('tag = %r', lex.TagName())
    for tok, start, end in lex.Tokens():
        log('%s %r', tok, lex.s[start:end])

class TagLexerTest(unittest.TestCase):

    def testTagLexer(self):
        # type: () -> None
        # Invalid!
        #lex = _MakeTagLexer('< >')
        #print(lex.Tag())

        lex = _MakeTagLexer('<a>')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a novalue>')
        _PrintTokens(lex)

        # Note: we could have a different HasAttr() method
        # <a novalue> means lex.Get('novalue') == ''
        # https://developer.mozilla.org/en-US/docs/Web/API/Element/hasAttribute
        self.assertEqual('', lex.GetAttrRaw('novalue'))

        lex = _MakeTagLexer('<a href="double quoted">')
        _PrintTokens(lex)

        self.assertEqual('double quoted', lex.GetAttrRaw('href'))
        self.assertEqual(None, lex.GetAttrRaw('oops'))

        lex = _MakeTagLexer('<a href=foo class="bar">')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a href=foo class="bar" />')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a href="?foo=1&amp;bar=2" />')
        self.assertEqual('?foo=1&amp;bar=2', lex.GetAttrRaw('href'))

    def testTagName(self):
        # type: () -> None
        lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual('a', lex.TagName())

    def testAllAttrs(self):
        # type: () -> None
        """
        [('key', 'value')] for all
        """
        # closed
        lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual([('href', 'foo'), ('class', 'bar')],
                         lex.AllAttrsRaw())

        lex = _MakeTagLexer('<a href="?foo=1&amp;bar=2" />')
        self.assertEqual([('href', '?foo=1&amp;bar=2')], lex.AllAttrsRaw())

    def testEmptyMissingValues(self):
        # type: () -> None
        # equivalent to <button disabled="">
        lex = _MakeTagLexer('<button disabled>')
        all_attrs = lex.AllAttrsRaw()
        self.assertEqual([('disabled', '')], all_attrs)

        slices = lex.AllAttrsRawSlice()
        log('slices %s', slices)

        lex = _MakeTagLexer(
            '''<p double="" single='' empty= value missing empty2=>''')
        all_attrs = lex.AllAttrsRaw()
        self.assertEqual([
            ('double', ''),
            ('single', ''),
            ('empty', 'value'),
            ('missing', ''),
            ('empty2', ''),
        ], all_attrs)
        # TODO: should have
        log('all %s', all_attrs)

        slices = lex.AllAttrsRawSlice()
        log('slices %s', slices)

    def testInvalidTag(self):
        # type: () -> None
        try:
            lex = _MakeTagLexer('<a foo=bar !></a>')
            all_attrs = lex.AllAttrsRaw()
        except html.LexError as e:
            print(e)
        else:
            self.fail('Expected LexError')

def _MakeAttrValueLexer(s):
    # type: (str) -> html.AttrValueLexer
    lex = html.AttrValueLexer(s)
    lex.Reset(0, len(s))
    return lex


class AttrValueLexerTest(unittest.TestCase):

    def testGood(self):
        # type: () -> None
        lex = _MakeAttrValueLexer('?foo=42&amp;bar=99')
        n = lex.NumTokens()
        self.assertEqual(3, n)

def Lex(h, no_special_tags=False):
    # type: (str, bool) -> List[Tuple[int, int]]
    """Tokenize h with html.ValidTokenList, logging each token for debugging."""
    print(repr(h))
    tokens = html.ValidTokenList(h, no_special_tags=no_special_tags)
    start_pos = 0
    for tok_id, end_pos in tokens:
        frag = h[start_pos:end_pos]
        log('%d %s %r', end_pos, html.TokenName(tok_id), frag)
        start_pos = end_pos
    return tokens

class LexerTest(unittest.TestCase):

    # IndexLinker in devtools/make_help.py
    # <pre> sections in doc/html_help.py
    # TocExtractor in devtools/cmark.py

    def testPstrip(self):
        # type: () -> None
        """Remove anything like this.

        <p><pstrip> </pstrip></p>
        """
        pass

    def testCommentParse(self):
        # type: () -> None
        n = len(TEST_HTML)
        tokens = Lex(TEST_HTML)

    def testCommentParse2(self):
        # type: () -> None

        Tok = html.Tok
        h = '''
        hi <!-- line 1
                line 2 --><br/>'''
        tokens = Lex(h)

        self.assertEqual(
            [
                (Tok.RawData, 12),
                (Tok.Comment, 50),  # <!-- ... -->
                (Tok.StartEndTag, 55),
                (Tok.EndOfStream, 55),
            ],
            tokens)

    def testProcessingInstruction(self):
        # type: () -> None
        # <?xml ?> header
        Tok = html.Tok
        h = 'hi <? err ?>'
        tokens = Lex(h)

        self.assertEqual(
            [
                (Tok.RawData, 3),
                (Tok.Processing, 12),  # <? err ?>
                (Tok.EndOfStream, 12),
            ],
            tokens)

    def testScriptStyle(self):
        # type: () -> None
        Tok = html.Tok
        h = '''
        hi <script src=""> if (x < 1 && y > 2 ) { console.log(""); }
        </script>
        '''
        tokens = Lex(h)

        expected = [
            (Tok.RawData, 12),
            (Tok.StartTag, 27),  # <script>
            (Tok.HtmlCData, 78),  # JavaScript code is HTML CData
            (Tok.EndTag, 87),  # </script>
            (Tok.RawData, 96),  # \n
            (Tok.EndOfStream, 96),
        ]
        self.assertEqual(expected, tokens)

        # Test case-insensitive matching of the tag name
        tokens = Lex(h.replace('script', 'scrIPT'))
        self.assertEqual(expected, tokens)

    def testScriptStyleXml(self):
        # type: () -> None
        Tok = html.Tok
        h = 'hi <script src=""> &lt; </script>'
        # XML mode: <script> gets no special treatment
        tokens = Lex(h, no_special_tags=True)

        self.assertEqual(
            [
                (Tok.RawData, 3),
                (Tok.StartTag, 18),  # <script src="">
                (Tok.RawData, 19),  # space
                (Tok.CharEntity, 23),  # &lt;
                (Tok.RawData, 24),  # space
                (Tok.EndTag, 33),  # </script>
                (Tok.EndOfStream, 33),
            ],
            tokens)

    def testCData(self):
        # type: () -> None
        Tok = html.Tok

        # from
        # /home/andy/src/languages/Python-3.11.5/Lib/test/xmltestdata/c14n-20/inC14N4.xml
        h = '<compute><![CDATA[value>"0" && value<"10" ?"valid":"error"]]></compute>'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 9),
            (Tok.CData, 61),
            (Tok.EndTag, 71),
            (Tok.EndOfStream, 71),
        ], tokens)

    def testEntity(self):
        # type: () -> None
        Tok = html.Tok

        # from
        # /home/andy/src/Python-3.12.4/Lib/test/xmltestdata/c14n-20/inC14N5.xml
        h = '&ent1;, &ent2;!'

        tokens = Lex(h)

        self.assertEqual([
            (Tok.CharEntity, 6),
            (Tok.RawData, 8),
            (Tok.CharEntity, 14),
            (Tok.RawData, 15),
            (Tok.EndOfStream, 15),
        ], tokens)

    def testStartTag(self):
        # type: () -> None
        Tok = html.Tok

        h = '<a>hi</a>'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 3),
            (Tok.RawData, 5),
            (Tok.EndTag, 9),
            (Tok.EndOfStream, 9),
        ], tokens)

        # Make sure we don't consume too much
        h = '<a><source>1.7</source></a>'

        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 3),
            (Tok.StartTag, 11),
            (Tok.RawData, 14),
            (Tok.EndTag, 23),
            (Tok.EndTag, 27),
            (Tok.EndOfStream, 27),
        ], tokens)

        # Early return: the multi-line case below isn't checked yet
        return

        h = '''
        <configuration>
          <source>1.7</source>
        </configuration>'''

        tokens = Lex(h)

        self.assertEqual([
            (Tok.RawData, 9),
            (Tok.StartTag, 24),
            (Tok.RawData, 9),
            (Tok.EndOfStream, 9),
        ], tokens)

    def testBad(self):
        # type: () -> None
        Tok = html.Tok

        h = '&'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.BadAmpersand, 1),
            (Tok.EndOfStream, 1),
        ], tokens)

        h = '>'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.BadGreaterThan, 1),
            (Tok.EndOfStream, 1),
        ], tokens)

    def testInvalid(self):
        # type: () -> None
        Tok = html.Tok

        for s in INVALID_LEX:
            try:
                tokens = html.ValidTokenList(s)
            except html.LexError as e:
                print(e)
            else:
                self.fail('Expected LexError %r' % s)

    def testValid(self):
        # type: () -> None
        for s, _ in VALID_LEX:
            tokens = Lex(s)
            print()

INVALID_LEX = [
    '<a><',
    '&amp<',
    '&<',
    # Hm > is allowed?
    #'a > b',
    'a < b',
    '<!-- unfinished comment',
    '<? unfinished processing',
    '</div bad=attr> <a> <b>',

    # not allowed, but 3 > 4 is allowed
    '<a> 3 < 4 </a>',
    # Not a CDATA tag
    '<STYLEz><</STYLEz>',
]

SKIP = 0
UNCHANGED = 1

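# Entries in VALID_LEX and VALID_PARSE below are (input, expected_xml) pairs,
# checked by XmlTest.testValid with html.ToXml().  UNCHANGED means the output
# must equal the input; '' means the XML expectation is skipped (SKIP is
# defined above, but the tables currently use '' for that purpose).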
VALID_LEX = [
    # TODO: convert these to XML
    ('<foo></foo>', UNCHANGED),
    ('<foo x=y></foo>', ''),
    #('<foo x="&"></foo>', '<foo x="&amp;"></foo>'),
    ('<foo x="&"></foo>', ''),

    # Allowed with BadAmpersand
    ('<p> x & y </p>', '<p> x &amp; y </p>'),
]

INVALID_PARSE = [
    '<a></b>',
    '<a>',  # missing closing tag
    '<meta></meta>',  # this is a self-closing tag
]

VALID_PARSE = [
    ('<!DOCTYPE html>\n', ''),
    ('<!DOCTYPE>', ''),

    # empty strings
    ('<p x=""></p>', UNCHANGED),
    ("<p x=''></p>", UNCHANGED),
    ('<self-closing a="b" />', UNCHANGED),

    # We could also normalize CDATA?
    # Note that CDATA has an escaping problem: you need to handle ]]> with
    # concatenation.  It just "pushes the problem around".
    # So I think it's better to use ONE kind of escaping, which is &lt;
    ('<script><![CDATA[ <wtf> >< ]]></script>', UNCHANGED),

    # allowed, but 3 < 4 is not allowed
    ('<a> 3 > 4 </a>', '<a> 3 &gt; 4 </a>'),
    # allowed, but 3 > 4 is not allowed
    ('<p x="3 < 4"></p>', ''),
    ('<b><a href="foo">link</a></b>', UNCHANGED),

    # TODO: should be self-closing
    #('<meta><a></a>', '<meta/><a></a>'),
    ('<meta><a></a>', ''),

    # no attribute value
    ('<button disabled></button>', ''),
    ('<button disabled=></button>', ''),
    ('<button disabled= ></button>', ''),

    # single quoted is pretty common
    ("<a href='single'></a>", ''),

    # Conceding to reality - I used these myself
    ('<a href=ble.sh></a>', ''),
    ('<a href=foo.html></a>', ''),
    ('<foo x="&"></foo>', ''),

    # caps
    ('<foo></FOO>', ''),
    ('<Foo></fOO>', ''),

    # capital VOID tag
    ('<META><a></a>', ''),
    ('<script><</script>', ''),
    # case-insensitive matching of special tags
    ('<SCRipt><</SCRipt>', ''),
    ('<SCRIPT><</SCRIPT>', ''),
    ('<STYLE><</STYLE>', ''),
    #'<SCRipt><</script>',

    # Note: Python HTMLParser.py does DYNAMIC compilation of regex with the
    # re.I flag to handle this!  Gah, I want something faster.
    #'<script><</SCRIPT>',

    # TODO: Test <svg> and <math> ?
]

VALID_XML = [
    '<meta></meta>',
]

INVALID_TAG_LEX = [
    # not allowed, but 3 < 4 is allowed
    '<p x="3 > 4"></p>',
    # same thing
    '<a href=">"></a>',
    '<a foo=bar !></a>',  # bad attr
]

class ValidateTest(unittest.TestCase):

    def testInvalid(self):
        # type: () -> None
        counters = html.Counters()
        for s in INVALID_LEX + INVALID_TAG_LEX:
            try:
                html.Validate(s, html.BALANCED_TAGS, counters)
            except html.LexError as e:
                print(e)
            else:
                self.fail('Expected LexError %r' % s)

        for s in INVALID_PARSE:
            try:
                html.Validate(s, html.BALANCED_TAGS, counters)
            except html.ParseError as e:
                print(e)
            else:
                self.fail('Expected ParseError')

    def testValid(self):
        # type: () -> None
        counters = html.Counters()
        for s, _ in VALID_PARSE:
            html.Validate(s, html.BALANCED_TAGS, counters)
            print('HTML5 %r' % s)
            print('HTML5 attrs %r' % counters.debug_attrs)

    def testValidXml(self):
        # type: () -> None
        counters = html.Counters()
        for s in VALID_XML:
            html.Validate(s, html.BALANCED_TAGS | html.NO_SPECIAL_TAGS,
                          counters)
            print('XML %r' % s)
            print('XML attrs %r' % counters.debug_attrs)

class XmlTest(unittest.TestCase):

    def testValid(self):
        # type: () -> None
        counters = html.Counters()
        for h, expected_xml in VALID_LEX + VALID_PARSE:
            actual = html.ToXml(h)
            if expected_xml == UNCHANGED:  # Unchanged
                self.assertEqual(h, actual)
            elif expected_xml == '':  # Skip
                pass
            else:
                self.assertEqual(expected_xml, actual)

if __name__ == '__main__':
    unittest.main()