#!/usr/bin/env python2
from __future__ import print_function

import unittest

from lazylex import html  # module under test

log = html.log

with open('lazylex/testdata.html') as f:
    TEST_HTML = f.read()


class RegexTest(unittest.TestCase):

    def testDotAll(self):
        import re

        # Note that $ matches end of line, not end of string
        p1 = re.compile(r'.')
        print(p1.match('\n'))

        p2 = re.compile(r'.', re.DOTALL)
        print(p2.match('\n'))

        #p3 = re.compile(r'[.\n]', re.VERBOSE)
        p3 = re.compile(r'[.\n]')
        print(p3.match('\n'))

        print('Negation')

        p4 = re.compile(r'[^>]')
        print(p4.match('\n'))

    def testAttrRe(self):
        _ATTR_RE = html._ATTR_RE
        m = _ATTR_RE.match(' empty= val')
        print(m.groups())


class FunctionsTest(unittest.TestCase):

    def testFindLineNum(self):
        s = 'foo\n' * 3
        for pos in [1, 5, 10, 50]:  # 50 is out of bounds
            line_num = html.FindLineNum(s, pos)
            print(line_num)

    def testToText(self):
        t = html.ToText('<b name="&amp;"> three &lt; four && five </b>')
        self.assertEqual(' three < four && five ', t)


def _MakeTagLexer(s):
    lex = html.TagLexer(s)
    lex.Reset(0, len(s))
    return lex


def _PrintTokens(lex):
    log('')
    log('tag = %r', lex.TagName())
    for tok, start, end in lex.Tokens():
        log('%s %r', tok, lex.s[start:end])


class TagLexerTest(unittest.TestCase):

    def testTagLexer(self):
        # Invalid!
        #lex = _MakeTagLexer('< >')
        #print(lex.Tag())

        lex = _MakeTagLexer('<a>')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a novalue>')
        _PrintTokens(lex)

        # Note: we could have a different HasAttr() method
        # <a novalue> means lex.Get('novalue') == ''
        # https://developer.mozilla.org/en-US/docs/Web/API/Element/hasAttribute
        self.assertEqual('', lex.GetAttrRaw('novalue'))

        lex = _MakeTagLexer('<a href="double quoted">')
        _PrintTokens(lex)

        self.assertEqual('double quoted', lex.GetAttrRaw('href'))
        self.assertEqual(None, lex.GetAttrRaw('oops'))

        lex = _MakeTagLexer('<a href=foo class="bar">')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a href=foo class="bar" />')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a href="?foo=1&amp;bar=2" />')
        self.assertEqual('?foo=1&amp;bar=2', lex.GetAttrRaw('href'))

    def testTagName(self):
        lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual('a', lex.TagName())

    def testAllAttrs(self):
        """
        [('key', 'value')] for all
        """
        # closed
        lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual([('href', 'foo'), ('class', 'bar')],
                         lex.AllAttrsRaw())

        lex = _MakeTagLexer('<a href="?foo=1&amp;bar=2" />')
        self.assertEqual([('href', '?foo=1&amp;bar=2')], lex.AllAttrsRaw())

    def testEmptyMissingValues(self):
        # equivalent to <button disabled="">
        lex = _MakeTagLexer('<button disabled>')
        all_attrs = lex.AllAttrsRaw()
        self.assertEqual([('disabled', '')], all_attrs)

        slices = lex.AllAttrsRawSlice()
        log('slices %s', slices)

        lex = _MakeTagLexer(
            '''<p double="" single='' empty= value missing empty2=>''')
        all_attrs = lex.AllAttrsRaw()
        self.assertEqual([
            ('double', ''),
            ('single', ''),
            ('empty', 'value'),
            ('missing', ''),
            ('empty2', ''),
        ], all_attrs)
        # TODO: should have
        log('all %s', all_attrs)

        slices = lex.AllAttrsRawSlice()
        log('slices %s', slices)

    def testInvalidTag(self):
        try:
            lex = _MakeTagLexer('<a foo=bar !></a>')
            all_attrs = lex.AllAttrsRaw()
        except html.LexError as e:
            print(e)
        else:
            self.fail('Expected LexError')


def _MakeAttrValueLexer(s):
    lex = html.AttrValueLexer(s)
    lex.Reset(0, len(s))
    return lex


class AttrValueLexerTest(unittest.TestCase):

    def testGood(self):
        lex = _MakeAttrValueLexer('?foo=42&amp;bar=99')
        n = lex.NumTokens()
        self.assertEqual(3, n)


def Lex(h, no_special_tags=False):
    print(repr(h))
    tokens = html.ValidTokenList(h, no_special_tags=no_special_tags)
    start_pos = 0
    for tok_id, end_pos in tokens:
        frag = h[start_pos:end_pos]
        log('%d %s %r', end_pos, html.TokenName(tok_id), frag)
        start_pos = end_pos
    return tokens


class LexerTest(unittest.TestCase):

    # IndexLinker in devtools/make_help.py
    # <pre> sections in doc/html_help.py
    # TocExtractor in devtools/cmark.py

    def testPstrip(self):
        """Remove anything like this.

        <p><pstrip> </pstrip></p>
        """
        pass

    def testCommentParse(self):
        n = len(TEST_HTML)
        tokens = Lex(TEST_HTML)

    def testCommentParse2(self):

        Tok = html.Tok
        h = '''
        hi <!-- line 1
                line 2 --><br/>'''
        tokens = Lex(h)

        self.assertEqual(
            [
                (Tok.RawData, 12),
                (Tok.Comment, 50),  # <!-- line 1 ... line 2 -->
                (Tok.StartEndTag, 55),  # <br/>
                (Tok.EndOfStream, 55),
            ],
            tokens)

    def testProcessingInstruction(self):
        # <?xml ?> header
        Tok = html.Tok
        h = 'hi <? err ?>'
        tokens = Lex(h)

        self.assertEqual(
            [
                (Tok.RawData, 3),
                (Tok.Processing, 12),  # <? err ?>
                (Tok.EndOfStream, 12),
            ],
            tokens)

    def testScriptStyle(self):
        Tok = html.Tok
        h = '''
        hi <script src=""> if (x < 1 && y > 2 ) { console.log(""); }
        </script>
        '''
        tokens = Lex(h)

        expected = [
            (Tok.RawData, 12),
            (Tok.StartTag, 27),  # <script src="">
            (Tok.HtmlCData, 78),  # JavaScript code is HTML CData
            (Tok.EndTag, 87),  # </script>
            (Tok.RawData, 96),  # \n
            (Tok.EndOfStream, 96),
        ]
        self.assertEqual(expected, tokens)

        # Tag names are matched case-insensitively
        tokens = Lex(h.replace('script', 'scrIPT'))
        self.assertEqual(expected, tokens)

    def testScriptStyleXml(self):
        Tok = html.Tok
        h = 'hi <script src=""> &lt; </script>'
        # XML mode
        tokens = Lex(h, no_special_tags=True)

        self.assertEqual(
            [
                (Tok.RawData, 3),
                (Tok.StartTag, 18),  # <script src="">
                (Tok.RawData, 19),  # space
                (Tok.CharEntity, 23),  # &lt;
                (Tok.RawData, 24),  # space
                (Tok.EndTag, 33),  # </script>
                (Tok.EndOfStream, 33),
            ],
            tokens)

    def testCData(self):
        Tok = html.Tok

        # from
        # /home/andy/src/languages/Python-3.11.5/Lib/test/xmltestdata/c14n-20/inC14N4.xml
        h = '<compute><![CDATA[value>"0" && value<"10" ?"valid":"error"]]></compute>'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 9),
            (Tok.CData, 61),
            (Tok.EndTag, 71),
            (Tok.EndOfStream, 71),
        ], tokens)

    def testEntity(self):
        Tok = html.Tok

        # from
        # /home/andy/src/Python-3.12.4/Lib/test/xmltestdata/c14n-20/inC14N5.xml
        h = '&ent1;, &ent2;!'

        tokens = Lex(h)

        self.assertEqual([
            (Tok.CharEntity, 6),
            (Tok.RawData, 8),
            (Tok.CharEntity, 14),
            (Tok.RawData, 15),
            (Tok.EndOfStream, 15),
        ], tokens)

    def testStartTag(self):
        Tok = html.Tok

        h = '<a>hi</a>'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 3),
            (Tok.RawData, 5),
            (Tok.EndTag, 9),
            (Tok.EndOfStream, 9),
        ], tokens)

        # Make sure we don't consume too much
        h = '<a><source>1.7</source></a>'

        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 3),
            (Tok.StartTag, 11),
            (Tok.RawData, 14),
            (Tok.EndTag, 23),
            (Tok.EndTag, 27),
            (Tok.EndOfStream, 27),
        ], tokens)

        return

        h = '''
        <configuration>
          <source>1.7</source>
        </configuration>'''

        tokens = Lex(h)

        self.assertEqual([
            (Tok.RawData, 9),
            (Tok.StartTag, 24),
            (Tok.RawData, 9),
            (Tok.EndOfStream, 9),
        ], tokens)

    def testBad(self):
        Tok = html.Tok

        h = '&'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.BadAmpersand, 1),
            (Tok.EndOfStream, 1),
        ], tokens)

        h = '>'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.BadGreaterThan, 1),
            (Tok.EndOfStream, 1),
        ], tokens)

    def testInvalid(self):
        Tok = html.Tok

        for s in INVALID_LEX:
            try:
                tokens = html.ValidTokenList(s)
            except html.LexError as e:
                print(e)
            else:
                self.fail('Expected LexError %r' % s)

    def testValid(self):
        for s, _ in VALID_LEX:
            tokens = Lex(s)
            print()


INVALID_LEX = [
    '<a><',
    '&amp<',
    '&<',
    # Hm > is allowed?  (See the note after this list.)
    #'a > b',
    'a < b',
    '<!-- unfinished comment',
    '<? unfinished processing',
    '</div bad=attr> <a> <b>',

    # not allowed, but 3 > 4 is allowed
    '<a> 3 < 4 </a>',
    # Not a CDATA tag
    '<STYLEz><</STYLEz>',
]
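
# Note on the '# Hm > is allowed?' question above: LexerTest.testBad shows
# that a bare '>' lexes as Tok.BadGreaterThan rather than raising, while a
# bare '<' (as in 'a < b') is always a LexError.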

SKIP = 0
UNCHANGED = 1
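
# Expected-output convention for VALID_LEX and VALID_PARSE, used by
# XmlTest.testValid at the bottom of this file: UNCHANGED means html.ToXml()
# should return the input verbatim, and '' means the case is skipped for now.
# (SKIP is defined but not referenced yet.)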

VALID_LEX = [
    # TODO: convert these to XML
    ('<foo></foo>', UNCHANGED),
    ('<foo x=y></foo>', ''),
    #('<foo x="&"></foo>', '<foo x="&amp;"></foo>'),
    ('<foo x="&"></foo>', ''),

    # Allowed with BadAmpersand
    ('<p> x & y </p>', '<p> x &amp; y </p>'),
]

INVALID_PARSE = [
    '<a></b>',
    '<a>',  # missing closing tag
    '<meta></meta>',  # this is a self-closing tag
]

VALID_PARSE = [
    ('<!DOCTYPE html>\n', ''),
    ('<!DOCTYPE>', ''),

    # empty strings
    ('<p x=""></p>', UNCHANGED),
    ("<p x=''></p>", UNCHANGED),
    ('<self-closing a="b" />', UNCHANGED),

    # We could also normalize CDATA?
    # Note that CDATA has an escaping problem: a literal ]]> has to be handled
    # by splitting/concatenation, which just "pushes the problem around" (see
    # the _CDataWrap sketch after this list).
    # So I think it's better to use ONE kind of escaping, which is &lt;
    ('<script><![CDATA[ <wtf> >< ]]></script>', UNCHANGED),

    # allowed, but 3 < 4 is not allowed
    ('<a> 3 > 4 </a>', '<a> 3 &gt; 4 </a>'),
    # allowed, but 3 > 4 is not allowed
    ('<p x="3 < 4"></p>', ''),
    ('<b><a href="foo">link</a></b>', UNCHANGED),

    # TODO: should be self-closing
    #('<meta><a></a>', '<meta/><a></a>'),
    ('<meta><a></a>', ''),

    # no attribute
    ('<button disabled></button>', ''),
    ('<button disabled=></button>', ''),
    ('<button disabled= ></button>', ''),

    # single quoted is pretty common
    ("<a href='single'></a>", ''),

    # Conceding to reality - I used these myself
    ('<a href=ble.sh></a>', ''),
    ('<a href=foo.html></a>', ''),
    ('<foo x="&"></foo>', ''),

    # caps
    ('<foo></FOO>', ''),
    ('<Foo></fOO>', ''),

    # capital VOID tag
    ('<META><a></a>', ''),
    ('<script><</script>', ''),
    # case-insensitive matching of special tags
    ('<SCRipt><</SCRipt>', ''),
    ('<SCRIPT><</SCRIPT>', ''),
    ('<STYLE><</STYLE>', ''),
    #'<SCRipt><</script>',

    # Note: Python HTMLParser.py does DYNAMIC compilation of regex with the
    # re.I flag to handle this!  Gah, I want something faster.
    #'<script><</SCRIPT>',

    # TODO: Test <svg> and <math> ?
]
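

# A minimal sketch of the ']]>' escaping problem mentioned in the CDATA
# comment inside VALID_PARSE above.  A literal ']]>' cannot appear inside one
# CDATA section, so it has to be split across two sections.  _CDataWrap is a
# hypothetical helper for illustration only; it is not part of lazylex.html.
def _CDataWrap(s):
    # ']]>' is rewritten as ']]' + ']]><![CDATA[' + '>', so neither piece can
    # terminate its section early, e.g.
    #   _CDataWrap('a]]>b') == '<![CDATA[a]]]]><![CDATA[>b]]>'
    return '<![CDATA[%s]]>' % s.replace(']]>', ']]]]><![CDATA[>')
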

VALID_XML = [
    '<meta></meta>',
]

INVALID_TAG_LEX = [
    # not allowed, but 3 < 4 is allowed
    '<p x="3 > 4"></p>',
    # same thing
    '<a href=">"></a>',
    '<a foo=bar !></a>',  # bad attr
]


class ValidateTest(unittest.TestCase):

    def testInvalid(self):
        counters = html.Counters()
        for s in INVALID_LEX + INVALID_TAG_LEX:
            try:
                html.Validate(s, html.BALANCED_TAGS, counters)
            except html.LexError as e:
                print(e)
            else:
                self.fail('Expected LexError %r' % s)

        for s in INVALID_PARSE:
            try:
                html.Validate(s, html.BALANCED_TAGS, counters)
            except html.ParseError as e:
                print(e)
            else:
                self.fail('Expected ParseError')

    def testValid(self):
        counters = html.Counters()
        for s, _ in VALID_PARSE:
            html.Validate(s, html.BALANCED_TAGS, counters)
            print('HTML5 %r' % s)
        print('HTML5 attrs %r' % counters.debug_attrs)

    def testValidXml(self):
        counters = html.Counters()
        for s in VALID_XML:
            html.Validate(s, html.BALANCED_TAGS | html.NO_SPECIAL_TAGS,
                          counters)
            print('XML %r' % s)
        print('XML attrs %r' % counters.debug_attrs)


class XmlTest(unittest.TestCase):

    def testValid(self):
        counters = html.Counters()
        for h, expected_xml in VALID_LEX + VALID_PARSE:
            actual = html.ToXml(h)
            if expected_xml == UNCHANGED:  # Unchanged
                self.assertEqual(h, actual)
            elif expected_xml == '':  # Skip
                pass
            else:
                self.assertEqual(expected_xml, actual)


if __name__ == '__main__':
    unittest.main()