#!/usr/bin/env python2
from __future__ import print_function

import unittest

from lazylex import html  # module under test

log = html.log

with open('lazylex/testdata.html') as f:
    TEST_HTML = f.read()


class RegexTest(unittest.TestCase):

    def testDotAll(self):
        import re

        # Note: '.' does not match a newline unless re.DOTALL is given
        p1 = re.compile(r'.')
        print(p1.match('\n'))

        p2 = re.compile(r'.', re.DOTALL)
        print(p2.match('\n'))

        #p3 = re.compile(r'[.\n]', re.VERBOSE)
        p3 = re.compile(r'[.\n]')
        print(p3.match('\n'))

        print('Negation')

        p4 = re.compile(r'[^>]')
        print(p4.match('\n'))
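
        # Sketch of the behavior demonstrated above (standard Python re
        # semantics, not part of lazylex): '.' does not match a newline
        # unless re.DOTALL is given, '.' inside a character class is literal
        # (but \n is in the class), and '[^>]' matches anything except '>'.
        self.assertEqual(None, p1.match('\n'))
        self.assertNotEqual(None, p2.match('\n'))
        self.assertNotEqual(None, p3.match('\n'))
        self.assertNotEqual(None, p4.match('\n'))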

    def testAttrRe(self):
        _ATTR_RE = html._ATTR_RE
        m = _ATTR_RE.match(' empty= val')
        print(m.groups())


class FunctionsTest(unittest.TestCase):

    def testFindLineNum(self):
        s = 'foo\n' * 3
        for pos in [1, 5, 10, 50]:  # out of bounds
            line_num = html.FindLineNum(s, pos)
            print(line_num)

    def testToText(self):
        t = html.ToText('<b name="&"> three &lt; four && five </b>')
        self.assertEqual(' three < four && five ', t)


def _MakeTagLexer(s):
    lex = html.TagLexer(s)
    lex.Reset(0, len(s))
    return lex


def _PrintTokens(lex):
    log('')
    log('tag = %r', lex.TagName())
    for tok, start, end in lex.Tokens():
        log('%s %r', tok, lex.s[start:end])


class TagLexerTest(unittest.TestCase):

    def testTagLexer(self):
        # Invalid!
        #lex = _MakeTagLexer('< >')
        #print(lex.Tag())

        lex = _MakeTagLexer('<a>')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a novalue>')
        _PrintTokens(lex)

        # Note: we could have a different HasAttr() method
        # <a novalue> means lex.Get('novalue') == ''
        # https://developer.mozilla.org/en-US/docs/Web/API/Element/hasAttribute
        self.assertEqual('', lex.GetAttrRaw('novalue'))
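
        # Sketch of a HasAttr()-style check built on the existing API (not a
        # real TagLexer method): value-less attributes come back as '', while
        # missing attributes come back as None.
        self.assertTrue(lex.GetAttrRaw('novalue') is not None)
        self.assertTrue(lex.GetAttrRaw('other') is None)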

        lex = _MakeTagLexer('<a href="double quoted">')
        _PrintTokens(lex)

        self.assertEqual('double quoted', lex.GetAttrRaw('href'))
        self.assertEqual(None, lex.GetAttrRaw('oops'))

        lex = _MakeTagLexer('<a href=foo class="bar">')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a href=foo class="bar" />')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a href="?foo=1&bar=2" />')
        self.assertEqual('?foo=1&bar=2', lex.GetAttrRaw('href'))

    def testTagName(self):
        lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual('a', lex.TagName())

    def testAllAttrs(self):
        """
        [('key', 'value')] for all
        """
        # closed
        lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual([('href', 'foo'), ('class', 'bar')],
                         lex.AllAttrsRaw())

        lex = _MakeTagLexer('<a href="?foo=1&bar=2" />')
        self.assertEqual([('href', '?foo=1&bar=2')], lex.AllAttrsRaw())

    def testEmptyMissingValues(self):
        # equivalent to <button disabled="">
        lex = _MakeTagLexer('<button disabled>')
        all_attrs = lex.AllAttrsRaw()
        self.assertEqual([('disabled', '')], all_attrs)

        slices = lex.AllAttrsRawSlice()
        log('slices %s', slices)

        lex = _MakeTagLexer(
            '''<p double="" single='' empty= value missing empty2=>''')
        all_attrs = lex.AllAttrsRaw()
        self.assertEqual([
            ('double', ''),
            ('single', ''),
            ('empty', 'value'),
            ('missing', ''),
            ('empty2', ''),
        ], all_attrs)
        # TODO: should have
        log('all %s', all_attrs)

        slices = lex.AllAttrsRawSlice()
        log('slices %s', slices)

    def testInvalidTag(self):
        try:
            lex = _MakeTagLexer('<a foo=bar !></a>')
            all_attrs = lex.AllAttrsRaw()
        except html.LexError as e:
            print(e)
        else:
            self.fail('Expected LexError')


def _MakeAttrValueLexer(s):
    lex = html.AttrValueLexer(s)
    lex.Reset(0, len(s))
    return lex


class AttrValueLexerTest(unittest.TestCase):

    def testGood(self):
        lex = _MakeAttrValueLexer('?foo=42&bar=99')
        n = lex.NumTokens()
        self.assertEqual(3, n)


def Lex(h, no_special_tags=False):
    print(repr(h))
    tokens = html.ValidTokenList(h, no_special_tags=no_special_tags)
    start_pos = 0
    for tok_id, end_pos in tokens:
        frag = h[start_pos:end_pos]
        log('%d %s %r', end_pos, html.TokenName(tok_id), frag)
        start_pos = end_pos
    return tokens
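
# Note on the token stream used in the tests below: ValidTokenList() returns
# (tok_id, end_pos) pairs, and a token's start is the previous token's end
# (0 for the first one), which is why Lex() above tracks start_pos itself.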


class LexerTest(unittest.TestCase):

    # IndexLinker in devtools/make_help.py
    # <pre> sections in doc/html_help.py
    # TocExtractor in devtools/cmark.py

    def testPstrip(self):
        """Remove anything like this.

        <p><pstrip> </pstrip></p>
        """
        pass

    def testCommentParse(self):
        n = len(TEST_HTML)
        tokens = Lex(TEST_HTML)

    def testCommentParse2(self):

        Tok = html.Tok
        h = '''
        hi <!-- line 1
                line 2 --><br/>'''
        tokens = Lex(h)

        self.assertEqual(
            [
                (Tok.RawData, 12),
                (Tok.Comment, 50),  # <!-- line 1 ... line 2 -->
                (Tok.StartEndTag, 55),
                (Tok.EndOfStream, 55),
            ],
            tokens)

    def testProcessingInstruction(self):
        # <?xml ?> header
        Tok = html.Tok
        h = 'hi <? err ?>'
        tokens = Lex(h)

        self.assertEqual(
            [
                (Tok.RawData, 3),
                (Tok.Processing, 12),  # <? err ?>
                (Tok.EndOfStream, 12),
            ],
            tokens)

    def testScriptStyle(self):
        Tok = html.Tok
        h = '''
        hi <script src=""> if (x < 1 && y > 2 ) { console.log(""); }
        </script>
        '''
        tokens = Lex(h)

        expected = [
            (Tok.RawData, 12),
            (Tok.StartTag, 27),  # <script>
            (Tok.HtmlCData, 78),  # JavaScript code is HTML CData
            (Tok.EndTag, 87),  # </script>
            (Tok.RawData, 96),  # \n
            (Tok.EndOfStream, 96),
        ]
        self.assertEqual(expected, tokens)

        # Test case matching
        tokens = Lex(h.replace('script', 'scrIPT'))
        self.assertEqual(expected, tokens)

    def testScriptStyleXml(self):
        Tok = html.Tok
        h = 'hi <script src=""> &lt; </script>'
        # XML mode
        tokens = Lex(h, no_special_tags=True)

        self.assertEqual(
            [
                (Tok.RawData, 3),
                (Tok.StartTag, 18),  # <script>
                (Tok.RawData, 19),  # space
                (Tok.CharEntity, 23),  # &lt;
                (Tok.RawData, 24),  # space
                (Tok.EndTag, 33),  # </script>
                (Tok.EndOfStream, 33),
            ],
            tokens)

    def testCData(self):
        Tok = html.Tok

        # from
        # /home/andy/src/languages/Python-3.11.5/Lib/test/xmltestdata/c14n-20/inC14N4.xml
        h = '<compute><![CDATA[value>"0" && value<"10" ?"valid":"error"]]></compute>'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 9),
            (Tok.CData, 61),
            (Tok.EndTag, 71),
            (Tok.EndOfStream, 71),
        ], tokens)

    def testEntity(self):
        Tok = html.Tok

        # from
        # /home/andy/src/Python-3.12.4/Lib/test/xmltestdata/c14n-20/inC14N5.xml
        h = '&ent1;, &ent2;!'

        tokens = Lex(h)

        self.assertEqual([
            (Tok.CharEntity, 6),
            (Tok.RawData, 8),
            (Tok.CharEntity, 14),
            (Tok.RawData, 15),
            (Tok.EndOfStream, 15),
        ], tokens)

    def testStartTag(self):
        Tok = html.Tok

        h = '<a>hi</a>'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 3),
            (Tok.RawData, 5),
            (Tok.EndTag, 9),
            (Tok.EndOfStream, 9),
        ], tokens)

        # Make sure we don't consume too much
        h = '<a><source>1.7</source></a>'

        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 3),
            (Tok.StartTag, 11),
            (Tok.RawData, 14),
            (Tok.EndTag, 23),
            (Tok.EndTag, 27),
            (Tok.EndOfStream, 27),
        ], tokens)

        return

        h = '''
        <configuration>
          <source>1.7</source>
        </configuration>'''

        tokens = Lex(h)

        self.assertEqual([
            (Tok.RawData, 9),
            (Tok.StartTag, 24),
            (Tok.RawData, 9),
            (Tok.EndOfStream, 9),
        ], tokens)

    def testInvalid(self):
        Tok = html.Tok

        for s in INVALID_LEX:
            try:
                tokens = html.ValidTokenList(s)
            except html.LexError as e:
                print(e)
            else:
                self.fail('Expected LexError %r' % s)

    def testValid(self):
        for s, _ in VALID_LEX:
            tokens = Lex(s)
            print()


INVALID_LEX = [
    '<a><',
    '&<',
    '&<',
    # Hm > is allowed?
    #'a > b',
    'a < b',
    '<!-- unfinished comment',
    '<? unfinished processing',
    '</div bad=attr> <a> <b>',

    # not allowed, but 3 > 4 is allowed
    '<a> 3 < 4 </a>',
    # Not a CDATA tag
    '<STYLEz><</STYLEz>',
]

VALID_LEX = [
    # TODO: convert these to XML
    ('<foo></foo>', ''),
    ('<foo x=y></foo>', ''),
    ('<foo x="&"></foo>', ''),

    # Allowed with BadAmpersand
    ('<p> x & y </p>', ''),
]

INVALID_PARSE = [
    '<a></b>',
    '<a>',  # missing closing tag
    '<meta></meta>',  # this is a self-closing tag
]

SKIP = 0
UNCHANGED = 1
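
# In VALID_PARSE below, the second element of each pair is what ToXml()
# should produce: UNCHANGED means the input should come back unmodified,
# while '' means XmlTest.testValid skips the comparison.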

VALID_PARSE = [
    ('<!DOCTYPE html>\n', ''),
    ('<!DOCTYPE>', ''),

    # empty strings
    ('<p x=""></p>', UNCHANGED),
    ("<p x=''></p>", UNCHANGED),

    ('<self-closing a="b" />', UNCHANGED),

    # We could also normalize CDATA?
    # Note that CDATA has an escaping problem: you have to handle a literal
    # ]]> with concatenation.  It just "pushes the problem around".
    # So I think it's better to use ONE kind of escaping, which is &lt;
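    # (Example of that concatenation workaround: a literal ']]>' has to be
    # split across two sections, e.g. '<![CDATA[a ]]]]><![CDATA[> b]]>'
    # decodes to 'a ]]> b'.)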
    ('<script><![CDATA[ <wtf> >< ]]></script>', UNCHANGED),

    # allowed, but 3 < 4 is not allowed
    ('<a> 3 > 4 </a>', ''),
    # allowed, but 3 > 4 is not allowed
    ('<p x="3 < 4"></p>', ''),
    ('<b><a href="foo">link</a></b>', ''),
    ('<meta><a></a>', ''),
    # no attribute
    ('<button disabled></button>', ''),
    ('<button disabled=></button>', ''),
    ('<button disabled= ></button>', ''),

    # single quoted is pretty common
    ("<a href='single'></a>", ''),

    # Conceding to reality - I used these myself
    ('<a href=ble.sh></a>', ''),
    ('<a href=foo.html></a>', ''),
    ('<foo x="&"></foo>', ''),

    # caps
    ('<foo></FOO>', ''),
    ('<Foo></fOO>', ''),

    # capital VOID tag
    ('<META><a></a>', ''),
    ('<script><</script>', ''),
    # matching
    ('<SCRipt><</SCRipt>', ''),
    ('<SCRIPT><</SCRIPT>', ''),
    ('<STYLE><</STYLE>', ''),
    #'<SCRipt><</script>',

    # Note: Python HTMLParser.py does DYNAMIC compilation of regex with re.I
    # flag to handle this!  Gah I want something faster.
    #'<script><</SCRIPT>',

    # TODO: Test <svg> and <math> ?
]

VALID_XML = [
    '<meta></meta>',
]

INVALID_TAG_LEX = [
    # not allowed, but 3 < 4 is allowed
    '<p x="3 > 4"></p>',
    # same thing
    '<a href=">"></a>',
    '<a foo=bar !></a>',  # bad attr
]


class ValidateTest(unittest.TestCase):

    def testInvalid(self):
        counters = html.Counters()
        for s in INVALID_LEX + INVALID_TAG_LEX:
            try:
                html.Validate(s, html.BALANCED_TAGS, counters)
            except html.LexError as e:
                print(e)
            else:
                self.fail('Expected LexError %r' % s)

        for s in INVALID_PARSE:
            try:
                html.Validate(s, html.BALANCED_TAGS, counters)
            except html.ParseError as e:
                print(e)
            else:
                self.fail('Expected ParseError')

    def testValid(self):
        counters = html.Counters()
        for s, _ in VALID_PARSE:
            html.Validate(s, html.BALANCED_TAGS, counters)
            print('HTML5 %r' % s)
        print('HTML5 attrs %r' % counters.debug_attrs)

    def testValidXml(self):
        counters = html.Counters()
        for s in VALID_XML:
            html.Validate(s, html.BALANCED_TAGS | html.NO_SPECIAL_TAGS,
                          counters)
            print('XML %r' % s)
        print('XML attrs %r' % counters.debug_attrs)


class XmlTest(unittest.TestCase):

    def testValid(self):
        counters = html.Counters()
        for h, expected_xml in VALID_LEX + VALID_PARSE:
            actual = html.ToXml(h)
            if expected_xml == UNCHANGED:  # Unchanged
                self.assertEqual(h, actual)
            elif expected_xml == '':  # Skip
                pass
            else:
                self.assertEqual(expected_xml, actual)


if __name__ == '__main__':
    unittest.main()