# OILS / lazylex / html_test.py (448 lines, 293 significant)
1#!/usr/bin/env python2
2from __future__ import print_function
3
4import unittest
5
from lazylex import html  # module under test
7
8log = html.log
9
# Shared fixture: a representative HTML file checked into the repo,
# read once at import time and reused by the lexer tests below.
with open('lazylex/testdata.html') as f:
    TEST_HTML = f.read()
12
13
14class RegexTest(unittest.TestCase):
15
16 def testDotAll(self):
17 import re
18
19 # Note that $ matches end of line, not end of string
20 p1 = re.compile(r'.')
21 print(p1.match('\n'))
22
23 p2 = re.compile(r'.', re.DOTALL)
24 print(p2.match('\n'))
25
26 #p3 = re.compile(r'[.\n]', re.VERBOSE)
27 p3 = re.compile(r'[.\n]')
28 print(p3.match('\n'))
29
30 print('Negation')
31
32 p4 = re.compile(r'[^>]')
33 print(p4.match('\n'))
34
35 def testAttrRe(self):
36 _ATTR_RE = html._ATTR_RE
37 m = _ATTR_RE.match(' empty= val')
38 print(m.groups())
39
40
class FunctionsTest(unittest.TestCase):

    def testFindLineNum(self):
        text = 'foo\n' * 3
        # Offsets inside the string, plus one (50) past the end.
        for offset in (1, 5, 10, 50):
            print(html.FindLineNum(text, offset))
48
49
def _MakeTagLexer(s):
    """Construct a TagLexer positioned over the whole of s."""
    tag_lexer = html.TagLexer(s)
    tag_lexer.Reset(0, len(s))
    return tag_lexer
54
55
def _PrintTokens(lex):
    """Log the tag name and every (token, substring) pair from lex."""
    log('')
    log('tag = %r', lex.TagName())
    for tok_id, begin, end in lex.Tokens():
        log('%s %r', tok_id, lex.s[begin:end])
61
62
class TagLexerTest(unittest.TestCase):
    """Exercises TagLexer on a variety of start tags."""

    def testTagLexer(self):
        # An input like '< >' is invalid; error cases are in testInvalidTag.

        tag_lex = _MakeTagLexer('<a>')
        _PrintTokens(tag_lex)

        tag_lex = _MakeTagLexer('<a novalue>')
        _PrintTokens(tag_lex)

        # A bare attribute reads back as the empty string, mirroring
        # Element.hasAttribute() semantics on the web platform:
        # https://developer.mozilla.org/en-US/docs/Web/API/Element/hasAttribute
        self.assertEqual('', tag_lex.GetAttrRaw('novalue'))

        tag_lex = _MakeTagLexer('<a href="double quoted">')
        _PrintTokens(tag_lex)

        self.assertEqual('double quoted', tag_lex.GetAttrRaw('href'))
        self.assertEqual(None, tag_lex.GetAttrRaw('oops'))

        tag_lex = _MakeTagLexer('<a href=foo class="bar">')
        _PrintTokens(tag_lex)

        tag_lex = _MakeTagLexer('<a href=foo class="bar" />')
        _PrintTokens(tag_lex)

        # Entity references inside attribute values come back un-decoded.
        tag_lex = _MakeTagLexer('<a href="?foo=1&amp;bar=2" />')
        self.assertEqual('?foo=1&amp;bar=2', tag_lex.GetAttrRaw('href'))

    def testTagName(self):
        tag_lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual('a', tag_lex.TagName())

    def testAllAttrs(self):
        """AllAttrsRaw() yields [('key', 'value')] pairs for all attributes."""
        # Self-closing tag
        tag_lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual([('href', 'foo'), ('class', 'bar')],
                         tag_lex.AllAttrsRaw())

        tag_lex = _MakeTagLexer('<a href="?foo=1&amp;bar=2" />')
        self.assertEqual([('href', '?foo=1&amp;bar=2')],
                         tag_lex.AllAttrsRaw())

    def testEmptyMissingValues(self):
        # <button disabled> is equivalent to <button disabled="">
        tag_lex = _MakeTagLexer('<button disabled>')
        attrs = tag_lex.AllAttrsRaw()
        self.assertEqual([('disabled', '')], attrs)

        log('slices %s', tag_lex.AllAttrsRawSlice())

        # Every flavor of empty / missing value in one tag.
        tag_lex = _MakeTagLexer(
            '''<p double="" single='' empty= value missing>''')
        attrs = tag_lex.AllAttrsRaw()
        self.assertEqual([
            ('double', ''),
            ('single', ''),
            ('empty', 'value'),
            ('missing', ''),
        ], attrs)
        log('all %s', attrs)

        log('slices %s', tag_lex.AllAttrsRawSlice())

    def testInvalidTag(self):
        # A stray '!' in the attribute list must raise LexError.
        try:
            tag_lex = _MakeTagLexer('<a foo=bar !></a>')
            tag_lex.AllAttrsRaw()
        except html.LexError as e:
            print(e)
        else:
            self.fail('Expected LexError')
143
144
def _MakeAttrValueLexer(s):
    """Construct an AttrValueLexer positioned over the whole of s."""
    val_lexer = html.AttrValueLexer(s)
    val_lexer.Reset(0, len(s))
    return val_lexer
149
150
class AttrValueLexerTest(unittest.TestCase):

    def testGood(self):
        # A query string containing one entity lexes into 3 tokens.
        val_lexer = _MakeAttrValueLexer('?foo=42&amp;bar=99')
        self.assertEqual(3, val_lexer.NumTokens())
157
158
def Lex(h, no_special_tags=False):
    """Lex h, logging every token with its text, and return the token list.

    Each token is a (tok_id, end_pos) pair; a token's text runs from the
    previous token's end position up to its own.
    """
    print(repr(h))
    tokens = html.ValidTokenList(h, no_special_tags=no_special_tags)
    pos = 0
    for tok_id, end_pos in tokens:
        log('%d %s %r', end_pos, html.TokenName(tok_id), h[pos:end_pos])
        pos = end_pos
    return tokens
168
169
class LexerTest(unittest.TestCase):
    """End-to-end token-stream tests; each expected token is (id, end_pos)."""

    # Potential users of this lexer elsewhere in the repo:
    # IndexLinker in devtools/make_help.py
    # <pre> sections in doc/html_help.py
    # TocExtractor in devtools/cmark.py

    def testPstrip(self):
        """Remove anything like this.

        <p><pstrip> </pstrip></p>
        """
        pass

    def testCommentParse(self):
        # Smoke test: the whole testdata file should lex without LexError.
        n = len(TEST_HTML)
        tokens = Lex(TEST_HTML)

    def testCommentParse2(self):
        # A comment spanning multiple lines lexes as ONE Comment token.
        Tok = html.Tok
        h = '''
        hi <!-- line 1
                line 2 --><br/>'''
        tokens = Lex(h)

        self.assertEqual(
            [
                (Tok.RawData, 12),
                (Tok.Comment, 50),  # the whole <!-- ... --> comment
                (Tok.StartEndTag, 55),  # <br/>
                (Tok.EndOfStream, 55),
            ],
            tokens)

    def testProcessingInstruction(self):
        # <?xml ?> header
        Tok = html.Tok
        h = 'hi <? err ?>'
        tokens = Lex(h)

        self.assertEqual(
            [
                (Tok.RawData, 3),
                (Tok.Processing, 12),  # <? err ?>
                (Tok.EndOfStream, 12),
            ],
            tokens)

    def testScriptStyle(self):
        # In HTML mode, everything up to </script> is opaque CData, so the
        # bare < && > characters in the JavaScript are fine.
        Tok = html.Tok
        h = '''
        hi <script src=""> if (x < 1 && y > 2 ) { console.log(""); }
        </script>
        '''
        tokens = Lex(h)

        self.assertEqual(
            [
                (Tok.RawData, 12),
                (Tok.StartTag, 27),  # <script>
                (Tok.HtmlCData, 78),  # JavaScript code is HTML CData
                (Tok.EndTag, 87),  # </script>
                (Tok.RawData, 96),  # trailing whitespace
                (Tok.EndOfStream, 96),
            ],
            tokens)

    def testScriptStyleXml(self):
        Tok = html.Tok
        h = 'hi <script src=""> &lt; </script>'
        # XML mode: <script> is NOT special, so its content is lexed normally.
        tokens = Lex(h, no_special_tags=True)

        self.assertEqual(
            [
                (Tok.RawData, 3),
                (Tok.StartTag, 18),  # <script>
                (Tok.RawData, 19),  # space
                (Tok.CharEntity, 23),  # &lt;
                (Tok.RawData, 24),  # space
                (Tok.EndTag, 33),  # </script>
                (Tok.EndOfStream, 33),
            ],
            tokens)

    def testCData(self):
        Tok = html.Tok

        # from
        # /home/andy/src/languages/Python-3.11.5/Lib/test/xmltestdata/c14n-20/inC14N4.xml
        h = '<compute><![CDATA[value>"0" && value<"10" ?"valid":"error"]]></compute>'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 9),
            (Tok.CData, 61),  # the whole <![CDATA[ ... ]]> section
            (Tok.EndTag, 71),
            (Tok.EndOfStream, 71),
        ], tokens)

    def testEntity(self):
        Tok = html.Tok

        # from
        # /home/andy/src/Python-3.12.4/Lib/test/xmltestdata/c14n-20/inC14N5.xml
        h = '&ent1;, &ent2;!'

        tokens = Lex(h)

        self.assertEqual([
            (Tok.CharEntity, 6),  # &ent1;
            (Tok.RawData, 8),  # ', '
            (Tok.CharEntity, 14),  # &ent2;
            (Tok.RawData, 15),  # '!'
            (Tok.EndOfStream, 15),
        ], tokens)

    def testStartTag(self):
        Tok = html.Tok

        h = '<a>hi</a>'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 3),
            (Tok.RawData, 5),
            (Tok.EndTag, 9),
            (Tok.EndOfStream, 9),
        ], tokens)

        # Make sure we don't consume too much
        h = '<a><source>1.7</source></a>'

        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 3),
            (Tok.StartTag, 11),
            (Tok.RawData, 14),
            (Tok.EndTag, 23),
            (Tok.EndTag, 27),
            (Tok.EndOfStream, 27),
        ], tokens)

        # NOTE(review): this early return disables the case below; its
        # expected token list looks stale (positions 9/24/9/9 repeat and
        # cannot all be correct) — confirm before re-enabling.
        return

        h = '''
        <configuration>
          <source>1.7</source>
        </configuration>'''

        tokens = Lex(h)

        self.assertEqual([
            (Tok.RawData, 9),
            (Tok.StartTag, 24),
            (Tok.RawData, 9),
            (Tok.EndOfStream, 9),
        ], tokens)

    def testInvalid(self):
        Tok = html.Tok

        # Every string in INVALID_LEX must raise LexError.
        for s in INVALID_LEX:
            try:
                tokens = html.ValidTokenList(s)
            except html.LexError as e:
                print(e)
            else:
                self.fail('Expected LexError %r' % s)
340
341
# Inputs that must fail at the LEXER level (html.LexError).
INVALID_LEX = [
    # Should be &amp;
    '<a>&',
    '&amp',  # not finished
    '&#',  # not finished
    # Hm > is allowed?
    #'a > b',
    'a < b',
    '<!-- unfinished comment',
    '<? unfinished processing',
    '</div bad=attr> <a> <b>',

    # not allowed, but 3 > 4 is allowed
    '<a> 3 < 4 </a>',
]

# Inputs that lex fine but must fail at the PARSER level (html.ParseError).
INVALID_PARSE = [
    '<a></b>',
    '<a>',  # missing closing tag
    '<meta></meta>',  # this is a self-closing tag
]

# Inputs that must validate cleanly in HTML5 mode.
VALID_PARSE = [
    '<!DOCTYPE html>\n',
    '<!DOCTYPE>',

    # empty strings
    '<p x=""></p>',
    "<p x=''></p>",

    # allowed, but 3 < 4 is not allowed
    '<a> 3 > 4 </a>',
    # allowed, but 3 > 4 is not allowed
    '<p x="3 < 4"></p>',
    '<b><a href="foo">link</a></b>',
    '<meta><a></a>',
    # no attribute
    '<button disabled></button>',
    '<button disabled=></button>',
    '<button disabled= ></button>',

    # single quoted is pretty common
    "<a href='single'></a>",

    # Conceding to reality - I used these myself
    '<a href=ble.sh></a>',
    '<a href=foo.html></a>',

    # TODO: capitalization should be allowed
    #'<META><a></a>',

    # TODO: Test <svg> and <math> ?
]

# Valid only in XML mode, where <meta> is not self-closing.
VALID_XML = [
    '<meta></meta>',
]

# Tags whose ATTRIBUTES fail to lex (html.LexError from the tag lexer).
INVALID_TAG_LEX = [
    # not allowed, but 3 < 4 is allowed
    '<p x="3 > 4"></p>',
    '<a foo=bar !></a>',  # bad attr

    # should be escaped
    #'<a href="&"></a>',
    #'<a href=">"></a>',
]
409
410
class ValidateTest(unittest.TestCase):
    """Drives html.Validate() over the case tables defined above."""

    def testInvalid(self):
        counters = html.Counters()

        # Lexer-level failures
        for case in INVALID_LEX + INVALID_TAG_LEX:
            try:
                html.Validate(case, html.BALANCED_TAGS, counters)
            except html.LexError as e:
                print(e)
            else:
                self.fail('Expected LexError %r' % case)

        # Parser-level failures (unbalanced tags, etc.)
        for case in INVALID_PARSE:
            try:
                html.Validate(case, html.BALANCED_TAGS, counters)
            except html.ParseError as e:
                print(e)
            else:
                self.fail('Expected ParseError')

    def testValid(self):
        counters = html.Counters()
        for case in VALID_PARSE:
            html.Validate(case, html.BALANCED_TAGS, counters)
            print('HTML5 %r' % case)
            print('HTML5 attrs %r' % counters.debug_attrs)

    def testValidXml(self):
        counters = html.Counters()
        for case in VALID_XML:
            # XML mode: no special treatment of <script>/<style> etc.
            html.Validate(case, html.BALANCED_TAGS | html.NO_SPECIAL_TAGS,
                          counters)
            print('XML %r' % case)
            print('XML attrs %r' % counters.debug_attrs)
445
446
# Allow running the tests directly: python2 lazylex/html_test.py
if __name__ == '__main__':
    unittest.main()