# OILS / lazylex / html_test.py
#
# 251 lines, 159 significant
#!/usr/bin/env python2
from __future__ import print_function

import unittest
from lazylex import html  # module under test
7
8log = html.log
9
10with open('lazylex/testdata.html') as f:
11 TEST_HTML = f.read()
12
13
def _MakeTagLexer(s):
    """Construct a TagLexer primed to scan the entire string s."""
    tag_lexer = html.TagLexer(s)
    tag_lexer.Reset(0, len(s))
    return tag_lexer
18
19
def _PrintTokens(lex):
    """Log the lexer's tag name and every (token, substring) pair."""
    log('')
    log('tag = %r', lex.TagName())
    for tok_id, left, right in lex.Tokens():
        log('%s %r', tok_id, lex.s[left:right])
25
26
class RegexTest(unittest.TestCase):
    """Sanity checks on the 're' behaviors the HTML lexer relies on."""

    def testDotAll(self):
        import re

        # Defect fixed: this test only printed match objects and asserted
        # nothing, so it could never fail.  The prints are kept for
        # illustration; assertions now pin the behavior.

        # Note that $ matches end of line, not end of string, and '.' does
        # NOT match a newline by default.
        p1 = re.compile(r'.')
        print(p1.match('\n'))
        self.assertIsNone(p1.match('\n'))

        # With re.DOTALL, '.' matches any character, including newline.
        p2 = re.compile(r'.', re.DOTALL)
        print(p2.match('\n'))
        self.assertIsNotNone(p2.match('\n'))

        # Inside a character class, '.' is literal, but \n still matches.
        #p3 = re.compile(r'[.\n]', re.VERBOSE)
        p3 = re.compile(r'[.\n]')
        print(p3.match('\n'))
        self.assertIsNotNone(p3.match('\n'))

        print('Negation')

        # A negated class like [^>] DOES match newline.
        p4 = re.compile(r'[^>]')
        print(p4.match('\n'))
        self.assertIsNotNone(p4.match('\n'))
47
48
class TagLexerTest(unittest.TestCase):
    """Spot-checks for html.TagLexer name and attribute extraction."""

    def testTagLexer(self):
        # Invalid!
        #lex = _MakeTagLexer('< >')
        #print(lex.Tag())

        tag_lex = _MakeTagLexer('<a>')
        _PrintTokens(tag_lex)

        tag_lex = _MakeTagLexer('<a novalue>')
        _PrintTokens(tag_lex)

        # A valueless attribute reads back as None.  We could instead offer a
        # separate HasAttr() method, like the DOM does:
        # https://developer.mozilla.org/en-US/docs/Web/API/Element/hasAttribute
        self.assertEqual(None, tag_lex.GetAttrRaw('novalue'))

        tag_lex = _MakeTagLexer('<a href="double quoted">')
        _PrintTokens(tag_lex)

        self.assertEqual('double quoted', tag_lex.GetAttrRaw('href'))
        self.assertEqual(None, tag_lex.GetAttrRaw('oops'))

        tag_lex = _MakeTagLexer('<a href=foo class="bar">')
        _PrintTokens(tag_lex)

        tag_lex = _MakeTagLexer('<a href=foo class="bar" />')
        _PrintTokens(tag_lex)

        tag_lex = _MakeTagLexer('<a href="?foo=1&amp;bar=2" />')
        self.assertEqual('?foo=1&amp;bar=2', tag_lex.GetAttrRaw('href'))

    def testTagName(self):
        tag_lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual('a', tag_lex.TagName())

    def testAllAttrs(self):
        """AllAttrsRaw() returns [('key', 'value')] for every attribute."""
        # self-closing tag
        tag_lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual([('href', 'foo'), ('class', 'bar')],
                         tag_lex.AllAttrsRaw())

        tag_lex = _MakeTagLexer('<a href="?foo=1&amp;bar=2" />')
        self.assertEqual([('href', '?foo=1&amp;bar=2')], tag_lex.AllAttrsRaw())
97
98
def Lex(h):
    """Tokenize h with html.ValidTokens, logging each token; return them all."""
    print(repr(h))
    token_list = list(html.ValidTokens(h))
    for t_id, end_pos in token_list:
        log('%d %s', end_pos, html.TokenName(t_id))
    return token_list
106
107
class LexerTest(unittest.TestCase):
    """Tests for the token-level lexer (html.ValidTokens / html._Tokens).

    Each assertion list pairs a token id with the token's END position in
    the input string.
    """

    # Code elsewhere in the repo that consumes these tokens:
    # IndexLinker in devtools/make_help.py
    # <pre> sections in doc/html_help.py
    # TocExtractor in devtools/cmark.py

    def testPstrip(self):
        """Remove anything like this.

        <p><pstrip> </pstrip></p>
        """
        pass

    def testCommentParse(self):
        # Lex the whole fixture file; any Invalid token is a hard failure.
        n = len(TEST_HTML)
        for tok_id, end_pos in html._Tokens(TEST_HTML, 0, n):
            if tok_id == html.Invalid:
                raise RuntimeError()
            print(tok_id)

    def testCommentParse2(self):
        # A comment spanning two lines, immediately followed by a
        # self-closing tag.  NOTE(review): the 8-space/16-space indentation
        # inside the string literal is significant — the asserted end
        # positions (12, 50, 55) depend on it.
        Tok = html.Tok
        h = '''
        hi <!-- line 1
                line 2 --><br/>'''
        tokens = Lex(h)

        self.assertEqual(
            [
                (Tok.RawData, 12),
                (Tok.Comment, 50),  # the two-line <!-- ... --> comment
                (Tok.StartEndTag, 55),  # <br/>
                (Tok.EndOfStream, 55),
            ],
            tokens)

    def testProcessingInstruction(self):
        # Processing instruction, like the <?xml ?> header
        Tok = html.Tok
        h = 'hi <? err ?>'
        tokens = Lex(h)

        self.assertEqual(
            [
                (Tok.RawData, 3),
                (Tok.Processing, 12),  # <? err ?>
                (Tok.EndOfStream, 12),
            ],
            tokens)

    def testScriptStyle(self):
        # The body of <script> is lexed as CData, so '<', '&&', '>' inside
        # it do not start tags or entities.
        Tok = html.Tok
        h = '''
        hi <script src=""> if (x < 1 && y > 2 ) { console.log(""); }
        </script>
        '''
        tokens = Lex(h)

        self.assertEqual(
            [
                (Tok.RawData, 12),
                (Tok.StartTag, 27),  # <script src="">
                (Tok.HtmlCData, 78),  # JavaScript code is HTML CData
                (Tok.EndTag, 87),  # </script>
                (Tok.RawData, 96),  # trailing newline + indent
                (Tok.EndOfStream, 96),
            ],
            tokens)

    def testCData(self):
        Tok = html.Tok

        # from
        # /home/andy/src/languages/Python-3.11.5/Lib/test/xmltestdata/c14n-20/inC14N4.xml
        h = '<compute><![CDATA[value>"0" && value<"10" ?"valid":"error"]]></compute>'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 9),
            (Tok.CData, 61),
            (Tok.EndTag, 71),
            (Tok.EndOfStream, 71),
        ], tokens)

    def testEntity(self):
        Tok = html.Tok

        # from
        # /home/andy/src/Python-3.12.4/Lib/test/xmltestdata/c14n-20/inC14N5.xml
        h = '&ent1;, &ent2;!'

        tokens = Lex(h)

        self.assertEqual([
            (Tok.CharEntity, 6),  # &ent1;
            (Tok.RawData, 8),  # ', '
            (Tok.CharEntity, 14),  # &ent2;
            (Tok.RawData, 15),  # '!'
            (Tok.EndOfStream, 15),
        ], tokens)

    def testStartTag(self):
        Tok = html.Tok

        h = '<a>hi</a>'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 3),
            (Tok.RawData, 5),
            (Tok.EndTag, 9),
            (Tok.EndOfStream, 9),
        ], tokens)

    def testInvalid(self):
        # Each input must raise LexError within the first few tokens.
        Tok = html.Tok

        INVALID = [
            # Should be &amp;
            '<a>&',
            '&amp',  # not finished
            '&#',  # not finished
            # Hm > is allowed?
            #'a > b',
            'a < b',
            '<!-- unfinished comment',
            '<? unfinished processing',
            '</div bad=attr> <a> <b>',
        ]

        for s in INVALID:
            lex = html.ValidTokens(s)
            try:
                # xrange: this file runs under Python 2 (see shebang)
                for i in xrange(5):
                    tok_id, pos = next(lex)
            except html.LexError as e:
                print(e)
            else:
                self.fail('Expected LexError')
248
249
250if __name__ == '__main__':
251 unittest.main()