#!/usr/bin/env python2
from __future__ import print_function

import unittest

from lazylex import html  # module under test

log = html.log

with open('lazylex/testdata.html') as f:
    TEST_HTML = f.read()


def _MakeTagLexer(s):
    lex = html.TagLexer(s)
    lex.Reset(0, len(s))
    return lex


def _PrintTokens(lex):
    log('')
    log('tag = %r', lex.TagName())
    for tok, start, end in lex.Tokens():
        log('%s %r', tok, lex.s[start:end])


class RegexTest(unittest.TestCase):

    def testDotAll(self):
        import re

        # By default, . matches any character EXCEPT a newline
        p1 = re.compile(r'.')
        print(p1.match('\n'))

        # With re.DOTALL, . matches a newline too
        p2 = re.compile(r'.', re.DOTALL)
        print(p2.match('\n'))

        # Inside a character class, . is literal, so [.\n] matches a newline
        #p3 = re.compile(r'[.\n]', re.VERBOSE)
        p3 = re.compile(r'[.\n]')
        print(p3.match('\n'))

        print('Negation')

        # A negated class like [^>] also matches a newline
        p4 = re.compile(r'[^>]')
        print(p4.match('\n'))
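
        # Minimal checks of the behavior printed above, using standard re
        # semantics: plain . doesn't match a newline, while DOTALL-mode .
        # and the negated class [^>] both do.
        self.assertEqual(None, p1.match('\n'))
        self.assertNotEqual(None, p2.match('\n'))
        self.assertNotEqual(None, p4.match('\n'))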


class FunctionsTest(unittest.TestCase):

    def testFindLineNum(self):
        s = 'foo\n' * 3
        for pos in [1, 5, 10, 50]:  # 50 is out of bounds
            line_num = html.FindLineNum(s, pos)
            print(line_num)
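
        # A hedged sanity check that doesn't assume 0-based or 1-based
        # numbering: positions on later lines shouldn't map to earlier lines.
        self.assertTrue(
            html.FindLineNum(s, 1) <= html.FindLineNum(s, 5) <=
            html.FindLineNum(s, 10))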


class TagLexerTest(unittest.TestCase):

    def testTagLexer(self):
        # Invalid!
        #lex = _MakeTagLexer('< >')
        #print(lex.Tag())

        lex = _MakeTagLexer('<a>')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a novalue>')
        _PrintTokens(lex)

        # Note: we could have a different HasAttr() method
        # <a novalue> means lex.Get('novalue') == None
        # https://developer.mozilla.org/en-US/docs/Web/API/Element/hasAttribute
        self.assertEqual(None, lex.GetAttrRaw('novalue'))
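
        # A sketch of that HasAttr() idea: treat an attribute as present if
        # its name shows up in AllAttrsRaw(), regardless of value.  This
        # assumes AllAttrsRaw() lists valueless attributes, which this test
        # doesn't pin down, so the check stays commented out:
        #self.assertTrue(
        #    any(name == 'novalue' for name, _ in lex.AllAttrsRaw()))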

        lex = _MakeTagLexer('<a href="double quoted">')
        _PrintTokens(lex)

        self.assertEqual('double quoted', lex.GetAttrRaw('href'))
        self.assertEqual(None, lex.GetAttrRaw('oops'))

        lex = _MakeTagLexer('<a href=foo class="bar">')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a href=foo class="bar" />')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a href="?foo=1&amp;bar=2" />')
        self.assertEqual('?foo=1&amp;bar=2', lex.GetAttrRaw('href'))

    def testTagName(self):
        lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual('a', lex.TagName())

    def testAllAttrs(self):
        """AllAttrsRaw() returns a [('key', 'value')] list of all attributes."""
        # closed
        lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual([('href', 'foo'), ('class', 'bar')],
                         lex.AllAttrsRaw())

        lex = _MakeTagLexer('<a href="?foo=1&amp;bar=2" />')
        self.assertEqual([('href', '?foo=1&amp;bar=2')], lex.AllAttrsRaw())


def Lex(h, no_special_tags=False):
    print(repr(h))
    tokens = html.ValidTokenList(h, no_special_tags=no_special_tags)
    start_pos = 0
    for tok_id, end_pos in tokens:
        frag = h[start_pos:end_pos]
        log('%d %s %r', end_pos, html.TokenName(tok_id), frag)
        start_pos = end_pos
    return tokens
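

def _TokenSpans(tokens):
    """Hypothetical helper, not part of lazylex.html: turn the
    (tok_id, end_pos) pairs that Lex() returns into (tok_id, start, end)
    spans, using the same start_pos bookkeeping as Lex() above.

    Usage: for tok_id, start, end in _TokenSpans(Lex(h)): print(h[start:end])
    """
    start_pos = 0
    for tok_id, end_pos in tokens:
        yield tok_id, start_pos, end_pos
        start_pos = end_pos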


class LexerTest(unittest.TestCase):

    # IndexLinker in devtools/make_help.py
    # <pre> sections in doc/html_help.py
    # TocExtractor in devtools/cmark.py

    def testPstrip(self):
        """Remove anything like this.

        <p><pstrip> </pstrip></p>
        """
        pass

    def testCommentParse(self):
        n = len(TEST_HTML)
        tokens = Lex(TEST_HTML)

    def testCommentParse2(self):
        Tok = html.Tok
        h = '''
        hi <!-- line 1
                line 2 --><br/>'''
        tokens = Lex(h)

        self.assertEqual(
            [
                (Tok.RawData, 12),
                (Tok.Comment, 50),  # <!-- line 1 ... line 2 -->
                (Tok.StartEndTag, 55),  # <br/>
                (Tok.EndOfStream, 55),
            ],
            tokens)

    def testProcessingInstruction(self):
        # <?xml ?> header
        Tok = html.Tok
        h = 'hi <? err ?>'
        tokens = Lex(h)

        self.assertEqual(
            [
                (Tok.RawData, 3),
                (Tok.Processing, 12),  # <? err ?>
                (Tok.EndOfStream, 12),
            ],
            tokens)

    def testScriptStyle(self):
        Tok = html.Tok
        h = '''
        hi <script src=""> if (x < 1 && y > 2 ) { console.log(""); }
        </script>
        '''
        tokens = Lex(h)

        self.assertEqual(
            [
                (Tok.RawData, 12),
                (Tok.StartTag, 27),  # <script>
                (Tok.HtmlCData, 78),  # JavaScript code is HTML CData
                (Tok.EndTag, 87),  # </script>
                (Tok.RawData, 96),  # \n and indent
                (Tok.EndOfStream, 96),
            ],
            tokens)

    def testScriptStyleXml(self):
        Tok = html.Tok
        h = 'hi <script src=""> &lt; </script>'
        # XML mode: <script> is not special
        tokens = Lex(h, no_special_tags=True)

        self.assertEqual(
            [
                (Tok.RawData, 3),  # hi
                (Tok.StartTag, 18),  # <script>
                (Tok.RawData, 19),  # space
                (Tok.CharEntity, 23),  # &lt;
                (Tok.RawData, 24),  # space
                (Tok.EndTag, 33),  # </script>
                (Tok.EndOfStream, 33),
            ],
            tokens)

    def testCData(self):
        Tok = html.Tok

        # from
        # /home/andy/src/languages/Python-3.11.5/Lib/test/xmltestdata/c14n-20/inC14N4.xml
        h = '<compute><![CDATA[value>"0" && value<"10" ?"valid":"error"]]></compute>'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 9),  # <compute>
            (Tok.CData, 61),  # the whole <![CDATA[ ... ]]> section
            (Tok.EndTag, 71),  # </compute>
            (Tok.EndOfStream, 71),
        ], tokens)

    def testEntity(self):
        Tok = html.Tok

        # from
        # /home/andy/src/Python-3.12.4/Lib/test/xmltestdata/c14n-20/inC14N5.xml
        h = '&ent1;, &ent2;!'

        tokens = Lex(h)

        self.assertEqual([
            (Tok.CharEntity, 6),  # &ent1;
            (Tok.RawData, 8),  # ', '
            (Tok.CharEntity, 14),  # &ent2;
            (Tok.RawData, 15),  # '!'
            (Tok.EndOfStream, 15),
        ], tokens)

    def testStartTag(self):
        Tok = html.Tok

        h = '<a>hi</a>'
        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 3),
            (Tok.RawData, 5),
            (Tok.EndTag, 9),
            (Tok.EndOfStream, 9),
        ], tokens)

        # Make sure we don't consume too much
        h = '<a><source>1.7</source></a>'

        tokens = Lex(h)

        self.assertEqual([
            (Tok.StartTag, 3),
            (Tok.StartTag, 11),
            (Tok.RawData, 14),
            (Tok.EndTag, 23),
            (Tok.EndTag, 27),
            (Tok.EndOfStream, 27),
        ], tokens)

        # TODO: the multi-line case below is unfinished; its expected token
        # positions are placeholders, so skip it for now.
        return

        h = '''
        <configuration>
          <source>1.7</source>
        </configuration>'''

        tokens = Lex(h)

        self.assertEqual([
            (Tok.RawData, 9),
            (Tok.StartTag, 24),
            (Tok.RawData, 9),
            (Tok.EndOfStream, 9),
        ], tokens)

    def testInvalid(self):
        Tok = html.Tok

        INVALID = [
            # A bare & should be written as &amp;
            '<a>&',
            '&amp',  # not finished
            '&#',  # not finished
            # Hm > is allowed?
            #'a > b',
            'a < b',
            '<!-- unfinished comment',
            '<? unfinished processing',
            '</div bad=attr> <a> <b>',
        ]

        for s in INVALID:
            try:
                tokens = html.ValidTokenList(s)
            except html.LexError as e:
                print(e)
            else:
                self.fail('Expected LexError')


if __name__ == '__main__':
    unittest.main()
|