#!/usr/bin/env python2
from __future__ import print_function

import unittest

from lazylex import html  # module under test

log = html.log

with open('lazylex/testdata.html') as f:
    TEST_HTML = f.read()


def _MakeTagLexer(s):
    lex = html.TagLexer(s)
    lex.Reset(0, len(s))
    return lex


def _PrintTokens(lex):
    log('')
    log('tag = %r', lex.TagName())
    for tok, start, end in lex.Tokens():
        log('%s %r', tok, lex.s[start:end])


class RegexTest(unittest.TestCase):

    def testDotAll(self):
        import re

        # Note that '.' does not match a newline unless re.DOTALL is set
        p1 = re.compile(r'.')
        print(p1.match('\n'))

        p2 = re.compile(r'.', re.DOTALL)
        print(p2.match('\n'))

        #p3 = re.compile(r'[.\n]', re.VERBOSE)
        p3 = re.compile(r'[.\n]')
        print(p3.match('\n'))
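        # Sanity checks on stdlib re semantics: inside a character class,
        # '.' is literal, so [.\n] matches '.' or a newline, nothing else
        self.assertTrue(p3.match('.'))
        self.assertTrue(p3.match('\n'))
        self.assertEqual(None, p3.match('x'))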

        print('Negation')

        p4 = re.compile(r'[^>]')
        print(p4.match('\n'))
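        # Unlike bare '.', the negated class [^>] matches a newline, which
        # is what lets a tag lexer scan to the closing '>' across lines
        self.assertTrue(p4.match('\n'))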


class TagLexerTest(unittest.TestCase):

    def testTagLexer(self):
        # Invalid!
        #lex = _MakeTagLexer('< >')
        #print(lex.Tag())

        lex = _MakeTagLexer('<a>')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a novalue>')
        _PrintTokens(lex)

        # Note: we could have a different HasAttr() method
        # <a novalue> means lex.GetAttrRaw('novalue') == None
        # https://developer.mozilla.org/en-US/docs/Web/API/Element/hasAttribute
        self.assertEqual(None, lex.GetAttrRaw('novalue'))
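        # A hypothetical HasAttr('novalue') (not implemented on TagLexer)
        # would return True here, distinguishing "attribute present with
        # no value" from "attribute absent"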

        lex = _MakeTagLexer('<a href="double quoted">')
        _PrintTokens(lex)

        self.assertEqual('double quoted', lex.GetAttrRaw('href'))
        self.assertEqual(None, lex.GetAttrRaw('oops'))

        lex = _MakeTagLexer('<a href=foo class="bar">')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a href=foo class="bar" />')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a href="?foo=1&amp;bar=2" />')
        self.assertEqual('?foo=1&amp;bar=2', lex.GetAttrRaw('href'))

    def testTagName(self):
        lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual('a', lex.TagName())

    def testAllAttrs(self):
        """AllAttrsRaw() returns [('key', 'value')] for all attributes."""
        # self-closing tag
        lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual([('href', 'foo'), ('class', 'bar')],
                         lex.AllAttrsRaw())

        lex = _MakeTagLexer('<a href="?foo=1&amp;bar=2" />')
        self.assertEqual([('href', '?foo=1&amp;bar=2')], lex.AllAttrsRaw())


class LexerTest(unittest.TestCase):

    # Related code:
    # - IndexLinker in devtools/make_help.py
    # - <pre> sections in doc/html_help.py
    # - TocExtractor in devtools/cmark.py

    def testPstrip(self):
        """Remove anything like this:

        <p><pstrip> </pstrip></p>
        """
        pass

    def testCommentParse(self):
        n = len(TEST_HTML)
        for tok_id, end_pos in html._Tokens(TEST_HTML, 0, n):
            if tok_id == html.Invalid:
                raise RuntimeError('Invalid token in testdata.html')
            print(tok_id)

    def testCommentParse2(self):

        Tok = html.Tok
        h = '''
        hi <!-- line 1
                line 2 --><br/>'''
        print(repr(h))
        lex = html.ValidTokens(h)

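        # Token end positions, per the assertions below:
        #   12: RawData '\n        hi '
        #   50: Comment '<!-- line 1 ... line 2 -->'
        #   55: StartEndTag '<br/>', which is also the end of the stream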
        tok_id, pos = next(lex)
        self.assertEqual(12, pos)
        self.assertEqual(Tok.RawData, tok_id)

        tok_id, pos = next(lex)
        log('tok %r', html.TokenName(tok_id))
        self.assertEqual(50, pos)
        self.assertEqual(Tok.Comment, tok_id)

        tok_id, pos = next(lex)
        self.assertEqual(55, pos)
        self.assertEqual(Tok.StartEndTag, tok_id)

        tok_id, pos = next(lex)
        self.assertEqual(55, pos)
        self.assertEqual(Tok.EndOfStream, tok_id)

    def testProcessingInstruction(self):
        # The top level should understand the <? ?> syntax, because
        # otherwise it would be lexed as a start tag

        Tok = html.Tok
        h = 'hi <? err ?>'
        print(repr(h))
        lex = html.ValidTokens(h)

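        # 'hi <? err ?>' is 12 chars: RawData 'hi ' ends at 3, and the
        # processing instruction ends at 12, which is also end of stream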
        tok_id, pos = next(lex)
        self.assertEqual(3, pos)
        self.assertEqual(Tok.RawData, tok_id)

        tok_id, pos = next(lex)
        self.assertEqual(12, pos)
        log('tok %r', html.TokenName(tok_id))
        self.assertEqual(Tok.Processing, tok_id)

        tok_id, pos = next(lex)
        self.assertEqual(12, pos)
        log('tok %r', html.TokenName(tok_id))
        self.assertEqual(Tok.EndOfStream, tok_id)

    def testScriptStyle(self):

        Tok = html.Tok
        h = '''
        hi <script src=""> if (x < 1 && y > 2 ) { console.log(""); }
        </script>
        '''
        print(repr(h))
        lex = html.ValidTokens(h)

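        # Note: the bare '<' and '&&' inside <script> are tolerated
        # because script contents are lexed as CData, not as HTML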
        tok_id, pos = next(lex)
        self.assertEqual(12, pos)
        self.assertEqual(Tok.RawData, tok_id)

        return  # the checks below are disabled

        # <script>
        tok_id, pos = next(lex)
        self.assertEqual(27, pos)
        self.assertEqual(Tok.CDataStartTag, tok_id)

        return

        # JavaScript code is CData
        tok_id, pos = next(lex)
        self.assertEqual(34, pos)
        log('tok %r', html.TokenName(tok_id))
        self.assertEqual(Tok.CData, tok_id)

        # </script>
        tok_id, pos = next(lex)
        self.assertEqual(27, pos)
        log('tok %r', html.TokenName(tok_id))
        self.assertEqual(Tok.CDataEndTag, tok_id)

    def testValid(self):
        Tok = html.Tok

        lex = html.ValidTokens('<a>hi</a>')

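        # In '<a>hi</a>': StartTag '<a>' ends at 3, RawData 'hi' at 5,
        # and EndTag '</a>' at 9, which is also end of stream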
        tok_id, pos = next(lex)
        self.assertEqual(3, pos)
        self.assertEqual(Tok.StartTag, tok_id)

        tok_id, pos = next(lex)
        self.assertEqual(5, pos)
        self.assertEqual(Tok.RawData, tok_id)

        tok_id, pos = next(lex)
        self.assertEqual(9, pos)
        self.assertEqual(Tok.EndTag, tok_id)

        tok_id, pos = next(lex)
        self.assertEqual(9, pos)
        self.assertEqual(Tok.EndOfStream, tok_id)

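        # html.Lexer is the pull-style counterpart of the ValidTokens
        # generator: Read() returns (tok_id, pos) until EndOfStream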
        lex = html.Lexer('<a>hi</a>')
        while True:
            tok_id, pos = lex.Read()
            print('%d %s' % (pos, html.TokenName(tok_id)))
            if tok_id == Tok.EndOfStream:
                break

        return  # unreachable; leftover from the generator-based version
        tok_id, pos = next(lex)
        self.assertEqual(9, pos)
        self.assertEqual(Tok.EndOfStream, tok_id)

        while True:
            tok_id, pos = next(lex)
            print('%d %s' % (pos, html.TokenName(tok_id)))

    def testInvalid(self):
        Tok = html.Tok

        INVALID = [
            # Should be &amp;
            '<a>&',
            # Hm, '>' is allowed?
            #'a > b',
            'a < b',
            '<!-- unfinished comment',
            '<? unfinished processing',
        ]

        for s in INVALID:
            lex = html.ValidTokens(s)
            try:
                for i in xrange(10):
                    tok_id, pos = next(lex)
            except html.LexError as e:
                print(e)
            else:
                self.fail('Expected LexError')


if __name__ == '__main__':
    unittest.main()