#!/usr/bin/env python2
from __future__ import print_function

import unittest

from lazylex import html  # module under test

log = html.log

# Relative path: run the tests from the repository root.
with open('lazylex/testdata.html') as f:
    TEST_HTML = f.read()
|


def _MakeTagLexer(s):
    """Return a TagLexer reset to cover all of s."""
    lex = html.TagLexer(s)
    lex.Reset(0, len(s))
    return lex
|


def _PrintTokens(lex):
    log('')
    log('tag = %r', lex.TagName())
    for tok, start, end in lex.Tokens():
        log('%s %r', tok, lex.s[start:end])
|


class RegexTest(unittest.TestCase):

    def testDotAll(self):
        import re

        # Without re.DOTALL, '.' matches any character EXCEPT a newline.
        # (Note that $ likewise matches at end of line, not end of string.)
        p1 = re.compile(r'.')
        print(p1.match('\n'))  # None

        p2 = re.compile(r'.', re.DOTALL)
        print(p2.match('\n'))  # match

        # Inside a character class, '.' is literal, so [.\n] matches '\n'.
        #p3 = re.compile(r'[.\n]', re.VERBOSE)
        p3 = re.compile(r'[.\n]')
        print(p3.match('\n'))  # match

        print('Negation')

        # A negated class like [^>] DOES match '\n', unlike '.'.
        p4 = re.compile(r'[^>]')
        print(p4.match('\n'))  # match
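
        # Hedged addendum to the notes above: $ also matches just before a
        # trailing newline, while \Z matches only at the true end of the
        # string.
        print(re.search(r'hi$', 'hi\n'))   # match
        print(re.search(r'hi\Z', 'hi\n'))  # None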
|


class TagLexerTest(unittest.TestCase):

    def testTagLexer(self):
        # Invalid!
        #lex = _MakeTagLexer('< >')
        #print(lex.Tag())

        lex = _MakeTagLexer('<a>')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a novalue>')
        _PrintTokens(lex)

        # Note: we could add a HasAttr() method, like DOM hasAttribute().
        # As is, <a novalue> means lex.GetAttrRaw('novalue') == None.
        # https://developer.mozilla.org/en-US/docs/Web/API/Element/hasAttribute
        self.assertEqual(None, lex.GetAttrRaw('novalue'))
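
        # So, through GetAttrRaw() alone, a valueless attribute is
        # indistinguishable from an absent one -- both return None:
        self.assertEqual(None, lex.GetAttrRaw('missing'))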
|

        lex = _MakeTagLexer('<a href="double quoted">')
        _PrintTokens(lex)

        self.assertEqual('double quoted', lex.GetAttrRaw('href'))
        self.assertEqual(None, lex.GetAttrRaw('oops'))

        lex = _MakeTagLexer('<a href=foo class="bar">')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a href=foo class="bar" />')
        _PrintTokens(lex)

        lex = _MakeTagLexer('<a href="?foo=1&bar=2" />')
        self.assertEqual('?foo=1&bar=2', lex.GetAttrRaw('href'))
|

    def testTagName(self):
        lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual('a', lex.TagName())

    def testAllAttrs(self):
        """AllAttrsRaw() returns a [('key', 'value')] list for all attributes."""
|
        # Self-closing tag
        lex = _MakeTagLexer('<a href=foo class="bar" />')
        self.assertEqual([('href', 'foo'), ('class', 'bar')],
                         lex.AllAttrsRaw())

        lex = _MakeTagLexer('<a href="?foo=1&bar=2" />')
        self.assertEqual([('href', '?foo=1&bar=2')], lex.AllAttrsRaw())
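
        # Since the pairs are ordered (name, value), dict() gives a natural
        # lookup structure.  (A sketch; if a name were repeated, the last
        # occurrence would win.)
        attrs = dict(lex.AllAttrsRaw())
        self.assertEqual('?foo=1&bar=2', attrs['href'])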
|


class LexerTest(unittest.TestCase):

    # Related code:
    # - IndexLinker in devtools/make_help.py
    # - <pre> sections in doc/html_help.py
    # - TocExtractor in devtools/cmark.py
|

    def testPstrip(self):
        """Placeholder: remove anything like this:

        <p><pstrip> </pstrip></p>
        """
        pass

    def testCommentParse(self):
        # First, the whole test file should tokenize without Invalid tokens.
        n = len(TEST_HTML)
        for tok_id, end_pos in html._Tokens(TEST_HTML, 0, n):
            if tok_id == html.Invalid:
                raise RuntimeError('Invalid token in lazylex/testdata.html')
            print(tok_id)
|

        Tok = html.Tok
        # The exact indentation of this string matters: the end positions
        # asserted below count the leading spaces.
        h = '''
        hi <!-- line 1
                line 2 --><br/>'''
        print(repr(h))
        lex = html.ValidTokens(h)

        # Each token is (tok_id, end_pos), where end_pos is an exclusive
        # end offset into h.
        tok_id, pos = next(lex)
        self.assertEqual(12, pos)
        self.assertEqual(Tok.RawData, tok_id)

        tok_id, pos = next(lex)
        log('tok %r', html.TokenName(tok_id))
        self.assertEqual(50, pos)
        self.assertEqual(Tok.Comment, tok_id)

        tok_id, pos = next(lex)
        self.assertEqual(55, pos)
        self.assertEqual(Tok.StartEndTag, tok_id)

        tok_id, pos = next(lex)
        self.assertEqual(55, pos)
        self.assertEqual(Tok.EndOfStream, tok_id)
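
        # Hedged cross-check of those offsets against the string itself:
        # RawData ends where the comment starts, and the comment ends where
        # <br/> starts.
        self.assertEqual(12, h.find('<!--'))
        self.assertEqual(50, h.find('<br/>'))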
|

    def testValid(self):
        Tok = html.Tok

        lex = html.ValidTokens('<a>hi</a>')

        # End positions: '<a>' ends at 3, 'hi' at 5, '</a>' at 9.
        tok_id, pos = next(lex)
        self.assertEqual(3, pos)
        self.assertEqual(Tok.StartTag, tok_id)

        tok_id, pos = next(lex)
        self.assertEqual(5, pos)
        self.assertEqual(Tok.RawData, tok_id)

        tok_id, pos = next(lex)
        self.assertEqual(9, pos)
        self.assertEqual(Tok.EndTag, tok_id)

        tok_id, pos = next(lex)
        self.assertEqual(9, pos)
        self.assertEqual(Tok.EndOfStream, tok_id)

        # The same input through the pull-style API: call Read() until
        # EndOfStream.
        lex = html.Lexer('<a>hi</a>')
        while True:
            tok_id, pos = lex.Read()
            print('%d %s' % (pos, html.TokenName(tok_id)))
            if tok_id == Tok.EndOfStream:
                break
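
        # The final Read() should agree with the generator above (hedged:
        # assumes Lexer.Read() reports the same end positions as
        # ValidTokens()).
        self.assertEqual(9, pos)
        self.assertEqual(Tok.EndOfStream, tok_id)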
|
|

    def testInvalid(self):
        Tok = html.Tok

        # A bare '&' is not a valid entity reference.
        lex = html.ValidTokens('<a>&')

        tok_id, pos = next(lex)
        self.assertEqual(3, pos)
        self.assertEqual(Tok.StartTag, tok_id)

        try:
            tok_id, pos = next(lex)
        except html.LexError as e:
            print(e)
        else:
            self.fail('Expected LexError')
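
        # Equivalent check with assertRaises (a sketch using the same
        # html.ValidTokens API):
        lex = html.ValidTokens('<a>&')
        next(lex)  # consume the valid StartTag
        self.assertRaises(html.LexError, next, lex)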
|


if __name__ == '__main__':
    unittest.main()