OILS / osh / word_.py View on Github | oils.pub

807 lines, 387 significant
1"""
2word.py - Utility functions for words, e.g. treating them as "tokens".
3"""
4
5from _devbuild.gen.id_kind_asdl import Id, Kind, Id_t, Kind_t
6from _devbuild.gen.syntax_asdl import (
7 Token,
8 CompoundWord,
9 DoubleQuoted,
10 SingleQuoted,
11 word,
12 word_e,
13 word_t,
14 word_str,
15 word_part,
16 word_part_t,
17 word_part_e,
18 AssocPair,
19)
20from frontend import consts
21from frontend import lexer
22from mycpp import mylib
23from mycpp.mylib import tagswitch, log
24
25from typing import Tuple, Optional, List, Any, cast, TYPE_CHECKING
26if TYPE_CHECKING:
27 from osh.word_parse import WordParser
28
29_ = log
30
31
def LiteralId(p):
    # type: (word_part_t) -> Id_t
    """If the WordPart consists of a single literal token, return its Id.

    Used for Id.KW_For, or Id.RBrace, etc.
    """
    # Anything that's not a Literal gets a sentinel Id that never compares
    # equal to a real token Id.
    if p.tag() != word_part_e.Literal:
        return Id.Undefined_Tok  # unequal to any other Id
    return cast(Token, p).id
43
44
def _EvalWordPart(part):
    # type: (word_part_t) -> Tuple[bool, str, bool]
    """Evaluate a WordPart at PARSE TIME.

    Used for:

    1. here doc delimiters
    2. function names
    3. for loop variable names
    4. Compiling constant regex words at parse time
    5. a special case for ${a////c} to see if we got a leading slash in the
    pattern.

    Returns:
      3-tuple of
        ok: bool, success.  If there are parts that can't be statically
          evaluated, then we return false.
        value: a string (not Value)
        quoted: whether any part of the word was quoted
    """
    UP_part = part
    with tagswitch(part) as case:
        if case(word_part_e.Literal):
            tok = cast(Token, UP_part)
            # Weird performance issue: if we change this to lexer.LazyStr(),
            # the parser slows down, e.g. on configure-coreutils from 805 B
            # irefs to ~830 B.  The real issue is that we should avoid calling
            # this from CommandParser - for the Hay node.
            return True, lexer.TokenVal(tok), False
            #return True, lexer.LazyStr(tok), False

        elif case(word_part_e.EscapedLiteral):
            part = cast(word_part.EscapedLiteral, UP_part)
            if mylib.PYTHON:
                # Sanity checks that only run in the Python build; the token
                # is expected to be exactly a backslash plus one char.
                val = lexer.TokenVal(part.token)
                assert len(val) == 2, val  # e.g. \*
                assert val[0] == '\\'
            # Strip the leading backslash; the result counts as quoted.
            s = lexer.TokenSliceLeft(part.token, 1)
            return True, s, True

        elif case(word_part_e.SingleQuoted):
            part = cast(SingleQuoted, UP_part)
            return True, part.sval, True

        elif case(word_part_e.DoubleQuoted):
            # Recursively evaluate the inner parts; any unevaluable inner
            # part makes the whole double-quoted part unevaluable.
            part = cast(DoubleQuoted, UP_part)
            strs = []  # type: List[str]
            for p in part.parts:
                ok, s, _ = _EvalWordPart(p)
                if not ok:
                    return False, '', True
                strs.append(s)

            return True, ''.join(strs), True  # At least one part was quoted!

        elif case(word_part_e.YshArrayLiteral, word_part_e.InitializerLiteral,
                  word_part_e.ZshVarSub, word_part_e.CommandSub,
                  word_part_e.SimpleVarSub, word_part_e.BracedVarSub,
                  word_part_e.TildeSub, word_part_e.ArithSub,
                  word_part_e.ExtGlob, word_part_e.Splice,
                  word_part_e.ExprSub):
            # These all require runtime information, so they can't be
            # statically evaluated.
            return False, '', False

        else:
            raise AssertionError(part.tag())
110
111
def FastStrEval(w):
    # type: (CompoundWord) -> Optional[str]
    """Fast path: evaluate a word to a string without full word evaluation.

    Detects common case

    (1) CompoundWord([LiteralPart(Id.LitChars)])
        For echo -e, test x -lt 0, etc.
    (2) single quoted word like 'foo'

    Other patterns we could detect are:
    (1) "foo"
    (2) "$var" and "${var}" - I think these are very common in OSH code (but not YSH)
        - I think val_ops.Stringify() can handle all the errors

    Returns:
      The string value, or None if the word doesn't match a fast-path shape.
    """
    # Only single-part words qualify for the fast path.
    if len(w.parts) != 1:
        return None

    part0 = w.parts[0]
    UP_part0 = part0
    with tagswitch(part0) as case:
        if case(word_part_e.Literal):
            part0 = cast(Token, UP_part0)

            if part0.id in (Id.Lit_Chars, Id.Lit_LBracket, Id.Lit_RBracket):
                # Could add more tokens in this case
                # e.g. + is Lit_Other, and it's a Token in 'expr'
                # Right now it's Lit_Chars (e.g. ls -l) and [ and ] because I
                # know those are common
                # { } are not as common
                return lexer.LazyStr(part0)

            else:
                # e.g. Id.Lit_Star needs to be glob expanded
                # TODO: Consider moving Id.Lit_Star etc. to Kind.MaybeGlob?
                return None

        elif case(word_part_e.SingleQuoted):
            part0 = cast(SingleQuoted, UP_part0)
            # TODO: SingleQuoted should have lazy (str? sval) field
            # This would only affect multi-line strings though?
            return part0.sval

        else:
            # e.g. DoubleQuoted can't be optimized to a string, because it
            # might have "$@" and such
            return None
158
159
def StaticEval(UP_w):
    # type: (word_t) -> Tuple[bool, str, bool]
    """Evaluate a Compound at PARSE TIME.

    Returns:
      (ok, value, quoted) - ok is False when any part needs runtime
      information; quoted is True when at least one part was quoted.
    """
    quoted = False

    # e.g. for ( instead of for (( is a token word, not a CompoundWord
    if UP_w.tag() != word_e.Compound:
        return False, '', quoted

    w = cast(CompoundWord, UP_w)

    pieces = []  # type: List[str]
    for p in w.parts:
        ok, s, q = _EvalWordPart(p)
        if not ok:
            # 'quoted' reflects only the parts evaluated so far
            return False, '', quoted
        quoted = quoted or q  # at least one part was quoted
        pieces.append(s)
    #log('StaticEval parts %s', w.parts)
    return True, ''.join(pieces), quoted
181
182
183# From bash, general.c, unquoted_tilde_word():
184# POSIX.2, 3.6.1: A tilde-prefix consists of an unquoted tilde character at
185# the beginning of the word, followed by all of the characters preceding the
186# first unquoted slash in the word, or all the characters in the word if there
187# is no slash...If none of the characters in the tilde-prefix are quoted, the
# characters in the tilde-prefix following the tilde shall be treated as a
189# possible login name.
190#define TILDE_END(c) ((c) == '\0' || (c) == '/' || (c) == ':')
191#
192# So an unquoted tilde can ALWAYS start a new lex mode? You respect quotes and
193# substitutions.
194#
195# We only detect ~Lit_Chars and split. So we might as well just write a regex.
196
197
def TildeDetect(UP_w):
    # type: (word_t) -> Optional[CompoundWord]
    """Detect tilde expansion in a word.

    It might begin with Literal that needs to be turned into a TildeSub.
    (It depends on whether the second token begins with slash).

    If so, it return a new word.  Otherwise return None.

    NOTE:
    - The regex for Lit_TildeLike could be expanded.  Right now it's
      conservative, like Lit_Chars without the /.
    - It's possible to write this in a mutating style, since only the first token
      is changed.  But note that we CANNOT know this during lexing.
    """
    # Only CompoundWord is eligible; a BracedTree can't be tilde expanded.
    if UP_w.tag() != word_e.Compound:
        return None
    return TildeDetect2(cast(CompoundWord, UP_w))
219
220
def TildeDetect2(w):
    # type: (CompoundWord) -> Optional[CompoundWord]
    """If tilde sub is detected, returns a new CompoundWord.

    Accepts CompoundWord, not word_t.  After brace expansion, we know we have a
    List[CompoundWord].

    Tilde detection:

    YES:
      ~ ~/
      ~bob ~bob/

    NO:
      ~bob# ~bob#/
      ~bob$x
      ~$x

    Pattern to match (all must be word_part_e.Literal):

      Lit_Tilde Lit_Chars? (Lit_Slash | %end)
    """
    if len(w.parts) == 0:  # ${a-} has no parts
        return None

    part0 = w.parts[0]
    id0 = LiteralId(part0)
    if id0 != Id.Lit_Tilde:
        return None  # $x is not TildeSub

    tok0 = cast(Token, part0)

    new_parts = []  # type: List[word_part_t]

    # Case: the whole word is a bare ~
    if len(w.parts) == 1:  # ~
        new_parts.append(word_part.TildeSub(tok0, None, None))
        return CompoundWord(new_parts)

    id1 = LiteralId(w.parts[1])
    # Case: ~ immediately followed by / - TildeSub with no user name
    if id1 == Id.Lit_Slash:  # ~/
        new_parts.append(word_part.TildeSub(tok0, None, None))
        new_parts.extend(w.parts[1:])
        return CompoundWord(new_parts)

    if id1 != Id.Lit_Chars:
        return None  # ~$x is not TildeSub

    tok1 = cast(Token, w.parts[1])

    # Case: ~ followed by a user name, at the end of the word
    if len(w.parts) == 2:  # ~foo
        new_parts.append(word_part.TildeSub(tok0, tok1, lexer.TokenVal(tok1)))
        return CompoundWord(new_parts)

    id2 = LiteralId(w.parts[2])
    if id2 != Id.Lit_Slash:  # ~foo$x is not TildeSub
        return None

    # Case: ~foo/ - TildeSub with user name, keep the rest of the parts
    new_parts.append(word_part.TildeSub(tok0, tok1, lexer.TokenVal(tok1)))
    new_parts.extend(w.parts[2:])
    return CompoundWord(new_parts)
281
282
def TildeDetectAssign(w):
    # type: (CompoundWord) -> None
    """Detects multiple tilde sub, like a=~:~/src:~bob

    MUTATES its argument.

    Pattern to match (all must be word_part_e.Literal):

      Lit_Tilde Lit_Chars? (Lit_Slash | Lit_Colon | %end)
    """
    parts = w.parts

    # Bail out EARLY if there are no ~ at all
    has_tilde = False
    for part in parts:
        if LiteralId(part) == Id.Lit_Tilde:
            has_tilde = True
            break
    if not has_tilde:
        return  # Avoid further work and allocations

    # Avoid IndexError, since we have to look ahead up to 2 tokens
    # (the None sentinels are popped off again at the end)
    parts.append(None)
    parts.append(None)

    new_parts = []  # type: List[word_part_t]

    tilde_could_be_next = True  # true at first, and true after :

    i = 0
    n = len(parts)

    while i < n:
        part0 = parts[i]
        if part0 is None:
            break

        #log('i = %d', i)
        #log('part0 %s', part0)

        # Skip tilde in middle of word, like a=foo~bar
        if tilde_could_be_next and LiteralId(part0) == Id.Lit_Tilde:
            # If ~ ends the string, we have
            part1 = parts[i + 1]
            part2 = parts[i + 2]

            tok0 = cast(Token, part0)

            if part1 is None:  # x=foo:~
                new_parts.append(word_part.TildeSub(tok0, None, None))
                break  # at end

            id1 = LiteralId(part1)

            if id1 in (Id.Lit_Slash, Id.Lit_Colon):  # x=foo:~/ or x=foo:~:
                new_parts.append(word_part.TildeSub(tok0, None, None))
                new_parts.append(part1)
                i += 2
                # NOTE(review): tilde_could_be_next is not updated on this
                # path (or the two continue paths below), so it keeps its
                # previous value - presumably acceptable for these inputs,
                # but TODO: confirm against bash behavior for e.g. a=~/~
                continue

            if id1 != Id.Lit_Chars:
                new_parts.append(part0)  # unchanged
                new_parts.append(part1)  # ...
                i += 2
                continue  # x=foo:~$x is not tilde sub

            tok1 = cast(Token, part1)

            if part2 is None:  # x=foo:~foo
                # consume both
                new_parts.append(
                    word_part.TildeSub(tok0, tok1, lexer.TokenVal(tok1)))
                break  # at end

            id2 = LiteralId(part2)
            if id2 not in (Id.Lit_Slash, Id.Lit_Colon):  # x=foo:~foo$x
                new_parts.append(part0)  # unchanged
                new_parts.append(part1)  # ...
                new_parts.append(part2)  # ...
                i += 3
                continue

            new_parts.append(
                word_part.TildeSub(tok0, tok1, lexer.TokenVal(tok1)))
            new_parts.append(part2)
            i += 3

            tilde_could_be_next = (id2 == Id.Lit_Colon)

        else:
            new_parts.append(part0)
            i += 1

            tilde_could_be_next = (LiteralId(part0) == Id.Lit_Colon)

    # Remove the two None sentinels added above
    parts.pop()
    parts.pop()

    # Mutate argument
    w.parts = new_parts
383
384
def TildeDetectAll(words):
    # type: (List[word_t]) -> List[word_t]
    """Run TildeDetect() over each word, keeping words without a tilde sub."""
    result = []  # type: List[word_t]
    for w2 in words:
        detected = TildeDetect(w2)
        result.append(detected if detected is not None else w2)
    return result
395
396
def HasArrayPart(w):
    # type: (CompoundWord) -> bool
    """Return whether any part is an InitializerLiteral.  Used in cmd_parse."""
    return any(
        p.tag() == word_part_e.InitializerLiteral for p in w.parts)
404
405
def ShFunctionName(w):
    # type: (CompoundWord) -> str
    """Returns a valid shell function name, or the empty string.

    TODO: Maybe use this regex to validate:

    FUNCTION_NAME_RE = r'[^{}\[\]=]*'

    Bash is very lenient, but that would disallow confusing characters, for
    better error messages on a[x]=(), etc.
    """
    ok, name, quoted = StaticEval(w)
    # A name must be statically evaluable, and function names should not
    # have quotes
    if ok and not quoted:
        return name
    return ''
422
423
def LooksLikeArithVar(UP_w):
    # type: (word_t) -> Optional[Token]
    """Return a token if this word looks like an arith var.

    NOTE: This can't be combined with DetectShAssignment because VarLike and
    ArithVarLike must be different tokens.  Otherwise _ReadCompoundWord will be
    confused between array assignments foo=(1 2) and function calls foo(1, 2).
    """
    if UP_w.tag() != word_e.Compound:
        return None

    w = cast(CompoundWord, UP_w)
    if len(w.parts) != 1:
        return None

    first = w.parts[0]
    if LiteralId(first) != Id.Lit_ArithVarLike:
        return None

    return cast(Token, first)
444
445
def IsVarLike(w):
    # type: (CompoundWord) -> bool
    """Tests whether a word looks like FOO=bar.

    This is a quick test for the command parser to distinguish:

    func() { echo hi; }
    func=(1 2 3)
    """
    # An empty word can't be an assignment; otherwise check the first part.
    return len(w.parts) != 0 and LiteralId(w.parts[0]) == Id.Lit_VarLike
459
460
def DetectShAssignment(w):
    # type: (CompoundWord) -> Tuple[Optional[Token], Optional[Token], int]
    """Detects whether a word looks like FOO=bar or FOO[x]=bar.

    Returns:
      left_token or None   # Lit_VarLike, Lit_ArrayLhsOpen, or None if it's not an
                           # assignment
      close_token,         # Lit_ArrayLhsClose if it was detected, or None
      part_offset          # where to start the value word, 0 if not an assignment

    Cases:

    s=1
    s+=1
    s[x]=1
    s[x]+=1

    a=()
    a+=()
    a[x]=(
    a[x]+=()  # We parse this (as bash does), but it's never valid because arrays
              # can't be nested.
    """
    no_token = None  # type: Optional[Token]

    n = len(w.parts)
    if n == 0:
        return no_token, no_token, 0

    UP_part0 = w.parts[0]
    id0 = LiteralId(UP_part0)
    if id0 == Id.Lit_VarLike:
        tok = cast(Token, UP_part0)
        return tok, no_token, 1  # everything after first token is the value

    if id0 == Id.Lit_ArrayLhsOpen:
        tok0 = cast(Token, UP_part0)
        # NOTE that a[]=x should be an error.  We don't want to silently decay.
        if n < 2:
            return no_token, no_token, 0
        # Scan for the closing ]= token
        for i in xrange(1, n):
            UP_part = w.parts[i]
            if LiteralId(UP_part) == Id.Lit_ArrayLhsClose:
                tok_close = cast(Token, UP_part)
                return tok0, tok_close, i + 1

    # Nothing detected.  Could be 'foobar', or 'a[x+1+2' without the closing ].
    return no_token, no_token, 0
509
510
def DetectAssocPair(w):
    # type: (CompoundWord) -> Optional[AssocPair]
    """Like DetectShAssignment, but for A=(['k']=v ['k2']=v)

    The key and the value are both strings.  So we just pick out
    word_part.  Unlike a[k]=v, A=([k]=v) is NOT ambiguous, because the
    [k] syntax is only used for associative array literals, as opposed
    to indexed array literals.

    Returns:
      AssocPair if the word looks like [k]=v, or None otherwise.
    """
    parts = w.parts

    # A CompoundWord can have zero parts, e.g. from ${a-}; guard against
    # IndexError on parts[0]
    if len(parts) == 0:
        return None

    if LiteralId(parts[0]) != Id.Lit_LBracket:
        return None

    n = len(parts)
    for i in xrange(n):
        id_ = LiteralId(parts[i])
        if id_ == Id.Lit_ArrayLhsClose:  # ]=
            # e.g. if we have [$x$y]=$a$b
            key = CompoundWord(parts[1:i])  # $x$y
            value = CompoundWord(parts[i + 1:])  # $a$b

            # ]+= means append to the entry
            has_plus = lexer.IsPlusEquals(cast(Token, parts[i]))

            # Type-annotated intermediate value for mycpp translation
            return AssocPair(key, value, has_plus)

    return None
538
539
def IsControlFlow(w):
    # type: (CompoundWord) -> Tuple[Kind_t, Optional[Token]]
    """Tests if a word is a control flow word (break, continue, return, ...).

    Returns:
      (Kind.ControlFlow, token) on a match, else (Kind.Undefined, None).
    """
    no_token = None  # type: Optional[Token]

    if len(w.parts) == 1:
        first = w.parts[0]
        token_type = LiteralId(first)
        if token_type != Id.Undefined_Tok:
            token_kind = consts.GetKind(token_type)
            if token_kind == Kind.ControlFlow:
                return token_kind, cast(Token, first)

    return Kind.Undefined, no_token
558
559
def LiteralToken(UP_w):
    # type: (word_t) -> Optional[Token]
    """If a word consists of a single literal token, return it.

    Otherwise return None.
    """
    # We're casting here because this function is called by the CommandParser for
    # var, setvar, '...', etc.  It's easier to cast in one place.
    assert UP_w.tag() == word_e.Compound, UP_w
    w = cast(CompoundWord, UP_w)

    if len(w.parts) == 1:
        p = w.parts[0]
        if p.tag() == word_part_e.Literal:
            return cast(Token, p)

    return None
579
580
def BraceToken(UP_w):
    # type: (word_t) -> Optional[Token]
    """If a word has Id.Lit_LBrace or Lit_RBrace, return a Token.

    This is a special case for osh/cmd_parse.py

    The WordParser changes Id.Op_LBrace from ExprParser into Id.Lit_LBrace, so we
    may get a token, not a word.
    """
    tag = UP_w.tag()

    if tag == word_e.Operator:
        tok = cast(Token, UP_w)
        assert tok.id in (Id.Lit_LBrace, Id.Lit_RBrace), tok
        return tok

    if tag == word_e.Compound:
        return LiteralToken(cast(CompoundWord, UP_w))

    raise AssertionError()
602
603
def AsKeywordToken(UP_w):
    # type: (word_t) -> Token
    """Given a word that IS A CompoundWord containing just a keyword, return
    the single token at the start.

    Asserts (rather than returning None) because callers are expected to
    have already determined the word is a keyword.
    """
    assert UP_w.tag() == word_e.Compound, UP_w
    w = cast(CompoundWord, UP_w)

    part = w.parts[0]
    assert part.tag() == word_part_e.Literal, part
    tok = cast(Token, part)
    assert consts.GetKind(tok.id) == Kind.KW, tok
    return tok
616
617
def AsOperatorToken(word):
    # type: (word_t) -> Token
    """For a word that IS an operator (word.Token), return that token.

    This must only be called on a word which is known to be an operator
    (word.Token).
    """
    tag = word.tag()
    assert tag == word_e.Operator, word
    return cast(Token, word)
627
628
629#
630# Polymorphic between Token and Compound
631#
632
633
def ArithId(w):
    # type: (word_t) -> Id_t
    """Used by shell arithmetic parsing.

    Operator words yield their token Id; anything else is a generic
    Word_Compound.
    """
    if w.tag() != word_e.Operator:
        assert isinstance(w, CompoundWord)
        return Id.Word_Compound

    return cast(Token, w).id
643
644
def BoolId(w):
    # type: (word_t) -> Id_t
    """Categorize a word for the boolean ([[ and test/[) parser.

    Returns the token Id of boolean operators, or Id.Word_Compound for
    ordinary words.
    """
    UP_w = w
    with tagswitch(w) as case:
        if case(word_e.String):  # for test/[
            w = cast(word.String, UP_w)
            return w.id

        elif case(word_e.Operator):
            tok = cast(Token, UP_w)
            return tok.id

        elif case(word_e.Compound):
            w = cast(CompoundWord, UP_w)

            if len(w.parts) != 1:
                return Id.Word_Compound

            token_type = LiteralId(w.parts[0])
            if token_type == Id.Undefined_Tok:
                return Id.Word_Compound  # It's a regular word

            # This is outside the BoolUnary/BoolBinary namespace, but works the same.
            if token_type in (Id.KW_Bang, Id.Lit_DRightBracket):
                return token_type  # special boolean "tokens"

            token_kind = consts.GetKind(token_type)
            if token_kind in (Kind.BoolUnary, Kind.BoolBinary):
                return token_type  # boolean operators

            return Id.Word_Compound

        else:
            # I think Empty never happens in this context?
            raise AssertionError(w.tag())
680
681
def CommandId(w):
    # type: (word_t) -> Id_t
    """Used by CommandParser.

    Returns a specific token Id for keywords and YSH punctuation words,
    or Id.Word_Compound for generic words.
    """
    UP_w = w
    with tagswitch(w) as case:
        if case(word_e.Operator):
            tok = cast(Token, UP_w)
            return tok.id

        elif case(word_e.Compound):
            w = cast(CompoundWord, UP_w)

            # Fine-grained categorization of SINGLE literal parts
            if len(w.parts) != 1:
                return Id.Word_Compound  # generic word

            token_type = LiteralId(w.parts[0])
            if token_type == Id.Undefined_Tok:
                return Id.Word_Compound  # Not Kind.Lit, generic word

            if token_type in (Id.Lit_LBrace, Id.Lit_RBrace, Id.Lit_Equals,
                              Id.Lit_TDot):
                # - { } are for YSH braces
                # - = is for the = keyword
                # - ... is to start multiline mode
                #
                # TODO: Should we use Op_{LBrace,RBrace} and Kind.Op when
                # parse_brace?  Lit_Equals could be KW_Equals?
                return token_type

            token_kind = consts.GetKind(token_type)
            if token_kind == Kind.KW:
                return token_type  # Id.KW_Var, etc.

            return Id.Word_Compound  # generic word

        else:
            raise AssertionError(w.tag())
720
721
def CommandKind(w):
    # type: (word_t) -> Kind_t
    """The CommandKind is for coarse-grained decisions in the CommandParser.

    NOTE: This is inconsistent with CommandId(), because we never return
    Kind.KW or Kind.Lit.  But the CommandParser is easier to write this way.

    For example, these are valid redirects to a Kind.Word, and the parser
    checks:

    echo hi > =
    echo hi > {

    Invalid:
    echo hi > (
    echo hi > ;
    """
    if w.tag() != word_e.Operator:
        # Any non-operator word is coarsely a Kind.Word
        return Kind.Word

    # CommandParser uses Kind.Redir, Kind.Op, Kind.Eof, etc.
    return consts.GetKind(cast(Token, w).id)
745
746
747# Stubs for converting RHS of assignment to expression mode.
748# For osh2oil.py
def IsVarSub(w):
    # type: (word_t) -> bool
    """Return whether it's any var sub, or a double quoted one.

    Stub: currently always returns False (see comment above about osh2oil.py).
    """
    return False
753
754
755# Doesn't translate with mycpp because of dynamic %
def ErrorWord(error_str):
    # type: (str) -> CompoundWord
    """Wrap an error string in a CompoundWord, as a dummy Lit_Chars token."""
    t = lexer.DummyToken(Id.Lit_Chars, error_str)
    return CompoundWord([t])
760
761
def Pretty(w):
    # type: (word_t) -> str
    """Return a string to display to the user."""
    if w.tag() != word_e.String:
        return word_str(w.tag())  # tag name

    s = cast(word.String, w)
    if s.id == Id.Eof_Real:
        return 'EOF'
    return repr(s.s)
774
775
class ctx_EmitDocToken(object):
    """For doc comments.

    Context manager that turns on doc-token emission in the WordParser for
    the duration of the 'with' block, and turns it off on exit.
    """

    def __init__(self, w_parser):
        # type: (WordParser) -> None
        w_parser.EmitDocToken(True)
        self.w_parser = w_parser

    def __enter__(self):
        # type: () -> None
        pass

    def __exit__(self, type, value, traceback):
        # type: (Any, Any, Any) -> None
        # Restore normal lexing (doc tokens off), even on exception
        self.w_parser.EmitDocToken(False)
791
792
class ctx_Multiline(object):
    """For multiline commands.

    Context manager that puts the WordParser in multiline mode for the
    duration of the 'with' block, and leaves it on exit.
    """

    def __init__(self, w_parser):
        # type: (WordParser) -> None
        w_parser.Multiline(True)
        self.w_parser = w_parser

    def __enter__(self):
        # type: () -> None
        pass

    def __exit__(self, type, value, traceback):
        # type: (Any, Any, Any) -> None
        # Leave multiline mode, even on exception
        self.w_parser.Multiline(False)