"""
lexer_def.py - Lexing for OSH, YSH, and J8 Notation.

The OSH/YSH lexer has lexer modes, each with a regex -> Id mapping.

After changing this file, run:

    build/py.sh all

or at least:

    build/py.sh fastlex

Input Handling
--------------

Every line is NUL terminated:

    'one\n\0'   'last line\0'

which means that no regexes below should match \0.

For example, use [^'\0]+ instead of [^']+ .

If this rule isn't followed, we would read uninitialized memory past the
sentinel. Python's regex engine knows where the end of the input string is, so
it doesn't need a sentinel like \0.

The frontend/lexer_gen.py generator adds a pattern mapping \0 to Id.Eol_Tok.
"""

from _devbuild.gen.id_kind_asdl import Id, Id_t, Kind
from _devbuild.gen.types_asdl import lex_mode_e

from frontend import id_kind_def

from typing import Tuple

# Initialize spec that the lexer depends on.
ID_SPEC = id_kind_def.IdSpec({}, {})

id_kind_def.AddKinds(ID_SPEC)
id_kind_def.AddBoolKinds(ID_SPEC)  # must come second
id_kind_def.SetupTestBuiltin(ID_SPEC, {}, {}, {})


def C(pat, tok_type):
    # type: (str, Id_t) -> Tuple[bool, str, Id_t]
    """Lexer rule with a constant string, e.g. C('$*', VSub_Star)"""
    return (False, pat, tok_type)


def R(pat, tok_type):
    # type: (str, Id_t) -> Tuple[bool, str, Id_t]
    """Lexer rule with a regex string, e.g. R('\$[0-9]', VSub_Number)"""
    return (True, pat, tok_type)
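
# For example (illustrative), these helpers just build tuples:
#
#   C('$*', Id.VSub_Star)          -> (False, '$*', Id.VSub_Star)
#   R(r'\$[0-9]', Id.VSub_Number)  -> (True, r'\$[0-9]', Id.VSub_Number)
#
# The bool tells frontend/lexer_gen.py whether to treat the pattern as a
# literal string or as a regex when generating re2c rules.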

# See unit tests in frontend/match_test.py.
# We need the [^\0]* because the re2c translation assumes it's anchored like $.
SHOULD_HIJACK_RE = r'#![^\0]*sh[ \t\r\n][^\0]*'
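# For example, this matches '#!/bin/sh\n' and '#!/usr/bin/env bash\n', but
# not '#!/usr/bin/python\n' (no 'sh' followed by whitespace).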

# Separates words (\r is not whitespace here)
_SIGNIFICANT_SPACE = R(r'[ \t]+', Id.WS_Space)

_BACKSLASH = [
    # To be conservative, we could deny a set of chars similar to
    # _LITERAL_WHITELIST_REGEX, rather than allowing all the operator
    # characters like \( and \;.
    #
    # strict_backslash makes this stricter.
    R(r'\\[^\n\0]', Id.Lit_EscapedChar),
    C('\\\n', Id.Ignored_LineCont),
]

# Only 4 characters are backslash escaped inside "".
# https://www.gnu.org/software/bash/manual/bash.html#Double-Quotes
_DQ_BACKSLASH = [
    R(r'\\[$`"\\]', Id.Lit_EscapedChar),
    C('\\', Id.Lit_BadBackslash),  # syntax error in YSH, but NOT in OSH
]
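
# So inside "", '\$' is one Lit_EscapedChar token, while '\a' is
# Lit_BadBackslash followed by a literal 'a' (illustrative examples).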

VAR_NAME_RE = r'[a-zA-Z_][a-zA-Z0-9_]*'

# All Kind.VSub
_VARS = [
    # Unbraced variables
    R(r'\$' + VAR_NAME_RE, Id.VSub_DollarName),
    R(r'\$[0-9]', Id.VSub_Number),
    C(r'$!', Id.VSub_Bang),
    C(r'$@', Id.VSub_At),
    C(r'$#', Id.VSub_Pound),
    C(r'$$', Id.VSub_Dollar),
    C(r'$*', Id.VSub_Star),
    C(r'$-', Id.VSub_Hyphen),
    C(r'$?', Id.VSub_QMark),
]

# Kind.Left that are valid in double-quoted modes.

_LEFT_SUBS = [
    C('`', Id.Left_Backtick),
    C('$(', Id.Left_DollarParen),
    C('${', Id.Left_DollarBrace),
    # Parse zsh syntax, but don't execute it.
    # The examples we've seen so far are like ${(%):-} and ${(m)
    R(r'\$\{\([^)\0]+\)', Id.Left_DollarBraceZsh),
    C('$((', Id.Left_DollarDParen),
    C('$[', Id.Left_DollarBracket),
]

# Additional Kind.Left that are valid in unquoted modes.
_LEFT_UNQUOTED = [
    C('"', Id.Left_DoubleQuote),
    C("'", Id.Left_SingleQuote),
    C('$"', Id.Left_DollarDoubleQuote),
    C("$'", Id.Left_DollarSingleQuote),
]

_LEFT_PROCSUB = [
    C('<(', Id.Left_ProcSubIn),
    C('>(', Id.Left_ProcSubOut),
]

# The regexes below are in Python syntax, but are translated to re2c syntax by
# frontend/lexer_gen.py.
#
# http://re2c.org/manual/syntax/syntax.html
# https://docs.python.org/2/library/re.html
#
# We use a limited set of constructs:
# - + and * for repetition
# - Character classes [] with simple ranges and negation
# - Escapes like \n \0

LEXER_DEF = {}  # TODO: Should be a list so we enforce order.

# Anything until the end of the line is a comment. Does not match the newline
# itself. We want to switch modes and possibly process Op_Newline for here
# docs, etc.
LEXER_DEF[lex_mode_e.Comment] = [R(r'[^\n\0]*', Id.Ignored_Comment)]

# A whitelist to make bigger Lit_Chars tokens. We don't want one byte at a
# time.
#
# The shell language says that "any other byte" is a literal character --
# for example, unquoted $ \ ! are literal, not a syntax error.
#
# That is, a literal is defined NEGATIVELY, one character at a time. But here
# we define a SUBSET of literal chars POSITIVELY.

# The range \x80-\xff makes sure that UTF-8 sequences are a single token.
_LITERAL_WHITELIST_REGEX = r'[\x80-\xffa-zA-Z0-9_.\-]+'

_UNQUOTED = _BACKSLASH + _LEFT_SUBS + _LEFT_UNQUOTED + _LEFT_PROCSUB + _VARS + [
    # NOTE: We could add anything 128 and above to this character class? So
    # utf-8 characters don't get split?
    R(_LITERAL_WHITELIST_REGEX, Id.Lit_Chars),
    C('~', Id.Lit_Tilde),  # for tilde sub
    C('/', Id.Lit_Slash),  # also for tilde sub
    C(':', Id.Lit_Colon),  # for special PATH=a:~foo tilde detection
    C('$', Id.Lit_Dollar),  # shopt -u parse_dollar
    C('#', Id.Lit_Pound),  # For comments
    _SIGNIFICANT_SPACE,
    C('\n', Id.Op_Newline),
    C('&', Id.Op_Amp),
    C('|', Id.Op_Pipe),
    C('|&', Id.Op_PipeAmp),
    C('&&', Id.Op_DAmp),
    C('||', Id.Op_DPipe),
    C(';', Id.Op_Semi),
    # Case terminators
    C(';;', Id.Op_DSemi),
    C(';&', Id.Op_SemiAmp),
    C(';;&', Id.Op_DSemiAmp),
    C('(', Id.Op_LParen),
    C(')', Id.Op_RParen),
    R(r'[^\0]', Id.Lit_Other),  # any other single char is a literal
]
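
# Illustrative: with these rules, 'ls -l | wc' lexes as Lit_Chars('ls'),
# WS_Space, Lit_Chars('-l'), WS_Space, Op_Pipe, WS_Space, Lit_Chars('wc').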

# In ShCommand and DBracket states.
_EXTGLOB_BEGIN = [
    C(',(', Id.ExtGlob_Comma),  # YSH synonym for @(...)
    C('@(', Id.ExtGlob_At),
    C('*(', Id.ExtGlob_Star),
    C('+(', Id.ExtGlob_Plus),
    C('?(', Id.ExtGlob_QMark),
    C('!(', Id.ExtGlob_Bang),
]

KEYWORDS = [
    # NOTE: { is matched elsewhere
    C('[[', Id.KW_DLeftBracket),
    C('!', Id.KW_Bang),
    C('for', Id.KW_For),
    C('while', Id.KW_While),
    C('until', Id.KW_Until),
    C('do', Id.KW_Do),
    C('done', Id.KW_Done),
    C('in', Id.KW_In),
    C('case', Id.KW_Case),
    C('esac', Id.KW_Esac),
    C('if', Id.KW_If),
    C('fi', Id.KW_Fi),
    C('then', Id.KW_Then),
    C('else', Id.KW_Else),
    C('elif', Id.KW_Elif),
    C('function', Id.KW_Function),
    C('time', Id.KW_Time),

    # YSH
    C('const', Id.KW_Const),  # maybe remove this
    C('var', Id.KW_Var),
    C('setvar', Id.KW_SetVar),
    C('setglobal', Id.KW_SetGlobal),
    C('call', Id.KW_Call),
    C('proc', Id.KW_Proc),
    C('typed', Id.KW_Typed),
    C('func', Id.KW_Func),
]

# These are treated like builtins in bash, but keywords in OSH. However, we
# maintain compatibility with bash for the 'type' builtin.
CONTROL_FLOW = [
    C('break', Id.ControlFlow_Break),
    C('continue', Id.ControlFlow_Continue),
    C('return', Id.ControlFlow_Return),
    C('exit', Id.ControlFlow_Exit),
]

# Used by ysh/grammar_gen.py too
EXPR_WORDS = [
    C('null', Id.Expr_Null),
    C('true', Id.Expr_True),
    C('false', Id.Expr_False),
    C('and', Id.Expr_And),
    C('or', Id.Expr_Or),
    C('not', Id.Expr_Not),
    C('for', Id.Expr_For),
    C('is', Id.Expr_Is),
    C('in', Id.Expr_In),
    C('if', Id.Expr_If),
    C('else', Id.Expr_Else),

    # Unused: could be for func and proc literals
    #
    # Note: we also have lambda literals |x| x+1
    # I don't think we need them now, but the difference vs. func is that the
    # body is an expression. Note: JavaScript uses (x, y) => x + y, which
    # causes parsing problems.
    C('func', Id.Expr_Func),
    C('proc', Id.Expr_Proc),

    # / <capture d+/
    C('capture', Id.Expr_Capture),
    # / <capture d+ as date> /
    C('as', Id.Expr_As),
]

FD_VAR_NAME = r'\{' + VAR_NAME_RE + r'\}'

# File descriptors can only have two digits, like mksh; dash/zsh/etc. can
# have one.
FD_NUM = r'[0-9]?[0-9]?'
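
# Illustrative: '2>&1' lexes as Redir_GreatAnd('2>&') then Lit_Chars('1'),
# and '{fd}>out.txt' starts with Redir_Great('{fd}>').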

# These must be recognized in the ShCommand state, but can't be nested within
# [[.
# Keywords have to be checked before _UNQUOTED so we get <KW_If "if"> instead
# of <Lit_Chars "if">.
LEXER_DEF[lex_mode_e.ShCommand] = [
    # These four are not allowed within [[, so they are in ShCommand but not
    # _UNQUOTED.

    # e.g. beginning of NAME=val, which will always be longer than
    # _LITERAL_WHITELIST_REGEX.
    R(VAR_NAME_RE + r'\+?=', Id.Lit_VarLike),
    R(VAR_NAME_RE + r'\[', Id.Lit_ArrayLhsOpen),
    R(r'\]\+?=', Id.Lit_ArrayLhsClose),
    C('((', Id.Op_DLeftParen),

    # For static globbing, and [] for array literals
    C('[', Id.Lit_LBracket),  # e.g. A=(['x']=1)
    C(']', Id.Lit_RBracket),  # e.g. *.[ch]
    # NOTE: Glob_Star and Glob_QMark are for dynamic parsing
    C('*', Id.Lit_Star),
    C('?', Id.Lit_QMark),
    C('###', Id.Lit_TPound),  # like Lit_Pound, for doc comments
    C('...', Id.Lit_TDot),  # ... for multiline commands

    # For brace expansion {a,b}
    C('{', Id.Lit_LBrace),
    C('}', Id.Lit_RBrace),  # Also for var sub ${a}
    C(',', Id.Lit_Comma),
    C('=', Id.Lit_Equals),  # for = f(x) and x = 1+2*3
    C('@', Id.Lit_At),  # for detecting @[, @' etc. shopt -s parse_at_all

    # @array and @func(1, c)
    R('@' + VAR_NAME_RE, Id.Lit_Splice),  # for YSH splicing
    C('@[', Id.Lit_AtLBracket),  # @[split(x)]
    C('@{.', Id.Lit_AtLBraceDot),  # for split builtin sub @{.myproc arg1}
    R(FD_NUM + r'<', Id.Redir_Less),
    R(FD_NUM + r'>', Id.Redir_Great),
    R(FD_NUM + r'<<', Id.Redir_DLess),
    R(FD_NUM + r'<<<', Id.Redir_TLess),
    R(FD_NUM + r'>>', Id.Redir_DGreat),
    R(FD_NUM + r'<<-', Id.Redir_DLessDash),
    R(FD_NUM + r'>&', Id.Redir_GreatAnd),
    R(FD_NUM + r'<&', Id.Redir_LessAnd),
    R(FD_NUM + r'<>', Id.Redir_LessGreat),
    R(FD_NUM + r'>\|', Id.Redir_Clobber),
    R(FD_VAR_NAME + r'<', Id.Redir_Less),
    R(FD_VAR_NAME + r'>', Id.Redir_Great),
    R(FD_VAR_NAME + r'<<', Id.Redir_DLess),
    R(FD_VAR_NAME + r'<<<', Id.Redir_TLess),
    R(FD_VAR_NAME + r'>>', Id.Redir_DGreat),
    R(FD_VAR_NAME + r'<<-', Id.Redir_DLessDash),
    R(FD_VAR_NAME + r'>&', Id.Redir_GreatAnd),
    R(FD_VAR_NAME + r'<&', Id.Redir_LessAnd),
    R(FD_VAR_NAME + r'<>', Id.Redir_LessGreat),
    R(FD_VAR_NAME + r'>\|', Id.Redir_Clobber),

    # No leading descriptor (2 is implied)
    C(r'&>', Id.Redir_AndGreat),
    C(r'&>>', Id.Redir_AndDGreat),
] + KEYWORDS + CONTROL_FLOW + _UNQUOTED + _EXTGLOB_BEGIN

# Preprocessing before ShCommand
LEXER_DEF[lex_mode_e.Backtick] = [
    C(r'`', Id.Backtick_Right),
    # A backslash, and then $ or ` or \
    R(r'\\[$`\\]', Id.Backtick_Quoted),
    # \" treated specially, depending on whether backticks are double-quoted!
    R(r'\\"', Id.Backtick_DoubleQuote),
    R(r'[^`\\\0]+', Id.Backtick_Other),  # contiguous run of literals
    R(r'[^\0]', Id.Backtick_Other),  # anything else
]

# DBRACKET: can be like ShCommand, except:
# - Don't really need redirects either... Redir_Less could be Op_Less
# - Id.Op_DLeftParen can't be nested inside.
LEXER_DEF[lex_mode_e.DBracket] = [
    C(']]', Id.Lit_DRightBracket),
    # Must be KW and not Op, because we can have stuff like [[ $foo == !* ]]
    # in addition to [[ ! a && b ]]
    C('!', Id.KW_Bang),
    C('<', Id.Op_Less),
    C('>', Id.Op_Great),
] + ID_SPEC.LexerPairs(Kind.BoolUnary) + \
    ID_SPEC.LexerPairs(Kind.BoolBinary) + \
    _UNQUOTED + _EXTGLOB_BEGIN

# Inside an extended glob, most characters are literals, including spaces and
# punctuation. We also accept \, $var, ${var}, "", etc. They can also be
# nested, so _EXTGLOB_BEGIN appears here.
#
# Example: echo @(<> <>|&&|'foo'|$bar)
LEXER_DEF[lex_mode_e.ExtGlob] = \
    _BACKSLASH + _LEFT_SUBS + _LEFT_UNQUOTED + _VARS + _EXTGLOB_BEGIN + [
    R(r'[^\\$`"\'|)@*+!?\0]+', Id.Lit_Chars),
    C('|', Id.Op_Pipe),
    C(')', Id.Op_RParen),  # may be translated to Id.ExtGlob_RParen
    R(r'[^\0]', Id.Lit_Other),  # everything else is literal
]

# Notes on BASH_REGEX states
#
# From bash manual:
#
# - Any part of the pattern may be quoted to force the quoted portion to be
#   matched as a string.
# - Bracket expressions in regular expressions must be treated carefully,
#   since normal quoting characters lose their meanings between brackets.
# - If the pattern is stored in a shell variable, quoting the variable
#   expansion forces the entire pattern to be matched as a string.
#
# Is there a re.escape function? It's just like EscapeGlob and UnescapeGlob.
#
# TODO: For testing, write a script to extract and save regexes... and compile
# them with regcomp. I've only seen constant regexes.
#
# bash code: ( | ) are special

LEXER_DEF[lex_mode_e.BashRegex] = _LEFT_SUBS + _LEFT_UNQUOTED + _VARS + [
    # Like lex_mode_e.ShCommand
    R(_LITERAL_WHITELIST_REGEX, Id.Lit_Chars),

    # Tokens for tilde sub. bash weirdness: the RHS of [[ x =~ ~ ]] is
    # expanded.
    C('~', Id.Lit_Tilde),
    C('/', Id.Lit_Slash),

    # Id.WS_Space delimits words. In lex_mode_e.BashRegexFakeInner, we
    # translate them to Id.Lit_Chars.
    _SIGNIFICANT_SPACE,

    # Analogous to Id.ExtGlob_* - we need to change lexer modes when we hit
    # this.
    C('(', Id.BashRegex_LParen),

    # Not special, this is like lex_mode_e.Outer
    C(')', Id.Op_RParen),

    # Copied and adapted from _UNQUOTED
    # \n & ; < > are parse errors OUTSIDE a group [[ s =~ ; ]]
    # but become allowed INSIDE a group [[ s =~ (;) ]]
    C('\n', Id.BashRegex_AllowedInParens),
    C('&', Id.BashRegex_AllowedInParens),
    C(';', Id.BashRegex_AllowedInParens),
    C('>', Id.BashRegex_AllowedInParens),
    C('<', Id.BashRegex_AllowedInParens),

    # e.g. | is Id.Lit_Other, not the pipe operator
    R(r'[^\0]', Id.Lit_Other),  # like _UNQUOTED, any other byte is literal
] + _BACKSLASH  # These have to come after RegexMeta

LEXER_DEF[lex_mode_e.DQ] = _DQ_BACKSLASH + [
    C('\\\n', Id.Ignored_LineCont),
] + _LEFT_SUBS + _VARS + [
    R(r'[^$`"\0\\]+', Id.Lit_Chars),  # matches a line at most
    C('$', Id.Lit_Dollar),  # completion of var names relies on this
    # NOTE: When parsing a here doc line, this token doesn't end it.
    C('"', Id.Right_DoubleQuote),
]
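
# Illustrative: the body of "hi $name" lexes as Lit_Chars('hi '),
# VSub_DollarName('$name'), then Right_DoubleQuote.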

_VS_ARG_COMMON = [
    C('/', Id.Lit_Slash),  # for patsub (not Id.VOp2_Slash)
    C('#', Id.Lit_Pound),  # for patsub prefix (not Id.VOp1_Pound)
    C('%', Id.Lit_Percent),  # for patsub suffix (not Id.VOp1_Percent)
    C('}', Id.Right_DollarBrace),  # For var sub "${a}"
    C('$', Id.Lit_Dollar),  # completion of var names relies on this
]

# We don't execute zsh var subs, but to find the closing } properly, we need
# to recognize \} and '}' and "}" $'}' etc.
LEXER_DEF[lex_mode_e.VSub_Zsh] = \
    _BACKSLASH + _LEFT_SUBS + _LEFT_UNQUOTED + _LEFT_PROCSUB + \
    [
    C('}', Id.Right_DollarBrace),  # For var sub "${a}"
    R(r'[^\0]', Id.Lit_Other),  # e.g. "$", must be last
]

# Kind.{Lit,Ignored,VSub,Left,Right,Eof}
LEXER_DEF[lex_mode_e.VSub_ArgUnquoted] = \
    _BACKSLASH + _VS_ARG_COMMON + _LEFT_SUBS + _LEFT_UNQUOTED + _LEFT_PROCSUB + \
    _VARS + _EXTGLOB_BEGIN + [

    # Token for tilde sub
    C('~', Id.Lit_Tilde),

    # - doesn't match ~ for tilde sub
    # - doesn't match < and > so it doesn't eat <()
    # - doesn't match @ ! ? + * so it doesn't eat _EXTGLOB_BEGIN -- ( alone is
    #   not enough
    R(r'[^$`~/}"\'\0\\#%<>@!?+*]+', Id.Lit_Chars),
    R(r'[^\0]', Id.Lit_Other),  # e.g. "$", must be last
]

# Kind.{Lit,Ignored,VSub,Left,Right,Eof}
LEXER_DEF[lex_mode_e.VSub_ArgDQ] = \
    _DQ_BACKSLASH + _VS_ARG_COMMON + _LEFT_SUBS + _VARS + [

    C(r'\}', Id.Lit_EscapedChar),  # For "${var-\}}"

    R(r'[^$`/}"\0\\#%]+', Id.Lit_Chars),  # matches a line at most

    # Weird wart: even in double quoted state, double quotes are allowed
    C('"', Id.Left_DoubleQuote),

    # Another weird wart of bash/mksh: $'' is recognized but NOT ''!
    C("$'", Id.Left_DollarSingleQuote),
]

# NOTE: Id.Ignored_LineCont is NOT supported in SQ state, as opposed to DQ
# state.
LEXER_DEF[lex_mode_e.SQ_Raw] = [
    R(r"[^'\0]+", Id.Lit_Chars),  # matches a line at most
    C("'", Id.Right_SingleQuote),
]

# The main purpose for EXPR_CHARS is in regex literals, e.g. [a-z \t \n].
#
# In YSH expressions, Chars are code point integers, so \u{1234} is the same
# as 0x1234. And \0 is 0x0.

# In Python:
#   chr(0x00012345) == u'\U00012345'
#
# In YSH:
#   0x00012345 == \u{12345}
#   chr(0x00012345) == chr(\u{12345}) == $'\u{012345}'

_U_BRACED_CHAR = R(r'\\[uU]\{[0-9a-fA-F]{1,6}\}', Id.Char_UBraced)

_X_CHAR_LOOSE = R(r'\\x[0-9a-fA-F]{1,2}', Id.Char_Hex)  # bash
_CHAR_YHEX = R(r'\\y[0-9a-fA-F]{2}', Id.Char_YHex)  # \yff - J8 only

_U4_CHAR_LOOSE = R(r'\\u[0-9a-fA-F]{1,4}', Id.Char_Unicode4)  # bash

_U4_CHAR_STRICT = R(r'\\u[0-9a-fA-F]{4}', Id.Char_Unicode4)  # JSON-only

#_JSON_ONE_CHAR = R(r'\\[\\"/bfnrt]', Id.Char_OneChar)
EXPR_CHARS = [
    # Allow the same backslash escapes as J8 strings, except:
    # - legacy \b \f
    # - unnecessary \/
    #
    # Note that \0 should be written \y00.
    R(r'''\\[\\"'nrt]''', Id.Char_OneChar),
    _CHAR_YHEX,

    # Eggex. This is a LITERAL translation to \xff in ERE? So it's not \yff.
    # It doesn't have semantics; it's just syntax.
    R(r'\\x[0-9a-fA-F]{2}', Id.Char_Hex),
    _U_BRACED_CHAR,
]

# Shared between echo -e and $''.
_C_STRING_COMMON = [

    # \x6 is valid in bash
    _X_CHAR_LOOSE,
    _U4_CHAR_LOOSE,
    R(r'\\U[0-9a-fA-F]{1,8}', Id.Char_Unicode8),
    R(r'\\[0abeEfrtnv\\]', Id.Char_OneChar),

    # e.g. \A is not an escape, and \x doesn't match a hex escape. We allow
    # it, but a lint tool could warn about it.
    C('\\', Id.Unknown_Backslash),
]

ECHO_E_DEF = _C_STRING_COMMON + [
    # Note: tokens above \0377 can either be truncated or be flagged as a
    # syntax error in strict mode.
    R(r'\\0[0-7]{1,3}', Id.Char_Octal4),
    C(r'\c', Id.Char_Stop),

    # e.g. 'foo', anything that's not a backslash escape
    R(r'[^\\\0]+', Id.Lit_Chars),
]
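
# Illustrative: with echo -e, the argument 'a\tb\0101' lexes as
# Lit_Chars('a'), Char_OneChar('\t'), Lit_Chars('b'), Char_Octal4('\0101').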

# https://json.org/

# Note that [0-9] has to come second, because Python chooses the first match.
_JSON_INT = r'-?([1-9][0-9]*|[0-9])'  # Numbers can't start with leading 0
_JSON_FRACTION = r'(\.[0-9]+)?'
_JSON_EXP = r'([eE][-+]?[0-9]+)?'
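
# Illustrative check with Python's re module (the production matcher is
# generated by re2c); _JSON_NUMBER is a hypothetical local name:
#
#   >>> import re
#   >>> _JSON_NUMBER = _JSON_INT + _JSON_FRACTION + _JSON_EXP
#   >>> bool(re.match(_JSON_NUMBER + r'$', '-0.5e+3'))
#   True
#   >>> bool(re.match(_JSON_NUMBER + r'$', '007'))  # leading zero rejected
#   False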

# R5RS extended alphabetic characters
# https://groups.csail.mit.edu/mac/ftpdir/scheme-reports/r5rs-html/r5rs_4.html
#
# ! $ % & * + - . / : < = > ? @ ^ _ ~

# Description from Guile Scheme -
# https://www.gnu.org/software/guile/manual/html_node/Symbol-Read-Syntax.html
#
# "The read syntax for a symbol is a sequence of letters, digits, and
# extended alphabetic characters, beginning with a character that cannot
# begin a number. In addition, the special cases of +, -, and ... are read as
# symbols even though numbers can begin with +, - or ."
#
# (They should have used regular languages!)

# We take out $ and @ for our splicing syntax, i.e. $unquote and
# @unquote-splicing. And : for now because we use it for name:value.

# Also note Scheme allows |a b| for symbols with funny chars, and Guile
# Scheme allows #{a b}#. We could use `a b` or (symbol "a b").

J8_SYMBOL_CHARS = r'!%&*+./<=>?^_~-'  # - is last for regex char class

# yapf: disable
J8_SYMBOL_RE = (
    r'[a-zA-Z' + J8_SYMBOL_CHARS + ']' +
    r'[a-zA-Z0-9' + J8_SYMBOL_CHARS + ']*')
# yapf: enable

_J8_LEFT = [
    C('"', Id.Left_DoubleQuote),  # JSON string
    C('j"', Id.Left_JDoubleQuote),  # JSON string with explicit J8 prefix
    # Three left quotes that are J8 only
    C("u'", Id.Left_USingleQuote),  # unicode string
    C("'", Id.Left_USingleQuote),  # '' is an alias for u'' in data, not code
    C("b'", Id.Left_BSingleQuote),  # byte string
]

J8_DEF = _J8_LEFT + [
    C('[', Id.J8_LBracket),
    C(']', Id.J8_RBracket),
    C('{', Id.J8_LBrace),
    C('}', Id.J8_RBrace),
    C('(', Id.J8_LParen),  # NIL8 only
    C(')', Id.J8_RParen),  # NIL8 only
    C(',', Id.J8_Comma),
    C(':', Id.J8_Colon),
    C('null', Id.J8_Null),
    C('true', Id.J8_Bool),
    C('false', Id.J8_Bool),
    R(_JSON_INT, Id.J8_Int),
    R(_JSON_INT + _JSON_FRACTION + _JSON_EXP, Id.J8_Float),

    # Identifier names come AFTER null true false.
    # - Happens to be the same as shell identifier names.
    # - Note that JS allows $ as an identifier, but we don't.
    # - Used for dict keys / NIL8 field names.
    R(VAR_NAME_RE, Id.J8_Identifier),

    # Symbol is a SUPERSET of Identifier. The first word in NIL8 can be
    # either Symbol or plain Identifier, but field names can only be
    # Identifier. JSON8 only has Identifier.
    #R(J8_SYMBOL_RE, Id.J8_Symbol),  # NIL8 only
    R(r'[~!@$%^&*+=|;./<>?-]+', Id.J8_Operator),  # NIL8 only
    R(r'[ \r\t]+', Id.Ignored_Space),
    # A separate token, to count lines for error messages
    C('\n', Id.Ignored_Newline),
    # comment is # until end of line
    # // comments are JavaScript style, but right now we might want them as
    # symbols?
    R(r'#[^\n\0]*', Id.Ignored_Comment),  # J8 only (JSON8, NIL8)

    # This will reject ASCII control chars
    R(r'[^\0]', Id.Unknown_Tok),
]

# Exclude control characters 0x00-0x1f, aka 0-31, in J8 data only (not YSH
# code)
_ASCII_CONTROL = R(r'[\x01-\x1F]', Id.Char_AsciiControl)

J8_LINES_DEF = _J8_LEFT + [
    # not sure if we want \r here - same as lex_mode_e.Expr
    R(r'[ \r\t]+', Id.WS_Space),
    R(r'[\n]', Id.J8_Newline),

    # doesn't match \t, which means tabs are allowed in the middle of
    # unquoted lines
    _ASCII_CONTROL,

    # not space or ' or " or ASCII control or EOF
    R(r'''[^ \t\r\n'"\x00-\x1F]+''', Id.Lit_Chars),
]

# https://json.org list of chars, plus '
_JSON_ONE_CHAR = R(r'\\[\\"/bfnrt]', Id.Char_OneChar)

# b'' u'' strings - what's common between code and data.
_J8_STR_COMMON = [
    C("'", Id.Right_SingleQuote),  # end for J8
    _JSON_ONE_CHAR,
    C("\\'", Id.Char_OneChar),  # since ' ends, allow \'
    _CHAR_YHEX,
    _U_BRACED_CHAR,  # \u{123456} - J8 only

    # osh/word_parse.py relies on this. It has to be consistent with $''
    # lexing, which uses _C_STRING_COMMON.
    C('\\', Id.Unknown_Backslash),
]

# Lexer for J8 strings in CODE.
LEXER_DEF[lex_mode_e.J8_Str] = _J8_STR_COMMON + [
    # Don't produce Char_AsciiControl tokens - that's only for data

    # will match invalid UTF-8 - we have a separate validation step
    R(r"[^\\'\0]+", Id.Lit_Chars),
]

# Lexer for J8 string data.
# ASCII control characters are disallowed in DATA, but not CODE!
J8_STR_DEF = _J8_STR_COMMON + [
    _ASCII_CONTROL,
    # will match invalid UTF-8 - we have a separate validation step
    R(r"[^\\'\x00-\x1F]+", Id.Lit_Chars),
]

# Lexer for JSON string data - e.g. "json \" \u1234"
JSON_STR_DEF = [
    C('"', Id.Right_DoubleQuote),  # end for JSON
    _JSON_ONE_CHAR,
    _U4_CHAR_STRICT,  # \u1234 - JSON only

    # High surrogate [\uD800, \uDC00)
    # Low surrogate [\uDC00, \uE000)
    # This pattern makes it easier to decode. Unpaired surrogates become
    # Id.Char_Unicode4.
    R(
        r'\\u[dD][89aAbB][0-9a-fA-F][0-9a-fA-F]\\u[dD][cCdDeEfF][0-9a-fA-F][0-9a-fA-F]',
        Id.Char_SurrogatePair),
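    # Illustrative: '\ud83d\ude00' is one Char_SurrogatePair token, which
    # decodes to U+1F600.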
    C('\\', Id.Unknown_Backslash),  # e.g. the \ before bad \z
    _ASCII_CONTROL,

    # Note: This will match INVALID UTF-8. UTF-8 validation is another step.
    R(r'[^\\"\x00-\x1F]+', Id.Lit_Chars),
]

_WHITESPACE = r'[ \t\r\n]*'  # ASCII whitespace doesn't have legacy \f \v

SH_NUMBER_DEF = [
    R('0', Id.ShNumber_Dec),
    R(r'[1-9][0-9]*', Id.ShNumber_Dec),
    R(r'0[0-7]+', Id.ShNumber_Oct),
    R(r'0x[0-9A-Fa-f]+', Id.ShNumber_Hex),
    R(r'[1-9][0-9]*#[0-9a-zA-Z@_]+', Id.ShNumber_BaseN),
    R(r'[^\0]', Id.Unknown_Tok),  # any other char
]
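
# Illustrative: '0755' lexes as ShNumber_Oct, '0x1f' as ShNumber_Hex, and
# '64#az' as ShNumber_BaseN.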

OCTAL3_RE = r'\\[0-7]{1,3}'

# https://www.gnu.org/software/bash/manual/html_node/Controlling-the-PromptEvaluator.html#Controlling-the-PromptEvaluator
PS1_DEF = [
    R(OCTAL3_RE, Id.PS_Octal3),
    R(r'\\[adehHjlnrstT@AuvVwW!#$\\]', Id.PS_Subst),
    # \D{%H:%M} strftime format
    R(r'\\D\{[^}\0]*\}', Id.PS_Subst),
    C(r'\[', Id.PS_LBrace),  # non-printing
    C(r'\]', Id.PS_RBrace),
    R(r'[^\\\0]+', Id.PS_Literals),
    # e.g. \x is not a valid escape.
    C('\\', Id.PS_BadBackslash),
]
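
# Illustrative: the prompt r'\u@\h \$ ' lexes as PS_Subst('\u'),
# PS_Literals('@'), PS_Subst('\h'), PS_Literals(' '), PS_Subst('\$'),
# PS_Literals(' ').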

# NOTE: Id.Ignored_LineCont is also not supported here, even though the whole
# point of it is that it supports other backslash escapes like \n! It just
# becomes a regular backslash.
LEXER_DEF[lex_mode_e.SQ_C] = _C_STRING_COMMON + [
    # Weird special case matching bash: backslash that ends a line. We emit
    # this token literally in OSH, but disable it in YSH.
    C('\\\n', Id.Unknown_Backslash),

    # Silly difference! In echo -e, the syntax is \0377, but here it's
    # $'\377', with no leading 0.
    R(OCTAL3_RE, Id.Char_Octal3),

    # ' and " are escaped in $'' mode, but not echo -e.
    C(r"\'", Id.Char_OneChar),
    C(r'\"', Id.Char_OneChar),

    # e.g. 'foo', anything that's not a backslash escape or '
    R(r"[^\\'\0]+", Id.Lit_Chars),
    C("'", Id.Right_SingleQuote),
]

LEXER_DEF[lex_mode_e.PrintfOuter] = _C_STRING_COMMON + [
    R(OCTAL3_RE, Id.Char_Octal3),
    R(r"[^%\\\0]+", Id.Lit_Chars),
    C('%%', Id.Format_EscapedPercent),
    C('%', Id.Format_Percent),
]

# Maybe: bash also supports %(strftime)T
LEXER_DEF[lex_mode_e.PrintfPercent] = [
    # Flags
    R('[- +#]', Id.Format_Flag),
    C('0', Id.Format_Zero),
    R('[1-9][0-9]*', Id.Format_Num),
    C('*', Id.Format_Star),
    C('.', Id.Format_Dot),
    # We support dsq. The others we parse to display an error message.
    R('[disqbcouxXeEfFgG]', Id.Format_Type),
    R(r'\([^()\0]*\)T', Id.Format_Time),
    R(r'[^\0]', Id.Unknown_Tok),  # any other char
]

LEXER_DEF[lex_mode_e.VSub_1] = [
    R(VAR_NAME_RE, Id.VSub_Name),
    # ${11} is valid, compared to $11 which is $1 and then literal 1.
    R(r'[0-9]+', Id.VSub_Number),
    C('!', Id.VSub_Bang),
    C('@', Id.VSub_At),
    C('#', Id.VSub_Pound),
    C('$', Id.VSub_Dollar),
    C('*', Id.VSub_Star),
    C('-', Id.VSub_Hyphen),
    C('?', Id.VSub_QMark),
    C('.', Id.VSub_Dot),  # ${.myproc builtin sub}
    C('}', Id.Right_DollarBrace),
    C('\\\n', Id.Ignored_LineCont),
    C('\n', Id.Unknown_Tok),  # newline not allowed inside ${}
    R(r'[^\0]', Id.Unknown_Tok),  # any char except newline
]

LEXER_DEF[lex_mode_e.VSub_2] = \
    ID_SPEC.LexerPairs(Kind.VTest) + \
    ID_SPEC.LexerPairs(Kind.VOp0) + \
    ID_SPEC.LexerPairs(Kind.VOpYsh) + \
    ID_SPEC.LexerPairs(Kind.VOp1) + \
    ID_SPEC.LexerPairs(Kind.VOp2) + \
    ID_SPEC.LexerPairs(Kind.VOp3) + [
    C('}', Id.Right_DollarBrace),

    C('\\\n', Id.Ignored_LineCont),
    C('\n', Id.Unknown_Tok),  # newline not allowed inside ${}
    R(r'[^\0]', Id.Unknown_Tok),  # any char except newline
]

_EXPR_ARITH_SHARED = [
    C('\\\n', Id.Ignored_LineCont),
    R(r'[^\0]', Id.Unknown_Tok)  # any char. This should be a syntax error.
]

# https://www.gnu.org/software/bash/manual/html_node/Shell-Arithmetic.html#Shell-Arithmetic
LEXER_DEF[lex_mode_e.Arith] = \
    _LEFT_SUBS + _VARS + _LEFT_UNQUOTED + [

    # Arithmetic expressions can cross newlines.
    R(r'[ \t\r\n]+', Id.Ignored_Space),

    # Examples of arith constants:
    #   64#azAZ
    #   0xabc 0xABC
    #   0123
    # A separate digits token makes this easier to parse STATICALLY. But this
    # doesn't help with DYNAMIC parsing.
    R(VAR_NAME_RE, Id.Lit_ArithVarLike),  # for variable names or 64#_
    R(r'[0-9]+', Id.Lit_Digits),
    C('@', Id.Lit_At),  # for 64#@ or ${a[@]}
    C('#', Id.Lit_Pound),  # for 64#a

    # TODO: 64#@ interferes with VS_AT. Hm.
] + ID_SPEC.LexerPairs(Kind.Arith) + _EXPR_ARITH_SHARED

# A lexer for the parser that converts globs to extended regexes. Since we're
# only parsing character classes ([^[:space:][:alpha:]]) as opaque blobs, we
# don't need lexer modes here.
GLOB_DEF = [
    # These could be operators in the glob, or just literals in a char class,
    # e.g. touch '?'; echo [?].
    C('*', Id.Glob_Star),
    C('?', Id.Glob_QMark),

    # For negation. Treated as operators inside [], but literals outside.
    C('!', Id.Glob_Bang),
    C('^', Id.Glob_Caret),

    # Character classes.
    C('[', Id.Glob_LBracket),
    C(']', Id.Glob_RBracket),

    # There is no whitelist of characters; backslashes are unconditionally
    # removed. With libc.fnmatch(), the pattern r'\f' matches 'f' but not
    # '\\f'. See libc_test.py.
    R(r'\\[^\0]', Id.Glob_EscapedChar),
    C('\\', Id.Glob_BadBackslash),  # Trailing single backslash

    # For efficiency, combine other characters into a single token, e.g. 'py'
    # in '*.py' or 'alpha' in '[[:alpha:]]'.
    R(r'[a-zA-Z0-9_]+', Id.Glob_CleanLiterals),  # no regex escaping
    R(r'[^\0]', Id.Glob_OtherLiteral),  # anything else -- examine the char
]
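
# Illustrative: '*.py' lexes as Glob_Star, Glob_OtherLiteral('.'),
# Glob_CleanLiterals('py').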

# History expansion. We're doing this as "pre-lexing" since that's what bash
# and zsh seem to do. Example:
#
#   $ foo=x
#   $ echo $
#   $ !!foo  # expands to echo $foo and prints x
#
# We can also reuse this in the RootCompleter to expand history
# interactively.
#
# bash note: handled in lib/readline/histexpand.c. Quite messy and handles
# quotes AGAIN.
#
# Note: \! gets expanded to literal \! for the real lexer, but no history
# expansion occurs.

HISTORY_DEF = [
    # Common operators.
    R(r'![!*^$]', Id.History_Op),

    # By command number.
    R(r'!-?[0-9]+', Id.History_Num),

    # Search by prefix of substring (optional '?').
    # NOTE: there are no numbers allowed here! Bash doesn't seem to support
    # it. No hyphen since it conflicts with $-1 too.
    #
    # Required trailing whitespace is there to avoid conflict with
    # [!charclass] and ${!indirect}. This is a simpler hack than the one bash
    # has. See frontend/lex_test.py.
    R(r'!\??[a-zA-Z_/.][0-9a-zA-Z_/.]+[ \t\r\n]', Id.History_Search),

    # Comment is until end of line
    R(r"#[^\0]*", Id.History_Other),

    # Single quoted, e.g. 'a' or $'\n'. Terminated by another single quote
    # or end of string.
    R(r"'[^'\0]*'?", Id.History_Other),

    # Runs of chars that are definitely not special
    R(r"[^!\\'#\0]+", Id.History_Other),

    # Escaped characters. \! disables history
    R(r'\\[^\0]', Id.History_Other),
    # Other single chars, like a trailing \ or !
    R(r'[^\0]', Id.History_Other),
]

BRACE_RANGE_DEF = [
    R(r'-?[0-9]+', Id.Range_Int),
    R(r'[a-zA-Z]', Id.Range_Char),  # just a single character
    R(r'\.\.', Id.Range_Dots),
    R(r'[^\0]', Id.Range_Other),  # invalid
]
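
# Illustrative: the inside of {1..5} lexes as Range_Int('1'),
# Range_Dots('..'), Range_Int('5'); {a..z} uses Range_Char instead.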

#
# YSH lexing
#

# Valid in lex_mode_e.{Expr,DQ}
# Used by ysh/grammar_gen.py
YSH_LEFT_SUBS = [
    C('$(', Id.Left_DollarParen),
    C('${', Id.Left_DollarBrace),
    C('$[', Id.Left_DollarBracket),  # TODO: Implement $[x]
]

# Valid in lex_mode_e.Expr, but not valid in DQ
# Used by ysh/grammar_gen.py

YSH_LEFT_UNQUOTED = [
    # Double quoted
    C('"', Id.Left_DoubleQuote),
    C('$"', Id.Left_DollarDoubleQuote),  # $"" is synonym for ""
    C('j"', Id.Left_JDoubleQuote),  # for printing ERROR
    # Single quoted
    C("'", Id.Left_SingleQuote),
    C("r'", Id.Left_RSingleQuote),
    C("u'", Id.Left_USingleQuote),
    C("b'", Id.Left_BSingleQuote),
    C("$'", Id.Left_DollarSingleQuote),  # legacy
    C('^"', Id.Left_CaretDoubleQuote),
    C('"""', Id.Left_TDoubleQuote),
    C('$"""', Id.Left_DollarTDoubleQuote),
    # In expression mode, we add the r'' and c'' prefixes for '' and $''.
    C("'''", Id.Left_TSingleQuote),
    C("r'''", Id.Left_RTSingleQuote),
    C("u'''", Id.Left_UTSingleQuote),
    C("b'''", Id.Left_BTSingleQuote),
    C('@(', Id.Left_AtParen),  # Split Command Sub
    C('^(', Id.Left_CaretParen),  # Block literals in expression mode
    C('^[', Id.Left_CaretBracket),  # Expr literals
    C('^{', Id.Left_CaretBrace),  # Unused
    C(':|', Id.Left_ColonPipe),  # shell-like word arrays.
    C('%(', Id.Left_PercentParen),  # old syntax for shell-like word arrays.
    C('%[', Id.Expr_Reserved),  # Maybe: like %() without unquoted [], {}
    C('%{', Id.Expr_Reserved),  # Table literals
    # t = %{
    #    name:Str  age:Int
    #    'andy c'  10
    # }
    # Significant newlines. No unquoted [], {}

    # Not sure if we'll use these
    C('@{', Id.Expr_Reserved),
    C('@[', Id.Expr_Reserved),

    # Idea: Set literals are #{a, b} like Clojure
]

# Used by ysh/grammar_gen.py
EXPR_OPS = [
    # Terminator
    C(';', Id.Op_Semi),
    C('(', Id.Op_LParen),
    C(')', Id.Op_RParen),
    # NOTE: type expressions are expressions, e.g. Dict[Str, Int]
    C('[', Id.Op_LBracket),
    C(']', Id.Op_RBracket),
    C('{', Id.Op_LBrace),
    C('}', Id.Op_RBrace),
]

# Newline is significant, but sometimes elided by expr_parse.py.
_EXPR_NEWLINE_COMMENT = [
    C('\n', Id.Op_Newline),
    R(r'#[^\n\0]*', Id.Ignored_Comment),
    # Like lex_mode_e.Arith, \r is whitespace even without \n
    R(r'[ \t\r]+', Id.Ignored_Space),
]

# Note: if you call match.LooksLikeInteger(s), mops.FromStr(s) may still
# fail. However you should call BOTH, because we don't want to rely on the
# underlying strtoll() to define the language accepted.
LOOKS_LIKE_INTEGER = _WHITESPACE + '-?[0-9]+' + _WHITESPACE

# TODO: use for YSH comparison operators > >= < <=
#
# Python allows 0 to be written 00 or 0_0_0, which is weird. But let's be
# consistent, and avoid '00' turning into a float!
_YSH_DECIMAL_INT_RE = r'[0-9](_?[0-9])*'

LOOKS_LIKE_YSH_INT = _WHITESPACE + '-?' + _YSH_DECIMAL_INT_RE + _WHITESPACE

_YSH_FLOAT_RE = (
    _YSH_DECIMAL_INT_RE +
    # Unlike Python, the exponent can't be like 42e5_000. There's no use for
    # it, because 1e309 is already inf. Let's keep our code simple.
    r'(\.' + _YSH_DECIMAL_INT_RE + r')?([eE][+\-]?[0-9]+)?')

# Ditto, used for YSH comparison operators
# Added optional -?
# Example: -3_000_000.000_001e12
LOOKS_LIKE_YSH_FLOAT = _WHITESPACE + '-?' + _YSH_FLOAT_RE + _WHITESPACE
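
# Illustrative check with Python's re module (the compiled lexer uses the
# re2c-generated matcher instead):
#
#   >>> import re
#   >>> bool(re.match(LOOKS_LIKE_YSH_FLOAT + r'$', ' -3_000_000.000_001e12 '))
#   True
#   >>> bool(re.match(LOOKS_LIKE_YSH_FLOAT + r'$', '1__2'))
#   False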

# Python 3 float literals:
#
#   digitpart     ::= digit (["_"] digit)*
#   fraction      ::= "." digitpart
#   exponent      ::= ("e" | "E") ["+" | "-"] digitpart
#   pointfloat    ::= [digitpart] fraction | digitpart "."
#   exponentfloat ::= (digitpart | pointfloat) exponent
#   floatnumber   ::= pointfloat | exponentfloat

# NOTE: Borrowing tokens from Arith (i.e. $(( )) ), but not using
# LexerPairs().
LEXER_DEF[lex_mode_e.Expr] = \
    _VARS + YSH_LEFT_SUBS + YSH_LEFT_UNQUOTED + EXPR_OPS + EXPR_WORDS + \
    EXPR_CHARS + [

    # https://docs.python.org/3/reference/lexical_analysis.html#integer-literals
    #
    #   integer      ::= decinteger | bininteger | octinteger | hexinteger
    #   decinteger   ::= nonzerodigit (["_"] digit)* | "0"+ (["_"] "0")*
    #   bininteger   ::= "0" ("b" | "B") (["_"] bindigit)+
    #   octinteger   ::= "0" ("o" | "O") (["_"] octdigit)+
    #   hexinteger   ::= "0" ("x" | "X") (["_"] hexdigit)+
    #   nonzerodigit ::= "1"..."9"
    #   digit        ::= "0"..."9"
    #   bindigit     ::= "0" | "1"
    #   octdigit     ::= "0"..."7"
    #   hexdigit     ::= digit | "a"..."f" | "A"..."F"

    R(_YSH_DECIMAL_INT_RE, Id.Expr_DecInt),

    R(r'0[bB](_?[01])+', Id.Expr_BinInt),
    R(r'0[oO](_?[0-7])+', Id.Expr_OctInt),
    R(r'0[xX](_?[0-9a-fA-F])+', Id.Expr_HexInt),

    R(_YSH_FLOAT_RE, Id.Expr_Float),

    # These can be looked up as keywords separately, so you can enforce that
    # they have space around them?
    R(VAR_NAME_RE, Id.Expr_Name),

    R('%' + VAR_NAME_RE, Id.Expr_Symbol),

    #
    # Arith
    #

    C(',', Id.Arith_Comma),
    C(':', Id.Arith_Colon),  # for slicing a[1:2], and mylist:pop()

    C('?', Id.Arith_QMark),  # regex postfix

    C('+', Id.Arith_Plus),  # arith infix, regex postfix
    C('-', Id.Arith_Minus),  # arith infix, regex postfix
    C('*', Id.Arith_Star),
    C('^', Id.Arith_Caret),  # xor
    C('/', Id.Arith_Slash),
    C('%', Id.Arith_Percent),

    C('**', Id.Arith_DStar),  # exponentiation
    C('++', Id.Arith_DPlus),  # Option for string/list concatenation

    C('<', Id.Arith_Less),
    C('>', Id.Arith_Great),
    C('<=', Id.Arith_LessEqual),
    C('>=', Id.Arith_GreatEqual),
    C('===', Id.Expr_TEqual),
    C('!==', Id.Expr_NotDEqual),

    C('==', Id.Unknown_DEqual),  # user must choose === or ~==

    C('&&', Id.Unknown_DAmp),
    C('||', Id.Unknown_DPipe),

    # Bitwise operators
    C('&', Id.Arith_Amp),
    C('|', Id.Arith_Pipe),
    C('>>', Id.Arith_DGreat),
    C('<<', Id.Arith_DLess),  # Doesn't Java also have <<< ?

    # Bitwise complement, as well as infix pattern matching
    C('~', Id.Arith_Tilde),
    C('!~', Id.Expr_NotTilde),
    C('~~', Id.Expr_DTilde),
    C('!~~', Id.Expr_NotDTilde),

    # Left out for now:
    #   ++ --     -- needed for loops, awk?
    #   ! && ||   -- needed for find dialect
    #   = += etc.

    C('=', Id.Arith_Equal),

    C('+=', Id.Arith_PlusEqual),
    C('-=', Id.Arith_MinusEqual),
    C('*=', Id.Arith_StarEqual),
    C('/=', Id.Arith_SlashEqual),
    C('%=', Id.Arith_PercentEqual),

    C('>>=', Id.Arith_DGreatEqual),
    C('<<=', Id.Arith_DLessEqual),
    C('&=', Id.Arith_AmpEqual),
    C('|=', Id.Arith_PipeEqual),
    C('^=', Id.Arith_CaretEqual),  # xor, consistent with Arith_Caret above

    # Augmented assignment that YSH has, but sh and OSH don't have
    C('**=', Id.Expr_DStarEqual),
    C('//=', Id.Expr_DSlashEqual),

    #
    # Expr
    #

    C('!', Id.Expr_Bang),  # For eggex negation

    C('//', Id.Expr_DSlash),  # For YSH integer division
    C('~==', Id.Expr_TildeDEqual),  # approximate equality

    C('.', Id.Expr_Dot),  # d.key is alias for d['key']
    C('..', Id.Unknown_DDot),  # legacy half-open range 1..5
    C('..<', Id.Expr_DDotLessThan),  # half-open range 1..<5
    C('..=', Id.Expr_DDotEqual),  # closed range 1..=5
    C('->', Id.Expr_RArrow),  # s->startswith()
    C('$', Id.Expr_Dollar),  # legacy regex end: /d+ $/ (better written /d+ >/)

    # Reserved this. Go uses it for channels, etc.
    # I guess it conflicts with -4<-3, but that's OK -- spaces suffice.
    C('<-', Id.Expr_Reserved),
    C('=>', Id.Expr_RDArrow),  # for df => filter(age > 10)
                               # and match (x) { 1 => "one" }
    # note: other languages use |>
    # R/dplyr uses %>%

    C('...', Id.Expr_Ellipsis),  # f(...args) and maybe a[:, ...]

    # For multiline regex literals?
    C('///', Id.Expr_Reserved),

    # Splat operators
    C('@', Id.Expr_At),
    # NOTE: Unused
    C('@@', Id.Expr_DoubleAt),
] + _EXPR_NEWLINE_COMMENT + _EXPR_ARITH_SHARED

LEXER_DEF[lex_mode_e.FuncParens] = [
    # () with spaces
    R(r'[ \t]*\([ \t]*\)', Id.LookAhead_FuncParens),
    # anything else
    R(r'[^\0]', Id.Unknown_Tok),
]