OILS / core / executor.py View on Github | oilshell.org

733 lines, 397 significant
1"""executor.py."""
2from __future__ import print_function
3
4from errno import EINTR
5
6from _devbuild.gen.id_kind_asdl import Id
7from _devbuild.gen.option_asdl import builtin_i
8from _devbuild.gen.runtime_asdl import RedirValue, trace
9from _devbuild.gen.syntax_asdl import (
10 command,
11 command_e,
12 CommandSub,
13 CompoundWord,
14 loc,
15 loc_t,
16)
17from builtin import hay_ysh
18from core import dev
19from core import error
20from core import process
21from core.error import e_die, e_die_status
22from core import pyos
23from core import pyutil
24from core import state
25from display import ui
26from core import vm
27from frontend import consts
28from frontend import lexer
29from mycpp.mylib import log, print_stderr
30
31import posix_ as posix
32
33from typing import cast, Dict, List, Tuple, Optional, TYPE_CHECKING
34if TYPE_CHECKING:
35 from _devbuild.gen.runtime_asdl import (cmd_value, CommandStatus,
36 StatusArray)
37 from _devbuild.gen.syntax_asdl import command_t
38 from builtin import trap_osh
39 from core import optview
40 from core import state
41 from core.vm import _Builtin
42
43_ = log
44
45
46class _ProcessSubFrame(object):
47 """To keep track of diff <(cat 1) <(cat 2) > >(tac)"""
48
49 def __init__(self):
50 # type: () -> None
51
52 # These objects appear unconditionally in the main loop, and aren't
53 # commonly used, so we manually optimize [] into None.
54
55 self._to_wait = [] # type: List[process.Process]
56 self._to_close = [] # type: List[int] # file descriptors
57 self._locs = [] # type: List[loc_t]
58 self._modified = False
59
60 def WasModified(self):
61 # type: () -> bool
62 return self._modified
63
64 def Append(self, p, fd, status_loc):
65 # type: (process.Process, int, loc_t) -> None
66 self._modified = True
67
68 self._to_wait.append(p)
69 self._to_close.append(fd)
70 self._locs.append(status_loc)
71
72 def MaybeWaitOnProcessSubs(self, waiter, status_array):
73 # type: (process.Waiter, StatusArray) -> None
74
75 # Wait in the same order that they were evaluated. That seems fine.
76 for fd in self._to_close:
77 posix.close(fd)
78
79 codes = [] # type: List[int]
80 locs = [] # type: List[loc_t]
81 for i, p in enumerate(self._to_wait):
82 #log('waiting for %s', p)
83 st = p.Wait(waiter)
84 codes.append(st)
85 locs.append(self._locs[i])
86
87 status_array.codes = codes
88 status_array.locs = locs
89
90
91# Big flags for RunSimpleCommand
92IS_LAST_CMD = 1 << 1
93NO_CALL_PROCS = 1 << 2 # command ls suppresses function lookup
94USE_DEFAULT_PATH = 1 << 3 # for command -p ls changes the path
95
96# Copied from var.c in dash
97DEFAULT_PATH = [
98 '/usr/local/sbin', '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin',
99 '/bin'
100]
101
102
103class ShellExecutor(vm._Executor):
104 """An executor combined with the OSH language evaluators in osh/ to create
105 a shell interpreter."""
106
    def __init__(
            self,
            mem,  # type: state.Mem
            exec_opts,  # type: optview.Exec
            mutable_opts,  # type: state.MutableOpts
            procs,  # type: state.Procs
            hay_state,  # type: hay_ysh.HayState
            builtins,  # type: Dict[int, _Builtin]
            search_path,  # type: state.SearchPath
            ext_prog,  # type: process.ExternalProgram
            waiter,  # type: process.Waiter
            tracer,  # type: dev.Tracer
            job_control,  # type: process.JobControl
            job_list,  # type: process.JobList
            fd_state,  # type: process.FdState
            trap_state,  # type: trap_osh.TrapState
            errfmt  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        # Pure wiring of collaborators; no side effects happen here.
        vm._Executor.__init__(self)
        self.mem = mem
        self.exec_opts = exec_opts
        self.mutable_opts = mutable_opts  # for IsDisabled(), not mutating
        self.procs = procs
        self.hay_state = hay_state
        self.builtins = builtins
        self.search_path = search_path
        self.ext_prog = ext_prog
        self.waiter = waiter
        self.tracer = tracer
        self.multi_trace = tracer.multi_trace
        self.job_control = job_control
        # sleep 5 & puts a (PID, job#) entry here.  And then "jobs" displays it.
        self.job_list = job_list
        self.fd_state = fd_state
        self.trap_state = trap_state
        self.errfmt = errfmt
        # Stack of frames, one per nested word evaluation; see PushProcessSub /
        # PopProcessSub.  clean_frame_pool recycles unmodified frames to avoid
        # allocations in the common case of commands with no process subs.
        self.process_sub_stack = []  # type: List[_ProcessSubFrame]
        self.clean_frame_pool = []  # type: List[_ProcessSubFrame]

        # When starting a pipeline in the foreground, we need to pass a handle to it
        # through the evaluation of the last node back to ourselves for execution.
        # We use this handle to make sure any processes forked for the last part of
        # the pipeline are placed into the same process group as the rest of the
        # pipeline. Since there is, by design, only ever one foreground pipeline and
        # any pipelines started within subshells run in their parent's process
        # group, we only need one pointer here, not some collection.
        self.fg_pipeline = None  # type: Optional[process.Pipeline]
155
    def CheckCircularDeps(self):
        # type: () -> None
        # cmd_ev is attached after construction (presumably to break a circular
        # dependency with the command evaluator); verify the wiring happened
        # before this executor is used.
        assert self.cmd_ev is not None
159
    def _MakeProcess(self, node, inherit_errexit, inherit_errtrace):
        # type: (command_t, bool, bool) -> process.Process
        """Assume we will run the node in another process.

        Return a process.

        The process is NOT started here; callers add state changes (redirects,
        process group) and then Start/Run it.  Raises a fatal error for
        invalid control flow like 'break | less'.
        """
        UP_node = node
        if node.tag() == command_e.ControlFlow:
            node = cast(command.ControlFlow, UP_node)
            # Pipelines or subshells with control flow are invalid, e.g.:
            # - break | less
            # - continue | less
            # - ( return )
            # NOTE: This could be done at parse time too.
            if node.keyword.id != Id.ControlFlow_Exit:
                e_die(
                    'Invalid control flow %r in pipeline / subshell / background'
                    % lexer.TokenVal(node.keyword), node.keyword)

        # NOTE: If ErrExit(), we could be verbose about subprogram errors?  This
        # only really matters when executing 'exit 42', because the child shell
        # inherits errexit and will be verbose.  Other notes:
        #
        # - We might want errors to fit on a single line so they don't get
        #   interleaved.
        # - We could turn the `exit` builtin into a error.FatalRuntime exception
        #   and get this check for "free".
        thunk = process.SubProgramThunk(self.cmd_ev, node, self.trap_state,
                                        self.multi_trace, inherit_errexit,
                                        inherit_errtrace)
        p = process.Process(thunk, self.job_control, self.job_list,
                            self.tracer)
        return p
193
    def RunBuiltin(self, builtin_id, cmd_val):
        # type: (int, cmd_value.Argv) -> int
        """Run a builtin.

        Also called by the 'builtin' builtin.

        Returns the builtin's exit status; 1 on an I/O error (including a
        failed stdout flush afterward), 2 on a usage error.
        """
        self.tracer.OnBuiltin(builtin_id, cmd_val.argv)

        builtin_func = self.builtins[builtin_id]

        io_errors = []  # type: List[error.IOError_OSError]
        # Flush stdout when the builtin finishes; errors are collected into
        # io_errors rather than raised, and reported below.
        with vm.ctx_FlushStdout(io_errors):
            # note: could be second word, like 'builtin read'
            with ui.ctx_Location(self.errfmt, cmd_val.arg_locs[0]):
                try:
                    status = builtin_func.Run(cmd_val)
                    assert isinstance(status, int)
                except (IOError, OSError) as e:
                    self.errfmt.PrintMessage(
                        '%s builtin I/O error: %s' %
                        (cmd_val.argv[0], pyutil.strerror(e)),
                        cmd_val.arg_locs[0])
                    return 1
                except error.Usage as e:
                    arg0 = cmd_val.argv[0]
                    # e.g. 'type' doesn't accept flag '-x'
                    self.errfmt.PrefixPrint(e.msg, '%r ' % arg0, e.location)
                    return 2  # consistent error code for usage error

        if len(io_errors):  # e.g. disk full, ulimit
            self.errfmt.PrintMessage(
                '%s builtin I/O error: %s' %
                (cmd_val.argv[0], pyutil.strerror(io_errors[0])),
                cmd_val.arg_locs[0])
            return 1

        return status
231
    def RunSimpleCommand(self, cmd_val, cmd_st, run_flags):
        # type: (cmd_value.Argv, CommandStatus, int) -> int
        """Run builtins, functions, external commands.

        Dispatch order: assignment builtins (rejected here), special builtins,
        procs/shell functions (unless NO_CALL_PROCS), hay nodes, normal
        builtins, then external commands.  run_flags is a bitmask of the
        IS_LAST_CMD / NO_CALL_PROCS / USE_DEFAULT_PATH flags above.

        Possible variations:
        - YSH might have different, simpler rules.  No special builtins, etc.
        - YSH might have OILS_PATH = :| /bin /usr/bin | or something.
        - Interpreters might want to define all their own builtins.
        """
        argv = cmd_val.argv
        if len(cmd_val.arg_locs):
            arg0_loc = cmd_val.arg_locs[0]  # type: loc_t
        else:
            arg0_loc = loc.Missing

        # This happens when you write "$@" but have no arguments.
        if len(argv) == 0:
            if self.exec_opts.strict_argv():
                e_die("Command evaluated to an empty argv array", arg0_loc)
            else:
                return 0  # status 0, or skip it?

        arg0 = argv[0]

        builtin_id = consts.LookupAssignBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            # command readonly is disallowed, for technical reasons.  Could
            # relax it later.
            self.errfmt.Print_("Can't run assignment builtin recursively",
                               arg0_loc)
            return 1

        builtin_id = consts.LookupSpecialBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            status = self.RunBuiltin(builtin_id, cmd_val)
            # TODO: Enable this and fix spec test failures.
            # Also update _SPECIAL_BUILTINS in osh/builtin.py.
            #if status != 0:
            #    e_die_status(status, 'special builtin failed')
            return status

        call_procs = not (run_flags & NO_CALL_PROCS)
        # Builtins like 'true' can be redefined as functions.
        if call_procs:
            # TODO: Look up shell functions in self.sh_funcs, but procs are
            # value.Proc in the var namespace.
            # Pitfall: What happens if there are two of the same name?  I guess
            # that's why you have = and 'type' inspect them

            proc_node, self_val = self.procs.GetInvokable(arg0)
            if proc_node is not None:
                if self.exec_opts.strict_errexit():
                    disabled_tok = self.mutable_opts.ErrExitDisabledToken()
                    if disabled_tok:
                        self.errfmt.Print_(
                            'errexit was disabled for this construct',
                            disabled_tok)
                        self.errfmt.StderrLine('')
                        e_die(
                            "Can't run a proc while errexit is disabled. "
                            "Use 'try' or wrap it in a process with $0 myproc",
                            arg0_loc)

                with dev.ctx_Tracer(self.tracer, 'proc', argv):
                    # NOTE: Functions could call 'exit 42' directly, etc.
                    status = self.cmd_ev.RunProc(proc_node,
                                                 cmd_val,
                                                 self_val=self_val)
                return status

        # Notes:
        # - procs shadow hay names
        # - hay names shadow normal builtins?  Should we limit to CAPS or no?
        if self.hay_state.Resolve(arg0):
            return self.RunBuiltin(builtin_i.haynode, cmd_val)

        builtin_id = consts.LookupNormalBuiltin(arg0)

        if self.exec_opts._running_hay():
            # Hay: limit the builtins that can be run
            # - declare 'use dialect'
            # - echo and write for debugging
            # - no JSON?
            if builtin_id in (builtin_i.haynode, builtin_i.use, builtin_i.echo,
                              builtin_i.write):
                cmd_st.show_code = True  # this is a "leaf" for errors
                return self.RunBuiltin(builtin_id, cmd_val)

            self.errfmt.Print_('Unknown command %r while running hay' % arg0,
                               arg0_loc)
            return 127

        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            return self.RunBuiltin(builtin_id, cmd_val)

        environ = self.mem.GetExported()  # Include temporary variables

        if cmd_val.proc_args:
            e_die(
                '%r appears to be external. External commands don\'t accept typed args (OILS-ERR-200)'
                % arg0, cmd_val.proc_args.typed_args.left)

        # Resolve argv[0] BEFORE forking.
        if run_flags & USE_DEFAULT_PATH:
            argv0_path = state.LookupExecutable(arg0, DEFAULT_PATH)
        else:
            argv0_path = self.search_path.CachedLookup(arg0)
        if argv0_path is None:
            self.errfmt.Print_('%r not found (OILS-ERR-100)' % arg0, arg0_loc)
            return 127

        # If this process has traps, we can't exec() in place of forking,
        # because the traps still need to run in this shell afterward.
        if self.trap_state.ThisProcessHasTraps():
            do_fork = True
        else:
            do_fork = not cmd_val.is_last_cmd

        # Normal case: ls /
        if do_fork:
            thunk = process.ExternalThunk(self.ext_prog, argv0_path, cmd_val,
                                          environ)
            p = process.Process(thunk, self.job_control, self.job_list,
                                self.tracer)

            if self.job_control.Enabled():
                if self.fg_pipeline is not None:
                    # Join the process group of the foreground pipeline we're
                    # the last part of (set by RunPipeline).
                    pgid = self.fg_pipeline.ProcessGroupId()
                    # If job control is enabled, this should be true
                    assert pgid != process.INVALID_PGID

                    change = process.SetPgid(pgid, self.tracer)
                    self.fg_pipeline = None  # clear to avoid confusion in subshells
                else:
                    change = process.SetPgid(process.OWN_LEADER, self.tracer)
                p.AddStateChange(change)

            status = p.RunProcess(self.waiter, trace.External(cmd_val.argv))

            # this is close to a "leaf" for errors
            # problem: permission denied EACCESS prints duplicate messages
            # TODO: add message command 'ls' failed
            cmd_st.show_code = True

            return status

        self.tracer.OnExec(cmd_val.argv)

        # Already forked for pipeline: ls / | wc -l
        self.ext_prog.Exec(argv0_path, cmd_val, environ)  # NEVER RETURNS

        raise AssertionError('for -Wreturn-type in C++')
384
    def RunBackgroundJob(self, node):
        # type: (command_t) -> int
        """For & etc.

        Starts the node in the background, records it in the job list, and
        sets mem.last_bg_pid so $! works.  Always returns 0.
        """
        # Special case for pipeline.  There is some evidence here:
        # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html#Launching-Jobs
        #
        # "You can either make all the processes in the process group be children
        # of the shell process, or you can make one process in group be the
        # ancestor of all the other processes in that group.  The sample shell
        # program presented in this chapter uses the first approach because it
        # makes bookkeeping somewhat simpler."
        UP_node = node

        if UP_node.tag() == command_e.Pipeline:
            node = cast(command.Pipeline, UP_node)
            pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                                  self.job_control, self.job_list, self.tracer)
            for child in node.children:
                p = self._MakeProcess(child, True, self.exec_opts.errtrace())
                p.Init_ParentPipeline(pi)
                pi.Add(p)

            pi.StartPipeline(self.waiter)
            pi.SetBackground()
            last_pid = pi.LastPid()
            self.mem.last_bg_pid = last_pid  # for $!

            job_id = self.job_list.AddJob(pi)  # show in 'jobs' list

        else:
            # Problem: to get the 'set -b' behavior of immediate notifications, we
            # have to register SIGCHLD.  But then that introduces race conditions.
            # If we haven't called Register yet, then we won't know who to notify.

            p = self._MakeProcess(node, True, self.exec_opts.errtrace())
            if self.job_control.Enabled():
                # A background job leads its own process group.
                p.AddStateChange(
                    process.SetPgid(process.OWN_LEADER, self.tracer))

            p.SetBackground()
            pid = p.StartProcess(trace.Fork)
            self.mem.last_bg_pid = pid  # for $!
            job_id = self.job_list.AddJob(p)  # show in 'jobs' list

        if self.exec_opts.interactive():
            # Print it like %1 to show it's a job
            print_stderr('[%%%d] PID %d Started' %
                         (job_id, self.mem.last_bg_pid))

        return 0
435
    def RunPipeline(self, node, status_out):
        # type: (command.Pipeline, CommandStatus) -> None
        """Run a foreground pipeline; the last part runs in THIS process.

        Results are written into status_out.pipe_status and
        status_out.pipe_locs rather than returned.
        """
        pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                              self.job_control, self.job_list, self.tracer)

        # initialized with CommandStatus.CreateNull()
        pipe_locs = []  # type: List[loc_t]

        # First n-1 processes (which is empty when n == 1)
        n = len(node.children)
        for i in xrange(n - 1):
            child = node.children[i]

            # TODO: determine these locations at parse time?
            pipe_locs.append(loc.Command(child))

            p = self._MakeProcess(child, True, self.exec_opts.errtrace())
            p.Init_ParentPipeline(pi)
            pi.Add(p)

        last_child = node.children[n - 1]
        # Last piece of code is in THIS PROCESS.  'echo foo | read line; echo $line'
        pi.AddLast((self.cmd_ev, last_child))
        pipe_locs.append(loc.Command(last_child))

        with dev.ctx_Tracer(self.tracer, 'pipeline', None):
            pi.StartPipeline(self.waiter)
            # Publish the pipeline so any process forked for the last part
            # joins its process group (consumed by RunSimpleCommand).
            self.fg_pipeline = pi
            status_out.pipe_status = pi.RunLastPart(self.waiter, self.fd_state)
            self.fg_pipeline = None  # clear in case we didn't end up forking

        status_out.pipe_locs = pipe_locs
469
    def RunSubshell(self, node):
        # type: (command_t) -> int
        """Run ( ... ) in a forked child and wait; returns its exit status."""
        p = self._MakeProcess(node, True, self.exec_opts.errtrace())
        if self.job_control.Enabled():
            # The subshell leads its own process group.
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        return p.RunProcess(self.waiter, trace.ForkWait)
477
    def CaptureStdout(self, node):
        # type: (command_t) -> Tuple[int, str]
        """Fork a child to run 'node' with stdout sent to a pipe; read it all.

        Returns (exit status, captured stdout with trailing newlines
        stripped).  Used for command subs.
        """
        p = self._MakeProcess(node, self.exec_opts.inherit_errexit(),
                              self.exec_opts.errtrace())
        # Shell quirk: Command subs remain part of the shell's process group, so we
        # don't use p.AddStateChange(process.SetPgid(...))

        r, w = posix.pipe()
        p.AddStateChange(process.StdoutToPipe(r, w))

        p.StartProcess(trace.CommandSub)
        #log('Command sub started %d', pid)

        chunks = []  # type: List[str]
        posix.close(w)  # not going to write
        # Read until EOF, retrying on EINTR (e.g. a signal arrived).
        while True:
            n, err_num = pyos.Read(r, 4096, chunks)

            if n < 0:
                if err_num == EINTR:
                    pass  # retry
                else:
                    # Like the top level IOError handler
                    e_die_status(
                        2,
                        'Oils I/O error (read): %s' % posix.strerror(err_num))

            elif n == 0:  # EOF
                break
        posix.close(r)

        status = p.Wait(self.waiter)
        stdout_str = ''.join(chunks).rstrip('\n')

        return status, stdout_str
514
    def RunCommandSub(self, cs_part):
        # type: (CommandSub) -> str
        """Run a command sub like $(echo hi), returning its captured stdout.

        May raise error.ErrExit when command_sub_errexit is on and the child
        failed; otherwise the child's status is recorded via SetLastStatus.
        """
        if not self.exec_opts._allow_command_sub():
            # _allow_command_sub is used in two places.  Only one of them turns
            # off _allow_process_sub
            if not self.exec_opts._allow_process_sub():
                why = "status wouldn't be checked (strict_errexit)"
            else:
                why = 'eval_unsafe_arith is off'

            e_die("Command subs not allowed here because %s" % why,
                  loc.WordPart(cs_part))

        node = cs_part.child

        # Hack for weird $(<file) construct
        if node.tag() == command_e.Redirect:
            redir_node = cast(command.Redirect, node)
            # Detect '< file'
            if (len(redir_node.redirects) == 1 and
                    redir_node.redirects[0].op.id == Id.Redir_Less and
                    redir_node.child.tag() == command_e.NoOp):

                # Change it to __cat < file.
                # TODO: could be 'internal cat' (issue #1013)
                tok = lexer.DummyToken(Id.Lit_Chars, '__cat')
                cat_word = CompoundWord([tok])

                # Blame < because __cat has no location
                blame_tok = redir_node.redirects[0].op
                simple = command.Simple(blame_tok, [], [cat_word], None, None,
                                        False)

                # MUTATE redir node so it's like $(< file __cat)
                redir_node.child = simple

        status, stdout_str = self.CaptureStdout(node)

        # OSH has the concept of aborting in the middle of a WORD.  We're not
        # waiting until the command is over!
        if self.exec_opts.command_sub_errexit():
            if status != 0:
                msg = 'Command Sub exited with status %d' % status
                raise error.ErrExit(status, msg, loc.WordPart(cs_part))

        else:
            # Set a flag so we check errexit at the same time as bash.  Example:
            #
            # a=$(false)
            # echo foo  # no matter what comes here, the flag is reset
            #
            # Set ONLY until this command node has finished executing.

            # HACK: move this
            self.cmd_ev.check_command_sub_status = True
            self.mem.SetLastStatus(status)

        # Runtime errors test case: # $("echo foo > $@")
        # Why rstrip()?
        # https://unix.stackexchange.com/questions/17747/why-does-shell-command-substitution-gobble-up-a-trailing-newline-char
        return stdout_str
577
    def RunProcessSub(self, cs_part):
        # type: (CommandSub) -> str
        """A process sub forks a process connected to a pipe.

        The pipe is typically passed to another process via a /dev/fd/$FD path.

        Returns that /dev/fd path.  Life cycle of a process substitution:

        1. Start with this code

          diff <(seq 3) <(seq 4)

        2. To evaluate the command line, we evaluate every word.  The
        NormalWordEvaluator calls this method, RunProcessSub(), which does 3
        things:

          a. Create a pipe(), getting r and w
          b. Starts the seq process, which inherits r and w
             It has a StdoutToPipe() redirect, which means that it dup2(w, 1)
             and close(r)
          c. Close the w FD, because neither the shell nor 'diff' will write to it.
             However we must retain 'r', because 'diff' hasn't opened /dev/fd yet!
          d. We evaluate <(seq 3) to /dev/fd/$r, so "diff" can read from it

        3. Now we're done evaluating every word, so we know the command line of
        diff, which looks like

          diff /dev/fd/64 /dev/fd/65

        Those are the FDs for the read ends of the pipes we created.

        4. diff inherits a copy of the read end of both pipes.  But it actually
        calls open() on both files passed as argv.  (I think this is fine.)

        5. wait() for the diff process.

        6. The shell closes both the read ends of both pipes.  Neither we nor
        'diff' will read again.

        7. The shell waits for both 'seq' processes.

        Related:
          shopt -s process_sub_fail
          _process_sub_status
        """
        cs_loc = loc.WordPart(cs_part)

        if not self.exec_opts._allow_process_sub():
            e_die(
                "Process subs not allowed here because status wouldn't be checked (strict_errexit)",
                cs_loc)

        p = self._MakeProcess(cs_part.child, True, self.exec_opts.errtrace())

        r, w = posix.pipe()
        #log('pipe = %d, %d', r, w)

        op_id = cs_part.left_token.id
        if op_id == Id.Left_ProcSubIn:
            # Example: cat < <(head foo.txt)
            #
            # The head process should write its stdout to a pipe.
            redir = process.StdoutToPipe(r,
                                         w)  # type: process.ChildStateChange

        elif op_id == Id.Left_ProcSubOut:
            # Example: head foo.txt > >(tac)
            #
            # The tac process should read its stdin from a pipe.

            # Note: this example sometimes requires you to hit "enter" in bash and
            # zsh.  Why?
            redir = process.StdinFromPipe(r, w)

        else:
            raise AssertionError()

        p.AddStateChange(redir)

        if self.job_control.Enabled():
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        # Fork, letting the child inherit the pipe file descriptors.
        p.StartProcess(trace.ProcessSub)

        # Register with the current frame, so the waits/closes happen when the
        # enclosing command finishes (see PopProcessSub).
        ps_frame = self.process_sub_stack[-1]

        # Note: bash never waits() on the process, but zsh does.  The calling
        # program needs to read() before we can wait, e.g.
        # diff <(sort left.txt) <(sort right.txt)

        # After forking, close the end of the pipe we're not using.
        if op_id == Id.Left_ProcSubIn:
            posix.close(w)  # cat < <(head foo.txt)
            ps_frame.Append(p, r, cs_loc)  # close later
        elif op_id == Id.Left_ProcSubOut:
            posix.close(r)
            #log('Left_ProcSubOut closed %d', r)
            ps_frame.Append(p, w, cs_loc)  # close later
        else:
            raise AssertionError()

        # Is /dev Linux-specific?
        if op_id == Id.Left_ProcSubIn:
            return '/dev/fd/%d' % r

        elif op_id == Id.Left_ProcSubOut:
            return '/dev/fd/%d' % w

        else:
            raise AssertionError()
688
689 def PushRedirects(self, redirects, err_out):
690 # type: (List[RedirValue], List[error.IOError_OSError]) -> None
691 if len(redirects) == 0: # Optimized to avoid allocs
692 return
693 self.fd_state.Push(redirects, err_out)
694
695 def PopRedirects(self, num_redirects, err_out):
696 # type: (int, List[error.IOError_OSError]) -> None
697 if num_redirects == 0: # Optimized to avoid allocs
698 return
699 self.fd_state.Pop(err_out)
700
701 def PushProcessSub(self):
702 # type: () -> None
703 if len(self.clean_frame_pool):
704 # Optimized to avoid allocs
705 new_frame = self.clean_frame_pool.pop()
706 else:
707 new_frame = _ProcessSubFrame()
708 self.process_sub_stack.append(new_frame)
709
    def PopProcessSub(self, compound_st):
        # type: (StatusArray) -> None
        """This method is called by a context manager, which means we always
        wait() on the way out, which I think is the right thing.

        We don't always set _process_sub_status, e.g. if some fatal
        error occurs first, but we always wait.
        """
        frame = self.process_sub_stack.pop()
        if frame.WasModified():
            # Close pipe FDs and wait; statuses go into compound_st.
            frame.MaybeWaitOnProcessSubs(self.waiter, compound_st)
        else:
            # Optimized to avoid allocs
            self.clean_frame_pool.append(frame)

        # Note: the 3 lists in _ProcessSubFrame are hot in our profiles.  It would
        # be nice to somehow "destroy" them here, rather than letting them become
        # garbage that needs to be traced.

        # The CommandEvaluator could have a ProcessSubStack, which supports Push(),
        # Pop(), and Top() of VALUES rather than GC objects?
728
729 # The CommandEvaluator could have a ProcessSubStack, which supports Push(),
730 # Pop(), and Top() of VALUES rather than GC objects?
731
732
733# vim: sw=4