OILS / core / executor.py View on Github | oilshell.org

749 lines, 407 significant
1"""executor.py."""
2from __future__ import print_function
3
4from errno import EINTR
5
6from _devbuild.gen.id_kind_asdl import Id
7from _devbuild.gen.option_asdl import builtin_i
8from _devbuild.gen.runtime_asdl import RedirValue, trace
9from _devbuild.gen.syntax_asdl import (
10 command,
11 command_e,
12 CommandSub,
13 CompoundWord,
14 loc,
15 loc_t,
16)
17from _devbuild.gen.value_asdl import value, value_e
18from builtin import hay_ysh
19from core import dev
20from core import error
21from core import process
22from core.error import e_die, e_die_status
23from core import pyos
24from core import pyutil
25from core import state
26from display import ui
27from core import vm
28from frontend import consts
29from frontend import lexer
30from mycpp.mylib import log, print_stderr, tagswitch
31
32import posix_ as posix
33
34from typing import cast, Dict, List, Tuple, Optional, TYPE_CHECKING
35if TYPE_CHECKING:
36 from _devbuild.gen.runtime_asdl import (cmd_value, CommandStatus,
37 StatusArray)
38 from _devbuild.gen.syntax_asdl import command_t
39 from builtin import trap_osh
40 from core import optview
41 from core import state
42
43_ = log
44
45
46class _ProcessSubFrame(object):
47 """To keep track of diff <(cat 1) <(cat 2) > >(tac)"""
48
49 def __init__(self):
50 # type: () -> None
51
52 # These objects appear unconditionally in the main loop, and aren't
53 # commonly used, so we manually optimize [] into None.
54
55 self._to_wait = [] # type: List[process.Process]
56 self._to_close = [] # type: List[int] # file descriptors
57 self._locs = [] # type: List[loc_t]
58 self._modified = False
59
60 def WasModified(self):
61 # type: () -> bool
62 return self._modified
63
64 def Append(self, p, fd, status_loc):
65 # type: (process.Process, int, loc_t) -> None
66 self._modified = True
67
68 self._to_wait.append(p)
69 self._to_close.append(fd)
70 self._locs.append(status_loc)
71
72 def MaybeWaitOnProcessSubs(self, waiter, status_array):
73 # type: (process.Waiter, StatusArray) -> None
74
75 # Wait in the same order that they were evaluated. That seems fine.
76 for fd in self._to_close:
77 posix.close(fd)
78
79 codes = [] # type: List[int]
80 locs = [] # type: List[loc_t]
81 for i, p in enumerate(self._to_wait):
82 #log('waiting for %s', p)
83 st = p.Wait(waiter)
84 codes.append(st)
85 locs.append(self._locs[i])
86
87 status_array.codes = codes
88 status_array.locs = locs
89
90
# Big flags for RunSimpleCommand, combined with bitwise OR
IS_LAST_CMD = 1 << 1  # no more commands follow; allows exec-without-fork
NO_CALL_PROCS = 1 << 2  # 'command ls' suppresses function lookup
USE_DEFAULT_PATH = 1 << 3  # for 'command -p ls', which changes the path

# Fallback $PATH for USE_DEFAULT_PATH.  Copied from var.c in dash
DEFAULT_PATH = [
    '/usr/local/sbin', '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin',
    '/bin'
]
102
class ShellExecutor(vm._Executor):
    """An executor combined with the OSH language evaluators in osh/ to create
    a shell interpreter.

    Implements the vm._Executor interface: running builtins, procs, external
    commands, pipelines, subshells, command subs, and process subs.
    """

    def __init__(
            self,
            mem,  # type: state.Mem
            exec_opts,  # type: optview.Exec
            mutable_opts,  # type: state.MutableOpts
            procs,  # type: state.Procs
            hay_state,  # type: hay_ysh.HayState
            builtins,  # type: Dict[int, vm._Builtin]
            search_path,  # type: state.SearchPath
            ext_prog,  # type: process.ExternalProgram
            waiter,  # type: process.Waiter
            tracer,  # type: dev.Tracer
            job_control,  # type: process.JobControl
            job_list,  # type: process.JobList
            fd_state,  # type: process.FdState
            trap_state,  # type: trap_osh.TrapState
            errfmt  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        vm._Executor.__init__(self)
        self.mem = mem
        self.exec_opts = exec_opts
        self.mutable_opts = mutable_opts  # for IsDisabled(), not mutating
        self.procs = procs
        self.hay_state = hay_state
        self.builtins = builtins
        self.search_path = search_path
        self.ext_prog = ext_prog
        self.waiter = waiter
        self.tracer = tracer
        self.multi_trace = tracer.multi_trace
        self.job_control = job_control
        # sleep 5 & puts a (PID, job#) entry here. And then "jobs" displays it.
        self.job_list = job_list
        self.fd_state = fd_state
        self.trap_state = trap_state
        self.errfmt = errfmt
        # Stack of frames, one per nested process-sub scope; see
        # PushProcessSub() / PopProcessSub().
        self.process_sub_stack = []  # type: List[_ProcessSubFrame]
        # Unmodified frames are recycled here to avoid allocations.
        self.clean_frame_pool = []  # type: List[_ProcessSubFrame]

        # When starting a pipeline in the foreground, we need to pass a handle to it
        # through the evaluation of the last node back to ourselves for execution.
        # We use this handle to make sure any processes forked for the last part of
        # the pipeline are placed into the same process group as the rest of the
        # pipeline. Since there is, by design, only ever one foreground pipeline and
        # any pipelines started within subshells run in their parent's process
        # group, we only need one pointer here, not some collection.
        self.fg_pipeline = None  # type: Optional[process.Pipeline]
155
    def CheckCircularDeps(self):
        # type: () -> None
        """Sanity check: the CommandEvaluator must be wired up before use."""
        assert self.cmd_ev is not None
159
    def _MakeProcess(self, node, inherit_errexit, inherit_errtrace):
        # type: (command_t, bool, bool) -> process.Process
        """Assume we will run the node in another process.

        Return a process (not yet started).  The inherit_* flags control
        whether the child shell inherits errexit / errtrace from the parent.
        """
        UP_node = node
        if node.tag() == command_e.ControlFlow:
            node = cast(command.ControlFlow, UP_node)
            # Pipeline or subshells with control flow are invalid, e.g.:
            # - break | less
            # - continue | less
            # - ( return )
            # NOTE: This could be done at parse time too.
            if node.keyword.id != Id.ControlFlow_Exit:
                e_die(
                    'Invalid control flow %r in pipeline / subshell / background'
                    % lexer.TokenVal(node.keyword), node.keyword)

        # NOTE: If ErrExit(), we could be verbose about subprogram errors? This
        # only really matters when executing 'exit 42', because the child shell
        # inherits errexit and will be verbose. Other notes:
        #
        # - We might want errors to fit on a single line so they don't get
        #   interleaved.
        # - We could turn the `exit` builtin into a error.FatalRuntime exception
        #   and get this check for "free".
        thunk = process.SubProgramThunk(self.cmd_ev, node, self.trap_state,
                                        self.multi_trace, inherit_errexit,
                                        inherit_errtrace)
        p = process.Process(thunk, self.job_control, self.job_list,
                            self.tracer)
        return p
193
194 def RunBuiltin(self, builtin_id, cmd_val):
195 # type: (int, cmd_value.Argv) -> int
196 """Run a builtin.
197
198 Also called by the 'builtin' builtin.
199 """
200 self.tracer.OnBuiltin(builtin_id, cmd_val.argv)
201
202 builtin_proc = self.builtins[builtin_id]
203
204 return self.RunBuiltinProc(builtin_proc, cmd_val)
205
    def RunBuiltinProc(self, builtin_proc, cmd_val):
        # type: (vm._Builtin, cmd_value.Argv) -> int
        """Run an already-resolved builtin, mapping errors to exit statuses.

        Returns 1 on I/O errors, 2 on usage errors, otherwise the builtin's
        own status.
        """

        io_errors = []  # type: List[error.IOError_OSError]
        # ctx_FlushStdout flushes on exit and records any I/O error in
        # io_errors, so we check it after the 'with' block.
        with vm.ctx_FlushStdout(io_errors):
            # note: could be second word, like 'builtin read'
            with ui.ctx_Location(self.errfmt, cmd_val.arg_locs[0]):
                try:
                    status = builtin_proc.Run(cmd_val)
                    assert isinstance(status, int)
                except (IOError, OSError) as e:
                    self.errfmt.PrintMessage(
                        '%s builtin I/O error: %s' %
                        (cmd_val.argv[0], pyutil.strerror(e)),
                        cmd_val.arg_locs[0])
                    return 1
                except error.Usage as e:
                    arg0 = cmd_val.argv[0]
                    # e.g. 'type' doesn't accept flag '-x'
                    self.errfmt.PrefixPrint(e.msg, '%r ' % arg0, e.location)
                    return 2  # consistent error code for usage error

        if len(io_errors):  # e.g. disk full, ulimit
            self.errfmt.PrintMessage(
                '%s builtin I/O error: %s' %
                (cmd_val.argv[0], pyutil.strerror(io_errors[0])),
                cmd_val.arg_locs[0])
            return 1

        return status
236
    def RunSimpleCommand(self, cmd_val, cmd_st, run_flags):
        # type: (cmd_value.Argv, CommandStatus, int) -> int
        """Run builtins, functions, external commands.

        Resolution order: assignment builtins (rejected here), special
        builtins, procs/invokables, hay nodes, normal builtins, then external
        commands on $PATH.

        Possible variations:
        - YSH might have different, simpler rules. No special builtins, etc.
        - YSH might have OILS_PATH = :| /bin /usr/bin | or something.
        - Interpreters might want to define all their own builtins.
        """
        argv = cmd_val.argv
        if len(cmd_val.arg_locs):
            arg0_loc = cmd_val.arg_locs[0]  # type: loc_t
        else:
            arg0_loc = loc.Missing

        # This happens when you write "$@" but have no arguments.
        if len(argv) == 0:
            if self.exec_opts.strict_argv():
                e_die("Command evaluated to an empty argv array", arg0_loc)
            else:
                return 0  # status 0, or skip it?

        arg0 = argv[0]

        builtin_id = consts.LookupAssignBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            # command readonly is disallowed, for technical reasons. Could relax it
            # later.
            self.errfmt.Print_("Can't run assignment builtin recursively",
                               arg0_loc)
            return 1

        builtin_id = consts.LookupSpecialBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            status = self.RunBuiltin(builtin_id, cmd_val)
            # TODO: Enable this and fix spec test failures.
            # Also update _SPECIAL_BUILTINS in osh/builtin.py.
            #if status != 0:
            #    e_die_status(status, 'special builtin failed')
            return status

        # Builtins like 'true' can be redefined as functions.
        call_procs = not (run_flags & NO_CALL_PROCS)
        if call_procs:
            proc_val, self_obj = self.procs.GetInvokable(arg0)
            cmd_val.self_obj = self_obj  # MAYBE bind self

            if proc_val is not None:
                if self.exec_opts.strict_errexit():
                    disabled_tok = self.mutable_opts.ErrExitDisabledToken()
                    if disabled_tok:
                        self.errfmt.Print_(
                            'errexit was disabled for this construct',
                            disabled_tok)
                        self.errfmt.StderrLine('')
                        e_die(
                            "Can't run a proc while errexit is disabled. "
                            "Use 'try' or wrap it in a process with $0 myproc",
                            arg0_loc)

                with tagswitch(proc_val) as case:
                    if case(value_e.BuiltinProc):
                        # Handle the special case of the BUILTIN proc
                        # module_ysh.ModuleInvoke, which is returned on the Obj
                        # created by 'use util.ysh'
                        builtin_proc = cast(value.BuiltinProc, proc_val)
                        b = cast(vm._Builtin, builtin_proc.builtin)
                        status = self.RunBuiltinProc(b, cmd_val)

                    elif case(value_e.Proc):
                        proc = cast(value.Proc, proc_val)
                        with dev.ctx_Tracer(self.tracer, 'proc', argv):
                            # NOTE: Functions could call 'exit 42' directly, etc.
                            status = self.cmd_ev.RunProc(proc, cmd_val)

                    else:
                        # GetInvokable() should only return 1 of 2 things
                        raise AssertionError()

                return status

        # Notes:
        # - procs shadow hay names
        # - hay names shadow normal builtins? Should we limit to CAPS or no?
        if self.hay_state.Resolve(arg0):
            return self.RunBuiltin(builtin_i.haynode, cmd_val)

        builtin_id = consts.LookupNormalBuiltin(arg0)

        if self.exec_opts._running_hay():
            # Hay: limit the builtins that can be run
            # - declare 'use dialect'
            # - echo and write for debugging
            # - no JSON?
            if builtin_id in (builtin_i.haynode, builtin_i.use, builtin_i.echo,
                              builtin_i.write):
                cmd_st.show_code = True  # this is a "leaf" for errors
                return self.RunBuiltin(builtin_id, cmd_val)

            self.errfmt.Print_('Unknown command %r while running hay' % arg0,
                               arg0_loc)
            return 127

        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            return self.RunBuiltin(builtin_id, cmd_val)

        environ = self.mem.GetExported()  # Include temporary variables

        if cmd_val.proc_args:
            e_die(
                '%r appears to be external. External commands don\'t accept typed args (OILS-ERR-200)'
                % arg0, cmd_val.proc_args.typed_args.left)

        # Resolve argv[0] BEFORE forking.
        if run_flags & USE_DEFAULT_PATH:
            argv0_path = state.LookupExecutable(arg0, DEFAULT_PATH)
        else:
            argv0_path = self.search_path.CachedLookup(arg0)
        if argv0_path is None:
            self.errfmt.Print_('%r not found (OILS-ERR-100)' % arg0, arg0_loc)
            return 127

        # If this process has traps, we can't exec() in place of fork(),
        # because the traps still need a shell to run in.
        if self.trap_state.ThisProcessHasTraps():
            do_fork = True
        else:
            do_fork = not cmd_val.is_last_cmd

        # Normal case: ls /
        if do_fork:
            thunk = process.ExternalThunk(self.ext_prog, argv0_path, cmd_val,
                                          environ)
            p = process.Process(thunk, self.job_control, self.job_list,
                                self.tracer)

            if self.job_control.Enabled():
                if self.fg_pipeline is not None:
                    pgid = self.fg_pipeline.ProcessGroupId()
                    # If job control is enabled, this should be true
                    assert pgid != process.INVALID_PGID

                    change = process.SetPgid(pgid, self.tracer)
                    self.fg_pipeline = None  # clear to avoid confusion in subshells
                else:
                    change = process.SetPgid(process.OWN_LEADER, self.tracer)
                p.AddStateChange(change)

            status = p.RunProcess(self.waiter, trace.External(cmd_val.argv))

            # this is close to a "leaf" for errors
            # problem: permission denied EACCESS prints duplicate messages
            # TODO: add message command 'ls' failed
            cmd_st.show_code = True

            return status

        self.tracer.OnExec(cmd_val.argv)

        # Already forked for pipeline: ls / | wc -l
        self.ext_prog.Exec(argv0_path, cmd_val, environ)  # NEVER RETURNS

        raise AssertionError('for -Wreturn-type in C++')
400
    def RunBackgroundJob(self, node):
        # type: (command_t) -> int
        """For & etc.  Starts the job, records $! and a jobs entry; returns 0."""
        # Special case for pipeline. There is some evidence here:
        # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html#Launching-Jobs
        #
        # "You can either make all the processes in the process group be children
        # of the shell process, or you can make one process in group be the
        # ancestor of all the other processes in that group. The sample shell
        # program presented in this chapter uses the first approach because it
        # makes bookkeeping somewhat simpler."
        UP_node = node

        if UP_node.tag() == command_e.Pipeline:
            node = cast(command.Pipeline, UP_node)
            pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                                  self.job_control, self.job_list, self.tracer)
            for child in node.children:
                p = self._MakeProcess(child, True, self.exec_opts.errtrace())
                p.Init_ParentPipeline(pi)
                pi.Add(p)

            pi.StartPipeline(self.waiter)
            pi.SetBackground()
            last_pid = pi.LastPid()
            self.mem.last_bg_pid = last_pid  # for $!

            job_id = self.job_list.AddJob(pi)  # show in 'jobs' list

        else:
            # Problem: to get the 'set -b' behavior of immediate notifications, we
            # have to register SIGCHLD. But then that introduces race conditions.
            # If we haven't called Register yet, then we won't know who to notify.

            p = self._MakeProcess(node, True, self.exec_opts.errtrace())
            if self.job_control.Enabled():
                p.AddStateChange(
                    process.SetPgid(process.OWN_LEADER, self.tracer))

            p.SetBackground()
            pid = p.StartProcess(trace.Fork)
            self.mem.last_bg_pid = pid  # for $!
            job_id = self.job_list.AddJob(p)  # show in 'jobs' list

        if self.exec_opts.interactive():
            # Print it like %1 to show it's a job
            print_stderr('[%%%d] PID %d Started' %
                         (job_id, self.mem.last_bg_pid))

        return 0
451
    def RunPipeline(self, node, status_out):
        # type: (command.Pipeline, CommandStatus) -> None
        """Run a foreground pipeline; statuses are written into status_out.

        The first n-1 parts run in forked children; the LAST part runs in
        this process, so 'echo foo | read line' can set a shell variable.
        """

        pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                              self.job_control, self.job_list, self.tracer)

        # initialized with CommandStatus.CreateNull()
        pipe_locs = []  # type: List[loc_t]

        # First n-1 processes (which is empty when n == 1)
        n = len(node.children)
        for i in xrange(n - 1):
            child = node.children[i]

            # TODO: determine these locations at parse time?
            pipe_locs.append(loc.Command(child))

            p = self._MakeProcess(child, True, self.exec_opts.errtrace())
            p.Init_ParentPipeline(pi)
            pi.Add(p)

        last_child = node.children[n - 1]
        # Last piece of code is in THIS PROCESS. 'echo foo | read line; echo $line'
        pi.AddLast((self.cmd_ev, last_child))
        pipe_locs.append(loc.Command(last_child))

        with dev.ctx_Tracer(self.tracer, 'pipeline', None):
            pi.StartPipeline(self.waiter)
            # Expose the pipeline so any process forked for the last part can
            # join its process group; see self.fg_pipeline in __init__.
            self.fg_pipeline = pi
            status_out.pipe_status = pi.RunLastPart(self.waiter, self.fd_state)
            self.fg_pipeline = None  # clear in case we didn't end up forking

        status_out.pipe_locs = pipe_locs
485
486 def RunSubshell(self, node):
487 # type: (command_t) -> int
488 p = self._MakeProcess(node, True, self.exec_opts.errtrace())
489 if self.job_control.Enabled():
490 p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))
491
492 return p.RunProcess(self.waiter, trace.ForkWait)
493
    def CaptureStdout(self, node):
        # type: (command_t) -> Tuple[int, str]
        """Run node in a child with stdout connected to a pipe.

        Returns (exit status, captured stdout with trailing newlines stripped).
        """

        p = self._MakeProcess(node, self.exec_opts.inherit_errexit(),
                              self.exec_opts.errtrace())
        # Shell quirk: Command subs remain part of the shell's process group, so we
        # don't use p.AddStateChange(process.SetPgid(...))

        r, w = posix.pipe()
        p.AddStateChange(process.StdoutToPipe(r, w))

        p.StartProcess(trace.CommandSub)
        #log('Command sub started %d', pid)

        chunks = []  # type: List[str]
        posix.close(w)  # not going to write
        while True:
            n, err_num = pyos.Read(r, 4096, chunks)

            if n < 0:
                if err_num == EINTR:
                    pass  # retry: a signal interrupted the read()
                else:
                    # Like the top level IOError handler
                    e_die_status(
                        2,
                        'Oils I/O error (read): %s' % posix.strerror(err_num))

            elif n == 0:  # EOF
                break
        posix.close(r)

        status = p.Wait(self.waiter)
        # Trailing newlines are removed, per shell command sub convention.
        stdout_str = ''.join(chunks).rstrip('\n')

        return status, stdout_str
530
    def RunCommandSub(self, cs_part):
        # type: (CommandSub) -> str
        """Run $(...) and return its captured stdout."""

        if not self.exec_opts._allow_command_sub():
            # _allow_command_sub is used in two places. Only one of them turns
            # off _allow_process_sub
            if not self.exec_opts._allow_process_sub():
                why = "status wouldn't be checked (strict_errexit)"
            else:
                why = 'eval_unsafe_arith is off'

            e_die("Command subs not allowed here because %s" % why,
                  loc.WordPart(cs_part))

        node = cs_part.child

        # Hack for weird $(<file) construct
        if node.tag() == command_e.Redirect:
            redir_node = cast(command.Redirect, node)
            # Detect '< file'
            if (len(redir_node.redirects) == 1 and
                    redir_node.redirects[0].op.id == Id.Redir_Less and
                    redir_node.child.tag() == command_e.NoOp):

                # Change it to __cat < file.
                # TODO: could be 'internal cat' (issue #1013)
                tok = lexer.DummyToken(Id.Lit_Chars, '__cat')
                cat_word = CompoundWord([tok])

                # Blame < because __cat has no location
                blame_tok = redir_node.redirects[0].op
                simple = command.Simple(blame_tok, [], [cat_word], None, None,
                                        False)

                # MUTATE redir node so it's like $(<file _cat)
                redir_node.child = simple

        status, stdout_str = self.CaptureStdout(node)

        # OSH has the concept of aborting in the middle of a WORD. We're not
        # waiting until the command is over!
        if self.exec_opts.command_sub_errexit():
            if status != 0:
                msg = 'Command Sub exited with status %d' % status
                raise error.ErrExit(status, msg, loc.WordPart(cs_part))

        else:
            # Set a flag so we check errexit at the same time as bash. Example:
            #
            # a=$(false)
            # echo foo  # no matter what comes here, the flag is reset
            #
            # Set ONLY until this command node has finished executing.

            # HACK: move this
            self.cmd_ev.check_command_sub_status = True
            self.mem.SetLastStatus(status)

        # Runtime errors test case: # $("echo foo > $@")
        # Why rstrip()?
        # https://unix.stackexchange.com/questions/17747/why-does-shell-command-substitution-gobble-up-a-trailing-newline-char
        return stdout_str
593
    def RunProcessSub(self, cs_part):
        # type: (CommandSub) -> str
        """Process sub forks a process connected to a pipe.

        The pipe is typically passed to another process via a /dev/fd/$FD path.

        Returns the /dev/fd path for the word evaluator to substitute in.

        Life cycle of a process substitution:

        1. Start with this code

          diff <(seq 3) <(seq 4)

        2. To evaluate the command line, we evaluate every word. The
        NormalWordEvaluator calls this method, RunProcessSub(), which does 3
        things:

          a. Create a pipe(), getting r and w
          b. Starts the seq process, which inherits r and w
             It has a StdoutToPipe() redirect, which means that it dup2(w, 1)
             and close(r)
          c. Close the w FD, because neither the shell or 'diff' will write to it.
             However we must retain 'r', because 'diff' hasn't opened /dev/fd yet!
          d. We evaluate <(seq 3) to /dev/fd/$r, so "diff" can read from it

        3. Now we're done evaluating every word, so we know the command line of
        diff, which looks like

          diff /dev/fd/64 /dev/fd/65

        Those are the FDs for the read ends of the pipes we created.

        4. diff inherits a copy of the read end of both pipes. But it actually
        calls open() both files passed as argv. (I think this is fine.)

        5. wait() for the diff process.

        6. The shell closes both the read ends of both pipes. Neither we nor
        'diff' will read again.

        7. The shell waits for both 'seq' processes.

        Related:
          shopt -s process_sub_fail
          _process_sub_status
        """
        cs_loc = loc.WordPart(cs_part)

        if not self.exec_opts._allow_process_sub():
            e_die(
                "Process subs not allowed here because status wouldn't be checked (strict_errexit)",
                cs_loc)

        p = self._MakeProcess(cs_part.child, True, self.exec_opts.errtrace())

        r, w = posix.pipe()
        #log('pipe = %d, %d', r, w)

        op_id = cs_part.left_token.id
        if op_id == Id.Left_ProcSubIn:
            # Example: cat < <(head foo.txt)
            #
            # The head process should write its stdout to a pipe.
            redir = process.StdoutToPipe(r,
                                         w)  # type: process.ChildStateChange

        elif op_id == Id.Left_ProcSubOut:
            # Example: head foo.txt > >(tac)
            #
            # The tac process should read its stdin from a pipe.

            # Note: this example sometimes requires you to hit "enter" in bash and
            # zsh. Why?
            redir = process.StdinFromPipe(r, w)

        else:
            raise AssertionError()

        p.AddStateChange(redir)

        if self.job_control.Enabled():
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        # Fork, letting the child inherit the pipe file descriptors.
        p.StartProcess(trace.ProcessSub)

        ps_frame = self.process_sub_stack[-1]

        # Note: bash never waits() on the process, but zsh does. The calling
        # program needs to read() before we can wait, e.g.
        #   diff <(sort left.txt) <(sort right.txt)

        # After forking, close the end of the pipe we're not using.
        if op_id == Id.Left_ProcSubIn:
            posix.close(w)  # cat < <(head foo.txt)
            ps_frame.Append(p, r, cs_loc)  # close later
        elif op_id == Id.Left_ProcSubOut:
            posix.close(r)
            #log('Left_ProcSubOut closed %d', r)
            ps_frame.Append(p, w, cs_loc)  # close later
        else:
            raise AssertionError()

        # Is /dev Linux-specific?
        if op_id == Id.Left_ProcSubIn:
            return '/dev/fd/%d' % r

        elif op_id == Id.Left_ProcSubOut:
            return '/dev/fd/%d' % w

        else:
            raise AssertionError()
704
705 def PushRedirects(self, redirects, err_out):
706 # type: (List[RedirValue], List[error.IOError_OSError]) -> None
707 if len(redirects) == 0: # Optimized to avoid allocs
708 return
709 self.fd_state.Push(redirects, err_out)
710
711 def PopRedirects(self, num_redirects, err_out):
712 # type: (int, List[error.IOError_OSError]) -> None
713 if num_redirects == 0: # Optimized to avoid allocs
714 return
715 self.fd_state.Pop(err_out)
716
717 def PushProcessSub(self):
718 # type: () -> None
719 if len(self.clean_frame_pool):
720 # Optimized to avoid allocs
721 new_frame = self.clean_frame_pool.pop()
722 else:
723 new_frame = _ProcessSubFrame()
724 self.process_sub_stack.append(new_frame)
725
    def PopProcessSub(self, compound_st):
        # type: (StatusArray) -> None
        """This method is called by a context manager, which means we always
        wait() on the way out, which I think is the right thing.

        We don't always set _process_sub_status, e.g. if some fatal
        error occurs first, but we always wait.
        """
        frame = self.process_sub_stack.pop()
        if frame.WasModified():
            frame.MaybeWaitOnProcessSubs(self.waiter, compound_st)
        else:
            # Optimized to avoid allocs: untouched frames go back to the pool.
            self.clean_frame_pool.append(frame)

        # Note: the 3 lists in _ProcessSubFrame are hot in our profiles. It would
        # be nice to somehow "destroy" them here, rather than letting them become
        # garbage that needs to be traced.

        # The CommandEvaluator could have a ProcessSubStack, which supports Push(),
        # Pop(), and Top() of VALUES rather than GC objects?
747
748
749# vim: sw=4