1"""executor.py."""
2from __future__ import print_function
3
4from errno import EINTR
5
6from _devbuild.gen.id_kind_asdl import Id
7from _devbuild.gen.option_asdl import builtin_i
8from _devbuild.gen.runtime_asdl import RedirValue, trace
9from _devbuild.gen.syntax_asdl import (
10 command,
11 command_e,
12 CommandSub,
13 CompoundWord,
14 loc,
15 loc_t,
16)
17from builtin import hay_ysh
18from core import dev
19from core import error
20from core import process
21from core.error import e_die, e_die_status
22from core import pyos
23from core import pyutil
24from core import state
25from display import ui
26from core import vm
27from frontend import consts
28from frontend import lexer
29from mycpp.mylib import log, print_stderr
30
31import posix_ as posix
32
33from typing import cast, Dict, List, Tuple, Optional, TYPE_CHECKING
34if TYPE_CHECKING:
35 from _devbuild.gen.runtime_asdl import (cmd_value, CommandStatus,
36 StatusArray)
37 from _devbuild.gen.syntax_asdl import command_t
38 from builtin import trap_osh
    from core import optview
    from core.vm import _Builtin

_ = log


class _ProcessSubFrame(object):
    """To keep track of diff <(cat 1) <(cat 2) > >(tac)"""

    def __init__(self):
        # type: () -> None

        # These objects appear unconditionally in the main loop, and aren't
        # commonly used, so we manually optimize [] into None.

        self._to_wait = []  # type: List[process.Process]
        self._to_close = []  # type: List[int]  # file descriptors
        self._locs = []  # type: List[loc_t]
        self._modified = False

    def WasModified(self):
        # type: () -> bool
        return self._modified

    def Append(self, p, fd, status_loc):
        # type: (process.Process, int, loc_t) -> None
        self._modified = True

        self._to_wait.append(p)
        self._to_close.append(fd)
        self._locs.append(status_loc)

    def MaybeWaitOnProcessSubs(self, waiter, status_array):
        # type: (process.Waiter, StatusArray) -> None

        # Wait in the same order that they were evaluated.  That seems fine.
        for fd in self._to_close:
            posix.close(fd)

        codes = []  # type: List[int]
        locs = []  # type: List[loc_t]
        for i, p in enumerate(self._to_wait):
            #log('waiting for %s', p)
            st = p.Wait(waiter)
            codes.append(st)
            locs.append(self._locs[i])

        status_array.codes = codes
        status_array.locs = locs


# Big flags for RunSimpleCommand
IS_LAST_CMD = 1 << 1
NO_CALL_PROCS = 1 << 2  # 'command ls' suppresses proc lookup
USE_DEFAULT_PATH = 1 << 3  # 'command -p ls' uses the default PATH
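
# Illustrative (hypothetical call, based on the flag comments above): a
# builtin like 'command -p ls' could dispatch with
#
#     run_flags = NO_CALL_PROCS | USE_DEFAULT_PATH
#
# so that RunSimpleCommand() skips proc lookup and resolves 'ls' against
# DEFAULT_PATH below.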

# Copied from var.c in dash
DEFAULT_PATH = [
    '/usr/local/sbin', '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin',
    '/bin'
]


class ShellExecutor(vm._Executor):
    """An executor combined with the OSH language evaluators in osh/ to create
    a shell interpreter."""

    def __init__(
            self,
            mem,  # type: state.Mem
            exec_opts,  # type: optview.Exec
            mutable_opts,  # type: state.MutableOpts
            procs,  # type: state.Procs
            hay_state,  # type: hay_ysh.HayState
            builtins,  # type: Dict[int, _Builtin]
            search_path,  # type: state.SearchPath
            ext_prog,  # type: process.ExternalProgram
            waiter,  # type: process.Waiter
            tracer,  # type: dev.Tracer
            job_control,  # type: process.JobControl
            job_list,  # type: process.JobList
            fd_state,  # type: process.FdState
            trap_state,  # type: trap_osh.TrapState
            errfmt  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        vm._Executor.__init__(self)
        self.mem = mem
        self.exec_opts = exec_opts
        self.mutable_opts = mutable_opts  # for IsDisabled(), not mutating
        self.procs = procs
        self.hay_state = hay_state
        self.builtins = builtins
        self.search_path = search_path
        self.ext_prog = ext_prog
        self.waiter = waiter
        self.tracer = tracer
        self.multi_trace = tracer.multi_trace
        self.job_control = job_control
        # 'sleep 5 &' puts a (PID, job#) entry here, and then 'jobs' displays
        # it.
        self.job_list = job_list
        self.fd_state = fd_state
        self.trap_state = trap_state
        self.errfmt = errfmt
        self.process_sub_stack = []  # type: List[_ProcessSubFrame]
        self.clean_frame_pool = []  # type: List[_ProcessSubFrame]

        # When starting a pipeline in the foreground, we need to pass a handle
        # to it through the evaluation of the last node back to ourselves for
        # execution.  We use this handle to make sure any processes forked for
        # the last part of the pipeline are placed into the same process group
        # as the rest of the pipeline.  Since there is, by design, only ever
        # one foreground pipeline, and any pipelines started within subshells
        # run in their parent's process group, we only need one pointer here,
        # not some collection.
        self.fg_pipeline = None  # type: Optional[process.Pipeline]

    def CheckCircularDeps(self):
        # type: () -> None
        assert self.cmd_ev is not None

    def _MakeProcess(self, node, inherit_errexit, inherit_errtrace):
        # type: (command_t, bool, bool) -> process.Process
        """Assume we will run the node in another process.

        Return a process.
        """
        UP_node = node
        if node.tag() == command_e.ControlFlow:
            node = cast(command.ControlFlow, UP_node)
            # Pipelines or subshells with control flow are invalid, e.g.:
            # - break | less
            # - continue | less
            # - ( return )
            # NOTE: This could be done at parse time too.
            if node.keyword.id != Id.ControlFlow_Exit:
                e_die(
                    'Invalid control flow %r in pipeline / subshell / background'
                    % lexer.TokenVal(node.keyword), node.keyword)

        # NOTE: If ErrExit(), we could be verbose about subprogram errors?  This
        # only really matters when executing 'exit 42', because the child shell
        # inherits errexit and will be verbose.  Other notes:
        #
        # - We might want errors to fit on a single line so they don't get
        #   interleaved.
        # - We could turn the `exit` builtin into an error.FatalRuntime
        #   exception and get this check for "free".
        thunk = process.SubProgramThunk(self.cmd_ev, node, self.trap_state,
                                        self.multi_trace, inherit_errexit,
                                        inherit_errtrace)
        p = process.Process(thunk, self.job_control, self.job_list,
                            self.tracer)
        return p

    def RunBuiltin(self, builtin_id, cmd_val):
        # type: (int, cmd_value.Argv) -> int
        """Run a builtin.

        Also called by the 'builtin' builtin.
        """
        self.tracer.OnBuiltin(builtin_id, cmd_val.argv)

        builtin_func = self.builtins[builtin_id]

        io_errors = []  # type: List[error.IOError_OSError]
        with vm.ctx_FlushStdout(io_errors):
            # note: could be second word, like 'builtin read'
            with ui.ctx_Location(self.errfmt, cmd_val.arg_locs[0]):
                try:
                    status = builtin_func.Run(cmd_val)
                    assert isinstance(status, int)
                except (IOError, OSError) as e:
                    self.errfmt.PrintMessage(
                        '%s builtin I/O error: %s' %
                        (cmd_val.argv[0], pyutil.strerror(e)),
                        cmd_val.arg_locs[0])
                    return 1
                except error.Usage as e:
                    arg0 = cmd_val.argv[0]
                    # e.g. 'type' doesn't accept flag '-x'
                    self.errfmt.PrefixPrint(e.msg, '%r ' % arg0, e.location)
                    return 2  # consistent error code for usage error

        if len(io_errors):  # e.g. disk full, ulimit
            self.errfmt.PrintMessage(
                '%s builtin I/O error: %s' %
                (cmd_val.argv[0], pyutil.strerror(io_errors[0])),
                cmd_val.arg_locs[0])
            return 1

        return status
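
    # Illustrative note: per the docstring above, the 'builtin' builtin also
    # ends up here, e.g.
    #
    #     builtin cd /tmp   # runs the real 'cd', even if a proc shadows it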

    def RunSimpleCommand(self, cmd_val, cmd_st, run_flags):
        # type: (cmd_value.Argv, CommandStatus, int) -> int
        """Run builtins, functions, external commands.

        Possible variations:
        - YSH might have different, simpler rules.  No special builtins, etc.
        - YSH might have OILS_PATH = :| /bin /usr/bin | or something.
        - Interpreters might want to define all their own builtins.
        """
        argv = cmd_val.argv
        if len(cmd_val.arg_locs):
            arg0_loc = cmd_val.arg_locs[0]  # type: loc_t
        else:
            arg0_loc = loc.Missing

        # This happens when you write "$@" but have no arguments.
        if len(argv) == 0:
            if self.exec_opts.strict_argv():
                e_die("Command evaluated to an empty argv array", arg0_loc)
            else:
                return 0  # status 0, or skip it?

        arg0 = argv[0]

        builtin_id = consts.LookupAssignBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            # 'command readonly' is disallowed, for technical reasons.  Could
            # relax it later.
            self.errfmt.Print_("Can't run assignment builtin recursively",
                               arg0_loc)
            return 1

        builtin_id = consts.LookupSpecialBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            status = self.RunBuiltin(builtin_id, cmd_val)
            # TODO: Enable this and fix spec test failures.
            # Also update _SPECIAL_BUILTINS in osh/builtin.py.
            #if status != 0:
            #    e_die_status(status, 'special builtin failed')
            return status

        call_procs = not (run_flags & NO_CALL_PROCS)
        # Builtins like 'true' can be redefined as functions.
        if call_procs:
            # TODO:
            # - modules are callable value.Obj, but they have no proc_node.
            #   Instead of RunProc(), call RunBuiltin()
            #
            # - define InvokeModule(vm._Builtin) - but you need to bind
            #   self_val in cmd_val.proc_args
            #
            # - Also sort out LookupSpecialBuiltin vs. LookupBuiltin
            #
            # Order is: Assign, Special Builtin, Invokable, Builtin, External

            proc_node, self_val = self.procs.GetInvokable(arg0)
            if proc_node is not None:
                if self.exec_opts.strict_errexit():
                    disabled_tok = self.mutable_opts.ErrExitDisabledToken()
                    if disabled_tok:
                        self.errfmt.Print_(
                            'errexit was disabled for this construct',
                            disabled_tok)
                        self.errfmt.StderrLine('')
                        e_die(
                            "Can't run a proc while errexit is disabled. "
                            "Use 'try' or wrap it in a process with $0 myproc",
                            arg0_loc)

                with dev.ctx_Tracer(self.tracer, 'proc', argv):
                    # NOTE: Functions could call 'exit 42' directly, etc.
                    status = self.cmd_ev.RunProc(proc_node,
                                                 cmd_val,
                                                 self_val=self_val)
                return status

        # Notes:
        # - procs shadow hay names
        # - hay names shadow normal builtins?  Should we limit to CAPS or no?
        if self.hay_state.Resolve(arg0):
            return self.RunBuiltin(builtin_i.haynode, cmd_val)

        builtin_id = consts.LookupNormalBuiltin(arg0)

        if self.exec_opts._running_hay():
            # Hay: limit the builtins that can be run
            # - declare 'use dialect'
            # - echo and write for debugging
            # - no JSON?
            if builtin_id in (builtin_i.haynode, builtin_i.use, builtin_i.echo,
                              builtin_i.write):
                cmd_st.show_code = True  # this is a "leaf" for errors
                return self.RunBuiltin(builtin_id, cmd_val)

            self.errfmt.Print_('Unknown command %r while running hay' % arg0,
                               arg0_loc)
            return 127

        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            return self.RunBuiltin(builtin_id, cmd_val)

        environ = self.mem.GetExported()  # Include temporary variables

        if cmd_val.proc_args:
            e_die(
                '%r appears to be external. External commands don\'t accept typed args (OILS-ERR-200)'
                % arg0, cmd_val.proc_args.typed_args.left)

        # Resolve argv[0] BEFORE forking.
        if run_flags & USE_DEFAULT_PATH:
            argv0_path = state.LookupExecutable(arg0, DEFAULT_PATH)
        else:
            argv0_path = self.search_path.CachedLookup(arg0)
        if argv0_path is None:
            self.errfmt.Print_('%r not found (OILS-ERR-100)' % arg0, arg0_loc)
            return 127

        if self.trap_state.ThisProcessHasTraps():
            do_fork = True
        else:
            do_fork = not cmd_val.is_last_cmd

        # Normal case: ls /
        if do_fork:
            thunk = process.ExternalThunk(self.ext_prog, argv0_path, cmd_val,
                                          environ)
            p = process.Process(thunk, self.job_control, self.job_list,
                                self.tracer)

            if self.job_control.Enabled():
                if self.fg_pipeline is not None:
                    pgid = self.fg_pipeline.ProcessGroupId()
                    # If job control is enabled, this should be true
                    assert pgid != process.INVALID_PGID

                    change = process.SetPgid(pgid, self.tracer)
                    self.fg_pipeline = None  # clear to avoid confusion in subshells
                else:
                    change = process.SetPgid(process.OWN_LEADER, self.tracer)
                p.AddStateChange(change)

            status = p.RunProcess(self.waiter, trace.External(cmd_val.argv))

            # this is close to a "leaf" for errors
            # problem: permission denied (EACCES) prints duplicate messages
            # TODO: add a message like: command 'ls' failed
            cmd_st.show_code = True

            return status

        self.tracer.OnExec(cmd_val.argv)

        # Already forked for pipeline: ls / | wc -l
        self.ext_prog.Exec(argv0_path, cmd_val, environ)  # NEVER RETURNS

        raise AssertionError('for -Wreturn-type in C++')
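
    # Dispatch summary for RunSimpleCommand() (illustrative examples; the
    # lookup order is what the code above implements):
    #
    #     readonly x=1    # assignment builtin; rejected if reached
    #                     # recursively, e.g. via 'command readonly'
    #     eval 'echo hi'  # special builtin: looked up before procs
    #     myproc a b      # proc/invokable, unless NO_CALL_PROCS is set
    #     true            # normal builtin; may be shadowed by a proc
    #     /bin/ls /       # external: forked, unless it's the last command
    #                     # and this process has no traps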

    def RunBackgroundJob(self, node):
        # type: (command_t) -> int
        """For & etc."""
        # Special case for pipeline.  There is some evidence here:
        # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html#Launching-Jobs
        #
        # "You can either make all the processes in the process group be
        # children of the shell process, or you can make one process in the
        # group be the ancestor of all the other processes in that group.  The
        # sample shell program presented in this chapter uses the first
        # approach because it makes bookkeeping somewhat simpler."
        UP_node = node

        if UP_node.tag() == command_e.Pipeline:
            node = cast(command.Pipeline, UP_node)
            pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                                  self.job_control, self.job_list,
                                  self.tracer)
            for child in node.children:
                p = self._MakeProcess(child, True, self.exec_opts.errtrace())
                p.Init_ParentPipeline(pi)
                pi.Add(p)

            pi.StartPipeline(self.waiter)
            pi.SetBackground()
            last_pid = pi.LastPid()
            self.mem.last_bg_pid = last_pid  # for $!

            job_id = self.job_list.AddJob(pi)  # show in 'jobs' list

        else:
            # Problem: to get the 'set -b' behavior of immediate
            # notifications, we have to register SIGCHLD.  But then that
            # introduces race conditions.  If we haven't called Register yet,
            # then we won't know who to notify.

            p = self._MakeProcess(node, True, self.exec_opts.errtrace())
            if self.job_control.Enabled():
                p.AddStateChange(
                    process.SetPgid(process.OWN_LEADER, self.tracer))

            p.SetBackground()
            pid = p.StartProcess(trace.Fork)
            self.mem.last_bg_pid = pid  # for $!
            job_id = self.job_list.AddJob(p)  # show in 'jobs' list

        if self.exec_opts.interactive():
            # Print it like %1 to show it's a job
            print_stderr('[%%%d] PID %d Started' %
                         (job_id, self.mem.last_bg_pid))

        return 0
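
    # Example interactive session (illustrative; the PID is made up):
    #
    #     $ sleep 5 &
    #     [%1] PID 1234 Started
    #     $ echo $!
    #     1234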

    def RunPipeline(self, node, status_out):
        # type: (command.Pipeline, CommandStatus) -> None

        pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                              self.job_control, self.job_list, self.tracer)

        # initialized with CommandStatus.CreateNull()
        pipe_locs = []  # type: List[loc_t]

        # First n-1 processes (which is empty when n == 1)
        n = len(node.children)
        for i in xrange(n - 1):
            child = node.children[i]

            # TODO: determine these locations at parse time?
            pipe_locs.append(loc.Command(child))

            p = self._MakeProcess(child, True, self.exec_opts.errtrace())
            p.Init_ParentPipeline(pi)
            pi.Add(p)

        last_child = node.children[n - 1]
        # Last piece of code is in THIS PROCESS.  'echo foo | read line; echo $line'
        pi.AddLast((self.cmd_ev, last_child))
        pipe_locs.append(loc.Command(last_child))

        with dev.ctx_Tracer(self.tracer, 'pipeline', None):
            pi.StartPipeline(self.waiter)
            self.fg_pipeline = pi
            status_out.pipe_status = pi.RunLastPart(self.waiter, self.fd_state)
            self.fg_pipeline = None  # clear in case we didn't end up forking

        status_out.pipe_locs = pipe_locs
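
    # Illustrative note: because the last part runs in THIS process,
    # 'echo foo | read line' leaves 'line' set in the current shell, similar
    # to bash's 'shopt -s lastpipe' behavior.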

    def RunSubshell(self, node):
        # type: (command_t) -> int
        p = self._MakeProcess(node, True, self.exec_opts.errtrace())
        if self.job_control.Enabled():
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        return p.RunProcess(self.waiter, trace.ForkWait)
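
    # Example (illustrative): '( cd /tmp; pwd )' goes through RunSubshell().
    # The parent waits for the child (trace.ForkWait), and the 'cd' happens
    # only in the child, so the parent shell's working directory is unchanged.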

    def CaptureStdout(self, node):
        # type: (command_t) -> Tuple[int, str]

        p = self._MakeProcess(node, self.exec_opts.inherit_errexit(),
                              self.exec_opts.errtrace())
        # Shell quirk: command subs remain part of the shell's process group,
        # so we don't use p.AddStateChange(process.SetPgid(...))

        r, w = posix.pipe()
        p.AddStateChange(process.StdoutToPipe(r, w))

        p.StartProcess(trace.CommandSub)
        #log('Command sub started %d', pid)

        chunks = []  # type: List[str]
        posix.close(w)  # not going to write
        while True:
            n, err_num = pyos.Read(r, 4096, chunks)

            if n < 0:
                if err_num == EINTR:
                    pass  # retry
                else:
                    # Like the top level IOError handler
                    e_die_status(
                        2,
                        'Oils I/O error (read): %s' % posix.strerror(err_num))

            elif n == 0:  # EOF
                break
        posix.close(r)

        status = p.Wait(self.waiter)
        stdout_str = ''.join(chunks).rstrip('\n')

        return status, stdout_str
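
    # Example (illustrative): CaptureStdout() is the machinery behind $(...).
    # For $(seq 3), the child writes '1\n2\n3\n' into the pipe, and the
    # rstrip('\n') above yields '1\n2\n3'.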

    def RunCommandSub(self, cs_part):
        # type: (CommandSub) -> str

        if not self.exec_opts._allow_command_sub():
            # _allow_command_sub is used in two places.  Only one of them
            # turns off _allow_process_sub
            if not self.exec_opts._allow_process_sub():
                why = "status wouldn't be checked (strict_errexit)"
            else:
                why = 'eval_unsafe_arith is off'

            e_die("Command subs not allowed here because %s" % why,
                  loc.WordPart(cs_part))

        node = cs_part.child

        # Hack for the weird $(<file) construct
        if node.tag() == command_e.Redirect:
            redir_node = cast(command.Redirect, node)
            # Detect '< file'
            if (len(redir_node.redirects) == 1 and
                    redir_node.redirects[0].op.id == Id.Redir_Less and
                    redir_node.child.tag() == command_e.NoOp):

                # Change it to __cat < file.
                # TODO: could be 'internal cat' (issue #1013)
                tok = lexer.DummyToken(Id.Lit_Chars, '__cat')
                cat_word = CompoundWord([tok])

                # Blame < because __cat has no location
                blame_tok = redir_node.redirects[0].op
                simple = command.Simple(blame_tok, [], [cat_word], None, None,
                                        False)

                # MUTATE the redir node so it's like $(__cat < file)
                redir_node.child = simple

        status, stdout_str = self.CaptureStdout(node)

        # OSH has the concept of aborting in the middle of a WORD.  We're not
        # waiting until the command is over!
        if self.exec_opts.command_sub_errexit():
            if status != 0:
                msg = 'Command Sub exited with status %d' % status
                raise error.ErrExit(status, msg, loc.WordPart(cs_part))

        else:
            # Set a flag so we check errexit at the same time as bash.
            # Example:
            #
            #   a=$(false)
            #   echo foo  # no matter what comes here, the flag is reset
            #
            # Set ONLY until this command node has finished executing.

            # HACK: move this
            self.cmd_ev.check_command_sub_status = True
            self.mem.SetLastStatus(status)

        # Runtime errors test case: $("echo foo > $@")
        # Why rstrip()?
        # https://unix.stackexchange.com/questions/17747/why-does-shell-command-substitution-gobble-up-a-trailing-newline-char
        return stdout_str
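
    # Example (illustrative) of the two errexit modes above:
    #
    #     shopt -s command_sub_errexit
    #     x=$(false)   # aborts the word immediately with status 1
    #
    # With the option off (bash-compatible), x=$(false) just sets $? = 1, and
    # errexit is checked when the enclosing command finishes.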

    def RunProcessSub(self, cs_part):
        # type: (CommandSub) -> str
        """Process sub forks a process connected to a pipe.

        The pipe is typically passed to another process via a /dev/fd/$FD
        path.

        Life cycle of a process substitution:

        1. Start with this code

           diff <(seq 3) <(seq 4)

        2. To evaluate the command line, we evaluate every word.  The
           NormalWordEvaluator calls this method, RunProcessSub(), which does
           these things:

           a. Create a pipe(), getting r and w
           b. Start the seq process, which inherits r and w
              It has a StdoutToPipe() redirect, which means that it dup2(w, 1)
              and close(r)
           c. Close the w FD, because neither the shell nor 'diff' will write
              to it.  However we must retain 'r', because 'diff' hasn't opened
              /dev/fd yet!
           d. We evaluate <(seq 3) to /dev/fd/$r, so 'diff' can read from it

        3. Now we're done evaluating every word, so we know the command line
           of diff, which looks like

           diff /dev/fd/64 /dev/fd/65

           Those are the FDs for the read ends of the pipes we created.

        4. diff inherits a copy of the read end of both pipes.  But it
           actually calls open() on both files passed as argv.  (I think this
           is fine.)

        5. wait() for the diff process.

        6. The shell closes the read ends of both pipes.  Neither we nor
           'diff' will read again.

        7. The shell waits for both 'seq' processes.

        Related:
          shopt -s process_sub_fail
          _process_sub_status
        """
        cs_loc = loc.WordPart(cs_part)

        if not self.exec_opts._allow_process_sub():
            e_die(
                "Process subs not allowed here because status wouldn't be checked (strict_errexit)",
                cs_loc)

        p = self._MakeProcess(cs_part.child, True, self.exec_opts.errtrace())

        r, w = posix.pipe()
        #log('pipe = %d, %d', r, w)

        op_id = cs_part.left_token.id
        if op_id == Id.Left_ProcSubIn:
            # Example: cat < <(head foo.txt)
            #
            # The head process should write its stdout to a pipe.
            redir = process.StdoutToPipe(r,
                                         w)  # type: process.ChildStateChange

        elif op_id == Id.Left_ProcSubOut:
            # Example: head foo.txt > >(tac)
            #
            # The tac process should read its stdin from a pipe.

            # Note: this example sometimes requires you to hit "enter" in bash
            # and zsh.  Why?
            redir = process.StdinFromPipe(r, w)

        else:
            raise AssertionError()

        p.AddStateChange(redir)

        if self.job_control.Enabled():
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        # Fork, letting the child inherit the pipe file descriptors.
        p.StartProcess(trace.ProcessSub)

        ps_frame = self.process_sub_stack[-1]

        # Note: bash never waits() on the process, but zsh does.  The calling
        # program needs to read() before we can wait, e.g.
        #   diff <(sort left.txt) <(sort right.txt)

        # After forking, close the end of the pipe we're not using.
        if op_id == Id.Left_ProcSubIn:
            posix.close(w)  # cat < <(head foo.txt)
            ps_frame.Append(p, r, cs_loc)  # close later
        elif op_id == Id.Left_ProcSubOut:
            posix.close(r)
            #log('Left_ProcSubOut closed %d', r)
            ps_frame.Append(p, w, cs_loc)  # close later
        else:
            raise AssertionError()

        # Is /dev Linux-specific?
        if op_id == Id.Left_ProcSubIn:
            return '/dev/fd/%d' % r

        elif op_id == Id.Left_ProcSubOut:
            return '/dev/fd/%d' % w

        else:
            raise AssertionError()

    def PushRedirects(self, redirects, err_out):
        # type: (List[RedirValue], List[error.IOError_OSError]) -> None
        if len(redirects) == 0:  # Optimized to avoid allocs
            return
        self.fd_state.Push(redirects, err_out)

    def PopRedirects(self, num_redirects, err_out):
        # type: (int, List[error.IOError_OSError]) -> None
        if num_redirects == 0:  # Optimized to avoid allocs
            return
        self.fd_state.Pop(err_out)

    def PushProcessSub(self):
        # type: () -> None
        if len(self.clean_frame_pool):
            # Optimized to avoid allocs
            new_frame = self.clean_frame_pool.pop()
        else:
            new_frame = _ProcessSubFrame()
        self.process_sub_stack.append(new_frame)

    def PopProcessSub(self, compound_st):
        # type: (StatusArray) -> None
        """This method is called by a context manager, which means we always
        wait() on the way out, which I think is the right thing.

        We don't always set _process_sub_status, e.g. if some fatal
        error occurs first, but we always wait.
        """
        frame = self.process_sub_stack.pop()
        if frame.WasModified():
            frame.MaybeWaitOnProcessSubs(self.waiter, compound_st)
        else:
            # Optimized to avoid allocs
            self.clean_frame_pool.append(frame)

        # Note: the 3 lists in _ProcessSubFrame are hot in our profiles.  It
        # would be nice to somehow "destroy" them here, rather than letting
        # them become garbage that needs to be traced.

        # The CommandEvaluator could have a ProcessSubStack, which supports
        # Push(), Pop(), and Top() of VALUES rather than GC objects?
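
    # Pairing sketch (illustrative; FD numbers made up): evaluating
    # 'diff <(seq 3) <(seq 4)' roughly does
    #
    #     PushProcessSub()
    #     RunProcessSub(...)            # -> '/dev/fd/63'
    #     RunProcessSub(...)            # -> '/dev/fd/62'
    #     ... run diff ...
    #     PopProcessSub(status_array)   # waits on both 'seq' processes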


# vim: sw=4