OILS / core / process.py View on Github | oilshell.org

1972 lines, 959 significant
1# Copyright 2016 Andy Chu. All rights reserved.
2# Licensed under the Apache License, Version 2.0 (the "License");
3# you may not use this file except in compliance with the License.
4# You may obtain a copy of the License at
5#
6# http://www.apache.org/licenses/LICENSE-2.0
7"""
8process.py - Launch processes and manipulate file descriptors.
9"""
10from __future__ import print_function
11
12from errno import EACCES, EBADF, ECHILD, EINTR, ENOENT, ENOEXEC, EEXIST
13import fcntl as fcntl_
14from fcntl import F_DUPFD, F_GETFD, F_SETFD, FD_CLOEXEC
15from signal import (SIG_DFL, SIG_IGN, SIGINT, SIGPIPE, SIGQUIT, SIGTSTP,
16 SIGTTOU, SIGTTIN, SIGWINCH)
17
18from _devbuild.gen.id_kind_asdl import Id
19from _devbuild.gen.runtime_asdl import (job_state_e, job_state_t,
20 job_state_str, wait_status,
21 wait_status_t, RedirValue,
22 redirect_arg, redirect_arg_e, trace,
23 trace_t)
24from _devbuild.gen.syntax_asdl import (
25 loc_t,
26 redir_loc,
27 redir_loc_e,
28 redir_loc_t,
29)
30from _devbuild.gen.value_asdl import (value, value_e)
31from core import dev
32from core import error
33from core.error import e_die
34from core import pyutil
35from core import pyos
36from core import state
37from display import ui
38from core import util
39from data_lang import j8_lite
40from frontend import location
41from frontend import match
42from mycpp import mylib
43from mycpp.mylib import log, print_stderr, probe, tagswitch, iteritems
44
45import posix_ as posix
46from posix_ import (
47 # translated by mycpp and directly called! No wrapper!
48 WIFSIGNALED,
49 WIFEXITED,
50 WIFSTOPPED,
51 WEXITSTATUS,
52 WSTOPSIG,
53 WTERMSIG,
54 WNOHANG,
55 O_APPEND,
56 O_CREAT,
57 O_EXCL,
58 O_NONBLOCK,
59 O_NOCTTY,
60 O_RDONLY,
61 O_RDWR,
62 O_WRONLY,
63 O_TRUNC,
64)
65
66from typing import IO, List, Tuple, Dict, Optional, Any, cast, TYPE_CHECKING
67
68if TYPE_CHECKING:
69 from _devbuild.gen.runtime_asdl import cmd_value
70 from _devbuild.gen.syntax_asdl import command_t
71 from builtin import trap_osh
72 from core import optview
73 from core import pyos
74 from core.util import _DebugFile
75 from osh.cmd_eval import CommandEvaluator
76
77NO_FD = -1
78
79# Minimum file descriptor that the shell can use. Other descriptors can be
80# directly used by user programs, e.g. exec 9>&1
81#
82# Oil uses 100 because users are allowed TWO digits in frontend/lexer_def.py.
83# This is a compromise between bash (unlimited, but requires crazy
84# bookkeeping), and dash/zsh (10) and mksh (24)
85_SHELL_MIN_FD = 100
86
87# Style for 'jobs' builtin
88STYLE_DEFAULT = 0
89STYLE_LONG = 1
90STYLE_PID_ONLY = 2
91
92# To save on allocations in JobList::GetJobWithSpec()
93CURRENT_JOB_SPECS = ['', '%', '%%', '%+']
94
95
96class ctx_FileCloser(object):
97
98 def __init__(self, f):
99 # type: (mylib.LineReader) -> None
100 self.f = f
101
102 def __enter__(self):
103 # type: () -> None
104 pass
105
106 def __exit__(self, type, value, traceback):
107 # type: (Any, Any, Any) -> None
108 self.f.close()
109
110
111def InitInteractiveShell(signal_safe):
112 # type: (pyos.SignalSafe) -> None
113 """Called when initializing an interactive shell."""
114
115 # The shell itself should ignore Ctrl-\.
116 pyos.sigaction(SIGQUIT, SIG_IGN)
117
118 # This prevents Ctrl-Z from suspending OSH in interactive mode.
119 pyos.sigaction(SIGTSTP, SIG_IGN)
120
121 # More signals from
122 # https://www.gnu.org/software/libc/manual/html_node/Initializing-the-Shell.html
123 # (but not SIGCHLD)
124 pyos.sigaction(SIGTTOU, SIG_IGN)
125 pyos.sigaction(SIGTTIN, SIG_IGN)
126
127 # Register a callback to receive terminal width changes.
128 # NOTE: In line_input.c, we turned off rl_catch_sigwinch.
129
130 # This is ALWAYS on, which means that it can cause EINTR, and wait() and
131 # read() have to handle it
132 pyos.RegisterSignalInterest(SIGWINCH)
133
134
135def SaveFd(fd):
136 # type: (int) -> int
137 saved = fcntl_.fcntl(fd, F_DUPFD, _SHELL_MIN_FD) # type: int
138 return saved
139
140
141class _RedirFrame(object):
142
143 def __init__(self, saved_fd, orig_fd, forget):
144 # type: (int, int, bool) -> None
145 self.saved_fd = saved_fd
146 self.orig_fd = orig_fd
147 self.forget = forget
148
149
150class _FdFrame(object):
151
152 def __init__(self):
153 # type: () -> None
154 self.saved = [] # type: List[_RedirFrame]
155 self.need_wait = [] # type: List[Process]
156
157 def Forget(self):
158 # type: () -> None
159 """For exec 1>&2."""
160 for rf in reversed(self.saved):
161 if rf.saved_fd != NO_FD and rf.forget:
162 posix.close(rf.saved_fd)
163
164 del self.saved[:] # like list.clear() in Python 3.3
165 del self.need_wait[:]
166
167 def __repr__(self):
168 # type: () -> str
169 return '<_FdFrame %s>' % self.saved
170
171
172class FdState(object):
173 """File descriptor state for the current process.
174
175 For example, you can do 'myfunc > out.txt' without forking. Child
176 processes inherit our state.
177 """
178
179 def __init__(
180 self,
181 errfmt, # type: ui.ErrorFormatter
182 job_control, # type: JobControl
183 job_list, # type: JobList
184 mem, # type: state.Mem
185 tracer, # type: Optional[dev.Tracer]
186 waiter, # type: Optional[Waiter]
187 exec_opts, # type: optview.Exec
188 ):
189 # type: (...) -> None
190 """
191 Args:
192 errfmt: for errors
193 job_list: For keeping track of _HereDocWriterThunk
194 """
195 self.errfmt = errfmt
196 self.job_control = job_control
197 self.job_list = job_list
198 self.cur_frame = _FdFrame() # for the top level
199 self.stack = [self.cur_frame]
200 self.mem = mem
201 self.tracer = tracer
202 self.waiter = waiter
203 self.exec_opts = exec_opts
204
205 def Open(self, path):
206 # type: (str) -> mylib.LineReader
207 """Opens a path for read, but moves it out of the reserved 3-9 fd
208 range.
209
210 Returns:
211 A Python file object. The caller is responsible for Close().
212
213 Raises:
214 IOError or OSError if the path can't be found. (This is Python-induced wart)
215 """
216 fd_mode = O_RDONLY
217 f = self._Open(path, 'r', fd_mode)
218
219 # Hacky downcast
220 return cast('mylib.LineReader', f)
221
222 # used for util.DebugFile
223 def OpenForWrite(self, path):
224 # type: (str) -> mylib.Writer
225 fd_mode = O_CREAT | O_RDWR
226 f = self._Open(path, 'w', fd_mode)
227
228 # Hacky downcast
229 return cast('mylib.Writer', f)
230
231 def _Open(self, path, c_mode, fd_mode):
232 # type: (str, str, int) -> IO[str]
233 fd = posix.open(path, fd_mode, 0o666) # may raise OSError
234
235 # Immediately move it to a new location
236 new_fd = SaveFd(fd)
237 posix.close(fd)
238
239 # Return a Python file handle
240 f = posix.fdopen(new_fd, c_mode) # may raise IOError
241 return f
242
243 def _WriteFdToMem(self, fd_name, fd):
244 # type: (str, int) -> None
245 if self.mem:
246 # setvar, not setref
247 state.OshLanguageSetValue(self.mem, location.LName(fd_name),
248 value.Str(str(fd)))
249
250 def _ReadFdFromMem(self, fd_name):
251 # type: (str) -> int
252 val = self.mem.GetValue(fd_name)
253 if val.tag() == value_e.Str:
254 try:
255 return int(cast(value.Str, val).s)
256 except ValueError:
257 return NO_FD
258 return NO_FD
259
260 def _PushSave(self, fd):
261 # type: (int) -> bool
262 """Save fd to a new location and remember to restore it later."""
263 #log('---- _PushSave %s', fd)
264 ok = True
265 try:
266 new_fd = SaveFd(fd)
267 except (IOError, OSError) as e:
268 ok = False
269 # Example program that causes this error: exec 4>&1. Descriptor 4 isn't
270 # open.
271 # This seems to be ignored in dash too in savefd()?
272 if e.errno != EBADF:
273 raise
274 if ok:
275 posix.close(fd)
276 fcntl_.fcntl(new_fd, F_SETFD, FD_CLOEXEC)
277 self.cur_frame.saved.append(_RedirFrame(new_fd, fd, True))
278 else:
279 # if we got EBADF, we still need to close the original on Pop()
280 self._PushClose(fd)
281
282 return ok
283
284 def _PushDup(self, fd1, blame_loc):
285 # type: (int, redir_loc_t) -> int
286 """Save fd2 in a higher range, and dup fd1 onto fd2.
287
288 Returns whether F_DUPFD/dup2 succeeded, and the new descriptor.
289 """
290 UP_loc = blame_loc
291 if blame_loc.tag() == redir_loc_e.VarName:
292 fd2_name = cast(redir_loc.VarName, UP_loc).name
293 try:
294 # F_DUPFD: GREATER than range
295 new_fd = fcntl_.fcntl(fd1, F_DUPFD, _SHELL_MIN_FD) # type: int
296 except (IOError, OSError) as e:
297 if e.errno == EBADF:
298 print_stderr('F_DUPFD fd %d: %s' %
299 (fd1, pyutil.strerror(e)))
300 return NO_FD
301 else:
302 raise # this redirect failed
303
304 self._WriteFdToMem(fd2_name, new_fd)
305
306 elif blame_loc.tag() == redir_loc_e.Fd:
307 fd2 = cast(redir_loc.Fd, UP_loc).fd
308
309 if fd1 == fd2:
310 # The user could have asked for it to be open on descriptor 3, but open()
311 # already returned 3, e.g. echo 3>out.txt
312 return NO_FD
313
314 # Check the validity of fd1 before _PushSave(fd2)
315 try:
316 fcntl_.fcntl(fd1, F_GETFD)
317 except (IOError, OSError) as e:
318 print_stderr('F_GETFD fd %d: %s' % (fd1, pyutil.strerror(e)))
319 raise
320
321 need_restore = self._PushSave(fd2)
322
323 #log('==== dup2 %s %s\n' % (fd1, fd2))
324 try:
325 posix.dup2(fd1, fd2)
326 except (IOError, OSError) as e:
327 # bash/dash give this error too, e.g. for 'echo hi 1>&3'
328 print_stderr('dup2(%d, %d): %s' %
329 (fd1, fd2, pyutil.strerror(e)))
330
331 # Restore and return error
332 if need_restore:
333 rf = self.cur_frame.saved.pop()
334 posix.dup2(rf.saved_fd, rf.orig_fd)
335 posix.close(rf.saved_fd)
336
337 raise # this redirect failed
338
339 new_fd = fd2
340
341 else:
342 raise AssertionError()
343
344 return new_fd
345
346 def _PushCloseFd(self, blame_loc):
347 # type: (redir_loc_t) -> bool
348 """For 2>&-"""
349 # exec {fd}>&- means close the named descriptor
350
351 UP_loc = blame_loc
352 if blame_loc.tag() == redir_loc_e.VarName:
353 fd_name = cast(redir_loc.VarName, UP_loc).name
354 fd = self._ReadFdFromMem(fd_name)
355 if fd == NO_FD:
356 return False
357
358 elif blame_loc.tag() == redir_loc_e.Fd:
359 fd = cast(redir_loc.Fd, UP_loc).fd
360
361 else:
362 raise AssertionError()
363
364 self._PushSave(fd)
365
366 return True
367
368 def _PushClose(self, fd):
369 # type: (int) -> None
370 self.cur_frame.saved.append(_RedirFrame(NO_FD, fd, False))
371
372 def _PushWait(self, proc):
373 # type: (Process) -> None
374 self.cur_frame.need_wait.append(proc)
375
376 def _ApplyRedirect(self, r):
377 # type: (RedirValue) -> None
378 arg = r.arg
379 UP_arg = arg
380 with tagswitch(arg) as case:
381
382 if case(redirect_arg_e.Path):
383 arg = cast(redirect_arg.Path, UP_arg)
384 # noclobber flag is OR'd with other flags when allowed
385 noclobber_mode = O_EXCL if self.exec_opts.noclobber() else 0
386 if r.op_id in (Id.Redir_Great, Id.Redir_AndGreat): # > &>
387 # NOTE: This is different than >| because it respects noclobber, but
388 # that option is almost never used. See test/wild.sh.
389 mode = O_CREAT | O_WRONLY | O_TRUNC | noclobber_mode
390 elif r.op_id == Id.Redir_Clobber: # >|
391 mode = O_CREAT | O_WRONLY | O_TRUNC
392 elif r.op_id in (Id.Redir_DGreat,
393 Id.Redir_AndDGreat): # >> &>>
394 mode = O_CREAT | O_WRONLY | O_APPEND | noclobber_mode
395 elif r.op_id == Id.Redir_Less: # <
396 mode = O_RDONLY
397 elif r.op_id == Id.Redir_LessGreat: # <>
398 mode = O_CREAT | O_RDWR
399 else:
400 raise NotImplementedError(r.op_id)
401
402 # NOTE: 0666 is affected by umask, all shells use it.
403 try:
404 open_fd = posix.open(arg.filename, mode, 0o666)
405 except (IOError, OSError) as e:
406 if e.errno == EEXIST and self.exec_opts.noclobber():
407 extra = ' (noclobber)'
408 else:
409 extra = ''
410 self.errfmt.Print_(
411 "Can't open %r: %s%s" %
412 (arg.filename, pyutil.strerror(e), extra),
413 blame_loc=r.op_loc)
414 raise # redirect failed
415
416 new_fd = self._PushDup(open_fd, r.loc)
417 if new_fd != NO_FD:
418 posix.close(open_fd)
419
420 # Now handle &> and &>> and their variants. These pairs are the same:
421 #
422 # stdout_stderr.py &> out-err.txt
423 # stdout_stderr.py > out-err.txt 2>&1
424 #
425 # stdout_stderr.py 3&> out-err.txt
426 # stdout_stderr.py 3> out-err.txt 2>&3
427 #
428 # Ditto for {fd}> and {fd}&>
429
430 if r.op_id in (Id.Redir_AndGreat, Id.Redir_AndDGreat):
431 self._PushDup(new_fd, redir_loc.Fd(2))
432
433 elif case(redirect_arg_e.CopyFd): # e.g. echo hi 1>&2
434 arg = cast(redirect_arg.CopyFd, UP_arg)
435
436 if r.op_id == Id.Redir_GreatAnd: # 1>&2
437 self._PushDup(arg.target_fd, r.loc)
438
439 elif r.op_id == Id.Redir_LessAnd: # 0<&5
440 # The only difference between >& and <& is the default file
441 # descriptor argument.
442 self._PushDup(arg.target_fd, r.loc)
443
444 else:
445 raise NotImplementedError()
446
447 elif case(redirect_arg_e.MoveFd): # e.g. echo hi 5>&6-
448 arg = cast(redirect_arg.MoveFd, UP_arg)
449 new_fd = self._PushDup(arg.target_fd, r.loc)
450 if new_fd != NO_FD:
451 posix.close(arg.target_fd)
452
453 UP_loc = r.loc
454 if r.loc.tag() == redir_loc_e.Fd:
455 fd = cast(redir_loc.Fd, UP_loc).fd
456 else:
457 fd = NO_FD
458
459 self.cur_frame.saved.append(_RedirFrame(new_fd, fd, False))
460
461 elif case(redirect_arg_e.CloseFd): # e.g. echo hi 5>&-
462 self._PushCloseFd(r.loc)
463
464 elif case(redirect_arg_e.HereDoc):
465 arg = cast(redirect_arg.HereDoc, UP_arg)
466
467 # NOTE: Do these descriptors have to be moved out of the range 0-9?
468 read_fd, write_fd = posix.pipe()
469
470 self._PushDup(read_fd, r.loc) # stdin is now the pipe
471
472 # We can't close like we do in the filename case above? The writer can
473 # get a "broken pipe".
474 self._PushClose(read_fd)
475
476 thunk = _HereDocWriterThunk(write_fd, arg.body)
477
478 # Use PIPE_SIZE to save a process in the case of small here
479 # docs, which are the common case. (dash does this.)
480
481 # Note: could instrument this to see how often it happens.
482 # Though strace -ff can also work.
483 start_process = len(arg.body) > 4096
484 #start_process = True
485
486 if start_process:
487 here_proc = Process(thunk, self.job_control, self.job_list,
488 self.tracer)
489
490 # NOTE: we could close the read pipe here, but it doesn't really
491 # matter because we control the code.
492 here_proc.StartProcess(trace.HereDoc)
493 #log('Started %s as %d', here_proc, pid)
494 self._PushWait(here_proc)
495
496 # Now that we've started the child, close it in the parent.
497 posix.close(write_fd)
498
499 else:
500 posix.write(write_fd, arg.body)
501 posix.close(write_fd)
502
503 def Push(self, redirects, err_out):
504 # type: (List[RedirValue], List[error.IOError_OSError]) -> None
505 """Apply a group of redirects and remember to undo them."""
506
507 #log('> fd_state.Push %s', redirects)
508 new_frame = _FdFrame()
509 self.stack.append(new_frame)
510 self.cur_frame = new_frame
511
512 for r in redirects:
513 #log('apply %s', r)
514 with ui.ctx_Location(self.errfmt, r.op_loc):
515 try:
516 self._ApplyRedirect(r)
517 except (IOError, OSError) as e:
518 err_out.append(e)
519 # This can fail too
520 self.Pop(err_out)
521 return # for bad descriptor, etc.
522
523 def PushStdinFromPipe(self, r):
524 # type: (int) -> bool
525 """Save the current stdin and make it come from descriptor 'r'.
526
527 'r' is typically the read-end of a pipe. For 'lastpipe'/ZSH
528 semantics of
529
530 echo foo | read line; echo $line
531 """
532 new_frame = _FdFrame()
533 self.stack.append(new_frame)
534 self.cur_frame = new_frame
535
536 self._PushDup(r, redir_loc.Fd(0))
537 return True
538
539 def Pop(self, err_out):
540 # type: (List[error.IOError_OSError]) -> None
541 frame = self.stack.pop()
542 #log('< Pop %s', frame)
543 for rf in reversed(frame.saved):
544 if rf.saved_fd == NO_FD:
545 #log('Close %d', orig)
546 try:
547 posix.close(rf.orig_fd)
548 except (IOError, OSError) as e:
549 err_out.append(e)
550 log('Error closing descriptor %d: %s', rf.orig_fd,
551 pyutil.strerror(e))
552 return
553 else:
554 try:
555 posix.dup2(rf.saved_fd, rf.orig_fd)
556 except (IOError, OSError) as e:
557 err_out.append(e)
558 log('dup2(%d, %d) error: %s', rf.saved_fd, rf.orig_fd,
559 pyutil.strerror(e))
560 #log('fd state:')
561 #posix.system('ls -l /proc/%s/fd' % posix.getpid())
562 return
563 posix.close(rf.saved_fd)
564 #log('dup2 %s %s', saved, orig)
565
566 # Wait for here doc processes to finish.
567 for proc in frame.need_wait:
568 unused_status = proc.Wait(self.waiter)
569
570 def MakePermanent(self):
571 # type: () -> None
572 self.cur_frame.Forget()
573
574
575class ChildStateChange(object):
576
577 def __init__(self):
578 # type: () -> None
579 """Empty constructor for mycpp."""
580 pass
581
582 def Apply(self):
583 # type: () -> None
584 raise NotImplementedError()
585
586 def ApplyFromParent(self, proc):
587 # type: (Process) -> None
588 """Noop for all state changes other than SetPgid for mycpp."""
589 pass
590
591
592class StdinFromPipe(ChildStateChange):
593
594 def __init__(self, pipe_read_fd, w):
595 # type: (int, int) -> None
596 self.r = pipe_read_fd
597 self.w = w
598
599 def __repr__(self):
600 # type: () -> str
601 return '<StdinFromPipe %d %d>' % (self.r, self.w)
602
603 def Apply(self):
604 # type: () -> None
605 posix.dup2(self.r, 0)
606 posix.close(self.r) # close after dup
607
608 posix.close(self.w) # we're reading from the pipe, not writing
609 #log('child CLOSE w %d pid=%d', self.w, posix.getpid())
610
611
612class StdoutToPipe(ChildStateChange):
613
614 def __init__(self, r, pipe_write_fd):
615 # type: (int, int) -> None
616 self.r = r
617 self.w = pipe_write_fd
618
619 def __repr__(self):
620 # type: () -> str
621 return '<StdoutToPipe %d %d>' % (self.r, self.w)
622
623 def Apply(self):
624 # type: () -> None
625 posix.dup2(self.w, 1)
626 posix.close(self.w) # close after dup
627
628 posix.close(self.r) # we're writing to the pipe, not reading
629 #log('child CLOSE r %d pid=%d', self.r, posix.getpid())
630
631
632INVALID_PGID = -1
633# argument to setpgid() that means the process is its own leader
634OWN_LEADER = 0
635
636
637class SetPgid(ChildStateChange):
638
639 def __init__(self, pgid, tracer):
640 # type: (int, dev.Tracer) -> None
641 self.pgid = pgid
642 self.tracer = tracer
643
644 def Apply(self):
645 # type: () -> None
646 try:
647 posix.setpgid(0, self.pgid)
648 except (IOError, OSError) as e:
649 self.tracer.OtherMessage(
650 'osh: child %d failed to set its process group to %d: %s' %
651 (posix.getpid(), self.pgid, pyutil.strerror(e)))
652
653 def ApplyFromParent(self, proc):
654 # type: (Process) -> None
655 try:
656 posix.setpgid(proc.pid, self.pgid)
657 except (IOError, OSError) as e:
658 self.tracer.OtherMessage(
659 'osh: parent failed to set process group for PID %d to %d: %s'
660 % (proc.pid, self.pgid, pyutil.strerror(e)))
661
662
663class ExternalProgram(object):
664 """The capability to execute an external program like 'ls'."""
665
666 def __init__(
667 self,
668 hijack_shebang, # type: str
669 fd_state, # type: FdState
670 errfmt, # type: ui.ErrorFormatter
671 debug_f, # type: _DebugFile
672 ):
673 # type: (...) -> None
674 """
675 Args:
676 hijack_shebang: The path of an interpreter to run instead of the one
677 specified in the shebang line. May be empty.
678 """
679 self.hijack_shebang = hijack_shebang
680 self.fd_state = fd_state
681 self.errfmt = errfmt
682 self.debug_f = debug_f
683
684 def Exec(self, argv0_path, cmd_val, environ):
685 # type: (str, cmd_value.Argv, Dict[str, str]) -> None
686 """Execute a program and exit this process.
687
688 Called by: ls / exec ls / ( ls / )
689 """
690 probe('process', 'ExternalProgram_Exec', argv0_path)
691 self._Exec(argv0_path, cmd_val.argv, cmd_val.arg_locs[0], environ,
692 True)
693 assert False, "This line should never execute" # NO RETURN
694
695 def _Exec(self, argv0_path, argv, argv0_loc, environ, should_retry):
696 # type: (str, List[str], loc_t, Dict[str, str], bool) -> None
697 if len(self.hijack_shebang):
698 opened = True
699 try:
700 f = self.fd_state.Open(argv0_path)
701 except (IOError, OSError) as e:
702 opened = False
703
704 if opened:
705 with ctx_FileCloser(f):
706 # Test if the shebang looks like a shell. TODO: The file might be
707 # binary with no newlines, so read 80 bytes instead of readline().
708
709 #line = f.read(80) # type: ignore # TODO: fix this
710 line = f.readline()
711
712 if match.ShouldHijack(line):
713 h_argv = [self.hijack_shebang, argv0_path]
714 h_argv.extend(argv[1:])
715 argv = h_argv
716 argv0_path = self.hijack_shebang
717 self.debug_f.writeln('Hijacked: %s' % argv0_path)
718 else:
719 #self.debug_f.log('Not hijacking %s (%r)', argv, line)
720 pass
721
722 try:
723 posix.execve(argv0_path, argv, environ)
724 except (IOError, OSError) as e:
725 # Run with /bin/sh when ENOEXEC error (no shebang). All shells do this.
726 if e.errno == ENOEXEC and should_retry:
727 new_argv = ['/bin/sh', argv0_path]
728 new_argv.extend(argv[1:])
729 self._Exec('/bin/sh', new_argv, argv0_loc, environ, False)
730 # NO RETURN
731
732 # Would be nice: when the path is relative and ENOENT: print PWD and do
733 # spelling correction?
734
735 self.errfmt.Print_(
736 "Can't execute %r: %s" % (argv0_path, pyutil.strerror(e)),
737 argv0_loc)
738
739 # POSIX mentions 126 and 127 for two specific errors. The rest are
740 # unspecified.
741 #
742 # http://pubs.opengroup.org/onlinepubs/9699919799.2016edition/utilities/V3_chap02.html#tag_18_08_02
743 if e.errno == EACCES:
744 status = 126
745 elif e.errno == ENOENT:
746 # TODO: most shells print 'command not found', rather than strerror()
747 # == "No such file or directory". That's better because it's at the
748 # end of the path search, and we're never searching for a directory.
749 status = 127
750 else:
751 # dash uses 2, but we use that for parse errors. This seems to be
752 # consistent with mksh and zsh.
753 status = 127
754
755 posix._exit(status)
756 # NO RETURN
757
758
759class Thunk(object):
760 """Abstract base class for things runnable in another process."""
761
762 def __init__(self):
763 # type: () -> None
764 """Empty constructor for mycpp."""
765 pass
766
767 def Run(self):
768 # type: () -> None
769 """Returns a status code."""
770 raise NotImplementedError()
771
772 def UserString(self):
773 # type: () -> str
774 """Display for the 'jobs' list."""
775 raise NotImplementedError()
776
777 def __repr__(self):
778 # type: () -> str
779 return self.UserString()
780
781
782class ExternalThunk(Thunk):
783 """An external executable."""
784
785 def __init__(self, ext_prog, argv0_path, cmd_val, environ):
786 # type: (ExternalProgram, str, cmd_value.Argv, Dict[str, str]) -> None
787 self.ext_prog = ext_prog
788 self.argv0_path = argv0_path
789 self.cmd_val = cmd_val
790 self.environ = environ
791
792 def UserString(self):
793 # type: () -> str
794
795 # NOTE: This is the format the Tracer uses.
796 # bash displays sleep $n & (code)
797 # but OSH displays sleep 1 & (argv array)
798 # We could switch the former but I'm not sure it's necessary.
799 tmp = [j8_lite.MaybeShellEncode(a) for a in self.cmd_val.argv]
800 return '[process] %s' % ' '.join(tmp)
801
802 def Run(self):
803 # type: () -> None
804 """An ExternalThunk is run in parent for the exec builtin."""
805 self.ext_prog.Exec(self.argv0_path, self.cmd_val, self.environ)
806
807
808class SubProgramThunk(Thunk):
809 """A subprogram that can be executed in another process."""
810
811 def __init__(
812 self,
813 cmd_ev, # type: CommandEvaluator
814 node, # type: command_t
815 trap_state, # type: trap_osh.TrapState
816 multi_trace, # type: dev.MultiTracer
817 inherit_errexit, # type: bool
818 inherit_errtrace, # type: bool
819 ):
820 # type: (...) -> None
821 self.cmd_ev = cmd_ev
822 self.node = node
823 self.trap_state = trap_state
824 self.multi_trace = multi_trace
825 self.inherit_errexit = inherit_errexit # for bash errexit compatibility
826 self.inherit_errtrace = inherit_errtrace # for bash errtrace compatibility
827
828 def UserString(self):
829 # type: () -> str
830
831 # NOTE: These can be pieces of a pipeline, so they're arbitrary nodes.
832 # TODO: Extract SPIDS from node to display source? Note that
833 # CompoundStatus also has locations of each pipeline component; see
834 # Executor.RunPipeline()
835 thunk_str = ui.CommandType(self.node)
836 return '[subprog] %s' % thunk_str
837
838 def Run(self):
839 # type: () -> None
840 #self.errfmt.OneLineErrExit() # don't quote code in child processes
841 probe('process', 'SubProgramThunk_Run')
842
843 # TODO: break circular dep. Bit flags could go in ASDL or headers.
844 from osh import cmd_eval
845
846 # signal handlers aren't inherited
847 self.trap_state.ClearForSubProgram(self.inherit_errtrace)
848
849 # NOTE: may NOT return due to exec().
850 if not self.inherit_errexit:
851 self.cmd_ev.mutable_opts.DisableErrExit()
852 try:
853 # optimize to eliminate redundant subshells like ( echo hi ) | wc -l etc.
854 self.cmd_ev.ExecuteAndCatch(
855 self.node,
856 cmd_eval.OptimizeSubshells | cmd_eval.MarkLastCommands)
857 status = self.cmd_ev.LastStatus()
858 # NOTE: We ignore the is_fatal return value. The user should set -o
859 # errexit so failures in subprocesses cause failures in the parent.
860 except util.UserExit as e:
861 status = e.status
862
863 # Handle errors in a subshell. These two cases are repeated from main()
864 # and the core/completion.py hook.
865 except KeyboardInterrupt:
866 print('')
867 status = 130 # 128 + 2
868 except (IOError, OSError) as e:
869 print_stderr('oils I/O error (subprogram): %s' %
870 pyutil.strerror(e))
871 status = 2
872
873 # If ProcessInit() doesn't turn off buffering, this is needed before
874 # _exit()
875 pyos.FlushStdout()
876
877 self.multi_trace.WriteDumps()
878
879 # We do NOT want to raise SystemExit here. Otherwise dev.Tracer::Pop()
880 # gets called in BOTH processes.
881 # The crash dump seems to be unaffected.
882 posix._exit(status)
883
884
885class _HereDocWriterThunk(Thunk):
886 """Write a here doc to one end of a pipe.
887
888 May be be executed in either a child process or the main shell
889 process.
890 """
891
892 def __init__(self, w, body_str):
893 # type: (int, str) -> None
894 self.w = w
895 self.body_str = body_str
896
897 def UserString(self):
898 # type: () -> str
899
900 # You can hit Ctrl-Z and the here doc writer will be suspended! Other
901 # shells don't have this problem because they use temp files! That's a bit
902 # unfortunate.
903 return '[here doc writer]'
904
905 def Run(self):
906 # type: () -> None
907 """do_exit: For small pipelines."""
908 probe('process', 'HereDocWriterThunk_Run')
909 #log('Writing %r', self.body_str)
910 posix.write(self.w, self.body_str)
911 #log('Wrote %r', self.body_str)
912 posix.close(self.w)
913 #log('Closed %d', self.w)
914
915 posix._exit(0)
916
917
918class Job(object):
919 """Interface for both Process and Pipeline.
920
921 They both can be put in the background and waited on.
922
923 Confusing thing about pipelines in the background: They have TOO MANY NAMES.
924
925 sleep 1 | sleep 2 &
926
927 - The LAST PID is what's printed at the prompt. This is $!, a PROCESS ID and
928 not a JOB ID.
929 # https://www.gnu.org/software/bash/manual/html_node/Special-Parameters.html#Special-Parameters
930 - The process group leader (setpgid) is the FIRST PID.
931 - It's also %1 or %+. The last job started.
932 """
933
934 def __init__(self):
935 # type: () -> None
936 # Initial state with & or Ctrl-Z is Running.
937 self.state = job_state_e.Running
938 self.job_id = -1
939 self.in_background = False
940
941 def DisplayJob(self, job_id, f, style):
942 # type: (int, mylib.Writer, int) -> None
943 raise NotImplementedError()
944
945 def State(self):
946 # type: () -> job_state_t
947 return self.state
948
949 def ProcessGroupId(self):
950 # type: () -> int
951 """Return the process group ID associated with this job."""
952 raise NotImplementedError()
953
954 def JobWait(self, waiter):
955 # type: (Waiter) -> wait_status_t
956 """Wait for this process/pipeline to be stopped or finished."""
957 raise NotImplementedError()
958
959 def SetBackground(self):
960 # type: () -> None
961 """Record that this job is running in the background."""
962 self.in_background = True
963
964 def SetForeground(self):
965 # type: () -> None
966 """Record that this job is running in the foreground."""
967 self.in_background = False
968
969
970class Process(Job):
971 """A process to run.
972
973 TODO: Should we make it clear that this is a FOREGROUND process? A
974 background process is wrapped in a "job". It is unevaluated.
975
976 It provides an API to manipulate file descriptor state in parent and child.
977 """
978
979 def __init__(self, thunk, job_control, job_list, tracer):
980 # type: (Thunk, JobControl, JobList, dev.Tracer) -> None
981 """
982 Args:
983 thunk: Thunk instance
984 job_list: for process bookkeeping
985 """
986 Job.__init__(self)
987 assert isinstance(thunk, Thunk), thunk
988 self.thunk = thunk
989 self.job_control = job_control
990 self.job_list = job_list
991 self.tracer = tracer
992
993 # For pipelines
994 self.parent_pipeline = None # type: Pipeline
995 self.state_changes = [] # type: List[ChildStateChange]
996 self.close_r = -1
997 self.close_w = -1
998
999 self.pid = -1
1000 self.status = -1
1001
1002 def Init_ParentPipeline(self, pi):
1003 # type: (Pipeline) -> None
1004 """For updating PIPESTATUS."""
1005 self.parent_pipeline = pi
1006
1007 def __repr__(self):
1008 # type: () -> str
1009
1010 # note: be wary of infinite mutual recursion
1011 #s = ' %s' % self.parent_pipeline if self.parent_pipeline else ''
1012 #return '<Process %s%s>' % (self.thunk, s)
1013 return '<Process %s %s>' % (_JobStateStr(self.state), self.thunk)
1014
1015 def ProcessGroupId(self):
1016 # type: () -> int
1017 """Returns the group ID of this process."""
1018 # This should only ever be called AFTER the process has started
1019 assert self.pid != -1
1020 if self.parent_pipeline:
1021 # XXX: Maybe we should die here instead? Unclear if this branch
1022 # should even be reachable with the current builtins.
1023 return self.parent_pipeline.ProcessGroupId()
1024
1025 return self.pid
1026
1027 def DisplayJob(self, job_id, f, style):
1028 # type: (int, mylib.Writer, int) -> None
1029 if job_id == -1:
1030 job_id_str = ' '
1031 else:
1032 job_id_str = '%%%d' % job_id
1033 if style == STYLE_PID_ONLY:
1034 f.write('%d\n' % self.pid)
1035 else:
1036 f.write('%s %d %7s ' %
1037 (job_id_str, self.pid, _JobStateStr(self.state)))
1038 f.write(self.thunk.UserString())
1039 f.write('\n')
1040
1041 def AddStateChange(self, s):
1042 # type: (ChildStateChange) -> None
1043 self.state_changes.append(s)
1044
1045 def AddPipeToClose(self, r, w):
1046 # type: (int, int) -> None
1047 self.close_r = r
1048 self.close_w = w
1049
1050 def MaybeClosePipe(self):
1051 # type: () -> None
1052 if self.close_r != -1:
1053 posix.close(self.close_r)
1054 posix.close(self.close_w)
1055
    def StartProcess(self, why):
        # type: (trace_t) -> int
        """Start this process with fork(), handling redirects.

        In the child: apply the queued state changes (redirects, pgid, etc.),
        restore default signal handlers, and run the thunk, which never
        returns.  In the parent: record the PID, apply parent-side state
        changes, and register the child with the job list.

        Returns:
          The child's PID (in the parent).
        """
        pid = posix.fork()
        if pid < 0:
            # When does this happen?
            e_die('Fatal error in posix.fork()')

        elif pid == 0:  # child
            # Note: this happens in BOTH interactive and non-interactive shells.
            # We technically don't need to do most of it in non-interactive, since we
            # did not change state in InitInteractiveShell().

            for st in self.state_changes:
                st.Apply()

            # Python sets SIGPIPE handler to SIG_IGN by default. Child processes
            # shouldn't have this.
            # https://docs.python.org/2/library/signal.html
            # See Python/pythonrun.c.
            pyos.sigaction(SIGPIPE, SIG_DFL)

            # Respond to Ctrl-\ (core dump)
            pyos.sigaction(SIGQUIT, SIG_DFL)

            # Only standalone children should get Ctrl-Z. Pipelines remain in the
            # foreground because suspending them is difficult with our 'lastpipe'
            # semantics.
            pid = posix.getpid()
            if posix.getpgid(0) == pid and self.parent_pipeline is None:
                pyos.sigaction(SIGTSTP, SIG_DFL)

            # More signals from
            # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html
            # (but not SIGCHLD)
            pyos.sigaction(SIGTTOU, SIG_DFL)
            pyos.sigaction(SIGTTIN, SIG_DFL)

            self.tracer.OnNewProcess(pid)
            # clear foreground pipeline for subshells
            self.thunk.Run()
            # Never returns

        #log('STARTED process %s, pid = %d', self, pid)
        self.tracer.OnProcessStart(pid, why)

        # Class invariant: after the process is started, it stores its PID.
        self.pid = pid

        # SetPgid needs to be applied from the child and the parent to avoid
        # racing in calls to tcsetpgrp() in the parent. See APUE sec. 9.2.
        for st in self.state_changes:
            st.ApplyFromParent(self)

        # Program invariant: We keep track of every child process!
        self.job_list.AddChildProcess(pid, self)

        return pid
1114
1115 def Wait(self, waiter):
1116 # type: (Waiter) -> int
1117 """Wait for this process to finish."""
1118 while self.state == job_state_e.Running:
1119 # Only return if there's nothing to wait for. Keep waiting if we were
1120 # interrupted with a signal.
1121 if waiter.WaitForOne() == W1_ECHILD:
1122 break
1123
1124 assert self.status >= 0, self.status
1125 return self.status
1126
1127 def JobWait(self, waiter):
1128 # type: (Waiter) -> wait_status_t
1129 # wait builtin can be interrupted
1130 while self.state == job_state_e.Running:
1131 result = waiter.WaitForOne()
1132
1133 if result >= 0: # signal
1134 return wait_status.Cancelled(result)
1135
1136 if result == W1_ECHILD:
1137 break
1138
1139 return wait_status.Proc(self.status)
1140
    def WhenStopped(self, stop_sig):
        # type: (int) -> None
        """Called by the Waiter when this process is stopped (e.g. Ctrl-Z).

        Args:
          stop_sig: signal number that stopped the process (e.g. SIGTSTP).
        """

        # 128 is a shell thing
        # https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html
        self.status = 128 + stop_sig
        self.state = job_state_e.Stopped

        if self.job_id == -1:
            # This process was started in the foreground
            self.job_list.AddJob(self)

        if not self.in_background:
            # The shell takes the terminal back, and this job is now treated
            # as a background job (so 'fg' can resume it).
            self.job_control.MaybeTakeTerminal()
            self.SetBackground()
1156
    def WhenDone(self, pid, status):
        # type: (int, int) -> None
        """Called by the Waiter when this Process finishes.

        Records the exit status, forwards the notification to the parent
        pipeline (if any), and removes this process from the job list.
        """

        #log('Process WhenDone %d %d', pid, status)
        assert pid == self.pid, 'Expected %d, got %d' % (self.pid, pid)
        self.status = status
        self.state = job_state_e.Done
        if self.parent_pipeline:
            # The pipeline aggregates component statuses and manages the job
            # list entry itself.
            self.parent_pipeline.WhenDone(pid, status)
        else:
            if self.job_id != -1:
                # Job might have been brought to the foreground after being
                # assigned a job ID.
                if self.in_background:
                    # TODO: bash only prints this interactively
                    print_stderr('[%%%d] PID %d Done' %
                                 (self.job_id, self.pid))

                self.job_list.RemoveJob(self.job_id)

        self.job_list.RemoveChildProcess(self.pid)

        if not self.in_background:
            self.job_control.MaybeTakeTerminal()
1182
    def RunProcess(self, waiter, why):
        # type: (Waiter, trace_t) -> int
        """Run this process synchronously: fork, foreground it, and wait.

        Returns:
          The child's exit status.
        """
        self.StartProcess(why)
        # ShellExecutor might be calling this for the last part of a pipeline.
        if self.parent_pipeline is None:
            # QUESTION: Can the PGID of a single process just be the PID? i.e. avoid
            # calling getpgid()?
            self.job_control.MaybeGiveTerminal(posix.getpgid(self.pid))
        return self.Wait(waiter)
1193
1194
class ctx_Pipe(object):
    """Redirect stdin from a pipe fd for the duration of a 'with' block.

    Construction pushes a stdin-from-pipe frame onto fd_state; __exit__
    pops it, appending any I/O errors to err_out.
    """

    def __init__(self, fd_state, fd, err_out):
        # type: (FdState, int, List[error.IOError_OSError]) -> None
        self.fd_state = fd_state
        self.err_out = err_out
        fd_state.PushStdinFromPipe(fd)

    def __enter__(self):
        # type: () -> None
        return None

    def __exit__(self, type, value, traceback):
        # type: (Any, Any, Any) -> None
        self.fd_state.Pop(self.err_out)
1210
1211
class Pipeline(Job):
    """A pipeline of processes to run.

    Cases we handle:

    foo | bar
    $(foo | bar)
    foo | bar | read v
    """

    def __init__(self, sigpipe_status_ok, job_control, job_list, tracer):
        # type: (bool, JobControl, JobList, dev.Tracer) -> None
        Job.__init__(self)
        self.job_control = job_control
        self.job_list = job_list
        self.tracer = tracer

        self.procs = []  # type: List[Process]
        self.pids = []  # type: List[int]  # pids in order
        self.pipe_status = []  # type: List[int]  # status in order
        self.status = -1  # for 'wait' jobs

        # Process group ID; set in StartPipeline() when job control is on.
        self.pgid = INVALID_PGID

        # Optional for foreground
        self.last_thunk = None  # type: Tuple[CommandEvaluator, command_t]
        self.last_pipe = None  # type: Tuple[int, int]

        # shopt -s sigpipe_status_ok: treat exit 141 (SIGPIPE) as success
        self.sigpipe_status_ok = sigpipe_status_ok

    def ProcessGroupId(self):
        # type: () -> int
        """Returns the group ID of this pipeline."""
        return self.pgid

    def DisplayJob(self, job_id, f, style):
        # type: (int, mylib.Writer, int) -> None
        """Print this pipeline for the 'jobs' builtin, one line per process."""
        if style == STYLE_PID_ONLY:
            f.write('%d\n' % self.procs[0].pid)
        else:
            # Note: this is STYLE_LONG.
            for i, proc in enumerate(self.procs):
                if i == 0:  # show job ID for first element in pipeline
                    job_id_str = '%%%d' % job_id
                else:
                    job_id_str = '  '  # 2 spaces

                f.write('%s %d %7s ' %
                        (job_id_str, proc.pid, _JobStateStr(proc.state)))
                f.write(proc.thunk.UserString())
                f.write('\n')

    def DebugPrint(self):
        # type: () -> None
        print('Pipeline in state %s' % _JobStateStr(self.state))
        if mylib.PYTHON:  # %s for Process not allowed in C++
            for proc in self.procs:
                print('  proc %s' % proc)
            _, last_node = self.last_thunk
            print('  last %s' % last_node)
            print('  pipe_status %s' % self.pipe_status)

    def Add(self, p):
        # type: (Process) -> None
        """Append a process to the pipeline.

        Creates a pipe connecting the previous process's stdout to this
        process's stdin; the state changes are applied later, at fork time.
        """
        if len(self.procs) == 0:
            self.procs.append(p)
            return

        r, w = posix.pipe()
        #log('pipe for %s: %d %d', p, r, w)
        prev = self.procs[-1]

        prev.AddStateChange(StdoutToPipe(r, w))  # applied on StartPipeline()
        p.AddStateChange(StdinFromPipe(r, w))  # applied on StartPipeline()

        p.AddPipeToClose(r, w)  # MaybeClosePipe() on StartPipeline()

        self.procs.append(p)

    def AddLast(self, thunk):
        # type: (Tuple[CommandEvaluator, command_t]) -> None
        """Append the last node to the pipeline.

        This is run in the CURRENT process. It is OPTIONAL, because
        pipelines in the background are run uniformly.
        """
        self.last_thunk = thunk

        assert len(self.procs) != 0

        r, w = posix.pipe()
        prev = self.procs[-1]
        prev.AddStateChange(StdoutToPipe(r, w))

        self.last_pipe = (r, w)  # So we can connect it to last_thunk

    def StartPipeline(self, waiter):
        # type: (Waiter) -> None
        """Fork every process in the pipeline (except the optional last part).

        With job control enabled, the first child's PID becomes the
        pipeline's process group ID.
        """

        # If we are creating a pipeline in a subshell or we aren't running with job
        # control, our children should remain in our inherited process group.
        # Otherwise the first child's PID will become the pipeline's group ID.
        if self.job_control.Enabled():
            self.pgid = OWN_LEADER  # first process in pipeline is the leader

        for i, proc in enumerate(self.procs):
            if self.pgid != INVALID_PGID:
                proc.AddStateChange(SetPgid(self.pgid, self.tracer))

            # Figure out the pid
            pid = proc.StartProcess(trace.PipelinePart)
            if i == 0 and self.pgid != INVALID_PGID:
                # Mimic bash and use the PID of the FIRST process as the group for the
                # whole pipeline.
                self.pgid = pid

            self.pids.append(pid)
            self.pipe_status.append(-1)  # uninitialized

            # NOTE: This is done in the SHELL PROCESS after every fork() call.
            # It can't be done at the end; otherwise processes will have descriptors
            # from non-adjacent pipes.
            proc.MaybeClosePipe()

        if self.last_thunk:
            self.pipe_status.append(-1)  # for self.last_thunk

    def LastPid(self):
        # type: () -> int
        """For the odd $! variable.

        It would be better if job IDs or PGIDs were used consistently.
        """
        return self.pids[-1]

    def Wait(self, waiter):
        # type: (Waiter) -> List[int]
        """Wait for this pipeline to finish.

        Returns:
          The list of component exit statuses, in pipeline order.
        """

        assert self.procs, "no procs for Wait()"
        # waitpid(-1) zero or more times
        while self.state == job_state_e.Running:
            # Keep waiting until there's nothing to wait for.
            if waiter.WaitForOne() == W1_ECHILD:
                break

        return self.pipe_status

    def JobWait(self, waiter):
        # type: (Waiter) -> wait_status_t
        """Called by 'wait' builtin, e.g. 'wait %1'."""
        # wait builtin can be interrupted
        assert self.procs, "no procs for Wait()"
        while self.state == job_state_e.Running:
            result = waiter.WaitForOne()

            if result >= 0:  # signal
                return wait_status.Cancelled(result)

            if result == W1_ECHILD:
                break

        return wait_status.Pipeline(self.pipe_status)

    def RunLastPart(self, waiter, fd_state):
        # type: (Waiter, FdState) -> List[int]
        """Run this pipeline synchronously (foreground pipeline).

        Returns:
          pipe_status (list of integers).
        """
        assert len(self.pids) == len(self.procs)

        # TODO: break circular dep. Bit flags could go in ASDL or headers.
        from osh import cmd_eval

        # This is tcsetpgrp()
        # TODO: fix race condition -- I believe the first process could have
        # stopped already, and thus getpgid() will fail
        self.job_control.MaybeGiveTerminal(self.pgid)

        # Run the last part of the pipeline IN PARALLEL with other processes. It
        # may or may not fork:
        # echo foo | read line # no fork, the builtin runs in THIS shell process
        # ls | wc -l # fork for 'wc'

        cmd_ev, last_node = self.last_thunk

        assert self.last_pipe is not None
        r, w = self.last_pipe  # set in AddLast()
        posix.close(w)  # we will not write here

        # Fix lastpipe / job control / DEBUG trap interaction
        cmd_flags = cmd_eval.NoDebugTrap if self.job_control.Enabled() else 0

        # The ERR trap only runs for the WHOLE pipeline, not the COMPONENTS in
        # a pipeline.
        cmd_flags |= cmd_eval.NoErrTrap

        io_errors = []  # type: List[error.IOError_OSError]
        with ctx_Pipe(fd_state, r, io_errors):
            cmd_ev.ExecuteAndCatch(last_node, cmd_flags)

        if len(io_errors):
            e_die('Error setting up last part of pipeline: %s' %
                  pyutil.strerror(io_errors[0]))

        # We won't read anymore. If we don't do this, then 'cat' in 'cat
        # /dev/urandom | sleep 1' will never get SIGPIPE.
        posix.close(r)

        self.pipe_status[-1] = cmd_ev.LastStatus()
        if self.AllDone():
            self.state = job_state_e.Done

        #log('pipestatus before all have finished = %s', self.pipe_status)
        return self.Wait(waiter)

    def AllDone(self):
        # type: () -> bool
        """True if every component's exit status has been recorded."""

        # mycpp rewrite: all(status != -1 for status in self.pipe_status)
        for status in self.pipe_status:
            if status == -1:
                return False
        return True

    def WhenDone(self, pid, status):
        # type: (int, int) -> None
        """Called by Process.WhenDone."""
        #log('Pipeline WhenDone %d %d', pid, status)
        i = self.pids.index(pid)
        assert i != -1, 'Unexpected PID %d' % pid

        # 141 = 128 + SIGPIPE; optionally treat it as success
        if status == 141 and self.sigpipe_status_ok:
            status = 0

        self.job_list.RemoveChildProcess(pid)
        self.pipe_status[i] = status
        if self.AllDone():
            if self.job_id != -1:
                # Job might have been brought to the foreground after being
                # assigned a job ID.
                if self.in_background:
                    print_stderr('[%%%d] PGID %d Done' %
                                 (self.job_id, self.pids[0]))

                self.job_list.RemoveJob(self.job_id)

            # status of pipeline is status of last process
            self.status = self.pipe_status[-1]
            self.state = job_state_e.Done
            if not self.in_background:
                self.job_control.MaybeTakeTerminal()
1467
1468
def _JobStateStr(i):
    # type: (job_state_t) -> str
    """Return the enum's name without its 'job_state.' prefix."""
    s = job_state_str(i)
    return s[len('job_state.'):]
1472
1473
def _GetTtyFd():
    # type: () -> int
    """Open /dev/tty and return its fd, or -1 if stdio is not a TTY."""
    try:
        fd = posix.open("/dev/tty", O_NONBLOCK | O_NOCTTY | O_RDWR, 0o666)
    except (IOError, OSError):
        return -1
    return fd
1481
1482
class ctx_TerminalControl(object):
    """Own job control for the duration of a 'with' block.

    Entering initializes job control; exiting hands the terminal back to
    its original owner, reporting (but not propagating) fatal errors.
    """

    def __init__(self, job_control, errfmt):
        # type: (JobControl, ui.ErrorFormatter) -> None
        self.job_control = job_control
        self.errfmt = errfmt
        job_control.InitJobControl()

    def __enter__(self):
        # type: () -> None
        return None

    def __exit__(self, type, value, traceback):
        # type: (Any, Any, Any) -> None

        # Give the TTY back to whoever owned it before we started.
        try:
            self.job_control.MaybeReturnTerminal()
        except error.FatalRuntime as e:
            # A failure here shouldn't abort the shell; just report it.
            self.errfmt.PrettyPrintError(e)
1504
1505
class JobControl(object):
    """Interface to setpgid(), tcsetpgrp(), etc."""

    def __init__(self):
        # type: () -> None

        # The main shell's PID and group ID.
        self.shell_pid = -1
        self.shell_pgid = -1

        # The fd of the controlling tty. Set to -1 when job control is disabled.
        self.shell_tty_fd = -1

        # For giving the terminal back to our parent before exiting (if not a login
        # shell).
        self.original_tty_pgid = -1

    def InitJobControl(self):
        # type: () -> None
        """Try to become a process group leader and take the terminal.

        On any failure, job control is disabled by leaving/setting
        shell_tty_fd to -1 (see Enabled()).
        """
        self.shell_pid = posix.getpid()
        orig_shell_pgid = posix.getpgid(0)
        self.shell_pgid = orig_shell_pgid
        self.shell_tty_fd = _GetTtyFd()

        # If we aren't the leader of our process group, create a group and mark
        # ourselves as the leader.
        if self.shell_pgid != self.shell_pid:
            try:
                posix.setpgid(self.shell_pid, self.shell_pid)
                self.shell_pgid = self.shell_pid
            except (IOError, OSError) as e:
                self.shell_tty_fd = -1

        if self.shell_tty_fd != -1:
            self.original_tty_pgid = posix.tcgetpgrp(self.shell_tty_fd)

            # If stdio is a TTY, put the shell's process group in the foreground.
            try:
                posix.tcsetpgrp(self.shell_tty_fd, self.shell_pgid)
            except (IOError, OSError) as e:
                # We probably aren't in the session leader's process group. Disable job
                # control.
                self.shell_tty_fd = -1
                self.shell_pgid = orig_shell_pgid
                posix.setpgid(self.shell_pid, self.shell_pgid)

    def Enabled(self):
        # type: () -> bool
        """
        Only the main shell process should bother with job control functions.
        """
        #log('ENABLED? %d', self.shell_tty_fd)

        # TODO: get rid of getpid()? I think SubProgramThunk should set a
        # flag.
        return self.shell_tty_fd != -1 and posix.getpid() == self.shell_pid

    # TODO: This isn't a PID. This is a process group ID?
    #
    # What should the table look like?
    #
    # Do we need the last PID? I don't know why bash prints that. Probably so
    # you can do wait $!
    # wait -n waits for any node to go from job_state_e.Running to job_state_e.Done?
    #
    # And it needs a flag for CURRENT, for the implicit arg to 'fg'.
    # job_id is just an integer. This is sort of lame.
    #
    # [job_id, flag, pgid, job_state, node]

    def MaybeGiveTerminal(self, pgid):
        # type: (int) -> None
        """If stdio is a TTY, move the given process group to the
        foreground."""
        if not self.Enabled():
            # Only call tcsetpgrp when job control is enabled.
            return

        try:
            posix.tcsetpgrp(self.shell_tty_fd, pgid)
        except (IOError, OSError) as e:
            e_die('osh: Failed to move process group %d to foreground: %s' %
                  (pgid, pyutil.strerror(e)))

    def MaybeTakeTerminal(self):
        # type: () -> None
        """If stdio is a TTY, return the main shell's process group to the
        foreground."""
        self.MaybeGiveTerminal(self.shell_pgid)

    def MaybeReturnTerminal(self):
        # type: () -> None
        """Called before the shell exits."""
        self.MaybeGiveTerminal(self.original_tty_pgid)
1600
1601
class JobList(object):
    """Global list of jobs, used by a few builtins."""

    def __init__(self):
        # type: () -> None

        # job_id -> Job instance
        self.jobs = {}  # type: Dict[int, Job]

        # pid -> Process. This is for STOP notification.
        self.child_procs = {}  # type: Dict[int, Process]
        self.debug_pipelines = []  # type: List[Pipeline]

        # Counter used to assign IDs to jobs. It is incremented every time a job
        # is created. Once all active jobs are done it is reset to 1. I'm not
        # sure if this reset behavior is mandated by POSIX, but other shells do
        # it, so we mimic for the sake of compatibility.
        self.job_id = 1

    def AddJob(self, job):
        # type: (Job) -> int
        """Add a background job to the list.

        A job is either a Process or Pipeline. You can resume a job with 'fg',
        kill it with 'kill', etc.

        Two cases:

        1. async jobs: sleep 5 | sleep 4 &
        2. stopped jobs: sleep 5; then Ctrl-Z

        Returns:
          The job ID assigned to the job (also stored on the Job itself).
        """
        job_id = self.job_id
        self.jobs[job_id] = job
        job.job_id = job_id
        self.job_id += 1
        return job_id

    def RemoveJob(self, job_id):
        # type: (int) -> None
        """Process and Pipeline can call this."""
        mylib.dict_erase(self.jobs, job_id)

        # Reset the counter when the list drains (matches other shells).
        if len(self.jobs) == 0:
            self.job_id = 1

    def AddChildProcess(self, pid, proc):
        # type: (int, Process) -> None
        """Every child process should be added here as soon as we know its PID.

        When the Waiter gets an EXITED or STOPPED notification, we need
        to know about it so 'jobs' can work.
        """
        self.child_procs[pid] = proc

    def RemoveChildProcess(self, pid):
        # type: (int) -> None
        """Remove the child process with the given PID."""
        mylib.dict_erase(self.child_procs, pid)

    if mylib.PYTHON:

        def AddPipeline(self, pi):
            # type: (Pipeline) -> None
            """For debugging only."""
            self.debug_pipelines.append(pi)

    def ProcessFromPid(self, pid):
        # type: (int) -> Process
        """For wait $PID.

        There's no way to wait for a pipeline with a PID. That uses job
        syntax, e.g. %1. Not a great interface.
        """
        return self.child_procs.get(pid)

    def GetCurrentAndPreviousJobs(self):
        # type: () -> Tuple[Optional[Job], Optional[Job]]
        """Return the "current" and "previous" jobs (AKA `%+` and `%-`).

        See the POSIX specification for the `jobs` builtin for details:
        https://pubs.opengroup.org/onlinepubs/007904875/utilities/jobs.html

        IMPORTANT NOTE: This method assumes that the jobs list will not change
        during its execution! This assumption holds for now because we only ever
        update the jobs list from the main loop after WaitPid() informs us of a
        change. If we implement `set -b` and install a signal handler for
        SIGCHLD we should be careful to synchronize it with this function. The
        unsafety of mutating GC data structures from a signal handler should
        make this a non-issue, but if bugs related to this appear this note may
        be helpful...
        """
        # Split all active jobs by state and sort each group by decreasing job
        # ID to approximate newness.
        stopped_jobs = []  # type: List[Job]
        running_jobs = []  # type: List[Job]
        for i in xrange(0, self.job_id):
            job = self.jobs.get(i, None)
            if not job:
                continue

            if job.state == job_state_e.Stopped:
                stopped_jobs.append(job)

            elif job.state == job_state_e.Running:
                running_jobs.append(job)

        current = None  # type: Optional[Job]
        previous = None  # type: Optional[Job]
        # POSIX says: If there is any suspended job, then the current job shall
        # be a suspended job. If there are at least two suspended jobs, then the
        # previous job also shall be a suspended job.
        #
        # So, we will only return running jobs from here if there are no recent
        # stopped jobs.
        if len(stopped_jobs) > 0:
            current = stopped_jobs.pop()

            if len(stopped_jobs) > 0:
                previous = stopped_jobs.pop()

        if len(running_jobs) > 0 and not current:
            current = running_jobs.pop()

        if len(running_jobs) > 0 and not previous:
            previous = running_jobs.pop()

        if not previous:
            previous = current

        return current, previous

    def GetJobWithSpec(self, job_spec):
        # type: (str) -> Optional[Job]
        """Parse the given job spec and return the matching job. If there is no
        matching job, this function returns None.

        See the POSIX spec for the `jobs` builtin for details about job specs:
        https://pubs.opengroup.org/onlinepubs/007904875/utilities/jobs.html
        """
        if job_spec in CURRENT_JOB_SPECS:
            current, _ = self.GetCurrentAndPreviousJobs()
            return current

        if job_spec == '%-':
            _, previous = self.GetCurrentAndPreviousJobs()
            return previous

        # TODO: Add support for job specs based on prefixes of process argv.
        m = util.RegexSearch(r'^%([0-9]+)$', job_spec)
        if m is not None:
            assert len(m) == 2
            job_id = int(m[1])
            if job_id in self.jobs:
                return self.jobs[job_id]

        return None

    def DisplayJobs(self, style):
        # type: (int) -> None
        """Used by the 'jobs' builtin.

        https://pubs.opengroup.org/onlinepubs/9699919799/utilities/jobs.html

        "By default, the jobs utility shall display the status of all stopped jobs,
        running background jobs and all jobs whose status has changed and have not
        been reported by the shell."
        """
        # NOTE: A job is a background process or pipeline.
        #
        # echo hi | wc -l -- this starts two processes. Wait for TWO
        # echo hi | wc -l & -- this starts a process which starts two processes
        # Wait for ONE.
        #
        # 'jobs -l' GROUPS the PIDs by job. It has the job number, + - indicators
        # for %% and %-, PID, status, and "command".
        #
        # Every component of a pipeline is on the same line with 'jobs', but
        # they're separated into different lines with 'jobs -l'.
        #
        # See demo/jobs-builtin.sh

        # $ jobs -l
        # [1]+ 24414 Stopped sleep 5
        # 24415 | sleep 5
        # [2] 24502 Running sleep 6
        # 24503 | sleep 6
        # 24504 | sleep 5 &
        # [3]- 24508 Running sleep 6
        # 24509 | sleep 6
        # 24510 | sleep 5 &

        f = mylib.Stdout()
        for job_id, job in iteritems(self.jobs):
            # Use the %1 syntax
            job.DisplayJob(job_id, f, style)

    def DebugPrint(self):
        # type: () -> None
        """Dump the child process and pipeline tables to stdout."""

        f = mylib.Stdout()
        f.write('\n')
        f.write('[process debug info]\n')

        for pid, proc in iteritems(self.child_procs):
            proc.DisplayJob(-1, f, STYLE_DEFAULT)
            #p = ' |' if proc.parent_pipeline else ''
            #print('%d %7s %s%s' % (pid, _JobStateStr(proc.state), proc.thunk.UserString(), p))

        if len(self.debug_pipelines):
            f.write('\n')
            f.write('[pipeline debug info]\n')
            for pi in self.debug_pipelines:
                pi.DebugPrint()

    def ListRecent(self):
        # type: () -> None
        """For jobs -n, which I think is also used in the interactive
        prompt."""
        pass

    def NumRunning(self):
        # type: () -> int
        """Return the number of running jobs.

        Used by 'wait' and 'wait -n'.
        """
        count = 0
        for _, job in iteritems(self.jobs):  # mycpp rewrite: from itervalues()
            if job.State() == job_state_e.Running:
                count += 1
        return count
1833
1834
# Some WaitForOne() return values.  All negative, so they can't collide with
# signal numbers (>= 0), which WaitForOne() also returns.
W1_OK = -2  # waitpid(-1) returned
W1_ECHILD = -3  # no processes to wait for
W1_AGAIN = -4  # WNOHANG was passed and there were no state changes
1839
1840
class Waiter(object):
    """A capability to wait for processes.

    This must be a singleton (and is because CommandEvaluator is a singleton).

    Invariants:
    - Every child process is registered once
    - Every child process is waited for

    Canonical example of why we need a GLOBAL waiter:

    { sleep 3; echo 'done 3'; } &
    { sleep 4; echo 'done 4'; } &

    # ... do arbitrary stuff ...

    { sleep 1; exit 1; } | { sleep 2; exit 2; }

    Now when you do wait() after starting the pipeline, you might get a pipeline
    process OR a background process! So you have to distinguish between them.
    """

    def __init__(self, job_list, exec_opts, signal_safe, tracer):
        # type: (JobList, optview.Exec, pyos.SignalSafe, dev.Tracer) -> None
        self.job_list = job_list
        self.exec_opts = exec_opts
        self.signal_safe = signal_safe
        self.tracer = tracer
        self.last_status = 127  # wait -n error code

    def WaitForOne(self, waitpid_options=0):
        # type: (int) -> int
        """Wait until the next process returns (or maybe Ctrl-C).

        Returns:
          One of these negative numbers:
            W1_ECHILD           Nothing to wait for
            W1_OK               Caller should keep waiting
            UNTRAPPED_SIGWINCH
          Or
            result > 0          Signal that waitpid() was interrupted with

          In the interactive shell, we return 0 if we get a Ctrl-C, so the caller
          will try again.

        Callers:
          wait -n          -- loop until there is one fewer process (TODO)
          wait             -- loop until there are no processes
          wait $!          -- loop until job state is Done (process or pipeline)
          Process::Wait()  -- loop until Process state is done
          Pipeline::Wait() -- loop until Pipeline state is done

        Comparisons:
          bash: jobs.c waitchld() Has a special case macro(!) CHECK_WAIT_INTR for
          the wait builtin

          dash: jobs.c waitproc() uses sigfillset(), sigprocmask(), etc. Runs in a
          loop while (gotsigchld), but that might be a hack for System V!

        Should we have a cleaner API like named posix::wait_for_one() ?

        wait_result =
          ECHILD                     -- nothing to wait for
        | Done(int pid, int status)  -- process done
        | EINTR(bool sigint)         -- may or may not retry
        """
        pid, status = pyos.WaitPid(waitpid_options)
        if pid == 0:  # WNOHANG passed, and no state changes
            return W1_AGAIN
        elif pid < 0:  # error case
            err_num = status
            #log('waitpid() error => %d %s', e.errno, pyutil.strerror(e))
            if err_num == ECHILD:
                return W1_ECHILD  # nothing to wait for caller should stop
            elif err_num == EINTR:  # Bug #858 fix
                #log('WaitForOne() => %d', self.trap_state.GetLastSignal())
                return self.signal_safe.LastSignal()  # e.g. 1 for SIGHUP
            else:
                # The signature of waitpid() means this shouldn't happen
                raise AssertionError()

        # All child processes are supposed to be in this dict. But this may
        # legitimately happen if a grandchild outlives the child (its parent).
        # Then it is reparented under this process, so we might receive
        # notification of its exit, even though we didn't start it. We can't have
        # any knowledge of such processes, so print a warning.
        if pid not in self.job_list.child_procs:
            print_stderr("oils: PID %d Stopped, but osh didn't start it" % pid)
            return W1_OK

        proc = self.job_list.child_procs[pid]
        if 0:
            self.job_list.DebugPrint()

        # Decode the raw waitpid() status and notify the Process object.
        if WIFSIGNALED(status):
            term_sig = WTERMSIG(status)
            status = 128 + term_sig  # shell convention for signal deaths

            # Print newline after Ctrl-C.
            if term_sig == SIGINT:
                print('')

            proc.WhenDone(pid, status)

        elif WIFEXITED(status):
            status = WEXITSTATUS(status)
            #log('exit status: %s', status)
            proc.WhenDone(pid, status)

        elif WIFSTOPPED(status):
            #status = WEXITSTATUS(status)
            stop_sig = WSTOPSIG(status)

            print_stderr('')
            print_stderr('oils: PID %d Stopped with signal %d' %
                         (pid, stop_sig))
            proc.WhenStopped(stop_sig)

        else:
            raise AssertionError(status)

        self.last_status = status  # for wait -n
        self.tracer.OnProcessEnd(pid, status)
        return W1_OK

    def PollNotifications(self):
        # type: () -> None
        """
        Process all pending state changes.
        """
        # WNOHANG: drain whatever has already happened without blocking.
        while self.WaitForOne(waitpid_options=WNOHANG) == W1_OK:
            continue