# OILS / core / process.py -- https://oils.pub (2242 lines, 1054 significant)
1# Copyright 2016 Andy Chu. All rights reserved.
2# Licensed under the Apache License, Version 2.0 (the "License");
3# you may not use this file except in compliance with the License.
4# You may obtain a copy of the License at
5#
6# http://www.apache.org/licenses/LICENSE-2.0
7"""process.py - Launch processes and manipulate file descriptors."""
8from __future__ import print_function
9
10from errno import EACCES, EBADF, ECHILD, EINTR, ENOENT, ENOEXEC, EEXIST
11import fcntl as fcntl_
12from fcntl import F_DUPFD, F_GETFD, F_SETFD, FD_CLOEXEC
13from signal import (SIG_DFL, SIG_IGN, SIGINT, SIGPIPE, SIGQUIT, SIGTSTP,
14 SIGTTOU, SIGTTIN, SIGWINCH)
15
16from _devbuild.gen.id_kind_asdl import Id
17from _devbuild.gen.runtime_asdl import (job_state_e, job_state_t,
18 job_state_str, wait_status,
19 wait_status_t, RedirValue,
20 redirect_arg, redirect_arg_e, trace,
21 trace_t)
22from _devbuild.gen.syntax_asdl import (
23 loc_t,
24 redir_loc,
25 redir_loc_e,
26 redir_loc_t,
27)
28from _devbuild.gen.value_asdl import (value, value_e)
29from core import dev
30from core import error
31from core.error import e_die
32from core import pyutil
33from core import pyos
34from core import state
35from display import ui
36from core import util
37from data_lang import j8_lite
38from frontend import location
39from frontend import match
40from mycpp import iolib
41from mycpp import mylib
42from mycpp.mylib import log, print_stderr, probe, tagswitch, iteritems
43
44import posix_ as posix
45from posix_ import (
46 # translated by mycpp and directly called! No wrapper!
47 WIFSIGNALED,
48 WIFEXITED,
49 WIFSTOPPED,
50 WEXITSTATUS,
51 WSTOPSIG,
52 WTERMSIG,
53 WNOHANG,
54 O_APPEND,
55 O_CREAT,
56 O_EXCL,
57 O_NONBLOCK,
58 O_NOCTTY,
59 O_RDONLY,
60 O_RDWR,
61 O_WRONLY,
62 O_TRUNC,
63 F_DUPFD_CLOEXEC, # OSH patch - this didn't appear until Python 3.2
64)
65
66from typing import IO, List, Tuple, Dict, Optional, Any, cast, TYPE_CHECKING
67
68if TYPE_CHECKING:
69 from _devbuild.gen.runtime_asdl import cmd_value
70 from _devbuild.gen.syntax_asdl import command_t
71 from builtin import trap_osh
72 from core import optview
73 from core import vm
74 from core.util import _DebugFile
75 from osh.cmd_eval import CommandEvaluator
76
77NO_FD = -1
78
79# Minimum file descriptor that the shell can use. Other descriptors can be
80# directly used by user programs, e.g. exec 9>&1
81#
82# Oils uses 100 because users are allowed TWO digits in frontend/lexer_def.py.
83# This is a compromise between bash (unlimited, but requires crazy
84# bookkeeping), and dash/zsh (10) and mksh (24)
85_SHELL_MIN_FD = 100
86
87# Style for 'jobs' builtin
88STYLE_DEFAULT = 0
89STYLE_LONG = 1
90STYLE_PID_ONLY = 2
91
92# To save on allocations in JobList::JobFromSpec()
93CURRENT_JOB_SPECS = ['', '%', '%%', '%+']
94
95
class ctx_FileCloser(object):
    """Context manager that closes an already-open file on scope exit.

    Unlike 'with open(...)', this wraps a file the caller opened, so the
    caller controls how it was opened (e.g. via FdState.Open).
    """

    def __init__(self, f):
        # type: (mylib.LineReader) -> None
        self.f = f

    def __enter__(self):
        # type: () -> None
        return None

    def __exit__(self, type, value, traceback):
        # type: (Any, Any, Any) -> None
        # Close unconditionally; any in-flight exception still propagates
        # because we return None (falsy).
        self.f.close()
110
def InitInteractiveShell(signal_safe):
    # type: (iolib.SignalSafe) -> None
    """Set up signal dispositions for an interactive shell.

    The shell itself ignores Ctrl-\\ (SIGQUIT) and Ctrl-Z (SIGTSTP), plus
    the terminal I/O signals recommended by
    https://www.gnu.org/software/libc/manual/html_node/Initializing-the-Shell.html
    (but not SIGCHLD).  Ignoring SIGTSTP prevents Ctrl-Z from suspending
    OSH itself in interactive mode.
    """
    for sig in [SIGQUIT, SIGTSTP, SIGTTOU, SIGTTIN]:
        iolib.sigaction(sig, SIG_IGN)

    # Register a callback to receive terminal width changes.
    # NOTE: In line_input.c, we turned off rl_catch_sigwinch.
    #
    # This is ALWAYS on, which means that it can cause EINTR, and wait()
    # and read() have to handle it.
    iolib.RegisterSignalInterest(SIGWINCH)
133
134
135_ = F_DUPFD_CLOEXEC # shut up lint for now
136
def SaveFd(fd):
    # type: (int) -> int
    """Duplicate fd into the shell-reserved descriptor range (>= 100).

    The copy is marked close-on-exec so it never leaks to child
    processes.  May raise IOError/OSError (e.g. EBADF if fd isn't open).
    """
    # F_DUPFD returns the lowest free descriptor >= _SHELL_MIN_FD.
    new_fd = fcntl_.fcntl(fd, F_DUPFD, _SHELL_MIN_FD)  # type: int

    # Bug fix: make sure we never leak the saved descriptor to child processes
    fcntl_.fcntl(new_fd, F_SETFD, FD_CLOEXEC)

    # TODO: F_DUPFD_CLOEXEC would do both steps in a single call:
    #new_fd = fcntl_.fcntl(fd, F_DUPFD_CLOEXEC, _SHELL_MIN_FD)  # type: int

    return new_fd
150
151
152class _RedirFrame(object):
153
154 def __init__(self, saved_fd, orig_fd, forget):
155 # type: (int, int, bool) -> None
156 self.saved_fd = saved_fd
157 self.orig_fd = orig_fd
158 self.forget = forget
159
160
161class _FdFrame(object):
162
163 def __init__(self):
164 # type: () -> None
165 self.saved = [] # type: List[_RedirFrame]
166 self.need_wait = [] # type: List[Process]
167
168 def Forget(self):
169 # type: () -> None
170 """For exec 1>&2."""
171 for rf in reversed(self.saved):
172 if rf.saved_fd != NO_FD and rf.forget:
173 posix.close(rf.saved_fd)
174
175 del self.saved[:] # like list.clear() in Python 3.3
176 del self.need_wait[:]
177
178 def __repr__(self):
179 # type: () -> str
180 return '<_FdFrame %s>' % self.saved
181
182
class FdState(object):
    """File descriptor state for the current process.

    For example, you can do 'myfunc > out.txt' without forking.  Child
    processes inherit our state.

    Maintains a stack of _FdFrame; each Push() opens a new frame of
    redirects and Pop() undoes exactly one frame.
    """

    def __init__(
            self,
            errfmt,  # type: ui.ErrorFormatter
            job_control,  # type: JobControl
            job_list,  # type: JobList
            mem,  # type: state.Mem
            tracer,  # type: Optional[dev.Tracer]
            waiter,  # type: Optional[Waiter]
            exec_opts,  # type: optview.Exec
    ):
        # type: (...) -> None
        """
        Args:
          errfmt: for errors
          job_list: For keeping track of _HereDocWriterThunk
        """
        self.errfmt = errfmt
        self.job_control = job_control
        self.job_list = job_list
        self.cur_frame = _FdFrame()  # for the top level
        self.stack = [self.cur_frame]
        self.mem = mem
        self.tracer = tracer
        self.waiter = waiter
        self.exec_opts = exec_opts

    def Open(self, path):
        # type: (str) -> mylib.LineReader
        """Opens a path for read, but moves it out of the reserved 3-9 fd
        range.

        Returns:
          A Python file object. The caller is responsible for Close().

        Raises:
          IOError or OSError if the path can't be found. (This is Python-induced wart)
        """
        fd_mode = O_RDONLY
        f = self._Open(path, 'r', fd_mode)

        # Hacky downcast
        return cast('mylib.LineReader', f)

    # used for util.DebugFile
    def OpenForWrite(self, path):
        # type: (str) -> mylib.Writer
        """Open path for writing (creating it if needed), above the
        reserved fd range.  Caller is responsible for Close()."""
        fd_mode = O_CREAT | O_RDWR
        f = self._Open(path, 'w', fd_mode)

        # Hacky downcast
        return cast('mylib.Writer', f)

    def _Open(self, path, c_mode, fd_mode):
        # type: (str, str, int) -> IO[str]
        """Open path and wrap the fd in a file object, after moving the fd
        above _SHELL_MIN_FD so it can't collide with user redirects."""
        fd = posix.open(path, fd_mode, 0o666)  # may raise OSError

        # Immediately move it to a new location
        new_fd = SaveFd(fd)
        posix.close(fd)

        # Return a Python file handle
        f = posix.fdopen(new_fd, c_mode)  # may raise IOError
        return f

    def _WriteFdToMem(self, fd_name, fd):
        # type: (str, int) -> None
        # Stores the descriptor number in a shell variable, for the
        # {fd}>file form of redirect.
        if self.mem:
            # setvar, not setref
            state.OshLanguageSetValue(self.mem, location.LName(fd_name),
                                      value.Str(str(fd)))

    def _ReadFdFromMem(self, fd_name):
        # type: (str) -> int
        # Reads a descriptor number back out of a shell variable; returns
        # NO_FD if it's unset or not a valid integer string.
        val = self.mem.GetValue(fd_name)
        if val.tag() == value_e.Str:
            try:
                return int(cast(value.Str, val).s)
            except ValueError:
                return NO_FD
        return NO_FD

    def _PushSave(self, fd):
        # type: (int) -> bool
        """Save fd to a new location and remember to restore it later.

        Returns True if fd was open and has been saved; False if fd was
        not open (EBADF), in which case we only remember to close it.
        """
        #log('---- _PushSave %s', fd)
        ok = True
        try:
            new_fd = SaveFd(fd)
        except (IOError, OSError) as e:
            ok = False
            # Example program that causes this error: exec 4>&1. Descriptor 4 isn't
            # open.
            # This seems to be ignored in dash too in savefd()?
            if e.errno != EBADF:
                raise
        if ok:
            posix.close(fd)
            self.cur_frame.saved.append(_RedirFrame(new_fd, fd, True))
        else:
            # if we got EBADF, we still need to close the original on Pop()
            self._PushClose(fd)

        return ok

    def _PushDup(self, fd1, blame_loc):
        # type: (int, redir_loc_t) -> int
        """Save fd2 in a higher range, and dup fd1 onto fd2.

        Returns the new descriptor, or NO_FD if the dup failed (or was
        unnecessary because fd1 already equals the target).
        """
        UP_loc = blame_loc
        if blame_loc.tag() == redir_loc_e.VarName:
            # {fd}>file case: allocate a fresh high descriptor and store
            # its number in the named shell variable.
            fd2_name = cast(redir_loc.VarName, UP_loc).name
            try:
                # F_DUPFD: GREATER than range
                new_fd = fcntl_.fcntl(fd1, F_DUPFD, _SHELL_MIN_FD)  # type: int
            except (IOError, OSError) as e:
                if e.errno == EBADF:
                    print_stderr('F_DUPFD fd %d: %s' %
                                 (fd1, pyutil.strerror(e)))
                    return NO_FD
                else:
                    raise  # this redirect failed

            self._WriteFdToMem(fd2_name, new_fd)

        elif blame_loc.tag() == redir_loc_e.Fd:
            # 5>file case: dup onto the literal descriptor.
            fd2 = cast(redir_loc.Fd, UP_loc).fd

            if fd1 == fd2:
                # The user could have asked for it to be open on descriptor 3, but open()
                # already returned 3, e.g. echo 3>out.txt
                return NO_FD

            # Check the validity of fd1 before _PushSave(fd2)
            try:
                fcntl_.fcntl(fd1, F_GETFD)
            except (IOError, OSError) as e:
                print_stderr('F_GETFD fd %d: %s' % (fd1, pyutil.strerror(e)))
                raise

            need_restore = self._PushSave(fd2)

            #log('==== dup2 %s %s\n' % (fd1, fd2))
            try:
                posix.dup2(fd1, fd2)
            except (IOError, OSError) as e:
                # bash/dash give this error too, e.g. for 'echo hi 1>&3'
                print_stderr('dup2(%d, %d): %s' %
                             (fd1, fd2, pyutil.strerror(e)))

                # Restore and return error
                if need_restore:
                    rf = self.cur_frame.saved.pop()
                    posix.dup2(rf.saved_fd, rf.orig_fd)
                    posix.close(rf.saved_fd)

                raise  # this redirect failed

            new_fd = fd2

        else:
            raise AssertionError()

        return new_fd

    def _PushCloseFd(self, blame_loc):
        # type: (redir_loc_t) -> bool
        """For 2>&-"""
        # exec {fd}>&- means close the named descriptor

        UP_loc = blame_loc
        if blame_loc.tag() == redir_loc_e.VarName:
            fd_name = cast(redir_loc.VarName, UP_loc).name
            fd = self._ReadFdFromMem(fd_name)
            if fd == NO_FD:
                return False

        elif blame_loc.tag() == redir_loc_e.Fd:
            fd = cast(redir_loc.Fd, UP_loc).fd

        else:
            raise AssertionError()

        self._PushSave(fd)

        return True

    def _PushClose(self, fd):
        # type: (int) -> None
        # saved_fd == NO_FD tells Pop() to close orig_fd rather than
        # restore it.
        self.cur_frame.saved.append(_RedirFrame(NO_FD, fd, False))

    def _PushWait(self, proc):
        # type: (Process) -> None
        # Pop() waits on these here-doc writer processes.
        self.cur_frame.need_wait.append(proc)

    def _ApplyRedirect(self, r, err_out):
        # type: (RedirValue, List[int]) -> None
        """Apply a single redirect.

        Errors are reported two ways: by raising IOError/OSError from
        posix calls, or by appending an errno to err_out (noclobber).
        """
        arg = r.arg
        UP_arg = arg
        with tagswitch(arg) as case:

            if case(redirect_arg_e.Path):
                arg = cast(redirect_arg.Path, UP_arg)

                if r.op_id in (Id.Redir_Great, Id.Redir_AndGreat):  # >   &>
                    mode = O_CREAT | O_WRONLY | O_TRUNC
                elif r.op_id == Id.Redir_Clobber:  # >|
                    mode = O_CREAT | O_WRONLY | O_TRUNC
                elif r.op_id in (Id.Redir_DGreat,
                                 Id.Redir_AndDGreat):  # >>   &>>
                    mode = O_CREAT | O_WRONLY | O_APPEND
                elif r.op_id == Id.Redir_Less:  # <
                    mode = O_RDONLY
                elif r.op_id == Id.Redir_LessGreat:  # <>
                    mode = O_CREAT | O_RDWR
                else:
                    raise NotImplementedError(r.op_id)

                # noclobber: don't overwrite existing files (except for special
                # files like /dev/null)
                noclobber = self.exec_opts.noclobber()

                # Only > and &> actually follow noclobber. See
                # spec/redirect.test.sh
                op_respects_noclobber = r.op_id in (Id.Redir_Great, Id.Redir_AndGreat)

                if noclobber and op_respects_noclobber:
                    stat = mylib.stat(arg.filename)
                    if not stat:
                        # File doesn't currently exist, open with O_EXCL (open
                        # will fail is EEXIST if arg.filename exists when we
                        # call open(2)). This guards against a race where the
                        # file may be created *after* we check it with stat.
                        mode |= O_EXCL

                    elif stat.isreg():
                        # This is a regular file, opening it would clobber,
                        # so raise an error.
                        err_out.append(EEXIST)
                        return

                    # Otherwise, the file exists and is a special file like
                    # /dev/null, we can open(2) it without O_EXCL. (Note,
                    # there is a race here. See demo/noclobber-race.sh)

                # NOTE: 0666 is affected by umask, all shells use it.
                try:
                    open_fd = posix.open(arg.filename, mode, 0o666)
                except (IOError, OSError) as e:
                    if e.errno == EEXIST and noclobber:
                        extra = ' (noclobber)'
                    else:
                        extra = ''
                    self.errfmt.Print_(
                        "Can't open %r: %s%s" %
                        (arg.filename, pyutil.strerror(e), extra),
                        blame_loc=r.op_loc)
                    raise

                new_fd = self._PushDup(open_fd, r.loc)
                if new_fd != NO_FD:
                    posix.close(open_fd)

                # Now handle &> and &>> and their variants. These pairs are the same:
                #
                #   stdout_stderr.py &> out-err.txt
                #   stdout_stderr.py > out-err.txt 2>&1
                #
                #   stdout_stderr.py 3&> out-err.txt
                #   stdout_stderr.py 3> out-err.txt 2>&3
                #
                # Ditto for {fd}> and {fd}&>

                if r.op_id in (Id.Redir_AndGreat, Id.Redir_AndDGreat):
                    self._PushDup(new_fd, redir_loc.Fd(2))

            elif case(redirect_arg_e.CopyFd):  # e.g. echo hi 1>&2
                arg = cast(redirect_arg.CopyFd, UP_arg)

                if r.op_id == Id.Redir_GreatAnd:  # 1>&2
                    self._PushDup(arg.target_fd, r.loc)

                elif r.op_id == Id.Redir_LessAnd:  # 0<&5
                    # The only difference between >& and <& is the default file
                    # descriptor argument.
                    self._PushDup(arg.target_fd, r.loc)

                else:
                    raise NotImplementedError()

            elif case(redirect_arg_e.MoveFd):  # e.g. echo hi 5>&6-
                arg = cast(redirect_arg.MoveFd, UP_arg)
                new_fd = self._PushDup(arg.target_fd, r.loc)
                if new_fd != NO_FD:
                    # 'move' closes the source descriptor after the dup
                    posix.close(arg.target_fd)

                    UP_loc = r.loc
                    if r.loc.tag() == redir_loc_e.Fd:
                        fd = cast(redir_loc.Fd, UP_loc).fd
                    else:
                        fd = NO_FD

                    self.cur_frame.saved.append(_RedirFrame(new_fd, fd, False))

            elif case(redirect_arg_e.CloseFd):  # e.g. echo hi 5>&-
                self._PushCloseFd(r.loc)

            elif case(redirect_arg_e.HereDoc):
                arg = cast(redirect_arg.HereDoc, UP_arg)

                # NOTE: Do these descriptors have to be moved out of the range 0-9?
                read_fd, write_fd = posix.pipe()

                self._PushDup(read_fd, r.loc)  # stdin is now the pipe

                # We can't close like we do in the filename case above? The writer can
                # get a "broken pipe".
                self._PushClose(read_fd)

                thunk = _HereDocWriterThunk(write_fd, arg.body)

                # Use PIPE_SIZE to save a process in the case of small here
                # docs, which are the common case. (dash does this.)

                # Note: could instrument this to see how often it happens.
                # Though strace -ff can also work.
                start_process = len(arg.body) > 4096
                #start_process = True

                if start_process:
                    here_proc = Process(thunk, self.job_control, self.job_list,
                                        self.tracer)

                    # NOTE: we could close the read pipe here, but it doesn't really
                    # matter because we control the code.
                    here_proc.StartProcess(trace.HereDoc)
                    #log('Started %s as %d', here_proc, pid)
                    self._PushWait(here_proc)

                    # Now that we've started the child, close it in the parent.
                    posix.close(write_fd)

                else:
                    # Small here doc: write it inline and close, no child
                    # process needed.
                    posix.write(write_fd, arg.body)
                    posix.close(write_fd)

    def Push(self, redirects, err_out):
        # type: (List[RedirValue], List[int]) -> None
        """Apply a group of redirects and remember to undo them."""

        #log('> fd_state.Push %s', redirects)
        new_frame = _FdFrame()
        self.stack.append(new_frame)
        self.cur_frame = new_frame

        for r in redirects:
            #log('apply %s', r)
            with ui.ctx_Location(self.errfmt, r.op_loc):
                try:
                    # _ApplyRedirect reports errors in 2 ways:
                    # 1. Raising an IOError or OSError from posix.* calls
                    # 2. Returning errors in err_out from checks like noclobber
                    self._ApplyRedirect(r, err_out)
                except (IOError, OSError) as e:
                    err_out.append(e.errno)
                if len(err_out):
                    # This can fail too
                    self.Pop(err_out)
                    return  # for bad descriptor, etc.

    def PushStdinFromPipe(self, r):
        # type: (int) -> bool
        """Save the current stdin and make it come from descriptor 'r'.

        'r' is typically the read-end of a pipe. For 'lastpipe'/ZSH
        semantics of

        echo foo | read line; echo $line
        """
        new_frame = _FdFrame()
        self.stack.append(new_frame)
        self.cur_frame = new_frame

        self._PushDup(r, redir_loc.Fd(0))
        return True

    def Pop(self, err_out):
        # type: (List[int]) -> None
        """Undo the most recent Push(): restore or close each saved fd.

        Errno values for any failures are appended to err_out; the first
        failure aborts the rest of the restore.
        """
        frame = self.stack.pop()
        #log('< Pop %s', frame)
        # Undo in reverse order of application.
        for rf in reversed(frame.saved):
            if rf.saved_fd == NO_FD:
                #log('Close %d', orig)
                try:
                    posix.close(rf.orig_fd)
                except (IOError, OSError) as e:
                    err_out.append(e.errno)
                    log('Error closing descriptor %d: %s', rf.orig_fd,
                        pyutil.strerror(e))
                    return
            else:
                try:
                    posix.dup2(rf.saved_fd, rf.orig_fd)
                except (IOError, OSError) as e:
                    err_out.append(e.errno)
                    log('dup2(%d, %d) error: %s', rf.saved_fd, rf.orig_fd,
                        pyutil.strerror(e))
                    #log('fd state:')
                    #posix.system('ls -l /proc/%s/fd' % posix.getpid())
                    return
                posix.close(rf.saved_fd)
                #log('dup2 %s %s', saved, orig)

        # Wait for here doc processes to finish.
        for proc in frame.need_wait:
            unused_status = proc.Wait(self.waiter)

    def MakePermanent(self):
        # type: () -> None
        # For 'exec': the current frame's redirects become permanent, so
        # drop the bookkeeping that would undo them.
        self.cur_frame.Forget()
612
613
class ChildStateChange(object):
    """Interface for a state manipulation applied to a forked child."""

    def __init__(self):
        # type: () -> None
        """Empty constructor for mycpp."""
        pass

    def Apply(self):
        # type: () -> None
        """Run in the child process, after fork()."""
        raise NotImplementedError()

    def ApplyFromParent(self, proc):
        # type: (Process) -> None
        """Noop for all state changes other than SetPgid for mycpp."""
        pass
629
630
class StdinFromPipe(ChildStateChange):
    """In the child: make stdin come from the read end of a pipe."""

    def __init__(self, pipe_read_fd, w):
        # type: (int, int) -> None
        self.r = pipe_read_fd
        self.w = w

    def __repr__(self):
        # type: () -> str
        return '<StdinFromPipe %d %d>' % (self.r, self.w)

    def Apply(self):
        # type: () -> None
        posix.dup2(self.r, 0)
        posix.close(self.r)  # the dup made this copy redundant

        # We're reading from the pipe, not writing, so close the write
        # end too (otherwise the reader would never see EOF).
        posix.close(self.w)
        #log('child CLOSE w %d pid=%d', self.w, posix.getpid())
649
650
class StdoutToPipe(ChildStateChange):
    """In the child: send stdout to the write end of a pipe."""

    def __init__(self, r, pipe_write_fd):
        # type: (int, int) -> None
        self.r = r
        self.w = pipe_write_fd

    def __repr__(self):
        # type: () -> str
        return '<StdoutToPipe %d %d>' % (self.r, self.w)

    def Apply(self):
        # type: () -> None
        posix.dup2(self.w, 1)
        posix.close(self.w)  # the dup made this copy redundant

        # We're writing to the pipe, not reading, so close the read end.
        posix.close(self.r)
        #log('child CLOSE r %d pid=%d', self.r, posix.getpid())
669
670
class StderrToPipe(ChildStateChange):
    """In the child: send stderr to the write end of a pipe."""

    def __init__(self, r, pipe_write_fd):
        # type: (int, int) -> None
        self.r = r
        self.w = pipe_write_fd

    def __repr__(self):
        # type: () -> str
        return '<StderrToPipe %d %d>' % (self.r, self.w)

    def Apply(self):
        # type: () -> None
        posix.dup2(self.w, 2)
        posix.close(self.w)  # the dup made this copy redundant

        # We're writing to the pipe, not reading, so close the read end.
        posix.close(self.r)
        #log('child CLOSE r %d pid=%d', self.r, posix.getpid())
689
690
691INVALID_PGID = -1
692# argument to setpgid() that means the process is its own leader
693OWN_LEADER = 0
694
695
class SetPgid(ChildStateChange):
    """Put the child process into a process group (for job control)."""

    def __init__(self, pgid, tracer):
        # type: (int, dev.Tracer) -> None
        self.pgid = pgid
        self.tracer = tracer

    def Apply(self):
        # type: () -> None
        # Runs in the child.  Failure is not fatal; we only trace it.
        try:
            posix.setpgid(0, self.pgid)
        except (IOError, OSError) as e:
            self.tracer.OtherMessage(
                'osh: child %d failed to set its process group to %d: %s' %
                (posix.getpid(), self.pgid, pyutil.strerror(e)))

    def ApplyFromParent(self, proc):
        # type: (Process) -> None
        # Also called from the parent side with the child's pid --
        # presumably so the group is set regardless of which of
        # parent/child is scheduled first (TODO confirm).
        try:
            posix.setpgid(proc.pid, self.pgid)
        except (IOError, OSError) as e:
            self.tracer.OtherMessage(
                'osh: parent failed to set process group for PID %d to %d: %s'
                % (proc.pid, self.pgid, pyutil.strerror(e)))
720
721
class ExternalProgram(object):
    """The capability to execute an external program like 'ls'."""

    def __init__(
            self,
            hijack_shebang,  # type: str
            fd_state,  # type: FdState
            errfmt,  # type: ui.ErrorFormatter
            debug_f,  # type: _DebugFile
    ):
        # type: (...) -> None
        """
        Args:
          hijack_shebang: The path of an interpreter to run instead of the one
            specified in the shebang line. May be empty.
        """
        self.hijack_shebang = hijack_shebang
        self.fd_state = fd_state
        self.errfmt = errfmt
        self.debug_f = debug_f

    def Exec(self, argv0_path, cmd_val, environ):
        # type: (str, cmd_value.Argv, Dict[str, str]) -> None
        """Execute a program and exit this process.

        Called by: ls / exec ls / ( ls / )
        """
        probe('process', 'ExternalProgram_Exec', argv0_path)
        self._Exec(argv0_path, cmd_val.argv, cmd_val.arg_locs[0], environ,
                   True)
        assert False, "This line should never execute"  # NO RETURN

    def _Exec(self, argv0_path, argv, argv0_loc, environ, should_retry):
        # type: (str, List[str], loc_t, Dict[str, str], bool) -> None
        # Exec the program, optionally hijacking its shebang interpreter,
        # and retrying once via /bin/sh on ENOEXEC.  Never returns: either
        # execve() replaces this process or we _exit() with 126/127.
        if len(self.hijack_shebang):
            opened = True
            try:
                f = self.fd_state.Open(argv0_path)
            except (IOError, OSError) as e:
                # Can't read the file; fall through and let execve() try it.
                opened = False

            if opened:
                with ctx_FileCloser(f):
                    # Test if the shebang looks like a shell. TODO: The file might be
                    # binary with no newlines, so read 80 bytes instead of readline().

                    #line = f.read(80)  # type: ignore  # TODO: fix this
                    line = f.readline()

                    if match.ShouldHijack(line):
                        # Run hijack_shebang with the original file as its
                        # first argument, keeping the rest of argv.
                        h_argv = [self.hijack_shebang, argv0_path]
                        h_argv.extend(argv[1:])
                        argv = h_argv
                        argv0_path = self.hijack_shebang
                        self.debug_f.writeln('Hijacked: %s' % argv0_path)
                    else:
                        #self.debug_f.log('Not hijacking %s (%r)', argv, line)
                        pass

        try:
            posix.execve(argv0_path, argv, environ)
        except (IOError, OSError) as e:
            # Run with /bin/sh when ENOEXEC error (no shebang). All shells do this.
            if e.errno == ENOEXEC and should_retry:
                new_argv = ['/bin/sh', argv0_path]
                new_argv.extend(argv[1:])
                # should_retry=False prevents infinite recursion if /bin/sh
                # itself fails with ENOEXEC.
                self._Exec('/bin/sh', new_argv, argv0_loc, environ, False)
                # NO RETURN

            # Would be nice: when the path is relative and ENOENT: print PWD and do
            # spelling correction?

            self.errfmt.Print_(
                "Can't execute %r: %s" % (argv0_path, pyutil.strerror(e)),
                argv0_loc)

            # POSIX mentions 126 and 127 for two specific errors. The rest are
            # unspecified.
            #
            # http://pubs.opengroup.org/onlinepubs/9699919799.2016edition/utilities/V3_chap02.html#tag_18_08_02
            if e.errno == EACCES:
                status = 126
            elif e.errno == ENOENT:
                # TODO: most shells print 'command not found', rather than strerror()
                # == "No such file or directory". That's better because it's at the
                # end of the path search, and we're never searching for a directory.
                status = 127
            else:
                # dash uses 2, but we use that for parse errors. This seems to be
                # consistent with mksh and zsh.
                status = 127

            posix._exit(status)
            # NO RETURN
816
817
class Thunk(object):
    """Abstract base class for things runnable in another process."""

    def __init__(self):
        # type: () -> None
        """Empty constructor for mycpp."""
        pass

    def Run(self):
        # type: () -> None
        """Run the body; subclasses exit the process with a status code."""
        raise NotImplementedError()

    def UserString(self):
        # type: () -> str
        """One-line display string for the 'jobs' list."""
        raise NotImplementedError()

    def __repr__(self):
        # type: () -> str
        # Debug repr is just the user-visible string.
        return self.UserString()
839
840
class ExternalThunk(Thunk):
    """An external executable."""

    def __init__(self, ext_prog, argv0_path, cmd_val, environ):
        # type: (ExternalProgram, str, cmd_value.Argv, Dict[str, str]) -> None
        self.ext_prog = ext_prog
        self.argv0_path = argv0_path
        self.cmd_val = cmd_val
        self.environ = environ

    def UserString(self):
        # type: () -> str
        # NOTE: This is the format the Tracer uses.  bash displays the
        # original code for 'sleep $n &', while OSH displays the argv
        # array 'sleep 1 &'.  We could switch to the former but I'm not
        # sure it's necessary.
        parts = []  # type: List[str]
        for a in self.cmd_val.argv:
            parts.append(j8_lite.MaybeShellEncode(a))
        return '[process] %s' % ' '.join(parts)

    def Run(self):
        # type: () -> None
        """An ExternalThunk is run in parent for the exec builtin."""
        self.ext_prog.Exec(self.argv0_path, self.cmd_val, self.environ)
865
866
class BuiltinThunk(Thunk):
    """Builtin thunk - for running builtins in a forked subprocess"""

    def __init__(self, shell_ex, builtin_id, cmd_val):
        # type: (vm._Executor, int, cmd_value.Argv) -> None
        self.shell_ex = shell_ex
        self.builtin_id = builtin_id
        self.cmd_val = cmd_val

    def UserString(self):
        # type: () -> str
        quoted = [j8_lite.MaybeShellEncode(a) for a in self.cmd_val.argv]
        return '[builtin] %s' % ' '.join(quoted)

    def Run(self):
        # type: () -> None
        """A builtin has no exec(); the forked child must exit itself."""
        status = self.shell_ex.RunBuiltin(self.builtin_id, self.cmd_val)
        posix._exit(status)
886
887
class SubProgramThunk(Thunk):
    """A subprogram that can be executed in another process."""

    def __init__(
            self,
            cmd_ev,  # type: CommandEvaluator
            node,  # type: command_t
            trap_state,  # type: trap_osh.TrapState
            multi_trace,  # type: dev.MultiTracer
            inherit_errexit,  # type: bool
            inherit_errtrace,  # type: bool
    ):
        # type: (...) -> None
        self.cmd_ev = cmd_ev
        self.node = node
        self.trap_state = trap_state
        self.multi_trace = multi_trace
        self.inherit_errexit = inherit_errexit  # for bash errexit compatibility
        self.inherit_errtrace = inherit_errtrace  # for bash errtrace compatibility

    def UserString(self):
        # type: () -> str

        # NOTE: These can be pieces of a pipeline, so they're arbitrary nodes.
        # TODO: Extract SPIDS from node to display source? Note that
        # CompoundStatus also has locations of each pipeline component; see
        # Executor.RunPipeline()
        thunk_str = ui.CommandType(self.node)
        return '[subprog] %s' % thunk_str

    def Run(self):
        # type: () -> None
        """Evaluate self.node in the forked child, then _exit() with its
        status.  Never returns."""
        #self.errfmt.OneLineErrExit()  # don't quote code in child processes
        probe('process', 'SubProgramThunk_Run')

        # TODO: break circular dep. Bit flags could go in ASDL or headers.
        from osh import cmd_eval

        # signal handlers aren't inherited
        self.trap_state.ClearForSubProgram(self.inherit_errtrace)

        # NOTE: may NOT return due to exec().
        if not self.inherit_errexit:
            self.cmd_ev.mutable_opts.DisableErrExit()
        try:
            # optimize to eliminate redundant subshells like ( echo hi ) | wc -l etc.
            self.cmd_ev.ExecuteAndCatch(
                self.node,
                cmd_eval.OptimizeSubshells | cmd_eval.MarkLastCommands)
            status = self.cmd_ev.LastStatus()
            # NOTE: We ignore the is_fatal return value. The user should set -o
            # errexit so failures in subprocesses cause failures in the parent.
        except util.HardExit as e:
            status = e.status

        # Handle errors in a subshell. These two cases are repeated from main()
        # and the core/completion.py hook.
        except KeyboardInterrupt:
            print('')
            status = 130  # 128 + 2
        except (IOError, OSError) as e:
            print_stderr('oils I/O error (subprogram): %s' %
                         pyutil.strerror(e))
            status = 2

        # If ProcessInit() doesn't turn off buffering, this is needed before
        # _exit()
        pyos.FlushStdout()

        self.multi_trace.WriteDumps()

        # We do NOT want to raise SystemExit here. Otherwise dev.Tracer::Pop()
        # gets called in BOTH processes.
        # The crash dump seems to be unaffected.
        posix._exit(status)
963
964
class _HereDocWriterThunk(Thunk):
    """Write a here doc to one end of a pipe.

    May be be executed in either a child process or the main shell
    process.
    """

    def __init__(self, w, body_str):
        # type: (int, str) -> None
        self.w = w
        self.body_str = body_str

    def UserString(self):
        # type: () -> str
        # You can hit Ctrl-Z and the here doc writer will be suspended!
        # Other shells don't have this problem because they use temp
        # files!  That's a bit unfortunate.
        return '[here doc writer]'

    def Run(self):
        # type: () -> None
        """do_exit: For small pipelines."""
        probe('process', 'HereDocWriterThunk_Run')

        # Write the body, close the write end so the reader sees EOF,
        # then exit the child.
        #log('Writing %r', self.body_str)
        posix.write(self.w, self.body_str)
        #log('Wrote %r', self.body_str)
        posix.close(self.w)
        #log('Closed %d', self.w)
        posix._exit(0)
996
997
class Job(object):
    """Common interface for Process and Pipeline.

    Both can be put in the background and waited on.

    Confusing thing about pipelines in the background: they have TOO MANY
    NAMES.

        sleep 1 | sleep 2 &

    - The LAST PID is what's printed at the prompt.  This is $!, a
      PROCESS ID and not a JOB ID.
      # https://www.gnu.org/software/bash/manual/html_node/Special-Parameters.html#Special-Parameters
    - The process group leader (setpgid) is the FIRST PID.
    - It's also %1 or %+.  The last job started.
    """

    def __init__(self):
        # type: () -> None
        # A job is Running when created with & (or resumed after Ctrl-Z).
        self.state = job_state_e.Running
        self.job_id = -1
        self.in_background = False

    def DisplayJob(self, job_id, f, style):
        # type: (int, mylib.Writer, int) -> None
        """Write a description of this job to f, in the given style."""
        raise NotImplementedError()

    def State(self):
        # type: () -> job_state_t
        """Current job state (Running/Stopped/Done...)."""
        return self.state

    def ProcessGroupId(self):
        # type: () -> int
        """Return the process group ID associated with this job."""
        raise NotImplementedError()

    def PidForWait(self):
        # type: () -> int
        """Return the pid we can wait on."""
        raise NotImplementedError()

    def JobWait(self, waiter):
        # type: (Waiter) -> wait_status_t
        """Wait for this process/pipeline to be stopped or finished."""
        raise NotImplementedError()

    def SetBackground(self):
        # type: () -> None
        """Record that this job is running in the background."""
        self.in_background = True

    def SetForeground(self):
        # type: () -> None
        """Record that this job is running in the foreground."""
        self.in_background = False
1053
1054
class Process(Job):
    """A process to run.

    TODO: Should we make it clear that this is a FOREGROUND process?  A
    background process is wrapped in a "job".  It is unevaluated.

    It provides an API to manipulate file descriptor state in parent and child.
    """

    def __init__(self, thunk, job_control, job_list, tracer):
        # type: (Thunk, JobControl, JobList, dev.Tracer) -> None
        """
        Args:
          thunk: Thunk instance - the code to run in the child
          job_control: for terminal ownership (tcsetpgrp etc.)
          job_list: for process bookkeeping
          tracer: for xtrace output; also supplies exec_opts
        """
        Job.__init__(self)
        assert isinstance(thunk, Thunk), thunk
        self.thunk = thunk
        self.job_control = job_control
        self.job_list = job_list
        self.tracer = tracer
        self.exec_opts = tracer.exec_opts

        # For pipelines
        self.parent_pipeline = None  # type: Pipeline
        # Applied in the child after fork(); some also applied from the parent
        self.state_changes = []  # type: List[ChildStateChange]
        # Pipe fds to close in the parent after fork(); -1 means "none"
        self.close_r = -1
        self.close_w = -1

        self.pid = -1  # set by StartProcess()
        self.status = -1  # exit status, set when the process exits

    def Init_ParentPipeline(self, pi):
        # type: (Pipeline) -> None
        """For updating PIPESTATUS."""
        self.parent_pipeline = pi

    def __repr__(self):
        # type: () -> str

        # note: be wary of infinite mutual recursion
        #s = ' %s' % self.parent_pipeline if self.parent_pipeline else ''
        #return '<Process %s%s>' % (self.thunk, s)
        return '<Process pid=%d state=%s %s>' % (
            self.pid, _JobStateStr(self.state), self.thunk)

    def ProcessGroupId(self):
        # type: () -> int
        """Returns the group ID of this process."""
        # This should only ever be called AFTER the process has started
        assert self.pid != -1
        if self.parent_pipeline:
            # XXX: Maybe we should die here instead?  Unclear if this branch
            # should even be reachable with the current builtins.
            return self.parent_pipeline.ProcessGroupId()

        return self.pid

    def PidForWait(self):
        # type: () -> int
        """Return the pid we can wait on."""
        assert self.pid != -1
        return self.pid

    def DisplayJob(self, job_id, f, style):
        # type: (int, mylib.Writer, int) -> None
        """Write one line for the 'jobs' builtin; job_id == -1 means no %N."""
        if job_id == -1:
            job_id_str = '  '
        else:
            job_id_str = '%%%d' % job_id
        if style == STYLE_PID_ONLY:
            f.write('%d\n' % self.pid)
        else:
            f.write('%s %d %7s ' %
                    (job_id_str, self.pid, _JobStateStr(self.state)))
            f.write(self.thunk.UserString())
            f.write('\n')

    def AddStateChange(self, s):
        # type: (ChildStateChange) -> None
        """Queue a state change (redirect, pgid, ...) to apply after fork()."""
        self.state_changes.append(s)

    def AddPipeToClose(self, r, w):
        # type: (int, int) -> None
        """Remember pipe ends the PARENT must close after fork()."""
        self.close_r = r
        self.close_w = w

    def MaybeClosePipe(self):
        # type: () -> None
        """Close the recorded pipe ends in the parent, if any were recorded."""
        if self.close_r != -1:
            posix.close(self.close_r)
            posix.close(self.close_w)

    def StartProcess(self, why):
        # type: (trace_t) -> int
        """Start this process with fork(), handling redirects."""
        pid = posix.fork()
        if pid < 0:
            # When does this happen?
            e_die('Fatal error in posix.fork()')

        elif pid == 0:  # child
            # Note: this happens in BOTH interactive and non-interactive shells.
            # We technically don't need to do most of it in non-interactive,
            # since we did not change state in InitInteractiveShell().

            for st in self.state_changes:
                st.Apply()

            # Python sets SIGPIPE handler to SIG_IGN by default.  Child
            # processes shouldn't have this.
            # https://docs.python.org/2/library/signal.html
            # See Python/pythonrun.c.
            iolib.sigaction(SIGPIPE, SIG_DFL)

            # Respond to Ctrl-\ (core dump)
            iolib.sigaction(SIGQUIT, SIG_DFL)

            # Only standalone children should get Ctrl-Z.  Pipelines remain in
            # the foreground because suspending them is difficult with our
            # 'lastpipe' semantics.
            pid = posix.getpid()
            if posix.getpgid(0) == pid and self.parent_pipeline is None:
                iolib.sigaction(SIGTSTP, SIG_DFL)

            # More signals from
            # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html
            # (but not SIGCHLD)
            iolib.sigaction(SIGTTOU, SIG_DFL)
            iolib.sigaction(SIGTTIN, SIG_DFL)

            self.tracer.OnNewProcess(pid)
            # clear foreground pipeline for subshells
            self.thunk.Run()
            # Never returns

        #log('STARTED process %s, pid = %d', self, pid)
        self.tracer.OnProcessStart(pid, why)

        # Class invariant: after the process is started, it stores its PID.
        self.pid = pid

        # SetPgid needs to be applied from the child and the parent to avoid
        # racing in calls to tcsetpgrp() in the parent.  See APUE sec. 9.2.
        for st in self.state_changes:
            st.ApplyFromParent(self)

        # Program invariant: We keep track of every child process!
        # Waiter::WaitForOne() needs it to update state.
        self.job_list.AddChildProcess(pid, self)

        return pid

    def Wait(self, waiter):
        # type: (Waiter) -> int
        """Wait for this Process to finish."""
        # Keep waiting if waitpid() was interrupted with a signal (unlike the
        # 'wait' builtin)
        while self.state == job_state_e.Running:
            result, _ = waiter.WaitForOne()
            if result == W1_NO_CHILDREN:
                break

        # Linear search
        # if we get a W1_EXITED event, and the pid is OUR PID, then we can
        # return?
        # well we need the status too

        # Cleanup - for background jobs this happens in the 'wait' builtin,
        # e.g. after JobWait()
        if self.state == job_state_e.Exited:
            self.job_list.PopChildProcess(self.pid)

        assert self.status >= 0, self.status
        return self.status

    def JobWait(self, waiter):
        # type: (Waiter) -> wait_status_t
        """Process::JobWait, called by wait builtin"""
        # wait builtin can be interrupted
        while self.state == job_state_e.Running:
            result, w1_arg = waiter.WaitForOne()  # mutates self.state

            if result == W1_CALL_INTR:
                return wait_status.Cancelled(w1_arg)

            if result == W1_NO_CHILDREN:
                break

            # Ignore W1_EXITED, W1_STOPPED - these are OTHER processes

        assert self.status >= 0, self.status
        return wait_status.Proc(self.state, self.status)

    def WhenContinued(self):
        # type: () -> None
        """Called when this stopped process is continued (e.g. fg/bg)."""
        self.state = job_state_e.Running

        if self.parent_pipeline:
            # TODO: do we need anything here?
            pass

        # TODO: Should we remove it as a job?

        # Now job_id is set
        if self.exec_opts.interactive():
            #if 0:
            print_stderr('[%%%d] PID %d Continued' % (self.job_id, self.pid))

        #if self.in_background:
        if 1:
            self.job_control.MaybeTakeTerminal()
            self.SetForeground()

    def WhenStopped(self, stop_sig):
        # type: (int) -> None
        """Called by the Waiter when this Process is stopped."""
        # 128 is a shell thing
        # https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html
        self.status = 128 + stop_sig
        self.state = job_state_e.Stopped

        if self.parent_pipeline:
            # TODO: do we need anything here?
            # We need AllStopped() just like AllExited()?

            #self.parent_pipeline.WhenPartIsStopped(pid, status)
            #return
            pass

        if self.job_id == -1:
            # This process was started in the foreground, not with &.  So it
            # was NOT a job, but after Ctrl-Z, it's a job.
            self.job_list.RegisterJob(self)

        # Now self.job_id is set
        if self.exec_opts.interactive():
            print_stderr('')  # newline after ^Z (TODO: consolidate with ^C)
            print_stderr('[%%%d] PID %d Stopped with signal %d' %
                         (self.job_id, self.pid, stop_sig))

        if not self.in_background:
            # e.g. sleep 5; then Ctrl-Z
            self.job_control.MaybeTakeTerminal()
        self.SetBackground()

    def WhenExited(self, pid, status):
        # type: (int, int) -> None
        """Called by the Waiter when this Process exits."""

        #log('Process WhenExited %d %d', pid, status)
        assert pid == self.pid, 'Expected %d, got %d' % (self.pid, pid)
        self.status = status
        self.state = job_state_e.Exited

        if self.parent_pipeline:
            # populate pipeline status array; update Pipeline state, etc.
            self.parent_pipeline.WhenPartExited(pid, status)
            return

        if self.job_id != -1 and self.in_background:
            # TODO: ONE condition should determine if this was a background
            # job, rather than a foreground process
            # "Job might have been brought to the foreground after being
            # assigned a job ID"
            if self.exec_opts.interactive():
                print_stderr('[%%%d] PID %d Done' % (self.job_id, self.pid))

        if not self.in_background:
            self.job_control.MaybeTakeTerminal()

    def RunProcess(self, waiter, why):
        # type: (Waiter, trace_t) -> int
        """Run this process synchronously."""
        self.StartProcess(why)
        # ShellExecutor might be calling this for the last part of a pipeline.
        if self.parent_pipeline is None:
            # QUESTION: Can the PGID of a single process just be the PID?
            # i.e. avoid calling getpgid()?
            self.job_control.MaybeGiveTerminal(posix.getpgid(self.pid))
        return self.Wait(waiter)
1337
1338
class ctx_Pipe(object):
    """Context manager: make fd this process's stdin, restoring it on exit.

    Used by Pipeline::RunLastPart() to run the last pipeline part in the
    current shell process.  Errors from the restore are appended to err_out.
    """

    def __init__(self, fd_state, fd, err_out):
        # type: (FdState, int, List[int]) -> None
        self.fd_state = fd_state
        self.err_out = err_out
        fd_state.PushStdinFromPipe(fd)

    def __enter__(self):
        # type: () -> None
        pass

    def __exit__(self, type, value, traceback):
        # type: (Any, Any, Any) -> None
        self.fd_state.Pop(self.err_out)
1354
1355
class Pipeline(Job):
    """A pipeline of processes to run.

    Cases we handle:

    foo | bar
    $(foo | bar)
    foo | bar | read v
    """

    def __init__(self, sigpipe_status_ok, job_control, job_list, tracer):
        # type: (bool, JobControl, JobList, dev.Tracer) -> None
        Job.__init__(self)
        self.job_control = job_control
        self.job_list = job_list
        self.tracer = tracer
        self.exec_opts = tracer.exec_opts

        self.procs = []  # type: List[Process]
        self.pids = []  # type: List[int]  # pids in order
        self.pipe_status = []  # type: List[int]  # status in order
        self.status = -1  # for 'wait' jobs

        self.pgid = INVALID_PGID

        # Optional for foreground
        self.last_thunk = None  # type: Tuple[CommandEvaluator, command_t]
        self.last_pipe = None  # type: Tuple[int, int]

        # if true, treat exit status 141 (SIGPIPE) of parts as 0
        self.sigpipe_status_ok = sigpipe_status_ok

    def __repr__(self):
        # type: () -> str
        return '<Pipeline pgid=%d pids=%s state=%s procs=%s>' % (
            self.pgid, self.pids, _JobStateStr(self.state), self.procs)

    def ProcessGroupId(self):
        # type: () -> int
        """Returns the group ID of this pipeline.

        In an interactive shell, it's often the FIRST.
        """
        return self.pgid

    def PidForWait(self):
        # type: () -> int
        """Return the PID we can wait on.

        This is the same as the PID for $!

        Shell WART:
        The $! variable is the PID of the LAST pipeline part.
        But in an interactive shell, the PGID is the PID of the FIRST pipeline
        part.  It would be nicer if these were consistent!
        """
        return self.pids[-1]

    def DisplayJob(self, job_id, f, style):
        # type: (int, mylib.Writer, int) -> None
        """Write the 'jobs' builtin output for every part of this pipeline."""
        if style == STYLE_PID_ONLY:
            f.write('%d\n' % self.procs[0].pid)
        else:
            # Note: this is STYLE_LONG.
            for i, proc in enumerate(self.procs):
                if i == 0:  # show job ID for first element in pipeline
                    job_id_str = '%%%d' % job_id
                else:
                    job_id_str = '  '  # 2 spaces

                f.write('%s %d %7s ' %
                        (job_id_str, proc.pid, _JobStateStr(proc.state)))
                f.write(proc.thunk.UserString())
                f.write('\n')

    def DebugPrint(self):
        # type: () -> None
        print('Pipeline in state %s' % _JobStateStr(self.state))
        if mylib.PYTHON:  # %s for Process not allowed in C++
            for proc in self.procs:
                print('  proc %s' % proc)
            _, last_node = self.last_thunk
            print('  last %s' % last_node)
            print('  pipe_status %s' % self.pipe_status)

    def Add(self, p):
        # type: (Process) -> None
        """Append a process to the pipeline."""
        if len(self.procs) == 0:
            self.procs.append(p)
            return

        # Connect the NEW process's stdin to the PREVIOUS process's stdout.
        r, w = posix.pipe()
        #log('pipe for %s: %d %d', p, r, w)
        prev = self.procs[-1]

        prev.AddStateChange(StdoutToPipe(r, w))  # applied on StartPipeline()
        p.AddStateChange(StdinFromPipe(r, w))  # applied on StartPipeline()

        p.AddPipeToClose(r, w)  # MaybeClosePipe() on StartPipeline()

        self.procs.append(p)

    def AddLast(self, thunk):
        # type: (Tuple[CommandEvaluator, command_t]) -> None
        """Append the last node to the pipeline.

        This is run in the CURRENT process.  It is OPTIONAL, because pipelines
        in the background are run uniformly.
        """
        self.last_thunk = thunk

        assert len(self.procs) != 0

        r, w = posix.pipe()
        prev = self.procs[-1]
        prev.AddStateChange(StdoutToPipe(r, w))

        self.last_pipe = (r, w)  # So we can connect it to last_thunk

    def StartPipeline(self, waiter):
        # type: (Waiter) -> None
        """fork() every part except the optional last_thunk."""

        # If we are creating a pipeline in a subshell or we aren't running
        # with job control, our children should remain in our inherited
        # process group, not the pipeline's own group ID.
        if self.job_control.Enabled():
            self.pgid = OWN_LEADER  # first process in pipeline is the leader

        for i, proc in enumerate(self.procs):
            if self.pgid != INVALID_PGID:
                proc.AddStateChange(SetPgid(self.pgid, self.tracer))

            # Figure out the pid
            pid = proc.StartProcess(trace.PipelinePart)
            if i == 0 and self.pgid != INVALID_PGID:
                # Mimic bash and use the PID of the FIRST process as the group
                # for the whole pipeline.
                self.pgid = pid

            self.pids.append(pid)
            self.pipe_status.append(-1)  # uninitialized

            # NOTE: This is done in the SHELL PROCESS after every fork() call.
            # It can't be done at the end; otherwise processes will have
            # descriptors from non-adjacent pipes.
            proc.MaybeClosePipe()

        if self.last_thunk:
            self.pipe_status.append(-1)  # for self.last_thunk

        #log('Started pipeline PIDS=%s, pgid=%d', self.pids, self.pgid)

    def Wait(self, waiter):
        # type: (Waiter) -> List[int]
        """Wait for this Pipeline to finish."""

        assert self.procs, "no procs for Wait()"
        # waitpid(-1) zero or more times
        while self.state == job_state_e.Running:
            # Keep waiting until there's nothing to wait for.
            result, _ = waiter.WaitForOne()
            if result == W1_NO_CHILDREN:
                break

        return self.pipe_status

    def JobWait(self, waiter):
        # type: (Waiter) -> wait_status_t
        """Pipeline::JobWait(), called by 'wait' builtin, e.g. 'wait %1'."""
        # wait builtin can be interrupted
        assert self.procs, "no procs for Wait()"
        while self.state == job_state_e.Running:
            result, w1_arg = waiter.WaitForOne()

            if result == W1_CALL_INTR:  # signal
                return wait_status.Cancelled(w1_arg)

            if result == W1_NO_CHILDREN:
                break

            # Ignore W1_EXITED, W1_STOPPED - these are OTHER processes

        assert all(st >= 0 for st in self.pipe_status), self.pipe_status
        return wait_status.Pipeline(self.state, self.pipe_status)

    def RunLastPart(self, waiter, fd_state):
        # type: (Waiter, FdState) -> List[int]
        """Run this pipeline synchronously (foreground pipeline).

        Returns:
          pipe_status (list of integers).
        """
        assert len(self.pids) == len(self.procs)

        # TODO: break circular dep.  Bit flags could go in ASDL or headers.
        from osh import cmd_eval

        # This is tcsetpgrp()
        # TODO: fix race condition -- I believe the first process could have
        # stopped already, and thus getpgid() will fail
        self.job_control.MaybeGiveTerminal(self.pgid)

        # Run the last part of the pipeline IN PARALLEL with other processes.
        # It may or may not fork:
        #   echo foo | read line  # no fork, the builtin runs in THIS process
        #   ls | wc -l            # fork for 'wc'

        cmd_ev, last_node = self.last_thunk

        assert self.last_pipe is not None
        r, w = self.last_pipe  # set in AddLast()
        posix.close(w)  # we will not write here

        # Fix lastpipe / job control / DEBUG trap interaction
        cmd_flags = cmd_eval.NoDebugTrap if self.job_control.Enabled() else 0

        # The ERR trap only runs for the WHOLE pipeline, not the COMPONENTS in
        # a pipeline.
        cmd_flags |= cmd_eval.NoErrTrap

        io_errors = []  # type: List[int]
        with ctx_Pipe(fd_state, r, io_errors):
            cmd_ev.ExecuteAndCatch(last_node, cmd_flags)

        if len(io_errors):
            e_die('Error setting up last part of pipeline: %s' %
                  posix.strerror(io_errors[0]))

        # We won't read anymore.  If we don't do this, then 'cat' in 'cat
        # /dev/urandom | sleep 1' will never get SIGPIPE.
        posix.close(r)

        self.pipe_status[-1] = cmd_ev.LastStatus()
        if self.AllExited():
            self.state = job_state_e.Exited

        #log('pipestatus before all have finished = %s', self.pipe_status)
        return self.Wait(waiter)

    def AllExited(self):
        # type: () -> bool
        """True when every part has recorded an exit status."""

        # mycpp rewrite: all(status != -1 for status in self.pipe_status)
        for status in self.pipe_status:
            if status == -1:
                return False
        return True

    def WhenPartExited(self, pid, status):
        # type: (int, int) -> None
        """Called by Process::WhenExited()"""
        #log('Pipeline WhenExited %d %d', pid, status)
        i = self.pids.index(pid)
        assert i != -1, 'Unexpected PID %d' % pid

        # 141 == 128 + SIGPIPE; optionally treat it as success
        if status == 141 and self.sigpipe_status_ok:
            status = 0

        self.pipe_status[i] = status
        if not self.AllExited():
            return

        if self.job_id != -1 and self.in_background:
            # TODO: ONE condition
            # "Job might have been brought to the foreground after being
            # assigned a job ID"
            if self.exec_opts.interactive():
                print_stderr('[%%%d] PGID %d Done' %
                             (self.job_id, self.pids[0]))

        # Status of pipeline is status of last process
        self.status = self.pipe_status[-1]
        self.state = job_state_e.Exited

        if not self.in_background:
            self.job_control.MaybeTakeTerminal()
1632
1633
def _JobStateStr(i):
    # type: (job_state_t) -> str
    """Render a job state like 'Running', without the ASDL leading dot."""
    return job_state_str(i, dot=False)
1637
1638
def _GetTtyFd():
    # type: () -> int
    """Open /dev/tty and return its fd, or -1 if stdio is not a TTY."""
    try:
        fd = posix.open("/dev/tty", O_NONBLOCK | O_NOCTTY | O_RDWR, 0o666)
    except (IOError, OSError) as e:
        return -1
    return fd
1646
1647
class ctx_TerminalControl(object):
    """Context manager: initialize job control, then give the TTY back.

    On exit, the terminal is returned to its original owner; failures are
    reported via errfmt rather than aborting the shell.
    """

    def __init__(self, job_control, errfmt):
        # type: (JobControl, ui.ErrorFormatter) -> None
        self.job_control = job_control
        self.errfmt = errfmt
        job_control.InitJobControl()

    def __enter__(self):
        # type: () -> None
        pass

    def __exit__(self, type, value, traceback):
        # type: (Any, Any, Any) -> None
        # Return the TTY to the original owner before exiting.
        try:
            self.job_control.MaybeReturnTerminal()
        except error.FatalRuntime as e:
            # Don't abort the shell on error, just print a message.
            self.errfmt.PrettyPrintError(e)
1669
1670
class JobControl(object):
    """Interface to setpgid(), tcsetpgrp(), etc."""

    def __init__(self):
        # type: () -> None

        # The main shell's PID and group ID.
        self.shell_pid = -1
        self.shell_pgid = -1

        # The fd of the controlling tty.  Set to -1 when job control is
        # disabled.
        self.shell_tty_fd = -1

        # For giving the terminal back to our parent before exiting (if not a
        # login shell).
        self.original_tty_pgid = -1

    def InitJobControl(self):
        # type: () -> None
        """Make the shell its own process group leader and grab the TTY.

        On any failure, job control is disabled by setting shell_tty_fd = -1
        and the original process group is restored.
        """
        self.shell_pid = posix.getpid()
        orig_shell_pgid = posix.getpgid(0)
        self.shell_pgid = orig_shell_pgid
        self.shell_tty_fd = _GetTtyFd()

        # If we aren't the leader of our process group, create a group and
        # mark ourselves as the leader.
        if self.shell_pgid != self.shell_pid:
            try:
                posix.setpgid(self.shell_pid, self.shell_pid)
                self.shell_pgid = self.shell_pid
            except (IOError, OSError) as e:
                self.shell_tty_fd = -1

        if self.shell_tty_fd != -1:
            self.original_tty_pgid = posix.tcgetpgrp(self.shell_tty_fd)

            # If stdio is a TTY, put the shell's process group in the
            # foreground.
            try:
                posix.tcsetpgrp(self.shell_tty_fd, self.shell_pgid)
            except (IOError, OSError) as e:
                # We probably aren't in the session leader's process group.
                # Disable job control.
                self.shell_tty_fd = -1
                self.shell_pgid = orig_shell_pgid
                posix.setpgid(self.shell_pid, self.shell_pgid)

    def Enabled(self):
        # type: () -> bool
        """
        Only the main shell process should bother with job control functions.
        """
        #log('ENABLED? %d', self.shell_tty_fd)

        # TODO: get rid of getpid()?  I think SubProgramThunk should set a
        # flag.
        return self.shell_tty_fd != -1 and posix.getpid() == self.shell_pid

    # TODO: This isn't a PID.  This is a process group ID?
    #
    # What should the table look like?
    #
    # Do we need the last PID?  I don't know why bash prints that.  Probably
    # so you can do wait $!
    # wait -n waits for any node to go from job_state_e.Running to
    # job_state_e.Done?
    #
    # And it needs a flag for CURRENT, for the implicit arg to 'fg'.
    # job_id is just an integer.  This is sort of lame.
    #
    # [job_id, flag, pgid, job_state, node]

    def MaybeGiveTerminal(self, pgid):
        # type: (int) -> None
        """If stdio is a TTY, move the given process group to the
        foreground."""
        if not self.Enabled():
            # Only call tcsetpgrp when job control is enabled.
            return

        try:
            posix.tcsetpgrp(self.shell_tty_fd, pgid)
        except (IOError, OSError) as e:
            e_die('osh: Failed to move process group %d to foreground: %s' %
                  (pgid, pyutil.strerror(e)))

    def MaybeTakeTerminal(self):
        # type: () -> None
        """If stdio is a TTY, return the main shell's process group to the
        foreground."""
        self.MaybeGiveTerminal(self.shell_pgid)

    def MaybeReturnTerminal(self):
        # type: () -> None
        """Called before the shell exits."""
        self.MaybeGiveTerminal(self.original_tty_pgid)
1765
1766
class JobList(object):
    """Global list of jobs, used by a few builtins."""

    def __init__(self):
        # type: () -> None

        # self.child_procs is used by WaitForOne() to call proc.WhenExited()
        # and proc.WhenStopped().
        self.child_procs = {}  # type: Dict[int, Process]

        # self.jobs is used by 'wait %1' and 'fg %2'
        # job_id -> Job
        self.jobs = {}  # type: Dict[int, Job]

        # self.pid_to_job is used by 'wait -n' and 'wait' - to call
        # CleanupWhenProcessExits().  The Dict key is job.PidForWait()
        self.pid_to_job = {}  # type: Dict[int, Job]

        # TODO: consider linear search through JobList
        # - by job ID
        # - by PID
        # - then you don't have to bother as much with the dicts
        #   - you still need the child process dict to set the status and
        #     state?

        self.debug_pipelines = []  # type: List[Pipeline]

        # Counter used to assign IDs to jobs.  It is incremented every time a
        # job is created.  Once all active jobs are done it is reset to 1.
        # I'm not sure if this reset behavior is mandated by POSIX, but other
        # shells do it, so we mimic for the sake of compatibility.
        self.next_job_id = 1

    def RegisterJob(self, job):
        # type: (Job) -> int
        """Create a background job, which you can wait %2, fg %2, kill %2, etc.

        - A job is either a Process or Pipeline.
        - A job is registered in these 2 situations:
          1. async: sleep 5 &
          2. stopped: sleep 5; then Ctrl-Z
        That is, in the interactive shell, the foreground process can
        receive signals, and can be stopped
        """
        job_id = self.next_job_id
        self.next_job_id += 1

        # Look up the job by job ID, for wait %1, kill %1, etc.
        self.jobs[job_id] = job

        # Pipelines
        # TODO: register all PIDs?  And conversely, remove all PIDs
        # what do other shells do?
        self.pid_to_job[job.PidForWait()] = job

        # Mutate the job itself
        job.job_id = job_id

        return job_id

    def JobFromPid(self, pid):
        # type: (int) -> Optional[Job]
        """Look up a job by the PID registered in RegisterJob()."""
        return self.pid_to_job.get(pid)

    def _MaybeResetCounter(self):
        # type: () -> None
        # Reset the job ID counter once there are no active jobs left.
        if len(self.jobs) == 0:
            self.next_job_id = 1

    def CleanupWhenJobExits(self, job):
        # type: (Job) -> None
        """Called when say 'fg %2' exits, and when 'wait %2' exits"""
        mylib.dict_erase(self.jobs, job.job_id)

        mylib.dict_erase(self.pid_to_job, job.PidForWait())

        self._MaybeResetCounter()

    def CleanupWhenProcessExits(self, pid):
        # type: (int) -> None
        """Given a PID, remove the job if it has Exited."""

        job = self.pid_to_job.get(pid)
        if 0:
            # TODO: background pipelines don't clean up properly, because only
            # the last PID is registered in job_list.pid_to_job

            # Should we switch to a linear search of a background job array?
            # Foreground jobs are stored in self.child_procs, and we migrate
            # between them?

            log('*** CleanupWhenProcessExits %d', pid)
            log('job %s', job)
            #log('STATE %s', _JobStateStr(job.state))

        if job and job.state == job_state_e.Exited:
            # Note: only the LAST PID in a pipeline will ever be here, but
            # it's OK to try to delete it.
            mylib.dict_erase(self.pid_to_job, pid)

            mylib.dict_erase(self.jobs, job.job_id)

            self._MaybeResetCounter()

    def AddChildProcess(self, pid, proc):
        # type: (int, Process) -> None
        """Every child process should be added here as soon as we know its PID.

        When the Waiter gets an EXITED or STOPPED notification, we need
        to know about it so 'jobs' can work.

        Note: this contains Process objects that are part of a Pipeline
        object.  Does it need to?
        """
        self.child_procs[pid] = proc

    def PopChildProcess(self, pid):
        # type: (int) -> Optional[Process]
        """Remove the child process with the given PID."""
        pr = self.child_procs.get(pid)
        if pr is not None:
            mylib.dict_erase(self.child_procs, pid)
        return pr

    if mylib.PYTHON:

        def AddPipeline(self, pi):
            # type: (Pipeline) -> None
            """For debugging only."""
            self.debug_pipelines.append(pi)

    def GetCurrentAndPreviousJobs(self):
        # type: () -> Tuple[Optional[Job], Optional[Job]]
        """Return the "current" and "previous" jobs (AKA `%+` and `%-`).

        See the POSIX specification for the `jobs` builtin for details:
        https://pubs.opengroup.org/onlinepubs/007904875/utilities/jobs.html

        IMPORTANT NOTE: This method assumes that the jobs list will not change
        during its execution!  This assumption holds for now because we only
        ever update the jobs list from the main loop after WaitPid() informs
        us of a change.  If we implement `set -b` and install a signal handler
        for SIGCHLD we should be careful to synchronize it with this function.
        The unsafety of mutating GC data structures from a signal handler
        should make this a non-issue, but if bugs related to this appear this
        note may be helpful...
        """
        # Split all active jobs by state and sort each group by decreasing
        # job ID to approximate newness.
        stopped_jobs = []  # type: List[Job]
        running_jobs = []  # type: List[Job]
        for i in xrange(0, self.next_job_id):
            job = self.jobs.get(i, None)
            if not job:
                continue

            if job.state == job_state_e.Stopped:
                stopped_jobs.append(job)

            elif job.state == job_state_e.Running:
                running_jobs.append(job)

        current = None  # type: Optional[Job]
        previous = None  # type: Optional[Job]
        # POSIX says: If there is any suspended job, then the current job
        # shall be a suspended job.  If there are at least two suspended jobs,
        # then the previous job also shall be a suspended job.
        #
        # So, we will only return running jobs from here if there are no
        # recent stopped jobs.
        if len(stopped_jobs) > 0:
            current = stopped_jobs.pop()

            if len(stopped_jobs) > 0:
                previous = stopped_jobs.pop()

        if len(running_jobs) > 0 and not current:
            current = running_jobs.pop()

        if len(running_jobs) > 0 and not previous:
            previous = running_jobs.pop()

        if not previous:
            previous = current

        return current, previous

    def JobFromSpec(self, job_spec):
        # type: (str) -> Optional[Job]
        """Parse the given job spec and return the matching job.  If there is
        no matching job, this function returns None.

        See the POSIX spec for the `jobs` builtin for details about job specs:
        https://pubs.opengroup.org/onlinepubs/007904875/utilities/jobs.html
        """
        if job_spec in CURRENT_JOB_SPECS:
            current, _ = self.GetCurrentAndPreviousJobs()
            return current

        if job_spec == '%-':
            _, previous = self.GetCurrentAndPreviousJobs()
            return previous

        #log('** SEARCHING %s', self.jobs)
        # TODO: Add support for job specs based on prefixes of process argv.
        m = util.RegexSearch(r'^%([0-9]+)$', job_spec)
        if m is not None:
            assert len(m) == 2
            job_id = int(m[1])
            if job_id in self.jobs:
                return self.jobs[job_id]

        return None

    def DisplayJobs(self, style):
        # type: (int) -> None
        """Used by the 'jobs' builtin.

        https://pubs.opengroup.org/onlinepubs/9699919799/utilities/jobs.html

        "By default, the jobs utility shall display the status of all stopped
        jobs, running background jobs and all jobs whose status has changed
        and have not been reported by the shell."
        """
        # NOTE: A job is a background process or pipeline.
        #
        # echo hi | wc -l    -- this starts two processes.  Wait for TWO
        # echo hi | wc -l &  -- this starts a process which starts two
        #                       processes.  Wait for ONE.
        #
        # 'jobs -l' GROUPS the PIDs by job.  It has the job number, + -
        # indicators for %% and %-, PID, status, and "command".
        #
        # Every component of a pipeline is on the same line with 'jobs', but
        # they're separated into different lines with 'jobs -l'.
        #
        # See demo/jobs-builtin.sh

        # $ jobs -l
        # [1]+ 24414 Stopped                 sleep 5
        #      24415                       | sleep 5
        # [2]  24502 Running                 sleep 6
        #      24503                       | sleep 6
        #      24504                       | sleep 5 &
        # [3]- 24508 Running                 sleep 6
        #      24509                       | sleep 6
        #      24510                       | sleep 5 &

        f = mylib.Stdout()
        for job_id, job in iteritems(self.jobs):
            # Use the %1 syntax
            job.DisplayJob(job_id, f, style)

    def DebugPrint(self):
        # type: () -> None

        f = mylib.Stdout()
        f.write('\n')
        f.write('[process debug info]\n')

        for pid, proc in iteritems(self.child_procs):
            proc.DisplayJob(-1, f, STYLE_DEFAULT)
            #p = ' |' if proc.parent_pipeline else ''
            #print('%d %7s %s%s' % (pid, _JobStateStr(proc.state),
            #                       proc.thunk.UserString(), p))

        if len(self.debug_pipelines):
            f.write('\n')
            f.write('[pipeline debug info]\n')
            for pi in self.debug_pipelines:
                pi.DebugPrint()

    def ListRecent(self):
        # type: () -> None
        """For jobs -n, which I think is also used in the interactive
        prompt."""
        pass

    def NumRunning(self):
        # type: () -> int
        """Return the number of running jobs.

        Used by 'wait' and 'wait -n'.
        """
        count = 0
        for _, job in iteritems(self.jobs):  # mycpp rewrite: from itervalues()
            if job.State() == job_state_e.Running:
                count += 1
        return count
2055
2056
# Some WaitForOne() return values, which are negative.  The numbers are
# arbitrary negative numbers.
#
# They don't overlap with iolib.UNTRAPPED_SIGWINCH == -10
# which LastSignal() can return

W1_EXITED = -11  # process exited
W1_STOPPED = -12  # process was stopped
W1_CALL_INTR = -15  # the waitpid(-1) call was interrupted

W1_NO_CHILDREN = -13  # no child processes to wait for
W1_NO_CHANGE = -14  # WNOHANG was passed and there were no state changes

# Placeholder for WaitForOne()'s second return value when there is no
# meaningful argument -- presumably; confirm against WaitForOne() callers.
NO_ARG = -20
2071
2072
class Waiter(object):
    """A capability to wait for processes.

    This must be a singleton (and is because CommandEvaluator is a singleton).

    Invariants:
    - Every child process is registered once
    - Every child process is waited for

    Canonical example of why we need a GLOBAL waiter:

    { sleep 3; echo 'done 3'; } &
    { sleep 4; echo 'done 4'; } &

    # ... do arbitrary stuff ...

    { sleep 1; exit 1; } | { sleep 2; exit 2; }

    Now when you do wait() after starting the pipeline, you might get a pipeline
    process OR a background process! So you have to distinguish between them.
    """

    def __init__(self, job_list, exec_opts, signal_safe, tracer):
        # type: (JobList, optview.Exec, iolib.SignalSafe, dev.Tracer) -> None
        self.job_list = job_list  # maps PIDs to Process objects (child_procs)
        self.exec_opts = exec_opts  # read for verbose_warn
        self.signal_safe = signal_safe  # queried for the last signal on EINTR
        self.tracer = tracer  # notified when a process ends
        self.last_status = 127  # wait -n error code

    def LastStatusCode(self):
        # type: () -> int
        """Returns exit code for wait -n"""
        return self.last_status

    def WaitForOne(self, waitpid_options=0):
        # type: (int) -> Tuple[int, int]
        """Wait until the next process returns (or maybe Ctrl-C).

        Returns:
          One of these negative numbers:
            W1_NO_CHILDREN        Nothing to wait for
            W1_NO_CHANGE          no state changes when WNOHANG passed - used by
                                  main loop
            W1_EXITED             Process exited (with or without signal)
            W1_STOPPED            Process stopped
            W1_CALL_INTR
            UNTRAPPED_SIGWINCH
          Or
            result > 0            Signal that waitpid() was interrupted with

          In the interactive shell, we return 0 if we get a Ctrl-C, so the caller
          will try again.

        Callers:
          wait -n          -- loop until there is one fewer process (TODO)
          wait             -- loop until there are no processes
          wait $!          -- loop until job state is Done (process or pipeline)
          Process::Wait()  -- loop until Process state is done
          Pipeline::Wait() -- loop until Pipeline state is done

        Comparisons:
          bash: jobs.c waitchld() Has a special case macro(!) CHECK_WAIT_INTR for
          the wait builtin

          dash: jobs.c waitproc() uses sigfillset(), sigprocmask(), etc.  Runs in a
          loop while (gotsigchld), but that might be a hack for System V!

        Should we have a cleaner API like posix::wait_for_one() ?

        wait_result =
          NoChildren               -- ECHILD - no more
        | Exited(int pid)          -- process done - call job_list.PopStatus() for status
                                   # do we also we want ExitedWithSignal() ?
        | Stopped(int pid)
        | Interrupted(int sig_num) -- may or may not retry
        | UntrappedSigwinch        -- ignored

        | NoChange                 -- for WNOHANG - is this a different API?
        """
        #waitpid_options |= WCONTINUED

        # pyos.WaitPid wraps waitpid(-1, ...); on failure pid < 0 and the
        # 'status' slot carries errno instead of a wait status.
        pid, status = pyos.WaitPid(waitpid_options)
        if pid == 0:
            return W1_NO_CHANGE, NO_ARG  # WNOHANG passed, and no state changes

        if pid < 0:  # error case
            err_num = status
            #log('waitpid() error => %d %s', e.errno, pyutil.strerror(e))
            if err_num == ECHILD:
                return W1_NO_CHILDREN, NO_ARG  # nothing to wait for caller

            if err_num == EINTR:  # Bug #858 fix
                # e.g. 1 for SIGHUP, or also be UNTRAPPED_SIGWINCH == -1
                last_sig = self.signal_safe.LastSignal()
                if last_sig == iolib.UNTRAPPED_SIGWINCH:
                    return iolib.UNTRAPPED_SIGWINCH, NO_ARG
                else:
                    # Report which signal interrupted the waitpid() call
                    return W1_CALL_INTR, last_sig

            # No other errors? Man page says waitpid(INT_MIN) == ESRCH, "no
            # such process", an invalid PID
            raise AssertionError()

        # All child processes are supposed to be in this dict.  Even if a
        # grandchild outlives the child (its parent), the shell does NOT become
        # the parent.  The init process does.
        proc = self.job_list.child_procs.get(pid)

        if proc is None and self.exec_opts.verbose_warn():
            print_stderr("oils: PID %d exited, but oils didn't start it" % pid)

        if 0:  # flip to 1 to dump the job list when debugging
            self.job_list.DebugPrint()

        # Decode the wait status: exactly one of the three WIF* cases holds.
        was_stopped = False
        if WIFSIGNALED(status):
            term_sig = WTERMSIG(status)
            # Shell convention: death by signal is reported as 128 + signal
            status = 128 + term_sig

            # Print newline after Ctrl-C.
            if term_sig == SIGINT:
                print('')

            if proc:
                proc.WhenExited(pid, status)

        elif WIFEXITED(status):
            # Replace the raw wait status with the child's exit code
            status = WEXITSTATUS(status)
            if proc:
                proc.WhenExited(pid, status)

        elif WIFSTOPPED(status):
            was_stopped = True

            stop_sig = WSTOPSIG(status)

            if proc:
                proc.WhenStopped(stop_sig)

        # This would be more consistent, but it's an extension to POSIX
        #elif WIFCONTINUED(status):
        #  if proc:
        #    proc.WhenContinued()

        else:
            raise AssertionError(status)

        self.last_status = status  # for wait -n
        self.tracer.OnProcessEnd(pid, status)

        if was_stopped:
            return W1_STOPPED, pid
        else:
            return W1_EXITED, pid

    def PollForEvents(self):
        # type: () -> None
        """For the interactive shell to print when processes have exited."""
        while True:
            # WNOHANG: never block; drain all pending state changes
            result, _ = self.WaitForOne(waitpid_options=WNOHANG)

            if result == W1_NO_CHANGE:
                break
            if result == W1_NO_CHILDREN:
                break

            # Keep polling here
            assert result in (W1_EXITED, W1_STOPPED), result
            # W1_CALL_INTR and iolib.UNTRAPPED_SIGWINCH should not happen,
            # because WNOHANG is a non-blocking call