OILS / builtin / process_osh.py View on Github | oils.pub

683 lines, 420 significant
1#!/usr/bin/env python2
2"""
3builtin_process.py - Builtins that deal with processes or modify process state.
4
5This is sort of the opposite of builtin_pure.py.
6"""
7from __future__ import print_function
8
9import resource
10from resource import (RLIM_INFINITY, RLIMIT_CORE, RLIMIT_CPU, RLIMIT_DATA,
11 RLIMIT_FSIZE, RLIMIT_NOFILE, RLIMIT_STACK, RLIMIT_AS)
12from signal import SIGCONT
13
14from _devbuild.gen import arg_types
15from _devbuild.gen.syntax_asdl import loc, CompoundWord
16from _devbuild.gen.runtime_asdl import (cmd_value, job_state_e, wait_status,
17 wait_status_e)
18from core import dev
19from core import error
20from core.error import e_usage, e_die_status
21from core import process # W1_EXITED, etc.
22from core import pyos
23from core import pyutil
24from core import vm
25from frontend import flag_util
26from frontend import match
27from frontend import typed_args
28from mycpp import mops
29from mycpp import mylib
30from mycpp.mylib import log, tagswitch, print_stderr
31
32import posix_ as posix
33
34from typing import TYPE_CHECKING, List, Tuple, Optional, cast
35if TYPE_CHECKING:
36 from core.process import Waiter, ExternalProgram, FdState
37 from core import executor
38 from core import state
39 from display import ui
40
41_ = log
42
43
class Jobs(vm._Builtin):
    """The 'jobs' builtin: display the shell's job table."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('jobs', cmd_val)
        arg = arg_types.jobs(attrs.attrs)

        # -l selects the long format, -p prints PIDs only; plain 'jobs'
        # uses the default format.  -l takes precedence over -p.
        display_style = process.STYLE_DEFAULT
        if arg.l:
            display_style = process.STYLE_LONG
        elif arg.p:
            display_style = process.STYLE_PID_ONLY

        self.job_list.DisplayJobs(display_style)

        if arg.debug:
            self.job_list.DebugPrint()

        return 0
70
71
class Fg(vm._Builtin):
    """The 'fg' builtin: resume a stopped/background job in the foreground.

    Gives the job's process group the terminal, sends SIGCONT, then waits
    for it and returns its exit status.
    """

    def __init__(self, job_control, job_list, waiter):
        # type: (process.JobControl, process.JobList, Waiter) -> None
        self.job_control = job_control
        self.job_list = job_list
        self.waiter = waiter
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        job_spec = ''  # Job spec for current job is the default
        if len(cmd_val.argv) > 1:
            job_spec = cmd_val.argv[1]

        job = self.job_list.JobFromSpec(job_spec)
        # note: the 'wait' builtin falls back to JobFromPid()
        if job is None:
            print_stderr('fg: No job to put in the foreground')
            return 1

        pgid = job.ProcessGroupId()
        assert pgid != process.INVALID_PGID, \
            'Processes put in the background should have a PGID'

        # Put the job's process group back into the foreground. GiveTerminal() must
        # be called before sending SIGCONT or else the process might immediately get
        # suspended again if it tries to read/write on the terminal.
        self.job_control.MaybeGiveTerminal(pgid)
        posix.killpg(pgid, SIGCONT)  # Send signal

        if self.exec_opts.interactive():
            # e.g. "[%1] PID 1234 Continued"
            print_stderr('[%%%d] PID %d Continued' % (job.job_id, pgid))

        # We are not using waitpid(WCONTINUE) and WIFCONTINUED() in
        # WaitForOne() -- it's an extension to POSIX that isn't necessary for 'fg'
        job.SetForeground()
        job.state = job_state_e.Running

        status = -1

        # Block until the job exits, stops, or we're cancelled by a signal.
        wait_st = job.JobWait(self.waiter)
        UP_wait_st = wait_st
        with tagswitch(wait_st) as case:
            if case(wait_status_e.Proc):
                wait_st = cast(wait_status.Proc, UP_wait_st)
                if wait_st.state == job_state_e.Exited:
                    # Only reap/clean up if it actually exited (it may have
                    # stopped again).
                    self.job_list.PopChildProcess(job.PidForWait())
                    self.job_list.CleanupWhenJobExits(job)
                status = wait_st.code

            elif case(wait_status_e.Pipeline):
                wait_st = cast(wait_status.Pipeline, UP_wait_st)
                # TODO: handle PIPESTATUS?  Is this right?
                status = wait_st.codes[-1]

            elif case(wait_status_e.Cancelled):
                wait_st = cast(wait_status.Cancelled, UP_wait_st)
                # Shell convention: 128 + signal number
                status = 128 + wait_st.sig_num

            else:
                raise AssertionError()

        return status
138
139
class Bg(vm._Builtin):
    """The 'bg' builtin: resume a stopped job in the background.

    Currently unimplemented; always raises error.Usage.
    """

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        # How does this differ from 'fg'?  It doesn't wait and it sets controlling
        # terminal?

        raise error.Usage("isn't implemented", loc.Missing)
154
155
class Fork(vm._Builtin):
    """The 'fork' builtin: run a block as a background job."""

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('fork',
                                         cmd_val,
                                         accept_typed_args=True)

        # 'fork' takes no positional args -- only a typed block.
        extra_arg, extra_loc = arg_r.Peek2()
        if extra_arg is not None:
            e_usage('got unexpected argument %r' % extra_arg, extra_loc)

        frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunBackgroundJob(frag)
174
175
class ForkWait(vm._Builtin):
    """The 'forkwait' builtin: run a block in a subshell and wait for it."""

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('forkwait',
                                         cmd_val,
                                         accept_typed_args=True)

        # 'forkwait' takes no positional args -- only a typed block.
        extra_arg, extra_loc = arg_r.Peek2()
        if extra_arg is not None:
            e_usage('got unexpected argument %r' % extra_arg, extra_loc)

        frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunSubshell(frag)
193
194
class Exec(vm._Builtin):
    """The 'exec' builtin.

    With no arguments, makes this shell's redirects permanent.  With
    arguments, replaces the shell process with the given external command
    (never returns on success; dies with status 127 if not found).
    """

    def __init__(
            self,
            mem,  # type: state.Mem
            ext_prog,  # type: ExternalProgram
            fd_state,  # type: FdState
            search_path,  # type: executor.SearchPath
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.mem = mem
        self.ext_prog = ext_prog
        self.fd_state = fd_state
        self.search_path = search_path
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('exec', cmd_val)

        # Apply redirects in this shell. # NOTE: Redirects were processed earlier.
        if arg_r.AtEnd():
            self.fd_state.MakePermanent()
            return 0

        environ = self.mem.GetEnv()
        i = arg_r.i
        cmd = cmd_val.argv[i]
        argv0_path = self.search_path.CachedLookup(cmd)
        if argv0_path is None:
            # Fixed: blame the command word itself (index i), not arg_locs[1],
            # which is wrong when the parser consumed flags like '--'.
            e_die_status(127, 'exec: %r not found' % cmd, cmd_val.arg_locs[i])

        # shift off 'exec', and remove typed args because they don't apply
        c2 = cmd_value.Argv(cmd_val.argv[i:], cmd_val.arg_locs[i:],
                            cmd_val.is_last_cmd, cmd_val.self_obj, None)

        self.ext_prog.Exec(argv0_path, c2, environ)  # NEVER RETURNS
        # makes mypy and C++ compiler happy
        raise AssertionError('unreachable')
239
240
class Wait(vm._Builtin):
    """
    wait: wait [-n] [id ...]
        Wait for job completion and return exit status.

        Waits for each process identified by an ID, which may be a process ID or a
        job specification, and reports its termination status.  If ID is not
        given, waits for all currently active child processes, and the return
        status is zero.  If ID is a job specification, waits for all processes
        in that job's pipeline.

        If the -n option is supplied, waits for the next job to terminate and
        returns its exit status.

    Exit Status:
        Returns the status of the last ID; fails if ID is invalid or an invalid
        option is given.
    """

    def __init__(
            self,
            waiter,  # type: Waiter
            job_list,  # type: process.JobList
            mem,  # type: state.Mem
            tracer,  # type: dev.Tracer
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.waiter = waiter
        self.job_list = job_list
        self.mem = mem
        self.tracer = tracer
        self.errfmt = errfmt
        # Fixed: _WaitNext() reads self.exec_opts, but it was never assigned
        # here (AttributeError on the xtrace path).  Mirror Fg, which also
        # takes exec_opts off the Waiter.
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        with dev.ctx_Tracer(self.tracer, 'wait', cmd_val.argv):
            return self._Run(cmd_val)

    def _WaitForJobs(self, job_ids, arg_locs):
        # type: (List[str], List[CompoundWord]) -> int
        """Wait for explicitly named jobs/PIDs; return the last one's status."""

        # Get list of jobs.  Then we need to check if they are ALL stopped.
        # Returns the exit code of the last one on the COMMAND LINE, not the
        # exit code of last one to FINISH.

        jobs = []  # type: List[process.Job]
        for i, job_id in enumerate(job_ids):
            location = arg_locs[i]

            job = None  # type: Optional[process.Job]
            if job_id == '' or job_id.startswith('%'):
                job = self.job_list.JobFromSpec(job_id)

            if job is None:
                # Does it look like a PID?
                try:
                    pid = int(job_id)
                except ValueError:
                    raise error.Usage(
                        'expected PID or jobspec, got %r' % job_id, location)

                job = self.job_list.JobFromPid(pid)

            if job is None:
                # Fixed typo: "was't" -> "wasn't"
                self.errfmt.Print_("Job %s wasn't found" % job_id,
                                   blame_loc=location)
                return 127

            jobs.append(job)

        status = 1  # error
        for job in jobs:
            # polymorphic call: Process, Pipeline
            wait_st = job.JobWait(self.waiter)

            UP_wait_st = wait_st
            with tagswitch(wait_st) as case:
                if case(wait_status_e.Proc):
                    wait_st = cast(wait_status.Proc, UP_wait_st)
                    if wait_st.state == job_state_e.Exited:
                        self.job_list.PopChildProcess(job.PidForWait())
                        self.job_list.CleanupWhenJobExits(job)
                    status = wait_st.code

                elif case(wait_status_e.Pipeline):
                    wait_st = cast(wait_status.Pipeline, UP_wait_st)
                    # TODO: handle PIPESTATUS?  Is this right?
                    status = wait_st.codes[-1]

                    # It would be logical to set PIPESTATUS here, but it's NOT
                    # what other shells do
                    #
                    # I think PIPESTATUS is legacy, and we can design better
                    # YSH semantics
                    #self.mem.SetPipeStatus(wait_st.codes)

                elif case(wait_status_e.Cancelled):
                    wait_st = cast(wait_status.Cancelled, UP_wait_st)
                    status = 128 + wait_st.sig_num

                else:
                    raise AssertionError()

        # Return the last status
        return status

    def _WaitNext(self):
        # type: () -> int
        """Implement 'wait -n': wait until one more job finishes."""

        # Loop until there is one fewer process running, there's nothing to wait
        # for, or there's a signal
        n = self.job_list.NumRunning()
        if n == 0:
            status = 127
        else:
            target = n - 1
            status = 0
            while self.job_list.NumRunning() > target:
                result, w1_arg = self.waiter.WaitForOne()
                if result == process.W1_EXITED:
                    pid = w1_arg
                    pr = self.job_list.PopChildProcess(pid)
                    # TODO: background pipelines don't clean up properly,
                    # because only the last PID is registered in
                    # job_list.pid_to_job
                    self.job_list.CleanupWhenProcessExits(pid)

                    if pr is not None:
                        status = pr.status
                    elif self.exec_opts.xtrace():
                        print_stderr(
                            "oils: PID %d exited, but oils didn't start it" %
                            pid)

                elif result == process.W1_NO_CHILDREN:
                    status = 127
                    break

                elif result == process.W1_CALL_INTR:  # signal
                    status = 128 + w1_arg
                    break

        return status

    def _Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('wait', cmd_val)
        arg = arg_types.wait(attrs.attrs)

        job_ids, arg_locs = arg_r.Rest2()

        if len(job_ids):
            # Note: -n and --all ignored in this case, like bash
            return self._WaitForJobs(job_ids, arg_locs)

        if arg.n:
            return self._WaitNext()

        # 'wait' or wait --all

        status = 0

        # Note: NumRunning() makes sure we ignore stopped processes, which
        # cause WaitForOne() to return
        while self.job_list.NumRunning() != 0:
            result, w1_arg = self.waiter.WaitForOne()
            if result == process.W1_EXITED:
                pid = w1_arg
                pr = self.job_list.PopChildProcess(pid)
                # TODO: background pipelines don't clean up properly, because
                # only the last PID is registered in job_list.pid_to_job
                self.job_list.CleanupWhenProcessExits(pid)

                # Fixed: PopChildProcess() may return None for a PID we didn't
                # start (see _WaitNext); guard before reading pr.status.
                if pr is not None:
                    if arg.verbose:
                        self.errfmt.PrintMessage(
                            '(wait) PID %d exited with status %d' %
                            (pid, pr.status), cmd_val.arg_locs[0])

                    if pr.status != 0 and arg.all:  # YSH extension: respect failure
                        if arg.verbose:
                            self.errfmt.PrintMessage(
                                'wait --all: will fail with status 1')
                        status = 1  # set status, but keep waiting

            if result == process.W1_NO_CHILDREN:
                break  # status is 0

            if result == process.W1_CALL_INTR:
                status = 128 + w1_arg
                break

        return status
436
437
class Umask(vm._Builtin):
    """The 'umask' builtin: print or set the file creation mask."""

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""
        pass

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        argv = cmd_val.argv[1:]
        num_args = len(argv)

        if num_args == 0:
            # umask() has a dumb API: you can't read it without modifying it,
            # so set it to 0 and immediately restore the previous value.
            # NOTE: dash disables interrupts around the two umask() calls, but
            # that shouldn't be a concern for us.  Signal handlers won't call
            # umask().
            mask = posix.umask(0)
            posix.umask(mask)
            print('0%03o' % mask)  # octal format
            return 0

        if num_args == 1:
            try:
                new_mask = int(argv[0], 8)
            except ValueError:
                # NOTE: This also happens when we have '8' or '9' in the input.
                print_stderr(
                    "oils warning: umask with symbolic input isn't implemented"
                )
                return 1

            posix.umask(new_mask)
            return 0

        e_usage('umask: unexpected arguments', loc.Missing)
473
474
def _LimitString(lim, factor):
    # type: (mops.BigInt, int) -> str
    """Format an rlimit value for display, scaled down by 'factor'."""
    if not mops.Equal(lim, mops.FromC(RLIM_INFINITY)):
        scaled = mops.Div(lim, mops.IntWiden(factor))
        return mops.ToStr(scaled)
    return 'unlimited'
482
483
class Ulimit(vm._Builtin):
    """The 'ulimit' builtin: show or set process resource limits."""

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""

        # Lazily-built table; see _Table().
        self._table = None  # type: List[Tuple[str, int, int, str]]

    def _Table(self):
        # type: () -> List[Tuple[str, int, int, str]]
        """Return the table of supported resources, building it on first use."""

        # POSIX 2018
        #
        # https://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html
        if self._table is None:
            # This table matches _ULIMIT_RESOURCES in frontend/flag_def.py

            # flag, RLIMIT_X, factor, description
            self._table = [
                # Following POSIX and most shells except bash, -f is in
                # blocks of 512 bytes
                ('-c', RLIMIT_CORE, 512, 'core dump size'),
                ('-d', RLIMIT_DATA, 1024, 'data segment size'),
                ('-f', RLIMIT_FSIZE, 512, 'file size'),
                ('-n', RLIMIT_NOFILE, 1, 'file descriptors'),
                ('-s', RLIMIT_STACK, 1024, 'stack size'),
                ('-t', RLIMIT_CPU, 1, 'CPU seconds'),
                ('-v', RLIMIT_AS, 1024, 'address space size'),
            ]

        return self._table

    def _FindFactor(self, what):
        # type: (int) -> int
        """Return the display/scaling factor for an RLIMIT_* constant."""
        for _, w, factor, _ in self._Table():
            if w == what:
                return factor
        raise AssertionError()

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        attrs, arg_r = flag_util.ParseCmdVal('ulimit', cmd_val)
        arg = arg_types.ulimit(attrs.attrs)

        what = 0
        num_what_flags = 0

        if arg.c:
            what = RLIMIT_CORE
            num_what_flags += 1

        if arg.d:
            what = RLIMIT_DATA
            num_what_flags += 1

        if arg.f:
            what = RLIMIT_FSIZE
            num_what_flags += 1

        if arg.n:
            what = RLIMIT_NOFILE
            num_what_flags += 1

        if arg.s:
            what = RLIMIT_STACK
            num_what_flags += 1

        if arg.t:
            what = RLIMIT_CPU
            num_what_flags += 1

        if arg.v:
            what = RLIMIT_AS
            num_what_flags += 1

        if num_what_flags > 1:
            raise error.Usage(
                'can only handle one resource at a time; got too many flags',
                cmd_val.arg_locs[0])

        # Print all
        show_all = arg.a or arg.all
        if show_all:
            if num_what_flags > 0:
                raise error.Usage("doesn't accept resource flags with -a",
                                  cmd_val.arg_locs[0])

            extra, extra_loc = arg_r.Peek2()
            if extra is not None:
                raise error.Usage('got extra arg with -a', extra_loc)

            # Worst case 20 == len(str(2**64))
            fmt = '%5s %15s %15s %7s %s'
            print(fmt % ('FLAG', 'SOFT', 'HARD', 'FACTOR', 'DESC'))
            # Loop vars renamed so they don't shadow 'what' above.
            for flag, res, res_factor, desc in self._Table():
                soft, hard = pyos.GetRLimit(res)

                soft2 = _LimitString(soft, res_factor)
                hard2 = _LimitString(hard, res_factor)
                print(fmt % (flag, soft2, hard2, str(res_factor), desc))

            return 0

        if num_what_flags == 0:
            what = RLIMIT_FSIZE  # -f is the default

        # Fixed: compute the factor once, up front.  It was previously left
        # unbound on the 'unlimited' path, so the error report after a failed
        # SetRLimit() crashed with a NameError instead of printing.
        factor = self._FindFactor(what)

        s, s_loc = arg_r.Peek2()

        if s is None:
            # No argument: print the current soft limit (or hard, with -H).
            soft, hard = pyos.GetRLimit(what)
            if arg.H:
                print(_LimitString(hard, factor))
            else:
                print(_LimitString(soft, factor))
            return 0

        # Set the given resource
        if s == 'unlimited':
            # In C, RLIM_INFINITY is rlim_t
            limit = mops.FromC(RLIM_INFINITY)
        else:
            if match.LooksLikeInteger(s):
                ok, big_int = mops.FromStr2(s)
                if not ok:
                    raise error.Usage('Integer too big: %s' % s, s_loc)
            else:
                raise error.Usage(
                    "expected a number or 'unlimited', got %r" % s, s_loc)

            if mops.Greater(mops.IntWiden(0), big_int):
                raise error.Usage(
                    "doesn't accept negative numbers, got %r" % s, s_loc)

            fac = mops.IntWiden(factor)
            limit = mops.Mul(big_int, fac)

            # Overflow check like bash does
            # TODO: This should be replaced with a different overflow check
            # when we have arbitrary precision integers
            if not mops.Equal(mops.Div(limit, fac), big_int):
                raise error.Usage(
                    'detected integer overflow: %s' % mops.ToStr(big_int),
                    s_loc)

        arg_r.Next()
        extra2, extra_loc2 = arg_r.Peek2()
        if extra2 is not None:
            raise error.Usage('got extra arg', extra_loc2)

        # Now set the resource
        soft, hard = pyos.GetRLimit(what)

        # For error message
        old_soft = soft
        old_hard = hard

        # Bash behavior: manipulate both, unless a flag is passed.  This
        # differs from zsh!
        if not arg.S and not arg.H:
            soft = limit
            hard = limit
        if arg.S:
            soft = limit
        if arg.H:
            hard = limit

        if mylib.PYTHON:
            try:
                pyos.SetRLimit(what, soft, hard)
            except OverflowError:  # only happens in CPython
                raise error.Usage('detected overflow', s_loc)
            except (ValueError, resource.error) as e:
                # Annoying: Python binding changes IOError -> ValueError

                print_stderr('oils: ulimit error: %s' % e)

                # Extra info we could expose in C++ too
                print_stderr('soft=%s hard=%s -> soft=%s hard=%s' % (
                    _LimitString(old_soft, factor),
                    _LimitString(old_hard, factor),
                    _LimitString(soft, factor),
                    _LimitString(hard, factor),
                ))
                return 1
        else:
            try:
                pyos.SetRLimit(what, soft, hard)
            except (IOError, OSError) as e:
                print_stderr('oils: ulimit error: %s' % pyutil.strerror(e))
                return 1

        return 0
681
682
683# vim: sw=4